use std::fs::File;
use std::io::SeekFrom;
use std::ops::{Deref, Range};
use std::sync::{Arc, Mutex};

use bitvec::vec::BitVec;
use kvm_bindings::{KVM_MEM_LOG_DIRTY_PAGES, kvm_userspace_memory_region};
use log::error;
use serde::{Deserialize, Serialize};
pub use vm_memory::bitmap::{AtomicBitmap, BS, Bitmap, BitmapSlice};
pub use vm_memory::mmap::MmapRegionBuilder;
use vm_memory::mmap::{MmapRegionError, NewBitmap};
pub use vm_memory::{
    Address, ByteValued, Bytes, FileOffset, GuestAddress, GuestMemory, GuestMemoryRegion,
    GuestUsize, MemoryRegionAddress, MmapRegion, address,
};
use vm_memory::{GuestMemoryError, GuestMemoryRegionBytes, VolatileSlice, WriteVolatile};
use vmm_sys_util::errno;

use crate::utils::{get_page_size, u64_to_usize};
use crate::vmm_config::machine_config::HugePageConfig;
use crate::vstate::vm::VmError;
use crate::{DirtyBitmap, Vm};

/// Mmap-backed guest memory region, with an optional dirty-page bitmap.
pub type GuestRegionMmap = vm_memory::GuestRegionMmap<Option<AtomicBitmap>>;
/// Collection of the extended guest memory regions making up the guest address space.
pub type GuestMemoryMmap = vm_memory::GuestRegionCollection<GuestRegionMmapExt>;
/// Raw mmap region, with an optional dirty-page bitmap.
pub type GuestMmapRegion = vm_memory::MmapRegion<Option<AtomicBitmap>>;

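/// Errors that can occur while creating, restoring, or manipulating guest memory.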
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum MemoryError {
    /// Cannot fetch system page size: {0}
    PageSize(errno::Error),
    /// Cannot dump memory: {0}
    WriteMemory(GuestMemoryError),
    /// Cannot create mmap region: {0}
    MmapRegionError(MmapRegionError),
    /// Cannot create guest memory region
    VmMemoryError,
    /// Cannot create memfd: {0}
    Memfd(memfd::Error),
    /// Cannot resize memfd file: {0}
    MemfdSetLen(std::io::Error),
    /// Total memory size exceeds the largest supported file offset
    OffsetTooLarge,
    /// Cannot read file metadata: {0}
    FileMetadata(std::io::Error),
    /// Memory region size is inconsistent with its slot layout
    Unaligned,
    /// Cannot change memory protection: {0}
    Mprotect(std::io::Error),
}

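/// Type of a guest memory region, determining whether its slots can be hot(un)plugged.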
#[derive(Copy, Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub enum GuestRegionType {
    /// Regular DRAM region, created with all of its memory plugged.
    Dram,
    /// Region whose memory slots can be hot-plugged and hot-unplugged at runtime.
    Hotpluggable,
}

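/// A guest memory region together with the KVM slot bookkeeping needed for memory hotplug.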
#[derive(Debug)]
pub struct GuestRegionMmapExt {
    /// The underlying mmap-backed region.
    pub inner: GuestRegionMmap,
    /// Whether this region is DRAM or hotpluggable memory.
    pub region_type: GuestRegionType,
    /// Index of the first KVM memory slot backing this region.
    pub slot_from: u32,
    /// Size in bytes of each KVM memory slot in this region.
    pub slot_size: usize,
    /// Plugged/unplugged state of each slot in this region.
    pub plugged: Mutex<BitVec>,
}

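/// A view of a single KVM memory slot within a guest memory region.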
#[derive(Debug)]
pub struct GuestMemorySlot<'a> {
    /// KVM memory slot index.
    pub(crate) slot: u32,
    /// Guest physical address at which this slot starts.
    pub(crate) guest_addr: GuestAddress,
    /// Host-side volatile slice covering the slot's memory.
    pub(crate) slice: VolatileSlice<'a, BS<'a, Option<AtomicBitmap>>>,
}

impl From<&GuestMemorySlot<'_>> for kvm_userspace_memory_region {
    fn from(mem_slot: &GuestMemorySlot) -> Self {
        let flags = if mem_slot.slice.bitmap().is_some() {
            KVM_MEM_LOG_DIRTY_PAGES
        } else {
            0
        };
        kvm_userspace_memory_region {
            flags,
            slot: mem_slot.slot,
            guest_phys_addr: mem_slot.guest_addr.raw_value(),
            memory_size: mem_slot.slice.len() as u64,
            userspace_addr: mem_slot.slice.ptr_guard().as_ptr() as u64,
        }
    }
}

impl<'a> GuestMemorySlot<'a> {
    /// Writes the pages of this slot that are dirty in either the KVM bitmap or the
    /// firecracker bitmap to `writer`, seeking over clean pages.
    pub(crate) fn dump_dirty<T: WriteVolatile + std::io::Seek>(
        &self,
        writer: &mut T,
        kvm_bitmap: &[u64],
        page_size: usize,
    ) -> Result<(), GuestMemoryError> {
        let firecracker_bitmap = self.slice.bitmap();
        let mut write_size = 0;
        let mut skip_size = 0;
        let mut dirty_batch_start = 0;

        for (i, v) in kvm_bitmap.iter().enumerate() {
            for j in 0..64 {
                let is_kvm_page_dirty = ((v >> j) & 1u64) != 0u64;
                let page_offset = ((i * 64) + j) * page_size;
                let is_firecracker_page_dirty = firecracker_bitmap.dirty_at(page_offset);

                if is_kvm_page_dirty || is_firecracker_page_dirty {
                    // A dirty page ends the current clean batch: seek over the clean pages
                    // and start accumulating a new dirty batch.
                    if skip_size > 0 {
                        writer
                            .seek(SeekFrom::Current(skip_size.try_into().unwrap()))
                            .unwrap();
                        dirty_batch_start = page_offset;
                        skip_size = 0;
                    }
                    write_size += page_size;
                } else {
                    // A clean page ends the current dirty batch: flush it to the writer.
                    if write_size > 0 {
                        let slice = &self.slice.subslice(dirty_batch_start, write_size)?;
                        writer.write_all_volatile(slice)?;
                        write_size = 0;
                    }
                    skip_size += page_size;
                }
            }
        }

        if write_size > 0 {
            writer.write_all_volatile(&self.slice.subslice(dirty_batch_start, write_size)?)?;
        }

        Ok(())
    }

    /// Toggles host memory protection for this slot: `PROT_NONE` when protected,
    /// `PROT_READ | PROT_WRITE` otherwise.
    pub(crate) fn protect(&self, protected: bool) -> Result<(), MemoryError> {
        let prot = if protected {
            libc::PROT_NONE
        } else {
            libc::PROT_READ | libc::PROT_WRITE
        };
        // SAFETY: The pointer and length describe this slot's existing mapping, and `prot`
        // is a valid protection flag combination.
        let ret = unsafe {
            libc::mprotect(
                self.slice.ptr_guard_mut().as_ptr().cast(),
                self.slice.len(),
                prot,
            )
        };
        if ret != 0 {
            Err(MemoryError::Mprotect(std::io::Error::last_os_error()))
        } else {
            Ok(())
        }
    }
}

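/// Returns `true` if `addr` lies within the half-open range `[start, start + len)`.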
fn addr_in_range(addr: GuestAddress, start: GuestAddress, len: usize) -> bool {
    if let Some(end) = start.checked_add(len as u64) {
        addr >= start && addr < end
    } else {
        false
    }
}

impl GuestRegionMmapExt {
    /// Wraps `region` into a DRAM region backed by a single, always-plugged KVM slot.
    pub(crate) fn dram_from_mmap_region(region: GuestRegionMmap, slot: u32) -> Self {
        let slot_size = u64_to_usize(region.len());
        GuestRegionMmapExt {
            inner: region,
            region_type: GuestRegionType::Dram,
            slot_from: slot,
            slot_size,
            plugged: Mutex::new(BitVec::repeat(true, 1)),
        }
    }

    /// Wraps `region` into a hotpluggable region split into slots of `slot_size` bytes,
    /// all initially unplugged.
    pub(crate) fn hotpluggable_from_mmap_region(
        region: GuestRegionMmap,
        slot_from: u32,
        slot_size: usize,
    ) -> Self {
        let slot_cnt = u64_to_usize(region.len()) / slot_size;

        GuestRegionMmapExt {
            inner: region,
            region_type: GuestRegionType::Hotpluggable,
            slot_from,
            slot_size,
            plugged: Mutex::new(BitVec::repeat(false, slot_cnt)),
        }
    }

    /// Restores a region from its snapshot state, preserving the plugged state of each slot.
    pub(crate) fn from_state(
        region: GuestRegionMmap,
        state: &GuestMemoryRegionState,
        slot_from: u32,
    ) -> Result<Self, MemoryError> {
        let slot_cnt = state.plugged.len();
        let slot_size = u64_to_usize(region.len())
            .checked_div(slot_cnt)
            .ok_or(MemoryError::Unaligned)?;

        Ok(GuestRegionMmapExt {
            inner: region,
            slot_size,
            region_type: state.region_type,
            slot_from,
            plugged: Mutex::new(BitVec::from_iter(state.plugged.iter())),
        })
    }

    /// Returns the number of KVM slots backing this region.
    pub(crate) fn slot_cnt(&self) -> u32 {
        u32::try_from(u64_to_usize(self.len()) / self.slot_size).unwrap()
    }

    /// Returns the range of KVM slot indices backing this region.
    pub fn slot_range(&self) -> Range<u32> {
        self.slot_from..self.slot_from + self.slot_cnt()
    }

    /// Returns the size in bytes of each KVM slot in this region.
    pub fn slot_size(&self) -> usize {
        self.slot_size
    }

    /// Returns the guest physical address at which `slot` starts, or `None` if the slot
    /// does not belong to this region.
    pub fn slot_base(&self, slot: u32) -> Option<GuestAddress> {
        if slot < self.slot_from || slot >= self.slot_from + self.slot_cnt() {
            return None;
        }
        let offset = ((slot - self.slot_from) as u64) * (self.slot_size as u64);
        Some(self.start_addr().unchecked_add(offset))
    }

    /// Returns a [`GuestMemorySlot`] view of `slot`, which must belong to this region.
    pub(crate) fn mem_slot(&self, slot: u32) -> GuestMemorySlot<'_> {
        assert!(slot >= self.slot_from && slot < self.slot_from + self.slot_cnt());

        let offset = ((slot - self.slot_from) as u64) * (self.slot_size as u64);

        GuestMemorySlot {
            slot,
            guest_addr: self.start_addr().unchecked_add(offset),
            slice: self
                .inner
                .get_slice(MemoryRegionAddress(offset), self.slot_size)
                .expect("slot range should be valid"),
        }
    }

    /// Iterates over all slots of this region together with their plugged state.
    pub(crate) fn slots(&self) -> impl Iterator<Item = (GuestMemorySlot<'_>, bool)> {
        self.plugged
            .lock()
            .unwrap()
            .iter()
            .enumerate()
            .map(|(i, b)| {
                (
                    self.mem_slot(self.slot_from + u32::try_from(i).unwrap()),
                    *b,
                )
            })
            .collect::<Vec<_>>()
            .into_iter()
    }

    /// Iterates over the slots of this region that are currently plugged.
    pub(crate) fn plugged_slots(&self) -> impl Iterator<Item = GuestMemorySlot<'_>> {
        self.slots()
            .filter(|(_, plugged)| *plugged)
            .map(|(slot, _)| slot)
    }

    /// Iterates over the slots of this region whose start or end address falls within the
    /// guest physical range `[from, from + len)`.
    pub(crate) fn slots_intersecting_range(
        &self,
        from: GuestAddress,
        len: usize,
    ) -> impl Iterator<Item = GuestMemorySlot<'_>> {
        self.slots().map(|(slot, _)| slot).filter(move |slot| {
            if let Some(slot_end) = slot.guest_addr.checked_add(slot.slice.len() as u64) {
                addr_in_range(slot.guest_addr, from, len) || addr_in_range(slot_end, from, len)
            } else {
                false
            }
        })
    }

    /// Plugs or unplugs `mem_slot`, updating both the KVM memory slot and the host-side
    /// memory protection. Only valid for hotpluggable regions.
    pub(crate) fn update_slot(
        &self,
        vm: &Vm,
        mem_slot: &GuestMemorySlot<'_>,
        plug: bool,
    ) -> Result<(), VmError> {
        assert!(self.region_type == GuestRegionType::Hotpluggable);

        let mut bitmap_guard = self.plugged.lock().unwrap();
        let prev = bitmap_guard.replace((mem_slot.slot - self.slot_from) as usize, plug);
        if prev == plug {
            return Ok(());
        }

        let mut kvm_region = kvm_userspace_memory_region::from(mem_slot);
        if plug {
            mem_slot.protect(false)?;
            vm.set_user_memory_region(kvm_region)?;
        } else {
            // Setting the memory size to 0 removes the slot from KVM.
            kvm_region.memory_size = 0;
            vm.set_user_memory_region(kvm_region)?;
            mem_slot.protect(true)?;
        }
        Ok(())
    }

    /// Discards the host memory backing the range `[caddr, caddr + len)` of this region.
    pub(crate) fn discard_range(
        &self,
        caddr: MemoryRegionAddress,
        len: usize,
    ) -> Result<(), GuestMemoryError> {
        let phys_address = self.get_host_address(caddr)?;

        match (self.inner.file_offset(), self.inner.flags()) {
            (Some(_), flags) if flags & libc::MAP_PRIVATE != 0 => {
                // For private file-backed mappings, MADV_DONTNEED would repopulate the pages
                // from the file, so remap the range as an anonymous private mapping to get
                // zero pages instead.
                // SAFETY: The address and length describe a valid sub-range of this region's
                // existing mapping.
                let ret = unsafe {
                    libc::mmap(
                        phys_address.cast(),
                        len,
                        libc::PROT_READ | libc::PROT_WRITE,
                        libc::MAP_FIXED | libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
                        -1,
                        0,
                    )
                };
                if ret == libc::MAP_FAILED {
                    let os_error = std::io::Error::last_os_error();
                    error!("discard_range: mmap failed: {:?}", os_error);
                    Err(GuestMemoryError::IOError(os_error))
                } else {
                    Ok(())
                }
            }
            _ => {
                // SAFETY: The address and length describe a valid sub-range of this region's
                // existing mapping.
                let ret = unsafe { libc::madvise(phys_address.cast(), len, libc::MADV_DONTNEED) };
                if ret < 0 {
                    let os_error = std::io::Error::last_os_error();
                    error!("discard_range: madvise failed: {:?}", os_error);
                    Err(GuestMemoryError::IOError(os_error))
                } else {
                    Ok(())
                }
            }
        }
    }
}

impl Deref for GuestRegionMmapExt {
    type Target = MmapRegion<Option<AtomicBitmap>>;

    fn deref(&self) -> &MmapRegion<Option<AtomicBitmap>> {
        &self.inner
    }
}

impl GuestMemoryRegionBytes for GuestRegionMmapExt {}

#[allow(clippy::cast_possible_wrap)]
#[allow(clippy::cast_possible_truncation)]
impl GuestMemoryRegion for GuestRegionMmapExt {
    type B = Option<AtomicBitmap>;

    fn len(&self) -> GuestUsize {
        self.inner.len()
    }

    fn start_addr(&self) -> GuestAddress {
        self.inner.start_addr()
    }

    fn bitmap(&self) -> BS<'_, Self::B> {
        self.inner.bitmap()
    }

    fn get_host_address(
        &self,
        addr: MemoryRegionAddress,
    ) -> vm_memory::guest_memory::Result<*mut u8> {
        self.inner.get_host_address(addr)
    }

    fn file_offset(&self) -> Option<&FileOffset> {
        self.inner.file_offset()
    }

    fn get_slice(
        &self,
        offset: MemoryRegionAddress,
        count: usize,
    ) -> vm_memory::guest_memory::Result<VolatileSlice<'_, BS<'_, Self::B>>> {
        self.inner.get_slice(offset, count)
    }
}

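/// Creates a set of mmap regions at the given guest addresses, optionally backed by
/// consecutive ranges of `file` and optionally tracking dirty pages.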
pub fn create(
    regions: impl Iterator<Item = (GuestAddress, usize)>,
    mmap_flags: libc::c_int,
    file: Option<File>,
    track_dirty_pages: bool,
) -> Result<Vec<GuestRegionMmap>, MemoryError> {
    let mut offset = 0;
    let file = file.map(Arc::new);
    regions
        .map(|(start, size)| {
            let mut builder = MmapRegionBuilder::new_with_bitmap(
                size,
                track_dirty_pages.then(|| AtomicBitmap::with_len(size)),
            )
            .with_mmap_prot(libc::PROT_READ | libc::PROT_WRITE)
            .with_mmap_flags(libc::MAP_NORESERVE | mmap_flags);

            if let Some(ref file) = file {
                let file_offset = FileOffset::from_arc(Arc::clone(file), offset);

                builder = builder.with_file_offset(file_offset);
            }

            offset = match offset.checked_add(size as u64) {
                None => return Err(MemoryError::OffsetTooLarge),
                Some(new_off) if new_off >= i64::MAX as u64 => {
                    return Err(MemoryError::OffsetTooLarge);
                }
                Some(new_off) => new_off,
            };

            GuestRegionMmap::new(
                builder.build().map_err(MemoryError::MmapRegionError)?,
                start,
            )
            .ok_or(MemoryError::VmMemoryError)
        })
        .collect::<Result<Vec<_>, _>>()
}

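/// Creates guest memory regions backed by a single sealed memfd covering all regions.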
pub fn memfd_backed(
    regions: &[(GuestAddress, usize)],
    track_dirty_pages: bool,
    huge_pages: HugePageConfig,
) -> Result<Vec<GuestRegionMmap>, MemoryError> {
    let size = regions.iter().map(|&(_, size)| size as u64).sum();
    let memfd_file = create_memfd(size, huge_pages.into())?.into_file();

    create(
        regions.iter().copied(),
        libc::MAP_SHARED | huge_pages.mmap_flags(),
        Some(memfd_file),
        track_dirty_pages,
    )
}

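/// Creates guest memory regions backed by anonymous, private mappings.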
pub fn anonymous(
    regions: impl Iterator<Item = (GuestAddress, usize)>,
    track_dirty_pages: bool,
    huge_pages: HugePageConfig,
) -> Result<Vec<GuestRegionMmap>, MemoryError> {
    create(
        regions,
        libc::MAP_PRIVATE | libc::MAP_ANONYMOUS | huge_pages.mmap_flags(),
        None,
        track_dirty_pages,
    )
}

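/// Creates guest memory regions backed by a snapshot memory file, mapped privately
/// (copy-on-write). Fails if the file is smaller than the total memory size.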
pub fn snapshot_file(
    file: File,
    regions: impl Iterator<Item = (GuestAddress, usize)>,
    track_dirty_pages: bool,
) -> Result<Vec<GuestRegionMmap>, MemoryError> {
    let regions: Vec<_> = regions.collect();
    let memory_size = regions
        .iter()
        .try_fold(0u64, |acc, (_, size)| acc.checked_add(*size as u64))
        .ok_or(MemoryError::OffsetTooLarge)?;
    let file_size = file.metadata().map_err(MemoryError::FileMetadata)?.len();

    if memory_size > file_size {
        return Err(MemoryError::OffsetTooLarge);
    }

    create(
        regions.into_iter(),
        libc::MAP_PRIVATE,
        Some(file),
        track_dirty_pages,
    )
}

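/// Extension trait adding snapshotting, dirty-page-tracking, and range helpers to guest memory.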
pub trait GuestMemoryExtension
where
    Self: Sized,
{
    /// Describes the guest memory layout as a serializable state.
    fn describe(&self) -> GuestMemoryState;

    /// Marks the pages overlapping `[addr, addr + len)` as dirty in the firecracker bitmap.
    fn mark_dirty(&self, addr: GuestAddress, len: usize);

    /// Dumps all guest memory contents to `writer`.
    fn dump<T: WriteVolatile + std::io::Seek>(&self, writer: &mut T) -> Result<(), MemoryError>;

    /// Dumps only the dirty pages of guest memory to `writer`.
    fn dump_dirty<T: WriteVolatile + std::io::Seek>(
        &self,
        writer: &mut T,
        dirty_bitmap: &DirtyBitmap,
    ) -> Result<(), MemoryError>;

    /// Resets the firecracker dirty bitmaps of all regions.
    fn reset_dirty(&self);

    /// Merges a KVM dirty bitmap into the firecracker dirty bitmaps.
    fn store_dirty_bitmap(&self, dirty_bitmap: &DirtyBitmap, page_size: usize);

    /// Calls `f` for each per-region chunk covering the range `[addr, addr + range_len)`.
    fn try_for_each_region_in_range<F>(
        &self,
        addr: GuestAddress,
        range_len: usize,
        f: F,
    ) -> Result<(), GuestMemoryError>
    where
        F: FnMut(&GuestRegionMmapExt, MemoryRegionAddress, usize) -> Result<(), GuestMemoryError>;

    /// Discards the host memory backing the range `[addr, addr + range_len)`.
    fn discard_range(&self, addr: GuestAddress, range_len: usize) -> Result<(), GuestMemoryError>;
}

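/// Serializable state of a single guest memory region.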
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct GuestMemoryRegionState {
    /// Guest physical address at which the region starts.
    pub base_address: u64,
    /// Region size in bytes.
    pub size: usize,
    /// Whether the region is DRAM or hotpluggable memory.
    pub region_type: GuestRegionType,
    /// Plugged state of each slot in the region.
    pub plugged: Vec<bool>,
}

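/// Serializable state of all guest memory regions.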
#[derive(Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct GuestMemoryState {
    /// States of the individual guest memory regions.
    pub regions: Vec<GuestMemoryRegionState>,
}

impl GuestMemoryState {
    /// Iterates over the (base address, size) pairs of the stored regions.
    pub fn regions(&self) -> impl Iterator<Item = (GuestAddress, usize)> + '_ {
        self.regions
            .iter()
            .map(|region| (GuestAddress(region.base_address), region.size))
    }
}

impl GuestMemoryExtension for GuestMemoryMmap {
    fn describe(&self) -> GuestMemoryState {
        let mut guest_memory_state = GuestMemoryState::default();
        self.iter().for_each(|region| {
            guest_memory_state.regions.push(GuestMemoryRegionState {
                base_address: region.start_addr().0,
                size: u64_to_usize(region.len()),
                region_type: region.region_type,
                plugged: region.plugged.lock().unwrap().iter().by_vals().collect(),
            });
        });
        guest_memory_state
    }

    fn mark_dirty(&self, addr: GuestAddress, len: usize) {
        for slice in self.get_slices(addr, len).flatten() {
            slice.bitmap().mark_dirty(0, slice.len());
        }
    }

    fn dump<T: WriteVolatile + std::io::Seek>(&self, writer: &mut T) -> Result<(), MemoryError> {
        self.iter()
            .flat_map(|region| region.slots())
            .try_for_each(|(mem_slot, plugged)| {
                if !plugged {
                    // Unplugged slots have no content worth saving; leave a hole in the file.
                    let ilen = i64::try_from(mem_slot.slice.len()).unwrap();
                    writer.seek(SeekFrom::Current(ilen)).unwrap();
                } else {
                    writer.write_all_volatile(&mem_slot.slice)?;
                }
                Ok(())
            })
            .map_err(MemoryError::WriteMemory)
    }

    fn dump_dirty<T: WriteVolatile + std::io::Seek>(
        &self,
        writer: &mut T,
        dirty_bitmap: &DirtyBitmap,
    ) -> Result<(), MemoryError> {
        let page_size = get_page_size().map_err(MemoryError::PageSize)?;

        let write_result =
            self.iter()
                .flat_map(|region| region.slots())
                .try_for_each(|(mem_slot, plugged)| {
                    if !plugged {
                        let ilen = i64::try_from(mem_slot.slice.len()).unwrap();
                        writer.seek(SeekFrom::Current(ilen)).unwrap();
                    } else {
                        let kvm_bitmap = dirty_bitmap.get(&mem_slot.slot).unwrap();
                        mem_slot.dump_dirty(writer, kvm_bitmap, page_size)?;
                    }
                    Ok(())
                });

        if write_result.is_err() {
            // The dump failed, so the KVM dirty state must not be lost: fold it into the
            // firecracker bitmaps so a later retry still sees these pages as dirty.
            self.store_dirty_bitmap(dirty_bitmap, page_size);
        } else {
            self.reset_dirty();
        }

        write_result.map_err(MemoryError::WriteMemory)
    }

    fn reset_dirty(&self) {
        self.iter().for_each(|region| {
            if let Some(bitmap) = (**region).bitmap() {
                bitmap.reset();
            }
        })
    }

    fn store_dirty_bitmap(&self, dirty_bitmap: &DirtyBitmap, page_size: usize) {
        self.iter()
            .flat_map(|region| region.plugged_slots())
            .for_each(|mem_slot| {
                let kvm_bitmap = dirty_bitmap.get(&mem_slot.slot).unwrap();
                let firecracker_bitmap = mem_slot.slice.bitmap();

                for (i, v) in kvm_bitmap.iter().enumerate() {
                    for j in 0..64 {
                        let is_kvm_page_dirty = ((v >> j) & 1u64) != 0u64;

                        if is_kvm_page_dirty {
                            let page_offset = ((i * 64) + j) * page_size;

                            firecracker_bitmap.mark_dirty(page_offset, 1)
                        }
                    }
                }
            });
    }

    fn try_for_each_region_in_range<F>(
        &self,
        addr: GuestAddress,
        range_len: usize,
        mut f: F,
    ) -> Result<(), GuestMemoryError>
    where
        F: FnMut(&GuestRegionMmapExt, MemoryRegionAddress, usize) -> Result<(), GuestMemoryError>,
    {
        let mut cur = addr;
        let mut remaining = range_len;

        while let Some(region) = self.find_region(cur) {
            let start = region.to_region_addr(cur).unwrap();
            let len = std::cmp::min(u64_to_usize(region.len() - start.raw_value()), remaining);

            f(region, start, len)?;

            remaining -= len;
            if remaining == 0 {
                return Ok(());
            }

            cur = cur
                .checked_add(len as u64)
                .ok_or(GuestMemoryError::GuestAddressOverflow)?;
        }
        // Ran past the last region without covering the whole range.
        Err(GuestMemoryError::InvalidGuestAddress(cur))
    }

    fn discard_range(&self, addr: GuestAddress, range_len: usize) -> Result<(), GuestMemoryError> {
        self.try_for_each_region_in_range(addr, range_len, |region, start, len| {
            region.discard_range(start, len)
        })
    }
}

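/// Creates a sealed memfd of `mem_size` bytes, optionally backed by huge pages.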
fn create_memfd(
    mem_size: u64,
    hugetlb_size: Option<memfd::HugetlbSize>,
) -> Result<memfd::Memfd, MemoryError> {
    let opts = memfd::MemfdOptions::default()
        .hugetlb(hugetlb_size)
        .allow_sealing(true);
    let mem_file = opts.create("guest_mem").map_err(MemoryError::Memfd)?;

    mem_file
        .as_file()
        .set_len(mem_size)
        .map_err(MemoryError::MemfdSetLen)?;

    let mut seals = memfd::SealsHashSet::new();
    seals.insert(memfd::FileSeal::SealShrink);
    seals.insert(memfd::FileSeal::SealGrow);
    mem_file.add_seals(&seals).map_err(MemoryError::Memfd)?;

    mem_file
        .add_seal(memfd::FileSeal::SealSeal)
        .map_err(MemoryError::Memfd)?;

    Ok(mem_file)
}

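/// Test helpers for building `GuestMemoryMmap` instances from plain regions.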
pub mod test_utils {
    use super::*;

    /// Wraps plain regions into DRAM `GuestRegionMmapExt`s, assigning slot indices in order.
    pub fn into_region_ext(regions: Vec<GuestRegionMmap>) -> GuestMemoryMmap {
        GuestMemoryMmap::from_regions(
            regions
                .into_iter()
                .zip(0u32..)
                .map(|(region, slot)| GuestRegionMmapExt::dram_from_mmap_region(region, slot))
                .collect(),
        )
        .unwrap()
    }
}

#[cfg(test)]
mod tests {
    #![allow(clippy::undocumented_unsafe_blocks)]

    use std::collections::HashMap;
    use std::io::{Read, Seek, Write};

    use vmm_sys_util::tempfile::TempFile;

    use super::*;
    use crate::snapshot::Snapshot;
    use crate::test_utils::single_region_mem;
    use crate::utils::{get_page_size, mib_to_bytes};
    use crate::vstate::memory::test_utils::into_region_ext;

    #[test]
    fn test_anonymous() {
        for dirty_page_tracking in [true, false] {
            let region_size = 0x10000;
            let regions = vec![
                (GuestAddress(0x0), region_size),
                (GuestAddress(0x10000), region_size),
                (GuestAddress(0x20000), region_size),
                (GuestAddress(0x30000), region_size),
            ];

            let guest_memory = anonymous(
                regions.into_iter(),
                dirty_page_tracking,
                HugePageConfig::None,
            )
            .unwrap();
            guest_memory.iter().for_each(|region| {
                assert_eq!(region.bitmap().is_some(), dirty_page_tracking);
            });
        }
    }

    #[test]
    fn test_snapshot_file_success() {
        for dirty_page_tracking in [true, false] {
            let page_size = 0x1000;
            let mut file = TempFile::new().unwrap().into_file();
            file.set_len(page_size as u64).unwrap();
            file.write_all(&vec![0x42u8; page_size]).unwrap();

            let regions = vec![(GuestAddress(0), page_size)];
            let guest_regions =
                snapshot_file(file, regions.into_iter(), dirty_page_tracking).unwrap();
            assert_eq!(guest_regions.len(), 1);
            guest_regions.iter().for_each(|region| {
                assert_eq!(region.bitmap().is_some(), dirty_page_tracking);
            });
        }
    }

    #[test]
    fn test_snapshot_file_multiple_regions() {
        let page_size = 0x1000;
        let total_size = 3 * page_size;
        let mut file = TempFile::new().unwrap().into_file();
        file.set_len(total_size as u64).unwrap();
        file.write_all(&vec![0x42u8; total_size]).unwrap();

        let regions = vec![
            (GuestAddress(0), page_size),
            (GuestAddress(0x10000), page_size),
            (GuestAddress(0x20000), page_size),
        ];
        let guest_regions = snapshot_file(file, regions.into_iter(), false).unwrap();
        assert_eq!(guest_regions.len(), 3);
    }

    #[test]
    fn test_snapshot_file_offset_too_large() {
        let page_size = 0x1000;
        let mut file = TempFile::new().unwrap().into_file();
        file.set_len(page_size as u64).unwrap();
        file.write_all(&vec![0x42u8; page_size]).unwrap();

        let regions = vec![(GuestAddress(0), 2 * page_size)];
        let result = snapshot_file(file, regions.into_iter(), false);
        assert!(matches!(result.unwrap_err(), MemoryError::OffsetTooLarge));
    }

    #[test]
    fn test_mark_dirty() {
        let page_size = get_page_size().unwrap();
        let region_size = page_size * 3;

        let regions = vec![
            (GuestAddress(0), region_size),
            (GuestAddress(region_size as u64), region_size),
            (GuestAddress(region_size as u64 * 2), region_size),
        ];
        let guest_memory =
            into_region_ext(anonymous(regions.into_iter(), true, HugePageConfig::None).unwrap());

        // (offset, len, expected dirty state)
        let dirty_map = [
            (0, page_size, false),
            (page_size, page_size * 2, true),
            (page_size * 3, page_size, false),
            (page_size * 4, page_size * 4, true),
            (page_size * 8, page_size, false),
        ];

        for (addr, len, dirty) in &dirty_map {
            if *dirty {
                guest_memory.mark_dirty(GuestAddress(*addr as u64), *len);
            }
        }

        for (addr, len, dirty) in &dirty_map {
            for slice in guest_memory
                .get_slices(GuestAddress(*addr as u64), *len)
                .flatten()
            {
                for i in 0..slice.len() {
                    assert_eq!(slice.bitmap().dirty_at(i), *dirty);
                }
            }
        }
    }

    fn check_serde<M: GuestMemoryExtension>(guest_memory: &M) {
        let mut snapshot_data = vec![0u8; 10000];
        let original_state = guest_memory.describe();
        Snapshot::new(&original_state)
            .save(&mut snapshot_data.as_mut_slice())
            .unwrap();
        let restored_state = Snapshot::load_without_crc_check(snapshot_data.as_slice())
            .unwrap()
            .data;
        assert_eq!(original_state, restored_state);
    }

    #[test]
    fn test_serde() {
        let page_size = get_page_size().unwrap();
        let region_size = page_size * 3;

        let guest_memory = into_region_ext(
            anonymous(
                [(GuestAddress(0), region_size)].into_iter(),
                false,
                HugePageConfig::None,
            )
            .unwrap(),
        );
        check_serde(&guest_memory);

        let regions = vec![
            (GuestAddress(0), region_size),
            (GuestAddress(region_size as u64), region_size),
            (GuestAddress(region_size as u64 * 2), region_size),
        ];
        let guest_memory =
            into_region_ext(anonymous(regions.into_iter(), true, HugePageConfig::None).unwrap());
        check_serde(&guest_memory);
    }

    #[test]
    fn test_describe() {
        let page_size: usize = get_page_size().unwrap();

        let mem_regions = [
            (GuestAddress(0), page_size),
            (GuestAddress(page_size as u64 * 2), page_size),
        ];
        let guest_memory = into_region_ext(
            anonymous(mem_regions.into_iter(), true, HugePageConfig::None).unwrap(),
        );

        let expected_memory_state = GuestMemoryState {
            regions: vec![
                GuestMemoryRegionState {
                    base_address: 0,
                    size: page_size,
                    region_type: GuestRegionType::Dram,
                    plugged: vec![true],
                },
                GuestMemoryRegionState {
                    base_address: page_size as u64 * 2,
                    size: page_size,
                    region_type: GuestRegionType::Dram,
                    plugged: vec![true],
                },
            ],
        };

        let actual_memory_state = guest_memory.describe();
        assert_eq!(expected_memory_state, actual_memory_state);

        let mem_regions = [
            (GuestAddress(0), page_size * 3),
            (GuestAddress(page_size as u64 * 4), page_size * 3),
        ];
        let guest_memory = into_region_ext(
            anonymous(mem_regions.into_iter(), true, HugePageConfig::None).unwrap(),
        );

        let expected_memory_state = GuestMemoryState {
            regions: vec![
                GuestMemoryRegionState {
                    base_address: 0,
                    size: page_size * 3,
                    region_type: GuestRegionType::Dram,
                    plugged: vec![true],
                },
                GuestMemoryRegionState {
                    base_address: page_size as u64 * 4,
                    size: page_size * 3,
                    region_type: GuestRegionType::Dram,
                    plugged: vec![true],
                },
            ],
        };

        let actual_memory_state = guest_memory.describe();
        assert_eq!(expected_memory_state, actual_memory_state);
    }

    #[test]
    fn test_dump() {
        let page_size = get_page_size().unwrap();

        let region_1_address = GuestAddress(0);
        let region_2_address = GuestAddress(page_size as u64 * 3);
        let region_size = page_size * 2;
        let mem_regions = [
            (region_1_address, region_size),
            (region_2_address, region_size),
        ];
        let guest_memory = into_region_ext(
            anonymous(mem_regions.into_iter(), true, HugePageConfig::None).unwrap(),
        );
        guest_memory.iter().for_each(|r| {
            assert!(!r.bitmap().dirty_at(0));
            assert!(!r.bitmap().dirty_at(1));
        });

        let first_region = vec![1u8; region_size];
        guest_memory.write(&first_region, region_1_address).unwrap();

        let second_region = vec![2u8; region_size];
        guest_memory
            .write(&second_region, region_2_address)
            .unwrap();

        let memory_state = guest_memory.describe();

        let mut memory_file = TempFile::new().unwrap().into_file();
        guest_memory.dump(&mut memory_file).unwrap();

        let restored_guest_memory =
            into_region_ext(snapshot_file(memory_file, memory_state.regions(), false).unwrap());

        // Check that the restored memory contains the same data as the original one.
        let mut restored_region = vec![0u8; page_size * 2];
        restored_guest_memory
            .read(restored_region.as_mut_slice(), region_1_address)
            .unwrap();
        assert_eq!(first_region, restored_region);

        restored_guest_memory
            .read(restored_region.as_mut_slice(), region_2_address)
            .unwrap();
        assert_eq!(second_region, restored_region);
    }

    #[test]
    fn test_dump_dirty() {
        let page_size = get_page_size().unwrap();

        let region_1_address = GuestAddress(0);
        let region_2_address = GuestAddress(page_size as u64 * 3);
        let region_size = page_size * 2;
        let mem_regions = [
            (region_1_address, region_size),
            (region_2_address, region_size),
        ];
        let guest_memory = into_region_ext(
            anonymous(mem_regions.into_iter(), true, HugePageConfig::None).unwrap(),
        );
        guest_memory.iter().for_each(|r| {
            assert!(!r.bitmap().dirty_at(0));
            assert!(!r.bitmap().dirty_at(1));
        });

        let first_region = vec![1u8; region_size];
        guest_memory.write(&first_region, region_1_address).unwrap();

        let second_region = vec![2u8; region_size];
        guest_memory
            .write(&second_region, region_2_address)
            .unwrap();

        let memory_state = guest_memory.describe();

        // Mark the first page of the first region and the second page of the second region
        // as dirty in the KVM bitmap.
        let mut dirty_bitmap: DirtyBitmap = HashMap::new();
        dirty_bitmap.insert(0, vec![0b01]);
        dirty_bitmap.insert(1, vec![0b10]);

        let mut file = TempFile::new().unwrap().into_file();
        guest_memory.dump_dirty(&mut file, &dirty_bitmap).unwrap();

        let restored_guest_memory =
            into_region_ext(snapshot_file(file, memory_state.regions(), false).unwrap());

        // Check that the restored memory contains the same data as the original one.
        let mut restored_region = vec![0u8; region_size];
        restored_guest_memory
            .read(restored_region.as_mut_slice(), region_1_address)
            .unwrap();
        assert_eq!(first_region, restored_region);

        restored_guest_memory
            .read(restored_region.as_mut_slice(), region_2_address)
            .unwrap();
        assert_eq!(second_region, restored_region);

        let file = TempFile::new().unwrap();
        let mut reader = file.into_file();
        let zeros = vec![0u8; page_size];
        let ones = vec![1u8; page_size];
        let twos = vec![2u8; page_size];

        // Write a different value in the second page of the first region so the firecracker
        // bitmap also sees a dirty page.
        guest_memory
            .write(&twos, GuestAddress(page_size as u64))
            .unwrap();

        guest_memory.dump_dirty(&mut reader, &dirty_bitmap).unwrap();

        // Only the dirty pages should have been written; clean pages remain holes (zeros).
        let mut diff_file_content = Vec::new();
        let expected_first_region = [
            ones.as_slice(),
            twos.as_slice(),
            zeros.as_slice(),
            twos.as_slice(),
        ]
        .concat();
        reader.seek(SeekFrom::Start(0)).unwrap();
        reader.read_to_end(&mut diff_file_content).unwrap();
        assert_eq!(expected_first_region, diff_file_content);
    }

    #[test]
    fn test_store_dirty_bitmap() {
        let page_size = get_page_size().unwrap();

        let region_1_address = GuestAddress(0);
        let region_2_address = GuestAddress(page_size as u64 * 4);
        let region_size = page_size * 3;
        let mem_regions = [
            (region_1_address, region_size),
            (region_2_address, region_size),
        ];
        let guest_memory = into_region_ext(
            anonymous(mem_regions.into_iter(), true, HugePageConfig::None).unwrap(),
        );

        // All pages are initially clean.
        guest_memory.iter().for_each(|r| {
            assert!(!r.bitmap().dirty_at(0));
            assert!(!r.bitmap().dirty_at(page_size));
            assert!(!r.bitmap().dirty_at(page_size * 2));
        });

        let mut dirty_bitmap: DirtyBitmap = HashMap::new();
        dirty_bitmap.insert(0, vec![0b101]);
        dirty_bitmap.insert(1, vec![0b101]);

        guest_memory.store_dirty_bitmap(&dirty_bitmap, page_size);

        // The first and third pages of each region should now be dirty.
        guest_memory.iter().for_each(|r| {
            assert!(r.bitmap().dirty_at(0));
            assert!(!r.bitmap().dirty_at(page_size));
            assert!(r.bitmap().dirty_at(page_size * 2));
        });
    }

    #[test]
    fn test_create_memfd() {
        let size_bytes = mib_to_bytes(1) as u64;

        let memfd = create_memfd(size_bytes, None).unwrap();

        assert_eq!(memfd.as_file().metadata().unwrap().len(), size_bytes);
        memfd.as_file().set_len(0x69).unwrap_err();

        let mut seals = memfd::SealsHashSet::new();
        seals.insert(memfd::FileSeal::SealGrow);
        memfd.add_seals(&seals).unwrap_err();
    }

    macro_rules! assert_match {
        ($lhs:expr, $rhs:pat) => {{ assert!(matches!($lhs, $rhs)) }};
    }

    #[test]
    fn test_discard_range() {
        let page_size: usize = 0x1000;
        let mem = single_region_mem(2 * page_size);

        let ones = vec![1u8; 2 * page_size];
        mem.write(&ones[..], GuestAddress(0)).unwrap();

        mem.discard_range(GuestAddress(0), page_size).unwrap();

        // The discarded page reads back as zeros; the other page is untouched.
        let mut actual_page = vec![0u8; page_size];
        mem.read(actual_page.as_mut_slice(), GuestAddress(0))
            .unwrap();
        assert_eq!(vec![0u8; page_size], actual_page);
        mem.read(actual_page.as_mut_slice(), GuestAddress(page_size as u64))
            .unwrap();
        assert_eq!(vec![1u8; page_size], actual_page);

        // Range exceeding the region size.
        assert_match!(
            mem.discard_range(GuestAddress(0), 0x10000).unwrap_err(),
            GuestMemoryError::InvalidGuestAddress(_)
        );

        // Range starting past the end of the region.
        assert_match!(
            mem.discard_range(GuestAddress(0x10000), 0x10).unwrap_err(),
            GuestMemoryError::InvalidGuestAddress(_)
        );

        // Range not aligned to the page size.
        assert_match!(
            mem.discard_range(GuestAddress(0x20), page_size)
                .unwrap_err(),
            GuestMemoryError::IOError(_)
        );
    }

    #[test]
    fn test_discard_range_on_file() {
        let page_size: usize = 0x1000;
        let mut memory_file = TempFile::new().unwrap().into_file();
        memory_file.set_len(2 * page_size as u64).unwrap();
        memory_file.write_all(&vec![2u8; 2 * page_size]).unwrap();
        let mem = into_region_ext(
            snapshot_file(
                memory_file,
                std::iter::once((GuestAddress(0), 2 * page_size)),
                false,
            )
            .unwrap(),
        );

        let ones = vec![1u8; 2 * page_size];
        mem.write(&ones[..], GuestAddress(0)).unwrap();

        mem.discard_range(GuestAddress(0), page_size).unwrap();

        // The discarded page reads back as zeros; the other page keeps the written data.
        let mut actual_page = vec![0u8; page_size];
        mem.read(actual_page.as_mut_slice(), GuestAddress(0))
            .unwrap();
        assert_eq!(vec![0u8; page_size], actual_page);
        mem.read(actual_page.as_mut_slice(), GuestAddress(page_size as u64))
            .unwrap();
        assert_eq!(vec![1u8; page_size], actual_page);

        // Range exceeding the region size.
        assert_match!(
            mem.discard_range(GuestAddress(0), 0x10000).unwrap_err(),
            GuestMemoryError::InvalidGuestAddress(_)
        );

        // Range starting past the end of the region.
        assert_match!(
            mem.discard_range(GuestAddress(0x10000), 0x10).unwrap_err(),
            GuestMemoryError::InvalidGuestAddress(_)
        );

        // Range not aligned to the page size.
        assert_match!(
            mem.discard_range(GuestAddress(0x20), page_size)
                .unwrap_err(),
            GuestMemoryError::IOError(_)
        );
    }
}