// vmm/devices/virtio/mem/device.rs

1// Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2// SPDX-License-Identifier: Apache-2.0
3
4use std::io;
5use std::ops::{Deref, Range};
6use std::sync::Arc;
7use std::sync::atomic::AtomicU32;
8
9use bitvec::vec::BitVec;
10use log::info;
11use serde::{Deserialize, Serialize};
12use vm_memory::{
13    Address, Bytes, GuestAddress, GuestMemory, GuestMemoryError, GuestMemoryRegion, GuestUsize,
14};
15use vmm_sys_util::eventfd::EventFd;
16
17use super::{MEM_NUM_QUEUES, MEM_QUEUE};
18use crate::devices::virtio::ActivateError;
19use crate::devices::virtio::device::{ActiveState, DeviceState, VirtioDevice};
20use crate::devices::virtio::generated::virtio_config::VIRTIO_F_VERSION_1;
21use crate::devices::virtio::generated::virtio_ids::VIRTIO_ID_MEM;
22use crate::devices::virtio::generated::virtio_mem::{
23    self, VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, virtio_mem_config,
24};
25use crate::devices::virtio::iov_deque::IovDequeError;
26use crate::devices::virtio::mem::VIRTIO_MEM_DEV_ID;
27use crate::devices::virtio::mem::metrics::METRICS;
28use crate::devices::virtio::mem::request::{BlockRangeState, Request, RequestedRange, Response};
29use crate::devices::virtio::queue::{
30    DescriptorChain, FIRECRACKER_MAX_QUEUE_SIZE, InvalidAvailIdx, Queue, QueueError,
31};
32use crate::devices::virtio::transport::{VirtioInterrupt, VirtioInterruptType};
33use crate::logger::{IncMetric, debug, error};
34use crate::utils::{bytes_to_mib, mib_to_bytes, u64_to_usize, usize_to_u64};
35use crate::vstate::interrupts::InterruptError;
36use crate::vstate::memory::{
37    ByteValued, GuestMemoryExtension, GuestMemoryMmap, GuestRegionMmap, GuestRegionType,
38};
39use crate::vstate::vm::VmError;
40use crate::{Vm, impl_device_type};
41
// SAFETY: virtio_mem_config only contains plain data types (no pointers,
// references, or types with invalid bit patterns), so any byte pattern is a
// valid value and it can be safely reinterpreted as a byte slice.
unsafe impl ByteValued for virtio_mem_config {}
44
45#[derive(Debug, thiserror::Error, displaydoc::Display)]
46pub enum VirtioMemError {
47    /// Error while handling an Event file descriptor: {0}
48    EventFd(#[from] io::Error),
49    /// Received error while sending an interrupt: {0}
50    InterruptError(#[from] InterruptError),
51    /// Size {0} is invalid: it must be a multiple of block size and less than the total size
52    InvalidSize(u64),
53    /// Device is not active
54    DeviceNotActive,
55    /// Descriptor is write-only
56    UnexpectedWriteOnlyDescriptor,
57    /// Error reading virtio descriptor
58    DescriptorWriteFailed,
59    /// Error writing virtio descriptor
60    DescriptorReadFailed,
61    /// Unknown request type: {0}
62    UnknownRequestType(u32),
63    /// Descriptor chain is too short
64    DescriptorChainTooShort,
65    /// Descriptor is too small
66    DescriptorLengthTooSmall,
67    /// Descriptor is read-only
68    UnexpectedReadOnlyDescriptor,
69    /// Error popping from virtio queue: {0}
70    InvalidAvailIdx(#[from] InvalidAvailIdx),
71    /// Error adding used queue: {0}
72    QueueError(#[from] QueueError),
73    /// Invalid requested range: {0:?}.
74    InvalidRange(RequestedRange),
75    /// The requested range cannot be plugged because it's {0:?}.
76    PlugRequestBlockStateInvalid(BlockRangeState),
77    /// Plug request rejected as plugged_size would be greater than requested_size
78    PlugRequestIsTooBig,
79    /// The requested range cannot be unplugged because it's {0:?}.
80    UnplugRequestBlockStateInvalid(BlockRangeState),
81    /// There was an error updating the KVM slot.
82    UpdateKvmSlot(VmError),
83}
84
/// A virtio-mem device that lets the guest plug/unplug memory blocks inside a
/// fixed hotpluggable region.
#[derive(Debug)]
pub struct VirtioMem {
    // VirtIO fields
    avail_features: u64,
    acked_features: u64,
    activate_event: EventFd,

    // Transport fields
    device_state: DeviceState,
    pub(crate) queues: Vec<Queue>,
    queue_events: Vec<EventFd>,

    // Device specific fields
    // `config` mirrors the virtio-mem config space exposed to the guest
    // (addr, region_size, block_size, plugged_size, requested_size, ...).
    pub(crate) config: virtio_mem_config,
    // KVM slot granularity in bytes used when mapping/unmapping memory.
    pub(crate) slot_size: usize,
    // Bitmap to track which blocks are plugged
    pub(crate) plugged_blocks: BitVec,
    vm: Arc<Vm>,
}
104
/// Memory hotplug device status information.
///
/// Serializable snapshot of the device state, as reported by [`VirtioMem::status`].
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct VirtioMemStatus {
    /// Block size in MiB.
    pub block_size_mib: usize,
    /// Total memory size in MiB that can be hotplugged.
    pub total_size_mib: usize,
    /// Size of the KVM slots in MiB.
    pub slot_size_mib: usize,
    /// Currently plugged memory size in MiB.
    pub plugged_size_mib: usize,
    /// Requested memory size in MiB.
    pub requested_size_mib: usize,
}
120
121impl VirtioMem {
    /// Creates a new virtio-mem device covering `total_size_mib` MiB of
    /// hotpluggable guest memory starting at `addr`, split into blocks of
    /// `block_size_mib` MiB. All blocks start out unplugged.
    // NOTE(review): assumes total_size_mib is a multiple of block_size_mib;
    // no validation happens here — confirm callers enforce it.
    pub fn new(
        vm: Arc<Vm>,
        addr: GuestAddress,
        total_size_mib: usize,
        block_size_mib: usize,
        slot_size_mib: usize,
    ) -> Result<Self, VirtioMemError> {
        let queues = vec![Queue::new(FIRECRACKER_MAX_QUEUE_SIZE); MEM_NUM_QUEUES];
        let config = virtio_mem_config {
            addr: addr.raw_value(),
            region_size: mib_to_bytes(total_size_mib) as u64,
            block_size: mib_to_bytes(block_size_mib) as u64,
            // plugged_size/requested_size/usable_region_size all start at 0.
            ..Default::default()
        };
        // One bit per block, all initially unplugged.
        let plugged_blocks = BitVec::repeat(false, total_size_mib / block_size_mib);

        Self::from_state(
            vm,
            queues,
            config,
            mib_to_bytes(slot_size_mib),
            plugged_blocks,
        )
    }
146
    /// Builds a device from pre-existing state (used both by [`VirtioMem::new`]
    /// and when restoring from a snapshot).
    ///
    /// # Errors
    /// Returns [`VirtioMemError::EventFd`] if any event fd cannot be created.
    pub fn from_state(
        vm: Arc<Vm>,
        queues: Vec<Queue>,
        config: virtio_mem_config,
        slot_size: usize,
        plugged_blocks: BitVec,
    ) -> Result<Self, VirtioMemError> {
        let activate_event = EventFd::new(libc::EFD_NONBLOCK)?;
        let queue_events = (0..MEM_NUM_QUEUES)
            .map(|_| EventFd::new(libc::EFD_NONBLOCK))
            .collect::<Result<Vec<EventFd>, io::Error>>()?;

        Ok(Self {
            // VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE is mandatory; activation
            // fails if the guest does not acknowledge it.
            avail_features: (1 << VIRTIO_F_VERSION_1) | (1 << VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE),
            acked_features: 0u64,
            activate_event,
            device_state: DeviceState::Inactive,
            queues,
            queue_events,
            config,
            vm,
            slot_size,
            plugged_blocks,
        })
    }
172
    /// Returns the device identifier used by the device manager.
    pub fn id(&self) -> &str {
        VIRTIO_MEM_DEV_ID
    }
176
    /// Returns the guest physical address where the hotpluggable region starts.
    pub fn guest_address(&self) -> GuestAddress {
        GuestAddress(self.config.addr)
    }
180
181    /// Gets the total hotpluggable size.
182    pub fn total_size_mib(&self) -> usize {
183        bytes_to_mib(u64_to_usize(self.config.region_size))
184    }
185
186    /// Gets the block size.
187    pub fn block_size_mib(&self) -> usize {
188        bytes_to_mib(u64_to_usize(self.config.block_size))
189    }
190
    /// Gets the KVM slot size.
    pub fn slot_size_mib(&self) -> usize {
        bytes_to_mib(self.slot_size)
    }
195
196    /// Gets the total size of the plugged memory blocks.
197    pub fn plugged_size_mib(&self) -> usize {
198        bytes_to_mib(u64_to_usize(self.config.plugged_size))
199    }
200
201    /// Gets the requested size
202    pub fn requested_size_mib(&self) -> usize {
203        bytes_to_mib(u64_to_usize(self.config.requested_size))
204    }
205
206    pub fn status(&self) -> VirtioMemStatus {
207        VirtioMemStatus {
208            block_size_mib: self.block_size_mib(),
209            total_size_mib: self.total_size_mib(),
210            slot_size_mib: self.slot_size_mib(),
211            plugged_size_mib: self.plugged_size_mib(),
212            requested_size_mib: self.requested_size_mib(),
213        }
214    }
215
    /// Triggers the used-queue interrupt for the mem queue.
    ///
    /// # Panics
    /// Panics if the device has not been activated (see `interrupt_trigger`).
    fn signal_used_queue(&self) -> Result<(), VirtioMemError> {
        self.interrupt_trigger()
            // MEM_QUEUE is a small constant index, so the conversion cannot fail.
            .trigger(VirtioInterruptType::Queue(MEM_QUEUE.try_into().unwrap()))
            .map_err(VirtioMemError::InterruptError)
    }
221
    /// Returns the guest memory of the active device.
    ///
    /// # Panics
    /// Panics if the device has not been activated yet.
    fn guest_memory(&self) -> &GuestMemoryMmap {
        &self.device_state.active_state().unwrap().mem
    }
225
    /// Converts a number of blocks into a length in bytes.
    fn nb_blocks_to_len(&self, nb_blocks: usize) -> usize {
        nb_blocks * u64_to_usize(self.config.block_size)
    }
229
230    /// Returns the state of all the blocks in the given range.
231    ///
232    /// Note: the range passed to this function must be within the device memory to avoid
233    /// out-of-bound panics.
234    fn range_state(&self, range: &RequestedRange) -> BlockRangeState {
235        let plugged_count = self.plugged_blocks[self.unchecked_block_range(range)].count_ones();
236
237        match plugged_count {
238            nb_blocks if nb_blocks == range.nb_blocks => BlockRangeState::Plugged,
239            0 => BlockRangeState::Unplugged,
240            _ => BlockRangeState::Mixed,
241        }
242    }
243
    /// Parses a guest descriptor chain into a request.
    ///
    /// Expects a chain of (at least) two descriptors: a readable head holding a
    /// `virtio_mem_req`, followed by a writable descriptor large enough for a
    /// `virtio_mem_resp`. Returns the parsed request, the guest address where
    /// the response must be written, and the head descriptor index for the
    /// used ring.
    fn parse_request(
        &self,
        avail_desc: &DescriptorChain,
    ) -> Result<(Request, GuestAddress, u16), VirtioMemError> {
        // The head contains the request type which MUST be readable.
        if avail_desc.is_write_only() {
            return Err(VirtioMemError::UnexpectedWriteOnlyDescriptor);
        }

        if (avail_desc.len as usize) < size_of::<virtio_mem::virtio_mem_req>() {
            return Err(VirtioMemError::DescriptorLengthTooSmall);
        }

        let request: virtio_mem::virtio_mem_req = self
            .guest_memory()
            .read_obj(avail_desc.addr)
            .map_err(|_| VirtioMemError::DescriptorReadFailed)?;

        let resp_desc = avail_desc
            .next_descriptor()
            .ok_or(VirtioMemError::DescriptorChainTooShort)?;

        // The response MUST always be writable.
        if !resp_desc.is_write_only() {
            return Err(VirtioMemError::UnexpectedReadOnlyDescriptor);
        }

        if (resp_desc.len as usize) < std::mem::size_of::<virtio_mem::virtio_mem_resp>() {
            return Err(VirtioMemError::DescriptorLengthTooSmall);
        }

        Ok((request.into(), resp_desc.addr, avail_desc.index))
    }
277
278    fn write_response(
279        &mut self,
280        resp: Response,
281        resp_addr: GuestAddress,
282        used_idx: u16,
283    ) -> Result<(), VirtioMemError> {
284        debug!("virtio-mem: Response: {:?}", resp);
285        self.guest_memory()
286            .write_obj(virtio_mem::virtio_mem_resp::from(resp), resp_addr)
287            .map_err(|_| VirtioMemError::DescriptorWriteFailed)
288            .map(|_| size_of::<virtio_mem::virtio_mem_resp>())?;
289        self.queues[MEM_QUEUE]
290            .add_used(
291                used_idx,
292                u32::try_from(std::mem::size_of::<virtio_mem::virtio_mem_resp>()).unwrap(),
293            )
294            .map_err(VirtioMemError::QueueError)
295    }
296
297    /// Checks that the range provided by the driver is within the usable memory region
298    fn validate_range(&self, range: &RequestedRange) -> Result<(), VirtioMemError> {
299        // Ensure the range is aligned
300        if !range
301            .addr
302            .raw_value()
303            .is_multiple_of(self.config.block_size)
304        {
305            return Err(VirtioMemError::InvalidRange(*range));
306        }
307
308        if range.nb_blocks == 0 {
309            return Err(VirtioMemError::InvalidRange(*range));
310        }
311
312        // Ensure the start addr is within the usable region
313        let start_off = range
314            .addr
315            .checked_offset_from(self.guest_address())
316            .filter(|&off| off < self.config.usable_region_size)
317            .ok_or(VirtioMemError::InvalidRange(*range))?;
318
319        // Ensure the end offset (exclusive) is within the usable region
320        let end_off = start_off
321            .checked_add(usize_to_u64(self.nb_blocks_to_len(range.nb_blocks)))
322            .filter(|&end_off| end_off <= self.config.usable_region_size)
323            .ok_or(VirtioMemError::InvalidRange(*range))?;
324
325        Ok(())
326    }
327
    /// Maps a guest address range to the corresponding index range in
    /// `plugged_blocks`, without bounds checking.
    ///
    /// Callers must have validated the range first; an address below the
    /// device start would underflow here.
    fn unchecked_block_range(&self, range: &RequestedRange) -> Range<usize> {
        let start_block = u64_to_usize((range.addr.0 - self.config.addr) / self.config.block_size);

        start_block..(start_block + range.nb_blocks)
    }
333
    /// Validates and applies a PLUG request: the range must be fully
    /// unplugged, and plugging it must not exceed `requested_size`.
    fn process_plug_request(&mut self, range: &RequestedRange) -> Result<(), VirtioMemError> {
        self.validate_range(range)?;

        // Reject requests that would plug more memory than the host offered.
        if self.config.plugged_size + usize_to_u64(self.nb_blocks_to_len(range.nb_blocks))
            > self.config.requested_size
        {
            return Err(VirtioMemError::PlugRequestIsTooBig);
        }

        match self.range_state(range) {
            // the range was validated
            BlockRangeState::Unplugged => self.update_range(range, true),
            state => Err(VirtioMemError::PlugRequestBlockStateInvalid(state)),
        }
    }
349
    /// Handles a PLUG request end-to-end: updates metrics, performs the plug,
    /// and writes an ACK/ERROR response back to the guest.
    ///
    /// Plug failures are reported to the driver via an error response, not as
    /// a device error; only response/queue failures propagate.
    fn handle_plug_request(
        &mut self,
        range: &RequestedRange,
        resp_addr: GuestAddress,
        used_idx: u16,
    ) -> Result<(), VirtioMemError> {
        METRICS.plug_count.inc();
        let _metric = METRICS.plug_agg.record_latency_metrics();

        let response = match self.process_plug_request(range) {
            Err(err) => {
                METRICS.plug_fails.inc();
                error!("virtio-mem: Failed to plug range: {}", err);
                Response::error()
            }
            Ok(_) => {
                METRICS
                    .plug_bytes
                    .add(usize_to_u64(self.nb_blocks_to_len(range.nb_blocks)));
                Response::ack()
            }
        };
        self.write_response(response, resp_addr, used_idx)
    }
374
    /// Validates and applies an UNPLUG request: the range must be fully
    /// plugged to be unplugged.
    fn process_unplug_request(&mut self, range: &RequestedRange) -> Result<(), VirtioMemError> {
        self.validate_range(range)?;

        match self.range_state(range) {
            // the range was validated
            BlockRangeState::Plugged => self.update_range(range, false),
            state => Err(VirtioMemError::UnplugRequestBlockStateInvalid(state)),
        }
    }
384
    /// Handles an UNPLUG request end-to-end: updates metrics, performs the
    /// unplug, and writes an ACK/ERROR response back to the guest.
    fn handle_unplug_request(
        &mut self,
        range: &RequestedRange,
        resp_addr: GuestAddress,
        used_idx: u16,
    ) -> Result<(), VirtioMemError> {
        METRICS.unplug_count.inc();
        let _metric = METRICS.unplug_agg.record_latency_metrics();
        let response = match self.process_unplug_request(range) {
            Err(err) => {
                METRICS.unplug_fails.inc();
                error!("virtio-mem: Failed to unplug range: {}", err);
                Response::error()
            }
            Ok(_) => {
                METRICS
                    .unplug_bytes
                    .add(usize_to_u64(self.nb_blocks_to_len(range.nb_blocks)));
                Response::ack()
            }
        };
        self.write_response(response, resp_addr, used_idx)
    }
408
    /// Handles an UNPLUG_ALL request: unplugs every block of the device and,
    /// on success, resets `usable_region_size` to 0 (the guest must wait for a
    /// new `requested_size` before plugging again).
    fn handle_unplug_all_request(
        &mut self,
        resp_addr: GuestAddress,
        used_idx: u16,
    ) -> Result<(), VirtioMemError> {
        METRICS.unplug_all_count.inc();
        let _metric = METRICS.unplug_all_agg.record_latency_metrics();
        // Cover the whole device region, regardless of current block states.
        let range = RequestedRange {
            addr: self.guest_address(),
            nb_blocks: self.plugged_blocks.len(),
        };
        let response = match self.update_range(&range, false) {
            Err(err) => {
                METRICS.unplug_all_fails.inc();
                error!("virtio-mem: Failed to unplug all: {}", err);
                Response::error()
            }
            Ok(_) => {
                self.config.usable_region_size = 0;
                Response::ack()
            }
        };
        self.write_response(response, resp_addr, used_idx)
    }
433
    /// Handles a STATE request: reports whether the given range is plugged,
    /// unplugged, or mixed.
    fn handle_state_request(
        &mut self,
        range: &RequestedRange,
        resp_addr: GuestAddress,
        used_idx: u16,
    ) -> Result<(), VirtioMemError> {
        METRICS.state_count.inc();
        let _metric = METRICS.state_agg.record_latency_metrics();
        let response = match self.validate_range(range) {
            Err(err) => {
                METRICS.state_fails.inc();
                error!("virtio-mem: Failed to retrieve state of range: {}", err);
                Response::error()
            }
            // the range was validated
            Ok(_) => Response::ack_with_state(self.range_state(range)),
        };
        self.write_response(response, resp_addr, used_idx)
    }
453
454    fn process_mem_queue(&mut self) -> Result<(), VirtioMemError> {
455        while let Some(desc) = self.queues[MEM_QUEUE].pop()? {
456            let index = desc.index;
457
458            let (req, resp_addr, used_idx) = self.parse_request(&desc)?;
459            debug!("virtio-mem: Request: {:?}", req);
460            // Handle request and write response
461            match req {
462                Request::State(ref range) => self.handle_state_request(range, resp_addr, used_idx),
463                Request::Plug(ref range) => self.handle_plug_request(range, resp_addr, used_idx),
464                Request::Unplug(ref range) => {
465                    self.handle_unplug_request(range, resp_addr, used_idx)
466                }
467                Request::UnplugAll => self.handle_unplug_all_request(resp_addr, used_idx),
468                Request::Unsupported(t) => Err(VirtioMemError::UnknownRequestType(t)),
469            }?;
470        }
471
472        self.queues[MEM_QUEUE].advance_used_ring_idx();
473        self.signal_used_queue()?;
474
475        Ok(())
476    }
477
    /// Event-loop entry point: drains the queue event fd and processes the
    /// queue. Failures are counted and logged, never propagated.
    pub(crate) fn process_mem_queue_event(&mut self) {
        METRICS.queue_event_count.inc();
        // The event fd must be drained even if processing fails, otherwise the
        // event loop would spin on a level-triggered fd.
        if let Err(err) = self.queue_events[MEM_QUEUE].read() {
            METRICS.queue_event_fails.inc();
            error!("Failed to read mem queue event: {err}");
            return;
        }

        if let Err(err) = self.process_mem_queue() {
            METRICS.queue_event_fails.inc();
            error!("virtio-mem: Failed to process queue: {err}");
        }
    }
491
    /// Processes any pending requests on the device's (single) virtio queue.
    pub fn process_virtio_queues(&mut self) -> Result<(), VirtioMemError> {
        self.process_mem_queue()
    }
495
    /// Overrides the advertised feature bits (crate-internal, e.g. for tests).
    pub(crate) fn set_avail_features(&mut self, features: u64) {
        self.avail_features = features;
    }
499
    /// Overrides the acknowledged feature bits (crate-internal, e.g. for tests).
    pub(crate) fn set_acked_features(&mut self, features: u64) {
        self.acked_features = features;
    }
503
    /// Returns the event fd written on device activation.
    pub(crate) fn activate_event(&self) -> &EventFd {
        &self.activate_event
    }
507
    /// Syncs KVM memory slots with the plug bitmap for every slot that
    /// intersects `updated_range`.
    ///
    /// A slot is mapped if any of its blocks is plugged (Mixed or Plugged)
    /// and unmapped only when it is fully unplugged.
    fn update_kvm_slots(&self, updated_range: &RequestedRange) -> Result<(), VirtioMemError> {
        let hp_region = self
            .guest_memory()
            .iter()
            .find(|r| r.region_type == GuestRegionType::Hotpluggable)
            .expect("there should be one and only one hotpluggable region");
        hp_region
            .slots_intersecting_range(
                updated_range.addr,
                self.nb_blocks_to_len(updated_range.nb_blocks),
            )
            .try_for_each(|slot| {
                // Re-derive the per-slot block range to inspect its state.
                let slot_range = RequestedRange {
                    addr: slot.guest_addr,
                    nb_blocks: slot.slice.len() / u64_to_usize(self.config.block_size),
                };
                match self.range_state(&slot_range) {
                    BlockRangeState::Mixed | BlockRangeState::Plugged => {
                        hp_region.update_slot(&self.vm, &slot, true)
                    }
                    BlockRangeState::Unplugged => hp_region.update_slot(&self.vm, &slot, false),
                }
                .map_err(VirtioMemError::UpdateKvmSlot)
            })
    }
533
534    /// Plugs/unplugs the given range
535    ///
536    /// Note: the range passed to this function must be within the device memory to avoid
537    /// out-of-bound panics.
538    fn update_range(&mut self, range: &RequestedRange, plug: bool) -> Result<(), VirtioMemError> {
539        // Update internal state
540        let block_range = self.unchecked_block_range(range);
541        let plugged_blocks_slice = &mut self.plugged_blocks[block_range];
542        let plugged_before = plugged_blocks_slice.count_ones();
543        plugged_blocks_slice.fill(plug);
544        let plugged_after = plugged_blocks_slice.count_ones();
545        self.config.plugged_size -= usize_to_u64(self.nb_blocks_to_len(plugged_before));
546        self.config.plugged_size += usize_to_u64(self.nb_blocks_to_len(plugged_after));
547
548        // If unplugging, discard the range
549        if !plug {
550            self.guest_memory()
551                .discard_range(range.addr, self.nb_blocks_to_len(range.nb_blocks))
552                .inspect_err(|err| {
553                    // Failure to discard is not fatal and is not reported to the driver. It only
554                    // gets logged.
555                    METRICS.unplug_discard_fails.inc();
556                    error!("virtio-mem: Failed to discard memory range: {}", err);
557                });
558        }
559
560        self.update_kvm_slots(range)
561    }
562
563    /// Updates the requested size of the virtio-mem device.
564    pub fn update_requested_size(
565        &mut self,
566        requested_size_mib: usize,
567    ) -> Result<(), VirtioMemError> {
568        let requested_size = usize_to_u64(mib_to_bytes(requested_size_mib));
569        if !self.is_activated() {
570            return Err(VirtioMemError::DeviceNotActive);
571        }
572
573        if requested_size % self.config.block_size != 0 {
574            return Err(VirtioMemError::InvalidSize(requested_size));
575        }
576        if requested_size > self.config.region_size {
577            return Err(VirtioMemError::InvalidSize(requested_size));
578        }
579
580        // Increase the usable_region_size if it's not enough for the guest to plug new
581        // memory blocks.
582        // The device cannot decrease the usable_region_size unless the guest requests
583        // to reset it with an UNPLUG_ALL request.
584        if self.config.usable_region_size < requested_size {
585            self.config.usable_region_size =
586                requested_size.next_multiple_of(usize_to_u64(self.slot_size));
587            debug!(
588                "virtio-mem: Updated usable size to {} bytes",
589                self.config.usable_region_size
590            );
591        }
592
593        self.config.requested_size = requested_size;
594        debug!(
595            "virtio-mem: Updated requested size to {} bytes",
596            requested_size
597        );
598        self.interrupt_trigger()
599            .trigger(VirtioInterruptType::Config)
600            .map_err(VirtioMemError::InterruptError)
601    }
602}
603
604impl VirtioDevice for VirtioMem {
605    impl_device_type!(VIRTIO_ID_MEM);
606
    /// Returns the device's virtio queues.
    fn queues(&self) -> &[Queue] {
        &self.queues
    }
610
    /// Returns mutable access to the device's virtio queues.
    fn queues_mut(&mut self) -> &mut [Queue] {
        &mut self.queues
    }
614
    /// Returns the per-queue kick event fds.
    fn queue_events(&self) -> &[EventFd] {
        &self.queue_events
    }
618
    /// Returns the interrupt used to notify the guest.
    ///
    /// # Panics
    /// Panics if the device has not been activated yet.
    fn interrupt_trigger(&self) -> &dyn VirtioInterrupt {
        self.device_state
            .active_state()
            .expect("Device is not activated")
            .interrupt
            .deref()
    }
626
    /// Returns the feature bits the device advertises.
    fn avail_features(&self) -> u64 {
        self.avail_features
    }
630
    /// Returns the feature bits the driver has acknowledged.
    fn acked_features(&self) -> u64 {
        self.acked_features
    }
634
    /// Records the feature bits acknowledged by the driver.
    fn set_acked_features(&mut self, acked_features: u64) {
        self.acked_features = acked_features;
    }
638
639    fn read_config(&self, offset: u64, data: &mut [u8]) {
640        let offset = u64_to_usize(offset);
641        self.config
642            .as_slice()
643            .get(offset..offset + data.len())
644            .map(|s| data.copy_from_slice(s))
645            .unwrap_or_else(|| {
646                error!(
647                    "virtio-mem: Config read offset+length {offset}+{} out of bounds",
648                    data.len()
649                )
650            })
651    }
652
    /// The virtio-mem config space is read-only for the driver; writes are
    /// rejected with a log message and otherwise ignored.
    fn write_config(&mut self, offset: u64, _data: &[u8]) {
        error!("virtio-mem: Attempted write to read-only config space at offset {offset}");
    }
656
    /// Returns whether the driver has activated the device.
    fn is_activated(&self) -> bool {
        self.device_state.is_activated()
    }
660
    /// Activates the device once the driver has negotiated features.
    ///
    /// Requires the guest to have acknowledged
    /// VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, initializes the queues against
    /// guest memory, and signals activation through `activate_event`.
    fn activate(
        &mut self,
        mem: GuestMemoryMmap,
        interrupt: Arc<dyn VirtioInterrupt>,
    ) -> Result<(), ActivateError> {
        // Unplugged blocks are never mapped, so a guest that touches them
        // without this feature would fault: refuse to activate.
        if (self.acked_features & (1 << VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE)) == 0 {
            error!(
                "virtio-mem: VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE feature not acknowledged by guest"
            );
            METRICS.activate_fails.inc();
            return Err(ActivateError::RequiredFeatureNotAcked(
                "VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE",
            ));
        }

        for q in self.queues.iter_mut() {
            q.initialize(&mem)
                .map_err(ActivateError::QueueMemoryError)?;
        }

        self.device_state = DeviceState::Activated(ActiveState { mem, interrupt });
        // Roll back activation if the event-loop notification fails.
        if self.activate_event.write(1).is_err() {
            METRICS.activate_fails.inc();
            self.device_state = DeviceState::Inactive;
            return Err(ActivateError::EventFd);
        }

        Ok(())
    }
690
691    fn kick(&mut self) {
692        if self.is_activated() {
693            info!("kick mem {}.", self.id());
694            self.process_virtio_queues();
695        }
696    }
697}
698
#[cfg(test)]
pub(crate) mod test_utils {
    use super::*;
    use crate::devices::virtio::test_utils::test::VirtioTestDevice;
    use crate::test_utils::single_region_mem;
    use crate::vmm_config::machine_config::HugePageConfig;
    use crate::vstate::memory;
    use crate::vstate::vm::tests::setup_vm_with_memory;

    impl VirtioTestDevice for VirtioMem {
        fn set_queues(&mut self, queues: Vec<Queue>) {
            self.queues = queues;
        }

        fn num_queues(&self) -> usize {
            MEM_NUM_QUEUES
        }
    }

    /// Builds a device with a 1024 MiB hotpluggable region at 512 GiB,
    /// 2 MiB blocks and 128 MiB KVM slots, backed by a real test VM.
    pub(crate) fn default_virtio_mem() -> VirtioMem {
        let (_, mut vm) = setup_vm_with_memory(0x1000);
        // Place the hotpluggable region far above the boot memory.
        let addr = GuestAddress(512 << 30);
        vm.register_hotpluggable_memory_region(
            memory::anonymous(
                std::iter::once((addr, mib_to_bytes(1024))),
                false,
                HugePageConfig::None,
            )
            .unwrap()
            .pop()
            .unwrap(),
            mib_to_bytes(128),
        );
        let vm = Arc::new(vm);
        VirtioMem::new(vm, addr, 1024, 2, 128).unwrap()
    }
}
736
737#[cfg(test)]
738mod tests {
739    use std::ptr::null_mut;
740
741    use serde_json::de;
742    use vm_memory::guest_memory;
743    use vm_memory::mmap::MmapRegionBuilder;
744
745    use super::*;
746    use crate::devices::virtio::device::VirtioDevice;
747    use crate::devices::virtio::mem::device::test_utils::default_virtio_mem;
748    use crate::devices::virtio::queue::VIRTQ_DESC_F_WRITE;
749    use crate::devices::virtio::test_utils::test::VirtioTestHelper;
750    use crate::vstate::vm::tests::setup_vm_with_memory;
751
    // Verifies the constructor defaults: sizes, id/type, advertised features,
    // inactive state, and queue counts.
    #[test]
    fn test_new() {
        let mem = default_virtio_mem();

        assert_eq!(mem.total_size_mib(), 1024);
        assert_eq!(mem.block_size_mib(), 2);
        assert_eq!(mem.plugged_size_mib(), 0);
        assert_eq!(mem.id(), VIRTIO_MEM_DEV_ID);
        assert_eq!(mem.device_type(), VIRTIO_ID_MEM);

        let features = (1 << VIRTIO_F_VERSION_1) | (1 << VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE);
        assert_eq!(mem.avail_features(), features);
        assert_eq!(mem.acked_features(), 0);

        assert!(!mem.is_activated());

        assert_eq!(mem.queues().len(), MEM_NUM_QUEUES);
        assert_eq!(mem.queue_events().len(), MEM_NUM_QUEUES);
    }
771
    // Verifies that restoring from explicit state preserves every config
    // field (addr, sizes, plugged and usable sizes).
    #[test]
    fn test_from_state() {
        let (_, vm) = setup_vm_with_memory(0x1000);
        let vm = Arc::new(vm);
        let queues = vec![Queue::new(FIRECRACKER_MAX_QUEUE_SIZE); MEM_NUM_QUEUES];
        let addr = 512 << 30;
        let region_size_mib = 2048;
        let block_size_mib = 2;
        let slot_size_mib = 128;
        let plugged_size_mib = 512;
        let usable_region_size = mib_to_bytes(1024) as u64;
        let config = virtio_mem_config {
            addr,
            region_size: mib_to_bytes(region_size_mib) as u64,
            block_size: mib_to_bytes(block_size_mib) as u64,
            plugged_size: mib_to_bytes(plugged_size_mib) as u64,
            usable_region_size,
            ..Default::default()
        };
        let plugged_blocks = BitVec::repeat(
            false,
            mib_to_bytes(region_size_mib) / mib_to_bytes(block_size_mib),
        );
        let mem = VirtioMem::from_state(
            vm,
            queues,
            config,
            mib_to_bytes(slot_size_mib),
            plugged_blocks,
        )
        .unwrap();
        assert_eq!(mem.config.addr, addr);
        assert_eq!(mem.total_size_mib(), region_size_mib);
        assert_eq!(mem.block_size_mib(), block_size_mib);
        assert_eq!(mem.slot_size_mib(), slot_size_mib);
        assert_eq!(mem.plugged_size_mib(), plugged_size_mib);
        assert_eq!(mem.config.usable_region_size, usable_region_size);
    }
810
    // Reads fields from the config space at their virtio-mem byte offsets:
    // block_size (0), addr (16), region_size (24).
    #[test]
    fn test_read_config() {
        let mem = default_virtio_mem();
        let mut data = [0u8; 8];

        mem.read_config(0, &mut data);
        assert_eq!(
            u64::from_le_bytes(data),
            mib_to_bytes(mem.block_size_mib()) as u64
        );

        mem.read_config(16, &mut data);
        assert_eq!(u64::from_le_bytes(data), 512 << 30);

        mem.read_config(24, &mut data);
        assert_eq!(
            u64::from_le_bytes(data),
            mib_to_bytes(mem.total_size_mib()) as u64
        );
    }
831
    // Out-of-bounds config reads must leave the output buffer untouched.
    #[test]
    fn test_read_config_out_of_bounds() {
        let mem = default_virtio_mem();

        // Offset entirely past the end of the config space.
        let mut data = [0u8; 8];
        let config_size = std::mem::size_of::<virtio_mem_config>();
        mem.read_config(config_size as u64, &mut data);
        assert_eq!(data, [0u8; 8]); // Should remain unchanged

        // In-bounds offset but length running past the end.
        let mut data = vec![0u8; config_size];
        mem.read_config(8, &mut data);
        assert_eq!(data, vec![0u8; config_size]); // Should remain unchanged
    }
845
    // The config space is read-only: a write must be ignored and must not
    // alter any field.
    #[test]
    fn test_write_config() {
        let mut mem = default_virtio_mem();
        let data = [1u8; 8];
        mem.write_config(0, &data); // Should log error but not crash

        // should not change config
        let mut data = [0u8; 8];
        mem.read_config(0, &mut data);
        let block_size = u64::from_le_bytes(data);
        assert_eq!(block_size, mib_to_bytes(2) as u64);
    }
858
    // The crate-internal feature setters overwrite the stored bits verbatim.
    #[test]
    fn test_set_features() {
        let mut mem = default_virtio_mem();
        mem.set_avail_features(123);
        assert_eq!(mem.avail_features(), 123);
        mem.set_acked_features(456);
        assert_eq!(mem.acked_features(), 456);
    }
867
868    #[test]
869    fn test_status() {
870        let mut mem = default_virtio_mem();
871        let status = mem.status();
872        assert_eq!(
873            status,
874            VirtioMemStatus {
875                block_size_mib: 2,
876                total_size_mib: 1024,
877                slot_size_mib: 128,
878                plugged_size_mib: 0,
879                requested_size_mib: 0,
880            }
881        );
882    }
883
    // Wire-format sizes (in bytes) of the virtio-mem request/response structs,
    // used below when sizing descriptors in test chains. The casts are safe in
    // practice: both structs are far smaller than u32::MAX.
    #[allow(clippy::cast_possible_truncation)]
    const REQ_SIZE: u32 = std::mem::size_of::<virtio_mem::virtio_mem_req>() as u32;
    #[allow(clippy::cast_possible_truncation)]
    const RESP_SIZE: u32 = std::mem::size_of::<virtio_mem::virtio_mem_resp>() as u32;
888
    /// Wraps `dev` in a [`VirtioTestHelper`] backed by `mem` and activates it.
    ///
    /// The device's full set of available features is acked first, so the
    /// device is activated the same way a feature-complete driver would.
    fn test_helper<'a>(
        mut dev: VirtioMem,
        mem: &'a GuestMemoryMmap,
    ) -> VirtioTestHelper<'a, VirtioMem> {
        dev.set_acked_features(dev.avail_features);

        let mut th = VirtioTestHelper::<VirtioMem>::new(mem, dev);
        th.activate_device(mem);
        th
    }
899
    /// Submits `req` to the device's queue and returns the device's response.
    ///
    /// Builds a two-descriptor chain — descriptor 0 is the device-readable
    /// request, descriptor 1 the device-writable response buffer — writes the
    /// request into guest memory, runs event emulation until the single queue
    /// event is handled, then reads the response back from descriptor 1.
    fn emulate_request(
        th: &mut VirtioTestHelper<VirtioMem>,
        mem: &GuestMemoryMmap,
        req: Request,
    ) -> Response {
        th.add_desc_chain(
            MEM_QUEUE,
            0,
            &[(0, REQ_SIZE, 0), (1, RESP_SIZE, VIRTQ_DESC_F_WRITE)],
        );
        mem.write_obj(
            virtio_mem::virtio_mem_req::from(req),
            th.desc_address(MEM_QUEUE, 0),
        )
        .unwrap();
        // Exactly one event is expected to be processed within the timeout.
        assert_eq!(th.emulate_for_msec(100).unwrap(), 1);
        mem.read_obj::<virtio_mem::virtio_mem_resp>(th.desc_address(MEM_QUEUE, 1))
            .unwrap()
            .into()
    }
920
921    #[test]
922    fn test_event_fail_descriptor_chain_too_short() {
923        let mut mem_dev = default_virtio_mem();
924        let guest_mem = mem_dev.vm.guest_memory().clone();
925        let mut th = test_helper(mem_dev, &guest_mem);
926
927        let queue_event_count = METRICS.queue_event_count.count();
928        let queue_event_fails = METRICS.queue_event_fails.count();
929
930        th.add_desc_chain(MEM_QUEUE, 0, &[(0, REQ_SIZE, 0)]);
931        assert_eq!(th.emulate_for_msec(100).unwrap(), 1);
932
933        assert_eq!(METRICS.queue_event_count.count(), queue_event_count + 1);
934        assert_eq!(METRICS.queue_event_fails.count(), queue_event_fails + 1);
935    }
936
937    #[test]
938    fn test_event_fail_descriptor_length_too_small() {
939        let mut mem_dev = default_virtio_mem();
940        let guest_mem = mem_dev.vm.guest_memory().clone();
941        let mut th = test_helper(mem_dev, &guest_mem);
942
943        let queue_event_count = METRICS.queue_event_count.count();
944        let queue_event_fails = METRICS.queue_event_fails.count();
945
946        th.add_desc_chain(MEM_QUEUE, 0, &[(0, 1, 0)]);
947        assert_eq!(th.emulate_for_msec(100).unwrap(), 1);
948
949        assert_eq!(METRICS.queue_event_count.count(), queue_event_count + 1);
950        assert_eq!(METRICS.queue_event_fails.count(), queue_event_fails + 1);
951    }
952
953    #[test]
954    fn test_event_fail_unexpected_writeonly_descriptor() {
955        let mut mem_dev = default_virtio_mem();
956        let guest_mem = mem_dev.vm.guest_memory().clone();
957        let mut th = test_helper(mem_dev, &guest_mem);
958
959        let queue_event_count = METRICS.queue_event_count.count();
960        let queue_event_fails = METRICS.queue_event_fails.count();
961
962        th.add_desc_chain(MEM_QUEUE, 0, &[(0, REQ_SIZE, VIRTQ_DESC_F_WRITE)]);
963        assert_eq!(th.emulate_for_msec(100).unwrap(), 1);
964
965        assert_eq!(METRICS.queue_event_count.count(), queue_event_count + 1);
966        assert_eq!(METRICS.queue_event_fails.count(), queue_event_fails + 1);
967    }
968
969    #[test]
970    fn test_event_fail_unexpected_readonly_descriptor() {
971        let mut mem_dev = default_virtio_mem();
972        let guest_mem = mem_dev.vm.guest_memory().clone();
973        let mut th = test_helper(mem_dev, &guest_mem);
974
975        let queue_event_count = METRICS.queue_event_count.count();
976        let queue_event_fails = METRICS.queue_event_fails.count();
977
978        th.add_desc_chain(MEM_QUEUE, 0, &[(0, REQ_SIZE, 0), (1, RESP_SIZE, 0)]);
979        assert_eq!(th.emulate_for_msec(100).unwrap(), 1);
980
981        assert_eq!(METRICS.queue_event_count.count(), queue_event_count + 1);
982        assert_eq!(METRICS.queue_event_fails.count(), queue_event_fails + 1);
983    }
984
985    #[test]
986    fn test_event_fail_response_descriptor_length_too_small() {
987        let mut mem_dev = default_virtio_mem();
988        let guest_mem = mem_dev.vm.guest_memory().clone();
989        let mut th = test_helper(mem_dev, &guest_mem);
990
991        let queue_event_count = METRICS.queue_event_count.count();
992        let queue_event_fails = METRICS.queue_event_fails.count();
993
994        th.add_desc_chain(
995            MEM_QUEUE,
996            0,
997            &[(0, REQ_SIZE, 0), (1, 1, VIRTQ_DESC_F_WRITE)],
998        );
999        assert_eq!(th.emulate_for_msec(100).unwrap(), 1);
1000
1001        assert_eq!(METRICS.queue_event_count.count(), queue_event_count + 1);
1002        assert_eq!(METRICS.queue_event_fails.count(), queue_event_fails + 1);
1003    }
1004
1005    #[test]
1006    fn test_update_requested_size_device_not_active() {
1007        let mut mem_dev = default_virtio_mem();
1008        let result = mem_dev.update_requested_size(512);
1009        assert!(matches!(result, Err(VirtioMemError::DeviceNotActive)));
1010    }
1011
1012    #[test]
1013    fn test_update_requested_size_invalid_size() {
1014        let mut mem_dev = default_virtio_mem();
1015        let guest_mem = mem_dev.vm.guest_memory().clone();
1016        let mut th = test_helper(mem_dev, &guest_mem);
1017
1018        // Size not multiple of block size
1019        let result = th.device().update_requested_size(3);
1020        assert!(matches!(result, Err(VirtioMemError::InvalidSize(_))));
1021
1022        // Size too large
1023        let result = th.device().update_requested_size(2048);
1024        assert!(matches!(result, Err(VirtioMemError::InvalidSize(_))));
1025    }
1026
1027    #[test]
1028    fn test_update_requested_size_success() {
1029        let mut mem_dev = default_virtio_mem();
1030        let guest_mem = mem_dev.vm.guest_memory().clone();
1031        let mut th = test_helper(mem_dev, &guest_mem);
1032
1033        th.device().update_requested_size(512).unwrap();
1034        assert_eq!(th.device().requested_size_mib(), 512);
1035    }
1036
1037    #[test]
1038    fn test_plug_request_success() {
1039        let mut mem_dev = default_virtio_mem();
1040        let guest_mem = mem_dev.vm.guest_memory().clone();
1041        let mut th = test_helper(mem_dev, &guest_mem);
1042        th.device().update_requested_size(1024);
1043        let addr = th.device().guest_address();
1044
1045        let queue_event_count = METRICS.queue_event_count.count();
1046        let queue_event_fails = METRICS.queue_event_fails.count();
1047        let plug_count = METRICS.plug_count.count();
1048        let plug_bytes = METRICS.plug_bytes.count();
1049        let plug_fails = METRICS.plug_fails.count();
1050
1051        let resp = emulate_request(
1052            &mut th,
1053            &guest_mem,
1054            Request::Plug(RequestedRange { addr, nb_blocks: 1 }),
1055        );
1056        assert!(resp.is_ack());
1057        assert_eq!(th.device().plugged_size_mib(), 2);
1058
1059        assert_eq!(METRICS.queue_event_count.count(), queue_event_count + 1);
1060        assert_eq!(METRICS.queue_event_fails.count(), queue_event_fails);
1061        assert_eq!(METRICS.plug_count.count(), plug_count + 1);
1062        assert_eq!(METRICS.plug_bytes.count(), plug_bytes + (2 << 20));
1063        assert_eq!(METRICS.plug_fails.count(), plug_fails);
1064    }
1065
1066    #[test]
1067    fn test_plug_request_too_big() {
1068        let mut mem_dev = default_virtio_mem();
1069        let guest_mem = mem_dev.vm.guest_memory().clone();
1070        let mut th = test_helper(mem_dev, &guest_mem);
1071        th.device().update_requested_size(2);
1072        let addr = th.device().guest_address();
1073
1074        let plug_count = METRICS.plug_count.count();
1075        let plug_bytes = METRICS.plug_bytes.count();
1076        let plug_fails = METRICS.plug_fails.count();
1077
1078        let resp = emulate_request(
1079            &mut th,
1080            &guest_mem,
1081            Request::Plug(RequestedRange { addr, nb_blocks: 2 }),
1082        );
1083        assert!(resp.is_error());
1084
1085        assert_eq!(METRICS.plug_count.count(), plug_count + 1);
1086        assert_eq!(METRICS.plug_bytes.count(), plug_bytes);
1087        assert_eq!(METRICS.plug_fails.count(), plug_fails + 1);
1088    }
1089
1090    #[test]
1091    fn test_plug_request_already_plugged() {
1092        let mut mem_dev = default_virtio_mem();
1093        let guest_mem = mem_dev.vm.guest_memory().clone();
1094        let mut th = test_helper(mem_dev, &guest_mem);
1095        th.device().update_requested_size(1024);
1096        let addr = th.device().guest_address();
1097
1098        // First plug succeeds
1099        let resp = emulate_request(
1100            &mut th,
1101            &guest_mem,
1102            Request::Plug(RequestedRange { addr, nb_blocks: 1 }),
1103        );
1104        assert!(resp.is_ack());
1105
1106        // Second plug fails
1107        let resp = emulate_request(
1108            &mut th,
1109            &guest_mem,
1110            Request::Plug(RequestedRange { addr, nb_blocks: 1 }),
1111        );
1112        assert!(resp.is_error());
1113    }
1114
1115    #[test]
1116    fn test_unplug_request_success() {
1117        let mut mem_dev = default_virtio_mem();
1118        let guest_mem = mem_dev.vm.guest_memory().clone();
1119        let mut th = test_helper(mem_dev, &guest_mem);
1120        th.device().update_requested_size(1024);
1121        let addr = th.device().guest_address();
1122
1123        let unplug_count = METRICS.unplug_count.count();
1124        let unplug_bytes = METRICS.unplug_bytes.count();
1125        let unplug_fails = METRICS.unplug_fails.count();
1126
1127        // First plug
1128        let resp = emulate_request(
1129            &mut th,
1130            &guest_mem,
1131            Request::Plug(RequestedRange { addr, nb_blocks: 1 }),
1132        );
1133        assert!(resp.is_ack());
1134        assert_eq!(th.device().plugged_size_mib(), 2);
1135
1136        // Then unplug
1137        let resp = emulate_request(
1138            &mut th,
1139            &guest_mem,
1140            Request::Unplug(RequestedRange { addr, nb_blocks: 1 }),
1141        );
1142        assert!(resp.is_ack());
1143        assert_eq!(th.device().plugged_size_mib(), 0);
1144
1145        assert_eq!(METRICS.unplug_count.count(), unplug_count + 1);
1146        assert_eq!(METRICS.unplug_bytes.count(), unplug_bytes + (2 << 20));
1147        assert_eq!(METRICS.unplug_fails.count(), unplug_fails);
1148    }
1149
1150    #[test]
1151    fn test_unplug_request_not_plugged() {
1152        let mut mem_dev = default_virtio_mem();
1153        let guest_mem = mem_dev.vm.guest_memory().clone();
1154        let mut th = test_helper(mem_dev, &guest_mem);
1155        th.device().update_requested_size(1024);
1156        let addr = th.device().guest_address();
1157
1158        let unplug_count = METRICS.unplug_count.count();
1159        let unplug_bytes = METRICS.unplug_bytes.count();
1160        let unplug_fails = METRICS.unplug_fails.count();
1161
1162        let resp = emulate_request(
1163            &mut th,
1164            &guest_mem,
1165            Request::Unplug(RequestedRange { addr, nb_blocks: 1 }),
1166        );
1167        assert!(resp.is_error());
1168
1169        assert_eq!(METRICS.unplug_count.count(), unplug_count + 1);
1170        assert_eq!(METRICS.unplug_bytes.count(), unplug_bytes);
1171        assert_eq!(METRICS.unplug_fails.count(), unplug_fails + 1);
1172    }
1173
1174    #[test]
1175    fn test_unplug_all_request() {
1176        let mut mem_dev = default_virtio_mem();
1177        let guest_mem = mem_dev.vm.guest_memory().clone();
1178        let mut th = test_helper(mem_dev, &guest_mem);
1179        th.device().update_requested_size(1024);
1180        let addr = th.device().guest_address();
1181
1182        let unplug_all_count = METRICS.unplug_all_count.count();
1183        let unplug_all_fails = METRICS.unplug_all_fails.count();
1184
1185        // Plug some blocks
1186        let resp = emulate_request(
1187            &mut th,
1188            &guest_mem,
1189            Request::Plug(RequestedRange { addr, nb_blocks: 2 }),
1190        );
1191        assert!(resp.is_ack());
1192        assert_eq!(th.device().plugged_size_mib(), 4);
1193
1194        // Unplug all
1195        let resp = emulate_request(&mut th, &guest_mem, Request::UnplugAll);
1196        assert!(resp.is_ack());
1197        assert_eq!(th.device().plugged_size_mib(), 0);
1198
1199        assert_eq!(METRICS.unplug_all_count.count(), unplug_all_count + 1);
1200        assert_eq!(METRICS.unplug_all_fails.count(), unplug_all_fails);
1201    }
1202
1203    #[test]
1204    fn test_state_request_unplugged() {
1205        let mut mem_dev = default_virtio_mem();
1206        let guest_mem = mem_dev.vm.guest_memory().clone();
1207        let mut th = test_helper(mem_dev, &guest_mem);
1208        th.device().update_requested_size(1024);
1209        let addr = th.device().guest_address();
1210
1211        let state_count = METRICS.state_count.count();
1212        let state_fails = METRICS.state_fails.count();
1213
1214        let resp = emulate_request(
1215            &mut th,
1216            &guest_mem,
1217            Request::State(RequestedRange { addr, nb_blocks: 1 }),
1218        );
1219        assert_eq!(resp, Response::ack_with_state(BlockRangeState::Unplugged));
1220
1221        assert_eq!(METRICS.state_count.count(), state_count + 1);
1222        assert_eq!(METRICS.state_fails.count(), state_fails);
1223    }
1224
1225    #[test]
1226    fn test_state_request_plugged() {
1227        let mut mem_dev = default_virtio_mem();
1228        let guest_mem = mem_dev.vm.guest_memory().clone();
1229        let mut th = test_helper(mem_dev, &guest_mem);
1230        th.device().update_requested_size(1024);
1231        let addr = th.device().guest_address();
1232
1233        // Plug first
1234        let resp = emulate_request(
1235            &mut th,
1236            &guest_mem,
1237            Request::Plug(RequestedRange { addr, nb_blocks: 1 }),
1238        );
1239        assert!(resp.is_ack());
1240
1241        // Check state
1242        let resp = emulate_request(
1243            &mut th,
1244            &guest_mem,
1245            Request::State(RequestedRange { addr, nb_blocks: 1 }),
1246        );
1247        assert_eq!(resp, Response::ack_with_state(BlockRangeState::Plugged));
1248    }
1249
1250    #[test]
1251    fn test_state_request_mixed() {
1252        let mut mem_dev = default_virtio_mem();
1253        let guest_mem = mem_dev.vm.guest_memory().clone();
1254        let mut th = test_helper(mem_dev, &guest_mem);
1255        th.device().update_requested_size(1024);
1256        let addr = th.device().guest_address();
1257
1258        // Plug first block only
1259        let resp = emulate_request(
1260            &mut th,
1261            &guest_mem,
1262            Request::Plug(RequestedRange { addr, nb_blocks: 1 }),
1263        );
1264        assert!(resp.is_ack());
1265
1266        // Check state of 2 blocks (one plugged, one unplugged)
1267        let resp = emulate_request(
1268            &mut th,
1269            &guest_mem,
1270            Request::State(RequestedRange { addr, nb_blocks: 2 }),
1271        );
1272        assert_eq!(resp, Response::ack_with_state(BlockRangeState::Mixed));
1273    }
1274
1275    #[test]
1276    fn test_invalid_range_unaligned() {
1277        let mut mem_dev = default_virtio_mem();
1278        let guest_mem = mem_dev.vm.guest_memory().clone();
1279        let mut th = test_helper(mem_dev, &guest_mem);
1280        th.device().update_requested_size(1024);
1281        let addr = th.device().guest_address().unchecked_add(1);
1282
1283        let state_count = METRICS.state_count.count();
1284        let state_fails = METRICS.state_fails.count();
1285
1286        let resp = emulate_request(
1287            &mut th,
1288            &guest_mem,
1289            Request::State(RequestedRange { addr, nb_blocks: 1 }),
1290        );
1291        assert!(resp.is_error());
1292
1293        assert_eq!(METRICS.state_count.count(), state_count + 1);
1294        assert_eq!(METRICS.state_fails.count(), state_fails + 1);
1295    }
1296
1297    #[test]
1298    fn test_invalid_range_zero_blocks() {
1299        let mut mem_dev = default_virtio_mem();
1300        let guest_mem = mem_dev.vm.guest_memory().clone();
1301        let mut th = test_helper(mem_dev, &guest_mem);
1302        th.device().update_requested_size(1024);
1303        let addr = th.device().guest_address();
1304
1305        let resp = emulate_request(
1306            &mut th,
1307            &guest_mem,
1308            Request::State(RequestedRange { addr, nb_blocks: 0 }),
1309        );
1310        assert!(resp.is_error());
1311    }
1312
1313    #[test]
1314    fn test_invalid_range_out_of_bounds() {
1315        let mut mem_dev = default_virtio_mem();
1316        let guest_mem = mem_dev.vm.guest_memory().clone();
1317        let mut th = test_helper(mem_dev, &guest_mem);
1318        th.device().update_requested_size(4);
1319        let addr = th.device().guest_address();
1320
1321        let resp = emulate_request(
1322            &mut th,
1323            &guest_mem,
1324            Request::State(RequestedRange {
1325                addr,
1326                nb_blocks: 1024,
1327            }),
1328        );
1329        assert!(resp.is_error());
1330    }
1331
1332    #[test]
1333    fn test_unsupported_request() {
1334        let mut mem_dev = default_virtio_mem();
1335        let guest_mem = mem_dev.vm.guest_memory().clone();
1336        let mut th = test_helper(mem_dev, &guest_mem);
1337
1338        let queue_event_count = METRICS.queue_event_count.count();
1339        let queue_event_fails = METRICS.queue_event_fails.count();
1340
1341        th.add_desc_chain(
1342            MEM_QUEUE,
1343            0,
1344            &[(0, REQ_SIZE, 0), (1, RESP_SIZE, VIRTQ_DESC_F_WRITE)],
1345        );
1346        guest_mem
1347            .write_obj(
1348                virtio_mem::virtio_mem_req::from(Request::Unsupported(999)),
1349                th.desc_address(MEM_QUEUE, 0),
1350            )
1351            .unwrap();
1352        assert_eq!(th.emulate_for_msec(100).unwrap(), 1);
1353
1354        assert_eq!(METRICS.queue_event_count.count(), queue_event_count + 1);
1355        assert_eq!(METRICS.queue_event_fails.count(), queue_event_fails + 1);
1356    }
1357}