vmm/devices/virtio/transport/pci/common_config.rs

// Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause

use std::sync::atomic::{AtomicU16, Ordering};
use std::sync::{Arc, Mutex};

use byteorder::{ByteOrder, LittleEndian};
use serde::{Deserialize, Serialize};
use vm_memory::GuestAddress;

use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::queue::Queue;
use crate::devices::virtio::transport::pci::device::VIRTQ_MSI_NO_VECTOR;
use crate::logger::warn;

pub const VIRTIO_PCI_COMMON_CONFIG_ID: &str = "virtio_pci_common_config";

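/// Serializable snapshot of a [`VirtioPciCommonConfig`], as produced by its `state()` method.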
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VirtioPciCommonConfigState {
    pub driver_status: u8,
    pub config_generation: u8,
    pub device_feature_select: u32,
    pub driver_feature_select: u32,
    pub queue_select: u16,
    pub msix_config: u16,
    pub msix_queues: Vec<u16>,
}

/// Contains the data for reading and writing the common configuration structure of a virtio PCI
/// device.
///
/// * Registers:
///
/// ** About the whole device.
///    le32 device_feature_select;     // 0x00 // read-write
///    le32 device_feature;            // 0x04 // read-only for driver
///    le32 driver_feature_select;     // 0x08 // read-write
///    le32 driver_feature;            // 0x0C // read-write
///    le16 msix_config;               // 0x10 // read-write
///    le16 num_queues;                // 0x12 // read-only for driver
///    u8 device_status;               // 0x14 // read-write (driver_status)
///    u8 config_generation;           // 0x15 // read-only for driver
///
/// ** About a specific virtqueue.
///    le16 queue_select;              // 0x16 // read-write
///    le16 queue_size;                // 0x18 // read-write, power of 2, or 0.
///    le16 queue_msix_vector;         // 0x1A // read-write
///    le16 queue_enable;              // 0x1C // read-write (Ready)
///    le16 queue_notify_off;          // 0x1E // read-only for driver
///    le64 queue_desc;                // 0x20 // read-write
///    le64 queue_avail;               // 0x28 // read-write
///    le64 queue_used;                // 0x30 // read-write
#[derive(Debug)]
pub struct VirtioPciCommonConfig {
    pub driver_status: u8,
    pub config_generation: u8,
    pub device_feature_select: u32,
    pub driver_feature_select: u32,
    pub queue_select: u16,
    pub msix_config: Arc<AtomicU16>,
    pub msix_queues: Arc<Mutex<Vec<u16>>>,
}

impl VirtioPciCommonConfig {
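    /// Build the common configuration structure from a previously saved
    /// [`VirtioPciCommonConfigState`].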
    pub fn new(state: VirtioPciCommonConfigState) -> Self {
        VirtioPciCommonConfig {
            driver_status: state.driver_status,
            config_generation: state.config_generation,
            device_feature_select: state.device_feature_select,
            driver_feature_select: state.driver_feature_select,
            queue_select: state.queue_select,
            msix_config: Arc::new(AtomicU16::new(state.msix_config)),
            msix_queues: Arc::new(Mutex::new(state.msix_queues)),
        }
    }

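    /// Capture the current contents of the common configuration structure as a serializable
    /// state.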
    pub fn state(&self) -> VirtioPciCommonConfigState {
        VirtioPciCommonConfigState {
            driver_status: self.driver_status,
            config_generation: self.config_generation,
            device_feature_select: self.device_feature_select,
            driver_feature_select: self.driver_feature_select,
            queue_select: self.queue_select,
            msix_config: self.msix_config.load(Ordering::Acquire),
            msix_queues: self.msix_queues.lock().unwrap().clone(),
        }
    }

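    /// Handle a driver read of `data.len()` bytes at `offset` within the common configuration
    /// structure, dispatching to the byte/word/dword handlers based on the access width.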
    pub fn read(&mut self, offset: u64, data: &mut [u8], device: Arc<Mutex<dyn VirtioDevice>>) {
        assert!(data.len() <= 8);

        match data.len() {
            1 => {
                let v = self.read_common_config_byte(offset);
                data[0] = v;
            }
            2 => {
                let v = self.read_common_config_word(offset, device.lock().unwrap().queues());
                LittleEndian::write_u16(data, v);
            }
            4 => {
                let v = self.read_common_config_dword(offset, device);
                LittleEndian::write_u32(data, v);
            }
            _ => warn!(
                "pci: invalid data length for virtio read: len {}",
                data.len()
            ),
        }
    }

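    /// Handle a driver write of `data.len()` bytes at `offset` within the common configuration
    /// structure, dispatching to the byte/word/dword handlers based on the access width.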
    pub fn write(&mut self, offset: u64, data: &[u8], device: Arc<Mutex<dyn VirtioDevice>>) {
        assert!(data.len() <= 8);

        match data.len() {
            1 => self.write_common_config_byte(offset, data[0]),
            2 => self.write_common_config_word(
                offset,
                LittleEndian::read_u16(data),
                device.lock().unwrap().queues_mut(),
            ),
            4 => self.write_common_config_dword(offset, LittleEndian::read_u32(data), device),
            _ => warn!(
                "pci: invalid data length for virtio write: len {}",
                data.len()
            ),
        }
    }

    fn read_common_config_byte(&self, offset: u64) -> u8 {
        // The driver is only allowed to do aligned, properly sized access.
        match offset {
            0x14 => self.driver_status,
            0x15 => self.config_generation,
            _ => {
                warn!("pci: invalid virtio config byte read: 0x{:x}", offset);
                0
            }
        }
    }

    fn write_common_config_byte(&mut self, offset: u64, value: u8) {
        match offset {
            0x14 => self.driver_status = value,
            _ => {
                warn!("pci: invalid virtio config byte write: 0x{:x}", offset);
            }
        }
    }

    fn read_common_config_word(&self, offset: u64, queues: &[Queue]) -> u16 {
        match offset {
            0x10 => self.msix_config.load(Ordering::Acquire),
            0x12 => queues.len().try_into().unwrap(), // num_queues
            0x16 => self.queue_select,
            0x18 => self.with_queue(queues, |q| q.size).unwrap_or(0),
            // If `queue_select` points to an invalid queue we should return NO_VECTOR.
            // Reading from here
            // https://docs.oasis-open.org/virtio/virtio/v1.1/csprd01/virtio-v1.1-csprd01.html#x1-1280005:
            //
            // > The device MUST return vector mapped to a given event, (NO_VECTOR if unmapped) on
            // > read of config_msix_vector/queue_msix_vector.
            0x1a => self
                .msix_queues
                .lock()
                .unwrap()
                .get(self.queue_select as usize)
                .copied()
                .unwrap_or(VIRTQ_MSI_NO_VECTOR),
            0x1c => u16::from(self.with_queue(queues, |q| q.ready).unwrap_or(false)),
            0x1e => self.queue_select, // notify_off
            _ => {
                warn!("pci: invalid virtio register word read: 0x{:x}", offset);
                0
            }
        }
    }

    fn write_common_config_word(&mut self, offset: u64, value: u16, queues: &mut [Queue]) {
        match offset {
            0x10 => {
                // Make sure that the guest doesn't select an invalid vector. We are offering
                // `num_queues + 1` vectors (plus one for configuration updates). If an invalid
                // vector has been selected, we just store the `NO_VECTOR` value.
                let msix_queues = self.msix_queues.lock().expect("Poisoned lock");
                let nr_vectors = msix_queues.len() + 1;

                if (value as usize) < nr_vectors {
                    self.msix_config.store(value, Ordering::Release);
                } else {
                    self.msix_config
                        .store(VIRTQ_MSI_NO_VECTOR, Ordering::Release);
                }
            }
            0x16 => self.queue_select = value,
            0x18 => self.with_queue_mut(queues, |q| q.size = value),
            0x1a => {
                let mut msix_queues = self.msix_queues.lock().expect("Poisoned lock");
                let nr_vectors = msix_queues.len() + 1;
                // Make sure that `queue_select` points to a valid queue. If not, we won't do
                // anything here and subsequent reads at 0x1a will return `NO_VECTOR`.
                if let Some(queue) = msix_queues.get_mut(self.queue_select as usize) {
                    // Make sure that the guest doesn't select an invalid vector. We are offering
                    // `num_queues + 1` vectors (plus one for configuration updates). If an invalid
                    // vector has been selected, we just store the `NO_VECTOR` value.
                    if (value as usize) < nr_vectors {
                        *queue = value;
                    } else {
                        *queue = VIRTQ_MSI_NO_VECTOR;
                    }
                }
            }
            0x1c => self.with_queue_mut(queues, |q| {
                if value != 0 {
                    q.ready = value == 1;
                }
            }),
            _ => {
                warn!("pci: invalid virtio register word write: 0x{:x}", offset);
            }
        }
    }

    fn read_common_config_dword(&self, offset: u64, device: Arc<Mutex<dyn VirtioDevice>>) -> u32 {
        match offset {
            0x00 => self.device_feature_select,
            0x04 => {
                let locked_device = device.lock().unwrap();
                // Only 64 bits of features (2 pages) are defined for now, so limit
                // device_feature_select to avoid shifting by 64 or more bits.
                if self.device_feature_select < 2 {
                    ((locked_device.avail_features() >> (self.device_feature_select * 32))
                        & 0xffff_ffff) as u32
                } else {
                    0
                }
            }
            0x08 => self.driver_feature_select,
            0x20 => {
                let locked_device = device.lock().unwrap();
                self.with_queue(locked_device.queues(), |q| {
                    (q.desc_table_address.0 & 0xffff_ffff) as u32
                })
                .unwrap_or_default()
            }
            0x24 => {
                let locked_device = device.lock().unwrap();
                self.with_queue(locked_device.queues(), |q| {
                    (q.desc_table_address.0 >> 32) as u32
                })
                .unwrap_or_default()
            }
            0x28 => {
                let locked_device = device.lock().unwrap();
                self.with_queue(locked_device.queues(), |q| {
                    (q.avail_ring_address.0 & 0xffff_ffff) as u32
                })
                .unwrap_or_default()
            }
            0x2c => {
                let locked_device = device.lock().unwrap();
                self.with_queue(locked_device.queues(), |q| {
                    (q.avail_ring_address.0 >> 32) as u32
                })
                .unwrap_or_default()
            }
            0x30 => {
                let locked_device = device.lock().unwrap();
                self.with_queue(locked_device.queues(), |q| {
                    (q.used_ring_address.0 & 0xffff_ffff) as u32
                })
                .unwrap_or_default()
            }
            0x34 => {
                let locked_device = device.lock().unwrap();
                self.with_queue(locked_device.queues(), |q| {
                    (q.used_ring_address.0 >> 32) as u32
                })
                .unwrap_or_default()
            }
            _ => {
                warn!("pci: invalid virtio register dword read: 0x{:x}", offset);
                0
            }
        }
    }

    fn write_common_config_dword(
        &mut self,
        offset: u64,
        value: u32,
        device: Arc<Mutex<dyn VirtioDevice>>,
    ) {
        fn hi(v: &mut GuestAddress, x: u32) {
            *v = (*v & 0xffff_ffff) | (u64::from(x) << 32)
        }

        fn lo(v: &mut GuestAddress, x: u32) {
            *v = (*v & !0xffff_ffff) | u64::from(x)
        }

        let mut locked_device = device.lock().unwrap();

        match offset {
            0x00 => self.device_feature_select = value,
            0x08 => self.driver_feature_select = value,
            0x0c => locked_device.ack_features_by_page(self.driver_feature_select, value),
            0x20 => self.with_queue_mut(locked_device.queues_mut(), |q| {
                lo(&mut q.desc_table_address, value)
            }),
            0x24 => self.with_queue_mut(locked_device.queues_mut(), |q| {
                hi(&mut q.desc_table_address, value)
            }),
            0x28 => self.with_queue_mut(locked_device.queues_mut(), |q| {
                lo(&mut q.avail_ring_address, value)
            }),
            0x2c => self.with_queue_mut(locked_device.queues_mut(), |q| {
                hi(&mut q.avail_ring_address, value)
            }),
            0x30 => self.with_queue_mut(locked_device.queues_mut(), |q| {
                lo(&mut q.used_ring_address, value)
            }),
            0x34 => self.with_queue_mut(locked_device.queues_mut(), |q| {
                hi(&mut q.used_ring_address, value)
            }),
            _ => {
                warn!("pci: invalid virtio register dword write: 0x{:x}", offset);
            }
        }
    }

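    /// Apply `f` to the queue currently addressed by `queue_select`, if such a queue exists.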
    fn with_queue<U, F>(&self, queues: &[Queue], f: F) -> Option<U>
    where
        F: FnOnce(&Queue) -> U,
    {
        queues.get(self.queue_select as usize).map(f)
    }

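    /// Apply `f` to a mutable reference to the queue currently addressed by `queue_select`, if
    /// such a queue exists.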
    fn with_queue_mut<F: FnOnce(&mut Queue)>(&self, queues: &mut [Queue], f: F) {
        if let Some(queue) = queues.get_mut(self.queue_select as usize) {
            f(queue);
        }
    }
}

#[cfg(test)]
mod tests {
    use vm_memory::ByteValued;

    use super::*;
    use crate::devices::virtio::transport::mmio::tests::DummyDevice;

    fn default_device() -> Arc<Mutex<DummyDevice>> {
        Arc::new(Mutex::new(DummyDevice::new()))
    }

    fn default_pci_common_config() -> VirtioPciCommonConfig {
        VirtioPciCommonConfig {
            driver_status: 0,
            config_generation: 0,
            device_feature_select: 0,
            driver_feature_select: 0,
            queue_select: 0,
            msix_config: Arc::new(AtomicU16::new(0)),
            msix_queues: Arc::new(Mutex::new(vec![0u16; 2])),
        }
    }

    #[test]
    fn write_base_regs() {
        let mut regs = VirtioPciCommonConfig {
            driver_status: 0xaa,
            config_generation: 0x55,
            device_feature_select: 0x0,
            driver_feature_select: 0x0,
            queue_select: 0xff,
            msix_config: Arc::new(AtomicU16::new(0)),
            msix_queues: Arc::new(Mutex::new(vec![0; 3])),
        };

        let dev = Arc::new(Mutex::new(DummyDevice::new()));
        // Can set all bits of driver_status.
        regs.write(0x14, &[0x55], dev.clone());
        let mut read_back = vec![0x00];
        regs.read(0x14, &mut read_back, dev.clone());
        assert_eq!(read_back[0], 0x55);

        // The config generation register is read-only.
        regs.write(0x15, &[0xaa], dev.clone());
        let mut read_back = vec![0x00];
        regs.read(0x15, &mut read_back, dev.clone());
        assert_eq!(read_back[0], 0x55);

        // The device features register is read-only and passed through from the device.
        regs.write(0x04, &[0, 0, 0, 0], dev.clone());
        let mut read_back = vec![0, 0, 0, 0];
        regs.read(0x04, &mut read_back, dev.clone());
        assert_eq!(LittleEndian::read_u32(&read_back), 0u32);

        // Feature select registers are read/write.
        regs.write(0x00, &[1, 2, 3, 4], dev.clone());
        let mut read_back = vec![0, 0, 0, 0];
        regs.read(0x00, &mut read_back, dev.clone());
        assert_eq!(LittleEndian::read_u32(&read_back), 0x0403_0201);
        regs.write(0x08, &[1, 2, 3, 4], dev.clone());
        let mut read_back = vec![0, 0, 0, 0];
        regs.read(0x08, &mut read_back, dev.clone());
        assert_eq!(LittleEndian::read_u32(&read_back), 0x0403_0201);

        // 'queue_select' can be read and written.
        regs.write(0x16, &[0xaa, 0x55], dev.clone());
        let mut read_back = vec![0x00, 0x00];
        regs.read(0x16, &mut read_back, dev.clone());
        assert_eq!(read_back[0], 0xaa);
        assert_eq!(read_back[1], 0x55);

        // Getting the MSI vector when `queue_select` points to an invalid queue should return
        // NO_VECTOR (0xffff).
        regs.read(0x1a, &mut read_back, dev.clone());
        assert_eq!(read_back, [0xff, 0xff]);

        // Writing the MSI vector of an invalid `queue_select` does not have any effect.
        regs.write(0x1a, &[0x12, 0x13], dev.clone());
        regs.read(0x1a, &mut read_back, dev.clone());
        assert_eq!(read_back, [0xff, 0xff]);
        // A valid `queue_select` though should set up the corresponding MSI-X queue.
        regs.write(0x16, &[0x1, 0x0], dev.clone());
        assert_eq!(regs.queue_select, 1);
        regs.write(0x1a, &[0x1, 0x0], dev.clone());
        regs.read(0x1a, &mut read_back, dev);
        assert_eq!(LittleEndian::read_u16(&read_back[..2]), 0x1);
    }

    #[test]
    fn test_device_feature() {
        let mut config = default_pci_common_config();
        let device = default_device();
        let mut features = 0u32;

        device
            .lock()
            .unwrap()
            .set_avail_features(0x0000_1312_0000_1110);

        config.read(0x04, features.as_mut_slice(), device.clone());
        assert_eq!(features, 0x1110);
        // Select the second page.
        config.write(0x0, 1u32.as_slice(), device.clone());
        config.read(0x04, features.as_mut_slice(), device.clone());
        assert_eq!(features, 0x1312);
        // Try a third page. It doesn't exist so we should get all 0s.
        config.write(0x0, 2u32.as_slice(), device.clone());
        config.read(0x04, features.as_mut_slice(), device.clone());
        assert_eq!(features, 0x0);
    }

    #[test]
    fn test_driver_feature() {
        let mut config = default_pci_common_config();
        let device = default_device();
        device
            .lock()
            .unwrap()
            .set_avail_features(0x0000_1312_0000_1110);

        // ACK some features of the first page
        config.write(0x0c, 0x1100u32.as_slice(), device.clone());
        assert_eq!(device.lock().unwrap().acked_features(), 0x1100);
        // ACK some features of the second page
        config.write(0x08, 1u32.as_slice(), device.clone());
        config.write(0x0c, 0x0000_1310u32.as_slice(), device.clone());
        assert_eq!(
            device.lock().unwrap().acked_features(),
            0x0000_1310_0000_1100
        );
    }

    #[test]
    fn test_num_queues() {
        let mut config = default_pci_common_config();
        let device = default_device();
        let mut num_queues = 0u16;

        config.read(0x12, num_queues.as_mut_slice(), device.clone());
        assert_eq!(num_queues, 2);
        // `num_queues` is read-only
        config.write(0x12, 4u16.as_slice(), device.clone());
        config.read(0x12, num_queues.as_mut_slice(), device.clone());
        assert_eq!(num_queues, 2);
    }

    #[test]
    fn test_device_status() {
        let mut config = default_pci_common_config();
        let device = default_device();
        let mut status = 0u8;

        config.read(0x14, status.as_mut_slice(), device.clone());
        assert_eq!(status, 0);
        config.write(0x14, 0x42u8.as_slice(), device.clone());
        config.read(0x14, status.as_mut_slice(), device.clone());
        assert_eq!(status, 0x42);
    }

    #[test]
    fn test_config_msix_vector() {
        let mut config = default_pci_common_config();
        let device = default_device();
        let mut vector: u16 = 0;

        // Our device has 2 queues, so we should be using 3 vectors in total.
        // Trying to set a vector bigger than that should fail. Observing the
        // failure happens through a subsequent read that should return NO_VECTOR.
        config.write(0x10, 3u16.as_slice(), device.clone());
        config.read(0x10, vector.as_mut_slice(), device.clone());
        assert_eq!(vector, VIRTQ_MSI_NO_VECTOR);

        // Any of the 3 valid values should work
        for i in 0u16..3 {
            config.write(0x10, i.as_slice(), device.clone());
            config.read(0x10, vector.as_mut_slice(), device.clone());
            assert_eq!(vector, i);
        }
    }

    #[test]
    fn test_queue_size() {
        let mut config = default_pci_common_config();
        let device = default_device();
        let mut len = 0u16;
        let mut max_size = [0u16; 2];

        for queue_id in 0u16..2 {
            config.write(0x16, queue_id.as_slice(), device.clone());
            config.read(0x18, len.as_mut_slice(), device.clone());
            assert_eq!(
                len,
                device.lock().unwrap().queues()[queue_id as usize].max_size
            );
            max_size[queue_id as usize] = len;
        }

        config.write(0x16, 2u16.as_slice(), device.clone());
        config.read(0x18, len.as_mut_slice(), device.clone());
        assert_eq!(len, 0);

        // Set up a size smaller than the maximum offered.
        for queue_id in 0u16..2 {
            config.write(0x16, queue_id.as_slice(), device.clone());
            config.write(
                0x18,
                (max_size[queue_id as usize] - 1).as_slice(),
                device.clone(),
            );
            config.read(0x18, len.as_mut_slice(), device.clone());
            assert_eq!(len, max_size[queue_id as usize] - 1);
        }
    }

    #[test]
    fn test_queue_msix_vector() {
        let mut config = default_pci_common_config();
        let device = default_device();
        let mut vector = 0u16;

        // Our device has 2 queues, so we should be using 3 vectors in total.
        // Trying to set a vector bigger than that should fail. Observing the
        // failure happens through a subsequent read that should return NO_VECTOR.
        for queue_id in 0u16..2 {
            // Select queue
            config.write(0x16, queue_id.as_slice(), device.clone());

            config.write(0x1a, 3u16.as_slice(), device.clone());
            config.read(0x1a, vector.as_mut_slice(), device.clone());
            assert_eq!(vector, VIRTQ_MSI_NO_VECTOR);

            // Any of the 3 valid values should work
            for vector_id in 0u16..3 {
                config.write(0x1a, vector_id.as_slice(), device.clone());
                config.read(0x1a, vector.as_mut_slice(), device.clone());
                assert_eq!(vector, vector_id);
            }
        }
    }

    #[test]
    fn test_queue_enable() {
        let mut config = default_pci_common_config();
        let device = default_device();
        let mut enabled = 0u16;

        for queue_id in 0u16..2 {
            config.write(0x16, queue_id.as_slice(), device.clone());

            // Initially queue should be disabled
            config.read(0x1c, enabled.as_mut_slice(), device.clone());
            assert_eq!(enabled, 0);

            // Enable queue
            config.write(0x1c, 1u16.as_slice(), device.clone());
            config.read(0x1c, enabled.as_mut_slice(), device.clone());
            assert_eq!(enabled, 1);

            // According to the specification "The driver MUST NOT write a 0 to queue_enable."
            config.write(0x1c, 0u16.as_slice(), device.clone());
            config.read(0x1c, enabled.as_mut_slice(), device.clone());
            assert_eq!(enabled, 1);
        }
    }

    #[test]
    fn test_queue_notify_off() {
        let mut config = default_pci_common_config();
        let device = default_device();
        let mut offset = 0u16;

        // `queue_notify_off` is an offset (an index, not bytes) into the notification structure
        // that helps locate the address of the queue notify within the device's BAR. This is
        // a field set up by the device and should be read-only for the driver.

        for queue_id in 0u16..2 {
            config.write(0x16, queue_id.as_slice(), device.clone());
            config.read(0x1e, offset.as_mut_slice(), device.clone());
            assert_eq!(offset, queue_id);

            // Writing to it should not have any effect
            config.write(0x1e, 0x42u16.as_slice(), device.clone());
            config.read(0x1e, offset.as_mut_slice(), device.clone());
            assert_eq!(offset, queue_id);
        }
    }

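    // Helpers that access a 64-bit register as two 32-bit accesses, which the VirtIO spec allows
    // for 64-bit fields.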
    fn write_64bit_field(
        config: &mut VirtioPciCommonConfig,
        device: Arc<Mutex<DummyDevice>>,
        offset: u64,
        value: u64,
    ) {
        let lo32 = (value & 0xffff_ffff) as u32;
        let hi32 = (value >> 32) as u32;

        config.write(offset, lo32.as_slice(), device.clone());
        config.write(offset + 4, hi32.as_slice(), device.clone());
    }

    fn read_64bit_field(
        config: &mut VirtioPciCommonConfig,
        device: Arc<Mutex<DummyDevice>>,
        offset: u64,
    ) -> u64 {
        let mut lo32 = 0u32;
        let mut hi32 = 0u32;

        config.read(offset, lo32.as_mut_slice(), device.clone());
        config.read(offset + 4, hi32.as_mut_slice(), device.clone());

        (lo32 as u64) | ((hi32 as u64) << 32)
    }

    #[test]
    fn test_queue_addresses() {
        let mut config = default_pci_common_config();
        let device = default_device();

        for queue_id in 0u16..2 {
            config.write(0x16, queue_id.as_slice(), device.clone());

            for offset in [0x20, 0x28, 0x30] {
                write_64bit_field(&mut config, device.clone(), offset, 0x0000_1312_0000_1110);
                assert_eq!(
                    read_64bit_field(&mut config, device.clone(), offset),
                    0x0000_1312_0000_1110
                );
            }
        }
    }

    #[test]
    fn test_bad_width_reads() {
        let mut config = default_pci_common_config();
        let device = default_device();

        // According to the VirtIO specification (section 4.1.3.1)
        //
        // > For device configuration access, the driver MUST use 8-bit wide accesses for 8-bit
        // > wide fields, 16-bit wide and aligned accesses for 16-bit wide fields and 32-bit wide
        // > and aligned accesses for 32-bit and 64-bit wide fields. For 64-bit fields, the driver
        // > MAY access each of the high and low 32-bit parts of the field independently.

        // 64-bit fields
        device.lock().unwrap().queues_mut()[0].desc_table_address =
            GuestAddress(0x0000_1312_0000_1110);
        let mut buffer = [0u8; 8];
        config.read(0x20, &mut buffer[..1], device.clone());
        assert_eq!(buffer, [0u8; 8]);
        config.read(0x20, &mut buffer[..2], device.clone());
        assert_eq!(buffer, [0u8; 8]);
        config.read(0x20, &mut buffer[..8], device.clone());
        assert_eq!(buffer, [0u8; 8]);
        config.read(0x20, &mut buffer[..4], device.clone());
        assert_eq!(LittleEndian::read_u32(&buffer[..4]), 0x1110);
        config.read(0x24, &mut buffer[..4], device.clone());
        assert_eq!(LittleEndian::read_u32(&buffer[..4]), 0x1312);

        // 32-bit fields
        config.device_feature_select = 0x42;
        let mut buffer = [0u8; 8];
        config.read(0, &mut buffer[..1], device.clone());
        assert_eq!(buffer, [0u8; 8]);
        config.read(0, &mut buffer[..2], device.clone());
        assert_eq!(buffer, [0u8; 8]);
        config.read(0, &mut buffer[..8], device.clone());
        assert_eq!(buffer, [0u8; 8]);
        config.read(0, &mut buffer[..4], device.clone());
        assert_eq!(LittleEndian::read_u32(&buffer[..4]), 0x42);

        // 16-bit fields
        let mut buffer = [0u8; 8];
        config.queue_select = 0x42;
        config.read(0x16, &mut buffer[..1], device.clone());
        assert_eq!(buffer, [0u8; 8]);
        config.read(0x16, &mut buffer[..4], device.clone());
        assert_eq!(buffer, [0u8; 8]);
        config.read(0x16, &mut buffer[..8], device.clone());
        assert_eq!(buffer, [0u8; 8]);
        config.read(0x16, &mut buffer[..2], device.clone());
        assert_eq!(LittleEndian::read_u16(&buffer[..2]), 0x42);

        // 8-bit fields
        let mut buffer = [0u8; 8];
        config.driver_status = 0x42;
        config.read(0x14, &mut buffer[..2], device.clone());
        assert_eq!(buffer, [0u8; 8]);
        config.read(0x14, &mut buffer[..4], device.clone());
        assert_eq!(buffer, [0u8; 8]);
        config.read(0x14, &mut buffer[..8], device.clone());
        assert_eq!(buffer, [0u8; 8]);
        config.read(0x14, &mut buffer[..1], device.clone());
        assert_eq!(buffer[0], 0x42);
    }
}