use std::fmt::Debug;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, Barrier, Mutex, MutexGuard};

use vmm_sys_util::eventfd::EventFd;

use super::{VirtioInterrupt, VirtioInterruptType};
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::device_status;
use crate::devices::virtio::queue::Queue;
use crate::logger::{IncMetric, METRICS, error, warn};
use crate::utils::byte_order;
use crate::vstate::bus::BusDevice;
use crate::vstate::interrupts::InterruptError;
use crate::vstate::memory::{GuestAddress, GuestMemoryMmap};

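// Vendor ID reported to the driver through the register at offset 0x0c.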
const VENDOR_ID: u32 = 0;

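// Interrupt flags shared by the interrupt status (0x60) and interrupt
// acknowledge (0x64) registers; the values match linux/virtio_mmio.h.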
pub const VIRTIO_MMIO_INT_VRING: u32 = 0x01;
pub const VIRTIO_MMIO_INT_CONFIG: u32 = 0x02;

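// Magic value the driver reads from offset 0x0: "virt" in little-endian ASCII.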
const MMIO_MAGIC_VALUE: u32 = 0x7472_6976;

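// Device version 2 identifies a modern (non-legacy) virtio-mmio device.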
const MMIO_VERSION: u32 = 2;

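/// Implements the virtio-over-MMIO transport layer for a `VirtioDevice`.
///
/// Guest reads and writes to the device's MMIO register window are dispatched
/// here and translated into feature negotiation, queue configuration, device
/// activation, and interrupt acknowledgement on the wrapped device.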
#[derive(Debug, Clone)]
pub struct MmioTransport {
    device: Arc<Mutex<dyn VirtioDevice>>,
    pub(crate) features_select: u32,
    pub(crate) acked_features_select: u32,
    pub(crate) queue_select: u32,
    pub(crate) device_status: u32,
    pub(crate) config_generation: u32,
    mem: GuestMemoryMmap,
    pub(crate) interrupt: Arc<IrqTrigger>,
    pub is_vhost_user: bool,
}

impl MmioTransport {
    pub fn new(
        mem: GuestMemoryMmap,
        interrupt: Arc<IrqTrigger>,
        device: Arc<Mutex<dyn VirtioDevice>>,
        is_vhost_user: bool,
    ) -> MmioTransport {
        MmioTransport {
            device,
            features_select: 0,
            acked_features_select: 0,
            queue_select: 0,
            device_status: device_status::INIT,
            config_generation: 0,
            mem,
            interrupt,
            is_vhost_user,
        }
    }

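    /// Acquires the lock on the underlying device, panicking if another
    /// thread poisoned it.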
    pub fn locked_device(&self) -> MutexGuard<'_, dyn VirtioDevice + 'static> {
        self.device.lock().expect("Poisoned lock")
    }

    pub fn device(&self) -> Arc<Mutex<dyn VirtioDevice>> {
        self.device.clone()
    }

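    /// Returns true iff every bit of `set` is set and every bit of `clr` is
    /// clear in the current device status.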
    fn check_device_status(&self, set: u32, clr: u32) -> bool {
        self.device_status & (set | clr) == set
    }

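    /// Applies `f` to the currently selected queue, returning the default `d`
    /// when `queue_select` is out of range.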
    fn with_queue<U, F>(&self, d: U, f: F) -> U
    where
        F: FnOnce(&Queue) -> U,
        U: Debug,
    {
        match self
            .locked_device()
            .queues()
            .get(self.queue_select as usize)
        {
            Some(queue) => f(queue),
            None => d,
        }
    }

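    /// Applies `f` to the currently selected queue, returning whether the
    /// selection was valid.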
    fn with_queue_mut<F: FnOnce(&mut Queue)>(&mut self, f: F) -> bool {
        if let Some(queue) = self
            .locked_device()
            .queues_mut()
            .get_mut(self.queue_select as usize)
        {
            f(queue);
            true
        } else {
            false
        }
    }

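    /// Mutates a queue field, which is only legal after FEATURES_OK and
    /// before DRIVER_OK (and outside the FAILED state); in any other state
    /// the write is dropped with a warning.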
    fn update_queue_field<F: FnOnce(&mut Queue)>(&mut self, f: F) {
        if self.check_device_status(
            device_status::FEATURES_OK,
            device_status::DRIVER_OK | device_status::FAILED,
        ) {
            self.with_queue_mut(f);
        } else {
            warn!(
                "update virtio queue in invalid state {:#x}",
                self.device_status
            );
        }
    }

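    /// Resets the transport registers and every queue back to their power-on
    /// values.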
    fn reset(&mut self) {
        if self.locked_device().is_activated() {
            warn!("reset device while it's still in active state");
        }
        self.features_select = 0;
        self.acked_features_select = 0;
        self.queue_select = 0;
        self.interrupt.irq_status.store(0, Ordering::SeqCst);
        self.device_status = device_status::INIT;
        for queue in self.locked_device().queues_mut() {
            *queue = Queue::new(queue.max_size);
        }
    }

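    /// Handles a driver write to the status register (0x70).
    ///
    /// Only the transition order mandated by the virtio spec is accepted:
    /// INIT -> ACKNOWLEDGE -> DRIVER -> FEATURES_OK -> DRIVER_OK. Reaching
    /// DRIVER_OK activates the device; writing 0 requests a device reset; a
    /// write with the FAILED bit latches FAILED. Anything else only logs a
    /// warning.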
    #[allow(unused_assignments)]
    fn set_device_status(&mut self, status: u32) {
        use device_status::*;
        match !self.device_status & status {
            ACKNOWLEDGE if self.device_status == INIT => {
                self.device_status = status;
            }
            DRIVER if self.device_status == ACKNOWLEDGE => {
                self.device_status = status;
            }
            FEATURES_OK if self.device_status == (ACKNOWLEDGE | DRIVER) => {
                self.device_status = status;
            }
            DRIVER_OK if self.device_status == (ACKNOWLEDGE | DRIVER | FEATURES_OK) => {
                self.device_status = status;
                let mut locked_device = self.device.lock().expect("Poisoned lock");
                let device_activated = locked_device.is_activated();
                if !device_activated {
                    let activate_result =
                        locked_device.activate(self.mem.clone(), self.interrupt.clone());
                    if let Err(err) = activate_result {
                        self.device_status |= DEVICE_NEEDS_RESET;

                        let _ = self.interrupt.trigger(VirtioInterruptType::Config);

                        error!("Failed to activate virtio device: {}", err)
                    }
                }
            }
            _ if (status & FAILED) != 0 => {
                self.device_status |= FAILED;
            }
            _ if status == 0 => {
                {
                    let mut locked_device = self.device.lock().expect("Poisoned lock");
                    if locked_device.is_activated() {
                        let mut device_status = self.device_status;
                        let reset_result = locked_device.reset();
                        match reset_result {
                            Some((_interrupt_evt, _queue_evts)) => {}
                            None => {
                                device_status |= FAILED;
                            }
                        }
                        self.device_status = device_status;
                    }
                }

                if self.device_status & FAILED == 0 {
                    self.reset();
                }
            }
            _ => {
                warn!(
                    "invalid virtio driver status transition: {:#x} -> {:#x}",
                    self.device_status, status
                );
            }
        }
    }
}

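// The match arms below implement the virtio-mmio register layout:
// 0x00 magic, 0x04 version, 0x08 device id, 0x0c vendor id, 0x10 device
// features (0x14 selects the page), 0x20 driver features (0x24 selects the
// page), 0x30 queue select, 0x34 max queue size, 0x38 queue size, 0x44 queue
// ready, 0x50 queue notify, 0x60 interrupt status, 0x64 interrupt ack,
// 0x70 device status, 0x80..0xa4 queue addresses, 0xfc config generation,
// and 0x100+ the device-specific config space.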
impl BusDevice for MmioTransport {
    fn read(&mut self, base: u64, offset: u64, data: &mut [u8]) {
        match offset {
            0x00..=0xff if data.len() == 4 => {
                let v = match offset {
                    0x0 => MMIO_MAGIC_VALUE,
                    0x04 => MMIO_VERSION,
                    0x08 => self.locked_device().device_type(),
                    0x0c => VENDOR_ID,
                    0x10 => {
                        let mut features = self
                            .locked_device()
                            .avail_features_by_page(self.features_select);
                        if self.features_select == 1 {
                            features |= 0x1;
                        }
                        features
                    }
                    0x34 => self.with_queue(0, |q| u32::from(q.max_size)),
                    0x44 => self.with_queue(0, |q| u32::from(q.ready)),
                    0x60 => {
                        let is = self.interrupt.irq_status.load(Ordering::SeqCst);
                        if !self.is_vhost_user {
                            is
                        } else if is == VIRTIO_MMIO_INT_CONFIG {
                            VIRTIO_MMIO_INT_CONFIG
                        } else {
                            VIRTIO_MMIO_INT_VRING
                        }
                    }
                    0x70 => self.device_status,
                    0xfc => self.config_generation,
                    _ => {
                        warn!("unknown virtio mmio register read: {:#x}", offset);
                        return;
                    }
                };
                byte_order::write_le_u32(data, v);
            }
            0x100..=0xfff => self.locked_device().read_config(offset - 0x100, data),
            _ => {
                warn!(
                    "invalid virtio mmio read: {base:#x}:{offset:#x}:{:#x}",
                    data.len()
                );
            }
        };
    }

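    // Queue addresses are 64-bit but programmed as two 32-bit writes; `hi`
    // and `lo` below update the corresponding half of a `GuestAddress`.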
    fn write(&mut self, base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
        fn hi(v: &mut GuestAddress, x: u32) {
            *v = (*v & 0xffff_ffff) | (u64::from(x) << 32)
        }

        fn lo(v: &mut GuestAddress, x: u32) {
            *v = (*v & !0xffff_ffff) | u64::from(x)
        }

        match offset {
            0x00..=0xff if data.len() == 4 => {
                let v = byte_order::read_le_u32(data);
                match offset {
                    0x14 => self.features_select = v,
                    0x20 => {
                        if self.check_device_status(
                            device_status::DRIVER,
                            device_status::FEATURES_OK
                                | device_status::FAILED
                                | device_status::DEVICE_NEEDS_RESET,
                        ) {
                            self.locked_device()
                                .ack_features_by_page(self.acked_features_select, v);
                        } else {
                            warn!(
                                "ack virtio features in invalid state {:#x}",
                                self.device_status
                            );
                        }
                    }
                    0x24 => self.acked_features_select = v,
                    0x30 => self.queue_select = v,
                    0x38 => self.update_queue_field(|q| q.size = (v & 0xffff) as u16),
                    0x44 => self.update_queue_field(|q| q.ready = v == 1),
                    0x50 => {
                        if self.check_device_status(device_status::DRIVER_OK, 0) {
                            self.locked_device().nyx_handle_queue_event(v as u16);
                        } else {
                            warn!("queue notify in invalid state {:#x}", self.device_status);
                        }
                    }
                    0x64 => {
                        if self.check_device_status(device_status::DRIVER_OK, 0) {
                            self.interrupt.irq_status.fetch_and(!v, Ordering::SeqCst);
                        }
                    }
                    0x70 => self.set_device_status(v),
                    0x80 => self.update_queue_field(|q| lo(&mut q.desc_table_address, v)),
                    0x84 => self.update_queue_field(|q| hi(&mut q.desc_table_address, v)),
                    0x90 => self.update_queue_field(|q| lo(&mut q.avail_ring_address, v)),
                    0x94 => self.update_queue_field(|q| hi(&mut q.avail_ring_address, v)),
                    0xa0 => self.update_queue_field(|q| lo(&mut q.used_ring_address, v)),
                    0xa4 => self.update_queue_field(|q| hi(&mut q.used_ring_address, v)),
                    _ => {
                        warn!("unknown virtio mmio register write: {:#x}", offset);
                    }
                }
            }
            0x100..=0xfff => {
                if self.check_device_status(
                    device_status::DRIVER,
                    device_status::FAILED | device_status::DEVICE_NEEDS_RESET,
                ) {
                    self.locked_device().write_config(offset - 0x100, data)
                } else {
                    warn!("can not write to device config data area before driver is ready");
                }
            }
            _ => {
                warn!(
                    "invalid virtio mmio write: {base:#x}:{offset:#x}:{:#x}",
                    data.len()
                );
            }
        }
        None
    }
}

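/// The two classes of interrupt a virtio-mmio device can raise towards the
/// guest.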
#[derive(Debug)]
pub enum IrqType {
    Config,
    Vring,
}

impl From<VirtioInterruptType> for IrqType {
    fn from(interrupt_type: VirtioInterruptType) -> Self {
        match interrupt_type {
            VirtioInterruptType::Config => IrqType::Config,
            VirtioInterruptType::Queue(_) => IrqType::Vring,
        }
    }
}

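/// Pairs the virtio-mmio interrupt status register with the eventfd used to
/// signal the guest's interrupt line.
///
/// `trigger_irq` sets the corresponding status bit and writes the eventfd;
/// the driver clears bits through the interrupt acknowledge register (0x64).
///
/// Minimal usage sketch (a hypothetical caller, not code from this module):
///
/// ```ignore
/// let irq = Arc::new(IrqTrigger::new());
/// // Announce used buffers in a queue: sets VIRTIO_MMIO_INT_VRING in
/// // `irq_status`, then signals `irq_evt`.
/// irq.trigger(VirtioInterruptType::Queue(0)).unwrap();
/// ```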
#[derive(Debug)]
pub struct IrqTrigger {
    pub(crate) irq_status: Arc<AtomicU32>,
    pub(crate) irq_evt: EventFd,
}

impl Default for IrqTrigger {
    fn default() -> Self {
        Self::new()
    }
}

impl VirtioInterrupt for IrqTrigger {
    fn trigger(&self, interrupt_type: VirtioInterruptType) -> Result<(), InterruptError> {
        METRICS.interrupts.triggers.inc();
        match interrupt_type {
            VirtioInterruptType::Config => self.trigger_irq(IrqType::Config),
            VirtioInterruptType::Queue(_) => self.trigger_irq(IrqType::Vring),
        }
    }

    fn trigger_queues(&self, queues: &[u16]) -> Result<(), InterruptError> {
        if queues.is_empty() {
            Ok(())
        } else {
            METRICS.interrupts.triggers.inc();
            self.trigger_irq(IrqType::Vring)
        }
    }

    fn notifier(&self, _interrupt_type: VirtioInterruptType) -> Option<&EventFd> {
        Some(&self.irq_evt)
    }

    fn status(&self) -> Arc<AtomicU32> {
        self.irq_status.clone()
    }

    #[cfg(test)]
    fn has_pending_interrupt(&self, interrupt_type: VirtioInterruptType) -> bool {
        if let Ok(num_irqs) = self.irq_evt.read() {
            if num_irqs == 0 {
                return false;
            }

            let irq_status = self.irq_status.load(Ordering::SeqCst);
            return matches!(
                (irq_status, interrupt_type.into()),
                (VIRTIO_MMIO_INT_CONFIG, IrqType::Config) | (VIRTIO_MMIO_INT_VRING, IrqType::Vring)
            );
        }
        false
    }

    #[cfg(test)]
    fn ack_interrupt(&self, interrupt_type: VirtioInterruptType) {
        let irq = match interrupt_type {
            VirtioInterruptType::Config => VIRTIO_MMIO_INT_CONFIG,
            VirtioInterruptType::Queue(_) => VIRTIO_MMIO_INT_VRING,
        };
        self.irq_status.fetch_and(!irq, Ordering::SeqCst);
    }
}

impl IrqTrigger {
    pub fn new() -> Self {
        Self {
            irq_status: Arc::new(AtomicU32::new(0)),
            irq_evt: EventFd::new(libc::EFD_NONBLOCK)
                .expect("Could not create EventFd for IrqTrigger"),
        }
    }

    fn trigger_irq(&self, irq_type: IrqType) -> Result<(), InterruptError> {
        let irq = match irq_type {
            IrqType::Config => VIRTIO_MMIO_INT_CONFIG,
            IrqType::Vring => VIRTIO_MMIO_INT_VRING,
        };
        self.irq_status.fetch_or(irq, Ordering::SeqCst);

        self.irq_evt.write(1).map_err(|err| {
            error!("Failed to send irq to the guest: {:?}", err);
            err
        })?;

        Ok(())
    }
}

#[cfg(test)]
pub(crate) mod tests {
    use std::ops::Deref;

    use vmm_sys_util::eventfd::EventFd;

    use super::*;
    use crate::devices::virtio::ActivateError;
    use crate::devices::virtio::device_status::DEVICE_NEEDS_RESET;
    use crate::impl_device_type;
    use crate::test_utils::single_region_mem;
    use crate::utils::byte_order::{read_le_u32, write_le_u32};
    use crate::utils::u64_to_usize;
    use crate::vstate::memory::GuestMemoryMmap;

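    /// Minimal `VirtioDevice` implementation used to exercise the transport
    /// in isolation.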
    #[derive(Debug)]
    pub(crate) struct DummyDevice {
        acked_features: u64,
        avail_features: u64,
        interrupt_trigger: Option<Arc<dyn VirtioInterrupt>>,
        queue_evts: Vec<EventFd>,
        queues: Vec<Queue>,
        device_activated: bool,
        config_bytes: [u8; 0xeff],
        activate_should_error: bool,
    }

    impl DummyDevice {
        pub(crate) fn new() -> Self {
            DummyDevice {
                acked_features: 0,
                avail_features: 0,
                interrupt_trigger: None,
                queue_evts: vec![
                    EventFd::new(libc::EFD_NONBLOCK).unwrap(),
                    EventFd::new(libc::EFD_NONBLOCK).unwrap(),
                ],
                queues: vec![Queue::new(16), Queue::new(32)],
                device_activated: false,
                config_bytes: [0; 0xeff],
                activate_should_error: false,
            }
        }

        pub fn set_avail_features(&mut self, avail_features: u64) {
            self.avail_features = avail_features;
        }
    }

    impl VirtioDevice for DummyDevice {
        impl_device_type!(123);

        fn avail_features(&self) -> u64 {
            self.avail_features
        }

        fn acked_features(&self) -> u64 {
            self.acked_features
        }

        fn set_acked_features(&mut self, acked_features: u64) {
            self.acked_features = acked_features;
        }

        fn queues(&self) -> &[Queue] {
            &self.queues
        }

        fn queues_mut(&mut self) -> &mut [Queue] {
            &mut self.queues
        }

        fn queue_events(&self) -> &[EventFd] {
            &self.queue_evts
        }

        fn interrupt_trigger(&self) -> &dyn VirtioInterrupt {
            self.interrupt_trigger
                .as_ref()
                .expect("Device is not activated")
                .deref()
        }

        fn read_config(&self, offset: u64, data: &mut [u8]) {
            data.copy_from_slice(&self.config_bytes[u64_to_usize(offset)..]);
        }

        fn write_config(&mut self, offset: u64, data: &[u8]) {
            for (i, item) in data.iter().enumerate() {
                self.config_bytes[u64_to_usize(offset) + i] = *item;
            }
        }

        fn activate(
            &mut self,
            _: GuestMemoryMmap,
            interrupt: Arc<dyn VirtioInterrupt>,
        ) -> Result<(), ActivateError> {
            self.device_activated = true;
            self.interrupt_trigger = Some(interrupt);
            if self.activate_should_error {
                Err(ActivateError::EventFd)
            } else {
                Ok(())
            }
        }

        fn is_activated(&self) -> bool {
            self.device_activated
        }
    }

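    // Drives the status register (0x70) through the bus interface, as the
    // guest driver would.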
    fn set_device_status(d: &mut MmioTransport, status: u32) {
        let mut buf = [0; 4];
        write_le_u32(&mut buf[..], status);
        d.write(0x0, 0x70, &buf[..]);
    }

    #[test]
    fn test_new() {
        let m = single_region_mem(0x1000);
        let interrupt = Arc::new(IrqTrigger::new());
        let mut dummy = DummyDevice::new();
        // DummyDevice does not override `reset`, so it is expected to return
        // `None`.
        assert!(dummy.reset().is_none());
        let mut d = MmioTransport::new(m, interrupt, Arc::new(Mutex::new(dummy)), false);

        assert_eq!(d.locked_device().queue_events().len(), 2);

        d.queue_select = 0;
        assert_eq!(d.with_queue(0, |q| q.max_size), 16);
        assert!(d.with_queue_mut(|q| q.size = 16));
        assert_eq!(d.locked_device().queues()[d.queue_select as usize].size, 16);

        d.queue_select = 1;
        assert_eq!(d.with_queue(0, |q| q.max_size), 32);
        assert!(d.with_queue_mut(|q| q.size = 16));
        assert_eq!(d.locked_device().queues()[d.queue_select as usize].size, 16);

        d.queue_select = 2;
        assert_eq!(d.with_queue(0, |q| q.max_size), 0);
        assert!(!d.with_queue_mut(|q| q.size = 16));
    }

    #[test]
    fn test_bus_device_read() {
        let m = single_region_mem(0x1000);
        let interrupt = Arc::new(IrqTrigger::new());
        let mut d = MmioTransport::new(
            m,
            interrupt,
            Arc::new(Mutex::new(DummyDevice::new())),
            false,
        );

        let mut buf = vec![0xff, 0, 0xfe, 0];
        let buf_copy = buf.to_vec();

        // Reads with a buffer whose length is not 4 bytes are ignored.
        buf.push(0);
        d.read(0x0, 0, &mut buf[..]);
        assert_eq!(buf[..4], buf_copy[..]);

        // With a 4-byte buffer the register reads go through.
        buf.pop();

        d.read(0x0, 0, &mut buf[..]);
        assert_eq!(read_le_u32(&buf[..]), MMIO_MAGIC_VALUE);

        d.read(0x0, 0x04, &mut buf[..]);
        assert_eq!(read_le_u32(&buf[..]), MMIO_VERSION);

        d.read(0x0, 0x08, &mut buf[..]);
        assert_eq!(read_le_u32(&buf[..]), d.locked_device().device_type());

        d.read(0x0, 0x0c, &mut buf[..]);
        assert_eq!(read_le_u32(&buf[..]), VENDOR_ID);

        d.features_select = 0;
        d.read(0x0, 0x10, &mut buf[..]);
        assert_eq!(
            read_le_u32(&buf[..]),
            d.locked_device().avail_features_by_page(0)
        );

        // When page 1 is selected, bit 0 (VIRTIO_F_VERSION_1) is always
        // reported as set.
        d.features_select = 1;
        d.read(0x0, 0x10, &mut buf[..]);
        assert_eq!(
            read_le_u32(&buf[..]),
            d.locked_device().avail_features_by_page(0) | 0x1
        );

        d.read(0x0, 0x34, &mut buf[..]);
        assert_eq!(read_le_u32(&buf[..]), 16);

        d.read(0x0, 0x44, &mut buf[..]);
        assert_eq!(read_le_u32(&buf[..]), u32::from(false));

        d.interrupt.irq_status.store(111, Ordering::SeqCst);
        d.read(0x0, 0x60, &mut buf[..]);
        assert_eq!(read_le_u32(&buf[..]), 111);

        // For vhost-user devices, any non-config interrupt status reads as
        // VRING.
        d.is_vhost_user = true;
        d.interrupt.status().store(0, Ordering::SeqCst);
        d.read(0x0, 0x60, &mut buf[..]);
        assert_eq!(read_le_u32(&buf[..]), VIRTIO_MMIO_INT_VRING);

        d.is_vhost_user = true;
        d.interrupt
            .irq_status
            .store(VIRTIO_MMIO_INT_CONFIG, Ordering::SeqCst);
        d.read(0x0, 0x60, &mut buf[..]);
        assert_eq!(read_le_u32(&buf[..]), VIRTIO_MMIO_INT_CONFIG);

        d.read(0x0, 0x70, &mut buf[..]);
        assert_eq!(read_le_u32(&buf[..]), 0);

        d.config_generation = 5;
        d.read(0x0, 0xfc, &mut buf[..]);
        assert_eq!(read_le_u32(&buf[..]), 5);

        // Reads from unknown registers, or with a bad length, leave the
        // buffer untouched.
        buf = buf_copy.to_vec();
        d.read(0x0, 0xfd, &mut buf[..]);
        assert_eq!(buf[..], buf_copy[..]);

        d.read(0x0, 0xfb, &mut buf[..]);
        assert_eq!(buf[..], buf_copy[..]);

        d.read(0x0, 0xfc, &mut buf[..3]);
        assert_eq!(buf[..], buf_copy[..]);
    }

    #[test]
    #[allow(clippy::cognitive_complexity)]
    fn test_bus_device_write() {
        let m = single_region_mem(0x1000);
        let interrupt = Arc::new(IrqTrigger::new());
        let dummy_dev = Arc::new(Mutex::new(DummyDevice::new()));
        let mut d = MmioTransport::new(m, interrupt, dummy_dev.clone(), false);
        let mut buf = vec![0; 5];
        write_le_u32(&mut buf[..4], 1);

        // Writes with a buffer length other than 4 bytes are ignored.
        d.features_select = 0;
        d.write(0x0, 0x14, &buf[..]);
        assert_eq!(d.features_select, 0);

        buf.pop();

        assert_eq!(d.device_status, device_status::INIT);
        set_device_status(&mut d, device_status::ACKNOWLEDGE);

        // Acknowledging features before the DRIVER bit is set is ignored.
        assert_eq!(d.locked_device().acked_features(), 0x0);
        d.acked_features_select = 0x0;
        write_le_u32(&mut buf[..], 1);
        d.write(0x0, 0x20, &buf[..]);
        assert_eq!(d.locked_device().acked_features(), 0x0);

        // Config-space writes are ignored before the driver is ready.
        let buf1 = vec![1; 0xeff];
        for i in (0..0xeff).rev() {
            let mut buf2 = vec![0; 0xeff];

            d.write(0x0, 0x100 + i as u64, &buf1[i..]);
            d.read(0x0, 0x100, &mut buf2[..]);

            for item in buf2.iter().take(0xeff) {
                assert_eq!(*item, 0);
            }
        }

        set_device_status(&mut d, device_status::ACKNOWLEDGE | device_status::DRIVER);
        assert_eq!(
            d.device_status,
            device_status::ACKNOWLEDGE | device_status::DRIVER
        );

        d.features_select = 0;
        write_le_u32(&mut buf[..], 1);
        d.write(0x0, 0x14, &buf[..]);
        assert_eq!(d.features_select, 1);

        d.acked_features_select = 0;
        write_le_u32(&mut buf[..], 0x124);
        dummy_dev.lock().unwrap().set_avail_features(0x124);
        d.write(0x0, 0x20, &buf[..]);
        assert_eq!(d.locked_device().acked_features(), 0x124);

        d.acked_features_select = 0;
        write_le_u32(&mut buf[..], 2);
        d.write(0x0, 0x24, &buf[..]);
        assert_eq!(d.acked_features_select, 2);
        set_device_status(
            &mut d,
            device_status::ACKNOWLEDGE | device_status::DRIVER | device_status::FEATURES_OK,
        );

        // Feature acknowledgement is locked down once FEATURES_OK is set.
        assert_eq!(d.locked_device().acked_features(), 0x124);
        d.acked_features_select = 0x0;
        write_le_u32(&mut buf[..], 1);
        d.write(0x0, 0x20, &buf[..]);
        assert_eq!(d.locked_device().acked_features(), 0x124);

        d.queue_select = 0;
        write_le_u32(&mut buf[..], 3);
        d.write(0x0, 0x30, &buf[..]);
        assert_eq!(d.queue_select, 3);

        d.queue_select = 0;
        assert_eq!(d.locked_device().queues()[0].size, 16);
        write_le_u32(&mut buf[..], 16);
        d.write(0x0, 0x38, &buf[..]);
        assert_eq!(d.locked_device().queues()[0].size, 16);

        assert!(!d.locked_device().queues()[0].ready);
        write_le_u32(&mut buf[..], 1);
        d.write(0x0, 0x44, &buf[..]);
        assert!(d.locked_device().queues()[0].ready);

        assert_eq!(d.locked_device().queues()[0].desc_table_address.0, 0);
        write_le_u32(&mut buf[..], 123);
        d.write(0x0, 0x80, &buf[..]);
        assert_eq!(d.locked_device().queues()[0].desc_table_address.0, 123);
        d.write(0x0, 0x84, &buf[..]);
        assert_eq!(
            d.locked_device().queues()[0].desc_table_address.0,
            123 + (123 << 32)
        );

        assert_eq!(d.locked_device().queues()[0].avail_ring_address.0, 0);
        write_le_u32(&mut buf[..], 124);
        d.write(0x0, 0x90, &buf[..]);
        assert_eq!(d.locked_device().queues()[0].avail_ring_address.0, 124);
        d.write(0x0, 0x94, &buf[..]);
        assert_eq!(
            d.locked_device().queues()[0].avail_ring_address.0,
            124 + (124 << 32)
        );

        assert_eq!(d.locked_device().queues()[0].used_ring_address.0, 0);
        write_le_u32(&mut buf[..], 125);
        d.write(0x0, 0xa0, &buf[..]);
        assert_eq!(d.locked_device().queues()[0].used_ring_address.0, 125);
        d.write(0x0, 0xa4, &buf[..]);
        assert_eq!(
            d.locked_device().queues()[0].used_ring_address.0,
            125 + (125 << 32)
        );

        set_device_status(
            &mut d,
            device_status::ACKNOWLEDGE
                | device_status::DRIVER
                | device_status::FEATURES_OK
                | device_status::DRIVER_OK,
        );

        // Interrupt acknowledge clears exactly the bits that were written.
        d.interrupt.irq_status.store(0b10_1010, Ordering::Relaxed);
        write_le_u32(&mut buf[..], 0b111);
        d.write(0x0, 0x64, &buf[..]);
        assert_eq!(d.interrupt.irq_status.load(Ordering::Relaxed), 0b10_1000);

        // Writes to an unknown register (0xfb), or with a bad length, are
        // ignored.
        write_le_u32(&mut buf[..], 0xf);
        d.config_generation = 0;
        d.write(0x0, 0xfb, &buf[..]);
        assert_eq!(d.config_generation, 0);

        d.write(0x0, 0xfc, &buf[..2]);
        assert_eq!(d.config_generation, 0);

        // With the driver ready, config-space writes at 0x100+ take effect.
        let buf1 = vec![1; 0xeff];
        for i in (0..0xeff).rev() {
            let mut buf2 = vec![0; 0xeff];

            d.write(0x0, 0x100 + i as u64, &buf1[i..]);
            d.read(0x0, 0x100, &mut buf2[..]);

            for item in buf2.iter().take(i) {
                assert_eq!(*item, 0);
            }

            assert_eq!(buf1[i..], buf2[i..]);
        }
    }

    #[test]
    fn test_bus_device_activate() {
        let m = single_region_mem(0x1000);
        let interrupt = Arc::new(IrqTrigger::new());
        let mut d = MmioTransport::new(
            m,
            interrupt,
            Arc::new(Mutex::new(DummyDevice::new())),
            false,
        );

        assert!(!d.locked_device().is_activated());
        assert_eq!(d.device_status, device_status::INIT);

        set_device_status(&mut d, device_status::ACKNOWLEDGE);
        set_device_status(&mut d, device_status::ACKNOWLEDGE | device_status::DRIVER);
        assert_eq!(
            d.device_status,
            device_status::ACKNOWLEDGE | device_status::DRIVER
        );

        // Setting DRIVER_OK without FEATURES_OK is an invalid transition and
        // is ignored.
        set_device_status(
            &mut d,
            device_status::ACKNOWLEDGE | device_status::DRIVER | device_status::DRIVER_OK,
        );
        assert_eq!(
            d.device_status,
            device_status::ACKNOWLEDGE | device_status::DRIVER
        );

        set_device_status(
            &mut d,
            device_status::ACKNOWLEDGE | device_status::DRIVER | device_status::FEATURES_OK,
        );
        assert_eq!(
            d.device_status,
            device_status::ACKNOWLEDGE | device_status::DRIVER | device_status::FEATURES_OK
        );

        let mut buf = [0; 4];
        let queue_len = d.locked_device().queues().len();
        for q in 0..queue_len {
            d.queue_select = q.try_into().unwrap();
            write_le_u32(&mut buf[..], 16);
            d.write(0x0, 0x38, &buf[..]);
            write_le_u32(&mut buf[..], 1);
            d.write(0x0, 0x44, &buf[..]);
        }
        assert!(!d.locked_device().is_activated());

        // Writes to an unknown register (0xa8) or outside the MMIO window
        // (0x1000) must not activate the device.
        d.write(0x0, 0xa8, &buf[..]);
        d.write(0x0, 0x1000, &buf[..]);
        assert!(!d.locked_device().is_activated());

        set_device_status(
            &mut d,
            device_status::ACKNOWLEDGE
                | device_status::DRIVER
                | device_status::FEATURES_OK
                | device_status::DRIVER_OK,
        );
        assert_eq!(
            d.device_status,
            device_status::ACKNOWLEDGE
                | device_status::DRIVER
                | device_status::FEATURES_OK
                | device_status::DRIVER_OK
        );
        assert!(d.locked_device().is_activated());

        // Queue fields are frozen after DRIVER_OK: clearing "ready" is
        // ignored.
        write_le_u32(&mut buf[..], 0);
        d.queue_select = 0;
        d.write(0x0, 0x44, &buf[..]);
        d.read(0x0, 0x44, &mut buf[..]);
        assert_eq!(read_le_u32(&buf[..]), 1);
    }

    #[test]
    fn test_bus_device_activate_failure() {
        let m = single_region_mem(0x1000);
        let interrupt = Arc::new(IrqTrigger::new());
        let device = DummyDevice {
            activate_should_error: true,
            ..DummyDevice::new()
        };
        let mut d = MmioTransport::new(m, interrupt, Arc::new(Mutex::new(device)), false);

        set_device_status(&mut d, device_status::ACKNOWLEDGE);
        set_device_status(&mut d, device_status::ACKNOWLEDGE | device_status::DRIVER);
        set_device_status(
            &mut d,
            device_status::ACKNOWLEDGE | device_status::DRIVER | device_status::FEATURES_OK,
        );

        let mut buf = [0; 4];
        let queue_len = d.locked_device().queues().len();
        for q in 0..queue_len {
            d.queue_select = q.try_into().unwrap();
            write_le_u32(&mut buf[..], 16);
            d.write(0x0, 0x38, &buf[..]);
            write_le_u32(&mut buf[..], 1);
            d.write(0x0, 0x44, &buf[..]);
        }
        assert!(!d.locked_device().is_activated());

        set_device_status(
            &mut d,
            device_status::ACKNOWLEDGE
                | device_status::DRIVER
                | device_status::FEATURES_OK
                | device_status::DRIVER_OK,
        );

        // A failed activation must mark the device as needing a reset and
        // notify the driver through a config-change interrupt.
        assert_ne!(d.device_status & DEVICE_NEEDS_RESET, 0);
        assert_eq!(
            d.locked_device().interrupt_status().load(Ordering::SeqCst),
            VIRTIO_MMIO_INT_CONFIG
        );
        assert_eq!(
            d.locked_device()
                .interrupt_trigger()
                .notifier(VirtioInterruptType::Config)
                .unwrap()
                .read()
                .unwrap(),
            1
        );
    }

    fn activate_device(d: &mut MmioTransport) {
        set_device_status(d, device_status::ACKNOWLEDGE);
        set_device_status(d, device_status::ACKNOWLEDGE | device_status::DRIVER);
        set_device_status(
            d,
            device_status::ACKNOWLEDGE | device_status::DRIVER | device_status::FEATURES_OK,
        );

        // Size every queue and mark it ready, then complete the handshake.
        let mut buf = [0; 4];
        let queues_count = d.locked_device().queues().len();
        for q in 0..queues_count {
            d.queue_select = q.try_into().unwrap();
            write_le_u32(&mut buf[..], 16);
            d.write(0x0, 0x38, &buf[..]);
            write_le_u32(&mut buf[..], 1);
            d.write(0x0, 0x44, &buf[..]);
        }
        assert!(!d.locked_device().is_activated());

        set_device_status(
            d,
            device_status::ACKNOWLEDGE
                | device_status::DRIVER
                | device_status::FEATURES_OK
                | device_status::DRIVER_OK,
        );
        assert_eq!(
            d.device_status,
            device_status::ACKNOWLEDGE
                | device_status::DRIVER
                | device_status::FEATURES_OK
                | device_status::DRIVER_OK
        );
        assert!(d.locked_device().is_activated());
    }

    #[test]
    fn test_bus_device_reset() {
        let m = single_region_mem(0x1000);
        let interrupt = Arc::new(IrqTrigger::new());
        let mut d = MmioTransport::new(
            m,
            interrupt,
            Arc::new(Mutex::new(DummyDevice::new())),
            false,
        );
        let mut buf = [0; 4];

        assert!(!d.locked_device().is_activated());
        assert_eq!(d.device_status, 0);
        activate_device(&mut d);

        // Writing a status with the FAILED bit latches FAILED but leaves the
        // device active.
        write_le_u32(&mut buf[..], 0x8f);
        d.write(0x0, 0x70, &buf[..]);
        assert_eq!(d.device_status, 0x8f);
        assert!(d.locked_device().is_activated());

        // Writing 0 requests a reset, but DummyDevice does not override
        // `reset` (it returns None), so the transport stays FAILED and the
        // device stays active.
        write_le_u32(&mut buf[..], 0x0);
        d.write(0x0, 0x70, &buf[..]);
        assert_eq!(d.device_status, 0x8f);
        assert!(d.locked_device().is_activated());
    }

    #[test]
    fn test_get_avail_features() {
        let dummy_dev = DummyDevice::new();
        assert_eq!(dummy_dev.avail_features(), dummy_dev.avail_features);
    }

    #[test]
    fn test_get_acked_features() {
        let dummy_dev = DummyDevice::new();
        assert_eq!(dummy_dev.acked_features(), dummy_dev.acked_features);
    }

    #[test]
    fn test_set_acked_features() {
        let mut dummy_dev = DummyDevice::new();

        assert_eq!(dummy_dev.acked_features(), 0);
        dummy_dev.set_acked_features(16);
        assert_eq!(dummy_dev.acked_features(), dummy_dev.acked_features);
    }

    #[test]
    fn test_ack_features_by_page() {
        let mut dummy_dev = DummyDevice::new();
        dummy_dev.set_acked_features(16);
        dummy_dev.set_avail_features(8);
        dummy_dev.ack_features_by_page(0, 8);
        assert_eq!(dummy_dev.acked_features(), 24);
    }

    #[test]
    fn irq_trigger() {
        let irq_trigger = IrqTrigger::new();
        assert_eq!(irq_trigger.irq_status.load(Ordering::SeqCst), 0);

        // Nothing is pending right after creation.
        assert!(!irq_trigger.has_pending_interrupt(VirtioInterruptType::Config));
        assert!(!irq_trigger.has_pending_interrupt(VirtioInterruptType::Queue(0)));

        irq_trigger.trigger(VirtioInterruptType::Config).unwrap();
        assert!(irq_trigger.has_pending_interrupt(VirtioInterruptType::Config));
        irq_trigger.irq_status.store(0, Ordering::SeqCst);
        irq_trigger.trigger(VirtioInterruptType::Queue(0)).unwrap();
        assert!(irq_trigger.has_pending_interrupt(VirtioInterruptType::Queue(0)));

        // Saturate the eventfd counter so the next trigger's write fails.
        irq_trigger.irq_evt.write(u64::MAX - 1).unwrap();
        irq_trigger
            .trigger(VirtioInterruptType::Config)
            .unwrap_err();
        irq_trigger
            .trigger(VirtioInterruptType::Queue(0))
            .unwrap_err();
    }
}