#![doc(hidden)]

use std::fmt::Debug;
use std::marker::PhantomData;
use std::mem;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};

use crate::devices::virtio::queue::Queue;
use crate::devices::virtio::transport::VirtioInterrupt;
use crate::devices::virtio::transport::mmio::IrqTrigger;
use crate::test_utils::single_region_mem;
use crate::utils::{align_up, u64_to_usize};
use crate::vstate::memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};

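// Asserts that evaluating `$block` changes the value of `$metric` by exactly `$delta`.
// A usage sketch (the metric path below is hypothetical; any metric exposing a
// `count()` method works):
//
//     check_metric_after_block!(
//         METRICS.some_device.event_fails,
//         1,
//         device.process_some_event()
//     );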
#[macro_export]
macro_rules! check_metric_after_block {
    ($metric:expr, $delta:expr, $block:expr) => {{
        let before = $metric.count();
        let _ = $block;
        assert_eq!($metric.count() - before, $delta, "unexpected metric value");
    }};
}

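// Creates a single-region guest memory of 64 KiB (0x10000 bytes) for tests.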
pub fn default_mem() -> GuestMemoryMmap {
    single_region_mem(0x10000)
}

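// Creates the interrupt object (an MMIO IRQ trigger) used by virtio devices in tests.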
pub fn default_interrupt() -> Arc<dyn VirtioInterrupt> {
    Arc::new(IrqTrigger::new())
}

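// A byte buffer with an atomic read cursor, used to hand out consecutive chunks of
// test input data.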
#[derive(Debug)]
pub struct InputData {
    pub data: Vec<u8>,
    pub read_pos: AtomicUsize,
}

impl InputData {
    // Returns the next `len` bytes of data and advances the read cursor past them.
    pub fn get_slice(&self, len: usize) -> &[u8] {
        let old_pos = self.read_pos.fetch_add(len, Ordering::AcqRel);
        &self.data[old_pos..old_pos + len]
    }
}

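// Represents a location in guest memory which holds a value of type `T`.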
#[derive(Debug)]
pub struct SomeplaceInMemory<'a, T> {
    pub location: GuestAddress,
    mem: &'a GuestMemoryMmap,
    phantom: PhantomData<*const T>,
}

impl<'a, T> SomeplaceInMemory<'a, T>
where
    T: Debug + crate::vstate::memory::ByteValued,
{
    fn new(location: GuestAddress, mem: &'a GuestMemoryMmap) -> Self {
        SomeplaceInMemory {
            location,
            mem,
            phantom: PhantomData,
        }
    }

    // Reads the value from the underlying memory location.
    pub fn get(&self) -> T {
        self.mem.read_obj(self.location).unwrap()
    }

    // Writes the value to the underlying memory location.
    pub fn set(&self, val: T) {
        self.mem.write_obj(val, self.location).unwrap()
    }

    // Returns the place which holds a value of type `U`, starting `offset` bytes after
    // the current location.
    fn map_offset<U: Debug>(&self, offset: usize) -> SomeplaceInMemory<'a, U> {
        SomeplaceInMemory {
            location: self.location.checked_add(offset as u64).unwrap(),
            mem: self.mem,
            phantom: PhantomData,
        }
    }

    // Returns the place which holds a value of type `U`, starting right after the end
    // of `self`.
    fn next_place<U: Debug>(&self) -> SomeplaceInMemory<'a, U> {
        self.map_offset::<U>(mem::size_of::<T>())
    }

    // The first address past the end of this place.
    fn end(&self) -> GuestAddress {
        self.location
            .checked_add(mem::size_of::<T>() as u64)
            .unwrap()
    }
}

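// Mirrors the layout of a split-virtqueue descriptor (struct virtq_desc in the virtio
// spec) in guest memory.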
#[derive(Debug)]
pub struct VirtqDesc<'a> {
    pub addr: SomeplaceInMemory<'a, u64>,
    pub len: SomeplaceInMemory<'a, u32>,
    pub flags: SomeplaceInMemory<'a, u16>,
    pub next: SomeplaceInMemory<'a, u16>,
}

impl<'a> VirtqDesc<'a> {
    pub const ALIGNMENT: u64 = 16;

    fn new(start: GuestAddress, mem: &'a GuestMemoryMmap) -> Self {
        assert_eq!(start.0 & (Self::ALIGNMENT - 1), 0);

        let addr = SomeplaceInMemory::new(start, mem);
        let len = addr.next_place();
        let flags = len.next_place();
        let next = flags.next_place();

        VirtqDesc {
            addr,
            len,
            flags,
            next,
        }
    }

    fn start(&self) -> GuestAddress {
        self.addr.location
    }

    fn end(&self) -> GuestAddress {
        self.next.end()
    }

    pub fn set(&self, addr: u64, len: u32, flags: u16, next: u16) {
        self.addr.set(addr);
        self.len.set(len);
        self.flags.set(flags);
        self.next.set(next);
    }

    pub fn memory(&self) -> &'a GuestMemoryMmap {
        self.addr.mem
    }

    // Writes `data` into the guest memory buffer this descriptor points to.
    pub fn set_data(&mut self, data: &[u8]) {
        assert!(self.len.get() as usize >= data.len());
        let mem = self.addr.mem;
        mem.write_slice(data, GuestAddress::new(self.addr.get()))
            .unwrap();
    }

    // Asserts that the buffer this descriptor points to starts with `expected_data`.
    pub fn check_data(&self, expected_data: &[u8]) {
        assert!(self.len.get() as usize >= expected_data.len());
        let mem = self.addr.mem;
        let mut buf = vec![0; expected_data.len()];
        mem.read_slice(&mut buf, GuestAddress::new(self.addr.get()))
            .unwrap();
        assert_eq!(buf.as_slice(), expected_data);
    }
}

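// Represents a virtio ring (available or used) in guest memory. The two rings differ
// only in their element type, so both are modeled by this generic struct.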
#[derive(Debug)]
pub struct VirtqRing<'a, T> {
    pub flags: SomeplaceInMemory<'a, u16>,
    pub idx: SomeplaceInMemory<'a, u16>,
    pub ring: Vec<SomeplaceInMemory<'a, T>>,
    pub event: SomeplaceInMemory<'a, u16>,
}

impl<'a, T> VirtqRing<'a, T>
where
    T: Debug + crate::vstate::memory::ByteValued,
{
    fn new(start: GuestAddress, mem: &'a GuestMemoryMmap, qsize: u16, alignment: usize) -> Self {
        assert_eq!(start.0 & (alignment as u64 - 1), 0);

        let flags = SomeplaceInMemory::new(start, mem);
        let idx = flags.next_place();

        let mut ring = Vec::with_capacity(qsize as usize);

        ring.push(idx.next_place());

        for _ in 1..qsize as usize {
            let x = ring.last().unwrap().next_place();
            ring.push(x)
        }

        let event = ring.last().unwrap().next_place();

        flags.set(0);
        idx.set(0);
        event.set(0);

        VirtqRing {
            flags,
            idx,
            ring,
            event,
        }
    }

    pub fn end(&self) -> GuestAddress {
        self.event.end()
    }
}

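// Mirrors the layout of a used-ring element (struct virtq_used_elem in the virtio
// spec).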
#[repr(C)]
#[derive(Debug, Clone, Copy, Default)]
pub struct VirtqUsedElem {
    pub id: u32,
    pub len: u32,
}

// SAFETY: `VirtqUsedElem` is a `#[repr(C)]` struct of two `u32`s, so it has no padding
// and every byte pattern is a valid value.
unsafe impl crate::vstate::memory::ByteValued for VirtqUsedElem {}

pub type VirtqAvail<'a> = VirtqRing<'a, u16>;
pub type VirtqUsed<'a> = VirtqRing<'a, VirtqUsedElem>;

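// A whole virtqueue (descriptor table, available ring and used ring) laid out
// contiguously in guest memory, with the alignment each component requires.
//
// A minimal usage sketch (the 16-entry queue size is arbitrary):
//
//     let mem = default_mem();
//     let vq = VirtQueue::new(GuestAddress(0), &mem, 16);
//     let queue = vq.create_queue();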
#[derive(Debug)]
pub struct VirtQueue<'a> {
    pub dtable: Vec<VirtqDesc<'a>>,
    pub avail: VirtqAvail<'a>,
    pub used: VirtqUsed<'a>,
}

impl<'a> VirtQueue<'a> {
    pub fn new(start: GuestAddress, mem: &'a GuestMemoryMmap, qsize: u16) -> Self {
        // The virtio spec requires the queue size to be a power of two.
        assert!(qsize > 0 && qsize & (qsize - 1) == 0);

        let mut dtable = Vec::with_capacity(qsize as usize);

        let mut end = start;

        for _ in 0..qsize {
            let d = VirtqDesc::new(end, mem);
            end = d.end();
            dtable.push(d);
        }

        const AVAIL_ALIGN: usize = 2;

        let avail = VirtqAvail::new(end, mem, qsize, AVAIL_ALIGN);

        const USED_ALIGN: u64 = 4;

        let mut x = avail.end().0;
        x = align_up(x, USED_ALIGN);

        let used = VirtqUsed::new(GuestAddress(x), mem, qsize, u64_to_usize(USED_ALIGN));

        VirtQueue {
            dtable,
            avail,
            used,
        }
    }

    pub fn memory(&self) -> &'a GuestMemoryMmap {
        self.used.flags.mem
    }

    pub fn size(&self) -> u16 {
        self.dtable.len().try_into().unwrap()
    }

    pub fn dtable_start(&self) -> GuestAddress {
        self.dtable.first().unwrap().start()
    }

    pub fn avail_start(&self) -> GuestAddress {
        self.avail.flags.location
    }

    pub fn used_start(&self) -> GuestAddress {
        self.used.flags.location
    }

    // Creates a device-facing `Queue` object configured to match this in-memory layout.
    pub fn create_queue(&self) -> Queue {
        let mut q = Queue::new(self.size());

        q.size = self.size();
        q.ready = true;
        q.desc_table_address = self.dtable_start();
        q.avail_ring_address = self.avail_start();
        q.used_ring_address = self.used_start();

        q.initialize(self.memory()).unwrap();

        q
    }

    pub fn start(&self) -> GuestAddress {
        self.dtable_start()
    }

    pub fn end(&self) -> GuestAddress {
        self.used.end()
    }

    // Asserts that the used-ring element at `used_index` has the expected id and length.
    pub fn check_used_elem(&self, used_index: u16, expected_id: u16, expected_len: u32) {
        let used_elem = self.used.ring[used_index as usize].get();
        assert_eq!(used_elem.id, u32::from(expected_id));
        assert_eq!(used_elem.len, expected_len);
    }
}

#[cfg(test)]
pub(crate) mod test {

    use std::fmt::{self, Debug};
    use std::sync::{Arc, Mutex, MutexGuard};

    use event_manager::{EventManager, MutEventSubscriber, SubscriberId, SubscriberOps};

    use crate::devices::virtio::device::VirtioDevice;
    use crate::devices::virtio::net::MAX_BUFFER_SIZE;
    use crate::devices::virtio::queue::{Queue, VIRTQ_DESC_F_NEXT};
    use crate::devices::virtio::test_utils::{VirtQueue, VirtqDesc, default_interrupt};
    use crate::test_utils::single_region_mem;
    use crate::vstate::memory::{Address, GuestAddress, GuestMemoryMmap};

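    // Creates a single-region guest memory sized to MAX_BUFFER_SIZE, the largest
    // buffer a virtio-net device may use.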
    pub fn create_virtio_mem() -> GuestMemoryMmap {
        single_region_mem(MAX_BUFFER_SIZE)
    }

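    // Implemented by devices under test on top of `VirtioDevice`, so that
    // `VirtioTestHelper` can install queues generically.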
    pub trait VirtioTestDevice: VirtioDevice {
        // Replaces the queues used by the device.
        fn set_queues(&mut self, queues: Vec<Queue>);

        // The number of queues the device uses.
        fn num_queues(&self) -> usize;
    }

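    // Helper object that drives a virtio device under test. It owns the event manager
    // the device is subscribed to, the device itself, and the in-guest-memory
    // virtqueues backing the device's queues.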
    pub struct VirtioTestHelper<'a, T>
    where
        T: VirtioTestDevice + MutEventSubscriber,
    {
        event_manager: EventManager<Arc<Mutex<T>>>,
        _subscriber_id: SubscriberId,
        device: Arc<Mutex<T>>,
        virtqueues: Vec<VirtQueue<'a>>,
    }

    impl<T: VirtioTestDevice + MutEventSubscriber + Debug> fmt::Debug for VirtioTestHelper<'_, T> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.debug_struct("VirtioTestHelper")
                .field("event_manager", &"?")
                .field("_subscriber_id", &self._subscriber_id)
                .field("device", &self.device)
                .field("virtqueues", &self.virtqueues)
                .finish()
        }
    }

    impl<'a, T> VirtioTestHelper<'a, T>
    where
        T: VirtioTestDevice + MutEventSubscriber + Debug,
    {
        const QUEUE_SIZE: u16 = 16;

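        // Lays out `num_queues` virtqueues of `QUEUE_SIZE` entries back to back in
        // guest memory, starting at address 0 and aligning each queue's start to the
        // descriptor table alignment.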
        fn create_virtqueues(mem: &'a GuestMemoryMmap, num_queues: usize) -> Vec<VirtQueue<'a>> {
            (0..num_queues)
                .scan(GuestAddress(0), |next_addr, _| {
                    let vqueue = VirtQueue::new(*next_addr, mem, Self::QUEUE_SIZE);
                    // The next virtqueue starts at the first properly aligned address
                    // past the end of this one.
                    *next_addr = vqueue.end().unchecked_align_up(VirtqDesc::ALIGNMENT);
                    Some(vqueue)
                })
                .collect::<Vec<_>>()
        }

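        // Creates a new test helper: builds the device's virtqueues in `mem`, installs
        // the matching `Queue` objects in the device, and registers the device with a
        // fresh event manager.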
        pub fn new(mem: &'a GuestMemoryMmap, mut device: T) -> VirtioTestHelper<'a, T> {
            let mut event_manager = EventManager::new().unwrap();

            let virtqueues = Self::create_virtqueues(mem, device.num_queues());
            let queues = virtqueues.iter().map(|vq| vq.create_queue()).collect();
            device.set_queues(queues);
            let device = Arc::new(Mutex::new(device));
            let _subscriber_id = event_manager.add_subscriber(device.clone());

            Self {
                event_manager,
                _subscriber_id,
                device,
                virtqueues,
            }
        }

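        // Returns a locked handle to the device under test.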
        pub fn device(&mut self) -> MutexGuard<'_, T> {
            self.device.lock().unwrap()
        }

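        // Activates the device with a default interrupt and processes the resulting
        // activate event.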
        pub fn activate_device(&mut self, mem: &'a GuestMemoryMmap) {
            let interrupt = default_interrupt();
            self.device
                .lock()
                .unwrap()
                .activate(mem.clone(), interrupt)
                .unwrap();
            // Exactly one event (the device activation) is expected here.
            let ev_count = self.event_manager.run_with_timeout(100).unwrap();
            assert_eq!(ev_count, 1);
        }

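        // The first address past the end of the last virtqueue; tests can place their
        // data buffers starting here.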
        pub fn data_address(&self) -> u64 {
            self.virtqueues.last().unwrap().end().raw_value()
        }

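        // Adds a chain of descriptors to `queue`, taking the guest address, length and
        // flags of each descriptor verbatim from the `(index, addr, len, flags)` tuples
        // in `desc_list`. The `_addr_offset` parameter is currently unused (addresses
        // come from `desc_list`). All but the last descriptor are linked with
        // VIRTQ_DESC_F_NEXT, the head of the chain is published on the available ring,
        // and the queue's event fd is signaled.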
        pub fn add_scatter_gather(
            &mut self,
            queue: usize,
            _addr_offset: u64,
            desc_list: &[(u16, u64, u32, u16)],
        ) {
            let device = self.device.lock().unwrap();

            let event_fd = &device.queue_events()[queue];
            let vq = &self.virtqueues[queue];

            // Create the descriptor chain.
            let mut iter = desc_list.iter().peekable();
            while let Some(&(index, addr, len, flags)) = iter.next() {
                let desc = &vq.dtable[index as usize];
                desc.set(addr, len, flags, 0);
                if let Some(&&(next_index, _, _, _)) = iter.peek() {
                    desc.flags.set(flags | VIRTQ_DESC_F_NEXT);
                    desc.next.set(next_index);
                }
            }

            // Mark the head of the chain as available.
            if let Some(&(index, _, _, _)) = desc_list.first() {
                let ring_index = vq.avail.idx.get();
                vq.avail.ring[ring_index as usize].set(index);
                vq.avail.idx.set(ring_index + 1);
            }
            event_fd.write(1).unwrap();
        }

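        // Returns the guest address of the buffer pointed to by descriptor `index` of
        // `queue`.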
        pub fn desc_address(&self, queue: usize, index: usize) -> GuestAddress {
            GuestAddress(self.virtqueues[queue].dtable[index].addr.get())
        }

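        // Adds a chain of descriptors to `queue` from the `(index, len, flags)` tuples
        // in `desc_list`. Buffers are laid out in the data area starting at
        // `data_address() + addr_offset`, with a small pseudo-random gap between
        // consecutive buffers. All but the last descriptor are linked with
        // VIRTQ_DESC_F_NEXT, the head of the chain is published on the available ring,
        // and the queue's event fd is signaled.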
        pub fn add_desc_chain(
            &mut self,
            queue: usize,
            addr_offset: u64,
            desc_list: &[(u16, u32, u16)],
        ) {
            let device = self.device.lock().unwrap();

            let event_fd = &device.queue_events()[queue];
            let vq = &self.virtqueues[queue];

            // Create the descriptor chain.
            let mut iter = desc_list.iter().peekable();
            let mut addr = self.data_address() + addr_offset;
            while let Some(&(index, len, flags)) = iter.next() {
                let desc = &vq.dtable[index as usize];
                desc.set(addr, len, flags, 0);
                if let Some(&&(next_index, _, _)) = iter.peek() {
                    desc.flags.set(flags | VIRTQ_DESC_F_NEXT);
                    desc.next.set(next_index);
                }

                addr += u64::from(len);
                // Add a small pseudo-random gap between consecutive buffers, so tests
                // do not silently rely on them being contiguous.
                addr += u64::from(vmm_sys_util::rand::xor_pseudo_rng_u32()) % 10;
            }

            // Mark the head of the chain as available.
            if let Some(&(index, _, _)) = desc_list.first() {
                let ring_index = vq.avail.idx.get();
                vq.avail.ring[ring_index as usize].set(index);
                vq.avail.idx.set(ring_index + 1);
            }
            event_fd.write(1).unwrap();
        }

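        // Runs the device's event loop for at most `msec` milliseconds and returns the
        // number of events handled.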
        pub fn emulate_for_msec(&mut self, msec: i32) -> Result<usize, event_manager::Error> {
            self.event_manager.run_with_timeout(msec)
        }
    }
}