use std::sync::Arc;

use byteorder::{ByteOrder, LittleEndian};
use pci::PciCapabilityId;
use serde::{Deserialize, Serialize};
use vm_memory::ByteValued;

use crate::Vm;
use crate::logger::{debug, error, warn};
use crate::pci::configuration::PciCapability;
use crate::snapshot::Persist;
use crate::vstate::interrupts::{InterruptError, MsixVectorConfig, MsixVectorGroup};

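// Layout constants for the MSI-X table and Pending Bit Array (PBA), per the PCI spec:
// a function may expose at most 2048 vectors, each table entry is 16 bytes, each PBA
// entry is a 64-bit word, and the Function Mask / MSI-X Enable bits live in bits 14
// and 15 of the capability's message control word.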
const MAX_MSIX_VECTORS_PER_DEVICE: u16 = 2048;
const MSIX_TABLE_ENTRIES_MODULO: u64 = 16;
const MSIX_PBA_ENTRIES_MODULO: u64 = 8;
const BITS_PER_PBA_ENTRY: usize = 64;
const FUNCTION_MASK_BIT: u8 = 14;
const MSIX_ENABLE_BIT: u8 = 15;

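/// A single entry of the MSI-X table programmed by the guest: message address,
/// message data and the per-vector control word.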
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub struct MsixTableEntry {
    /// Lower 32 bits of the message address.
    pub msg_addr_lo: u32,
    /// Upper 32 bits of the message address.
    pub msg_addr_hi: u32,
    /// Message data written when the interrupt fires.
    pub msg_data: u32,
    /// Vector control word; bit 0 is the per-vector Mask bit.
    pub vector_ctl: u32,
}

impl MsixTableEntry {
    /// Returns `true` if the per-vector Mask bit (bit 0 of vector control) is set.
    pub fn masked(&self) -> bool {
        self.vector_ctl & 0x1 == 0x1
    }
}

impl Default for MsixTableEntry {
    fn default() -> Self {
        MsixTableEntry {
            msg_addr_lo: 0,
            msg_addr_hi: 0,
            msg_data: 0,
            // Vectors come up masked; the guest must unmask them explicitly.
            vector_ctl: 0x1,
        }
    }
}

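/// State of an [`MsixConfig`] saved in a snapshot and used to restore it.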
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MsixConfigState {
    table_entries: Vec<MsixTableEntry>,
    pba_entries: Vec<u64>,
    masked: bool,
    enabled: bool,
    vectors: Vec<u32>,
}

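/// MSI-X configuration of a PCI device: the MSI-X table and Pending Bit Array (PBA)
/// exposed to the guest, plus the interrupt vectors backing them.
///
/// A minimal usage sketch (not compiled as a doctest), assuming an `Arc<Vm>` is
/// available to allocate the vector group from and `0x42` stands in for the device id:
///
/// ```ignore
/// let vectors = Arc::new(Vm::create_msix_group(vm.clone(), 2)?);
/// let mut msix = MsixConfig::new(vectors, 0x42);
/// // The guest enables MSI-X by setting bit 15 of the capability's message control.
/// msix.set_msg_ctl(0x8000);
/// ```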
pub struct MsixConfig {
    /// Entries of the MSI-X table.
    pub table_entries: Vec<MsixTableEntry>,
    /// Pending Bit Array; one bit per vector, 64 vectors per entry.
    pub pba_entries: Vec<u64>,
    /// Device id used when programming the interrupt routes.
    pub devid: u32,
    /// Interrupt vectors backing the MSI-X table entries.
    pub vectors: Arc<MsixVectorGroup>,
    /// Whether the function is masked (Function Mask bit of message control).
    pub masked: bool,
    /// Whether MSI-X is enabled (MSI-X Enable bit of message control).
    pub enabled: bool,
}

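// Hand-rolled `Debug` implementation that leaves out the `vectors` field.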
impl std::fmt::Debug for MsixConfig {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("MsixConfig")
            .field("table_entries", &self.table_entries)
            .field("pba_entries", &self.pba_entries)
            .field("devid", &self.devid)
            .field("masked", &self.masked)
            .field("enabled", &self.enabled)
            .finish()
    }
}

impl MsixConfig {
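    /// Create a new MSI-X configuration backed by `vectors` for the device `devid`.
    ///
    /// All table entries start out masked, and the function itself starts masked and
    /// disabled until the guest programs the capability's message control word.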
    pub fn new(vectors: Arc<MsixVectorGroup>, devid: u32) -> Self {
        // The PCI spec caps a function at 2048 MSI-X vectors.
        assert!(vectors.num_vectors() <= MAX_MSIX_VECTORS_PER_DEVICE);

        let mut table_entries: Vec<MsixTableEntry> = Vec::new();
        table_entries.resize_with(vectors.num_vectors() as usize, Default::default);
        let mut pba_entries: Vec<u64> = Vec::new();
        let num_pba_entries: usize = (vectors.num_vectors() as usize).div_ceil(BITS_PER_PBA_ENTRY);
        pba_entries.resize_with(num_pba_entries, Default::default);

        MsixConfig {
            table_entries,
            pba_entries,
            devid,
            vectors,
            masked: true,
            enabled: false,
        }
    }

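    /// Restore an `MsixConfig` from a snapshot `state`, re-enabling the vectors that
    /// were enabled and unmasked when the snapshot was taken.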
    pub fn from_state(
        state: MsixConfigState,
        vm: Arc<Vm>,
        devid: u32,
    ) -> Result<Self, InterruptError> {
        let vectors = Arc::new(MsixVectorGroup::restore(vm, &state.vectors)?);
        if state.enabled && !state.masked {
            // Re-establish the interrupt routes only for vectors that were live
            // (unmasked) at snapshot time.
            for (idx, table_entry) in state.table_entries.iter().enumerate() {
                if table_entry.masked() {
                    continue;
                }

                let config = MsixVectorConfig {
                    high_addr: table_entry.msg_addr_hi,
                    low_addr: table_entry.msg_addr_lo,
                    data: table_entry.msg_data,
                    devid,
                };

                vectors.update(idx, config, state.masked, true)?;
                vectors.enable()?;
            }
        }

        Ok(MsixConfig {
            table_entries: state.table_entries,
            pba_entries: state.pba_entries,
            devid,
            vectors,
            masked: state.masked,
            enabled: state.enabled,
        })
    }

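    /// Capture the current MSI-X configuration so it can be stored in a snapshot.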
    pub fn state(&self) -> MsixConfigState {
        MsixConfigState {
            table_entries: self.table_entries.clone(),
            pba_entries: self.pba_entries.clone(),
            masked: self.masked,
            enabled: self.enabled,
            vectors: self.vectors.save(),
        }
    }

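    /// Handle a guest write to the message control word of the MSI-X capability.
    ///
    /// Bit 14 is the Function Mask and bit 15 is MSI-X Enable. Enabling the function
    /// programs the interrupt routes for all vectors; disabling or masking it disables
    /// them. Unmasking the function also fires any interrupt that was left pending in
    /// the PBA for an unmasked vector.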
    pub fn set_msg_ctl(&mut self, reg: u16) {
        let old_masked = self.masked;
        let old_enabled = self.enabled;

        self.masked = ((reg >> FUNCTION_MASK_BIT) & 1u16) == 1u16;
        self.enabled = ((reg >> MSIX_ENABLE_BIT) & 1u16) == 1u16;

        if old_masked != self.masked || old_enabled != self.enabled {
            // Update the backing vectors only when one of the two control bits flipped.
            if self.enabled && !self.masked {
                debug!("MSI-X enabled for device 0x{:x}", self.devid);
                for (idx, table_entry) in self.table_entries.iter().enumerate() {
                    let config = MsixVectorConfig {
                        high_addr: table_entry.msg_addr_hi,
                        low_addr: table_entry.msg_addr_lo,
                        data: table_entry.msg_data,
                        devid: self.devid,
                    };

                    if let Err(e) = self.vectors.update(idx, config, table_entry.masked(), true) {
                        error!("Failed updating vector: {:?}", e);
                    }
                }
            } else if old_enabled || !old_masked {
                debug!("MSI-X disabled for device 0x{:x}", self.devid);
                if let Err(e) = self.vectors.disable() {
                    error!("Failed disabling irq_fd: {:?}", e);
                }
            }
        }

        // If the guest just unmasked the function, inject any interrupt that was left
        // pending in the PBA for a vector that is itself unmasked, and clear its
        // pending bit.
        if old_masked && !self.masked {
            for (index, entry) in self.table_entries.clone().iter().enumerate() {
                if !entry.masked() && self.get_pba_bit(index.try_into().unwrap()) == 1 {
                    self.inject_msix_and_clear_pba(index);
                }
            }
        }
    }

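    /// Handle a guest read from the MSI-X table region of the BAR.
    ///
    /// Only 4- and 8-byte accesses within a table entry are supported; any other
    /// access reads as all ones.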
    pub fn read_table(&self, offset: u64, data: &mut [u8]) {
        assert!(data.len() <= 8);

        // Each table entry is 16 bytes; work out which entry is accessed and the
        // offset of the access within it.
        let index: usize = (offset / MSIX_TABLE_ENTRIES_MODULO) as usize;
        let modulo_offset = offset % MSIX_TABLE_ENTRIES_MODULO;

        if index >= self.table_entries.len() {
            warn!("Invalid MSI-X table entry index {index}");
            data.fill(0xff);
            return;
        }

        match data.len() {
            4 => {
                let value = match modulo_offset {
                    0x0 => self.table_entries[index].msg_addr_lo,
                    0x4 => self.table_entries[index].msg_addr_hi,
                    0x8 => self.table_entries[index].msg_data,
                    0xc => self.table_entries[index].vector_ctl,
                    off => {
                        warn!("msi-x: invalid offset in table entry read: {off}");
                        0xffff_ffff
                    }
                };

                LittleEndian::write_u32(data, value);
            }
            8 => {
                let value = match modulo_offset {
                    0x0 => {
                        (u64::from(self.table_entries[index].msg_addr_hi) << 32)
                            | u64::from(self.table_entries[index].msg_addr_lo)
                    }
                    0x8 => {
                        (u64::from(self.table_entries[index].vector_ctl) << 32)
                            | u64::from(self.table_entries[index].msg_data)
                    }
                    off => {
                        warn!("msi-x: invalid offset in table entry read: {off}");
                        0xffff_ffff_ffff_ffff
                    }
                };

                LittleEndian::write_u64(data, value);
            }
            len => {
                warn!("msi-x: invalid length in table entry read: {len}");
                data.fill(0xff);
            }
        }
    }

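    /// Handle a guest write to the MSI-X table region of the BAR.
    ///
    /// If the written entry belongs to an enabled, unmasked vector, the backing
    /// interrupt route is updated; if the write unmasks a vector with a pending bit
    /// set in the PBA, the interrupt is injected and the bit cleared.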
    pub fn write_table(&mut self, offset: u64, data: &[u8]) {
        assert!(data.len() <= 8);

        let index: usize = (offset / MSIX_TABLE_ENTRIES_MODULO) as usize;
        let modulo_offset = offset % MSIX_TABLE_ENTRIES_MODULO;

        if index >= self.table_entries.len() {
            warn!("msi-x: invalid table entry index {index}");
            return;
        }

        // Remember the entry as it was before the write so we can tell what changed.
        let old_entry = self.table_entries[index].clone();

        match data.len() {
            4 => {
                let value = LittleEndian::read_u32(data);
                match modulo_offset {
                    0x0 => self.table_entries[index].msg_addr_lo = value,
                    0x4 => self.table_entries[index].msg_addr_hi = value,
                    0x8 => self.table_entries[index].msg_data = value,
                    0xc => {
                        self.table_entries[index].vector_ctl = value;
                    }
                    off => warn!("msi-x: invalid offset in table entry write: {off}"),
                };
            }
            8 => {
                let value = LittleEndian::read_u64(data);
                match modulo_offset {
                    0x0 => {
                        self.table_entries[index].msg_addr_lo = (value & 0xffff_ffffu64) as u32;
                        self.table_entries[index].msg_addr_hi = (value >> 32) as u32;
                    }
                    0x8 => {
                        self.table_entries[index].msg_data = (value & 0xffff_ffffu64) as u32;
                        self.table_entries[index].vector_ctl = (value >> 32) as u32;
                    }
                    off => warn!("msi-x: invalid offset in table entry write: {off}"),
                };
            }
            len => warn!("msi-x: invalid length in table entry write: {len}"),
        };

        let table_entry = &self.table_entries[index];

        // Nothing changed, so there is nothing to propagate to the interrupt routes.
        if &old_entry == table_entry {
            return;
        }

        // If the vector is live (MSI-X enabled, function unmasked, vector unmasked),
        // propagate the new address/data to its interrupt route.
        if self.enabled && !self.masked && !table_entry.masked() {
            let config = MsixVectorConfig {
                high_addr: table_entry.msg_addr_hi,
                low_addr: table_entry.msg_addr_lo,
                data: table_entry.msg_data,
                devid: self.devid,
            };

            if let Err(e) = self
                .vectors
                .update(index, config, table_entry.masked(), true)
            {
                error!("Failed updating vector: {:?}", e);
            }
        }

        // The MSI-X spec requires a message pending in the PBA to be delivered as soon
        // as its vector gets unmasked, so inject it now and clear the pending bit.
        if !self.masked
            && self.enabled
            && old_entry.masked()
            && !table_entry.masked()
            && self.get_pba_bit(index.try_into().unwrap()) == 1
        {
            self.inject_msix_and_clear_pba(index);
        }
    }

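    /// Handle a guest read from the Pending Bit Array region of the BAR.
    ///
    /// Only 4- and 8-byte accesses are supported; any other access reads as all ones.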
    pub fn read_pba(&self, offset: u64, data: &mut [u8]) {
        // Each PBA entry is a 64-bit word covering 64 vectors.
        let index: usize = (offset / MSIX_PBA_ENTRIES_MODULO) as usize;
        let modulo_offset = offset % MSIX_PBA_ENTRIES_MODULO;

        if index >= self.pba_entries.len() {
            warn!("msi-x: invalid PBA entry index {index}");
            data.fill(0xff);
            return;
        }

        match data.len() {
            4 => {
                let value: u32 = match modulo_offset {
                    0x0 => (self.pba_entries[index] & 0xffff_ffffu64) as u32,
                    0x4 => (self.pba_entries[index] >> 32) as u32,
                    off => {
                        warn!("msi-x: invalid offset in pba entry read: {off}");
                        0xffff_ffff
                    }
                };

                LittleEndian::write_u32(data, value);
            }
            8 => {
                let value: u64 = match modulo_offset {
                    0x0 => self.pba_entries[index],
                    off => {
                        warn!("msi-x: invalid offset in pba entry read: {off}");
                        0xffff_ffff_ffff_ffff
                    }
                };

                LittleEndian::write_u64(data, value);
            }
            len => {
                warn!("msi-x: invalid length in pba entry read: {len}");
                data.fill(0xff);
            }
        }
    }

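    /// The Pending Bit Array is read-only for the guest; writes are logged and ignored.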
    pub fn write_pba(&mut self, _offset: u64, _data: &[u8]) {
        error!("Pending Bit Array is read only");
    }

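    /// Set the pending bit for `vector`, or clear it if `reset` is true.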
    pub fn set_pba_bit(&mut self, vector: u16, reset: bool) {
        assert!(vector < MAX_MSIX_VECTORS_PER_DEVICE);

        if (vector as usize) >= self.table_entries.len() {
            return;
        }

        let index: usize = (vector as usize) / BITS_PER_PBA_ENTRY;
        let shift: usize = (vector as usize) % BITS_PER_PBA_ENTRY;
        let mut mask: u64 = 1u64 << shift;

        if reset {
            mask = !mask;
            self.pba_entries[index] &= mask;
        } else {
            self.pba_entries[index] |= mask;
        }
    }

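    /// Return the pending bit for `vector` (0 or 1), or `0xff` if the vector is out of
    /// range for this device.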
    fn get_pba_bit(&self, vector: u16) -> u8 {
        assert!(vector < MAX_MSIX_VECTORS_PER_DEVICE);

        if (vector as usize) >= self.table_entries.len() {
            return 0xff;
        }

        let index: usize = (vector as usize) / BITS_PER_PBA_ENTRY;
        let shift: usize = (vector as usize) % BITS_PER_PBA_ENTRY;

        ((self.pba_entries[index] >> shift) & 0x0000_0001u64) as u8
    }

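    /// Trigger the interrupt backing `vector` and clear its pending bit in the PBA.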
    fn inject_msix_and_clear_pba(&mut self, vector: usize) {
        match self.vectors.trigger(vector) {
            Ok(_) => debug!("MSI-X injected on vector control flip"),
            Err(e) => error!("failed to inject MSI-X: {}", e),
        }

        self.set_pba_bit(vector.try_into().unwrap(), true);
    }
}

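/// MSI-X capability register block as exposed in PCI configuration space: message
/// control, table offset/BIR and PBA offset/BIR.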
#[repr(C, packed)]
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)]
pub struct MsixCap {
    /// Message control word (MSI-X Enable, Function Mask, table size).
    pub msg_ctl: u16,
    /// Table offset combined with the BAR index (BIR) in the low 3 bits.
    pub table: u32,
    /// PBA offset combined with the BAR index (BIR) in the low 3 bits.
    pub pba: u32,
}

// SAFETY: `MsixCap` is `repr(C, packed)`, contains only integer fields without
// padding, and any byte pattern is a valid value for it.
unsafe impl ByteValued for MsixCap {}

impl PciCapability for MsixCap {
    fn bytes(&self) -> &[u8] {
        self.as_slice()
    }

    fn id(&self) -> PciCapabilityId {
        PciCapabilityId::MsiX
    }
}

impl MsixCap {
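    /// Build the capability for an MSI-X table of `table_size` vectors located at
    /// `table_off` in BAR `table_pci_bar`, with the PBA at `pba_off` in BAR
    /// `pba_pci_bar`. Offsets are truncated to 8-byte alignment and the low 3 bits of
    /// each register encode the BAR index (BIR).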
    pub fn new(
        table_pci_bar: u8,
        table_size: u16,
        table_off: u32,
        pba_pci_bar: u8,
        pba_off: u32,
    ) -> Self {
        assert!(table_size < MAX_MSIX_VECTORS_PER_DEVICE);

        // Set the MSI-X Enable bit and encode the table size as N - 1 in the low
        // 11 bits of message control.
        let msg_ctl: u16 = 0x8000u16 + table_size - 1;

        MsixCap {
            msg_ctl,
            table: (table_off & 0xffff_fff8u32) | u32::from(table_pci_bar & 0x7u8),
            pba: (pba_off & 0xffff_fff8u32) | u32::from(pba_pci_bar & 0x7u8),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::builder::tests::default_vmm;
    use crate::logger::{IncMetric, METRICS};
    use crate::{Vm, check_metric_after_block};

    // Build an MSI-X vector group with `nr_vectors` vectors on top of a default test VMM.
    fn msix_vector_group(nr_vectors: u16) -> Arc<MsixVectorGroup> {
        let vmm = default_vmm();
        Arc::new(Vm::create_msix_group(vmm.vm.clone(), nr_vectors).unwrap())
    }

    #[test]
    #[should_panic]
    fn test_too_many_vectors() {
        MsixConfig::new(msix_vector_group(2049), 0x42);
    }

    #[test]
    fn test_new_msix_config() {
        let config = MsixConfig::new(msix_vector_group(2), 0x42);
        assert_eq!(config.devid, 0x42);
        assert!(config.masked);
        assert!(!config.enabled);
        assert_eq!(config.table_entries.len(), 2);
        assert_eq!(config.pba_entries.len(), 1);
    }

    #[test]
    fn test_enable_msix_vectors() {
        let mut config = MsixConfig::new(msix_vector_group(2), 0x42);

        assert!(!config.enabled);
        assert!(config.masked);

        // Bit 15 is MSI-X Enable, bit 14 is Function Mask.
        config.set_msg_ctl(0x8000);
        assert!(config.enabled);
        assert!(!config.masked);

        config.set_msg_ctl(0x4000);
        assert!(!config.enabled);
        assert!(config.masked);

        config.set_msg_ctl(0xC000);
        assert!(config.enabled);
        assert!(config.masked);

        config.set_msg_ctl(0x0);
        assert!(!config.enabled);
        assert!(!config.masked);
    }

    #[test]
    #[should_panic]
    fn test_table_access_read_too_big() {
        let config = MsixConfig::new(msix_vector_group(2), 0x42);
        let mut buffer = [0u8; 16];

        config.read_table(0, &mut buffer);
    }

    #[test]
    fn test_read_table_past_end() {
        let config = MsixConfig::new(msix_vector_group(2), 0x42);
        let mut buffer = [0u8; 8];

        // Offset 32 is past the two 16-byte table entries, so the read returns all ones.
        config.read_table(32, &mut buffer);
        assert_eq!(buffer, [0xff; 8]);
    }

    #[test]
    fn test_read_table_bad_length() {
        let config = MsixConfig::new(msix_vector_group(2), 0x42);
        let mut buffer = [0u8; 8];

        // Only 4- and 8-byte reads are valid; every other length fills the buffer with
        // ones (a zero-length read leaves it untouched).
        config.read_table(0, &mut buffer[..0]);
        assert_eq!(buffer, [0x0; 8]);
        config.read_table(0, &mut buffer[..1]);
        assert_eq!(buffer[..1], [0xff; 1]);
        config.read_table(0, &mut buffer[..2]);
        assert_eq!(buffer[..2], [0xff; 2]);
        config.read_table(0, &mut buffer[..3]);
        assert_eq!(buffer[..3], [0xff; 3]);
        config.read_table(0, &mut buffer[..5]);
        assert_eq!(buffer[..5], [0xff; 5]);
        config.read_table(0, &mut buffer[..6]);
        assert_eq!(buffer[..6], [0xff; 6]);
        config.read_table(0, &mut buffer[..7]);
        assert_eq!(buffer[..7], [0xff; 7]);
        config.read_table(0, &mut buffer[..4]);
        assert_eq!(buffer, u64::to_le_bytes(0x00ff_ffff_0000_0000));
        config.read_table(0, &mut buffer);
        assert_eq!(buffer, u64::to_le_bytes(0));
    }

    #[test]
    fn test_access_table() {
        let mut config = MsixConfig::new(msix_vector_group(2), 0x42);
        // Enabling and unmasking MSI-X updates the route of both vectors.
        check_metric_after_block!(
            METRICS.interrupts.config_updates,
            2,
            config.set_msg_ctl(0x8000)
        );
        let mut buffer = [0u8; 8];

        // Writing the address of entry 0 does not update the route, because the vector
        // is still masked.
        check_metric_after_block!(
            METRICS.interrupts.config_updates,
            0,
            config.write_table(0, &u64::to_le_bytes(0x0000_1312_0000_1110))
        );

        // Writing data + vector control of entry 0 unmasks the vector, so its route is
        // updated.
        check_metric_after_block!(
            METRICS.interrupts.config_updates,
            1,
            config.write_table(8, &u64::to_le_bytes(0x0_0000_0020))
        );

        // Entry 1 is still masked, so address and data writes alone do not update it.
        check_metric_after_block!(
            METRICS.interrupts.config_updates,
            0,
            config.write_table(16, &u32::to_le_bytes(0x4241))
        );
        check_metric_after_block!(
            METRICS.interrupts.config_updates,
            0,
            config.write_table(20, &u32::to_le_bytes(0x4443))
        );
        check_metric_after_block!(
            METRICS.interrupts.config_updates,
            0,
            config.write_table(24, &u32::to_le_bytes(0x21))
        );
        // Clearing the vector control mask bit of entry 1 triggers the route update.
        check_metric_after_block!(
            METRICS.interrupts.config_updates,
            1,
            config.write_table(28, &u32::to_le_bytes(0x0))
        );

        assert_eq!(config.table_entries[0].msg_addr_hi, 0x1312);
        assert_eq!(config.table_entries[0].msg_addr_lo, 0x1110);
        assert_eq!(config.table_entries[0].msg_data, 0x20);
        assert_eq!(config.table_entries[0].vector_ctl, 0);

        assert_eq!(config.table_entries[1].msg_addr_hi, 0x4443);
        assert_eq!(config.table_entries[1].msg_addr_lo, 0x4241);
        assert_eq!(config.table_entries[1].msg_data, 0x21);
        assert_eq!(config.table_entries[1].vector_ctl, 0);

        assert_eq!(config.table_entries.len(), 2);
        assert_eq!(config.pba_entries.len(), 1);

        // An unaligned 4-byte read within an entry returns all ones.
        config.read_table(1, &mut buffer[..4]);
        assert_eq!(buffer[..4], [0xff; 4]);
        config.read_table(0, &mut buffer[..4]);
        assert_eq!(
            buffer[..4],
            u32::to_le_bytes(config.table_entries[0].msg_addr_lo)
        );
        config.read_table(4, &mut buffer[4..]);
        assert_eq!(0x0000_1312_0000_1110, u64::from_le_bytes(buffer));
        config.read_table(24, &mut buffer[..4]);
        assert_eq!(u32::to_le_bytes(0x21), &buffer[..4]);
        config.read_table(28, &mut buffer[..4]);
        assert_eq!(u32::to_le_bytes(0x0), &buffer[..4]);

        // An unaligned 8-byte read returns all ones.
        config.read_table(19, &mut buffer);
        assert_eq!(buffer, [0xff; 8]);

        config.read_table(16, &mut buffer);
        assert_eq!(0x0000_4443_0000_4241, u64::from_le_bytes(buffer));

        config.read_table(8, &mut buffer);
        assert_eq!(0x0_0000_0020, u64::from_le_bytes(buffer));

        // Masking both vectors does not touch the routes.
        check_metric_after_block!(METRICS.interrupts.config_updates, 0, {
            config.write_table(12, &u32::to_le_bytes(0x1));
            config.write_table(28, &u32::to_le_bytes(0x1));
        });

        // Unmasking them again updates both routes.
        check_metric_after_block!(METRICS.interrupts.config_updates, 2, {
            config.write_table(12, &u32::to_le_bytes(0x0));
            config.write_table(28, &u32::to_le_bytes(0x0));
        });

        // Writing the same (unmasked) value again is a no-op.
        check_metric_after_block!(METRICS.interrupts.config_updates, 0, {
            config.write_table(12, &u32::to_le_bytes(0x0));
            config.write_table(28, &u32::to_le_bytes(0x0));
        });
    }

    #[test]
    #[should_panic]
    fn test_table_access_write_too_big() {
        let mut config = MsixConfig::new(msix_vector_group(2), 0x42);
        let buffer = [0u8; 16];

        config.write_table(0, &buffer);
    }

    #[test]
    fn test_pba_read_too_big() {
        let config = MsixConfig::new(msix_vector_group(2), 0x42);
        let mut buffer = [0u8; 16];

        config.read_pba(0, &mut buffer);
        assert_eq!(buffer, [0xff; 16]);
    }

    #[test]
    fn test_pba_invalid_offset() {
        let config = MsixConfig::new(msix_vector_group(2), 0x42);
        let mut buffer = [0u8; 8];

        // Offset 128 is past the single PBA entry of this device.
        config.read_pba(128, &mut buffer);
        assert_eq!(buffer, [0xffu8; 8]);

        // Unaligned reads within a PBA entry also return all ones.
        let mut buffer = [0u8; 8];
        config.read_pba(3, &mut buffer[..4]);
        assert_eq!(buffer[..4], [0xffu8; 4]);
        config.read_pba(3, &mut buffer);
        assert_eq!(buffer, [0xffu8; 8]);
    }

    #[test]
    #[should_panic]
    fn test_set_pba_bit_vector_too_big() {
        let mut config = MsixConfig::new(msix_vector_group(2), 0x42);

        config.set_pba_bit(2048, false);
    }

    #[test]
    #[should_panic]
    fn test_get_pba_bit_vector_too_big() {
        let config = MsixConfig::new(msix_vector_group(2), 0x42);

        config.get_pba_bit(2048);
    }

    #[test]
    fn test_pba_bit_invalid_vector() {
        let mut config = MsixConfig::new(msix_vector_group(2), 0x42);

        // Vector 2 does not exist on this device; setting its pending bit is a no-op
        // and reading it back returns the 0xff sentinel.
        config.set_pba_bit(2, false);
        assert_eq!(config.pba_entries[0], 0);

        assert_eq!(config.get_pba_bit(2), 0xff);
    }

    #[test]
    fn test_pba_read() {
        let mut config = MsixConfig::new(msix_vector_group(128), 0x42);
        let mut buffer = [0u8; 8];

        config.set_pba_bit(1, false);
        assert_eq!(config.pba_entries[0], 2);
        assert_eq!(config.pba_entries[1], 0);
        config.read_pba(0, &mut buffer);
        assert_eq!(0x2, u64::from_le_bytes(buffer));

        let mut buffer = [0u8; 4];
        config.set_pba_bit(96, false);
        assert_eq!(config.pba_entries[0], 2);
        assert_eq!(config.pba_entries[1], 0x1_0000_0000);
        config.read_pba(8, &mut buffer);
        assert_eq!(0x0, u32::from_le_bytes(buffer));
        config.read_pba(12, &mut buffer);
        assert_eq!(0x1, u32::from_le_bytes(buffer));
    }

    #[test]
    fn test_pending_interrupt() {
        let mut config = MsixConfig::new(msix_vector_group(2), 0x42);
        config.set_pba_bit(1, false);
        assert_eq!(config.get_pba_bit(1), 1);
        // Enabling MSI-X alone does not fire anything: both vectors are still masked.
        check_metric_after_block!(METRICS.interrupts.triggers, 0, config.set_msg_ctl(0x8000));

        // Unmasking vector 1 (which has its pending bit set) injects the interrupt and
        // clears the pending bit; unmasking vector 0 does not fire anything.
        check_metric_after_block!(METRICS.interrupts.triggers, 1, {
            config.write_table(8, &u64::to_le_bytes(0x0_0000_0020));
            config.write_table(24, &u64::to_le_bytes(0x0_0000_0020));
        });
        assert_eq!(config.get_pba_bit(1), 0);

        // With the function masked, pending bits accumulate without firing.
        check_metric_after_block!(METRICS.interrupts.triggers, 0, {
            config.set_msg_ctl(0xc000);
            config.set_pba_bit(0, false);
        });

        // Unmasking the function delivers the interrupt pending on vector 0.
        check_metric_after_block!(METRICS.interrupts.triggers, 1, config.set_msg_ctl(0x8000));
        assert_eq!(config.get_pba_bit(0), 0);
    }
}