use std::convert::TryFrom;
use std::fmt::Debug;
use std::mem::{self, size_of};

use libc::c_char;
use log::debug;
use vm_allocator::AllocPolicy;

use crate::arch::GSI_LEGACY_END;
use crate::arch::x86_64::generated::mpspec;
use crate::vstate::memory::{
    Address, ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap,
};
use crate::vstate::resources::ResourceAllocator;

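// SAFETY: the mpspec structs below are generated plain-old-data types (integers and
// fixed-size byte arrays only), so reinterpreting them as byte slices is sound.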
unsafe impl ByteValued for mpspec::mpc_bus {}
unsafe impl ByteValued for mpspec::mpc_cpu {}
unsafe impl ByteValued for mpspec::mpc_intsrc {}
unsafe impl ByteValued for mpspec::mpc_ioapic {}
unsafe impl ByteValued for mpspec::mpc_table {}
unsafe impl ByteValued for mpspec::mpc_lintsrc {}
unsafe impl ByteValued for mpspec::mpf_intel {}

/// Errors that can occur while writing the MP table to guest memory.
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum MptableError {
    /// There is not enough guest memory to store the whole MP table.
    NotEnoughMemory,
    /// The MP table does not fit into the guest address space.
    AddressOverflow,
    /// Failure while zeroing out the memory reserved for the MP table.
    Clear,
    /// Number of CPUs exceeds the maximum number of supported CPUs.
    TooManyCpus,
    /// Number of IRQs exceeds the maximum number of supported IRQs.
    TooManyIrqs,
    /// Failure to write the MP floating pointer structure.
    WriteMpfIntel,
    /// Failure to write an MP CPU entry.
    WriteMpcCpu,
    /// Failure to write the MP I/O APIC entry.
    WriteMpcIoapic,
    /// Failure to write the MP bus entry.
    WriteMpcBus,
    /// Failure to write an MP interrupt source entry.
    WriteMpcIntsrc,
    /// Failure to write an MP local interrupt source entry.
    WriteMpcLintsrc,
    /// Failure to write the MP table header.
    WriteMpcTable,
    /// Failure to allocate guest memory for the MP table: {0}
    AllocateMemory(#[from] vm_allocator::Error),
}

/// The maximum number of CPUs that can be represented in the MP table.
pub const MAX_SUPPORTED_CPUS: u8 = 254;

// Convenience macro for building fixed-size `c_char` arrays from char literals.
macro_rules! char_array {
    ($t:ty; $( $c:expr ),*) => ( [ $( $c as $t ),* ] )
}

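// Signatures, default APIC addresses, and feature bits used when filling in the MP
// table (see the Intel MultiProcessor Specification v1.4).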
const SMP_MAGIC_IDENT: [c_char; 4] = char_array!(c_char; '_', 'M', 'P', '_');
const MPC_SIGNATURE: [c_char; 4] = char_array!(c_char; 'P', 'C', 'M', 'P');
const MPC_SPEC: i8 = 4;
const MPC_OEM: [c_char; 8] = char_array!(c_char; 'F', 'C', ' ', ' ', ' ', ' ', ' ', ' ');
const MPC_PRODUCT_ID: [c_char; 12] = ['0' as c_char; 12];
const BUS_TYPE_ISA: [u8; 6] = [b'I', b'S', b'A', b' ', b' ', b' '];
const IO_APIC_DEFAULT_PHYS_BASE: u32 = 0xfec0_0000;
const APIC_DEFAULT_PHYS_BASE: u32 = 0xfee0_0000;
const APIC_VERSION: u8 = 0x14;
const CPU_STEPPING: u32 = 0x600;
const CPU_FEATURE_APIC: u32 = 0x200;
const CPU_FEATURE_FPU: u32 = 0x001;

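/// Computes the wrapping 8-bit sum of all bytes of `v`, used to build MP table checksums.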
fn compute_checksum<T: ByteValued>(v: &T) -> u8 {
    let mut checksum: u8 = 0;
    for i in v.as_slice() {
        checksum = checksum.wrapping_add(*i);
    }
    checksum
}

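/// Computes the checksum field of an MP floating pointer structure so that all of its
/// bytes sum to zero (the current checksum byte is excluded before taking the two's
/// complement).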
fn mpf_intel_compute_checksum(v: &mpspec::mpf_intel) -> u8 {
    let checksum = compute_checksum(v).wrapping_sub(v.checksum);
    (!checksum).wrapping_add(1)
}

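/// Size in bytes of the MP table for `num_cpus` vCPUs: the floating pointer structure,
/// the table header, one CPU entry per vCPU, one I/O APIC entry, one bus entry, one
/// interrupt source entry per legacy GSI, and two local interrupt source entries.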
fn compute_mp_size(num_cpus: u8) -> usize {
    mem::size_of::<mpspec::mpf_intel>()
        + mem::size_of::<mpspec::mpc_table>()
        + mem::size_of::<mpspec::mpc_cpu>() * (num_cpus as usize)
        + mem::size_of::<mpspec::mpc_ioapic>()
        + mem::size_of::<mpspec::mpc_bus>()
        + mem::size_of::<mpspec::mpc_intsrc>() * (GSI_LEGACY_END as usize + 1)
        + mem::size_of::<mpspec::mpc_lintsrc>() * 2
}

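/// Writes an MP (MultiProcessor) table describing `num_cpus` vCPUs to guest memory, at
/// an address obtained from the system-memory allocator.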
pub fn setup_mptable(
    mem: &GuestMemoryMmap,
    resource_allocator: &mut ResourceAllocator,
    num_cpus: u8,
) -> Result<(), MptableError> {
    if num_cpus > MAX_SUPPORTED_CPUS {
        return Err(MptableError::TooManyCpus);
    }

    let mp_size = compute_mp_size(num_cpus);
    let mptable_addr =
        resource_allocator.allocate_system_memory(mp_size as u64, 1, AllocPolicy::FirstMatch)?;
    debug!(
        "mptable: Allocated {mp_size} bytes for the MP table of {num_cpus} vCPUs at address {:#010x}",
        mptable_addr
    );

    let mut base_mp = GuestAddress(mptable_addr);
    let mut mp_num_entries: u16 = 0;

    let mut checksum: u8 = 0;
    // Pick an I/O APIC ID that cannot collide with any vCPU's LAPIC ID (0..num_cpus).
    let ioapicid: u8 = num_cpus + 1;

    // Make sure the whole MP table fits in guest memory before writing anything.
    if let Some(end_mp) = base_mp.checked_add((mp_size - 1) as u64) {
        if !mem.address_in_range(end_mp) {
            return Err(MptableError::NotEnoughMemory);
        }
    } else {
        return Err(MptableError::AddressOverflow);
    }

    mem.write_slice(&vec![0; mp_size], base_mp)
        .map_err(|_| MptableError::Clear)?;

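    // MP floating pointer structure: tells the guest where to find the MP configuration
    // table, which is written immediately after this structure.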
    {
        let size = mem::size_of::<mpspec::mpf_intel>() as u64;
        let mut mpf_intel = mpspec::mpf_intel {
            signature: SMP_MAGIC_IDENT,
            physptr: u32::try_from(base_mp.raw_value() + size).unwrap(),
            length: 1,
            specification: 4,
            ..mpspec::mpf_intel::default()
        };
        mpf_intel.checksum = mpf_intel_compute_checksum(&mpf_intel);
        mem.write_obj(mpf_intel, base_mp)
            .map_err(|_| MptableError::WriteMpfIntel)?;
        base_mp = base_mp.unchecked_add(size);
        mp_num_entries += 1;
    }

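    // Skip over the MP configuration table header for now; it is written last, once the
    // number of entries, total length, and checksum are known.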
    let table_base = base_mp;
    base_mp = base_mp.unchecked_add(mem::size_of::<mpspec::mpc_table>() as u64);

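    // One processor entry per vCPU; vCPU 0 is flagged as the bootstrap processor.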
    {
        let size = mem::size_of::<mpspec::mpc_cpu>() as u64;
        for cpu_id in 0..num_cpus {
            let mpc_cpu = mpspec::mpc_cpu {
                type_: mpspec::MP_PROCESSOR.try_into().unwrap(),
                apicid: cpu_id,
                apicver: APIC_VERSION,
                cpuflag: u8::try_from(mpspec::CPU_ENABLED).unwrap()
                    | if cpu_id == 0 {
                        u8::try_from(mpspec::CPU_BOOTPROCESSOR).unwrap()
                    } else {
                        0
                    },
                cpufeature: CPU_STEPPING,
                featureflag: CPU_FEATURE_APIC | CPU_FEATURE_FPU,
                ..Default::default()
            };
            mem.write_obj(mpc_cpu, base_mp)
                .map_err(|_| MptableError::WriteMpcCpu)?;
            base_mp = base_mp.unchecked_add(size);
            checksum = checksum.wrapping_add(compute_checksum(&mpc_cpu));
            mp_num_entries += 1;
        }
    }
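    // A single ISA bus entry (bus id 0) that the interrupt source entries refer to.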
    {
        let size = mem::size_of::<mpspec::mpc_bus>() as u64;
        let mpc_bus = mpspec::mpc_bus {
            type_: mpspec::MP_BUS.try_into().unwrap(),
            busid: 0,
            bustype: BUS_TYPE_ISA,
        };
        mem.write_obj(mpc_bus, base_mp)
            .map_err(|_| MptableError::WriteMpcBus)?;
        base_mp = base_mp.unchecked_add(size);
        checksum = checksum.wrapping_add(compute_checksum(&mpc_bus));
        mp_num_entries += 1;
    }
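    // The I/O APIC entry, located at the conventional default MMIO address.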
    {
        let size = mem::size_of::<mpspec::mpc_ioapic>() as u64;
        let mpc_ioapic = mpspec::mpc_ioapic {
            type_: mpspec::MP_IOAPIC.try_into().unwrap(),
            apicid: ioapicid,
            apicver: APIC_VERSION,
            flags: mpspec::MPC_APIC_USABLE.try_into().unwrap(),
            apicaddr: IO_APIC_DEFAULT_PHYS_BASE,
        };
        mem.write_obj(mpc_ioapic, base_mp)
            .map_err(|_| MptableError::WriteMpcIoapic)?;
        base_mp = base_mp.unchecked_add(size);
        checksum = checksum.wrapping_add(compute_checksum(&mpc_ioapic));
        mp_num_entries += 1;
    }
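    // One I/O interrupt source entry per legacy GSI, identity-mapped from the ISA bus
    // to the corresponding I/O APIC pin.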
    for i in 0..=u8::try_from(GSI_LEGACY_END).map_err(|_| MptableError::TooManyIrqs)? {
        let size = mem::size_of::<mpspec::mpc_intsrc>() as u64;
        let mpc_intsrc = mpspec::mpc_intsrc {
            type_: mpspec::MP_INTSRC.try_into().unwrap(),
            irqtype: mpspec::mp_irq_source_types::mp_INT.try_into().unwrap(),
            irqflag: mpspec::MP_IRQPOL_DEFAULT.try_into().unwrap(),
            srcbus: 0,
            srcbusirq: i,
            dstapic: ioapicid,
            dstirq: i,
        };
        mem.write_obj(mpc_intsrc, base_mp)
            .map_err(|_| MptableError::WriteMpcIntsrc)?;
        base_mp = base_mp.unchecked_add(size);
        checksum = checksum.wrapping_add(compute_checksum(&mpc_intsrc));
        mp_num_entries += 1;
    }
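    // Local interrupt assignments: an ExtINT entry for LINT0 and an NMI entry for LINT1
    // (destination APIC ID 0xFF addresses all local APICs).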
    {
        let size = mem::size_of::<mpspec::mpc_lintsrc>() as u64;
        let mpc_lintsrc = mpspec::mpc_lintsrc {
            type_: mpspec::MP_LINTSRC.try_into().unwrap(),
            irqtype: mpspec::mp_irq_source_types::mp_ExtINT.try_into().unwrap(),
            irqflag: mpspec::MP_IRQPOL_DEFAULT.try_into().unwrap(),
            srcbusid: 0,
            srcbusirq: 0,
            destapic: 0,
            destapiclint: 0,
        };
        mem.write_obj(mpc_lintsrc, base_mp)
            .map_err(|_| MptableError::WriteMpcLintsrc)?;
        base_mp = base_mp.unchecked_add(size);
        checksum = checksum.wrapping_add(compute_checksum(&mpc_lintsrc));
        mp_num_entries += 1;
    }
    {
        let size = mem::size_of::<mpspec::mpc_lintsrc>() as u64;
        let mpc_lintsrc = mpspec::mpc_lintsrc {
            type_: mpspec::MP_LINTSRC.try_into().unwrap(),
            irqtype: mpspec::mp_irq_source_types::mp_NMI.try_into().unwrap(),
            irqflag: mpspec::MP_IRQPOL_DEFAULT.try_into().unwrap(),
            srcbusid: 0,
            srcbusirq: 0,
            destapic: 0xFF,
            destapiclint: 1,
        };
        mem.write_obj(mpc_lintsrc, base_mp)
            .map_err(|_| MptableError::WriteMpcLintsrc)?;
        base_mp = base_mp.unchecked_add(size);
        checksum = checksum.wrapping_add(compute_checksum(&mpc_lintsrc));
        mp_num_entries += 1;
    }

    let table_end = base_mp;

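    // Finally, write the MP configuration table header with the accumulated entry
    // count, total length, and a checksum that makes the table's bytes sum to zero.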
    {
        let mut mpc_table = mpspec::mpc_table {
            signature: MPC_SIGNATURE,
            length: table_end
                .unchecked_offset_from(table_base)
                .try_into()
                .unwrap(),
            spec: MPC_SPEC,
            oem: MPC_OEM,
            oemcount: mp_num_entries,
            productid: MPC_PRODUCT_ID,
            lapic: APIC_DEFAULT_PHYS_BASE,
            ..Default::default()
        };
        debug_assert_eq!(
            mpc_table.length as usize + size_of::<mpspec::mpf_intel>(),
            mp_size
        );
        checksum = checksum.wrapping_add(compute_checksum(&mpc_table));
        #[allow(clippy::cast_possible_wrap)]
        let checksum_final = (!checksum).wrapping_add(1) as i8;
        mpc_table.checksum = checksum_final;
        mem.write_obj(mpc_table, table_base)
            .map_err(|_| MptableError::WriteMpcTable)?;
    }

    Ok(())
}


#[cfg(test)]
mod tests {

    use super::*;
    use crate::arch::SYSTEM_MEM_START;
    use crate::test_utils::single_region_mem_at;
    use crate::vstate::memory::Bytes;

    fn table_entry_size(type_: u8) -> usize {
        match u32::from(type_) {
            mpspec::MP_PROCESSOR => mem::size_of::<mpspec::mpc_cpu>(),
            mpspec::MP_BUS => mem::size_of::<mpspec::mpc_bus>(),
            mpspec::MP_IOAPIC => mem::size_of::<mpspec::mpc_ioapic>(),
            mpspec::MP_INTSRC => mem::size_of::<mpspec::mpc_intsrc>(),
            mpspec::MP_LINTSRC => mem::size_of::<mpspec::mpc_lintsrc>(),
            _ => panic!("unrecognized mpc table entry type: {}", type_),
        }
    }

    #[test]
    fn bounds_check() {
        let num_cpus = 4;
        let mem = single_region_mem_at(SYSTEM_MEM_START, compute_mp_size(num_cpus));
        let mut resource_allocator = ResourceAllocator::new();

        setup_mptable(&mem, &mut resource_allocator, num_cpus).unwrap();
    }

    #[test]
    fn bounds_check_fails() {
        let num_cpus = 4;
        let mem = single_region_mem_at(SYSTEM_MEM_START, compute_mp_size(num_cpus) - 1);
        let mut resource_allocator = ResourceAllocator::new();

        setup_mptable(&mem, &mut resource_allocator, num_cpus).unwrap_err();
    }

    #[test]
    fn mpf_intel_checksum() {
        let num_cpus = 1;
        let mem = single_region_mem_at(SYSTEM_MEM_START, compute_mp_size(num_cpus));
        let mut resource_allocator = ResourceAllocator::new();

        setup_mptable(&mem, &mut resource_allocator, num_cpus).unwrap();

        let mpf_intel: mpspec::mpf_intel = mem.read_obj(GuestAddress(SYSTEM_MEM_START)).unwrap();

        assert_eq!(mpf_intel_compute_checksum(&mpf_intel), mpf_intel.checksum);
    }

    #[test]
    fn mpc_table_checksum() {
        let num_cpus = 4;
        let mem = single_region_mem_at(SYSTEM_MEM_START, compute_mp_size(num_cpus));
        let mut resource_allocator = ResourceAllocator::new();

        setup_mptable(&mem, &mut resource_allocator, num_cpus).unwrap();

        let mpf_intel: mpspec::mpf_intel = mem.read_obj(GuestAddress(SYSTEM_MEM_START)).unwrap();
        let mpc_offset = GuestAddress(u64::from(mpf_intel.physptr));
        let mpc_table: mpspec::mpc_table = mem.read_obj(mpc_offset).unwrap();

        let mut buffer = Vec::new();
        mem.write_volatile_to(mpc_offset, &mut buffer, mpc_table.length as usize)
            .unwrap();
        assert_eq!(
            buffer
                .iter()
                .fold(0u8, |accum, &item| accum.wrapping_add(item)),
            0
        );
    }

    #[test]
    fn mpc_entry_count() {
        let num_cpus = 1;
        let mem = single_region_mem_at(SYSTEM_MEM_START, compute_mp_size(num_cpus));
        let mut resource_allocator = ResourceAllocator::new();

        setup_mptable(&mem, &mut resource_allocator, num_cpus).unwrap();

        let mpf_intel: mpspec::mpf_intel = mem.read_obj(GuestAddress(SYSTEM_MEM_START)).unwrap();
        let mpc_offset = GuestAddress(u64::from(mpf_intel.physptr));
        let mpc_table: mpspec::mpc_table = mem.read_obj(mpc_offset).unwrap();

        let expected_entry_count =
            // MP floating pointer structure
            1
            // CPU entries
            + u16::from(num_cpus)
            // ISA bus entry
            + 1
            // I/O APIC entry
            + 1
            // Interrupt source entries, one per legacy GSI
            + u16::try_from(GSI_LEGACY_END).unwrap() + 1
            // Local interrupt source entries (ExtINT and NMI)
            + 1
            + 1;
        assert_eq!(mpc_table.oemcount, expected_entry_count);
    }

    #[test]
    fn cpu_entry_count() {
        let mem = single_region_mem_at(SYSTEM_MEM_START, compute_mp_size(MAX_SUPPORTED_CPUS));

        for i in 0..MAX_SUPPORTED_CPUS {
            let mut resource_allocator = ResourceAllocator::new();

            setup_mptable(&mem, &mut resource_allocator, i).unwrap();

            let mpf_intel: mpspec::mpf_intel =
                mem.read_obj(GuestAddress(SYSTEM_MEM_START)).unwrap();
            let mpc_offset = GuestAddress(u64::from(mpf_intel.physptr));
            let mpc_table: mpspec::mpc_table = mem.read_obj(mpc_offset).unwrap();
            let mpc_end = mpc_offset.checked_add(u64::from(mpc_table.length)).unwrap();

            let mut entry_offset = mpc_offset
                .checked_add(mem::size_of::<mpspec::mpc_table>() as u64)
                .unwrap();
            let mut cpu_count = 0;
            while entry_offset < mpc_end {
                let entry_type: u8 = mem.read_obj(entry_offset).unwrap();
                entry_offset = entry_offset
                    .checked_add(table_entry_size(entry_type) as u64)
                    .unwrap();
                assert!(entry_offset <= mpc_end);
                if u32::from(entry_type) == mpspec::MP_PROCESSOR {
                    cpu_count += 1;
                }
            }
            assert_eq!(cpu_count, i);
        }
    }

    #[test]
    fn cpu_entry_count_max() {
        let cpus = MAX_SUPPORTED_CPUS + 1;
        let mem = single_region_mem_at(SYSTEM_MEM_START, compute_mp_size(cpus));
        let mut resource_allocator = ResourceAllocator::new();

        let result = setup_mptable(&mem, &mut resource_allocator, cpus).unwrap_err();
        assert_eq!(result, MptableError::TooManyCpus);
    }
}