vmm/
builder.rs

// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//! Enables pre-boot setup, instantiation and booting of a Firecracker VMM.

use std::fmt::Debug;
use std::io;
#[cfg(feature = "gdb")]
use std::sync::mpsc;
use std::sync::{Arc, Mutex};

use event_manager::SubscriberOps;
use linux_loader::cmdline::Cmdline as LoaderKernelCmdline;
use userfaultfd::Uffd;
use utils::time::TimestampUs;
use vm_allocator::AllocPolicy;
use vm_memory::GuestAddress;

use crate::Vcpu;
use crate::arch::{ConfigurationError, configure_system_for_boot, load_kernel};
#[cfg(target_arch = "aarch64")]
use crate::construct_kvm_mpidrs;
use crate::cpu_config::templates::{
    GetCpuTemplate, GetCpuTemplateError, GuestConfigError, KvmCapability,
};
#[cfg(target_arch = "x86_64")]
use crate::device_manager;
use crate::device_manager::pci_mngr::PciManagerError;
use crate::device_manager::{
    AttachDeviceError, DeviceManager, DeviceManagerCreateError, DevicePersistError,
    DeviceRestoreArgs,
};
use crate::devices::virtio::balloon::Balloon;
use crate::devices::virtio::block::device::Block;
use crate::devices::virtio::mem::{VIRTIO_MEM_DEFAULT_SLOT_SIZE_MIB, VirtioMem};
use crate::devices::virtio::net::Net;
use crate::devices::virtio::pmem::device::Pmem;
use crate::devices::virtio::rng::Entropy;
use crate::devices::virtio::vsock::{Vsock, VsockUnixBackend};
#[cfg(feature = "gdb")]
use crate::gdb;
use crate::initrd::{InitrdConfig, InitrdError};
use crate::logger::debug;
use crate::persist::{MicrovmState, MicrovmStateError};
use crate::resources::VmResources;
use crate::seccomp::BpfThreadMap;
use crate::snapshot::Persist;
use crate::utils::mib_to_bytes;
use crate::vmm_config::instance_info::InstanceInfo;
use crate::vmm_config::machine_config::MachineConfigError;
use crate::vmm_config::memory_hotplug::MemoryHotplugConfig;
use crate::vstate::kvm::{Kvm, KvmError};
use crate::vstate::memory::GuestRegionMmap;
#[cfg(target_arch = "aarch64")]
use crate::vstate::resources::ResourceAllocator;
use crate::vstate::vcpu::VcpuError;
use crate::vstate::vm::{Vm, VmError};
use crate::{EventManager, Vmm, VmmError};

/// Errors associated with starting the instance.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum StartMicrovmError {
    /// Unable to attach block device to Vmm: {0}
    AttachBlockDevice(io::Error),
    /// Could not attach device: {0}
    AttachDevice(#[from] AttachDeviceError),
    /// System configuration error: {0}
    ConfigureSystem(#[from] ConfigurationError),
    /// Failed to create device manager: {0}
    CreateDeviceManager(#[from] DeviceManagerCreateError),
    /// Failed to create guest config: {0}
    CreateGuestConfig(#[from] GuestConfigError),
    /// Cannot create network device: {0}
    CreateNetDevice(crate::devices::virtio::net::NetError),
    /// Cannot create pmem device: {0}
    CreatePmemDevice(#[from] crate::devices::virtio::pmem::device::PmemError),
    /// Cannot create RateLimiter: {0}
    CreateRateLimiter(io::Error),
    /// Error creating legacy device: {0}
    #[cfg(target_arch = "x86_64")]
    CreateLegacyDevice(device_manager::legacy::LegacyDeviceError),
    /// Error enabling PCIe support: {0}
    EnablePciDevices(#[from] PciManagerError),
    /// Error enabling pvtime on vcpu: {0}
    #[cfg(target_arch = "aarch64")]
    EnablePVTime(crate::arch::VcpuArchError),
    /// Invalid memory configuration: {0}
    GuestMemory(crate::vstate::memory::MemoryError),
    /// Error with initrd initialization: {0}
    Initrd(#[from] InitrdError),
    /// Internal error while starting microVM: {0}
    Internal(#[from] VmmError),
    /// Failed to get CPU template: {0}
    GetCpuTemplate(#[from] GetCpuTemplateError),
    /// Invalid kernel command line: {0}
    KernelCmdline(String),
    /// Kvm error: {0}
    Kvm(#[from] KvmError),
    /// Cannot load command line string: {0}
    LoadCommandline(linux_loader::loader::Error),
    /// Cannot start microvm without kernel configuration.
    MissingKernelConfig,
    /// Cannot start microvm without guest mem_size config.
    MissingMemSizeConfig,
    /// No seccomp filter for thread category: {0}
    MissingSeccompFilters(String),
    /// The net device configuration is missing the tap device.
    NetDeviceNotConfigured,
    /// Cannot open the block device backing file: {0}
    OpenBlockDevice(io::Error),
    /// Cannot restore microvm state: {0}
    RestoreMicrovmState(MicrovmStateError),
    /// Cannot set vm resources: {0}
    SetVmResources(MachineConfigError),
    /// Cannot create the entropy device: {0}
    CreateEntropyDevice(crate::devices::virtio::rng::EntropyError),
    /// Failed to allocate guest resource: {0}
    AllocateResources(#[from] vm_allocator::Error),
    /// Error starting GDB debug session: {0}
    #[cfg(feature = "gdb")]
    GdbServer(gdb::target::GdbTargetError),
    /// Error cloning Vcpu fds: {0}
    VcpuFdCloneError(#[from] crate::vstate::vcpu::CopyKvmFdError),
    /// Error with the Vm object: {0}
    Vm(#[from] VmError),
    /// Nested virtualization requested but unsupported: {0}
    NestedVirtUnsupported(String),
}

/// It's convenient to automatically convert `linux_loader::cmdline::Error`s
/// to `StartMicrovmError`s.
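/// (This is what lets `?` be applied directly to `Cmdline` insertion calls
/// throughout this module.)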
impl std::convert::From<linux_loader::cmdline::Error> for StartMicrovmError {
    fn from(err: linux_loader::cmdline::Error) -> StartMicrovmError {
        StartMicrovmError::KernelCmdline(err.to_string())
    }
}

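/// Creates the KVM VM and its vCPUs, registers the given guest memory as DRAM,
/// and assembles them into a `Vmm` (with a default `DeviceManager`) whose
/// vCPUs have not yet been started.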
#[cfg_attr(target_arch = "aarch64", allow(unused))]
pub fn create_vmm_and_vcpus(
    instance_info: &InstanceInfo,
    event_manager: &mut EventManager,
    guest_memory: Vec<GuestRegionMmap>,
    uffd: Option<Uffd>,
    _track_dirty_pages: bool,
    vcpu_count: u8,
    kvm_capabilities: Vec<KvmCapability>,
) -> Result<(Vmm, Vec<Vcpu>), StartMicrovmError> {
    let kvm = Kvm::new(kvm_capabilities).map_err(StartMicrovmError::Kvm)?;
    let mut vm = Vm::new(&kvm).map_err(StartMicrovmError::Vm)?;
    let (vcpus, vcpus_exit_evt) = vm.create_vcpus(vcpu_count).map_err(StartMicrovmError::Vm)?;
    vm.register_dram_memory_regions(guest_memory)
        .map_err(StartMicrovmError::Vm)?;

    let device_manager = DeviceManager::new(event_manager, &vcpus_exit_evt, &vm, None)
        .map_err(StartMicrovmError::CreateDeviceManager)?;

    let vmm = Vmm {
        instance_info: instance_info.clone(),
        shutdown_exit_code: None,
        kvm,
        vm: Arc::new(vm),
        uffd,
        vcpus_handles: Vec::new(),
        vcpus_exit_evt,
        device_manager,
    };

    Ok((vmm, vcpus))
}

/// Builds and starts a microVM based on the current Firecracker VmResources configuration.
///
/// The built microVM and all the created vCPUs start off in the paused state.
/// To boot the microVM and run those vCPUs, `Vmm::resume_vm()` needs to be
/// called.
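///
/// A minimal usage sketch (illustrative only, not compiled as a doctest; it
/// assumes the caller already constructed the instance info, resources, event
/// manager and seccomp filters, inside a function that can propagate
/// `StartMicrovmError`):
///
/// ```ignore
/// let vmm = build_microvm_for_boot(&instance_info, &vm_resources, &mut event_manager, &seccomp_filters)?;
/// // The vCPUs are parked in the `Paused` state; start the actual boot:
/// vmm.lock().unwrap().resume_vm()?;
/// ```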
pub fn build_microvm_for_boot(
    instance_info: &InstanceInfo,
    vm_resources: &super::resources::VmResources,
    event_manager: &mut EventManager,
    seccomp_filters: &BpfThreadMap,
) -> Result<Arc<Mutex<Vmm>>, StartMicrovmError> {
    // Timestamp for measuring microVM boot duration.
    let request_ts = TimestampUs::default();

    let boot_config = vm_resources
        .boot_source
        .builder
        .as_ref()
        .ok_or(StartMicrovmError::MissingKernelConfig)?;

    let guest_memory = vm_resources
        .allocate_guest_memory()
        .map_err(StartMicrovmError::GuestMemory)?;

    // Clone the command-line so that a failed boot doesn't pollute the original.
    #[allow(unused_mut)]
    let mut boot_cmdline = boot_config.cmdline.clone();

    let cpu_template = vm_resources
        .machine_config
        .cpu_template
        .get_cpu_template()?;

    let kvm = Kvm::new(cpu_template.kvm_capabilities.clone())?;
    // Set up the KVM VM and register memory regions.
    // Build a custom CPU config if a custom template is provided.
    let mut vm = Vm::new(&kvm)?;
    let (mut vcpus, vcpus_exit_evt) = vm.create_vcpus(vm_resources.machine_config.vcpu_count)?;
    vm.register_dram_memory_regions(guest_memory)?;

    // Allocate memory as soon as possible to make hotpluggable memory available to all
    // consumers before they clone the GuestMemoryMmap object.
    let virtio_mem_addr = if let Some(memory_hotplug) = &vm_resources.memory_hotplug {
        let addr = allocate_virtio_mem_address(&vm, memory_hotplug.total_size_mib)?;
        let hotplug_memory_region = vm_resources
            .allocate_memory_region(addr, mib_to_bytes(memory_hotplug.total_size_mib))
            .map_err(StartMicrovmError::GuestMemory)?;
        vm.register_hotpluggable_memory_region(
            hotplug_memory_region,
            mib_to_bytes(memory_hotplug.slot_size_mib),
        )?;
        Some(addr)
    } else {
        None
    };

    let mut device_manager = DeviceManager::new(
        event_manager,
        &vcpus_exit_evt,
        &vm,
        vm_resources.serial_out_path.as_ref(),
    )?;

    let vm = Arc::new(vm);

    let entry_point = load_kernel(&boot_config.kernel_file, vm.guest_memory())?;
    let initrd = InitrdConfig::from_config(boot_config, vm.guest_memory())?;

    if vm_resources.pci_enabled {
        device_manager.enable_pci(&vm)?;
    } else {
        boot_cmdline.insert("pci", "off")?;
    }

    // The boot timer device needs to be the first device attached in order
    // to maintain the same MMIO address referenced in the documentation
    // and tests.
    if vm_resources.boot_timer {
        device_manager.attach_boot_timer_device(&vm, request_ts)?;
    }

    if let Some(balloon) = vm_resources.balloon.get() {
        attach_balloon_device(
            &mut device_manager,
            &vm,
            &mut boot_cmdline,
            balloon,
            event_manager,
        )?;
    }

    attach_block_devices(
        &mut device_manager,
        &vm,
        &mut boot_cmdline,
        vm_resources.block.devices.iter(),
        event_manager,
    )?;
    attach_net_devices(
        &mut device_manager,
        &vm,
        &mut boot_cmdline,
        vm_resources.net_builder.iter(),
        event_manager,
    )?;
    attach_pmem_devices(
        &mut device_manager,
        &vm,
        &mut boot_cmdline,
        vm_resources.pmem.devices.iter(),
        event_manager,
    )?;

    if let Some(unix_vsock) = vm_resources.vsock.get() {
        attach_unixsock_vsock_device(
            &mut device_manager,
            &vm,
            &mut boot_cmdline,
            unix_vsock,
            event_manager,
        )?;
    }

    if let Some(entropy) = vm_resources.entropy.get() {
        attach_entropy_device(
            &mut device_manager,
            &vm,
            &mut boot_cmdline,
            entropy,
            event_manager,
        )?;
    }

    // Attach the virtio-mem device if memory hotplug is configured.
    if let Some(memory_hotplug) = &vm_resources.memory_hotplug {
        attach_virtio_mem_device(
            &mut device_manager,
            &vm,
            &mut boot_cmdline,
            memory_hotplug,
            event_manager,
            virtio_mem_addr.expect("address should be allocated"),
        )?;
    }

    #[cfg(target_arch = "aarch64")]
    device_manager.attach_legacy_devices_aarch64(
        &vm,
        event_manager,
        &mut boot_cmdline,
        vm_resources.serial_out_path.as_ref(),
    )?;

    device_manager.attach_vmgenid_device(&vm)?;
    #[cfg(target_arch = "x86_64")]
    device_manager.attach_vmclock_device(&vm)?;

    #[cfg(target_arch = "aarch64")]
    if vcpus[0].kvm_vcpu.supports_pvtime() {
        setup_pvtime(&mut vm.resource_allocator(), &mut vcpus)?;
    } else {
        log::warn!("vCPUs do not support pvtime; steal time will not be reported to the guest");
    }

    configure_system_for_boot(
        &kvm,
        &vm,
        &mut device_manager,
        vcpus.as_mut(),
        &vm_resources.machine_config,
        &cpu_template,
        entry_point,
        &initrd,
        boot_cmdline,
    )?;

    let vmm = Vmm {
        instance_info: instance_info.clone(),
        shutdown_exit_code: None,
        kvm,
        vm,
        uffd: None,
        vcpus_handles: Vec::new(),
        vcpus_exit_evt,
        device_manager,
    };
    let vmm = Arc::new(Mutex::new(vmm));

    #[cfg(feature = "gdb")]
    let (gdb_tx, gdb_rx) = mpsc::channel();

    #[cfg(feature = "gdb")]
    vcpus
        .iter_mut()
        .for_each(|vcpu| vcpu.attach_debug_info(gdb_tx.clone()));

    // Move vcpus to their own threads and start their state machine in the 'Paused' state.
    vmm.lock()
        .unwrap()
        .start_vcpus(
            vcpus,
            seccomp_filters
                .get("vcpu")
                .ok_or_else(|| StartMicrovmError::MissingSeccompFilters("vcpu".to_string()))?
                .clone(),
        )
        .map_err(VmmError::VcpuStart)?;

    #[cfg(feature = "gdb")]
    if let Some(gdb_socket_path) = &vm_resources.machine_config.gdb_socket_path {
        gdb::gdb_thread(vmm.clone(), gdb_rx, entry_point.entry_addr, gdb_socket_path)
            .map_err(StartMicrovmError::GdbServer)?;
    } else {
        debug!("No GDB socket provided, not starting the GDB server.");
    }

    // Load seccomp filters for the VMM thread.
    // Execution panics if filters cannot be loaded; use --no-seccomp if skipping filters
    // altogether is the desired behaviour.
    // Keep this as the last step before resuming the vcpus.
    crate::seccomp::apply_filter(
        seccomp_filters
            .get("vmm")
            .ok_or_else(|| StartMicrovmError::MissingSeccompFilters("vmm".to_string()))?,
    )
    .map_err(VmmError::SeccompFilters)?;

    event_manager.add_subscriber(vmm.clone());

    Ok(vmm)
}

/// Builds and boots a microVM based on the current Firecracker VmResources configuration.
///
/// This is the default build recipe; one could build other microVM flavors by using the
/// independent functions in this module instead of calling this recipe.
///
/// An `Arc` reference of the built `Vmm` is also plugged into the `EventManager`, while another
/// is returned.
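///
/// A hedged usage sketch (illustrative only, not compiled as a doctest):
///
/// ```ignore
/// let vmm = build_and_boot_microvm(&instance_info, &vm_resources, &mut event_manager, &seccomp_filters)?;
/// // Unlike `build_microvm_for_boot`, the vCPUs have already been resumed here.
/// ```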
pub fn build_and_boot_microvm(
    instance_info: &InstanceInfo,
    vm_resources: &super::resources::VmResources,
    event_manager: &mut EventManager,
    seccomp_filters: &BpfThreadMap,
) -> Result<Arc<Mutex<Vmm>>, StartMicrovmError> {
    debug!("event_start: build microvm for boot");
    let vmm = build_microvm_for_boot(instance_info, vm_resources, event_manager, seccomp_filters)?;
    debug!("event_end: build microvm for boot");
    // The vcpus start off in the `Paused` state; let them run.
    debug!("event_start: boot microvm");
    vmm.lock().unwrap().resume_vm()?;
    debug!("event_end: boot microvm");
    Ok(vmm)
}

/// Error type for [`build_microvm_from_snapshot`].
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum BuildMicrovmFromSnapshotError {
    /// Failed to create microVM and vCPUs: {0}
    CreateMicrovmAndVcpus(#[from] StartMicrovmError),
    /// Could not access KVM: {0}
    KvmAccess(#[from] vmm_sys_util::errno::Error),
    /// Error configuring the TSC: frequency not present in the given snapshot.
    TscFrequencyNotPresent,
    #[cfg(target_arch = "x86_64")]
    /// Could not get TSC to check if TSC scaling was required with the snapshot: {0}
    GetTsc(#[from] crate::arch::GetTscError),
    #[cfg(target_arch = "x86_64")]
    /// Could not set TSC scaling within the snapshot: {0}
    SetTsc(#[from] crate::arch::SetTscError),
    /// Failed to restore microVM state: {0}
    RestoreState(#[from] crate::vstate::vm::ArchVmError),
    /// Failed to update microVM configuration: {0}
    VmUpdateConfig(#[from] MachineConfigError),
    /// Failed to restore MMIO device: {0}
    RestoreMmioDevice(#[from] MicrovmStateError),
    /// Failed to emulate MMIO serial: {0}
    EmulateSerialInit(#[from] crate::EmulateSerialInitError),
    /// Failed to start vCPUs as no vCPU seccomp filter found.
    MissingVcpuSeccompFilters,
    /// Failed to start vCPUs: {0}
    StartVcpus(#[from] crate::StartVcpusError),
    /// Failed to restore vCPUs: {0}
    RestoreVcpus(#[from] VcpuError),
    /// Failed to apply the VMM seccomp filter as none was found.
    MissingVmmSeccompFilters,
    /// Failed to apply the VMM seccomp filter: {0}
    SeccompFiltersInternal(#[from] crate::seccomp::InstallationError),
    /// Failed to restore devices: {0}
    RestoreDevices(#[from] DevicePersistError),
}

/// Builds and starts a microVM based on the provided MicrovmState.
///
/// An `Arc` reference of the built `Vmm` is also plugged into the `EventManager`, while another
/// is returned.
#[allow(clippy::too_many_arguments)]
pub fn build_microvm_from_snapshot(
    instance_info: &InstanceInfo,
    event_manager: &mut EventManager,
    microvm_state: MicrovmState,
    guest_memory: Vec<GuestRegionMmap>,
    uffd: Option<Uffd>,
    seccomp_filters: &BpfThreadMap,
    vm_resources: &mut VmResources,
) -> Result<Arc<Mutex<Vmm>>, BuildMicrovmFromSnapshotError> {
    // Build the Vmm.
    debug!("event_start: build microvm from snapshot");

    let kvm = Kvm::new(microvm_state.kvm_state.kvm_cap_modifiers.clone())
        .map_err(StartMicrovmError::Kvm)?;
    // Set up the KVM VM and register memory regions.
    // Build a custom CPU config if a custom template is provided.
    let mut vm = Vm::new(&kvm).map_err(StartMicrovmError::Vm)?;

    let (mut vcpus, vcpus_exit_evt) = vm
        .create_vcpus(vm_resources.machine_config.vcpu_count)
        .map_err(StartMicrovmError::Vm)?;

    vm.restore_memory_regions(guest_memory, &microvm_state.vm_state.memory)
        .map_err(StartMicrovmError::Vm)?;

    #[cfg(target_arch = "x86_64")]
    {
        // Scale the TSC to match; extract the TSC frequency from the state if specified.
        if let Some(state_tsc) = microvm_state.vcpu_states[0].tsc_khz {
            // Scale the TSC frequency for all vCPUs. If a TSC frequency is not specified in
            // the snapshot, the host frequency is used by default.
            if vcpus[0].kvm_vcpu.is_tsc_scaling_required(state_tsc)? {
                for vcpu in &vcpus {
                    vcpu.kvm_vcpu.set_tsc_khz(state_tsc)?;
                }
            }
        }
    }

    // Restore the vcpus' KVM state.
    for (vcpu, state) in vcpus.iter_mut().zip(microvm_state.vcpu_states.iter()) {
        vcpu.kvm_vcpu
            .restore_state(state)
            .map_err(VcpuError::VcpuResponse)
            .map_err(BuildMicrovmFromSnapshotError::RestoreVcpus)?;
    }

    #[cfg(target_arch = "aarch64")]
    {
        let mpidrs = construct_kvm_mpidrs(&microvm_state.vcpu_states);
        // Restore the KVM VM state.
        vm.restore_state(&mpidrs, &microvm_state.vm_state)?;
    }

    // Restore the KVM VM state.
    #[cfg(target_arch = "x86_64")]
    vm.restore_state(&microvm_state.vm_state)?;

    // Restore the boot source config paths.
    vm_resources.boot_source.config = microvm_state.vm_info.boot_source;

    let vm = Arc::new(vm);

    // Restore the devices' states.
    // Restoring VMGenID injects an interrupt in the guest to notify it about the new generation
    // ID. As a result, we need to restore the DeviceManager after restoring the KVM state,
    // otherwise the injected interrupt will be overwritten.
    let device_ctor_args = DeviceRestoreArgs {
        mem: vm.guest_memory(),
        vm: &vm,
        event_manager,
        vm_resources,
        instance_id: &instance_info.id,
        vcpus_exit_evt: &vcpus_exit_evt,
    };
    #[allow(unused_mut)]
    let mut device_manager =
        DeviceManager::restore(device_ctor_args, &microvm_state.device_states)?;

    let mut vmm = Vmm {
        instance_info: instance_info.clone(),
        shutdown_exit_code: None,
        kvm,
        vm,
        uffd,
        vcpus_handles: Vec::new(),
        vcpus_exit_evt,
        device_manager,
    };

    // Move vcpus to their own threads and start their state machine in the 'Paused' state.
    vmm.start_vcpus(
        vcpus,
        seccomp_filters
            .get("vcpu")
            .ok_or(BuildMicrovmFromSnapshotError::MissingVcpuSeccompFilters)?
            .clone(),
    )?;

    let vmm = Arc::new(Mutex::new(vmm));
    event_manager.add_subscriber(vmm.clone());

    // Load seccomp filters for the VMM thread.
    // Keep this as the last step of the building process.
    crate::seccomp::apply_filter(
        seccomp_filters
            .get("vmm")
            .ok_or(BuildMicrovmFromSnapshotError::MissingVmmSeccompFilters)?,
    )?;
    debug!("event_end: build microvm from snapshot");

    Ok(vmm)
}

/// 64 bytes due to the alignment requirement in section 3.1 of https://www.kernel.org/doc/html/v5.8/virt/kvm/devices/vcpu.html#attribute-kvm-arm-vcpu-pvtime-ipa
#[cfg(target_arch = "aarch64")]
const STEALTIME_STRUCT_MEM_SIZE: u64 = 64;
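//
// Layout sketch (derived from the two helpers below): the steal-time region is
// `STEALTIME_STRUCT_MEM_SIZE * vcpu_count` bytes, 64-byte aligned, and vCPU
// `i`'s record lives at `base + i * 64`; e.g. with 4 vCPUs the region spans
// 256 bytes and vCPU 2's record sits at `base + 128`.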

/// Helper method to allocate the steal time region.
#[cfg(target_arch = "aarch64")]
fn allocate_pvtime_region(
    resource_allocator: &mut ResourceAllocator,
    vcpu_count: usize,
    policy: vm_allocator::AllocPolicy,
) -> Result<GuestAddress, StartMicrovmError> {
    let size = STEALTIME_STRUCT_MEM_SIZE * vcpu_count as u64;
    let addr = resource_allocator
        .allocate_system_memory(size, STEALTIME_STRUCT_MEM_SIZE, policy)
        .map_err(StartMicrovmError::AllocateResources)?;
    Ok(GuestAddress(addr))
}

/// Sets up pvtime for all vcpus.
#[cfg(target_arch = "aarch64")]
fn setup_pvtime(
    resource_allocator: &mut ResourceAllocator,
    vcpus: &mut [Vcpu],
) -> Result<(), StartMicrovmError> {
    // Allocate system memory for the steal time region.
    let pvtime_mem: GuestAddress = allocate_pvtime_region(
        resource_allocator,
        vcpus.len(),
        vm_allocator::AllocPolicy::LastMatch,
    )?;

    // Register all vcpus with the pvtime device.
    for (i, vcpu) in vcpus.iter_mut().enumerate() {
        vcpu.kvm_vcpu
            .enable_pvtime(GuestAddress(
                pvtime_mem.0 + i as u64 * STEALTIME_STRUCT_MEM_SIZE,
            ))
            .map_err(StartMicrovmError::EnablePVTime)?;
    }

    Ok(())
}

fn attach_entropy_device(
    device_manager: &mut DeviceManager,
    vm: &Arc<Vm>,
    cmdline: &mut LoaderKernelCmdline,
    entropy_device: &Arc<Mutex<Entropy>>,
    event_manager: &mut EventManager,
) -> Result<(), AttachDeviceError> {
    let id = entropy_device
        .lock()
        .expect("Poisoned lock")
        .id()
        .to_string();

    event_manager.add_subscriber(entropy_device.clone());
    device_manager.attach_virtio_device(vm, id, entropy_device.clone(), cmdline, false)
}

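/// Reserves a guest-physical address range for the hotpluggable (virtio-mem)
/// memory region, placed past the 64-bit MMIO window and aligned to the
/// default virtio-mem slot size.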
fn allocate_virtio_mem_address(
    vm: &Vm,
    total_size_mib: usize,
) -> Result<GuestAddress, StartMicrovmError> {
    let addr = vm
        .resource_allocator()
        .past_mmio64_memory
        .allocate(
            mib_to_bytes(total_size_mib) as u64,
            mib_to_bytes(VIRTIO_MEM_DEFAULT_SLOT_SIZE_MIB) as u64,
            AllocPolicy::FirstMatch,
        )?
        .start();
    Ok(GuestAddress(addr))
}

fn attach_virtio_mem_device(
    device_manager: &mut DeviceManager,
    vm: &Arc<Vm>,
    cmdline: &mut LoaderKernelCmdline,
    config: &MemoryHotplugConfig,
    event_manager: &mut EventManager,
    addr: GuestAddress,
) -> Result<(), StartMicrovmError> {
    let virtio_mem = Arc::new(Mutex::new(
        VirtioMem::new(
            Arc::clone(vm),
            addr,
            config.total_size_mib,
            config.block_size_mib,
            config.slot_size_mib,
        )
        .map_err(|e| StartMicrovmError::Internal(VmmError::VirtioMem(e)))?,
    ));

    let id = virtio_mem.lock().expect("Poisoned lock").id().to_string();
    event_manager.add_subscriber(virtio_mem.clone());
    device_manager.attach_virtio_device(vm, id, virtio_mem.clone(), cmdline, false)?;
    Ok(())
}

pub fn attach_block_devices<'a, I: Iterator<Item = &'a Arc<Mutex<Block>>> + Debug>(
    device_manager: &mut DeviceManager,
    vm: &Arc<Vm>,
    cmdline: &mut LoaderKernelCmdline,
    blocks: I,
    event_manager: &mut EventManager,
) -> Result<(), StartMicrovmError> {
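    // For the root device this emits e.g. `root=/dev/vda ro` (no PARTUUID) or
    // `root=PARTUUID=<uuid> rw` (with a PARTUUID) on the kernel command line.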
    for block in blocks {
        let (id, is_vhost_user) = {
            let locked = block.lock().expect("Poisoned lock");
            if locked.root_device() {
                match locked.partuuid() {
                    Some(partuuid) => cmdline.insert_str(format!("root=PARTUUID={}", partuuid))?,
                    None => cmdline.insert_str("root=/dev/vda")?,
                }
                match locked.read_only() {
                    true => cmdline.insert_str("ro")?,
                    false => cmdline.insert_str("rw")?,
                }
            }
            (locked.id().to_string(), locked.is_vhost_user())
        };
        // The device mutex mustn't be locked here, otherwise it will deadlock.
        event_manager.add_subscriber(block.clone());
        device_manager.attach_virtio_device(vm, id, block.clone(), cmdline, is_vhost_user)?;
    }
    Ok(())
}

pub fn attach_net_devices<'a, I: Iterator<Item = &'a Arc<Mutex<Net>>> + Debug>(
    device_manager: &mut DeviceManager,
    vm: &Arc<Vm>,
    cmdline: &mut LoaderKernelCmdline,
    net_devices: I,
    event_manager: &mut EventManager,
) -> Result<(), StartMicrovmError> {
    for net_device in net_devices {
        let id = net_device.lock().expect("Poisoned lock").id().clone();
        event_manager.add_subscriber(net_device.clone());
        // The device mutex mustn't be locked here, otherwise it will deadlock.
        device_manager.attach_virtio_device(vm, id, net_device.clone(), cmdline, false)?;
    }
    Ok(())
}

fn attach_pmem_devices<'a, I: Iterator<Item = &'a Arc<Mutex<Pmem>>> + Debug>(
    device_manager: &mut DeviceManager,
    vm: &Arc<Vm>,
    cmdline: &mut LoaderKernelCmdline,
    pmem_devices: I,
    event_manager: &mut EventManager,
) -> Result<(), StartMicrovmError> {
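    // The guest device name is assumed to follow attach order: the i-th
    // attached device backs `/dev/pmem{i}`, so a read-only root pmem device
    // yields e.g. `root=/dev/pmem0 ro` on the kernel command line.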
    for (i, device) in pmem_devices.enumerate() {
        let id = {
            let mut locked_dev = device.lock().expect("Poisoned lock");
            if locked_dev.config.root_device {
                cmdline.insert_str(format!("root=/dev/pmem{i}"))?;
                match locked_dev.config.read_only {
                    true => cmdline.insert_str("ro")?,
                    false => cmdline.insert_str("rw")?,
                }
            }
            locked_dev.alloc_region(vm.as_ref());
            locked_dev.set_mem_region(vm.as_ref())?;
            locked_dev.config.id.to_string()
        };

        event_manager.add_subscriber(device.clone());
        device_manager.attach_virtio_device(vm, id, device.clone(), cmdline, false)?;
    }
    Ok(())
}

fn attach_unixsock_vsock_device(
    device_manager: &mut DeviceManager,
    vm: &Arc<Vm>,
    cmdline: &mut LoaderKernelCmdline,
    unix_vsock: &Arc<Mutex<Vsock<VsockUnixBackend>>>,
    event_manager: &mut EventManager,
) -> Result<(), AttachDeviceError> {
    let id = String::from(unix_vsock.lock().expect("Poisoned lock").id());
    event_manager.add_subscriber(unix_vsock.clone());
    // The device mutex mustn't be locked here, otherwise it will deadlock.
    device_manager.attach_virtio_device(vm, id, unix_vsock.clone(), cmdline, false)
}

fn attach_balloon_device(
    device_manager: &mut DeviceManager,
    vm: &Arc<Vm>,
    cmdline: &mut LoaderKernelCmdline,
    balloon: &Arc<Mutex<Balloon>>,
    event_manager: &mut EventManager,
) -> Result<(), AttachDeviceError> {
    let id = String::from(balloon.lock().expect("Poisoned lock").id());
    event_manager.add_subscriber(balloon.clone());
    // The device mutex mustn't be locked here, otherwise it will deadlock.
    device_manager.attach_virtio_device(vm, id, balloon.clone(), cmdline, false)
}

#[cfg(test)]
pub(crate) mod tests {

    use linux_loader::cmdline::Cmdline;
    use vmm_sys_util::tempfile::TempFile;

    use super::*;
    use crate::device_manager::tests::default_device_manager;
    use crate::devices::virtio::block::CacheType;
    use crate::devices::virtio::generated::virtio_ids;
    use crate::devices::virtio::rng::device::ENTROPY_DEV_ID;
    use crate::devices::virtio::vsock::VSOCK_DEV_ID;
    use crate::mmds::data_store::{Mmds, MmdsVersion};
    use crate::mmds::ns::MmdsNetworkStack;
    use crate::utils::mib_to_bytes;
    use crate::vmm_config::balloon::{BALLOON_DEV_ID, BalloonBuilder, BalloonDeviceConfig};
    use crate::vmm_config::boot_source::DEFAULT_KERNEL_CMDLINE;
    use crate::vmm_config::drive::{BlockBuilder, BlockDeviceConfig};
    use crate::vmm_config::entropy::{EntropyDeviceBuilder, EntropyDeviceConfig};
    use crate::vmm_config::net::{NetBuilder, NetworkInterfaceConfig};
    use crate::vmm_config::pmem::{PmemBuilder, PmemConfig};
    use crate::vmm_config::vsock::tests::default_config;
    use crate::vmm_config::vsock::{VsockBuilder, VsockDeviceConfig};
    use crate::vstate::vm::tests::setup_vm_with_memory;

    #[derive(Debug)]
    pub(crate) struct CustomBlockConfig {
        drive_id: String,
        is_root_device: bool,
        partuuid: Option<String>,
        is_read_only: bool,
        cache_type: CacheType,
    }

    impl CustomBlockConfig {
        pub(crate) fn new(
            drive_id: String,
            is_root_device: bool,
            partuuid: Option<String>,
            is_read_only: bool,
            cache_type: CacheType,
        ) -> Self {
            CustomBlockConfig {
                drive_id,
                is_root_device,
                partuuid,
                is_read_only,
                cache_type,
            }
        }
    }

    fn cmdline_contains(cmdline: &Cmdline, slug: &str) -> bool {
        // The following unwraps can never fail; the only way any of these methods
        // would return an `Err` is if one of the following conditions is met:
        //    1. The command line is empty: We just added things to it, and if insertion of an
        //       argument goes wrong, then `Cmdline::insert` would have already returned `Err`.
        //    2. There's a spurious null character somewhere in the command line: The
        //       `Cmdline::insert` methods verify that this is not the case.
        //    3. The `CString` is not valid UTF8: It just got created from a `String`, which was
        //       valid UTF8.

        cmdline
            .as_cstring()
            .unwrap()
            .into_string()
            .unwrap()
            .contains(slug)
    }

    pub(crate) fn default_kernel_cmdline() -> Cmdline {
        linux_loader::cmdline::Cmdline::try_from(
            DEFAULT_KERNEL_CMDLINE,
            crate::arch::CMDLINE_MAX_SIZE,
        )
        .unwrap()
    }

    pub(crate) fn default_vmm() -> Vmm {
        let (kvm, mut vm) = setup_vm_with_memory(mib_to_bytes(128));

        let (_, vcpus_exit_evt) = vm.create_vcpus(1).unwrap();

        Vmm {
            instance_info: InstanceInfo::default(),
            shutdown_exit_code: None,
            kvm,
            vm: Arc::new(vm),
            uffd: None,
            vcpus_handles: Vec::new(),
            vcpus_exit_evt,
            device_manager: default_device_manager(),
        }
    }

    pub(crate) fn insert_block_devices(
        vmm: &mut Vmm,
        cmdline: &mut Cmdline,
        event_manager: &mut EventManager,
        custom_block_cfgs: Vec<CustomBlockConfig>,
    ) -> Vec<TempFile> {
        let mut block_dev_configs = BlockBuilder::new();
        let mut block_files = Vec::new();
        for custom_block_cfg in custom_block_cfgs {
            block_files.push(TempFile::new().unwrap());

            let block_device_config = BlockDeviceConfig {
                drive_id: String::from(&custom_block_cfg.drive_id),
                partuuid: custom_block_cfg.partuuid,
                is_root_device: custom_block_cfg.is_root_device,
                cache_type: custom_block_cfg.cache_type,

                is_read_only: Some(custom_block_cfg.is_read_only),
                path_on_host: Some(
                    block_files
                        .last()
                        .unwrap()
                        .as_path()
                        .to_str()
                        .unwrap()
                        .to_string(),
                ),
                rate_limiter: None,
                file_engine_type: None,

                socket: None,
            };

            block_dev_configs
                .insert(block_device_config, false)
                .unwrap();
        }

        attach_block_devices(
            &mut vmm.device_manager,
            &vmm.vm,
            cmdline,
            block_dev_configs.devices.iter(),
            event_manager,
        )
        .unwrap();
        block_files
    }

    pub(crate) fn insert_net_device(
        vmm: &mut Vmm,
        cmdline: &mut Cmdline,
        event_manager: &mut EventManager,
        net_config: NetworkInterfaceConfig,
    ) {
        let mut net_builder = NetBuilder::new();
        net_builder.build(net_config).unwrap();

        let res = attach_net_devices(
            &mut vmm.device_manager,
            &vmm.vm,
            cmdline,
            net_builder.iter(),
            event_manager,
        );
        res.unwrap();
    }

    pub(crate) fn insert_net_device_with_mmds(
        vmm: &mut Vmm,
        cmdline: &mut Cmdline,
        event_manager: &mut EventManager,
        net_config: NetworkInterfaceConfig,
        mmds_version: MmdsVersion,
    ) {
        let mut net_builder = NetBuilder::new();
        net_builder.build(net_config).unwrap();
        let net = net_builder.iter().next().unwrap();
        let mut mmds = Mmds::default();
        mmds.set_version(mmds_version);
        net.lock().unwrap().configure_mmds_network_stack(
            MmdsNetworkStack::default_ipv4_addr(),
            Arc::new(Mutex::new(mmds)),
        );

        attach_net_devices(
            &mut vmm.device_manager,
            &vmm.vm,
            cmdline,
            net_builder.iter(),
            event_manager,
        )
        .unwrap();
    }

    pub(crate) fn insert_vsock_device(
        vmm: &mut Vmm,
        cmdline: &mut Cmdline,
        event_manager: &mut EventManager,
        vsock_config: VsockDeviceConfig,
    ) {
        let vsock_dev_id = VSOCK_DEV_ID.to_owned();
        let vsock = VsockBuilder::create_unixsock_vsock(vsock_config).unwrap();
        let vsock = Arc::new(Mutex::new(vsock));

        attach_unixsock_vsock_device(
            &mut vmm.device_manager,
            &vmm.vm,
            cmdline,
            &vsock,
            event_manager,
        )
        .unwrap();

        assert!(
            vmm.device_manager
                .get_virtio_device(virtio_ids::VIRTIO_ID_VSOCK, &vsock_dev_id)
                .is_some()
        );
    }

    pub(crate) fn insert_entropy_device(
        vmm: &mut Vmm,
        cmdline: &mut Cmdline,
        event_manager: &mut EventManager,
        entropy_config: EntropyDeviceConfig,
    ) {
        let mut builder = EntropyDeviceBuilder::new();
        let entropy = builder.build(entropy_config).unwrap();

        attach_entropy_device(
            &mut vmm.device_manager,
            &vmm.vm,
            cmdline,
            &entropy,
            event_manager,
        )
        .unwrap();

        assert!(
            vmm.device_manager
                .get_virtio_device(virtio_ids::VIRTIO_ID_RNG, ENTROPY_DEV_ID)
                .is_some()
        );
    }

    pub(crate) fn insert_pmem_devices(
        vmm: &mut Vmm,
        cmdline: &mut Cmdline,
        event_manager: &mut EventManager,
        configs: Vec<PmemConfig>,
    ) -> Vec<TempFile> {
        let mut builder = PmemBuilder::default();
        let mut files = Vec::new();
        for mut config in configs {
            let tmp_file = TempFile::new().unwrap();
            tmp_file.as_file().set_len(0x20_0000).unwrap();
            let tmp_file_path = tmp_file.as_path().to_str().unwrap().to_string();
            files.push(tmp_file);
            config.path_on_host = tmp_file_path;
            builder.build(config, false).unwrap();
        }

        attach_pmem_devices(
            &mut vmm.device_manager,
            &vmm.vm,
            cmdline,
            builder.devices.iter(),
            event_manager,
        )
        .unwrap();
        files
    }

    #[cfg(target_arch = "x86_64")]
    pub(crate) fn insert_vmgenid_device(vmm: &mut Vmm) {
        vmm.device_manager.attach_vmgenid_device(&vmm.vm).unwrap();
    }

    #[cfg(target_arch = "x86_64")]
    pub(crate) fn insert_vmclock_device(vmm: &mut Vmm) {
        vmm.device_manager.attach_vmclock_device(&vmm.vm).unwrap();
    }

    pub(crate) fn insert_balloon_device(
        vmm: &mut Vmm,
        cmdline: &mut Cmdline,
        event_manager: &mut EventManager,
        balloon_config: BalloonDeviceConfig,
    ) {
        let mut builder = BalloonBuilder::new();
        builder.set(balloon_config).unwrap();
        let balloon = builder.get().unwrap();

        attach_balloon_device(
            &mut vmm.device_manager,
            &vmm.vm,
            cmdline,
            balloon,
            event_manager,
        )
        .unwrap();

        assert!(
            vmm.device_manager
                .get_virtio_device(virtio_ids::VIRTIO_ID_BALLOON, BALLOON_DEV_ID)
                .is_some()
        );
    }

    #[test]
    fn test_attach_net_devices() {
        let mut event_manager = EventManager::new().expect("Unable to create EventManager");
        let mut vmm = default_vmm();

        let network_interface = NetworkInterfaceConfig {
            iface_id: String::from("netif"),
            host_dev_name: String::from("hostname"),
            guest_mac: None,
            rx_rate_limiter: None,
            tx_rate_limiter: None,
        };

        let mut cmdline = default_kernel_cmdline();
        insert_net_device(
            &mut vmm,
            &mut cmdline,
            &mut event_manager,
            network_interface.clone(),
        );

        // We cannot attach it once more.
        let mut net_builder = NetBuilder::new();
        net_builder.build(network_interface).unwrap_err();
    }

    #[test]
    fn test_attach_block_devices() {
        let mut event_manager = EventManager::new().expect("Unable to create EventManager");

        // Use case 1: root block device is not specified through PARTUUID.
        {
            let drive_id = String::from("root");
            let block_configs = vec![CustomBlockConfig::new(
                drive_id.clone(),
                true,
                None,
                true,
                CacheType::Unsafe,
            )];
            let mut vmm = default_vmm();
            let mut cmdline = default_kernel_cmdline();
            insert_block_devices(&mut vmm, &mut cmdline, &mut event_manager, block_configs);
            assert!(cmdline_contains(&cmdline, "root=/dev/vda ro"));
            assert!(
                vmm.device_manager
                    .get_virtio_device(virtio_ids::VIRTIO_ID_BLOCK, drive_id.as_str())
                    .is_some()
            );
        }

        // Use case 2: root block device is specified through PARTUUID.
        {
            let drive_id = String::from("root");
            let block_configs = vec![CustomBlockConfig::new(
                drive_id.clone(),
                true,
                Some("0eaa91a0-01".to_string()),
                false,
                CacheType::Unsafe,
            )];
            let mut vmm = default_vmm();
            let mut cmdline = default_kernel_cmdline();
            insert_block_devices(&mut vmm, &mut cmdline, &mut event_manager, block_configs);
            assert!(cmdline_contains(&cmdline, "root=PARTUUID=0eaa91a0-01 rw"));
            assert!(
                vmm.device_manager
                    .get_virtio_device(virtio_ids::VIRTIO_ID_BLOCK, drive_id.as_str())
                    .is_some()
            );
        }

        // Use case 3: root block device is not added at all.
        {
            let drive_id = String::from("non_root");
            let block_configs = vec![CustomBlockConfig::new(
                drive_id.clone(),
                false,
                Some("0eaa91a0-01".to_string()),
                false,
                CacheType::Unsafe,
            )];
            let mut vmm = default_vmm();
            let mut cmdline = default_kernel_cmdline();
            insert_block_devices(&mut vmm, &mut cmdline, &mut event_manager, block_configs);
            assert!(!cmdline_contains(&cmdline, "root=PARTUUID="));
            assert!(!cmdline_contains(&cmdline, "root=/dev/vda"));
            assert!(
                vmm.device_manager
                    .get_virtio_device(virtio_ids::VIRTIO_ID_BLOCK, drive_id.as_str())
                    .is_some()
            );
        }

        // Use case 4: rw root block device and other rw and ro drives.
        {
            let block_configs = vec![
                CustomBlockConfig::new(
                    String::from("root"),
                    true,
                    Some("0eaa91a0-01".to_string()),
                    false,
                    CacheType::Unsafe,
                ),
                CustomBlockConfig::new(
                    String::from("secondary"),
                    false,
                    None,
                    true,
                    CacheType::Unsafe,
                ),
                CustomBlockConfig::new(
                    String::from("third"),
                    false,
                    None,
                    false,
                    CacheType::Unsafe,
                ),
            ];
            let mut vmm = default_vmm();
            let mut cmdline = default_kernel_cmdline();
            insert_block_devices(&mut vmm, &mut cmdline, &mut event_manager, block_configs);

            assert!(cmdline_contains(&cmdline, "root=PARTUUID=0eaa91a0-01 rw"));
            assert!(
                vmm.device_manager
                    .get_virtio_device(virtio_ids::VIRTIO_ID_BLOCK, "root")
                    .is_some()
            );
            assert!(
                vmm.device_manager
                    .get_virtio_device(virtio_ids::VIRTIO_ID_BLOCK, "secondary")
                    .is_some()
            );
            assert!(
                vmm.device_manager
                    .get_virtio_device(virtio_ids::VIRTIO_ID_BLOCK, "third")
                    .is_some()
            );

            // Check if these three block devices are inserted in kernel_cmdline.
            #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
            assert!(cmdline_contains(
                &cmdline,
                "virtio_mmio.device=4K@0xc0001000:5 virtio_mmio.device=4K@0xc0002000:6 \
                 virtio_mmio.device=4K@0xc0003000:7"
            ));
        }

        // Use case 5: root block device is rw.
        {
            let drive_id = String::from("root");
            let block_configs = vec![CustomBlockConfig::new(
                drive_id.clone(),
                true,
                None,
                false,
                CacheType::Unsafe,
            )];
            let mut vmm = default_vmm();
            let mut cmdline = default_kernel_cmdline();
            insert_block_devices(&mut vmm, &mut cmdline, &mut event_manager, block_configs);
            assert!(cmdline_contains(&cmdline, "root=/dev/vda rw"));
            assert!(
                vmm.device_manager
                    .get_virtio_device(virtio_ids::VIRTIO_ID_BLOCK, drive_id.as_str())
                    .is_some()
            );
        }

        // Use case 6: root block device is ro, with PARTUUID.
        {
            let drive_id = String::from("root");
            let block_configs = vec![CustomBlockConfig::new(
                drive_id.clone(),
                true,
                Some("0eaa91a0-01".to_string()),
                true,
                CacheType::Unsafe,
            )];
            let mut vmm = default_vmm();
            let mut cmdline = default_kernel_cmdline();
            insert_block_devices(&mut vmm, &mut cmdline, &mut event_manager, block_configs);
            assert!(cmdline_contains(&cmdline, "root=PARTUUID=0eaa91a0-01 ro"));
            assert!(
                vmm.device_manager
                    .get_virtio_device(virtio_ids::VIRTIO_ID_BLOCK, drive_id.as_str())
                    .is_some()
            );
        }

        // Use case 7: root block device is rw with flush enabled.
        {
            let drive_id = String::from("root");
            let block_configs = vec![CustomBlockConfig::new(
                drive_id.clone(),
                true,
                None,
                false,
                CacheType::Writeback,
            )];
            let mut vmm = default_vmm();
            let mut cmdline = default_kernel_cmdline();
            insert_block_devices(&mut vmm, &mut cmdline, &mut event_manager, block_configs);
            assert!(cmdline_contains(&cmdline, "root=/dev/vda rw"));
            assert!(
                vmm.device_manager
                    .get_virtio_device(virtio_ids::VIRTIO_ID_BLOCK, drive_id.as_str())
                    .is_some()
            );
        }
    }

    #[test]
    fn test_attach_pmem_devices() {
        let mut event_manager = EventManager::new().expect("Unable to create EventManager");

        let id = String::from("root");
        let configs = vec![PmemConfig {
            id: id.clone(),
            path_on_host: "".into(),
            root_device: true,
            read_only: true,
        }];
        let mut vmm = default_vmm();
        let mut cmdline = default_kernel_cmdline();
        _ = insert_pmem_devices(&mut vmm, &mut cmdline, &mut event_manager, configs);
        assert!(cmdline_contains(&cmdline, "root=/dev/pmem0 ro"));
        assert!(
            vmm.device_manager
                .get_virtio_device(virtio_ids::VIRTIO_ID_PMEM, id.as_str())
                .is_some()
        );
    }

    #[test]
    fn test_attach_boot_timer_device() {
        let mut vmm = default_vmm();
        let request_ts = TimestampUs::default();

        let res = vmm
            .device_manager
            .attach_boot_timer_device(&vmm.vm, request_ts);
        res.unwrap();
        assert!(vmm.device_manager.mmio_devices.boot_timer.is_some());
    }

    #[test]
    fn test_attach_balloon_device() {
        let mut event_manager = EventManager::new().expect("Unable to create EventManager");
        let mut vmm = default_vmm();

        let balloon_config = BalloonDeviceConfig {
            amount_mib: 0,
            deflate_on_oom: false,
            stats_polling_interval_s: 0,
            free_page_hinting: false,
            free_page_reporting: false,
        };

        let mut cmdline = default_kernel_cmdline();
        insert_balloon_device(&mut vmm, &mut cmdline, &mut event_manager, balloon_config);
        // Check if the balloon device is described in kernel_cmdline.
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        assert!(cmdline_contains(
            &cmdline,
            "virtio_mmio.device=4K@0xc0001000:5"
        ));
    }

    #[test]
    fn test_attach_entropy_device() {
        let mut event_manager = EventManager::new().expect("Unable to create EventManager");
        let mut vmm = default_vmm();

        let entropy_config = EntropyDeviceConfig::default();

        let mut cmdline = default_kernel_cmdline();
        insert_entropy_device(&mut vmm, &mut cmdline, &mut event_manager, entropy_config);
        // Check if the entropy device is described in kernel_cmdline.
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        assert!(cmdline_contains(
            &cmdline,
            "virtio_mmio.device=4K@0xc0001000:5"
        ));
    }

    #[test]
    fn test_attach_vsock_device() {
        let mut event_manager = EventManager::new().expect("Unable to create EventManager");
        let mut vmm = default_vmm();

        let mut tmp_sock_file = TempFile::new().unwrap();
        tmp_sock_file.remove().unwrap();
        let vsock_config = default_config(&tmp_sock_file);

        let mut cmdline = default_kernel_cmdline();
        insert_vsock_device(&mut vmm, &mut cmdline, &mut event_manager, vsock_config);
        // Check if the vsock device is described in kernel_cmdline.
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        assert!(cmdline_contains(
            &cmdline,
            "virtio_mmio.device=4K@0xc0001000:5"
        ));
    }

    pub(crate) fn insert_virtio_mem_device(
        vmm: &mut Vmm,
        cmdline: &mut Cmdline,
        event_manager: &mut EventManager,
        config: MemoryHotplugConfig,
    ) {
        attach_virtio_mem_device(
            &mut vmm.device_manager,
            &vmm.vm,
            cmdline,
            &config,
            event_manager,
            GuestAddress(512 << 30),
        )
        .unwrap();
    }

    #[test]
    fn test_attach_virtio_mem_device() {
        let mut event_manager = EventManager::new().expect("Unable to create EventManager");
        let mut vmm = default_vmm();

        let config = MemoryHotplugConfig {
            total_size_mib: 1024,
            block_size_mib: 2,
            slot_size_mib: 128,
        };

        let mut cmdline = default_kernel_cmdline();
        insert_virtio_mem_device(&mut vmm, &mut cmdline, &mut event_manager, config);

        // Check if the virtio-mem device is described in kernel_cmdline.
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        assert!(cmdline_contains(
            &cmdline,
            "virtio_mmio.device=4K@0xc0001000:5"
        ));
    }
}