Use macro to handle TDX-related code
This commit is contained in:
parent dd3aa8fe81
commit 49e6cd2712
@@ -14,12 +14,11 @@ cfg_if! {
    if #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))] {
        mod tdxguest;

        use tdx_guest::tdx_is_enabled;

        pub use tdxguest::TdxGuest;
    }
}

use ostd::if_tdx_enabled;
pub use pty::{new_pty_pair, PtyMaster, PtySlave};
pub use random::Random;
pub use urandom::Urandom;
@@ -41,15 +40,10 @@ pub fn init() -> Result<()> {
    add_node(console, "console")?;
    let tty = Arc::new(tty::TtyDevice);
    add_node(tty, "tty")?;
    cfg_if! {
        if #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))] {
            let tdx_guest = Arc::new(tdxguest::TdxGuest);

            if tdx_is_enabled() {
                add_node(tdx_guest, "tdx_guest")?;
            }
        }
    }
    if_tdx_enabled!({
        #[cfg(target_arch = "x86_64")]
        add_node(Arc::new(tdxguest::TdxGuest), "tdx_guest")?;
    });
    let random = Arc::new(random::Random);
    add_node(random, "random")?;
    let urandom = Arc::new(urandom::Urandom);
@@ -8,10 +8,12 @@ use core::ffi::CStr;
use linux_boot_params::{BootParams, E820Type, LINUX_BOOT_HEADER_MAGIC};

use crate::{
    arch::init_cvm_guest,
    boot::{
        memory_region::{MemoryRegion, MemoryRegionArray, MemoryRegionType},
        BootloaderAcpiArg, BootloaderFramebufferArg,
    },
    if_tdx_enabled,
    mm::{
        kspace::{paddr_to_vaddr, LINEAR_MAPPING_BASE_VADDR},
        Paddr,
@@ -114,6 +116,18 @@ fn parse_memory_regions(boot_params: &BootParams) -> MemoryRegionArray {
    // Add regions from E820.
    let num_entries = boot_params.e820_entries as usize;
    for e820_entry in &boot_params.e820_table[0..num_entries] {
        if_tdx_enabled!({
            if (e820_entry.addr..(e820_entry.addr + e820_entry.size)).contains(&0x800000) {
                regions
                    .push(MemoryRegion::new(
                        e820_entry.addr as usize,
                        e820_entry.size as usize,
                        MemoryRegionType::Reclaimable,
                    ))
                    .unwrap();
                continue;
            }
        });
        regions
            .push(MemoryRegion::new(
                e820_entry.addr as usize,
@@ -165,6 +179,9 @@ unsafe extern "sysv64" fn __linux_boot(params_ptr: *const BootParams) -> ! {
    use crate::boot::{call_ostd_main, EarlyBootInfo, EARLY_INFO};

    #[cfg(feature = "cvm_guest")]
    init_cvm_guest();

    EARLY_INFO.call_once(|| EarlyBootInfo {
        bootloader_name: parse_bootloader_name(params),
        kernel_cmdline: parse_kernel_commandline(params),
@@ -27,8 +27,6 @@
//! This sequence does not need to be strictly followed, and there may be
//! different considerations in different systems.

use cfg_if::cfg_if;

use crate::{
    arch::x86::kernel::{
        acpi::get_acpi_tables,
@@ -37,17 +35,10 @@ use crate::{
            Level, TriggerMode,
        },
    },
    if_tdx_enabled,
    mm::{paddr_to_vaddr, PAGE_SIZE},
};

cfg_if! {
    if #[cfg(feature = "cvm_guest")] {
        use tdx_guest::tdx_is_enabled;
        use crate::arch::x86::kernel::acpi::AcpiMemoryHandler;
        use acpi::platform::wakeup_aps;
    }
}

/// Get the number of processors
///
/// This function needs to be called after the OS initializes the ACPI table.
@@ -73,26 +64,24 @@ pub(crate) fn bringup_all_aps(num_cpus: u32) {
    copy_ap_boot_code();
    fill_boot_stack_array_ptr();
    fill_boot_pt_ptr();
    cfg_if! {
        if #[cfg(feature = "cvm_guest")] {
            if tdx_is_enabled() {
                for ap_num in 1..num_cpus {
                    wakeup_aps(
                        &ACPI_TABLES.get().unwrap().lock(),
                        AcpiMemoryHandler {},
                        ap_num,
                        AP_BOOT_START_PA as u64,
                        1000,
                    )
                    .unwrap();
                }
            } else {
                send_boot_ipis();
            }
        } else {
            send_boot_ipis();
    if_tdx_enabled!({
        use crate::arch::x86::kernel::acpi::AcpiMemoryHandler;
        use acpi::platform::wakeup_aps;

        let acpi_tables = get_acpi_tables().unwrap();
        for ap_num in 1..num_cpus {
            wakeup_aps(
                &acpi_tables,
                AcpiMemoryHandler {},
                ap_num,
                AP_BOOT_START_PA as u64,
                1000,
            )
            .unwrap();
        }
        }
    } else {
        send_boot_ipis();
    });
}

/// This is where the linker load the symbols in the `.ap_boot` section.
@@ -16,6 +16,7 @@ use volatile::{

use crate::{
    arch::{iommu::has_interrupt_remapping, x86::kernel::acpi::get_platform_info},
    if_tdx_enabled,
    mm::paddr_to_vaddr,
    sync::SpinLock,
    trap::IrqLine,
@@ -24,7 +25,6 @@ use crate::{

cfg_if! {
    if #[cfg(feature = "cvm_guest")] {
        use ::tdx_guest::tdx_is_enabled;
        use crate::arch::tdx_guest;
    }
}
@@ -184,16 +184,15 @@ pub fn init() {
    // FIXME: Is it possible to have an address that is not the default 0xFEC0_0000?
    // Need to find a way to determine if it is a valid address or not.
    const IO_APIC_DEFAULT_ADDRESS: usize = 0xFEC0_0000;
    #[cfg(feature = "cvm_guest")]
    // SAFETY:
    // This is safe because we are ensuring that the `IO_APIC_DEFAULT_ADDRESS` is a valid MMIO address before this operation.
    // The `IO_APIC_DEFAULT_ADDRESS` is a well-known address used for IO APICs in x86 systems.
    // We are also ensuring that we are only unprotecting a single page.
    if tdx_is_enabled() {
    if_tdx_enabled!({
        // SAFETY:
        // This is safe because we are ensuring that the `IO_APIC_DEFAULT_ADDRESS` is a valid MMIO address before this operation.
        // The `IO_APIC_DEFAULT_ADDRESS` is a well-known address used for IO APICs in x86 systems.
        // We are also ensuring that we are only unprotecting a single page.
        unsafe {
            tdx_guest::unprotect_gpa_range(IO_APIC_DEFAULT_ADDRESS, 1).unwrap();
        }
    }
    });
    let mut io_apic = unsafe { IoApicAccess::new(IO_APIC_DEFAULT_ADDRESS) };
    io_apic.set_id(0);
    let id = io_apic.id();
@@ -217,15 +216,14 @@ pub fn init() {
    let mut vec = Vec::new();
    for id in 0..apic.io_apics.len() {
        let io_apic = apic.io_apics.get(id).unwrap();
        #[cfg(feature = "cvm_guest")]
        // SAFETY:
        // This is safe because we are ensuring that the `io_apic.address` is a valid MMIO address before this operation.
        // We are also ensuring that we are only unprotecting a single page.
        if tdx_is_enabled() {
        if_tdx_enabled!({
            // SAFETY:
            // This is safe because we are ensuring that the `io_apic.address` is a valid MMIO address before this operation.
            // We are also ensuring that we are only unprotecting a single page.
            unsafe {
                tdx_guest::unprotect_gpa_range(io_apic.address as usize, 1).unwrap();
            }
        }
        });
        let interrupt_base = io_apic.global_system_interrupt_base;
        let mut io_apic = unsafe { IoApicAccess::new(io_apic.address as usize) };
        io_apic.set_id(id as u8);
@@ -21,13 +21,15 @@ use cfg_if::cfg_if;
use spin::Once;
use x86::cpuid::{CpuId, FeatureInfo};

use crate::if_tdx_enabled;

cfg_if! {
    if #[cfg(feature = "cvm_guest")] {
        pub(crate) mod tdx_guest;

        use {
            crate::early_println,
            ::tdx_guest::{init_tdx, tdcall::InitError, tdx_is_enabled},
            ::tdx_guest::{init_tdx, tdcall::InitError},
        };
    }
}
@@ -94,21 +96,13 @@ pub(crate) unsafe fn late_init_on_bsp() {

    timer::init();

    cfg_if! {
        if #[cfg(feature = "cvm_guest")] {
            if !tdx_is_enabled() {
                match iommu::init() {
                    Ok(_) => {}
                    Err(err) => warn!("IOMMU initialization error:{:?}", err),
                }
            }
        } else {
            match iommu::init() {
                Ok(_) => {}
                Err(err) => warn!("IOMMU initialization error:{:?}", err),
            }
    if_tdx_enabled!({
    } else {
        match iommu::init() {
            Ok(_) => {}
            Err(err) => warn!("IOMMU initialization error:{:?}", err),
        }
    }
    });

    // Some driver like serial may use PIC
    kernel::pic::init();
@@ -215,3 +209,39 @@ pub(crate) fn enable_cpu_features() {
        });
    }
}

/// Inserts a TDX-specific code block.
///
/// This macro conditionally executes a TDX-specific code block based on the following conditions:
/// (1) The `cvm_guest` feature is enabled at compile time.
/// (2) The TDX feature is detected at runtime via `::tdx_guest::tdx_is_enabled()`.
///
/// If both conditions are met, the `if_block` is executed. If an `else_block` is provided, it will be executed
/// when either the `cvm_guest` feature is not enabled or the TDX feature is not detected at runtime.
#[macro_export]
macro_rules! if_tdx_enabled {
    // Match when there is an else block
    ($if_block:block else $else_block:block) => {{
        #[cfg(feature = "cvm_guest")]
        {
            if ::tdx_guest::tdx_is_enabled() {
                $if_block
            } else {
                $else_block
            }
        }
        #[cfg(not(feature = "cvm_guest"))]
        {
            $else_block
        }
    }};
    // Match when there is no else block
    ($if_block:block) => {{
        #[cfg(feature = "cvm_guest")]
        {
            if ::tdx_guest::tdx_is_enabled() {
                $if_block
            }
        }
    }};
}
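
The macro defined above can be used either as a plain statement or, when an else block is supplied, as an expression; the else block is also what runs when the `cvm_guest` feature is compiled out. The following is a minimal usage sketch with hypothetical call sites (the logging calls and the `label` variable are illustrative and not part of this commit):

// Statement form: the block is compiled only with the `cvm_guest` feature
// and runs only when `::tdx_guest::tdx_is_enabled()` returns true at runtime.
if_tdx_enabled!({
    log::info!("running as a TDX guest");
});

// Expression form: both blocks yield a value, so the macro can initialize a
// variable; the else block is also chosen when `cvm_guest` is disabled.
let label = if_tdx_enabled!({
    "tdx guest"
} else {
    "regular guest"
});
log::info!("boot mode: {}", label);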
@@ -29,7 +29,7 @@ use super::ex_table::ExTable;
use crate::{
    arch::irq::{disable_local, enable_local},
    cpu::{CpuException, CpuExceptionInfo, PageFaultErrorCode},
    cpu_local_cell,
    cpu_local_cell, if_tdx_enabled,
    mm::{
        kspace::{KERNEL_PAGE_TABLE, LINEAR_MAPPING_BASE_VADDR, LINEAR_MAPPING_VADDR_RANGE},
        page_prop::{CachePolicy, PageProperty},
@@ -40,7 +40,7 @@ use crate::{

cfg_if! {
    if #[cfg(feature = "cvm_guest")] {
        use tdx_guest::{tdcall, tdx_is_enabled, handle_virtual_exception};
        use tdx_guest::{tdcall, handle_virtual_exception};
        use crate::arch::tdx_guest::TrapFrameWrapper;
    }
}
@@ -354,17 +354,11 @@ fn handle_kernel_page_fault(f: &TrapFrame, page_fault_vaddr: u64) {
    let vaddr = (page_fault_vaddr as usize).align_down(PAGE_SIZE);
    let paddr = vaddr - LINEAR_MAPPING_BASE_VADDR;

    cfg_if! {
        if #[cfg(feature = "cvm_guest")] {
            let priv_flags = if tdx_is_enabled() {
                PrivFlags::SHARED | PrivFlags::GLOBAL
            } else {
                PrivFlags::GLOBAL
            };
        } else {
            let priv_flags = PrivFlags::GLOBAL;
        }
    }
    let priv_flags = if_tdx_enabled!({
        PrivFlags::SHARED | PrivFlags::GLOBAL
    } else {
        PrivFlags::GLOBAL
    });

    // SAFETY:
    // 1. We have checked that the page fault address falls within the address range of the direct
@@ -7,8 +7,6 @@ use core::sync::atomic::{AtomicBool, Ordering};

use spin::Once;

#[cfg(feature = "cvm_guest")]
use crate::mm::frame::allocator;
use crate::{
    arch::boot::smp::{bringup_all_aps, get_num_processors},
    cpu,
@@ -103,9 +101,6 @@ pub fn boot_all_aps() {
    bringup_all_aps(num_cpus);
    wait_for_all_aps_started();

    #[cfg(feature = "cvm_guest")]
    allocator::reclaim_tdx_ap_boot_memory();

    log::info!("All application processors started. The BSP continues to run.");
}
@@ -15,12 +15,12 @@ use log::debug;

use self::bus::MmioBus;
use crate::{
    bus::mmio::common_device::MmioCommonDevice, mm::paddr_to_vaddr, sync::SpinLock, trap::IrqLine,
    bus::mmio::common_device::MmioCommonDevice, if_tdx_enabled, mm::paddr_to_vaddr, sync::SpinLock,
    trap::IrqLine,
};

cfg_if! {
    if #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))] {
        use ::tdx_guest::tdx_is_enabled;
        use crate::arch::tdx_guest;
    }
}
@@ -32,17 +32,17 @@ pub static MMIO_BUS: SpinLock<MmioBus> = SpinLock::new(MmioBus::new());
static IRQS: SpinLock<Vec<IrqLine>> = SpinLock::new(Vec::new());

pub(crate) fn init() {
    #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))]
    // SAFETY:
    // This is safe because we are ensuring that the address range 0xFEB0_0000 to 0xFEB0_4000 is valid before this operation.
    // The address range is page-aligned and falls within the MMIO range, which is a requirement for the `unprotect_gpa_range` function.
    // We are also ensuring that we are only unprotecting four pages.
    // Therefore, we are not causing any undefined behavior or violating any of the requirements of the `unprotect_gpa_range` function.
    if tdx_is_enabled() {
    if_tdx_enabled!({
        #[cfg(target_arch = "x86_64")]
        // SAFETY:
        // This is safe because we are ensuring that the address range 0xFEB0_0000 to 0xFEB0_4000 is valid before this operation.
        // The address range is page-aligned and falls within the MMIO range, which is a requirement for the `unprotect_gpa_range` function.
        // We are also ensuring that we are only unprotecting four pages.
        // Therefore, we are not causing any undefined behavior or violating any of the requirements of the `unprotect_gpa_range` function.
        unsafe {
            tdx_guest::unprotect_gpa_range(0xFEB0_0000, 4).unwrap();
        }
    }
    });
    // FIXME: The address 0xFEB0_0000 is obtained from an instance of microvm, and it may not work in other architecture.
    #[cfg(target_arch = "x86_64")]
    iter_range(0xFEB0_0000..0xFEB0_4000);
@@ -16,13 +16,13 @@ use crate::{
        common_device::PciCommonDevice,
        device_info::PciDeviceLocation,
    },
    if_tdx_enabled,
    mm::VmIoOnce,
    trap::IrqLine,
};

cfg_if! {
    if #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))] {
        use ::tdx_guest::tdx_is_enabled;
        use crate::arch::tdx_guest;
    }
}
@@ -108,20 +108,20 @@ impl CapabilityMsixData {

        // Set message address 0xFEE0_0000
        for i in 0..table_size {
            #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))]
            // SAFETY:
            // This is safe because we are ensuring that the physical address of the MSI-X table is valid before this operation.
            // We are also ensuring that we are only unprotecting a single page.
            // The MSI-X table will not exceed one page size, because the size of an MSI-X entry is 16 bytes, and 256 entries are required to fill a page,
            // which is just equal to the number of all the interrupt numbers on the x86 platform.
            // It is better to add a judgment here in case the device deliberately uses so many interrupt numbers.
            // In addition, due to granularity, the minimum value that can be set here is only one page.
            // Therefore, we are not causing any undefined behavior or violating any of the requirements of the `unprotect_gpa_range` function.
            if tdx_is_enabled() {
            if_tdx_enabled!({
                #[cfg(target_arch = "x86_64")]
                // SAFETY:
                // This is safe because we are ensuring that the physical address of the MSI-X table is valid before this operation.
                // We are also ensuring that we are only unprotecting a single page.
                // The MSI-X table will not exceed one page size, because the size of an MSI-X entry is 16 bytes, and 256 entries are required to fill a page,
                // which is just equal to the number of all the interrupt numbers on the x86 platform.
                // It is better to add a judgment here in case the device deliberately uses so many interrupt numbers.
                // In addition, due to granularity, the minimum value that can be set here is only one page.
                // Therefore, we are not causing any undefined behavior or violating any of the requirements of the `unprotect_gpa_range` function.
                unsafe {
                    tdx_guest::unprotect_gpa_range(table_bar.io_mem().paddr(), 1).unwrap();
                }
            }
            });
            // Set message address and disable this msix entry
            table_bar
                .io_mem()
@@ -5,9 +5,9 @@
use core::ops::{Deref, Range};

use align_ext::AlignExt;
use cfg_if::cfg_if;

use crate::{
    if_tdx_enabled,
    mm::{
        kspace::kvirt_area::{KVirtArea, Untracked},
        page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
@@ -47,17 +47,11 @@ impl IoMem {
        let last_page_end = range.end.align_up(PAGE_SIZE);
        let mut new_kvirt_area = KVirtArea::<Untracked>::new(last_page_end - first_page_start);

        cfg_if! {
            if #[cfg(all(feature = "cvm_guest", target_arch = "x86_64"))] {
                let priv_flags = if tdx_guest::tdx_is_enabled() {
                    PrivilegedPageFlags::SHARED
                } else {
                    PrivilegedPageFlags::empty()
                };
            } else {
                let priv_flags = PrivilegedPageFlags::empty();
            }
        }
        let priv_flags = if_tdx_enabled!({
            PrivilegedPageFlags::SHARED
        } else {
            PrivilegedPageFlags::empty()
        });

        let prop = PageProperty {
            flags,
@@ -79,9 +79,6 @@ unsafe fn init() {

    arch::serial::init();

    #[cfg(feature = "cvm_guest")]
    arch::init_cvm_guest();

    logger::init();

    // SAFETY: They are only called once on BSP and ACPI has been initialized.
@@ -8,6 +8,7 @@ use cfg_if::cfg_if;
use super::{check_and_insert_dma_mapping, remove_dma_mapping, DmaError, HasDaddr};
use crate::{
    arch::iommu,
    if_tdx_enabled,
    mm::{
        dma::{dma_type, Daddr, DmaType},
        io::VmIoOnce,
@@ -21,7 +22,6 @@ use crate::{

cfg_if! {
    if #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))] {
        use ::tdx_guest::tdx_is_enabled;
        use crate::arch::tdx_guest;
    }
}
@@ -75,17 +75,17 @@ impl DmaCoherent {
        }
        let start_daddr = match dma_type() {
            DmaType::Direct => {
                #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))]
                // SAFETY:
                // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations.
                // The `check_and_insert_dma_mapping` function checks if the physical address range is already mapped.
                // We are also ensuring that we are only modifying the page table entries corresponding to the physical address range specified by `start_paddr` and `frame_count`.
                // Therefore, we are not causing any undefined behavior or violating any of the requirements of the 'unprotect_gpa_range' function.
                if tdx_is_enabled() {
                if_tdx_enabled!({
                    #[cfg(target_arch = "x86_64")]
                    // SAFETY:
                    // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations.
                    // The `check_and_insert_dma_mapping` function checks if the physical address range is already mapped.
                    // We are also ensuring that we are only modifying the page table entries corresponding to the physical address range specified by `start_paddr` and `frame_count`.
                    // Therefore, we are not causing any undefined behavior or violating any of the requirements of the 'unprotect_gpa_range' function.
                    unsafe {
                        tdx_guest::unprotect_gpa_range(start_paddr, frame_count).unwrap();
                    }
                }
                });
                start_paddr as Daddr
            }
            DmaType::Iommu => {
@@ -135,17 +135,17 @@ impl Drop for DmaCoherentInner {
            start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
        match dma_type() {
            DmaType::Direct => {
                #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))]
                // SAFETY:
                // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations.
                // The `start_paddr()` ensures the `start_paddr` is page-aligned.
                // We are also ensuring that we are only modifying the page table entries corresponding to the physical address range specified by `start_paddr` and `frame_count`.
                // Therefore, we are not causing any undefined behavior or violating any of the requirements of the `protect_gpa_range` function.
                if tdx_is_enabled() {
                if_tdx_enabled!({
                    #[cfg(target_arch = "x86_64")]
                    // SAFETY:
                    // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations.
                    // The `start_paddr()` ensures the `start_paddr` is page-aligned.
                    // We are also ensuring that we are only modifying the page table entries corresponding to the physical address range specified by `start_paddr` and `frame_count`.
                    // Therefore, we are not causing any undefined behavior or violating any of the requirements of the `protect_gpa_range` function.
                    unsafe {
                        tdx_guest::protect_gpa_range(start_paddr, frame_count).unwrap();
                    }
                }
                });
            }
            DmaType::Iommu => {
                for i in 0..frame_count {
@@ -9,6 +9,7 @@ use super::{check_and_insert_dma_mapping, remove_dma_mapping, DmaError, HasDaddr
use crate::{
    arch::iommu,
    error::Error,
    if_tdx_enabled,
    mm::{
        dma::{dma_type, Daddr, DmaType},
        HasPaddr, Infallible, Paddr, USegment, UntypedMem, VmIo, VmReader, VmWriter, PAGE_SIZE,
@@ -17,7 +18,6 @@ use crate::{

cfg_if! {
    if #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))] {
        use ::tdx_guest::tdx_is_enabled;
        use crate::arch::tdx_guest;
    }
}
@@ -72,17 +72,17 @@ impl DmaStream {
            start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
        let start_daddr = match dma_type() {
            DmaType::Direct => {
                #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))]
                // SAFETY:
                // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations.
                // The `check_and_insert_dma_mapping` function checks if the physical address range is already mapped.
                // We are also ensuring that we are only modifying the page table entries corresponding to the physical address range specified by `start_paddr` and `frame_count`.
                // Therefore, we are not causing any undefined behavior or violating any of the requirements of the 'unprotect_gpa_range' function.
                if tdx_is_enabled() {
                if_tdx_enabled!({
                    #[cfg(target_arch = "x86_64")]
                    // SAFETY:
                    // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations.
                    // The `check_and_insert_dma_mapping` function checks if the physical address range is already mapped.
                    // We are also ensuring that we are only modifying the page table entries corresponding to the physical address range specified by `start_paddr` and `frame_count`.
                    // Therefore, we are not causing any undefined behavior or violating any of the requirements of the 'unprotect_gpa_range' function.
                    unsafe {
                        tdx_guest::unprotect_gpa_range(start_paddr, frame_count).unwrap();
                    }
                }
                });
                start_paddr as Daddr
            }
            DmaType::Iommu => {
@@ -182,17 +182,17 @@ impl Drop for DmaStreamInner {
            start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
        match dma_type() {
            DmaType::Direct => {
                #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))]
                // SAFETY:
                // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations.
                // The `start_paddr()` ensures the `start_paddr` is page-aligned.
                // We are also ensuring that we are only modifying the page table entries corresponding to the physical address range specified by `start_paddr` and `frame_count`.
                // Therefore, we are not causing any undefined behavior or violating any of the requirements of the `protect_gpa_range` function.
                if tdx_is_enabled() {
                if_tdx_enabled!({
                    #[cfg(target_arch = "x86_64")]
                    // SAFETY:
                    // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations.
                    // The `start_paddr()` ensures the `start_paddr` is page-aligned.
                    // We are also ensuring that we are only modifying the page table entries corresponding to the physical address range specified by `start_paddr` and `frame_count`.
                    // Therefore, we are not causing any undefined behavior or violating any of the requirements of the `protect_gpa_range` function.
                    unsafe {
                        tdx_guest::protect_gpa_range(start_paddr, frame_count).unwrap();
                    }
                }
                });
            }
            DmaType::Iommu => {
                for i in 0..frame_count {
@@ -356,38 +356,3 @@ pub(crate) unsafe fn init_early_allocator() {
    let mut early_allocator = EARLY_ALLOCATOR.lock();
    *early_allocator = Some(EarlyFrameAllocator::new());
}

#[cfg(feature = "cvm_guest")]
pub(crate) fn reclaim_tdx_ap_boot_memory() {
    let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions;
    for region in regions.iter() {
        if region.typ() == MemoryRegionType::Usable {
            // Make the memory region page-aligned, and skip if it is too small.
            let start = region.base().align_up(PAGE_SIZE) / PAGE_SIZE;
            let region_end = region.base().checked_add(region.len()).unwrap();
            let end = region_end.align_down(PAGE_SIZE) / PAGE_SIZE;
            if end <= start {
                continue;
            }
            // 0x800000 is temporarily used for AP boot in Intel TDX environment.
            // We should include this frame into page allocator after AP initialization.
            if (start..end).contains(&(0x800000 / PAGE_SIZE)) {
                info!(
                    "Found usable region, start:{:x}, end:{:x}",
                    region.base(),
                    region.base() + region.len()
                );
                FRAME_ALLOCATOR
                    .get()
                    .unwrap()
                    .disable_irq()
                    .lock()
                    .allocator
                    .add_frame(start, end);

                FRAME_ALLOCATOR.get().unwrap().disable_irq().lock().total +=
                    (end - start) * PAGE_SIZE;
            }
        }
    }
}