Make if_tdx_enabled macro x86-specific

commit e4aa261c48
parent dd67a9a175
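Before this change, if_tdx_enabled! was exported at the ostd crate root and could be named on every architecture; this commit moves it behind the x86 arch module, so call sites that are not already x86-only now guard the invocation with #[cfg(target_arch = "x86_64")] and reach the macro through ostd::if_tdx_enabled! (or crate::arch::if_tdx_enabled! inside ostd). The fragment below shows the shape of a migrated call site, lifted from the device-initialization hunk further down; it is a fragment of kernel code, not compilable on its own.

    // A migrated call site (from the device-initialization hunk below).
    // The cfg guard keeps the macro name out of non-x86 builds; the macro
    // itself is now reached through the ostd/arch path.
    #[cfg(target_arch = "x86_64")]
    ostd::if_tdx_enabled!({
        add_node(Arc::new(tdxguest::TdxGuest), "tdx_guest")?;
    });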
@@ -67,7 +67,6 @@ riscv = { version = "0.11.1", features = ["s-mode"] }
 
 [features]
 all = ["cvm_guest"]
 
 cvm_guest = ["dep:tdx-guest", "ostd/cvm_guest"]
 
 [lints]
@@ -1,7 +1,5 @@
 // SPDX-License-Identifier: MPL-2.0
 
-use cfg_if::cfg_if;
-
 mod null;
 mod pty;
 mod random;
@@ -10,15 +8,9 @@ pub mod tty;
 mod urandom;
 mod zero;
 
-cfg_if! {
-    if #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))] {
-        mod tdxguest;
-
-        pub use tdxguest::TdxGuest;
-    }
-}
+#[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))]
+mod tdxguest;
 
-use ostd::if_tdx_enabled;
 pub use pty::{new_pty_pair, PtyMaster, PtySlave};
 pub use random::Random;
 pub use urandom::Urandom;
@@ -40,8 +32,8 @@ pub fn init() -> Result<()> {
     add_node(console, "console")?;
     let tty = Arc::new(tty::TtyDevice);
     add_node(tty, "tty")?;
-    if_tdx_enabled!({
-        #[cfg(target_arch = "x86_64")]
-        add_node(Arc::new(tdxguest::TdxGuest), "tdx_guest")?;
-    });
+    #[cfg(target_arch = "x86_64")]
+    ostd::if_tdx_enabled!({
+        add_node(Arc::new(tdxguest::TdxGuest), "tdx_guest")?;
+    });
     let random = Arc::new(random::Random);
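With the pub use tdxguest::TdxGuest re-export gone, only a single item remains conditional in this module, so a plain cfg attribute replaces the cfg_if! wrapper and its import. A minimal standalone comparison of the two styles (generic names, not the kernel source):

    // Attribute form: sufficient when one item is conditional.
    #[cfg(target_os = "linux")]
    mod platform {
        pub fn name() -> &'static str { "linux" }
    }

    #[cfg(not(target_os = "linux"))]
    mod platform {
        pub fn name() -> &'static str { "other" }
    }

    // cfg_if! (from the cfg_if crate) mainly pays off when several items
    // share one if/else chain:
    //
    // cfg_if::cfg_if! {
    //     if #[cfg(target_os = "linux")] {
    //         mod a;
    //         mod b;
    //     } else {
    //         mod fallback;
    //     }
    // }

    fn main() {
        println!("{}", platform::name());
    }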
@@ -5,13 +5,14 @@
 
 use linux_boot_params::{BootParams, E820Type, LINUX_BOOT_HEADER_MAGIC};
 
+#[cfg(feature = "cvm_guest")]
+use crate::arch::x86::init_cvm_guest;
 use crate::{
-    arch::init_cvm_guest,
+    arch::x86::if_tdx_enabled,
     boot::{
         memory_region::{MemoryRegion, MemoryRegionArray, MemoryRegionType},
         BootloaderAcpiArg, BootloaderFramebufferArg,
     },
-    if_tdx_enabled,
     mm::kspace::paddr_to_vaddr,
 };
 
@@ -30,18 +30,20 @@
 use acpi::madt::MadtEntry;
 
 use crate::{
-    arch::x86::kernel::{
-        acpi::get_acpi_tables,
-        apic::{
-            self, ApicId, DeliveryMode, DeliveryStatus, DestinationMode, DestinationShorthand, Icr,
-            Level, TriggerMode,
+    arch::{
+        if_tdx_enabled,
+        kernel::{
+            acpi::get_acpi_tables,
+            apic::{
+                self, ApicId, DeliveryMode, DeliveryStatus, DestinationMode, DestinationShorthand,
+                Icr, Level, TriggerMode,
+            },
         },
     },
     boot::{
         memory_region::{MemoryRegion, MemoryRegionType},
         smp::PerApRawInfo,
     },
-    if_tdx_enabled,
     mm::{Paddr, PAGE_SIZE},
 };
 
@@ -15,8 +15,7 @@ use volatile::{
 };
 
 use crate::{
-    arch::{iommu::has_interrupt_remapping, x86::kernel::acpi::get_platform_info},
-    if_tdx_enabled,
+    arch::{if_tdx_enabled, iommu::has_interrupt_remapping, kernel::acpi::get_platform_info},
     io::IoMemAllocatorBuilder,
     mm::paddr_to_vaddr,
     sync::SpinLock,
@@ -18,21 +18,13 @@ pub mod task;
 pub mod timer;
 pub mod trap;
 
-use cfg_if::cfg_if;
 use io::construct_io_mem_allocator_builder;
 use spin::Once;
 use x86::cpuid::{CpuId, FeatureInfo};
 
-use crate::if_tdx_enabled;
-
-cfg_if! {
-    if #[cfg(feature = "cvm_guest")] {
-        pub(crate) mod tdx_guest;
-
-        use ::tdx_guest::{init_tdx, tdcall::InitError};
-    }
-}
+#[cfg(feature = "cvm_guest")]
+pub(crate) mod tdx_guest;
 
 use core::{
     arch::x86_64::{_rdrand64_step, _rdtsc},
     sync::atomic::Ordering,
@@ -43,7 +35,7 @@ use log::{info, warn};
 
 #[cfg(feature = "cvm_guest")]
 pub(crate) fn init_cvm_guest() {
-    match init_tdx() {
+    match ::tdx_guest::init_tdx() {
         Ok(td_info) => {
             crate::early_println!(
                 "[kernel] Intel TDX initialized\n[kernel] td gpaw: {}, td attributes: {:?}",
@@ -51,7 +43,7 @@ pub(crate) fn init_cvm_guest() {
                 td_info.attributes
             );
         }
-        Err(InitError::TdxGetVpInfoError(td_call_error)) => {
+        Err(::tdx_guest::tdcall::InitError::TdxGetVpInfoError(td_call_error)) => {
            panic!(
                 "[kernel] Intel TDX not initialized, Failed to get TD info: {:?}",
                 td_call_error
@@ -273,3 +265,5 @@ macro_rules! if_tdx_enabled {
         }
     }};
 }
+
+pub use if_tdx_enabled;
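The last hunk above is the core of the change: the macro is now defined inside the x86 arch module and re-exported with pub use, which gives a macro_rules! macro an ordinary path-based name such as crate::arch::if_tdx_enabled! or ostd::if_tdx_enabled!. The following is a standalone sketch of that technique only, with a made-up runtime check standing in for the real TDX detection:

    mod arch {
        // The macro itself; the real one dispatches on whether the guest is
        // running under Intel TDX. Here a stub flag stands in for that check.
        macro_rules! if_tdx_enabled {
            ($if_block:block else $else_block:block) => {
                if crate::arch::tdx_is_enabled() { $if_block } else { $else_block }
            };
            ($if_block:block) => {
                if crate::arch::tdx_is_enabled() { $if_block }
            };
        }
        // The re-export that makes the macro addressable by path.
        pub(crate) use if_tdx_enabled;

        pub(crate) fn tdx_is_enabled() -> bool {
            false // stub; the real check queries the TDX module
        }
    }

    fn main() {
        arch::if_tdx_enabled!({
            println!("running as a TDX guest");
        } else {
            println!("not a TDX guest");
        });
    }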
@@ -27,9 +27,12 @@ use spin::Once;
 
 use super::ex_table::ExTable;
 use crate::{
-    arch::irq::{disable_local, enable_local},
+    arch::{
+        if_tdx_enabled,
+        irq::{disable_local, enable_local},
+    },
     cpu::context::{CpuException, CpuExceptionInfo, PageFaultErrorCode},
-    cpu_local_cell, if_tdx_enabled,
+    cpu_local_cell,
     mm::{
         kspace::{KERNEL_PAGE_TABLE, LINEAR_MAPPING_BASE_VADDR, LINEAR_MAPPING_VADDR_RANGE},
         page_prop::{CachePolicy, PageProperty},
@@ -10,21 +10,13 @@ pub mod common_device;
 use alloc::vec::Vec;
 use core::ops::Range;
 
-use cfg_if::cfg_if;
 use log::debug;
 
 use self::bus::MmioBus;
 use crate::{
-    bus::mmio::common_device::MmioCommonDevice, if_tdx_enabled, mm::paddr_to_vaddr, sync::SpinLock,
-    trap::IrqLine,
+    bus::mmio::common_device::MmioCommonDevice, mm::paddr_to_vaddr, sync::SpinLock, trap::IrqLine,
 };
 
-cfg_if! {
-    if #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))] {
-        use crate::arch::tdx_guest;
-    }
-}
-
 const VIRTIO_MMIO_MAGIC: u32 = 0x74726976;
 
 /// MMIO bus instance
@@ -32,21 +24,22 @@ pub static MMIO_BUS: SpinLock<MmioBus> = SpinLock::new(MmioBus::new());
 static IRQS: SpinLock<Vec<IrqLine>> = SpinLock::new(Vec::new());
 
 pub(crate) fn init() {
-    if_tdx_enabled!({
-        #[cfg(target_arch = "x86_64")]
+    #[cfg(target_arch = "x86_64")]
+    {
+        crate::arch::if_tdx_enabled!({
             // SAFETY:
             // This is safe because we are ensuring that the address range 0xFEB0_0000 to 0xFEB0_4000 is valid before this operation.
             // The address range is page-aligned and falls within the MMIO range, which is a requirement for the `unprotect_gpa_range` function.
             // We are also ensuring that we are only unprotecting four pages.
             // Therefore, we are not causing any undefined behavior or violating any of the requirements of the `unprotect_gpa_range` function.
             unsafe {
-                tdx_guest::unprotect_gpa_range(0xFEB0_0000, 4).unwrap();
+                crate::arch::tdx_guest::unprotect_gpa_range(0xFEB0_0000, 4).unwrap();
             }
         });
         // FIXME: The address 0xFEB0_0000 is obtained from an instance of microvm, and it may not work in other architecture.
-    #[cfg(target_arch = "x86_64")]
-    iter_range(0xFEB0_0000..0xFEB0_4000);
+        iter_range(0xFEB0_0000..0xFEB0_4000);
+    }
 }
 
 #[cfg(target_arch = "x86_64")]
 fn iter_range(range: Range<usize>) {
@@ -7,8 +7,6 @@
 
 use alloc::{sync::Arc, vec::Vec};
 
-use cfg_if::cfg_if;
-
 use crate::{
     arch::iommu::has_interrupt_remapping,
     bus::pci::{
@@ -16,17 +14,10 @@ use crate::{
         common_device::PciCommonDevice,
         device_info::PciDeviceLocation,
     },
-    if_tdx_enabled,
     mm::VmIoOnce,
     trap::IrqLine,
 };
 
-cfg_if! {
-    if #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))] {
-        use crate::arch::tdx_guest;
-    }
-}
-
 /// MSI-X capability. It will set the BAR space it uses to be hidden.
 #[derive(Debug)]
 #[repr(C)]
@@ -108,8 +99,8 @@ impl CapabilityMsixData {
 
         // Set message address 0xFEE0_0000
         for i in 0..table_size {
-            if_tdx_enabled!({
-                #[cfg(target_arch = "x86_64")]
+            #[cfg(target_arch = "x86_64")]
+            crate::arch::if_tdx_enabled!({
                 // SAFETY:
                 // This is safe because we are ensuring that the physical address of the MSI-X table is valid before this operation.
                 // We are also ensuring that we are only unprotecting a single page.
@@ -119,7 +110,8 @@ impl CapabilityMsixData {
                 // In addition, due to granularity, the minimum value that can be set here is only one page.
                 // Therefore, we are not causing any undefined behavior or violating any of the requirements of the `unprotect_gpa_range` function.
                 unsafe {
-                    tdx_guest::unprotect_gpa_range(table_bar.io_mem().paddr(), 1).unwrap();
+                    crate::arch::tdx_guest::unprotect_gpa_range(table_bar.io_mem().paddr(), 1)
+                        .unwrap();
                 }
             });
             // Set message address and disable this msix entry
@@ -4,10 +4,7 @@
 
 use core::fmt::{self, Arguments, Write};
 
-use crate::{
-    if_tdx_enabled,
-    sync::{LocalIrqDisabled, SpinLock},
-};
+use crate::sync::{LocalIrqDisabled, SpinLock};
 
 struct Stdout;
 
@@ -24,13 +21,16 @@ static STDOUT: SpinLock<Stdout, LocalIrqDisabled> = SpinLock::new(Stdout);
 
 /// Prints formatted arguments to the console.
 pub fn early_print(args: Arguments) {
-    if_tdx_enabled!({
+    #[cfg(target_arch = "x86_64")]
+    crate::arch::if_tdx_enabled!({
         // Hold the lock to prevent the logs from interleaving.
         let _guard = STDOUT.lock();
         tdx_guest::print(args);
     } else {
        STDOUT.lock().write_fmt(args).unwrap();
     });
+    #[cfg(not(target_arch = "x86_64"))]
+    crate::arch::serial::print(args);
 }
 
 /// Prints to the console.
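The early_print change above shows the fallback pattern used throughout the commit: one statement carries #[cfg(target_arch = "x86_64")], a sibling carries #[cfg(not(target_arch = "x86_64"))], so exactly one path survives on any given target. A trivial standalone illustration (all names here are invented):

    fn early_print(msg: &str) {
        #[cfg(target_arch = "x86_64")]
        x86_print(msg);

        #[cfg(not(target_arch = "x86_64"))]
        generic_print(msg);
    }

    #[cfg(target_arch = "x86_64")]
    fn x86_print(msg: &str) {
        // stand-in for the TDX/serial dispatch in the real code
        println!("{msg}");
    }

    #[cfg(not(target_arch = "x86_64"))]
    fn generic_print(msg: &str) {
        println!("{msg}");
    }

    fn main() {
        early_print("hello");
    }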
@@ -11,7 +11,6 @@ use align_ext::AlignExt;
 pub(super) use self::allocator::init;
 pub(crate) use self::allocator::IoMemAllocatorBuilder;
 use crate::{
-    if_tdx_enabled,
     mm::{
         kspace::kvirt_area::{KVirtArea, Untracked},
         page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
@@ -87,11 +86,18 @@ impl IoMem {
         let first_page_start = range.start.align_down(PAGE_SIZE);
         let last_page_end = range.end.align_up(PAGE_SIZE);
 
-        let priv_flags = if_tdx_enabled!({
-            PrivilegedPageFlags::SHARED
-        } else {
-            PrivilegedPageFlags::empty()
-        });
+        let priv_flags = {
+            #[cfg(target_arch = "x86_64")]
+            {
+                crate::arch::if_tdx_enabled!({
+                    PrivilegedPageFlags::SHARED
+                } else {
+                    PrivilegedPageFlags::empty()
+                })
+            }
+            #[cfg(not(target_arch = "x86_64"))]
+            PrivilegedPageFlags::empty()
+        };
 
         let prop = PageProperty {
             flags,
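The priv_flags hunk above uses cfg in expression position: the outer block keeps exactly one tail expression per target, so the let binding still receives a single value of one type. Below is a standalone sketch of the same shape; the flag type and bit values are invented placeholders.

    #[derive(Debug)]
    struct PrivFlags(u32);

    fn priv_flags() -> PrivFlags {
        let flags = {
            #[cfg(target_arch = "x86_64")]
            {
                // On x86_64 the real code further branches on TDX and may
                // request a SHARED mapping; 0b10 is a placeholder bit here.
                PrivFlags(0b10)
            }
            #[cfg(not(target_arch = "x86_64"))]
            {
                PrivFlags(0)
            }
        };
        flags
    }

    fn main() {
        println!("{:?}", priv_flags());
    }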
@@ -76,11 +76,13 @@ unsafe fn init() {
     unsafe {
         mm::frame::allocator::init_early_allocator();
     }
 
-    if_tdx_enabled!({
+    #[cfg(target_arch = "x86_64")]
+    arch::if_tdx_enabled!({
     } else {
         arch::serial::init();
     });
+    #[cfg(not(target_arch = "x86_64"))]
+    arch::serial::init();
 
     logger::init();
 
@@ -107,7 +109,8 @@ unsafe fn init() {
 
     unsafe { arch::late_init_on_bsp() };
 
-    if_tdx_enabled!({
+    #[cfg(target_arch = "x86_64")]
+    arch::if_tdx_enabled!({
         arch::serial::init();
     });
 
@@ -8,7 +8,6 @@ use cfg_if::cfg_if;
 use super::{check_and_insert_dma_mapping, remove_dma_mapping, DmaError, HasDaddr};
 use crate::{
     arch::iommu,
-    if_tdx_enabled,
     mm::{
         dma::{dma_type, Daddr, DmaType},
         io::VmIoOnce,
@@ -75,8 +74,8 @@ impl DmaCoherent {
         }
         let start_daddr = match dma_type() {
             DmaType::Direct => {
-                if_tdx_enabled!({
-                    #[cfg(target_arch = "x86_64")]
+                #[cfg(target_arch = "x86_64")]
+                crate::arch::if_tdx_enabled!({
                     // SAFETY:
                     // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations.
                     // The `check_and_insert_dma_mapping` function checks if the physical address range is already mapped.
@@ -135,8 +134,8 @@ impl Drop for DmaCoherentInner {
             start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
         match dma_type() {
             DmaType::Direct => {
-                if_tdx_enabled!({
-                    #[cfg(target_arch = "x86_64")]
+                #[cfg(target_arch = "x86_64")]
+                crate::arch::if_tdx_enabled!({
                     // SAFETY:
                     // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations.
                     // The `start_paddr()` ensures the `start_paddr` is page-aligned.
@@ -3,25 +3,16 @@
 use alloc::sync::Arc;
 use core::ops::Range;
 
-use cfg_if::cfg_if;
-
 use super::{check_and_insert_dma_mapping, remove_dma_mapping, DmaError, HasDaddr};
 use crate::{
     arch::iommu,
     error::Error,
-    if_tdx_enabled,
     mm::{
         dma::{dma_type, Daddr, DmaType},
         HasPaddr, Infallible, Paddr, USegment, UntypedMem, VmIo, VmReader, VmWriter, PAGE_SIZE,
     },
 };
 
-cfg_if! {
-    if #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))] {
-        use crate::arch::tdx_guest;
-    }
-}
-
 /// A streaming DMA mapping. Users must synchronize data
 /// before reading or after writing to ensure consistency.
 ///
@@ -72,15 +63,16 @@ impl DmaStream {
             start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
         let start_daddr = match dma_type() {
             DmaType::Direct => {
-                if_tdx_enabled!({
-                    #[cfg(target_arch = "x86_64")]
+                #[cfg(target_arch = "x86_64")]
+                crate::arch::if_tdx_enabled!({
                     // SAFETY:
                     // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations.
                     // The `check_and_insert_dma_mapping` function checks if the physical address range is already mapped.
                     // We are also ensuring that we are only modifying the page table entries corresponding to the physical address range specified by `start_paddr` and `frame_count`.
                     // Therefore, we are not causing any undefined behavior or violating any of the requirements of the 'unprotect_gpa_range' function.
                     unsafe {
-                        tdx_guest::unprotect_gpa_range(start_paddr, frame_count).unwrap();
+                        crate::arch::tdx_guest::unprotect_gpa_range(start_paddr, frame_count)
+                            .unwrap();
                     }
                 });
                 start_paddr as Daddr
@@ -182,15 +174,16 @@ impl Drop for DmaStreamInner {
             start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
         match dma_type() {
             DmaType::Direct => {
-                if_tdx_enabled!({
-                    #[cfg(target_arch = "x86_64")]
+                #[cfg(target_arch = "x86_64")]
+                crate::arch::if_tdx_enabled!({
                     // SAFETY:
                     // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations.
                     // The `start_paddr()` ensures the `start_paddr` is page-aligned.
                     // We are also ensuring that we are only modifying the page table entries corresponding to the physical address range specified by `start_paddr` and `frame_count`.
                     // Therefore, we are not causing any undefined behavior or violating any of the requirements of the `protect_gpa_range` function.
                     unsafe {
-                        tdx_guest::protect_gpa_range(start_paddr, frame_count).unwrap();
+                        crate::arch::tdx_guest::protect_gpa_range(start_paddr, frame_count)
+                            .unwrap();
                    }
                });
            }
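The two DMA hunks above keep the TDX page-state transitions paired: the mapping constructor unprotects the guest-physical range so the host can access it, and the matching Drop implementation protects it again. A standalone sketch of that RAII pairing follows; the type and function names are illustrative, not the ostd API.

    struct SharedDmaRange {
        start_paddr: usize,
        frame_count: usize,
    }

    impl SharedDmaRange {
        fn new(start_paddr: usize, frame_count: usize) -> Self {
            // In a TDX guest, this is where unprotect_gpa_range(...) runs,
            // making the pages shared with the VMM for DMA.
            println!("unprotect {start_paddr:#x}, {frame_count} frames");
            Self { start_paddr, frame_count }
        }
    }

    impl Drop for SharedDmaRange {
        fn drop(&mut self) {
            // ... and here protect_gpa_range(...) converts them back to
            // private memory before the frames are reused.
            println!("protect {:#x}, {} frames", self.start_paddr, self.frame_count);
        }
    }

    fn main() {
        let _range = SharedDmaRange::new(0x8000_0000, 4);
    }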