From 282a0f216fa81c799562565e10303038aab27577 Mon Sep 17 00:00:00 2001 From: Ruihan Li Date: Tue, 29 Apr 2025 19:33:16 +0800 Subject: [PATCH] Clarify safety conditions in `tdx_guest.rs` --- ostd/src/arch/x86/kernel/apic/ioapic.rs | 39 ++++++++++++---------- ostd/src/arch/x86/tdx_guest.rs | 38 +++++++++++++-------- ostd/src/io/io_mem/mod.rs | 44 ++++++++++++++----------- ostd/src/mm/dma/dma_coherent.rs | 20 ++++++----- ostd/src/mm/dma/dma_stream.rs | 20 ++++++----- 5 files changed, 94 insertions(+), 67 deletions(-) diff --git a/ostd/src/arch/x86/kernel/apic/ioapic.rs b/ostd/src/arch/x86/kernel/apic/ioapic.rs index 0c67df12..f34174cc 100644 --- a/ostd/src/arch/x86/kernel/apic/ioapic.rs +++ b/ostd/src/arch/x86/kernel/apic/ioapic.rs @@ -133,9 +133,29 @@ struct IoApicAccess { impl IoApicAccess { /// # Safety /// - /// User must ensure the base address is valid. + /// The caller must ensure that the base address is a valid I/O APIC base address. unsafe fn new(base_address: usize, io_mem_builder: &IoMemAllocatorBuilder) -> Self { io_mem_builder.remove(base_address..(base_address + 0x20)); + if_tdx_enabled!({ + assert_eq!( + base_address % crate::mm::PAGE_SIZE, + 0, + "[IOAPIC]: I/O memory is not page aligned, which cannot be unprotected in TDX: {:#x}", + base_address, + ); + // SAFETY: + // - The address range is page aligned, as we've checked above. + // - The caller guarantees that the address range represents the MMIO region for I/O + // APICs, so the address range must fall in the GPA limit. + // - FIXME: The I/O memory can be at a high address, so it may not be contained in the + // linear mapping. + // - Operations on the I/O memory can have side effects that may cause soundness + // problems, so the pages are not trivially untyped memory. However, since + // `io_mem_builder.remove()` ensures exclusive ownership, it's still fine to + // unprotect only once, before the I/O memory is used. 
+ unsafe { tdx_guest::unprotect_gpa_range(base_address, 1).unwrap() }; }); let base = NonNull::new(paddr_to_vaddr(base_address) as *mut u8).unwrap(); let register = VolatileRef::new_restricted(WriteOnly, base.cast::<u32>()); let data = VolatileRef::new(base.add(0x10).cast::<u32>()); @@ -177,15 +197,6 @@ pub fn init(io_mem_builder: &IoMemAllocatorBuilder) { // FIXME: Is it possible to have an address that is not the default 0xFEC0_0000? // Need to find a way to determine if it is a valid address or not. const IO_APIC_DEFAULT_ADDRESS: usize = 0xFEC0_0000; - if_tdx_enabled!({ - // SAFETY: - // This is safe because we are ensuring that the `IO_APIC_DEFAULT_ADDRESS` is a valid MMIO address before this operation. - // The `IO_APIC_DEFAULT_ADDRESS` is a well-known address used for IO APICs in x86 systems. - // We are also ensuring that we are only unprotecting a single page. - unsafe { - tdx_guest::unprotect_gpa_range(IO_APIC_DEFAULT_ADDRESS, 1).unwrap(); - } - }); let mut io_apic = unsafe { IoApicAccess::new(IO_APIC_DEFAULT_ADDRESS, io_mem_builder) }; io_apic.set_id(0); let id = io_apic.id(); @@ -209,14 +220,6 @@ pub fn init(io_mem_builder: &IoMemAllocatorBuilder) { let mut vec = Vec::new(); for id in 0..apic.io_apics.len() { let io_apic = apic.io_apics.get(id).unwrap(); - if_tdx_enabled!({ - // SAFETY: - // This is safe because we are ensuring that the `io_apic.address` is a valid MMIO address before this operation. - // We are also ensuring that we are only unprotecting a single page. 
- unsafe { - tdx_guest::unprotect_gpa_range(io_apic.address as usize, 1).unwrap(); - } - }); let interrupt_base = io_apic.global_system_interrupt_base; let mut io_apic = unsafe { IoApicAccess::new(io_apic.address as usize, io_mem_builder) }; diff --git a/ostd/src/arch/x86/tdx_guest.rs b/ostd/src/arch/x86/tdx_guest.rs index ef19ac3f..e2b404dc 100644 --- a/ostd/src/arch/x86/tdx_guest.rs +++ b/ostd/src/arch/x86/tdx_guest.rs @@ -25,16 +25,21 @@ pub enum PageConvertError { TdVmcall, } -/// Sets the given physical address range to Intel TDX shared pages. -/// Clears the data within the given address range. -/// Make sure the provided physical address is page size aligned. +/// Converts physical pages to Intel TDX shared pages. +/// +/// This function sets the [`PrivFlags::SHARED`] bit in the linear mapping of physical pages. Then, +/// it invokes the [`map_gpa`] TDVMCALL to convert those pages into Intel TDX shared pages. Due to +/// the conversion, any existing data on the pages will be erased. /// /// # Safety /// -/// To safely use this function, the caller must ensure that: -/// - The given guest physical address range is currently mapped in the page table. -/// - The `page_num` argument represents a valid number of pages. -/// - This function will erase any valid data in the range and should not assume that the data will still be there after the operation. +/// The caller must ensure that: +/// - The provided physical address is page aligned. +/// - The provided physical address range is in bounds, i.e., it should fall within the maximum +/// Guest Physical Address (GPA) limit. +/// - The provided physical address range is part of the linear mapping. +/// - All of the physical pages are untyped memory. Therefore, converting and erasing the data +/// will not cause memory safety issues. 
pub unsafe fn unprotect_gpa_range(gpa: Paddr, page_num: usize) -> Result<(), PageConvertError> { const PAGE_MASK: usize = PAGE_SIZE - 1; if gpa & PAGE_MASK != 0 { @@ -68,15 +73,22 @@ pub unsafe fn unprotect_gpa_range(gpa: Paddr, page_num: usize) -> Result<(), Pag .map_err(|_| PageConvertError::TdVmcall) } -/// Sets the given physical address range to Intel TDX private pages. -/// Make sure the provided physical address is page size aligned. +/// Converts physical pages to Intel TDX private pages. +/// +/// This function clears the [`PrivFlags::SHARED`] bit in the linear mapping of physical pages. +/// Then, it invokes the [`map_gpa`] TDVMCALL and the [`accept_page`] TDCALL to convert those pages +/// into Intel TDX private pages. Due to the conversion, any existing data on the pages will be +/// erased. /// /// # Safety /// -/// To safely use this function, the caller must ensure that: -/// - The given guest physical address range is currently mapped in the page table. -/// - The `page_num` argument represents a valid number of pages. -/// +/// The caller must ensure that: +/// - The provided physical address is page aligned. +/// - The provided physical address range is in bounds, i.e., it should fall within the maximum +/// Guest Physical Address (GPA) limit. +/// - The provided physical address range is part of the linear mapping. +/// - All of the physical pages are untyped memory. Therefore, converting and erasing the data +/// will not cause memory safety issues. 
pub unsafe fn protect_gpa_range(gpa: Paddr, page_num: usize) -> Result<(), PageConvertError> { const PAGE_MASK: usize = PAGE_SIZE - 1; if gpa & !PAGE_MASK == 0 { diff --git a/ostd/src/io/io_mem/mod.rs b/ostd/src/io/io_mem/mod.rs index 55b9d685..02be4325 100644 --- a/ostd/src/io/io_mem/mod.rs +++ b/ostd/src/io/io_mem/mod.rs @@ -86,29 +86,33 @@ impl IoMem { let first_page_start = range.start.align_down(PAGE_SIZE); let last_page_end = range.end.align_up(PAGE_SIZE); - let priv_flags = { - #[cfg(target_arch = "x86_64")] - { - crate::arch::if_tdx_enabled!({ - if first_page_start != range.start || last_page_end != range.end { - panic!("Alignment check failed when TDX is enabled. Requested IoMem range: {:#x?}..{:#x?}", range.start, range.end); - } + #[cfg(target_arch = "x86_64")] + let priv_flags = crate::arch::if_tdx_enabled!({ + assert!( + first_page_start == range.start && last_page_end == range.end, + "I/O memory is not page aligned, which cannot be unprotected in TDX: {:#x?}..{:#x?}", + range.start, + range.end, + ); - let pages = (last_page_end - first_page_start) / PAGE_SIZE; - // SAFETY: - // This is safe because we are ensuring that the physical address must be in the I/O memory region, and only unprotecting this region. - unsafe { - crate::arch::tdx_guest::unprotect_gpa_range(first_page_start, pages).unwrap(); - } + let pages = (last_page_end - first_page_start) / PAGE_SIZE; + // SAFETY: + // - The range `first_page_start..last_page_end` is always page aligned. + // - FIXME: We currently do not limit the I/O memory allocator with the maximum GPA, + // so the address range may not fall in the GPA limit. + // - FIXME: The I/O memory can be at a high address, so it may not be contained in the + // linear mapping. + // - The caller guarantees that operations on the I/O memory do not have any side + // effects that may cause soundness problems, so the pages can safely be viewed as + // untyped memory. 
+ unsafe { crate::arch::tdx_guest::unprotect_gpa_range(first_page_start, pages).unwrap() }; - PrivilegedPageFlags::SHARED - } else { - PrivilegedPageFlags::empty() - }) - } - #[cfg(not(target_arch = "x86_64"))] + PrivilegedPageFlags::SHARED + } else { PrivilegedPageFlags::empty() - }; + }); + #[cfg(not(target_arch = "x86_64"))] + let priv_flags = PrivilegedPageFlags::empty(); let prop = PageProperty { flags, diff --git a/ostd/src/mm/dma/dma_coherent.rs b/ostd/src/mm/dma/dma_coherent.rs index 8ddc3a83..937bdd24 100644 --- a/ostd/src/mm/dma/dma_coherent.rs +++ b/ostd/src/mm/dma/dma_coherent.rs @@ -77,10 +77,12 @@ impl DmaCoherent { #[cfg(target_arch = "x86_64")] crate::arch::if_tdx_enabled!({ // SAFETY: - // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations. - // The `check_and_insert_dma_mapping` function checks if the physical address range is already mapped. - // We are also ensuring that we are only modifying the page table entries corresponding to the physical address range specified by `start_paddr` and `frame_count`. - // Therefore, we are not causing any undefined behavior or violating any of the requirements of the 'unprotect_gpa_range' function. + // - The address of a `USegment` is always page aligned. + // - A `USegment` always points to normal physical memory, so the address + // range falls in the GPA limit. + // - A `USegment` always points to normal physical memory, so all the pages + // are contained in the linear mapping. + // - The pages belong to a `USegment`, so they're all untyped memory. unsafe { tdx_guest::unprotect_gpa_range(start_paddr, frame_count).unwrap(); } @@ -137,10 +139,12 @@ impl Drop for DmaCoherentInner { #[cfg(target_arch = "x86_64")] crate::arch::if_tdx_enabled!({ // SAFETY: - // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations. 
- // The `start_paddr()` ensures the `start_paddr` is page-aligned. - // We are also ensuring that we are only modifying the page table entries corresponding to the physical address range specified by `start_paddr` and `frame_count`. - // Therefore, we are not causing any undefined behavior or violating any of the requirements of the `protect_gpa_range` function. + // - The address of a `USegment` is always page aligned. + // - A `USegment` always points to normal physical memory, so the address + // range falls in the GPA limit. + // - A `USegment` always points to normal physical memory, so all the pages + // are contained in the linear mapping. + // - The pages belong to a `USegment`, so they're all untyped memory. unsafe { tdx_guest::protect_gpa_range(start_paddr, frame_count).unwrap(); } diff --git a/ostd/src/mm/dma/dma_stream.rs b/ostd/src/mm/dma/dma_stream.rs index a9ca2bec..e959b680 100644 --- a/ostd/src/mm/dma/dma_stream.rs +++ b/ostd/src/mm/dma/dma_stream.rs @@ -66,10 +66,12 @@ impl DmaStream { #[cfg(target_arch = "x86_64")] crate::arch::if_tdx_enabled!({ // SAFETY: - // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations. - // The `check_and_insert_dma_mapping` function checks if the physical address range is already mapped. - // We are also ensuring that we are only modifying the page table entries corresponding to the physical address range specified by `start_paddr` and `frame_count`. - // Therefore, we are not causing any undefined behavior or violating any of the requirements of the 'unprotect_gpa_range' function. + // - The address of a `USegment` is always page aligned. + // - A `USegment` always points to normal physical memory, so the address + // range falls in the GPA limit. + // - A `USegment` always points to normal physical memory, so all the pages + // are contained in the linear mapping. 
+ // - The pages belong to a `USegment`, so they're all untyped memory. unsafe { crate::arch::tdx_guest::unprotect_gpa_range(start_paddr, frame_count) .unwrap(); @@ -177,10 +179,12 @@ impl Drop for DmaStreamInner { #[cfg(target_arch = "x86_64")] crate::arch::if_tdx_enabled!({ // SAFETY: - // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations. - // The `start_paddr()` ensures the `start_paddr` is page-aligned. - // We are also ensuring that we are only modifying the page table entries corresponding to the physical address range specified by `start_paddr` and `frame_count`. - // Therefore, we are not causing any undefined behavior or violating any of the requirements of the `protect_gpa_range` function. + // - The address of a `USegment` is always page aligned. + // - A `USegment` always points to normal physical memory, so the address + // range falls in the GPA limit. + // - A `USegment` always points to normal physical memory, so all the pages + // are contained in the linear mapping. + // - The pages belong to a `USegment`, so they're all untyped memory. unsafe { crate::arch::tdx_guest::protect_gpa_range(start_paddr, frame_count) .unwrap();