Fix a potential race condition during PTE read/write operations

Author: Chen Chengjun
Date: 2025-02-20 20:06:00 +08:00
Committed by: Tate, Hongliang Tian
Parent: 3e47a6fdaa
Commit: 5834f299bc
8 changed files with 89 additions and 16 deletions
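
Background: before this change, page table entries were read and written with plain pointer accesses (`ptr.read()` / `ptr.write(pte)`) while other CPUs could concurrently update the same entries, which is a data race and therefore undefined behavior in Rust. The fix routes every PTE access through `AtomicUsize` views of the entry slots. Below is a minimal standalone sketch of that access pattern; the `DummyPte` type and the `load_slot`/`store_slot` names are illustrative only and are not part of the commit.

use core::sync::atomic::{AtomicUsize, Ordering};

// A PTE is just a word-sized, plain-old-data value (illustrative type).
#[repr(transparent)]
#[derive(Clone, Copy, Debug, Default)]
struct DummyPte(usize);

/// Atomically loads a PTE from a raw slot pointer.
///
/// # Safety
///
/// `slot` must be valid and aligned, and the slot must only ever be accessed
/// atomically, as required by `AtomicUsize::from_ptr`.
unsafe fn load_slot(slot: *mut DummyPte, ordering: Ordering) -> DummyPte {
    // SAFETY: Upheld by the caller.
    let atomic = unsafe { AtomicUsize::from_ptr(slot.cast()) };
    DummyPte(atomic.load(ordering))
}

/// Atomically stores a PTE into a raw slot pointer (same preconditions).
unsafe fn store_slot(slot: *mut DummyPte, pte: DummyPte, ordering: Ordering) {
    // SAFETY: Upheld by the caller.
    let atomic = unsafe { AtomicUsize::from_ptr(slot.cast()) };
    atomic.store(pte.0, ordering);
}

The commit applies this pattern via new `load_pte`/`store_pte` helpers and a `SameSizeAs<usize>` bound that guarantees a PTE can be reinterpreted as a `usize`, as shown in the diffs below.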

View File

@@ -9,6 +9,7 @@ use crate::{
         page_table::PageTableEntryTrait,
         Paddr, PagingConstsTrait, PagingLevel, Vaddr, PAGE_SIZE,
     },
+    util::SameSizeAs,
     Pod,
 };
@@ -122,6 +123,9 @@ macro_rules! parse_flags {
     };
 }

+// SAFETY: `PageTableEntry` has the same size as `usize`
+unsafe impl SameSizeAs<usize> for PageTableEntry {}
+
 impl PageTableEntryTrait for PageTableEntry {
     fn is_present(&self) -> bool {
         self.0 & PageTableFlags::VALID.bits() != 0

View File

@@ -10,6 +10,7 @@ use crate::{
         page_table::{PageTableEntryTrait, PageTableMode},
         Paddr, PageProperty, PagingConstsTrait, PagingLevel, Vaddr,
     },
+    util::SameSizeAs,
     Pod,
 };
@@ -74,6 +75,9 @@ impl PageTableEntry {
     const PROP_MASK: u64 = !Self::PHYS_MASK & !PageTableFlags::LAST_PAGE.bits();
 }

+// SAFETY: `PageTableEntry` has the same size as `usize` in our supported x86 architecture.
+unsafe impl SameSizeAs<usize> for PageTableEntry {}
+
 impl PageTableEntryTrait for PageTableEntry {
     fn new_page(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self {
         let mut pte = Self(paddr as u64 & Self::PHYS_MASK | PageTableFlags::LAST_PAGE.bits());

View File

@@ -15,6 +15,7 @@ use crate::{
         page_table::PageTableEntryTrait,
         Paddr, PagingConstsTrait, PagingLevel, Vaddr, PAGE_SIZE,
     },
+    util::SameSizeAs,
     Pod,
 };
@@ -163,6 +164,9 @@ macro_rules! parse_flags {
     };
 }

+// SAFETY: `PageTableEntry` has the same size as `usize`
+unsafe impl SameSizeAs<usize> for PageTableEntry {}
+
 impl PageTableEntryTrait for PageTableEntry {
     fn is_present(&self) -> bool {
         // For PT child, `PRESENT` should be set; for huge page, `HUGE` should

View File

@@ -48,6 +48,7 @@ pub mod task;
 pub mod timer;
 pub mod trap;
 pub mod user;
+mod util;

 use core::sync::atomic::{AtomicBool, Ordering};

View File

@@ -65,7 +65,9 @@
 //! table cursor should add additional entry point checks to prevent these defined
 //! behaviors if they are not wanted.

-use core::{any::TypeId, marker::PhantomData, mem::ManuallyDrop, ops::Range};
+use core::{
+    any::TypeId, marker::PhantomData, mem::ManuallyDrop, ops::Range, sync::atomic::Ordering,
+};

 use align_ext::AlignExt;
@@ -184,10 +186,12 @@ where
            }

            let cur_pt_ptr = paddr_to_vaddr(cur_pt_addr) as *mut E;
-            // SAFETY: The pointer and index is valid since the root page table
-            // does not short-live it. The child page table node won't be
-            // recycled by another thread while we are using it.
-            let cur_pte = unsafe { cur_pt_ptr.add(start_idx).read() };
+            // SAFETY:
+            // - The page table node is alive because (1) the root node is alive and (2) all child nodes cannot
+            // be recycled if there are cursors.
+            // - The index is inside the bound, so the page table entry is valid.
+            // - All page table entries are aligned and accessed with atomic operations only.
+            let cur_pte = unsafe { super::load_pte(cur_pt_ptr.add(start_idx), Ordering::Acquire) };
            if cur_pte.is_present() {
                if cur_pte.is_last(cursor.level) {
                    break;

View File

@@ -1,13 +1,20 @@
 // SPDX-License-Identifier: MPL-2.0

-use core::{fmt::Debug, marker::PhantomData, ops::Range};
+use core::{
+    fmt::Debug,
+    intrinsics::transmute_unchecked,
+    marker::PhantomData,
+    ops::Range,
+    sync::atomic::{AtomicUsize, Ordering},
+};

 use super::{
-    nr_subpage_per_huge, page_prop::PageProperty, page_size, Paddr, PagingConstsTrait, PagingLevel,
-    PodOnce, Vaddr,
+    io::PodOnce, nr_subpage_per_huge, page_prop::PageProperty, page_size, Paddr, PagingConstsTrait,
+    PagingLevel, Vaddr,
 };

 use crate::{
     arch::mm::{PageTableEntry, PagingConsts},
+    util::SameSizeAs,
     Pod,
 };
@@ -339,7 +346,7 @@ pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>(
 ///
 /// Note that a default PTE should be a PTE that points to nothing.
 pub trait PageTableEntryTrait:
-    Clone + Copy + Debug + Default + Pod + PodOnce + Sized + Send + Sync + 'static
+    Clone + Copy + Debug + Default + Pod + PodOnce + SameSizeAs<usize> + Sized + Send + Sync + 'static
 {
/// Create a set of new invalid page table flags that indicates an absent page. /// Create a set of new invalid page table flags that indicates an absent page.
/// ///
@@ -380,4 +387,40 @@ pub trait PageTableEntryTrait:
     /// The level of the page table the entry resides is given since architectures
     /// like amd64 only uses a huge bit in intermediate levels.
     fn is_last(&self, level: PagingLevel) -> bool;
+
+    /// Converts the PTE into its corresponding `usize` value.
+    fn as_usize(self) -> usize {
+        // SAFETY: `Self` is `Pod` and has the same memory representation as `usize`.
+        unsafe { transmute_unchecked(self) }
+    }
+
+    /// Converts a usize `pte_raw` into a PTE.
+    fn from_usize(pte_raw: usize) -> Self {
+        // SAFETY: `Self` is `Pod` and has the same memory representation as `usize`.
+        unsafe { transmute_unchecked(pte_raw) }
+    }
+}
+
+/// Loads a page table entry with an atomic instruction.
+///
+/// # Safety
+///
+/// The safety preconditions are same as those of [`AtomicUsize::from_ptr`].
+pub unsafe fn load_pte<E: PageTableEntryTrait>(ptr: *mut E, ordering: Ordering) -> E {
+    // SAFETY: The safety is upheld by the caller.
+    let atomic = unsafe { AtomicUsize::from_ptr(ptr.cast()) };
+    let pte_raw = atomic.load(ordering);
+    E::from_usize(pte_raw)
+}
+
+/// Stores a page table entry with an atomic instruction.
+///
+/// # Safety
+///
+/// The safety preconditions are same as those of [`AtomicUsize::from_ptr`].
+pub unsafe fn store_pte<E: PageTableEntryTrait>(ptr: *mut E, new_val: E, ordering: Ordering) {
+    let new_raw = new_val.as_usize();
+    // SAFETY: The safety is upheld by the caller.
+    let atomic = unsafe { AtomicUsize::from_ptr(ptr.cast()) };
+    atomic.store(new_raw, ordering)
 }

View File

@@ -41,8 +41,9 @@ use crate::{
     arch::mm::{PageTableEntry, PagingConsts},
     mm::{
         frame::{inc_frame_ref_count, meta::AnyFrameMeta, Frame},
-        paddr_to_vaddr, FrameAllocOptions, Infallible, Paddr, PagingConstsTrait, PagingLevel,
-        VmReader,
+        paddr_to_vaddr,
+        page_table::{load_pte, store_pte},
+        FrameAllocOptions, Infallible, Paddr, PagingConstsTrait, PagingLevel, VmReader,
     },
 };
@@ -309,9 +310,11 @@
     /// The caller must ensure that the index is within the bound.
     unsafe fn read_pte(&self, idx: usize) -> E {
         debug_assert!(idx < nr_subpage_per_huge::<C>());
-        let ptr = paddr_to_vaddr(self.page.start_paddr()) as *const E;
-        // SAFETY: The index is within the bound and the PTE is plain-old-data.
-        unsafe { ptr.add(idx).read() }
+        let ptr = paddr_to_vaddr(self.page.start_paddr()) as *mut E;
+        // SAFETY:
+        // - The page table node is alive. The index is inside the bound, so the page table entry is valid.
+        // - All page table entries are aligned and accessed with atomic operations only.
+        unsafe { load_pte(ptr.add(idx), Ordering::Relaxed) }
     }

     /// Writes a page table entry at a given index.
@@ -330,8 +333,10 @@
     unsafe fn write_pte(&mut self, idx: usize, pte: E) {
         debug_assert!(idx < nr_subpage_per_huge::<C>());
         let ptr = paddr_to_vaddr(self.page.start_paddr()) as *mut E;
-        // SAFETY: The index is within the bound and the PTE is plain-old-data.
-        unsafe { ptr.add(idx).write(pte) }
+        // SAFETY:
+        // - The page table node is alive. The index is inside the bound, so the page table entry is valid.
+        // - All page table entries are aligned and accessed with atomic operations only.
+        unsafe { store_pte(ptr.add(idx), pte, Ordering::Release) }
     }

     /// Gets the mutable reference to the number of valid PTEs in the node.

ostd/src/util.rs (new file, 8 additions)
View File

@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: MPL-2.0
+
+/// A marker trait that represents a type has the same size as `T`.
+///
+/// # Safety
+///
+/// Types that implement `SameSizeAs<T>` must have the same size as `T`.
+pub unsafe trait SameSizeAs<T> {}
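
The new `SameSizeAs<usize>` bound is what makes the `transmute_unchecked` in `as_usize`/`from_usize` sound: by writing the unsafe impl, an implementor promises that its PTE type is exactly one `usize` wide. The following standalone sketch shows what an implementor has to guarantee; `MyPte` is a hypothetical type, and the trait is re-declared here only so the example is self-contained.

use core::mem::size_of;

/// A stand-in for `ostd::util::SameSizeAs`, re-declared for this sketch.
unsafe trait SameSizeAs<T> {}

// A hypothetical PTE type: a transparent wrapper around one machine word.
#[repr(transparent)]
#[derive(Clone, Copy, Debug, Default)]
struct MyPte(usize);

// SAFETY: `MyPte` is `#[repr(transparent)]` over `usize`, so their sizes match.
unsafe impl SameSizeAs<usize> for MyPte {}

// A compile-time check that the safety contract actually holds.
const _: () = assert!(size_of::<MyPte>() == size_of::<usize>());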