mirror of
https://github.com/asterinas/asterinas.git
synced 2025-06-27 19:33:23 +00:00
Fix a potential race condition during PTE read/write operations
This commit is contained in:
committed by
Tate, Hongliang Tian
parent
3e47a6fdaa
commit
5834f299bc
@ -9,6 +9,7 @@ use crate::{
|
||||
page_table::PageTableEntryTrait,
|
||||
Paddr, PagingConstsTrait, PagingLevel, Vaddr, PAGE_SIZE,
|
||||
},
|
||||
util::SameSizeAs,
|
||||
Pod,
|
||||
};
|
||||
|
||||
@ -122,6 +123,9 @@ macro_rules! parse_flags {
|
||||
};
|
||||
}
|
||||
|
||||
// SAFETY: `PageTableEntry` has the same size as `usize`
|
||||
unsafe impl SameSizeAs<usize> for PageTableEntry {}
|
||||
|
||||
impl PageTableEntryTrait for PageTableEntry {
|
||||
fn is_present(&self) -> bool {
|
||||
self.0 & PageTableFlags::VALID.bits() != 0
|
||||
|
@ -10,6 +10,7 @@ use crate::{
|
||||
page_table::{PageTableEntryTrait, PageTableMode},
|
||||
Paddr, PageProperty, PagingConstsTrait, PagingLevel, Vaddr,
|
||||
},
|
||||
util::SameSizeAs,
|
||||
Pod,
|
||||
};
|
||||
|
||||
@ -74,6 +75,9 @@ impl PageTableEntry {
|
||||
const PROP_MASK: u64 = !Self::PHYS_MASK & !PageTableFlags::LAST_PAGE.bits();
|
||||
}
|
||||
|
||||
// SAFETY: `PageTableEntry` has the same size as `usize` in our supported x86 architecture.
|
||||
unsafe impl SameSizeAs<usize> for PageTableEntry {}
|
||||
|
||||
impl PageTableEntryTrait for PageTableEntry {
|
||||
fn new_page(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self {
|
||||
let mut pte = Self(paddr as u64 & Self::PHYS_MASK | PageTableFlags::LAST_PAGE.bits());
|
||||
|
@ -15,6 +15,7 @@ use crate::{
|
||||
page_table::PageTableEntryTrait,
|
||||
Paddr, PagingConstsTrait, PagingLevel, Vaddr, PAGE_SIZE,
|
||||
},
|
||||
util::SameSizeAs,
|
||||
Pod,
|
||||
};
|
||||
|
||||
@ -163,6 +164,9 @@ macro_rules! parse_flags {
|
||||
};
|
||||
}
|
||||
|
||||
// SAFETY: `PageTableEntry` has the same size as `usize`
|
||||
unsafe impl SameSizeAs<usize> for PageTableEntry {}
|
||||
|
||||
impl PageTableEntryTrait for PageTableEntry {
|
||||
fn is_present(&self) -> bool {
|
||||
// For PT child, `PRESENT` should be set; for huge page, `HUGE` should
|
||||
|
@ -48,6 +48,7 @@ pub mod task;
|
||||
pub mod timer;
|
||||
pub mod trap;
|
||||
pub mod user;
|
||||
mod util;
|
||||
|
||||
use core::sync::atomic::{AtomicBool, Ordering};
|
||||
|
||||
|
@ -65,7 +65,9 @@
|
||||
//! table cursor should add additional entry point checks to prevent these defined
|
||||
//! behaviors if they are not wanted.
|
||||
|
||||
use core::{any::TypeId, marker::PhantomData, mem::ManuallyDrop, ops::Range};
|
||||
use core::{
|
||||
any::TypeId, marker::PhantomData, mem::ManuallyDrop, ops::Range, sync::atomic::Ordering,
|
||||
};
|
||||
|
||||
use align_ext::AlignExt;
|
||||
|
||||
@ -184,10 +186,12 @@ where
|
||||
}
|
||||
|
||||
let cur_pt_ptr = paddr_to_vaddr(cur_pt_addr) as *mut E;
|
||||
// SAFETY: The pointer and index is valid since the root page table
|
||||
// does not short-live it. The child page table node won't be
|
||||
// recycled by another thread while we are using it.
|
||||
let cur_pte = unsafe { cur_pt_ptr.add(start_idx).read() };
|
||||
// SAFETY:
|
||||
// - The page table node is alive because (1) the root node is alive and (2) all child nodes cannot
|
||||
// be recycled if there are cursors.
|
||||
// - The index is inside the bound, so the page table entry is valid.
|
||||
// - All page table entries are aligned and accessed with atomic operations only.
|
||||
let cur_pte = unsafe { super::load_pte(cur_pt_ptr.add(start_idx), Ordering::Acquire) };
|
||||
if cur_pte.is_present() {
|
||||
if cur_pte.is_last(cursor.level) {
|
||||
break;
|
||||
|
@ -1,13 +1,20 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
use core::{fmt::Debug, marker::PhantomData, ops::Range};
|
||||
use core::{
|
||||
fmt::Debug,
|
||||
intrinsics::transmute_unchecked,
|
||||
marker::PhantomData,
|
||||
ops::Range,
|
||||
sync::atomic::{AtomicUsize, Ordering},
|
||||
};
|
||||
|
||||
use super::{
|
||||
nr_subpage_per_huge, page_prop::PageProperty, page_size, Paddr, PagingConstsTrait, PagingLevel,
|
||||
PodOnce, Vaddr,
|
||||
io::PodOnce, nr_subpage_per_huge, page_prop::PageProperty, page_size, Paddr, PagingConstsTrait,
|
||||
PagingLevel, Vaddr,
|
||||
};
|
||||
use crate::{
|
||||
arch::mm::{PageTableEntry, PagingConsts},
|
||||
util::SameSizeAs,
|
||||
Pod,
|
||||
};
|
||||
|
||||
@ -339,7 +346,7 @@ pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>(
|
||||
///
|
||||
/// Note that a default PTE should be a PTE that points to nothing.
|
||||
pub trait PageTableEntryTrait:
|
||||
Clone + Copy + Debug + Default + Pod + PodOnce + Sized + Send + Sync + 'static
|
||||
Clone + Copy + Debug + Default + Pod + PodOnce + SameSizeAs<usize> + Sized + Send + Sync + 'static
|
||||
{
|
||||
/// Create a set of new invalid page table flags that indicates an absent page.
|
||||
///
|
||||
@ -380,4 +387,40 @@ pub trait PageTableEntryTrait:
|
||||
/// The level of the page table the entry resides is given since architectures
|
||||
/// like amd64 only uses a huge bit in intermediate levels.
|
||||
fn is_last(&self, level: PagingLevel) -> bool;
|
||||
|
||||
/// Converts the PTE into its corresponding `usize` value.
|
||||
fn as_usize(self) -> usize {
|
||||
// SAFETY: `Self` is `Pod` and has the same memory representation as `usize`.
|
||||
unsafe { transmute_unchecked(self) }
|
||||
}
|
||||
|
||||
/// Converts a usize `pte_raw` into a PTE.
|
||||
fn from_usize(pte_raw: usize) -> Self {
|
||||
// SAFETY: `Self` is `Pod` and has the same memory representation as `usize`.
|
||||
unsafe { transmute_unchecked(pte_raw) }
|
||||
}
|
||||
}
|
||||
|
||||
/// Loads a page table entry with an atomic instruction.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// The safety preconditions are same as those of [`AtomicUsize::from_ptr`].
|
||||
pub unsafe fn load_pte<E: PageTableEntryTrait>(ptr: *mut E, ordering: Ordering) -> E {
|
||||
// SAFETY: The safety is upheld by the caller.
|
||||
let atomic = unsafe { AtomicUsize::from_ptr(ptr.cast()) };
|
||||
let pte_raw = atomic.load(ordering);
|
||||
E::from_usize(pte_raw)
|
||||
}
|
||||
|
||||
/// Stores a page table entry with an atomic instruction.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// The safety preconditions are same as those of [`AtomicUsize::from_ptr`].
|
||||
pub unsafe fn store_pte<E: PageTableEntryTrait>(ptr: *mut E, new_val: E, ordering: Ordering) {
|
||||
let new_raw = new_val.as_usize();
|
||||
// SAFETY: The safety is upheld by the caller.
|
||||
let atomic = unsafe { AtomicUsize::from_ptr(ptr.cast()) };
|
||||
atomic.store(new_raw, ordering)
|
||||
}
|
||||
|
@ -41,8 +41,9 @@ use crate::{
|
||||
arch::mm::{PageTableEntry, PagingConsts},
|
||||
mm::{
|
||||
frame::{inc_frame_ref_count, meta::AnyFrameMeta, Frame},
|
||||
paddr_to_vaddr, FrameAllocOptions, Infallible, Paddr, PagingConstsTrait, PagingLevel,
|
||||
VmReader,
|
||||
paddr_to_vaddr,
|
||||
page_table::{load_pte, store_pte},
|
||||
FrameAllocOptions, Infallible, Paddr, PagingConstsTrait, PagingLevel, VmReader,
|
||||
},
|
||||
};
|
||||
|
||||
@ -309,9 +310,11 @@ where
|
||||
/// The caller must ensure that the index is within the bound.
|
||||
unsafe fn read_pte(&self, idx: usize) -> E {
|
||||
debug_assert!(idx < nr_subpage_per_huge::<C>());
|
||||
let ptr = paddr_to_vaddr(self.page.start_paddr()) as *const E;
|
||||
// SAFETY: The index is within the bound and the PTE is plain-old-data.
|
||||
unsafe { ptr.add(idx).read() }
|
||||
let ptr = paddr_to_vaddr(self.page.start_paddr()) as *mut E;
|
||||
// SAFETY:
|
||||
// - The page table node is alive. The index is inside the bound, so the page table entry is valid.
|
||||
// - All page table entries are aligned and accessed with atomic operations only.
|
||||
unsafe { load_pte(ptr.add(idx), Ordering::Relaxed) }
|
||||
}
|
||||
|
||||
/// Writes a page table entry at a given index.
|
||||
@ -330,8 +333,10 @@ where
|
||||
unsafe fn write_pte(&mut self, idx: usize, pte: E) {
|
||||
debug_assert!(idx < nr_subpage_per_huge::<C>());
|
||||
let ptr = paddr_to_vaddr(self.page.start_paddr()) as *mut E;
|
||||
// SAFETY: The index is within the bound and the PTE is plain-old-data.
|
||||
unsafe { ptr.add(idx).write(pte) }
|
||||
// SAFETY:
|
||||
// - The page table node is alive. The index is inside the bound, so the page table entry is valid.
|
||||
// - All page table entries are aligned and accessed with atomic operations only.
|
||||
unsafe { store_pte(ptr.add(idx), pte, Ordering::Release) }
|
||||
}
|
||||
|
||||
/// Gets the mutable reference to the number of valid PTEs in the node.
|
||||
|
8
ostd/src/util.rs
Normal file
8
ostd/src/util.rs
Normal file
@ -0,0 +1,8 @@
|
||||
// SPDX-License-Identifier: MPL-2.0

/// A marker trait asserting that the implementing type occupies exactly the
/// same number of bytes as `T`.
///
/// # Safety
///
/// Implementors must guarantee that `size_of::<Self>() == size_of::<T>()`.
pub unsafe trait SameSizeAs<T> {}
|
Reference in New Issue
Block a user