Identify the page table free-reuse-then-read problem and feature gate it

This commit is contained in:
Zhang Junyang 2024-05-10 10:24:35 +08:00 committed by Tate, Hongliang Tian
parent 679e5dac68
commit 035e12a4bd
3 changed files with 27 additions and 5 deletions

View File

@ -44,3 +44,6 @@ iced-x86 = { version = "1.21.0", default-features = false, features = [ "no_std"
[features] [features]
intel_tdx = ["dep:tdx-guest", "dep:iced-x86"] intel_tdx = ["dep:tdx-guest", "dep:iced-x86"]
# Actively recycling page table nodes while the `VmSpace` is alive saves
# memory but may lead to the page table free-reuse-then-read problem.
page_table_recycle = []

View File

@ -427,9 +427,12 @@ where
/// ///
/// This method requires locks acquired before calling it. The discarded level will be unlocked. /// This method requires locks acquired before calling it. The discarded level will be unlocked.
fn level_up(&mut self) { fn level_up(&mut self) {
#[cfg(feature = "page_table_recycle")]
let last_node_all_unmapped = self.cur_node().nr_valid_children() == 0; let last_node_all_unmapped = self.cur_node().nr_valid_children() == 0;
self.guards[C::NR_LEVELS - self.level] = None; self.guards[C::NR_LEVELS - self.level] = None;
self.level += 1; self.level += 1;
#[cfg(feature = "page_table_recycle")]
{
let can_release_child = let can_release_child =
TypeId::of::<M>() == TypeId::of::<KernelMode>() && self.level < C::NR_LEVELS; TypeId::of::<M>() == TypeId::of::<KernelMode>() && self.level < C::NR_LEVELS;
if can_release_child && last_node_all_unmapped { if can_release_child && last_node_all_unmapped {
@ -437,6 +440,7 @@ where
self.cur_node_mut().set_child(idx, Child::None, None, false); self.cur_node_mut().set_child(idx, Child::None, None, false);
} }
} }
}
/// A level down operation during traversal. It may create a new child frame if the /// A level down operation during traversal. It may create a new child frame if the
/// current frame does not have one. It may also split an untyped huge page into /// current frame does not have one. It may also split an untyped huge page into
@ -511,6 +515,7 @@ where
} }
} }
#[cfg(feature = "page_table_recycle")]
impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Drop for CursorMut<'_, M, E, C> impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Drop for CursorMut<'_, M, E, C>
where where
[(); nr_ptes_per_node::<C>()]:, [(); nr_ptes_per_node::<C>()]:,

View File

@ -339,10 +339,24 @@ where
/// ///
/// The caller must ensure that the root_paddr is a valid pointer to the root /// The caller must ensure that the root_paddr is a valid pointer to the root
/// page table frame. /// page table frame.
///
/// # Notes on the page table free-reuse-then-read problem
///
/// Because neither the hardware MMU nor the software page walk method
/// acquires the page table locks while reading, either of them may enter
/// a to-be-recycled page table frame and read its page table entries
/// after the frame has been recycled and reused.
///
/// To mitigate this problem, the page table nodes are by default not
/// actively recycled, until we find an appropriate solution.
pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>( pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>(
root_paddr: Paddr, root_paddr: Paddr,
vaddr: Vaddr, vaddr: Vaddr,
) -> Option<(Paddr, PageProperty)> { ) -> Option<(Paddr, PageProperty)> {
// We disable preemption here to mimic the MMU walk, which cannot be
// interrupted and must finish within a bounded amount of time.
let _guard = crate::task::disable_preempt();
let mut cur_level = C::NR_LEVELS; let mut cur_level = C::NR_LEVELS;
let mut cur_pte = { let mut cur_pte = {
let frame_addr = paddr_to_vaddr(root_paddr); let frame_addr = paddr_to_vaddr(root_paddr);