From 035e12a4bd56e2d84e96e318f613abb9c9a02765 Mon Sep 17 00:00:00 2001 From: Zhang Junyang Date: Fri, 10 May 2024 10:24:35 +0800 Subject: [PATCH] Identify the page table free-reuse-then-read problem and feature gate it --- framework/aster-frame/Cargo.toml | 3 +++ framework/aster-frame/src/vm/page_table/cursor.rs | 15 ++++++++++----- framework/aster-frame/src/vm/page_table/mod.rs | 14 ++++++++++++++ 3 files changed, 27 insertions(+), 5 deletions(-) diff --git a/framework/aster-frame/Cargo.toml b/framework/aster-frame/Cargo.toml index f25140f00..924f62ce3 100644 --- a/framework/aster-frame/Cargo.toml +++ b/framework/aster-frame/Cargo.toml @@ -44,3 +44,6 @@ iced-x86 = { version = "1.21.0", default-features = false, features = [ "no_std" [features] intel_tdx = ["dep:tdx-guest", "dep:iced-x86"] +# To actively recycle page table nodes while the `VmSpace` is alive, this saves +# memory but may lead to the page table free-reuse-then-read problem. +page_table_recycle = [] diff --git a/framework/aster-frame/src/vm/page_table/cursor.rs b/framework/aster-frame/src/vm/page_table/cursor.rs index a168da16a..525385e54 100644 --- a/framework/aster-frame/src/vm/page_table/cursor.rs +++ b/framework/aster-frame/src/vm/page_table/cursor.rs @@ -427,14 +427,18 @@ where /// /// This method requires locks acquired before calling it. The discarded level will be unlocked. 
fn level_up(&mut self) { + #[cfg(feature = "page_table_recycle")] let last_node_all_unmapped = self.cur_node().nr_valid_children() == 0; self.guards[C::NR_LEVELS - self.level] = None; self.level += 1; - let can_release_child = - TypeId::of::() == TypeId::of::() && self.level < C::NR_LEVELS; - if can_release_child && last_node_all_unmapped { - let idx = self.cur_idx(); - self.cur_node_mut().set_child(idx, Child::None, None, false); + #[cfg(feature = "page_table_recycle")] + { + let can_release_child = + TypeId::of::() == TypeId::of::() && self.level < C::NR_LEVELS; + if can_release_child && last_node_all_unmapped { + let idx = self.cur_idx(); + self.cur_node_mut().set_child(idx, Child::None, None, false); + } } } @@ -511,6 +515,7 @@ where } } +#[cfg(feature = "page_table_recycle")] impl Drop for CursorMut<'_, M, E, C> where [(); nr_ptes_per_node::()]:, diff --git a/framework/aster-frame/src/vm/page_table/mod.rs b/framework/aster-frame/src/vm/page_table/mod.rs index 36ff01e16..23d97bebf 100644 --- a/framework/aster-frame/src/vm/page_table/mod.rs +++ b/framework/aster-frame/src/vm/page_table/mod.rs @@ -339,10 +339,24 @@ where /// /// The caller must ensure that the root_paddr is a valid pointer to the root /// page table frame. +/// +/// # Notes on the page table free-reuse-then-read problem +/// +/// Because neither the hardware MMU nor the software page walk method +/// acquires the locks of the page table while reading, they can enter +/// a to-be-recycled page table frame and read the page table entries +/// after the frame is recycled and reused. +/// +/// To mitigate this problem, the page table nodes are by default not +/// actively recycled until we find an appropriate solution. pub(super) unsafe fn page_walk( root_paddr: Paddr, vaddr: Vaddr, ) -> Option<(Paddr, PageProperty)> { + // We disable preempt here to mimic the MMU walk, which will not be interrupted + // and must finish within a given time. 
+ let _guard = crate::task::disable_preempt(); + let mut cur_level = C::NR_LEVELS; let mut cur_pte = { let frame_addr = paddr_to_vaddr(root_paddr);