Revised page table activation and drop management

Authored by Zhang Junyang on 2024-06-03 11:28:31 +00:00; committed by Tate, Hongliang Tian
parent 7095b37e7e
commit 3fa3d7f15a
4 changed files with 53 additions and 26 deletions


@@ -201,11 +201,13 @@ pub fn init_kernel_page_table(
     // SAFETY: the kernel page table is initialized properly.
     unsafe {
-        kpt.activate_unchecked();
+        kpt.first_activate_unchecked();
         crate::arch::mm::tlb_flush_all_including_global();
     }
     KERNEL_PAGE_TABLE.call_once(|| kpt);
-    drop(boot_pt);
+    // SAFETY: the boot page table is OK to be retired now since
+    // the kernel page table is activated.
+    unsafe { boot_pt.retire() };
 }
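
The hunk above encodes an ordering constraint: the kernel page table must be activated and the TLB flushed before the boot page table's frames go back to the allocator, otherwise the CPU could still be walking through frames that have already been reused. Below is a minimal, self-contained sketch of that ordering; it is not OSTD code, and `MockRoot`, `activate`, `flush_tlb`, and `free_frames` are hypothetical stand-ins.

    // Hypothetical stand-in for a root page table backed by physical frames.
    struct MockRoot {
        frames: Vec<usize>, // physical frame numbers backing this table
    }

    fn activate(root: &MockRoot) {
        // Stands in for loading the root into the page-table base register.
        println!("switching to a root backed by {} frames", root.frames.len());
    }

    fn flush_tlb() {
        // Stands in for a full TLB flush such as `tlb_flush_all_including_global`.
        println!("TLB flushed");
    }

    fn free_frames(frames: Vec<usize>) {
        // Stands in for returning frames to the frame allocator.
        println!("freed {} frames", frames.len());
    }

    fn main() {
        let boot_pt = MockRoot { frames: vec![1, 2, 3] };
        let kernel_pt = MockRoot { frames: vec![10, 11, 12, 13] };

        // 1. Switch to the kernel page table first.
        activate(&kernel_pt);
        // 2. Discard stale translations that may still reference the boot mappings.
        flush_tlb();
        // 3. Only now is it sound to reclaim the boot page table's frames;
        //    this is what `BootPageTable::retire` does in the real code.
        free_frames(boot_pt.frames);
    }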


@@ -85,13 +85,37 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
         unsafe { core::ptr::write_bytes(vaddr, 0, PAGE_SIZE) };
         frame
     }
+
+    /// Retires this boot-stage page table.
+    ///
+    /// Do not drop a boot-stage page table. Instead, retire it.
+    ///
+    /// # Safety
+    ///
+    /// This method can only be called when this boot-stage page table is no longer in use,
+    /// e.g., after the permanent kernel page table has been activated.
+    pub unsafe fn retire(mut self) {
+        // Manually free all the frame memory allocated for this page table.
+        let frames = core::mem::take(&mut self.frames);
+        for frame in frames {
+            FRAME_ALLOCATOR.get().unwrap().lock().dealloc(frame, 1);
+        }
+        // We do not want or need to trigger `Drop`.
+        core::mem::forget(self);
+        // FIXME: an empty `Vec` is leaked on the heap here since `drop` is not called
+        // and we have no way to free it.
+        // The best way to recycle the boot-phase page table is to initialize the metadata
+        // of all its page table pages with a page walk after the metadata pages are mapped.
+        // The boot page table could then be recycled or dropped by the routines in the
+        // [`super::node`] module, and there would be no need for the `first_activate`
+        // concept at all.
+    }
 }
 
 impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for BootPageTable<E, C> {
     fn drop(&mut self) {
-        for frame in &self.frames {
-            FRAME_ALLOCATOR.get().unwrap().lock().dealloc(*frame, 1);
-        }
+        panic!("the boot page table is dropped rather than retired.");
     }
 }
@@ -130,4 +154,6 @@ fn test_boot_pt() {
         unsafe { page_walk::<PageTableEntry, PagingConsts>(root_paddr, from2 + 2) },
         Some((to2 * PAGE_SIZE + 2, prop2))
     );
+
+    unsafe { boot_pt.retire() }
 }
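
The `retire`/`Drop` split above follows a common pattern for resources that must not be released implicitly: the destructor only panics to catch misuse, and the real cleanup lives in an explicit consuming method that ends with `core::mem::forget`. The test hunk shows the intended usage, with the boot page table explicitly retired at the end instead of being dropped. The sketch below is a self-contained illustration of that pattern, not the OSTD types; `ToyBootPt` and its frame numbers are made up.

    struct ToyBootPt {
        frames: Vec<usize>,
    }

    impl ToyBootPt {
        /// Frees every frame by hand, then suppresses `Drop`.
        ///
        /// # Safety
        /// The caller must guarantee the frames are no longer in use,
        /// mirroring the contract of `BootPageTable::retire`.
        unsafe fn retire(mut self) {
            for frame in core::mem::take(&mut self.frames) {
                println!("dealloc frame {frame}"); // stand-in for the frame allocator call
            }
            // `Drop` exists only to catch accidental drops, so skip it.
            core::mem::forget(self);
        }
    }

    impl Drop for ToyBootPt {
        fn drop(&mut self) {
            // Reaching this point means a caller forgot to retire the table.
            panic!("dropped rather than retired");
        }
    }

    fn main() {
        let pt = ToyBootPt { frames: vec![1, 2, 3] };
        // SAFETY: nothing references these toy frames.
        unsafe { pt.retire() };
    }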


@@ -183,6 +183,10 @@ where
         self.root.activate();
     }
 
+    pub(in crate::mm) unsafe fn first_activate_unchecked(&self) {
+        self.root.first_activate();
+    }
+
     /// The physical address of the root page table.
     ///
     /// It is dangerous to directly provide the physical address of the root page table to the


@@ -120,8 +120,6 @@
     /// proper mappings for the kernel and has the correct const parameters
     /// matching the current CPU.
     pub(crate) unsafe fn activate(&self) {
-        use core::sync::atomic::AtomicBool;
-
         use crate::{
             arch::mm::{activate_page_table, current_page_table_paddr},
             mm::CachePolicy,
@@ -140,26 +138,23 @@
         // Increment the reference count of the current page table.
         self.inc_ref();
 
-        // Decrement the reference count of the last activated page table.
-        // Boot page tables are not tracked with [`PageTableNode`], but
-        // all page tables after the boot stage are tracked.
-        //
-        // TODO: the `cpu_local` implementation currently is underpowered,
-        // so there's no need to use `AtomicBool` here.
-        crate::cpu_local! {
-            static CURRENT_IS_BOOT_PT: AtomicBool = AtomicBool::new(true);
-        }
-        if !CURRENT_IS_BOOT_PT.load(Ordering::Acquire) {
-            // Restore and drop the last activated page table.
-            let _last_activated_pt = Self {
-                raw: last_activated_paddr,
-                level: PagingConsts::NR_LEVELS,
-                _phantom: PhantomData,
-            };
-        } else {
-            CURRENT_IS_BOOT_PT.store(false, Ordering::Release);
-        }
+        // Restore and drop the last activated page table.
+        drop(Self {
+            raw: last_activated_paddr,
+            level: PagingConsts::NR_LEVELS,
+            _phantom: PhantomData,
+        });
     }
 
+    /// Activates the (root) page table assuming it is the first activation.
+    ///
+    /// It will not try to drop the last activated page table, but is otherwise
+    /// the same as [`Self::activate()`].
+    pub(super) unsafe fn first_activate(&self) {
+        use crate::{arch::mm::activate_page_table, mm::CachePolicy};
+
+        debug_assert_eq!(self.level, PagingConsts::NR_LEVELS);
+        self.inc_ref();
+        activate_page_table(self.raw, CachePolicy::Writeback);
+    }
+
     fn inc_ref(&self) {
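
Taken together, the two activation paths above maintain a simple invariant: the CPU holds exactly one reference on whichever root page table is currently active. `activate` takes a reference on the incoming root and releases the one held for the outgoing root by rebuilding and dropping its handle, while `first_activate` only takes a reference, because the outgoing boot page table was never tracked. The sketch below models that accounting with hypothetical types (`Cpu`, plain integer paddrs); it is an illustration, not the OSTD implementation.

    use std::collections::HashMap;

    struct Cpu {
        current_root: usize,             // paddr of the active root
        ref_counts: HashMap<usize, u32>, // per-root reference counts
    }

    impl Cpu {
        fn inc_ref(&mut self, root: usize) {
            *self.ref_counts.entry(root).or_insert(0) += 1;
        }

        // Models dropping the handle rebuilt from `last_activated_paddr`.
        fn dec_ref(&mut self, root: usize) {
            *self.ref_counts.get_mut(&root).expect("untracked root") -= 1;
        }

        // First activation: the outgoing (boot) root is untracked,
        // so only the incoming root gains a reference.
        fn first_activate(&mut self, root: usize) {
            self.inc_ref(root);
            self.current_root = root;
        }

        // Normal activation: reference the incoming root, then release
        // the reference held for the root being switched away from.
        fn activate(&mut self, root: usize) {
            if root == self.current_root {
                return; // re-activating the same root changes nothing
            }
            self.inc_ref(root);
            let last = self.current_root;
            self.current_root = root;
            self.dec_ref(last);
        }
    }

    fn main() {
        let mut cpu = Cpu { current_root: 0, ref_counts: HashMap::new() };
        cpu.first_activate(0x1000); // kernel root: count 1, boot root untouched
        cpu.activate(0x2000);       // user root: count 1; kernel root drops a reference
        println!("{:?}", cpu.ref_counts);
    }

In the real kernel the root activated first is also retained elsewhere (for example by `KERNEL_PAGE_TABLE`), so releasing the per-CPU reference never frees it; the sketch only tracks the per-CPU accounting.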