Refactor the boot PT initialization for SMP

Zhang Junyang
2024-08-22 18:48:33 +08:00
committed by Tate, Hongliang Tian
parent e04fa6c69d
commit 5feb8f5de8
7 changed files with 120 additions and 60 deletions

View File

@@ -6,9 +6,10 @@ use trapframe::TrapFrame;
use crate::{
mm::{
kspace::{BOOT_PAGE_TABLE, KERNEL_PAGE_TABLE},
kspace::KERNEL_PAGE_TABLE,
paddr_to_vaddr,
page_prop::{PageProperty, PrivilegedPageFlags as PrivFlags},
page_table::boot_pt,
PAGE_SIZE,
},
prelude::Paddr,
@@ -48,15 +49,12 @@ pub unsafe fn unprotect_gpa_range(gpa: Paddr, page_num: usize) -> Result<(), Pag
priv_flags: prop.priv_flags | PrivFlags::SHARED,
}
};
{
let mut boot_pt_lock = BOOT_PAGE_TABLE.lock();
if let Some(boot_pt) = boot_pt_lock.as_mut() {
for i in 0..page_num {
let vaddr = paddr_to_vaddr(gpa + i * PAGE_SIZE);
boot_pt.protect_base_page(vaddr, protect_op);
}
let _ = boot_pt::with_borrow(|boot_pt| {
for i in 0..page_num {
let vaddr = paddr_to_vaddr(gpa + i * PAGE_SIZE);
boot_pt.protect_base_page(vaddr, protect_op);
}
}
});
// Protect the page in the kernel page table.
let pt = KERNEL_PAGE_TABLE.get().unwrap();
let vaddr = paddr_to_vaddr(gpa);
@@ -93,15 +91,12 @@ pub unsafe fn protect_gpa_range(gpa: Paddr, page_num: usize) -> Result<(), PageC
priv_flags: prop.priv_flags - PrivFlags::SHARED,
}
};
{
let mut boot_pt_lock = BOOT_PAGE_TABLE.lock();
if let Some(boot_pt) = boot_pt_lock.as_mut() {
for i in 0..page_num {
let vaddr = paddr_to_vaddr(gpa + i * PAGE_SIZE);
boot_pt.protect_base_page(vaddr, protect_op);
}
let _ = boot_pt::with_borrow(|boot_pt| {
for i in 0..page_num {
let vaddr = paddr_to_vaddr(gpa + i * PAGE_SIZE);
boot_pt.protect_base_page(vaddr, protect_op);
}
}
});
// Protect the page in the kernel page table.
let pt = KERNEL_PAGE_TABLE.get().unwrap();
let vaddr = paddr_to_vaddr(gpa);
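
The two hunks above convert the TDX call sites from locking BOOT_PAGE_TABLE directly to the closure-based boot_pt::with_borrow accessor, discarding its Result with `let _ =` (presumably because a boot page table that has already been dismissed has nothing left to update). The following standalone toy (hosted Rust; the Vec singleton and the single DISMISSED flag are illustrative stand-ins, not OSTD types) sketches that accessor shape: callers only ever see a mutable borrow for the duration of a closure, and they get an Err once the singleton has been torn down.

use std::sync::{
    atomic::{AtomicBool, Ordering},
    Mutex,
};

static SINGLETON: Mutex<Option<Vec<u32>>> = Mutex::new(None);
static DISMISSED: AtomicBool = AtomicBool::new(false);

fn with_borrow<F: FnOnce(&mut Vec<u32>)>(f: F) -> Result<(), ()> {
    if DISMISSED.load(Ordering::SeqCst) {
        return Err(());
    }
    let mut guard = SINGLETON.lock().unwrap();
    // Lazily initialize on first use, as `boot_pt::with_borrow` does.
    let value = guard.get_or_insert_with(Vec::new);
    f(value);
    Ok(())
}

fn dismiss() {
    DISMISSED.store(true, Ordering::SeqCst);
    SINGLETON.lock().unwrap().take();
}

fn main() {
    with_borrow(|v| v.push(42)).expect("not dismissed yet");
    dismiss();
    // After dismissal callers get `Err`, which the TDX paths above discard.
    assert!(with_borrow(|_| ()).is_err());
}

The real accessor builds the boot page table lazily from the current page-table root and keys dismissal off a per-CPU count rather than a single flag; see the boot_pt.rs hunks further below.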

View File

@@ -125,6 +125,11 @@ fn ap_early_entry(local_apic_id: u32) -> ! {
}
crate::arch::irq::enable_local();
// SAFETY: this function is only called once on this AP.
unsafe {
crate::mm::kspace::activate_kernel_page_table();
}
// Mark the AP as started.
let ap_boot_info = AP_BOOT_INFO.get().unwrap();
ap_boot_info

View File

@@ -82,7 +82,6 @@ pub unsafe fn init() {
logger::init();
mm::page::allocator::init();
mm::kspace::init_boot_page_table();
mm::kspace::init_kernel_page_table(mm::init_page_meta());
mm::misc_init();
@@ -93,7 +92,10 @@ pub unsafe fn init() {
bus::init();
mm::kspace::activate_kernel_page_table();
// SAFETY: This function is called only once on the BSP.
unsafe {
mm::kspace::activate_kernel_page_table();
}
arch::irq::enable_local();

View File

@@ -39,7 +39,7 @@
//! 39 bits or 57 bits, the memory space just adjusts proportionally.
use alloc::vec::Vec;
use core::{mem::ManuallyDrop, ops::Range};
use core::ops::Range;
use align_ext::AlignExt;
use log::info;
@@ -52,13 +52,10 @@ use super::{
Page,
},
page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
page_table::{boot_pt::BootPageTable, KernelMode, PageTable},
page_table::{KernelMode, PageTable},
MemoryRegionType, Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE,
};
use crate::{
arch::mm::{PageTableEntry, PagingConsts},
sync::SpinLock,
};
use crate::arch::mm::{PageTableEntry, PagingConsts};
/// The shortest supported address width is 39 bits. And the literal
/// values are written for 48 bits address width. Adjust the values
@@ -101,12 +98,6 @@ pub fn paddr_to_vaddr(pa: Paddr) -> usize {
pa + LINEAR_MAPPING_BASE_VADDR
}
/// The boot page table instance.
///
/// It is used in the initialization phase before [`KERNEL_PAGE_TABLE`] is activated.
/// Since we want dropping the boot page table unsafe, it is wrapped in a [`ManuallyDrop`].
pub static BOOT_PAGE_TABLE: SpinLock<Option<ManuallyDrop<BootPageTable>>> = SpinLock::new(None);
/// The kernel page table instance.
///
/// It manages the kernel mapping of all address spaces by sharing the kernel part. And it
@@ -114,12 +105,6 @@ pub static BOOT_PAGE_TABLE: SpinLock<Option<ManuallyDrop<BootPageTable>>> = Spin
pub static KERNEL_PAGE_TABLE: Once<PageTable<KernelMode, PageTableEntry, PagingConsts>> =
Once::new();
/// Initializes the boot page table.
pub(crate) fn init_boot_page_table() {
let boot_pt = BootPageTable::from_current_pt();
*BOOT_PAGE_TABLE.lock() = Some(ManuallyDrop::new(boot_pt));
}
/// Initializes the kernel page table.
///
/// This function should be called after:
@@ -222,7 +207,12 @@ pub fn init_kernel_page_table(meta_pages: Vec<Page<MetaPageMeta>>) {
KERNEL_PAGE_TABLE.call_once(|| kpt);
}
pub fn activate_kernel_page_table() {
/// Activates the kernel page table.
///
/// # Safety
///
/// This function should only be called once per CPU.
pub unsafe fn activate_kernel_page_table() {
let kpt = KERNEL_PAGE_TABLE
.get()
.expect("The kernel page table is not initialized yet");
@@ -232,8 +222,9 @@ pub fn activate_kernel_page_table() {
crate::arch::mm::tlb_flush_all_including_global();
}
// SAFETY: the boot page table is OK to be dropped now since
// SAFETY: the boot page table is OK to be dismissed now since
// the kernel page table is activated.
let mut boot_pt = BOOT_PAGE_TABLE.lock().take().unwrap();
unsafe { ManuallyDrop::drop(&mut boot_pt) };
unsafe {
crate::mm::page_table::boot_pt::dismiss();
}
}
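
Taken together with the earlier hunks that touch `ap_early_entry` and the `init()` function, `activate_kernel_page_table` now has exactly two call sites, each executed once per CPU, which is what makes the new `# Safety` contract satisfiable. A rough assembly of those call sites (all lines taken from the hunks above, surrounding code elided):

// BSP path, in the `init()` function shown above:
// SAFETY: This function is called only once on the BSP.
unsafe {
    mm::kspace::activate_kernel_page_table();
}

// AP path, in `ap_early_entry()` shown above:
// SAFETY: this function is only called once on this AP.
unsafe {
    crate::mm::kspace::activate_kernel_page_table();
}

Each activation ends by calling boot_pt::dismiss(), so the boot page table is dropped only after the last CPU has switched over to the kernel page table.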

View File

@@ -52,7 +52,8 @@ use super::{allocator, Page};
use crate::{
arch::mm::{PageTableEntry, PagingConsts},
mm::{
kspace::BOOT_PAGE_TABLE, paddr_to_vaddr, page_size, page_table::PageTableEntryTrait,
paddr_to_vaddr, page_size,
page_table::{boot_pt, PageTableEntryTrait},
CachePolicy, Paddr, PageFlags, PageProperty, PagingConstsTrait, PagingLevel,
PrivilegedPageFlags, Vaddr, PAGE_SIZE,
},
@@ -268,20 +269,19 @@ pub(crate) fn init() -> Vec<Page<MetaPageMeta>> {
let num_meta_pages = (num_pages * size_of::<MetaSlot>()).div_ceil(PAGE_SIZE);
let meta_pages = alloc_meta_pages(num_meta_pages);
// Map the metadata pages.
let mut boot_pt_lock = BOOT_PAGE_TABLE.lock();
let boot_pt = boot_pt_lock
.as_mut()
.expect("boot page table not initialized");
for (i, frame_paddr) in meta_pages.iter().enumerate() {
let vaddr = mapping::page_to_meta::<PagingConsts>(0) + i * PAGE_SIZE;
let prop = PageProperty {
flags: PageFlags::RW,
cache: CachePolicy::Writeback,
priv_flags: PrivilegedPageFlags::GLOBAL,
};
// SAFETY: we are doing the metadata mappings for the kernel.
unsafe { boot_pt.map_base_page(vaddr, frame_paddr / PAGE_SIZE, prop) };
}
boot_pt::with_borrow(|boot_pt| {
for (i, frame_paddr) in meta_pages.iter().enumerate() {
let vaddr = mapping::page_to_meta::<PagingConsts>(0) + i * PAGE_SIZE;
let prop = PageProperty {
flags: PageFlags::RW,
cache: CachePolicy::Writeback,
priv_flags: PrivilegedPageFlags::GLOBAL,
};
// SAFETY: we are doing the metadata mappings for the kernel.
unsafe { boot_pt.map_base_page(vaddr, frame_paddr / PAGE_SIZE, prop) };
}
})
.unwrap();
// Now the metadata pages are mapped, we can initialize the metadata.
meta_pages
.into_iter()

View File

@@ -5,19 +5,79 @@
//! in order to initialize the running phase page tables.
use alloc::vec::Vec;
use core::{
result::Result,
sync::atomic::{AtomicU32, Ordering},
};
use super::{pte_index, PageTableEntryTrait};
use crate::{
arch::mm::{PageTableEntry, PagingConsts},
cpu::num_cpus,
mm::{
nr_subpage_per_huge, paddr_to_vaddr, page::allocator::PAGE_ALLOCATOR, PageProperty,
PagingConstsTrait, Vaddr, PAGE_SIZE,
},
sync::SpinLock,
};
type FrameNumber = usize;
/// A simple boot page table for boot stage mapping management.
/// The accessor to the boot page table singleton [`BootPageTable`].
///
/// The user should provide a closure to access the boot page table. The
/// function will acquire the lock and call the closure with a mutable
/// reference to the boot page table as the argument.
///
/// The boot page table will be dropped when there's no CPU activating it.
/// This function will return an [`Err`] if the boot page table is dropped.
pub(crate) fn with_borrow<F>(f: F) -> Result<(), ()>
where
F: FnOnce(&mut BootPageTable),
{
let mut boot_pt = BOOT_PAGE_TABLE.lock();
let dismiss_count = DISMISS_COUNT.load(Ordering::SeqCst);
// This function may be called on the BSP before we can get the number of
// CPUs. So we short-circuit the check if the number of CPUs is zero.
if dismiss_count != 0 && dismiss_count < num_cpus() {
return Err(());
}
// Lazy initialization.
if boot_pt.is_none() {
// SAFETY: This function is called only once.
*boot_pt = Some(unsafe { BootPageTable::from_current_pt() });
}
f(boot_pt.as_mut().unwrap());
Ok(())
}
/// Dismiss the boot page table.
///
/// By calling it on a CPU, the caller claims that the boot page table is no
/// longer needed on this CPU.
///
/// # Safety
///
/// The caller should ensure that:
/// - another legitimate page table is activated on this CPU;
/// - this function should be called only once per CPU;
/// - no [`with_borrow`] calls are performed on this CPU after this dismissal.
pub(crate) unsafe fn dismiss() {
if DISMISS_COUNT.fetch_add(1, Ordering::SeqCst) == num_cpus() - 1 {
BOOT_PAGE_TABLE.lock().take();
}
}
/// The boot page table singleton instance.
static BOOT_PAGE_TABLE: SpinLock<Option<BootPageTable>> = SpinLock::new(None);
/// If this count reaches the number of CPUs, the boot page table will be dropped.
static DISMISS_COUNT: AtomicU32 = AtomicU32::new(0);
/// A simple boot page table singleton for boot stage mapping management.
/// If applicable, the boot page table could track the lifetime of page table
/// frames that are set up by the firmware, loader or the setup code.
pub struct BootPageTable<
@@ -33,8 +93,15 @@ pub struct BootPageTable<
}
impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
/// Creates a new boot page table from the current page table root physical address.
pub fn from_current_pt() -> Self {
/// Creates a new boot page table from the current page table root
/// physical address.
///
/// # Safety
///
/// This function should be called only once in the initialization phase.
/// Otherwise, it would lead to double-drop of the page table frames set up
/// by the firmware, loader or the setup code.
unsafe fn from_current_pt() -> Self {
let root_paddr = crate::arch::mm::current_page_table_paddr();
Self {
root_pt: root_paddr / C::BASE_PAGE_SIZE,
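
The dismissal itself is a "last one out" protocol: every CPU bumps DISMISS_COUNT exactly once, and only the caller whose fetch_add returns num_cpus() - 1 takes the table out of the lock and drops it. A standalone sketch of just that counting logic (hosted Rust with a hypothetical NUM_CPUS constant, not OSTD code):

use std::sync::atomic::{AtomicU32, Ordering};
use std::thread;

const NUM_CPUS: u32 = 4;
static DISMISS_COUNT: AtomicU32 = AtomicU32::new(0);

fn dismiss_on_this_cpu(cpu: u32) {
    // `fetch_add` returns the previous value, so exactly one caller ever
    // observes `NUM_CPUS - 1` and performs the teardown.
    if DISMISS_COUNT.fetch_add(1, Ordering::SeqCst) == NUM_CPUS - 1 {
        println!("cpu {cpu}: last dismisser, dropping the boot page table");
    } else {
        println!("cpu {cpu}: dismissed, table kept for the remaining CPUs");
    }
}

fn main() {
    let handles: Vec<_> = (0..NUM_CPUS)
        .map(|cpu| thread::spawn(move || dismiss_on_this_cpu(cpu)))
        .collect();
    for h in handles {
        h.join().unwrap();
    }
    assert_eq!(DISMISS_COUNT.load(Ordering::SeqCst), NUM_CPUS);
}

In the real dismiss() the teardown is taking the Option out of the BOOT_PAGE_TABLE spin lock, and the once-per-CPU requirement is exactly what the unsafe contract of dismiss() (and, transitively, of activate_kernel_page_table()) demands from its callers.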

View File

@@ -18,7 +18,7 @@ pub use cursor::{Cursor, CursorMut, PageTableItem};
#[cfg(ktest)]
mod test;
pub(in crate::mm) mod boot_pt;
pub(crate) mod boot_pt;
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum PageTableError {