commit 5f05963ee5
parent 92bc8cbbf7
Author:       Zhang Junyang
Committed by: Tate, Hongliang Tian
Date:         2025-03-11 16:57:10 +08:00

Inject a scalable buddy system allocator to OSTD

Co-authored-by: Zhe Tang <tangzh@stu.pku.edu.cn>

27 changed files with 1301 additions and 236 deletions

View File

@ -67,6 +67,13 @@ pub use self::{error::Error, prelude::Result};
#[doc(hidden)]
unsafe fn init() {
arch::enable_cpu_features();
// SAFETY: This function is called only once, before `allocator::init`
// and after memory regions are initialized.
unsafe {
mm::frame::allocator::init_early_allocator();
}
arch::serial::init();
#[cfg(feature = "cvm_guest")]

View File

@ -2,18 +2,18 @@
//! The physical memory allocator.
use core::{alloc::Layout, ops::Range};
use align_ext::AlignExt;
use buddy_system_allocator::FrameAllocator;
use log::info;
use spin::Once;
use super::{meta::AnyFrameMeta, segment::Segment, Frame};
use crate::{
boot::memory_region::MemoryRegionType,
error::Error,
impl_frame_meta_for,
mm::{paddr_to_vaddr, Paddr, PAGE_SIZE},
prelude::*,
sync::SpinLock,
util::range_difference,
};
/// Options for allocating physical memory frames.
@ -52,16 +52,9 @@ impl FrameAllocOptions {
/// Allocates a single frame with additional metadata.
pub fn alloc_frame_with<M: AnyFrameMeta>(&self, metadata: M) -> Result<Frame<M>> {
let frame = FRAME_ALLOCATOR
.get()
.unwrap()
.disable_irq()
.lock()
.alloc(1)
.map(|idx| {
let paddr = idx * PAGE_SIZE;
Frame::from_unused(paddr, metadata).unwrap()
})
let single_layout = Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap();
let frame = alloc_upcall(single_layout)
.map(|paddr| Frame::from_unused(paddr, metadata).unwrap())
.ok_or(Error::NoMemory)?;
if self.zeroed {
@ -93,18 +86,10 @@ impl FrameAllocOptions {
if nframes == 0 {
return Err(Error::InvalidArgs);
}
let segment = FRAME_ALLOCATOR
.get()
.unwrap()
.disable_irq()
.lock()
.alloc(nframes)
let layout = Layout::from_size_align(nframes * PAGE_SIZE, PAGE_SIZE).unwrap();
let segment = alloc_upcall(layout)
.map(|start| {
Segment::from_unused(
start * PAGE_SIZE..start * PAGE_SIZE + nframes * PAGE_SIZE,
metadata_fn,
)
.unwrap()
Segment::from_unused(start..start + nframes * PAGE_SIZE, metadata_fn).unwrap()
})
.ok_or(Error::NoMemory)?;
@ -140,73 +125,236 @@ fn test_alloc_dealloc() {
}
}
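For illustration, not part of this commit's diff: the public allocation API above keeps its shape for callers, with requests forwarded to the registered global frame allocator. A minimal caller sketch, assuming a hypothetical `DemoMeta` metadata type and approximate import paths:

use ostd::{impl_frame_meta_for, mm::FrameAllocOptions, Result};

#[derive(Debug)]
struct DemoMeta;
impl_frame_meta_for!(DemoMeta);

fn grab_frames() -> Result<()> {
    // One zeroed frame carrying `DemoMeta`; internally this goes through
    // the up-call to the global frame allocator.
    let frame = FrameAllocOptions::new().alloc_frame_with(DemoMeta)?;

    // A physically contiguous 4-frame segment, not zeroed.
    let mut options = FrameAllocOptions::new();
    options.zeroed(false);
    let segment = options.alloc_segment_with(4, |_| DemoMeta)?;

    // Dropping the handles returns the frames via `add_free_memory`.
    drop((frame, segment));
    Ok(())
}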
/// FrameAllocator with a counter for allocated memory
pub(in crate::mm) struct CountingFrameAllocator {
allocator: FrameAllocator,
total: usize,
allocated: usize,
/// The trait for the global frame allocator.
///
/// OSTD allows a customized frame allocator via the [`global_frame_allocator`]
/// attribute, which marks a static variable of this type.
///
/// The API mimics the standard Rust allocator API ([`GlobalAlloc`] and
/// [`global_allocator`]). However, this trait is much safer: a double free
/// or freeing in-use memory through this trait only messes up the allocator's
/// state rather than causing undefined behavior.
///
/// Whenever OSTD or other modules need to allocate or deallocate frames via
/// [`FrameAllocOptions`], the requests are forwarded to the global frame
/// allocator. Calling the global allocator directly is discouraged.
///
/// [`global_frame_allocator`]: crate::global_frame_allocator
/// [`GlobalAlloc`]: core::alloc::GlobalAlloc
pub trait GlobalFrameAllocator: Sync {
/// Allocates a contiguous range of frames.
///
/// The caller guarantees that `layout.size()` is aligned to [`PAGE_SIZE`].
///
/// When the allocated memory is no longer in use, OSTD returns it by calling
/// [`GlobalFrameAllocator::add_free_memory`].
fn alloc(&self, layout: Layout) -> Option<Paddr>;
/// Adds a contiguous range of frames to the allocator.
///
/// The caller guarantees that `addr` and `size` are both aligned to
/// [`PAGE_SIZE`]. The added memory can be uninitialized.
///
/// The added memory will never overlap with any memory that is
/// already added, i.e., a frame cannot be added twice without being
/// allocated in between.
///
/// However, if [`GlobalFrameAllocator::alloc`] returns multiple frames,
/// it is possible that some of them are added back before others.
fn add_free_memory(&self, addr: Paddr, size: usize);
}
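For illustration, not part of this commit's diff: a kernel crate could satisfy this trait roughly as sketched below. Only `GlobalFrameAllocator`, the `global_frame_allocator` attribute, `Paddr`, and `Layout` come from OSTD; the toy single-region allocator, the `spin` dependency, and the exact module paths are assumptions. A real implementation would be a buddy system, as the commit title suggests.

use core::{alloc::Layout, ops::Range};

use ostd::mm::{frame::GlobalFrameAllocator, Paddr};

struct OneRegionAllocator {
    // A real buddy system keeps per-order free lists; this toy tracks a
    // single contiguous free range and bumps from its start.
    region: spin::Mutex<Range<Paddr>>,
}

impl GlobalFrameAllocator for OneRegionAllocator {
    fn alloc(&self, layout: Layout) -> Option<Paddr> {
        let mut region = self.region.lock();
        let start = region.start.next_multiple_of(layout.align());
        let end = start.checked_add(layout.size())?;
        if end > region.end {
            return None;
        }
        region.start = end;
        Some(start)
    }

    fn add_free_memory(&self, addr: Paddr, size: usize) {
        // Keep only the largest range in this toy; a buddy system would
        // insert the frames into its free lists instead.
        let mut region = self.region.lock();
        if size > region.end.saturating_sub(region.start) {
            *region = addr..addr + size;
        }
    }
}

// Registering the static makes OSTD's up-calls resolve to it.
#[ostd::global_frame_allocator]
static FRAME_ALLOCATOR: OneRegionAllocator = OneRegionAllocator {
    region: spin::Mutex::new(0..0),
};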
impl CountingFrameAllocator {
pub fn new(allocator: FrameAllocator, total: usize) -> Self {
CountingFrameAllocator {
allocator,
total,
allocated: 0,
}
}
pub fn alloc(&mut self, count: usize) -> Option<usize> {
match self.allocator.alloc(count) {
Some(value) => {
self.allocated += count * PAGE_SIZE;
Some(value)
}
None => None,
}
}
// TODO: this method should be marked unsafe as invalid arguments will mess
// up the underlying allocator.
pub fn dealloc(&mut self, start_frame: usize, count: usize) {
self.allocator.dealloc(start_frame, count);
self.allocated -= count * PAGE_SIZE;
}
pub fn mem_total(&self) -> usize {
self.total
}
pub fn mem_available(&self) -> usize {
self.total - self.allocated
}
extern "Rust" {
/// The global frame allocator's reference exported by
/// [`crate::global_frame_allocator`].
static __GLOBAL_FRAME_ALLOCATOR_REF: &'static dyn GlobalFrameAllocator;
}
pub(in crate::mm) static FRAME_ALLOCATOR: Once<SpinLock<CountingFrameAllocator>> = Once::new();
/// Directly allocates a contiguous range of frames.
fn alloc_upcall(layout: core::alloc::Layout) -> Option<Paddr> {
// SAFETY: We believe that the global frame allocator is set up correctly
// with the `global_frame_allocator` attribute. As long as the allocator
// implementation uses only safe code, the up-call is safe.
unsafe { __GLOBAL_FRAME_ALLOCATOR_REF.alloc(layout) }
}
pub(crate) fn init() {
/// Up-call to add a range of frames to the global frame allocator.
///
/// It returns the frames to the allocator for further use. This should be
/// done after the metadata is released, to avoid re-allocation before the
/// metadata is reset.
pub(super) fn add_free_memory_upcall(addr: Paddr, size: usize) {
// SAFETY: We believe that the global frame allocator is set up correctly
// with the `global_frame_allocator` attribute. As long as the allocator
// implementation uses only safe code, the up-call is safe.
unsafe { __GLOBAL_FRAME_ALLOCATOR_REF.add_free_memory(addr, size) }
}
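For illustration, not part of this commit's diff: the `extern "Rust"` symbol above is what ties the attribute to these up-calls. The proc-macro expansion is not shown in this commit; presumably it emits something along the lines of this hypothetical sketch so that the declaration resolves at link time:

// Hypothetical expansion of `#[global_frame_allocator]` applied to
// `static FRAME_ALLOCATOR: OneRegionAllocator = ...;` in the kernel crate
// (module path approximate).
#[no_mangle]
static __GLOBAL_FRAME_ALLOCATOR_REF: &'static dyn ostd::mm::frame::GlobalFrameAllocator =
    &FRAME_ALLOCATOR;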
/// Initializes the global frame allocator.
///
/// It adds the usable frames to the global frame allocator. Calling it
/// multiple times is not safe.
///
/// # Safety
///
/// This function should be called only once.
pub(crate) unsafe fn init() {
let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions;
let mut total: usize = 0;
let mut allocator = FrameAllocator::<32>::new();
// Retire the early allocator.
let early_allocator = EARLY_ALLOCATOR.lock().take().unwrap();
let (range_1, range_2) = early_allocator.allocated_regions();
for region in regions.iter() {
if region.typ() == MemoryRegionType::Usable {
// Make the memory region page-aligned, and skip if it is too small.
let start = region.base().align_up(PAGE_SIZE) / PAGE_SIZE;
let region_end = region.base().checked_add(region.len()).unwrap();
let end = region_end.align_down(PAGE_SIZE) / PAGE_SIZE;
if end <= start {
continue;
}
debug_assert!(region.base() % PAGE_SIZE == 0);
debug_assert!(region.len() % PAGE_SIZE == 0);
// Add global free pages to the frame allocator.
allocator.add_frame(start, end);
total += (end - start) * PAGE_SIZE;
info!(
"Found usable region, start:{:x}, end:{:x}",
region.base(),
region.base() + region.len()
);
// Truncate the early allocated frames if there is an overlap.
for r1 in range_difference(&(region.base()..region.end()), &range_1) {
for r2 in range_difference(&r1, &range_2) {
log::info!("Adding free frames to the allocator: {:x?}", r2);
add_free_memory_upcall(r2.start, r2.len());
}
}
}
}
let counting_allocator = CountingFrameAllocator::new(allocator, total);
FRAME_ALLOCATOR.call_once(|| SpinLock::new(counting_allocator));
}
/// An allocator in the early boot phase when frame metadata is not available.
pub(super) struct EarlyFrameAllocator {
// We need to allocate from under 4G first since the linear mapping for
// the higher region is not constructed yet.
under_4g_range: Range<Paddr>,
under_4g_end: Paddr,
// Sometimes memory below 4G is not enough for the early phase. This, if not
// `0..0`, is the largest usable region above 4G.
max_range: Range<Paddr>,
max_end: Paddr,
}
/// The global frame allocator in the early boot phase.
///
/// It is used to allocate frames before the frame metadata is initialized.
/// The allocated frames are not tracked by the frame metadata. After the
/// metadata is initialized with [`super::meta::init`], the frames are tracked
/// with metadata and the early allocator is no longer used.
///
/// This is protected by a [`spin::Mutex`] rather than [`crate::sync::SpinLock`]
/// since the latter uses CPU-local storage, which isn't available in the early
/// boot phase. So we must make sure that no interrupts are enabled when using
/// this allocator.
pub(super) static EARLY_ALLOCATOR: spin::Mutex<Option<EarlyFrameAllocator>> =
spin::Mutex::new(None);
impl EarlyFrameAllocator {
/// Creates a new early frame allocator.
///
/// It uses at most 2 regions: the first is the largest usable region below
/// 4 GiB; the other is the largest usable region above 4 GiB, which is only
/// usable after the linear mapping is constructed.
pub fn new() -> Self {
let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions;
let mut under_4g_range = 0..0;
let mut max_range = 0..0;
for region in regions.iter() {
if region.typ() != MemoryRegionType::Usable {
continue;
}
const PADDR4G: Paddr = 0x1_0000_0000;
if region.base() < PADDR4G {
let range = region.base()..region.end().min(PADDR4G);
if range.len() > under_4g_range.len() {
under_4g_range = range;
}
}
if region.end() >= PADDR4G {
let range = region.base().max(PADDR4G)..region.end();
if range.len() > max_range.len() {
max_range = range;
}
}
}
log::debug!(
"Early frame allocator (below 4G) at: {:#x?}",
under_4g_range
);
if !max_range.is_empty() {
log::debug!("Early frame allocator (above 4G) at: {:#x?}", max_range);
}
Self {
under_4g_range: under_4g_range.clone(),
under_4g_end: under_4g_range.start,
max_range: max_range.clone(),
max_end: max_range.start,
}
}
/// Allocates a contiguous range of frames.
pub fn alloc(&mut self, layout: Layout) -> Option<Paddr> {
let size = layout.size().align_up(PAGE_SIZE);
let allocated = self.under_4g_end.align_up(layout.align());
if allocated + size <= self.under_4g_range.end {
// Allocated below 4G.
self.under_4g_end = allocated + size;
Some(allocated)
} else {
// Try above 4G.
let allocated = self.max_end.align_up(layout.align());
if allocated + size <= self.max_range.end {
self.max_end = allocated + size;
Some(allocated)
} else {
None
}
}
}
pub(super) fn allocated_regions(&self) -> (Range<Paddr>, Range<Paddr>) {
(
self.under_4g_range.start..self.under_4g_end,
self.max_range.start..self.max_end,
)
}
}
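As a concrete illustration of the bump behavior above (numbers hypothetical, not part of this commit's diff): if the largest usable region below 4 GiB is 0x20_0000..0x100_0000, the first page-aligned 4 KiB request returns 0x20_0000 and advances `under_4g_end` to 0x20_1000; a following 8 KiB request returns 0x20_1000 and advances it to 0x20_3000. Only when a request no longer fits below 4 GiB does allocation fall back to bumping within `max_range`, the largest region above 4 GiB.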
/// Metadata for frames allocated in the early boot phase.
///
/// Frames allocated with [`early_alloc`] are not immediately tracked with
/// frame metadata. But [`super::meta::init`] will track them later.
#[derive(Debug)]
pub(crate) struct EarlyAllocatedFrameMeta;
impl_frame_meta_for!(EarlyAllocatedFrameMeta);
/// Allocates a contiguous range of frames in the early boot phase.
///
/// The early-allocated frames are not reclaimable until the metadata is
/// initialized by [`super::meta::init`]. After that, [`Frame::from_raw`] can
/// be used to free the frames.
///
/// # Panics
///
/// This function panics if:
/// - it is called before [`init_early_allocator`], or
/// - it is called after [`init`].
pub(crate) fn early_alloc(layout: Layout) -> Option<Paddr> {
let mut early_allocator = EARLY_ALLOCATOR.lock();
early_allocator.as_mut().unwrap().alloc(layout)
}
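For illustration, not part of this commit's diff: inside OSTD, the intended lifecycle of an early allocation looks roughly like the sketch below; step 3 mirrors what `boot_pt::dismiss` does later in this commit. The function is hypothetical, and in reality the three steps happen in different boot phases rather than in one function.

use core::alloc::Layout;

fn early_page_lifecycle() {
    // 1. Before frame metadata exists: bump-allocate one page.
    let paddr = early_alloc(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap())
        .expect("early allocator exhausted");

    // 2. `meta::init` later covers all early-allocated ranges with
    //    `EarlyAllocatedFrameMeta` and leaks the handles (`ManuallyDrop`),
    //    so each such page keeps one outstanding reference.

    // 3. Once the page is no longer needed and metadata is initialized,
    //    re-take that reference and drop it; the drop path calls
    //    `add_free_memory_upcall`, returning the page to the global allocator.
    // SAFETY (assumed): `paddr` was produced by `early_alloc`, metadata is
    // initialized, and no other user of the page remains.
    drop(unsafe { Frame::<EarlyAllocatedFrameMeta>::from_raw(paddr) });
}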
/// Initializes the early frame allocator.
///
/// [`early_alloc`] can be used only after this initialization. After [`init`],
/// the early allocator is retired and [`early_alloc`] must no longer be used.
///
/// # Safety
///
/// This function should be called only once after the memory regions are ready.
pub(crate) unsafe fn init_early_allocator() {
let mut early_allocator = EARLY_ALLOCATOR.lock();
*early_allocator = Some(EarlyFrameAllocator::new());
}

View File

@ -39,10 +39,11 @@ pub(crate) mod mapping {
}
use core::{
alloc::Layout,
any::Any,
cell::UnsafeCell,
fmt::Debug,
mem::{size_of, MaybeUninit},
mem::{size_of, ManuallyDrop, MaybeUninit},
result::Result,
sync::atomic::{AtomicU64, Ordering},
};
@ -50,16 +51,19 @@ use core::{
use align_ext::AlignExt;
use log::info;
use super::{allocator, Segment};
use crate::{
arch::mm::PagingConsts,
const_assert,
mm::{
kspace::LINEAR_MAPPING_BASE_VADDR, paddr_to_vaddr, page_size, page_table::boot_pt,
CachePolicy, Infallible, Paddr, PageFlags, PageProperty, PrivilegedPageFlags, Vaddr,
VmReader, PAGE_SIZE,
frame::allocator::{self, EarlyAllocatedFrameMeta},
kspace::LINEAR_MAPPING_BASE_VADDR,
paddr_to_vaddr, page_size,
page_table::boot_pt,
CachePolicy, Infallible, Paddr, PageFlags, PageProperty, PrivilegedPageFlags, Segment,
Vaddr, VmReader, PAGE_SIZE,
},
panic::abort,
util::range_difference,
};
/// The maximum number of bytes of the metadata of a frame.
@ -383,16 +387,6 @@ impl MetaSlot {
// `Release` pairs with the `Acquire` in `Frame::from_unused` and ensures
// `drop_meta_in_place` won't be reordered after this memory store.
self.ref_count.store(REF_COUNT_UNUSED, Ordering::Release);
// Deallocate the frame.
// It would return the frame to the allocator for further use. This would be done
// after the release of the metadata to avoid re-allocation before the metadata
// is reset.
allocator::FRAME_ALLOCATOR
.get()
.unwrap()
.lock()
.dealloc(self.frame_paddr() / PAGE_SIZE, 1);
}
/// Drops the metadata of a slot in place.
@ -460,8 +454,6 @@ pub(crate) unsafe fn init() -> Segment<MetaPageMeta> {
add_temp_linear_mapping(max_paddr);
super::MAX_PADDR.store(max_paddr, Ordering::Relaxed);
let tot_nr_frames = max_paddr / page_size::<PagingConsts>(1);
let (nr_meta_pages, meta_pages) = alloc_meta_frames(tot_nr_frames);
@ -482,10 +474,33 @@ pub(crate) unsafe fn init() -> Segment<MetaPageMeta> {
.unwrap();
// Now the metadata frames are mapped, we can initialize the metadata.
Segment::from_unused(meta_pages..meta_pages + nr_meta_pages * PAGE_SIZE, |_| {
MetaPageMeta {}
})
.unwrap()
super::MAX_PADDR.store(max_paddr, Ordering::Relaxed);
let meta_page_range = meta_pages..meta_pages + nr_meta_pages * PAGE_SIZE;
let (range_1, range_2) = allocator::EARLY_ALLOCATOR
.lock()
.as_ref()
.unwrap()
.allocated_regions();
for r in range_difference(&range_1, &meta_page_range) {
let early_seg = Segment::from_unused(r, |_| EarlyAllocatedFrameMeta).unwrap();
let _ = ManuallyDrop::new(early_seg);
}
for r in range_difference(&range_2, &meta_page_range) {
let early_seg = Segment::from_unused(r, |_| EarlyAllocatedFrameMeta).unwrap();
let _ = ManuallyDrop::new(early_seg);
}
Segment::from_unused(meta_page_range, |_| MetaPageMeta {}).unwrap()
}
/// Returns whether the global frame allocator is initialized.
pub(in crate::mm) fn is_initialized() -> bool {
// `init` sets it with relaxed ordering somewhere in the middle. But due
// to the safety requirement of the `init` function, we can assume that
// there are no race conditions.
super::MAX_PADDR.load(Ordering::Relaxed) != 0
}
fn alloc_meta_frames(tot_nr_frames: usize) -> (usize, Paddr) {
@ -493,13 +508,10 @@ fn alloc_meta_frames(tot_nr_frames: usize) -> (usize, Paddr) {
.checked_mul(size_of::<MetaSlot>())
.unwrap()
.div_ceil(PAGE_SIZE);
let start_paddr = allocator::FRAME_ALLOCATOR
.get()
.unwrap()
.lock()
.alloc(nr_meta_pages)
.unwrap()
* PAGE_SIZE;
let start_paddr = allocator::early_alloc(
Layout::from_size_align(nr_meta_pages * PAGE_SIZE, PAGE_SIZE).unwrap(),
)
.unwrap();
let slots = paddr_to_vaddr(start_paddr) as *mut MetaSlot;
@ -523,14 +535,6 @@ fn alloc_meta_frames(tot_nr_frames: usize) -> (usize, Paddr) {
(nr_meta_pages, start_paddr)
}
/// Returns whether the global frame allocator is initialized.
pub(in crate::mm) fn is_initialized() -> bool {
// `init` sets it somewhere in the middle. But due to the safety
// requirement of the `init` function, we can assume that there
// is no race condition.
super::MAX_PADDR.load(Ordering::Relaxed) != 0
}
/// Adds a temporary linear mapping for the metadata frames.
///
/// We only assume boot page table to contain 4G linear mapping. Thus if the

View File

@ -44,6 +44,7 @@ use core::{
sync::atomic::{AtomicUsize, Ordering},
};
pub use allocator::GlobalFrameAllocator;
use meta::{mapping, AnyFrameMeta, GetFrameError, MetaSlot, REF_COUNT_UNUSED};
pub use segment::Segment;
use untyped::{AnyUFrameMeta, UFrame};
@ -220,6 +221,8 @@ impl<M: AnyFrameMeta + ?Sized> Drop for Frame<M> {
// SAFETY: this is the last reference and is about to be dropped.
unsafe { self.slot().drop_last_in_place() };
allocator::add_free_memory_upcall(self.start_paddr(), PAGE_SIZE);
}
}
}

View File

@ -99,6 +99,26 @@ impl<M: AnyFrameMeta + ?Sized> UniqueFrame<M> {
unsafe { &mut *self.slot().dyn_meta_ptr() }
}
/// Resets the frame to unused without up-calling the allocator.
///
/// This is solely useful for the allocator implementation/testing and
/// is highly experimental. Usage of this function is discouraged.
///
/// Using this function outside the allocator would actually leak the frame,
/// since the allocator would not be aware of it.
//
// FIXME: We may have a better `Segment` and `UniqueSegment` design to
// allow the allocator to hold the ownership of all the frames in a chunk
// instead of the head. Then this weird public API can be `#[cfg(ktest)]`.
pub fn reset_as_unused(self) {
let this = ManuallyDrop::new(self);
this.slot().ref_count.store(0, Ordering::Release);
// SAFETY: We are the sole owner and the reference count is 0.
// The slot is initialized.
unsafe { this.slot().drop_last_in_place() };
}
/// Converts this frame into a raw physical address.
pub(crate) fn into_raw(self) -> Paddr {
let this = ManuallyDrop::new(self);
@ -134,6 +154,8 @@ impl<M: AnyFrameMeta + ?Sized> Drop for UniqueFrame<M> {
// SAFETY: We are the sole owner and the reference count is 0.
// The slot is initialized.
unsafe { self.slot().drop_last_in_place() };
super::allocator::add_free_memory_upcall(self.start_paddr(), PAGE_SIZE);
}
}

View File

@ -2,7 +2,10 @@
mod slab_allocator;
use core::alloc::{GlobalAlloc, Layout};
use core::{
alloc::{GlobalAlloc, Layout},
mem::ManuallyDrop,
};
use align_ext::AlignExt;
use log::debug;
@ -11,11 +14,11 @@ use spin::Once;
use super::paddr_to_vaddr;
use crate::{
mm::{frame::allocator::FRAME_ALLOCATOR, PAGE_SIZE},
impl_frame_meta_for,
mm::{FrameAllocOptions, PAGE_SIZE},
prelude::*,
sync::SpinLock,
trap::disable_local,
Error,
};
#[global_allocator]
@ -49,6 +52,12 @@ struct LockedHeapWithRescue {
heap: Once<SpinLock<Heap>>,
}
/// The metadata for the kernel heap frames.
#[derive(Debug)]
pub struct KernelHeapMeta;
impl_frame_meta_for!(KernelHeapMeta);
impl LockedHeapWithRescue {
/// Creates a new heap.
pub const fn new() -> Self {
@ -94,22 +103,26 @@ impl LockedHeapWithRescue {
};
let allocation_start = {
let mut page_allocator = FRAME_ALLOCATOR.get().unwrap().lock();
if num_frames >= MIN_NUM_FRAMES {
page_allocator.alloc(num_frames).ok_or(Error::NoMemory)?
let mut options = FrameAllocOptions::new();
options.zeroed(false);
let segment = if num_frames >= MIN_NUM_FRAMES {
options
.alloc_segment_with(num_frames, |_| KernelHeapMeta)
.unwrap()
} else {
match page_allocator.alloc(MIN_NUM_FRAMES) {
None => page_allocator.alloc(num_frames).ok_or(Error::NoMemory)?,
Some(start) => {
match options.alloc_segment_with(MIN_NUM_FRAMES, |_| KernelHeapMeta) {
Ok(seg) => {
num_frames = MIN_NUM_FRAMES;
start
seg
}
Err(_) => options.alloc_segment_with(num_frames, |_| KernelHeapMeta)?,
}
}
};
let paddr = segment.start_paddr();
let _ = ManuallyDrop::new(segment);
paddr
};
// FIXME: the alloc function internally allocates heap memory (inside the frame allocator).
// So if the heap has nearly run out, allocating frames will fail too.
let vaddr = paddr_to_vaddr(allocation_start * PAGE_SIZE);
let vaddr = paddr_to_vaddr(allocation_start);
// SAFETY: the frame is allocated from the frame allocator and is never deallocated,
// so the address is always valid.

View File

@ -16,7 +16,6 @@ pub(crate) mod kspace;
mod offset;
pub(crate) mod page_prop;
pub(crate) mod page_table;
pub mod stat;
pub mod tlb;
pub mod vm_space;

View File

@ -5,6 +5,7 @@
//! in order to initialize the running phase page tables.
use core::{
alloc::Layout,
result::Result,
sync::atomic::{AtomicU32, Ordering},
};
@ -15,7 +16,11 @@ use crate::{
cpu::num_cpus,
cpu_local_cell,
mm::{
frame::allocator::FRAME_ALLOCATOR, nr_subpage_per_huge, paddr_to_vaddr, Paddr, PageFlags,
frame::{
self,
allocator::{self, EarlyAllocatedFrameMeta},
},
nr_subpage_per_huge, paddr_to_vaddr, Frame, FrameAllocOptions, Paddr, PageFlags,
PageProperty, PagingConstsTrait, PagingLevel, Vaddr, PAGE_SIZE,
},
sync::SpinLock,
@ -62,13 +67,27 @@ where
/// The caller should ensure that:
/// - another legitimate page table is activated on this CPU;
/// - this function should be called only once per CPU;
/// - no [`with`] calls are performed on this CPU after this dismissal;
/// - no [`with`] calls are performed on this CPU after the activation of
/// another page table and before this dismissal.
/// - no [`with_borrow`] calls are performed on this CPU after this dismissal;
/// - no [`with_borrow`] calls are performed on this CPU after the activation
/// of another page table and before this dismissal.
pub(crate) unsafe fn dismiss() {
IS_DISMISSED.store(true);
if DISMISS_COUNT.fetch_add(1, Ordering::SeqCst) as usize == num_cpus() - 1 {
BOOT_PAGE_TABLE.lock().take();
let boot_pt = BOOT_PAGE_TABLE.lock().take().unwrap();
dfs_walk_on_leave::<PageTableEntry, PagingConsts>(
boot_pt.root_pt,
PagingConsts::NR_LEVELS,
&mut |pte| {
if !pte.prop().flags.contains(PTE_POINTS_TO_FIRMWARE_PT) {
// SAFETY: The pointed frame is allocated and forgotten with `into_raw`.
drop(unsafe { Frame::<EarlyAllocatedFrameMeta>::from_raw(pte.paddr()) })
}
// Firmware provided page tables may be a DAG instead of a tree.
// Clear it to avoid double-free when we meet it the second time.
*pte = PageTableEntry::new_absent();
},
);
}
}
@ -97,6 +116,13 @@ pub(crate) struct BootPageTable<
_pretend_to_use: core::marker::PhantomData<(E, C)>,
}
// We use two extra available bits in the boot PT for memory management.
//
// The first available bit is used to differentiate firmware page tables from
// the page tables allocated here. The second is for identifying double-visits
// when walking the page tables since the PT can be a DAG.
const PTE_POINTS_TO_FIRMWARE_PT: PageFlags = PageFlags::AVAIL1;
impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
/// Creates a new boot page table from the current page table root
/// physical address.
@ -108,15 +134,13 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
/// by the firmware, loader or the setup code.
unsafe fn from_current_pt() -> Self {
let root_pt = crate::arch::mm::current_page_table_paddr() / C::BASE_PAGE_SIZE;
// Make sure the first available bit is not set for firmware page tables.
// Mark all existing entries as pointing to firmware page tables so that
// their frames are not freed when the boot page table is retired.
dfs_walk_on_leave::<E, C>(root_pt, C::NR_LEVELS, &mut |pte: &mut E| {
let prop = pte.prop();
if prop.flags.contains(PageFlags::AVAIL1) {
pte.set_prop(PageProperty::new(
prop.flags - PageFlags::AVAIL1,
prop.cache,
));
}
pte.set_prop(PageProperty::new(
prop.flags | PTE_POINTS_TO_FIRMWARE_PT,
prop.cache,
));
});
Self {
root_pt,
@ -230,17 +254,26 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
}
fn alloc_child(&mut self) -> E {
let frame = FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap();
let frame_paddr = if frame::meta::is_initialized() {
let frame = FrameAllocOptions::new()
.zeroed(false)
.alloc_frame_with(EarlyAllocatedFrameMeta)
.unwrap();
frame.into_raw()
} else {
allocator::early_alloc(
Layout::from_size_align(C::BASE_PAGE_SIZE, C::BASE_PAGE_SIZE).unwrap(),
)
.unwrap()
};
// Zero it out.
let vaddr = paddr_to_vaddr(frame * PAGE_SIZE) as *mut u8;
let vaddr = paddr_to_vaddr(frame_paddr) as *mut u8;
unsafe { core::ptr::write_bytes(vaddr, 0, PAGE_SIZE) };
let mut pte = E::new_pt(frame * C::BASE_PAGE_SIZE);
let mut pte = E::new_pt(frame_paddr);
let prop = pte.prop();
pte.set_prop(PageProperty::new(
prop.flags | PageFlags::AVAIL1,
prop.cache,
));
pte.set_prop(PageProperty::new(prop.flags, prop.cache));
pte
}
@ -267,20 +300,6 @@ fn dfs_walk_on_leave<E: PageTableEntryTrait, C: PagingConstsTrait>(
}
}
impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for BootPageTable<E, C> {
fn drop(&mut self) {
dfs_walk_on_leave::<E, C>(self.root_pt, C::NR_LEVELS, &mut |pte| {
if pte.prop().flags.contains(PageFlags::AVAIL1) {
let pt = pte.paddr() / C::BASE_PAGE_SIZE;
FRAME_ALLOCATOR.get().unwrap().lock().dealloc(pt, 1);
}
// Firmware provided page tables may be a DAG instead of a tree.
// Clear it to avoid double-free when we meet it the second time.
*pte = E::new_absent();
});
}
}
#[cfg(ktest)]
use crate::prelude::*;

View File

@ -1,21 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! APIs for memory statistics.
use crate::mm::frame::allocator::FRAME_ALLOCATOR;
/// Total memory available for any usages in the system (in bytes).
///
/// It is only slightly less than the total physical memory of the system in
/// most cases. For example, bad memory, kernel statically-allocated memory,
/// and firmware-reserved memory do not count.
pub fn mem_total() -> usize {
FRAME_ALLOCATOR.get().unwrap().lock().mem_total()
}
/// Current readily available memory (in bytes).
///
/// Such memory can be directly used for allocation without reclaiming.
pub fn mem_available() -> usize {
FRAME_ALLOCATOR.get().unwrap().lock().mem_available()
}
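For illustration, not part of this commit's diff: with `stat.rs` and `CountingFrameAllocator` removed, memory statistics become the job of whoever implements `GlobalFrameAllocator`. A hypothetical counting wrapper is sketched below; it assumes the kernel can flip a `booted` flag once boot-time memory has been added, so that later `add_free_memory` calls count as frees rather than as new memory.

use core::{
    alloc::Layout,
    sync::atomic::{AtomicBool, AtomicUsize, Ordering},
};

use ostd::mm::{frame::GlobalFrameAllocator, Paddr};

struct CountingAllocator<A> {
    inner: A,
    booted: AtomicBool, // flipped by the kernel once boot-time memory is added
    total: AtomicUsize,
    allocated: AtomicUsize,
}

impl<A> CountingAllocator<A> {
    fn mem_total(&self) -> usize {
        self.total.load(Ordering::Relaxed)
    }

    fn mem_available(&self) -> usize {
        self.mem_total() - self.allocated.load(Ordering::Relaxed)
    }
}

impl<A: GlobalFrameAllocator> GlobalFrameAllocator for CountingAllocator<A> {
    fn alloc(&self, layout: Layout) -> Option<Paddr> {
        let paddr = self.inner.alloc(layout)?;
        self.allocated.fetch_add(layout.size(), Ordering::Relaxed);
        Some(paddr)
    }

    fn add_free_memory(&self, addr: Paddr, size: usize) {
        if self.booted.load(Ordering::Relaxed) {
            // A previously allocated range is coming back.
            self.allocated.fetch_sub(size, Ordering::Relaxed);
        } else {
            // Boot-time usable memory is being registered for the first time.
            self.total.fetch_add(size, Ordering::Relaxed);
        }
        self.inner.add_free_memory(addr, size);
    }
}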