Inject a scalable buddy system allocator into OSTD

Co-authored-by: Zhe Tang <tangzh@stu.pku.edu.cn>
Zhang Junyang
2025-03-11 16:57:10 +08:00
committed by Tate, Hongliang Tian
parent 92bc8cbbf7
commit 5f05963ee5
27 changed files with 1301 additions and 236 deletions

Cargo.lock

@@ -211,6 +211,7 @@ dependencies = [
  "libflate",
  "log",
  "lru",
+ "osdk-frame-allocator",
  "ostd",
  "paste",
  "rand",
@@ -1254,6 +1255,14 @@ version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381"
 
+[[package]]
+name = "osdk-frame-allocator"
+version = "0.1.0"
+dependencies = [
+ "log",
+ "ostd",
+]
+
 [[package]]
 name = "osdk-test-kernel"
 version = "0.11.3"


@@ -1,6 +1,7 @@
 [workspace]
 resolver = "2"
 members = [
+    "osdk/deps/frame-allocator",
     "osdk/deps/test-kernel",
     "ostd",
     "ostd/libs/align_ext",


@@ -144,6 +144,7 @@ NON_OSDK_CRATES := \
 # In contrast, OSDK crates depend on OSTD (or being `ostd` itself)
 # and need to be built or tested with OSDK.
 OSDK_CRATES := \
+	osdk/deps/frame-allocator \
 	osdk/deps/test-kernel \
 	ostd \
 	ostd/libs/linux-bzimage/setup \


@@ -19,6 +19,7 @@ aster-virtio = { path = "comps/virtio" }
 aster-rights = { path = "libs/aster-rights" }
 component = { path = "libs/comp-sys/component" }
 controlled = { path = "libs/comp-sys/controlled" }
+osdk-frame-allocator = { path = "../osdk/deps/frame-allocator" }
 ostd = { path = "../ostd" }
 typeflags = { path = "libs/typeflags" }
 typeflags-util = { path = "libs/typeflags-util" }


@@ -8,8 +8,6 @@
 use alloc::format;
 
-use ostd::mm::stat;
-
 use crate::{
     fs::{
         procfs::template::{FileOps, ProcFileBuilder},
@@ -27,21 +25,17 @@ impl MemInfoFileOps {
     }
 }
 
-/// Total memory in the entire system in bytes.
-fn mem_total() -> usize {
-    stat::mem_total()
-}
-
-/// An estimation of how much memory is available for starting new
-/// applications, without disk operations.
-fn mem_available() -> usize {
-    stat::mem_available()
-}
-
 impl FileOps for MemInfoFileOps {
     fn data(&self) -> Result<Vec<u8>> {
-        let total = mem_total() / 1024;
-        let available = mem_available() / 1024;
+        // The total amount of physical memory available to the system.
+        let total = crate::vm::mem_total();
+        // An estimation of how much memory is available for starting new
+        // applications, without disk operations.
+        let available = osdk_frame_allocator::load_total_free_size();
+        // Convert the values to KiB.
+        let total = total / 1024;
+        let available = available / 1024;
         let free = total - available;
         let output = format!(
             "MemTotal:\t{} kB\nMemFree:\t{} kB\nMemAvailable:\t{} kB\n",


@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0
 
 use aster_time::read_monotonic_time;
-use ostd::mm::stat::{mem_available, mem_total};
 
 use super::SyscallReturn;
 use crate::prelude::*;
@@ -26,8 +25,8 @@ pub struct sysinfo {
 pub fn sys_sysinfo(sysinfo_addr: Vaddr, ctx: &Context) -> Result<SyscallReturn> {
     let info = sysinfo {
         uptime: read_monotonic_time().as_secs() as i64,
-        totalram: mem_total() as u64,
-        freeram: mem_available() as u64,
+        totalram: crate::vm::mem_total() as u64,
+        freeram: osdk_frame_allocator::load_total_free_size() as u64,
         ..Default::default() // TODO: add other system information
     };
     ctx.user_space().write_val(sysinfo_addr, &info)?;


@@ -16,8 +16,27 @@
 //! In Asterinas, VMARs and VMOs, as well as other capabilities, are implemented
 //! as zero-cost capabilities.
 
+use osdk_frame_allocator::FrameAllocator;
+
 pub mod page_fault_handler;
 pub mod perms;
 pub mod util;
 pub mod vmar;
 pub mod vmo;
+
+#[ostd::global_frame_allocator]
+static FRAME_ALLOCATOR: FrameAllocator = FrameAllocator;
+
+/// Total physical memory in the entire system in bytes.
+pub fn mem_total() -> usize {
+    use ostd::boot::{boot_info, memory_region::MemoryRegionType};
+
+    let regions = &boot_info().memory_regions;
+    let total = regions
+        .iter()
+        .filter(|region| region.typ() == MemoryRegionType::Usable)
+        .map(|region| region.len())
+        .sum::<usize>();
+
+    total
+}


@ -0,0 +1,11 @@
[package]
name = "osdk-frame-allocator"
version = "0.1.0"
edition = "2021"
[dependencies]
log = "0.4"
ostd = { version = "0.11.1", path = "../../../ostd" }
[lints]
workspace = true


@ -0,0 +1,293 @@
// SPDX-License-Identifier: MPL-2.0
use core::{
alloc::Layout,
cell::RefCell,
ops::DerefMut,
sync::atomic::{AtomicUsize, Ordering},
};
use ostd::{
cpu::{all_cpus, PinCurrentCpu},
cpu_local,
mm::{frame::GlobalFrameAllocator, Paddr, PAGE_SIZE},
sync::{LocalIrqDisabled, SpinLock},
trap,
};
use crate::chunk::{size_of_order, BuddyOrder};
use super::set::BuddySet;
/// The global free buddies.
static GLOBAL_POOL: SpinLock<BuddySet<MAX_BUDDY_ORDER>, LocalIrqDisabled> =
SpinLock::new(BuddySet::new_empty());
static GLOBAL_POOL_SIZE: AtomicUsize = AtomicUsize::new(0);
// CPU-local free buddies.
cpu_local! {
static LOCAL_POOL: RefCell<BuddySet<MAX_LOCAL_BUDDY_ORDER>> = RefCell::new(BuddySet::new_empty());
static LOCAL_POOL_SIZE: AtomicUsize = AtomicUsize::new(0);
}
/// Maximum supported order of the buddy system.
///
/// In other words, it is the number of size classes of free blocks. It
/// determines the maximum size of a single allocation.
///
/// A maximum buddy order of 32 supports chunks of up to 4 KiB * 2^31 = 8 TiB.
const MAX_BUDDY_ORDER: BuddyOrder = 32;
/// Maximum supported order of the buddy system for CPU-local buddy system.
///
/// Since large blocks are rarely allocated, caching such blocks would lead
/// to excessive fragmentation.
///
/// Lock guards are also allocated on the stack, so limiting the local order
/// also limits the stack usage on common paths.
///
/// A maximum local buddy order of 18 supports chunks of up to
/// 4 KiB * 2^17 = 512 MiB.
const MAX_LOCAL_BUDDY_ORDER: BuddyOrder = 18;
/// The global frame allocator provided by OSDK.
///
/// It is a singleton that provides frame allocation for the kernel. If
/// multiple instances of this struct are created, all the member functions
/// will eventually access the same allocator.
pub struct FrameAllocator;
impl GlobalFrameAllocator for FrameAllocator {
fn alloc(&self, layout: Layout) -> Option<Paddr> {
let irq_guard = trap::disable_local();
let local_pool_cell = LOCAL_POOL.get_with(&irq_guard);
let mut local_pool = local_pool_cell.borrow_mut();
let size_order = greater_order_of(layout.size());
let align_order = greater_order_of(layout.align());
let order = size_order.max(align_order);
let mut chunk_addr = None;
if order < MAX_LOCAL_BUDDY_ORDER {
chunk_addr = local_pool.alloc_chunk(order);
}
// Fall back to the global free lists if the local free lists are empty.
if chunk_addr.is_none() {
chunk_addr = alloc_from_global_pool(order);
}
// TODO: Under memory pressure, the global pool may not be enough. We may need
// to merge all buddy chunks from the local pools into the global pool and
// try again.
// If the alignment order is larger than the size order, we need to split
// the chunk and return the rest part back to the free lists.
if align_order > size_order {
if let Some(chunk_addr) = chunk_addr {
let addr = chunk_addr + size_of_order(size_order);
let size = size_of_order(align_order) - size_of_order(size_order);
self.add_free_memory(addr, size);
}
} else {
balancing::balance(local_pool.deref_mut());
}
LOCAL_POOL_SIZE
.get_on_cpu(irq_guard.current_cpu())
.store(local_pool.total_size(), Ordering::Relaxed);
chunk_addr
}
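// Illustrative example (hypothetical request): a layout with a size of 2 pages
// and an alignment of 4 pages gives `size_order = 1` and `align_order = 2`, so
// an order-2 chunk is taken; since the alignment order exceeds the size order,
// the trailing `size_of_order(2) - size_of_order(1)` (2 pages) is immediately
// handed back through `self.add_free_memory` above.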
fn add_free_memory(&self, mut addr: Paddr, mut size: usize) {
let irq_guard = trap::disable_local();
let local_pool_cell = LOCAL_POOL.get_with(&irq_guard);
let mut local_pool = local_pool_cell.borrow_mut();
// Split the range into chunks and return them to the local free lists
// respectively.
while size > 0 {
let next_chunk_order = max_order_from(addr).min(lesser_order_of(size));
if next_chunk_order >= MAX_LOCAL_BUDDY_ORDER {
dealloc_to_global_pool(addr, next_chunk_order);
} else {
local_pool.insert_chunk(addr, next_chunk_order);
}
size -= size_of_order(next_chunk_order);
addr += size_of_order(next_chunk_order);
}
balancing::balance(local_pool.deref_mut());
LOCAL_POOL_SIZE
.get_on_cpu(irq_guard.current_cpu())
.store(local_pool.total_size(), Ordering::Relaxed);
}
}
fn alloc_from_global_pool(order: BuddyOrder) -> Option<Paddr> {
let mut lock_guard = GLOBAL_POOL.lock();
let res = lock_guard.alloc_chunk(order);
GLOBAL_POOL_SIZE.store(lock_guard.total_size(), Ordering::Relaxed);
res
}
fn dealloc_to_global_pool(addr: Paddr, order: BuddyOrder) {
let mut lock_guard = GLOBAL_POOL.lock();
lock_guard.insert_chunk(addr, order);
GLOBAL_POOL_SIZE.store(lock_guard.total_size(), Ordering::Relaxed);
}
/// Loads the total size (in bytes) of free memory in the allocator.
pub fn load_total_free_size() -> usize {
let mut total = 0;
total += GLOBAL_POOL_SIZE.load(Ordering::Relaxed);
for cpu in all_cpus() {
total += LOCAL_POOL_SIZE.get_on_cpu(cpu).load(Ordering::Relaxed);
}
total
}
/// Returns an order that covers at least the given size.
fn greater_order_of(size: usize) -> BuddyOrder {
let size = size / PAGE_SIZE;
size.next_power_of_two().trailing_zeros() as BuddyOrder
}
/// Returns an order that covers at most the given size.
fn lesser_order_of(size: usize) -> BuddyOrder {
let size = size / PAGE_SIZE;
(usize::BITS - size.leading_zeros() - 1) as BuddyOrder
}
/// Returns the maximum order starting from the address.
///
/// If the start address is not aligned to the order, the address/order pair
/// cannot form a buddy chunk.
///
/// # Panics
///
/// Panics if the address is not page-aligned in debug mode.
fn max_order_from(addr: Paddr) -> BuddyOrder {
(addr.trailing_zeros() - PAGE_SIZE.trailing_zeros()) as BuddyOrder
}
pub mod balancing {
//! Controlling the balancing between CPU-local free pools and the global free pool.
use core::sync::atomic::Ordering;
use ostd::cpu::num_cpus;
use super::{
lesser_order_of, BuddyOrder, BuddySet, GLOBAL_POOL, GLOBAL_POOL_SIZE, MAX_LOCAL_BUDDY_ORDER,
};
use crate::chunk::size_of_order;
/// Controls the expected size of cache for each CPU-local free pool.
///
/// The expected size will be the size of `GLOBAL_POOL` divided by the number
/// of CPUs, and then divided by this constant.
const CACHE_EXPECTED_PORTION: usize = 2;
/// Returns the expected size of cache for each CPU-local free pool.
///
/// It depends on the size of the global free pool.
fn cache_expected_size(global_size: usize) -> usize {
global_size / num_cpus() / CACHE_EXPECTED_PORTION
}
/// Controls the minimal size of cache for each CPU-local free pool.
///
/// The minimal size will be the expected size divided by this constant.
const CACHE_MINIMAL_PORTION: usize = 8;
/// Returns the minimal size of cache for each CPU-local free pool.
///
/// It depends on the size of the global free pool.
fn cache_minimal_size(global_size: usize) -> usize {
cache_expected_size(global_size) / CACHE_MINIMAL_PORTION
}
/// Controls the maximal size of cache for each CPU-local free pool.
///
/// The maximal size will be the expected size multiplied by this constant.
const CACHE_MAXIMAL_MULTIPLIER: usize = 2;
/// Returns the maximal size of cache for each CPU-local free pool.
///
/// It depends on the size of the global free pool.
fn cache_maximal_size(global_size: usize) -> usize {
cache_expected_size(global_size) * CACHE_MAXIMAL_MULTIPLIER
}
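// Illustrative sizing (hypothetical numbers): with 8 CPUs and a 1 GiB global
// pool, the expected per-CPU cache is 1 GiB / 8 / CACHE_EXPECTED_PORTION =
// 64 MiB, the minimal size is 64 MiB / CACHE_MINIMAL_PORTION = 8 MiB, and the
// maximal size is 64 MiB * CACHE_MAXIMAL_MULTIPLIER = 128 MiB. `balance` below
// moves chunks whenever a local pool leaves this window, targeting the
// expected size.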
/// Balances a local cache and the global free pool.
pub fn balance(local: &mut BuddySet<MAX_LOCAL_BUDDY_ORDER>) {
let global_size = GLOBAL_POOL_SIZE.load(Ordering::Relaxed);
let minimal_local_size = cache_minimal_size(global_size);
let expected_local_size = cache_expected_size(global_size);
let maximal_local_size = cache_maximal_size(global_size);
let local_size = local.total_size();
if local_size >= maximal_local_size {
// Move local frames to the global pool.
if local_size == 0 {
return;
}
let expected_removal = local_size - expected_local_size;
let lesser_order = lesser_order_of(expected_removal);
let mut global_pool_lock = GLOBAL_POOL.lock();
balance_to(local, &mut *global_pool_lock, lesser_order);
GLOBAL_POOL_SIZE.store(global_pool_lock.total_size(), Ordering::Relaxed);
} else if local_size < minimal_local_size {
// Move global frames to the local pool.
if global_size == 0 {
return;
}
let expected_allocation = expected_local_size - local_size;
let lesser_order = lesser_order_of(expected_allocation);
let mut global_pool_lock = GLOBAL_POOL.lock();
balance_to(&mut *global_pool_lock, local, lesser_order);
GLOBAL_POOL_SIZE.store(global_pool_lock.total_size(), Ordering::Relaxed);
}
}
/// Balances from `a` to `b`.
fn balance_to<const MAX_ORDER1: BuddyOrder, const MAX_ORDER2: BuddyOrder>(
a: &mut BuddySet<MAX_ORDER1>,
b: &mut BuddySet<MAX_ORDER2>,
order: BuddyOrder,
) {
let allocated_from_a = a.alloc_chunk(order);
if let Some(addr) = allocated_from_a {
if order >= MAX_ORDER2 {
let inserted_order = MAX_ORDER2 - 1;
for i in 0..(1 << (order - inserted_order)) as usize {
let split_addr = addr + size_of_order(inserted_order) * i;
b.insert_chunk(split_addr, inserted_order);
}
} else {
b.insert_chunk(addr, order);
}
} else {
// Maybe the chunk size is too large.
// Try to reduce the order and balance again.
if order > 1 {
balance_to(a, b, order - 1);
balance_to(a, b, order - 1);
}
}
}
}
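The order arithmetic above is terse, so here is a standalone sketch (not part of the commit; it assumes the usual 4 KiB page size and a 64-bit target) that restates `size_of_order`, `greater_order_of`, and `lesser_order_of` and checks the chunk sizes quoted in the doc comments:

```rust
// Standalone illustration of the buddy-order arithmetic, assuming 4 KiB pages.
const PAGE_SIZE: usize = 4096;
type BuddyOrder = usize;

/// Size in bytes of a chunk of the given order: `(1 << order) * PAGE_SIZE`.
const fn size_of_order(order: BuddyOrder) -> usize {
    (1 << order) * PAGE_SIZE
}

/// Smallest order whose chunk covers at least `size` bytes (rounds up).
fn greater_order_of(size: usize) -> BuddyOrder {
    let pages = size / PAGE_SIZE;
    pages.next_power_of_two().trailing_zeros() as BuddyOrder
}

/// Largest order whose chunk covers at most `size` bytes (rounds down).
fn lesser_order_of(size: usize) -> BuddyOrder {
    let pages = size / PAGE_SIZE;
    (usize::BITS - pages.leading_zeros() - 1) as BuddyOrder
}

fn main() {
    // A 3-page request rounds up to a 4-page chunk (order 2) and down to a
    // 2-page chunk (order 1).
    assert_eq!(greater_order_of(3 * PAGE_SIZE), 2);
    assert_eq!(lesser_order_of(3 * PAGE_SIZE), 1);
    // Order 17 chunks are 512 MiB and order 31 chunks are 8 TiB, matching the
    // `MAX_LOCAL_BUDDY_ORDER` and `MAX_BUDDY_ORDER` doc comments above.
    assert_eq!(size_of_order(17), 512 << 20);
    assert_eq!(size_of_order(31), 8usize << 40);
}
```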


@ -0,0 +1,208 @@
// SPDX-License-Identifier: MPL-2.0
use ostd::{
impl_frame_meta_for,
mm::{frame::linked_list::Link, Paddr, UniqueFrame, PAGE_SIZE},
};
/// The order of a buddy chunk.
///
/// The size of a buddy chunk is `(1 << order) * PAGE_SIZE`.
pub(crate) type BuddyOrder = usize;
/// Returns the size of a buddy chunk of the given order.
pub(crate) const fn size_of_order(order: BuddyOrder) -> usize {
(1 << order) * PAGE_SIZE
}
/// The metadata of the head frame in a free buddy chunk.
#[derive(Debug)]
pub(crate) struct FreeHeadMeta {
/// The order of the buddy chunk.
order: BuddyOrder,
}
impl_frame_meta_for!(FreeHeadMeta);
impl FreeHeadMeta {
/// Returns the order of the buddy chunk.
pub(crate) fn order(&self) -> BuddyOrder {
self.order
}
}
/// A free buddy chunk.
#[derive(Debug)]
pub(crate) struct FreeChunk {
head: UniqueFrame<Link<FreeHeadMeta>>,
}
impl FreeChunk {
/// Gets a buddy chunk from the head frame.
///
/// The caller must ensure that the head frame is uniquely free.
/// Otherwise it waits indefinitely.
///
/// We need unique ownership of this chunk. Other threads may be
/// deallocating its buddy and inspecting this chunk (see
/// [`Self::buddy`]), so we may spuriously fail to acquire it. But
/// they will soon release it, so we can eventually acquire it.
/// they will soon release it so we can acquire it ultimately.
pub(crate) fn from_free_head(head: UniqueFrame<Link<FreeHeadMeta>>) -> FreeChunk {
FreeChunk { head }
}
/// Gets a buddy chunk from unused frames.
///
/// # Panics
///
/// Panics if:
/// - the range is not actually unused;
/// - the address is not aligned to the order.
pub(crate) fn from_unused(addr: Paddr, order: BuddyOrder) -> FreeChunk {
assert!(addr % size_of_order(order) == 0);
let head = UniqueFrame::from_unused(addr, Link::new(FreeHeadMeta { order }))
.expect("The head frame is not unused");
#[cfg(debug_assertions)]
{
use ostd::mm::{
frame::meta::{AnyFrameMeta, GetFrameError},
Frame,
};
let end = addr + size_of_order(order);
for paddr in (addr + PAGE_SIZE..end).step_by(PAGE_SIZE) {
let Err(GetFrameError::Unused) = Frame::<dyn AnyFrameMeta>::from_in_use(paddr)
else {
panic!("The range is not actually unused");
};
}
}
FreeChunk { head }
}
/// Turns the free chunk into a pointer to the head frame.
pub(crate) fn into_unique_head(self) -> UniqueFrame<Link<FreeHeadMeta>> {
self.head
}
/// Returns the order of the buddy chunk.
pub(crate) fn order(&self) -> BuddyOrder {
self.head.meta().order()
}
/// Returns the address of the buddy chunk.
pub(crate) fn addr(&self) -> Paddr {
self.head.start_paddr()
}
/// Gets the address of the buddy of this chunk.
pub(crate) fn buddy(&self) -> Paddr {
let addr = self.addr();
let order = self.order();
addr ^ size_of_order(order)
}
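// For illustration (hypothetical addresses): an order-1 chunk (two 4 KiB pages)
// at 0x8000 has its buddy at 0x8000 ^ 0x2000 = 0xa000, and the chunk at 0xa000
// maps back to 0x8000. Flipping the single bit at the chunk-size position pairs
// buddies symmetrically, which is what makes the coalescing lookup in
// `BuddySet::insert_chunk` cheap.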
/// Splits the buddy chunk into two smaller buddies.
///
/// # Panics
///
/// Panics if the buddy chunk is not uniquely free.
pub(crate) fn split_free(self) -> (FreeChunk, FreeChunk) {
let order = self.order();
let addr = self.addr();
let new_order = order - 1;
let left_child_addr = addr;
let right_child_addr = addr ^ size_of_order(new_order);
let mut unique_head = self.into_unique_head();
debug_assert_eq!(unique_head.start_paddr(), left_child_addr);
unique_head.meta_mut().order = new_order;
let left_child = FreeChunk { head: unique_head };
let right_child = FreeChunk {
head: UniqueFrame::from_unused(
right_child_addr,
Link::new(FreeHeadMeta { order: new_order }),
)
.expect("Tail frames are not unused"),
};
(left_child, right_child)
}
/// Merges the buddy chunk with the sibling buddy.
///
/// # Panics
///
/// Panics if either chunk is not free or if the two chunks are not buddies.
pub(crate) fn merge_free(mut self, mut buddy: FreeChunk) -> FreeChunk {
if self.addr() > buddy.addr() {
core::mem::swap(&mut self, &mut buddy);
}
let order = self.order();
let addr = self.addr();
let buddy_order = buddy.order();
let buddy_addr = buddy.addr();
buddy.into_unique_head().reset_as_unused(); // This will "drop" the frame without up-calling us.
assert_eq!(order, buddy_order);
assert_eq!(addr ^ size_of_order(order), buddy_addr);
let new_order = order + 1;
let mut unique_head = self.into_unique_head();
unique_head.meta_mut().order = new_order;
FreeChunk { head: unique_head }
}
}
#[cfg(ktest)]
mod test {
use super::*;
use crate::test::MockMemoryRegion;
use ostd::prelude::ktest;
#[ktest]
fn test_free_chunk_ops() {
let order = 3;
let size = size_of_order(order);
let region = MockMemoryRegion::alloc(size);
let addr1 = region.start_paddr();
let addr2 = addr1 + size_of_order(order - 2);
let addr3 = addr1 + size_of_order(order - 2) * 2;
let chunk = FreeChunk::from_unused(addr1, order);
assert_eq!(chunk.order(), order);
assert_eq!(chunk.addr(), addr1);
assert_eq!(chunk.buddy(), addr1 ^ size);
let (left, right) = chunk.split_free();
assert_eq!(left.order(), order - 1);
assert_eq!(left.addr(), addr1);
assert_eq!(left.buddy(), addr3);
assert_eq!(right.order(), order - 1);
assert_eq!(right.addr(), addr3);
assert_eq!(right.buddy(), addr1);
let (r1, r2) = left.split_free();
assert_eq!(r1.order(), order - 2);
assert_eq!(r1.addr(), addr1);
assert_eq!(r1.buddy(), addr2);
assert_eq!(r2.order(), order - 2);
assert_eq!(r2.addr(), addr2);
assert_eq!(r2.buddy(), addr1);
let left = r1.merge_free(r2);
let chunk = left.merge_free(right);
assert_eq!(chunk.order(), order);
assert_eq!(chunk.addr(), addr1);
chunk.into_unique_head().reset_as_unused();
}
}


@ -0,0 +1,32 @@
// SPDX-License-Identifier: MPL-2.0
#![no_std]
#![deny(unsafe_code)]
//! An implementation of the global physical memory frame allocator for
//! [OSTD](https://crates.io/crates/ostd) based kernels.
//!
//! # Background
//!
//! `OSTD` provides a frame allocator interface, namely the [`GlobalFrameAllocator`]
//! trait and the [`global_frame_allocator`] procedural macro, allowing users to
//! safely plug their own frame allocator into the kernel. Refer to the
//! [`ostd::mm::frame::allocator`] module for a detailed introduction.
//!
//! # Introduction
//!
//! This crate is an implementation of a scalable and efficient global frame
//! allocator based on the buddy system. It is shipped with OSDK by default
//! for users who have no special requirements on the frame allocator.
//!
//! [`GlobalFrameAllocator`]: ostd::mm::GlobalFrameAllocator
//! [`global_frame_allocator`]: ostd::global_frame_allocator
mod allocator;
mod chunk;
mod set;
#[cfg(ktest)]
mod test;
pub use allocator::{load_total_free_size, FrameAllocator};
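As a quick orientation, below is a minimal sketch of how a kernel built on OSTD opts into this allocator, mirroring the kernel-side `vm` module and `/proc/meminfo`/`sysinfo` changes earlier in this diff (the `free_memory_kib` helper is hypothetical):

```rust
use osdk_frame_allocator::FrameAllocator;

// The attribute wires this static into OSTD's global frame allocation path.
#[ostd::global_frame_allocator]
static FRAME_ALLOCATOR: FrameAllocator = FrameAllocator;

// Hypothetical helper: `load_total_free_size` is the statistics entry point
// that this commit uses for /proc/meminfo and sys_sysinfo.
fn free_memory_kib() -> usize {
    osdk_frame_allocator::load_total_free_size() / 1024
}
```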


@ -0,0 +1,148 @@
// SPDX-License-Identifier: MPL-2.0
use ostd::mm::{frame::linked_list::LinkedList, Paddr};
use crate::chunk::{size_of_order, BuddyOrder, FreeChunk, FreeHeadMeta};
/// A set of free buddy chunks.
pub(crate) struct BuddySet<const MAX_ORDER: BuddyOrder> {
/// The sum of the sizes of all free chunks.
total_size: usize,
/// The lists of free buddy chunks, one for each order.
lists: [LinkedList<FreeHeadMeta>; MAX_ORDER],
}
impl<const MAX_ORDER: BuddyOrder> BuddySet<MAX_ORDER> {
/// Creates a new empty set of free lists.
pub(crate) const fn new_empty() -> Self {
Self {
total_size: 0,
lists: [const { LinkedList::new() }; MAX_ORDER],
}
}
/// Gets the total size of free chunks.
pub(crate) fn total_size(&self) -> usize {
self.total_size
}
/// Inserts a free chunk into the set.
pub(crate) fn insert_chunk(&mut self, addr: Paddr, order: BuddyOrder) {
debug_assert!(order < MAX_ORDER);
let inserted_size = size_of_order(order);
let mut chunk = FreeChunk::from_unused(addr, order);
let order = chunk.order();
// Coalesce the chunk with its buddy whenever possible.
for (i, list) in self.lists.iter_mut().enumerate().skip(order) {
if i + 1 >= MAX_ORDER {
// The chunk is already the largest one.
break;
}
let buddy_addr = chunk.buddy();
let Some(mut cursor) = list.cursor_mut_at(buddy_addr) else {
// The buddy is not in this free list, so we can't coalesce.
break;
};
let taken = cursor.take_current().unwrap();
debug_assert_eq!(buddy_addr, taken.start_paddr());
chunk = chunk.merge_free(FreeChunk::from_free_head(taken));
}
// Insert the coalesced chunk into the free lists.
let order = chunk.order();
self.lists[order].push_front(chunk.into_unique_head());
self.total_size += inserted_size;
}
/// Allocates a chunk from the set.
///
/// The function will choose and remove a buddy chunk of the given order
/// from the set. The address of the chunk will be returned.
pub(crate) fn alloc_chunk(&mut self, order: BuddyOrder) -> Option<Paddr> {
// Find the first non-empty size class larger than the requested order.
let mut non_empty = None;
for (i, list) in self.lists.iter_mut().enumerate().skip(order) {
if !list.is_empty() {
non_empty = Some(i);
break;
}
}
let non_empty = non_empty?;
let mut chunk = {
let head = self.lists[non_empty].pop_front().unwrap();
debug_assert_eq!(head.meta().order(), non_empty as BuddyOrder);
Some(FreeChunk::from_free_head(head))
};
// Split the chunk.
for i in (order + 1..=non_empty).rev() {
let (left_sub, right_sub) = chunk.take().unwrap().split_free();
// Push the right sub-chunk back to the free lists.
let right_sub = right_sub.into_unique_head();
debug_assert_eq!(right_sub.meta().order(), (i - 1) as BuddyOrder);
self.lists[i - 1].push_front(right_sub);
// Pass the left sub-chunk to the next iteration.
chunk = Some(left_sub);
}
let allocated_size = size_of_order(order);
self.total_size -= allocated_size;
// The remaining chunk is the one we want.
let head_frame = chunk.take().unwrap().into_unique_head();
let paddr = head_frame.start_paddr();
head_frame.reset_as_unused(); // It will "drop" the frame without up-calling us.
Some(paddr)
}
}
#[cfg(ktest)]
mod test {
use super::*;
use crate::test::MockMemoryRegion;
use ostd::prelude::ktest;
#[ktest]
fn test_buddy_set_insert_alloc() {
let region_order = 4;
let region_size = size_of_order(region_order);
let region = MockMemoryRegion::alloc(region_size);
let region_start = region.start_paddr();
let mut set = BuddySet::<5>::new_empty();
set.insert_chunk(region_start, region_order);
assert!(set.total_size() == region_size);
// Allocating chunks of orders of 0, 0, 1, 2, 3 should be okay.
let chunk1 = set.alloc_chunk(0).unwrap();
assert!(set.total_size() == region_size - size_of_order(0));
let chunk2 = set.alloc_chunk(0).unwrap();
assert!(set.total_size() == region_size - size_of_order(1));
let chunk3 = set.alloc_chunk(1).unwrap();
assert!(set.total_size() == region_size - size_of_order(2));
let chunk4 = set.alloc_chunk(2).unwrap();
assert!(set.total_size() == region_size - size_of_order(3));
let chunk5 = set.alloc_chunk(3).unwrap();
assert!(set.total_size() == 0);
// Putting them back should enable us to allocate the original region.
set.insert_chunk(chunk3, 1);
assert!(set.total_size() == size_of_order(1));
set.insert_chunk(chunk1, 0);
assert!(set.total_size() == size_of_order(0) + size_of_order(1));
set.insert_chunk(chunk5, 3);
assert!(set.total_size() == size_of_order(0) + size_of_order(1) + size_of_order(3));
set.insert_chunk(chunk2, 0);
assert!(set.total_size() == size_of_order(2) + size_of_order(3));
set.insert_chunk(chunk4, 2);
assert!(set.total_size() == size_of_order(4));
let chunk = set.alloc_chunk(region_order).unwrap();
assert!(chunk == region_start);
assert!(set.total_size() == 0);
}
}


@ -0,0 +1,86 @@
// SPDX-License-Identifier: MPL-2.0
//! Providing test utilities and high-level test cases for the frame allocator.
use core::alloc::Layout;
use ostd::{
mm::{frame::GlobalFrameAllocator, FrameAllocOptions, Paddr, Segment, UniqueFrame, PAGE_SIZE},
prelude::ktest,
};
use super::FrameAllocator;
#[ktest]
fn frame_allocator_alloc_layout_match() {
assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap());
assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 2, PAGE_SIZE).unwrap());
assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 3, PAGE_SIZE).unwrap());
assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 4, PAGE_SIZE).unwrap());
assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 2, PAGE_SIZE * 2).unwrap());
assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 4, PAGE_SIZE * 4).unwrap());
assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 8, PAGE_SIZE * 8).unwrap());
assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 16, PAGE_SIZE * 16).unwrap());
}
#[track_caller]
fn assert_allocation_well_formed(layout: Layout) {
let instance = FrameAllocator;
let allocated = instance.alloc(layout).unwrap();
assert_eq!(
allocated % layout.align(),
0,
"Allocation alignment mismatch"
);
for offset in (0..layout.size()).step_by(PAGE_SIZE) {
let frame = allocated + offset;
let frame = UniqueFrame::from_unused(frame, ()).unwrap_or_else(|e| {
panic!(
"Metadata not well-formed after allocation at offset {:#x}: {:#?}",
offset, e
)
});
frame.reset_as_unused();
}
instance.add_free_memory(allocated, layout.size());
}
/// A mocked memory region for testing.
///
/// All the frames in the returned memory region will be marked as unused.
/// When the region is dropped, all the frames will be returned to the global
/// frame allocator. If any frame is not unused by that time, the drop will panic.
pub(crate) struct MockMemoryRegion {
addr: Paddr,
size: usize,
}
impl MockMemoryRegion {
/// Gets a memory region for testing.
pub(crate) fn alloc(size: usize) -> Self {
let seg = FrameAllocOptions::new()
.alloc_segment(size / PAGE_SIZE)
.unwrap();
let addr = seg.start_paddr();
for frame in seg {
UniqueFrame::try_from(frame).unwrap().reset_as_unused();
}
Self { addr, size }
}
/// Gets the start address of the memory region.
pub(crate) fn start_paddr(&self) -> Paddr {
self.addr
}
}
impl Drop for MockMemoryRegion {
fn drop(&mut self) {
let seg = Segment::from_unused(self.addr..self.addr + self.size, |_| ()).unwrap();
drop(seg);
}
}


@@ -1,6 +1,8 @@
 #![no_std]
 #![no_main]
+#![feature(linkage)]
 
 extern crate #TARGET_NAME#;
 
 #[panic_handler]
@@ -10,3 +12,12 @@ fn panic(info: &core::panic::PanicInfo) -> ! {
     }
     unsafe { __ostd_panic_handler(info); }
 }
+
+use ostd::mm::frame::GlobalFrameAllocator;
+use osdk_frame_allocator::FrameAllocator;
+
+static FRAME_ALLOCATOR: FrameAllocator = FrameAllocator;
+
+#[no_mangle]
+#[linkage = "weak"]
+static __GLOBAL_FRAME_ALLOCATOR_REF: &'static dyn GlobalFrameAllocator = &FRAME_ALLOCATOR;
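Note the linkage trick: the generated base crate exports `__GLOBAL_FRAME_ALLOCATOR_REF` as a weak symbol pointing at the bundled `osdk-frame-allocator`, while the `#[ostd::global_frame_allocator]` macro (later in this diff) emits the same symbol without weak linkage, so a kernel that declares its own allocator overrides this default at link time.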


@ -57,8 +57,8 @@ pub enum BaseCrateType {
/// Create a new base crate that will be built by cargo. /// Create a new base crate that will be built by cargo.
/// ///
/// The dependencies of the base crate will be the target crate. If /// The dependencies of the base crate will be the target crate. If
/// `link_unit_test_runner` is set to true, the base crate will also depend on /// `link_unit_test_kernel` is set to true, the base crate will also depend on
/// the `ostd-test-runner` crate. /// the `ostd-test-kernel` crate.
/// ///
/// It returns the path to the base crate. /// It returns the path to the base crate.
pub fn new_base_crate( pub fn new_base_crate(
@ -66,7 +66,7 @@ pub fn new_base_crate(
base_crate_path_stem: impl AsRef<Path>, base_crate_path_stem: impl AsRef<Path>,
dep_crate_name: &str, dep_crate_name: &str,
dep_crate_path: impl AsRef<Path>, dep_crate_path: impl AsRef<Path>,
link_unit_test_runner: bool, link_unit_test_kernel: bool,
) -> PathBuf { ) -> PathBuf {
let base_crate_path: PathBuf = PathBuf::from( let base_crate_path: PathBuf = PathBuf::from(
(base_crate_path_stem.as_ref().as_os_str().to_string_lossy() (base_crate_path_stem.as_ref().as_os_str().to_string_lossy()
@ -85,7 +85,7 @@ pub fn new_base_crate(
&base_crate_tmp_path, &base_crate_tmp_path,
dep_crate_name, dep_crate_name,
&dep_crate_path, &dep_crate_path,
link_unit_test_runner, link_unit_test_kernel,
); );
let cargo_result = are_files_identical( let cargo_result = are_files_identical(
&base_crate_path.join("Cargo.toml"), &base_crate_path.join("Cargo.toml"),
@ -105,7 +105,7 @@ pub fn new_base_crate(
&base_crate_path, &base_crate_path,
dep_crate_name, dep_crate_name,
dep_crate_path, dep_crate_path,
link_unit_test_runner, link_unit_test_kernel,
); );
base_crate_path base_crate_path
@ -115,7 +115,7 @@ fn do_new_base_crate(
base_crate_path: impl AsRef<Path>, base_crate_path: impl AsRef<Path>,
dep_crate_name: &str, dep_crate_name: &str,
dep_crate_path: impl AsRef<Path>, dep_crate_path: impl AsRef<Path>,
link_unit_test_runner: bool, link_unit_test_kernel: bool,
) { ) {
let workspace_root = { let workspace_root = {
let meta = get_cargo_metadata(None::<&str>, None::<&[&str]>).unwrap(); let meta = get_cargo_metadata(None::<&str>, None::<&[&str]>).unwrap();
@ -182,7 +182,7 @@ fn do_new_base_crate(
fs::write("src/main.rs", main_rs).unwrap(); fs::write("src/main.rs", main_rs).unwrap();
// Add dependencies to the Cargo.toml // Add dependencies to the Cargo.toml
add_manifest_dependency(dep_crate_name, dep_crate_path, link_unit_test_runner); add_manifest_dependency(dep_crate_name, dep_crate_path, link_unit_test_kernel);
// Copy the manifest configurations from the target crate to the base crate // Copy the manifest configurations from the target crate to the base crate
copy_profile_configurations(workspace_root); copy_profile_configurations(workspace_root);
@ -197,7 +197,7 @@ fn do_new_base_crate(
fn add_manifest_dependency( fn add_manifest_dependency(
crate_name: &str, crate_name: &str,
crate_path: impl AsRef<Path>, crate_path: impl AsRef<Path>,
link_unit_test_runner: bool, link_unit_test_kernel: bool,
) { ) {
let manifest_path = "Cargo.toml"; let manifest_path = "Cargo.toml";
@ -224,31 +224,47 @@ fn add_manifest_dependency(
.unwrap(); .unwrap();
dependencies.as_table_mut().unwrap().extend(target_dep); dependencies.as_table_mut().unwrap().extend(target_dep);
if link_unit_test_runner { if link_unit_test_kernel {
let dep_str = match option_env!("OSDK_LOCAL_DEV") { add_manifest_dependency_to(
Some("1") => { dependencies,
let crate_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); "osdk-test-kernel",
let test_kernel_dir = crate_dir.join("deps").join("test-kernel"); Path::new("deps").join("test-kernel"),
format!( );
"osdk-test-kernel = {{ path = \"{}\" }}",
test_kernel_dir.display()
)
}
_ => concat!(
"osdk-test-kernel = { version = \"",
env!("CARGO_PKG_VERSION"),
"\" }"
)
.to_owned(),
};
let test_runner_dep = toml::Table::from_str(&dep_str).unwrap();
dependencies.as_table_mut().unwrap().extend(test_runner_dep);
} }
add_manifest_dependency_to(
dependencies,
"osdk-frame-allocator",
Path::new("deps").join("frame-allocator"),
);
add_manifest_dependency_to(dependencies, "ostd", Path::new("..").join("ostd"));
let content = toml::to_string(&manifest).unwrap(); let content = toml::to_string(&manifest).unwrap();
fs::write(manifest_path, content).unwrap(); fs::write(manifest_path, content).unwrap();
} }
fn add_manifest_dependency_to(manifest: &mut toml::Value, dep_name: &str, path: PathBuf) {
let dep_str = match option_env!("OSDK_LOCAL_DEV") {
Some("1") => {
let crate_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
let dep_crate_dir = crate_dir.join(path);
format!(
"{} = {{ path = \"{}\" }}",
dep_name,
dep_crate_dir.display()
)
}
_ => format!(
"{} = {{ version = \"{}\" }}",
dep_name,
env!("CARGO_PKG_VERSION"),
),
};
let dep_val = toml::Table::from_str(&dep_str).unwrap();
manifest.as_table_mut().unwrap().extend(dep_val);
}
fn copy_profile_configurations(workspace_root: impl AsRef<Path>) { fn copy_profile_configurations(workspace_root: impl AsRef<Path>) {
let target_manifest_path = workspace_root.as_ref().join("Cargo.toml"); let target_manifest_path = workspace_root.as_ref().join("Cargo.toml");
let manifest_path = "Cargo.toml"; let manifest_path = "Cargo.toml";


@ -65,6 +65,43 @@ pub fn test_main(_attr: TokenStream, item: TokenStream) -> TokenStream {
.into()
}
/// A macro attribute for the global frame allocator.
///
/// The attributed static variable will be used to provide frame allocation
/// for the kernel. The variable's type must implement `ostd::mm::GlobalFrameAllocator`.
///
/// # Example
///
/// ```ignore
/// use ostd::{mm::{frame::GlobalFrameAllocator, Paddr}, global_frame_allocator};
///
/// // Of course it won't work because all allocations will fail.
/// // It's just an example.
/// #[global_frame_allocator]
/// static ALLOCATOR: MyFrameAllocator = MyFrameAllocator;
///
/// struct MyFrameAllocator;
///
/// impl GlobalFrameAllocator for MyFrameAllocator {
/// fn alloc(&self, _layout: Layout) -> Option<Paddr> { None }
/// fn dealloc(&self, _paddr: Paddr, _size: usize) {}
/// }
/// ```
#[proc_macro_attribute]
pub fn global_frame_allocator(_attr: TokenStream, item: TokenStream) -> TokenStream {
// Make a `static __GLOBAL_FRAME_ALLOCATOR_REF: &'static dyn GlobalFrameAllocator`
// that points to the annotated static variable.
let item = parse_macro_input!(item as syn::ItemStatic);
let static_name = &item.ident;
quote!(
#[no_mangle]
static __GLOBAL_FRAME_ALLOCATOR_REF: &'static dyn ostd::mm::frame::GlobalFrameAllocator = &#static_name;
#item
)
.into()
}
/// A macro attribute for the panic handler.
///
/// The attributed function will be used to override OSTD's default


@@ -67,6 +67,13 @@ pub use self::{error::Error, prelude::Result};
 #[doc(hidden)]
 unsafe fn init() {
     arch::enable_cpu_features();
+
+    // SAFETY: This function is called only once, before `allocator::init`
+    // and after memory regions are initialized.
+    unsafe {
+        mm::frame::allocator::init_early_allocator();
+    }
+
     arch::serial::init();
 
     #[cfg(feature = "cvm_guest")]

@ -2,18 +2,18 @@
//! The physical memory allocator. //! The physical memory allocator.
use core::{alloc::Layout, ops::Range};
use align_ext::AlignExt; use align_ext::AlignExt;
use buddy_system_allocator::FrameAllocator;
use log::info;
use spin::Once;
use super::{meta::AnyFrameMeta, segment::Segment, Frame}; use super::{meta::AnyFrameMeta, segment::Segment, Frame};
use crate::{ use crate::{
boot::memory_region::MemoryRegionType, boot::memory_region::MemoryRegionType,
error::Error, error::Error,
impl_frame_meta_for,
mm::{paddr_to_vaddr, Paddr, PAGE_SIZE}, mm::{paddr_to_vaddr, Paddr, PAGE_SIZE},
prelude::*, prelude::*,
sync::SpinLock, util::range_difference,
}; };
/// Options for allocating physical memory frames. /// Options for allocating physical memory frames.
@ -52,16 +52,9 @@ impl FrameAllocOptions {
/// Allocates a single frame with additional metadata. /// Allocates a single frame with additional metadata.
pub fn alloc_frame_with<M: AnyFrameMeta>(&self, metadata: M) -> Result<Frame<M>> { pub fn alloc_frame_with<M: AnyFrameMeta>(&self, metadata: M) -> Result<Frame<M>> {
let frame = FRAME_ALLOCATOR let single_layout = Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap();
.get() let frame = alloc_upcall(single_layout)
.unwrap() .map(|paddr| Frame::from_unused(paddr, metadata).unwrap())
.disable_irq()
.lock()
.alloc(1)
.map(|idx| {
let paddr = idx * PAGE_SIZE;
Frame::from_unused(paddr, metadata).unwrap()
})
.ok_or(Error::NoMemory)?; .ok_or(Error::NoMemory)?;
if self.zeroed { if self.zeroed {
@ -93,18 +86,10 @@ impl FrameAllocOptions {
if nframes == 0 { if nframes == 0 {
return Err(Error::InvalidArgs); return Err(Error::InvalidArgs);
} }
let segment = FRAME_ALLOCATOR let layout = Layout::from_size_align(nframes * PAGE_SIZE, PAGE_SIZE).unwrap();
.get() let segment = alloc_upcall(layout)
.unwrap()
.disable_irq()
.lock()
.alloc(nframes)
.map(|start| { .map(|start| {
Segment::from_unused( Segment::from_unused(start..start + nframes * PAGE_SIZE, metadata_fn).unwrap()
start * PAGE_SIZE..start * PAGE_SIZE + nframes * PAGE_SIZE,
metadata_fn,
)
.unwrap()
}) })
.ok_or(Error::NoMemory)?; .ok_or(Error::NoMemory)?;
@ -140,73 +125,236 @@ fn test_alloc_dealloc() {
} }
} }
/// FrameAllocator with a counter for allocated memory /// The trait for the global frame allocator.
pub(in crate::mm) struct CountingFrameAllocator { ///
allocator: FrameAllocator, /// OSTD allows a customized frame allocator by the [`global_frame_allocator`]
total: usize, /// attribute, which marks a static variable of this type.
allocated: usize, ///
/// The API mimics the standard Rust allocator API ([`GlobalAlloc`] and
/// [`global_allocator`]). However, this trait is much safer. Double free
/// or freeing in-use memory through this trait only messes up the allocator's
/// state rather than causing undefined behavior.
///
/// Whenever OSTD or other modules need to allocate or deallocate frames via
/// [`FrameAllocOptions`], they are forwarded to the global frame allocator.
/// It is not encouraged to call the global allocator directly.
///
/// [`global_frame_allocator`]: crate::global_frame_allocator
/// [`GlobalAlloc`]: core::alloc::GlobalAlloc
pub trait GlobalFrameAllocator: Sync {
/// Allocates a contiguous range of frames.
///
/// The caller guarantees that `layout.size()` is aligned to [`PAGE_SIZE`].
///
/// When the allocated memory is no longer in use, OSTD returns it by calling
/// [`GlobalFrameAllocator::add_free_memory`].
fn alloc(&self, layout: Layout) -> Option<Paddr>;
/// Adds a contiguous range of frames to the allocator.
///
/// The caller guarantees that `addr` and `size` are both aligned to
/// [`PAGE_SIZE`]. The added memory can be uninitialized.
///
/// The memory being added will never overlap with any memory that is
/// already added, i.e., a frame cannot be added twice without being
/// allocated in between.
///
/// However, if [`GlobalFrameAllocator::alloc`] returns multiple frames,
/// it is possible that some of them are added back before others.
fn add_free_memory(&self, addr: Paddr, size: usize);
} }
impl CountingFrameAllocator { extern "Rust" {
pub fn new(allocator: FrameAllocator, total: usize) -> Self { /// The global frame allocator's reference exported by
CountingFrameAllocator { /// [`crate::global_frame_allocator`].
allocator, static __GLOBAL_FRAME_ALLOCATOR_REF: &'static dyn GlobalFrameAllocator;
total,
allocated: 0,
}
}
pub fn alloc(&mut self, count: usize) -> Option<usize> {
match self.allocator.alloc(count) {
Some(value) => {
self.allocated += count * PAGE_SIZE;
Some(value)
}
None => None,
}
}
// TODO: this method should be marked unsafe as invalid arguments will mess
// up the underlying allocator.
pub fn dealloc(&mut self, start_frame: usize, count: usize) {
self.allocator.dealloc(start_frame, count);
self.allocated -= count * PAGE_SIZE;
}
pub fn mem_total(&self) -> usize {
self.total
}
pub fn mem_available(&self) -> usize {
self.total - self.allocated
}
} }
pub(in crate::mm) static FRAME_ALLOCATOR: Once<SpinLock<CountingFrameAllocator>> = Once::new(); /// Directly allocates a contiguous range of frames.
fn alloc_upcall(layout: core::alloc::Layout) -> Option<Paddr> {
// SAFETY: We believe that the global frame allocator is set up correctly
// with the `global_frame_allocator` attribute. If they use safe code only
// then the up-call is safe.
unsafe { __GLOBAL_FRAME_ALLOCATOR_REF.alloc(layout) }
}
pub(crate) fn init() { /// Up-call to add a range of frames to the global frame allocator.
///
/// It returns the frames to the allocator for further use. This should be
/// done after the metadata is released, to avoid re-allocation before the
/// metadata is reset.
pub(super) fn add_free_memory_upcall(addr: Paddr, size: usize) {
// SAFETY: We believe that the global frame allocator is set up correctly
// with the `global_frame_allocator` attribute. If they use safe code only
// then the up-call is safe.
unsafe { __GLOBAL_FRAME_ALLOCATOR_REF.add_free_memory(addr, size) }
}
/// Initializes the global frame allocator.
///
/// It simply adds the usable frames to the global frame allocator. Calling it
/// multiple times is not safe.
///
/// # Safety
///
/// This function should be called only once.
pub(crate) unsafe fn init() {
let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions; let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions;
let mut total: usize = 0;
let mut allocator = FrameAllocator::<32>::new(); // Retire the early allocator.
let early_allocator = EARLY_ALLOCATOR.lock().take().unwrap();
let (range_1, range_2) = early_allocator.allocated_regions();
for region in regions.iter() { for region in regions.iter() {
if region.typ() == MemoryRegionType::Usable { if region.typ() == MemoryRegionType::Usable {
// Make the memory region page-aligned, and skip if it is too small. debug_assert!(region.base() % PAGE_SIZE == 0);
let start = region.base().align_up(PAGE_SIZE) / PAGE_SIZE; debug_assert!(region.len() % PAGE_SIZE == 0);
let region_end = region.base().checked_add(region.len()).unwrap();
let end = region_end.align_down(PAGE_SIZE) / PAGE_SIZE;
if end <= start {
continue;
}
// Add global free pages to the frame allocator. // Add global free pages to the frame allocator.
allocator.add_frame(start, end); // Truncate the early allocated frames if there is an overlap.
total += (end - start) * PAGE_SIZE; for r1 in range_difference(&(region.base()..region.end()), &range_1) {
info!( for r2 in range_difference(&r1, &range_2) {
"Found usable region, start:{:x}, end:{:x}", log::info!("Adding free frames to the allocator: {:x?}", r2);
region.base(), add_free_memory_upcall(r2.start, r2.len());
region.base() + region.len() }
); }
} }
} }
let counting_allocator = CountingFrameAllocator::new(allocator, total); }
FRAME_ALLOCATOR.call_once(|| SpinLock::new(counting_allocator));
/// An allocator in the early boot phase when frame metadata is not available.
pub(super) struct EarlyFrameAllocator {
// We need to allocate from under 4G first since the linear mapping for
// the higher region is not constructed yet.
under_4g_range: Range<Paddr>,
under_4g_end: Paddr,
// Sometimes memory below 4G is not enough for the early phase. This, if not
// `0..0`, is the largest usable region above 4G.
max_range: Range<Paddr>,
max_end: Paddr,
}
/// The global frame allocator in the early boot phase.
///
/// It is used to allocate frames before the frame metadata is initialized.
/// The allocated frames are not tracked by the frame metadata. After the
/// metadata is initialized with [`super::meta::init`], the frames are tracked
/// with metadata and the early allocator is no longer used.
///
/// This is protected by the [`spin::Mutex`] rather than [`crate::sync::SpinLock`]
/// since the latter uses CPU-local storage, which isn't available in the early
/// boot phase. So we must make sure that no interrupts are enabled when using
/// this allocator.
pub(super) static EARLY_ALLOCATOR: spin::Mutex<Option<EarlyFrameAllocator>> =
spin::Mutex::new(None);
impl EarlyFrameAllocator {
/// Creates a new early frame allocator.
///
/// It uses at most 2 regions, the first is the maximum usable region below
/// 4 GiB. The other is the maximum usable region above 4 GiB and is only
/// usable when linear mapping is constructed.
pub fn new() -> Self {
let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions;
let mut under_4g_range = 0..0;
let mut max_range = 0..0;
for region in regions.iter() {
if region.typ() != MemoryRegionType::Usable {
continue;
}
const PADDR4G: Paddr = 0x1_0000_0000;
if region.base() < PADDR4G {
let range = region.base()..region.end().min(PADDR4G);
if range.len() > under_4g_range.len() {
under_4g_range = range;
}
}
if region.end() >= PADDR4G {
let range = region.base().max(PADDR4G)..region.end();
if range.len() > max_range.len() {
max_range = range;
}
}
}
log::debug!(
"Early frame allocator (below 4G) at: {:#x?}",
under_4g_range
);
if !max_range.is_empty() {
log::debug!("Early frame allocator (above 4G) at: {:#x?}", max_range);
}
Self {
under_4g_range: under_4g_range.clone(),
under_4g_end: under_4g_range.start,
max_range: max_range.clone(),
max_end: max_range.start,
}
}
/// Allocates a contiguous range of frames.
pub fn alloc(&mut self, layout: Layout) -> Option<Paddr> {
let size = layout.size().align_up(PAGE_SIZE);
let allocated = self.under_4g_end.align_up(layout.align());
if allocated + size <= self.under_4g_range.end {
// Allocated below 4G.
self.under_4g_end = allocated + size;
Some(allocated)
} else {
// Try above 4G.
let allocated = self.max_end.align_up(layout.align());
if allocated + size <= self.max_range.end {
self.max_end = allocated + size;
Some(allocated)
} else {
// Neither the region below 4G nor the one above can satisfy the request.
None
}
}
}
pub(super) fn allocated_regions(&self) -> (Range<Paddr>, Range<Paddr>) {
(
self.under_4g_range.start..self.under_4g_end,
self.max_range.start..self.max_end,
)
}
}
/// Metadata for frames allocated in the early boot phase.
///
/// Frames allocated with [`early_alloc`] are not immediately tracked with
/// frame metadata. But [`super::meta::init`] will track them later.
#[derive(Debug)]
pub(crate) struct EarlyAllocatedFrameMeta;
impl_frame_meta_for!(EarlyAllocatedFrameMeta);
/// Allocates a contiguous range of frames in the early boot phase.
///
/// The early allocated frames will not be reclaimable until the metadata is
/// initialized by [`super::meta::init`]. Then we can use [`Frame::from_raw`]
/// to free the frames.
///
/// # Panics
///
/// This function panics if:
/// - it is called before [`init_early_allocator`],
/// - or it is called after [`init`].
pub(crate) fn early_alloc(layout: Layout) -> Option<Paddr> {
let mut early_allocator = EARLY_ALLOCATOR.lock();
early_allocator.as_mut().unwrap().alloc(layout)
}
/// Initializes the early frame allocator.
///
/// [`early_alloc`] can be used after this initialization. After [`init`], the
/// early allocator is retired and no longer used.
///
/// # Safety
///
/// This function should be called only once after the memory regions are ready.
pub(crate) unsafe fn init_early_allocator() {
let mut early_allocator = EARLY_ALLOCATOR.lock();
*early_allocator = Some(EarlyFrameAllocator::new());
} }


@ -39,10 +39,11 @@ pub(crate) mod mapping {
} }
use core::{ use core::{
alloc::Layout,
any::Any, any::Any,
cell::UnsafeCell, cell::UnsafeCell,
fmt::Debug, fmt::Debug,
mem::{size_of, MaybeUninit}, mem::{size_of, ManuallyDrop, MaybeUninit},
result::Result, result::Result,
sync::atomic::{AtomicU64, Ordering}, sync::atomic::{AtomicU64, Ordering},
}; };
@ -50,16 +51,19 @@ use core::{
use align_ext::AlignExt; use align_ext::AlignExt;
use log::info; use log::info;
use super::{allocator, Segment};
use crate::{ use crate::{
arch::mm::PagingConsts, arch::mm::PagingConsts,
const_assert, const_assert,
mm::{ mm::{
kspace::LINEAR_MAPPING_BASE_VADDR, paddr_to_vaddr, page_size, page_table::boot_pt, frame::allocator::{self, EarlyAllocatedFrameMeta},
CachePolicy, Infallible, Paddr, PageFlags, PageProperty, PrivilegedPageFlags, Vaddr, kspace::LINEAR_MAPPING_BASE_VADDR,
VmReader, PAGE_SIZE, paddr_to_vaddr, page_size,
page_table::boot_pt,
CachePolicy, Infallible, Paddr, PageFlags, PageProperty, PrivilegedPageFlags, Segment,
Vaddr, VmReader, PAGE_SIZE,
}, },
panic::abort, panic::abort,
util::range_difference,
}; };
/// The maximum number of bytes of the metadata of a frame. /// The maximum number of bytes of the metadata of a frame.
@ -383,16 +387,6 @@ impl MetaSlot {
// `Release` pairs with the `Acquire` in `Frame::from_unused` and ensures // `Release` pairs with the `Acquire` in `Frame::from_unused` and ensures
// `drop_meta_in_place` won't be reordered after this memory store. // `drop_meta_in_place` won't be reordered after this memory store.
self.ref_count.store(REF_COUNT_UNUSED, Ordering::Release); self.ref_count.store(REF_COUNT_UNUSED, Ordering::Release);
// Deallocate the frame.
// It would return the frame to the allocator for further use. This would be done
// after the release of the metadata to avoid re-allocation before the metadata
// is reset.
allocator::FRAME_ALLOCATOR
.get()
.unwrap()
.lock()
.dealloc(self.frame_paddr() / PAGE_SIZE, 1);
} }
/// Drops the metadata of a slot in place. /// Drops the metadata of a slot in place.
@ -460,8 +454,6 @@ pub(crate) unsafe fn init() -> Segment<MetaPageMeta> {
add_temp_linear_mapping(max_paddr); add_temp_linear_mapping(max_paddr);
super::MAX_PADDR.store(max_paddr, Ordering::Relaxed);
let tot_nr_frames = max_paddr / page_size::<PagingConsts>(1); let tot_nr_frames = max_paddr / page_size::<PagingConsts>(1);
let (nr_meta_pages, meta_pages) = alloc_meta_frames(tot_nr_frames); let (nr_meta_pages, meta_pages) = alloc_meta_frames(tot_nr_frames);
@ -482,10 +474,33 @@ pub(crate) unsafe fn init() -> Segment<MetaPageMeta> {
.unwrap(); .unwrap();
// Now the metadata frames are mapped, we can initialize the metadata. // Now the metadata frames are mapped, we can initialize the metadata.
Segment::from_unused(meta_pages..meta_pages + nr_meta_pages * PAGE_SIZE, |_| { super::MAX_PADDR.store(max_paddr, Ordering::Relaxed);
MetaPageMeta {}
}) let meta_page_range = meta_pages..meta_pages + nr_meta_pages * PAGE_SIZE;
.unwrap()
let (range_1, range_2) = allocator::EARLY_ALLOCATOR
.lock()
.as_ref()
.unwrap()
.allocated_regions();
for r in range_difference(&range_1, &meta_page_range) {
let early_seg = Segment::from_unused(r, |_| EarlyAllocatedFrameMeta).unwrap();
let _ = ManuallyDrop::new(early_seg);
}
for r in range_difference(&range_2, &meta_page_range) {
let early_seg = Segment::from_unused(r, |_| EarlyAllocatedFrameMeta).unwrap();
let _ = ManuallyDrop::new(early_seg);
}
Segment::from_unused(meta_page_range, |_| MetaPageMeta {}).unwrap()
}
/// Returns whether the global frame allocator is initialized.
pub(in crate::mm) fn is_initialized() -> bool {
// `init` sets it with relaxed ordering somewhere in the middle. But due
// to the safety requirement of the `init` function, we can assume that
// there are no race conditions.
super::MAX_PADDR.load(Ordering::Relaxed) != 0
} }
fn alloc_meta_frames(tot_nr_frames: usize) -> (usize, Paddr) { fn alloc_meta_frames(tot_nr_frames: usize) -> (usize, Paddr) {
@ -493,13 +508,10 @@ fn alloc_meta_frames(tot_nr_frames: usize) -> (usize, Paddr) {
.checked_mul(size_of::<MetaSlot>()) .checked_mul(size_of::<MetaSlot>())
.unwrap() .unwrap()
.div_ceil(PAGE_SIZE); .div_ceil(PAGE_SIZE);
let start_paddr = allocator::FRAME_ALLOCATOR let start_paddr = allocator::early_alloc(
.get() Layout::from_size_align(nr_meta_pages * PAGE_SIZE, PAGE_SIZE).unwrap(),
.unwrap() )
.lock() .unwrap();
.alloc(nr_meta_pages)
.unwrap()
* PAGE_SIZE;
let slots = paddr_to_vaddr(start_paddr) as *mut MetaSlot; let slots = paddr_to_vaddr(start_paddr) as *mut MetaSlot;
@ -523,14 +535,6 @@ fn alloc_meta_frames(tot_nr_frames: usize) -> (usize, Paddr) {
(nr_meta_pages, start_paddr) (nr_meta_pages, start_paddr)
} }
/// Returns whether the global frame allocator is initialized.
pub(in crate::mm) fn is_initialized() -> bool {
// `init` sets it somewhere in the middle. But due to the safety
// requirement of the `init` function, we can assume that there
// is no race condition.
super::MAX_PADDR.load(Ordering::Relaxed) != 0
}
/// Adds a temporary linear mapping for the metadata frames. /// Adds a temporary linear mapping for the metadata frames.
/// ///
/// We only assume boot page table to contain 4G linear mapping. Thus if the /// We only assume boot page table to contain 4G linear mapping. Thus if the


@@ -44,6 +44,7 @@ use core::{
     sync::atomic::{AtomicUsize, Ordering},
 };
 
+pub use allocator::GlobalFrameAllocator;
 use meta::{mapping, AnyFrameMeta, GetFrameError, MetaSlot, REF_COUNT_UNUSED};
 pub use segment::Segment;
 use untyped::{AnyUFrameMeta, UFrame};
@@ -220,6 +221,8 @@ impl<M: AnyFrameMeta + ?Sized> Drop for Frame<M> {
             // SAFETY: this is the last reference and is about to be dropped.
             unsafe { self.slot().drop_last_in_place() };
+
+            allocator::add_free_memory_upcall(self.start_paddr(), PAGE_SIZE);
         }
     }
 }


@ -99,6 +99,26 @@ impl<M: AnyFrameMeta + ?Sized> UniqueFrame<M> {
unsafe { &mut *self.slot().dyn_meta_ptr() }
}
/// Resets the frame to unused without up-calling the allocator.
///
/// This is solely useful for the allocator implementation/testing and
/// is highly experimental. Usage of this function is discouraged.
///
/// Using this function outside the allocator would actually leak the
/// frame, since the allocator would not be aware of it.
//
// FIXME: We may have a better `Segment` and `UniqueSegment` design to
// allow the allocator to hold ownership of all the frames in a chunk
// instead of only the head. Then this weird public API can be `#[cfg(ktest)]`.
pub fn reset_as_unused(self) {
let this = ManuallyDrop::new(self);
this.slot().ref_count.store(0, Ordering::Release);
// SAFETY: We are the sole owner and the reference count is 0.
// The slot is initialized.
unsafe { this.slot().drop_last_in_place() };
}
/// Converts this frame into a raw physical address.
pub(crate) fn into_raw(self) -> Paddr {
let this = ManuallyDrop::new(self);
@ -134,6 +154,8 @@ impl<M: AnyFrameMeta + ?Sized> Drop for UniqueFrame<M> {
// SAFETY: We are the sole owner and the reference count is 0.
// The slot is initialized.
unsafe { self.slot().drop_last_in_place() };
super::allocator::add_free_memory_upcall(self.start_paddr(), PAGE_SIZE);
}
}


@ -2,7 +2,10 @@
mod slab_allocator; mod slab_allocator;
use core::alloc::{GlobalAlloc, Layout}; use core::{
alloc::{GlobalAlloc, Layout},
mem::ManuallyDrop,
};
use align_ext::AlignExt; use align_ext::AlignExt;
use log::debug; use log::debug;
@ -11,11 +14,11 @@ use spin::Once;
use super::paddr_to_vaddr; use super::paddr_to_vaddr;
use crate::{ use crate::{
mm::{frame::allocator::FRAME_ALLOCATOR, PAGE_SIZE}, impl_frame_meta_for,
mm::{FrameAllocOptions, PAGE_SIZE},
prelude::*, prelude::*,
sync::SpinLock, sync::SpinLock,
trap::disable_local, trap::disable_local,
Error,
}; };
#[global_allocator] #[global_allocator]
@ -49,6 +52,12 @@ struct LockedHeapWithRescue {
heap: Once<SpinLock<Heap>>, heap: Once<SpinLock<Heap>>,
} }
/// The metadata for the kernel heap frames.
#[derive(Debug)]
pub struct KernelHeapMeta;
impl_frame_meta_for!(KernelHeapMeta);
impl LockedHeapWithRescue { impl LockedHeapWithRescue {
/// Creates a new heap /// Creates a new heap
pub const fn new() -> Self { pub const fn new() -> Self {
@ -94,22 +103,26 @@ impl LockedHeapWithRescue {
}; };
let allocation_start = { let allocation_start = {
let mut page_allocator = FRAME_ALLOCATOR.get().unwrap().lock(); let mut options = FrameAllocOptions::new();
if num_frames >= MIN_NUM_FRAMES { options.zeroed(false);
page_allocator.alloc(num_frames).ok_or(Error::NoMemory)? let segment = if num_frames >= MIN_NUM_FRAMES {
options
.alloc_segment_with(num_frames, |_| KernelHeapMeta)
.unwrap()
} else { } else {
match page_allocator.alloc(MIN_NUM_FRAMES) { match options.alloc_segment_with(MIN_NUM_FRAMES, |_| KernelHeapMeta) {
None => page_allocator.alloc(num_frames).ok_or(Error::NoMemory)?, Ok(seg) => {
Some(start) => {
num_frames = MIN_NUM_FRAMES; num_frames = MIN_NUM_FRAMES;
start seg
} }
Err(_) => options.alloc_segment_with(num_frames, |_| KernelHeapMeta)?,
} }
} };
let paddr = segment.start_paddr();
let _ = ManuallyDrop::new(segment);
paddr
}; };
// FIXME: the alloc function internally allocates heap memory (inside FrameAllocator). let vaddr = paddr_to_vaddr(allocation_start);
// So if the heap has nearly run out, allocating frames will fail too.
let vaddr = paddr_to_vaddr(allocation_start * PAGE_SIZE);
// SAFETY: the frame is allocated from FrameAllocator and never be deallocated, // SAFETY: the frame is allocated from FrameAllocator and never be deallocated,
// so the addr is always valid. // so the addr is always valid.
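
The rescue path above allocates a segment and then forgets it with `ManuallyDrop`, so the frames stay backing the kernel heap and are never returned to the frame allocator. A condensed sketch of that pattern follows; `PermanentMeta` is an illustrative metadata type, not one added by this commit.

#[derive(Debug)]
struct PermanentMeta;
impl_frame_meta_for!(PermanentMeta);

fn alloc_pages_forever(num_frames: usize) -> Result<Paddr> {
    // Assumes the prelude `Result` and the imports already in this file.
    let segment = FrameAllocOptions::new()
        .zeroed(false)
        .alloc_segment_with(num_frames, |_| PermanentMeta)?;
    let paddr = segment.start_paddr();
    // Skip `Drop`, so `add_free_memory_upcall` never runs for these frames.
    let _ = ManuallyDrop::new(segment);
    Ok(paddr)
}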

View File

@ -16,7 +16,6 @@ pub(crate) mod kspace;
mod offset; mod offset;
pub(crate) mod page_prop; pub(crate) mod page_prop;
pub(crate) mod page_table; pub(crate) mod page_table;
pub mod stat;
pub mod tlb; pub mod tlb;
pub mod vm_space; pub mod vm_space;

View File

@ -5,6 +5,7 @@
//! in order to initialize the running phase page tables. //! in order to initialize the running phase page tables.
use core::{ use core::{
alloc::Layout,
result::Result, result::Result,
sync::atomic::{AtomicU32, Ordering}, sync::atomic::{AtomicU32, Ordering},
}; };
@ -15,7 +16,11 @@ use crate::{
cpu::num_cpus, cpu::num_cpus,
cpu_local_cell, cpu_local_cell,
mm::{ mm::{
frame::allocator::FRAME_ALLOCATOR, nr_subpage_per_huge, paddr_to_vaddr, Paddr, PageFlags, frame::{
self,
allocator::{self, EarlyAllocatedFrameMeta},
},
nr_subpage_per_huge, paddr_to_vaddr, Frame, FrameAllocOptions, Paddr, PageFlags,
PageProperty, PagingConstsTrait, PagingLevel, Vaddr, PAGE_SIZE, PageProperty, PagingConstsTrait, PagingLevel, Vaddr, PAGE_SIZE,
}, },
sync::SpinLock, sync::SpinLock,
@ -62,13 +67,27 @@ where
/// The caller should ensure that: /// The caller should ensure that:
/// - another legitimate page table is activated on this CPU; /// - another legitimate page table is activated on this CPU;
/// - this function should be called only once per CPU; /// - this function should be called only once per CPU;
/// - no [`with`] calls are performed on this CPU after this dismissal; /// - no [`with_borrow`] calls are performed on this CPU after this dismissal;
/// - no [`with`] calls are performed on this CPU after the activation of /// - no [`with_borrow`] calls are performed on this CPU after the activation
/// another page table and before this dismissal. /// of another page table and before this dismissal.
pub(crate) unsafe fn dismiss() { pub(crate) unsafe fn dismiss() {
IS_DISMISSED.store(true); IS_DISMISSED.store(true);
if DISMISS_COUNT.fetch_add(1, Ordering::SeqCst) as usize == num_cpus() - 1 { if DISMISS_COUNT.fetch_add(1, Ordering::SeqCst) as usize == num_cpus() - 1 {
BOOT_PAGE_TABLE.lock().take(); let boot_pt = BOOT_PAGE_TABLE.lock().take().unwrap();
dfs_walk_on_leave::<PageTableEntry, PagingConsts>(
boot_pt.root_pt,
PagingConsts::NR_LEVELS,
&mut |pte| {
if !pte.prop().flags.contains(PTE_POINTS_TO_FIRMWARE_PT) {
// SAFETY: The pointed frame is allocated and forgotten with `into_raw`.
drop(unsafe { Frame::<EarlyAllocatedFrameMeta>::from_raw(pte.paddr()) })
}
// Firmware provided page tables may be a DAG instead of a tree.
// Clear it to avoid double-free when we meet it the second time.
*pte = PageTableEntry::new_absent();
},
);
} }
} }
@ -97,6 +116,13 @@ pub(crate) struct BootPageTable<
_pretend_to_use: core::marker::PhantomData<(E, C)>, _pretend_to_use: core::marker::PhantomData<(E, C)>,
} }
// We use two extra available bits in the boot PT for memory management.
//
// The first available bit is used to differentiate firmware page tables from
// the page tables allocated here. The second is for identifying double-visits
// when walking the page tables since the PT can be a DAG.
const PTE_POINTS_TO_FIRMWARE_PT: PageFlags = PageFlags::AVAIL1;
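
For intuition, here is a hedged sketch of what a post-order ("on leave") walk like `dfs_walk_on_leave` could look like; the real helper in this file may differ, and the `is_present`/`is_last` method names are assumptions. The caller's closure is expected to clear visited PTEs (as `dismiss` does above) so that a firmware-provided DAG is not walked, and freed, twice.

fn dfs_walk_on_leave_sketch<E: PageTableEntryTrait, C: PagingConstsTrait>(
    pt: Paddr, // page-table frame number (physical address / base page size)
    level: PagingLevel,
    op: &mut impl FnMut(&mut E),
) {
    let ptes = paddr_to_vaddr(pt * C::BASE_PAGE_SIZE) as *mut E;
    for i in 0..nr_subpage_per_huge::<C>() {
        // SAFETY (sketch): boot page-table frames are covered by the linear mapping.
        let pte = unsafe { &mut *ptes.add(i) };
        if pte.is_present() && !pte.is_last(level) {
            // Visit the child table first, then the PTE itself, so callers
            // can free child frames before touching their parents.
            dfs_walk_on_leave_sketch::<E, C>(pte.paddr() / C::BASE_PAGE_SIZE, level - 1, op);
            op(pte);
        }
    }
}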
impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> { impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
/// Creates a new boot page table from the current page table root /// Creates a new boot page table from the current page table root
/// physical address. /// physical address.
@ -108,15 +134,13 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
/// by the firmware, loader or the setup code. /// by the firmware, loader or the setup code.
unsafe fn from_current_pt() -> Self { unsafe fn from_current_pt() -> Self {
let root_pt = crate::arch::mm::current_page_table_paddr() / C::BASE_PAGE_SIZE; let root_pt = crate::arch::mm::current_page_table_paddr() / C::BASE_PAGE_SIZE;
// Make sure the first available bit is not set for firmware page tables. // Mark the firmware-provided page tables so they are not freed on dismissal.
dfs_walk_on_leave::<E, C>(root_pt, C::NR_LEVELS, &mut |pte: &mut E| { dfs_walk_on_leave::<E, C>(root_pt, C::NR_LEVELS, &mut |pte: &mut E| {
let prop = pte.prop(); let prop = pte.prop();
if prop.flags.contains(PageFlags::AVAIL1) { pte.set_prop(PageProperty::new(
pte.set_prop(PageProperty::new( prop.flags | PTE_POINTS_TO_FIRMWARE_PT,
prop.flags - PageFlags::AVAIL1, prop.cache,
prop.cache, ));
));
}
}); });
Self { Self {
root_pt, root_pt,
@ -230,17 +254,26 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
} }
fn alloc_child(&mut self) -> E { fn alloc_child(&mut self) -> E {
let frame = FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap(); let frame_paddr = if frame::meta::is_initialized() {
let frame = FrameAllocOptions::new()
.zeroed(false)
.alloc_frame_with(EarlyAllocatedFrameMeta)
.unwrap();
frame.into_raw()
} else {
allocator::early_alloc(
Layout::from_size_align(C::BASE_PAGE_SIZE, C::BASE_PAGE_SIZE).unwrap(),
)
.unwrap()
};
// Zero it out. // Zero it out.
let vaddr = paddr_to_vaddr(frame * PAGE_SIZE) as *mut u8; let vaddr = paddr_to_vaddr(frame_paddr) as *mut u8;
unsafe { core::ptr::write_bytes(vaddr, 0, PAGE_SIZE) }; unsafe { core::ptr::write_bytes(vaddr, 0, PAGE_SIZE) };
let mut pte = E::new_pt(frame * C::BASE_PAGE_SIZE); let mut pte = E::new_pt(frame_paddr);
let prop = pte.prop(); let prop = pte.prop();
pte.set_prop(PageProperty::new( pte.set_prop(PageProperty::new(prop.flags, prop.cache));
prop.flags | PageFlags::AVAIL1,
prop.cache,
));
pte pte
} }
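
The branch above falls back to `allocator::early_alloc` before frame metadata exists. As a rough illustration only, such an early allocator could be a simple bump allocator over a usable physical range; the struct name and fields below are assumed for the sketch and are not the implementation added by this commit.

struct EarlyBumpAllocator {
    current: Paddr, // next free physical address in a usable region
    end: Paddr,     // exclusive end of that region
}

impl EarlyBumpAllocator {
    fn alloc(&mut self, layout: core::alloc::Layout) -> Option<Paddr> {
        // Assumes `align_ext::AlignExt` for `align_up` on `usize`.
        let start = self.current.align_up(layout.align().max(PAGE_SIZE));
        let new_current = start.checked_add(layout.size().align_up(PAGE_SIZE))?;
        if new_current > self.end {
            return None;
        }
        self.current = new_current;
        Some(start)
    }
}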
@ -267,20 +300,6 @@ fn dfs_walk_on_leave<E: PageTableEntryTrait, C: PagingConstsTrait>(
} }
} }
impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for BootPageTable<E, C> {
fn drop(&mut self) {
dfs_walk_on_leave::<E, C>(self.root_pt, C::NR_LEVELS, &mut |pte| {
if pte.prop().flags.contains(PageFlags::AVAIL1) {
let pt = pte.paddr() / C::BASE_PAGE_SIZE;
FRAME_ALLOCATOR.get().unwrap().lock().dealloc(pt, 1);
}
// Firmware provided page tables may be a DAG instead of a tree.
// Clear it to avoid double-free when we meet it the second time.
*pte = E::new_absent();
});
}
}
#[cfg(ktest)] #[cfg(ktest)]
use crate::prelude::*; use crate::prelude::*;

View File

@ -1,21 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! APIs for memory statistics.
use crate::mm::frame::allocator::FRAME_ALLOCATOR;
/// Total memory available for any usages in the system (in bytes).
///
/// It would be only slightly less than the total physical memory of the system
/// in most cases. For example, bad memory, kernel statically-allocated
/// memory, or firmware-reserved memory does not count.
pub fn mem_total() -> usize {
FRAME_ALLOCATOR.get().unwrap().lock().mem_total()
}
/// Current readily available memory (in bytes).
///
/// Such memory can be directly used for allocation without reclaiming.
pub fn mem_available() -> usize {
FRAME_ALLOCATOR.get().unwrap().lock().mem_available()
}

View File

@ -111,6 +111,7 @@ LINUX_BZIMAGE_BUILDER_CARGO_TOML_PATH=${ASTER_SRC_DIR}/ostd/libs/linux-bzimage/b
LINUX_BZIMAGE_SETUP_CARGO_TOML_PATH=${ASTER_SRC_DIR}/ostd/libs/linux-bzimage/setup/Cargo.toml LINUX_BZIMAGE_SETUP_CARGO_TOML_PATH=${ASTER_SRC_DIR}/ostd/libs/linux-bzimage/setup/Cargo.toml
OSDK_CARGO_TOML_PATH=${ASTER_SRC_DIR}/osdk/Cargo.toml OSDK_CARGO_TOML_PATH=${ASTER_SRC_DIR}/osdk/Cargo.toml
OSDK_TEST_RUNNER_CARGO_TOML_PATH=${ASTER_SRC_DIR}/osdk/deps/test-kernel/Cargo.toml OSDK_TEST_RUNNER_CARGO_TOML_PATH=${ASTER_SRC_DIR}/osdk/deps/test-kernel/Cargo.toml
OSDK_FRAME_ALLOCATOR_CARGO_TOML_PATH=${ASTER_SRC_DIR}/osdk/deps/frame-allocator/Cargo.toml
VERSION_PATH=${ASTER_SRC_DIR}/VERSION VERSION_PATH=${ASTER_SRC_DIR}/VERSION
current_version=$(cat ${VERSION_PATH}) current_version=$(cat ${VERSION_PATH})
@ -131,13 +132,16 @@ update_package_version ${OSTD_CARGO_TOML_PATH}
update_package_version ${LINUX_BOOT_PARAMS_CARGO_TOML_PATH} update_package_version ${LINUX_BOOT_PARAMS_CARGO_TOML_PATH}
update_package_version ${LINUX_BZIMAGE_BUILDER_CARGO_TOML_PATH} update_package_version ${LINUX_BZIMAGE_BUILDER_CARGO_TOML_PATH}
update_package_version ${LINUX_BZIMAGE_SETUP_CARGO_TOML_PATH} update_package_version ${LINUX_BZIMAGE_SETUP_CARGO_TOML_PATH}
update_package_version ${OSDK_CARGO_TOML_PATH}
update_package_version ${OSDK_TEST_RUNNER_CARGO_TOML_PATH}
update_package_version ${OSDK_FRAME_ALLOCATOR_CARGO_TOML_PATH}
update_dep_version ${OSDK_TEST_RUNNER_CARGO_TOML_PATH} ostd
update_dep_version ${OSDK_FRAME_ALLOCATOR_CARGO_TOML_PATH} ostd
update_dep_version ${OSTD_CARGO_TOML_PATH} ostd-test update_dep_version ${OSTD_CARGO_TOML_PATH} ostd-test
update_dep_version ${OSTD_CARGO_TOML_PATH} linux-boot-params update_dep_version ${OSTD_CARGO_TOML_PATH} linux-boot-params
update_dep_version ${OSTD_CARGO_TOML_PATH} ostd-macros update_dep_version ${OSTD_CARGO_TOML_PATH} ostd-macros
update_dep_version ${LINUX_BZIMAGE_SETUP_CARGO_TOML_PATH} linux-boot-params update_dep_version ${LINUX_BZIMAGE_SETUP_CARGO_TOML_PATH} linux-boot-params
update_package_version ${OSDK_CARGO_TOML_PATH}
update_package_version ${OSDK_TEST_RUNNER_CARGO_TOML_PATH}
update_dep_version ${OSDK_TEST_RUNNER_CARGO_TOML_PATH} ostd
update_dep_version ${OSDK_CARGO_TOML_PATH} linux-bzimage-builder update_dep_version ${OSDK_CARGO_TOML_PATH} linux-bzimage-builder
# Automatically bump Cargo.lock files # Automatically bump Cargo.lock files

View File

@ -83,6 +83,7 @@ for TARGET in $TARGETS; do
do_publish_for ostd/libs/ostd-test $TARGET do_publish_for ostd/libs/ostd-test $TARGET
do_publish_for ostd/libs/linux-bzimage/setup $TARGET do_publish_for ostd/libs/linux-bzimage/setup $TARGET
do_publish_for ostd $TARGET do_publish_for ostd $TARGET
do_publish_for osdk/deps/frame-allocator $TARGET
do_publish_for osdk/deps/test-kernel $TARGET do_publish_for osdk/deps/test-kernel $TARGET
# For actual publishing, we should only publish once. Using any target that # For actual publishing, we should only publish once. Using any target that