Inject a scalable slab allocator

Zhang Junyang
2025-01-13 14:24:32 +08:00
committed by Tate, Hongliang Tian
parent fdbe52c2ee
commit a708a0c046
23 changed files with 1166 additions and 626 deletions

Cargo.lock

@@ -212,6 +212,7 @@ dependencies = [
"log",
"lru",
"osdk-frame-allocator",
"osdk-heap-allocator",
"ostd",
"paste",
"rand",
@@ -1261,6 +1262,14 @@ dependencies = [
"ostd",
]
[[package]]
name = "osdk-heap-allocator"
version = "0.12.0"
dependencies = [
"log",
"ostd",
]
[[package]]
name = "osdk-test-kernel"
version = "0.12.0"


@@ -2,6 +2,7 @@
resolver = "2"
members = [
"osdk/deps/frame-allocator",
"osdk/deps/heap-allocator",
"osdk/deps/test-kernel",
"ostd",
"ostd/libs/align_ext",


@@ -145,6 +145,7 @@ NON_OSDK_CRATES := \
# and need to be built or tested with OSDK.
OSDK_CRATES := \
osdk/deps/frame-allocator \
osdk/deps/heap-allocator \
osdk/deps/test-kernel \
ostd \
ostd/libs/linux-bzimage/setup \


@@ -20,6 +20,7 @@ aster-rights = { path = "libs/aster-rights" }
component = { path = "libs/comp-sys/component" }
controlled = { path = "libs/comp-sys/controlled" }
osdk-frame-allocator = { path = "../osdk/deps/frame-allocator" }
osdk-heap-allocator = { path = "../osdk/deps/heap-allocator" }
ostd = { path = "../ostd" }
typeflags = { path = "libs/typeflags" }
typeflags-util = { path = "libs/typeflags-util" }


@@ -17,6 +17,7 @@
//! as zero-cost capabilities.
use osdk_frame_allocator::FrameAllocator;
use osdk_heap_allocator::{type_from_layout, HeapAllocator};
pub mod page_fault_handler;
pub mod perms;
@@ -27,6 +28,14 @@ pub mod vmo;
#[ostd::global_frame_allocator]
static FRAME_ALLOCATOR: FrameAllocator = FrameAllocator;
#[ostd::global_heap_allocator]
static HEAP_ALLOCATOR: HeapAllocator = HeapAllocator;
#[ostd::global_heap_allocator_slot_type_map]
const fn slot_type_from_layout(layout: core::alloc::Layout) -> Option<ostd::mm::heap::SlotInfo> {
type_from_layout(layout)
}
/// Total physical memory in the entire system in bytes.
pub fn mem_total() -> usize {
use ostd::boot::{boot_info, memory_region::MemoryRegionType};


@@ -0,0 +1,11 @@
[package]
name = "osdk-heap-allocator"
version = "0.12.0"
edition = "2021"
[dependencies]
log = "0.4"
ostd = { version = "0.12.0", path = "../../../ostd" }
[lints]
workspace = true


@@ -0,0 +1,318 @@
// SPDX-License-Identifier: MPL-2.0
//! A global heap allocator implementation composed of many slab caches.
use core::{
alloc::{AllocError, Layout},
cell::RefCell,
};
use ostd::{
cpu_local,
mm::{
heap::{GlobalHeapAllocator, HeapSlot, SlabSlotList, SlotInfo},
PAGE_SIZE,
},
sync::{LocalIrqDisabled, SpinLock},
trap,
};
use crate::slab_cache::SlabCache;
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(usize)]
enum CommonSizeClass {
Bytes8 = 8,
Bytes16 = 16,
Bytes32 = 32,
Bytes64 = 64,
Bytes128 = 128,
Bytes256 = 256,
Bytes512 = 512,
Bytes1024 = 1024,
Bytes2048 = 2048,
}
impl CommonSizeClass {
const fn from_layout(layout: Layout) -> Option<Self> {
let size_class = match layout.size() {
0..=8 => CommonSizeClass::Bytes8,
9..=16 => CommonSizeClass::Bytes16,
17..=32 => CommonSizeClass::Bytes32,
33..=64 => CommonSizeClass::Bytes64,
65..=128 => CommonSizeClass::Bytes128,
129..=256 => CommonSizeClass::Bytes256,
257..=512 => CommonSizeClass::Bytes512,
513..=1024 => CommonSizeClass::Bytes1024,
1025..=2048 => CommonSizeClass::Bytes2048,
_ => return None,
};
// Alignment must be non-zero and power-of-two.
let align_class = match layout.align() {
1 | 2 | 4 | 8 => CommonSizeClass::Bytes8,
16 => CommonSizeClass::Bytes16,
32 => CommonSizeClass::Bytes32,
64 => CommonSizeClass::Bytes64,
128 => CommonSizeClass::Bytes128,
256 => CommonSizeClass::Bytes256,
512 => CommonSizeClass::Bytes512,
1024 => CommonSizeClass::Bytes1024,
2048 => CommonSizeClass::Bytes2048,
_ => return None,
};
Some(if (size_class as usize) < (align_class as usize) {
align_class
} else {
size_class
})
}
fn from_size(size: usize) -> Option<Self> {
match size {
8 => Some(CommonSizeClass::Bytes8),
16 => Some(CommonSizeClass::Bytes16),
32 => Some(CommonSizeClass::Bytes32),
64 => Some(CommonSizeClass::Bytes64),
128 => Some(CommonSizeClass::Bytes128),
256 => Some(CommonSizeClass::Bytes256),
512 => Some(CommonSizeClass::Bytes512),
1024 => Some(CommonSizeClass::Bytes1024),
2048 => Some(CommonSizeClass::Bytes2048),
_ => None,
}
}
}
/// Get the type of the slot from the layout.
///
/// It should be used to define [`ostd::global_heap_allocator_slot_type_map`].
pub const fn type_from_layout(layout: Layout) -> Option<SlotInfo> {
if let Some(class) = CommonSizeClass::from_layout(layout) {
return Some(SlotInfo::SlabSlot(class as usize));
}
if layout.size() > PAGE_SIZE / 2 && layout.align() <= PAGE_SIZE {
return Some(SlotInfo::LargeSlot(
layout.size().div_ceil(PAGE_SIZE) * PAGE_SIZE,
));
}
None
}
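For illustration, a minimal sketch (assuming the usual 4 KiB `PAGE_SIZE` and the crate names introduced above) of what this mapping yields for a few layouts:

```rust
use core::alloc::Layout;

use osdk_heap_allocator::type_from_layout;
use ostd::mm::heap::SlotInfo;

fn layout_examples() {
    // 24 bytes with 8-byte alignment fits the 32-byte slab class.
    assert_eq!(
        type_from_layout(Layout::from_size_align(24, 8).unwrap()),
        Some(SlotInfo::SlabSlot(32))
    );
    // A 64-byte alignment promotes a 100-byte request to the 128-byte class.
    assert_eq!(
        type_from_layout(Layout::from_size_align(100, 64).unwrap()),
        Some(SlotInfo::SlabSlot(128))
    );
    // Anything larger than half a page becomes a page-granular large slot.
    assert_eq!(
        type_from_layout(Layout::from_size_align(3000, 8).unwrap()),
        Some(SlotInfo::LargeSlot(4096))
    );
}
```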
struct Heap {
slab8: SlabCache<8>,
slab16: SlabCache<16>,
slab32: SlabCache<32>,
slab64: SlabCache<64>,
slab128: SlabCache<128>,
slab256: SlabCache<256>,
slab512: SlabCache<512>,
slab1024: SlabCache<1024>,
slab2048: SlabCache<2048>,
}
impl Heap {
const fn new() -> Self {
Self {
slab8: SlabCache::new(),
slab16: SlabCache::new(),
slab32: SlabCache::new(),
slab64: SlabCache::new(),
slab128: SlabCache::new(),
slab256: SlabCache::new(),
slab512: SlabCache::new(),
slab1024: SlabCache::new(),
slab2048: SlabCache::new(),
}
}
fn alloc(&mut self, class: CommonSizeClass) -> Result<HeapSlot, AllocError> {
match class {
CommonSizeClass::Bytes8 => self.slab8.alloc(),
CommonSizeClass::Bytes16 => self.slab16.alloc(),
CommonSizeClass::Bytes32 => self.slab32.alloc(),
CommonSizeClass::Bytes64 => self.slab64.alloc(),
CommonSizeClass::Bytes128 => self.slab128.alloc(),
CommonSizeClass::Bytes256 => self.slab256.alloc(),
CommonSizeClass::Bytes512 => self.slab512.alloc(),
CommonSizeClass::Bytes1024 => self.slab1024.alloc(),
CommonSizeClass::Bytes2048 => self.slab2048.alloc(),
}
}
fn dealloc(&mut self, slot: HeapSlot, class: CommonSizeClass) -> Result<(), AllocError> {
match class {
CommonSizeClass::Bytes8 => self.slab8.dealloc(slot),
CommonSizeClass::Bytes16 => self.slab16.dealloc(slot),
CommonSizeClass::Bytes32 => self.slab32.dealloc(slot),
CommonSizeClass::Bytes64 => self.slab64.dealloc(slot),
CommonSizeClass::Bytes128 => self.slab128.dealloc(slot),
CommonSizeClass::Bytes256 => self.slab256.dealloc(slot),
CommonSizeClass::Bytes512 => self.slab512.dealloc(slot),
CommonSizeClass::Bytes1024 => self.slab1024.dealloc(slot),
CommonSizeClass::Bytes2048 => self.slab2048.dealloc(slot),
}
}
}
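// The global pool of slab caches, shared by all CPUs and protected by a spin
// lock that disables local IRQs.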
static GLOBAL_POOL: SpinLock<Heap, LocalIrqDisabled> = SpinLock::new(Heap::new());
/// The maximum size in bytes of the object cache of each slot size class.
const OBJ_CACHE_MAX_SIZE: usize = 8 * PAGE_SIZE;
/// The expected size in bytes of the object cache of each slot size class.
///
/// If the cache exceeds the maximum size or is empty, it will be adjusted to
/// this size.
const OBJ_CACHE_EXPECTED_SIZE: usize = 2 * PAGE_SIZE;
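// With 4 KiB pages and 64-byte slots, for example, a per-CPU cache keeps at
// most 8 * 4096 / 64 = 512 free slots and is adjusted back to 2 * 4096 / 64 = 128.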
struct ObjectCache<const SLOT_SIZE: usize> {
list: SlabSlotList<SLOT_SIZE>,
list_size: usize,
}
impl<const SLOT_SIZE: usize> ObjectCache<SLOT_SIZE> {
const fn new() -> Self {
Self {
list: SlabSlotList::new(),
list_size: 0,
}
}
fn alloc(&mut self) -> Result<HeapSlot, AllocError> {
if let Some(slot) = self.list.pop() {
self.list_size -= SLOT_SIZE;
return Ok(slot);
}
let size_class = CommonSizeClass::from_size(SLOT_SIZE).unwrap();
let mut global_pool = GLOBAL_POOL.lock();
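// Refill the per-CPU cache up to the expected size under a single lock
// acquisition, then serve the current request from the global pool
// (falling back to the refilled cache if that fails).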
for _ in 0..OBJ_CACHE_EXPECTED_SIZE / SLOT_SIZE {
if let Ok(slot) = global_pool.alloc(size_class) {
self.list.push(slot);
self.list_size += SLOT_SIZE;
} else {
break;
}
}
if let Ok(new_slot) = global_pool.alloc(size_class) {
Ok(new_slot)
} else if let Some(popped) = self.list.pop() {
self.list_size -= SLOT_SIZE;
Ok(popped)
} else {
Err(AllocError)
}
}
fn dealloc(&mut self, slot: HeapSlot, class: CommonSizeClass) -> Result<(), AllocError> {
if self.list_size + SLOT_SIZE < OBJ_CACHE_MAX_SIZE {
self.list.push(slot);
self.list_size += SLOT_SIZE;
return Ok(());
}
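// The per-CPU cache is full: hand this slot back to the global pool and
// trim the cache down to the expected size.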
let mut global_pool = GLOBAL_POOL.lock();
global_pool.dealloc(slot, class)?;
for _ in 0..(self.list_size - OBJ_CACHE_EXPECTED_SIZE) / SLOT_SIZE {
let slot = self.list.pop().expect("The cache size should be ample");
global_pool.dealloc(slot, class)?;
self.list_size -= SLOT_SIZE;
}
Ok(())
}
}
struct LocalCache {
cache8: ObjectCache<8>,
cache16: ObjectCache<16>,
cache32: ObjectCache<32>,
cache64: ObjectCache<64>,
cache128: ObjectCache<128>,
cache256: ObjectCache<256>,
cache512: ObjectCache<512>,
cache1024: ObjectCache<1024>,
cache2048: ObjectCache<2048>,
}
impl LocalCache {
const fn new() -> Self {
Self {
cache8: ObjectCache::new(),
cache16: ObjectCache::new(),
cache32: ObjectCache::new(),
cache64: ObjectCache::new(),
cache128: ObjectCache::new(),
cache256: ObjectCache::new(),
cache512: ObjectCache::new(),
cache1024: ObjectCache::new(),
cache2048: ObjectCache::new(),
}
}
fn alloc(&mut self, class: CommonSizeClass) -> Result<HeapSlot, AllocError> {
match class {
CommonSizeClass::Bytes8 => self.cache8.alloc(),
CommonSizeClass::Bytes16 => self.cache16.alloc(),
CommonSizeClass::Bytes32 => self.cache32.alloc(),
CommonSizeClass::Bytes64 => self.cache64.alloc(),
CommonSizeClass::Bytes128 => self.cache128.alloc(),
CommonSizeClass::Bytes256 => self.cache256.alloc(),
CommonSizeClass::Bytes512 => self.cache512.alloc(),
CommonSizeClass::Bytes1024 => self.cache1024.alloc(),
CommonSizeClass::Bytes2048 => self.cache2048.alloc(),
}
}
fn dealloc(&mut self, slot: HeapSlot, class: CommonSizeClass) -> Result<(), AllocError> {
match class {
CommonSizeClass::Bytes8 => self.cache8.dealloc(slot, class),
CommonSizeClass::Bytes16 => self.cache16.dealloc(slot, class),
CommonSizeClass::Bytes32 => self.cache32.dealloc(slot, class),
CommonSizeClass::Bytes64 => self.cache64.dealloc(slot, class),
CommonSizeClass::Bytes128 => self.cache128.dealloc(slot, class),
CommonSizeClass::Bytes256 => self.cache256.dealloc(slot, class),
CommonSizeClass::Bytes512 => self.cache512.dealloc(slot, class),
CommonSizeClass::Bytes1024 => self.cache1024.dealloc(slot, class),
CommonSizeClass::Bytes2048 => self.cache2048.dealloc(slot, class),
}
}
}
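// The per-CPU caches of free slots. They are only accessed with local IRQs
// disabled, so a `RefCell` suffices for interior mutability.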
cpu_local! {
static LOCAL_POOL: RefCell<LocalCache> = RefCell::new(LocalCache::new());
}
/// The global heap allocator provided by OSDK.
///
/// It is a singleton that provides heap allocation for the kernel. If
/// multiple instances of this struct are created, all the member functions
/// will eventually access the same allocator.
pub struct HeapAllocator;
impl GlobalHeapAllocator for HeapAllocator {
fn alloc(&self, layout: Layout) -> Result<HeapSlot, AllocError> {
let Some(class) = CommonSizeClass::from_layout(layout) else {
return HeapSlot::alloc_large(layout.size().div_ceil(PAGE_SIZE) * PAGE_SIZE);
};
let irq_guard = trap::disable_local();
let this_cache = LOCAL_POOL.get_with(&irq_guard);
let mut local_cache = this_cache.borrow_mut();
local_cache.alloc(class)
}
fn dealloc(&self, slot: HeapSlot) -> Result<(), AllocError> {
let Some(class) = CommonSizeClass::from_size(slot.size()) else {
slot.dealloc_large();
return Ok(());
};
let irq_guard = trap::disable_local();
let this_cache = LOCAL_POOL.get_with(&irq_guard);
let mut local_cache = this_cache.borrow_mut();
local_cache.dealloc(slot, class)
}
}


@@ -0,0 +1,10 @@
// SPDX-License-Identifier: MPL-2.0
#![feature(allocator_api)]
#![no_std]
#![deny(unsafe_code)]
mod allocator;
mod slab_cache;
pub use allocator::{type_from_layout, HeapAllocator};


@@ -0,0 +1,144 @@
// SPDX-License-Identifier: MPL-2.0
//! The slab cache that is composed of slabs.
use core::alloc::AllocError;
use ostd::mm::{
frame::linked_list::LinkedList,
heap::{HeapSlot, Slab, SlabMeta},
Paddr, PAGE_SIZE,
};
const EXPECTED_EMPTY_SLABS: usize = 4;
const MAX_EMPTY_SLABS: usize = 16;
/// A slab cache.
///
/// A slab cache contains 3 parts:
/// - a list of empty slabs;
/// - a list of partially allocated slabs;
/// - and a list of full slabs.
///
/// So the cache is partially sorted, to allow caching and reusing memory.
pub struct SlabCache<const SLOT_SIZE: usize> {
empty: LinkedList<SlabMeta<SLOT_SIZE>>,
partial: LinkedList<SlabMeta<SLOT_SIZE>>,
full: LinkedList<SlabMeta<SLOT_SIZE>>,
}
impl<const SLOT_SIZE: usize> SlabCache<SLOT_SIZE> {
/// Creates a new slab cache.
pub const fn new() -> Self {
Self {
empty: LinkedList::new(),
partial: LinkedList::new(),
full: LinkedList::new(),
}
}
/// Allocates a slot from the cache.
///
/// The caller must keep track of which cache the slot comes from, since this
/// cannot be told from `&mut self`. That information is needed for deallocation.
pub fn alloc(&mut self) -> Result<HeapSlot, AllocError> {
// Try to allocate from the partial slabs first.
if !self.partial.is_empty() {
let mut cursor = self.partial.cursor_back_mut();
let current = cursor.current_meta().unwrap();
let allocated = current.alloc().unwrap();
if current.nr_allocated() == current.capacity() {
self.full.push_front(cursor.take_current().unwrap());
}
return Ok(allocated);
}
// If no partial slab is available, try to get an empty slab.
if !self.empty.is_empty() {
let mut slab = self.empty.pop_front().unwrap();
let allocated = slab.meta_mut().alloc().unwrap();
self.add_slab(slab);
return Ok(allocated);
}
// If no empty slab is available, allocate new slabs.
let Ok(mut allocated_empty) = Slab::new() else {
log::error!("Failed to allocate a new slab");
return Err(AllocError);
};
let allocated = allocated_empty.meta_mut().alloc().unwrap();
self.add_slab(allocated_empty);
// Allocate more empty slabs and push them into the cache.
for _ in 0..EXPECTED_EMPTY_SLABS {
if let Ok(allocated_empty) = Slab::new() {
self.empty.push_front(allocated_empty);
} else {
break;
}
}
Ok(allocated)
}
/// Deallocates a slot into the cache.
///
/// The slot must be allocated from the cache.
pub fn dealloc(&mut self, slot: HeapSlot) -> Result<(), AllocError> {
let which = which_slab(&slot).ok_or_else(|| {
log::error!("Can't find the slab for the slot");
AllocError
})?;
let mut extracted_slab = None;
if self.partial.contains(which) {
extracted_slab = self.partial.cursor_mut_at(which).unwrap().take_current();
} else if self.full.contains(which) {
extracted_slab = self.full.cursor_mut_at(which).unwrap().take_current();
}
let mut slab = extracted_slab.ok_or_else(|| {
log::error!("Deallocating a slot that is not allocated from the cache");
AllocError
})?;
slab.dealloc(slot)?;
self.add_slab(slab);
// If the slab cache has too many empty slabs, free some of them.
if self.empty.size() > MAX_EMPTY_SLABS {
while self.empty.size() > EXPECTED_EMPTY_SLABS {
self.empty.pop_front();
}
}
Ok(())
}
fn add_slab(&mut self, slab: Slab<SLOT_SIZE>) {
if slab.meta().nr_allocated() == slab.meta().capacity() {
self.full.push_front(slab);
} else if slab.meta().nr_allocated() > 0 {
self.partial.push_back(slab);
} else {
self.empty.push_front(slab);
}
}
}
/// Gets which slab the slot belongs to.
///
/// If the slot size is larger than [`PAGE_SIZE`], it is not from a slab
/// and this function will return `None`.
///
/// `SLOT_SIZE` can be larger than `slot.size()` but not smaller.
fn which_slab(slot: &HeapSlot) -> Option<Paddr> {
if slot.size() > PAGE_SIZE {
return None;
}
let frame_paddr = slot.paddr() / PAGE_SIZE * PAGE_SIZE;
Some(frame_paddr)
}
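A minimal sketch of how the cache above is meant to be used from within this crate (assuming a kernel context where frames can be allocated):

```rust
use core::alloc::AllocError;

fn exercise_cache() -> Result<(), AllocError> {
    // A cache of 128-byte slots. Allocation prefers partial slabs, then empty
    // ones, and only then requests fresh slabs from the frame allocator.
    let mut cache: SlabCache<128> = SlabCache::new();
    let slot = cache.alloc()?;
    // Deallocation locates the owning slab via the slot's frame address;
    // surplus empty slabs are released back to the frame allocator.
    cache.dealloc(slot)?;
    Ok(())
}
```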


@@ -13,11 +13,31 @@ fn panic(info: &core::panic::PanicInfo) -> ! {
unsafe { __ostd_panic_handler(info); }
}
mod default_frame_allocator {
use ostd::mm::frame::GlobalFrameAllocator;
use osdk_frame_allocator::FrameAllocator;
static FRAME_ALLOCATOR: FrameAllocator = FrameAllocator;
#[no_mangle]
#[linkage = "weak"]
static __GLOBAL_FRAME_ALLOCATOR_REF: &'static dyn GlobalFrameAllocator = &FRAME_ALLOCATOR;
}
mod default_heap_allocator {
use ostd::mm::heap::GlobalHeapAllocator;
use osdk_heap_allocator::{HeapAllocator, type_from_layout};
static HEAP_ALLOCATOR: HeapAllocator = HeapAllocator;
#[no_mangle]
#[linkage = "weak"]
static __GLOBAL_HEAP_ALLOCATOR_REF: &'static dyn GlobalHeapAllocator = &HEAP_ALLOCATOR;
#[no_mangle]
#[linkage = "weak"]
#[expect(non_snake_case)]
fn __GLOBAL_HEAP_SLOT_SIZE_FROM_LAYOUT(layout: core::alloc::Layout) -> Option<ostd::mm::heap::SlotInfo> {
type_from_layout(layout)
}
}


@@ -238,6 +238,12 @@ fn add_manifest_dependency(
Path::new("deps").join("frame-allocator"),
);
add_manifest_dependency_to(
dependencies,
"osdk-heap-allocator",
Path::new("deps").join("heap-allocator"),
);
add_manifest_dependency_to(dependencies, "ostd", Path::new("..").join("ostd"));
let content = toml::to_string(&manifest).unwrap();


@@ -68,11 +68,12 @@ pub fn test_main(_attr: TokenStream, item: TokenStream) -> TokenStream {
/// A macro attribute for the global frame allocator.
///
/// The attributed static variable will be used to provide frame allocation
/// for the kernel. The variable should have type `ostd::mm::GlobalFrameAllocator`.
/// for the kernel.
///
/// # Example
///
/// ```ignore
/// use core::alloc::Layout;
/// use ostd::{mm::{frame::GlobalFrameAllocator, Paddr}, global_frame_allocator};
///
/// // Of course it won't work because all allocations will fail.
@@ -102,6 +103,78 @@ pub fn global_frame_allocator(_attr: TokenStream, item: TokenStream) -> TokenStr
.into()
}
/// A macro attribute to register the global heap allocator.
///
/// The attributed static variable will be used to provide heap allocation
/// for the kernel.
///
/// This attribute is not to be confused with Rust's built-in
/// [`global_allocator`] attribute, which applies to a static variable
/// implementing the unsafe `GlobalAlloc` trait. In contrast, the
/// [`global_heap_allocator`] attribute does not require the heap allocator to
/// implement an unsafe trait. [`global_heap_allocator`] eventually relies on
/// [`global_allocator`] to customize Rust's heap allocator.
///
/// # Example
///
/// ```ignore
/// use core::alloc::{AllocError, Layout};
/// use ostd::{mm::heap::{GlobalHeapAllocator, HeapSlot}, global_heap_allocator};
///
/// // Of course it won't work and all allocations will fail.
/// // It's just an example.
/// #[global_heap_allocator]
/// static ALLOCATOR: MyHeapAllocator = MyHeapAllocator;
///
/// struct MyHeapAllocator;
///
/// impl GlobalHeapAllocator for MyHeapAllocator {
/// fn alloc(&self, _layout: Layout) -> Result<HeapSlot, AllocError> { Err(AllocError) }
/// fn dealloc(&self, _slot: HeapSlot) -> Result<(), AllocError> { Ok(()) }
/// }
/// ```
#[proc_macro_attribute]
pub fn global_heap_allocator(_attr: TokenStream, item: TokenStream) -> TokenStream {
// Make a `static __GLOBAL_HEAP_ALLOCATOR_REF: &'static dyn GlobalHeapAllocator`
// That points to the annotated static variable.
let item = parse_macro_input!(item as syn::ItemStatic);
let static_name = &item.ident;
quote!(
#[no_mangle]
static __GLOBAL_HEAP_ALLOCATOR_REF: &'static dyn ostd::mm::heap::GlobalHeapAllocator = &#static_name;
#item
)
.into()
}
/// A macro attribute to provide the heap slot type given the layout.
///
/// The users must decide the size and the type of the heap slot to serve an
/// allocation with the layout. The function should return `None` if the layout
/// is not supported.
///
/// The annotated function should be idempotent, i.e., the result should be the
/// same for the same layout. OSDK enforces this by only allowing the function
/// to be `const`.
#[proc_macro_attribute]
pub fn global_heap_allocator_slot_type_map(_attr: TokenStream, item: TokenStream) -> TokenStream {
// Rewrite the input `const fn __any_name__(layout: Layout) -> Option<SlotInfo> { ... }` to
// `const extern "Rust" fn __GLOBAL_HEAP_SLOT_SIZE_FROM_LAYOUT(layout: Layout) -> Option<SlotInfo> { ... }`.
// Reject if the input is not a `const fn`.
let item = parse_macro_input!(item as syn::ItemFn);
assert!(
item.sig.constness.is_some(),
"the annotated function must be `const`"
);
quote!(
#[export_name = "__GLOBAL_HEAP_SLOT_SIZE_FROM_LAYOUT"]
#item
)
.into()
}
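As a minimal sketch, the annotated function can simply delegate to an existing mapping such as the OSDK-provided `type_from_layout`, which is what the kernel crate in this commit does:

```rust
use osdk_heap_allocator::type_from_layout;

#[ostd::global_heap_allocator_slot_type_map]
const fn slot_type_from_layout(layout: core::alloc::Layout) -> Option<ostd::mm::heap::SlotInfo> {
    type_from_layout(layout)
}
```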
/// A macro attribute for the panic handler.
///
/// The attributed function will be used to override OSTD's default


@@ -47,7 +47,10 @@ mod util;
use core::sync::atomic::{AtomicBool, Ordering};
pub use ostd_macros::{global_frame_allocator, main, panic_handler};
pub use ostd_macros::{
global_frame_allocator, global_heap_allocator, global_heap_allocator_slot_type_map, main,
panic_handler,
};
pub use ostd_pod::Pod;
pub use self::{error::Error, prelude::Result};
@@ -98,9 +101,6 @@ unsafe fn init() {
mm::kspace::init_kernel_page_table(meta_pages);
// SAFETY: This function is called only once and only on the BSP.
unsafe { mm::heap_allocator::init() };
crate::sync::init();
boot::init_after_heap();


@@ -107,6 +107,20 @@ impl<M: AnyFrameMeta> Segment<M> {
}
Ok(segment)
}
/// Restores the [`Segment`] from the raw physical address range.
///
/// # Safety
///
/// The range must be a forgotten [`Segment`] that matches the type `M`.
/// It could be manually forgotten by [`core::mem::forget`],
/// [`ManuallyDrop`], or [`Self::into_raw`].
pub(crate) unsafe fn from_raw(range: Range<Paddr>) -> Self {
Self {
range,
_marker: core::marker::PhantomData,
}
}
}
impl<M: AnyFrameMeta + ?Sized> Segment<M> {
@@ -180,6 +194,13 @@ impl<M: AnyFrameMeta + ?Sized> Segment<M> {
_marker: core::marker::PhantomData,
}
}
/// Forgets the [`Segment`] and gets a raw range of physical addresses.
pub(crate) fn into_raw(self) -> Range<Paddr> {
let range = self.range.clone();
let _ = ManuallyDrop::new(self);
range
}
}
impl<M: AnyFrameMeta + ?Sized> From<Frame<M>> for Segment<M> {

ostd/src/mm/heap/mod.rs

@@ -0,0 +1,151 @@
// SPDX-License-Identifier: MPL-2.0
//! Manages the kernel heap using slab or buddy allocation strategies.
use core::{
alloc::{AllocError, GlobalAlloc, Layout},
ptr::NonNull,
};
use crate::mm::Vaddr;
mod slab;
mod slot;
mod slot_list;
pub use self::{
slab::{SharedSlab, Slab, SlabMeta},
slot::{HeapSlot, SlotInfo},
slot_list::SlabSlotList,
};
/// The trait for the global heap allocator.
///
/// By providing the slab ([`Slab`]) and heap slot ([`HeapSlot`])
/// mechanisms, OSTD allows users to implement their own kernel heap in a safe
/// manner, as an alternative to the unsafe [`core::alloc::GlobalAlloc`].
///
/// To provide the global heap allocator, use [`crate::global_heap_allocator`]
/// to mark a static variable that implements this trait. Use
/// [`crate::global_heap_allocator_slot_type_map`] to specify the sizes of
/// slots for different layouts. This latter restriction may be lifted in the
/// future.
pub trait GlobalHeapAllocator: Sync {
/// Allocates a [`HeapSlot`] according to the layout.
///
/// OSTD calls this method to allocate memory from the global heap.
///
/// The returned [`HeapSlot`] must be valid for the layout, i.e., the size
/// must be at least the size of the layout and the alignment must be at
/// least the alignment of the layout. Furthermore, the size of the
/// returned [`HeapSlot`] must match the size returned by the function
/// marked with [`crate::global_heap_allocator_slot_type_map`].
fn alloc(&self, layout: Layout) -> Result<HeapSlot, AllocError>;
/// Deallocates a [`HeapSlot`].
///
/// OSTD calls this method to deallocate memory back to the global heap.
///
/// Each deallocation must correspond to exactly one previous allocation. The provided
/// [`HeapSlot`] must match the one returned from the original allocation.
fn dealloc(&self, slot: HeapSlot) -> Result<(), AllocError>;
}
extern "Rust" {
/// The reference to the global heap allocator generated by the
/// [`crate::global_heap_allocator`] attribute.
static __GLOBAL_HEAP_ALLOCATOR_REF: &'static dyn GlobalHeapAllocator;
/// Gets the size and type of the heap slot to serve an allocation.
/// See [`crate::global_heap_allocator_slot_type_map`].
fn __GLOBAL_HEAP_SLOT_SIZE_FROM_LAYOUT(layout: Layout) -> Option<SlotInfo>;
}
/// Gets the reference to the user-defined global heap allocator.
fn get_global_heap_allocator() -> &'static dyn GlobalHeapAllocator {
// SAFETY: This up-call is redirected safely to Rust code by OSDK.
unsafe { __GLOBAL_HEAP_ALLOCATOR_REF }
}
/// Gets the size and type of the heap slot to serve an allocation.
///
/// This function is defined by the OSTD user and should be idempotent,
/// as we require it to be implemented as a `const fn`.
///
/// See [`crate::global_heap_allocator_slot_type_map`].
fn slot_size_from_layout(layout: Layout) -> Option<SlotInfo> {
// SAFETY: This up-call is redirected safely to Rust code by OSDK.
unsafe { __GLOBAL_HEAP_SLOT_SIZE_FROM_LAYOUT(layout) }
}
macro_rules! abort_with_message {
($($arg:tt)*) => {
log::error!($($arg)*);
crate::panic::abort();
};
}
#[alloc_error_handler]
fn handle_alloc_error(layout: core::alloc::Layout) -> ! {
abort_with_message!("Heap allocation error, layout = {:#x?}", layout);
}
#[global_allocator]
static HEAP_ALLOCATOR: AllocDispatch = AllocDispatch;
struct AllocDispatch;
// TODO: Somehow restrict unwinding in the user-provided global allocator.
// Panicking should be fine, but we shouldn't unwind on panics.
unsafe impl GlobalAlloc for AllocDispatch {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let Some(required_slot) = slot_size_from_layout(layout) else {
abort_with_message!("Heap allocation size not found for layout = {:#x?}", layout);
};
let res = get_global_heap_allocator().alloc(layout);
let Ok(slot) = res else {
return core::ptr::null_mut();
};
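// The slot handed back by the user-defined allocator must agree with the
// size chosen by the user-defined layout mapping and must satisfy the
// layout; any mismatch indicates a broken allocator, so abort rather than
// risk memory corruption.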
if required_slot.size() != slot.size()
|| slot.size() < layout.size()
|| slot.as_ptr() as Vaddr % layout.align() != 0
{
abort_with_message!(
"Heap allocation mismatch: slot ptr = {:p}, size = {:x}; layout = {:#x?}; required_slot = {:#x?}",
slot.as_ptr(),
slot.size(),
layout,
required_slot,
);
}
slot.as_ptr()
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
// Now we restore the `HeapSlot` from the pointer and the layout.
let Some(required_slot) = slot_size_from_layout(layout) else {
abort_with_message!(
"Heap deallocation size not found for layout = {:#x?}",
layout
);
};
// SAFETY: The validity of the pointer is guaranteed by the caller. The
// size must match the size of the slot when it was allocated, since we
// require `slot_size_from_layout` to be idempotent.
let slot = unsafe { HeapSlot::new(NonNull::new_unchecked(ptr), required_slot) };
let res = get_global_heap_allocator().dealloc(slot);
if res.is_err() {
abort_with_message!(
"Heap deallocation error, ptr = {:p}, layout = {:#x?}, required_slot = {:#x?}",
ptr,
layout,
required_slot,
);
}
}
}

ostd/src/mm/heap/slab.rs

@@ -0,0 +1,140 @@
// SPDX-License-Identifier: MPL-2.0
//! Slabs for implementing the slab allocator.
use core::{alloc::AllocError, ptr::NonNull};
use super::{slot::HeapSlot, slot_list::SlabSlotList};
use crate::mm::{
frame::{linked_list::Link, meta::AnyFrameMeta},
paddr_to_vaddr, Frame, FrameAllocOptions, UniqueFrame, PAGE_SIZE,
};
/// A slab.
///
/// The slot size is the maximum size and alignment of the objects that can be
/// allocated from the slab. The slab is divided into slots of this size.
///
/// The size of the slot cannot be smaller than the size of [`usize`] and must
/// be a power of two. The slab itself is one frame of [`PAGE_SIZE`] bytes,
/// which is no smaller than the slot size.
///
/// The `SLOT_SIZE` is the size of the slot in bytes. It must be smaller than or
/// equal to [`PAGE_SIZE`]. This restriction may be lifted in the future.
pub type Slab<const SLOT_SIZE: usize> = UniqueFrame<Link<SlabMeta<SLOT_SIZE>>>;
/// A shared pointer to a slab.
///
/// It is only useful for referring to a slab from a stray slot. While an object
/// of this type exists, no mutable references to the slab can be created, so do
/// not hold it for long.
pub type SharedSlab<const SLOT_SIZE: usize> = Frame<Link<SlabMeta<SLOT_SIZE>>>;
/// Frame metadata of a slab.
///
/// Each slab is backed by a [`UniqueFrame`].
#[derive(Debug)]
pub struct SlabMeta<const SLOT_SIZE: usize> {
/// The list of free slots inside the slab.
///
/// Slots not inside the slab should not be in the list.
free_list: SlabSlotList<SLOT_SIZE>,
/// The number of allocated slots in the slab.
///
/// Even if a slot is free, as long as it does not stay in the
/// [`Self::free_list`], it is considered allocated.
nr_allocated: u16,
}
unsafe impl<const SLOT_SIZE: usize> Send for SlabMeta<SLOT_SIZE> {}
unsafe impl<const SLOT_SIZE: usize> Sync for SlabMeta<SLOT_SIZE> {}
unsafe impl<const SLOT_SIZE: usize> AnyFrameMeta for SlabMeta<SLOT_SIZE> {
fn on_drop(&mut self, _reader: &mut crate::mm::VmReader<crate::mm::Infallible>) {
if self.nr_allocated != 0 {
// FIXME: We have no mechanisms to forget the slab once we are here,
// so we require the user to deallocate all slots before dropping.
panic!("{} slots allocated when dropping a slab", self.nr_allocated);
}
}
fn is_untyped(&self) -> bool {
false
}
}
impl<const SLOT_SIZE: usize> SlabMeta<SLOT_SIZE> {
/// Gets the capacity of the slab (regardless of the number of allocated slots).
pub const fn capacity(&self) -> u16 {
(PAGE_SIZE / SLOT_SIZE) as u16
}
/// Gets the number of allocated slots.
pub fn nr_allocated(&self) -> u16 {
self.nr_allocated
}
/// Allocates a slot from the slab.
pub fn alloc(&mut self) -> Result<HeapSlot, AllocError> {
let Some(allocated) = self.free_list.pop() else {
log::error!("Allocating a slot from a full slab");
return Err(AllocError);
};
self.nr_allocated += 1;
Ok(allocated)
}
}
impl<const SLOT_SIZE: usize> Slab<SLOT_SIZE> {
/// Allocates a new slab.
///
/// The slab occupies one frame of [`PAGE_SIZE`] bytes, divided into slots of
/// `SLOT_SIZE` bytes.
pub fn new() -> crate::prelude::Result<Self> {
const { assert!(SLOT_SIZE <= PAGE_SIZE) };
// To ensure we can store a pointer in each slot.
const { assert!(SLOT_SIZE >= core::mem::size_of::<usize>()) };
// To ensure `nr_allocated` can be stored in a `u16`.
const { assert!(PAGE_SIZE / SLOT_SIZE <= u16::MAX as usize) };
let mut slab: Slab<SLOT_SIZE> = FrameAllocOptions::new()
.zeroed(false)
.alloc_frame_with(Link::new(SlabMeta::<SLOT_SIZE> {
free_list: SlabSlotList::new(),
nr_allocated: 0,
}))?
.try_into()
.unwrap();
let head_paddr = slab.start_paddr();
let head_vaddr = paddr_to_vaddr(head_paddr);
// Push each slot to the free list.
for slot_offset in (0..PAGE_SIZE).step_by(SLOT_SIZE) {
// SAFETY: The slot is within the slab so it can't be NULL.
let slot_ptr = unsafe { NonNull::new_unchecked((head_vaddr + slot_offset) as *mut u8) };
// SAFETY: The slot is newly allocated in the slab.
slab.meta_mut()
.free_list
.push(unsafe { HeapSlot::new(slot_ptr, super::SlotInfo::SlabSlot(SLOT_SIZE)) });
}
Ok(slab)
}
/// Deallocates a slot to the slab.
///
/// If the slot does not belong to the slab it returns [`AllocError`].
pub fn dealloc(&mut self, slot: HeapSlot) -> Result<(), AllocError> {
if !(self.start_paddr()..self.start_paddr() + self.size()).contains(&slot.paddr()) {
log::error!("Deallocating a slot to a slab that does not own the slot");
return Err(AllocError);
}
debug_assert_eq!(slot.size(), SLOT_SIZE);
self.meta_mut().free_list.push(slot);
self.meta_mut().nr_allocated -= 1;
Ok(())
}
}
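A minimal sketch of exercising a slab directly (assuming 4 KiB pages and a kernel context where frame allocation succeeds):

```rust
use core::alloc::AllocError;

use ostd::mm::heap::{HeapSlot, Slab};

fn exercise_slab() -> Result<(), AllocError> {
    // A fresh 64-byte slab manages one frame split into 4096 / 64 = 64 slots.
    let mut slab = Slab::<64>::new().map_err(|_| AllocError)?;
    assert_eq!(slab.meta().capacity(), 64);
    let slot: HeapSlot = slab.meta_mut().alloc()?;
    assert_eq!(slab.meta().nr_allocated(), 1);
    slab.dealloc(slot)?;
    Ok(())
}
```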

ostd/src/mm/heap/slot.rs

@@ -0,0 +1,154 @@
// SPDX-License-Identifier: MPL-2.0
//! Heap slots for allocations.
use core::{alloc::AllocError, ptr::NonNull};
use crate::{
impl_frame_meta_for,
mm::{
kspace::LINEAR_MAPPING_BASE_VADDR, paddr_to_vaddr, FrameAllocOptions, Paddr, Segment,
Vaddr, PAGE_SIZE,
},
};
/// A slot that will become or has been turned from a heap allocation.
///
/// Heap slots can come from [`Slab`] or directly from a typed [`Segment`].
///
/// Heap slots can be used to fulfill heap allocations requested by the allocator.
/// Upon deallocation, the deallocated memory also becomes a heap slot.
///
/// The size of the heap slot must match the slot size of the [`Slab`] or the
/// size of the [`Segment`].
///
/// [`Slab`]: super::Slab
pub struct HeapSlot {
/// The address of the slot.
addr: NonNull<u8>,
/// The type and size of the slot.
info: SlotInfo,
}
/// The type and size of the heap slot that should be used for the allocation.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SlotInfo {
/// The slot is from a [`super::Slab`].
///
/// The size of the slot and the corresponding slab are provided.
/// Both values are identical.
SlabSlot(usize),
/// The slot is from a [`Segment`].
///
/// The size of the slot and the corresponding segment are provided.
/// Both values are identical.
LargeSlot(usize),
}
impl SlotInfo {
/// Gets the size of the slot.
pub fn size(&self) -> usize {
match self {
Self::SlabSlot(size) => *size,
Self::LargeSlot(size) => *size,
}
}
}
impl HeapSlot {
/// Creates a new pointer to a heap slot.
///
/// # Safety
///
/// The pointer to the slot must either:
/// - be a free slot in a [`super::Slab`], or
/// - be a free slot in a [`Segment`].
///
/// If the pointer is from a [`super::Slab`] or [`Segment`], the slot must
/// have a size that matches the slot size of the slab or segment respectively.
pub(super) unsafe fn new(addr: NonNull<u8>, info: SlotInfo) -> Self {
Self { addr, info }
}
/// Allocates a large slot.
///
/// This function allocates in units of [`PAGE_SIZE`] bytes.
///
/// This function returns an error if the frame allocation fails.
///
/// # Panics
///
/// This function panics if the size is not a multiple of [`PAGE_SIZE`].
pub fn alloc_large(size: usize) -> Result<Self, AllocError> {
assert_eq!(size % PAGE_SIZE, 0);
let nframes = size / PAGE_SIZE;
let segment = FrameAllocOptions::new()
.zeroed(false)
.alloc_segment_with(nframes, |_| LargeAllocFrameMeta)
.map_err(|_| {
log::error!("Failed to allocate a large slot");
AllocError
})?;
let paddr_range = segment.into_raw();
let vaddr = paddr_to_vaddr(paddr_range.start);
Ok(Self {
addr: NonNull::new(vaddr as *mut u8).unwrap(),
info: SlotInfo::LargeSlot(size),
})
}
/// Deallocates a large slot.
///
/// # Panics
///
/// This function aborts if the slot was not allocated with
/// [`HeapSlot::alloc_large`], as it requires specific memory management
/// operations that only apply to large slots.
pub fn dealloc_large(self) {
let SlotInfo::LargeSlot(size) = self.info else {
log::error!(
"Deallocating a large slot that was not allocated with `HeapSlot::alloc_large`"
);
crate::panic::abort();
};
debug_assert_eq!(size % PAGE_SIZE, 0);
debug_assert_eq!(self.paddr() % PAGE_SIZE, 0);
let nframes = size / PAGE_SIZE;
let range = self.paddr()..self.paddr() + nframes * PAGE_SIZE;
// SAFETY: The segment was once forgotten when allocated.
drop(unsafe { Segment::<LargeAllocFrameMeta>::from_raw(range) });
}
/// Gets the physical address of the slot.
pub fn paddr(&self) -> Paddr {
self.addr.as_ptr() as Vaddr - LINEAR_MAPPING_BASE_VADDR
}
/// Gets the size of the slot.
pub fn size(&self) -> usize {
match self.info {
SlotInfo::SlabSlot(size) => size,
SlotInfo::LargeSlot(size) => size,
}
}
/// Gets the type and size of the slot.
pub fn info(&self) -> SlotInfo {
self.info
}
/// Gets the pointer to the slot.
pub fn as_ptr(&self) -> *mut u8 {
self.addr.as_ptr()
}
}
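A minimal sketch of the large-slot path (assuming a kernel context where frame allocation succeeds):

```rust
use core::alloc::AllocError;

use ostd::mm::{heap::HeapSlot, PAGE_SIZE};

fn exercise_large_slot() -> Result<(), AllocError> {
    // Sizes must be whole pages; `alloc_large` backs the slot with a fresh
    // segment that is forgotten, and `dealloc_large` restores and drops it.
    let slot = HeapSlot::alloc_large(2 * PAGE_SIZE)?;
    assert_eq!(slot.size(), 2 * PAGE_SIZE);
    assert_eq!(slot.paddr() % PAGE_SIZE, 0);
    slot.dealloc_large();
    Ok(())
}
```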
/// The frames allocated for a large allocation.
#[derive(Debug)]
pub struct LargeAllocFrameMeta;
impl_frame_meta_for!(LargeAllocFrameMeta);


@@ -0,0 +1,82 @@
// SPDX-License-Identifier: MPL-2.0
//! Implementation of the free heap slot list.
use core::ptr::NonNull;
use super::HeapSlot;
/// A singly-linked list of [`HeapSlot`]s from [`super::Slab`]s.
///
/// The slots inside this list will have a size of `SLOT_SIZE`. They can come
/// from different slabs.
#[derive(Debug)]
pub struct SlabSlotList<const SLOT_SIZE: usize> {
/// The head of the list.
head: Option<NonNull<u8>>,
}
impl<const SLOT_SIZE: usize> Default for SlabSlotList<SLOT_SIZE> {
fn default() -> Self {
Self::new()
}
}
impl<const SLOT_SIZE: usize> SlabSlotList<SLOT_SIZE> {
/// Creates a new empty list.
pub const fn new() -> Self {
Self { head: None }
}
/// Pushes a slot to the front of the list.
///
/// # Panics
///
/// Panics if
/// - the slot does not come from a slab
/// (i.e., `!matches!(slot.info(), SlotInfo::SlabSlot(_))`);
/// - the size of the slot does not match `SLOT_SIZE`.
pub fn push(&mut self, slot: HeapSlot) {
let slot_ptr = slot.as_ptr();
let super::SlotInfo::SlabSlot(slot_size) = slot.info() else {
panic!("The slot does not come from a slab");
};
assert_eq!(slot_size, SLOT_SIZE);
const { assert!(SLOT_SIZE >= core::mem::size_of::<usize>()) };
let original_head = self.head;
debug_assert!(!slot_ptr.is_null());
// SAFETY: A pointer to a slot must not be NULL.
self.head = Some(unsafe { NonNull::new_unchecked(slot_ptr) });
// Write the original head to the slot.
// SAFETY: A heap slot must be free so the pointer to the slot can be
// written to. The slot size is at least the size of a pointer.
unsafe {
slot_ptr
.cast::<usize>()
.write(original_head.map_or(0, |h| h.as_ptr() as usize));
}
}
/// Pops a slot from the front of the list.
///
/// It returns `None` if the list is empty.
pub fn pop(&mut self) -> Option<HeapSlot> {
let original_head = self.head?;
// SAFETY: The head is a valid pointer to a free slot.
// The slot contains a pointer to the next slot.
let next = unsafe { original_head.as_ptr().cast::<usize>().read() } as *mut u8;
self.head = if next.is_null() {
None
} else {
// SAFETY: We already verified that the next slot is not NULL.
Some(unsafe { NonNull::new_unchecked(next) })
};
Some(unsafe { HeapSlot::new(original_head, super::SlotInfo::SlabSlot(SLOT_SIZE)) })
}
}


@@ -1,174 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
mod slab_allocator;
use core::{
alloc::{GlobalAlloc, Layout},
mem::ManuallyDrop,
};
use align_ext::AlignExt;
use log::debug;
use slab_allocator::Heap;
use spin::Once;
use super::paddr_to_vaddr;
use crate::{
impl_frame_meta_for,
mm::{FrameAllocOptions, PAGE_SIZE},
prelude::*,
sync::SpinLock,
trap::disable_local,
};
#[global_allocator]
static HEAP_ALLOCATOR: LockedHeapWithRescue = LockedHeapWithRescue::new();
#[alloc_error_handler]
pub fn handle_alloc_error(layout: core::alloc::Layout) -> ! {
panic!("Heap allocation error, layout = {:?}", layout);
}
const INIT_KERNEL_HEAP_SIZE: usize = PAGE_SIZE * 256;
#[repr(align(4096))]
struct InitHeapSpace([u8; INIT_KERNEL_HEAP_SIZE]);
/// Initialize the heap allocator.
///
/// # Safety
///
/// This function should be called only once.
pub unsafe fn init() {
static mut HEAP_SPACE: InitHeapSpace = InitHeapSpace([0; INIT_KERNEL_HEAP_SIZE]);
// SAFETY: The HEAP_SPACE is a static memory range, so it's always valid.
unsafe {
#[expect(static_mut_refs)]
HEAP_ALLOCATOR.init(HEAP_SPACE.0.as_mut_ptr(), INIT_KERNEL_HEAP_SIZE);
}
}
struct LockedHeapWithRescue {
heap: Once<SpinLock<Heap>>,
}
/// The metadata for the kernel heap frames.
#[derive(Debug)]
pub struct KernelHeapMeta;
impl_frame_meta_for!(KernelHeapMeta);
impl LockedHeapWithRescue {
/// Creates an new heap
pub const fn new() -> Self {
Self { heap: Once::new() }
}
/// SAFETY: The range [start, start + size) must be a valid memory region.
pub unsafe fn init(&self, start: *mut u8, size: usize) {
self.heap
.call_once(|| SpinLock::new(Heap::new(start as usize, size)));
}
/// SAFETY: The range [start, start + size) must be a valid memory region.
unsafe fn add_to_heap(&self, start: usize, size: usize) {
self.heap
.get()
.unwrap()
.disable_irq()
.lock()
.add_memory(start, size);
}
fn rescue_if_low_memory(&self, remain_bytes: usize, layout: Layout) {
if remain_bytes <= PAGE_SIZE * 4 {
debug!(
"Low memory in heap allocator, try to call rescue. Remaining bytes: {:x?}",
remain_bytes
);
// We don't care if the rescue returns ok or not since we can still do heap allocation.
let _ = self.rescue(&layout);
}
}
fn rescue(&self, layout: &Layout) -> Result<()> {
const MIN_NUM_FRAMES: usize = 0x4000000 / PAGE_SIZE; // 64MB
debug!("enlarge heap, layout = {:?}", layout);
let mut num_frames = {
let align = PAGE_SIZE.max(layout.align());
debug_assert!(align % PAGE_SIZE == 0);
let size = layout.size().align_up(align);
size / PAGE_SIZE
};
let allocation_start = {
let mut options = FrameAllocOptions::new();
options.zeroed(false);
let segment = if num_frames >= MIN_NUM_FRAMES {
options
.alloc_segment_with(num_frames, |_| KernelHeapMeta)
.unwrap()
} else {
match options.alloc_segment_with(MIN_NUM_FRAMES, |_| KernelHeapMeta) {
Ok(seg) => {
num_frames = MIN_NUM_FRAMES;
seg
}
Err(_) => options.alloc_segment_with(num_frames, |_| KernelHeapMeta)?,
}
};
let paddr = segment.start_paddr();
let _ = ManuallyDrop::new(segment);
paddr
};
let vaddr = paddr_to_vaddr(allocation_start);
// SAFETY: the frame is allocated from FrameAllocator and never be deallocated,
// so the addr is always valid.
unsafe {
debug!(
"add frames to heap: addr = 0x{:x}, size = 0x{:x}",
vaddr,
PAGE_SIZE * num_frames
);
self.add_to_heap(vaddr, PAGE_SIZE * num_frames);
}
Ok(())
}
}
unsafe impl GlobalAlloc for LockedHeapWithRescue {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let _guard = disable_local();
let res = self.heap.get().unwrap().lock().allocate(layout);
if let Ok((allocation, remain_bytes)) = res {
self.rescue_if_low_memory(remain_bytes, layout);
return allocation;
}
if self.rescue(&layout).is_err() {
return core::ptr::null_mut::<u8>();
}
let res = self.heap.get().unwrap().lock().allocate(layout);
if let Ok((allocation, remain_bytes)) = res {
self.rescue_if_low_memory(remain_bytes, layout);
allocation
} else {
core::ptr::null_mut::<u8>()
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
debug_assert!(ptr as usize != 0);
self.heap
.get()
.unwrap()
.disable_irq()
.lock()
.deallocate(ptr, layout)
}
}


@@ -1,289 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
// Modified from lib.rs in slab_allocator project
//
// MIT License
//
// Copyright (c) 2024 Asterinas Developers
// Copyright (c) 2024 ArceOS Developers
// Copyright (c) 2017 Robert Węcławski
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
//! Slab allocator for `no_std` systems. It uses multiple slabs with blocks of
//! different sizes and a [buddy_system_allocator] for blocks larger than 4096
//! bytes.
//!
//! It's based on <https://github.com/weclaw1/slab_allocator>.
//!
//! [buddy_system_allocator]: https://docs.rs/buddy_system_allocator/latest/buddy_system_allocator/
extern crate alloc;
extern crate buddy_system_allocator;
use alloc::alloc::{AllocError, Layout};
use core::ptr::NonNull;
mod slab;
use slab::Slab;
const SET_SIZE: usize = 64;
const MIN_HEAP_SIZE: usize = 0x8000;
enum HeapAllocator {
Slab64Bytes,
Slab128Bytes,
Slab256Bytes,
Slab512Bytes,
Slab1024Bytes,
Slab2048Bytes,
Slab4096Bytes,
BuddyAllocator,
}
/// A fixed size heap backed by multiple slabs with blocks of different sizes.
/// Allocations over 4096 bytes are served by linked list allocator.
pub struct Heap {
slab_64_bytes: Slab<64>,
slab_128_bytes: Slab<128>,
slab_256_bytes: Slab<256>,
slab_512_bytes: Slab<512>,
slab_1024_bytes: Slab<1024>,
slab_2048_bytes: Slab<2048>,
slab_4096_bytes: Slab<4096>,
buddy_allocator: buddy_system_allocator::Heap<32>,
}
impl Heap {
/// Creates a new heap with the given `heap_start_addr` and `heap_size`. The start address must be valid
/// and the memory in the `[heap_start_addr, heap_start_addr + heap_size)` range must not be used for
/// anything else.
///
/// # Safety
/// This function is unsafe because it can cause undefined behavior if the
/// given address is invalid.
pub unsafe fn new(heap_start_addr: usize, heap_size: usize) -> Heap {
assert!(
heap_start_addr % 4096 == 0,
"Start address should be page aligned"
);
assert!(
heap_size >= MIN_HEAP_SIZE,
"Heap size should be greater or equal to minimum heap size"
);
assert!(
heap_size % MIN_HEAP_SIZE == 0,
"Heap size should be a multiple of minimum heap size"
);
Heap {
slab_64_bytes: Slab::<64>::new(0, 0),
slab_128_bytes: Slab::<128>::new(0, 0),
slab_256_bytes: Slab::<256>::new(0, 0),
slab_512_bytes: Slab::<512>::new(0, 0),
slab_1024_bytes: Slab::<1024>::new(0, 0),
slab_2048_bytes: Slab::<2048>::new(0, 0),
slab_4096_bytes: Slab::<4096>::new(0, 0),
buddy_allocator: {
let mut buddy = buddy_system_allocator::Heap::<32>::new();
buddy.init(heap_start_addr, heap_size);
buddy
},
}
}
/// Adds memory to the heap. The start address must be valid
/// and the memory in the `[mem_start_addr, mem_start_addr + heap_size)` range must not be used for
/// anything else.
///
/// # Safety
/// This function is unsafe because it can cause undefined behavior if the
/// given address is invalid.
pub unsafe fn add_memory(&mut self, heap_start_addr: usize, heap_size: usize) {
assert!(
heap_start_addr % 4096 == 0,
"Start address should be page aligned"
);
assert!(
heap_size % 4096 == 0,
"Add Heap size should be a multiple of page size"
);
self.buddy_allocator
.add_to_heap(heap_start_addr, heap_start_addr + heap_size);
}
/// Adds memory to the heap. The start address must be valid
/// and the memory in the `[mem_start_addr, mem_start_addr + heap_size)` range must not be used for
/// anything else.
/// In case of linked list allocator the memory can only be extended.
///
/// # Safety
/// This function is unsafe because it can cause undefined behavior if the
/// given address is invalid.
unsafe fn _grow(&mut self, mem_start_addr: usize, mem_size: usize, slab: HeapAllocator) {
match slab {
HeapAllocator::Slab64Bytes => self.slab_64_bytes.grow(mem_start_addr, mem_size),
HeapAllocator::Slab128Bytes => self.slab_128_bytes.grow(mem_start_addr, mem_size),
HeapAllocator::Slab256Bytes => self.slab_256_bytes.grow(mem_start_addr, mem_size),
HeapAllocator::Slab512Bytes => self.slab_512_bytes.grow(mem_start_addr, mem_size),
HeapAllocator::Slab1024Bytes => self.slab_1024_bytes.grow(mem_start_addr, mem_size),
HeapAllocator::Slab2048Bytes => self.slab_2048_bytes.grow(mem_start_addr, mem_size),
HeapAllocator::Slab4096Bytes => self.slab_4096_bytes.grow(mem_start_addr, mem_size),
HeapAllocator::BuddyAllocator => self
.buddy_allocator
.add_to_heap(mem_start_addr, mem_start_addr + mem_size),
}
}
/// Allocates a chunk of the given size with the given alignment. Returns a pointer to the
/// beginning of that chunk and remaining bytes in buddy system allocator if it was successful.
/// Else it returns `Err`.
///
/// This function finds the slab of lowest size which can still accommodate the given chunk.
/// The runtime is in `O(1)` for chunks of size <= 4096, and `O(n)` when chunk size is > 4096,
pub fn allocate(&mut self, layout: Layout) -> Result<(*mut u8, usize), AllocError> {
let addr = match Heap::layout_to_allocator(&layout) {
HeapAllocator::Slab64Bytes => self
.slab_64_bytes
.allocate(layout, &mut self.buddy_allocator)?,
HeapAllocator::Slab128Bytes => self
.slab_128_bytes
.allocate(layout, &mut self.buddy_allocator)?,
HeapAllocator::Slab256Bytes => self
.slab_256_bytes
.allocate(layout, &mut self.buddy_allocator)?,
HeapAllocator::Slab512Bytes => self
.slab_512_bytes
.allocate(layout, &mut self.buddy_allocator)?,
HeapAllocator::Slab1024Bytes => self
.slab_1024_bytes
.allocate(layout, &mut self.buddy_allocator)?,
HeapAllocator::Slab2048Bytes => self
.slab_2048_bytes
.allocate(layout, &mut self.buddy_allocator)?,
HeapAllocator::Slab4096Bytes => self
.slab_4096_bytes
.allocate(layout, &mut self.buddy_allocator)?,
HeapAllocator::BuddyAllocator => self
.buddy_allocator
.alloc(layout)
.map(|ptr| ptr.as_ptr() as usize)
.map_err(|_| AllocError)?,
};
Ok((
addr as *mut u8,
self.buddy_allocator.stats_total_bytes() - self.buddy_allocator.stats_alloc_actual(),
))
}
/// Frees the given allocation. `ptr` must be a pointer returned
/// by a call to the `allocate` function with identical size and alignment. Undefined
/// behavior may occur for invalid arguments, thus this function is unsafe.
///
/// This function finds the slab which contains address of `ptr` and adds the blocks beginning
/// with `ptr` address to the list of free blocks.
/// This operation is in `O(1)` for blocks <= 4096 bytes and `O(n)` for blocks > 4096 bytes.
///
/// # Safety
/// This function is unsafe because it can cause undefined behavior if the
/// given address is invalid.
pub unsafe fn deallocate(&mut self, ptr: *mut u8, layout: Layout) {
let ptr = ptr as usize;
match Heap::layout_to_allocator(&layout) {
HeapAllocator::Slab64Bytes => self.slab_64_bytes.deallocate(ptr),
HeapAllocator::Slab128Bytes => self.slab_128_bytes.deallocate(ptr),
HeapAllocator::Slab256Bytes => self.slab_256_bytes.deallocate(ptr),
HeapAllocator::Slab512Bytes => self.slab_512_bytes.deallocate(ptr),
HeapAllocator::Slab1024Bytes => self.slab_1024_bytes.deallocate(ptr),
HeapAllocator::Slab2048Bytes => self.slab_2048_bytes.deallocate(ptr),
HeapAllocator::Slab4096Bytes => self.slab_4096_bytes.deallocate(ptr),
HeapAllocator::BuddyAllocator => self
.buddy_allocator
.dealloc(NonNull::new(ptr as *mut u8).unwrap(), layout),
}
}
/// Returns bounds on the guaranteed usable size of a successful
/// allocation created with the specified `layout`.
#[expect(unused)]
pub fn usable_size(&self, layout: Layout) -> (usize, usize) {
match Heap::layout_to_allocator(&layout) {
HeapAllocator::Slab64Bytes => (layout.size(), 64),
HeapAllocator::Slab128Bytes => (layout.size(), 128),
HeapAllocator::Slab256Bytes => (layout.size(), 256),
HeapAllocator::Slab512Bytes => (layout.size(), 512),
HeapAllocator::Slab1024Bytes => (layout.size(), 1024),
HeapAllocator::Slab2048Bytes => (layout.size(), 2048),
HeapAllocator::Slab4096Bytes => (layout.size(), 4096),
HeapAllocator::BuddyAllocator => (layout.size(), layout.size()),
}
}
/// Finds allocator to use based on layout size and alignment
fn layout_to_allocator(layout: &Layout) -> HeapAllocator {
if layout.size() > 4096 {
HeapAllocator::BuddyAllocator
} else if layout.size() <= 64 && layout.align() <= 64 {
HeapAllocator::Slab64Bytes
} else if layout.size() <= 128 && layout.align() <= 128 {
HeapAllocator::Slab128Bytes
} else if layout.size() <= 256 && layout.align() <= 256 {
HeapAllocator::Slab256Bytes
} else if layout.size() <= 512 && layout.align() <= 512 {
HeapAllocator::Slab512Bytes
} else if layout.size() <= 1024 && layout.align() <= 1024 {
HeapAllocator::Slab1024Bytes
} else if layout.size() <= 2048 && layout.align() <= 2048 {
HeapAllocator::Slab2048Bytes
} else {
HeapAllocator::Slab4096Bytes
}
}
/// Returns total memory size in bytes of the heap.
pub fn total_bytes(&self) -> usize {
self.slab_64_bytes.total_blocks() * 64
+ self.slab_128_bytes.total_blocks() * 128
+ self.slab_256_bytes.total_blocks() * 256
+ self.slab_512_bytes.total_blocks() * 512
+ self.slab_1024_bytes.total_blocks() * 1024
+ self.slab_2048_bytes.total_blocks() * 2048
+ self.slab_4096_bytes.total_blocks() * 4096
+ self.buddy_allocator.stats_total_bytes()
}
/// Returns allocated memory size in bytes.
pub fn used_bytes(&self) -> usize {
self.slab_64_bytes.used_blocks() * 64
+ self.slab_128_bytes.used_blocks() * 128
+ self.slab_256_bytes.used_blocks() * 256
+ self.slab_512_bytes.used_blocks() * 512
+ self.slab_1024_bytes.used_blocks() * 1024
+ self.slab_2048_bytes.used_blocks() * 2048
+ self.slab_4096_bytes.used_blocks() * 4096
+ self.buddy_allocator.stats_alloc_actual()
}
/// Returns available memory size in bytes.
#[expect(unused)]
pub fn available_bytes(&self) -> usize {
self.total_bytes() - self.used_bytes()
}
}


@@ -1,151 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
// Modified from slab.rs in slab_allocator project
//
// MIT License
//
// Copyright (c) 2024 Asterinas Developers
// Copyright (c) 2024 ArceOS Developers
// Copyright (c) 2017 Robert Węcławski
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
use alloc::alloc::{AllocError, Layout};
use super::SET_SIZE;
pub struct Slab<const BLK_SIZE: usize> {
free_block_list: FreeBlockList<BLK_SIZE>,
total_blocks: usize,
}
impl<const BLK_SIZE: usize> Slab<BLK_SIZE> {
pub unsafe fn new(start_addr: usize, slab_size: usize) -> Slab<BLK_SIZE> {
let num_of_blocks = slab_size / BLK_SIZE;
Slab {
free_block_list: FreeBlockList::new(start_addr, BLK_SIZE, num_of_blocks),
total_blocks: num_of_blocks,
}
}
pub fn total_blocks(&self) -> usize {
self.total_blocks
}
pub fn used_blocks(&self) -> usize {
self.total_blocks - self.free_block_list.len()
}
pub unsafe fn grow(&mut self, start_addr: usize, slab_size: usize) {
let num_of_blocks = slab_size / BLK_SIZE;
self.total_blocks += num_of_blocks;
let mut block_list = FreeBlockList::<BLK_SIZE>::new(start_addr, BLK_SIZE, num_of_blocks);
while let Some(block) = block_list.pop() {
self.free_block_list.push(block);
}
}
pub fn allocate(
&mut self,
_layout: Layout,
buddy: &mut buddy_system_allocator::Heap<32>,
) -> Result<usize, AllocError> {
match self.free_block_list.pop() {
Some(block) => Ok(block.addr()),
None => {
let layout =
unsafe { Layout::from_size_align_unchecked(SET_SIZE * BLK_SIZE, 4096) };
if let Ok(ptr) = buddy.alloc(layout) {
unsafe {
self.grow(ptr.as_ptr() as usize, SET_SIZE * BLK_SIZE);
}
Ok(self.free_block_list.pop().unwrap().addr())
} else {
Err(AllocError)
}
}
}
}
pub fn deallocate(&mut self, ptr: usize) {
let ptr = ptr as *mut FreeBlock;
unsafe {
self.free_block_list.push(&mut *ptr);
}
}
}
struct FreeBlockList<const BLK_SIZE: usize> {
len: usize,
head: Option<&'static mut FreeBlock>,
}
impl<const BLK_SIZE: usize> FreeBlockList<BLK_SIZE> {
unsafe fn new(
start_addr: usize,
block_size: usize,
num_of_blocks: usize,
) -> FreeBlockList<BLK_SIZE> {
let mut new_list = FreeBlockList::new_empty();
for i in (0..num_of_blocks).rev() {
let new_block = (start_addr + i * block_size) as *mut FreeBlock;
new_list.push(&mut *new_block);
}
new_list
}
fn new_empty() -> FreeBlockList<BLK_SIZE> {
FreeBlockList { len: 0, head: None }
}
fn len(&self) -> usize {
self.len
}
fn pop(&mut self) -> Option<&'static mut FreeBlock> {
#[expect(clippy::manual_inspect)]
self.head.take().map(|node| {
self.head = node.next.take();
self.len -= 1;
node
})
}
fn push(&mut self, free_block: &'static mut FreeBlock) {
free_block.next = self.head.take();
self.len += 1;
self.head = Some(free_block);
}
#[expect(dead_code)]
fn is_empty(&self) -> bool {
self.head.is_none()
}
}
struct FreeBlock {
next: Option<&'static mut FreeBlock>,
}
impl FreeBlock {
fn addr(&self) -> usize {
self as *const _ as usize
}
}


@@ -10,7 +10,7 @@ pub type Paddr = usize;
pub(crate) mod dma;
pub mod frame;
pub(crate) mod heap_allocator;
pub mod heap;
mod io;
pub(crate) mod kspace;
mod offset;


@@ -112,6 +112,7 @@ LINUX_BZIMAGE_SETUP_CARGO_TOML_PATH=${ASTER_SRC_DIR}/ostd/libs/linux-bzimage/set
OSDK_CARGO_TOML_PATH=${ASTER_SRC_DIR}/osdk/Cargo.toml
OSDK_TEST_RUNNER_CARGO_TOML_PATH=${ASTER_SRC_DIR}/osdk/deps/test-kernel/Cargo.toml
OSDK_FRAME_ALLOCATOR_CARGO_TOML_PATH=${ASTER_SRC_DIR}/osdk/deps/frame-allocator/Cargo.toml
OSDK_HEAP_ALLOCATOR_CARGO_TOML_PATH=${ASTER_SRC_DIR}/osdk/deps/heap-allocator/Cargo.toml
VERSION_PATH=${ASTER_SRC_DIR}/VERSION
current_version=$(cat ${VERSION_PATH})
@@ -135,9 +136,11 @@ update_package_version ${LINUX_BZIMAGE_SETUP_CARGO_TOML_PATH}
update_package_version ${OSDK_CARGO_TOML_PATH}
update_package_version ${OSDK_TEST_RUNNER_CARGO_TOML_PATH}
update_package_version ${OSDK_FRAME_ALLOCATOR_CARGO_TOML_PATH}
update_package_version ${OSDK_HEAP_ALLOCATOR_CARGO_TOML_PATH}
update_dep_version ${OSDK_TEST_RUNNER_CARGO_TOML_PATH} ostd
update_dep_version ${OSDK_FRAME_ALLOCATOR_CARGO_TOML_PATH} ostd
update_dep_version ${OSDK_HEAP_ALLOCATOR_CARGO_TOML_PATH} ostd
update_dep_version ${OSTD_CARGO_TOML_PATH} ostd-test
update_dep_version ${OSTD_CARGO_TOML_PATH} linux-boot-params
update_dep_version ${OSTD_CARGO_TOML_PATH} ostd-macros