From a708a0c046cbd9259f96ccc67359239bff5ea40a Mon Sep 17 00:00:00 2001 From: Zhang Junyang Date: Mon, 13 Jan 2025 14:24:32 +0800 Subject: [PATCH] Inject a scalable slab allocator --- Cargo.lock | 9 + Cargo.toml | 1 + Makefile | 1 + kernel/Cargo.toml | 1 + kernel/src/vm/mod.rs | 9 + osdk/deps/heap-allocator/Cargo.toml | 11 + osdk/deps/heap-allocator/src/allocator.rs | 318 ++++++++++++++++++ osdk/deps/heap-allocator/src/lib.rs | 10 + osdk/deps/heap-allocator/src/slab_cache.rs | 144 ++++++++ osdk/src/base_crate/main.rs.template | 32 +- osdk/src/base_crate/mod.rs | 6 + ostd/libs/ostd-macros/src/lib.rs | 75 ++++- ostd/src/lib.rs | 8 +- ostd/src/mm/frame/segment.rs | 21 ++ ostd/src/mm/heap/mod.rs | 151 +++++++++ ostd/src/mm/heap/slab.rs | 140 ++++++++ ostd/src/mm/heap/slot.rs | 154 +++++++++ ostd/src/mm/heap/slot_list.rs | 82 +++++ ostd/src/mm/heap_allocator/mod.rs | 174 ---------- .../mm/heap_allocator/slab_allocator/mod.rs | 289 ---------------- .../mm/heap_allocator/slab_allocator/slab.rs | 151 --------- ostd/src/mm/mod.rs | 2 +- tools/bump_version.sh | 3 + 23 files changed, 1166 insertions(+), 626 deletions(-) create mode 100644 osdk/deps/heap-allocator/Cargo.toml create mode 100644 osdk/deps/heap-allocator/src/allocator.rs create mode 100644 osdk/deps/heap-allocator/src/lib.rs create mode 100644 osdk/deps/heap-allocator/src/slab_cache.rs create mode 100644 ostd/src/mm/heap/mod.rs create mode 100644 ostd/src/mm/heap/slab.rs create mode 100644 ostd/src/mm/heap/slot.rs create mode 100644 ostd/src/mm/heap/slot_list.rs delete mode 100644 ostd/src/mm/heap_allocator/mod.rs delete mode 100644 ostd/src/mm/heap_allocator/slab_allocator/mod.rs delete mode 100644 ostd/src/mm/heap_allocator/slab_allocator/slab.rs diff --git a/Cargo.lock b/Cargo.lock index e1b2f18d5..01ac79f99 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -212,6 +212,7 @@ dependencies = [ "log", "lru", "osdk-frame-allocator", + "osdk-heap-allocator", "ostd", "paste", "rand", @@ -1261,6 +1262,14 @@ dependencies = [ "ostd", ] +[[package]] +name = "osdk-heap-allocator" +version = "0.12.0" +dependencies = [ + "log", + "ostd", +] + [[package]] name = "osdk-test-kernel" version = "0.12.0" diff --git a/Cargo.toml b/Cargo.toml index d4f85f477..04efe78a8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,6 +2,7 @@ resolver = "2" members = [ "osdk/deps/frame-allocator", + "osdk/deps/heap-allocator", "osdk/deps/test-kernel", "ostd", "ostd/libs/align_ext", diff --git a/Makefile b/Makefile index 478bca236..cbf5581be 100644 --- a/Makefile +++ b/Makefile @@ -145,6 +145,7 @@ NON_OSDK_CRATES := \ # and need to be built or tested with OSDK. OSDK_CRATES := \ osdk/deps/frame-allocator \ + osdk/deps/heap-allocator \ osdk/deps/test-kernel \ ostd \ ostd/libs/linux-bzimage/setup \ diff --git a/kernel/Cargo.toml b/kernel/Cargo.toml index 58d494e2c..842b31020 100644 --- a/kernel/Cargo.toml +++ b/kernel/Cargo.toml @@ -20,6 +20,7 @@ aster-rights = { path = "libs/aster-rights" } component = { path = "libs/comp-sys/component" } controlled = { path = "libs/comp-sys/controlled" } osdk-frame-allocator = { path = "../osdk/deps/frame-allocator" } +osdk-heap-allocator = { path = "../osdk/deps/heap-allocator" } ostd = { path = "../ostd" } typeflags = { path = "libs/typeflags" } typeflags-util = { path = "libs/typeflags-util" } diff --git a/kernel/src/vm/mod.rs b/kernel/src/vm/mod.rs index d29e02503..ec7b1d2eb 100644 --- a/kernel/src/vm/mod.rs +++ b/kernel/src/vm/mod.rs @@ -17,6 +17,7 @@ //! as zero-cost capabilities. 
use osdk_frame_allocator::FrameAllocator; +use osdk_heap_allocator::{type_from_layout, HeapAllocator}; pub mod page_fault_handler; pub mod perms; @@ -27,6 +28,14 @@ pub mod vmo; #[ostd::global_frame_allocator] static FRAME_ALLOCATOR: FrameAllocator = FrameAllocator; +#[ostd::global_heap_allocator] +static HEAP_ALLOCATOR: HeapAllocator = HeapAllocator; + +#[ostd::global_heap_allocator_slot_type_map] +const fn slot_type_from_layout(layout: core::alloc::Layout) -> Option { + type_from_layout(layout) +} + /// Total physical memory in the entire system in bytes. pub fn mem_total() -> usize { use ostd::boot::{boot_info, memory_region::MemoryRegionType}; diff --git a/osdk/deps/heap-allocator/Cargo.toml b/osdk/deps/heap-allocator/Cargo.toml new file mode 100644 index 000000000..0dc4494e8 --- /dev/null +++ b/osdk/deps/heap-allocator/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "osdk-heap-allocator" +version = "0.12.0" +edition = "2021" + +[dependencies] +log = "0.4" +ostd = { version = "0.12.0", path = "../../../ostd" } + +[lints] +workspace = true diff --git a/osdk/deps/heap-allocator/src/allocator.rs b/osdk/deps/heap-allocator/src/allocator.rs new file mode 100644 index 000000000..377afaae6 --- /dev/null +++ b/osdk/deps/heap-allocator/src/allocator.rs @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: MPL-2.0 + +//! A global allocator implementation of many slab caches. + +use core::{ + alloc::{AllocError, Layout}, + cell::RefCell, +}; + +use ostd::{ + cpu_local, + mm::{ + heap::{GlobalHeapAllocator, HeapSlot, SlabSlotList, SlotInfo}, + PAGE_SIZE, + }, + sync::{LocalIrqDisabled, SpinLock}, + trap, +}; + +use crate::slab_cache::SlabCache; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +#[repr(usize)] +enum CommonSizeClass { + Bytes8 = 8, + Bytes16 = 16, + Bytes32 = 32, + Bytes64 = 64, + Bytes128 = 128, + Bytes256 = 256, + Bytes512 = 512, + Bytes1024 = 1024, + Bytes2048 = 2048, +} + +impl CommonSizeClass { + const fn from_layout(layout: Layout) -> Option { + let size_class = match layout.size() { + 0..=8 => CommonSizeClass::Bytes8, + 9..=16 => CommonSizeClass::Bytes16, + 17..=32 => CommonSizeClass::Bytes32, + 33..=64 => CommonSizeClass::Bytes64, + 65..=128 => CommonSizeClass::Bytes128, + 129..=256 => CommonSizeClass::Bytes256, + 257..=512 => CommonSizeClass::Bytes512, + 513..=1024 => CommonSizeClass::Bytes1024, + 1025..=2048 => CommonSizeClass::Bytes2048, + _ => return None, + }; + // Alignment must be non-zero and power-of-two. + let align_class = match layout.align() { + 1 | 2 | 4 | 8 => CommonSizeClass::Bytes8, + 16 => CommonSizeClass::Bytes16, + 32 => CommonSizeClass::Bytes32, + 64 => CommonSizeClass::Bytes64, + 128 => CommonSizeClass::Bytes128, + 256 => CommonSizeClass::Bytes256, + 512 => CommonSizeClass::Bytes512, + 1024 => CommonSizeClass::Bytes1024, + 2048 => CommonSizeClass::Bytes2048, + _ => return None, + }; + Some(if (size_class as usize) < (align_class as usize) { + align_class + } else { + size_class + }) + } + + fn from_size(size: usize) -> Option { + match size { + 8 => Some(CommonSizeClass::Bytes8), + 16 => Some(CommonSizeClass::Bytes16), + 32 => Some(CommonSizeClass::Bytes32), + 64 => Some(CommonSizeClass::Bytes64), + 128 => Some(CommonSizeClass::Bytes128), + 256 => Some(CommonSizeClass::Bytes256), + 512 => Some(CommonSizeClass::Bytes512), + 1024 => Some(CommonSizeClass::Bytes1024), + 2048 => Some(CommonSizeClass::Bytes2048), + _ => None, + } + } +} + +/// Get the type of the slot from the layout. 
+/// +/// It should be used to define [`ostd::global_heap_allocator_slot_type_map`]. +pub const fn type_from_layout(layout: Layout) -> Option { + if let Some(class) = CommonSizeClass::from_layout(layout) { + return Some(SlotInfo::SlabSlot(class as usize)); + } + if layout.size() > PAGE_SIZE / 2 && layout.align() <= PAGE_SIZE { + return Some(SlotInfo::LargeSlot( + layout.size().div_ceil(PAGE_SIZE) * PAGE_SIZE, + )); + } + None +} + +struct Heap { + slab8: SlabCache<8>, + slab16: SlabCache<16>, + slab32: SlabCache<32>, + slab64: SlabCache<64>, + slab128: SlabCache<128>, + slab256: SlabCache<256>, + slab512: SlabCache<512>, + slab1024: SlabCache<1024>, + slab2048: SlabCache<2048>, +} + +impl Heap { + const fn new() -> Self { + Self { + slab8: SlabCache::new(), + slab16: SlabCache::new(), + slab32: SlabCache::new(), + slab64: SlabCache::new(), + slab128: SlabCache::new(), + slab256: SlabCache::new(), + slab512: SlabCache::new(), + slab1024: SlabCache::new(), + slab2048: SlabCache::new(), + } + } + + fn alloc(&mut self, class: CommonSizeClass) -> Result { + match class { + CommonSizeClass::Bytes8 => self.slab8.alloc(), + CommonSizeClass::Bytes16 => self.slab16.alloc(), + CommonSizeClass::Bytes32 => self.slab32.alloc(), + CommonSizeClass::Bytes64 => self.slab64.alloc(), + CommonSizeClass::Bytes128 => self.slab128.alloc(), + CommonSizeClass::Bytes256 => self.slab256.alloc(), + CommonSizeClass::Bytes512 => self.slab512.alloc(), + CommonSizeClass::Bytes1024 => self.slab1024.alloc(), + CommonSizeClass::Bytes2048 => self.slab2048.alloc(), + } + } + + fn dealloc(&mut self, slot: HeapSlot, class: CommonSizeClass) -> Result<(), AllocError> { + match class { + CommonSizeClass::Bytes8 => self.slab8.dealloc(slot), + CommonSizeClass::Bytes16 => self.slab16.dealloc(slot), + CommonSizeClass::Bytes32 => self.slab32.dealloc(slot), + CommonSizeClass::Bytes64 => self.slab64.dealloc(slot), + CommonSizeClass::Bytes128 => self.slab128.dealloc(slot), + CommonSizeClass::Bytes256 => self.slab256.dealloc(slot), + CommonSizeClass::Bytes512 => self.slab512.dealloc(slot), + CommonSizeClass::Bytes1024 => self.slab1024.dealloc(slot), + CommonSizeClass::Bytes2048 => self.slab2048.dealloc(slot), + } + } +} + +static GLOBAL_POOL: SpinLock = SpinLock::new(Heap::new()); + +/// The maximum size in bytes of the object cache of each slot size class. +const OBJ_CACHE_MAX_SIZE: usize = 8 * PAGE_SIZE; +/// The expected size in bytes of the object cache of each slot size class. +/// +/// If the cache exceeds the maximum size or is empty, it will be adjusted to +/// this size. 
+const OBJ_CACHE_EXPECTED_SIZE: usize = 2 * PAGE_SIZE; + +struct ObjectCache { + list: SlabSlotList, + list_size: usize, +} + +impl ObjectCache { + const fn new() -> Self { + Self { + list: SlabSlotList::new(), + list_size: 0, + } + } + + fn alloc(&mut self) -> Result { + if let Some(slot) = self.list.pop() { + self.list_size -= SLOT_SIZE; + return Ok(slot); + } + + let size_class = CommonSizeClass::from_size(SLOT_SIZE).unwrap(); + let mut global_pool = GLOBAL_POOL.lock(); + for _ in 0..OBJ_CACHE_EXPECTED_SIZE / SLOT_SIZE { + if let Ok(slot) = global_pool.alloc(size_class) { + self.list.push(slot); + self.list_size += SLOT_SIZE; + } else { + break; + } + } + + if let Ok(new_slot) = global_pool.alloc(size_class) { + Ok(new_slot) + } else if let Some(popped) = self.list.pop() { + self.list_size -= SLOT_SIZE; + Ok(popped) + } else { + Err(AllocError) + } + } + + fn dealloc(&mut self, slot: HeapSlot, class: CommonSizeClass) -> Result<(), AllocError> { + if self.list_size + SLOT_SIZE < OBJ_CACHE_MAX_SIZE { + self.list.push(slot); + self.list_size += SLOT_SIZE; + return Ok(()); + } + + let mut global_pool = GLOBAL_POOL.lock(); + global_pool.dealloc(slot, class)?; + for _ in 0..(self.list_size - OBJ_CACHE_EXPECTED_SIZE) / SLOT_SIZE { + let slot = self.list.pop().expect("The cache size should be ample"); + global_pool.dealloc(slot, class)?; + self.list_size -= SLOT_SIZE; + } + + Ok(()) + } +} + +struct LocalCache { + cache8: ObjectCache<8>, + cache16: ObjectCache<16>, + cache32: ObjectCache<32>, + cache64: ObjectCache<64>, + cache128: ObjectCache<128>, + cache256: ObjectCache<256>, + cache512: ObjectCache<512>, + cache1024: ObjectCache<1024>, + cache2048: ObjectCache<2048>, +} + +impl LocalCache { + const fn new() -> Self { + Self { + cache8: ObjectCache::new(), + cache16: ObjectCache::new(), + cache32: ObjectCache::new(), + cache64: ObjectCache::new(), + cache128: ObjectCache::new(), + cache256: ObjectCache::new(), + cache512: ObjectCache::new(), + cache1024: ObjectCache::new(), + cache2048: ObjectCache::new(), + } + } + + fn alloc(&mut self, class: CommonSizeClass) -> Result { + match class { + CommonSizeClass::Bytes8 => self.cache8.alloc(), + CommonSizeClass::Bytes16 => self.cache16.alloc(), + CommonSizeClass::Bytes32 => self.cache32.alloc(), + CommonSizeClass::Bytes64 => self.cache64.alloc(), + CommonSizeClass::Bytes128 => self.cache128.alloc(), + CommonSizeClass::Bytes256 => self.cache256.alloc(), + CommonSizeClass::Bytes512 => self.cache512.alloc(), + CommonSizeClass::Bytes1024 => self.cache1024.alloc(), + CommonSizeClass::Bytes2048 => self.cache2048.alloc(), + } + } + + fn dealloc(&mut self, slot: HeapSlot, class: CommonSizeClass) -> Result<(), AllocError> { + match class { + CommonSizeClass::Bytes8 => self.cache8.dealloc(slot, class), + CommonSizeClass::Bytes16 => self.cache16.dealloc(slot, class), + CommonSizeClass::Bytes32 => self.cache32.dealloc(slot, class), + CommonSizeClass::Bytes64 => self.cache64.dealloc(slot, class), + CommonSizeClass::Bytes128 => self.cache128.dealloc(slot, class), + CommonSizeClass::Bytes256 => self.cache256.dealloc(slot, class), + CommonSizeClass::Bytes512 => self.cache512.dealloc(slot, class), + CommonSizeClass::Bytes1024 => self.cache1024.dealloc(slot, class), + CommonSizeClass::Bytes2048 => self.cache2048.dealloc(slot, class), + } + } +} + +cpu_local! { + static LOCAL_POOL: RefCell = RefCell::new(LocalCache::new()); +} + +/// The global heap allocator provided by OSDK. +/// +/// It is a singleton that provides heap allocation for the kernel. 
If +/// multiple instances of this struct are created, all the member functions +/// will eventually access the same allocator. +pub struct HeapAllocator; + +impl GlobalHeapAllocator for HeapAllocator { + fn alloc(&self, layout: Layout) -> Result { + let Some(class) = CommonSizeClass::from_layout(layout) else { + return HeapSlot::alloc_large(layout.size().div_ceil(PAGE_SIZE) * PAGE_SIZE); + }; + + let irq_guard = trap::disable_local(); + let this_cache = LOCAL_POOL.get_with(&irq_guard); + let mut local_cache = this_cache.borrow_mut(); + + local_cache.alloc(class) + } + + fn dealloc(&self, slot: HeapSlot) -> Result<(), AllocError> { + let Some(class) = CommonSizeClass::from_size(slot.size()) else { + slot.dealloc_large(); + return Ok(()); + }; + + let irq_guard = trap::disable_local(); + let this_cache = LOCAL_POOL.get_with(&irq_guard); + let mut local_cache = this_cache.borrow_mut(); + + local_cache.dealloc(slot, class) + } +} diff --git a/osdk/deps/heap-allocator/src/lib.rs b/osdk/deps/heap-allocator/src/lib.rs new file mode 100644 index 000000000..8463f80d1 --- /dev/null +++ b/osdk/deps/heap-allocator/src/lib.rs @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: MPL-2.0 + +#![feature(allocator_api)] +#![no_std] +#![deny(unsafe_code)] + +mod allocator; +mod slab_cache; + +pub use allocator::{type_from_layout, HeapAllocator}; diff --git a/osdk/deps/heap-allocator/src/slab_cache.rs b/osdk/deps/heap-allocator/src/slab_cache.rs new file mode 100644 index 000000000..3a4742f93 --- /dev/null +++ b/osdk/deps/heap-allocator/src/slab_cache.rs @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: MPL-2.0 + +//! The slab cache that is composed of slabs. + +use core::alloc::AllocError; + +use ostd::mm::{ + frame::linked_list::LinkedList, + heap::{HeapSlot, Slab, SlabMeta}, + Paddr, PAGE_SIZE, +}; + +const EXPECTED_EMPTY_SLABS: usize = 4; +const MAX_EMPTY_SLABS: usize = 16; + +/// A slab cache. +/// +/// A slab cache contains 3 parts: +/// - a list of empty slabs; +/// - a list of partially allocated slabs; +/// - and a list of full slabs. +/// +/// So the cache is partially sorted, to allow caching and reusing memory. +pub struct SlabCache { + empty: LinkedList>, + partial: LinkedList>, + full: LinkedList>, +} + +impl SlabCache { + /// Creates a new slab cache. + pub const fn new() -> Self { + Self { + empty: LinkedList::new(), + partial: LinkedList::new(), + full: LinkedList::new(), + } + } + + /// Allocates a slot from the cache. + /// + /// The caller must provide which cache is it because we don't know from + /// `&mut self`. The information is used for deallocation. + pub fn alloc(&mut self) -> Result { + // Try to allocate from the partial slabs first. + if !self.partial.is_empty() { + let mut cursor = self.partial.cursor_back_mut(); + let current = cursor.current_meta().unwrap(); + let allocated = current.alloc().unwrap(); + if current.nr_allocated() == current.capacity() { + self.full.push_front(cursor.take_current().unwrap()); + } + return Ok(allocated); + } + + // If no partial slab is available, try to get an empty slab. + if !self.empty.is_empty() { + let mut slab = self.empty.pop_front().unwrap(); + let allocated = slab.meta_mut().alloc().unwrap(); + self.add_slab(slab); + return Ok(allocated); + } + + // If no empty slab is available, allocate new slabs. 
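+        // A fresh slab carves one frame into `PAGE_SIZE / SLOT_SIZE` slots; after
+        // taking one slot from it, a few more empty slabs are prefetched below so
+        // that subsequent allocations do not hit the frame allocator again.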
+ let Ok(mut allocated_empty) = Slab::new() else { + log::error!("Failed to allocate a new slab"); + return Err(AllocError); + }; + let allocated = allocated_empty.meta_mut().alloc().unwrap(); + self.add_slab(allocated_empty); + + // Allocate more empty slabs and push them into the cache. + for _ in 0..EXPECTED_EMPTY_SLABS { + if let Ok(allocated_empty) = Slab::new() { + self.empty.push_front(allocated_empty); + } else { + break; + } + } + + Ok(allocated) + } + + /// Deallocates a slot into the cache. + /// + /// The slot must be allocated from the cache. + pub fn dealloc(&mut self, slot: HeapSlot) -> Result<(), AllocError> { + let which = which_slab(&slot).ok_or_else(|| { + log::error!("Can't find the slab for the slot"); + AllocError + })?; + + let mut extracted_slab = None; + + if self.partial.contains(which) { + extracted_slab = self.partial.cursor_mut_at(which).unwrap().take_current(); + } else if self.full.contains(which) { + extracted_slab = self.full.cursor_mut_at(which).unwrap().take_current(); + } + + let mut slab = extracted_slab.ok_or_else(|| { + log::error!("Deallocating a slot that is not allocated from the cache"); + AllocError + })?; + + slab.dealloc(slot)?; + + self.add_slab(slab); + + // If the slab cache has too many empty slabs, free some of them. + if self.empty.size() > MAX_EMPTY_SLABS { + while self.empty.size() > EXPECTED_EMPTY_SLABS { + self.empty.pop_front(); + } + } + + Ok(()) + } + + fn add_slab(&mut self, slab: Slab) { + if slab.meta().nr_allocated() == slab.meta().capacity() { + self.full.push_front(slab); + } else if slab.meta().nr_allocated() > 0 { + self.partial.push_back(slab); + } else { + self.empty.push_front(slab); + } + } +} + +/// Gets which slab the slot belongs to. +/// +/// If the slot size is larger than [`PAGE_SIZE`], it is not from a slab +/// and this function will return `None`. +/// +/// `SLOT_SIZE` can be larger than `slot.size()` but not smaller. +fn which_slab(slot: &HeapSlot) -> Option { + if slot.size() > PAGE_SIZE { + return None; + } + + let frame_paddr = slot.paddr() / PAGE_SIZE * PAGE_SIZE; + Some(frame_paddr) +} diff --git a/osdk/src/base_crate/main.rs.template b/osdk/src/base_crate/main.rs.template index fe31d285c..73f68a4bd 100644 --- a/osdk/src/base_crate/main.rs.template +++ b/osdk/src/base_crate/main.rs.template @@ -13,11 +13,31 @@ fn panic(info: &core::panic::PanicInfo) -> ! 
{ unsafe { __ostd_panic_handler(info); } } -use ostd::mm::frame::GlobalFrameAllocator; +mod default_frame_allocator { + use ostd::mm::frame::GlobalFrameAllocator; -use osdk_frame_allocator::FrameAllocator; -static FRAME_ALLOCATOR: FrameAllocator = FrameAllocator; + use osdk_frame_allocator::FrameAllocator; + static FRAME_ALLOCATOR: FrameAllocator = FrameAllocator; -#[no_mangle] -#[linkage = "weak"] -static __GLOBAL_FRAME_ALLOCATOR_REF: &'static dyn GlobalFrameAllocator = &FRAME_ALLOCATOR; + #[no_mangle] + #[linkage = "weak"] + static __GLOBAL_FRAME_ALLOCATOR_REF: &'static dyn GlobalFrameAllocator = &FRAME_ALLOCATOR; +} + +mod default_heap_allocator { + use ostd::mm::heap::GlobalHeapAllocator; + + use osdk_heap_allocator::{HeapAllocator, type_from_layout}; + static HEAP_ALLOCATOR: HeapAllocator = HeapAllocator; + + #[no_mangle] + #[linkage = "weak"] + static __GLOBAL_HEAP_ALLOCATOR_REF: &'static dyn GlobalHeapAllocator = &HEAP_ALLOCATOR; + + #[no_mangle] + #[linkage = "weak"] + #[expect(non_snake_case)] + fn __GLOBAL_HEAP_SLOT_SIZE_FROM_LAYOUT(layout: core::alloc::Layout) -> Option { + type_from_layout(layout) + } +} diff --git a/osdk/src/base_crate/mod.rs b/osdk/src/base_crate/mod.rs index 4d32913f9..6918c8ab6 100644 --- a/osdk/src/base_crate/mod.rs +++ b/osdk/src/base_crate/mod.rs @@ -238,6 +238,12 @@ fn add_manifest_dependency( Path::new("deps").join("frame-allocator"), ); + add_manifest_dependency_to( + dependencies, + "osdk-heap-allocator", + Path::new("deps").join("heap-allocator"), + ); + add_manifest_dependency_to(dependencies, "ostd", Path::new("..").join("ostd")); let content = toml::to_string(&manifest).unwrap(); diff --git a/ostd/libs/ostd-macros/src/lib.rs b/ostd/libs/ostd-macros/src/lib.rs index 68173d22a..4758f31be 100644 --- a/ostd/libs/ostd-macros/src/lib.rs +++ b/ostd/libs/ostd-macros/src/lib.rs @@ -68,11 +68,12 @@ pub fn test_main(_attr: TokenStream, item: TokenStream) -> TokenStream { /// A macro attribute for the global frame allocator. /// /// The attributed static variable will be used to provide frame allocation -/// for the kernel. The variable should have type `ostd::mm::GlobalFrameAllocator`. +/// for the kernel. /// /// # Example /// /// ```ignore +/// use core::alloc::Layout; /// use ostd::{mm::{frame::GlobalFrameAllocator, Paddr}, global_frame_allocator}; /// /// // Of course it won't work because all allocations will fail. @@ -102,6 +103,78 @@ pub fn global_frame_allocator(_attr: TokenStream, item: TokenStream) -> TokenStr .into() } +/// A macro attribute to register the global heap allocator. +/// +/// The attributed static variable will be used to provide heap allocation +/// for the kernel. +/// +/// This attribute is not to be confused with Rust's built-in +/// [`global_allocator`] attribute, which applies to a static variable +/// implementing the unsafe `GlobalAlloc` trait. In contrast, the +/// [`global_heap_allocator`] attribute does not require the heap allocator to +/// implement an unsafe trait. [`global_heap_allocator`] eventually relies on +/// [`global_allocator`] to customize Rust's heap allocator. +/// +/// # Example +/// +/// ```ignore +/// use core::alloc::{AllocError, Layout}; +/// use ostd::{mm::heap::{GlobalHeapAllocator, HeapSlot}, global_heap_allocator}; +/// +/// // Of course it won't work and all allocations will fail. +/// // It's just an example. 
+/// #[global_heap_allocator] +/// static ALLOCATOR: MyHeapAllocator = MyHeapAllocator; +/// +/// struct MyHeapAllocator; +/// +/// impl GlobalHeapAllocator for MyHeapAllocator { +/// fn alloc(&self, _layout: Layout) -> Result { None } +/// fn dealloc(&self, _slot: HeapSlot) -> Result<(), AllocError> {} +/// } +/// ``` +#[proc_macro_attribute] +pub fn global_heap_allocator(_attr: TokenStream, item: TokenStream) -> TokenStream { + // Make a `static __GLOBAL_HEAP_ALLOCATOR_REF: &'static dyn GlobalHeapAllocator` + // That points to the annotated static variable. + let item = parse_macro_input!(item as syn::ItemStatic); + let static_name = &item.ident; + + quote!( + #[no_mangle] + static __GLOBAL_HEAP_ALLOCATOR_REF: &'static dyn ostd::mm::heap::GlobalHeapAllocator = &#static_name; + #item + ) + .into() +} + +/// A macro attribute to provide the heap slot type given the layout. +/// +/// The users must decide the size and the type of the heap slot to serve an +/// allocation with the layout. The function should return `None` if the layout +/// is not supported. +/// +/// The annotated function should be idempotent, i.e., the result should be the +/// same for the same layout. OSDK enforces this by only allowing the function +/// to be `const`. +#[proc_macro_attribute] +pub fn global_heap_allocator_slot_type_map(_attr: TokenStream, item: TokenStream) -> TokenStream { + // Rewrite the input `const fn __any_name__(layout: Layout) -> Option { ... }` to + // `const extern "Rust" fn __GLOBAL_HEAP_SLOT_SIZE_FROM_LAYOUT(layout: Layout) -> Option { ... }`. + // Reject if the input is not a `const fn`. + let item = parse_macro_input!(item as syn::ItemFn); + assert!( + item.sig.constness.is_some(), + "the annotated function must be `const`" + ); + + quote!( + #[export_name = "__GLOBAL_HEAP_SLOT_SIZE_FROM_LAYOUT"] + #item + ) + .into() +} + /// A macro attribute for the panic handler. /// /// The attributed function will be used to override OSTD's default diff --git a/ostd/src/lib.rs b/ostd/src/lib.rs index 44a1c10c5..e0c879efd 100644 --- a/ostd/src/lib.rs +++ b/ostd/src/lib.rs @@ -47,7 +47,10 @@ mod util; use core::sync::atomic::{AtomicBool, Ordering}; -pub use ostd_macros::{global_frame_allocator, main, panic_handler}; +pub use ostd_macros::{ + global_frame_allocator, global_heap_allocator, global_heap_allocator_slot_type_map, main, + panic_handler, +}; pub use ostd_pod::Pod; pub use self::{error::Error, prelude::Result}; @@ -98,9 +101,6 @@ unsafe fn init() { mm::kspace::init_kernel_page_table(meta_pages); - // SAFETY: This function is called only once and only on the BSP. - unsafe { mm::heap_allocator::init() }; - crate::sync::init(); boot::init_after_heap(); diff --git a/ostd/src/mm/frame/segment.rs b/ostd/src/mm/frame/segment.rs index 038c924a1..6bb2bed3f 100644 --- a/ostd/src/mm/frame/segment.rs +++ b/ostd/src/mm/frame/segment.rs @@ -107,6 +107,20 @@ impl Segment { } Ok(segment) } + + /// Restores the [`Segment`] from the raw physical address range. + /// + /// # Safety + /// + /// The range must be a forgotten [`Segment`] that matches the type `M`. + /// It could be manually forgotten by [`core::mem::forget`], + /// [`ManuallyDrop`], or [`Self::into_raw`]. + pub(crate) unsafe fn from_raw(range: Range) -> Self { + Self { + range, + _marker: core::marker::PhantomData, + } + } } impl Segment { @@ -180,6 +194,13 @@ impl Segment { _marker: core::marker::PhantomData, } } + + /// Forgets the [`Segment`] and gets a raw range of physical addresses. 
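+    ///
+    /// The returned range can later be turned back into a [`Segment`] with
+    /// [`Self::from_raw`]; until then the frames are deliberately leaked.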
+ pub(crate) fn into_raw(self) -> Range { + let range = self.range.clone(); + let _ = ManuallyDrop::new(self); + range + } } impl From> for Segment { diff --git a/ostd/src/mm/heap/mod.rs b/ostd/src/mm/heap/mod.rs new file mode 100644 index 000000000..55fa6b39a --- /dev/null +++ b/ostd/src/mm/heap/mod.rs @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: MPL-2.0 + +//! Manages the kernel heap using slab or buddy allocation strategies. + +use core::{ + alloc::{AllocError, GlobalAlloc, Layout}, + ptr::NonNull, +}; + +use crate::mm::Vaddr; + +mod slab; +mod slot; +mod slot_list; + +pub use self::{ + slab::{SharedSlab, Slab, SlabMeta}, + slot::{HeapSlot, SlotInfo}, + slot_list::SlabSlotList, +}; + +/// The trait for the global heap allocator. +/// +/// By providing the slab ([`Slab`]) and heap slot ([`HeapSlot`]) +/// mechanisms, OSTD allows users to implement their own kernel heap in a safe +/// manner, as an alternative to the unsafe [`core::alloc::GlobalAlloc`]. +/// +/// To provide the global heap allocator, use [`crate::global_heap_allocator`] +/// to mark a static variable that implements this trait. Use +/// [`crate::global_heap_allocator_slot_type_map`] to specify the sizes of +/// slots for different layouts. This latter restriction may be lifted in the +/// future. +pub trait GlobalHeapAllocator: Sync { + /// Allocates a [`HeapSlot`] according to the layout. + /// + /// OSTD calls this method to allocate memory from the global heap. + /// + /// The returned [`HeapSlot`] must be valid for the layout, i.e., the size + /// must be at least the size of the layout and the alignment must be at + /// least the alignment of the layout. Furthermore, the size of the + /// returned [`HeapSlot`] must match the size returned by the function + /// marked with [`crate::global_heap_allocator_slot_type_map`]. + fn alloc(&self, layout: Layout) -> Result; + + /// Deallocates a [`HeapSlot`]. + /// + /// OSTD calls this method to deallocate memory back to the global heap. + /// + /// Each deallocation must correspond to exactly one previous allocation. The provided + /// [`HeapSlot`] must match the one returned from the original allocation. + fn dealloc(&self, slot: HeapSlot) -> Result<(), AllocError>; +} + +extern "Rust" { + /// The reference to the global heap allocator generated by the + /// [`crate::global_heap_allocator`] attribute. + static __GLOBAL_HEAP_ALLOCATOR_REF: &'static dyn GlobalHeapAllocator; + + /// Gets the size and type of the heap slot to serve an allocation. + /// See [`crate::global_heap_allocator_slot_type_map`]. + fn __GLOBAL_HEAP_SLOT_SIZE_FROM_LAYOUT(layout: Layout) -> Option; +} + +/// Gets the reference to the user-defined global heap allocator. +fn get_global_heap_allocator() -> &'static dyn GlobalHeapAllocator { + // SAFETY: This up-call is redirected safely to Rust code by OSDK. + unsafe { __GLOBAL_HEAP_ALLOCATOR_REF } +} + +/// Gets the size and type of the heap slot to serve an allocation. +/// +/// This function is defined by the OSTD user and should be idempotent, +/// as we require it to be implemented as a `const fn`. +/// +/// See [`crate::global_heap_allocator_slot_type_map`]. +fn slot_size_from_layout(layout: Layout) -> Option { + // SAFETY: This up-call is redirected safely to Rust code by OSDK. + unsafe { __GLOBAL_HEAP_SLOT_SIZE_FROM_LAYOUT(layout) } +} + +macro_rules! abort_with_message { + ($($arg:tt)*) => { + log::error!($($arg)*); + crate::panic::abort(); + }; +} + +#[alloc_error_handler] +fn handle_alloc_error(layout: core::alloc::Layout) -> ! 
{ + abort_with_message!("Heap allocation error, layout = {:#x?}", layout); +} + +#[global_allocator] +static HEAP_ALLOCATOR: AllocDispatch = AllocDispatch; + +struct AllocDispatch; + +// TODO: Somehow restrict unwinding in the user-provided global allocator. +// Panicking should be fine, but we shouldn't unwind on panics. +unsafe impl GlobalAlloc for AllocDispatch { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + let Some(required_slot) = slot_size_from_layout(layout) else { + abort_with_message!("Heap allocation size not found for layout = {:#x?}", layout); + }; + + let res = get_global_heap_allocator().alloc(layout); + let Ok(slot) = res else { + return core::ptr::null_mut(); + }; + + if required_slot.size() != slot.size() + || slot.size() < layout.size() + || slot.as_ptr() as Vaddr % layout.align() != 0 + { + abort_with_message!( + "Heap allocation mismatch: slot ptr = {:p}, size = {:x}; layout = {:#x?}; required_slot = {:#x?}", + slot.as_ptr(), + slot.size(), + layout, + required_slot, + ); + } + + slot.as_ptr() + } + + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + // Now we restore the `HeapSlot` from the pointer and the layout. + let Some(required_slot) = slot_size_from_layout(layout) else { + abort_with_message!( + "Heap deallocation size not found for layout = {:#x?}", + layout + ); + }; + + // SAFETY: The validity of the pointer is guaranteed by the caller. The + // size must match the size of the slot when it was allocated, since we + // require `slot_size_from_layout` to be idempotent. + let slot = unsafe { HeapSlot::new(NonNull::new_unchecked(ptr), required_slot) }; + let res = get_global_heap_allocator().dealloc(slot); + + if res.is_err() { + abort_with_message!( + "Heap deallocation error, ptr = {:p}, layout = {:#x?}, required_slot = {:#x?}", + ptr, + layout, + required_slot, + ); + } + } +} diff --git a/ostd/src/mm/heap/slab.rs b/ostd/src/mm/heap/slab.rs new file mode 100644 index 000000000..1d3155e72 --- /dev/null +++ b/ostd/src/mm/heap/slab.rs @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: MPL-2.0 + +//! Slabs for implementing the slab allocator. + +use core::{alloc::AllocError, ptr::NonNull}; + +use super::{slot::HeapSlot, slot_list::SlabSlotList}; +use crate::mm::{ + frame::{linked_list::Link, meta::AnyFrameMeta}, + paddr_to_vaddr, Frame, FrameAllocOptions, UniqueFrame, PAGE_SIZE, +}; + +/// A slab. +/// +/// The slot size is the maximum size and alignment of the objects that can be +/// allocated from the slab. The slab is divided into slots of this size. +/// +/// The size of the slot cannot be smaller than the size of [`usize`] and must +/// be a power of two. The size of the slab should be larger than the slot +/// size and [`PAGE_SIZE`]. +/// +/// The `SLOT_SIZE` is the size of the slot in bytes. It must be smaller than or +/// equal to [`PAGE_SIZE`]. This restriction may be lifted in the future. +pub type Slab = UniqueFrame>>; + +/// A shared pointer to a slab. +/// +/// It is solely useful to point to a slab from a stray slot. When an object of +/// this type exists no mutable references can be created to the slab. So don't +/// hold it for long. +pub type SharedSlab = Frame>>; + +/// Frame metadata of a slab. +/// +/// Each slab is backed by a [`UniqueFrame`]. +#[derive(Debug)] +pub struct SlabMeta { + /// The list of free slots inside the slab. + /// + /// Slots not inside the slab should not be in the list. + free_list: SlabSlotList, + + /// The number of allocated slots in the slab. 
+ /// + /// Even if a slot is free, as long as it does not stay in the + /// [`Self::free_list`], it is considered allocated. + nr_allocated: u16, +} + +unsafe impl Send for SlabMeta {} +unsafe impl Sync for SlabMeta {} + +unsafe impl AnyFrameMeta for SlabMeta { + fn on_drop(&mut self, _reader: &mut crate::mm::VmReader) { + if self.nr_allocated != 0 { + // FIXME: We have no mechanisms to forget the slab once we are here, + // so we require the user to deallocate all slots before dropping. + panic!("{} slots allocated when dropping a slab", self.nr_allocated); + } + } + + fn is_untyped(&self) -> bool { + false + } +} + +impl SlabMeta { + /// Gets the capacity of the slab (regardless of the number of allocated slots). + pub const fn capacity(&self) -> u16 { + (PAGE_SIZE / SLOT_SIZE) as u16 + } + + /// Gets the number of allocated slots. + pub fn nr_allocated(&self) -> u16 { + self.nr_allocated + } + + /// Allocates a slot from the slab. + pub fn alloc(&mut self) -> Result { + let Some(allocated) = self.free_list.pop() else { + log::error!("Allocating a slot from a full slab"); + return Err(AllocError); + }; + self.nr_allocated += 1; + Ok(allocated) + } +} + +impl Slab { + /// Allocates a new slab of the given size. + /// + /// If the size is less than `SLOT_SIZE` or [`PAGE_SIZE`], the size will be + /// the maximum of the two. + pub fn new() -> crate::prelude::Result { + const { assert!(SLOT_SIZE <= PAGE_SIZE) }; + // To ensure we can store a pointer in each slot. + const { assert!(SLOT_SIZE >= core::mem::size_of::()) }; + // To ensure `nr_allocated` can be stored in a `u16`. + const { assert!(PAGE_SIZE / SLOT_SIZE <= u16::MAX as usize) }; + + let mut slab: Slab = FrameAllocOptions::new() + .zeroed(false) + .alloc_frame_with(Link::new(SlabMeta:: { + free_list: SlabSlotList::new(), + nr_allocated: 0, + }))? + .try_into() + .unwrap(); + + let head_paddr = slab.start_paddr(); + let head_vaddr = paddr_to_vaddr(head_paddr); + + // Push each slot to the free list. + for slot_offset in (0..PAGE_SIZE).step_by(SLOT_SIZE) { + // SAFETY: The slot is within the slab so it can't be NULL. + let slot_ptr = unsafe { NonNull::new_unchecked((head_vaddr + slot_offset) as *mut u8) }; + // SAFETY: The slot is newly allocated in the slab. + slab.meta_mut() + .free_list + .push(unsafe { HeapSlot::new(slot_ptr, super::SlotInfo::SlabSlot(SLOT_SIZE)) }); + } + + Ok(slab) + } + + /// Deallocates a slot to the slab. + /// + /// If the slot does not belong to the slab it returns [`AllocError`]. + pub fn dealloc(&mut self, slot: HeapSlot) -> Result<(), AllocError> { + if !(self.start_paddr()..self.start_paddr() + self.size()).contains(&slot.paddr()) { + log::error!("Deallocating a slot to a slab that does not own the slot"); + return Err(AllocError); + } + debug_assert_eq!(slot.size(), SLOT_SIZE); + self.meta_mut().free_list.push(slot); + self.meta_mut().nr_allocated -= 1; + + Ok(()) + } +} diff --git a/ostd/src/mm/heap/slot.rs b/ostd/src/mm/heap/slot.rs new file mode 100644 index 000000000..1893ee9cd --- /dev/null +++ b/ostd/src/mm/heap/slot.rs @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: MPL-2.0 + +//! Heap slots for allocations. + +use core::{alloc::AllocError, ptr::NonNull}; + +use crate::{ + impl_frame_meta_for, + mm::{ + kspace::LINEAR_MAPPING_BASE_VADDR, paddr_to_vaddr, FrameAllocOptions, Paddr, Segment, + Vaddr, PAGE_SIZE, + }, +}; + +/// A slot that will become or has been turned from a heap allocation. +/// +/// Heap slots can come from [`Slab`] or directly from a typed [`Segment`]. 
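+/// Slab slots back small, size-classed allocations, while segment-backed
+/// "large" slots back page-granular allocations.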
+/// +/// Heap slots can be used to fulfill heap allocations requested by the allocator. +/// Upon deallocation, the deallocated memory also becomes a heap slot. +/// +/// The size of the heap slot must match the slot size of the [`Slab`] or the +/// size of the [`Segment`]. +/// +/// [`Slab`]: super::Slab +pub struct HeapSlot { + /// The address of the slot. + addr: NonNull, + /// The type and size of the slot. + info: SlotInfo, +} + +/// The type and size of the heap slot that should be used for the allocation. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum SlotInfo { + /// The slot is from a [`super::Slab`]. + /// + /// The size of the slot and the corresponding slab are provided. + /// Both values are identical. + SlabSlot(usize), + /// The slot is from a [`Segment`]. + /// + /// The size of the slot and the corresponding segment are provided. + /// Both values are identical. + LargeSlot(usize), +} + +impl SlotInfo { + /// Gets the size of the slot. + pub fn size(&self) -> usize { + match self { + Self::SlabSlot(size) => *size, + Self::LargeSlot(size) => *size, + } + } +} + +impl HeapSlot { + /// Creates a new pointer to a heap slot. + /// + /// # Safety + /// + /// The pointer to the slot must either: + /// - be a free slot in a [`super::Slab`], or + /// - be a free slot in a [`Segment`]. + /// + /// If the pointer is from a [`super::Slab`] or [`Segment`], the slot must + /// have a size that matches the slot size of the slab or segment respectively. + pub(super) unsafe fn new(addr: NonNull, info: SlotInfo) -> Self { + Self { addr, info } + } + + /// Allocates a large slot. + /// + /// This function allocates in units of [`PAGE_SIZE`] bytes. + /// + /// This function returns an error if the frame allocation fails. + /// + /// # Panics + /// + /// This function panics if the size is not a multiple of [`PAGE_SIZE`]. + pub fn alloc_large(size: usize) -> Result { + assert_eq!(size % PAGE_SIZE, 0); + let nframes = size / PAGE_SIZE; + let segment = FrameAllocOptions::new() + .zeroed(false) + .alloc_segment_with(nframes, |_| LargeAllocFrameMeta) + .map_err(|_| { + log::error!("Failed to allocate a large slot"); + AllocError + })?; + + let paddr_range = segment.into_raw(); + let vaddr = paddr_to_vaddr(paddr_range.start); + + Ok(Self { + addr: NonNull::new(vaddr as *mut u8).unwrap(), + info: SlotInfo::LargeSlot(size), + }) + } + + /// Deallocates a large slot. + /// + /// # Panics + /// + /// This function aborts if the slot was not allocated with + /// [`HeapSlot::alloc_large`], as it requires specific memory management + /// operations that only apply to large slots. + pub fn dealloc_large(self) { + let SlotInfo::LargeSlot(size) = self.info else { + log::error!( + "Deallocating a large slot that was not allocated with `HeapSlot::alloc_large`" + ); + crate::panic::abort(); + }; + + debug_assert_eq!(size % PAGE_SIZE, 0); + debug_assert_eq!(self.paddr() % PAGE_SIZE, 0); + let nframes = size / PAGE_SIZE; + let range = self.paddr()..self.paddr() + nframes; + + // SAFETY: The segment was once forgotten when allocated. + drop(unsafe { Segment::::from_raw(range) }); + } + + /// Gets the physical address of the slot. + pub fn paddr(&self) -> Paddr { + self.addr.as_ptr() as Vaddr - LINEAR_MAPPING_BASE_VADDR + } + + /// Gets the size of the slot. + pub fn size(&self) -> usize { + match self.info { + SlotInfo::SlabSlot(size) => size, + SlotInfo::LargeSlot(size) => size, + } + } + + /// Gets the type and size of the slot. 
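+    ///
+    /// Unlike [`Self::size`], the returned [`SlotInfo`] also tells whether the
+    /// slot is a slab slot or a large slot.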
+ pub fn info(&self) -> SlotInfo { + self.info + } + + /// Gets the pointer to the slot. + pub fn as_ptr(&self) -> *mut u8 { + self.addr.as_ptr() + } +} + +/// The frames allocated for a large allocation. +#[derive(Debug)] +pub struct LargeAllocFrameMeta; + +impl_frame_meta_for!(LargeAllocFrameMeta); diff --git a/ostd/src/mm/heap/slot_list.rs b/ostd/src/mm/heap/slot_list.rs new file mode 100644 index 000000000..22818af14 --- /dev/null +++ b/ostd/src/mm/heap/slot_list.rs @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: MPL-2.0 + +//! Implementation of the free heap slot list. + +use core::ptr::NonNull; + +use super::HeapSlot; + +/// A singly-linked list of [`HeapSlot`]s from [`super::Slab`]s. +/// +/// The slots inside this list will have a size of `SLOT_SIZE`. They can come +/// from different slabs. +#[derive(Debug)] +pub struct SlabSlotList { + /// The head of the list. + head: Option>, +} + +impl Default for SlabSlotList { + fn default() -> Self { + Self::new() + } +} + +impl SlabSlotList { + /// Creates a new empty list. + pub const fn new() -> Self { + Self { head: None } + } + + /// Pushes a slot to the front of the list. + /// + /// # Panics + /// + /// Panics if + /// - the slot does not come from a slab + /// (i.e., `!matches(slot.info(), SlotInfo::SlabSlot(_))`); + /// - the size of the slot does not match `SLOT_SIZE`. + pub fn push(&mut self, slot: HeapSlot) { + let slot_ptr = slot.as_ptr(); + let super::SlotInfo::SlabSlot(slot_size) = slot.info() else { + panic!("The slot does not come from a slab"); + }; + + assert_eq!(slot_size, SLOT_SIZE); + const { assert!(SLOT_SIZE >= core::mem::size_of::()) }; + + let original_head = self.head; + + debug_assert!(!slot_ptr.is_null()); + // SAFETY: A pointer to a slot must not be NULL; + self.head = Some(unsafe { NonNull::new_unchecked(slot_ptr) }); + // Write the original head to the slot. + // SAFETY: A heap slot must be free so the pointer to the slot can be + // written to. The slot size is at least the size of a pointer. + unsafe { + slot_ptr + .cast::() + .write(original_head.map_or(0, |h| h.as_ptr() as usize)); + } + } + + /// Pops a slot from the front of the list. + /// + /// It returns `None` if the list is empty. + pub fn pop(&mut self) -> Option { + let original_head = self.head?; + + // SAFETY: The head is a valid pointer to a free slot. + // The slot contains a pointer to the next slot. + let next = unsafe { original_head.as_ptr().cast::().read() } as *mut u8; + + self.head = if next.is_null() { + None + } else { + // SAFETY: We already verified that the next slot is not NULL. + Some(unsafe { NonNull::new_unchecked(next) }) + }; + + Some(unsafe { HeapSlot::new(original_head, super::SlotInfo::SlabSlot(SLOT_SIZE)) }) + } +} diff --git a/ostd/src/mm/heap_allocator/mod.rs b/ostd/src/mm/heap_allocator/mod.rs deleted file mode 100644 index 69839d2fa..000000000 --- a/ostd/src/mm/heap_allocator/mod.rs +++ /dev/null @@ -1,174 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -mod slab_allocator; - -use core::{ - alloc::{GlobalAlloc, Layout}, - mem::ManuallyDrop, -}; - -use align_ext::AlignExt; -use log::debug; -use slab_allocator::Heap; -use spin::Once; - -use super::paddr_to_vaddr; -use crate::{ - impl_frame_meta_for, - mm::{FrameAllocOptions, PAGE_SIZE}, - prelude::*, - sync::SpinLock, - trap::disable_local, -}; - -#[global_allocator] -static HEAP_ALLOCATOR: LockedHeapWithRescue = LockedHeapWithRescue::new(); - -#[alloc_error_handler] -pub fn handle_alloc_error(layout: core::alloc::Layout) -> ! 
{ - panic!("Heap allocation error, layout = {:?}", layout); -} - -const INIT_KERNEL_HEAP_SIZE: usize = PAGE_SIZE * 256; - -#[repr(align(4096))] -struct InitHeapSpace([u8; INIT_KERNEL_HEAP_SIZE]); - -/// Initialize the heap allocator. -/// -/// # Safety -/// -/// This function should be called only once. -pub unsafe fn init() { - static mut HEAP_SPACE: InitHeapSpace = InitHeapSpace([0; INIT_KERNEL_HEAP_SIZE]); - // SAFETY: The HEAP_SPACE is a static memory range, so it's always valid. - unsafe { - #[expect(static_mut_refs)] - HEAP_ALLOCATOR.init(HEAP_SPACE.0.as_mut_ptr(), INIT_KERNEL_HEAP_SIZE); - } -} - -struct LockedHeapWithRescue { - heap: Once>, -} - -/// The metadata for the kernel heap frames. -#[derive(Debug)] -pub struct KernelHeapMeta; - -impl_frame_meta_for!(KernelHeapMeta); - -impl LockedHeapWithRescue { - /// Creates an new heap - pub const fn new() -> Self { - Self { heap: Once::new() } - } - - /// SAFETY: The range [start, start + size) must be a valid memory region. - pub unsafe fn init(&self, start: *mut u8, size: usize) { - self.heap - .call_once(|| SpinLock::new(Heap::new(start as usize, size))); - } - - /// SAFETY: The range [start, start + size) must be a valid memory region. - unsafe fn add_to_heap(&self, start: usize, size: usize) { - self.heap - .get() - .unwrap() - .disable_irq() - .lock() - .add_memory(start, size); - } - - fn rescue_if_low_memory(&self, remain_bytes: usize, layout: Layout) { - if remain_bytes <= PAGE_SIZE * 4 { - debug!( - "Low memory in heap allocator, try to call rescue. Remaining bytes: {:x?}", - remain_bytes - ); - // We don't care if the rescue returns ok or not since we can still do heap allocation. - let _ = self.rescue(&layout); - } - } - - fn rescue(&self, layout: &Layout) -> Result<()> { - const MIN_NUM_FRAMES: usize = 0x4000000 / PAGE_SIZE; // 64MB - - debug!("enlarge heap, layout = {:?}", layout); - let mut num_frames = { - let align = PAGE_SIZE.max(layout.align()); - debug_assert!(align % PAGE_SIZE == 0); - let size = layout.size().align_up(align); - size / PAGE_SIZE - }; - - let allocation_start = { - let mut options = FrameAllocOptions::new(); - options.zeroed(false); - let segment = if num_frames >= MIN_NUM_FRAMES { - options - .alloc_segment_with(num_frames, |_| KernelHeapMeta) - .unwrap() - } else { - match options.alloc_segment_with(MIN_NUM_FRAMES, |_| KernelHeapMeta) { - Ok(seg) => { - num_frames = MIN_NUM_FRAMES; - seg - } - Err(_) => options.alloc_segment_with(num_frames, |_| KernelHeapMeta)?, - } - }; - let paddr = segment.start_paddr(); - let _ = ManuallyDrop::new(segment); - paddr - }; - let vaddr = paddr_to_vaddr(allocation_start); - - // SAFETY: the frame is allocated from FrameAllocator and never be deallocated, - // so the addr is always valid. 
- unsafe { - debug!( - "add frames to heap: addr = 0x{:x}, size = 0x{:x}", - vaddr, - PAGE_SIZE * num_frames - ); - self.add_to_heap(vaddr, PAGE_SIZE * num_frames); - } - - Ok(()) - } -} - -unsafe impl GlobalAlloc for LockedHeapWithRescue { - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - let _guard = disable_local(); - - let res = self.heap.get().unwrap().lock().allocate(layout); - if let Ok((allocation, remain_bytes)) = res { - self.rescue_if_low_memory(remain_bytes, layout); - return allocation; - } - - if self.rescue(&layout).is_err() { - return core::ptr::null_mut::(); - } - - let res = self.heap.get().unwrap().lock().allocate(layout); - if let Ok((allocation, remain_bytes)) = res { - self.rescue_if_low_memory(remain_bytes, layout); - allocation - } else { - core::ptr::null_mut::() - } - } - - unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - debug_assert!(ptr as usize != 0); - self.heap - .get() - .unwrap() - .disable_irq() - .lock() - .deallocate(ptr, layout) - } -} diff --git a/ostd/src/mm/heap_allocator/slab_allocator/mod.rs b/ostd/src/mm/heap_allocator/slab_allocator/mod.rs deleted file mode 100644 index b7c505704..000000000 --- a/ostd/src/mm/heap_allocator/slab_allocator/mod.rs +++ /dev/null @@ -1,289 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -// Modified from lib.rs in slab_allocator project -// -// MIT License -// -// Copyright (c) 2024 Asterinas Developers -// Copyright (c) 2024 ArceOS Developers -// Copyright (c) 2017 Robert Węcławski -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. -// - -//! Slab allocator for `no_std` systems. It uses multiple slabs with blocks of -//! different sizes and a [buddy_system_allocator] for blocks larger than 4096 -//! bytes. -//! -//! It's based on . -//! -//! [buddy_system_allocator]: https://docs.rs/buddy_system_allocator/latest/buddy_system_allocator/ - -extern crate alloc; -extern crate buddy_system_allocator; - -use alloc::alloc::{AllocError, Layout}; -use core::ptr::NonNull; - -mod slab; -use slab::Slab; - -const SET_SIZE: usize = 64; -const MIN_HEAP_SIZE: usize = 0x8000; - -enum HeapAllocator { - Slab64Bytes, - Slab128Bytes, - Slab256Bytes, - Slab512Bytes, - Slab1024Bytes, - Slab2048Bytes, - Slab4096Bytes, - BuddyAllocator, -} - -/// A fixed size heap backed by multiple slabs with blocks of different sizes. -/// Allocations over 4096 bytes are served by linked list allocator. 
-pub struct Heap { - slab_64_bytes: Slab<64>, - slab_128_bytes: Slab<128>, - slab_256_bytes: Slab<256>, - slab_512_bytes: Slab<512>, - slab_1024_bytes: Slab<1024>, - slab_2048_bytes: Slab<2048>, - slab_4096_bytes: Slab<4096>, - buddy_allocator: buddy_system_allocator::Heap<32>, -} - -impl Heap { - /// Creates a new heap with the given `heap_start_addr` and `heap_size`. The start address must be valid - /// and the memory in the `[heap_start_addr, heap_start_addr + heap_size)` range must not be used for - /// anything else. - /// - /// # Safety - /// This function is unsafe because it can cause undefined behavior if the - /// given address is invalid. - pub unsafe fn new(heap_start_addr: usize, heap_size: usize) -> Heap { - assert!( - heap_start_addr % 4096 == 0, - "Start address should be page aligned" - ); - assert!( - heap_size >= MIN_HEAP_SIZE, - "Heap size should be greater or equal to minimum heap size" - ); - assert!( - heap_size % MIN_HEAP_SIZE == 0, - "Heap size should be a multiple of minimum heap size" - ); - Heap { - slab_64_bytes: Slab::<64>::new(0, 0), - slab_128_bytes: Slab::<128>::new(0, 0), - slab_256_bytes: Slab::<256>::new(0, 0), - slab_512_bytes: Slab::<512>::new(0, 0), - slab_1024_bytes: Slab::<1024>::new(0, 0), - slab_2048_bytes: Slab::<2048>::new(0, 0), - slab_4096_bytes: Slab::<4096>::new(0, 0), - buddy_allocator: { - let mut buddy = buddy_system_allocator::Heap::<32>::new(); - buddy.init(heap_start_addr, heap_size); - buddy - }, - } - } - - /// Adds memory to the heap. The start address must be valid - /// and the memory in the `[mem_start_addr, mem_start_addr + heap_size)` range must not be used for - /// anything else. - /// - /// # Safety - /// This function is unsafe because it can cause undefined behavior if the - /// given address is invalid. - pub unsafe fn add_memory(&mut self, heap_start_addr: usize, heap_size: usize) { - assert!( - heap_start_addr % 4096 == 0, - "Start address should be page aligned" - ); - assert!( - heap_size % 4096 == 0, - "Add Heap size should be a multiple of page size" - ); - self.buddy_allocator - .add_to_heap(heap_start_addr, heap_start_addr + heap_size); - } - - /// Adds memory to the heap. The start address must be valid - /// and the memory in the `[mem_start_addr, mem_start_addr + heap_size)` range must not be used for - /// anything else. - /// In case of linked list allocator the memory can only be extended. - /// - /// # Safety - /// This function is unsafe because it can cause undefined behavior if the - /// given address is invalid. - unsafe fn _grow(&mut self, mem_start_addr: usize, mem_size: usize, slab: HeapAllocator) { - match slab { - HeapAllocator::Slab64Bytes => self.slab_64_bytes.grow(mem_start_addr, mem_size), - HeapAllocator::Slab128Bytes => self.slab_128_bytes.grow(mem_start_addr, mem_size), - HeapAllocator::Slab256Bytes => self.slab_256_bytes.grow(mem_start_addr, mem_size), - HeapAllocator::Slab512Bytes => self.slab_512_bytes.grow(mem_start_addr, mem_size), - HeapAllocator::Slab1024Bytes => self.slab_1024_bytes.grow(mem_start_addr, mem_size), - HeapAllocator::Slab2048Bytes => self.slab_2048_bytes.grow(mem_start_addr, mem_size), - HeapAllocator::Slab4096Bytes => self.slab_4096_bytes.grow(mem_start_addr, mem_size), - HeapAllocator::BuddyAllocator => self - .buddy_allocator - .add_to_heap(mem_start_addr, mem_start_addr + mem_size), - } - } - - /// Allocates a chunk of the given size with the given alignment. 
Returns a pointer to the - /// beginning of that chunk and remaining bytes in buddy system allocator if it was successful. - /// Else it returns `Err`. - /// - /// This function finds the slab of lowest size which can still accommodate the given chunk. - /// The runtime is in `O(1)` for chunks of size <= 4096, and `O(n)` when chunk size is > 4096, - pub fn allocate(&mut self, layout: Layout) -> Result<(*mut u8, usize), AllocError> { - let addr = match Heap::layout_to_allocator(&layout) { - HeapAllocator::Slab64Bytes => self - .slab_64_bytes - .allocate(layout, &mut self.buddy_allocator)?, - HeapAllocator::Slab128Bytes => self - .slab_128_bytes - .allocate(layout, &mut self.buddy_allocator)?, - HeapAllocator::Slab256Bytes => self - .slab_256_bytes - .allocate(layout, &mut self.buddy_allocator)?, - HeapAllocator::Slab512Bytes => self - .slab_512_bytes - .allocate(layout, &mut self.buddy_allocator)?, - HeapAllocator::Slab1024Bytes => self - .slab_1024_bytes - .allocate(layout, &mut self.buddy_allocator)?, - HeapAllocator::Slab2048Bytes => self - .slab_2048_bytes - .allocate(layout, &mut self.buddy_allocator)?, - HeapAllocator::Slab4096Bytes => self - .slab_4096_bytes - .allocate(layout, &mut self.buddy_allocator)?, - HeapAllocator::BuddyAllocator => self - .buddy_allocator - .alloc(layout) - .map(|ptr| ptr.as_ptr() as usize) - .map_err(|_| AllocError)?, - }; - - Ok(( - addr as *mut u8, - self.buddy_allocator.stats_total_bytes() - self.buddy_allocator.stats_alloc_actual(), - )) - } - - /// Frees the given allocation. `ptr` must be a pointer returned - /// by a call to the `allocate` function with identical size and alignment. Undefined - /// behavior may occur for invalid arguments, thus this function is unsafe. - /// - /// This function finds the slab which contains address of `ptr` and adds the blocks beginning - /// with `ptr` address to the list of free blocks. - /// This operation is in `O(1)` for blocks <= 4096 bytes and `O(n)` for blocks > 4096 bytes. - /// - /// # Safety - /// This function is unsafe because it can cause undefined behavior if the - /// given address is invalid. - pub unsafe fn deallocate(&mut self, ptr: *mut u8, layout: Layout) { - let ptr = ptr as usize; - match Heap::layout_to_allocator(&layout) { - HeapAllocator::Slab64Bytes => self.slab_64_bytes.deallocate(ptr), - HeapAllocator::Slab128Bytes => self.slab_128_bytes.deallocate(ptr), - HeapAllocator::Slab256Bytes => self.slab_256_bytes.deallocate(ptr), - HeapAllocator::Slab512Bytes => self.slab_512_bytes.deallocate(ptr), - HeapAllocator::Slab1024Bytes => self.slab_1024_bytes.deallocate(ptr), - HeapAllocator::Slab2048Bytes => self.slab_2048_bytes.deallocate(ptr), - HeapAllocator::Slab4096Bytes => self.slab_4096_bytes.deallocate(ptr), - HeapAllocator::BuddyAllocator => self - .buddy_allocator - .dealloc(NonNull::new(ptr as *mut u8).unwrap(), layout), - } - } - - /// Returns bounds on the guaranteed usable size of a successful - /// allocation created with the specified `layout`. 
- #[expect(unused)] - pub fn usable_size(&self, layout: Layout) -> (usize, usize) { - match Heap::layout_to_allocator(&layout) { - HeapAllocator::Slab64Bytes => (layout.size(), 64), - HeapAllocator::Slab128Bytes => (layout.size(), 128), - HeapAllocator::Slab256Bytes => (layout.size(), 256), - HeapAllocator::Slab512Bytes => (layout.size(), 512), - HeapAllocator::Slab1024Bytes => (layout.size(), 1024), - HeapAllocator::Slab2048Bytes => (layout.size(), 2048), - HeapAllocator::Slab4096Bytes => (layout.size(), 4096), - HeapAllocator::BuddyAllocator => (layout.size(), layout.size()), - } - } - - /// Finds allocator to use based on layout size and alignment - fn layout_to_allocator(layout: &Layout) -> HeapAllocator { - if layout.size() > 4096 { - HeapAllocator::BuddyAllocator - } else if layout.size() <= 64 && layout.align() <= 64 { - HeapAllocator::Slab64Bytes - } else if layout.size() <= 128 && layout.align() <= 128 { - HeapAllocator::Slab128Bytes - } else if layout.size() <= 256 && layout.align() <= 256 { - HeapAllocator::Slab256Bytes - } else if layout.size() <= 512 && layout.align() <= 512 { - HeapAllocator::Slab512Bytes - } else if layout.size() <= 1024 && layout.align() <= 1024 { - HeapAllocator::Slab1024Bytes - } else if layout.size() <= 2048 && layout.align() <= 2048 { - HeapAllocator::Slab2048Bytes - } else { - HeapAllocator::Slab4096Bytes - } - } - - /// Returns total memory size in bytes of the heap. - pub fn total_bytes(&self) -> usize { - self.slab_64_bytes.total_blocks() * 64 - + self.slab_128_bytes.total_blocks() * 128 - + self.slab_256_bytes.total_blocks() * 256 - + self.slab_512_bytes.total_blocks() * 512 - + self.slab_1024_bytes.total_blocks() * 1024 - + self.slab_2048_bytes.total_blocks() * 2048 - + self.slab_4096_bytes.total_blocks() * 4096 - + self.buddy_allocator.stats_total_bytes() - } - - /// Returns allocated memory size in bytes. - pub fn used_bytes(&self) -> usize { - self.slab_64_bytes.used_blocks() * 64 - + self.slab_128_bytes.used_blocks() * 128 - + self.slab_256_bytes.used_blocks() * 256 - + self.slab_512_bytes.used_blocks() * 512 - + self.slab_1024_bytes.used_blocks() * 1024 - + self.slab_2048_bytes.used_blocks() * 2048 - + self.slab_4096_bytes.used_blocks() * 4096 - + self.buddy_allocator.stats_alloc_actual() - } - - /// Returns available memory size in bytes. - #[expect(unused)] - pub fn available_bytes(&self) -> usize { - self.total_bytes() - self.used_bytes() - } -} diff --git a/ostd/src/mm/heap_allocator/slab_allocator/slab.rs b/ostd/src/mm/heap_allocator/slab_allocator/slab.rs deleted file mode 100644 index 4cb1adff0..000000000 --- a/ostd/src/mm/heap_allocator/slab_allocator/slab.rs +++ /dev/null @@ -1,151 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -// Modified from slab.rs in slab_allocator project -// -// MIT License -// -// Copyright (c) 2024 Asterinas Developers -// Copyright (c) 2024 ArceOS Developers -// Copyright (c) 2017 Robert Węcławski -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. 
diff --git a/ostd/src/mm/heap_allocator/slab_allocator/slab.rs b/ostd/src/mm/heap_allocator/slab_allocator/slab.rs
deleted file mode 100644
index 4cb1adff0..000000000
--- a/ostd/src/mm/heap_allocator/slab_allocator/slab.rs
+++ /dev/null
@@ -1,151 +0,0 @@
-// SPDX-License-Identifier: MPL-2.0
-
-// Modified from slab.rs in slab_allocator project
-//
-// MIT License
-//
-// Copyright (c) 2024 Asterinas Developers
-// Copyright (c) 2024 ArceOS Developers
-// Copyright (c) 2017 Robert Węcławski
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-//
-
-use alloc::alloc::{AllocError, Layout};
-
-use super::SET_SIZE;
-
-pub struct Slab<const BLK_SIZE: usize> {
-    free_block_list: FreeBlockList<BLK_SIZE>,
-    total_blocks: usize,
-}
-
-impl<const BLK_SIZE: usize> Slab<BLK_SIZE> {
-    pub unsafe fn new(start_addr: usize, slab_size: usize) -> Slab<BLK_SIZE> {
-        let num_of_blocks = slab_size / BLK_SIZE;
-        Slab {
-            free_block_list: FreeBlockList::new(start_addr, BLK_SIZE, num_of_blocks),
-            total_blocks: num_of_blocks,
-        }
-    }
-
-    pub fn total_blocks(&self) -> usize {
-        self.total_blocks
-    }
-
-    pub fn used_blocks(&self) -> usize {
-        self.total_blocks - self.free_block_list.len()
-    }
-
-    pub unsafe fn grow(&mut self, start_addr: usize, slab_size: usize) {
-        let num_of_blocks = slab_size / BLK_SIZE;
-        self.total_blocks += num_of_blocks;
-        let mut block_list = FreeBlockList::<BLK_SIZE>::new(start_addr, BLK_SIZE, num_of_blocks);
-        while let Some(block) = block_list.pop() {
-            self.free_block_list.push(block);
-        }
-    }
-
-    pub fn allocate(
-        &mut self,
-        _layout: Layout,
-        buddy: &mut buddy_system_allocator::Heap<32>,
-    ) -> Result<usize, AllocError> {
-        match self.free_block_list.pop() {
-            Some(block) => Ok(block.addr()),
-            None => {
-                let layout =
-                    unsafe { Layout::from_size_align_unchecked(SET_SIZE * BLK_SIZE, 4096) };
-                if let Ok(ptr) = buddy.alloc(layout) {
-                    unsafe {
-                        self.grow(ptr.as_ptr() as usize, SET_SIZE * BLK_SIZE);
-                    }
-                    Ok(self.free_block_list.pop().unwrap().addr())
-                } else {
-                    Err(AllocError)
-                }
-            }
-        }
-    }
-
-    pub fn deallocate(&mut self, ptr: usize) {
-        let ptr = ptr as *mut FreeBlock;
-        unsafe {
-            self.free_block_list.push(&mut *ptr);
-        }
-    }
-}
-
-struct FreeBlockList<const BLK_SIZE: usize> {
-    len: usize,
-    head: Option<&'static mut FreeBlock>,
-}
-
-impl<const BLK_SIZE: usize> FreeBlockList<BLK_SIZE> {
-    unsafe fn new(
-        start_addr: usize,
-        block_size: usize,
-        num_of_blocks: usize,
-    ) -> FreeBlockList<BLK_SIZE> {
-        let mut new_list = FreeBlockList::new_empty();
-        for i in (0..num_of_blocks).rev() {
-            let new_block = (start_addr + i * block_size) as *mut FreeBlock;
-            new_list.push(&mut *new_block);
-        }
-        new_list
-    }
-
-    fn new_empty() -> FreeBlockList<BLK_SIZE> {
-        FreeBlockList { len: 0, head: None }
-    }
-
-    fn len(&self) -> usize {
-        self.len
-    }
-
-    fn pop(&mut self) -> Option<&'static mut FreeBlock> {
-        #[expect(clippy::manual_inspect)]
-        self.head.take().map(|node| {
-            self.head = node.next.take();
-            self.len -= 1;
-            node
-        })
-    }
-
-    fn push(&mut self, free_block: &'static mut FreeBlock) {
-        free_block.next = self.head.take();
-        self.len += 1;
-        self.head = Some(free_block);
-    }
-
-    #[expect(dead_code)]
-    fn is_empty(&self) -> bool {
-        self.head.is_none()
-    }
-}
-
-struct FreeBlock {
-    next: Option<&'static mut FreeBlock>,
-}
-
-impl FreeBlock {
-    fn addr(&self) -> usize {
-        self as *const _ as usize
-    }
-}
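For context on the file removed above: `Slab::allocate` grows on demand. When its free list is empty, it takes `SET_SIZE * BLK_SIZE` bytes from the buddy allocator, and `FreeBlockList::new` carves that region into equally sized blocks, walking it in reverse so that the lowest address ends up at the head of the list. A standalone sketch of the carving arithmetic (the addresses are collected into a `Vec` purely for illustration; the real code links the blocks in place):

    /// Computes the slot addresses obtained by carving
    /// `[start_addr, start_addr + block_size * num_of_blocks)` into fixed-size blocks.
    /// Iterating in reverse mirrors the removed code, which pushes each block onto an
    /// intrusive list so that the lowest address is popped first; the addresses are
    /// returned here in push order (highest first).
    fn carve_blocks(start_addr: usize, block_size: usize, num_of_blocks: usize) -> Vec<usize> {
        let mut addrs = Vec::with_capacity(num_of_blocks);
        for i in (0..num_of_blocks).rev() {
            addrs.push(start_addr + i * block_size);
        }
        addrs
    }
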
diff --git a/ostd/src/mm/mod.rs b/ostd/src/mm/mod.rs
index 34bcc1c2d..b0c07a886 100644
--- a/ostd/src/mm/mod.rs
+++ b/ostd/src/mm/mod.rs
@@ -10,7 +10,7 @@ pub type Paddr = usize;
 
 pub(crate) mod dma;
 pub mod frame;
-pub(crate) mod heap_allocator;
+pub mod heap;
 mod io;
 pub(crate) mod kspace;
 mod offset;
diff --git a/tools/bump_version.sh b/tools/bump_version.sh
index 57f32def5..c5373c10c 100755
--- a/tools/bump_version.sh
+++ b/tools/bump_version.sh
@@ -112,6 +112,7 @@ LINUX_BZIMAGE_SETUP_CARGO_TOML_PATH=${ASTER_SRC_DIR}/ostd/libs/linux-bzimage/set
 OSDK_CARGO_TOML_PATH=${ASTER_SRC_DIR}/osdk/Cargo.toml
 OSDK_TEST_RUNNER_CARGO_TOML_PATH=${ASTER_SRC_DIR}/osdk/deps/test-kernel/Cargo.toml
 OSDK_FRAME_ALLOCATOR_CARGO_TOML_PATH=${ASTER_SRC_DIR}/osdk/deps/frame-allocator/Cargo.toml
+OSDK_HEAP_ALLOCATOR_CARGO_TOML_PATH=${ASTER_SRC_DIR}/osdk/deps/heap-allocator/Cargo.toml
 VERSION_PATH=${ASTER_SRC_DIR}/VERSION
 
 current_version=$(cat ${VERSION_PATH})
@@ -135,9 +136,11 @@ update_package_version ${LINUX_BZIMAGE_SETUP_CARGO_TOML_PATH}
 update_package_version ${OSDK_CARGO_TOML_PATH}
 update_package_version ${OSDK_TEST_RUNNER_CARGO_TOML_PATH}
 update_package_version ${OSDK_FRAME_ALLOCATOR_CARGO_TOML_PATH}
+update_package_version ${OSDK_HEAP_ALLOCATOR_CARGO_TOML_PATH}
 
 update_dep_version ${OSDK_TEST_RUNNER_CARGO_TOML_PATH} ostd
 update_dep_version ${OSDK_FRAME_ALLOCATOR_CARGO_TOML_PATH} ostd
+update_dep_version ${OSDK_HEAP_ALLOCATOR_CARGO_TOML_PATH} ostd
 update_dep_version ${OSTD_CARGO_TOML_PATH} ostd-test
 update_dep_version ${OSTD_CARGO_TOML_PATH} linux-boot-params
 update_dep_version ${OSTD_CARGO_TOML_PATH} ostd-macros