Inject a scalable slab allocator

Committed by: Tate, Hongliang Tian
Parent: fdbe52c2ee
Commit: a708a0c046
@@ -47,7 +47,10 @@ mod util;

use core::sync::atomic::{AtomicBool, Ordering};

pub use ostd_macros::{global_frame_allocator, main, panic_handler};
pub use ostd_macros::{
    global_frame_allocator, global_heap_allocator, global_heap_allocator_slot_type_map, main,
    panic_handler,
};
pub use ostd_pod::Pod;

pub use self::{error::Error, prelude::Result};
@@ -98,9 +101,6 @@ unsafe fn init() {

    mm::kspace::init_kernel_page_table(meta_pages);

    // SAFETY: This function is called only once and only on the BSP.
    unsafe { mm::heap_allocator::init() };

    crate::sync::init();

    boot::init_after_heap();
@@ -107,6 +107,20 @@ impl<M: AnyFrameMeta> Segment<M> {
        }
        Ok(segment)
    }

    /// Restores the [`Segment`] from the raw physical address range.
    ///
    /// # Safety
    ///
    /// The range must be a forgotten [`Segment`] that matches the type `M`.
    /// It could be manually forgotten by [`core::mem::forget`],
    /// [`ManuallyDrop`], or [`Self::into_raw`].
    pub(crate) unsafe fn from_raw(range: Range<Paddr>) -> Self {
        Self {
            range,
            _marker: core::marker::PhantomData,
        }
    }
}

impl<M: AnyFrameMeta + ?Sized> Segment<M> {
@@ -180,6 +194,13 @@ impl<M: AnyFrameMeta + ?Sized> Segment<M> {
            _marker: core::marker::PhantomData,
        }
    }

    /// Forgets the [`Segment`] and gets a raw range of physical addresses.
    pub(crate) fn into_raw(self) -> Range<Paddr> {
        let range = self.range.clone();
        let _ = ManuallyDrop::new(self);
        range
    }
}
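Taken together, `into_raw` and `from_raw` let crate-internal code temporarily reduce a segment to a plain `Range<Paddr>` (for example, to stash it in an allocator header) and later reclaim ownership. A hedged, crate-internal sketch; `stash_and_restore` is a hypothetical helper, not part of this commit:

```rust
fn stash_and_restore<M: AnyFrameMeta>(segment: Segment<M>) -> Segment<M> {
    // Forget the segment; only the raw physical address range remains.
    let range: core::ops::Range<Paddr> = segment.into_raw();
    // ... `range` could now live in a plain integer field ...
    // SAFETY: `range` came from `into_raw` on a `Segment<M>` with the
    // same metadata type `M`, and it is restored exactly once.
    unsafe { Segment::<M>::from_raw(range) }
}
```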

impl<M: AnyFrameMeta + ?Sized> From<Frame<M>> for Segment<M> {
ostd/src/mm/heap/mod.rs (new file, 151 lines)
@@ -0,0 +1,151 @@
// SPDX-License-Identifier: MPL-2.0

//! Manages the kernel heap using slab or buddy allocation strategies.

use core::{
    alloc::{AllocError, GlobalAlloc, Layout},
    ptr::NonNull,
};

use crate::mm::Vaddr;

mod slab;
mod slot;
mod slot_list;

pub use self::{
    slab::{SharedSlab, Slab, SlabMeta},
    slot::{HeapSlot, SlotInfo},
    slot_list::SlabSlotList,
};

/// The trait for the global heap allocator.
///
/// By providing the slab ([`Slab`]) and heap slot ([`HeapSlot`])
/// mechanisms, OSTD allows users to implement their own kernel heap in a safe
/// manner, as an alternative to the unsafe [`core::alloc::GlobalAlloc`].
///
/// To provide the global heap allocator, use [`crate::global_heap_allocator`]
/// to mark a static variable that implements this trait. Use
/// [`crate::global_heap_allocator_slot_type_map`] to specify the sizes of
/// slots for different layouts. This latter restriction may be lifted in the
/// future.
pub trait GlobalHeapAllocator: Sync {
    /// Allocates a [`HeapSlot`] according to the layout.
    ///
    /// OSTD calls this method to allocate memory from the global heap.
    ///
    /// The returned [`HeapSlot`] must be valid for the layout, i.e., the size
    /// must be at least the size of the layout and the alignment must be at
    /// least the alignment of the layout. Furthermore, the size of the
    /// returned [`HeapSlot`] must match the size returned by the function
    /// marked with [`crate::global_heap_allocator_slot_type_map`].
    fn alloc(&self, layout: Layout) -> Result<HeapSlot, AllocError>;

    /// Deallocates a [`HeapSlot`].
    ///
    /// OSTD calls this method to deallocate memory back to the global heap.
    ///
    /// Each deallocation must correspond to exactly one previous allocation.
    /// The provided [`HeapSlot`] must match the one returned from the
    /// original allocation.
    fn dealloc(&self, slot: HeapSlot) -> Result<(), AllocError>;
}
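To make the contract concrete, here is a hedged sketch of a user-side allocator. `FixedSlotAllocator` and its trivial everything-is-a-page policy are illustrative assumptions, not part of this commit, and it only works if the accompanying slot-type map (see the next sketch) reports the same page-rounded `SlotInfo::LargeSlot` size for every layout:

```rust
// A hypothetical OSTD-user crate providing the global heap allocator.
use core::alloc::{AllocError, Layout};
use ostd::mm::{
    heap::{GlobalHeapAllocator, HeapSlot},
    PAGE_SIZE,
};

struct FixedSlotAllocator; // a real allocator would keep per-size slab caches

impl GlobalHeapAllocator for FixedSlotAllocator {
    fn alloc(&self, layout: Layout) -> Result<HeapSlot, AllocError> {
        // Trivial policy: serve everything from whole pages. This must
        // agree with the sizes the slot-type map reports to OSTD.
        let size = layout.size().max(1).next_multiple_of(PAGE_SIZE);
        HeapSlot::alloc_large(size)
    }

    fn dealloc(&self, slot: HeapSlot) -> Result<(), AllocError> {
        // Every slot we hand out is a large slot, so this never aborts.
        slot.dealloc_large();
        Ok(())
    }
}

// Registers the static with OSTD via the attribute added by this commit.
#[ostd::global_heap_allocator]
static HEAP: FixedSlotAllocator = FixedSlotAllocator;
```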
extern "Rust" {
    /// The reference to the global heap allocator generated by the
    /// [`crate::global_heap_allocator`] attribute.
    static __GLOBAL_HEAP_ALLOCATOR_REF: &'static dyn GlobalHeapAllocator;

    /// Gets the size and type of the heap slot to serve an allocation.
    /// See [`crate::global_heap_allocator_slot_type_map`].
    fn __GLOBAL_HEAP_SLOT_SIZE_FROM_LAYOUT(layout: Layout) -> Option<SlotInfo>;
}

/// Gets the reference to the user-defined global heap allocator.
fn get_global_heap_allocator() -> &'static dyn GlobalHeapAllocator {
    // SAFETY: This up-call is redirected safely to Rust code by OSDK.
    unsafe { __GLOBAL_HEAP_ALLOCATOR_REF }
}

/// Gets the size and type of the heap slot to serve an allocation.
///
/// This function is defined by the OSTD user and must be deterministic,
/// returning the same result for the same layout, which is guaranteed by
/// requiring it to be implemented as a `const fn`.
///
/// See [`crate::global_heap_allocator_slot_type_map`].
fn slot_size_from_layout(layout: Layout) -> Option<SlotInfo> {
    // SAFETY: This up-call is redirected safely to Rust code by OSDK.
    unsafe { __GLOBAL_HEAP_SLOT_SIZE_FROM_LAYOUT(layout) }
}
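On the slot-type-map side, a more realistic, hedged sketch pairs power-of-two slab slots with page-granular large slots. The exact signature expected by the attribute is assumed from the extern declaration above, and this map would pair with a slab-aware allocator rather than the trivial one sketched earlier:

```rust
use core::alloc::Layout;
use ostd::mm::{heap::SlotInfo, PAGE_SIZE};

// Hypothetical user-side map, marked with the attribute from this commit.
// It must be a `const fn` so the same layout always yields the same slot.
#[ostd::global_heap_allocator_slot_type_map]
const fn slot_type_map(layout: Layout) -> Option<SlotInfo> {
    // Fold alignment into the size class (`Ord::max` is not const).
    let needed = if layout.align() > layout.size() {
        layout.align()
    } else {
        layout.size()
    };
    let size_class = needed.next_power_of_two();
    if size_class <= PAGE_SIZE {
        // Slab slots must hold at least a pointer (8 bytes on 64-bit).
        Some(SlotInfo::SlabSlot(if size_class < 8 { 8 } else { size_class }))
    } else {
        // A power of two above PAGE_SIZE is already a multiple of it.
        Some(SlotInfo::LargeSlot(size_class))
    }
}
```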
macro_rules! abort_with_message {
    ($($arg:tt)*) => {
        log::error!($($arg)*);
        crate::panic::abort();
    };
}

#[alloc_error_handler]
fn handle_alloc_error(layout: core::alloc::Layout) -> ! {
    abort_with_message!("Heap allocation error, layout = {:#x?}", layout);
}

#[global_allocator]
static HEAP_ALLOCATOR: AllocDispatch = AllocDispatch;

struct AllocDispatch;

// TODO: Somehow restrict unwinding in the user-provided global allocator.
// Panicking should be fine, but we shouldn't unwind on panics.
unsafe impl GlobalAlloc for AllocDispatch {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let Some(required_slot) = slot_size_from_layout(layout) else {
            abort_with_message!("Heap allocation size not found for layout = {:#x?}", layout);
        };

        let res = get_global_heap_allocator().alloc(layout);
        let Ok(slot) = res else {
            return core::ptr::null_mut();
        };

        if required_slot.size() != slot.size()
            || slot.size() < layout.size()
            || slot.as_ptr() as Vaddr % layout.align() != 0
        {
            abort_with_message!(
                "Heap allocation mismatch: slot ptr = {:p}, size = {:x}; layout = {:#x?}; required_slot = {:#x?}",
                slot.as_ptr(),
                slot.size(),
                layout,
                required_slot,
            );
        }

        slot.as_ptr()
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // Now we restore the `HeapSlot` from the pointer and the layout.
        let Some(required_slot) = slot_size_from_layout(layout) else {
            abort_with_message!(
                "Heap deallocation size not found for layout = {:#x?}",
                layout
            );
        };

        // SAFETY: The validity of the pointer is guaranteed by the caller. The
        // size must match the size of the slot when it was allocated, since we
        // require `slot_size_from_layout` to be deterministic.
        let slot = unsafe { HeapSlot::new(NonNull::new_unchecked(ptr), required_slot) };
        let res = get_global_heap_allocator().dealloc(slot);

        if res.is_err() {
            abort_with_message!(
                "Heap deallocation error, ptr = {:p}, layout = {:#x?}, required_slot = {:#x?}",
                ptr,
                layout,
                required_slot,
            );
        }
    }
}
ostd/src/mm/heap/slab.rs (new file, 140 lines)
@@ -0,0 +1,140 @@
// SPDX-License-Identifier: MPL-2.0

//! Slabs for implementing the slab allocator.

use core::{alloc::AllocError, ptr::NonNull};

use super::{slot::HeapSlot, slot_list::SlabSlotList};
use crate::mm::{
    frame::{linked_list::Link, meta::AnyFrameMeta},
    paddr_to_vaddr, Frame, FrameAllocOptions, UniqueFrame, PAGE_SIZE,
};

/// A slab.
///
/// The slot size is the maximum size and alignment of the objects that can be
/// allocated from the slab. The slab is divided into slots of this size.
///
/// The size of the slot cannot be smaller than the size of [`usize`] and must
/// be a power of two. The slab itself occupies one [`PAGE_SIZE`] frame, so it
/// is always at least as large as a slot.
///
/// The `SLOT_SIZE` parameter is the size of the slot in bytes. It must be
/// smaller than or equal to [`PAGE_SIZE`]. This restriction may be lifted in
/// the future.
pub type Slab<const SLOT_SIZE: usize> = UniqueFrame<Link<SlabMeta<SLOT_SIZE>>>;

/// A shared pointer to a slab.
///
/// It is solely useful to point to a slab from a stray slot. While an object
/// of this type exists, no mutable references to the slab can be created, so
/// don't hold it for long.
pub type SharedSlab<const SLOT_SIZE: usize> = Frame<Link<SlabMeta<SLOT_SIZE>>>;

/// Frame metadata of a slab.
///
/// Each slab is backed by a [`UniqueFrame`].
#[derive(Debug)]
pub struct SlabMeta<const SLOT_SIZE: usize> {
    /// The list of free slots inside the slab.
    ///
    /// Slots not inside the slab should not be in the list.
    free_list: SlabSlotList<SLOT_SIZE>,

    /// The number of allocated slots in the slab.
    ///
    /// Even if a slot is free, as long as it does not stay in the
    /// [`Self::free_list`], it is considered allocated.
    nr_allocated: u16,
}

unsafe impl<const SLOT_SIZE: usize> Send for SlabMeta<SLOT_SIZE> {}
unsafe impl<const SLOT_SIZE: usize> Sync for SlabMeta<SLOT_SIZE> {}

unsafe impl<const SLOT_SIZE: usize> AnyFrameMeta for SlabMeta<SLOT_SIZE> {
    fn on_drop(&mut self, _reader: &mut crate::mm::VmReader<crate::mm::Infallible>) {
        if self.nr_allocated != 0 {
            // FIXME: We have no mechanisms to forget the slab once we are here,
            // so we require the user to deallocate all slots before dropping.
            panic!("{} slots allocated when dropping a slab", self.nr_allocated);
        }
    }

    fn is_untyped(&self) -> bool {
        false
    }
}

impl<const SLOT_SIZE: usize> SlabMeta<SLOT_SIZE> {
    /// Gets the capacity of the slab (regardless of the number of allocated slots).
    pub const fn capacity(&self) -> u16 {
        (PAGE_SIZE / SLOT_SIZE) as u16
    }

    /// Gets the number of allocated slots.
    pub fn nr_allocated(&self) -> u16 {
        self.nr_allocated
    }

    /// Allocates a slot from the slab.
    pub fn alloc(&mut self) -> Result<HeapSlot, AllocError> {
        let Some(allocated) = self.free_list.pop() else {
            log::error!("Allocating a slot from a full slab");
            return Err(AllocError);
        };
        self.nr_allocated += 1;
        Ok(allocated)
    }
}

impl<const SLOT_SIZE: usize> Slab<SLOT_SIZE> {
    /// Allocates a new slab.
    ///
    /// The slab occupies one [`PAGE_SIZE`] frame, which is divided into
    /// slots of `SLOT_SIZE` bytes, all of which start out free.
    pub fn new() -> crate::prelude::Result<Self> {
        const { assert!(SLOT_SIZE <= PAGE_SIZE) };
        // To ensure we can store a pointer in each slot.
        const { assert!(SLOT_SIZE >= core::mem::size_of::<usize>()) };
        // To ensure `nr_allocated` can be stored in a `u16`.
        const { assert!(PAGE_SIZE / SLOT_SIZE <= u16::MAX as usize) };

        let mut slab: Slab<SLOT_SIZE> = FrameAllocOptions::new()
            .zeroed(false)
            .alloc_frame_with(Link::new(SlabMeta::<SLOT_SIZE> {
                free_list: SlabSlotList::new(),
                nr_allocated: 0,
            }))?
            .try_into()
            .unwrap();

        let head_paddr = slab.start_paddr();
        let head_vaddr = paddr_to_vaddr(head_paddr);

        // Push each slot to the free list.
        for slot_offset in (0..PAGE_SIZE).step_by(SLOT_SIZE) {
            // SAFETY: The slot is within the slab so it can't be NULL.
            let slot_ptr = unsafe { NonNull::new_unchecked((head_vaddr + slot_offset) as *mut u8) };
            // SAFETY: The slot is newly allocated in the slab.
            slab.meta_mut()
                .free_list
                .push(unsafe { HeapSlot::new(slot_ptr, super::SlotInfo::SlabSlot(SLOT_SIZE)) });
        }

        Ok(slab)
    }

    /// Deallocates a slot to the slab.
    ///
    /// If the slot does not belong to the slab, it returns [`AllocError`].
    pub fn dealloc(&mut self, slot: HeapSlot) -> Result<(), AllocError> {
        if !(self.start_paddr()..self.start_paddr() + self.size()).contains(&slot.paddr()) {
            log::error!("Deallocating a slot to a slab that does not own the slot");
            return Err(AllocError);
        }
        debug_assert_eq!(slot.size(), SLOT_SIZE);
        self.meta_mut().free_list.push(slot);
        self.meta_mut().nr_allocated -= 1;

        Ok(())
    }
}
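A hedged, crate-internal usage sketch of the slab API (ordinary OSTD users go through their `GlobalHeapAllocator` instead, since `HeapSlot::new` is `pub(super)`):

```rust
// A minimal sketch (error handling mostly elided). `meta_mut()` is the
// `UniqueFrame` metadata accessor used throughout this file.
fn slab_round_trip() -> crate::prelude::Result<()> {
    // One page divided into 64-byte slots, all initially free.
    let mut slab: Slab<64> = Slab::<64>::new()?;

    // Pop a slot from the free list; the slab now has one allocated slot.
    let slot = slab.meta_mut().alloc().unwrap();
    assert_eq!(slot.size(), 64);
    assert_eq!(slab.meta_mut().nr_allocated(), 1);

    // Return the slot. Dropping a slab with slots still allocated panics.
    slab.dealloc(slot).unwrap();
    Ok(())
}
```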
ostd/src/mm/heap/slot.rs (new file, 154 lines)
@@ -0,0 +1,154 @@
// SPDX-License-Identifier: MPL-2.0

//! Heap slots for allocations.

use core::{alloc::AllocError, ptr::NonNull};

use crate::{
    impl_frame_meta_for,
    mm::{
        kspace::LINEAR_MAPPING_BASE_VADDR, paddr_to_vaddr, FrameAllocOptions, Paddr, Segment,
        Vaddr, PAGE_SIZE,
    },
};

/// A slot that will become or has been turned from a heap allocation.
///
/// Heap slots can come from a [`Slab`] or directly from a typed [`Segment`].
///
/// Heap slots can be used to fulfill heap allocations requested by the allocator.
/// Upon deallocation, the deallocated memory also becomes a heap slot.
///
/// The size of the heap slot must match the slot size of the [`Slab`] or the
/// size of the [`Segment`].
///
/// [`Slab`]: super::Slab
pub struct HeapSlot {
    /// The address of the slot.
    addr: NonNull<u8>,
    /// The type and size of the slot.
    info: SlotInfo,
}

/// The type and size of the heap slot that should be used for the allocation.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SlotInfo {
    /// The slot is from a [`super::Slab`].
    ///
    /// The size of the slot and the slot size of the corresponding slab are
    /// provided. Both values are identical.
    SlabSlot(usize),
    /// The slot is from a [`Segment`].
    ///
    /// The size of the slot and the size of the corresponding segment are
    /// provided. Both values are identical.
    LargeSlot(usize),
}

impl SlotInfo {
    /// Gets the size of the slot.
    pub fn size(&self) -> usize {
        match self {
            Self::SlabSlot(size) => *size,
            Self::LargeSlot(size) => *size,
        }
    }
}

impl HeapSlot {
    /// Creates a new pointer to a heap slot.
    ///
    /// # Safety
    ///
    /// The pointer to the slot must either:
    /// - be a free slot in a [`super::Slab`], or
    /// - be a free slot in a [`Segment`].
    ///
    /// If the pointer is from a [`super::Slab`] or [`Segment`], the slot must
    /// have a size that matches the slot size of the slab or the size of the
    /// segment, respectively.
    pub(super) unsafe fn new(addr: NonNull<u8>, info: SlotInfo) -> Self {
        Self { addr, info }
    }

    /// Allocates a large slot.
    ///
    /// This function allocates in units of [`PAGE_SIZE`] bytes.
    ///
    /// This function returns an error if the frame allocation fails.
    ///
    /// # Panics
    ///
    /// This function panics if the size is not a multiple of [`PAGE_SIZE`].
    pub fn alloc_large(size: usize) -> Result<Self, AllocError> {
        assert_eq!(size % PAGE_SIZE, 0);
        let nframes = size / PAGE_SIZE;
        let segment = FrameAllocOptions::new()
            .zeroed(false)
            .alloc_segment_with(nframes, |_| LargeAllocFrameMeta)
            .map_err(|_| {
                log::error!("Failed to allocate a large slot");
                AllocError
            })?;

        let paddr_range = segment.into_raw();
        let vaddr = paddr_to_vaddr(paddr_range.start);

        Ok(Self {
            addr: NonNull::new(vaddr as *mut u8).unwrap(),
            info: SlotInfo::LargeSlot(size),
        })
    }

    /// Deallocates a large slot.
    ///
    /// # Aborts
    ///
    /// This function aborts if the slot was not allocated with
    /// [`HeapSlot::alloc_large`], as it requires specific memory management
    /// operations that only apply to large slots.
    pub fn dealloc_large(self) {
        let SlotInfo::LargeSlot(size) = self.info else {
            log::error!(
                "Deallocating a large slot that was not allocated with `HeapSlot::alloc_large`"
            );
            crate::panic::abort();
        };

        debug_assert_eq!(size % PAGE_SIZE, 0);
        debug_assert_eq!(self.paddr() % PAGE_SIZE, 0);
        // The range must span the whole allocation in bytes, not the frame
        // count (`self.paddr() + size / PAGE_SIZE` would be too short).
        let range = self.paddr()..self.paddr() + size;

        // SAFETY: The segment was once forgotten when allocated.
        drop(unsafe { Segment::<LargeAllocFrameMeta>::from_raw(range) });
    }

    /// Gets the physical address of the slot.
    pub fn paddr(&self) -> Paddr {
        self.addr.as_ptr() as Vaddr - LINEAR_MAPPING_BASE_VADDR
    }

    /// Gets the size of the slot.
    pub fn size(&self) -> usize {
        match self.info {
            SlotInfo::SlabSlot(size) => size,
            SlotInfo::LargeSlot(size) => size,
        }
    }

    /// Gets the type and size of the slot.
    pub fn info(&self) -> SlotInfo {
        self.info
    }

    /// Gets the pointer to the slot.
    pub fn as_ptr(&self) -> *mut u8 {
        self.addr.as_ptr()
    }
}
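A hedged, crate-internal sketch of the large-slot path, which is how allocations beyond slab sizes bypass slabs entirely:

```rust
// Crate-internal sketch; error handling elided.
fn large_slot_round_trip() {
    // Four whole pages, taken directly from the frame allocator.
    let slot = HeapSlot::alloc_large(4 * PAGE_SIZE).expect("out of frames");
    assert_eq!(slot.size(), 4 * PAGE_SIZE);
    assert_eq!(slot.info(), SlotInfo::LargeSlot(4 * PAGE_SIZE));
    // The backing `Segment` was forgotten via `into_raw` at allocation;
    // deallocating restores it with `from_raw` and drops it.
    slot.dealloc_large();
}
```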

/// The frames allocated for a large allocation.
#[derive(Debug)]
pub struct LargeAllocFrameMeta;

impl_frame_meta_for!(LargeAllocFrameMeta);
ostd/src/mm/heap/slot_list.rs (new file, 82 lines)
@@ -0,0 +1,82 @@
// SPDX-License-Identifier: MPL-2.0

//! Implementation of the free heap slot list.

use core::ptr::NonNull;

use super::HeapSlot;

/// A singly-linked list of [`HeapSlot`]s from [`super::Slab`]s.
///
/// The slots inside this list will have a size of `SLOT_SIZE`. They can come
/// from different slabs.
#[derive(Debug)]
pub struct SlabSlotList<const SLOT_SIZE: usize> {
    /// The head of the list.
    head: Option<NonNull<u8>>,
}

impl<const SLOT_SIZE: usize> Default for SlabSlotList<SLOT_SIZE> {
    fn default() -> Self {
        Self::new()
    }
}

impl<const SLOT_SIZE: usize> SlabSlotList<SLOT_SIZE> {
    /// Creates a new empty list.
    pub const fn new() -> Self {
        Self { head: None }
    }

    /// Pushes a slot to the front of the list.
    ///
    /// # Panics
    ///
    /// Panics if
    /// - the slot does not come from a slab
    ///   (i.e., `!matches!(slot.info(), SlotInfo::SlabSlot(_))`);
    /// - the size of the slot does not match `SLOT_SIZE`.
    pub fn push(&mut self, slot: HeapSlot) {
        let slot_ptr = slot.as_ptr();
        let super::SlotInfo::SlabSlot(slot_size) = slot.info() else {
            panic!("The slot does not come from a slab");
        };

        assert_eq!(slot_size, SLOT_SIZE);
        const { assert!(SLOT_SIZE >= core::mem::size_of::<usize>()) };

        let original_head = self.head;

        debug_assert!(!slot_ptr.is_null());
        // SAFETY: A pointer to a slot must not be NULL.
        self.head = Some(unsafe { NonNull::new_unchecked(slot_ptr) });
        // Write the original head to the slot.
        // SAFETY: A heap slot must be free so the pointer to the slot can be
        // written to. The slot size is at least the size of a pointer.
        unsafe {
            slot_ptr
                .cast::<usize>()
                .write(original_head.map_or(0, |h| h.as_ptr() as usize));
        }
    }

    /// Pops a slot from the front of the list.
    ///
    /// It returns `None` if the list is empty.
    pub fn pop(&mut self) -> Option<HeapSlot> {
        let original_head = self.head?;

        // SAFETY: The head is a valid pointer to a free slot.
        // The slot contains a pointer to the next slot.
        let next = unsafe { original_head.as_ptr().cast::<usize>().read() } as *mut u8;

        self.head = if next.is_null() {
            None
        } else {
            // SAFETY: We already verified that the next slot is not NULL.
            Some(unsafe { NonNull::new_unchecked(next) })
        };

        // SAFETY: The popped head is a free slab slot of size `SLOT_SIZE`.
        Some(unsafe { HeapSlot::new(original_head, super::SlotInfo::SlabSlot(SLOT_SIZE)) })
    }
}
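The list is intrusive: the first `usize` bytes of each free slot store the address of the next free slot, so the list itself needs no storage beyond `head`. A hedged sketch of the resulting LIFO behavior:

```rust
// LIFO behavior: the last slot pushed is the first popped. `a` and `b`
// stand for two free 64-byte slab slots obtained elsewhere (e.g. from a
// slab's free list); this helper is purely illustrative.
fn lifo_demo(a: HeapSlot, b: HeapSlot) {
    let mut list = SlabSlotList::<64>::new();
    list.push(a);
    list.push(b);
    let first = list.pop().unwrap();  // the slot pushed last, i.e. `b`
    let second = list.pop().unwrap(); // then `a`
    assert!(list.pop().is_none());
    // `first`/`second` would be handed out as allocations here.
    let _ = (first, second);
}
```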
(deleted file, 174 lines)
@@ -1,174 +0,0 @@
// SPDX-License-Identifier: MPL-2.0

mod slab_allocator;

use core::{
    alloc::{GlobalAlloc, Layout},
    mem::ManuallyDrop,
};

use align_ext::AlignExt;
use log::debug;
use slab_allocator::Heap;
use spin::Once;

use super::paddr_to_vaddr;
use crate::{
    impl_frame_meta_for,
    mm::{FrameAllocOptions, PAGE_SIZE},
    prelude::*,
    sync::SpinLock,
    trap::disable_local,
};

#[global_allocator]
static HEAP_ALLOCATOR: LockedHeapWithRescue = LockedHeapWithRescue::new();

#[alloc_error_handler]
pub fn handle_alloc_error(layout: core::alloc::Layout) -> ! {
    panic!("Heap allocation error, layout = {:?}", layout);
}

const INIT_KERNEL_HEAP_SIZE: usize = PAGE_SIZE * 256;

#[repr(align(4096))]
struct InitHeapSpace([u8; INIT_KERNEL_HEAP_SIZE]);

/// Initializes the heap allocator.
///
/// # Safety
///
/// This function should be called only once.
pub unsafe fn init() {
    static mut HEAP_SPACE: InitHeapSpace = InitHeapSpace([0; INIT_KERNEL_HEAP_SIZE]);
    // SAFETY: The HEAP_SPACE is a static memory range, so it's always valid.
    unsafe {
        #[expect(static_mut_refs)]
        HEAP_ALLOCATOR.init(HEAP_SPACE.0.as_mut_ptr(), INIT_KERNEL_HEAP_SIZE);
    }
}

struct LockedHeapWithRescue {
    heap: Once<SpinLock<Heap>>,
}

/// The metadata for the kernel heap frames.
#[derive(Debug)]
pub struct KernelHeapMeta;

impl_frame_meta_for!(KernelHeapMeta);

impl LockedHeapWithRescue {
    /// Creates a new heap.
    pub const fn new() -> Self {
        Self { heap: Once::new() }
    }

    /// # Safety
    ///
    /// The range `[start, start + size)` must be a valid memory region.
    pub unsafe fn init(&self, start: *mut u8, size: usize) {
        self.heap
            .call_once(|| SpinLock::new(Heap::new(start as usize, size)));
    }

    /// # Safety
    ///
    /// The range `[start, start + size)` must be a valid memory region.
    unsafe fn add_to_heap(&self, start: usize, size: usize) {
        self.heap
            .get()
            .unwrap()
            .disable_irq()
            .lock()
            .add_memory(start, size);
    }

    fn rescue_if_low_memory(&self, remain_bytes: usize, layout: Layout) {
        if remain_bytes <= PAGE_SIZE * 4 {
            debug!(
                "Low memory in heap allocator, try to call rescue. Remaining bytes: {:x?}",
                remain_bytes
            );
            // We don't care if the rescue returns ok or not since we can still do heap allocation.
            let _ = self.rescue(&layout);
        }
    }

    fn rescue(&self, layout: &Layout) -> Result<()> {
        const MIN_NUM_FRAMES: usize = 0x4000000 / PAGE_SIZE; // 64MB

        debug!("enlarge heap, layout = {:?}", layout);
        let mut num_frames = {
            let align = PAGE_SIZE.max(layout.align());
            debug_assert!(align % PAGE_SIZE == 0);
            let size = layout.size().align_up(align);
            size / PAGE_SIZE
        };

        let allocation_start = {
            let mut options = FrameAllocOptions::new();
            options.zeroed(false);
            let segment = if num_frames >= MIN_NUM_FRAMES {
                options
                    .alloc_segment_with(num_frames, |_| KernelHeapMeta)
                    .unwrap()
            } else {
                match options.alloc_segment_with(MIN_NUM_FRAMES, |_| KernelHeapMeta) {
                    Ok(seg) => {
                        num_frames = MIN_NUM_FRAMES;
                        seg
                    }
                    Err(_) => options.alloc_segment_with(num_frames, |_| KernelHeapMeta)?,
                }
            };
            let paddr = segment.start_paddr();
            let _ = ManuallyDrop::new(segment);
            paddr
        };
        let vaddr = paddr_to_vaddr(allocation_start);

        // SAFETY: The frames are allocated from the frame allocator and never
        // deallocated, so the address range is always valid.
        unsafe {
            debug!(
                "add frames to heap: addr = 0x{:x}, size = 0x{:x}",
                vaddr,
                PAGE_SIZE * num_frames
            );
            self.add_to_heap(vaddr, PAGE_SIZE * num_frames);
        }

        Ok(())
    }
}
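For reference, the (now removed) rescue sizing can be restated standalone; this is an illustrative, hedged rewrite of the computation inside `rescue`, assuming 4 KiB pages:

```rust
use align_ext::AlignExt;
use core::alloc::Layout;

// Request at least 64 MiB worth of frames per rescue so repeated small
// allocations don't each pay for a frame allocation.
fn preferred_rescue_frames(layout: &Layout) -> usize {
    const PAGE_SIZE: usize = 4096;
    const MIN_NUM_FRAMES: usize = 0x400_0000 / PAGE_SIZE; // 64 MiB

    let align = PAGE_SIZE.max(layout.align());
    let num_frames = layout.size().align_up(align) / PAGE_SIZE;
    // `rescue` falls back to exactly `num_frames` if the larger request fails.
    num_frames.max(MIN_NUM_FRAMES)
}
```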
unsafe impl GlobalAlloc for LockedHeapWithRescue {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let _guard = disable_local();

        let res = self.heap.get().unwrap().lock().allocate(layout);
        if let Ok((allocation, remain_bytes)) = res {
            self.rescue_if_low_memory(remain_bytes, layout);
            return allocation;
        }

        if self.rescue(&layout).is_err() {
            return core::ptr::null_mut::<u8>();
        }

        let res = self.heap.get().unwrap().lock().allocate(layout);
        if let Ok((allocation, remain_bytes)) = res {
            self.rescue_if_low_memory(remain_bytes, layout);
            allocation
        } else {
            core::ptr::null_mut::<u8>()
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        debug_assert!(ptr as usize != 0);
        self.heap
            .get()
            .unwrap()
            .disable_irq()
            .lock()
            .deallocate(ptr, layout)
    }
}
(deleted file, 289 lines)
@@ -1,289 +0,0 @@
// SPDX-License-Identifier: MPL-2.0

// Modified from lib.rs in the slab_allocator project
//
// MIT License
//
// Copyright (c) 2024 Asterinas Developers
// Copyright (c) 2024 ArceOS Developers
// Copyright (c) 2017 Robert Węcławski
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//

//! Slab allocator for `no_std` systems. It uses multiple slabs with blocks of
//! different sizes and a [buddy_system_allocator] for blocks larger than 4096
//! bytes.
//!
//! It's based on <https://github.com/weclaw1/slab_allocator>.
//!
//! [buddy_system_allocator]: https://docs.rs/buddy_system_allocator/latest/buddy_system_allocator/

extern crate alloc;
extern crate buddy_system_allocator;

use alloc::alloc::{AllocError, Layout};
use core::ptr::NonNull;

mod slab;
use slab::Slab;

const SET_SIZE: usize = 64;
const MIN_HEAP_SIZE: usize = 0x8000;

enum HeapAllocator {
    Slab64Bytes,
    Slab128Bytes,
    Slab256Bytes,
    Slab512Bytes,
    Slab1024Bytes,
    Slab2048Bytes,
    Slab4096Bytes,
    BuddyAllocator,
}

/// A fixed-size heap backed by multiple slabs with blocks of different sizes.
/// Allocations over 4096 bytes are served by the buddy allocator.
pub struct Heap {
    slab_64_bytes: Slab<64>,
    slab_128_bytes: Slab<128>,
    slab_256_bytes: Slab<256>,
    slab_512_bytes: Slab<512>,
    slab_1024_bytes: Slab<1024>,
    slab_2048_bytes: Slab<2048>,
    slab_4096_bytes: Slab<4096>,
    buddy_allocator: buddy_system_allocator::Heap<32>,
}

impl Heap {
    /// Creates a new heap with the given `heap_start_addr` and `heap_size`.
    /// The start address must be valid and the memory in the
    /// `[heap_start_addr, heap_start_addr + heap_size)` range must not be
    /// used for anything else.
    ///
    /// # Safety
    ///
    /// This function is unsafe because it can cause undefined behavior if the
    /// given address is invalid.
    pub unsafe fn new(heap_start_addr: usize, heap_size: usize) -> Heap {
        assert!(
            heap_start_addr % 4096 == 0,
            "Start address should be page aligned"
        );
        assert!(
            heap_size >= MIN_HEAP_SIZE,
            "Heap size should be greater than or equal to the minimum heap size"
        );
        assert!(
            heap_size % MIN_HEAP_SIZE == 0,
            "Heap size should be a multiple of the minimum heap size"
        );
        Heap {
            slab_64_bytes: Slab::<64>::new(0, 0),
            slab_128_bytes: Slab::<128>::new(0, 0),
            slab_256_bytes: Slab::<256>::new(0, 0),
            slab_512_bytes: Slab::<512>::new(0, 0),
            slab_1024_bytes: Slab::<1024>::new(0, 0),
            slab_2048_bytes: Slab::<2048>::new(0, 0),
            slab_4096_bytes: Slab::<4096>::new(0, 0),
            buddy_allocator: {
                let mut buddy = buddy_system_allocator::Heap::<32>::new();
                buddy.init(heap_start_addr, heap_size);
                buddy
            },
        }
    }

    /// Adds memory to the heap. The start address must be valid and the
    /// memory in the `[heap_start_addr, heap_start_addr + heap_size)` range
    /// must not be used for anything else.
    ///
    /// # Safety
    ///
    /// This function is unsafe because it can cause undefined behavior if the
    /// given address is invalid.
    pub unsafe fn add_memory(&mut self, heap_start_addr: usize, heap_size: usize) {
        assert!(
            heap_start_addr % 4096 == 0,
            "Start address should be page aligned"
        );
        assert!(
            heap_size % 4096 == 0,
            "Added heap size should be a multiple of the page size"
        );
        self.buddy_allocator
            .add_to_heap(heap_start_addr, heap_start_addr + heap_size);
    }

    /// Adds memory to a specific allocator. The start address must be valid
    /// and the memory in the `[mem_start_addr, mem_start_addr + mem_size)`
    /// range must not be used for anything else.
    /// In the case of the buddy allocator, the memory can only be extended.
    ///
    /// # Safety
    ///
    /// This function is unsafe because it can cause undefined behavior if the
    /// given address is invalid.
    unsafe fn _grow(&mut self, mem_start_addr: usize, mem_size: usize, slab: HeapAllocator) {
        match slab {
            HeapAllocator::Slab64Bytes => self.slab_64_bytes.grow(mem_start_addr, mem_size),
            HeapAllocator::Slab128Bytes => self.slab_128_bytes.grow(mem_start_addr, mem_size),
            HeapAllocator::Slab256Bytes => self.slab_256_bytes.grow(mem_start_addr, mem_size),
            HeapAllocator::Slab512Bytes => self.slab_512_bytes.grow(mem_start_addr, mem_size),
            HeapAllocator::Slab1024Bytes => self.slab_1024_bytes.grow(mem_start_addr, mem_size),
            HeapAllocator::Slab2048Bytes => self.slab_2048_bytes.grow(mem_start_addr, mem_size),
            HeapAllocator::Slab4096Bytes => self.slab_4096_bytes.grow(mem_start_addr, mem_size),
            HeapAllocator::BuddyAllocator => self
                .buddy_allocator
                .add_to_heap(mem_start_addr, mem_start_addr + mem_size),
        }
    }

    /// Allocates a chunk of the given size with the given alignment. On
    /// success, returns a pointer to the beginning of that chunk together
    /// with the remaining bytes in the buddy system allocator; otherwise it
    /// returns `Err`.
    ///
    /// This function finds the slab of the lowest size which can still
    /// accommodate the given chunk. The runtime is `O(1)` for chunks of size
    /// <= 4096, and `O(n)` when the chunk size is > 4096.
    pub fn allocate(&mut self, layout: Layout) -> Result<(*mut u8, usize), AllocError> {
        let addr = match Heap::layout_to_allocator(&layout) {
            HeapAllocator::Slab64Bytes => self
                .slab_64_bytes
                .allocate(layout, &mut self.buddy_allocator)?,
            HeapAllocator::Slab128Bytes => self
                .slab_128_bytes
                .allocate(layout, &mut self.buddy_allocator)?,
            HeapAllocator::Slab256Bytes => self
                .slab_256_bytes
                .allocate(layout, &mut self.buddy_allocator)?,
            HeapAllocator::Slab512Bytes => self
                .slab_512_bytes
                .allocate(layout, &mut self.buddy_allocator)?,
            HeapAllocator::Slab1024Bytes => self
                .slab_1024_bytes
                .allocate(layout, &mut self.buddy_allocator)?,
            HeapAllocator::Slab2048Bytes => self
                .slab_2048_bytes
                .allocate(layout, &mut self.buddy_allocator)?,
            HeapAllocator::Slab4096Bytes => self
                .slab_4096_bytes
                .allocate(layout, &mut self.buddy_allocator)?,
            HeapAllocator::BuddyAllocator => self
                .buddy_allocator
                .alloc(layout)
                .map(|ptr| ptr.as_ptr() as usize)
                .map_err(|_| AllocError)?,
        };

        Ok((
            addr as *mut u8,
            self.buddy_allocator.stats_total_bytes() - self.buddy_allocator.stats_alloc_actual(),
        ))
    }

    /// Frees the given allocation. `ptr` must be a pointer returned by a call
    /// to the `allocate` function with identical size and alignment.
    ///
    /// This function finds the slab which contains the address of `ptr` and
    /// adds the blocks beginning with the `ptr` address to the list of free
    /// blocks. This operation is `O(1)` for blocks <= 4096 bytes and `O(n)`
    /// for blocks > 4096 bytes.
    ///
    /// # Safety
    ///
    /// Undefined behavior may occur for invalid arguments; the given address
    /// must be valid.
    pub unsafe fn deallocate(&mut self, ptr: *mut u8, layout: Layout) {
        let ptr = ptr as usize;
        match Heap::layout_to_allocator(&layout) {
            HeapAllocator::Slab64Bytes => self.slab_64_bytes.deallocate(ptr),
            HeapAllocator::Slab128Bytes => self.slab_128_bytes.deallocate(ptr),
            HeapAllocator::Slab256Bytes => self.slab_256_bytes.deallocate(ptr),
            HeapAllocator::Slab512Bytes => self.slab_512_bytes.deallocate(ptr),
            HeapAllocator::Slab1024Bytes => self.slab_1024_bytes.deallocate(ptr),
            HeapAllocator::Slab2048Bytes => self.slab_2048_bytes.deallocate(ptr),
            HeapAllocator::Slab4096Bytes => self.slab_4096_bytes.deallocate(ptr),
            HeapAllocator::BuddyAllocator => self
                .buddy_allocator
                .dealloc(NonNull::new(ptr as *mut u8).unwrap(), layout),
        }
    }

    /// Returns bounds on the guaranteed usable size of a successful
    /// allocation created with the specified `layout`.
    #[expect(unused)]
    pub fn usable_size(&self, layout: Layout) -> (usize, usize) {
        match Heap::layout_to_allocator(&layout) {
            HeapAllocator::Slab64Bytes => (layout.size(), 64),
            HeapAllocator::Slab128Bytes => (layout.size(), 128),
            HeapAllocator::Slab256Bytes => (layout.size(), 256),
            HeapAllocator::Slab512Bytes => (layout.size(), 512),
            HeapAllocator::Slab1024Bytes => (layout.size(), 1024),
            HeapAllocator::Slab2048Bytes => (layout.size(), 2048),
            HeapAllocator::Slab4096Bytes => (layout.size(), 4096),
            HeapAllocator::BuddyAllocator => (layout.size(), layout.size()),
        }
    }

    /// Finds the allocator to use based on the layout size and alignment.
    fn layout_to_allocator(layout: &Layout) -> HeapAllocator {
        if layout.size() > 4096 {
            HeapAllocator::BuddyAllocator
        } else if layout.size() <= 64 && layout.align() <= 64 {
            HeapAllocator::Slab64Bytes
        } else if layout.size() <= 128 && layout.align() <= 128 {
            HeapAllocator::Slab128Bytes
        } else if layout.size() <= 256 && layout.align() <= 256 {
            HeapAllocator::Slab256Bytes
        } else if layout.size() <= 512 && layout.align() <= 512 {
            HeapAllocator::Slab512Bytes
        } else if layout.size() <= 1024 && layout.align() <= 1024 {
            HeapAllocator::Slab1024Bytes
        } else if layout.size() <= 2048 && layout.align() <= 2048 {
            HeapAllocator::Slab2048Bytes
        } else {
            HeapAllocator::Slab4096Bytes
        }
    }
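For reference, a few example layouts and the size class the branches above select:

```rust
use core::alloc::Layout;

// Which allocator `layout_to_allocator` picks for a few example layouts;
// the annotations follow directly from the branches above.
fn size_class_examples() {
    let _l1 = Layout::from_size_align(48, 8).unwrap(); // Slab64Bytes
    let _l2 = Layout::from_size_align(64, 64).unwrap(); // Slab64Bytes
    let _l3 = Layout::from_size_align(65, 8).unwrap(); // Slab128Bytes
    let _l4 = Layout::from_size_align(100, 256).unwrap(); // Slab256Bytes: alignment dominates
    let _l5 = Layout::from_size_align(8192, 8).unwrap(); // BuddyAllocator: size > 4096
}
```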

    /// Returns the total memory size in bytes of the heap.
    pub fn total_bytes(&self) -> usize {
        self.slab_64_bytes.total_blocks() * 64
            + self.slab_128_bytes.total_blocks() * 128
            + self.slab_256_bytes.total_blocks() * 256
            + self.slab_512_bytes.total_blocks() * 512
            + self.slab_1024_bytes.total_blocks() * 1024
            + self.slab_2048_bytes.total_blocks() * 2048
            + self.slab_4096_bytes.total_blocks() * 4096
            + self.buddy_allocator.stats_total_bytes()
    }

    /// Returns the allocated memory size in bytes.
    pub fn used_bytes(&self) -> usize {
        self.slab_64_bytes.used_blocks() * 64
            + self.slab_128_bytes.used_blocks() * 128
            + self.slab_256_bytes.used_blocks() * 256
            + self.slab_512_bytes.used_blocks() * 512
            + self.slab_1024_bytes.used_blocks() * 1024
            + self.slab_2048_bytes.used_blocks() * 2048
            + self.slab_4096_bytes.used_blocks() * 4096
            + self.buddy_allocator.stats_alloc_actual()
    }

    /// Returns the available memory size in bytes.
    #[expect(unused)]
    pub fn available_bytes(&self) -> usize {
        self.total_bytes() - self.used_bytes()
    }
}
(deleted file, 151 lines)
@@ -1,151 +0,0 @@
// SPDX-License-Identifier: MPL-2.0

// Modified from slab.rs in the slab_allocator project
//
// MIT License
//
// Copyright (c) 2024 Asterinas Developers
// Copyright (c) 2024 ArceOS Developers
// Copyright (c) 2017 Robert Węcławski
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//

use alloc::alloc::{AllocError, Layout};

use super::SET_SIZE;

pub struct Slab<const BLK_SIZE: usize> {
    free_block_list: FreeBlockList<BLK_SIZE>,
    total_blocks: usize,
}

impl<const BLK_SIZE: usize> Slab<BLK_SIZE> {
    pub unsafe fn new(start_addr: usize, slab_size: usize) -> Slab<BLK_SIZE> {
        let num_of_blocks = slab_size / BLK_SIZE;
        Slab {
            free_block_list: FreeBlockList::new(start_addr, BLK_SIZE, num_of_blocks),
            total_blocks: num_of_blocks,
        }
    }

    pub fn total_blocks(&self) -> usize {
        self.total_blocks
    }

    pub fn used_blocks(&self) -> usize {
        self.total_blocks - self.free_block_list.len()
    }

    pub unsafe fn grow(&mut self, start_addr: usize, slab_size: usize) {
        let num_of_blocks = slab_size / BLK_SIZE;
        self.total_blocks += num_of_blocks;
        let mut block_list = FreeBlockList::<BLK_SIZE>::new(start_addr, BLK_SIZE, num_of_blocks);
        while let Some(block) = block_list.pop() {
            self.free_block_list.push(block);
        }
    }

    pub fn allocate(
        &mut self,
        _layout: Layout,
        buddy: &mut buddy_system_allocator::Heap<32>,
    ) -> Result<usize, AllocError> {
        match self.free_block_list.pop() {
            Some(block) => Ok(block.addr()),
            None => {
                // Refill the slab with `SET_SIZE` blocks from the buddy allocator.
                let layout =
                    unsafe { Layout::from_size_align_unchecked(SET_SIZE * BLK_SIZE, 4096) };
                if let Ok(ptr) = buddy.alloc(layout) {
                    unsafe {
                        self.grow(ptr.as_ptr() as usize, SET_SIZE * BLK_SIZE);
                    }
                    Ok(self.free_block_list.pop().unwrap().addr())
                } else {
                    Err(AllocError)
                }
            }
        }
    }

    pub fn deallocate(&mut self, ptr: usize) {
        let ptr = ptr as *mut FreeBlock;
        unsafe {
            self.free_block_list.push(&mut *ptr);
        }
    }
}

struct FreeBlockList<const BLK_SIZE: usize> {
    len: usize,
    head: Option<&'static mut FreeBlock>,
}

impl<const BLK_SIZE: usize> FreeBlockList<BLK_SIZE> {
    unsafe fn new(
        start_addr: usize,
        block_size: usize,
        num_of_blocks: usize,
    ) -> FreeBlockList<BLK_SIZE> {
        let mut new_list = FreeBlockList::new_empty();
        for i in (0..num_of_blocks).rev() {
            let new_block = (start_addr + i * block_size) as *mut FreeBlock;
            new_list.push(&mut *new_block);
        }
        new_list
    }

    fn new_empty() -> FreeBlockList<BLK_SIZE> {
        FreeBlockList { len: 0, head: None }
    }

    fn len(&self) -> usize {
        self.len
    }

    fn pop(&mut self) -> Option<&'static mut FreeBlock> {
        #[expect(clippy::manual_inspect)]
        self.head.take().map(|node| {
            self.head = node.next.take();
            self.len -= 1;
            node
        })
    }

    fn push(&mut self, free_block: &'static mut FreeBlock) {
        free_block.next = self.head.take();
        self.len += 1;
        self.head = Some(free_block);
    }

    #[expect(dead_code)]
    fn is_empty(&self) -> bool {
        self.head.is_none()
    }
}

struct FreeBlock {
    next: Option<&'static mut FreeBlock>,
}

impl FreeBlock {
    fn addr(&self) -> usize {
        self as *const _ as usize
    }
}
@@ -10,7 +10,7 @@ pub type Paddr = usize;

pub(crate) mod dma;
pub mod frame;
pub(crate) mod heap_allocator;
pub mod heap;
mod io;
pub(crate) mod kspace;
mod offset;