Inject a scalable slab allocator

Author: Zhang Junyang
Date: 2025-01-13 14:24:32 +08:00
Committed by: Tate, Hongliang Tian
Parent: fdbe52c2ee
Commit: a708a0c046
23 changed files with 1166 additions and 626 deletions


@@ -0,0 +1,318 @@
// SPDX-License-Identifier: MPL-2.0
//! A global heap allocator implementation backed by multiple slab caches.
use core::{
alloc::{AllocError, Layout},
cell::RefCell,
};
use ostd::{
cpu_local,
mm::{
heap::{GlobalHeapAllocator, HeapSlot, SlabSlotList, SlotInfo},
PAGE_SIZE,
},
sync::{LocalIrqDisabled, SpinLock},
trap,
};
use crate::slab_cache::SlabCache;
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(usize)]
enum CommonSizeClass {
Bytes8 = 8,
Bytes16 = 16,
Bytes32 = 32,
Bytes64 = 64,
Bytes128 = 128,
Bytes256 = 256,
Bytes512 = 512,
Bytes1024 = 1024,
Bytes2048 = 2048,
}
impl CommonSizeClass {
const fn from_layout(layout: Layout) -> Option<Self> {
let size_class = match layout.size() {
0..=8 => CommonSizeClass::Bytes8,
9..=16 => CommonSizeClass::Bytes16,
17..=32 => CommonSizeClass::Bytes32,
33..=64 => CommonSizeClass::Bytes64,
65..=128 => CommonSizeClass::Bytes128,
129..=256 => CommonSizeClass::Bytes256,
257..=512 => CommonSizeClass::Bytes512,
513..=1024 => CommonSizeClass::Bytes1024,
1025..=2048 => CommonSizeClass::Bytes2048,
_ => return None,
};
// The alignment must be a non-zero power of two.
let align_class = match layout.align() {
1 | 2 | 4 | 8 => CommonSizeClass::Bytes8,
16 => CommonSizeClass::Bytes16,
32 => CommonSizeClass::Bytes32,
64 => CommonSizeClass::Bytes64,
128 => CommonSizeClass::Bytes128,
256 => CommonSizeClass::Bytes256,
512 => CommonSizeClass::Bytes512,
1024 => CommonSizeClass::Bytes1024,
2048 => CommonSizeClass::Bytes2048,
_ => return None,
};
Some(if (size_class as usize) < (align_class as usize) {
align_class
} else {
size_class
})
}
fn from_size(size: usize) -> Option<Self> {
match size {
8 => Some(CommonSizeClass::Bytes8),
16 => Some(CommonSizeClass::Bytes16),
32 => Some(CommonSizeClass::Bytes32),
64 => Some(CommonSizeClass::Bytes64),
128 => Some(CommonSizeClass::Bytes128),
256 => Some(CommonSizeClass::Bytes256),
512 => Some(CommonSizeClass::Bytes512),
1024 => Some(CommonSizeClass::Bytes1024),
2048 => Some(CommonSizeClass::Bytes2048),
_ => None,
}
}
}
/// Gets the slot type and size for the given layout.
///
/// It should be used to define [`ostd::global_heap_allocator_slot_type_map`].
pub const fn type_from_layout(layout: Layout) -> Option<SlotInfo> {
if let Some(class) = CommonSizeClass::from_layout(layout) {
return Some(SlotInfo::SlabSlot(class as usize));
}
if layout.size() > PAGE_SIZE / 2 && layout.align() <= PAGE_SIZE {
return Some(SlotInfo::LargeSlot(
layout.size().div_ceil(PAGE_SIZE) * PAGE_SIZE,
));
}
None
}
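
// An illustrative test sketch (not from the original commit): it spells out
// how `from_layout` resolves competing size and alignment requirements (the
// larger class wins) and how `type_from_layout` rounds large layouts up to
// whole pages. This crate is `no_std`, so these tests are only meaningful
// under a hosted test setup.
#[cfg(test)]
mod size_class_tests {
    use super::*;
    use core::alloc::Layout;

    #[test]
    fn layout_maps_to_the_larger_class() {
        // Size dominates: 24 bytes -> 32-byte class, even with 16-byte align.
        let l = Layout::from_size_align(24, 16).unwrap();
        assert_eq!(CommonSizeClass::from_layout(l), Some(CommonSizeClass::Bytes32));

        // Alignment dominates: 9 bytes with 64-byte align -> 64-byte class.
        let l = Layout::from_size_align(9, 64).unwrap();
        assert_eq!(CommonSizeClass::from_layout(l), Some(CommonSizeClass::Bytes64));
    }

    #[test]
    fn large_layouts_round_up_to_pages() {
        // 2048 bytes is the largest slab-served size.
        let l = Layout::from_size_align(2048, 8).unwrap();
        assert!(matches!(type_from_layout(l), Some(SlotInfo::SlabSlot(2048))));

        // 5000 bytes exceeds all slab classes and rounds up to two pages.
        let l = Layout::from_size_align(5000, 8).unwrap();
        assert!(matches!(
            type_from_layout(l),
            Some(SlotInfo::LargeSlot(size)) if size == 2 * PAGE_SIZE
        ));
    }
}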
struct Heap {
slab8: SlabCache<8>,
slab16: SlabCache<16>,
slab32: SlabCache<32>,
slab64: SlabCache<64>,
slab128: SlabCache<128>,
slab256: SlabCache<256>,
slab512: SlabCache<512>,
slab1024: SlabCache<1024>,
slab2048: SlabCache<2048>,
}
impl Heap {
const fn new() -> Self {
Self {
slab8: SlabCache::new(),
slab16: SlabCache::new(),
slab32: SlabCache::new(),
slab64: SlabCache::new(),
slab128: SlabCache::new(),
slab256: SlabCache::new(),
slab512: SlabCache::new(),
slab1024: SlabCache::new(),
slab2048: SlabCache::new(),
}
}
fn alloc(&mut self, class: CommonSizeClass) -> Result<HeapSlot, AllocError> {
match class {
CommonSizeClass::Bytes8 => self.slab8.alloc(),
CommonSizeClass::Bytes16 => self.slab16.alloc(),
CommonSizeClass::Bytes32 => self.slab32.alloc(),
CommonSizeClass::Bytes64 => self.slab64.alloc(),
CommonSizeClass::Bytes128 => self.slab128.alloc(),
CommonSizeClass::Bytes256 => self.slab256.alloc(),
CommonSizeClass::Bytes512 => self.slab512.alloc(),
CommonSizeClass::Bytes1024 => self.slab1024.alloc(),
CommonSizeClass::Bytes2048 => self.slab2048.alloc(),
}
}
fn dealloc(&mut self, slot: HeapSlot, class: CommonSizeClass) -> Result<(), AllocError> {
match class {
CommonSizeClass::Bytes8 => self.slab8.dealloc(slot),
CommonSizeClass::Bytes16 => self.slab16.dealloc(slot),
CommonSizeClass::Bytes32 => self.slab32.dealloc(slot),
CommonSizeClass::Bytes64 => self.slab64.dealloc(slot),
CommonSizeClass::Bytes128 => self.slab128.dealloc(slot),
CommonSizeClass::Bytes256 => self.slab256.dealloc(slot),
CommonSizeClass::Bytes512 => self.slab512.dealloc(slot),
CommonSizeClass::Bytes1024 => self.slab1024.dealloc(slot),
CommonSizeClass::Bytes2048 => self.slab2048.dealloc(slot),
}
}
}
static GLOBAL_POOL: SpinLock<Heap, LocalIrqDisabled> = SpinLock::new(Heap::new());
/// The maximum size in bytes of the object cache of each slot size class.
const OBJ_CACHE_MAX_SIZE: usize = 8 * PAGE_SIZE;
/// The expected size in bytes of the object cache of each slot size class.
///
/// If the cache exceeds the maximum size or is empty, it will be adjusted to
/// this size.
const OBJ_CACHE_EXPECTED_SIZE: usize = 2 * PAGE_SIZE;
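
// A worked example of these bounds, assuming the common 4-KiB page size: a
// 64-byte size class caches at most 8 * 4096 / 64 = 512 slots, and whenever
// the cache overflows (or runs empty) it is adjusted toward
// 2 * 4096 / 64 = 128 slots. Larger classes cache proportionally fewer slots
// for the same byte budget.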
struct ObjectCache<const SLOT_SIZE: usize> {
list: SlabSlotList<SLOT_SIZE>,
list_size: usize,
}
impl<const SLOT_SIZE: usize> ObjectCache<SLOT_SIZE> {
const fn new() -> Self {
Self {
list: SlabSlotList::new(),
list_size: 0,
}
}
    fn alloc(&mut self) -> Result<HeapSlot, AllocError> {
        // Fast path: serve the allocation from the CPU-local list.
        if let Some(slot) = self.list.pop() {
            self.list_size -= SLOT_SIZE;
            return Ok(slot);
        }

        // Slow path: the local list is empty, so refill it from the global
        // pool under a single lock acquisition.
        let size_class = CommonSizeClass::from_size(SLOT_SIZE).unwrap();
        let mut global_pool = GLOBAL_POOL.lock();
        for _ in 0..OBJ_CACHE_EXPECTED_SIZE / SLOT_SIZE {
            if let Ok(slot) = global_pool.alloc(size_class) {
                self.list.push(slot);
                self.list_size += SLOT_SIZE;
            } else {
                break;
            }
        }
        // Allocate one more slot for the caller; if even that fails, fall
        // back to whatever the refill loop managed to cache.
        if let Ok(new_slot) = global_pool.alloc(size_class) {
            Ok(new_slot)
        } else if let Some(popped) = self.list.pop() {
            self.list_size -= SLOT_SIZE;
            Ok(popped)
        } else {
            Err(AllocError)
        }
    }
    fn dealloc(&mut self, slot: HeapSlot, class: CommonSizeClass) -> Result<(), AllocError> {
        // Keep the slot locally as long as the cache stays under its cap.
        if self.list_size + SLOT_SIZE < OBJ_CACHE_MAX_SIZE {
            self.list.push(slot);
            self.list_size += SLOT_SIZE;
            return Ok(());
        }

        // The cache is full: return the slot to the global pool and trim the
        // cache back down to its expected size.
        let mut global_pool = GLOBAL_POOL.lock();
        global_pool.dealloc(slot, class)?;
        for _ in 0..(self.list_size - OBJ_CACHE_EXPECTED_SIZE) / SLOT_SIZE {
            let slot = self.list.pop().expect("The cache size should be ample");
            global_pool.dealloc(slot, class)?;
            self.list_size -= SLOT_SIZE;
        }
        Ok(())
    }
}
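
// An end-to-end illustration of the fast/slow paths above, assuming 4-KiB
// pages: the first 64-byte allocation on a CPU finds its `ObjectCache<64>`
// empty, so it locks `GLOBAL_POOL` once, refills about
// OBJ_CACHE_EXPECTED_SIZE / 64 = 128 slots, and returns one extra slot to
// the caller. The next ~128 allocations of that class on the same CPU then
// complete without touching the global lock at all.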
struct LocalCache {
cache8: ObjectCache<8>,
cache16: ObjectCache<16>,
cache32: ObjectCache<32>,
cache64: ObjectCache<64>,
cache128: ObjectCache<128>,
cache256: ObjectCache<256>,
cache512: ObjectCache<512>,
cache1024: ObjectCache<1024>,
cache2048: ObjectCache<2048>,
}
impl LocalCache {
const fn new() -> Self {
Self {
cache8: ObjectCache::new(),
cache16: ObjectCache::new(),
cache32: ObjectCache::new(),
cache64: ObjectCache::new(),
cache128: ObjectCache::new(),
cache256: ObjectCache::new(),
cache512: ObjectCache::new(),
cache1024: ObjectCache::new(),
cache2048: ObjectCache::new(),
}
}
fn alloc(&mut self, class: CommonSizeClass) -> Result<HeapSlot, AllocError> {
match class {
CommonSizeClass::Bytes8 => self.cache8.alloc(),
CommonSizeClass::Bytes16 => self.cache16.alloc(),
CommonSizeClass::Bytes32 => self.cache32.alloc(),
CommonSizeClass::Bytes64 => self.cache64.alloc(),
CommonSizeClass::Bytes128 => self.cache128.alloc(),
CommonSizeClass::Bytes256 => self.cache256.alloc(),
CommonSizeClass::Bytes512 => self.cache512.alloc(),
CommonSizeClass::Bytes1024 => self.cache1024.alloc(),
CommonSizeClass::Bytes2048 => self.cache2048.alloc(),
}
}
fn dealloc(&mut self, slot: HeapSlot, class: CommonSizeClass) -> Result<(), AllocError> {
match class {
CommonSizeClass::Bytes8 => self.cache8.dealloc(slot, class),
CommonSizeClass::Bytes16 => self.cache16.dealloc(slot, class),
CommonSizeClass::Bytes32 => self.cache32.dealloc(slot, class),
CommonSizeClass::Bytes64 => self.cache64.dealloc(slot, class),
CommonSizeClass::Bytes128 => self.cache128.dealloc(slot, class),
CommonSizeClass::Bytes256 => self.cache256.dealloc(slot, class),
CommonSizeClass::Bytes512 => self.cache512.dealloc(slot, class),
CommonSizeClass::Bytes1024 => self.cache1024.dealloc(slot, class),
CommonSizeClass::Bytes2048 => self.cache2048.dealloc(slot, class),
}
}
}
cpu_local! {
static LOCAL_POOL: RefCell<LocalCache> = RefCell::new(LocalCache::new());
}
/// The global heap allocator provided by OSDK.
///
/// It is a singleton that provides heap allocation for the kernel. If
/// multiple instances of this struct are created, their methods all operate
/// on the same underlying allocator state.
pub struct HeapAllocator;
impl GlobalHeapAllocator for HeapAllocator {
    fn alloc(&self, layout: Layout) -> Result<HeapSlot, AllocError> {
        // Layouts too large for any slab class bypass the caches entirely
        // and are served by whole pages.
        let Some(class) = CommonSizeClass::from_layout(layout) else {
            return HeapSlot::alloc_large(layout.size().div_ceil(PAGE_SIZE) * PAGE_SIZE);
        };
        // Disable IRQs so that borrowing the CPU-local `RefCell` cannot be
        // interrupted by a handler that also allocates.
        let irq_guard = trap::disable_local();
        let this_cache = LOCAL_POOL.get_with(&irq_guard);
        let mut local_cache = this_cache.borrow_mut();
        local_cache.alloc(class)
    }

    fn dealloc(&self, slot: HeapSlot) -> Result<(), AllocError> {
        // Slots that do not match a slab size class must be large slots.
        let Some(class) = CommonSizeClass::from_size(slot.size()) else {
            slot.dealloc_large();
            return Ok(());
        };
        let irq_guard = trap::disable_local();
        let this_cache = LOCAL_POOL.get_with(&irq_guard);
        let mut local_cache = this_cache.borrow_mut();
        local_cache.dealloc(slot, class)
    }
}
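
// A sketch of how a kernel would consume this crate (the exact registration
// mechanism lives in OSTD and is not shown in this diff): the kernel passes
// `HeapAllocator` to OSTD as the global heap allocator and uses
// `type_from_layout` to define `ostd::global_heap_allocator_slot_type_map`,
// so that OSTD can tell slab slots apart from large slots for every layout.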


@@ -0,0 +1,10 @@
// SPDX-License-Identifier: MPL-2.0
#![feature(allocator_api)]
#![no_std]
#![deny(unsafe_code)]
mod allocator;
mod slab_cache;
pub use allocator::{type_from_layout, HeapAllocator};


@@ -0,0 +1,144 @@
// SPDX-License-Identifier: MPL-2.0
//! A slab cache composed of slabs, each serving slots of one uniform size.
use core::alloc::AllocError;
use ostd::mm::{
frame::linked_list::LinkedList,
heap::{HeapSlot, Slab, SlabMeta},
Paddr, PAGE_SIZE,
};
const EXPECTED_EMPTY_SLABS: usize = 4;
const MAX_EMPTY_SLABS: usize = 16;
/// A slab cache.
///
/// A slab cache contains 3 parts:
/// - a list of empty slabs;
/// - a list of partially allocated slabs;
/// - and a list of full slabs.
///
/// Keeping slabs segregated by occupancy lets allocation favor partially
/// used slabs and makes wholly empty slabs easy to reclaim.
pub struct SlabCache<const SLOT_SIZE: usize> {
empty: LinkedList<SlabMeta<SLOT_SIZE>>,
partial: LinkedList<SlabMeta<SLOT_SIZE>>,
full: LinkedList<SlabMeta<SLOT_SIZE>>,
}
impl<const SLOT_SIZE: usize> SlabCache<SLOT_SIZE> {
/// Creates a new slab cache.
pub const fn new() -> Self {
Self {
empty: LinkedList::new(),
partial: LinkedList::new(),
full: LinkedList::new(),
}
}
    /// Allocates a slot from the cache.
    ///
    /// Partially allocated slabs are preferred; empty slabs are used next,
    /// and new slabs are allocated only when both lists are exhausted.
pub fn alloc(&mut self) -> Result<HeapSlot, AllocError> {
// Try to allocate from the partial slabs first.
if !self.partial.is_empty() {
let mut cursor = self.partial.cursor_back_mut();
let current = cursor.current_meta().unwrap();
let allocated = current.alloc().unwrap();
if current.nr_allocated() == current.capacity() {
self.full.push_front(cursor.take_current().unwrap());
}
return Ok(allocated);
}
// If no partial slab is available, try to get an empty slab.
if !self.empty.is_empty() {
let mut slab = self.empty.pop_front().unwrap();
let allocated = slab.meta_mut().alloc().unwrap();
self.add_slab(slab);
return Ok(allocated);
}
// If no empty slab is available, allocate new slabs.
let Ok(mut allocated_empty) = Slab::new() else {
log::error!("Failed to allocate a new slab");
return Err(AllocError);
};
let allocated = allocated_empty.meta_mut().alloc().unwrap();
self.add_slab(allocated_empty);
// Allocate more empty slabs and push them into the cache.
for _ in 0..EXPECTED_EMPTY_SLABS {
if let Ok(allocated_empty) = Slab::new() {
self.empty.push_front(allocated_empty);
} else {
break;
}
}
Ok(allocated)
}
/// Deallocates a slot into the cache.
///
    /// The slot must have been allocated from this cache.
pub fn dealloc(&mut self, slot: HeapSlot) -> Result<(), AllocError> {
let which = which_slab(&slot).ok_or_else(|| {
log::error!("Can't find the slab for the slot");
AllocError
})?;
let mut extracted_slab = None;
if self.partial.contains(which) {
extracted_slab = self.partial.cursor_mut_at(which).unwrap().take_current();
} else if self.full.contains(which) {
extracted_slab = self.full.cursor_mut_at(which).unwrap().take_current();
}
let mut slab = extracted_slab.ok_or_else(|| {
log::error!("Deallocating a slot that is not allocated from the cache");
AllocError
})?;
slab.dealloc(slot)?;
self.add_slab(slab);
// If the slab cache has too many empty slabs, free some of them.
if self.empty.size() > MAX_EMPTY_SLABS {
while self.empty.size() > EXPECTED_EMPTY_SLABS {
self.empty.pop_front();
}
}
Ok(())
}
    /// Puts a slab into the list that matches its occupancy.
    fn add_slab(&mut self, slab: Slab<SLOT_SIZE>) {
if slab.meta().nr_allocated() == slab.meta().capacity() {
self.full.push_front(slab);
} else if slab.meta().nr_allocated() > 0 {
self.partial.push_back(slab);
} else {
self.empty.push_front(slab);
}
}
}
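
// A summary of the slab life cycle implemented above:
//
//   empty --alloc--> partial --alloc fills last slot--> full
//   full --dealloc--> partial --dealloc frees last slot--> empty
//
// Once more than MAX_EMPTY_SLABS empty slabs accumulate, `dealloc` trims the
// empty list down to EXPECTED_EMPTY_SLABS, dropping the excess slabs.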
/// Gets the base physical address of the slab that the slot belongs to.
///
/// If the slot is larger than [`PAGE_SIZE`], it was not allocated from a
/// slab and this function returns `None`.
fn which_slab(slot: &HeapSlot) -> Option<Paddr> {
if slot.size() > PAGE_SIZE {
return None;
}
let frame_paddr = slot.paddr() / PAGE_SIZE * PAGE_SIZE;
Some(frame_paddr)
}