Inject a scalable buddy system allocator into OSTD

Co-authored-by: Zhe Tang <tangzh@stu.pku.edu.cn>
Authored by Zhang Junyang on 2025-03-11 16:57:10 +08:00; committed by Tate, Hongliang Tian
parent 92bc8cbbf7
commit 5f05963ee5
27 changed files with 1301 additions and 236 deletions

allocator.rs (new file)

@@ -0,0 +1,293 @@
// SPDX-License-Identifier: MPL-2.0
use core::{
alloc::Layout,
cell::RefCell,
ops::DerefMut,
sync::atomic::{AtomicUsize, Ordering},
};
use ostd::{
cpu::{all_cpus, PinCurrentCpu},
cpu_local,
mm::{frame::GlobalFrameAllocator, Paddr, PAGE_SIZE},
sync::{LocalIrqDisabled, SpinLock},
trap,
};
use crate::chunk::{size_of_order, BuddyOrder};
use super::set::BuddySet;
/// The global free buddies.
static GLOBAL_POOL: SpinLock<BuddySet<MAX_BUDDY_ORDER>, LocalIrqDisabled> =
SpinLock::new(BuddySet::new_empty());
static GLOBAL_POOL_SIZE: AtomicUsize = AtomicUsize::new(0);
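// The pool size is tracked in a separate atomic so that readers (e.g., the
// balancing policy and `load_total_free_size`) can observe it without
// taking the lock.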
// CPU-local free buddies.
cpu_local! {
static LOCAL_POOL: RefCell<BuddySet<MAX_LOCAL_BUDDY_ORDER>> = RefCell::new(BuddySet::new_empty());
static LOCAL_POOL_SIZE: AtomicUsize = AtomicUsize::new(0);
}
/// Maximum supported order of the buddy system.
///
/// That is, it is the number of size classes of free blocks, which
/// determines the maximum size of a single allocation.
///
/// A maximum buddy order of 32 supports chunks of up to
/// 4 KiB * 2^31 = 8 TiB.
const MAX_BUDDY_ORDER: BuddyOrder = 32;
/// Maximum supported order of the CPU-local buddy systems.
///
/// Since large blocks are rarely allocated, caching them locally would
/// lead to significant fragmentation.
///
/// Lock guards are also allocated on the stack, so this limit also bounds
/// the stack usage of common paths.
///
/// A maximum local buddy order of 18 supports chunks of up to
/// 4 KiB * 2^17 = 512 MiB.
const MAX_LOCAL_BUDDY_ORDER: BuddyOrder = 18;
/// The global frame allocator provided by OSDK.
///
/// It is a singleton that provides frame allocation for the kernel. If
/// multiple instances of this struct are created, all the member functions
/// will eventually access the same allocator.
pub struct FrameAllocator;
impl GlobalFrameAllocator for FrameAllocator {
fn alloc(&self, layout: Layout) -> Option<Paddr> {
let irq_guard = trap::disable_local();
let local_pool_cell = LOCAL_POOL.get_with(&irq_guard);
let mut local_pool = local_pool_cell.borrow_mut();
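// A buddy chunk of order `o` is naturally aligned to its own size
// (`size_of_order(o)`), so taking the maximum of the size order and the
// alignment order satisfies both requirements at once.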
let size_order = greater_order_of(layout.size());
let align_order = greater_order_of(layout.align());
let order = size_order.max(align_order);
let mut chunk_addr = None;
if order < MAX_LOCAL_BUDDY_ORDER {
chunk_addr = local_pool.alloc_chunk(order);
}
// Fall back to the global free lists if the local free lists are empty.
if chunk_addr.is_none() {
chunk_addr = alloc_from_global_pool(order);
}
// TODO: Under memory pressure the global pool may not be enough. We may
// need to merge all buddy chunks from the local pools into the global
// pool and try again.
// If the alignment order is larger than the size order, we need to split
// the chunk and return the rest part back to the free lists.
if align_order > size_order {
if let Some(chunk_addr) = chunk_addr {
let addr = chunk_addr + size_of_order(size_order);
let size = size_of_order(align_order) - size_of_order(size_order);
self.add_free_memory(addr, size);
}
} else {
balancing::balance(local_pool.deref_mut());
}
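// Publish the new local pool size so that `load_total_free_size` can
// read it without touching the CPU-local pool itself.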
LOCAL_POOL_SIZE
.get_on_cpu(irq_guard.current_cpu())
.store(local_pool.total_size(), Ordering::Relaxed);
chunk_addr
}
fn add_free_memory(&self, mut addr: Paddr, mut size: usize) {
let irq_guard = trap::disable_local();
let local_pool_cell = LOCAL_POOL.get_with(&irq_guard);
let mut local_pool = local_pool_cell.borrow_mut();
// Split the range into properly aligned chunks and return each of them
// to the free lists.
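// For example, with 4 KiB pages, freeing a 7-page range whose start is
// aligned to 4 pages yields chunks of orders 2, 1, and 0, in that order.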
while size > 0 {
let next_chunk_order = max_order_from(addr).min(lesser_order_of(size));
if next_chunk_order >= MAX_LOCAL_BUDDY_ORDER {
dealloc_to_global_pool(addr, next_chunk_order);
} else {
local_pool.insert_chunk(addr, next_chunk_order);
}
size -= size_of_order(next_chunk_order);
addr += size_of_order(next_chunk_order);
}
balancing::balance(local_pool.deref_mut());
LOCAL_POOL_SIZE
.get_on_cpu(irq_guard.current_cpu())
.store(local_pool.total_size(), Ordering::Relaxed);
}
}
fn alloc_from_global_pool(order: BuddyOrder) -> Option<Paddr> {
let mut lock_guard = GLOBAL_POOL.lock();
let res = lock_guard.alloc_chunk(order);
GLOBAL_POOL_SIZE.store(lock_guard.total_size(), Ordering::Relaxed);
res
}
fn dealloc_to_global_pool(addr: Paddr, order: BuddyOrder) {
let mut lock_guard = GLOBAL_POOL.lock();
lock_guard.insert_chunk(addr, order);
GLOBAL_POOL_SIZE.store(lock_guard.total_size(), Ordering::Relaxed);
}
/// Loads the total size (in bytes) of free memory in the allocator.
pub fn load_total_free_size() -> usize {
let mut total = 0;
total += GLOBAL_POOL_SIZE.load(Ordering::Relaxed);
for cpu in all_cpus() {
total += LOCAL_POOL_SIZE.get_on_cpu(cpu).load(Ordering::Relaxed);
}
total
}
/// Returns the smallest order whose chunk size covers at least the given size.
fn greater_order_of(size: usize) -> BuddyOrder {
let size = size / PAGE_SIZE;
size.next_power_of_two().trailing_zeros() as BuddyOrder
}
/// Returns the largest order whose chunk size is at most the given size.
fn lesser_order_of(size: usize) -> BuddyOrder {
let size = size / PAGE_SIZE;
(usize::BITS - size.leading_zeros() - 1) as BuddyOrder
}
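// For example, with 4 KiB pages, a size of 3 pages gives
// `greater_order_of` == 2 (an order-2 chunk of 4 pages covers it) and
// `lesser_order_of` == 1 (an order-1 chunk of 2 pages fits within it).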
/// Returns the maximum order of a buddy chunk that can start at the given address.
///
/// If the start address is not aligned to the order, the address/order pair
/// cannot form a buddy chunk.
///
/// # Panics
///
/// Panics if the address is not page-aligned in debug mode.
fn max_order_from(addr: Paddr) -> BuddyOrder {
(addr.trailing_zeros() - PAGE_SIZE.trailing_zeros()) as BuddyOrder
}
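// For example, with 4 KiB pages, an address that is an odd multiple of
// 8 MiB has 23 trailing zero bits, so `max_order_from` returns 23 - 12 = 11,
// i.e., the largest self-aligned chunk starting there is 8 MiB.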
pub mod balancing {
//! Controlling the balancing between CPU-local free pools and the global free pool.
use core::sync::atomic::Ordering;
use ostd::cpu::num_cpus;
use super::{
lesser_order_of, BuddyOrder, BuddySet, GLOBAL_POOL, GLOBAL_POOL_SIZE, MAX_LOCAL_BUDDY_ORDER,
};
use crate::chunk::size_of_order;
/// Controls the expected size of cache for each CPU-local free pool.
///
/// The expected size is the size of `GLOBAL_POOL` divided by the number
/// of CPUs, and then divided by this constant.
const CACHE_EXPECTED_PORTION: usize = 2;
/// Returns the expected size of cache for each CPU-local free pool.
///
/// It depends on the size of the global free pool.
fn cache_expected_size(global_size: usize) -> usize {
global_size / num_cpus() / CACHE_EXPECTED_PORTION
}
/// Controls the minimal size of cache for each CPU-local free pool.
///
/// The minimal size is the expected size divided by this constant.
const CACHE_MINIMAL_PORTION: usize = 8;
/// Returns the minimal size of cache for each CPU-local free pool.
///
/// It depends on the size of the global free pool.
fn cache_minimal_size(global_size: usize) -> usize {
cache_expected_size(global_size) / CACHE_MINIMAL_PORTION
}
/// Controls the maximal size of cache for each CPU-local free pool.
///
/// The maximal size is the expected size multiplied by this constant.
const CACHE_MAXIMAL_MULTIPLIER: usize = 2;
/// Returns the maximal size of cache for each CPU-local free pool.
///
/// It depends on the size of the global free pool.
fn cache_maximal_size(global_size: usize) -> usize {
cache_expected_size(global_size) * CACHE_MAXIMAL_MULTIPLIER
}
/// Balances a local cache and the global free pool.
pub fn balance(local: &mut BuddySet<MAX_LOCAL_BUDDY_ORDER>) {
let global_size = GLOBAL_POOL_SIZE.load(Ordering::Relaxed);
let minimal_local_size = cache_minimal_size(global_size);
let expected_local_size = cache_expected_size(global_size);
let maximal_local_size = cache_maximal_size(global_size);
let local_size = local.total_size();
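// The minimal/expected/maximal thresholds form a hysteresis band: the
// local pool is trimmed down to (or refilled up to) the expected size
// only after crossing an outer bound, which avoids ping-ponging chunks
// between the local and global pools on every allocation.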
if local_size >= maximal_local_size {
// Move local frames to the global pool.
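// An empty pool has nothing to move; bailing out early also keeps
// `lesser_order_of` from being called with zero below, which would
// underflow.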
if local_size == 0 {
return;
}
let expected_removal = local_size - expected_local_size;
let lesser_order = lesser_order_of(expected_removal);
let mut global_pool_lock = GLOBAL_POOL.lock();
balance_to(local, &mut *global_pool_lock, lesser_order);
GLOBAL_POOL_SIZE.store(global_pool_lock.total_size(), Ordering::Relaxed);
} else if local_size < minimal_local_size {
// Move global frames to the local pool.
if global_size == 0 {
return;
}
let expected_allocation = expected_local_size - local_size;
let lesser_order = lesser_order_of(expected_allocation);
let mut global_pool_lock = GLOBAL_POOL.lock();
balance_to(&mut *global_pool_lock, local, lesser_order);
GLOBAL_POOL_SIZE.store(global_pool_lock.total_size(), Ordering::Relaxed);
}
}
/// Transfers roughly `size_of_order(order)` bytes of chunks from `a` to `b`.
fn balance_to<const MAX_ORDER1: BuddyOrder, const MAX_ORDER2: BuddyOrder>(
a: &mut BuddySet<MAX_ORDER1>,
b: &mut BuddySet<MAX_ORDER2>,
order: BuddyOrder,
) {
let allocated_from_a = a.alloc_chunk(order);
if let Some(addr) = allocated_from_a {
if order >= MAX_ORDER2 {
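// `b` cannot hold a chunk of this order, so split it into
// `1 << (order - inserted_order)` chunks of the largest order that
// `b` accepts.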
let inserted_order = MAX_ORDER2 - 1;
for i in 0..(1 << (order - inserted_order)) as usize {
let split_addr = addr + size_of_order(inserted_order) * i;
b.insert_chunk(split_addr, inserted_order);
}
} else {
b.insert_chunk(addr, order);
}
} else {
// Maybe the chunk size is too large.
// Try to reduce the order and balance again.
if order > 1 {
balance_to(a, b, order - 1);
balance_to(a, b, order - 1);
}
}
}
}

chunk.rs (new file)

@@ -0,0 +1,208 @@
// SPDX-License-Identifier: MPL-2.0
use ostd::{
impl_frame_meta_for,
mm::{frame::linked_list::Link, Paddr, UniqueFrame, PAGE_SIZE},
};
/// The order of a buddy chunk.
///
/// The size of a buddy chunk is `(1 << order) * PAGE_SIZE`.
pub(crate) type BuddyOrder = usize;
/// Returns the size of a buddy chunk of the given order.
pub(crate) const fn size_of_order(order: BuddyOrder) -> usize {
(1 << order) * PAGE_SIZE
}
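// For example, with 4 KiB pages, order 0 is 4 KiB, order 3 is 32 KiB,
// and order 10 is 4 MiB.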
/// The metadata of the head frame in a free buddy chunk.
#[derive(Debug)]
pub(crate) struct FreeHeadMeta {
/// The order of the buddy chunk.
order: BuddyOrder,
}
impl_frame_meta_for!(FreeHeadMeta);
impl FreeHeadMeta {
/// Returns the order of the buddy chunk.
pub(crate) fn order(&self) -> BuddyOrder {
self.order
}
}
/// A free buddy chunk.
#[derive(Debug)]
pub(crate) struct FreeChunk {
head: UniqueFrame<Link<FreeHeadMeta>>,
}
impl FreeChunk {
/// Gets a buddy chunk from the head frame.
///
/// The caller must ensure that the head frame is uniquely free;
/// otherwise this function waits indefinitely.
///
/// We need unique ownership of this chunk. Other threads may be
/// deallocating its buddy and inspecting this chunk (see
/// [`Self::buddy`]), so we may spuriously fail to acquire it. But
/// they will soon release it, so we can eventually acquire it.
pub(crate) fn from_free_head(head: UniqueFrame<Link<FreeHeadMeta>>) -> FreeChunk {
FreeChunk { head }
}
/// Gets a buddy chunk from unused frames.
///
/// # Panics
///
/// Panics if:
/// - the range is not actually unused;
/// - the address is not aligned to the order.
pub(crate) fn from_unused(addr: Paddr, order: BuddyOrder) -> FreeChunk {
assert!(addr % size_of_order(order) == 0);
let head = UniqueFrame::from_unused(addr, Link::new(FreeHeadMeta { order }))
.expect("The head frame is not unused");
#[cfg(debug_assertions)]
{
use ostd::mm::{
frame::meta::{AnyFrameMeta, GetFrameError},
Frame,
};
let end = addr + size_of_order(order);
for paddr in (addr + PAGE_SIZE..end).step_by(PAGE_SIZE) {
let Err(GetFrameError::Unused) = Frame::<dyn AnyFrameMeta>::from_in_use(paddr)
else {
panic!("The range is not actually unused");
};
}
}
FreeChunk { head }
}
/// Consumes the free chunk and returns the unique head frame.
pub(crate) fn into_unique_head(self) -> UniqueFrame<Link<FreeHeadMeta>> {
self.head
}
/// Returns the order of the buddy chunk.
pub(crate) fn order(&self) -> BuddyOrder {
self.head.meta().order()
}
/// Returns the address of the buddy chunk.
pub(crate) fn addr(&self) -> Paddr {
self.head.start_paddr()
}
/// Gets the address of the buddy of this chunk.
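///
/// Flipping the bit at `size_of_order(order)` in the (order-aligned)
/// address toggles between a chunk and its buddy. For example, with
/// 4 KiB pages, the order-1 chunk at `0x8000` has its buddy at `0xa000`.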
pub(crate) fn buddy(&self) -> Paddr {
let addr = self.addr();
let order = self.order();
addr ^ size_of_order(order)
}
/// Splits the buddy chunk into two smaller buddies.
///
/// # Panics
///
/// Panics if the buddy chunk is not uniquely free.
pub(crate) fn split_free(self) -> (FreeChunk, FreeChunk) {
let order = self.order();
let addr = self.addr();
let new_order = order - 1;
let left_child_addr = addr;
let right_child_addr = addr ^ size_of_order(new_order);
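// Since `addr` is aligned to `size_of_order(order)`, the XOR above is
// equivalent to `addr + size_of_order(new_order)`.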
let mut unique_head = self.into_unique_head();
debug_assert_eq!(unique_head.start_paddr(), left_child_addr);
unique_head.meta_mut().order = new_order;
let left_child = FreeChunk { head: unique_head };
let right_child = FreeChunk {
head: UniqueFrame::from_unused(
right_child_addr,
Link::new(FreeHeadMeta { order: new_order }),
)
.expect("Tail frames are not unused"),
};
(left_child, right_child)
}
/// Merges the buddy chunk with the sibling buddy.
///
/// # Panics
///
/// Panics if the two chunks have different orders or are not buddies of each other.
pub(crate) fn merge_free(mut self, mut buddy: FreeChunk) -> FreeChunk {
if self.addr() > buddy.addr() {
core::mem::swap(&mut self, &mut buddy);
}
let order = self.order();
let addr = self.addr();
let buddy_order = buddy.order();
let buddy_addr = buddy.addr();
buddy.into_unique_head().reset_as_unused(); // This will "drop" the frame without up-calling us.
assert_eq!(order, buddy_order);
assert_eq!(addr ^ size_of_order(order), buddy_addr);
let new_order = order + 1;
let mut unique_head = self.into_unique_head();
unique_head.meta_mut().order = new_order;
FreeChunk { head: unique_head }
}
}
#[cfg(ktest)]
mod test {
use super::*;
use crate::test::MockMemoryRegion;
use ostd::prelude::ktest;
#[ktest]
fn test_free_chunk_ops() {
let order = 3;
let size = size_of_order(order);
let region = MockMemoryRegion::alloc(size);
let addr1 = region.start_paddr();
let addr2 = addr1 + size_of_order(order - 2);
let addr3 = addr1 + size_of_order(order - 2) * 2;
let chunk = FreeChunk::from_unused(addr1, order);
assert_eq!(chunk.order(), order);
assert_eq!(chunk.addr(), addr1);
assert_eq!(chunk.buddy(), addr1 ^ size);
let (left, right) = chunk.split_free();
assert_eq!(left.order(), order - 1);
assert_eq!(left.addr(), addr1);
assert_eq!(left.buddy(), addr3);
assert_eq!(right.order(), order - 1);
assert_eq!(right.addr(), addr3);
assert_eq!(right.buddy(), addr1);
let (r1, r2) = left.split_free();
assert_eq!(r1.order(), order - 2);
assert_eq!(r1.addr(), addr1);
assert_eq!(r1.buddy(), addr2);
assert_eq!(r2.order(), order - 2);
assert_eq!(r2.addr(), addr2);
assert_eq!(r2.buddy(), addr1);
let left = r1.merge_free(r2);
let chunk = left.merge_free(right);
assert_eq!(chunk.order(), order);
assert_eq!(chunk.addr(), addr1);
chunk.into_unique_head().reset_as_unused();
}
}

lib.rs (new file)

@@ -0,0 +1,32 @@
// SPDX-License-Identifier: MPL-2.0
#![no_std]
#![deny(unsafe_code)]
//! An implementation of the global physical memory frame allocator for
//! [OSTD](https://crates.io/crates/ostd) based kernels.
//!
//! # Background
//!
//! OSTD provides a frame allocation interface, namely the
//! [`GlobalFrameAllocator`] trait and the [`global_frame_allocator`]
//! procedural macro, which allow users to safely plug their own frame
//! allocator into the kernel. Refer to the [`ostd::mm::frame::allocator`]
//! module for a detailed introduction.
//!
//! # Introduction
//!
//! This crate implements a scalable and efficient global frame allocator
//! based on the buddy system. It is shipped with OSDK by default, for
//! users that have no special requirements on the frame allocator.
//!
//! [`GlobalFrameAllocator`]: ostd::mm::GlobalFrameAllocator
//! [`global_frame_allocator`]: ostd::global_frame_allocator
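//!
//! # Example
//!
//! A sketch of how a kernel might register this allocator; the static's
//! name is illustrative, and the exact attribute usage is described in
//! the [`global_frame_allocator`] documentation:
//!
//! ```ignore
//! use osdk_frame_allocator::FrameAllocator;
//!
//! #[ostd::global_frame_allocator]
//! static FRAME_ALLOCATOR: FrameAllocator = FrameAllocator;
//! ```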
mod allocator;
mod chunk;
mod set;
#[cfg(ktest)]
mod test;
pub use allocator::{load_total_free_size, FrameAllocator};

set.rs (new file)

@@ -0,0 +1,148 @@
// SPDX-License-Identifier: MPL-2.0
use ostd::mm::{frame::linked_list::LinkedList, Paddr};
use crate::chunk::{size_of_order, BuddyOrder, FreeChunk, FreeHeadMeta};
/// A set of free buddy chunks.
pub(crate) struct BuddySet<const MAX_ORDER: BuddyOrder> {
/// The sum of the sizes of all free chunks.
total_size: usize,
/// The lists of free buddy chunks, one for each order.
lists: [LinkedList<FreeHeadMeta>; MAX_ORDER],
}
impl<const MAX_ORDER: BuddyOrder> BuddySet<MAX_ORDER> {
/// Creates a new empty set of free lists.
pub(crate) const fn new_empty() -> Self {
Self {
total_size: 0,
lists: [const { LinkedList::new() }; MAX_ORDER],
}
}
/// Gets the total size of free chunks.
pub(crate) fn total_size(&self) -> usize {
self.total_size
}
/// Inserts a free chunk into the set.
pub(crate) fn insert_chunk(&mut self, addr: Paddr, order: BuddyOrder) {
debug_assert!(order < MAX_ORDER);
let inserted_size = size_of_order(order);
let mut chunk = FreeChunk::from_unused(addr, order);
let order = chunk.order();
// Coalesce the chunk with its buddy whenever possible.
for (i, list) in self.lists.iter_mut().enumerate().skip(order) {
if i + 1 >= MAX_ORDER {
// The chunk is already the largest one.
break;
}
let buddy_addr = chunk.buddy();
let Some(mut cursor) = list.cursor_mut_at(buddy_addr) else {
// The buddy is not in this free list, so we can't coalesce.
break;
};
let taken = cursor.take_current().unwrap();
debug_assert_eq!(buddy_addr, taken.start_paddr());
chunk = chunk.merge_free(FreeChunk::from_free_head(taken));
}
// Insert the coalesced chunk into the free lists.
let order = chunk.order();
self.lists[order].push_front(chunk.into_unique_head());
self.total_size += inserted_size;
}
/// Allocates a chunk from the set.
///
/// Chooses and removes a buddy chunk of the given order from the set and
/// returns its address, or `None` if no chunk of a sufficient order is
/// available.
pub(crate) fn alloc_chunk(&mut self, order: BuddyOrder) -> Option<Paddr> {
// Find the first non-empty size class that is no smaller than the
// requested order.
let mut non_empty = None;
for (i, list) in self.lists.iter_mut().enumerate().skip(order) {
if !list.is_empty() {
non_empty = Some(i);
break;
}
}
let non_empty = non_empty?;
let mut chunk = {
let head = self.lists[non_empty].pop_front().unwrap();
debug_assert_eq!(head.meta().order(), non_empty as BuddyOrder);
Some(FreeChunk::from_free_head(head))
};
// Split the chunk.
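// For example, taking an order-1 chunk from a set whose smallest
// available chunk is order 4 pushes back one chunk each of orders
// 3, 2, and 1.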
for i in (order + 1..=non_empty).rev() {
let (left_sub, right_sub) = chunk.take().unwrap().split_free();
// Push the right sub-chunk back to the free lists.
let right_sub = right_sub.into_unique_head();
debug_assert_eq!(right_sub.meta().order(), (i - 1) as BuddyOrder);
self.lists[i - 1].push_front(right_sub);
// Pass the left sub-chunk to the next iteration.
chunk = Some(left_sub);
}
let allocated_size = size_of_order(order);
self.total_size -= allocated_size;
// The remaining chunk is the one we want.
let head_frame = chunk.take().unwrap().into_unique_head();
let paddr = head_frame.start_paddr();
head_frame.reset_as_unused(); // It will "drop" the frame without up-calling us.
Some(paddr)
}
}
#[cfg(ktest)]
mod test {
use super::*;
use crate::test::MockMemoryRegion;
use ostd::prelude::ktest;
#[ktest]
fn test_buddy_set_insert_alloc() {
let region_order = 4;
let region_size = size_of_order(region_order);
let region = MockMemoryRegion::alloc(region_size);
let region_start = region.start_paddr();
let mut set = BuddySet::<5>::new_empty();
set.insert_chunk(region_start, region_order);
assert!(set.total_size() == region_size);
// Allocating chunks of orders 0, 0, 1, 2, and 3 should succeed.
let chunk1 = set.alloc_chunk(0).unwrap();
assert!(set.total_size() == region_size - size_of_order(0));
let chunk2 = set.alloc_chunk(0).unwrap();
assert!(set.total_size() == region_size - size_of_order(1));
let chunk3 = set.alloc_chunk(1).unwrap();
assert!(set.total_size() == region_size - size_of_order(2));
let chunk4 = set.alloc_chunk(2).unwrap();
assert!(set.total_size() == region_size - size_of_order(3));
let chunk5 = set.alloc_chunk(3).unwrap();
assert!(set.total_size() == 0);
// Putting them back should enable us to allocate the original region.
set.insert_chunk(chunk3, 1);
assert!(set.total_size() == size_of_order(1));
set.insert_chunk(chunk1, 0);
assert!(set.total_size() == size_of_order(0) + size_of_order(1));
set.insert_chunk(chunk5, 3);
assert!(set.total_size() == size_of_order(0) + size_of_order(1) + size_of_order(3));
set.insert_chunk(chunk2, 0);
assert!(set.total_size() == size_of_order(2) + size_of_order(3));
set.insert_chunk(chunk4, 2);
assert!(set.total_size() == size_of_order(4));
let chunk = set.alloc_chunk(region_order).unwrap();
assert!(chunk == region_start);
assert!(set.total_size() == 0);
}
}

test.rs (new file)

@@ -0,0 +1,86 @@
// SPDX-License-Identifier: MPL-2.0
//! Test utilities and high-level test cases for the frame allocator.
use core::alloc::Layout;
use ostd::{
mm::{frame::GlobalFrameAllocator, FrameAllocOptions, Paddr, Segment, UniqueFrame, PAGE_SIZE},
prelude::ktest,
};
use super::FrameAllocator;
#[ktest]
fn frame_allocator_alloc_layout_match() {
assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap());
assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 2, PAGE_SIZE).unwrap());
assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 3, PAGE_SIZE).unwrap());
assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 4, PAGE_SIZE).unwrap());
assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 2, PAGE_SIZE * 2).unwrap());
assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 4, PAGE_SIZE * 4).unwrap());
assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 8, PAGE_SIZE * 8).unwrap());
assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 16, PAGE_SIZE * 16).unwrap());
}
#[track_caller]
fn assert_allocation_well_formed(layout: Layout) {
let instance = FrameAllocator;
let allocated = instance.alloc(layout).unwrap();
assert_eq!(
allocated % layout.align(),
0,
"Allocation alignment mismatch"
);
for offset in (0..layout.size()).step_by(PAGE_SIZE) {
let frame = allocated + offset;
let frame = UniqueFrame::from_unused(frame, ()).unwrap_or_else(|e| {
panic!(
"Metadata not well-formed after allocation at offset {:#x}: {:#?}",
offset, e
)
});
frame.reset_as_unused();
}
instance.add_free_memory(allocated, layout.size());
}
/// A mocked memory region for testing.
///
/// All the frames in the returned memory region are marked as unused.
/// When the region is dropped, all the frames are returned to the global
/// frame allocator; if any frame is still in use by that time, the drop
/// panics.
pub(crate) struct MockMemoryRegion {
addr: Paddr,
size: usize,
}
impl MockMemoryRegion {
/// Gets a memory region for testing.
pub(crate) fn alloc(size: usize) -> Self {
let seg = FrameAllocOptions::new()
.alloc_segment(size / PAGE_SIZE)
.unwrap();
let addr = seg.start_paddr();
for frame in seg {
UniqueFrame::try_from(frame).unwrap().reset_as_unused();
}
Self { addr, size }
}
/// Gets the start address of the memory region.
pub(crate) fn start_paddr(&self) -> Paddr {
self.addr
}
}
impl Drop for MockMemoryRegion {
fn drop(&mut self) {
let seg = Segment::from_unused(self.addr..self.addr + self.size, |_| ()).unwrap();
drop(seg);
}
}