Re-write chunk-splitting and add unit tests

Zhang Junyang
2025-03-20 16:33:44 +08:00
committed by Tate, Hongliang Tian
parent ba0dc8c122
commit d8e076f58a
4 changed files with 204 additions and 21 deletions

View File

@ -16,17 +16,66 @@ pub(crate) const fn size_of_order(order: BuddyOrder) -> usize {
}
/// Returns the smallest order such that one chunk of that order covers at
/// least the given size.
///
/// The size must be larger than 0 and a multiple of `PAGE_SIZE`.
pub(crate) fn greater_order_of(size: usize) -> BuddyOrder {
let size = size / PAGE_SIZE;
size.next_power_of_two().trailing_zeros() as BuddyOrder
}
/// Returns the largest order such that one chunk of that order occupies at
/// most the given size.
///
/// The size must be at least `PAGE_SIZE`.
pub(crate) fn lesser_order_of(size: usize) -> BuddyOrder {
let size = size / PAGE_SIZE;
(usize::BITS - size.leading_zeros() - 1) as BuddyOrder
}
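// An illustrative example (not from this commit): `lesser_order_of` is the
// floor counterpart, i.e. the largest order whose chunk still fits within
// the given size:
//
//     assert_eq!(lesser_order_of(5 * PAGE_SIZE), 2); // 4 frames <= 5 frames
//     assert!(size_of_order(2) <= 5 * PAGE_SIZE);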
/// Splits a range into chunks.
///
/// A chunk must have a size and alignment of `1 << order` pages, so an
/// arbitrary page-aligned range might not be a single chunk.
///
/// This function returns an iterator that yields the smallest set of
/// chunks whose union is exactly the given range.
///
/// # Panics
///
/// Panics if the address or the size is not page-aligned.
pub(crate) fn split_to_chunks(
addr: Paddr,
size: usize,
) -> impl Iterator<Item = (Paddr, BuddyOrder)> {
assert!(addr % PAGE_SIZE == 0);
assert!(size % PAGE_SIZE == 0);
struct SplitChunks {
addr: Paddr,
size: usize,
}
impl Iterator for SplitChunks {
type Item = (Paddr, BuddyOrder);
fn next(&mut self) -> Option<Self::Item> {
if self.size == 0 {
return None;
}
let order = max_order_from(self.addr).min(lesser_order_of(self.size));
let chunk_size = size_of_order(order);
let chunk_addr = self.addr;
self.addr += chunk_size;
self.size -= chunk_size;
Some((chunk_addr, order))
}
}
SplitChunks { addr, size }
}
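// A worked example (mirroring the unit tests below): each iteration
// greedily emits the largest chunk permitted by both the alignment of the
// current address and the remaining size. Splitting 5 frames starting at
// frame 7:
//
//     // Frame 7 is odd, so only an order-0 chunk may start there; the
//     // remaining 4 frames at frame 8 form a single order-2 chunk.
//     let chunks: Vec<_> = split_to_chunks(7 * PAGE_SIZE, 5 * PAGE_SIZE).collect();
//     assert_eq!(chunks, vec![(7 * PAGE_SIZE, 0), (8 * PAGE_SIZE, 2)]);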
/// Returns the maximum order starting from the address.
///
/// If the start address is not aligned to the order, the address/order pair
@ -39,6 +88,25 @@ pub(crate) fn max_order_from(addr: Paddr) -> BuddyOrder {
(addr.trailing_zeros() - PAGE_SIZE.trailing_zeros()) as BuddyOrder
}
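// An illustrative example (not from this commit): the result depends only
// on the alignment of the address. Frame 12 (0b1100) is aligned to 4
// frames but not to 8, so:
//
//     assert_eq!(max_order_from(12 * PAGE_SIZE), 2);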
/// Splits a large buddy chunk into smaller buddies of order `split_order`.
///
/// Returns an iterator over the address of each resulting buddy. The
/// `split_order` must not exceed the original `order`.
///
/// # Panics
///
/// Panics if the address is not aligned to the `order`.
pub(crate) fn split_to_order(
addr: Paddr,
order: BuddyOrder,
split_order: BuddyOrder,
) -> impl Iterator<Item = Paddr> {
assert_eq!(addr % size_of_order(order), 0);
let split_count = 1 << (order - split_order);
let split_size = size_of_order(split_order);
(0..split_count).map(move |i| addr + split_size * i)
}
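// An illustrative example (not from this commit): splitting an order-3
// chunk at address 0 down to order-1 buddies yields `1 << (3 - 1) = 4`
// addresses spaced 2 frames apart:
//
//     let addrs: Vec<_> = split_to_order(0, 3, 1).collect();
//     assert_eq!(addrs, vec![0, 2 * PAGE_SIZE, 4 * PAGE_SIZE, 6 * PAGE_SIZE]);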
/// The metadata of the head frame in a free buddy chunk.
#[derive(Debug)]
pub(crate) struct FreeHeadMeta {
@ -83,7 +151,7 @@ impl FreeChunk {
/// - the range is not actually unused;
/// - the address is not aligned to the order.
pub(crate) fn from_unused(addr: Paddr, order: BuddyOrder) -> FreeChunk {
- assert!(addr % size_of_order(order) == 0);
+ assert_eq!(addr % size_of_order(order), 0);
let head = UniqueFrame::from_unused(addr, Link::new(FreeHeadMeta { order }))
.expect("The head frame is not unused");
@ -188,6 +256,123 @@ mod test {
use crate::test::MockMemoryRegion;
use ostd::prelude::ktest;
#[ktest]
fn test_greater_order_of() {
#[track_caller]
fn assert_greater_order_of(nframes: usize, expected: BuddyOrder) {
assert_eq!(greater_order_of(nframes * PAGE_SIZE), expected);
}
assert_greater_order_of(1, 0);
assert_greater_order_of(2, 1);
assert_greater_order_of(3, 2);
assert_greater_order_of(4, 2);
assert_greater_order_of(5, 3);
assert_greater_order_of(6, 3);
assert_greater_order_of(7, 3);
assert_greater_order_of(8, 3);
assert_greater_order_of(9, 4);
}
#[ktest]
fn test_lesser_order_of() {
#[track_caller]
fn assert_lesser_order_of(nframes: usize, expected: BuddyOrder) {
assert_eq!(lesser_order_of(nframes * PAGE_SIZE), expected);
}
assert_lesser_order_of(1, 0);
assert_lesser_order_of(2, 1);
assert_lesser_order_of(3, 1);
assert_lesser_order_of(4, 2);
assert_lesser_order_of(5, 2);
assert_lesser_order_of(6, 2);
assert_lesser_order_of(7, 2);
assert_lesser_order_of(8, 3);
assert_lesser_order_of(9, 3);
}
#[ktest]
fn test_max_order_from() {
#[track_caller]
fn assert_max_order_from(frame_num: usize, expected: BuddyOrder) {
assert_eq!(max_order_from(frame_num * PAGE_SIZE), expected);
}
assert_max_order_from(0, (usize::BITS - PAGE_SIZE.trailing_zeros()) as BuddyOrder);
assert_max_order_from(1, 0);
assert_max_order_from(2, 1);
assert_max_order_from(3, 0);
assert_max_order_from(4, 2);
assert_max_order_from(5, 0);
assert_max_order_from(6, 1);
assert_max_order_from(7, 0);
assert_max_order_from(8, 3);
assert_max_order_from(9, 0);
assert_max_order_from(10, 1);
assert_max_order_from(11, 0);
assert_max_order_from(12, 2);
}
#[ktest]
fn test_split_to_chunks() {
use alloc::{vec, vec::Vec};
#[track_caller]
fn assert_split_to_chunk(
addr_frame_num: usize,
size_num_frames: usize,
expected: Vec<(Paddr, BuddyOrder)>,
) {
let addr = addr_frame_num * PAGE_SIZE;
let size = size_num_frames * PAGE_SIZE;
let chunks: Vec<_> = split_to_chunks(addr, size).collect();
let expected = expected
.iter()
.map(|(addr, order)| (addr * PAGE_SIZE, *order))
.collect::<Vec<_>>();
assert_eq!(chunks, expected);
}
assert_split_to_chunk(0, 0, vec![]);
assert_split_to_chunk(0, 1, vec![(0, 0)]);
assert_split_to_chunk(0, 2, vec![(0, 1)]);
assert_split_to_chunk(6, 32, vec![(6, 1), (8, 3), (16, 4), (32, 2), (36, 1)]);
assert_split_to_chunk(7, 5, vec![(7, 0), (8, 2)]);
assert_split_to_chunk(12, 16, vec![(12, 2), (16, 3), (24, 2)]);
assert_split_to_chunk(1024, 1024, vec![(1024, 10)]);
}
#[ktest]
fn test_split_to_order() {
use alloc::{vec, vec::Vec};
#[track_caller]
fn assert_split_to_order(
addr_frame_num: usize,
order: BuddyOrder,
split_order: BuddyOrder,
expected: Vec<Paddr>,
) {
let addr = addr_frame_num * PAGE_SIZE;
let chunks: Vec<_> = split_to_order(addr, order, split_order).collect();
let expected = expected
.iter()
.map(|addr| addr * PAGE_SIZE)
.collect::<Vec<_>>();
assert_eq!(chunks, expected);
}
assert_split_to_order(0, 3, 3, vec![0]);
assert_split_to_order(0, 3, 2, vec![0, 4]);
assert_split_to_order(0, 3, 1, vec![0, 2, 4, 6]);
assert_split_to_order(0, 3, 0, vec![0, 1, 2, 3, 4, 5, 6, 7]);
}
#[ktest]
fn test_free_chunk_ops() {
let order = 3;

View File

@ -22,6 +22,12 @@
//! [`GlobalFrameAllocator`]: ostd::mm::GlobalFrameAllocator
//! [`global_frame_allocator`]: ostd::global_frame_allocator
// The heap allocator usually depends on frame allocation. If we depended
// on heap allocation here, there would be a cyclic dependency. We
// therefore use the heap only in unit tests.
#[cfg(ktest)]
extern crate alloc;
use core::alloc::Layout;
use ostd::{

View File

@ -10,7 +10,7 @@ use super::{
lesser_order_of, BuddyOrder, BuddySet, GLOBAL_POOL, GLOBAL_POOL_SIZE, MAX_LOCAL_BUDDY_ORDER,
};
- use crate::chunk::size_of_order;
+ use crate::chunk::split_to_order;
/// Controls the expected size of cache for each CPU-local free pool.
///
@ -99,10 +99,9 @@ fn balance_to<const MAX_ORDER1: BuddyOrder, const MAX_ORDER2: BuddyOrder>(
if let Some(addr) = allocated_from_a {
if order >= MAX_ORDER2 {
let inserted_order = MAX_ORDER2 - 1;
- for i in 0..(1 << (order - inserted_order)) as usize {
-     let split_addr = addr + size_of_order(inserted_order) * i;
-     b.insert_chunk(split_addr, inserted_order);
- }
+ split_to_order(addr, order, inserted_order).for_each(|addr| {
+     b.insert_chunk(addr, inserted_order);
+ });
} else {
b.insert_chunk(addr, order);
}
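// A sketch of the effect, with assumed values: if `order` is 6 and
// `MAX_ORDER2` is 5, then `inserted_order` is 4 and the chunk is
// re-inserted into `b` as `1 << (6 - 4) = 4` order-4 chunks, each small
// enough for the destination pool:
//
//     split_to_order(addr, 6, 4).for_each(|addr| b.insert_chunk(addr, 4));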

View File

@ -17,7 +17,7 @@ use ostd::{
trap::DisabledLocalIrqGuard,
};
- use crate::chunk::{greater_order_of, lesser_order_of, max_order_from, size_of_order, BuddyOrder};
+ use crate::chunk::{greater_order_of, lesser_order_of, size_of_order, split_to_chunks, BuddyOrder};
use super::set::BuddySet;
@ -114,23 +114,16 @@ pub(super) fn add_free_memory(guard: &DisabledLocalIrqGuard, addr: Paddr, size:
fn add_free_memory_to(
local_pool: &mut BuddySet<MAX_LOCAL_BUDDY_ORDER>,
guard: &DisabledLocalIrqGuard,
- mut addr: Paddr,
- mut size: usize,
+ addr: Paddr,
+ size: usize,
) {
// Split the range into chunks and return each chunk to the
// appropriate free list (or the global pool if it is too large).
- while size > 0 {
-     let next_chunk_order = max_order_from(addr).min(lesser_order_of(size));
-     if next_chunk_order >= MAX_LOCAL_BUDDY_ORDER {
-         dealloc_to_global_pool(addr, next_chunk_order);
+ split_to_chunks(addr, size).for_each(|(addr, order)| {
+     if order >= MAX_LOCAL_BUDDY_ORDER {
+         dealloc_to_global_pool(addr, order);
      } else {
-         local_pool.insert_chunk(addr, next_chunk_order);
+         local_pool.insert_chunk(addr, order);
      }
-     size -= size_of_order(next_chunk_order);
-     addr += size_of_order(next_chunk_order);
- }
+ });
balancing::balance(local_pool);
LOCAL_POOL_SIZE