Re-organize code of frame allocator's pools

This commit is contained in:
Zhang Junyang
2025-03-19 15:35:12 +08:00
committed by Tate, Hongliang Tian
parent 77c4feffd6
commit ba0dc8c122
4 changed files with 146 additions and 136 deletions

View File

@ -69,14 +69,14 @@ impl<const NR_CONT_FRAMES: usize, const COUNT: usize> CacheArray<NR_CONT_FRAMES,
/// ///
/// It may deallocate directly to this cache. If the cache is full, it will /// It may deallocate directly to this cache. If the cache is full, it will
/// deallocate to the global pool. /// deallocate to the global pool.
fn add_free_memory(&mut self, guard: &DisabledLocalIrqGuard, addr: Paddr) { fn dealloc(&mut self, guard: &DisabledLocalIrqGuard, addr: Paddr) {
if self.push_front(addr).is_none() { if self.push_front(addr).is_none() {
super::pools::add_free_memory(guard, addr, Self::segment_size()); super::pools::dealloc(guard, addr, Self::segment_size());
let nr_to_dealloc = COUNT * 2 / 3; let nr_to_dealloc = COUNT * 2 / 3;
for _ in 0..nr_to_dealloc { for _ in 0..nr_to_dealloc {
let frame = self.pop_front().unwrap(); let frame = self.pop_front().unwrap();
super::pools::add_free_memory(guard, frame, Self::segment_size()); super::pools::dealloc(guard, frame, Self::segment_size());
} }
}; };
} }
@ -131,10 +131,10 @@ pub(super) fn alloc(guard: &DisabledLocalIrqGuard, layout: Layout) -> Option<Pad
} }
} }
pub(super) fn add_free_memory(guard: &DisabledLocalIrqGuard, addr: Paddr, size: usize) { pub(super) fn dealloc(guard: &DisabledLocalIrqGuard, addr: Paddr, size: usize) {
let nr_frames = size / PAGE_SIZE; let nr_frames = size / PAGE_SIZE;
if nr_frames > 4 { if nr_frames > 4 {
super::pools::add_free_memory(guard, addr, size); super::pools::dealloc(guard, addr, size);
return; return;
} }
@ -142,10 +142,10 @@ pub(super) fn add_free_memory(guard: &DisabledLocalIrqGuard, addr: Paddr, size:
let mut cache = cache_cell.borrow_mut(); let mut cache = cache_cell.borrow_mut();
match nr_frames { match nr_frames {
1 => cache.cache1.add_free_memory(guard, addr), 1 => cache.cache1.dealloc(guard, addr),
2 => cache.cache2.add_free_memory(guard, addr), 2 => cache.cache2.dealloc(guard, addr),
3 => cache.cache3.add_free_memory(guard, addr), 3 => cache.cache3.dealloc(guard, addr),
4 => cache.cache4.add_free_memory(guard, addr), 4 => cache.cache4.dealloc(guard, addr),
_ => super::pools::add_free_memory(guard, addr, size), _ => super::pools::dealloc(guard, addr, size),
} }
} }

View File

@ -61,12 +61,14 @@ impl GlobalFrameAllocator for FrameAllocator {
} }
fn dealloc(&self, addr: Paddr, size: usize) { fn dealloc(&self, addr: Paddr, size: usize) {
self.add_free_memory(addr, size); let guard = trap::disable_local();
per_cpu_counter::add_free_size(&guard, size);
cache::dealloc(&guard, addr, size);
} }
fn add_free_memory(&self, addr: Paddr, size: usize) { fn add_free_memory(&self, addr: Paddr, size: usize) {
let guard = trap::disable_local(); let guard = trap::disable_local();
per_cpu_counter::add_free_size(&guard, size); per_cpu_counter::add_free_size(&guard, size);
cache::add_free_memory(&guard, addr, size); pools::add_free_memory(&guard, addr, size);
} }
} }

View File

@ -0,0 +1,117 @@
// SPDX-License-Identifier: MPL-2.0
//! Controlling the balancing between CPU-local free pools and the global free pool.
use core::sync::atomic::Ordering;
use ostd::cpu::num_cpus;
use super::{
lesser_order_of, BuddyOrder, BuddySet, GLOBAL_POOL, GLOBAL_POOL_SIZE, MAX_LOCAL_BUDDY_ORDER,
};
use crate::chunk::size_of_order;
/// The divisor applied to each CPU's share of the global pool to obtain the
/// expected size of that CPU's local cache.
///
/// See [`cache_expected_size`] for the full formula.
const CACHE_EXPECTED_PORTION: usize = 2;
/// Computes the expected cache size for one CPU-local free pool.
///
/// The formula is: `global_size / num_cpus / CACHE_EXPECTED_PORTION`.
fn cache_expected_size(global_size: usize) -> usize {
    let per_cpu_share = global_size / num_cpus();
    per_cpu_share / CACHE_EXPECTED_PORTION
}
/// The divisor applied to the expected cache size to obtain the minimal
/// cache size of a CPU-local free pool.
///
/// See [`cache_minimal_size`] for the full formula.
const CACHE_MINIMAL_PORTION: usize = 8;
/// Computes the minimal cache size for one CPU-local free pool.
///
/// The formula is: `cache_expected_size(global_size) / CACHE_MINIMAL_PORTION`.
fn cache_minimal_size(global_size: usize) -> usize {
    let expected = cache_expected_size(global_size);
    expected / CACHE_MINIMAL_PORTION
}
/// The multiplier applied to the expected cache size to obtain the maximal
/// cache size of a CPU-local free pool.
///
/// See [`cache_maximal_size`] for the full formula.
const CACHE_MAXIMAL_MULTIPLIER: usize = 2;
/// Computes the maximal cache size for one CPU-local free pool.
///
/// The formula is: `cache_expected_size(global_size) * CACHE_MAXIMAL_MULTIPLIER`.
fn cache_maximal_size(global_size: usize) -> usize {
    let expected = cache_expected_size(global_size);
    expected * CACHE_MAXIMAL_MULTIPLIER
}
/// Balances a CPU-local free pool against the global free pool.
///
/// If the local pool has grown past its maximal size, frames are pushed back
/// to the global pool until the local pool is near its expected size; if it
/// has shrunk below its minimal size, frames are pulled from the global pool
/// instead. Otherwise nothing happens.
pub fn balance(local: &mut BuddySet<MAX_LOCAL_BUDDY_ORDER>) {
    // A relaxed snapshot is fine: balancing is best-effort and the size is
    // re-published after every transfer.
    let global_size = GLOBAL_POOL_SIZE.load(Ordering::Relaxed);
    let expected_local_size = cache_expected_size(global_size);
    let local_size = local.total_size();

    if local_size >= cache_maximal_size(global_size) {
        // Overflow: move local frames to the global pool.
        if local_size == 0 {
            // All thresholds are zero; there is nothing to move (and this
            // keeps `lesser_order_of` from being fed a zero size).
            return;
        }
        let transfer_order = lesser_order_of(local_size - expected_local_size);
        let mut global_pool_lock = GLOBAL_POOL.lock();
        balance_to(local, &mut *global_pool_lock, transfer_order);
        GLOBAL_POOL_SIZE.store(global_pool_lock.total_size(), Ordering::Relaxed);
    } else if local_size < cache_minimal_size(global_size) {
        // Underflow: move global frames to the local pool.
        if global_size == 0 {
            // Nothing available to pull from the global pool.
            return;
        }
        let transfer_order = lesser_order_of(expected_local_size - local_size);
        let mut global_pool_lock = GLOBAL_POOL.lock();
        balance_to(&mut *global_pool_lock, local, transfer_order);
        GLOBAL_POOL_SIZE.store(global_pool_lock.total_size(), Ordering::Relaxed);
    }
}
/// Moves roughly one chunk of the given order from `a` to `b`.
///
/// If `a` has no chunk of the requested order, the transfer falls back to
/// moving two chunks of the next smaller order (recursively, down to order
/// 1). A chunk too large for `b` is split into pieces of `b`'s largest
/// representable order before insertion.
fn balance_to<const MAX_ORDER1: BuddyOrder, const MAX_ORDER2: BuddyOrder>(
    a: &mut BuddySet<MAX_ORDER1>,
    b: &mut BuddySet<MAX_ORDER2>,
    order: BuddyOrder,
) {
    match a.alloc_chunk(order) {
        Some(addr) if order >= MAX_ORDER2 => {
            // The chunk exceeds what `b` can hold; break it into equally
            // sized pieces of `b`'s largest representable order.
            let piece_order = MAX_ORDER2 - 1;
            let nr_pieces = (1 << (order - piece_order)) as usize;
            for idx in 0..nr_pieces {
                let piece_addr = addr + size_of_order(piece_order) * idx;
                b.insert_chunk(piece_addr, piece_order);
            }
        }
        Some(addr) => {
            b.insert_chunk(addr, order);
        }
        None => {
            // Maybe the chunk size is too large for `a` to provide.
            // Try to reduce the order and balance again, twice.
            if order > 1 {
                balance_to(a, b, order - 1);
                balance_to(a, b, order - 1);
            }
        }
    }
}

View File

@ -1,5 +1,7 @@
// SPDX-License-Identifier: MPL-2.0 // SPDX-License-Identifier: MPL-2.0
mod balancing;
use core::{ use core::{
alloc::Layout, alloc::Layout,
cell::RefCell, cell::RefCell,
@ -56,8 +58,8 @@ pub(super) fn alloc(guard: &DisabledLocalIrqGuard, layout: Layout) -> Option<Pad
let size_order = greater_order_of(layout.size()); let size_order = greater_order_of(layout.size());
let align_order = greater_order_of(layout.align()); let align_order = greater_order_of(layout.align());
let order = size_order.max(align_order); let order = size_order.max(align_order);
let mut chunk_addr = None; let mut chunk_addr = None;
if order < MAX_LOCAL_BUDDY_ORDER { if order < MAX_LOCAL_BUDDY_ORDER {
@ -77,17 +79,17 @@ pub(super) fn alloc(guard: &DisabledLocalIrqGuard, layout: Layout) -> Option<Pad
let allocated_size = size_of_order(order); let allocated_size = size_of_order(order);
if allocated_size > layout.size() { if allocated_size > layout.size() {
if let Some(chunk_addr) = chunk_addr { if let Some(chunk_addr) = chunk_addr {
dealloc_in( add_free_memory_to(
&mut local_pool, &mut local_pool,
guard, guard,
chunk_addr + layout.size(), chunk_addr + layout.size(),
allocated_size - layout.size(), allocated_size - layout.size(),
); );
} }
} else {
balancing::balance(local_pool.deref_mut());
} }
balancing::balance(local_pool.deref_mut());
LOCAL_POOL_SIZE LOCAL_POOL_SIZE
.get_on_cpu(guard.current_cpu()) .get_on_cpu(guard.current_cpu())
.store(local_pool.total_size(), Ordering::Relaxed); .store(local_pool.total_size(), Ordering::Relaxed);
@ -95,14 +97,21 @@ pub(super) fn alloc(guard: &DisabledLocalIrqGuard, layout: Layout) -> Option<Pad
chunk_addr chunk_addr
} }
pub(super) fn dealloc(guard: &DisabledLocalIrqGuard, addr: Paddr, size: usize) {
let local_pool_cell = LOCAL_POOL.get_with(guard);
let mut local_pool = local_pool_cell.borrow_mut();
add_free_memory_to(&mut local_pool, guard, addr, size);
}
pub(super) fn add_free_memory(guard: &DisabledLocalIrqGuard, addr: Paddr, size: usize) { pub(super) fn add_free_memory(guard: &DisabledLocalIrqGuard, addr: Paddr, size: usize) {
let local_pool_cell = LOCAL_POOL.get_with(guard); let local_pool_cell = LOCAL_POOL.get_with(guard);
let mut local_pool = local_pool_cell.borrow_mut(); let mut local_pool = local_pool_cell.borrow_mut();
dealloc_in(&mut local_pool, guard, addr, size); add_free_memory_to(&mut local_pool, guard, addr, size);
} }
fn dealloc_in( fn add_free_memory_to(
local_pool: &mut BuddySet<MAX_LOCAL_BUDDY_ORDER>, local_pool: &mut BuddySet<MAX_LOCAL_BUDDY_ORDER>,
guard: &DisabledLocalIrqGuard, guard: &DisabledLocalIrqGuard,
mut addr: Paddr, mut addr: Paddr,
@ -141,121 +150,3 @@ fn dealloc_to_global_pool(addr: Paddr, order: BuddyOrder) {
lock_guard.insert_chunk(addr, order); lock_guard.insert_chunk(addr, order);
GLOBAL_POOL_SIZE.store(lock_guard.total_size(), Ordering::Relaxed); GLOBAL_POOL_SIZE.store(lock_guard.total_size(), Ordering::Relaxed);
} }
pub mod balancing {
//! Controlling the balancing between CPU-local free pools and the global free pool.
use core::sync::atomic::Ordering;
use ostd::cpu::num_cpus;
use super::{
lesser_order_of, BuddyOrder, BuddySet, GLOBAL_POOL, GLOBAL_POOL_SIZE, MAX_LOCAL_BUDDY_ORDER,
};
use crate::chunk::size_of_order;
/// Controls the expected size of cache for each CPU-local free pool.
///
/// The expected size will be the size of `GLOBAL_POOL` divided by the number
/// of the CPUs, and then divided by this constant.
const CACHE_EXPECTED_PORTION: usize = 2;
/// Returns the expected size of cache for each CPU-local free pool.
///
/// It depends on the size of the global free pool.
fn cache_expected_size(global_size: usize) -> usize {
global_size / num_cpus() / CACHE_EXPECTED_PORTION
}
/// Controls the minimal size of cache for each CPU-local free pool.
///
/// The minimal will be the expected size divided by this constant.
const CACHE_MINIMAL_PORTION: usize = 8;
/// Returns the minimal size of cache for each CPU-local free pool.
///
/// It depends on the size of the global free pool.
fn cache_minimal_size(global_size: usize) -> usize {
cache_expected_size(global_size) / CACHE_MINIMAL_PORTION
}
/// Controls the maximal size of cache for each CPU-local free pool.
///
/// The maximal will be the expected size multiplied by this constant.
const CACHE_MAXIMAL_MULTIPLIER: usize = 2;
/// Returns the maximal size of cache for each CPU-local free pool.
///
/// It depends on the size of the global free pool.
fn cache_maximal_size(global_size: usize) -> usize {
cache_expected_size(global_size) * CACHE_MAXIMAL_MULTIPLIER
}
/// Balances a local cache and the global free pool.
///
/// If the local pool is at or above its maximal size, frames move to the
/// global pool; if it is below its minimal size, frames move from the
/// global pool. Otherwise nothing happens.
pub fn balance(local: &mut BuddySet<MAX_LOCAL_BUDDY_ORDER>) {
// Relaxed load: the thresholds are computed from a possibly-stale
// snapshot; balancing is best-effort.
let global_size = GLOBAL_POOL_SIZE.load(Ordering::Relaxed);
let minimal_local_size = cache_minimal_size(global_size);
let expected_local_size = cache_expected_size(global_size);
let maximal_local_size = cache_maximal_size(global_size);
let local_size = local.total_size();
if local_size >= maximal_local_size {
// Move local frames to the global pool.
// Guard: when all sizes are zero this branch is entered with nothing
// to move; also keeps `lesser_order_of` from being fed zero.
if local_size == 0 {
return;
}
let expected_removal = local_size - expected_local_size;
let lesser_order = lesser_order_of(expected_removal);
let mut global_pool_lock = GLOBAL_POOL.lock();
balance_to(local, &mut *global_pool_lock, lesser_order);
// Publish the new global pool size after the transfer.
GLOBAL_POOL_SIZE.store(global_pool_lock.total_size(), Ordering::Relaxed);
} else if local_size < minimal_local_size {
// Move global frames to the local pool.
// Defensive guard: with an empty global pool there is nothing to pull.
if global_size == 0 {
return;
}
let expected_allocation = expected_local_size - local_size;
let lesser_order = lesser_order_of(expected_allocation);
let mut global_pool_lock = GLOBAL_POOL.lock();
balance_to(&mut *global_pool_lock, local, lesser_order);
GLOBAL_POOL_SIZE.store(global_pool_lock.total_size(), Ordering::Relaxed);
}
}
/// Balances from `a` to `b`.
///
/// Moves one chunk of `order` from `a` to `b`. If `a` cannot provide such a
/// chunk, it retries with two chunks of the next smaller order (down to
/// order 1). Chunks too large for `b` are split before insertion.
fn balance_to<const MAX_ORDER1: BuddyOrder, const MAX_ORDER2: BuddyOrder>(
a: &mut BuddySet<MAX_ORDER1>,
b: &mut BuddySet<MAX_ORDER2>,
order: BuddyOrder,
) {
let allocated_from_a = a.alloc_chunk(order);
if let Some(addr) = allocated_from_a {
if order >= MAX_ORDER2 {
// The chunk exceeds `b`'s largest representable order; split it
// into equally sized pieces of order `MAX_ORDER2 - 1`.
let inserted_order = MAX_ORDER2 - 1;
for i in 0..(1 << (order - inserted_order)) as usize {
let split_addr = addr + size_of_order(inserted_order) * i;
b.insert_chunk(split_addr, inserted_order);
}
} else {
b.insert_chunk(addr, order);
}
} else {
// Maybe the chunk size is too large.
// Try to reduce the order and balance again.
if order > 1 {
balance_to(a, b, order - 1);
balance_to(a, b, order - 1);
}
}
}
}