fix: resolve use-after-free and out-of-bounds access in the slab allocator (#1111)

- Add a `tests.rs` module with test cases that exercise the correctness and performance of the memory allocator.
- Rework the `Bitfield` implementation in `pages.rs`, removing the unnecessary `get_offset_for_align` function.
- Add a `try_reclaim_pages_in_slab` method in `zone.rs` for reclaiming pages within a specific slab (the reworked deallocation flow is sketched after this list).
- Fix the `allocator_select_condition` logic in `kernel_allocator.rs`, dropping its dependency on `slab_init_state`.
- Remove the `slab_init_state` function from `slab.rs`, simplifying the initialization-state check.
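
The core of the fix: `SCAllocator::deallocate` no longer frees an empty slab page itself (the old path handed the page back to buddy via the callback and then kept using allocator state that referenced it). It now only reports, via `Result<bool, _>`, whether the page became reclaimable, and the caller reclaims it afterwards. A condensed sketch of the new caller-side flow (simplified, not the verbatim DragonOS code; see the `sc.rs` and `zone.rs` hunks below for the real change):

unsafe fn zone_deallocate(
    slab: &mut SCAllocator<ObjectPage>,
    ptr: NonNull<u8>,
    layout: Layout,
    slab_callback: &'static dyn CallBack,
) -> Result<(), AllocationError> {
    // Plain deallocation; Ok(true) means the page is now empty and the
    // allocator holds more free objects than its free_limit allows.
    let need_reclaim = slab.deallocate(ptr, layout)?;
    if need_reclaim {
        // Only after deallocate has returned is the empty page unlinked
        // and handed back to the buddy allocator.
        slab.try_reclaim_pages(1, &mut |page: *mut ObjectPage| unsafe {
            slab_callback.free_slab_page(page as *mut u8, ObjectPage::SIZE);
        });
    }
    Ok(())
}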

Signed-off-by: longjin <longjin@DragonOS.org>
LoGin 2025-03-24 23:21:22 +08:00 committed by GitHub
parent 13514f6695
commit f8c5e12d70
7 changed files with 637 additions and 75 deletions

File: lib.rs

@@ -18,6 +18,7 @@
 //! # Implementing GlobalAlloc
 //! See the [global alloc](https://github.com/gz/rust-slabmalloc/tree/master/examples/global_alloc.rs) example.
 #![allow(unused_features)]
+#![cfg_attr(test, feature(test, c_void_variant))]
 #![no_std]
 #![crate_name = "slabmalloc"]
 #![crate_type = "lib"]
@@ -33,8 +34,18 @@ pub use pages::*;
 pub use sc::*;
 pub use zone::*;
 
+#[cfg(test)]
+#[macro_use]
+extern crate std;
+#[cfg(test)]
+extern crate test;
+
+#[cfg(test)]
+mod tests;
+
 use core::alloc::Layout;
 use core::fmt;
+use core::mem;
 use core::ptr::{self, NonNull};
 
 use log::trace;
@@ -71,7 +82,6 @@ pub unsafe trait Allocator<'a> {
         layout: Layout,
         slab_callback: &'static dyn CallBack,
     ) -> Result<(), AllocationError>;
-
     /// Refill the allocator with a [`ObjectPage`].
     ///
     /// # Safety

File: pages.rs

@@ -1,11 +1,6 @@
-use alloc::boxed::Box;
-
 use crate::*;
-use core::{
-    mem,
-    sync::atomic::{AtomicU64, Ordering},
-};
-
+use alloc::boxed::Box;
+use core::sync::atomic::{AtomicU64, Ordering};
 /// A trait defining bitfield operations we need for tracking allocated objects within a page.
 pub(crate) trait Bitfield {
     fn initialize(&mut self, for_size: usize, capacity: usize);
@@ -38,7 +33,7 @@ impl Bitfield for [AtomicU64] {
     fn initialize(&mut self, for_size: usize, capacity: usize) {
         // Set everything to allocated
         for bitmap in self.iter_mut() {
-            *bitmap = AtomicU64::new(u64::MAX);
+            *bitmap = AtomicU64::new(u64::max_value());
         }
 
         // Mark actual slots as free
@@ -59,12 +54,9 @@ impl Bitfield for [AtomicU64] {
         layout: Layout,
         page_size: usize,
     ) -> Option<(usize, usize)> {
-        let start_offset = get_offset_for_align(layout);
-        let data_start = base_addr + start_offset;
-
         for (base_idx, b) in self.iter().enumerate() {
             let bitval = b.load(Ordering::Relaxed);
-            if bitval == u64::MAX {
+            if bitval == u64::max_value() {
                 continue;
             } else {
                 let negated = !bitval;
@@ -79,7 +71,7 @@ impl Bitfield for [AtomicU64] {
                     return None;
                 }
 
-                let addr: usize = data_start + offset;
+                let addr: usize = base_addr + offset;
                 let alignment_ok = addr % layout.align() == 0;
                 let block_is_free = bitval & (1 << first_free) == 0;
                 if alignment_ok && block_is_free {
@@ -125,7 +117,7 @@ impl Bitfield for [AtomicU64] {
     #[inline(always)]
     fn is_full(&self) -> bool {
         self.iter()
-            .filter(|&x| x.load(Ordering::Relaxed) != u64::MAX)
+            .filter(|&x| x.load(Ordering::Relaxed) != u64::max_value())
            .count()
            == 0
    }
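
With the fixed offset table gone (removed in the next hunk), `first_fit` computes candidate addresses directly from the page base. A minimal standalone sketch of that scan, assuming the same 1-bit-per-object encoding used above (1 = allocated); the real implementation also returns the bit index and accounts for the page's metadata region:

const OBJECT_PAGE_SIZE: usize = 4096; // assumed page size

// Returns the address of the first free, correctly aligned slot, if any.
fn first_fit(bitmap: &[u64], base_addr: usize, obj_size: usize, align: usize) -> Option<usize> {
    for (word_idx, word) in bitmap.iter().enumerate() {
        if *word == u64::MAX {
            continue; // every slot tracked by this word is allocated
        }
        let first_free = (!*word).trailing_zeros() as usize;
        let offset = (word_idx * 64 + first_free) * obj_size;
        if offset >= OBJECT_PAGE_SIZE {
            return None; // bit lies past the usable part of the page
        }
        let addr = base_addr + offset;
        if addr % align == 0 {
            return Some(addr);
        }
    }
    None
}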
@@ -157,32 +149,6 @@ impl Bitfield for [AtomicU64] {
     }
 }
 
-/// # get_offset_for_align - compute the in-page alignment offset for a layout size
-///
-/// Determines a suitable alignment offset within a page based on the size of the given `Layout`.
-///
-/// ## Parameters
-///
-/// - layout: the `Layout` for which the alignment offset is computed.
-///
-/// ## Return value
-///
-/// - usize: the alignment offset on success.
-fn get_offset_for_align(layout: Layout) -> usize {
-    match layout.size() {
-        0..=8 => 80,
-        9..=16 => 80,
-        17..=32 => 96,
-        33..=64 => 128,
-        65..=128 => 128,
-        129..=256 => 256,
-        257..=512 => 512,
-        513..=1024 => 1024,
-        1025..=2048 => 2048,
-        _ => panic!(),
-    }
-}
 /// This trait is used to define a page from which objects are allocated
 /// in an `SCAllocator`.
 ///
@@ -242,8 +208,7 @@ pub trait AllocablePage {
             ptr,
             layout
         );
-        let align_offset = get_offset_for_align(layout);
-        let page_offset = ((ptr.as_ptr() as usize) - align_offset) & (Self::SIZE - 1);
+        let page_offset = (ptr.as_ptr() as usize) & (Self::SIZE - 1);
         assert!(page_offset % layout.size() == 0);
         let idx = page_offset / layout.size();
         assert!(
@@ -282,20 +247,20 @@ pub trait AllocablePage {
 /// It is marked `repr(C)` because we rely on a well defined order of struct
 /// members (e.g., dealloc does a cast to find the bitfield).
 #[repr(C)]
-#[repr(align(4096))]
 pub struct ObjectPage<'a> {
+    /// Holds memory objects.
     #[allow(dead_code)]
-    /// A bit-field to track free/allocated memory within `data`.
-    pub(crate) bitfield: [AtomicU64; 8],
+    data: [u8; OBJECT_PAGE_SIZE - OBJECT_PAGE_METADATA_OVERHEAD],
     /// Next element in list (used by `PageList`).
     next: Rawlink<ObjectPage<'a>>,
     /// Previous element in list (used by `PageList`)
     prev: Rawlink<ObjectPage<'a>>,
-    /// Holds memory objects.
-    data: [u8; OBJECT_PAGE_SIZE - OBJECT_PAGE_METADATA_OVERHEAD],
+    /// A bit-field to track free/allocated memory within `data`.
+    pub(crate) bitfield: [AtomicU64; 8],
 }
 
 impl<'a> ObjectPage<'a> {
     pub fn new() -> Box<ObjectPage<'a>> {
         unsafe { Box::new_uninit().assume_init() }
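
The layout change above moves `data` back to the front of the page and the metadata (list links and bitfield) to the tail, so a heap pointer maps back to its page and slot without the removed per-size offset table. An illustrative sketch of that recovery (hypothetical helper; assumes `OBJECT_PAGE_SIZE` is 4096 and pages are page-aligned):

const OBJECT_PAGE_SIZE: usize = 4096;

// Given an object pointer handed out by the slab, recover the base address of
// its ObjectPage and the object's slot index within the page's bitfield.
fn object_to_page_and_slot(ptr: usize, obj_size: usize) -> (usize, usize) {
    let page_base = ptr & !(OBJECT_PAGE_SIZE - 1); // metadata sits at the page tail
    let page_offset = ptr & (OBJECT_PAGE_SIZE - 1); // same mask as AllocablePage::deallocate
    (page_base, page_offset / obj_size)
}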
@@ -303,10 +268,10 @@ impl<'a> ObjectPage<'a> {
 }
 
 // These needs some more work to be really safe...
-unsafe impl Send for ObjectPage<'_> {}
-unsafe impl Sync for ObjectPage<'_> {}
+unsafe impl<'a> Send for ObjectPage<'a> {}
+unsafe impl<'a> Sync for ObjectPage<'a> {}
 
-impl AllocablePage for ObjectPage<'_> {
+impl<'a> AllocablePage for ObjectPage<'a> {
     const SIZE: usize = OBJECT_PAGE_SIZE;
 
     fn bitfield(&self) -> &[AtomicU64; 8] {
@@ -331,7 +296,7 @@ impl<'a> Default for ObjectPage<'a> {
     }
 }
 
-impl fmt::Debug for ObjectPage<'_> {
+impl<'a> fmt::Debug for ObjectPage<'a> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         write!(f, "ObjectPage")
     }
@@ -424,7 +389,6 @@ impl<'a, T: AllocablePage> PageList<'a, T> {
     }
 
     /// Removes `slab_page` from the list.
-    #[allow(clippy::manual_inspect)]
     pub(crate) fn pop<'b>(&'b mut self) -> Option<&'a mut T> {
         match self.head {
             None => None,
@@ -468,7 +432,6 @@ impl<'a, P: AllocablePage + 'a> Iterator for ObjectPageIterMut<'a, P> {
     type Item = &'a mut P;
 
     #[inline]
-    #[allow(clippy::manual_inspect)]
     fn next(&mut self) -> Option<&'a mut P> {
         unsafe {
             self.head.resolve_mut().map(|next| {

File: sc.rs

@@ -1,7 +1,5 @@
 //! A SCAllocator that can allocate fixed size objects.
 
-use core::mem;
-
 use crate::*;
 
 /// A genius(?) const min()
@@ -73,7 +71,7 @@ macro_rules! new_sc_allocator {
         SCAllocator {
             size: $size,
             allocation_count: 0,
-            obj_per_page,
+            obj_per_page: obj_per_page,
             empty_slabs: PageList::new(),
             slabs: PageList::new(),
             full_slabs: PageList::new(),
@@ -235,6 +233,10 @@ impl<'a, P: AllocablePage> SCAllocator<'a, P> {
             }
         }
 
+        self.free_obj_count = self
+            .free_obj_count
+            .saturating_sub(reclaimed * self.obj_per_page);
+
         reclaimed
     }
@@ -247,7 +249,6 @@ impl<'a, P: AllocablePage> SCAllocator<'a, P> {
             .initialize(self.size, P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD);
         *page.prev() = Rawlink::none();
         *page.next() = Rawlink::none();
-        trace!("adding page to SCAllocator {:p}", page);
         self.insert_empty(page);
         self.free_obj_count += self.obj_per_page;
     }
@@ -314,15 +315,13 @@ impl<'a, P: AllocablePage> SCAllocator<'a, P> {
     /// May return an error in case an invalid `layout` is provided.
     /// The function may also move internal slab pages between lists partial -> empty
     /// or full -> partial lists.
-    ///
     /// # Safety
     /// The caller must ensure that the `layout` is valid.
     pub unsafe fn deallocate(
         &mut self,
         ptr: NonNull<u8>,
         layout: Layout,
-        slab_callback: &'static dyn CallBack,
-    ) -> Result<(), AllocationError> {
+    ) -> Result<bool, AllocationError> {
         assert!(layout.size() <= self.size);
         assert!(self.size <= (P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD));
         trace!(
@@ -342,17 +341,16 @@ impl<'a, P: AllocablePage> SCAllocator<'a, P> {
         let ret = slab_page.deallocate(ptr, new_layout);
         debug_assert!(ret.is_ok(), "Slab page deallocate won't fail at the moment");
         self.free_obj_count += 1;
 
         let is_empty_after_dealloc = slab_page.is_empty(self.obj_per_page);
 
+        let mut need_reclaim = false;
         // If the slab page is empty and the free object count exceeds free_limit, hand it back to buddy
         if self.free_obj_count >= self.free_limit && is_empty_after_dealloc {
-            self.slabs.remove_from_list(slab_page);
-            // Hand the slab page back to the buddy allocator
-            slab_callback.free_slab_page(slab_page as *const P as *mut u8, P::SIZE);
+            need_reclaim = true;
         }
-        self.check_page_assignments();
 
-        ret
+        ret.map(|_| need_reclaim)
     }
 }

File: tests.rs (new file)

@@ -0,0 +1,582 @@
use env_logger;
use rand;
use std::alloc;
use std::alloc::Layout;
use std::collections::HashSet;
use std::mem::{size_of, transmute};
use std::prelude::v1::*;
use crate::*;
use test::Bencher;
/// A simple page allocator based on GlobalAlloc (for testing purposes).
struct Pager {
base_pages: HashSet<*mut u8>, // probably should be hash-tables
}
unsafe impl Send for Pager {}
unsafe impl Sync for Pager {}
impl Pager {
pub fn new() -> Pager {
Pager {
base_pages: HashSet::with_capacity(1024),
}
}
}
impl Pager {
pub fn currently_allocated(&self) -> usize {
self.base_pages.len()
}
fn alloc_page(&mut self, page_size: usize) -> Option<*mut u8> {
let r =
unsafe { std::alloc::alloc(Layout::from_size_align(page_size, page_size).unwrap()) };
if !r.is_null() {
match page_size {
OBJECT_PAGE_SIZE => self.base_pages.insert(r),
_ => unreachable!("invalid page-size supplied"),
};
Some(r)
} else {
None
}
}
fn dealloc_page(&mut self, ptr: *mut u8, page_size: usize) {
let layout = match page_size {
OBJECT_PAGE_SIZE => {
assert!(
self.base_pages.contains(&ptr),
"Trying to deallocate invalid base-page"
);
self.base_pages.remove(&ptr);
Layout::from_size_align(OBJECT_PAGE_SIZE, OBJECT_PAGE_SIZE).unwrap()
}
_ => unreachable!("invalid page-size supplied"),
};
unsafe { std::alloc::dealloc(ptr, layout) };
}
}
trait PageProvider<'a>: Send {
fn allocate_page(&mut self) -> Option<&'a mut ObjectPage<'a>>;
fn release_page(&mut self, page: &'a mut ObjectPage<'a>);
}
impl<'a> PageProvider<'a> for Pager {
/// Allocates a new ObjectPage from the system.
///
/// Obtains a page from the system allocator and casts it to an ObjectPage.
fn allocate_page(&mut self) -> Option<&'a mut ObjectPage<'a>> {
self.alloc_page(OBJECT_PAGE_SIZE)
.map(|r| unsafe { transmute(r as usize) })
}
/// Release an ObjectPage back to the system.
///
/// Hands the backing page back to the system allocator.
fn release_page(&mut self, p: &'a mut ObjectPage<'a>) {
self.dealloc_page(p as *const ObjectPage as *mut u8, OBJECT_PAGE_SIZE);
}
}
#[test]
fn check_size() {
assert_eq!(
OBJECT_PAGE_SIZE as usize,
size_of::<ObjectPage>(),
"ObjectPage should be exactly the size of a single page."
);
}
#[test]
fn test_mmap_allocator() {
let mut mmap = Pager::new();
match mmap.allocate_page() {
Some(sp) => {
sp.bitfield.initialize(8, OBJECT_PAGE_SIZE - 80);
assert!(!sp.is_full(), "Got empty slab");
assert!(sp.is_empty(6 * 64), "Got empty slab");
mmap.release_page(sp)
}
None => panic!("failed to allocate ObjectPage"),
}
}
macro_rules! test_sc_allocation {
($test:ident, $size:expr, $alignment:expr, $allocations:expr, $type:ty) => {
#[test]
fn $test() {
let _ = env_logger::try_init();
let mut mmap = Pager::new();
{
let mut sa: SCAllocator<$type> = SCAllocator::new($size);
let alignment = $alignment;
let mut objects: Vec<NonNull<u8>> = Vec::new();
let mut vec: Vec<(usize, &mut [usize; $size / 8])> = Vec::new();
let layout = Layout::from_size_align($size, alignment).unwrap();
for _ in 0..$allocations {
loop {
match sa.allocate(layout) {
// Allocation was successful
Ok(nptr) => {
unsafe {
vec.push((rand::random::<usize>(), transmute(nptr.as_ptr())))
};
objects.push(nptr);
break;
}
// Couldn't allocate need to refill first
Err(AllocationError::OutOfMemory) => {
let page = mmap.allocate_page().unwrap();
unsafe {
sa.refill(page);
}
}
// Unexpected errors
Err(AllocationError::InvalidLayout) => unreachable!("Unexpected error"),
}
}
}
// Write the objects with a random pattern
for item in vec.iter_mut() {
let (pattern, ref mut obj) = *item;
assert!(obj.len() == $size / 8);
for i in 0..obj.len() {
obj[i] = pattern;
}
}
for item in vec.iter() {
let (pattern, ref obj) = *item;
for i in 0..obj.len() {
assert_eq!(
obj[i], pattern,
"No two allocations point to the same memory."
);
}
}
// Make sure we can correctly deallocate:
let pages_allocated = sa.slabs.elements;
// Deallocate all the objects
for item in objects.iter_mut() {
unsafe {
sa.deallocate(*item, layout).expect("Can't deallocate");
}
}
objects.clear();
sa.check_page_assignments();
// then allocate everything again,
for _ in 0..$allocations {
loop {
match sa.allocate(layout) {
// Allocation was successful
Ok(nptr) => {
unsafe {
vec.push((rand::random::<usize>(), transmute(nptr.as_ptr())))
};
objects.push(nptr);
break;
}
// Couldn't allocate need to refill first
Err(AllocationError::OutOfMemory) => {
let page = mmap.allocate_page().unwrap();
unsafe {
sa.refill(page);
}
}
// Unexpected errors
Err(AllocationError::InvalidLayout) => unreachable!("Unexpected error"),
}
}
}
// and make sure we do not request more pages than what we had previously
// println!("{} {}", pages_allocated, sa.slabs.elements);
assert_eq!(
pages_allocated, sa.slabs.elements,
"Did not use more memory for 2nd allocation run."
);
// Deallocate everything once more
for item in objects.iter_mut() {
unsafe {
sa.deallocate(*item, layout).expect("Can't deallocate");
}
}
// Drain the slab-allocator and give unused pages back to the OS
sa.try_reclaim_pages(usize::MAX, &mut |p: *mut ObjectPage| unsafe {
mmap.release_page(&mut *p)
});
}
// Check that we released everything to our page allocator:
assert_eq!(
mmap.currently_allocated(),
0,
"Released all pages to the underlying memory manager."
);
}
};
}
test_sc_allocation!(op_512_size8_alignment1, 8, 1, 512, ObjectPage);
test_sc_allocation!(op_4096_size8_alignment8, 8, 8, 4096, ObjectPage);
test_sc_allocation!(op_500_size8_alignment64, 8, 64, 500, ObjectPage);
test_sc_allocation!(op_4096_size12_alignment1, 12, 1, 4096, ObjectPage);
test_sc_allocation!(op_4096_size13_alignment1, 13, 1, 4096, ObjectPage);
test_sc_allocation!(op_2000_size14_alignment1, 14, 1, 2000, ObjectPage);
test_sc_allocation!(op_4096_size15_alignment1, 15, 1, 4096, ObjectPage);
test_sc_allocation!(op_8000_size16_alignment1, 16, 1, 8000, ObjectPage);
test_sc_allocation!(op_1024_size24_alignment1, 24, 1, 1024, ObjectPage);
test_sc_allocation!(op_3090_size32_alignment1, 32, 1, 3090, ObjectPage);
test_sc_allocation!(op_4096_size64_alignment1, 64, 1, 4096, ObjectPage);
test_sc_allocation!(op_1000_size512_alignment1, 512, 1, 1000, ObjectPage);
test_sc_allocation!(op_4096_size1024_alignment1, 1024, 1, 4096, ObjectPage);
test_sc_allocation!(op_10_size2048_alignment1, 2048, 1, 10, ObjectPage);
test_sc_allocation!(op_10000_size512_alignment1, 512, 1, 10000, ObjectPage);
#[test]
#[should_panic]
fn invalid_alignment() {
let _layout = Layout::from_size_align(10, 3).unwrap();
}
#[test]
fn test_readme() -> Result<(), AllocationError> {
let object_size = 12;
let alignment = 4;
let layout = Layout::from_size_align(object_size, alignment).unwrap();
// We need something that can provide backing memory
// (4 KiB and 2 MiB pages) to our ZoneAllocator
// (see tests.rs for a dummy implementation).
let mut pager = Pager::new();
let page = pager.allocate_page().expect("Can't allocate a page");
let mut zone: ZoneAllocator = Default::default();
// Prematurely fill the ZoneAllocator with memory.
// Alternatively, the allocate call would return an
// error which we can capture to refill on-demand.
unsafe { zone.refill(layout, page)? };
let allocated = zone.allocate(layout)?;
unsafe { zone.deallocate(allocated, layout, &SlabCallback) }?;
Ok(())
}
#[test]
fn test_readme2() -> Result<(), AllocationError> {
let object_size = 10;
let alignment = 8;
let layout = Layout::from_size_align(object_size, alignment).unwrap();
// We need something that can provide backing memory
// (4 KiB and 2 MiB pages) to our ZoneAllocator
// (see tests.rs for a dummy implementation).
let mut pager = Pager::new();
let page = pager.allocate_page().expect("Can't allocate a page");
let mut sa: SCAllocator<ObjectPage> = SCAllocator::new(object_size);
// Prematurely fill the SCAllocator with memory.
// Alternatively, the allocate call would return an
// error which we can capture to refill on-demand.
unsafe { sa.refill(page) };
sa.allocate(layout)?;
Ok(())
}
#[test]
fn test_bug1() -> Result<(), AllocationError> {
let _ = env_logger::try_init();
let mut mmap = Pager::new();
let page = mmap.allocate_page();
let mut sa: SCAllocator<ObjectPage> = SCAllocator::new(8);
unsafe {
sa.refill(page.unwrap());
}
let ptr1 = sa.allocate(Layout::from_size_align(1, 1).unwrap())?;
let ptr2 = sa.allocate(Layout::from_size_align(2, 1).unwrap())?;
unsafe { sa.deallocate(ptr1, Layout::from_size_align(1, 1).unwrap()) }?;
let _ptr3 = sa.allocate(Layout::from_size_align(4, 1).unwrap())?;
unsafe {
sa.deallocate(ptr2, Layout::from_size_align(2, 1).unwrap())
.map(|_| ())
}
}
#[bench]
fn slabmalloc_allocate_deallocate(b: &mut Bencher) {
let _ = env_logger::try_init();
let mut mmap = Pager::new();
let mut sa: SCAllocator<ObjectPage> = SCAllocator::new(8);
let layout = Layout::from_size_align(8, 1).unwrap();
let page = mmap.allocate_page();
unsafe {
sa.refill(page.unwrap());
}
let ptr = sa.allocate(layout).expect("Can't allocate");
test::black_box(ptr);
b.iter(|| {
let ptr = sa.allocate(layout).expect("Can't allocate");
test::black_box(ptr);
unsafe { sa.deallocate(ptr, layout).expect("Can't deallocate") };
});
}
#[bench]
fn slabmalloc_allocate_deallocate_big(b: &mut Bencher) {
let _ = env_logger::try_init();
let mut mmap = Pager::new();
let mut sa: SCAllocator<ObjectPage> = SCAllocator::new(512);
let page = mmap.allocate_page();
unsafe {
sa.refill(page.unwrap());
}
let layout = Layout::from_size_align(512, 1).unwrap();
let ptr = sa.allocate(layout).expect("Can't allocate");
test::black_box(ptr);
b.iter(|| {
let ptr = sa.allocate(layout).expect("Can't allocate");
test::black_box(ptr);
unsafe { sa.deallocate(ptr, layout).expect("Can't deallocate") };
});
}
#[bench]
fn jemalloc_allocate_deallocate(b: &mut Bencher) {
let layout = Layout::from_size_align(8, 1).unwrap();
let ptr = unsafe { alloc::alloc(layout) };
test::black_box(ptr);
b.iter(|| unsafe {
let ptr = alloc::alloc(layout);
test::black_box(ptr);
alloc::dealloc(ptr, layout);
});
}
#[bench]
fn jemalloc_allocate_deallocate_big(b: &mut Bencher) {
let layout = Layout::from_size_align(512, 1).unwrap();
let ptr = unsafe { alloc::alloc(layout) };
test::black_box(ptr);
b.iter(|| unsafe {
let ptr = alloc::alloc(layout);
test::black_box(ptr);
alloc::dealloc(ptr, layout);
});
}
#[test]
pub fn check_first_fit() {
let op: ObjectPage = Default::default();
let layout = Layout::from_size_align(8, 8).unwrap();
println!("{:?}", op.first_fit(layout));
}
#[test]
fn list_pop() {
let mut op1: ObjectPage = Default::default();
let op1_ptr = &op1 as *const ObjectPage<'_>;
let mut op2: ObjectPage = Default::default();
let op2_ptr = &op2 as *const ObjectPage<'_>;
let mut op3: ObjectPage = Default::default();
let op3_ptr = &op3 as *const ObjectPage<'_>;
let mut op4: ObjectPage = Default::default();
let op4_ptr = &op4 as *const ObjectPage<'_>;
let mut list: PageList<ObjectPage> = PageList::new();
list.insert_front(&mut op1);
list.insert_front(&mut op2);
list.insert_front(&mut op3);
assert!(list.contains(op1_ptr));
assert!(list.contains(op2_ptr));
assert!(list.contains(op3_ptr));
assert!(!list.contains(op4_ptr));
let popped = list.pop();
assert_eq!(popped.unwrap() as *const ObjectPage, op3_ptr);
assert!(!list.contains(op3_ptr));
let popped = list.pop();
assert_eq!(popped.unwrap() as *const ObjectPage, op2_ptr);
assert!(!list.contains(op2_ptr));
list.insert_front(&mut op4);
assert!(list.contains(op4_ptr));
let popped = list.pop();
assert_eq!(popped.unwrap() as *const ObjectPage, op4_ptr);
assert!(!list.contains(op4_ptr));
let popped = list.pop();
assert_eq!(popped.unwrap() as *const ObjectPage, op1_ptr);
assert!(!list.contains(op1_ptr));
let popped = list.pop();
assert!(popped.is_none());
assert!(!list.contains(op1_ptr));
assert!(!list.contains(op2_ptr));
assert!(!list.contains(op3_ptr));
assert!(!list.contains(op4_ptr));
}
#[test]
pub fn iter_empty_list() {
let mut new_head1: ObjectPage = Default::default();
let mut l = PageList::new();
l.insert_front(&mut new_head1);
for _p in l.iter_mut() {}
}
#[test]
pub fn check_is_full_8() {
let _r = env_logger::try_init();
let layout = Layout::from_size_align(8, 1).unwrap();
let mut page: ObjectPage = Default::default();
page.bitfield.initialize(8, OBJECT_PAGE_SIZE - 80);
let obj_per_page = core::cmp::min((OBJECT_PAGE_SIZE - 80) / 8, 8 * 64);
let mut allocs = 0;
loop {
if page.allocate(layout).is_null() {
break;
}
allocs += 1;
if allocs < obj_per_page {
assert!(
!page.is_full(),
"Page mistakenly considered full after {} allocs",
allocs
);
assert!(!page.is_empty(obj_per_page));
}
}
assert_eq!(allocs, obj_per_page, "Can use all bitmap space");
assert!(page.is_full());
}
// Test for bug that reports pages not as full when
// the entire bitfield wasn't allocated.
#[test]
pub fn check_is_full_512() {
let _r = env_logger::try_init();
let mut page: ObjectPage = Default::default();
page.bitfield.initialize(512, OBJECT_PAGE_SIZE - 80);
let layout = Layout::from_size_align(512, 1).unwrap();
let obj_per_page = core::cmp::min((OBJECT_PAGE_SIZE - 80) / 512, 6 * 64);
let mut allocs = 0;
loop {
if page.allocate(layout).is_null() {
break;
}
allocs += 1;
if allocs < (OBJECT_PAGE_SIZE - 80) / 512 {
assert!(!page.is_full());
assert!(!page.is_empty(obj_per_page));
}
}
assert!(page.is_full());
}
#[test]
pub fn issue_9() -> Result<(), AllocationError> {
let mut pager = Pager::new();
let mut zone: ZoneAllocator = Default::default();
// size: 256 align: 1 | my pager gets called
let l1 = Layout::from_size_align(256, 1).unwrap();
assert!(zone.allocate(l1).is_err(), "my pager gets called");
let page = pager.allocate_page().expect("Can't allocate a page");
unsafe { zone.refill(l1, page)? };
let p1 = zone.allocate(l1)?;
// size: 48 align: 8 | my pager gets called
let l2 = Layout::from_size_align(48, 8).unwrap();
assert!(zone.allocate(l2).is_err(), "my pager gets called");
let page = pager.allocate_page().expect("Can't allocate a page");
unsafe { zone.refill(l2, page)? };
let p2 = zone.allocate(l2)?;
assert_eq!(p2.as_ptr() as usize % l2.align(), 0);
assert_ne!(p2, p1);
// size: 6 align: 1 | my pager gets called and returns the properly aligned address X
let l3 = Layout::from_size_align(6, 1).unwrap();
assert!(
zone.allocate(l3).is_err(),
"my pager gets called and returns the properly aligned address X"
);
let page = pager.allocate_page().expect("Can't allocate a page");
unsafe { zone.refill(l3, page)? };
let p3 = zone.allocate(l3)?;
assert_eq!(p3.as_ptr() as usize % l3.align(), 0);
assert_ne!(p3, p2);
assert_ne!(p3, p1);
//size: 8 align: 1 | my pager doesn't get called
let l4 = Layout::from_size_align(8, 1).unwrap();
// my pager doesn't get called
let p4 = zone.allocate(l4)?;
assert_eq!(p4.as_ptr() as usize % l4.align(), 0);
assert_ne!(p4, p3);
assert_ne!(p4, p2);
assert_ne!(p4, p1);
// size: 16 align: 1 | my pager gets called
let l5 = Layout::from_size_align(16, 1).unwrap();
assert!(zone.allocate(l5).is_err(), "my pager gets called");
let page = pager.allocate_page().expect("Can't allocate a page");
unsafe { zone.refill(l5, page)? };
let p5 = zone.allocate(l5)?;
assert_eq!(p5.as_ptr() as usize % l5.align(), 0);
assert_ne!(p5, p1);
assert_ne!(p5, p2);
assert_ne!(p5, p3);
assert_ne!(p5, p4);
Ok(())
}
/// Callback that hands slab pages back to the buddy allocator.
struct SlabCallback;
impl CallBack for SlabCallback {
unsafe fn free_slab_page(&self, base_addr: *mut u8, size: usize) {
assert_eq!(base_addr as usize & (OBJECT_PAGE_SIZE - 1), 0); // confirm the address is 4K-aligned
assert_eq!(size, OBJECT_PAGE_SIZE); // confirm the freed slab page is exactly one page
}
}

File: zone.rs

@@ -120,6 +120,7 @@ impl<'a> ZoneAllocator<'a> {
                 // Number of pages reclaimed
                 let just_reclaimed = slab.try_reclaim_pages(to_reclaim, &mut dealloc);
                 self.total -= (just_reclaimed * OBJECT_PAGE_SIZE) as u64;
+
                 to_reclaim = to_reclaim.saturating_sub(just_reclaimed);
                 if to_reclaim == 0 {
                     break;
@@ -177,7 +178,20 @@ unsafe impl<'a> crate::Allocator<'a> for ZoneAllocator<'a> {
         slab_callback: &'static dyn CallBack,
     ) -> Result<(), AllocationError> {
         match ZoneAllocator::get_slab(layout.size()) {
-            Slab::Base(idx) => self.small_slabs[idx].deallocate(ptr, layout, slab_callback),
+            Slab::Base(idx) => {
+                let r = self.small_slabs[idx].deallocate(ptr, layout);
+                if let Ok(true) = r {
+                    self.small_slabs[idx].try_reclaim_pages(
+                        1,
+                        &mut |slab_page: *mut ObjectPage| {
+                            // Hand the slab page back to the buddy allocator
+                            slab_callback
+                                .free_slab_page(slab_page as *const _ as *mut u8, ObjectPage::SIZE);
+                        },
+                    );
+                }
+                r.map(|_| ())
+            }
             Slab::Unsupported => Err(AllocationError::InvalidLayout),
         }
     }

File: kernel_allocator.rs

@@ -15,7 +15,7 @@ use core::{
 
 use super::{
     page_frame::{FrameAllocator, PageFrameCount},
-    slab::{slab_init_state, SLABALLOCATOR},
+    slab::SLABALLOCATOR,
 };
 
 /// Trait that kmalloc-like allocators should implement
@@ -95,7 +95,7 @@ impl LocalAlloc for KernelAllocator {
     }
 
     unsafe fn local_dealloc(&self, ptr: *mut u8, layout: Layout) {
-        if allocator_select_condition(layout) || ((ptr as usize) % 4096) == 0 {
+        if allocator_select_condition(layout) {
             self.free_in_buddy(ptr, layout)
         } else if let Some(ref mut slab) = SLABALLOCATOR {
             slab.deallocate(ptr, layout).unwrap()
@@ -137,7 +137,7 @@ unsafe impl GlobalAlloc for KernelAllocator {
 
 /// Decides between the buddy allocator and the slab allocator
 fn allocator_select_condition(layout: Layout) -> bool {
-    layout.size() > 2048 || !slab_init_state()
+    layout.size() > 2048
 }
 
 fn alloc_debug_log(source: LogSource, layout: Layout, ptr: *mut u8) {
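
Both the alloc and dealloc paths now use this single size-based predicate; the old `((ptr as usize) % 4096) == 0` special case in `local_dealloc` is gone, presumably because with `data` now at the start of the page a slab-served object can itself be page-aligned, so pointer alignment no longer identifies buddy memory. A hypothetical illustration of the symmetric dispatch (the 2048-byte cutoff is taken from the code above):

use core::alloc::Layout;

// Requests above 2048 bytes go to buddy; everything else goes to slab.
fn allocator_select_condition(layout: Layout) -> bool {
    layout.size() > 2048
}

fn backing_allocator(layout: Layout) -> &'static str {
    // alloc and dealloc must agree, so dealloc no longer needs to guess
    // from pointer alignment where the memory came from.
    if allocator_select_condition(layout) { "buddy" } else { "slab" }
}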

File: slab.rs

@@ -72,11 +72,6 @@ pub unsafe fn slab_init() {
     SLABINITSTATE = true.into();
 }
 
-// Query the slab initialization state
-pub fn slab_init_state() -> bool {
-    unsafe { *SLABINITSTATE.get_mut() }
-}
-
 pub unsafe fn slab_usage() -> SlabUsage {
     if let Some(ref mut slab) = SLABALLOCATOR {
         slab.zone.usage()