Refactor and rename VirtAddrAllocator to RangeAllocator

Yuke Peng 2024-12-25 15:32:15 +08:00 committed by Tate, Hongliang Tian
parent 11459a6164
commit 9a165ec41a
4 changed files with 41 additions and 45 deletions

View File

@@ -11,12 +11,12 @@ use spin::Once;
 use crate::{
     io::io_mem::IoMem,
     mm::{CachePolicy, PageFlags},
-    util::vaddr_alloc::VirtAddrAllocator,
+    util::range_alloc::RangeAllocator,
 };

 /// I/O memory allocator that allocates memory I/O access to device drivers.
 pub struct IoMemAllocator {
-    allocators: Vec<VirtAddrAllocator>,
+    allocators: Vec<RangeAllocator>,
 }

 impl IoMemAllocator {
@@ -53,7 +53,7 @@ impl IoMemAllocator {
     /// # Safety
     ///
     /// User must ensure the range doesn't belong to physical memory or system device I/O.
-    unsafe fn new(allocators: Vec<VirtAddrAllocator>) -> Self {
+    unsafe fn new(allocators: Vec<RangeAllocator>) -> Self {
         Self { allocators }
     }
 }
@@ -63,7 +63,7 @@ impl IoMemAllocator {
 /// The builder must contains the memory I/O regions that don't belong to the physical memory. Also, OSTD
 /// must exclude the memory I/O regions of the system device before building the `IoMemAllocator`.
 pub(crate) struct IoMemAllocatorBuilder {
-    allocators: Vec<VirtAddrAllocator>,
+    allocators: Vec<RangeAllocator>,
 }

 impl IoMemAllocatorBuilder {
@@ -79,7 +79,7 @@ impl IoMemAllocatorBuilder {
         );
         let mut allocators = Vec::with_capacity(ranges.len());
         for range in ranges {
-            allocators.push(VirtAddrAllocator::new(range));
+            allocators.push(RangeAllocator::new(range));
         }
         Self { allocators }
     }
@@ -118,9 +118,9 @@ pub(crate) unsafe fn init(io_mem_builder: IoMemAllocatorBuilder) {
 }

 fn find_allocator<'a>(
-    allocators: &'a [VirtAddrAllocator],
+    allocators: &'a [RangeAllocator],
     range: &Range<usize>,
-) -> Option<&'a VirtAddrAllocator> {
+) -> Option<&'a RangeAllocator> {
     for allocator in allocators.iter() {
         let allocator_range = allocator.fullrange();
         if allocator_range.start >= range.end || allocator_range.end <= range.start {
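The hunk ends at the disjointness test, so the rest of `find_allocator` is not visible in this diff. A minimal sketch of the selection logic it implies, assuming the loop simply skips allocators whose full ranges are disjoint from the request and returns the first overlapping one (the `RangeAllocator` stand-in below is stubbed for illustration only):

use core::ops::Range;

// Stand-in for the real type; only `fullrange` matters for this sketch.
struct RangeAllocator {
    fullrange: Range<usize>,
}

impl RangeAllocator {
    fn fullrange(&self) -> &Range<usize> {
        &self.fullrange
    }
}

// Return the allocator whose managed range overlaps `range`, if any.
fn find_allocator<'a>(
    allocators: &'a [RangeAllocator],
    range: &Range<usize>,
) -> Option<&'a RangeAllocator> {
    allocators.iter().find(|allocator| {
        let full = allocator.fullrange();
        // Disjoint allocators are skipped; the first overlapping one wins.
        !(full.start >= range.end || full.end <= range.start)
    })
}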

View File

@@ -15,13 +15,12 @@ use crate::{
         Paddr, Vaddr, PAGE_SIZE,
     },
     task::disable_preempt,
-    util::vaddr_alloc::VirtAddrAllocator,
+    util::range_alloc::RangeAllocator,
 };

-static KVIRT_AREA_TRACKED_ALLOCATOR: VirtAddrAllocator =
-    VirtAddrAllocator::new(TRACKED_MAPPED_PAGES_RANGE);
-static KVIRT_AREA_UNTRACKED_ALLOCATOR: VirtAddrAllocator =
-    VirtAddrAllocator::new(VMALLOC_VADDR_RANGE);
+static KVIRT_AREA_TRACKED_ALLOCATOR: RangeAllocator =
+    RangeAllocator::new(TRACKED_MAPPED_PAGES_RANGE);
+static KVIRT_AREA_UNTRACKED_ALLOCATOR: RangeAllocator = RangeAllocator::new(VMALLOC_VADDR_RANGE);

 #[derive(Debug)]
 pub struct Tracked;
@@ -29,17 +28,17 @@ pub struct Tracked;
 pub struct Untracked;

 pub trait AllocatorSelector {
-    fn select_allocator() -> &'static VirtAddrAllocator;
+    fn select_allocator() -> &'static RangeAllocator;
 }

 impl AllocatorSelector for Tracked {
-    fn select_allocator() -> &'static VirtAddrAllocator {
+    fn select_allocator() -> &'static RangeAllocator {
         &KVIRT_AREA_TRACKED_ALLOCATOR
     }
 }

 impl AllocatorSelector for Untracked {
-    fn select_allocator() -> &'static VirtAddrAllocator {
+    fn select_allocator() -> &'static RangeAllocator {
         &KVIRT_AREA_UNTRACKED_ALLOCATOR
     }
 }
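The `Tracked` and `Untracked` marker types let generic code pick one of the two static allocators at compile time. A hypothetical caller might look like the sketch below; the function name and error message are made up, and the trait, statics, and `PAGE_SIZE` from this file are assumed to be in scope:

// Hypothetical sketch: select the tracked or untracked allocator via the
// marker type, then carve a kernel virtual address range out of it.
fn alloc_kvirt_range<M: AllocatorSelector>(size: usize) -> core::ops::Range<usize> {
    let allocator = M::select_allocator();
    allocator
        .alloc(size)
        .expect("kernel virtual address space exhausted")
}

// Usage (hypothetical): a four-page tracked area.
// let range = alloc_kvirt_range::<Tracked>(4 * PAGE_SIZE);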

View File

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0

-pub mod vaddr_alloc;
+pub mod range_alloc;

 use core::ops::Range;

View File

@@ -9,35 +9,25 @@ use crate::{
     Error,
 };

-struct KVirtAreaFreeNode {
-    block: Range<Vaddr>,
-}
-
-impl KVirtAreaFreeNode {
-    const fn new(range: Range<Vaddr>) -> Self {
-        Self { block: range }
-    }
-}
-
-pub struct VirtAddrAllocator {
-    fullrange: Range<Vaddr>,
-    freelist: SpinLock<Option<BTreeMap<Vaddr, KVirtAreaFreeNode>>>,
-}
-
-impl VirtAddrAllocator {
-    pub const fn new(fullrange: Range<Vaddr>) -> Self {
+pub struct RangeAllocator {
+    fullrange: Range<usize>,
+    freelist: SpinLock<Option<BTreeMap<usize, FreeRange>>>,
+}
+
+impl RangeAllocator {
+    pub const fn new(fullrange: Range<usize>) -> Self {
         Self {
             fullrange,
             freelist: SpinLock::new(None),
         }
     }

-    pub const fn fullrange(&self) -> &Range<Vaddr> {
+    pub const fn fullrange(&self) -> &Range<usize> {
         &self.fullrange
     }

     /// Allocates a specific kernel virtual area.
-    pub fn alloc_specific(&self, allocate_range: &Range<Vaddr>) -> Result<()> {
+    pub fn alloc_specific(&self, allocate_range: &Range<usize>) -> Result<()> {
         debug_assert!(allocate_range.start < allocate_range.end);

         let mut lock_guard = self.get_freelist_guard();
@@ -65,7 +55,7 @@ impl VirtAddrAllocator {
             if right_length != 0 {
                 freelist.insert(
                     allocate_range.end,
-                    KVirtAreaFreeNode::new(allocate_range.end..(allocate_range.end + right_length)),
+                    FreeRange::new(allocate_range.end..(allocate_range.end + right_length)),
                 );
             }
         }
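This hunk shows only the tail of `alloc_specific`, where the unused right-hand remainder of a split free block is returned to the free list. As a standalone illustration of that split (simplified, not the crate's exact code), reserving a specific sub-range out of a free list keyed by start address might look like:

use std::collections::BTreeMap;
use std::ops::Range;

// Reserve `want` from the free list, re-inserting the left and right
// leftovers of the free block that contained it.
fn alloc_specific(freelist: &mut BTreeMap<usize, Range<usize>>, want: &Range<usize>) -> Option<()> {
    // Find the free block that fully contains the requested range.
    let start = *freelist
        .iter()
        .find(|(_, block)| block.start <= want.start && want.end <= block.end)?
        .0;
    let block = freelist.remove(&start).unwrap();
    if block.start < want.start {
        freelist.insert(block.start, block.start..want.start); // left remainder
    }
    if want.end < block.end {
        freelist.insert(want.end, want.end..block.end); // right remainder
    }
    Some(())
}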
@@ -77,10 +67,10 @@ impl VirtAddrAllocator {
         }
     }

-    /// Allocates a kernel virtual area.
+    /// Allocates a range specific by the `size`.
     ///
     /// This is currently implemented with a simple FIRST-FIT algorithm.
-    pub fn alloc(&self, size: usize) -> Result<Range<Vaddr>> {
+    pub fn alloc(&self, size: usize) -> Result<Range<usize>> {
         let mut lock_guard = self.get_freelist_guard();
         let freelist = lock_guard.as_mut().unwrap();
         let mut allocate_range = None;
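The diff shows only the start of `alloc`, but the doc comment names the strategy: first fit. As a standalone sketch of first-fit over an address-ordered free list (simplified, not the crate's exact code):

use std::collections::BTreeMap;
use std::ops::Range;

// First fit: walk free blocks in address order and take the first one that
// is large enough, carving the allocation from its low end.
fn first_fit(freelist: &mut BTreeMap<usize, Range<usize>>, size: usize) -> Option<Range<usize>> {
    let start = *freelist
        .iter()
        .find(|(_, block)| block.end - block.start >= size)?
        .0;
    let block = freelist.remove(&start)?;
    let allocated = block.start..block.start + size;
    // Put any unused tail back on the free list.
    if allocated.end < block.end {
        freelist.insert(allocated.end, allocated.end..block.end);
    }
    Some(allocated)
}

// Example: a 16-byte arena; two allocations come back in address order.
// let mut freelist = BTreeMap::from([(0usize, 0..16)]);
// assert_eq!(first_fit(&mut freelist, 4), Some(0..4));
// assert_eq!(first_fit(&mut freelist, 8), Some(4..12));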
@@ -111,8 +101,8 @@ impl VirtAddrAllocator {
         }
     }

-    /// Frees a kernel virtual area.
-    pub fn free(&self, range: Range<Vaddr>) {
+    /// Frees a `range`.
+    pub fn free(&self, range: Range<usize>) {
         let mut lock_guard = self.freelist.lock();
         let freelist = lock_guard.as_mut().unwrap_or_else(|| {
             panic!("Free a 'KVirtArea' when 'VirtAddrAllocator' has not been initialized.")
@@ -132,7 +122,7 @@ impl VirtAddrAllocator {
                 freelist.remove(&prev_va);
             }
         }
-        freelist.insert(free_range.start, KVirtAreaFreeNode::new(free_range.clone()));
+        freelist.insert(free_range.start, FreeRange::new(free_range.clone()));

         // 2. check if we can merge the current block with the next block, if we can, do so.
         if let Some((next_va, next_node)) = freelist
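The numbered comments describe coalescing on free: the freed range is merged with the previous and the next free block when they are adjacent. A standalone sketch of that idea over a free list keyed by start address (illustrative, not the crate's exact code):

use std::collections::BTreeMap;
use std::ops::Range;

// Insert `range` into the free list, merging it with free blocks that
// touch it on either side.
fn free_and_coalesce(freelist: &mut BTreeMap<usize, Range<usize>>, range: Range<usize>) {
    let mut merged = range;
    // 1. Merge with the previous block if it ends exactly where `range` starts.
    let prev = freelist
        .range(..merged.start)
        .next_back()
        .map(|(&start, block)| (start, block.end));
    if let Some((prev_start, prev_end)) = prev {
        if prev_end == merged.start {
            freelist.remove(&prev_start);
            merged.start = prev_start;
        }
    }
    // 2. Merge with the next block if it starts exactly where `merged` ends.
    if let Some(next) = freelist.remove(&merged.end) {
        merged.end = next.end;
    }
    freelist.insert(merged.start, merged);
}

// Example: freeing 4..8 between free blocks 0..4 and 8..16 leaves one block 0..16.
// let mut freelist = BTreeMap::from([(0usize, 0..4), (8, 8..16)]);
// free_and_coalesce(&mut freelist, 4..8);
// assert_eq!(freelist.get(&0), Some(&(0..16)));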
@@ -150,16 +140,23 @@ impl VirtAddrAllocator {

     fn get_freelist_guard(
         &self,
-    ) -> SpinLockGuard<Option<BTreeMap<usize, KVirtAreaFreeNode>>, PreemptDisabled> {
+    ) -> SpinLockGuard<Option<BTreeMap<usize, FreeRange>>, PreemptDisabled> {
         let mut lock_guard = self.freelist.lock();
         if lock_guard.is_none() {
-            let mut freelist: BTreeMap<Vaddr, KVirtAreaFreeNode> = BTreeMap::new();
-            freelist.insert(
-                self.fullrange.start,
-                KVirtAreaFreeNode::new(self.fullrange.clone()),
-            );
+            let mut freelist: BTreeMap<usize, FreeRange> = BTreeMap::new();
+            freelist.insert(self.fullrange.start, FreeRange::new(self.fullrange.clone()));
             *lock_guard = Some(freelist);
         }
         lock_guard
     }
 }
+
+struct FreeRange {
+    block: Range<usize>,
+}
+
+impl FreeRange {
+    const fn new(range: Range<usize>) -> Self {
+        Self { block: range }
+    }
+}
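
Taken together, the renamed `RangeAllocator` exposes `new`, `fullrange`, `alloc_specific`, `alloc`, and `free`. A caller-side sketch based on those signatures, assuming the type is in scope; the managed address range and the error messages are made up for illustration:

// Hypothetical caller of the renamed API.
static EXAMPLE_ALLOCATOR: RangeAllocator = RangeAllocator::new(0x8000_0000..0x9000_0000);

fn example(size: usize) {
    // Reserve an explicit sub-range up front.
    EXAMPLE_ALLOCATOR
        .alloc_specific(&(0x8800_0000..0x8800_1000))
        .expect("sub-range already taken");
    // First-fit allocation of `size` bytes, later handed back to the allocator.
    let range = EXAMPLE_ALLOCATOR.alloc(size).expect("range exhausted");
    EXAMPLE_ALLOCATOR.free(range);
}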