From 9a165ec41a4335335e2f49cf3231d0c7949bf77b Mon Sep 17 00:00:00 2001 From: Yuke Peng Date: Wed, 25 Dec 2024 15:32:15 +0800 Subject: [PATCH] Refactor and rename VirtAddrAllocator to RangeAllocator --- ostd/src/io/io_mem/allocator.rs | 14 ++--- ostd/src/mm/kspace/kvirt_area.rs | 15 +++-- ostd/src/util/mod.rs | 2 +- .../util/{vaddr_alloc.rs => range_alloc.rs} | 55 +++++++++---------- 4 files changed, 41 insertions(+), 45 deletions(-) rename ostd/src/util/{vaddr_alloc.rs => range_alloc.rs} (79%) diff --git a/ostd/src/io/io_mem/allocator.rs b/ostd/src/io/io_mem/allocator.rs index 0bb55959..eb7c81aa 100644 --- a/ostd/src/io/io_mem/allocator.rs +++ b/ostd/src/io/io_mem/allocator.rs @@ -11,12 +11,12 @@ use spin::Once; use crate::{ io::io_mem::IoMem, mm::{CachePolicy, PageFlags}, - util::vaddr_alloc::VirtAddrAllocator, + util::range_alloc::RangeAllocator, }; /// I/O memory allocator that allocates memory I/O access to device drivers. pub struct IoMemAllocator { - allocators: Vec<VirtAddrAllocator>, + allocators: Vec<RangeAllocator>, } impl IoMemAllocator { @@ -53,7 +53,7 @@ impl IoMemAllocator { /// # Safety /// /// User must ensure the range doesn't belong to physical memory or system device I/O. - unsafe fn new(allocators: Vec<VirtAddrAllocator>) -> Self { + unsafe fn new(allocators: Vec<RangeAllocator>) -> Self { Self { allocators } } } @@ -63,7 +63,7 @@ impl IoMemAllocator { /// The builder must contains the memory I/O regions that don't belong to the physical memory. Also, OSTD /// must exclude the memory I/O regions of the system device before building the `IoMemAllocator`. 
pub(crate) struct IoMemAllocatorBuilder { - allocators: Vec<VirtAddrAllocator>, + allocators: Vec<RangeAllocator>, } impl IoMemAllocatorBuilder { @@ -79,7 +79,7 @@ impl IoMemAllocatorBuilder { ); let mut allocators = Vec::with_capacity(ranges.len()); for range in ranges { - allocators.push(VirtAddrAllocator::new(range)); + allocators.push(RangeAllocator::new(range)); } Self { allocators } } @@ -118,9 +118,9 @@ pub(crate) unsafe fn init(io_mem_builder: IoMemAllocatorBuilder) { } fn find_allocator<'a>( - allocators: &'a [VirtAddrAllocator], + allocators: &'a [RangeAllocator], range: &Range<usize>, -) -> Option<&'a VirtAddrAllocator> { +) -> Option<&'a RangeAllocator> { for allocator in allocators.iter() { let allocator_range = allocator.fullrange(); if allocator_range.start >= range.end || allocator_range.end <= range.start { diff --git a/ostd/src/mm/kspace/kvirt_area.rs b/ostd/src/mm/kspace/kvirt_area.rs index 04e48a8e..4b29c044 100644 --- a/ostd/src/mm/kspace/kvirt_area.rs +++ b/ostd/src/mm/kspace/kvirt_area.rs @@ -15,13 +15,12 @@ use crate::{ Paddr, Vaddr, PAGE_SIZE, }, task::disable_preempt, - util::vaddr_alloc::VirtAddrAllocator, + util::range_alloc::RangeAllocator, }; -static KVIRT_AREA_TRACKED_ALLOCATOR: VirtAddrAllocator = - VirtAddrAllocator::new(TRACKED_MAPPED_PAGES_RANGE); -static KVIRT_AREA_UNTRACKED_ALLOCATOR: VirtAddrAllocator = - VirtAddrAllocator::new(VMALLOC_VADDR_RANGE); +static KVIRT_AREA_TRACKED_ALLOCATOR: RangeAllocator = + RangeAllocator::new(TRACKED_MAPPED_PAGES_RANGE); +static KVIRT_AREA_UNTRACKED_ALLOCATOR: RangeAllocator = RangeAllocator::new(VMALLOC_VADDR_RANGE); #[derive(Debug)] pub struct Tracked; @@ -29,17 +28,17 @@ pub struct Tracked; pub struct Untracked; pub trait AllocatorSelector { - fn select_allocator() -> &'static VirtAddrAllocator; + fn select_allocator() -> &'static RangeAllocator; } impl AllocatorSelector for Tracked { - fn select_allocator() -> &'static VirtAddrAllocator { + fn select_allocator() -> &'static RangeAllocator { &KVIRT_AREA_TRACKED_ALLOCATOR } } impl 
AllocatorSelector for Untracked { - fn select_allocator() -> &'static VirtAddrAllocator { + fn select_allocator() -> &'static RangeAllocator { &KVIRT_AREA_UNTRACKED_ALLOCATOR } } diff --git a/ostd/src/util/mod.rs b/ostd/src/util/mod.rs index cddce7fa..a372b041 100644 --- a/ostd/src/util/mod.rs +++ b/ostd/src/util/mod.rs @@ -1,6 +1,6 @@ // SPDX-License-Identifier: MPL-2.0 -pub mod vaddr_alloc; +pub mod range_alloc; use core::ops::Range; diff --git a/ostd/src/util/vaddr_alloc.rs b/ostd/src/util/range_alloc.rs similarity index 79% rename from ostd/src/util/vaddr_alloc.rs rename to ostd/src/util/range_alloc.rs index f4f53c12..c0a7d222 100644 --- a/ostd/src/util/vaddr_alloc.rs +++ b/ostd/src/util/range_alloc.rs @@ -9,35 +9,25 @@ use crate::{ Error, }; -struct KVirtAreaFreeNode { - block: Range<Vaddr>, +pub struct RangeAllocator { + fullrange: Range<Vaddr>, + freelist: SpinLock<Option<BTreeMap<Vaddr, FreeRange>>>, } -impl KVirtAreaFreeNode { - const fn new(range: Range<Vaddr>) -> Self { - Self { block: range } - } -} - -pub struct VirtAddrAllocator { - fullrange: Range<Vaddr>, - freelist: SpinLock<Option<BTreeMap<Vaddr, KVirtAreaFreeNode>>>, -} - -impl VirtAddrAllocator { - pub const fn new(fullrange: Range<Vaddr>) -> Self { +impl RangeAllocator { + pub const fn new(fullrange: Range<Vaddr>) -> Self { Self { fullrange, freelist: SpinLock::new(None), } } - pub const fn fullrange(&self) -> &Range<Vaddr> { + pub const fn fullrange(&self) -> &Range<Vaddr> { &self.fullrange } /// Allocates a specific kernel virtual area. 
- pub fn alloc_specific(&self, allocate_range: &Range<Vaddr>) -> Result<()> { + pub fn alloc_specific(&self, allocate_range: &Range<Vaddr>) -> Result<()> { debug_assert!(allocate_range.start < allocate_range.end); let mut lock_guard = self.get_freelist_guard(); @@ -65,7 +55,7 @@ impl VirtAddrAllocator { if right_length != 0 { freelist.insert( allocate_range.end, - KVirtAreaFreeNode::new(allocate_range.end..(allocate_range.end + right_length)), + FreeRange::new(allocate_range.end..(allocate_range.end + right_length)), ); } } @@ -77,10 +67,10 @@ impl VirtAddrAllocator { } } - /// Allocates a kernel virtual area. + /// Allocates a range specified by the `size`. /// /// This is currently implemented with a simple FIRST-FIT algorithm. - pub fn alloc(&self, size: usize) -> Result<Range<Vaddr>> { + pub fn alloc(&self, size: usize) -> Result<Range<Vaddr>> { let mut lock_guard = self.get_freelist_guard(); let freelist = lock_guard.as_mut().unwrap(); let mut allocate_range = None; @@ -111,8 +101,8 @@ impl VirtAddrAllocator { } } - /// Frees a kernel virtual area. - pub fn free(&self, range: Range<Vaddr>) { + /// Frees a `range`. + pub fn free(&self, range: Range<Vaddr>) { let mut lock_guard = self.freelist.lock(); let freelist = lock_guard.as_mut().unwrap_or_else(|| { panic!("Free a 'KVirtArea' when 'VirtAddrAllocator' has not been initialized.") @@ -132,7 +122,7 @@ impl VirtAddrAllocator { freelist.remove(&prev_va); } } - freelist.insert(free_range.start, KVirtAreaFreeNode::new(free_range.clone())); + freelist.insert(free_range.start, FreeRange::new(free_range.clone())); // 2. check if we can merge the current block with the next block, if we can, do so. 
if let Some((next_va, next_node)) = freelist @@ -150,16 +140,23 @@ impl VirtAddrAllocator { fn get_freelist_guard( &self, - ) -> SpinLockGuard<Option<BTreeMap<Vaddr, KVirtAreaFreeNode>>, PreemptDisabled> { + ) -> SpinLockGuard<Option<BTreeMap<Vaddr, FreeRange>>, PreemptDisabled> { let mut lock_guard = self.freelist.lock(); if lock_guard.is_none() { - let mut freelist: BTreeMap<Vaddr, KVirtAreaFreeNode> = BTreeMap::new(); - freelist.insert( - self.fullrange.start, - KVirtAreaFreeNode::new(self.fullrange.clone()), - ); + let mut freelist: BTreeMap<Vaddr, FreeRange> = BTreeMap::new(); + freelist.insert(self.fullrange.start, FreeRange::new(self.fullrange.clone())); *lock_guard = Some(freelist); } lock_guard } } + +struct FreeRange { + block: Range<Vaddr>, +} + +impl FreeRange { + const fn new(range: Range<Vaddr>) -> Self { + Self { block: range } + } +}