From 0054a8080f257b991e8dbfd81e1f81ab448eb331 Mon Sep 17 00:00:00 2001
From: Yuke Peng
Date: Mon, 21 Oct 2024 18:43:19 +0800
Subject: [PATCH] Extract VirtAddrAllocator and add alloc_specific API

---
 ostd/src/lib.rs                   |   4 +-
 ostd/src/mm/kspace/kvirt_area.rs  | 107 +------------------
 ostd/src/{util.rs => util/mod.rs} |   2 +
 ostd/src/util/vaddr_alloc.rs      | 165 ++++++++++++++++++++++++++++++
 4 files changed, 170 insertions(+), 108 deletions(-)
 rename ostd/src/{util.rs => util/mod.rs} (99%)
 create mode 100644 ostd/src/util/vaddr_alloc.rs

diff --git a/ostd/src/lib.rs b/ostd/src/lib.rs
index 4e7c17c28..ed48d4f62 100644
--- a/ostd/src/lib.rs
+++ b/ostd/src/lib.rs
@@ -31,8 +31,8 @@ pub mod bus;
 pub mod collections;
 pub mod console;
 pub mod cpu;
-pub mod io;
 mod error;
+pub mod io;
 pub mod logger;
 pub mod mm;
 pub mod panic;
@@ -43,7 +43,7 @@ pub mod task;
 pub mod timer;
 pub mod trap;
 pub mod user;
-mod util;
+pub(crate) mod util;
 
 use core::sync::atomic::{AtomicBool, Ordering};
 
diff --git a/ostd/src/mm/kspace/kvirt_area.rs b/ostd/src/mm/kspace/kvirt_area.rs
index 6361bd336..04e48a8e0 100644
--- a/ostd/src/mm/kspace/kvirt_area.rs
+++ b/ostd/src/mm/kspace/kvirt_area.rs
@@ -2,7 +2,6 @@
 
 //! Kernel virtual memory allocation
 
-use alloc::collections::BTreeMap;
 use core::{any::TypeId, marker::PhantomData, ops::Range};
 
 use super::{KERNEL_PAGE_TABLE, TRACKED_MAPPED_PAGES_RANGE, VMALLOC_VADDR_RANGE};
@@ -15,114 +14,10 @@ use crate::{
         tlb::{TlbFlushOp, TlbFlusher, FLUSH_ALL_RANGE_THRESHOLD},
         Paddr, Vaddr, PAGE_SIZE,
     },
-    sync::SpinLock,
     task::disable_preempt,
-    Error, Result,
+    util::vaddr_alloc::VirtAddrAllocator,
 };
 
-pub struct KVirtAreaFreeNode {
-    block: Range<Vaddr>,
-}
-
-impl KVirtAreaFreeNode {
-    pub(crate) const fn new(range: Range<Vaddr>) -> Self {
-        Self { block: range }
-    }
-}
-
-pub struct VirtAddrAllocator {
-    fullrange: Range<Vaddr>,
-    freelist: SpinLock<Option<BTreeMap<Vaddr, KVirtAreaFreeNode>>>,
-}
-
-impl VirtAddrAllocator {
-    const fn new(fullrange: Range<Vaddr>) -> Self {
-        Self {
-            fullrange,
-            freelist: SpinLock::new(None),
-        }
-    }
-
-    /// Allocates a kernel virtual area.
-    ///
-    /// This is currently implemented with a simple FIRST-FIT algorithm.
-    fn alloc(&self, size: usize) -> Result<Range<Vaddr>> {
-        let mut lock_guard = self.freelist.lock();
-        if lock_guard.is_none() {
-            let mut freelist: BTreeMap<Vaddr, KVirtAreaFreeNode> = BTreeMap::new();
-            freelist.insert(
-                self.fullrange.start,
-                KVirtAreaFreeNode::new(self.fullrange.clone()),
-            );
-            *lock_guard = Some(freelist);
-        }
-        let freelist = lock_guard.as_mut().unwrap();
-        let mut allocate_range = None;
-        let mut to_remove = None;
-
-        for (key, value) in freelist.iter() {
-            if value.block.end - value.block.start >= size {
-                allocate_range = Some((value.block.end - size)..value.block.end);
-                to_remove = Some(*key);
-                break;
-            }
-        }
-
-        if let Some(key) = to_remove {
-            if let Some(freenode) = freelist.get_mut(&key) {
-                if freenode.block.end - size == freenode.block.start {
-                    freelist.remove(&key);
-                } else {
-                    freenode.block.end -= size;
-                }
-            }
-        }
-
-        if let Some(range) = allocate_range {
-            Ok(range)
-        } else {
-            Err(Error::KVirtAreaAllocError)
-        }
-    }
-
-    /// Frees a kernel virtual area.
-    fn free(&self, range: Range<Vaddr>) {
-        let mut lock_guard = self.freelist.lock();
-        let freelist = lock_guard.as_mut().unwrap_or_else(|| {
-            panic!("Free a 'KVirtArea' when 'VirtAddrAllocator' has not been initialized.")
-        });
-        // 1. get the previous free block, check if we can merge this block with the free one
-        //    - if contiguous, merge this area with the free block.
-        //    - if not contiguous, create a new free block, insert it into the list.
-        let mut free_range = range.clone();
-
-        if let Some((prev_va, prev_node)) = freelist
-            .upper_bound_mut(core::ops::Bound::Excluded(&free_range.start))
-            .peek_prev()
-        {
-            if prev_node.block.end == free_range.start {
-                let prev_va = *prev_va;
-                free_range.start = prev_node.block.start;
-                freelist.remove(&prev_va);
-            }
-        }
-        freelist.insert(free_range.start, KVirtAreaFreeNode::new(free_range.clone()));
-
-        // 2. check if we can merge the current block with the next block, if we can, do so.
-        if let Some((next_va, next_node)) = freelist
-            .lower_bound_mut(core::ops::Bound::Excluded(&free_range.start))
-            .peek_next()
-        {
-            if free_range.end == next_node.block.start {
-                let next_va = *next_va;
-                free_range.end = next_node.block.end;
-                freelist.remove(&next_va);
-                freelist.get_mut(&free_range.start).unwrap().block.end = free_range.end;
-            }
-        }
-    }
-}
-
 static KVIRT_AREA_TRACKED_ALLOCATOR: VirtAddrAllocator =
     VirtAddrAllocator::new(TRACKED_MAPPED_PAGES_RANGE);
 static KVIRT_AREA_UNTRACKED_ALLOCATOR: VirtAddrAllocator =
diff --git a/ostd/src/util.rs b/ostd/src/util/mod.rs
similarity index 99%
rename from ostd/src/util.rs
rename to ostd/src/util/mod.rs
index 649a81538..cddce7fa9 100644
--- a/ostd/src/util.rs
+++ b/ostd/src/util/mod.rs
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: MPL-2.0
 
+pub mod vaddr_alloc;
+
 use core::ops::Range;
 
 /// Asserts that a boolean expression is `true` at compile-time.
diff --git a/ostd/src/util/vaddr_alloc.rs b/ostd/src/util/vaddr_alloc.rs
new file mode 100644
index 000000000..7a8353f07
--- /dev/null
+++ b/ostd/src/util/vaddr_alloc.rs
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: MPL-2.0
+
+use alloc::collections::btree_map::BTreeMap;
+use core::ops::Range;
+
+use crate::{
+    prelude::*,
+    sync::{PreemptDisabled, SpinLock, SpinLockGuard},
+    Error,
+};
+
+pub struct KVirtAreaFreeNode {
+    block: Range<Vaddr>,
+}
+
+impl KVirtAreaFreeNode {
+    const fn new(range: Range<Vaddr>) -> Self {
+        Self { block: range }
+    }
+}
+
+pub struct VirtAddrAllocator {
+    fullrange: Range<Vaddr>,
+    freelist: SpinLock<Option<BTreeMap<Vaddr, KVirtAreaFreeNode>>>,
+}
+
+impl VirtAddrAllocator {
+    pub const fn new(fullrange: Range<Vaddr>) -> Self {
+        Self {
+            fullrange,
+            freelist: SpinLock::new(None),
+        }
+    }
+
+    pub const fn fullrange(&self) -> &Range<Vaddr> {
+        &self.fullrange
+    }
+
+    /// Allocates a specific kernel virtual area.
+    pub fn alloc_specific(&self, allocate_range: &Range<Vaddr>) -> Result<()> {
+        debug_assert!(allocate_range.start < allocate_range.end);
+
+        let mut lock_guard = self.get_freelist_guard();
+        let freelist = lock_guard.as_mut().unwrap();
+        let mut target_node = None;
+        let mut left_length = 0;
+        let mut right_length = 0;
+
+        for (key, value) in freelist.iter() {
+            if value.block.end >= allocate_range.end && value.block.start <= allocate_range.start {
+                target_node = Some(*key);
+                left_length = allocate_range.start - value.block.start;
+                right_length = value.block.end - allocate_range.end;
+                break;
+            }
+        }
+
+        if let Some(key) = target_node {
+            if left_length == 0 {
+                freelist.remove(&key);
+            } else if let Some(freenode) = freelist.get_mut(&key) {
+                freenode.block.end = allocate_range.start;
+            }
+
+            if right_length != 0 {
+                freelist.insert(
+                    allocate_range.end,
+                    KVirtAreaFreeNode::new(allocate_range.end..(allocate_range.end + right_length)),
+                );
+            }
+        }
+
+        if target_node.is_some() {
+            Ok(())
+        } else {
+            Err(Error::KVirtAreaAllocError)
+        }
+    }
+
+    /// Allocates a kernel virtual area.
+    ///
+    /// This is currently implemented with a simple FIRST-FIT algorithm.
+    pub fn alloc(&self, size: usize) -> Result<Range<Vaddr>> {
+        let mut lock_guard = self.get_freelist_guard();
+        let freelist = lock_guard.as_mut().unwrap();
+        let mut allocate_range = None;
+        let mut to_remove = None;
+
+        for (key, value) in freelist.iter() {
+            if value.block.end - value.block.start >= size {
+                allocate_range = Some((value.block.end - size)..value.block.end);
+                to_remove = Some(*key);
+                break;
+            }
+        }
+
+        if let Some(key) = to_remove {
+            if let Some(freenode) = freelist.get_mut(&key) {
+                if freenode.block.end - size == freenode.block.start {
+                    freelist.remove(&key);
+                } else {
+                    freenode.block.end -= size;
+                }
+            }
+        }
+
+        if let Some(range) = allocate_range {
+            Ok(range)
+        } else {
+            Err(Error::KVirtAreaAllocError)
+        }
+    }
+
+    /// Frees a kernel virtual area.
+    pub fn free(&self, range: Range<Vaddr>) {
+        let mut lock_guard = self.freelist.lock();
+        let freelist = lock_guard.as_mut().unwrap_or_else(|| {
+            panic!("Free a 'KVirtArea' when 'VirtAddrAllocator' has not been initialized.")
+        });
+        // 1. get the previous free block, check if we can merge this block with the free one
+        //    - if contiguous, merge this area with the free block.
+        //    - if not contiguous, create a new free block, insert it into the list.
+        let mut free_range = range.clone();
+
+        if let Some((prev_va, prev_node)) = freelist
+            .upper_bound_mut(core::ops::Bound::Excluded(&free_range.start))
+            .peek_prev()
+        {
+            if prev_node.block.end == free_range.start {
+                let prev_va = *prev_va;
+                free_range.start = prev_node.block.start;
+                freelist.remove(&prev_va);
+            }
+        }
+        freelist.insert(free_range.start, KVirtAreaFreeNode::new(free_range.clone()));
+
+        // 2. check if we can merge the current block with the next block, if we can, do so.
+        if let Some((next_va, next_node)) = freelist
+            .lower_bound_mut(core::ops::Bound::Excluded(&free_range.start))
+            .peek_next()
+        {
+            if free_range.end == next_node.block.start {
+                let next_va = *next_va;
+                free_range.end = next_node.block.end;
+                freelist.remove(&next_va);
+                freelist.get_mut(&free_range.start).unwrap().block.end = free_range.end;
+            }
+        }
+    }
+
+    fn get_freelist_guard(
+        &self,
+    ) -> SpinLockGuard<Option<BTreeMap<Vaddr, KVirtAreaFreeNode>>, PreemptDisabled> {
+        let mut lock_guard = self.freelist.lock();
+        if lock_guard.is_none() {
+            let mut freelist: BTreeMap<Vaddr, KVirtAreaFreeNode> = BTreeMap::new();
+            freelist.insert(
+                self.fullrange.start,
+                KVirtAreaFreeNode::new(self.fullrange.clone()),
+            );
+            *lock_guard = Some(freelist);
+        }
+        lock_guard
+    }
+}
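A minimal usage sketch of the extracted allocator API, assuming code compiled inside ostd
(it relies on the crate-internal paths crate::mm::{Vaddr, PAGE_SIZE}, crate::prelude::*, and
crate::util::vaddr_alloc::VirtAddrAllocator). EXAMPLE_ALLOCATOR, its address window, and
example() are hypothetical names for illustration only; the patch itself wires the allocator
to TRACKED_MAPPED_PAGES_RANGE and VMALLOC_VADDR_RANGE in kvirt_area.rs.

    // Usage sketch only; not part of the patch.
    use core::ops::Range;

    use crate::{
        mm::{Vaddr, PAGE_SIZE},
        prelude::*,
        util::vaddr_alloc::VirtAddrAllocator,
    };

    // Hypothetical address window; real users pass reserved kernel ranges
    // such as TRACKED_MAPPED_PAGES_RANGE or VMALLOC_VADDR_RANGE.
    static EXAMPLE_ALLOCATOR: VirtAddrAllocator =
        VirtAddrAllocator::new(0xffff_a000_0000_0000..0xffff_b000_0000_0000);

    fn example() -> Result<()> {
        // First-fit allocation: the area is carved from the end of the first
        // free block that is at least `size` bytes long.
        let area: Range<Vaddr> = EXAMPLE_ALLOCATOR.alloc(16 * PAGE_SIZE)?;

        // Fixed-address allocation: succeeds only if the requested range lies
        // entirely within a single free block; otherwise KVirtAreaAllocError.
        let fixed: Range<Vaddr> =
            0xffff_a000_0000_0000..(0xffff_a000_0000_0000 + PAGE_SIZE);
        EXAMPLE_ALLOCATOR.alloc_specific(&fixed)?;

        // Freeing coalesces the returned range with adjacent free blocks.
        EXAMPLE_ALLOCATOR.free(area);
        EXAMPLE_ALLOCATOR.free(fixed);
        Ok(())
    }

Note that alloc() takes bytes from the end of the first sufficiently large free block, while
free() merges the returned range with its neighbors in the freelist, so repeated
allocate/free cycles do not leave behind unnecessary fragmentation.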