Extract VirtAddrAllocator and add alloc_specific API

Yuke Peng, 2024-10-21 18:43:19 +08:00
Committed by Tate, Hongliang Tian
Parent 1f03955f51, commit 0054a8080f
4 changed files with 170 additions and 108 deletions
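
Note: besides extracting `VirtAddrAllocator` into its own module, the commit adds an `alloc_specific` API that reserves an exact, caller-chosen virtual address range, next to the existing size-based first-fit `alloc` and the coalescing `free`. A minimal sketch of the call pattern, assuming a hypothetical allocator instance inside OSTD (the static name, addresses, and function below are illustrative and do not appear in this commit):

use crate::util::vaddr_alloc::VirtAddrAllocator;

// Illustrative only: a made-up 16 MiB kernel virtual window.
static EXAMPLE_ALLOCATOR: VirtAddrAllocator =
    VirtAddrAllocator::new(0xffff_a000_0000_0000..0xffff_a000_0100_0000);

fn example() -> crate::prelude::Result<()> {
    // Pre-existing API: first-fit allocation of a 4 KiB area anywhere in the window.
    let any = EXAMPLE_ALLOCATOR.alloc(0x1000)?;

    // New API: claim one exact range, failing if it is not currently free.
    let fixed = 0xffff_a000_0080_0000..0xffff_a000_0080_1000;
    EXAMPLE_ALLOCATOR.alloc_specific(&fixed)?;

    // Freed ranges are merged back into adjacent free blocks.
    EXAMPLE_ALLOCATOR.free(any);
    EXAMPLE_ALLOCATOR.free(fixed);
    Ok(())
}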

View File

@@ -31,8 +31,8 @@ pub mod bus;
 pub mod collections;
 pub mod console;
 pub mod cpu;
-pub mod io;
 mod error;
+pub mod io;
 pub mod logger;
 pub mod mm;
 pub mod panic;
@@ -43,7 +43,7 @@ pub mod task;
 pub mod timer;
 pub mod trap;
 pub mod user;
-mod util;
+pub(crate) mod util;

 use core::sync::atomic::{AtomicBool, Ordering};

View File

@@ -2,7 +2,6 @@
 //! Kernel virtual memory allocation

-use alloc::collections::BTreeMap;
 use core::{any::TypeId, marker::PhantomData, ops::Range};

 use super::{KERNEL_PAGE_TABLE, TRACKED_MAPPED_PAGES_RANGE, VMALLOC_VADDR_RANGE};
@@ -15,114 +14,10 @@ use crate::{
         tlb::{TlbFlushOp, TlbFlusher, FLUSH_ALL_RANGE_THRESHOLD},
         Paddr, Vaddr, PAGE_SIZE,
     },
-    sync::SpinLock,
     task::disable_preempt,
-    Error, Result,
+    util::vaddr_alloc::VirtAddrAllocator,
 };

-pub struct KVirtAreaFreeNode {
-    block: Range<Vaddr>,
-}
-
-impl KVirtAreaFreeNode {
-    pub(crate) const fn new(range: Range<Vaddr>) -> Self {
-        Self { block: range }
-    }
-}
-
-pub struct VirtAddrAllocator {
-    fullrange: Range<Vaddr>,
-    freelist: SpinLock<Option<BTreeMap<Vaddr, KVirtAreaFreeNode>>>,
-}
-
-impl VirtAddrAllocator {
-    const fn new(fullrange: Range<Vaddr>) -> Self {
-        Self {
-            fullrange,
-            freelist: SpinLock::new(None),
-        }
-    }
-
-    /// Allocates a kernel virtual area.
-    ///
-    /// This is currently implemented with a simple FIRST-FIT algorithm.
-    fn alloc(&self, size: usize) -> Result<Range<Vaddr>> {
-        let mut lock_guard = self.freelist.lock();
-        if lock_guard.is_none() {
-            let mut freelist: BTreeMap<Vaddr, KVirtAreaFreeNode> = BTreeMap::new();
-            freelist.insert(
-                self.fullrange.start,
-                KVirtAreaFreeNode::new(self.fullrange.clone()),
-            );
-            *lock_guard = Some(freelist);
-        }
-        let freelist = lock_guard.as_mut().unwrap();
-        let mut allocate_range = None;
-        let mut to_remove = None;
-
-        for (key, value) in freelist.iter() {
-            if value.block.end - value.block.start >= size {
-                allocate_range = Some((value.block.end - size)..value.block.end);
-                to_remove = Some(*key);
-                break;
-            }
-        }
-
-        if let Some(key) = to_remove {
-            if let Some(freenode) = freelist.get_mut(&key) {
-                if freenode.block.end - size == freenode.block.start {
-                    freelist.remove(&key);
-                } else {
-                    freenode.block.end -= size;
-                }
-            }
-        }
-
-        if let Some(range) = allocate_range {
-            Ok(range)
-        } else {
-            Err(Error::KVirtAreaAllocError)
-        }
-    }
-
-    /// Frees a kernel virtual area.
-    fn free(&self, range: Range<Vaddr>) {
-        let mut lock_guard = self.freelist.lock();
-        let freelist = lock_guard.as_mut().unwrap_or_else(|| {
-            panic!("Free a 'KVirtArea' when 'VirtAddrAllocator' has not been initialized.")
-        });
-
-        // 1. get the previous free block, check if we can merge this block with the free one
-        //     - if contiguous, merge this area with the free block.
-        //     - if not contiguous, create a new free block, insert it into the list.
-        let mut free_range = range.clone();
-
-        if let Some((prev_va, prev_node)) = freelist
-            .upper_bound_mut(core::ops::Bound::Excluded(&free_range.start))
-            .peek_prev()
-        {
-            if prev_node.block.end == free_range.start {
-                let prev_va = *prev_va;
-                free_range.start = prev_node.block.start;
-                freelist.remove(&prev_va);
-            }
-        }
-        freelist.insert(free_range.start, KVirtAreaFreeNode::new(free_range.clone()));
-
-        // 2. check if we can merge the current block with the next block, if we can, do so.
-        if let Some((next_va, next_node)) = freelist
-            .lower_bound_mut(core::ops::Bound::Excluded(&free_range.start))
-            .peek_next()
-        {
-            if free_range.end == next_node.block.start {
-                let next_va = *next_va;
-                free_range.end = next_node.block.end;
-                freelist.remove(&next_va);
-                freelist.get_mut(&free_range.start).unwrap().block.end = free_range.end;
-            }
-        }
-    }
-}
 static KVIRT_AREA_TRACKED_ALLOCATOR: VirtAddrAllocator =
     VirtAddrAllocator::new(TRACKED_MAPPED_PAGES_RANGE);
 static KVIRT_AREA_UNTRACKED_ALLOCATOR: VirtAddrAllocator =

View File

@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: MPL-2.0

+pub mod vaddr_alloc;
+
 use core::ops::Range;

 /// Asserts that a boolean expression is `true` at compile-time.

View File

@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: MPL-2.0
+
+use alloc::collections::btree_map::BTreeMap;
+use core::ops::Range;
+
+use crate::{
+    prelude::*,
+    sync::{PreemptDisabled, SpinLock, SpinLockGuard},
+    Error,
+};
+
+pub struct KVirtAreaFreeNode {
+    block: Range<Vaddr>,
+}
+
+impl KVirtAreaFreeNode {
+    const fn new(range: Range<Vaddr>) -> Self {
+        Self { block: range }
+    }
+}
+
+pub struct VirtAddrAllocator {
+    fullrange: Range<Vaddr>,
+    freelist: SpinLock<Option<BTreeMap<Vaddr, KVirtAreaFreeNode>>>,
+}
+
+impl VirtAddrAllocator {
+    pub const fn new(fullrange: Range<Vaddr>) -> Self {
+        Self {
+            fullrange,
+            freelist: SpinLock::new(None),
+        }
+    }
+
+    pub const fn fullrange(&self) -> &Range<Vaddr> {
+        &self.fullrange
+    }
+
+    /// Allocates a specific kernel virtual area.
+    pub fn alloc_specific(&self, allocate_range: &Range<Vaddr>) -> Result<()> {
+        debug_assert!(allocate_range.start < allocate_range.end);
+
+        let mut lock_guard = self.get_freelist_guard();
+        let freelist = lock_guard.as_mut().unwrap();
+        let mut target_node = None;
+        let mut left_length = 0;
+        let mut right_length = 0;
+
+        for (key, value) in freelist.iter() {
+            if value.block.end >= allocate_range.end && value.block.start <= allocate_range.start {
+                target_node = Some(*key);
+                left_length = allocate_range.start - value.block.start;
+                right_length = value.block.end - allocate_range.end;
+                break;
+            }
+        }
+
+        if let Some(key) = target_node {
+            if left_length == 0 {
+                freelist.remove(&key);
+            } else if let Some(freenode) = freelist.get_mut(&key) {
+                freenode.block.end = allocate_range.start;
+            }
+
+            if right_length != 0 {
+                freelist.insert(
+                    allocate_range.end,
+                    KVirtAreaFreeNode::new(allocate_range.end..(allocate_range.end + right_length)),
+                );
+            }
+        }
+
+        if target_node.is_some() {
+            Ok(())
+        } else {
+            Err(Error::KVirtAreaAllocError)
+        }
+    }
+
+    /// Allocates a kernel virtual area.
+    ///
+    /// This is currently implemented with a simple FIRST-FIT algorithm.
+    pub fn alloc(&self, size: usize) -> Result<Range<Vaddr>> {
+        let mut lock_guard = self.get_freelist_guard();
+        let freelist = lock_guard.as_mut().unwrap();
+        let mut allocate_range = None;
+        let mut to_remove = None;
+
+        for (key, value) in freelist.iter() {
+            if value.block.end - value.block.start >= size {
+                allocate_range = Some((value.block.end - size)..value.block.end);
+                to_remove = Some(*key);
+                break;
+            }
+        }
+
+        if let Some(key) = to_remove {
+            if let Some(freenode) = freelist.get_mut(&key) {
+                if freenode.block.end - size == freenode.block.start {
+                    freelist.remove(&key);
+                } else {
+                    freenode.block.end -= size;
+                }
+            }
+        }
+
+        if let Some(range) = allocate_range {
+            Ok(range)
+        } else {
+            Err(Error::KVirtAreaAllocError)
+        }
+    }
+
+    /// Frees a kernel virtual area.
+    pub fn free(&self, range: Range<Vaddr>) {
+        let mut lock_guard = self.freelist.lock();
+        let freelist = lock_guard.as_mut().unwrap_or_else(|| {
+            panic!("Free a 'KVirtArea' when 'VirtAddrAllocator' has not been initialized.")
+        });
+
+        // 1. get the previous free block, check if we can merge this block with the free one
+        //     - if contiguous, merge this area with the free block.
+        //     - if not contiguous, create a new free block, insert it into the list.
+        let mut free_range = range.clone();
+
+        if let Some((prev_va, prev_node)) = freelist
+            .upper_bound_mut(core::ops::Bound::Excluded(&free_range.start))
+            .peek_prev()
+        {
+            if prev_node.block.end == free_range.start {
+                let prev_va = *prev_va;
+                free_range.start = prev_node.block.start;
+                freelist.remove(&prev_va);
+            }
+        }
+        freelist.insert(free_range.start, KVirtAreaFreeNode::new(free_range.clone()));
+
+        // 2. check if we can merge the current block with the next block, if we can, do so.
+        if let Some((next_va, next_node)) = freelist
+            .lower_bound_mut(core::ops::Bound::Excluded(&free_range.start))
+            .peek_next()
+        {
+            if free_range.end == next_node.block.start {
+                let next_va = *next_va;
+                free_range.end = next_node.block.end;
+                freelist.remove(&next_va);
+                freelist.get_mut(&free_range.start).unwrap().block.end = free_range.end;
+            }
+        }
+    }
+
+    fn get_freelist_guard(
+        &self,
+    ) -> SpinLockGuard<Option<BTreeMap<usize, KVirtAreaFreeNode>>, PreemptDisabled> {
+        let mut lock_guard = self.freelist.lock();
+        if lock_guard.is_none() {
+            let mut freelist: BTreeMap<Vaddr, KVirtAreaFreeNode> = BTreeMap::new();
+            freelist.insert(
+                self.fullrange.start,
+                KVirtAreaFreeNode::new(self.fullrange.clone()),
+            );
+            *lock_guard = Some(freelist);
+        }
+
+        lock_guard
+    }
+}
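
For reference, a minimal sketch of how the free list in the new module evolves, assuming a toy allocator over a made-up range (the addresses and function below are illustrative and not taken from the commit):

use crate::util::vaddr_alloc::VirtAddrAllocator;

// Hypothetical walkthrough; each comment shows the free list as {start: range}.
fn walkthrough() {
    let a = VirtAddrAllocator::new(0x1000..0x9000);

    // First-fit `alloc` carves the area out of the end of the first block that fits.
    // before: {0x1000: 0x1000..0x9000}
    let r = a.alloc(0x2000).unwrap(); // r == 0x7000..0x9000
    // after:  {0x1000: 0x1000..0x7000}

    // `alloc_specific` splits the covering free block around the requested range.
    a.alloc_specific(&(0x3000..0x4000)).unwrap();
    // after:  {0x1000: 0x1000..0x3000, 0x4000: 0x4000..0x7000}

    // `free` re-inserts the range and merges it with both contiguous neighbours.
    a.free(0x3000..0x4000);
    // after:  {0x1000: 0x1000..0x7000}

    a.free(r);
    // after:  {0x1000: 0x1000..0x9000}
}

Because the `BTreeMap` is keyed by each free block's start address, the prev/next neighbour checks in `free` are ordered-map cursor lookups, and the whole structure sits behind a single `SpinLock` whose free list is lazily initialized on first allocation via `get_freelist_guard`.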