Make ID allocator a separate crate and use it in the frame

Zhang Junyang
2024-05-02 17:44:10 +08:00
committed by Tate, Hongliang Tian
parent 8f3b1f8ddf
commit d1990869ae
19 changed files with 93 additions and 129 deletions

View File

@ -22,6 +22,7 @@ int-to-c-enum = { path = "../../kernel/libs/int-to-c-enum" }
# So we set a fixed version 0.9.5 for this crate
intrusive-collections = "=0.9.5"
ktest = { path = "../libs/ktest" }
id-alloc = { path = "../libs/id-alloc" }
lazy_static = { version = "1.0", features = ["spin_no_std"] }
log = "0.4"
pod = { git = "https://github.com/asterinas/pod", rev = "d7dba56" }

View File

@ -2,17 +2,14 @@
use alloc::{boxed::Box, fmt::Debug, sync::Arc, vec::Vec};
use id_alloc::IdAlloc;
use spin::Once;
use trapframe::TrapFrame;
use crate::{
sync::{Mutex, SpinLock, SpinLockGuard},
util::recycle_allocator::RecycleAllocator,
};
use crate::sync::{Mutex, SpinLock, SpinLockGuard};
/// The IRQ numbers which are not in use
pub(crate) static NOT_USING_IRQ: SpinLock<RecycleAllocator> =
SpinLock::new(RecycleAllocator::with_start_max(32, 256));
/// The global allocator for software-defined IRQ lines.
pub(crate) static IRQ_ALLOCATOR: Once<SpinLock<IdAlloc>> = Once::new();
pub(crate) static IRQ_LIST: Once<Vec<IrqLine>> = Once::new();
@ -25,6 +22,18 @@ pub(crate) fn init() {
});
}
IRQ_LIST.call_once(|| list);
CALLBACK_ID_ALLOCATOR.call_once(|| Mutex::new(IdAlloc::with_capacity(256)));
IRQ_ALLOCATOR.call_once(|| {
// As noted in the Intel 64 and IA-32 Architectures Software Developer's Manual,
// Volume 3A, Section 6.2, the first 32 interrupt vectors are reserved for
// specific uses, and the rest, from 32 to 255, are available for external,
// user-defined interrupts.
let mut id_alloc = IdAlloc::with_capacity(256);
for i in 0..32 {
id_alloc.alloc_specific(i).unwrap();
}
SpinLock::new(id_alloc)
});
}
pub(crate) fn enable_local() {
@ -43,7 +52,7 @@ pub(crate) fn is_local_enabled() -> bool {
x86_64::instructions::interrupts::are_enabled()
}
static ID_ALLOCATOR: Mutex<RecycleAllocator> = Mutex::new(RecycleAllocator::new());
static CALLBACK_ID_ALLOCATOR: Once<Mutex<IdAlloc>> = Once::new();
pub struct CallbackElement {
function: Box<dyn Fn(&TrapFrame) + Send + Sync + 'static>,
@ -102,14 +111,14 @@ impl IrqLine {
where
F: Fn(&TrapFrame) + Sync + Send + 'static,
{
let allocate_id = ID_ALLOCATOR.lock().alloc();
let allocated_id = CALLBACK_ID_ALLOCATOR.get().unwrap().lock().alloc().unwrap();
self.callback_list.lock().push(CallbackElement {
function: Box::new(callback),
id: allocate_id,
id: allocated_id,
});
IrqCallbackHandle {
irq_num: self.irq_num,
id: allocate_id,
id: allocated_id,
}
}
}
@ -134,6 +143,6 @@ impl Drop for IrqCallbackHandle {
.callback_list
.lock();
a.retain(|item| item.id != self.id);
ID_ALLOCATOR.lock().dealloc(self.id);
CALLBACK_ID_ALLOCATOR.get().unwrap().lock().free(self.id);
}
}
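
The `Once<SpinLock<IdAlloc>>` and `Once<Mutex<IdAlloc>>` statics above follow a lazy-initialization pattern: the static is constructed in a `const` context, and the allocator itself is built at runtime in `init`. A minimal standalone sketch of the same pattern, using `spin`'s `Once` and `Mutex` as stand-ins for the frame's own sync primitives (the function names here are illustrative, not from the commit):

use id_alloc::IdAlloc;
use spin::{Mutex, Once};

// Lazily initialized, lock-protected callback id allocator.
static CALLBACK_IDS: Once<Mutex<IdAlloc>> = Once::new();

fn init() {
    // Runs the closure exactly once, even under concurrent callers.
    CALLBACK_IDS.call_once(|| Mutex::new(IdAlloc::with_capacity(256)));
}

fn register_callback() -> usize {
    // `get().unwrap()` assumes `init` has already run; `alloc` returns
    // `None` only once all 256 ids are taken.
    CALLBACK_IDS.get().unwrap().lock().alloc().unwrap()
}

fn unregister_callback(id: usize) {
    CALLBACK_IDS.get().unwrap().lock().free(id);
}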

View File

@ -41,7 +41,6 @@ pub mod task;
pub mod timer;
pub mod trap;
pub mod user;
mod util;
pub mod vm;
#[cfg(feature = "intel_tdx")]

View File

@ -5,7 +5,7 @@ use core::fmt::Debug;
use trapframe::TrapFrame;
use crate::{
arch::irq::{self, IrqCallbackHandle, NOT_USING_IRQ},
arch::irq::{self, IrqCallbackHandle, IRQ_ALLOCATOR},
prelude::*,
task::{disable_preempt, DisablePreemptGuard},
Error,
@ -28,20 +28,20 @@ pub struct IrqLine {
impl IrqLine {
pub fn alloc_specific(irq: u8) -> Result<Self> {
if NOT_USING_IRQ.lock().get_target(irq as usize) {
Ok(Self::new(irq))
} else {
Err(Error::NotEnoughResources)
}
IRQ_ALLOCATOR
.get()
.unwrap()
.lock()
.alloc_specific(irq as usize)
.map(|irq_num| Self::new(irq_num as u8))
.ok_or(Error::NotEnoughResources)
}
pub fn alloc() -> Result<Self> {
let irq_num = NOT_USING_IRQ.lock().alloc();
if irq_num == usize::MAX {
Err(Error::NotEnoughResources)
} else {
Ok(Self::new(irq_num as u8))
}
let Some(irq_num) = IRQ_ALLOCATOR.get().unwrap().lock().alloc() else {
return Err(Error::NotEnoughResources);
};
Ok(Self::new(irq_num as u8))
}
fn new(irq_num: u8) -> Self {
@ -87,7 +87,11 @@ impl Clone for IrqLine {
impl Drop for IrqLine {
fn drop(&mut self) {
if Arc::strong_count(&self.irq) == 1 {
NOT_USING_IRQ.lock().dealloc(self.irq_num as usize);
IRQ_ALLOCATOR
.get()
.unwrap()
.lock()
.free(self.irq_num as usize);
}
}
}
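
With this change, both allocation paths surface exhaustion as `Error::NotEnoughResources` rather than a sentinel value. A hedged usage sketch, assuming `IrqLine` and the frame's `Result` are in scope (the function name and the vector number 34 are illustrative):

fn install_handlers() -> Result<()> {
    // Claim any free software-defined IRQ line (vector 32..=255).
    let any = IrqLine::alloc()?;

    // Claim a fixed vector; this fails if the vector is reserved
    // (0..32) or already taken.
    let fixed = IrqLine::alloc_specific(34)?;

    // Dropping the last clone of an `IrqLine` hands its vector back
    // to IRQ_ALLOCATOR via `free` (see the `Drop` impl above).
    drop(any);
    drop(fixed);
    Ok(())
}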

View File

@ -1,3 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
pub mod recycle_allocator;

View File

@ -1,94 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
use alloc::vec::Vec;
#[derive(Debug)]
pub struct RecycleAllocator {
current: usize,
recycled: Vec<usize>,
skip: Vec<usize>,
max: usize,
}
impl RecycleAllocator {
pub const fn new() -> Self {
RecycleAllocator {
current: 0,
recycled: Vec::new(),
skip: Vec::new(),
max: usize::MAX - 1,
}
}
pub const fn with_start_max(start: usize, max: usize) -> Self {
RecycleAllocator {
current: start,
recycled: Vec::new(),
skip: Vec::new(),
max,
}
}
#[allow(unused)]
pub fn alloc(&mut self) -> usize {
if let Some(id) = self.recycled.pop() {
return id;
}
// The recycle list is empty, so use `current` to allocate an id,
// skipping any elements in the skip list.
while self.skip.contains(&self.current) {
self.current += 1;
}
if self.current == self.max {
return usize::MAX;
}
self.current += 1;
self.current - 1
}
/// Deallocates an id. The id must satisfy one of the following requirements,
/// otherwise this method panics:
///
/// 1. It is in the skip list
///
/// 2. It is smaller than `current` and not in the recycled list
#[allow(unused)]
pub fn dealloc(&mut self, id: usize) {
if !self.skip.contains(&id) {
assert!(id < self.current);
assert!(
!self.recycled.iter().any(|i| *i == id),
"id {} has been deallocated!",
id
);
} else {
// if the value is in skip list, then remove it from the skip list
self.skip.retain(|value| *value != id);
}
self.recycled.push(id);
}
/// Claims the target id, returning true if the target can be used and false otherwise.
/// The target must meet one of the following requirements to be usable:
///
/// 1. It is in the recycled list
///
/// 2. It is greater than or equal to `current`, smaller than `max`, and not in the skip list
///
pub fn get_target(&mut self, target: usize) -> bool {
if target >= self.max {
return false;
}
if target >= self.current {
if self.skip.contains(&target) {
false
} else {
self.skip.push(target);
true
}
} else if self.recycled.contains(&target) {
self.recycled.retain(|value| *value != target);
true
} else {
false
}
}
}
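
For reference, the removed methods map onto the new crate roughly as follows (a sketch for illustration; the function is hypothetical and assumes the allocator's capacity exceeds 42):

use id_alloc::IdAlloc;

fn migrate(ids: &mut IdAlloc) {
    // RecycleAllocator::alloc() signaled exhaustion with usize::MAX;
    // IdAlloc::alloc() returns an Option instead.
    let id = ids.alloc().expect("no free ids");

    // RecycleAllocator::get_target(t) returned a bool after claiming `t`;
    // IdAlloc::alloc_specific(t) returns Some(t) on success.
    let claimed = ids.alloc_specific(42).is_some();

    // RecycleAllocator::dealloc(id) becomes IdAlloc::free(id).
    ids.free(id);
    if claimed {
        ids.free(42);
    }
}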

View File

@ -0,0 +1,9 @@
[package]
name = "id-alloc"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bitvec = { version = "1.0", default-features = false, features = ["alloc"] }

View File

@ -0,0 +1,194 @@
// SPDX-License-Identifier: MPL-2.0
#![cfg_attr(not(test), no_std)]
#![forbid(unsafe_code)]
use core::{fmt::Debug, ops::Range};
use bitvec::prelude::BitVec;
/// An id allocator implemented with a bitmap.
/// A true bit means the id is allocated; a false bit means it is free.
#[derive(Clone)]
pub struct IdAlloc {
bitset: BitVec<u8>,
first_available_id: usize,
}
impl IdAlloc {
/// Constructs a new id allocator with a maximum capacity.
pub fn with_capacity(capacity: usize) -> Self {
let mut bitset = BitVec::with_capacity(capacity);
bitset.resize(capacity, false);
Self {
bitset,
first_available_id: 0,
}
}
/// Constructs a new id allocator from a slice of `u8` bytes and a maximum capacity.
///
/// The slice of `u8` bytes is the raw data of a bitmap.
pub fn from_bytes_with_capacity(slice: &[u8], capacity: usize) -> Self {
let bitset = if capacity > slice.len() * 8 {
let mut bitset = BitVec::from_slice(slice);
bitset.resize(capacity, false);
bitset
} else {
let mut bitset = BitVec::from_slice(&slice[..capacity.div_ceil(8)]);
bitset.truncate(capacity);
bitset
};
let first_available_id = (0..bitset.len())
.find(|&i| !bitset[i])
.map_or(bitset.len(), |i| i);
Self {
bitset,
first_available_id,
}
}
/// Allocates and returns a new `id`.
///
/// If allocation is not possible, it returns `None`.
pub fn alloc(&mut self) -> Option<usize> {
if self.first_available_id < self.bitset.len() {
let id = self.first_available_id;
self.bitset.set(id, true);
self.first_available_id = (id + 1..self.bitset.len())
.find(|&i| !self.bitset[i])
.map_or(self.bitset.len(), |i| i);
Some(id)
} else {
None
}
}
/// Allocates a consecutive range of new `id`s.
///
/// The `count` is the number of consecutive `id`s to allocate. If it is 0, this method returns `None`.
///
/// If allocation is not possible, it returns `None`.
///
/// TODO: Choose a more efficient strategy.
pub fn alloc_consecutive(&mut self, count: usize) -> Option<Range<usize>> {
if count == 0 {
return None;
}
// Scan the bitmap from the position `first_available_id`
// for the first `count` number of consecutive 0's.
let allocated_range = {
// Invariant: all bits within `curr_range` are 0's
let mut curr_range = self.first_available_id..self.first_available_id + 1;
while curr_range.len() < count && curr_range.end < self.bitset.len() {
if !self.is_allocated(curr_range.end) {
curr_range.end += 1;
} else {
curr_range = curr_range.end + 1..curr_range.end + 1;
}
}
if curr_range.len() < count {
return None;
}
curr_range
};
// Set every bit to 1 within the allocated range
for id in allocated_range.clone() {
self.bitset.set(id, true);
}
// Update `first_available_id` if the old first available id was just taken
if self.is_allocated(self.first_available_id) {
self.first_available_id = (allocated_range.end..self.bitset.len())
.find(|&i| !self.bitset[i])
.map_or(self.bitset.len(), |i| i);
}
Some(allocated_range)
}
/// Releases the consecutive range of allocated `id`s.
///
/// # Panics
///
/// If the `range` is out of bounds, this method will panic.
pub fn free_consecutive(&mut self, range: Range<usize>) {
if range.is_empty() {
return;
}
let range_start = range.start;
for id in range {
debug_assert!(self.is_allocated(id));
self.bitset.set(id, false);
}
if range_start < self.first_available_id {
self.first_available_id = range_start
}
}
/// Releases the allocated `id`.
///
/// # Panics
///
/// If the `id` is out of bounds, this method will panic.
pub fn free(&mut self, id: usize) {
debug_assert!(self.is_allocated(id));
self.bitset.set(id, false);
if id < self.first_available_id {
self.first_available_id = id;
}
}
/// Allocates a specific `id`.
///
/// If the `id` is already allocated, this method returns `None`; otherwise it
/// returns the allocated `id`.
///
/// # Panics
///
/// If the `id` is out of bounds, this method will panic.
pub fn alloc_specific(&mut self, id: usize) -> Option<usize> {
if self.bitset[id] {
return None;
}
self.bitset.set(id, true);
if id == self.first_available_id {
self.first_available_id = (id + 1..self.bitset.len())
.find(|&i| !self.bitset[i])
.map_or(self.bitset.len(), |i| i);
}
Some(id)
}
/// Returns true if the `id` is allocated.
///
/// # Panics
///
/// If the `id` is out of bounds, this method will panic.
pub fn is_allocated(&self, id: usize) -> bool {
self.bitset[id]
}
/// Views the id allocator as a slice of `u8` bytes.
pub fn as_bytes(&self) -> &[u8] {
self.bitset.as_raw_slice()
}
}
impl Debug for IdAlloc {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
f.debug_struct("IdAlloc")
.field("len", &self.bitset.len())
.field("first_available_id", &self.first_available_id)
.finish()
}
}
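
Because the crate is `no_std` only outside of `cfg(test)`, the API is easy to exercise as a host-side test. A short walkthrough of the methods above (the test name is illustrative; the asserted values follow from the implementation as committed):

#[test]
fn id_alloc_walkthrough() {
    let mut ids = IdAlloc::with_capacity(8);

    // Sequential allocation hands out the lowest free ids.
    assert_eq!(ids.alloc(), Some(0));
    assert_eq!(ids.alloc(), Some(1));

    // A specific id can be claimed only while it is free.
    assert_eq!(ids.alloc_specific(5), Some(5));
    assert_eq!(ids.alloc_specific(5), None);

    // Consecutive allocation finds the free run 2..5 between the
    // sequentially allocated ids and the claimed id 5.
    assert_eq!(ids.alloc_consecutive(3), Some(2..5));

    // Freeing an id makes it the first candidate again.
    ids.free(1);
    assert_eq!(ids.alloc(), Some(1));

    // The bitmap round-trips through its raw byte representation.
    let restored = IdAlloc::from_bytes_with_capacity(ids.as_bytes(), 8);
    assert!(restored.is_allocated(5));
    assert_eq!(ids.alloc(), Some(6));
}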