Pull code from arch/*/irq.rs to trap/irq.rs

Ruihan Li 2025-05-23 23:18:54 +08:00 committed by Junyang Zhang
parent dd9fc81a81
commit 758c80c321
6 changed files with 229 additions and 330 deletions
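This commit moves the architecture-independent parts of the IRQ-line machinery (the callback lists, the IRQ number allocator, and the callback handles) out of arch/x86/irq.rs and arch/riscv/irq.rs into trap/irq.rs, leaving each architecture with only a thin surface: the IrqRemapping type, the IRQ_NUM_MIN/IRQ_NUM_MAX range, and the local enable/disable primitives. The public IrqLine interface is unchanged for callers. A minimal usage sketch of that interface (the function name, closure body, and import paths are illustrative, not part of the diff):

// Illustrative driver-side usage of the unchanged public API.
use ostd::prelude::Result;            // assumed re-export paths
use ostd::trap::{IrqLine, TrapFrame};

fn install_handler() -> Result<IrqLine> {
    // Pick any free external IRQ number (32..=255 on x86, 0..=255 on RISC-V).
    let mut irq = IrqLine::alloc()?;
    // Register a top-half callback; it is invoked from `process_top_half`.
    irq.on_active(|_frame: &TrapFrame| {
        // acknowledge the device, queue bottom-half work, ...
    });
    // `Some(index)` only when the IOMMU provides interrupt remapping.
    let _remap = irq.remapping_index();
    // Dropping `irq` unregisters the callback and frees the IRQ line.
    Ok(irq)
}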

View File

@@ -2,33 +2,33 @@
//! Interrupts.
use alloc::{boxed::Box, fmt::Debug, sync::Arc, vec::Vec};
use crate::cpu::PinCurrentCpu;
use id_alloc::IdAlloc;
use spin::Once;
pub(crate) const IRQ_NUM_MIN: u8 = 0;
pub(crate) const IRQ_NUM_MAX: u8 = 255;
use crate::{
cpu::PinCurrentCpu,
sync::{Mutex, PreemptDisabled, SpinLock, SpinLockGuard},
trap::TrapFrame,
};
pub(crate) struct IrqRemapping {
_private: (),
}
/// The global allocator for software defined IRQ lines.
pub(crate) static IRQ_ALLOCATOR: Once<SpinLock<IdAlloc>> = Once::new();
pub(crate) static IRQ_LIST: Once<Vec<IrqLine>> = Once::new();
pub(crate) fn init() {
let mut list: Vec<IrqLine> = Vec::new();
for i in 0..256 {
list.push(IrqLine {
irq_num: i as u8,
callback_list: SpinLock::new(Vec::new()),
});
impl IrqRemapping {
pub(crate) const fn new() -> Self {
Self { _private: () }
}
/// Initializes the remapping entry for the specific IRQ number.
///
/// This will do nothing if the entry is already initialized or interrupt
/// remapping is disabled or not supported by the architecture.
pub(crate) fn init(&self, irq_num: u8) {}
/// Gets the remapping index of the IRQ line.
///
/// This method will return `None` if interrupt remapping is disabled or
/// not supported by the architecture.
pub(crate) fn remapping_index(&self) -> Option<u16> {
None
}
IRQ_LIST.call_once(|| list);
CALLBACK_ID_ALLOCATOR.call_once(|| Mutex::new(IdAlloc::with_capacity(256)));
IRQ_ALLOCATOR.call_once(|| SpinLock::new(IdAlloc::with_capacity(256)));
}
pub(crate) fn enable_local() {
@@ -43,111 +43,6 @@ pub(crate) fn is_local_enabled() -> bool {
riscv::register::sstatus::read().sie()
}
static CALLBACK_ID_ALLOCATOR: Once<Mutex<IdAlloc>> = Once::new();
pub struct CallbackElement {
function: Box<dyn Fn(&TrapFrame) + Send + Sync + 'static>,
id: usize,
}
impl CallbackElement {
pub fn call(&self, element: &TrapFrame) {
self.function.call((element,));
}
}
impl Debug for CallbackElement {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("CallbackElement")
.field("id", &self.id)
.finish()
}
}
/// An interrupt request (IRQ) line.
#[derive(Debug)]
pub(crate) struct IrqLine {
pub(crate) irq_num: u8,
pub(crate) callback_list: SpinLock<Vec<CallbackElement>>,
}
impl IrqLine {
/// Acquire an interrupt request line.
///
/// # Safety
///
/// This function is marked unsafe as manipulating interrupt lines is
/// considered a dangerous operation.
#[expect(clippy::redundant_allocation)]
pub unsafe fn acquire(irq_num: u8) -> Arc<&'static Self> {
Arc::new(IRQ_LIST.get().unwrap().get(irq_num as usize).unwrap())
}
/// Gets the remapping index of the IRQ line.
///
/// This method will return `None` if interrupt remapping is disabled or
/// not supported by the architecture.
pub fn remapping_index(&self) -> Option<u16> {
None
}
/// Get the IRQ number.
pub fn num(&self) -> u8 {
self.irq_num
}
pub fn callback_list(
&self,
) -> SpinLockGuard<alloc::vec::Vec<CallbackElement>, PreemptDisabled> {
self.callback_list.lock()
}
/// Register a callback that will be invoked when the IRQ is active.
///
/// A handle to the callback is returned. Dropping the handle
/// automatically unregisters the callback.
///
/// For each IRQ line, multiple callbacks may be registered.
pub fn on_active<F>(&self, callback: F) -> IrqCallbackHandle
where
F: Fn(&TrapFrame) + Sync + Send + 'static,
{
let allocate_id = CALLBACK_ID_ALLOCATOR.get().unwrap().lock().alloc().unwrap();
self.callback_list.lock().push(CallbackElement {
function: Box::new(callback),
id: allocate_id,
});
IrqCallbackHandle {
irq_num: self.irq_num,
id: allocate_id,
}
}
}
/// The handle to a registered callback for an IRQ line.
///
/// When the handle is dropped, the callback will be unregistered automatically.
#[must_use]
#[derive(Debug)]
pub struct IrqCallbackHandle {
irq_num: u8,
id: usize,
}
impl Drop for IrqCallbackHandle {
fn drop(&mut self) {
let mut a = IRQ_LIST
.get()
.unwrap()
.get(self.irq_num as usize)
.unwrap()
.callback_list
.lock();
a.retain(|item| item.id != self.id);
CALLBACK_ID_ALLOCATOR.get().unwrap().lock().free(self.id);
}
}
// ####### Inter-Processor Interrupts (IPIs) #######
/// Hardware-specific, architecture-dependent CPU ID.
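Condensed for readability, the entire arch-facing surface that remains in this RISC-V file after the commit (gathered from the hunks above; enable_local and is_local_enabled are unchanged and omitted):

pub(crate) const IRQ_NUM_MIN: u8 = 0;
pub(crate) const IRQ_NUM_MAX: u8 = 255;

/// Interrupt remapping is not supported on RISC-V, so this is a zero-sized no-op.
pub(crate) struct IrqRemapping {
    _private: (),
}

impl IrqRemapping {
    pub(crate) const fn new() -> Self {
        Self { _private: () }
    }

    /// Does nothing: there is no remapping hardware to program.
    pub(crate) fn init(&self, _irq_num: u8) {}

    /// Always `None`: no remapping index exists.
    pub(crate) fn remapping_index(&self) -> Option<u16> {
        None
    }
}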

View File

@@ -25,7 +25,6 @@ pub(crate) fn init_cvm_guest() {
pub(crate) unsafe fn late_init_on_bsp() {
// SAFETY: This function is called in the boot context of the BSP.
unsafe { trap::init() };
irq::init();
// SAFETY: We're on the BSP and we're ready to boot all APs.
unsafe { crate::boot::smp::boot_all_aps() };

View File

@@ -2,48 +2,52 @@
//! Interrupts.
#![expect(dead_code)]
use alloc::{boxed::Box, fmt::Debug, sync::Arc, vec::Vec};
use id_alloc::IdAlloc;
use spin::Once;
use x86_64::registers::rflags::{self, RFlags};
use super::iommu::{alloc_irt_entry, has_interrupt_remapping, IrtEntryHandle};
use crate::{
cpu::PinCurrentCpu,
sync::{Mutex, PreemptDisabled, RwLock, RwLockReadGuard, SpinLock},
trap::TrapFrame,
};
use crate::cpu::PinCurrentCpu;
/// The global allocator for software defined IRQ lines.
pub(crate) static IRQ_ALLOCATOR: Once<SpinLock<IdAlloc>> = Once::new();
// Intel(R) 64 and IA-32 Architectures Software Developer's Manual,
// Volume 3A, Section 6.2 says "Vector numbers in the range 32 to 255
// are designated as user-defined interrupts and are not reserved by
// the Intel 64 and IA-32 architecture."
pub(crate) const IRQ_NUM_MIN: u8 = 32;
pub(crate) const IRQ_NUM_MAX: u8 = 255;
pub(crate) static IRQ_LIST: Once<Vec<IrqLine>> = Once::new();
pub(crate) struct IrqRemapping {
entry: Once<IrtEntryHandle>,
}
pub(crate) fn init() {
let mut list: Vec<IrqLine> = Vec::new();
for i in 0..256 {
list.push(IrqLine {
irq_num: i as u8,
callback_list: RwLock::new(Vec::new()),
bind_remapping_entry: Once::new(),
impl IrqRemapping {
pub(crate) const fn new() -> Self {
Self { entry: Once::new() }
}
/// Initializes the remapping entry for the specific IRQ number.
///
/// This will do nothing if the entry is already initialized or interrupt
/// remapping is disabled or not supported by the architecture.
pub(crate) fn init(&self, irq_num: u8) {
if !has_interrupt_remapping() {
return;
}
self.entry.call_once(|| {
// Allocate and enable the IRT entry.
let handle = alloc_irt_entry().unwrap();
handle.enable(irq_num as u32);
handle
});
}
IRQ_LIST.call_once(|| list);
CALLBACK_ID_ALLOCATOR.call_once(|| Mutex::new(IdAlloc::with_capacity(256)));
IRQ_ALLOCATOR.call_once(|| {
// As noted in the Intel 64 and IA-32 Architectures Software Developer's Manual,
// Volume 3A, Section 6.2, the first 32 interrupts are reserved for specific
// usages. And the rest from 32 to 255 are available for external user-defined
// interrupts.
let mut id_alloc = IdAlloc::with_capacity(256);
for i in 0..32 {
id_alloc.alloc_specific(i).unwrap();
/// Gets the remapping index of the IRQ line.
///
/// This method will return `None` if interrupt remapping is disabled or
/// not supported by the architecture.
pub(crate) fn remapping_index(&self) -> Option<u16> {
Some(self.entry.get()?.index())
}
SpinLock::new(id_alloc)
});
}
pub(crate) fn enable_local() {
@@ -62,121 +66,6 @@ pub(crate) fn is_local_enabled() -> bool {
(rflags::read_raw() & RFlags::INTERRUPT_FLAG.bits()) != 0
}
static CALLBACK_ID_ALLOCATOR: Once<Mutex<IdAlloc>> = Once::new();
pub struct CallbackElement {
function: Box<dyn Fn(&TrapFrame) + Send + Sync + 'static>,
id: usize,
}
impl CallbackElement {
pub fn call(&self, element: &TrapFrame) {
(self.function)(element);
}
}
impl Debug for CallbackElement {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("CallbackElement")
.field("id", &self.id)
.finish()
}
}
/// An interrupt request (IRQ) line.
#[derive(Debug)]
pub(crate) struct IrqLine {
pub(crate) irq_num: u8,
pub(crate) callback_list: RwLock<Vec<CallbackElement>>,
bind_remapping_entry: Once<IrtEntryHandle>,
}
impl IrqLine {
/// Acquires an interrupt request line.
///
/// # Safety
///
/// This function is marked unsafe as manipulating interrupt lines is
/// considered a dangerous operation.
#[expect(clippy::redundant_allocation)]
pub unsafe fn acquire(irq_num: u8) -> Arc<&'static Self> {
let irq = Arc::new(IRQ_LIST.get().unwrap().get(irq_num as usize).unwrap());
if has_interrupt_remapping() {
let handle = alloc_irt_entry();
if let Some(handle) = handle {
// Enable the IRT entry
handle.enable(irq_num as u32);
irq.bind_remapping_entry.call_once(|| handle);
}
}
irq
}
/// Gets the remapping index of the IRQ line.
///
/// This method will return `None` if interrupt remapping is disabled or
/// not supported by the architecture.
pub fn remapping_index(&self) -> Option<u16> {
Some(self.bind_remapping_entry.get()?.index())
}
/// Gets the IRQ number.
pub fn num(&self) -> u8 {
self.irq_num
}
pub fn callback_list(
&self,
) -> RwLockReadGuard<alloc::vec::Vec<CallbackElement>, PreemptDisabled> {
self.callback_list.read()
}
/// Registers a callback that will be invoked when the IRQ is active.
///
/// A handle to the callback is returned. Dropping the handle
/// automatically unregisters the callback.
///
/// For each IRQ line, multiple callbacks may be registered.
pub fn on_active<F>(&self, callback: F) -> IrqCallbackHandle
where
F: Fn(&TrapFrame) + Sync + Send + 'static,
{
let allocated_id = CALLBACK_ID_ALLOCATOR.get().unwrap().lock().alloc().unwrap();
self.callback_list.write().push(CallbackElement {
function: Box::new(callback),
id: allocated_id,
});
IrqCallbackHandle {
irq_num: self.irq_num,
id: allocated_id,
}
}
}
/// The handle to a registered callback for an IRQ line.
///
/// When the handle is dropped, the callback will be unregistered automatically.
#[must_use]
#[derive(Debug)]
pub struct IrqCallbackHandle {
irq_num: u8,
id: usize,
}
impl Drop for IrqCallbackHandle {
fn drop(&mut self) {
let mut a = IRQ_LIST
.get()
.unwrap()
.get(self.irq_num as usize)
.unwrap()
.callback_list
.write();
a.retain(|item| item.id != self.id);
CALLBACK_ID_ALLOCATOR.get().unwrap().lock().free(self.id);
}
}
// ####### Inter-Processor Interrupts (IPIs) #######
/// Hardware-specific, architecture-dependent CPU ID.
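Note how the constants above interact with the allocator that now lives in trap/irq.rs: the allocator hands out zero-based indices, and the externally visible IRQ number is that index offset by IRQ_NUM_MIN. A small sketch of the arithmetic (helper names are ours; the values are the x86 ones from this file):

const IRQ_NUM_MIN: u8 = 32;
const IRQ_NUM_MAX: u8 = 255;
const NUMBER_OF_IRQS: usize = (IRQ_NUM_MAX - IRQ_NUM_MIN) as usize + 1; // 224 slots

// `IrqLine::alloc_specific(irq_num)` asks the allocator for this index ...
fn index_of(irq_num: u8) -> usize {
    (irq_num - IRQ_NUM_MIN) as usize
}

// ... and `IrqLine::num()` maps the stored index back to the IRQ number.
fn irq_num_of(index: u8) -> u8 {
    index + IRQ_NUM_MIN
}

// For example, IRQ 64 (used in the tests below) sits at index 32 on x86.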

View File

@@ -64,7 +64,6 @@ static CPU_FEATURES: Once<FeatureInfo> = Once::new();
pub(crate) unsafe fn late_init_on_bsp() {
// SAFETY: This function is only called once on BSP.
unsafe { trap::init() };
irq::init();
kernel::acpi::init();

View File

@@ -2,8 +2,8 @@
use spin::Once;
use super::{disable_local, DisabledLocalIrqGuard};
use crate::{arch::irq::IRQ_LIST, cpu_local_cell, task::disable_preempt, trap::TrapFrame};
use super::{disable_local, irq::process_top_half, DisabledLocalIrqGuard};
use crate::{cpu_local_cell, task::disable_preempt, trap::TrapFrame};
static BOTTOM_HALF_HANDLER: Once<fn(DisabledLocalIrqGuard) -> DisabledLocalIrqGuard> = Once::new();
@@ -26,14 +26,6 @@ pub fn register_bottom_half_handler(func: fn(DisabledLocalIrqGuard) -> DisabledL
BOTTOM_HALF_HANDLER.call_once(|| func);
}
fn process_top_half(trap_frame: &TrapFrame, irq_number: usize) {
let irq_line = IRQ_LIST.get().unwrap().get(irq_number).unwrap();
let callback_functions = irq_line.callback_list();
for callback_function in callback_functions.iter() {
callback_function.call(trap_frame);
}
}
fn process_bottom_half() {
let Some(handler) = BOTTOM_HALF_HANDLER.get() else {
return;
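With process_top_half moved into trap/irq.rs, this file keeps only the bottom-half plumbing and the dispatch order. A hedged sketch of how the two halves are invoked by the trap path (the wrapper function name and signature are illustrative, not from the diff):

// Illustrative only: shows the dispatch order, not the real entry point.
fn call_irq_callbacks(trap_frame: &TrapFrame, irq_number: usize) {
    // Top half: run every callback registered on the IRQ line (now in trap/irq.rs).
    irq::process_top_half(trap_frame, irq_number);
    // Bottom half: the handler registered via `register_bottom_half_handler`, if any.
    process_bottom_half();
}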

View File

@@ -1,68 +1,72 @@
// SPDX-License-Identifier: MPL-2.0
use core::fmt::Debug;
use core::{fmt::Debug, ops::Deref};
use id_alloc::IdAlloc;
use spin::Once;
use crate::{
arch::irq::{self, IrqCallbackHandle, IRQ_ALLOCATOR},
arch::irq::{self, IrqRemapping, IRQ_NUM_MAX, IRQ_NUM_MIN},
prelude::*,
sync::GuardTransfer,
sync::{GuardTransfer, RwLock, SpinLock, WriteIrqDisabled},
task::atomic_mode::InAtomicMode,
trap::TrapFrame,
Error,
};
/// Type alias for the irq callback function.
/// A type alias for the IRQ callback function.
pub type IrqCallbackFunction = dyn Fn(&TrapFrame) + Sync + Send + 'static;
/// An Interrupt ReQuest(IRQ) line. User can use [`alloc`] or [`alloc_specific`] to get specific IRQ line.
/// An Interrupt ReQuest (IRQ) line.
///
/// The IRQ number is guaranteed to be external IRQ number and user can register callback functions to this IRQ resource.
/// When this resource is dropped, all the callback in this will be unregistered automatically.
/// Users can use [`alloc`] or [`alloc_specific`] to allocate a (specific) IRQ line.
///
/// The IRQ number is guaranteed to be an external IRQ number and users can use [`on_active`] to
/// safely register callback functions on this IRQ line. When the IRQ line is dropped, all the
/// registered callbacks will be unregistered automatically.
///
/// [`alloc`]: Self::alloc
/// [`alloc_specific`]: Self::alloc_specific
/// [`on_active`]: Self::on_active
#[derive(Debug)]
#[must_use]
pub struct IrqLine {
irq_num: u8,
#[expect(clippy::redundant_allocation)]
inner_irq: Arc<&'static irq::IrqLine>,
callbacks: Vec<IrqCallbackHandle>,
inner: Arc<InnerHandle>,
callbacks: Vec<CallbackHandle>,
}
impl IrqLine {
/// Allocates a specific IRQ line.
pub fn alloc_specific(irq: u8) -> Result<Self> {
IRQ_ALLOCATOR
.get()
.unwrap()
/// Allocates an available IRQ line.
pub fn alloc() -> Result<Self> {
get_or_init_allocator()
.lock()
.alloc_specific(irq as usize)
.map(|irq_num| Self::new(irq_num as u8))
.alloc()
.map(|id| Self::new(id as u8))
.ok_or(Error::NotEnoughResources)
}
/// Allocates an available IRQ line.
pub fn alloc() -> Result<Self> {
let Some(irq_num) = IRQ_ALLOCATOR.get().unwrap().lock().alloc() else {
return Err(Error::NotEnoughResources);
};
Ok(Self::new(irq_num as u8))
/// Allocates a specific IRQ line.
pub fn alloc_specific(irq_num: u8) -> Result<Self> {
get_or_init_allocator()
.lock()
.alloc_specific((irq_num - IRQ_NUM_MIN) as usize)
.map(|id| Self::new(id as u8))
.ok_or(Error::NotEnoughResources)
}
fn new(irq_num: u8) -> Self {
// SAFETY: The IRQ number is allocated through `RecycleAllocator`, and it is guaranteed that the
// IRQ is not one of the important IRQ like cpu exception IRQ.
fn new(index: u8) -> Self {
let inner = InnerHandle { index };
inner.remapping.init(index + IRQ_NUM_MIN);
Self {
irq_num,
inner_irq: unsafe { irq::IrqLine::acquire(irq_num) },
inner: Arc::new(inner),
callbacks: Vec::new(),
}
}
/// Gets the IRQ number.
pub fn num(&self) -> u8 {
self.irq_num
self.inner.index + IRQ_NUM_MIN
}
/// Registers a callback that will be invoked when the IRQ is active.
@@ -72,7 +76,20 @@ impl IrqLine {
where
F: Fn(&TrapFrame) + Sync + Send + 'static,
{
self.callbacks.push(self.inner_irq.on_active(callback))
let callback_handle = {
let callback_box = Box::new(callback);
let callback_addr = core::ptr::from_ref(&*callback_box).addr();
let mut callbacks = self.inner.callbacks.write();
callbacks.push(callback_box);
CallbackHandle {
irq_index: self.inner.index,
callback_addr,
}
};
self.callbacks.push(callback_handle);
}
/// Checks if there are no registered callbacks.
@@ -85,32 +102,94 @@ impl IrqLine {
/// This method will return `None` if interrupt remapping is disabled or
/// not supported by the architecture.
pub fn remapping_index(&self) -> Option<u16> {
self.inner_irq.remapping_index()
self.inner.remapping.remapping_index()
}
}
impl Clone for IrqLine {
fn clone(&self) -> Self {
Self {
irq_num: self.irq_num,
inner_irq: self.inner_irq.clone(),
inner: self.inner.clone(),
callbacks: Vec::new(),
}
}
}
impl Drop for IrqLine {
fn drop(&mut self) {
if Arc::strong_count(&self.inner_irq) == 1 {
IRQ_ALLOCATOR
.get()
.unwrap()
.lock()
.free(self.irq_num as usize);
struct Inner {
callbacks: RwLock<Vec<Box<IrqCallbackFunction>>, WriteIrqDisabled>,
remapping: IrqRemapping,
}
impl Inner {
const fn new() -> Self {
Self {
callbacks: RwLock::new(Vec::new()),
remapping: IrqRemapping::new(),
}
}
}
const NUMBER_OF_IRQS: usize = (IRQ_NUM_MAX - IRQ_NUM_MIN) as usize + 1;
static INNERS: [Inner; NUMBER_OF_IRQS] = [const { Inner::new() }; NUMBER_OF_IRQS];
static ALLOCATOR: Once<SpinLock<IdAlloc>> = Once::new();
fn get_or_init_allocator() -> &'static SpinLock<IdAlloc> {
ALLOCATOR.call_once(|| SpinLock::new(IdAlloc::with_capacity(NUMBER_OF_IRQS)))
}
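The `[const { Inner::new() }; NUMBER_OF_IRQS]` initializer above is what makes it possible to drop the old irq::init() step: each array element is produced by an inline const block, so a non-Copy value with a const constructor can be repeated in a plain static. A standalone illustration of the pattern (types here are ours, not the commit's):

use core::sync::atomic::{AtomicU32, Ordering};

struct Slot {
    hits: AtomicU32, // not Copy, so `[Slot::new(); 4]` alone would not compile
}

impl Slot {
    const fn new() -> Self {
        Self { hits: AtomicU32::new(0) }
    }
}

// Inline-const repetition: every element is built at compile time.
static SLOTS: [Slot; 4] = [const { Slot::new() }; 4];

fn record_hit(i: usize) {
    SLOTS[i].hits.fetch_add(1, Ordering::Relaxed);
}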
/// A handle for an allocated IRQ line.
///
/// When the handle is dropped, the IRQ line will be released automatically.
#[derive(Debug)]
struct InnerHandle {
index: u8,
}
impl Deref for InnerHandle {
type Target = Inner;
fn deref(&self) -> &Self::Target {
&INNERS[self.index as usize]
}
}
impl Drop for InnerHandle {
fn drop(&mut self) {
ALLOCATOR.get().unwrap().lock().free(self.index as usize);
}
}
/// A handle for a registered callback on an IRQ line.
///
/// When the handle is dropped, the callback will be unregistered automatically.
#[must_use]
#[derive(Debug)]
struct CallbackHandle {
irq_index: u8,
callback_addr: usize,
}
impl Drop for CallbackHandle {
fn drop(&mut self) {
let mut callbacks = INNERS[self.irq_index as usize].callbacks.write();
let pos = callbacks
.iter()
.position(|element| core::ptr::from_ref(&**element).addr() == self.callback_addr);
let _ = callbacks.swap_remove(pos.unwrap());
}
}
pub(super) fn process_top_half(trap_frame: &TrapFrame, irq_num: usize) {
let inner = &INNERS[irq_num - (IRQ_NUM_MIN as usize)];
for callback in &*inner.callbacks.read() {
callback(trap_frame);
}
}
// ####### IRQ Guards #######
/// Disables all IRQs on the current CPU (i.e., locally).
///
/// This function returns a guard object, which will automatically enable local IRQs again when
@@ -175,3 +254,49 @@ impl Drop for DisabledLocalIrqGuard {
}
}
}
#[cfg(ktest)]
mod test {
use super::*;
const IRQ_NUM: u8 = 64;
const IRQ_INDEX: usize = (IRQ_NUM - IRQ_NUM_MIN) as usize;
#[ktest]
fn alloc_and_free_irq() {
let irq_line = IrqLine::alloc_specific(IRQ_NUM).unwrap();
assert!(IrqLine::alloc_specific(IRQ_NUM).is_err());
let irq_line_cloned = irq_line.clone();
assert!(IrqLine::alloc_specific(IRQ_NUM).is_err());
drop(irq_line);
assert!(IrqLine::alloc_specific(IRQ_NUM).is_err());
drop(irq_line_cloned);
assert!(IrqLine::alloc_specific(IRQ_NUM).is_ok());
}
#[ktest]
fn register_and_unregister_callback() {
let mut irq_line = IrqLine::alloc_specific(IRQ_NUM).unwrap();
let mut irq_line_cloned = irq_line.clone();
assert_eq!(INNERS[IRQ_INDEX].callbacks.read().len(), 0);
irq_line.on_active(|_| {});
assert_eq!(INNERS[IRQ_INDEX].callbacks.read().len(), 1);
irq_line_cloned.on_active(|_| {});
assert_eq!(INNERS[IRQ_INDEX].callbacks.read().len(), 2);
irq_line_cloned.on_active(|_| {});
assert_eq!(INNERS[IRQ_INDEX].callbacks.read().len(), 3);
drop(irq_line);
assert_eq!(INNERS[IRQ_INDEX].callbacks.read().len(), 2);
drop(irq_line_cloned);
assert_eq!(INNERS[IRQ_INDEX].callbacks.read().len(), 0);
}
}
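One design note on CallbackHandle: because every callback is boxed, the heap address of the boxed closure stays stable for as long as the box lives, so it can serve as the callback's identity. That is what lets this commit drop CALLBACK_ID_ALLOCATOR. A standalone illustration of the idea (not code from the commit):

// Standalone sketch of address-based identity for boxed callbacks.
type Callback = Box<dyn Fn(u32) + Send + Sync>;

fn main() {
    let mut callbacks: Vec<Callback> = Vec::new();

    let cb: Callback = Box::new(|n| {
        let _ = n;
    });
    // The address of the boxed closure identifies it uniquely while it lives.
    let id = core::ptr::from_ref(&*cb).addr();
    callbacks.push(cb);

    // Unregistration finds the entry by address, as `CallbackHandle::drop` does.
    let pos = callbacks
        .iter()
        .position(|c| core::ptr::from_ref(&**c).addr() == id)
        .unwrap();
    let _ = callbacks.swap_remove(pos);
    assert!(callbacks.is_empty());
}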