diff --git a/ostd/src/arch/x86/iommu/interrupt_remapping/mod.rs b/ostd/src/arch/x86/iommu/interrupt_remapping/mod.rs
new file mode 100644
index 000000000..419118ab9
--- /dev/null
+++ b/ostd/src/arch/x86/iommu/interrupt_remapping/mod.rs
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: MPL-2.0
+
+mod table;
+
+use alloc::sync::Arc;
+use core::{fmt::Debug, mem::size_of};
+
+use log::{info, warn};
+use spin::Once;
+pub(super) use table::IntRemappingTable;
+use table::IrtEntry;
+
+use crate::{
+    arch::iommu::registers::{ExtendedCapabilityFlags, IOMMU_REGS},
+    prelude::Vaddr,
+    sync::{LocalIrqDisabled, SpinLock},
+};
+
+pub struct IrtEntryHandle {
+    index: u16,
+    entry_ref: Option<&'static mut IrtEntry>,
+}
+
+impl IrtEntryHandle {
+    pub fn index(&self) -> u16 {
+        self.index
+    }
+
+    #[allow(unused)]
+    pub fn irt_entry(&self) -> Option<&IrtEntry> {
+        self.entry_ref.as_deref()
+    }
+
+    pub fn irt_entry_mut(&mut self) -> Option<&mut IrtEntry> {
+        self.entry_ref.as_deref_mut()
+    }
+
+    /// Sets the entry reference to `None`.
+    pub(self) fn set_none(&mut self) {
+        self.entry_ref = None;
+    }
+
+    /// Creates a handle based on the index and the base virtual address of the interrupt remapping table.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure that the target address is **always** valid and points to an `IrtEntry`.
+    pub(self) unsafe fn new(table_vaddr: Vaddr, index: u16) -> Self {
+        Self {
+            index,
+            entry_ref: Some(
+                &mut *((table_vaddr + index as usize * size_of::<IrtEntry>()) as *mut IrtEntry),
+            ),
+        }
+    }
+}
+
+impl Debug for IrtEntryHandle {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        f.debug_struct("IrtEntryHandle")
+            .field("index", &self.index)
+            .field("entry_ref", &self.entry_ref)
+            .finish()
+    }
+}
+
+pub fn has_interrupt_remapping() -> bool {
+    REMAPPING_TABLE.get().is_some()
+}
+
+pub fn alloc_irt_entry() -> Option<Arc<SpinLock<IrtEntryHandle, LocalIrqDisabled>>> {
+    let remapping_table = REMAPPING_TABLE.get()?;
+    remapping_table.alloc()
+}
+
+pub(super) fn init() {
+    let mut iommu_regs = IOMMU_REGS.get().unwrap().lock();
+
+    // Check if interrupt remapping is supported
+    let extend_cap = iommu_regs.extended_capability();
+    if !extend_cap.flags().contains(ExtendedCapabilityFlags::IR) {
+        warn!("[IOMMU] Interrupt remapping not supported");
+        return;
+    }
+
+    // Create the interrupt remapping table
+    REMAPPING_TABLE.call_once(IntRemappingTable::new);
+    iommu_regs.enable_interrupt_remapping(REMAPPING_TABLE.get().unwrap());
+
+    info!("[IOMMU] Interrupt remapping enabled");
+}
+
+static REMAPPING_TABLE: Once<IntRemappingTable> = Once::new();
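Reviewer note: a minimal usage sketch of the API this new module exposes, mirroring how the IOAPIC and MSI-X changes later in this patch consume it. It is illustrative only, assumes kernel context inside `ostd` after `iommu::init()` has run, and the vector value 0x31 is an arbitrary example.

```rust
// Sketch only; not part of the patch and not a standalone program.
use crate::arch::iommu::{alloc_irt_entry, has_interrupt_remapping};

fn program_one_entry() {
    if !has_interrupt_remapping() {
        return; // Stay with compatibility-format interrupts.
    }
    // Grab a free entry from the global remapping table (None if the table is exhausted).
    let Some(handle) = alloc_irt_entry() else { return };
    let mut guard = handle.lock();
    // Program the entry: present, fault processing disabled, vector 0x31, destination APIC ID 0.
    guard.irt_entry_mut().unwrap().enable_default(0x31);
    // This hardware-visible index is what gets packed into the IOAPIC RTE or MSI-X address.
    let _index = guard.index();
}
```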
diff --git a/ostd/src/arch/x86/iommu/interrupt_remapping/table.rs b/ostd/src/arch/x86/iommu/interrupt_remapping/table.rs
new file mode 100644
index 000000000..1ec3623f0
--- /dev/null
+++ b/ostd/src/arch/x86/iommu/interrupt_remapping/table.rs
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: MPL-2.0
+
+use alloc::{sync::Arc, vec::Vec};
+use core::{fmt::Debug, mem::size_of};
+
+use bitflags::bitflags;
+use id_alloc::IdAlloc;
+use int_to_c_enum::TryFromInt;
+
+use super::IrtEntryHandle;
+use crate::{
+    mm::{paddr_to_vaddr, FrameAllocOptions, Segment, PAGE_SIZE},
+    sync::{LocalIrqDisabled, SpinLock},
+};
+
+#[allow(dead_code)]
+#[derive(Debug)]
+enum ExtendedInterruptMode {
+    XApic,
+    X2Apic,
+}
+
+pub struct IntRemappingTable {
+    size: u16,
+    extended_interrupt_mode: ExtendedInterruptMode,
+    frames: Segment,
+    /// The global allocator for interrupt remapping entries.
+    allocator: SpinLock<IdAlloc, LocalIrqDisabled>,
+    handles: Vec<Arc<SpinLock<IrtEntryHandle, LocalIrqDisabled>>>,
+}
+
+impl IntRemappingTable {
+    pub fn alloc(&self) -> Option<Arc<SpinLock<IrtEntryHandle, LocalIrqDisabled>>> {
+        let id = self.allocator.lock().alloc()?;
+        Some(self.handles.get(id).unwrap().clone())
+    }
+
+    /// Creates an Interrupt Remapping Table with one frame (default).
+    pub(super) fn new() -> Self {
+        const DEFAULT_PAGES: usize = 1;
+        let segment = FrameAllocOptions::new(DEFAULT_PAGES)
+            .is_contiguous(true)
+            .alloc_contiguous()
+            .unwrap();
+        let entry_number = (DEFAULT_PAGES * PAGE_SIZE / size_of::<IrtEntry>()) as u16;
+
+        let mut handles = Vec::new();
+        let base_vaddr = paddr_to_vaddr(segment.start_paddr());
+        for index in 0..entry_number {
+            // SAFETY: The `IrtEntry` reference is always valid and will be disabled when the `IntRemappingTable` is dropped.
+            let handle = unsafe { IrtEntryHandle::new(base_vaddr, index) };
+            handles.push(Arc::new(SpinLock::new(handle)));
+        }
+
+        Self {
+            size: entry_number,
+            extended_interrupt_mode: ExtendedInterruptMode::X2Apic,
+            frames: segment,
+            allocator: SpinLock::new(IdAlloc::with_capacity(entry_number as usize)),
+            handles,
+        }
+    }
+
+    /// Encodes the value to be written into the Interrupt Remapping Table Address Register.
+    pub(crate) fn encode(&self) -> u64 {
+        let mut encoded = self.frames.start_paddr() as u64;
+
+        match self.extended_interrupt_mode {
+            ExtendedInterruptMode::XApic => {}
+            ExtendedInterruptMode::X2Apic => encoded |= 1 << 11,
+        }
+
+        // entry_number = 2^(size+1)
+        if self.size == 1 {
+            panic!("Wrong entry number");
+        }
+        let mut size = 0;
+        let mut tmp = self.size >> 1;
+        while (tmp & 0b1) == 0 {
+            tmp >>= 1;
+            size += 1;
+        }
+        encoded += size;
+
+        encoded
+    }
+}
+
+impl Drop for IntRemappingTable {
+    fn drop(&mut self) {
+        for handle in self.handles.iter_mut() {
+            let mut handle = handle.lock();
+            handle.set_none();
+        }
+    }
+}
+
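Reviewer note: a worked example of the size-field arithmetic in `encode()` may save a trip to the spec. With the default single 4 KiB frame the table holds 4096 / 16 = 256 entries, and the register expects a size field `s` such that 2^(s + 1) = 256, i.e. 7; bit 11 of the same register selects x2APIC (EIME) mode. The snippet below re-implements the loop standalone for checking; it is not code from the patch.

```rust
fn size_field(entry_number: u16) -> u64 {
    // Mirrors the loop in IntRemappingTable::encode(): find s with 2^(s + 1) == entry_number.
    assert!(entry_number >= 2 && entry_number.is_power_of_two());
    let mut size = 0u64;
    let mut tmp = entry_number >> 1;
    while (tmp & 0b1) == 0 {
        tmp >>= 1;
        size += 1;
    }
    size
}

fn main() {
    // One 4096-byte frame of 16-byte IRTEs gives 256 entries, so the field is 7.
    assert_eq!(size_field(256), 7);
    // The x2APIC (EIME) mode selector is bit 11 of the encoded register value.
    assert_eq!(1u64 << 11, 0x800);
}
```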
+/// The type of validation that must be performed by the interrupt-remapping hardware.
+#[derive(Debug, TryFromInt)]
+#[repr(u32)]
+pub enum SourceValidationType {
+    /// No requester-id verification is required.
+    Disable = 0b00,
+    /// Verify requester-id in the interrupt request using the SID and SQ fields in the
+    /// IRTE.
+    RequesterId = 0b01,
+    /// Verify the most significant 8 bits of the requester-id (Bus#) in the interrupt
+    /// request are equal to or within the Startbus# and EndBus# specified through the
+    /// upper and lower 8 bits of the SID field respectively.
+    RequesterBus = 0b10,
+    Reserved = 0b11,
+}
+
+/// Source ID qualifier. This field is evaluated by hardware only when the Present bit
+/// is Set and the SVT field is 0b01.
+#[derive(Debug, TryFromInt)]
+#[repr(u32)]
+pub enum SourceIdQualifier {
+    /// Verify the interrupt request by comparing all 16 bits of the SID field with the
+    /// 16-bit requester-id of the interrupt request.
+    All = 0b00,
+    /// Verify the interrupt request by comparing the **most significant 13 bits** of the
+    /// SID and requester-id of the interrupt request, and comparing the **least significant
+    /// two bits** of the SID field and requester-id of the interrupt request.
+    IgnoreThirdLeast = 0b01,
+    /// Verify the interrupt request by comparing the **most significant 13 bits** of the
+    /// SID and requester-id of the interrupt request, and comparing the **least significant
+    /// bit** of the SID field and requester-id of the interrupt request.
+    IgnoreSecondThirdLeast = 0b10,
+    /// Verify the interrupt request by comparing the **most significant 13 bits** of the
+    /// SID and requester-id of the interrupt request.
+    IgnoreLeastThree = 0b11,
+}
+
+#[derive(Debug, TryFromInt)]
+#[repr(u32)]
+enum DeliveryMode {
+    FixedMode = 0b000,
+    LowestPriority = 0b001,
+    SystemManagementInterrupt = 0b010,
+    NonMaskableInterrupt = 0b100,
+    Init = 0b101,
+    ExInt = 0b111,
+}
+
+/// Interrupt Remapping Table Entry (IRTE) for Remapped Interrupts.
+pub struct IrtEntry(u128);
+
+impl IrtEntry {
+    #[allow(unused)]
+    pub const fn new(value: u128) -> Self {
+        Self(value)
+    }
+
+    #[allow(unused)]
+    pub fn clear(&mut self) {
+        self.0 = 0
+    }
+
+    /// Enables this entry with no validation:
+    /// DST = 0, IM = 0, DLM = 0, TM = 0, RH = 0, DM = 0, FPD = 1, P = 1
+    pub fn enable_default(&mut self, vector: u32) {
+        self.0 = 0b11 | (vector as u128) << 16;
+    }
+
+    pub fn source_validation_type(&self) -> SourceValidationType {
+        const SVT_MASK: u128 = 0x3 << 82;
+        SourceValidationType::try_from(((self.0 & SVT_MASK) >> 82) as u32).unwrap()
+    }
+
+    pub fn source_id_qualifier(&self) -> SourceIdQualifier {
+        const SQ_MASK: u128 = 0x3 << 80;
+        SourceIdQualifier::try_from(((self.0 & SQ_MASK) >> 80) as u32).unwrap()
+    }
+
+    pub const fn source_identifier(&self) -> u32 {
+        const SID_MASK: u128 = 0xFFFF << 64;
+        ((self.0 & SID_MASK) >> 64) as u32
+    }
+
+    /// This field identifies the remapped interrupt request's target processor(s). It is
+    /// evaluated by hardware only when the Present (P) field is Set.
+    ///
+    /// The format of this field in various Interrupt Remapping modes is as follows:
+    /// - Intel xAPIC Mode (IRTA_REG.EIME=0):
+    ///     - 63:48 - Reserved (0)
+    ///     - 47:40 - APIC DestinationID[7:0]
+    ///     - 39:32 - Reserved (0)
+    /// - Intel x2APIC Mode (IRTA_REG.EIME=1):
+    ///     - 63:32 - APIC DestinationID[31:0]
+    pub const fn destination_id(&self) -> u32 {
+        const DST_MASK: u128 = 0xFFFF_FFFF << 32;
+        ((self.0 & DST_MASK) >> 32) as u32
+    }
+
+    pub const fn vector(&self) -> u8 {
+        const VECTOR_MASK: u128 = 0xFF << 16;
+        ((self.0 & VECTOR_MASK) >> 16) as u8
+    }
+
+    pub const fn flags(&self) -> IrtEntryFlags {
+        IrtEntryFlags::from_bits_truncate((self.0 & 0xFFFF_FFFF) as u32)
+    }
+}
+
+impl Debug for IrtEntry {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        f.debug_struct("IrtEntry")
+            .field("flags", &self.flags())
+            .field("destination_id", &self.destination_id())
+            .field("vector", &self.vector())
+            .field("source_identifier", &self.source_identifier())
+            .field("source_id_qualifier", &self.source_id_qualifier())
+            .field("source_validation_type", &self.source_validation_type())
+            .field("raw", &self.0)
+            .finish()
+    }
+}
+
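Reviewer note: a standalone sketch that rebuilds the value `enable_default` writes and decodes it with the same bit positions the accessors above use (P and FPD in bits 1:0, vector in 23:16, destination in 63:32, SID/SQ/SVT in 79:64, 81:80 and 83:82 per the VT-d spec). The vector 0x31 is an arbitrary example; this is not code from the patch.

```rust
fn main() {
    let vector: u32 = 0x31;
    // enable_default(): P (bit 0) | FPD (bit 1), vector in bits 23:16, all other fields zero.
    let irte: u128 = 0b11 | (vector as u128) << 16;

    assert_eq!(irte & 0x1, 1); // Present
    assert_eq!((irte >> 1) & 0x1, 1); // Fault Processing Disable
    assert_eq!(((irte >> 16) & 0xFF) as u8, 0x31); // vector()
    assert_eq!(((irte >> 32) & 0xFFFF_FFFF) as u32, 0); // destination_id(): APIC ID 0
    assert_eq!(((irte >> 64) & 0xFFFF) as u32, 0); // source_identifier()
    assert_eq!(((irte >> 80) & 0x3) as u32, 0); // SQ
    assert_eq!(((irte >> 82) & 0x3) as u32, 0); // SVT: no source validation
}
```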
+bitflags! {
+    /// Interrupt Remapping Table Entry Flags for Remapped Interrupts.
+    pub struct IrtEntryFlags: u32 {
+        /// Present bit
+        const P = 1 << 0;
+        /// Fault Processing Disable. Enables or disables recording/reporting of faults
+        /// caused by interrupt message requests processed through this entry.
+        ///
+        /// - 0: Enabled
+        /// - 1: Disabled
+        const FPD = 1 << 1;
+        /// Destination Mode, indicates whether the Destination ID in an IRTE should be
+        /// interpreted as a logical or physical APIC ID.
+        ///
+        /// - 0: Physical
+        /// - 1: Logical
+        const DM = 1 << 2;
+        /// Redirection Hint, indicates whether the remapped interrupt request should be
+        /// directed to one among N processors specified in the Destination ID.
+        ///
+        /// - 0: The remapped interrupt is directed to the processor.
+        /// - 1: The remapped interrupt is directed to 1 of N processors.
+        const RH = 1 << 3;
+        /// Trigger Mode.
+        ///
+        /// - 0: Edge sensitive
+        /// - 1: Level sensitive
+        const TM = 1 << 4;
+        /// IRTE Mode.
+        ///
+        /// - 0: Remapped Mode.
+        /// - 1: Posted Mode.
+        const IM = 1 << 15;
+    }
+}
diff --git a/ostd/src/arch/x86/iommu/invalidate/descriptor/mod.rs b/ostd/src/arch/x86/iommu/invalidate/descriptor/mod.rs
index b4e656c78..3a60ef4e0 100644
--- a/ostd/src/arch/x86/iommu/invalidate/descriptor/mod.rs
+++ b/ostd/src/arch/x86/iommu/invalidate/descriptor/mod.rs
@@ -3,7 +3,19 @@
 pub struct InterruptEntryCache(pub u128);
 
 impl InterruptEntryCache {
+    const INVALIDATION_TYPE: u128 = 4;
+
     pub fn global_invalidation() -> Self {
-        Self(0x4)
+        Self(Self::INVALIDATION_TYPE)
+    }
+}
+
+pub struct InvalidationWait(pub u128);
+
+impl InvalidationWait {
+    const INVALIDATION_TYPE: u128 = 5;
+
+    pub fn with_interrupt_flag() -> Self {
+        Self(Self::INVALIDATION_TYPE | 0x10)
     }
 }
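Reviewer note: both queued-invalidation descriptors encode their type in the low four bits of the first 64-bit word (4 = interrupt entry cache invalidate, 5 = invalidation wait), and `with_interrupt_flag` additionally sets the Interrupt Flag (bit 4) so that completion is reported through the Invalidation Completion Status Register, which `enable_interrupt_remapping` below polls. A standalone sketch of the same constants, illustrative only:

```rust
fn main() {
    // InterruptEntryCache::global_invalidation(): type 4 with no other fields set,
    // i.e. a global invalidation of the interrupt entry cache.
    let iec: u128 = 4;
    assert_eq!(iec & 0xF, 4);

    // InvalidationWait::with_interrupt_flag(): type 5 with the Interrupt Flag (bit 4) set.
    let inv_wait: u128 = 5 | 0x10;
    assert_eq!(inv_wait & 0xF, 5);
    assert_ne!(inv_wait & 0x10, 0);
}
```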
diff --git a/ostd/src/arch/x86/iommu/mod.rs b/ostd/src/arch/x86/iommu/mod.rs
index 1c23f4e12..cffe0ce6c 100644
--- a/ostd/src/arch/x86/iommu/mod.rs
+++ b/ostd/src/arch/x86/iommu/mod.rs
@@ -4,10 +4,12 @@
 
 mod dma_remapping;
 mod fault;
+mod interrupt_remapping;
 mod invalidate;
 mod registers;
 
 pub(crate) use dma_remapping::{has_dma_remapping, map, unmap};
+pub(crate) use interrupt_remapping::{alloc_irt_entry, has_interrupt_remapping, IrtEntryHandle};
 
 use crate::mm::page_table::PageTableError;
 
@@ -24,6 +26,6 @@ pub(crate) fn init() -> Result<(), IommuError> {
     registers::init()?;
     invalidate::init();
     dma_remapping::init();
-
+    interrupt_remapping::init();
     Ok(())
 }
diff --git a/ostd/src/arch/x86/iommu/registers/mod.rs b/ostd/src/arch/x86/iommu/registers/mod.rs
index adfc81f8e..d12d96bc8 100644
--- a/ostd/src/arch/x86/iommu/registers/mod.rs
+++ b/ostd/src/arch/x86/iommu/registers/mod.rs
@@ -22,10 +22,19 @@ use volatile::{
     Volatile,
 };
 
-use super::{dma_remapping::RootTable, invalidate::queue::Queue, IommuError};
+use super::{
+    dma_remapping::RootTable, interrupt_remapping::IntRemappingTable, invalidate::queue::Queue,
+    IommuError,
+};
 use crate::{
     arch::{
-        iommu::fault,
+        iommu::{
+            fault,
+            invalidate::{
+                descriptor::{InterruptEntryCache, InvalidationWait},
+                QUEUE,
+            },
+        },
         x86::kernel::acpi::dmar::{Dmar, Remapping},
     },
     mm::paddr_to_vaddr,
@@ -66,6 +75,8 @@ pub struct IommuRegisters {
 
     #[allow(dead_code)]
     context_command: Volatile<&'static mut u64, ReadWrite>,
 
+    interrupt_remapping_table_addr: Volatile<&'static mut u64, ReadWrite>,
+
     invalidate: InvalidationRegisters,
 }
 
@@ -111,6 +122,48 @@ impl IommuRegisters {
         while !self.global_status().contains(GlobalStatus::TES) {}
     }
 
+    /// Enables Interrupt Remapping with the given `IntRemappingTable`.
+    pub(super) fn enable_interrupt_remapping(&mut self, table: &'static IntRemappingTable) {
+        assert!(self
+            .extended_capability()
+            .flags()
+            .contains(ExtendedCapabilityFlags::IR));
+        // Set the interrupt remapping table address
+        self.interrupt_remapping_table_addr.write(table.encode());
+        self.write_global_command(GlobalCommand::SIRTP, true);
+        while !self.global_status().contains(GlobalStatus::IRTPS) {}
+
+        // Enable Interrupt Remapping
+        self.write_global_command(GlobalCommand::IRE, true);
+        while !self.global_status().contains(GlobalStatus::IRES) {}
+
+        // Invalidate the interrupt entry cache
+        if self.global_status().contains(GlobalStatus::QIES) {
+            let mut queue = QUEUE.get().unwrap().lock();
+
+            // Construct a global invalidation of the interrupt cache and an invalidation wait.
+            queue.append_descriptor(InterruptEntryCache::global_invalidation().0);
+            let tail = queue.tail();
+            self.invalidate.queue_tail.write((tail << 4) as u64);
+            while (self.invalidate.queue_head.read() >> 4) + 1 == tail as u64 {}
+
+            // We need to set the interrupt flag so that the `Invalidation Completion Status Register` can report the completion status.
+            queue.append_descriptor(InvalidationWait::with_interrupt_flag().0);
+            self.invalidate.queue_tail.write((queue.tail() << 4) as u64);
+
+            // Wait for completion
+            while self.invalidate.completion_status.read() == 0 {}
+        } else {
+            self.global_invalidation()
+        }
+
+        // Disable compatibility-format interrupts
+        if self.global_status().contains(GlobalStatus::CFIS) {
+            self.write_global_command(GlobalCommand::CFI, false);
+            while self.global_status().contains(GlobalStatus::CFIS) {}
+        }
+    }
+
     pub(super) fn enable_queued_invalidation(&mut self, queue: &Queue) {
         assert!(self
             .extended_capability()
@@ -212,6 +265,9 @@ impl IommuRegisters {
             let global_status = Volatile::new_read_only(&*((vaddr + 0x1C) as *const u32));
             let root_table_address = Volatile::new(&mut *((vaddr + 0x20) as *mut u64));
             let context_command = Volatile::new(&mut *((vaddr + 0x28) as *mut u64));
+
+            let interrupt_remapping_table_addr = Volatile::new(&mut *((vaddr + 0xb8) as *mut u64));
+
             Self {
                 version,
                 capability,
@@ -221,6 +277,7 @@ impl IommuRegisters {
                 root_table_address,
                 context_command,
                 invalidate: InvalidationRegisters::new(vaddr),
+                interrupt_remapping_table_addr,
             }
         };
diff --git a/ostd/src/arch/x86/irq.rs b/ostd/src/arch/x86/irq.rs
index 073c5d2aa..3635cd75e 100644
--- a/ostd/src/arch/x86/irq.rs
+++ b/ostd/src/arch/x86/irq.rs
@@ -10,9 +10,10 @@
 use id_alloc::IdAlloc;
 use spin::Once;
 use x86_64::registers::rflags::{self, RFlags};
 
+use super::iommu::{alloc_irt_entry, has_interrupt_remapping, IrtEntryHandle};
 use crate::{
     cpu::CpuId,
-    sync::{Mutex, RwLock, RwLockReadGuard, SpinLock},
+    sync::{LocalIrqDisabled, Mutex, RwLock, RwLockReadGuard, SpinLock},
     trap::TrapFrame,
 };
 
@@ -27,6 +28,7 @@ pub(crate) fn init() {
         list.push(IrqLine {
             irq_num: i as u8,
             callback_list: RwLock::new(Vec::new()),
+            bind_remapping_entry: Once::new(),
         });
     }
     IRQ_LIST.call_once(|| list);
@@ -86,6 +88,7 @@ impl Debug for CallbackElement {
 pub(crate) struct IrqLine {
     pub(crate) irq_num: u8,
     pub(crate) callback_list: RwLock<Vec<CallbackElement>>,
+    bind_remapping_entry: Once<Arc<SpinLock<IrtEntryHandle, LocalIrqDisabled>>>,
 }
 
 impl IrqLine {
@@ -97,7 +100,18 @@ impl IrqLine {
     /// considered a dangerous operation.
     #[allow(clippy::redundant_allocation)]
     pub unsafe fn acquire(irq_num: u8) -> Arc<&'static Self> {
-        Arc::new(IRQ_LIST.get().unwrap().get(irq_num as usize).unwrap())
+        let irq = Arc::new(IRQ_LIST.get().unwrap().get(irq_num as usize).unwrap());
+        if has_interrupt_remapping() {
+            let handle = alloc_irt_entry();
+            if let Some(handle) = handle {
+                irq.bind_remapping_entry.call_once(|| handle);
+            }
+        }
+        irq
+    }
+
+    pub fn bind_remapping_entry(&self) -> Option<&Arc<SpinLock<IrtEntryHandle, LocalIrqDisabled>>> {
+        self.bind_remapping_entry.get()
     }
 
     /// Gets the IRQ number.
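Reviewer note: a minimal sketch of how the per-IRQ binding added above is consumed. It assumes kernel context inside `ostd`; the IRQ number 0x41 is arbitrary, and the entry can legitimately be absent if the remapping table ran out of free slots.

```rust
// Sketch only; not part of the patch and not a standalone program.
use crate::arch::{iommu::has_interrupt_remapping, irq::IrqLine};

unsafe fn inspect_remapping_binding() {
    // SAFETY: same contract as in the patch - the caller must manage this IRQ line.
    let line = IrqLine::acquire(0x41);
    if has_interrupt_remapping() {
        // `acquire` binds an IRT entry through `Once`, so repeated acquires of the same
        // line share one entry; it may still be missing if allocation failed.
        if let Some(handle) = line.bind_remapping_entry() {
            let _irt_index = handle.lock().index();
        }
    }
}
```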
diff --git a/ostd/src/arch/x86/kernel/apic/ioapic.rs b/ostd/src/arch/x86/kernel/apic/ioapic.rs
index 5effdc04e..1fe83ce45 100644
--- a/ostd/src/arch/x86/kernel/apic/ioapic.rs
+++ b/ostd/src/arch/x86/kernel/apic/ioapic.rs
@@ -11,8 +11,11 @@ use log::info;
 use spin::Once;
 
 use crate::{
-    arch::x86::kernel::acpi::ACPI_TABLES, mm::paddr_to_vaddr, sync::SpinLock, trap::IrqLine, Error,
-    Result,
+    arch::{iommu::has_interrupt_remapping, x86::kernel::acpi::ACPI_TABLES},
+    mm::paddr_to_vaddr,
+    sync::SpinLock,
+    trap::IrqLine,
+    Error, Result,
 };
 
 cfg_if! {
@@ -47,6 +50,34 @@ impl IoApic {
         if value.get_bits(0..8) as u8 != 0 {
             return Err(Error::AccessDenied);
         }
+        if has_interrupt_remapping() {
+            let mut handle = irq.inner_irq().bind_remapping_entry().unwrap().lock();
+
+            // Enable irt entry
+            let irt_entry_mut = handle.irt_entry_mut().unwrap();
+            irt_entry_mut.enable_default(irq.num() as u32);
+
+            // Construct remappable format RTE with RTE[48] set.
+            let mut value: u64 = irq.num() as u64 | 0x1_0000_0000_0000;
+
+            // Interrupt index[14:0] is on RTE[63:49] and interrupt index[15] is on RTE[11].
+            value |= ((handle.index() & 0x8000) >> 4) as u64;
+            value |= (handle.index() as u64 & 0x7FFF) << 49;
+
+            self.access.write(
+                Self::TABLE_REG_BASE + 2 * index,
+                value.get_bits(0..32) as u32,
+            );
+            self.access.write(
+                Self::TABLE_REG_BASE + 2 * index + 1,
+                value.get_bits(32..64) as u32,
+            );
+
+            drop(handle);
+            self.irqs.push(irq);
+            return Ok(());
+        }
+
         self.access
             .write(Self::TABLE_REG_BASE + 2 * index, irq.num() as u32);
         self.access.write(Self::TABLE_REG_BASE + 2 * index + 1, 0);
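Reviewer note: the remappable-format RTE built in `IoApic::enable` packs the IRT index into two places - bits 14:0 go to RTE[63:49] and bit 15 goes to RTE[11], with RTE[48] selecting the remappable format (the MSI-X path later in this patch splits the same index across message-address bits 19:5 and bit 2). A standalone re-implementation of the shifts, illustrative only; the vector and index values are arbitrary examples.

```rust
fn remappable_rte(vector: u8, index: u16) -> u64 {
    // RTE[7:0] = vector, RTE[48] = remappable format.
    let mut value: u64 = vector as u64 | 0x1_0000_0000_0000;
    // Interrupt index[15] goes to RTE[11]; index[14:0] go to RTE[63:49].
    value |= ((index & 0x8000) >> 4) as u64;
    value |= (index as u64 & 0x7FFF) << 49;
    value
}

fn main() {
    let rte = remappable_rte(0x31, 0x8003);
    assert_eq!(rte & 0xFF, 0x31); // vector
    assert_eq!((rte >> 48) & 0x1, 1); // remappable format bit
    assert_eq!((rte >> 49) & 0x7FFF, 0x0003); // index[14:0]
    assert_eq!((rte >> 11) & 0x1, 1); // index[15]
}
```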
diff --git a/ostd/src/bus/pci/capability/msix.rs b/ostd/src/bus/pci/capability/msix.rs
index 00907c0b4..821322f58 100644
--- a/ostd/src/bus/pci/capability/msix.rs
+++ b/ostd/src/bus/pci/capability/msix.rs
@@ -10,6 +10,7 @@ use alloc::{sync::Arc, vec::Vec};
 use cfg_if::cfg_if;
 
 use crate::{
+    arch::iommu::has_interrupt_remapping,
     bus::pci::{
         cfg_space::{Bar, Command, MemoryBar},
         common_device::PciCommonDevice,
@@ -59,6 +60,9 @@ impl Clone for CapabilityMsixData {
     }
 }
 
+#[cfg(target_arch = "x86_64")]
+const MSIX_DEFAULT_MSG_ADDR: u32 = 0xFEE0_0000;
+
 impl CapabilityMsixData {
     pub(super) fn new(dev: &mut PciCommonDevice, cap_ptr: u16) -> Self {
         // Get Table and PBA offset, provide functions to modify them
@@ -99,7 +103,7 @@ impl CapabilityMsixData {
         let table_size = (dev.location().read16(cap_ptr + 2) & 0b11_1111_1111) + 1;
 
         // TODO: Different architecture seems to have different, so we should set different address here.
-        let message_address = 0xFEE0_0000u32;
+        let message_address = MSIX_DEFAULT_MSG_ADDR;
         let message_upper_address = 0u32;
 
         // Set message address 0xFEE0_0000
@@ -163,18 +167,45 @@ impl CapabilityMsixData {
     }
 
     /// Enables an interrupt line, it will replace the old handle with the new handle.
-    pub fn set_interrupt_vector(&mut self, handle: IrqLine, index: u16) {
+    pub fn set_interrupt_vector(&mut self, irq: IrqLine, index: u16) {
         if index >= self.table_size {
             return;
         }
-        self.table_bar
-            .io_mem()
-            .write_once(
-                (16 * index + 8) as usize + self.table_offset,
-                &(handle.num() as u32),
-            )
-            .unwrap();
-        let old_handles = core::mem::replace(&mut self.irqs[index as usize], Some(handle));
+
+        // If interrupt remapping is enabled, then we need to change the value of the message address.
+        if has_interrupt_remapping() {
+            let mut handle = irq.inner_irq().bind_remapping_entry().unwrap().lock();
+
+            // Enable irt entry
+            let irt_entry_mut = handle.irt_entry_mut().unwrap();
+            irt_entry_mut.enable_default(irq.num() as u32);
+
+            // Use the remappable format. Bits [4:3] should always be set to 1 according to the manual.
+            let mut address = MSIX_DEFAULT_MSG_ADDR | 0b1_1000;
+
+            // Interrupt index[14:0] is on address[19:5] and interrupt index[15] is on address[2].
+            address |= (handle.index() as u32 & 0x7FFF) << 5;
+            address |= (handle.index() as u32 & 0x8000) >> 13;
+
+            self.table_bar
+                .io_mem()
+                .write_once((16 * index) as usize + self.table_offset, &address)
+                .unwrap();
+            self.table_bar
+                .io_mem()
+                .write_once((16 * index + 8) as usize + self.table_offset, &0)
+                .unwrap();
+        } else {
+            self.table_bar
+                .io_mem()
+                .write_once(
+                    (16 * index + 8) as usize + self.table_offset,
+                    &(irq.num() as u32),
+                )
+                .unwrap();
+        }
+
+        let _old_irq = core::mem::replace(&mut self.irqs[index as usize], Some(irq));
         // Enable this msix vector
         self.table_bar
             .io_mem()
diff --git a/ostd/src/trap/irq.rs b/ostd/src/trap/irq.rs
index d2f8f3393..ca735a14d 100644
--- a/ostd/src/trap/irq.rs
+++ b/ostd/src/trap/irq.rs
@@ -26,7 +26,7 @@ pub type IrqCallbackFunction = dyn Fn(&TrapFrame) + Sync + Send + 'static;
 
 pub struct IrqLine {
     irq_num: u8,
     #[allow(clippy::redundant_allocation)]
-    irq: Arc<&'static irq::IrqLine>,
+    inner_irq: Arc<&'static irq::IrqLine>,
     callbacks: Vec<IrqCallbackHandle>,
 }
 
@@ -55,7 +55,7 @@ impl IrqLine {
         // IRQ is not one of the important IRQ like cpu exception IRQ.
         Self {
             irq_num,
-            irq: unsafe { irq::IrqLine::acquire(irq_num) },
+            inner_irq: unsafe { irq::IrqLine::acquire(irq_num) },
             callbacks: Vec::new(),
         }
     }
@@ -72,20 +72,24 @@ impl IrqLine {
     where
         F: Fn(&TrapFrame) + Sync + Send + 'static,
     {
-        self.callbacks.push(self.irq.on_active(callback))
+        self.callbacks.push(self.inner_irq.on_active(callback))
     }
 
     /// Checks if there are no registered callbacks.
     pub fn is_empty(&self) -> bool {
         self.callbacks.is_empty()
     }
+
+    pub(crate) fn inner_irq(&self) -> &'static irq::IrqLine {
+        &self.inner_irq
+    }
 }
 
 impl Clone for IrqLine {
     fn clone(&self) -> Self {
         Self {
             irq_num: self.irq_num,
-            irq: self.irq.clone(),
+            inner_irq: self.inner_irq.clone(),
             callbacks: Vec::new(),
         }
     }
@@ -93,7 +97,7 @@ impl Clone for IrqLine {
 
 impl Drop for IrqLine {
     fn drop(&mut self) {
-        if Arc::strong_count(&self.irq) == 1 {
+        if Arc::strong_count(&self.inner_irq) == 1 {
             IRQ_ALLOCATOR
                 .get()
                 .unwrap()