Support Queue Invalidation in IOMMU

This commit is contained in:
Yuke Peng
2024-08-22 16:31:03 +08:00
committed by Tate, Hongliang Tian
parent c7404f8139
commit 99fdd49076
6 changed files with 223 additions and 2 deletions

View File

@ -0,0 +1,9 @@
// SPDX-License-Identifier: MPL-2.0
/// A 128-bit descriptor for the IOMMU invalidation queue that invalidates
/// the Interrupt Entry Cache (IEC).
pub struct InterruptEntryCache(pub u128);

impl InterruptEntryCache {
    /// Returns a descriptor requesting a *global* invalidation of the
    /// interrupt entry cache.
    pub fn global_invalidation() -> Self {
        // 0x4 is the IEC-invalidate descriptor type in the low bits; the
        // granularity bit is left clear, which selects global invalidation
        // (per the VT-d specification — confirm against the spec revision
        // targeted by this driver).
        const GLOBAL_INVALIDATION: u128 = 0x4;
        Self(GLOBAL_INVALIDATION)
    }
}

View File

@ -0,0 +1,33 @@
// SPDX-License-Identifier: MPL-2.0
use log::{info, warn};
use queue::Queue;
use spin::Once;
use super::registers::{ExtendedCapabilityFlags, IOMMU_REGS};
use crate::sync::SpinLock;
pub mod descriptor;
pub mod queue;
/// Initializes queued invalidation for the IOMMU.
///
/// If the hardware does not advertise the Queued Invalidation (QI)
/// extended capability, this logs a warning and leaves `QUEUE` unset.
/// Otherwise it allocates the invalidation queue, points the hardware at
/// it, and publishes it through the `QUEUE` static.
pub(super) fn init() {
    let mut iommu_regs = IOMMU_REGS.get().unwrap().lock();

    // Bail out early on hardware without queued-invalidation support.
    let supports_qi = iommu_regs
        .extended_capability()
        .flags()
        .contains(ExtendedCapabilityFlags::QI);
    if !supports_qi {
        warn!("[IOMMU] Queued invalidation not supported");
        return;
    }

    QUEUE.call_once(|| {
        let queue = Queue::new();
        // Program the hardware with the queue's base address and size
        // before making the queue visible to other code.
        iommu_regs.enable_queued_invalidation(&queue);
        SpinLock::new(queue)
    });

    info!("[IOMMU] Queued invalidation is enabled");
}

/// The global invalidation queue; set exactly once by [`init`] when the
/// hardware supports queued invalidation.
pub(super) static QUEUE: Once<SpinLock<Queue>> = Once::new();

View File

@ -0,0 +1,51 @@
// SPDX-License-Identifier: MPL-2.0
use core::mem::size_of;
use crate::{
mm::{FrameAllocOptions, Segment, VmIo, PAGE_SIZE},
prelude::Paddr,
};
/// An IOMMU invalidation queue: a ring of 128-bit descriptors shared with
/// the hardware.
pub struct Queue {
    // Physically contiguous backing memory holding the descriptors.
    segment: Segment,
    // Capacity of the queue, in 128-bit descriptor slots.
    queue_size: usize,
    // Index of the next slot to write; wraps back to 0 once it reaches
    // `queue_size` (see `append_descriptor`).
    tail: usize,
}
impl Queue {
    /// Writes a 128-bit descriptor into the next free slot.
    ///
    /// The tail index wraps around to the start of the ring once it has
    /// reached the queue capacity.
    pub fn append_descriptor(&mut self, descriptor: u128) {
        // Pick the slot to write: wrap to 0 if the tail ran off the end.
        let slot = if self.tail == self.queue_size {
            0
        } else {
            self.tail
        };
        let byte_offset = slot * size_of::<u128>();
        self.segment.write_val(byte_offset, &descriptor).unwrap();
        self.tail = slot + 1;
    }

    /// Returns the current tail index (next slot to be written).
    pub fn tail(&self) -> usize {
        self.tail
    }

    /// Returns the queue capacity, in descriptors.
    pub fn size(&self) -> usize {
        self.queue_size
    }

    /// Returns the physical address of the start of the queue memory.
    pub(crate) fn base_paddr(&self) -> Paddr {
        self.segment.start_paddr()
    }

    /// Allocates a new, empty queue backed by one page of physically
    /// contiguous memory.
    pub(super) fn new() -> Self {
        const DEFAULT_PAGES: usize = 1;

        let backing = FrameAllocOptions::new(DEFAULT_PAGES)
            .is_contiguous(true)
            .alloc_contiguous()
            .unwrap();

        Self {
            segment: backing,
            // One page holds PAGE_SIZE / 16 descriptors.
            queue_size: (DEFAULT_PAGES * PAGE_SIZE) / size_of::<u128>(),
            tail: 0,
        }
    }
}

View File

@ -4,6 +4,7 @@
 mod dma_remapping;
 mod fault;
+mod invalidate;
 mod registers;

 pub(crate) use dma_remapping::{has_dma_remapping, map, unmap};
@ -21,6 +22,7 @@ pub enum IommuError {
 pub(crate) fn init() -> Result<(), IommuError> {
     registers::init()?;
+    invalidate::init();
     dma_remapping::init();
     Ok(())

View File

@ -0,0 +1,61 @@
// SPDX-License-Identifier: MPL-2.0
//! Invalidation-related registers
use volatile::{
access::{ReadOnly, ReadWrite, WriteOnly},
Volatile,
};
use super::ExtendedCapability;
use crate::prelude::Vaddr;
/// Memory-mapped, invalidation-related registers of an IOMMU unit.
///
/// The queue and completion-event registers live at fixed offsets from the
/// IOMMU register base; the two IOTLB registers live at a hardware-reported
/// offset taken from the extended capability (see `Self::new`). Fields with
/// a leading underscore are mapped but not yet used by this driver.
#[derive(Debug)]
pub struct InvalidationRegisters {
    /// Invalidation Queue Head (base + 0x80), read-only.
    pub(super) queue_head: Volatile<&'static u64, ReadOnly>,
    /// Invalidation Queue Tail (base + 0x88).
    pub(super) queue_tail: Volatile<&'static mut u64, ReadWrite>,
    /// Invalidation Queue Address (base + 0x90).
    pub(super) queue_addr: Volatile<&'static mut u64, ReadWrite>,
    /// Invalidation Completion Status (base + 0x9C).
    pub(super) completion_status: Volatile<&'static mut u32, ReadWrite>,
    /// Invalidation Completion Event Control (base + 0xA0).
    pub(super) _completion_event_control: Volatile<&'static mut u32, ReadWrite>,
    /// Invalidation Completion Event Data (base + 0xA4).
    pub(super) _completion_event_data: Volatile<&'static mut u32, ReadWrite>,
    /// Invalidation Completion Event Address (base + 0xA8).
    pub(super) _completion_event_addr: Volatile<&'static mut u32, ReadWrite>,
    /// Invalidation Completion Event Upper Address (base + 0xAC).
    pub(super) _completion_event_upper_addr: Volatile<&'static mut u32, ReadWrite>,
    /// Invalidation Queue Error Record (base + 0xB0), read-only.
    pub(super) _queue_error_record: Volatile<&'static mut u64, ReadOnly>,
    /// Invalidate Address register (IOTLB register offset + 0x0), write-only.
    pub(super) _invalidate_address: Volatile<&'static mut u64, WriteOnly>,
    /// IOTLB Invalidate register (IOTLB register offset + 0x8).
    pub(super) _iotlb_invalidate: Volatile<&'static mut u64, ReadWrite>,
}
impl InvalidationRegisters {
    /// Create an instance from IOMMU base address.
    ///
    /// # Safety
    ///
    /// User must ensure the address is valid.
    pub(super) unsafe fn new(base_vaddr: Vaddr) -> Self {
        // Read the Extended Capability register (base + 0x10) to locate the
        // IOTLB invalidation registers: the reported offset is in units of
        // 16 bytes from the register base.
        let extended_capability: Volatile<&u64, ReadOnly> =
            Volatile::new_read_only(&*((base_vaddr + 0x10) as *const u64));
        let extend_cap = ExtendedCapability::new(extended_capability.read());
        let offset = extend_cap.iotlb_register_offset() as usize * 16;

        // The two IOTLB registers live at the hardware-reported offset:
        // Invalidate Address at +0x0 and IOTLB Invalidate at +0x8.
        let invalidate_address =
            Volatile::new_write_only(&mut *((base_vaddr + offset) as *mut u64));
        let iotlb_invalidate = Volatile::new(&mut *((base_vaddr + offset + 0x8) as *mut u64));

        // All remaining invalidation registers sit at fixed offsets from the
        // register base (0x80..=0xB0).
        Self {
            queue_head: Volatile::new_read_only(&*((base_vaddr + 0x80) as *mut u64)),
            queue_tail: Volatile::new(&mut *((base_vaddr + 0x88) as *mut u64)),
            queue_addr: Volatile::new(&mut *((base_vaddr + 0x90) as *mut u64)),
            completion_status: Volatile::new(&mut *((base_vaddr + 0x9C) as *mut u32)),
            _completion_event_control: Volatile::new(&mut *((base_vaddr + 0xA0) as *mut u32)),
            _completion_event_data: Volatile::new(&mut *((base_vaddr + 0xA4) as *mut u32)),
            _completion_event_addr: Volatile::new(&mut *((base_vaddr + 0xA8) as *mut u32)),
            _completion_event_upper_addr: Volatile::new(&mut *((base_vaddr + 0xAC) as *mut u32)),
            _queue_error_record: Volatile::new_read_only(&mut *((base_vaddr + 0xB0) as *mut u64)),
            _invalidate_address: invalidate_address,
            _iotlb_invalidate: iotlb_invalidate,
        }
    }
}

View File

@ -5,12 +5,15 @@
 mod capability;
 mod command;
 mod extended_cap;
+mod invalidation;
 mod status;

 use bit_field::BitField;
 pub use capability::Capability;
 use command::GlobalCommand;
 use extended_cap::ExtendedCapability;
+pub use extended_cap::ExtendedCapabilityFlags;
+use invalidation::InvalidationRegisters;
 use log::debug;
 use spin::Once;
 use status::GlobalStatus;
@ -19,7 +22,7 @@ use volatile::{
     Volatile,
 };

-use super::{dma_remapping::RootTable, IommuError};
+use super::{dma_remapping::RootTable, invalidate::queue::Queue, IommuError};
 use crate::{
     arch::{
         iommu::fault,
@ -62,6 +65,8 @@ pub struct IommuRegisters {
     root_table_address: Volatile<&'static mut u64, ReadWrite>,
     #[allow(dead_code)]
     context_command: Volatile<&'static mut u64, ReadWrite>,
+    invalidate: InvalidationRegisters,
 }

 impl IommuRegisters {
@ -106,6 +111,65 @@ impl IommuRegisters {
         while !self.global_status().contains(GlobalStatus::TES) {}
     }
/// Enables queued invalidation, using `queue` as the invalidation queue.
///
/// Programs the Invalidation Queue Tail and Invalidation Queue Address
/// registers, then sets the QIE bit in the global command register and
/// spins until the hardware reports QIES in the global status register.
///
/// The value written to the queue address register packs, per the VT-d
/// layout: the page-aligned physical base address (upper bits), the
/// descriptor width in bit 11 (0 = 128-bit), and the encoded queue size
/// in the low bits.
///
/// # Panics
///
/// Panics if the hardware lacks the QI capability, or if the queue size is
/// not a power of two of at least 256 entries (for 128-bit descriptors).
pub(super) fn enable_queued_invalidation(&mut self, queue: &Queue) {
    // Queued invalidation must be advertised before it can be enabled.
    assert!(self
        .extended_capability()
        .flags()
        .contains(ExtendedCapabilityFlags::QI));

    // Start with an empty queue: tail = 0.
    self.invalidate.queue_tail.write(0);

    let mut write_value = queue.base_paddr() as u64;
    // By default, we set descriptor width to 128-bit(0)
    let descriptor_width = 0b0;
    write_value |= descriptor_width << 11;

    // Encode the queue size: the register stores `s` such that the number
    // of entries is 2^(s + 8) for 128-bit descriptors (2^(s + 7) for
    // 256-bit ones), so `s` is recovered by shifting out the fixed factor
    // and then counting the remaining power of two.
    let write_queue_size = {
        let mut queue_size = queue.size();
        assert!(queue_size.is_power_of_two());
        let mut write_queue_size = 0;

        if descriptor_width == 0 {
            // 2^(write_queue_size + 8) = number of entries = queue_size
            assert!(queue_size >= (1 << 8));
            queue_size >>= 8;
        } else {
            // 2^(write_queue_size + 7) = number of entries = queue_size
            assert!(queue_size >= (1 << 7));
            queue_size >>= 7;
        };

        // Count the remaining trailing zero bits of the power of two.
        while queue_size & 0b1 == 0 {
            queue_size >>= 1;
            write_queue_size += 1;
        }
        write_queue_size
    };
    write_value |= write_queue_size;

    // Program base address + descriptor width + size in one write.
    self.invalidate.queue_addr.write(write_value);

    // Enable Queued invalidation
    self.write_global_command(GlobalCommand::QIE, true);
    // Busy-wait until the hardware acknowledges via the QIES status bit.
    while !self.global_status().contains(GlobalStatus::QIES) {}
}
/// Performs a global invalidation of the context-cache and the IOTLB
/// through the legacy register-based interface.
///
/// Each invalidation is requested by writing the corresponding command
/// register, and this function busy-waits until the hardware clears the
/// request bit, signalling completion.
fn global_invalidation(&mut self) {
    // Set ICC(63) to 1 to request invalidation and CIRG(62:61) to 01 to
    // indicate a global context-cache invalidation request.
    self.context_command.write(0xA000_0000_0000_0000);

    // Wait for the context-cache invalidation to complete (ICC cleared).
    let mut value = 0x8000_0000_0000_0000;
    while (value & 0x8000_0000_0000_0000) != 0 {
        value = self.context_command.read();
    }

    // Set IVT(63) to 1 to request IOTLB invalidation and IIRG(61:60) to 01
    // to indicate a global invalidation request.
    self.invalidate
        ._iotlb_invalidate
        .write(0x9000_0000_0000_0000);

    // Wait for the IOTLB invalidation to complete (IVT cleared). The
    // previous code returned without confirming completion, so stale IOTLB
    // entries could still be in use when this function returned.
    let mut value = 0x8000_0000_0000_0000;
    while (value & 0x8000_0000_0000_0000) != 0 {
        value = self.invalidate._iotlb_invalidate.read();
    }
}
     /// Write value to the global command register. This function will not wait until the command
     /// is serviced. User need to check the global status register.
     fn write_global_command(&mut self, command: GlobalCommand, enable: bool) {
@ -156,6 +220,7 @@ impl IommuRegisters {
             global_status,
             root_table_address,
             context_command,
+            invalidate: InvalidationRegisters::new(vaddr),
         }
     };
@ -170,7 +235,7 @@ impl IommuRegisters {
     }
 }

-pub(super) static IOMMU_REGS: Once<SpinLock<IommuRegisters>> = Once::new();
+pub(super) static IOMMU_REGS: Once<SpinLock<IommuRegisters, LocalIrqDisabled>> = Once::new();

 pub(super) fn init() -> Result<(), IommuError> {
     let iommu_regs = IommuRegisters::new().ok_or(IommuError::NoIommu)?;