Support coherent dma mappings

This commit is contained in:
parent ce5730287e
commit ceaba95fa0
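For illustration (not part of the diff below): a minimal sketch of how a device driver would use the coherent DMA API that this commit adds under framework/jinux-frame/src/vm/dma. All type and method names come from this commit; the function itself is hypothetical.

    use jinux_frame::vm::{DmaCoherent, HasDaddr, VmAllocOptions, VmIo};

    fn alloc_ring_buffer() -> DmaCoherent {
        // Allocate one physically contiguous frame.
        let segment = VmAllocOptions::new(1)
            .is_contiguous(true)
            .alloc_contiguous()
            .unwrap();
        // `true`: the device is cache coherent, so the kernel mapping stays cached.
        let buf = DmaCoherent::map(segment, true).unwrap();
        // The CPU accesses the buffer through `VmIo` ...
        buf.write_bytes(0, &[0u8; 16]).unwrap();
        // ... while the device is programmed with the device address.
        let _daddr = buf.daddr();
        buf
    }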
@@ -7,8 +7,9 @@ use pod::Pod;
 use crate::{
     bus::pci::PciDeviceLocation,
     vm::{
-        page_table::{PageTableConfig, PageTableError},
-        Paddr, PageTable, Vaddr, VmAllocOptions, VmFrame, VmIo,
+        dma::Daddr,
+        page_table::{DeviceMode, PageTableConfig, PageTableError},
+        Paddr, PageTable, VmAllocOptions, VmFrame, VmIo,
     },
 };

@@ -53,18 +54,22 @@ impl RootTable {
         }
     }

-    pub fn map(
+    ///
+    /// # Safety
+    ///
+    /// User must ensure the given paddr is a valid one.
+    pub unsafe fn map(
         &mut self,
         device: PciDeviceLocation,
-        vaddr: Vaddr,
-        frame: &VmFrame,
+        daddr: Daddr,
+        paddr: Paddr,
     ) -> Result<(), ContextTableError> {
         if device.device >= 32 || device.function >= 8 {
             return Err(ContextTableError::InvalidDeviceId);
         }

         self.get_or_create_context_table(device)
-            .map(device, vaddr, frame)?;
+            .map(device, daddr, paddr)?;

         Ok(())
     }
@@ -72,14 +77,14 @@ impl RootTable {
     pub fn unmap(
         &mut self,
         device: PciDeviceLocation,
-        vaddr: Vaddr,
+        daddr: Daddr,
     ) -> Result<(), ContextTableError> {
         if device.device >= 32 || device.function >= 8 {
             return Err(ContextTableError::InvalidDeviceId);
         }

         self.get_or_create_context_table(device)
-            .unmap(device, vaddr)?;
+            .unmap(device, daddr)?;

         Ok(())
     }
@@ -117,7 +122,7 @@ impl RootTable {
     pub fn specify_device_page_table(
         &mut self,
         device_id: PciDeviceLocation,
-        page_table: PageTable<PageTableEntry>,
+        page_table: PageTable<PageTableEntry, DeviceMode>,
     ) {
         let context_table = self.get_or_create_context_table(device_id);

@@ -227,7 +232,7 @@ pub enum AddressWidth {
 pub struct ContextTable {
     /// Total 32 devices, each device has 8 functions.
     entries_frame: VmFrame,
-    page_tables: BTreeMap<Paddr, PageTable<PageTableEntry>>,
+    page_tables: BTreeMap<Paddr, PageTable<PageTableEntry, DeviceMode>>,
 }

 impl ContextTable {
@@ -245,7 +250,7 @@ impl ContextTable {
     fn get_or_create_page_table(
         &mut self,
         device: PciDeviceLocation,
-    ) -> &mut PageTable<PageTableEntry> {
+    ) -> &mut PageTable<PageTableEntry, DeviceMode> {
         let bus_entry = self
             .entries_frame
             .read_val::<ContextEntry>(
@@ -254,9 +259,10 @@ impl ContextTable {
             .unwrap();

         if !bus_entry.is_present() {
-            let table: PageTable<PageTableEntry> = PageTable::new(PageTableConfig {
-                address_width: crate::vm::page_table::AddressWidth::Level3,
-            });
+            let table: PageTable<PageTableEntry, DeviceMode> =
+                PageTable::<PageTableEntry, DeviceMode>::new(PageTableConfig {
+                    address_width: crate::vm::page_table::AddressWidth::Level3,
+                });
             let address = table.root_paddr();
             self.page_tables.insert(address, table);
             let entry = ContextEntry(address as u128 | 3 | 0x1_0000_0000_0000_0000);
@@ -275,31 +281,35 @@ impl ContextTable {
         }
     }

-    fn map(
+    ///
+    /// # Safety
+    ///
+    /// User must ensure the given paddr is a valid one.
+    unsafe fn map(
         &mut self,
         device: PciDeviceLocation,
-        vaddr: Vaddr,
-        frame: &VmFrame,
+        daddr: Daddr,
+        paddr: Paddr,
     ) -> Result<(), ContextTableError> {
         if device.device >= 32 || device.function >= 8 {
             return Err(ContextTableError::InvalidDeviceId);
         }
         self.get_or_create_page_table(device)
-            .map(
-                vaddr,
-                frame,
+            .map_with_paddr(
+                daddr,
+                paddr,
                 PageTableFlags::WRITABLE | PageTableFlags::READABLE | PageTableFlags::LAST_PAGE,
             )
             .map_err(ContextTableError::ModificationError)
     }

-    fn unmap(&mut self, device: PciDeviceLocation, vaddr: Vaddr) -> Result<(), ContextTableError> {
+    fn unmap(&mut self, device: PciDeviceLocation, daddr: Daddr) -> Result<(), ContextTableError> {
         if device.device >= 32 || device.function >= 8 {
             return Err(ContextTableError::InvalidDeviceId);
         }

         self.get_or_create_page_table(device)
-            .unmap(vaddr)
+            .unmap(daddr)
             .map_err(ContextTableError::ModificationError)
     }
 }
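`RootTable::map` (and the private `ContextTable::map` below it) is now `unsafe` because it installs a caller-chosen physical address into a device's IO page table. A hypothetical call site, using the `PciDeviceLocation::zero()` helper added later in this diff (`root_table`, `daddr`, and `paddr` are illustrative variables, not code from the commit):

    // Safety: `paddr` must point to a valid frame owned by the caller,
    // e.g. one backing a `VmSegment`; otherwise the device could be given
    // access to arbitrary kernel memory.
    unsafe {
        root_table
            .map(PciDeviceLocation::zero(), daddr, paddr)
            .unwrap();
    }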
@@ -3,16 +3,17 @@ mod fault;
 mod remapping;
 mod second_stage;

-use crate::{sync::Mutex, vm::VmFrame};
 use log::info;
 use spin::Once;

 use crate::{
     arch::iommu::{context_table::RootTable, second_stage::PageTableEntry},
     bus::pci::PciDeviceLocation,
+    sync::Mutex,
     vm::{
-        page_table::{PageTableConfig, PageTableError},
-        PageTable, Vaddr,
+        dma::Daddr,
+        page_table::{DeviceMode, PageTableConfig, PageTableError},
+        Paddr, PageTable,
     },
 };

@@ -22,27 +23,18 @@ pub enum IommuError {
     ModificationError(PageTableError),
 }

 // FIXME: Perform map operations by obtaining ownership of a VmFrame.
 ///
 /// # Safety
 ///
 /// Mapping an incorrect address may lead to a kernel data leak.
-pub(crate) unsafe fn map(vaddr: Vaddr, frame: &VmFrame) -> Result<(), IommuError> {
+pub(crate) unsafe fn map(daddr: Daddr, paddr: Paddr) -> Result<(), IommuError> {
     let Some(table) = PAGE_TABLE.get() else {
         return Err(IommuError::NoIommu);
     };
-    // The page table of all devices is the same. So we can use any device ID.
     table
         .lock()
-        .map(
-            PciDeviceLocation {
-                bus: 0,
-                device: 0,
-                function: 0,
-            },
-            vaddr,
-            frame,
-        )
+        .map(PciDeviceLocation::zero(), daddr, paddr)
         .map_err(|err| match err {
             context_table::ContextTableError::InvalidDeviceId => unreachable!(),
             context_table::ContextTableError::ModificationError(err) => {
@@ -51,21 +43,14 @@ pub(crate) unsafe fn map(vaddr: Vaddr, frame: &VmFrame) -> Result<(), IommuError
         })
 }

-pub(crate) fn unmap(vaddr: Vaddr) -> Result<(), IommuError> {
+pub(crate) fn unmap(daddr: Daddr) -> Result<(), IommuError> {
     let Some(table) = PAGE_TABLE.get() else {
         return Err(IommuError::NoIommu);
     };
     // The page table of all devices is the same. So we can use any device ID.
     table
         .lock()
-        .unmap(
-            PciDeviceLocation {
-                bus: 0,
-                device: 0,
-                function: 0,
-            },
-            vaddr,
-        )
+        .unmap(PciDeviceLocation::zero(), daddr)
         .map_err(|err| match err {
             context_table::ContextTableError::InvalidDeviceId => unreachable!(),
             context_table::ContextTableError::ModificationError(err) => {
@@ -77,9 +62,10 @@ pub(crate) fn unmap(vaddr: Vaddr) -> Result<(), IommuError> {
 pub(crate) fn init() -> Result<(), IommuError> {
     let mut root_table = RootTable::new();
     // For all PCI Device, use the same page table.
-    let page_table: PageTable<PageTableEntry> = PageTable::new(PageTableConfig {
-        address_width: crate::vm::page_table::AddressWidth::Level3,
-    });
+    let page_table: PageTable<PageTableEntry, DeviceMode> =
+        PageTable::<PageTableEntry, DeviceMode>::new(PageTableConfig {
+            address_width: crate::vm::page_table::AddressWidth::Level3,
+        });
     for table in PciDeviceLocation::all() {
         root_table.specify_device_page_table(table, page_table.clone())
     }
@@ -89,4 +75,8 @@ pub(crate) fn init() -> Result<(), IommuError> {
     Ok(())
 }

+pub(crate) fn has_iommu() -> bool {
+    PAGE_TABLE.get().is_some()
+}
+
 static PAGE_TABLE: Once<Mutex<RootTable>> = Once::new();
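With `has_iommu()` exposed, callers no longer need to attempt a mapping and then swallow `IommuError::NoIommu`; they can pick the DMA strategy up front, which is what the `dma_type()` dispatch added later in this diff does. A condensed, single-page sketch of that pattern (`start_paddr` is an illustrative variable; `DmaType`, `dma_type`, and `Daddr` come from the new `vm/dma` module below):

    let start_daddr = match dma_type() {
        // No IOMMU: the device addresses memory by physical address.
        DmaType::Direct => start_paddr as Daddr,
        // IOMMU present: install a Daddr -> Paddr mapping; this commit
        // chooses identity mappings, so the Daddr equals the Paddr.
        DmaType::Iommu => {
            // Safety: `start_paddr` refers to a frame owned by the caller.
            unsafe { iommu::map(start_paddr as Daddr, start_paddr).unwrap() };
            start_paddr as Daddr
        }
        DmaType::Tdx => todo!(),
    };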
@@ -68,7 +68,7 @@ impl PageTableFlagsTrait for PageTableFlags {
     }

     fn is_present(&self) -> bool {
-        true
+        self.contains(Self::WRITABLE) || self.contains(Self::READABLE)
     }

     fn writable(&self) -> bool {
@@ -83,6 +83,16 @@ impl PciDeviceLocation {
             }
         })
     }
+
+    /// The page table of all devices is the same. So we can use any device ID.
+    /// FIXME: distinguish different device id.
+    pub fn zero() -> Self {
+        Self {
+            bus: 0,
+            device: 0,
+            function: 0,
+        }
+    }
 }

 impl PciDeviceLocation {
framework/jinux-frame/src/vm/dma/dma_coherent.rs (new file, 160 lines)

use alloc::sync::Arc;
use core::ops::Deref;

use crate::arch::{iommu, mm::PageTableFlags};
use crate::vm::{
    dma::{dma_type, Daddr, DmaType},
    paddr_to_vaddr,
    page_table::KERNEL_PAGE_TABLE,
    HasPaddr, Paddr, VmIo, VmReader, VmSegment, VmWriter, PAGE_SIZE,
};

use super::{check_and_insert_dma_mapping, remove_dma_mapping, DmaError, HasDaddr};

/// A coherent (or consistent) DMA mapping,
/// which guarantees that the device and the CPU can
/// access the data in parallel.
///
/// The mapping will be destroyed automatically when
/// the object is dropped.
#[derive(Debug, Clone)]
pub struct DmaCoherent {
    inner: Arc<DmaCoherentInner>,
}

#[derive(Debug)]
struct DmaCoherentInner {
    vm_segment: VmSegment,
    start_daddr: Daddr,
    is_cache_coherent: bool,
}

impl DmaCoherent {
    /// Create a coherent DMA mapping backed by `vm_segment`.
    ///
    /// The `is_cache_coherent` argument specifies whether
    /// the target device that the DMA mapping is prepared for
    /// can access the main memory in a CPU cache coherent way
    /// or not.
    ///
    /// The method fails if any part of the given VM segment
    /// already belongs to a DMA mapping.
    pub fn map(vm_segment: VmSegment, is_cache_coherent: bool) -> Result<Self, DmaError> {
        let frame_count = vm_segment.nframes();
        let start_paddr = vm_segment.start_paddr();
        if !check_and_insert_dma_mapping(start_paddr, frame_count) {
            return Err(DmaError::AlreadyMapped);
        }
        if !is_cache_coherent {
            let mut page_table = KERNEL_PAGE_TABLE.get().unwrap().lock();
            for i in 0..frame_count {
                let paddr = start_paddr + (i * PAGE_SIZE);
                let vaddr = paddr_to_vaddr(paddr);
                let flags = page_table.flags(vaddr).unwrap();
                // Safety: the address is in the range of `vm_segment`.
                unsafe {
                    page_table
                        .protect(vaddr, flags.union(PageTableFlags::NO_CACHE))
                        .unwrap();
                }
            }
        }
        let start_daddr = match dma_type() {
            DmaType::Direct => start_paddr as Daddr,
            DmaType::Iommu => {
                for i in 0..frame_count {
                    let paddr = start_paddr + (i * PAGE_SIZE);
                    // Safety: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `vm_segment`.
                    unsafe {
                        iommu::map(paddr as Daddr, paddr).unwrap();
                    }
                }
                start_paddr as Daddr
            }
            DmaType::Tdx => {
                todo!()
            }
        };
        Ok(Self {
            inner: Arc::new(DmaCoherentInner {
                vm_segment,
                start_daddr,
                is_cache_coherent,
            }),
        })
    }
}

impl HasDaddr for DmaCoherent {
    fn daddr(&self) -> Daddr {
        self.inner.start_daddr
    }
}

impl Deref for DmaCoherent {
    type Target = VmSegment;
    fn deref(&self) -> &Self::Target {
        &self.inner.vm_segment
    }
}

impl Drop for DmaCoherentInner {
    fn drop(&mut self) {
        let frame_count = self.vm_segment.nframes();
        let start_paddr = self.vm_segment.start_paddr();
        match dma_type() {
            DmaType::Direct => {}
            DmaType::Iommu => {
                for i in 0..frame_count {
                    let paddr = start_paddr + (i * PAGE_SIZE);
                    iommu::unmap(paddr).unwrap();
                }
            }
            DmaType::Tdx => {
                todo!();
            }
        }
        if !self.is_cache_coherent {
            let mut page_table = KERNEL_PAGE_TABLE.get().unwrap().lock();
            for i in 0..frame_count {
                let paddr = start_paddr + (i * PAGE_SIZE);
                let vaddr = paddr_to_vaddr(paddr);
                let mut flags = page_table.flags(vaddr).unwrap();
                flags.remove(PageTableFlags::NO_CACHE);
                // Safety: the address is in the range of `vm_segment`.
                unsafe {
                    page_table.protect(vaddr, flags).unwrap();
                }
            }
        }
        remove_dma_mapping(start_paddr, frame_count);
    }
}

impl VmIo for DmaCoherent {
    fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> crate::prelude::Result<()> {
        self.inner.vm_segment.read_bytes(offset, buf)
    }

    fn write_bytes(&self, offset: usize, buf: &[u8]) -> crate::prelude::Result<()> {
        self.inner.vm_segment.write_bytes(offset, buf)
    }
}

impl<'a> DmaCoherent {
    /// Returns a reader to read data from it.
    pub fn reader(&'a self) -> VmReader<'a> {
        self.inner.vm_segment.reader()
    }

    /// Returns a writer to write data into it.
    pub fn writer(&'a self) -> VmWriter<'a> {
        self.inner.vm_segment.writer()
    }
}

impl HasPaddr for DmaCoherent {
    fn paddr(&self) -> Paddr {
        self.inner.vm_segment.start_paddr()
    }
}
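A usage sketch for a device that is not cache coherent; apart from the flag value it mirrors how the virtio code later in this diff uses the API (the buffer size and contents are illustrative):

    use jinux_frame::vm::{DmaCoherent, HasDaddr, VmAllocOptions, VmIo};

    let segment = VmAllocOptions::new(4)
        .is_contiguous(true)
        .alloc_contiguous()
        .unwrap();
    // `false`: `map` remaps the backing pages as NO_CACHE in the kernel page
    // table, so CPU stores become visible to the device without cache flushes.
    let dma_buf = DmaCoherent::map(segment, false).unwrap();
    dma_buf.write_bytes(0, &[0xff; 8]).unwrap();
    let daddr = dma_buf.daddr(); // program this address into the device
    // Dropping the last handle removes the IOMMU mappings (if any), restores
    // cacheability, and releases the pages from the global DMA mapping set.
    drop(dma_buf);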
framework/jinux-frame/src/vm/dma/mod.rs (new file, 83 lines)

mod dma_coherent;

use alloc::collections::BTreeSet;
use spin::Once;

use crate::{arch::iommu::has_iommu, config::PAGE_SIZE, sync::SpinLock};

use super::Paddr;

pub use dma_coherent::DmaCoherent;

/// If a device performs DMA to read or write system
/// memory, the addresses used by the device are device addresses.
/// Daddr can distinguish the address space used by cpu side and
/// the address space used by device side.
pub type Daddr = usize;

fn has_tdx() -> bool {
    // FIXME: Support TDX
    false
}

#[derive(PartialEq)]
pub enum DmaType {
    Direct,
    Iommu,
    Tdx,
}

#[derive(Debug)]
pub enum DmaError {
    InvalidArgs,
    AlreadyMapped,
}

pub trait HasDaddr {
    /// Get the base address of the mapping in the
    /// device address space.
    fn daddr(&self) -> Daddr;
}

/// Set of all physical addresses with dma mapping.
static DMA_MAPPING_SET: Once<SpinLock<BTreeSet<Paddr>>> = Once::new();

pub fn dma_type() -> DmaType {
    if has_iommu() {
        DmaType::Iommu
    } else if has_tdx() {
        return DmaType::Tdx;
    } else {
        return DmaType::Direct;
    }
}

pub fn init() {
    DMA_MAPPING_SET.call_once(|| SpinLock::new(BTreeSet::new()));
}

/// Check whether the physical addresses has dma mapping.
/// Fail if they have been mapped, otherwise insert them.
fn check_and_insert_dma_mapping(start_paddr: Paddr, num_pages: usize) -> bool {
    let mut mapping_set = DMA_MAPPING_SET.get().unwrap().lock_irq_disabled();
    for i in 0..num_pages {
        let paddr = start_paddr + (i * PAGE_SIZE);
        if mapping_set.contains(&paddr) {
            return false;
        }
    }
    for i in 0..num_pages {
        let paddr = start_paddr + (i * PAGE_SIZE);
        mapping_set.insert(paddr);
    }
    true
}

/// Remove a physical address from the dma mapping set.
fn remove_dma_mapping(start_paddr: Paddr, num_pages: usize) {
    let mut mapping_set = DMA_MAPPING_SET.get().unwrap().lock_irq_disabled();
    for i in 0..num_pages {
        let paddr = start_paddr + (i * PAGE_SIZE);
        mapping_set.remove(&paddr);
    }
}
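The `DMA_MAPPING_SET` bookkeeping above guarantees that a physical page belongs to at most one DMA mapping at a time. A minimal sketch of the intended pairing, usable only inside this `dma` module since the helpers are private (`start_paddr` and `num_pages` are illustrative and assumed page-granular):

    // Reserving the range succeeds only if none of its pages is mapped yet ...
    assert!(check_and_insert_dma_mapping(start_paddr, num_pages));
    // ... so a second, overlapping reservation fails.
    assert!(!check_and_insert_dma_mapping(start_paddr, 1));
    // Dropping the mapping (e.g. a `DmaCoherent`) releases the reservation.
    remove_dma_mapping(start_paddr, num_pages);
    assert!(check_and_insert_dma_mapping(start_paddr, num_pages));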
@@ -5,7 +5,7 @@ use core::{
     ops::{BitAnd, BitOr, Not, Range},
 };

-use crate::{arch::iommu, config::PAGE_SIZE, prelude::*, Error};
+use crate::{config::PAGE_SIZE, prelude::*, Error};

 use super::{frame_allocator, HasPaddr};
 use super::{Paddr, VmIo};
@@ -172,7 +172,6 @@ impl<'a> Iterator for VmFrameVecIter<'a> {
 bitflags::bitflags! {
     pub(crate) struct VmFrameFlags : usize{
         const NEED_DEALLOC = 1 << 63;
-        const CAN_DMA = 1 << 62;
     }
 }

@@ -233,14 +232,6 @@ impl VmFrame {
         unsafe { core::ptr::write_bytes(self.as_mut_ptr(), 0, PAGE_SIZE) }
     }

-    /// Returns whether the page frame is accessible by DMA.
-    ///
-    /// In a TEE environment, DMAable pages are untrusted pages shared with
-    /// the VMM.
-    pub fn can_dma(&self) -> bool {
-        (*self.frame_index & VmFrameFlags::CAN_DMA.bits()) != 0
-    }
-
     fn need_dealloc(&self) -> bool {
         (*self.frame_index & VmFrameFlags::NEED_DEALLOC.bits()) != 0
     }
@@ -306,17 +297,6 @@ impl VmIo for VmFrame {
 impl Drop for VmFrame {
     fn drop(&mut self) {
         if self.need_dealloc() && Arc::strong_count(&self.frame_index) == 1 {
-            if self.can_dma() {
-                if let Err(err) = iommu::unmap(self.start_paddr()) {
-                    match err {
-                        // do nothing
-                        iommu::IommuError::NoIommu => {}
-                        iommu::IommuError::ModificationError(err) => {
-                            panic!("iommu map error:{:?}", err)
-                        }
-                    }
-                }
-            }
             // Safety: the frame index is valid.
             unsafe {
                 frame_allocator::dealloc_single(self.frame_index());
@@ -442,14 +422,6 @@ impl VmSegment {
         unsafe { core::ptr::write_bytes(self.as_mut_ptr(), 0, self.nbytes()) }
     }

-    /// Returns whether the page frames is accessible by DMA.
-    ///
-    /// In a TEE environment, DMAable pages are untrusted pages shared with
-    /// the VMM.
-    pub fn can_dma(&self) -> bool {
-        (self.inner.start_frame_index & VmFrameFlags::CAN_DMA.bits()) != 0
-    }
-
     fn need_dealloc(&self) -> bool {
         (self.inner.start_frame_index & VmFrameFlags::NEED_DEALLOC.bits()) != 0
     }
@@ -504,17 +476,6 @@ impl VmIo for VmSegment {
 impl Drop for VmSegment {
     fn drop(&mut self) {
         if self.need_dealloc() && Arc::strong_count(&self.inner) == 1 {
-            if self.can_dma() {
-                if let Err(err) = iommu::unmap(self.inner.start_paddr()) {
-                    match err {
-                        // do nothing
-                        iommu::IommuError::NoIommu => {}
-                        iommu::IommuError::ModificationError(err) => {
-                            panic!("iommu map error:{:?}", err)
-                        }
-                    }
-                }
-            }
             // Safety: the range of contiguous page frames is valid.
             unsafe {
                 frame_allocator::dealloc_contiguous(
@@ -1,4 +1,4 @@
-use super::page_table::{PageTable, PageTableConfig};
+use super::page_table::{PageTable, PageTableConfig, UserMode};
 use crate::{
     arch::mm::{PageTableEntry, PageTableFlags},
     config::{PAGE_SIZE, PHYS_OFFSET},
@@ -171,7 +171,7 @@ impl MemorySet {
     }

     pub fn new() -> Self {
-        let mut page_table = PageTable::new(PageTableConfig {
+        let mut page_table = PageTable::<PageTableEntry, UserMode>::new(PageTableConfig {
             address_width: super::page_table::AddressWidth::Level4,
         });
         let mapped_pte = crate::arch::mm::ALL_MAPPED_PTE.lock();
@@ -6,6 +6,7 @@ pub type Vaddr = usize;
 /// Physical addresses.
 pub type Paddr = usize;

+pub(crate) mod dma;
 mod frame;
 mod frame_allocator;
 pub(crate) mod heap_allocator;
@@ -18,6 +19,7 @@ mod space;

 use crate::config::{KERNEL_OFFSET, PAGE_SIZE, PHYS_OFFSET};

+pub use self::dma::{DmaCoherent, HasDaddr};
 pub use self::frame::{VmFrame, VmFrameVec, VmFrameVecIter, VmReader, VmSegment, VmWriter};
 pub use self::io::VmIo;
 pub use self::options::VmAllocOptions;
@@ -66,6 +68,7 @@ pub(crate) fn init() {
     let memory_regions = crate::boot::memory_regions().to_owned();
     frame_allocator::init(&memory_regions);
     page_table::init();
+    dma::init();

     let mut framebuffer_regions = Vec::new();
     for i in memory_regions.iter() {
@@ -1,4 +1,4 @@
-use crate::{arch::iommu, prelude::*, Error};
+use crate::{prelude::*, Error};

 use super::{frame::VmFrameFlags, frame_allocator, VmFrame, VmFrameVec, VmSegment};

@@ -13,7 +13,6 @@ pub struct VmAllocOptions {
     nframes: usize,
     is_contiguous: bool,
     uninit: bool,
-    can_dma: bool,
 }

 impl VmAllocOptions {
@@ -23,7 +22,6 @@ impl VmAllocOptions {
             nframes,
             is_contiguous: false,
             uninit: false,
-            can_dma: false,
         }
     }

@@ -46,16 +44,6 @@ impl VmAllocOptions {
         self
     }

-    /// Sets whether the pages can be accessed by devices through
-    /// Direct Memory Access (DMA).
-    ///
-    /// In a TEE environment, DMAable pages are untrusted pages shared with
-    /// the VMM.
-    pub fn can_dma(&mut self, can_dma: bool) -> &mut Self {
-        self.can_dma = can_dma;
-        self
-    }
-
     /// Allocate a collection of page frames according to the given options.
     pub fn alloc(&self) -> Result<VmFrameVec> {
         let flags = self.flags();
@@ -68,12 +56,6 @@ impl VmAllocOptions {
             }
             VmFrameVec(frame_list)
         };
-        if self.can_dma {
-            for frame in frames.0.iter() {
-                // Safety: the frame is controlled by frame allocator
-                unsafe { map_frame(frame) };
-            }
-        }
         if !self.uninit {
             frames.zero();
         }
@@ -88,10 +70,6 @@ impl VmAllocOptions {
         }

         let frame = frame_allocator::alloc_single(self.flags()).ok_or(Error::NoMemory)?;
-        if self.can_dma {
-            // Safety: the frame is controlled by frame allocator
-            unsafe { map_frame(&frame) };
-        }
         if !self.uninit {
             frame.zero();
         }
@@ -109,10 +87,6 @@ impl VmAllocOptions {

         let segment =
             frame_allocator::alloc_contiguous(self.nframes, self.flags()).ok_or(Error::NoMemory)?;
-        if self.can_dma {
-            // Safety: the segment is controlled by frame allocator
-            unsafe { map_segment(&segment) };
-        }
         if !self.uninit {
             segment.zero();
         }
@@ -121,39 +95,6 @@ impl VmAllocOptions {
     }

     fn flags(&self) -> VmFrameFlags {
-        let mut flags = VmFrameFlags::empty();
-        if self.can_dma {
-            flags.insert(VmFrameFlags::CAN_DMA);
-        }
-        flags
+        VmFrameFlags::empty()
     }
 }

-/// Iommu map for the `VmFrame`.
-///
-/// # Safety
-///
-/// The address should be controlled by frame allocator.
-unsafe fn map_frame(frame: &VmFrame) {
-    let Err(err) = iommu::map(frame.start_paddr(), frame) else {
-        return;
-    };
-
-    match err {
-        // do nothing
-        iommu::IommuError::NoIommu => {}
-        iommu::IommuError::ModificationError(err) => {
-            panic!("iommu map error:{:?}", err)
-        }
-    }
-}
-
-/// Iommu map for the `VmSegment`.
-///
-/// # Safety
-///
-/// The address should be controlled by frame allocator.
-unsafe fn map_segment(segment: &VmSegment) {
-    // TODO: Support to map a VmSegment.
-    panic!("VmSegment do not support DMA");
-}
@@ -113,6 +113,11 @@ pub struct UserMode {}
 #[derive(Clone)]
 pub struct KernelMode {}

+/// The page table used by iommu maps the device address
+/// space to the physical address space.
+#[derive(Clone)]
+pub struct DeviceMode {}
+
 #[derive(Clone, Debug)]
 pub struct PageTable<T: PageTableEntryTrait, M = UserMode> {
     root_paddr: Paddr,
@@ -215,6 +220,38 @@ impl<T: PageTableEntryTrait> PageTable<T, KernelMode> {
     }
 }

+impl<T: PageTableEntryTrait> PageTable<T, DeviceMode> {
+    pub fn new(config: PageTableConfig) -> Self {
+        let root_frame = VmAllocOptions::new(1).alloc_single().unwrap();
+        Self {
+            root_paddr: root_frame.start_paddr(),
+            tables: vec![root_frame],
+            config,
+            _phantom: PhantomData,
+        }
+    }
+
+    /// Mapping directly from a virtual address to a physical address.
+    /// The virtual address should be in the device address space.
+    ///
+    /// # Safety
+    ///
+    /// User must ensure the given paddr is a valid one (e.g. from the VmSegment).
+    pub unsafe fn map_with_paddr(
+        &mut self,
+        vaddr: Vaddr,
+        paddr: Paddr,
+        flags: T::F,
+    ) -> Result<(), PageTableError> {
+        self.do_map(vaddr, paddr, flags)
+    }
+
+    pub fn unmap(&mut self, vaddr: Vaddr) -> Result<(), PageTableError> {
+        // Safety: the `vaddr` is in the device address space.
+        unsafe { self.do_unmap(vaddr) }
+    }
+}
+
 impl<T: PageTableEntryTrait, M> PageTable<T, M> {
     /// Add a new mapping directly in the root page table.
     ///
@@ -344,6 +381,11 @@ impl<T: PageTableEntryTrait, M> PageTable<T, M> {
         Ok(old_flags)
     }

+    pub fn flags(&mut self, vaddr: Vaddr) -> Option<T::F> {
+        let last_entry = self.page_walk(vaddr, false)?;
+        Some(last_entry.flags())
+    }
+
     pub fn root_paddr(&self) -> Paddr {
         self.root_paddr
     }
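A minimal sketch of driving the new `DeviceMode` page table directly, assuming the IOMMU's second-stage `PageTableEntry` and `PageTableFlags` types used by the context table earlier in this diff (`paddr` is an illustrative, caller-owned physical address and the sketch is a fragment, not a complete function):

    let mut pt: PageTable<PageTableEntry, DeviceMode> =
        PageTable::<PageTableEntry, DeviceMode>::new(PageTableConfig {
            address_width: AddressWidth::Level3,
        });
    // Map device address 0x4000 to the caller-provided physical page.
    // Safety: `paddr` must refer to a valid frame owned by the caller.
    unsafe {
        pt.map_with_paddr(
            0x4000,
            paddr,
            PageTableFlags::WRITABLE | PageTableFlags::READABLE | PageTableFlags::LAST_PAGE,
        )
        .unwrap();
    }
    // Tear the mapping down again; no unsafe is needed because only the
    // device address space is affected.
    pt.unmap(0x4000).unwrap();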
@@ -11,7 +11,7 @@ use core::{
 use jinux_frame::{
     io_mem::IoMem,
     offset_of,
-    vm::{HasPaddr, VmAllocOptions, VmFrame},
+    vm::{DmaCoherent, VmAllocOptions},
 };
 use jinux_rights::{Dup, TRightSet, TRights, Write};
 use jinux_util::{field_ptr, safe_ptr::SafePtr};
@@ -33,11 +33,11 @@ pub enum QueueError {
 #[derive(Debug)]
 pub struct VirtQueue {
     /// Descriptor table
-    descs: Vec<SafePtr<Descriptor, VmFrame>>,
+    descs: Vec<SafePtr<Descriptor, DmaCoherent>>,
     /// Available ring
-    avail: SafePtr<AvailRing, VmFrame>,
+    avail: SafePtr<AvailRing, DmaCoherent>,
     /// Used ring
-    used: SafePtr<UsedRing, VmFrame>,
+    used: SafePtr<UsedRing, DmaCoherent>,
     /// point to notify address
     notify: SafePtr<u32, IoMem>,

@@ -78,24 +78,22 @@ impl VirtQueue {
             }
             let desc_size = size_of::<Descriptor>() * size as usize;

-            let (page1, page2) = {
-                let mut continue_pages = VmAllocOptions::new(2)
-                    .can_dma(true)
+            let (seg1, seg2) = {
+                let continue_segment = VmAllocOptions::new(2)
                     .is_contiguous(true)
-                    .alloc()
+                    .alloc_contiguous()
                     .unwrap();
-                let page1 = continue_pages.pop().unwrap();
-                let page2 = continue_pages.pop().unwrap();
-                if page1.paddr() > page2.paddr() {
-                    (page2, page1)
-                } else {
-                    (page1, page2)
-                }
+                let seg1 = continue_segment.range(0..1);
+                let seg2 = continue_segment.range(1..2);
+                (seg1, seg2)
             };
-            let desc_frame_ptr: SafePtr<Descriptor, VmFrame> = SafePtr::new(page1, 0);
-            let mut avail_frame_ptr: SafePtr<AvailRing, VmFrame> = desc_frame_ptr.clone().cast();
+            let desc_frame_ptr: SafePtr<Descriptor, DmaCoherent> =
+                SafePtr::new(DmaCoherent::map(seg1, true).unwrap(), 0);
+            let mut avail_frame_ptr: SafePtr<AvailRing, DmaCoherent> =
+                desc_frame_ptr.clone().cast();
             avail_frame_ptr.byte_add(desc_size);
-            let used_frame_ptr: SafePtr<UsedRing, VmFrame> = SafePtr::new(page2, 0);
+            let used_frame_ptr: SafePtr<UsedRing, DmaCoherent> =
+                SafePtr::new(DmaCoherent::map(seg2, true).unwrap(), 0);
             (desc_frame_ptr, avail_frame_ptr, used_frame_ptr)
         } else {
             if size > 256 {
@@ -103,15 +101,36 @@ impl VirtQueue {
             }
             (
                 SafePtr::new(
-                    VmAllocOptions::new(1).can_dma(true).alloc_single().unwrap(),
+                    DmaCoherent::map(
+                        VmAllocOptions::new(1)
+                            .is_contiguous(true)
+                            .alloc_contiguous()
+                            .unwrap(),
+                        true,
+                    )
+                    .unwrap(),
                     0,
                 ),
                 SafePtr::new(
-                    VmAllocOptions::new(1).can_dma(true).alloc_single().unwrap(),
+                    DmaCoherent::map(
+                        VmAllocOptions::new(1)
+                            .is_contiguous(true)
+                            .alloc_contiguous()
+                            .unwrap(),
+                        true,
+                    )
+                    .unwrap(),
                     0,
                 ),
                 SafePtr::new(
-                    VmAllocOptions::new(1).can_dma(true).alloc_single().unwrap(),
+                    DmaCoherent::map(
+                        VmAllocOptions::new(1)
+                            .is_contiguous(true)
+                            .alloc_contiguous()
+                            .unwrap(),
+                        true,
+                    )
+                    .unwrap(),
                     0,
                 ),
             )
@@ -120,8 +139,11 @@
         debug!("queue_driver start paddr:{:x?}", avail_ring_ptr.paddr());
         debug!("queue_device start paddr:{:x?}", used_ring_ptr.paddr());

+        transport
+            .set_queue(idx, size, &descriptor_ptr, &avail_ring_ptr, &used_ring_ptr)
+            .unwrap();
         let mut descs = Vec::with_capacity(size as usize);
-        descs.push(descriptor_ptr.clone());
+        descs.push(descriptor_ptr);
         for i in 0..size as usize {
             let mut desc = descs.get(i).unwrap().clone();
             desc.add(1);
@@ -137,9 +159,6 @@
         field_ptr!(&avail_ring_ptr, AvailRing, flags)
             .write(&(0u16))
             .unwrap();
-        transport
-            .set_queue(idx, size, &descriptor_ptr, &avail_ring_ptr, &used_ring_ptr)
-            .unwrap();
         Ok(VirtQueue {
             descs,
             avail: avail_ring_ptr,
@@ -158,6 +177,13 @@
     ///
     /// Ref: linux virtio_ring.c virtqueue_add
     pub fn add(&mut self, inputs: &[&[u8]], outputs: &[&mut [u8]]) -> Result<u16, QueueError> {
+        // FIXME: use `DmaSteam` for inputs and outputs. Now because the upper device driver lacks the
+        // ability to safely construct DmaStream from slice, slice is still used here.
+        // pub fn add(
+        //     &mut self,
+        //     inputs: &[&DmaStream],
+        //     outputs: &[&mut DmaStream],
+        // ) -> Result<u16, QueueError> {
         if inputs.is_empty() && outputs.is_empty() {
             return Err(QueueError::InvalidArgs);
         }
@@ -198,7 +224,8 @@
         let avail_slot = self.avail_idx & (self.queue_size - 1);

         {
-            let ring_ptr: SafePtr<[u16; 64], &VmFrame> = field_ptr!(&self.avail, AvailRing, ring);
+            let ring_ptr: SafePtr<[u16; 64], &DmaCoherent> =
+                field_ptr!(&self.avail, AvailRing, ring);
             let mut ring_slot_ptr = ring_ptr.cast::<u16>();
             ring_slot_ptr.add(avail_slot as usize);
             ring_slot_ptr.write(&head).unwrap();
@@ -341,7 +368,9 @@ pub struct Descriptor {

 #[inline]
 #[allow(clippy::type_complexity)]
-fn set_buf(ptr: &SafePtr<Descriptor, &VmFrame, TRightSet<TRights![Dup, Write]>>, buf: &[u8]) {
+fn set_buf(ptr: &SafePtr<Descriptor, &DmaCoherent, TRightSet<TRights![Dup, Write]>>, buf: &[u8]) {
+    // FIXME: use `DmaSteam` for buf. Now because the upper device driver lacks the
+    // ability to safely construct DmaStream from slice, slice is still used here.
     let va = buf.as_ptr() as usize;
     let pa = jinux_frame::vm::vaddr_to_paddr(va).unwrap();
     field_ptr!(ptr, Descriptor, addr)
@@ -10,7 +10,7 @@ use jinux_frame::{
     offset_of,
     sync::RwLock,
     trap::IrqCallbackFunction,
-    vm::VmFrame,
+    vm::DmaCoherent,
 };
 use jinux_rights::{ReadOp, WriteOp};
 use jinux_util::{field_ptr, safe_ptr::SafePtr};
@@ -94,9 +94,9 @@ impl VirtioTransport for VirtioMmioTransport {
         &mut self,
         idx: u16,
         queue_size: u16,
-        descriptor_ptr: &SafePtr<Descriptor, VmFrame>,
-        driver_ptr: &SafePtr<AvailRing, VmFrame>,
-        device_ptr: &SafePtr<UsedRing, VmFrame>,
+        descriptor_ptr: &SafePtr<Descriptor, DmaCoherent>,
+        driver_ptr: &SafePtr<AvailRing, DmaCoherent>,
+        device_ptr: &SafePtr<UsedRing, DmaCoherent>,
     ) -> Result<(), VirtioTransportError> {
         field_ptr!(&self.layout, VirtioMmioLayout, queue_sel)
             .write(&(idx as u32))
@@ -1,7 +1,7 @@
 use core::fmt::Debug;

 use alloc::boxed::Box;
-use jinux_frame::{io_mem::IoMem, trap::IrqCallbackFunction, vm::VmFrame};
+use jinux_frame::{io_mem::IoMem, trap::IrqCallbackFunction, vm::DmaCoherent};
 use jinux_util::safe_ptr::SafePtr;

 use crate::{
@@ -61,9 +61,9 @@ pub trait VirtioTransport: Sync + Send + Debug {
         &mut self,
         idx: u16,
         queue_size: u16,
-        descriptor_ptr: &SafePtr<Descriptor, VmFrame>,
-        avail_ring_ptr: &SafePtr<AvailRing, VmFrame>,
-        used_ring_ptr: &SafePtr<UsedRing, VmFrame>,
+        descriptor_ptr: &SafePtr<Descriptor, DmaCoherent>,
+        avail_ring_ptr: &SafePtr<AvailRing, DmaCoherent>,
+        used_ring_ptr: &SafePtr<UsedRing, DmaCoherent>,
     ) -> Result<(), VirtioTransportError>;

     /// The max queue size of one virtqueue.
@@ -8,7 +8,7 @@ use jinux_frame::{
     io_mem::IoMem,
     offset_of,
     trap::IrqCallbackFunction,
-    vm::VmFrame,
+    vm::DmaCoherent,
 };

 use alloc::{boxed::Box, sync::Arc};
@@ -70,9 +70,9 @@ impl VirtioTransport for VirtioPciTransport {
         &mut self,
         idx: u16,
         queue_size: u16,
-        descriptor_ptr: &SafePtr<Descriptor, VmFrame>,
-        avail_ring_ptr: &SafePtr<AvailRing, VmFrame>,
-        used_ring_ptr: &SafePtr<UsedRing, VmFrame>,
+        descriptor_ptr: &SafePtr<Descriptor, DmaCoherent>,
+        avail_ring_ptr: &SafePtr<AvailRing, DmaCoherent>,
+        used_ring_ptr: &SafePtr<UsedRing, DmaCoherent>,
     ) -> Result<(), VirtioTransportError> {
         if idx >= self.num_queues() {
             return Err(VirtioTransportError::InvalidArgs);