Implement IoMemAllocator
commit 8a26b785a4
parent 0054a8080f
ostd/src/arch/x86/allocator.rs (new file, 56 lines)
@@ -0,0 +1,56 @@
// SPDX-License-Identifier: MPL-2.0

use alloc::vec::Vec;

use align_ext::AlignExt;

use crate::{boot::memory_region::MemoryRegionType, io::IoMemAllocatorBuilder};

/// Initializes the allocatable MMIO area based on the x86-64 memory distribution map.
///
/// In x86-64, the available physical memory is divided into two regions: below 32 bits (low memory)
/// and above (high memory). The area from the top of low memory to 0xffff_ffff and the area above the
/// top of high memory are the available MMIO areas.
pub(super) fn construct_io_mem_allocator_builder() -> IoMemAllocatorBuilder {
    // TODO: Add MMIO regions below 1MB (e.g., VGA framebuffer).
    let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions;
    let mut ranges = Vec::with_capacity(2);

    let reserved_filter = regions.iter().filter(|r| {
        r.typ() != MemoryRegionType::Unknown && r.typ() != MemoryRegionType::Reserved
    });

    // Find the TOLM (Top of Low Memory) and initialize the Low MMIO region (TOLM ~ LOW_MMIO_TOP).
    // Align the start address to LOW_MMIO_ALIGN.
    const LOW_MMIO_TOP: usize = 0x1_0000_0000;
    const LOW_MMIO_ALIGN: usize = 0x1000_0000;
    let (lower_half_base, lower_half_len) = reserved_filter
        .clone()
        .filter(|r| r.base() < u32::MAX as usize)
        .max_by(|a, b| a.base().cmp(&b.base()))
        .map(|reg| (reg.base(), reg.len()))
        .unwrap();

    let mmio_start_addr = (lower_half_base + lower_half_len).align_up(LOW_MMIO_ALIGN);
    assert!(mmio_start_addr < LOW_MMIO_TOP);
    ranges.push(mmio_start_addr..LOW_MMIO_TOP);

    // Find the TOHM (Top of High Memory) and initialize the High MMIO region.
    // Here, HIGH_MMIO_TOP is used as the top of the High MMIO region.
    //
    // TODO: Update the High MMIO region at runtime.
    const HIGH_MMIO_TOP: usize = 0x8000_0000_0000;
    const HIGH_MMIO_ALIGN: usize = 0x1_0000_0000;
    let (upper_half_base, upper_half_len) = reserved_filter
        .filter(|r| r.base() >= u32::MAX as usize)
        .max_by(|a, b| a.base().cmp(&b.base()))
        .map(|reg| (reg.base(), reg.len()))
        .unwrap_or((HIGH_MMIO_ALIGN, 0));

    let mmio_start_addr = (upper_half_base + upper_half_len).align_up(HIGH_MMIO_ALIGN);
    assert!(mmio_start_addr < HIGH_MMIO_TOP);
    ranges.push(mmio_start_addr..HIGH_MMIO_TOP);

    // SAFETY: The range is guaranteed not to access physical memory.
    unsafe { IoMemAllocatorBuilder::new(ranges) }
}
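For reference, the TOLM/alignment arithmetic above can be checked with a minimal stand-alone sketch (not part of this diff); the TOLM value 0x7fee_0000 is made up for illustration, and align_up is the same align_ext extension method the code already uses:

use align_ext::AlignExt;

fn example_low_mmio_range() -> core::ops::Range<usize> {
    const LOW_MMIO_TOP: usize = 0x1_0000_0000;
    const LOW_MMIO_ALIGN: usize = 0x1000_0000;

    // Hypothetical TOLM: the highest usable low-memory region ends at 0x7fee_0000.
    let tolm: usize = 0x7fee_0000;

    // Rounding TOLM up to a 256 MiB boundary gives 0x8000_0000, so the allocatable
    // Low MMIO window here would be 0x8000_0000..0x1_0000_0000.
    let mmio_start = tolm.align_up(LOW_MMIO_ALIGN);
    assert!(mmio_start < LOW_MMIO_TOP);
    mmio_start..LOW_MMIO_TOP
}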
ostd/src/arch/x86/mod.rs
@@ -2,6 +2,7 @@

//! Platform-specific code for the x86 platform.

mod allocator;
pub mod boot;
pub(crate) mod cpu;
pub mod device;
@@ -17,6 +18,7 @@ pub mod task;
pub mod timer;
pub mod trap;

use allocator::construct_io_mem_allocator_builder;
use cfg_if::cfg_if;
use spin::Once;
use x86::cpuid::{CpuId, FeatureInfo};
@@ -78,6 +80,8 @@ pub(crate) unsafe fn late_init_on_bsp() {

    kernel::acpi::init();

    let builder = construct_io_mem_allocator_builder();

    match kernel::apic::init() {
        Ok(_) => {
            ioapic::init();
@@ -103,6 +107,11 @@ pub(crate) unsafe fn late_init_on_bsp() {

    // Some drivers, such as the serial driver, may use the PIC.
    kernel::pic::init();

    // SAFETY: All the system device memory I/Os have been removed from the builder.
    unsafe {
        crate::io::init(builder);
    }
}

/// Architecture-specific initialization on the application processor.
ostd/src/io/io_mem/allocator.rs (new file, 186 lines)
@@ -0,0 +1,186 @@
// SPDX-License-Identifier: MPL-2.0

//! I/O memory allocator.

use alloc::vec::Vec;
use core::ops::Range;

use log::{debug, info};
use spin::Once;

use crate::{
    io::io_mem::IoMem,
    mm::{CachePolicy, PageFlags},
    util::vaddr_alloc::VirtAddrAllocator,
};

/// I/O memory allocator that allocates memory I/O access to device drivers.
pub struct IoMemAllocator {
    allocators: Vec<VirtAddrAllocator>,
}

impl IoMemAllocator {
    /// Acquires the I/O memory access for `range`.
    ///
    /// If the range is not available, the return value will be `None`.
    pub fn acquire(&self, range: Range<usize>) -> Option<IoMem> {
        find_allocator(&self.allocators, &range)?
            .alloc_specific(&range)
            .ok()?;

        debug!("Acquiring MMIO range: {:x?}..{:x?}", range.start, range.end);

        // SAFETY: The created `IoMem` is guaranteed not to access physical memory or system device I/O.
        unsafe { Some(IoMem::new(range, PageFlags::RW, CachePolicy::Uncacheable)) }
    }

    /// Recycles an MMIO range.
    ///
    /// # Safety
    ///
    /// The caller must have ownership of the MMIO region through the `IoMemAllocator::get` interface.
    #[expect(dead_code)]
    pub(in crate::io) unsafe fn recycle(&self, range: Range<usize>) {
        let allocator = find_allocator(&self.allocators, &range).unwrap();

        debug!("Recycling MMIO range: {:x}..{:x}", range.start, range.end);

        allocator.free(range);
    }

    /// Initializes the usable memory I/O regions.
    ///
    /// # Safety
    ///
    /// The caller must ensure the ranges don't belong to physical memory or system device I/O.
    unsafe fn new(allocators: Vec<VirtAddrAllocator>) -> Self {
        Self { allocators }
    }
}

/// Builder for `IoMemAllocator`.
///
/// The builder must contain the memory I/O regions that don't belong to physical memory. Also, OSTD
/// must exclude the memory I/O regions of system devices before building the `IoMemAllocator`.
pub(crate) struct IoMemAllocatorBuilder {
    allocators: Vec<VirtAddrAllocator>,
}

impl IoMemAllocatorBuilder {
    /// Initializes the memory I/O regions for devices.
    ///
    /// # Safety
    ///
    /// The caller must ensure the ranges don't belong to physical memory.
    pub(crate) unsafe fn new(ranges: Vec<Range<usize>>) -> Self {
        info!(
            "Creating new I/O memory allocator builder, ranges: {:#x?}",
            ranges
        );
        let mut allocators = Vec::with_capacity(ranges.len());
        for range in ranges {
            allocators.push(VirtAddrAllocator::new(range));
        }
        Self { allocators }
    }

    /// Removes access to a specific memory I/O range.
    ///
    /// All drivers in OSTD must use this method to prevent peripheral drivers from accessing an illegal memory I/O range.
    pub(crate) fn remove(&self, range: Range<usize>) {
        let Some(allocator) = find_allocator(&self.allocators, &range) else {
            panic!(
                "Allocator for the system device's MMIO was not found. Range: {:x?}",
                range
            );
        };

        if let Err(err) = allocator.alloc_specific(&range) {
            panic!(
                "An error occurred while trying to remove access to the system device's MMIO. Range: {:x?}. Error: {:?}",
                range, err
            );
        }
    }
}

/// The I/O memory allocator of the system.
pub static IO_MEM_ALLOCATOR: Once<IoMemAllocator> = Once::new();

/// Initializes the static `IO_MEM_ALLOCATOR` based on the builder.
///
/// # Safety
///
/// The caller must ensure that all memory I/O regions belonging to system devices have been removed by calling the
/// `remove` method.
pub(crate) unsafe fn init(builder: IoMemAllocatorBuilder) {
    IO_MEM_ALLOCATOR.call_once(|| IoMemAllocator::new(builder.allocators));
}

/// Finds the allocator whose managed range overlaps with the given `range`.
fn find_allocator<'a>(
    allocators: &'a [VirtAddrAllocator],
    range: &Range<usize>,
) -> Option<&'a VirtAddrAllocator> {
    for allocator in allocators.iter() {
        let allocator_range = allocator.fullrange();
        if allocator_range.start >= range.end || allocator_range.end <= range.start {
            continue;
        }

        return Some(allocator);
    }
    None
}

#[cfg(ktest)]
mod test {
    use alloc::vec;

    use super::{IoMemAllocator, IoMemAllocatorBuilder};
    use crate::{mm::PAGE_SIZE, prelude::ktest};

    #[expect(clippy::reversed_empty_ranges)]
    #[expect(clippy::single_range_in_vec_init)]
    #[ktest]
    fn illegal_region() {
        let range = vec![0x4000_0000..0x4200_0000];
        let allocator =
            unsafe { IoMemAllocator::new(IoMemAllocatorBuilder::new(range).allocators) };
        assert!(allocator.acquire(0..0).is_none());
        assert!(allocator.acquire(0x4000_0000..0x4000_0000).is_none());
        assert!(allocator.acquire(0x4000_1000..0x4000_0000).is_none());
        assert!(allocator.acquire(usize::MAX..0).is_none());
    }

    #[ktest]
    fn conflict_region() {
        let io_mem_region_a = 0x4000_0000..0x4200_0000;
        let io_mem_region_b =
            (io_mem_region_a.end + PAGE_SIZE)..(io_mem_region_a.end + 10 * PAGE_SIZE);
        let range = vec![io_mem_region_a.clone(), io_mem_region_b.clone()];

        let allocator =
            unsafe { IoMemAllocator::new(IoMemAllocatorBuilder::new(range).allocators) };

        assert!(allocator
            .acquire((io_mem_region_a.start - 1)..io_mem_region_a.start)
            .is_none());
        assert!(allocator
            .acquire(io_mem_region_a.start..(io_mem_region_a.start + 1))
            .is_some());

        assert!(allocator
            .acquire((io_mem_region_a.end + 1)..(io_mem_region_b.start - 1))
            .is_none());
        assert!(allocator
            .acquire((io_mem_region_a.end - 1)..(io_mem_region_b.start + 1))
            .is_none());

        assert!(allocator
            .acquire((io_mem_region_a.end - 1)..io_mem_region_a.end)
            .is_some());
        assert!(allocator
            .acquire(io_mem_region_a.end..(io_mem_region_a.end + 1))
            .is_none());
    }
}
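The tests above cover acquire but not recycle, which this commit still marks as dead_code. A hedged sketch of how the two presumably pair, written in the same ktest style (the ranges are illustrative, and the behavior of VirtAddrAllocator::free is assumed from its use in recycle):

    #[expect(clippy::single_range_in_vec_init)]
    #[ktest]
    fn recycle_then_reacquire() {
        let range = vec![0x4000_0000..0x4200_0000];
        let allocator =
            unsafe { IoMemAllocator::new(IoMemAllocatorBuilder::new(range).allocators) };

        // Acquire a page-sized window; the range is now marked as allocated.
        let io_mem = allocator.acquire(0x4000_0000..0x4000_1000).unwrap();
        assert!(allocator.acquire(0x4000_0000..0x4000_1000).is_none());

        // Dropping the `IoMem` does not free the range yet (see the `Drop` TODO in the next file).
        drop(io_mem);

        // SAFETY: this exact range was obtained from `acquire` above.
        unsafe { allocator.recycle(0x4000_0000..0x4000_1000) };

        // After recycling, the same range can be acquired again.
        assert!(allocator.acquire(0x4000_0000..0x4000_1000).is_some());
    }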
ostd/src/io/io_mem/mod.rs
@@ -1,11 +1,15 @@
// SPDX-License-Identifier: MPL-2.0

//! I/O memory.
//! I/O memory and its allocator that allocates memory I/O (MMIO) to device drivers.

mod allocator;

use core::ops::{Deref, Range};

use align_ext::AlignExt;

pub(super) use self::allocator::init;
pub(crate) use self::allocator::IoMemAllocatorBuilder;
use crate::{
    if_tdx_enabled,
    mm::{
@@ -35,6 +39,43 @@ impl HasPaddr for IoMem {
}

impl IoMem {
    /// Acquires an `IoMem` instance for the given range.
    pub fn acquire(range: Range<Paddr>) -> Result<IoMem> {
        allocator::IO_MEM_ALLOCATOR
            .get()
            .unwrap()
            .acquire(range)
            .ok_or(Error::AccessDenied)
    }

    /// Returns the physical address of the I/O memory.
    pub fn paddr(&self) -> Paddr {
        self.pa
    }

    /// Returns the length of the I/O memory region.
    pub fn length(&self) -> usize {
        self.limit
    }

    /// Slices the `IoMem`, returning another `IoMem` representing the subslice.
    ///
    /// # Panics
    ///
    /// This method will panic if the range is empty or out of bounds.
    pub fn slice(&self, range: Range<usize>) -> Self {
        // This ensures `range.start < range.end` and `range.end <= limit`.
        assert!(!range.is_empty() && range.end <= self.limit);

        // We've checked that the range is in bounds, so we can construct the new `IoMem` safely.
        Self {
            kvirt_area: self.kvirt_area.clone(),
            offset: self.offset + range.start,
            limit: range.len(),
            pa: self.pa + range.start,
        }
    }

    /// Creates a new `IoMem`.
    ///
    /// # Safety
    ///
@@ -76,34 +117,6 @@ impl IoMem {
            pa: range.start,
        }
    }

    /// Returns the physical address of the I/O memory.
    pub fn paddr(&self) -> Paddr {
        self.pa
    }

    /// Returns the length of the I/O memory region.
    pub fn length(&self) -> usize {
        self.limit
    }

    /// Slices the `IoMem`, returning another `IoMem` representing the subslice.
    ///
    /// # Panics
    ///
    /// This method will panic if the range is empty or out of bounds.
    pub fn slice(&self, range: Range<usize>) -> Self {
        // This ensures `range.start < range.end` and `range.end <= limit`.
        assert!(!range.is_empty() && range.end <= self.limit);

        // We've checked that the range is in bounds, so we can construct the new `IoMem` safely.
        Self {
            kvirt_area: self.kvirt_area.clone(),
            offset: self.offset + range.start,
            limit: range.len(),
            pa: self.pa + range.start,
        }
    }
}

// For now, we reuse `VmReader` and `VmWriter` to access I/O memory.
@@ -186,3 +199,11 @@ impl VmIoOnce for IoMem {
        self.writer().skip(offset).write_once(new_val)
    }
}

impl Drop for IoMem {
    fn drop(&mut self) {
        // TODO: Multiple `IoMem` instances should not overlap; we should refactor the driver code and
        // remove `Clone` and `IoMem::slice`. After refactoring, `Drop` can be implemented to recycle
        // the `IoMem`.
    }
}
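For a sense of how the new public entry point is intended to be used, a hedged sketch of a driver acquiring its MMIO window follows; the BAR address and length are made up, and the ostd::io::IoMem path plus the prelude Result are assumptions based on the re-exports visible in this diff:

use ostd::io::IoMem;
use ostd::prelude::*;

fn map_device_registers() -> Result<IoMem> {
    // Hypothetical PCI BAR; a real driver would read this from configuration space.
    let bar_base: usize = 0xfebd_0000;
    let bar_len: usize = 0x1000;

    // Fails with `Error::AccessDenied` if the range lies outside the allocatable MMIO
    // regions, overlaps a removed system-device range, or has already been acquired.
    IoMem::acquire(bar_base..bar_base + bar_len)
}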
ostd/src/io/mod.rs
@@ -10,3 +10,15 @@
mod io_mem;

pub use self::io_mem::IoMem;
pub(crate) use self::io_mem::IoMemAllocatorBuilder;

/// Initializes the static allocator based on the builder.
///
/// # Safety
///
/// The caller must ensure that all memory I/O regions belonging to system devices have been removed by calling the
/// `remove` method.
pub(crate) unsafe fn init(builder: IoMemAllocatorBuilder) {
    self::io_mem::init(builder);
    // TODO: IoPort initialization.
}