Refactor kernel virtual memory allocation for kernel stack and I/O memory
This commit is contained in:
parent 29eb37c07c, commit 998869d57e

Makefile (2 lines changed)
@@ -68,8 +68,6 @@ endif
# If the BENCHMARK is set, we will run the benchmark in the kernel mode.
ifneq ($(BENCHMARK), none)
CARGO_OSDK_ARGS += --init-args="/benchmark/common/bench_runner.sh $(BENCHMARK) asterinas"
# TODO: remove this workaround after enabling kernel virtual area.
OSTD_TASK_STACK_SIZE_IN_PAGES = 7
endif

ifeq ($(INTEL_TDX), 1)

@@ -199,7 +199,7 @@ impl From<ostd::Error> for Error {
            ostd::Error::PageFault => Error::new(Errno::EFAULT),
            ostd::Error::Overflow => Error::new(Errno::EOVERFLOW),
            ostd::Error::MapAlreadyMappedVaddr => Error::new(Errno::EINVAL),
            ostd::Error::KvaAllocError => Error::new(Errno::ENOMEM),
            ostd::Error::KVirtAreaAllocError => Error::new(Errno::ENOMEM),
        }
    }
}
@@ -8,7 +8,11 @@ use log::info;
use super::VIRTIO_MMIO_MAGIC;
use crate::{
    io_mem::IoMem,
    mm::{paddr_to_vaddr, Paddr, VmIoOnce},
    mm::{
        paddr_to_vaddr,
        page_prop::{CachePolicy, PageFlags},
        Paddr, VmIoOnce,
    },
    trap::IrqLine,
    Error, Result,
};

@@ -31,7 +35,13 @@ impl MmioCommonDevice {
            debug_assert_eq!(*(paddr_to_vaddr(paddr) as *const u32), VIRTIO_MMIO_MAGIC);
        }
        // SAFETY: This range is virtio-mmio device space.
        let io_mem = unsafe { IoMem::new(paddr..paddr + 0x200) };
        let io_mem = unsafe {
            IoMem::new(
                paddr..paddr + 0x200,
                PageFlags::RW,
                CachePolicy::Uncacheable,
            )
        };
        let res = Self {
            io_mem,
            irq: handle,
@@ -13,6 +13,7 @@ use super::PciDeviceLocation;
use crate::{
    arch::device::io_port::{PortRead, PortWrite},
    io_mem::IoMem,
    mm::page_prop::{CachePolicy, PageFlags},
    Error, Result,
};

@@ -244,7 +245,13 @@ impl MemoryBar {
            size,
            prefetchable,
            address_length,
            io_memory: unsafe { IoMem::new((base as usize)..((base + size as u64) as usize)) },
            io_memory: unsafe {
                IoMem::new(
                    (base as usize)..((base + size as u64) as usize),
                    PageFlags::RW,
                    CachePolicy::Uncacheable,
                )
            },
        })
    }
}
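Both call sites above change in the same way: `IoMem::new` no longer decides the mapping attributes itself, so each caller now states the page flags and cache policy explicitly, and device register ranges are mapped read-write and uncacheable. A condensed sketch of the new call shape (the `paddr` variable and the 0x200 length come from the virtio-mmio hunk above; the surrounding device code is elided):

// SAFETY: the range is device MMIO space, as asserted at the call sites above.
let io_mem = unsafe {
    IoMem::new(
        paddr..paddr + 0x200,
        PageFlags::RW,            // registers are both read and written
        CachePolicy::Uncacheable, // device registers must not be cached
    )
};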
@@ -22,7 +22,7 @@ pub enum Error {
    /// Memory mapping already exists for the given virtual address.
    MapAlreadyMappedVaddr,
    /// Error when allocating kernel virtual memory.
    KvaAllocError,
    KVirtAreaAllocError,
}

impl From<PageTableError> for Error {
@@ -2,12 +2,17 @@

//! I/O memory.

use core::ops::Range;
use core::ops::{Deref, Range};

use align_ext::AlignExt;
use cfg_if::cfg_if;

use crate::{
    mm::{
        kspace::LINEAR_MAPPING_BASE_VADDR, paddr_to_vaddr, FallibleVmRead, FallibleVmWrite,
        HasPaddr, Infallible, Paddr, PodOnce, Vaddr, VmIo, VmIoOnce, VmReader, VmWriter,
        kspace::kvirt_area::{KVirtArea, Untracked},
        page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
        FallibleVmRead, FallibleVmWrite, HasPaddr, Infallible, Paddr, PodOnce, VmIo, VmIoOnce,
        VmReader, VmWriter, PAGE_SIZE,
    },
    prelude::*,
    Error,
@@ -16,13 +21,16 @@ use crate::{
/// I/O memory.
#[derive(Debug, Clone)]
pub struct IoMem {
    virtual_address: Vaddr,
    kvirt_area: Arc<KVirtArea<Untracked>>,
    // The range actually used for MMIO is `kvirt_area.start + offset..kvirt_area.start + offset + limit`.
    offset: usize,
    limit: usize,
    pa: Paddr,
}

impl HasPaddr for IoMem {
    fn paddr(&self) -> Paddr {
        self.virtual_address - LINEAR_MAPPING_BASE_VADDR
        self.pa
    }
}
@@ -34,16 +42,50 @@ impl IoMem {
    /// - The given physical address range must be in the I/O memory region.
    /// - Reading from or writing to I/O memory regions may have side effects. Those side effects
    ///   must not cause soundness problems (e.g., they must not corrupt the kernel memory).
    pub(crate) unsafe fn new(range: Range<Paddr>) -> Self {
    pub(crate) unsafe fn new(range: Range<Paddr>, flags: PageFlags, cache: CachePolicy) -> Self {
        let first_page_start = range.start.align_down(PAGE_SIZE);
        let last_page_end = range.end.align_up(PAGE_SIZE);
        let mut new_kvirt_area = KVirtArea::<Untracked>::new(last_page_end - first_page_start);

        cfg_if! {
            if #[cfg(all(feature = "cvm_guest", target_arch = "x86_64"))] {
                let priv_flags = if tdx_guest::tdx_is_enabled() {
                    PrivilegedPageFlags::SHARED
                } else {
                    PrivilegedPageFlags::empty()
                };
            } else {
                let priv_flags = PrivilegedPageFlags::empty();
            }
        }

        let prop = PageProperty {
            flags,
            cache,
            priv_flags,
        };

        // SAFETY: The caller of `IoMem::new()` and the constructor of `new_kvirt_area` have ensured
        // the safety of this mapping.
        unsafe {
            new_kvirt_area.map_untracked_pages(
                new_kvirt_area.range(),
                first_page_start..last_page_end,
                prop,
            );
        }

        Self {
            virtual_address: paddr_to_vaddr(range.start),
            kvirt_area: Arc::new(new_kvirt_area),
            offset: range.start - first_page_start,
            limit: range.len(),
            pa: range.start,
        }
    }
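The `align_down`/`align_up` arithmetic above is what allows an unaligned MMIO range to be mapped: the whole enclosing page range goes into the `KVirtArea`, while the sub-page position of the device registers is remembered in `offset` and `limit`. A standalone worked example with a hypothetical register block of length 0x200 at physical address 0xfeb0_0040, assuming 4 KiB pages:

const PAGE_SIZE: usize = 4096;

fn main() {
    let range = 0xfeb0_0040usize..0xfeb0_0240usize; // hypothetical MMIO window
    let first_page_start = range.start & !(PAGE_SIZE - 1); // align_down -> 0xfeb0_0000
    let last_page_end = (range.end + PAGE_SIZE - 1) & !(PAGE_SIZE - 1); // align_up -> 0xfeb0_1000
    assert_eq!(last_page_end - first_page_start, PAGE_SIZE); // exactly one page gets mapped
    assert_eq!(range.start - first_page_start, 0x40); // stored in `offset`
    assert_eq!(range.end - range.start, 0x200); // stored in `limit`
}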
    /// Returns the physical address of the I/O memory.
    pub fn paddr(&self) -> Paddr {
        self.virtual_address - LINEAR_MAPPING_BASE_VADDR
        self.pa
    }

    /// Returns the length of the I/O memory region.

@@ -62,8 +104,10 @@ impl IoMem {

        // We've checked the range is in bounds, so we can construct the new `IoMem` safely.
        Self {
            virtual_address: self.virtual_address + range.start,
            limit: range.end - range.start,
            kvirt_area: self.kvirt_area.clone(),
            offset: self.offset + range.start,
            limit: range.len(),
            pa: self.pa + range.start,
        }
    }
}
@@ -74,24 +118,38 @@ impl IoMem {
// "memory", but rather I/O ports that communicate directly with the hardware. However, this code
// is in OSTD, so we can rely on the implementation details of `VmReader` and `VmWriter`, which we
// know are also suitable for accessing I/O memory.

impl IoMem {
    fn reader(&self) -> VmReader<'_, Infallible> {
        // SAFETY: The safety conditions of `IoMem::new` guarantee we can read from the I/O memory
        // safely.
        unsafe { VmReader::from_kernel_space(self.virtual_address as *mut u8, self.limit) }
        // SAFETY: The constructor of the `IoMem` structure has already ensured the
        // safety of reading from the mapped physical address, and the mapping is valid.
        unsafe {
            VmReader::from_kernel_space(
                self.kvirt_area.deref().start() as *mut u8,
                self.kvirt_area.deref().len(),
            )
        }
    }

    fn writer(&self) -> VmWriter<'_, Infallible> {
        // SAFETY: The safety conditions of `IoMem::new` guarantee we can read from the I/O memory
        // safely.
        unsafe { VmWriter::from_kernel_space(self.virtual_address as *mut u8, self.limit) }
        // SAFETY: The constructor of the `IoMem` structure has already ensured the
        // safety of writing to the mapped physical address, and the mapping is valid.
        unsafe {
            VmWriter::from_kernel_space(
                self.kvirt_area.deref().start() as *mut u8,
                self.kvirt_area.deref().len(),
            )
        }
    }
}

impl VmIo for IoMem {
    fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> {
        let offset = offset + self.offset;
        if self
            .limit
            .kvirt_area
            .deref()
            .len()
            .checked_sub(offset)
            .is_none_or(|remain| remain < writer.avail())
        {

@@ -108,10 +166,13 @@ impl VmIo for IoMem {
    }

    fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> {
        let offset = offset + self.offset;
        if self
            .limit
            .kvirt_area
            .deref()
            .len()
            .checked_sub(offset)
            .is_none_or(|avail| avail < reader.remain())
            .is_none_or(|remain| remain < reader.remain())
        {
            return Err(Error::InvalidArgs);
        }
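The `checked_sub(offset)` plus `is_none_or(..)` guard above rejects two cases at once: an offset that lies beyond the mapped area, and a transfer that starts inside it but would run past its end. The same check reduced to plain integers (all lengths are hypothetical), with the polarity flipped so that `true` means the access is allowed:

fn in_bounds(mapped_len: usize, offset: usize, transfer_len: usize) -> bool {
    // Mirrors `kvirt_area.len().checked_sub(offset).is_none_or(|remain| remain < transfer_len)`.
    mapped_len
        .checked_sub(offset)
        .map_or(false, |remain| remain >= transfer_len)
}

fn main() {
    assert!(in_bounds(0x1000, 0x40, 0x200)); // fits within the mapped page
    assert!(!in_bounds(0x1000, 0xf00, 0x200)); // would run past the end
    assert!(!in_bounds(0x1000, 0x2000, 0x4)); // the offset itself is out of range
}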
@@ -128,10 +189,10 @@ impl VmIo for IoMem {

impl VmIoOnce for IoMem {
    fn read_once<T: PodOnce>(&self, offset: usize) -> Result<T> {
        self.reader().skip(offset).read_once()
        self.reader().skip(offset + self.offset).read_once()
    }

    fn write_once<T: PodOnce>(&self, offset: usize, new_val: &T) -> Result<()> {
        self.writer().skip(offset).write_once(new_val)
        self.writer().skip(offset + self.offset).write_once(new_val)
    }
}
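Because `self.offset` is folded in here as well, register offsets stay relative to the physical range originally passed to `IoMem::new`, even though the underlying mapping is page-aligned. A sketch of how a caller inside OSTD would read a 32-bit register through this interface (the function name and the 0x70 offset are illustrative, not part of this commit):

use crate::{io_mem::IoMem, mm::VmIoOnce, Result};

/// Reads a hypothetical 32-bit status register at byte offset 0x70.
fn read_status(io_mem: &IoMem) -> Result<u32> {
    io_mem.read_once::<u32>(0x70)
}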
@@ -3,6 +3,7 @@
//! The standard library for Asterinas and other Rust OSes.
#![feature(alloc_error_handler)]
#![feature(allocator_api)]
#![feature(btree_cursors)]
#![feature(const_ptr_sub_ptr)]
#![feature(const_trait_impl)]
#![feature(core_intrinsics)]

@@ -90,13 +91,13 @@ unsafe fn init() {

    smp::init();

    bus::init();

    // SAFETY: This function is called only once on the BSP.
    unsafe {
        mm::kspace::activate_kernel_page_table();
    }

    bus::init();

    arch::irq::enable_local();

    invoke_ffi_init_funcs();
ostd/src/mm/kspace/kva.rs (deleted)

@@ -1,213 +0,0 @@
// SPDX-License-Identifier: MPL-2.0

//! Kernel virtual memory allocation

use alloc::{collections::BTreeMap, vec::Vec};
use core::ops::{DerefMut, Range};

use align_ext::AlignExt;

use super::{KERNEL_PAGE_TABLE, TRACKED_MAPPED_PAGES_RANGE};
use crate::{
    arch::mm::tlb_flush_addr_range,
    mm::{
        page::{
            meta::{PageMeta, PageUsage},
            Page,
        },
        page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
        page_table::PageTableItem,
        Vaddr, PAGE_SIZE,
    },
    sync::SpinLock,
    Error, Result,
};
pub(crate) use lazy_static::lazy_static;

pub struct KvaFreeNode {
    block: Range<Vaddr>,
}

impl KvaFreeNode {
    pub(crate) const fn new(range: Range<Vaddr>) -> Self {
        Self { block: range }
    }
}

pub struct VirtAddrAllocator {
    freelist: BTreeMap<Vaddr, KvaFreeNode>,
}

impl VirtAddrAllocator {
    fn new(range: Range<Vaddr>) -> Self {
        let mut freelist: BTreeMap<Vaddr, KvaFreeNode> = BTreeMap::new();
        freelist.insert(range.start, KvaFreeNode::new(range));
        Self { freelist }
    }

    /// Allocate a kernel virtual area.
    ///
    /// This is currently implemented with a simple FIRST-FIT algorithm.
    fn alloc(&mut self, size: usize) -> Result<Range<Vaddr>> {
        let mut allocate_range = None;
        let mut to_remove = None;

        for (key, value) in self.freelist.iter() {
            if value.block.end - value.block.start >= size {
                allocate_range = Some((value.block.end - size)..value.block.end);
                to_remove = Some(*key);
                break;
            }
        }

        if let Some(key) = to_remove {
            if let Some(freenode) = self.freelist.get_mut(&key) {
                if freenode.block.end - size == freenode.block.start {
                    self.freelist.remove(&key);
                } else {
                    freenode.block.end -= size;
                }
            }
        }

        if let Some(range) = allocate_range {
            Ok(range)
        } else {
            Err(Error::KvaAllocError)
        }
    }

    /// Free a kernel virtual area.
    fn free(&mut self, range: Range<Vaddr>) {
        // 1. get the previous free block, check if we can merge this block with the free one
        //    - if contiguous, merge this area with the free block.
        //    - if not contiguous, create a new free block, insert it into the list.
        // 2. check if we can merge the current block with the next block, if we can, do so.
        self.freelist.insert(range.start, KvaFreeNode::new(range));
        todo!();
    }
}

lazy_static! {
    pub static ref KVA_ALLOCATOR: SpinLock<VirtAddrAllocator> = SpinLock::new(VirtAddrAllocator::new(TRACKED_MAPPED_PAGES_RANGE));
}

#[derive(Debug)]
pub struct Kva(Range<Vaddr>);

impl Kva {
    // static KVA_FREELIST_2: SpinLock<BTreeMap<Vaddr, KvaFreeNode>> = SpinLock::new(BTreeMap::new());

    pub fn new(size: usize) -> Self {
        let mut lock_guard = KVA_ALLOCATOR.lock();
        let var = lock_guard.deref_mut().alloc(size).unwrap();
        Kva(var)
    }

    pub fn start(&self) -> Vaddr {
        self.0.start
    }

    pub fn end(&self) -> Vaddr {
        self.0.end
    }

    pub fn range(&self) -> Range<Vaddr> {
        self.0.start..self.0.end
    }

    /// Map pages into the kernel virtual area.
    /// # Safety
    /// The caller should ensure either the mapped pages or the range to be used doesn't
    /// violate the memory safety of kernel objects.
    pub unsafe fn map_pages<T: PageMeta>(&mut self, range: Range<Vaddr>, pages: Vec<Page<T>>) {
        assert!(range.len() == pages.len() * PAGE_SIZE);
        assert!(self.start() <= range.start && self.end() >= range.end);
        let page_table = KERNEL_PAGE_TABLE.get().unwrap();
        let prop = PageProperty {
            flags: PageFlags::RW,
            cache: CachePolicy::Writeback,
            priv_flags: PrivilegedPageFlags::GLOBAL,
        };
        let mut cursor = page_table.cursor_mut(&range).unwrap();
        for page in pages.into_iter() {
            cursor.map(page.into(), prop);
        }
        tlb_flush_addr_range(&range);
    }

    /// This function returns the page usage type based on the provided virtual address `addr`.
    /// This function will fail in the following cases:
    /// * If the address is not mapped (`NotMapped`), the function will fail.
    /// * If the address is mapped to a `MappedUntracked` page, the function will fail.
    pub fn get_page_type(&self, addr: Vaddr) -> PageUsage {
        assert!(self.start() <= addr && self.end() >= addr);
        let start = addr.align_down(PAGE_SIZE);
        let vaddr = start..start + PAGE_SIZE;
        let page_table = KERNEL_PAGE_TABLE.get().unwrap();
        let mut cursor = page_table.cursor(&vaddr).unwrap();
        let query_result = cursor.query().unwrap();
        match query_result {
            PageTableItem::Mapped {
                va: _,
                page,
                prop: _,
            } => page.usage(),
            _ => {
                panic!(
                    "Unexpected query result: Expected 'Mapped', found '{:?}'",
                    query_result
                );
            }
        }
    }

    /// Get the mapped page.
    /// This function will fail in the following cases:
    /// * if the provided page type doesn't match the actual mapped one.
    /// * If the address is not mapped (`NotMapped`), the function will fail.
    /// * If the address is mapped to a `MappedUntracked` page, the function will fail.
    pub fn get_page<T: PageMeta>(&self, addr: Vaddr) -> Result<Page<T>> {
        assert!(self.start() <= addr && self.end() >= addr);
        let start = addr.align_down(PAGE_SIZE);
        let vaddr = start..start + PAGE_SIZE;
        let page_table = KERNEL_PAGE_TABLE.get().unwrap();
        let mut cursor = page_table.cursor(&vaddr).unwrap();
        let query_result = cursor.query().unwrap();
        match query_result {
            PageTableItem::Mapped {
                va: _,
                page,
                prop: _,
            } => {
                let result = Page::<T>::try_from(page);
                if let Ok(page) = result {
                    Ok(page)
                } else {
                    panic!("the provided page type doesn't match the actual mapped one");
                }
            }
            _ => {
                panic!(
                    "Unexpected query result: Expected 'Mapped', found '{:?}'",
                    query_result
                );
            }
        }
    }
}

impl Drop for Kva {
    fn drop(&mut self) {
        // 1. unmap all mapped pages.
        let page_table = KERNEL_PAGE_TABLE.get().unwrap();
        let range = self.start()..self.end();
        let mut cursor = page_table.cursor_mut(&range).unwrap();
        unsafe {
            cursor.unmap(range.len());
        }
        tlb_flush_addr_range(&range);
        // 2. free the virtual block
        let mut lock_guard = KVA_ALLOCATOR.lock();
        lock_guard.deref_mut().free(range);
    }
}
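Taken together with the new `kvirt_area.rs` below, the call-site change for tracked mappings (used by kernel stacks) boils down to the following before/after sketch. It is condensed from the signatures in this diff; `size`, `range`, `pages`, and `prop` stand for whatever the caller already has in scope, and the surrounding code is elided:

// Before: `Kva` with a hard-coded RW/Writeback `PageProperty`; mapping is `unsafe`.
let mut area = Kva::new(size);
unsafe { area.map_pages(range, pages) };        // pages: Vec<Page<T>>

// After: the caller supplies the `PageProperty`, and mapping tracked pages is safe.
let mut area = KVirtArea::<Tracked>::new(size);
area.map_pages(range, pages.into_iter(), prop); // pages: impl Iterator<Item = Page<T>>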
ostd/src/mm/kspace/kvirt_area.rs (new file, 368 lines)

@@ -0,0 +1,368 @@
// SPDX-License-Identifier: MPL-2.0

//! Kernel virtual memory allocation

use alloc::collections::BTreeMap;
use core::{any::TypeId, marker::PhantomData, ops::Range};

use align_ext::AlignExt;

use super::{KERNEL_PAGE_TABLE, TRACKED_MAPPED_PAGES_RANGE, VMALLOC_VADDR_RANGE};
use crate::{
    cpu::CpuSet,
    mm::{
        page::{meta::PageMeta, DynPage, Page},
        page_prop::PageProperty,
        page_table::PageTableItem,
        tlb::{TlbFlushOp, TlbFlusher, FLUSH_ALL_RANGE_THRESHOLD},
        Paddr, Vaddr, PAGE_SIZE,
    },
    sync::SpinLock,
    task::disable_preempt,
    Error, Result,
};

pub struct KVirtAreaFreeNode {
    block: Range<Vaddr>,
}

impl KVirtAreaFreeNode {
    pub(crate) const fn new(range: Range<Vaddr>) -> Self {
        Self { block: range }
    }
}

pub struct VirtAddrAllocator {
    fullrange: Range<Vaddr>,
    freelist: SpinLock<Option<BTreeMap<Vaddr, KVirtAreaFreeNode>>>,
}

impl VirtAddrAllocator {
    const fn new(fullrange: Range<Vaddr>) -> Self {
        Self {
            fullrange,
            freelist: SpinLock::new(None),
        }
    }

    /// Allocates a kernel virtual area.
    ///
    /// This is currently implemented with a simple FIRST-FIT algorithm.
    fn alloc(&self, size: usize) -> Result<Range<Vaddr>> {
        let mut lock_guard = self.freelist.lock();
        if lock_guard.is_none() {
            let mut freelist: BTreeMap<Vaddr, KVirtAreaFreeNode> = BTreeMap::new();
            freelist.insert(
                self.fullrange.start,
                KVirtAreaFreeNode::new(self.fullrange.clone()),
            );
            *lock_guard = Some(freelist);
        }
        let freelist = lock_guard.as_mut().unwrap();
        let mut allocate_range = None;
        let mut to_remove = None;

        for (key, value) in freelist.iter() {
            if value.block.end - value.block.start >= size {
                allocate_range = Some((value.block.end - size)..value.block.end);
                to_remove = Some(*key);
                break;
            }
        }

        if let Some(key) = to_remove {
            if let Some(freenode) = freelist.get_mut(&key) {
                if freenode.block.end - size == freenode.block.start {
                    freelist.remove(&key);
                } else {
                    freenode.block.end -= size;
                }
            }
        }

        if let Some(range) = allocate_range {
            Ok(range)
        } else {
            Err(Error::KVirtAreaAllocError)
        }
    }

    /// Frees a kernel virtual area.
    fn free(&self, range: Range<Vaddr>) {
        let mut lock_guard = self.freelist.lock();
        let freelist = lock_guard.as_mut().unwrap_or_else(|| {
            panic!("Free a 'KVirtArea' when 'VirtAddrAllocator' has not been initialized.")
        });
        // 1. get the previous free block, check if we can merge this block with the free one
        //    - if contiguous, merge this area with the free block.
        //    - if not contiguous, create a new free block, insert it into the list.
        let mut free_range = range.clone();

        if let Some((prev_va, prev_node)) = freelist
            .upper_bound_mut(core::ops::Bound::Excluded(&free_range.start))
            .peek_prev()
        {
            if prev_node.block.end == free_range.start {
                let prev_va = *prev_va;
                free_range.start = prev_node.block.start;
                freelist.remove(&prev_va);
            }
        }
        freelist.insert(free_range.start, KVirtAreaFreeNode::new(free_range.clone()));

        // 2. check if we can merge the current block with the next block, if we can, do so.
        if let Some((next_va, next_node)) = freelist
            .lower_bound_mut(core::ops::Bound::Excluded(&free_range.start))
            .peek_next()
        {
            if free_range.end == next_node.block.start {
                let next_va = *next_va;
                free_range.end = next_node.block.end;
                freelist.remove(&next_va);
                freelist.get_mut(&free_range.start).unwrap().block.end = free_range.end;
            }
        }
    }
}
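The allocator above hands out ranges by carving them off the tail of the first free block that is large enough, and `free` coalesces the returned range with its neighbors before reinserting it. The following standalone userspace model of that policy (plain `usize` addresses, `std` collections, and illustrative constants in `main`; a sketch of the algorithm, not the kernel code itself) can be compiled and run on its own:

use std::collections::BTreeMap;
use std::ops::Range;

/// A simplified model of the first-fit allocator with coalescing free.
struct FreeList {
    free: BTreeMap<usize, Range<usize>>, // free blocks, keyed by their start address
}

impl FreeList {
    fn new(full: Range<usize>) -> Self {
        let mut free = BTreeMap::new();
        free.insert(full.start, full);
        Self { free }
    }

    /// First fit: carve `size` bytes off the end of the first block that is large enough.
    fn alloc(&mut self, size: usize) -> Option<Range<usize>> {
        let key = self
            .free
            .iter()
            .find(|(_, b)| b.end - b.start >= size)
            .map(|(k, _)| *k)?;
        let block = self.free.get_mut(&key).unwrap();
        let allocated = (block.end - size)..block.end;
        if block.end - size == block.start {
            self.free.remove(&key); // the block is used up entirely
        } else {
            block.end -= size; // shrink the block from its tail
        }
        Some(allocated)
    }

    /// Free with coalescing: merge with the previous and next blocks when contiguous.
    fn free(&mut self, mut range: Range<usize>) {
        let prev = self
            .free
            .range(..range.start)
            .next_back()
            .map(|(s, b)| (*s, b.end));
        if let Some((prev_start, prev_end)) = prev {
            if prev_end == range.start {
                range.start = self.free.remove(&prev_start).unwrap().start;
            }
        }
        let next = self.free.range(range.end..).next().map(|(s, _)| *s);
        if let Some(next_start) = next {
            if next_start == range.end {
                range.end = self.free.remove(&next_start).unwrap().end;
            }
        }
        self.free.insert(range.start, range);
    }
}

fn main() {
    let mut fl = FreeList::new(0x0..0x10_000);
    let a = fl.alloc(0x1000).unwrap(); // 0xf000..0x10000, carved from the tail
    let b = fl.alloc(0x2000).unwrap(); // 0xd000..0xf000
    fl.free(a);
    fl.free(b); // coalesces everything back into one free block
    assert_eq!(fl.free.len(), 1);
    assert_eq!(fl.free.values().next().unwrap().clone(), 0x0..0x10_000);
}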
static KVIRT_AREA_TRACKED_ALLOCATOR: VirtAddrAllocator =
    VirtAddrAllocator::new(TRACKED_MAPPED_PAGES_RANGE);
static KVIRT_AREA_UNTRACKED_ALLOCATOR: VirtAddrAllocator =
    VirtAddrAllocator::new(VMALLOC_VADDR_RANGE);

#[derive(Debug)]
pub struct Tracked;
#[derive(Debug)]
pub struct Untracked;

pub trait AllocatorSelector {
    fn select_allocator() -> &'static VirtAddrAllocator;
}

impl AllocatorSelector for Tracked {
    fn select_allocator() -> &'static VirtAddrAllocator {
        &KVIRT_AREA_TRACKED_ALLOCATOR
    }
}

impl AllocatorSelector for Untracked {
    fn select_allocator() -> &'static VirtAddrAllocator {
        &KVIRT_AREA_UNTRACKED_ALLOCATOR
    }
}

/// Kernel Virtual Area.
///
/// A tracked kernel virtual area (`KVirtArea<Tracked>`) manages a range of memory in
/// `TRACKED_MAPPED_PAGES_RANGE`. It can map an inner part or all of its virtual memory
/// to some physical tracked pages.
///
/// An untracked kernel virtual area (`KVirtArea<Untracked>`) manages a range of memory in
/// `VMALLOC_VADDR_RANGE`. It can map an inner part or all of its virtual memory to
/// some physical untracked pages.
#[derive(Debug)]
pub struct KVirtArea<M: AllocatorSelector + 'static> {
    range: Range<Vaddr>,
    phantom: PhantomData<M>,
}

impl<M: AllocatorSelector + 'static> KVirtArea<M> {
    pub fn new(size: usize) -> Self {
        let allocator = M::select_allocator();
        let range = allocator.alloc(size).unwrap();
        Self {
            range,
            phantom: PhantomData,
        }
    }

    pub fn start(&self) -> Vaddr {
        self.range.start
    }

    pub fn end(&self) -> Vaddr {
        self.range.end
    }

    pub fn range(&self) -> Range<Vaddr> {
        self.range.start..self.range.end
    }

    pub fn len(&self) -> usize {
        self.range.len()
    }

    fn query_page(&self, addr: Vaddr) -> PageTableItem {
        assert!(self.start() <= addr && self.end() >= addr);
        let start = addr.align_down(PAGE_SIZE);
        let vaddr = start..start + PAGE_SIZE;
        let page_table = KERNEL_PAGE_TABLE.get().unwrap();
        let mut cursor = page_table.cursor(&vaddr).unwrap();
        cursor.query().unwrap()
    }
}
impl KVirtArea<Tracked> {
    /// Maps pages into the kernel virtual area.
    pub fn map_pages<T: PageMeta>(
        &mut self,
        range: Range<Vaddr>,
        pages: impl Iterator<Item = Page<T>>,
        prop: PageProperty,
    ) {
        assert!(self.start() <= range.start && self.end() >= range.end);
        let page_table = KERNEL_PAGE_TABLE.get().unwrap();
        let mut cursor = page_table.cursor_mut(&range).unwrap();
        let flusher = TlbFlusher::new(CpuSet::new_full(), disable_preempt());
        let mut va = self.start();
        for page in pages.into_iter() {
            // SAFETY: The constructor of the `KVirtArea<Tracked>` structure has already ensured this
            // mapping does not affect kernel's memory safety.
            if let Some(old) = unsafe { cursor.map(page.into(), prop) } {
                flusher.issue_tlb_flush_with(TlbFlushOp::Address(va), old);
                flusher.dispatch_tlb_flush();
            }
            va += PAGE_SIZE;
        }
        flusher.issue_tlb_flush(TlbFlushOp::Range(range));
        flusher.dispatch_tlb_flush();
    }

    /// Gets the mapped tracked page.
    ///
    /// This function returns None if the address is not mapped (`NotMapped`),
    /// and panics if the address is mapped to a `MappedUntracked` or `PageTableNode` page.
    pub fn get_page(&self, addr: Vaddr) -> Option<DynPage> {
        let query_result = self.query_page(addr);
        match query_result {
            PageTableItem::Mapped {
                va: _,
                page,
                prop: _,
            } => Some(page),
            PageTableItem::NotMapped { .. } => None,
            _ => {
                panic!(
                    "Found '{:?}' mapped into tracked `KVirtArea`, expected `Mapped`",
                    query_result
                );
            }
        }
    }
}

impl KVirtArea<Untracked> {
    /// Maps untracked pages into the kernel virtual area.
    ///
    /// `pa_range.start` and `pa_range.end` should be aligned to PAGE_SIZE.
    ///
    /// # Safety
    ///
    /// The caller should ensure that
    /// - the range being mapped does not affect kernel's memory safety;
    /// - the physical address to be mapped is valid and safe to use;
    /// - it is allowed to map untracked pages in this virtual address range.
    pub unsafe fn map_untracked_pages(
        &mut self,
        va_range: Range<Vaddr>,
        pa_range: Range<Paddr>,
        prop: PageProperty,
    ) {
        assert!(pa_range.start % PAGE_SIZE == 0);
        assert!(pa_range.end % PAGE_SIZE == 0);
        assert!(va_range.len() == pa_range.len());
        assert!(self.start() <= va_range.start && self.end() >= va_range.end);

        let page_table = KERNEL_PAGE_TABLE.get().unwrap();
        let mut cursor = page_table.cursor_mut(&va_range).unwrap();
        let flusher = TlbFlusher::new(CpuSet::new_full(), disable_preempt());
        // SAFETY: The caller of `map_untracked_pages` has ensured the safety of this mapping.
        unsafe {
            cursor.map_pa(&pa_range, prop);
        }
        flusher.issue_tlb_flush(TlbFlushOp::Range(va_range.clone()));
        flusher.dispatch_tlb_flush();
    }

    /// Gets the mapped untracked page.
    ///
    /// This function returns None if the address is not mapped (`NotMapped`),
    /// and panics if the address is mapped to a `Mapped` or `PageTableNode` page.
    pub fn get_untracked_page(&self, addr: Vaddr) -> Option<(Paddr, usize)> {
        let query_result = self.query_page(addr);
        match query_result {
            PageTableItem::MappedUntracked {
                va: _,
                pa,
                len,
                prop: _,
            } => Some((pa, len)),
            PageTableItem::NotMapped { .. } => None,
            _ => {
                panic!(
                    "Found '{:?}' mapped into untracked `KVirtArea`, expected `MappedUntracked`",
                    query_result
                );
            }
        }
    }
}
impl<M: AllocatorSelector + 'static> Drop for KVirtArea<M> {
    fn drop(&mut self) {
        // 1. unmap all mapped pages.
        let page_table = KERNEL_PAGE_TABLE.get().unwrap();
        let range = self.start()..self.end();
        let mut cursor = page_table.cursor_mut(&range).unwrap();
        let flusher = TlbFlusher::new(CpuSet::new_full(), disable_preempt());
        let tlb_prefer_flush_all = self.end() - self.start() > FLUSH_ALL_RANGE_THRESHOLD;

        loop {
            let result = unsafe { cursor.take_next(self.end() - cursor.virt_addr()) };
            match result {
                PageTableItem::Mapped { va, page, .. } => match TypeId::of::<M>() {
                    id if id == TypeId::of::<Tracked>() => {
                        if !flusher.need_remote_flush() && tlb_prefer_flush_all {
                            // Only in single-CPU cases can we drop the page immediately before flushing.
                            drop(page);
                            continue;
                        }
                        flusher.issue_tlb_flush_with(TlbFlushOp::Address(va), page);
                    }
                    id if id == TypeId::of::<Untracked>() => {
                        panic!("Found tracked memory mapped into untracked `KVirtArea`");
                    }
                    _ => panic!("Unexpected `KVirtArea` type"),
                },
                PageTableItem::MappedUntracked { va, .. } => match TypeId::of::<M>() {
                    id if id == TypeId::of::<Untracked>() => {
                        if !flusher.need_remote_flush() && tlb_prefer_flush_all {
                            continue;
                        }
                        flusher.issue_tlb_flush(TlbFlushOp::Address(va));
                    }
                    id if id == TypeId::of::<Tracked>() => {
                        panic!("Found untracked memory mapped into tracked `KVirtArea`");
                    }
                    _ => panic!("Unexpected `KVirtArea` type"),
                },
                PageTableItem::NotMapped { .. } => {
                    break;
                }
                PageTableItem::PageTableNode { .. } => {
                    panic!("Found page table node in `KVirtArea`");
                }
            }
        }

        if !flusher.need_remote_flush() && tlb_prefer_flush_all {
            flusher.issue_tlb_flush(TlbFlushOp::All);
        }

        flusher.dispatch_tlb_flush();

        // 2. free the virtual block
        let allocator = M::select_allocator();
        allocator.free(range);
    }
}
@@ -20,9 +20,9 @@
//! +-+ <- 0xffff_e100_0000_0000
//! | |  For frame metadata, 1 TiB. Mapped frames are untracked.
//! +-+ <- 0xffff_e000_0000_0000
//! | |  For [`kva::Kva`], 16 TiB. Mapped pages are tracked with handles.
//! | |  For [`KVirtArea<Tracked>`], 16 TiB. Mapped pages are tracked with handles.
//! +-+ <- 0xffff_d000_0000_0000
//! | |  For [`kva::Kva`], 16 TiB. Mapped pages are untracked.
//! | |  For [`KVirtArea<Untracked>`], 16 TiB. Mapped pages are untracked.
//! +-+ <- the middle of the higher half (0xffff_c000_0000_0000)
//! | |
//! | |

@@ -38,7 +38,7 @@
//! If the address width is (according to [`crate::arch::mm::PagingConsts`])
//! 39 bits or 57 bits, the memory space just adjusts proportionally.

pub(crate) mod kva;
pub(crate) mod kvirt_area;

use alloc::vec::Vec;
use core::ops::Range;

@@ -114,7 +114,7 @@ pub fn paddr_to_vaddr(pa: Paddr) -> usize {
///
/// About what is tracked mapping, see [`crate::mm::page::meta::MapTrackingStatus`].
pub(crate) fn should_map_as_tracked(addr: Vaddr) -> bool {
    !LINEAR_MAPPING_VADDR_RANGE.contains(&addr)
    !(LINEAR_MAPPING_VADDR_RANGE.contains(&addr) || VMALLOC_VADDR_RANGE.contains(&addr))
}

/// The kernel page table instance.
@@ -2,8 +2,9 @@

use crate::{
    mm::{
        kspace::kva::Kva,
        kspace::kvirt_area::{KVirtArea, Tracked},
        page::{allocator, meta::KernelStackMeta},
        page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
        PAGE_SIZE,
    },
    prelude::*,

@@ -26,25 +27,30 @@ pub const DEFAULT_STACK_SIZE_IN_PAGES: u32 = 128;
pub static KERNEL_STACK_SIZE: usize = STACK_SIZE_IN_PAGES as usize * PAGE_SIZE;

#[derive(Debug)]
#[allow(dead_code)]
pub struct KernelStack {
    kva: Kva,
    kvirt_area: KVirtArea<Tracked>,
    end_vaddr: Vaddr,
    has_guard_page: bool,
}

impl KernelStack {
    /// Generates a kernel stack with a guard page.
    /// An additional page is allocated and be regarded as a guard page, which should not be accessed.
    /// Generates a kernel stack with guard pages.
    /// 4 additional pages are allocated and regarded as guard pages, which should not be accessed.
    pub fn new_with_guard_page() -> Result<Self> {
        let mut new_kva = Kva::new(KERNEL_STACK_SIZE + 4 * PAGE_SIZE);
        let mapped_start = new_kva.range().start + 2 * PAGE_SIZE;
        let mut new_kvirt_area = KVirtArea::<Tracked>::new(KERNEL_STACK_SIZE + 4 * PAGE_SIZE);
        let mapped_start = new_kvirt_area.range().start + 2 * PAGE_SIZE;
        let mapped_end = mapped_start + KERNEL_STACK_SIZE;
        let pages = allocator::alloc(KERNEL_STACK_SIZE, |_| KernelStackMeta::default()).unwrap();
        unsafe {
            new_kva.map_pages(mapped_start..mapped_end, pages);
        }
        let prop = PageProperty {
            flags: PageFlags::RW,
            cache: CachePolicy::Writeback,
            priv_flags: PrivilegedPageFlags::empty(),
        };
        new_kvirt_area.map_pages(mapped_start..mapped_end, pages.iter().cloned(), prop);

        Ok(Self {
            kva: new_kva,
            kvirt_area: new_kvirt_area,
            end_vaddr: mapped_end,
            has_guard_page: true,
        })