diff --git a/kernel/src/error.rs b/kernel/src/error.rs
index 204af7261..21c6283cd 100644
--- a/kernel/src/error.rs
+++ b/kernel/src/error.rs
@@ -199,6 +199,7 @@ impl From<ostd::Error> for Error {
             ostd::Error::PageFault => Error::new(Errno::EFAULT),
             ostd::Error::Overflow => Error::new(Errno::EOVERFLOW),
             ostd::Error::MapAlreadyMappedVaddr => Error::new(Errno::EINVAL),
+            ostd::Error::KvaAllocError => Error::new(Errno::ENOMEM),
         }
     }
 }
diff --git a/ostd/src/error.rs b/ostd/src/error.rs
index 01d10a0e6..188aa7830 100644
--- a/ostd/src/error.rs
+++ b/ostd/src/error.rs
@@ -21,6 +21,8 @@ pub enum Error {
     Overflow,
     /// Memory mapping already exists for the given virtual address.
     MapAlreadyMappedVaddr,
+    /// Error when allocating kernel virtual memory.
+    KvaAllocError,
 }
 
 impl From<PageTableError> for Error {
diff --git a/ostd/src/mm/kspace/kva.rs b/ostd/src/mm/kspace/kva.rs
new file mode 100644
index 000000000..3e6fee57b
--- /dev/null
+++ b/ostd/src/mm/kspace/kva.rs
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: MPL-2.0
+
+//! Kernel virtual memory allocation
+
+use alloc::{collections::BTreeMap, vec::Vec};
+use core::ops::{DerefMut, Range};
+
+use align_ext::AlignExt;
+
+use super::{KERNEL_PAGE_TABLE, TRACKED_MAPPED_PAGES_RANGE};
+use crate::{
+    arch::mm::tlb_flush_addr_range,
+    mm::{
+        page::{
+            meta::{PageMeta, PageUsage},
+            Page,
+        },
+        page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
+        page_table::PageTableItem,
+        Vaddr, PAGE_SIZE,
+    },
+    sync::SpinLock,
+    Error, Result,
+};
+pub(crate) use lazy_static::lazy_static;
+
+pub struct KvaFreeNode {
+    block: Range<Vaddr>,
+}
+
+impl KvaFreeNode {
+    pub(crate) const fn new(range: Range<Vaddr>) -> Self {
+        Self { block: range }
+    }
+}
+
+pub struct VirtAddrAllocator {
+    freelist: BTreeMap<Vaddr, KvaFreeNode>,
+}
+
+impl VirtAddrAllocator {
+    fn new(range: Range<Vaddr>) -> Self {
+        let mut freelist: BTreeMap<Vaddr, KvaFreeNode> = BTreeMap::new();
+        freelist.insert(range.start, KvaFreeNode::new(range));
+        Self { freelist }
+    }
+
+    /// Allocates a kernel virtual area.
+    ///
+    /// This is currently implemented with a simple FIRST-FIT algorithm.
+    fn alloc(&mut self, size: usize) -> Result<Range<Vaddr>> {
+        let mut allocate_range = None;
+        let mut to_remove = None;
+
+        for (key, value) in self.freelist.iter() {
+            if value.block.end - value.block.start >= size {
+                allocate_range = Some((value.block.end - size)..value.block.end);
+                to_remove = Some(*key);
+                break;
+            }
+        }
+
+        if let Some(key) = to_remove {
+            if let Some(freenode) = self.freelist.get_mut(&key) {
+                if freenode.block.end - size == freenode.block.start {
+                    self.freelist.remove(&key);
+                } else {
+                    freenode.block.end -= size;
+                }
+            }
+        }
+
+        if let Some(range) = allocate_range {
+            Ok(range)
+        } else {
+            Err(Error::KvaAllocError)
+        }
+    }
+
+    /// Frees a kernel virtual area.
+    fn free(&mut self, range: Range<Vaddr>) {
+        // 1. Get the previous free block and check whether it is contiguous
+        //    with the freed range:
+        //    - if contiguous, merge the freed range into that block;
+        //    - if not, create a new free block and insert it into the list.
+        // 2. Check whether the freed block can be merged with the next block,
+        //    and merge them if so.
+        self.freelist.insert(range.start, KvaFreeNode::new(range));
+        todo!();
+    }
+}
+
+lazy_static! {
+    pub static ref KVA_ALLOCATOR: SpinLock<VirtAddrAllocator> =
+        SpinLock::new(VirtAddrAllocator::new(TRACKED_MAPPED_PAGES_RANGE));
+}
+
+#[derive(Debug)]
+pub struct Kva(Range<Vaddr>);
+
+impl Kva {
+    // static KVA_FREELIST_2: SpinLock<BTreeMap<Vaddr, KvaFreeNode>> = SpinLock::new(BTreeMap::new());
+
+    pub fn new(size: usize) -> Self {
+        let mut lock_guard = KVA_ALLOCATOR.lock();
+        let var = lock_guard.deref_mut().alloc(size).unwrap();
+        Kva(var)
+    }
+
+    pub fn start(&self) -> Vaddr {
+        self.0.start
+    }
+
+    pub fn end(&self) -> Vaddr {
+        self.0.end
+    }
+
+    pub fn range(&self) -> Range<Vaddr> {
+        self.0.start..self.0.end
+    }
+
+    /// Maps pages into the kernel virtual area.
+    ///
+    /// # Safety
+    ///
+    /// The caller should ensure that neither the mapped pages nor the range to be used
+    /// violates the memory safety of kernel objects.
+    pub unsafe fn map_pages<T: PageMeta>(&mut self, range: Range<Vaddr>, pages: Vec<Page<T>>) {
+        assert!(range.len() == pages.len() * PAGE_SIZE);
+        assert!(self.start() <= range.start && self.end() >= range.end);
+        let page_table = KERNEL_PAGE_TABLE.get().unwrap();
+        let prop = PageProperty {
+            flags: PageFlags::RW,
+            cache: CachePolicy::Writeback,
+            priv_flags: PrivilegedPageFlags::GLOBAL,
+        };
+        let mut cursor = page_table.cursor_mut(&range).unwrap();
+        for page in pages.into_iter() {
+            cursor.map(page.into(), prop);
+        }
+        tlb_flush_addr_range(&range);
+    }
+
+    /// Returns the usage type of the page mapped at the virtual address `addr`.
+    ///
+    /// This function panics in the following cases:
+    /// * the address is not mapped (`NotMapped`);
+    /// * the address is mapped to a `MappedUntracked` page.
+    pub fn get_page_type(&self, addr: Vaddr) -> PageUsage {
+        assert!(self.start() <= addr && self.end() >= addr);
+        let start = addr.align_down(PAGE_SIZE);
+        let vaddr = start..start + PAGE_SIZE;
+        let page_table = KERNEL_PAGE_TABLE.get().unwrap();
+        let mut cursor = page_table.cursor(&vaddr).unwrap();
+        let query_result = cursor.query().unwrap();
+        match query_result {
+            PageTableItem::Mapped {
+                va: _,
+                page,
+                prop: _,
+            } => page.usage(),
+            _ => {
+                panic!(
+                    "Unexpected query result: Expected 'Mapped', found '{:?}'",
+                    query_result
+                );
+            }
+        }
+    }
+
+    /// Gets the page mapped at the virtual address `addr`.
+    ///
+    /// This function panics in the following cases:
+    /// * the provided page type doesn't match the actual mapped one;
+    /// * the address is not mapped (`NotMapped`);
+    /// * the address is mapped to a `MappedUntracked` page.
+    pub fn get_page<T: PageMeta>(&self, addr: Vaddr) -> Result<Page<T>> {
+        assert!(self.start() <= addr && self.end() >= addr);
+        let start = addr.align_down(PAGE_SIZE);
+        let vaddr = start..start + PAGE_SIZE;
+        let page_table = KERNEL_PAGE_TABLE.get().unwrap();
+        let mut cursor = page_table.cursor(&vaddr).unwrap();
+        let query_result = cursor.query().unwrap();
+        match query_result {
+            PageTableItem::Mapped {
+                va: _,
+                page,
+                prop: _,
+            } => {
+                let result = Page::<T>::try_from(page);
+                if let Ok(page) = result {
+                    Ok(page)
+                } else {
+                    panic!("the provided page type doesn't match the actual mapped one");
+                }
+            }
+            _ => {
+                panic!(
+                    "Unexpected query result: Expected 'Mapped', found '{:?}'",
+                    query_result
+                );
+            }
+        }
+    }
+}
+
+impl Drop for Kva {
+    fn drop(&mut self) {
+        // 1. Unmap all mapped pages.
+        let page_table = KERNEL_PAGE_TABLE.get().unwrap();
+        let range = self.start()..self.end();
+        let mut cursor = page_table.cursor_mut(&range).unwrap();
+        unsafe {
+            cursor.unmap(range.len());
+        }
+        tlb_flush_addr_range(&range);
+        // 2. Free the virtual block.
+        let mut lock_guard = KVA_ALLOCATOR.lock();
+        lock_guard.deref_mut().free(range);
+    }
+}
\ No newline at end of file
diff --git a/ostd/src/mm/kspace.rs b/ostd/src/mm/kspace/mod.rs
similarity index 90%
rename from ostd/src/mm/kspace.rs
rename to ostd/src/mm/kspace/mod.rs
index c2428f435..45f5d32a7 100644
--- a/ostd/src/mm/kspace.rs
+++ b/ostd/src/mm/kspace/mod.rs
@@ -17,17 +17,17 @@
 //! +-+ <- 0xffff_ffff_8000_0000
 //! | |
 //! | |  Unused hole.
-//! +-+ <- 0xffff_ff00_0000_0000
-//! | |  For frame metadata, 1 TiB.
-//! | |  Mapped frames are tracked with handles.
-//! +-+ <- 0xffff_fe00_0000_0000
-//! | |  For vm alloc/io mappings, 1 TiB.
-//! | |  Mapped frames are tracked with handles.
-//! +-+ <- 0xffff_fd00_0000_0000
+//! +-+ <- 0xffff_e100_0000_0000
+//! | |  For frame metadata, 1 TiB. Mapped frames are untracked.
+//! +-+ <- 0xffff_e000_0000_0000
+//! | |  For [`kva::Kva`], 16 TiB. Mapped pages are tracked with handles.
+//! +-+ <- 0xffff_d000_0000_0000
+//! | |  For [`kva::Kva`], 16 TiB. Mapped pages are untracked.
+//! +-+ <- the middle of the higher half (0xffff_c000_0000_0000)
 //! | |
 //! | |
 //! | |
-//! | |  For linear mappings.
+//! | |  For linear mappings, 64 TiB.
 //! | |  Mapped physical addresses are untracked.
 //! | |
 //! | |
@@ -38,6 +38,8 @@
 //! If the address width is (according to [`crate::arch::mm::PagingConsts`])
 //! 39 bits or 57 bits, the memory space just adjust proportionally.
 
+pub(crate) mod kva;
+
 use alloc::vec::Vec;
 use core::ops::Range;
 
@@ -85,13 +87,17 @@ const KERNEL_CODE_BASE_VADDR: usize = 0xffff_ffff_8000_0000 << ADDR_WIDTH_SHIFT;
 #[cfg(target_arch = "riscv64")]
 const KERNEL_CODE_BASE_VADDR: usize = 0xffff_ffff_0000_0000 << ADDR_WIDTH_SHIFT;
 
-const FRAME_METADATA_CAP_VADDR: Vaddr = 0xffff_ff00_0000_0000 << ADDR_WIDTH_SHIFT;
-const FRAME_METADATA_BASE_VADDR: Vaddr = 0xffff_fe00_0000_0000 << ADDR_WIDTH_SHIFT;
+const FRAME_METADATA_CAP_VADDR: Vaddr = 0xffff_e100_0000_0000 << ADDR_WIDTH_SHIFT;
+const FRAME_METADATA_BASE_VADDR: Vaddr = 0xffff_e000_0000_0000 << ADDR_WIDTH_SHIFT;
 pub(in crate::mm) const FRAME_METADATA_RANGE: Range<Vaddr> =
     FRAME_METADATA_BASE_VADDR..FRAME_METADATA_CAP_VADDR;
 
-const VMALLOC_BASE_VADDR: Vaddr = 0xffff_fd00_0000_0000 << ADDR_WIDTH_SHIFT;
-pub const VMALLOC_VADDR_RANGE: Range<Vaddr> = VMALLOC_BASE_VADDR..FRAME_METADATA_BASE_VADDR;
+const TRACKED_MAPPED_PAGES_BASE_VADDR: Vaddr = 0xffff_d000_0000_0000 << ADDR_WIDTH_SHIFT;
+pub const TRACKED_MAPPED_PAGES_RANGE: Range<Vaddr> =
+    TRACKED_MAPPED_PAGES_BASE_VADDR..FRAME_METADATA_BASE_VADDR;
+
+const VMALLOC_BASE_VADDR: Vaddr = 0xffff_c000_0000_0000 << ADDR_WIDTH_SHIFT;
+pub const VMALLOC_VADDR_RANGE: Range<Vaddr> = VMALLOC_BASE_VADDR..TRACKED_MAPPED_PAGES_BASE_VADDR;
 
 /// The base address of the linear mapping of all physical
 /// memory in the kernel address space.
diff --git a/ostd/src/mm/page/meta.rs b/ostd/src/mm/page/meta.rs
index 89480b866..7d92dd7cf 100644
--- a/ostd/src/mm/page/meta.rs
+++ b/ostd/src/mm/page/meta.rs
@@ -80,6 +80,9 @@
     Meta = 65,
     /// The page stores the kernel such as kernel code, data, etc.
     Kernel = 66,
+
+    /// The page stores data for a kernel stack.
+    KernelStack = 67,
 }
 
 #[repr(C)]
@@ -268,6 +271,16 @@ impl PageMeta for KernelMeta {
     }
 }
 
+#[derive(Debug, Default)]
+#[repr(C)]
+pub struct KernelStackMeta {}
+
+impl Sealed for KernelStackMeta {}
+impl PageMeta for KernelStackMeta {
+    const USAGE: PageUsage = PageUsage::KernelStack;
+    fn on_drop(_page: &mut Page<Self>) {}
+}
+
 // ======== End of all the specific metadata structures definitions ===========
 
 /// Initializes the metadata of all physical pages.
diff --git a/ostd/src/mm/page/mod.rs b/ostd/src/mm/page/mod.rs
index ee0057244..3b5381647 100644
--- a/ostd/src/mm/page/mod.rs
+++ b/ostd/src/mm/page/mod.rs
@@ -318,6 +318,9 @@
             PageUsage::PageTable => {
                 meta::drop_as_last::<PageTablePageMeta>(self.ptr);
             }
+            PageUsage::KernelStack => {
+                meta::drop_as_last::<KernelStackMeta>(self.ptr);
+            }
             // The following pages don't have metadata and can't be dropped.
             PageUsage::Unused
             | PageUsage::Reserved
diff --git a/ostd/src/task/kernel_stack.rs b/ostd/src/task/kernel_stack.rs
index bf2416a8f..06a391961 100644
--- a/ostd/src/task/kernel_stack.rs
+++ b/ostd/src/task/kernel_stack.rs
@@ -1,7 +1,11 @@
 // SPDX-License-Identifier: MPL-2.0
 
 use crate::{
-    mm::{kspace::KERNEL_PAGE_TABLE, FrameAllocOptions, Paddr, PageFlags, Segment, PAGE_SIZE},
+    mm::{
+        kspace::kva::Kva,
+        page::{allocator, meta::KernelStackMeta},
+        PAGE_SIZE,
+    },
     prelude::*,
 };
 
@@ -19,59 +23,35 @@ pub static STACK_SIZE_IN_PAGES: u32 = parse_u32_or_default(
 /// The default kernel stack size of a task, specified in pages.
 pub const DEFAULT_STACK_SIZE_IN_PAGES: u32 = 128;
 
+pub static KERNEL_STACK_SIZE: usize = STACK_SIZE_IN_PAGES as usize * PAGE_SIZE;
+
 #[derive(Debug)]
 pub struct KernelStack {
-    segment: Segment,
+    kva: Kva,
+    end_vaddr: Vaddr,
     has_guard_page: bool,
 }
 
 impl KernelStack {
     /// Generates a kernel stack with a guard page.
-    /// An additional page is allocated and be regarded as a guard page, which should not be accessed.
+    /// Additional pages are reserved as guard pages and should not be accessed.
     pub fn new_with_guard_page() -> Result<Self> {
-        let stack_segment =
-            FrameAllocOptions::new(STACK_SIZE_IN_PAGES as usize + 1).alloc_contiguous()?;
-        // FIXME: modifying the the linear mapping is bad.
-        let page_table = KERNEL_PAGE_TABLE.get().unwrap();
-        let guard_page_vaddr = {
-            let guard_page_paddr = stack_segment.start_paddr();
-            crate::mm::paddr_to_vaddr(guard_page_paddr)
-        };
-        // SAFETY: the segment allocated is not used by others so we can protect it.
+        let mut new_kva = Kva::new(KERNEL_STACK_SIZE + 4 * PAGE_SIZE);
+        let mapped_start = new_kva.range().start + 2 * PAGE_SIZE;
+        let mapped_end = mapped_start + KERNEL_STACK_SIZE;
+        let pages = allocator::alloc(KERNEL_STACK_SIZE, |_| KernelStackMeta::default()).unwrap();
         unsafe {
-            let vaddr_range = guard_page_vaddr..guard_page_vaddr + PAGE_SIZE;
-            page_table
-                .protect_flush_tlb(&vaddr_range, |p| p.flags -= PageFlags::RW)
-                .unwrap();
+            new_kva.map_pages(mapped_start..mapped_end, pages);
         }
         Ok(Self {
-            segment: stack_segment,
+            kva: new_kva,
+            end_vaddr: mapped_end,
             has_guard_page: true,
         })
     }
 
-    pub fn end_paddr(&self) -> Paddr {
-        self.segment.end_paddr()
-    }
-}
-
-impl Drop for KernelStack {
-    fn drop(&mut self) {
-        if self.has_guard_page {
-            // FIXME: modifying the the linear mapping is bad.
-            let page_table = KERNEL_PAGE_TABLE.get().unwrap();
-            let guard_page_vaddr = {
-                let guard_page_paddr = self.segment.start_paddr();
-                crate::mm::paddr_to_vaddr(guard_page_paddr)
-            };
-            // SAFETY: the segment allocated is not used by others so we can protect it.
-            unsafe {
-                let vaddr_range = guard_page_vaddr..guard_page_vaddr + PAGE_SIZE;
-                page_table
-                    .protect_flush_tlb(&vaddr_range, |p| p.flags |= PageFlags::RW)
-                    .unwrap();
-            }
-        }
+    pub fn end_vaddr(&self) -> Vaddr {
+        self.end_vaddr
     }
 }
 
diff --git a/ostd/src/task/mod.rs b/ostd/src/task/mod.rs
index bbb8d7409..7565e7eb9 100644
--- a/ostd/src/task/mod.rs
+++ b/ostd/src/task/mod.rs
@@ -178,8 +178,7 @@ impl TaskOptions {
         // to at least 16 bytes. And a larger alignment is needed if larger arguments
         // are passed to the function. The `kernel_task_entry` function does not
        // have any arguments, so we only need to align the stack pointer to 16 bytes.
-        ctx.get_mut()
-            .set_stack_pointer(crate::mm::paddr_to_vaddr(kstack.end_paddr() - 16));
+        ctx.get_mut().set_stack_pointer(kstack.end_vaddr() - 16);
 
         let new_task = Task {
             func: SyncUnsafeCell::new(self.func),
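
Note (not part of the patch): a minimal sketch of how the new `Kva` API is intended to be used, mirroring `KernelStack::new_with_guard_page` above. It assumes code living inside the `ostd` crate (the `kva` module is `pub(crate)`), and the helper name `alloc_guarded_stack` is hypothetical; the size must be a multiple of `PAGE_SIZE`.

use crate::mm::{
    kspace::kva::Kva,
    page::{allocator, meta::KernelStackMeta},
    Vaddr, PAGE_SIZE,
};

/// Hypothetical helper: reserves a kernel virtual area with unmapped guard
/// pages on both sides and maps freshly allocated pages into the middle.
/// Returns the area and the end address of the mapped (usable) region.
fn alloc_guarded_stack(stack_size: usize) -> (Kva, Vaddr) {
    // Reserve extra virtual space; the two pages below and above the mapped
    // region stay unmapped and fault on stack overflow or underflow.
    let mut kva = Kva::new(stack_size + 4 * PAGE_SIZE);
    let mapped_start = kva.range().start + 2 * PAGE_SIZE;
    let mapped_end = mapped_start + stack_size;
    // Back only the middle of the area with pages tagged as kernel-stack pages.
    let pages = allocator::alloc(stack_size, |_| KernelStackMeta::default()).unwrap();
    // SAFETY: the area was just reserved and the pages are not shared, so
    // mapping them here cannot violate the safety of other kernel objects.
    unsafe { kva.map_pages(mapped_start..mapped_end, pages) };
    (kva, mapped_end)
}

Dropping the returned `Kva` unmaps the pages and hands the virtual range back to `KVA_ALLOCATOR` via `VirtAddrAllocator::free`, which in this patch is still a `todo!()` stub as far as coalescing free blocks is concerned.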