Introduce kernel virtual memory allocation for kernel stack

Ni Shirong authored on 2024-09-24 06:09:15 +00:00; committed by Tate, Hongliang Tian
parent 03906513aa
commit 29eb37c07c
8 changed files with 270 additions and 53 deletions
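
In short, a kernel stack is no longer carved out of physically contiguous frames in the linear mapping with one frame made read-only to act as a guard page. Instead, every stack gets its own kernel virtual area (Kva) of KERNEL_STACK_SIZE + 4 * PAGE_SIZE bytes, of which only the middle KERNEL_STACK_SIZE bytes are backed by pages, leaving two unmapped guard pages below and two above the stack. The snippet below is a minimal, self-contained sketch of that layout arithmetic; PAGE_SIZE, the stack size, and the starting address are illustrative values, not the kernel's actual configuration.

    const PAGE_SIZE: usize = 4096;
    const STACK_SIZE_IN_PAGES: usize = 128;
    const KERNEL_STACK_SIZE: usize = STACK_SIZE_IN_PAGES * PAGE_SIZE;

    fn main() {
        // Pretend the Kva allocator handed back this virtual range
        // (the address is made up for illustration).
        let kva_start: usize = 0xffff_a000_0000_0000;
        let kva_size = KERNEL_STACK_SIZE + 4 * PAGE_SIZE;

        // Same arithmetic as new_with_guard_page() in the diff below:
        // skip two pages, map KERNEL_STACK_SIZE bytes, leave two pages unmapped.
        let mapped_start = kva_start + 2 * PAGE_SIZE;
        let mapped_end = mapped_start + KERNEL_STACK_SIZE;
        assert_eq!(mapped_end + 2 * PAGE_SIZE, kva_start + kva_size);

        println!("lower guard: {:#x}..{:#x}", kva_start, mapped_start);
        println!("stack:       {:#x}..{:#x}", mapped_start, mapped_end);
        println!("upper guard: {:#x}..{:#x}", mapped_end, kva_start + kva_size);
    }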

@@ -1,7 +1,11 @@
 // SPDX-License-Identifier: MPL-2.0
 use crate::{
-    mm::{kspace::KERNEL_PAGE_TABLE, FrameAllocOptions, Paddr, PageFlags, Segment, PAGE_SIZE},
+    mm::{
+        kspace::kva::Kva,
+        page::{allocator, meta::KernelStackMeta},
+        PAGE_SIZE,
+    },
     prelude::*,
 };
@@ -19,59 +23,35 @@ pub static STACK_SIZE_IN_PAGES: u32 = parse_u32_or_default(
 /// The default kernel stack size of a task, specified in pages.
 pub const DEFAULT_STACK_SIZE_IN_PAGES: u32 = 128;
 pub static KERNEL_STACK_SIZE: usize = STACK_SIZE_IN_PAGES as usize * PAGE_SIZE;
 #[derive(Debug)]
 pub struct KernelStack {
-    segment: Segment,
+    kva: Kva,
+    end_vaddr: Vaddr,
     has_guard_page: bool,
 }
 impl KernelStack {
     /// Generates a kernel stack with a guard page.
     /// An additional page is allocated and regarded as a guard page, which should not be accessed.
     pub fn new_with_guard_page() -> Result<Self> {
-        let stack_segment =
-            FrameAllocOptions::new(STACK_SIZE_IN_PAGES as usize + 1).alloc_contiguous()?;
-        // FIXME: modifying the linear mapping is bad.
-        let page_table = KERNEL_PAGE_TABLE.get().unwrap();
-        let guard_page_vaddr = {
-            let guard_page_paddr = stack_segment.start_paddr();
-            crate::mm::paddr_to_vaddr(guard_page_paddr)
-        };
-        // SAFETY: the segment allocated is not used by others so we can protect it.
+        let mut new_kva = Kva::new(KERNEL_STACK_SIZE + 4 * PAGE_SIZE);
+        let mapped_start = new_kva.range().start + 2 * PAGE_SIZE;
+        let mapped_end = mapped_start + KERNEL_STACK_SIZE;
+        let pages = allocator::alloc(KERNEL_STACK_SIZE, |_| KernelStackMeta::default()).unwrap();
         unsafe {
-            let vaddr_range = guard_page_vaddr..guard_page_vaddr + PAGE_SIZE;
-            page_table
-                .protect_flush_tlb(&vaddr_range, |p| p.flags -= PageFlags::RW)
-                .unwrap();
+            new_kva.map_pages(mapped_start..mapped_end, pages);
         }
         Ok(Self {
-            segment: stack_segment,
+            kva: new_kva,
+            end_vaddr: mapped_end,
             has_guard_page: true,
         })
     }
-    pub fn end_paddr(&self) -> Paddr {
-        self.segment.end_paddr()
-    }
-}
-impl Drop for KernelStack {
-    fn drop(&mut self) {
-        if self.has_guard_page {
-            // FIXME: modifying the linear mapping is bad.
-            let page_table = KERNEL_PAGE_TABLE.get().unwrap();
-            let guard_page_vaddr = {
-                let guard_page_paddr = self.segment.start_paddr();
-                crate::mm::paddr_to_vaddr(guard_page_paddr)
-            };
-            // SAFETY: the segment allocated is not used by others so we can protect it.
-            unsafe {
-                let vaddr_range = guard_page_vaddr..guard_page_vaddr + PAGE_SIZE;
-                page_table
-                    .protect_flush_tlb(&vaddr_range, |p| p.flags |= PageFlags::RW)
-                    .unwrap();
-            }
-        }
+    pub fn end_vaddr(&self) -> Vaddr {
+        self.end_vaddr
     }
 }
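
Since the guard pages are now just unmapped holes in the Kva range, the old Drop impl that restored RW flags on the linear mapping is gone; unmapping the stack pages is presumably left to the Kva's own teardown. For orientation, task-creation code would consume the new API roughly as follows (a minimal sketch; the surrounding setup and error handling are omitted):

    let stack = KernelStack::new_with_guard_page()?;
    // The kernel stack grows downward, so the initial stack pointer for the
    // new task comes from the top of the mapped range.
    let stack_top: Vaddr = stack.end_vaddr();
    // Pushing past the bottom of the mapped range now hits an unmapped guard
    // page and faults, instead of silently corrupting adjacent memory.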