From 5cc9f250dd873a2b2874b6a44c23fad2613efceb Mon Sep 17 00:00:00 2001
From: Chen Chengjun
Date: Thu, 9 Nov 2023 15:13:39 +0800
Subject: [PATCH] Implement a guard page for the kernel stack

---
 framework/jinux-frame/src/task/task.rs     | 44 +++++++++++++++++++++-
 framework/jinux-frame/src/vm/page_table.rs | 23 +++++++++++----
 2 files changed, 57 insertions(+), 10 deletions(-)

diff --git a/framework/jinux-frame/src/task/task.rs b/framework/jinux-frame/src/task/task.rs
index 7cbc09f98..69aa10769 100644
--- a/framework/jinux-frame/src/task/task.rs
+++ b/framework/jinux-frame/src/task/task.rs
@@ -1,7 +1,9 @@
+use crate::arch::mm::PageTableFlags;
 use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE};
 use crate::cpu::CpuSet;
 use crate::prelude::*;
 use crate::user::UserSpace;
+use crate::vm::page_table::KERNEL_PAGE_TABLE;
 use crate::vm::{VmAllocOptions, VmSegment};
 use spin::{Mutex, MutexGuard};
 
@@ -39,6 +41,7 @@ extern "C" {
 
 pub struct KernelStack {
     segment: VmSegment,
+    old_guard_page_flag: Option<PageTableFlags>,
 }
 
 impl KernelStack {
@@ -47,12 +50,49 @@ impl KernelStack {
             segment: VmAllocOptions::new(KERNEL_STACK_SIZE / PAGE_SIZE)
                 .is_contiguous(true)
                 .alloc_contiguous()?,
+            old_guard_page_flag: None,
+        })
+    }
+
+    /// Generate a kernel stack with a guard page.
+    /// An additional page is allocated and regarded as a guard page, which should not be accessed.
+    pub fn new_with_guard_page() -> Result<Self> {
+        let stack_segment = VmAllocOptions::new(KERNEL_STACK_SIZE / PAGE_SIZE + 1)
+            .is_contiguous(true)
+            .alloc_contiguous()?;
+        let unpresent_flag = PageTableFlags::empty();
+        let old_guard_page_flag = Self::protect_guard_page(&stack_segment, unpresent_flag);
+        Ok(Self {
+            segment: stack_segment,
+            old_guard_page_flag: Some(old_guard_page_flag),
         })
     }
 
     pub fn end_paddr(&self) -> Paddr {
         self.segment.end_paddr()
     }
+
+    pub fn has_guard_page(&self) -> bool {
+        self.old_guard_page_flag.is_some()
+    }
+
+    fn protect_guard_page(stack_segment: &VmSegment, flags: PageTableFlags) -> PageTableFlags {
+        let mut kernel_pt = KERNEL_PAGE_TABLE.get().unwrap().lock();
+        let guard_page_vaddr = {
+            let guard_page_paddr = stack_segment.start_paddr();
+            crate::vm::paddr_to_vaddr(guard_page_paddr)
+        };
+        // Safety: The protected address must be the address of the guard page, hence it should be safe and valid.
+        unsafe { kernel_pt.protect(guard_page_vaddr, flags).unwrap() }
+    }
+}
+
+impl Drop for KernelStack {
+    fn drop(&mut self) {
+        if self.has_guard_page() {
+            Self::protect_guard_page(&self.segment, self.old_guard_page_flag.unwrap());
+        }
+    }
 }
 
 /// A task that executes a function to the end.
@@ -225,7 +265,7 @@ impl TaskOptions {
                 ctx: TaskContext::default(),
             }),
             exit_code: 0,
-            kstack: KernelStack::new()?,
+            kstack: KernelStack::new_with_guard_page()?,
             link: LinkedListAtomicLink::new(),
             priority: self.priority,
             cpu_affinity: self.cpu_affinity,
@@ -262,7 +302,7 @@ impl TaskOptions {
                 ctx: TaskContext::default(),
             }),
             exit_code: 0,
-            kstack: KernelStack::new()?,
+            kstack: KernelStack::new_with_guard_page()?,
             link: LinkedListAtomicLink::new(),
             priority: self.priority,
             cpu_affinity: self.cpu_affinity,
diff --git a/framework/jinux-frame/src/vm/page_table.rs b/framework/jinux-frame/src/vm/page_table.rs
index cb35086f2..d2109ee67 100644
--- a/framework/jinux-frame/src/vm/page_table.rs
+++ b/framework/jinux-frame/src/vm/page_table.rs
@@ -156,7 +156,7 @@ impl PageTable {
         unsafe { self.do_unmap(vaddr) }
     }
 
-    pub fn protect(&mut self, vaddr: Vaddr, flags: T::F) -> Result<(), PageTableError> {
+    pub fn protect(&mut self, vaddr: Vaddr, flags: T::F) -> Result<T::F, PageTableError> {
         if is_kernel_vaddr(vaddr) {
             return Err(PageTableError::InvalidVaddr);
         }
@@ -201,12 +201,13 @@ impl PageTable {
 
     /// Modify the flags mapped at `vaddr`. The `vaddr` should not be at the low address
     /// (memory belonging to the user mode program).
+    /// If the modification succeeds, it will return the old flags of `vaddr`.
     ///
     /// # Safety
    ///
     /// Modifying kernel mappings is considered unsafe, and incorrect operation may cause crashes.
     /// User must take care of the consequences when using this API.
-    pub unsafe fn protect(&mut self, vaddr: Vaddr, flags: T::F) -> Result<(), PageTableError> {
+    pub unsafe fn protect(&mut self, vaddr: Vaddr, flags: T::F) -> Result<T::F, PageTableError> {
         if is_user_vaddr(vaddr) {
             return Err(PageTableError::InvalidVaddr);
         }
@@ -319,22 +320,28 @@ impl PageTable {
         Ok(())
     }
 
-    /// Modify the flags mapped at `vaddr`
+    /// Modify the flags mapped at `vaddr`.
+    /// If the modification succeeds, it will return the old flags of `vaddr`.
     ///
     /// # Safety
     ///
     /// This function allows arbitrary modifications to the page table.
     /// Incorrect modifications may cause the kernel to crash
     /// (e.g., make the linear mapping visible to the user mode applications.).
-    unsafe fn do_protect(&mut self, vaddr: Vaddr, flags: T::F) -> Result<(), PageTableError> {
+    unsafe fn do_protect(&mut self, vaddr: Vaddr, new_flags: T::F) -> Result<T::F, PageTableError> {
         let last_entry = self.page_walk(vaddr, false).unwrap();
-        trace!("Page Table: Protect vaddr:{:x?}, flags:{:x?}", vaddr, flags);
-        if last_entry.is_unused() || !last_entry.flags().is_present() {
+        let old_flags = last_entry.flags();
+        trace!(
+            "Page Table: Protect vaddr:{:x?}, flags:{:x?}",
+            vaddr,
+            new_flags
+        );
+        if last_entry.is_unused() || !old_flags.is_present() {
             return Err(PageTableError::InvalidModification);
         }
-        last_entry.update(last_entry.paddr(), flags);
+        last_entry.update(last_entry.paddr(), new_flags);
         tlb_flush(vaddr);
-        Ok(())
+        Ok(old_flags)
     }
 
     pub fn root_paddr(&self) -> Paddr {
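
Note (editorial sketch, not part of the diff): the guard page is the lowest page of the enlarged segment. `new_with_guard_page` allocates `KERNEL_STACK_SIZE / PAGE_SIZE + 1` contiguous pages and clears the page-table flags of the first one, so the kernel stack, which grows downward, faults on the first access past its bottom instead of silently corrupting the memory below it. The layout can be pictured as follows; the concrete constants here are illustrative only and are not taken from jinux-frame's config:

    // Illustrative values only; the real PAGE_SIZE and KERNEL_STACK_SIZE live in
    // framework/jinux-frame/src/config.rs.
    const PAGE_SIZE: usize = 4096;
    const KERNEL_STACK_SIZE: usize = 16 * PAGE_SIZE;

    /// For a segment of KERNEL_STACK_SIZE / PAGE_SIZE + 1 pages starting at `segment_start`,
    /// the guard page is the lowest page and the usable stack occupies the pages above it.
    fn stack_layout(segment_start: usize) -> (core::ops::Range<usize>, core::ops::Range<usize>) {
        let guard_page = segment_start..segment_start + PAGE_SIZE;
        let usable_stack = guard_page.end..guard_page.end + KERNEL_STACK_SIZE;
        (guard_page, usable_stack)
    }

Because the frames are also reachable through the kernel's linear mapping, the Drop impl writes the saved flags back before the segment is released, presumably so that the guard frame is mapped normally again when it is reused.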
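Note (editorial sketch, not part of the diff): returning the old flags from `protect`/`do_protect` turns them into a save-and-restore primitive, which is how `KernelStack` uses the API. A minimal usage sketch, assuming a kernel page-table handle and a mapped kernel `vaddr` are in scope, with error handling elided:

    // Make the page inaccessible, remembering how it was mapped before.
    let old_flags = unsafe { kernel_pt.protect(vaddr, PageTableFlags::empty()).unwrap() };
    // ... any access to `vaddr` now page-faults ...
    // Restore the original mapping flags.
    unsafe { kernel_pt.protect(vaddr, old_flags).unwrap() };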