Add missing TLB flushes for the kernel stack guard pages

Author:       Zhang Junyang
Date:         2024-06-27 08:39:37 +00:00
Committed by: Tate, Hongliang Tian
Parent:       57ecfa2fec
Commit:       8cd8cf3938

2 changed files with 14 additions and 6 deletions
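
Why the flushes matter: the KernelStack constructor strips the RW permission from the
guard page (and Drop later restores it), but on x86 changing the page-table entry alone
is not enough. A translation cached in the TLB before the change can still be honored,
so a stale writable entry would let a stack overflow write straight through the "guard"
page. The hunks below therefore pair every protect() call with a flush of the affected
range. A minimal sketch of the pattern, using the x86_64 crate that the TLB helpers
below already build on; clear_writable_bit is a hypothetical placeholder, not a function
from this codebase:

use x86_64::{instructions::tlb, VirtAddr};

// Sketch only: make one page non-writable and make that change take effect.
fn downgrade_guard_page(page_vaddr: u64) {
    // 1) Clear the writable bit in the page-table entry.
    //    (Hypothetical helper; the commit does this via page_table.protect(..).)
    // clear_writable_bit(page_vaddr);

    // 2) Invalidate the stale translation. Without this invlpg, the CPU may keep
    //    using a writable mapping cached before step 1 until it happens to be
    //    evicted, which is exactly the window this commit closes.
    tlb::flush(VirtAddr::new(page_vaddr));
}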

Changed file 1 of 2: the x86 arch::mm module (TLB flush helpers)

@@ -63,20 +63,27 @@ bitflags::bitflags! {
     }
 }

+/// Flush any TLB entry that contains the map of the given virtual address.
+///
+/// This flush is performed regardless of the global-page bit, so it can flush
+/// both global and non-global entries.
 pub(crate) fn tlb_flush_addr(vaddr: Vaddr) {
     tlb::flush(VirtAddr::new(vaddr as u64));
 }

+/// Flush any TLB entry that intersects with the given address range.
 pub(crate) fn tlb_flush_addr_range(range: &Range<Vaddr>) {
     for vaddr in range.clone().step_by(PAGE_SIZE) {
         tlb_flush_addr(vaddr);
     }
 }

+/// Flush all TLB entries except for the global-page entries.
 pub(crate) fn tlb_flush_all_excluding_global() {
     tlb::flush_all();
 }

+/// Flush all TLB entries, including global-page entries.
 pub(crate) fn tlb_flush_all_including_global() {
     // SAFETY: updates to CR4 here only change the global-page bit; the side effect
     // is only to invalidate the TLB, which doesn't affect memory safety.

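For context on the truncated SAFETY comment above: flushing the TLB including global
entries is conventionally done by toggling CR4.PGE, since any write to CR4 that changes
the global-page bit invalidates all TLB entries, global ones included. A sketch of that
pattern with the x86_64 crate, offered as an illustration rather than the file's exact
code:

use x86_64::registers::control::{Cr4, Cr4Flags};

// Sketch: clearing CR4.PGE flushes every TLB entry, global pages included;
// writing the original value back re-enables global pages afterwards.
unsafe fn flush_all_including_global_sketch() {
    let cr4 = Cr4::read();
    Cr4::write(cr4 - Cr4Flags::PAGE_GLOBAL); // PGE 1 -> 0: full flush
    Cr4::write(cr4);                         // restore PGE
}
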
Changed file 2 of 2: the task module that defines KernelStack

@@ -16,6 +16,7 @@ use super::{
 };
 pub(crate) use crate::arch::task::{context_switch, TaskContext};
 use crate::{
+    arch::mm::tlb_flush_addr_range,
     cpu::CpuSet,
     mm::{kspace::KERNEL_PAGE_TABLE, FrameAllocOptions, PageFlags, Segment, PAGE_SIZE},
     prelude::*,

@@ -66,11 +67,11 @@ impl KernelStack {
         };
         // SAFETY: the segment allocated is not used by others so we can protect it.
         unsafe {
+            let vaddr_range = guard_page_vaddr..guard_page_vaddr + PAGE_SIZE;
             page_table
-                .protect(&(guard_page_vaddr..guard_page_vaddr + PAGE_SIZE), |p| {
-                    p.flags -= PageFlags::RW
-                })
+                .protect(&vaddr_range, |p| p.flags -= PageFlags::RW)
                 .unwrap();
+            tlb_flush_addr_range(&vaddr_range);
         }
         Ok(Self {
             segment: stack_segment,

@@ -94,11 +95,11 @@ impl Drop for KernelStack {
         };
         // SAFETY: the segment allocated is not used by others so we can protect it.
         unsafe {
+            let vaddr_range = guard_page_vaddr..guard_page_vaddr + PAGE_SIZE;
             page_table
-                .protect(&(guard_page_vaddr..guard_page_vaddr + PAGE_SIZE), |p| {
-                    p.flags |= PageFlags::RW
-                })
+                .protect(&vaddr_range, |p| p.flags |= PageFlags::RW)
                 .unwrap();
+            tlb_flush_addr_range(&vaddr_range);
         }
     }
 }
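
For orientation, the guard page is a single page inside the stack's mapped region whose
RW flag is cleared while the stack is live and restored in Drop; the computation of
guard_page_vaddr is outside these hunks, so its position at the low end of the region
(where a downward-growing stack would overflow into it) is an assumption about the usual
layout rather than something shown here. A schematic under that assumption:

  higher addresses  +-----------------------------+  <- stack top / initial stack pointer
                    |      usable stack pages     |
                    +-----------------------------+  <- guard_page_vaddr + PAGE_SIZE
                    | guard page: RW cleared and  |
                    | flushed in the constructor, |
                    | restored and flushed again  |
                    | in Drop                     |
  lower addresses   +-----------------------------+  <- guard_page_vaddr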