Refactor the definition of page properties and permissions

Zhang Junyang
2024-05-05 20:51:38 +08:00
committed by Tate, Hongliang Tian
parent 351e08c897
commit 989970429a
24 changed files with 538 additions and 664 deletions
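In essence, the old trio of `MapProperty` (permissions, globality, an arch-specific `extension`, and cache policy), `MapStatus`, and `MapInfo` is collapsed into a single `PageProperty`, and `protect` now takes a closure that mutates the property in place rather than a `MapOp` that returns a new one. A rough before-and-after sketch distilled from the hunks below (illustrative, not an exact excerpt):

    // Before: permissions, globality, arch-specific bits, and cache policy in MapProperty;
    // accessed/dirty status reported separately through MapInfo/MapStatus.
    let prop = MapProperty {
        perm: VmPerm::RW,
        global: false,
        extension: 0,
        cache: CachePolicy::Writeback,
    };
    page_table.protect(&range, perm_op(|p| p - VmPerm::W));

    // After: one PageProperty carrying PageFlags (R/W/X plus ACCESSED/DIRTY) and
    // framework-private PrivilegedPageFlags (USER/GLOBAL/SHARED); protect() mutates in place.
    let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
    page_table.protect(&range, |p| p.flags -= PageFlags::W);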

View File

@ -56,7 +56,7 @@ use aster_frame::cpu::UserContext;
use aster_frame::prelude::*;
use aster_frame::task::{Task, TaskOptions};
use aster_frame::user::{UserEvent, UserMode, UserSpace};
use aster_frame::vm::{Vaddr, VmAllocOptions, VmIo, VmMapOptions, VmPerm, VmSpace, PAGE_SIZE};
use aster_frame::vm::{PageFlags, PAGE_SIZE, Vaddr, VmAllocOptions, VmIo, VmMapOptions, VmSpace};
/// The kernel's boot and initialization process is managed by Asterinas Framework.
/// After the process is done, the kernel's execution environment
@ -87,7 +87,7 @@ fn create_user_space(program: &[u8]) -> UserSpace {
// the VmSpace abstraction.
let vm_space = VmSpace::new();
let mut options = VmMapOptions::new();
options.addr(Some(MAP_ADDR)).perm(VmPerm::RWX);
options.addr(Some(MAP_ADDR)).flags(PageFlags::RWX);
vm_space.map(user_pages, &options).unwrap();
vm_space
};

View File

@ -6,13 +6,14 @@ use core::mem::size_of;
use log::warn;
use pod::Pod;
use super::second_stage::{DeviceMode, PageTableEntry, PageTableFlags, PagingConsts};
use super::second_stage::{DeviceMode, PageTableEntry, PagingConsts};
use crate::{
bus::pci::PciDeviceLocation,
vm::{
dma::Daddr,
page_table::{CachePolicy, MapProperty, PageTableError},
Paddr, PageTable, VmAllocOptions, VmFrame, VmIo, VmPerm, PAGE_SIZE,
page_prop::{CachePolicy, PageProperty, PrivilegedPageFlags as PrivFlags},
page_table::PageTableError,
Paddr, PageFlags, PageTable, VmAllocOptions, VmFrame, VmIo, PAGE_SIZE,
},
};
@ -295,11 +296,10 @@ impl ContextTable {
.map(
&(daddr..daddr + PAGE_SIZE),
&(paddr..paddr + PAGE_SIZE),
MapProperty {
perm: VmPerm::RW,
global: false,
extension: PageTableFlags::empty().bits(),
PageProperty {
flags: PageFlags::RW,
cache: CachePolicy::Uncacheable,
priv_flags: PrivFlags::empty(),
},
)
.unwrap();

View File

@ -5,10 +5,9 @@ use core::ops::Range;
use pod::Pod;
use crate::vm::{
page_table::{
CachePolicy, MapInfo, MapProperty, MapStatus, PageTableEntryTrait, PageTableMode,
},
Paddr, PagingConstsTrait, Vaddr, VmPerm,
page_prop::{CachePolicy, PageFlags, PrivilegedPageFlags as PrivFlags},
page_table::{PageTableEntryTrait, PageTableMode},
Paddr, PageProperty, PagingConstsTrait, Vaddr,
};
/// The page table used by iommu maps the device address
@ -71,14 +70,17 @@ impl PageTableEntry {
}
impl PageTableEntryTrait for PageTableEntry {
fn new(paddr: crate::vm::Paddr, prop: MapProperty, huge: bool, last: bool) -> Self {
fn new(paddr: crate::vm::Paddr, prop: PageProperty, huge: bool, last: bool) -> Self {
let mut flags = PageTableFlags::empty();
if prop.perm.contains(VmPerm::W) {
if prop.flags.contains(PageFlags::W) {
flags |= PageTableFlags::WRITABLE;
}
if prop.perm.contains(VmPerm::R) {
if prop.flags.contains(PageFlags::R) {
flags |= PageTableFlags::READABLE;
}
if prop.cache != CachePolicy::Uncacheable {
flags |= PageTableFlags::SNOOP;
}
if last {
flags |= PageTableFlags::LAST_PAGE;
}
@ -92,42 +94,39 @@ impl PageTableEntryTrait for PageTableEntry {
(self.0 & Self::PHYS_MASK as u64) as usize
}
fn new_invalid() -> Self {
fn new_absent() -> Self {
Self(0)
}
fn is_valid(&self) -> bool {
fn is_present(&self) -> bool {
self.0 & (PageTableFlags::READABLE | PageTableFlags::WRITABLE).bits() != 0
}
fn info(&self) -> MapInfo {
let mut perm = VmPerm::empty();
fn prop(&self) -> PageProperty {
let mut flags = PageFlags::empty();
if self.0 & PageTableFlags::READABLE.bits() != 0 {
perm |= VmPerm::R;
flags |= PageFlags::R;
}
if self.0 & PageTableFlags::WRITABLE.bits() != 0 {
perm |= VmPerm::W;
flags |= PageFlags::W;
}
if self.0 & PageTableFlags::ACCESSED.bits() != 0 {
flags |= PageFlags::ACCESSED;
}
if self.0 & PageTableFlags::DIRTY.bits() != 0 {
flags |= PageFlags::DIRTY;
}
// TODO: The determination of the cache policy is not rigorous; we should revise it.
let cache = if self.0 & PageTableFlags::SNOOP.bits() != 0 {
CachePolicy::Writeback
} else {
CachePolicy::Uncacheable
};
let mut status = MapStatus::empty();
if self.0 & PageTableFlags::ACCESSED.bits() != 0 {
status |= MapStatus::ACCESSED;
}
if self.0 & PageTableFlags::DIRTY.bits() != 0 {
status |= MapStatus::DIRTY;
}
MapInfo {
prop: MapProperty {
perm,
global: false,
extension: self.0 & !Self::PHYS_MASK as u64,
cache,
},
status,
PageProperty {
flags,
cache,
priv_flags: PrivFlags::empty(),
}
}

View File

@ -6,8 +6,9 @@ use pod::Pod;
use x86_64::{instructions::tlb, structures::paging::PhysFrame, VirtAddr};
use crate::vm::{
page_table::{CachePolicy, MapInfo, MapProperty, MapStatus, PageTableEntryTrait},
Paddr, PagingConstsTrait, Vaddr, VmPerm,
page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags as PrivFlags},
page_table::PageTableEntryTrait,
Paddr, PagingConstsTrait, Vaddr,
};
pub(crate) const NR_ENTRIES_PER_PAGE: usize = 512;
@ -101,72 +102,95 @@ impl PageTableEntry {
const PHYS_ADDR_MASK: usize = 0x7_FFFF_FFFF_F000;
}
/// Extract the single bit flag `$from` from `$val` and shift it to the bit position of `$to`.
macro_rules! parse_flags {
($val:expr, $from:expr, $to:expr) => {
($val as usize & $from.bits() as usize) >> $from.bits().ilog2() << $to.bits().ilog2()
};
}
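// As used below, `$from` and `$to` are single-bit flags. For example, assuming
// `PageFlags::W` is bit 1 (0b10, per page_prop.rs) and `PageTableFlags::WRITABLE`
// is also bit 1 (the x86 writable bit),
//     parse_flags!(prop.flags.bits(), PageFlags::W, PageTableFlags::WRITABLE)
// expands to roughly `(bits & 0b10) >> 1 << 1`: the single `from` bit is masked out
// and relocated to the bit position of `to`.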
impl PageTableEntryTrait for PageTableEntry {
fn new_invalid() -> Self {
fn new_absent() -> Self {
Self(0)
}
fn is_valid(&self) -> bool {
fn is_present(&self) -> bool {
self.0 & PageTableFlags::PRESENT.bits() != 0
}
fn new(paddr: Paddr, prop: MapProperty, huge: bool, last: bool) -> Self {
let mut flags = PageTableFlags::PRESENT;
fn new(paddr: Paddr, prop: PageProperty, huge: bool, last: bool) -> Self {
let mut flags =
PageTableFlags::PRESENT.bits() | (huge as usize) << PageTableFlags::HUGE.bits().ilog2();
if !huge && !last {
// On x86, an intermediate PTE ideally carries the same permissions as its most
// permissive child (to reduce hardware page-walk accesses). We don't have a
// mechanism to keep this generic across architectures, so we simply make
// intermediate PTEs the most permissive.
flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::USER;
flags |= PageTableFlags::WRITABLE.bits() | PageTableFlags::USER.bits();
#[cfg(feature = "intel_tdx")]
{
flags |= parse_flags!(
prop.priv_flags.bits(),
PrivFlags::SHARED,
PageTableFlags::SHARED
);
}
} else {
if prop.perm.contains(VmPerm::W) {
flags |= PageTableFlags::WRITABLE;
}
if !prop.perm.contains(VmPerm::X) {
flags |= PageTableFlags::NO_EXECUTE;
}
if prop.perm.contains(VmPerm::U) {
flags |= PageTableFlags::USER;
}
if prop.global {
flags |= PageTableFlags::GLOBAL;
flags |= parse_flags!(prop.flags.bits(), PageFlags::W, PageTableFlags::WRITABLE)
| parse_flags!(!prop.flags.bits(), PageFlags::X, PageTableFlags::NO_EXECUTE)
| parse_flags!(
prop.flags.bits(),
PageFlags::ACCESSED,
PageTableFlags::ACCESSED
)
| parse_flags!(prop.flags.bits(), PageFlags::DIRTY, PageTableFlags::DIRTY)
| parse_flags!(
prop.priv_flags.bits(),
PrivFlags::USER,
PageTableFlags::USER
)
| parse_flags!(
prop.priv_flags.bits(),
PrivFlags::GLOBAL,
PageTableFlags::GLOBAL
);
#[cfg(feature = "intel_tdx")]
{
flags |= parse_flags!(
prop.priv_flags.bits(),
PrivFlags::SHARED,
PageTableFlags::SHARED
);
}
}
if prop.cache == CachePolicy::Uncacheable {
flags |= PageTableFlags::NO_CACHE;
match prop.cache {
CachePolicy::Writeback => {}
CachePolicy::Writethrough => {
flags |= PageTableFlags::WRITE_THROUGH.bits();
}
CachePolicy::Uncacheable => {
flags |= PageTableFlags::NO_CACHE.bits();
}
_ => panic!("unsupported cache policy"),
}
if prop.cache == CachePolicy::Writethrough {
flags |= PageTableFlags::WRITE_THROUGH;
}
if huge {
flags |= PageTableFlags::HUGE;
}
#[cfg(feature = "intel_tdx")]
if prop.extension as usize & PageTableFlags::SHARED.bits() != 0 {
flags |= PageTableFlags::SHARED;
}
Self(paddr & Self::PHYS_ADDR_MASK | flags.bits())
Self(paddr & Self::PHYS_ADDR_MASK | flags)
}
fn paddr(&self) -> Paddr {
self.0 & Self::PHYS_ADDR_MASK
}
fn info(&self) -> MapInfo {
let mut perm = VmPerm::empty();
if self.0 & PageTableFlags::PRESENT.bits() != 0 {
perm |= VmPerm::R;
}
if self.0 & PageTableFlags::WRITABLE.bits() != 0 {
perm |= VmPerm::W;
}
if self.0 & PageTableFlags::NO_EXECUTE.bits() == 0 {
perm |= VmPerm::X;
}
if self.0 & PageTableFlags::USER.bits() != 0 {
perm |= VmPerm::U;
}
let global = self.0 & PageTableFlags::GLOBAL.bits() != 0;
fn prop(&self) -> PageProperty {
let flags = parse_flags!(self.0, PageTableFlags::PRESENT, PageFlags::R)
| parse_flags!(self.0, PageTableFlags::WRITABLE, PageFlags::W)
| parse_flags!(!self.0, PageTableFlags::NO_EXECUTE, PageFlags::X)
| parse_flags!(self.0, PageTableFlags::ACCESSED, PageFlags::ACCESSED)
| parse_flags!(self.0, PageTableFlags::DIRTY, PageFlags::DIRTY);
let priv_flags = parse_flags!(self.0, PageTableFlags::USER, PrivFlags::USER)
| parse_flags!(self.0, PageTableFlags::GLOBAL, PrivFlags::GLOBAL);
#[cfg(feature = "intel_tdx")]
let priv_flags =
priv_flags | parse_flags!(self.0, PageTableFlags::SHARED, PrivFlags::SHARED);
let cache = if self.0 & PageTableFlags::NO_CACHE.bits() != 0 {
CachePolicy::Uncacheable
} else if self.0 & PageTableFlags::WRITE_THROUGH.bits() != 0 {
@ -174,33 +198,10 @@ impl PageTableEntryTrait for PageTableEntry {
} else {
CachePolicy::Writeback
};
let mut status = MapStatus::empty();
if self.0 & PageTableFlags::ACCESSED.bits() != 0 {
status |= MapStatus::ACCESSED;
}
if self.0 & PageTableFlags::DIRTY.bits() != 0 {
status |= MapStatus::DIRTY;
}
let extension = {
#[cfg(feature = "intel_tdx")]
{
let mut ext = PageTableFlags::empty();
if self.0 & PageTableFlags::SHARED.bits() != 0 {
ext |= PageTableFlags::SHARED;
}
ext
}
#[cfg(not(feature = "intel_tdx"))]
0
};
MapInfo {
prop: MapProperty {
perm,
global,
extension,
cache,
},
status,
PageProperty {
flags: PageFlags::from_bits(flags as u8).unwrap(),
cache,
priv_flags: PrivFlags::from_bits(priv_flags as u8).unwrap(),
}
}
@ -214,12 +215,12 @@ impl fmt::Debug for PageTableEntry {
let mut f = f.debug_struct("PageTableEntry");
f.field("raw", &format_args!("{:#x}", self.0))
.field("paddr", &format_args!("{:#x}", self.paddr()))
.field("valid", &self.is_valid())
.field("present", &self.is_present())
.field(
"flags",
&PageTableFlags::from_bits_truncate(self.0 & !Self::PHYS_ADDR_MASK),
)
.field("info", &self.info())
.field("prop", &self.prop())
.finish()
}
}
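A round-trip sketch of the intended behavior of this entry type (crate-internal, since `PageTableEntryTrait` is `pub(crate)`; the address is illustrative):

    let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
    let pte = PageTableEntry::new(0x8000, prop, false, true);
    assert!(pte.is_present());
    assert_eq!(pte.paddr(), 0x8000);
    assert_eq!(pte.prop().flags, PageFlags::RW);
    assert_eq!(pte.prop().cache, CachePolicy::Writeback);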

View File

@ -15,7 +15,8 @@ use crate::{
vm::{
kspace::KERNEL_PAGE_TABLE,
paddr_to_vaddr,
page_table::{MapProperty, PageTableError},
page_prop::{CachePolicy, PageProperty, PrivilegedPageFlags as PrivFlags},
page_table::PageTableError,
KERNEL_BASE_VADDR, KERNEL_END_VADDR, PAGE_SIZE,
},
};
@ -416,10 +417,12 @@ pub unsafe fn unprotect_gpa_range(gpa: TdxGpa, page_num: usize) -> Result<(), Pa
}
let vaddr = paddr_to_vaddr(gpa);
let pt = KERNEL_PAGE_TABLE.get().unwrap();
pt.protect(&(vaddr..page_num * PAGE_SIZE), |info| MapProperty {
perm: info.prop.perm,
extension: PageTableFlags::SHARED.bits() as u64,
cache: info.prop.cache,
pt.protect(&(vaddr..page_num * PAGE_SIZE), |prop| {
*prop = PageProperty {
flags: prop.flags,
cache: prop.cache,
priv_flags: prop.priv_flags | PrivFlags::SHARED,
}
})
.map_err(PageConvertError::PageTableError)?;
map_gpa(
@ -450,12 +453,12 @@ pub unsafe fn protect_gpa_range(gpa: TdxGpa, page_num: usize) -> Result<(), Page
}
let vaddr = paddr_to_vaddr(gpa);
let pt = KERNEL_PAGE_TABLE.get().unwrap();
pt.protect(&(vaddr..page_num * PAGE_SIZE), |info| MapProperty {
perm: info.prop.perm,
extension: (PageTableFlags::from_bits_truncate(info.prop.extension as usize)
- PageTableFlags::SHARED)
.bits() as u64,
cache: info.prop.cache,
pt.protect(&(vaddr..page_num * PAGE_SIZE), |prop| {
*prop = PageProperty {
flags: prop.flags,
cache: prop.cache,
priv_flags: prop.priv_flags - PrivFlags::SHARED,
}
})
.map_err(PageConvertError::PageTableError)?;
map_gpa((gpa & PAGE_MASK) as u64, (page_num * PAGE_SIZE) as u64)

View File

@ -16,7 +16,6 @@
#![feature(ptr_sub_ptr)]
#![feature(strict_provenance)]
#![feature(pointer_is_aligned)]
#![feature(unboxed_closures)]
#![allow(dead_code)]
#![allow(unused_variables)]
// The `generic_const_exprs` feature is incomplete however required for the page table

View File

@ -12,10 +12,7 @@ use crate::{
prelude::*,
sync::{SpinLock, SpinLockGuard},
user::UserSpace,
vm::{
kspace::KERNEL_PAGE_TABLE, page_table::perm_op, VmAllocOptions, VmPerm, VmSegment,
PAGE_SIZE,
},
vm::{kspace::KERNEL_PAGE_TABLE, PageFlags, VmAllocOptions, VmSegment, PAGE_SIZE},
};
pub const KERNEL_STACK_SIZE: usize = PAGE_SIZE * 64;
@ -72,10 +69,9 @@ impl KernelStack {
// Safety: the segment allocated is not used by others so we can protect it.
unsafe {
page_table
.protect(
&(guard_page_vaddr..guard_page_vaddr + PAGE_SIZE),
perm_op(|p| p - VmPerm::RW),
)
.protect(&(guard_page_vaddr..guard_page_vaddr + PAGE_SIZE), |p| {
p.flags -= PageFlags::RW
})
.unwrap();
}
Ok(Self {
@ -101,10 +97,9 @@ impl Drop for KernelStack {
// Safety: the segment allocated is not used by others so we can protect it.
unsafe {
page_table
.protect(
&(guard_page_vaddr..guard_page_vaddr + PAGE_SIZE),
perm_op(|p| p | VmPerm::RW),
)
.protect(&(guard_page_vaddr..guard_page_vaddr + PAGE_SIZE), |p| {
p.flags |= PageFlags::RW
})
.unwrap();
}
}

View File

@ -20,8 +20,8 @@ use crate::{
cpu_local,
vm::{
kspace::{KERNEL_PAGE_TABLE, LINEAR_MAPPING_BASE_VADDR},
page_table::{CachePolicy, MapProperty},
VmPerm, PAGE_SIZE, PHYS_MEM_VADDR_RANGE,
page_prop::{CachePolicy, PageProperty},
PageFlags, PrivilegedPageFlags as PrivFlags, PAGE_SIZE, PHYS_MEM_VADDR_RANGE,
},
};
@ -231,14 +231,13 @@ fn handle_kernel_page_fault(f: &TrapFrame) {
.map(
&(vaddr..vaddr + PAGE_SIZE),
&(paddr..paddr + PAGE_SIZE),
MapProperty {
perm: VmPerm::RW,
global: true,
#[cfg(feature = "intel_tdx")]
extension: PageTableFlags::SHARED.bits() as u64,
#[cfg(not(feature = "intel_tdx"))]
extension: 0,
PageProperty {
flags: PageFlags::RW,
cache: CachePolicy::Uncacheable,
#[cfg(not(feature = "intel_tdx"))]
priv_flags: PrivFlags::GLOBAL,
#[cfg(feature = "intel_tdx")]
priv_flags: PrivFlags::SHARED | PrivFlags::GLOBAL,
},
)
.unwrap();

View File

@ -14,7 +14,7 @@ use crate::{
vm::{
dma::{dma_type, Daddr, DmaType},
kspace::{paddr_to_vaddr, KERNEL_PAGE_TABLE},
page_table::{cache_policy_op, CachePolicy},
page_prop::CachePolicy,
HasPaddr, Paddr, VmIo, VmReader, VmSegment, VmWriter, PAGE_SIZE,
},
};
@ -62,7 +62,7 @@ impl DmaCoherent {
// Safety: the physical mapping is only used by DMA, so protecting it is safe.
unsafe {
page_table
.protect(&va_range, cache_policy_op(CachePolicy::Uncacheable))
.protect(&va_range, |p| p.cache = CachePolicy::Uncacheable)
.unwrap();
}
}
@ -149,7 +149,7 @@ impl Drop for DmaCoherentInner {
// Safety: the physical mapping is only used by DMA, so protecting it is safe.
unsafe {
page_table
.protect(&va_range, cache_policy_op(CachePolicy::Writeback))
.protect(&va_range, |p| p.cache = CachePolicy::Writeback)
.unwrap();
}
}
@ -212,7 +212,7 @@ mod test {
assert!(dma_coherent.paddr() == vm_segment.paddr());
let page_table = KERNEL_PAGE_TABLE.get().unwrap();
let vaddr = paddr_to_vaddr(vm_segment.paddr());
assert!(page_table.query(vaddr).unwrap().1.prop.cache == CachePolicy::Uncacheable);
assert!(page_table.query(vaddr).unwrap().1.cache == CachePolicy::Uncacheable);
}
#[ktest]

View File

@ -6,9 +6,9 @@ use align_ext::AlignExt;
use spin::Once;
use super::{
page_table::{nr_ptes_per_node, page_walk, CachePolicy, KernelMode, MapProperty, PageTable},
space::VmPerm,
MemoryRegionType, Paddr, Vaddr, PAGE_SIZE,
page_table::{nr_ptes_per_node, page_walk, KernelMode, PageTable},
CachePolicy, MemoryRegionType, Paddr, PageFlags, PageProperty, PrivilegedPageFlags, Vaddr,
PAGE_SIZE,
};
use crate::arch::mm::{PageTableEntry, PagingConsts};
@ -58,60 +58,67 @@ pub fn init_kernel_page_table() {
nr_ptes_per_node::<PagingConsts>() / 2..nr_ptes_per_node::<PagingConsts>(),
);
let regions = crate::boot::memory_regions();
// Do linear mappings for the kernel.
let linear_mapping_size = {
let mut end = 0;
for r in regions {
end = end.max(r.base() + r.len());
{
let linear_mapping_size = {
let mut end = 0;
for r in regions {
end = end.max(r.base() + r.len());
}
end.align_up(PAGE_SIZE)
};
let from = LINEAR_MAPPING_BASE_VADDR..LINEAR_MAPPING_BASE_VADDR + linear_mapping_size;
let to = 0..linear_mapping_size;
let prop = PageProperty {
flags: PageFlags::RW,
cache: CachePolicy::Writeback,
priv_flags: PrivilegedPageFlags::GLOBAL,
};
// Safety: we are doing the linear mapping for the kernel.
unsafe {
kpt.map(&from, &to, prop).unwrap();
}
end.align_up(PAGE_SIZE)
};
let from = LINEAR_MAPPING_BASE_VADDR..LINEAR_MAPPING_BASE_VADDR + linear_mapping_size;
let to = 0..linear_mapping_size;
let prop = MapProperty {
perm: VmPerm::RW,
global: true,
extension: 0,
cache: CachePolicy::Writeback,
};
// Safety: we are doing the linear mapping for the kernel.
unsafe {
kpt.map(&from, &to, prop).unwrap();
}
// Map for the I/O area.
// TODO: we need to have an allocator to allocate kernel space for
// the I/O areas, rather than doing it using the linear mappings.
let to = 0x8_0000_0000..0x9_0000_0000;
let from = LINEAR_MAPPING_BASE_VADDR + to.start..LINEAR_MAPPING_BASE_VADDR + to.end;
let prop = MapProperty {
perm: VmPerm::RW,
global: true,
extension: 0,
cache: CachePolicy::Uncacheable,
};
// Safety: we are doing I/O mappings for the kernel.
unsafe {
kpt.map(&from, &to, prop).unwrap();
{
let to = 0x8_0000_0000..0x9_0000_0000;
let from = LINEAR_MAPPING_BASE_VADDR + to.start..LINEAR_MAPPING_BASE_VADDR + to.end;
let prop = PageProperty {
flags: PageFlags::RW,
cache: CachePolicy::Uncacheable,
priv_flags: PrivilegedPageFlags::GLOBAL,
};
// Safety: we are doing I/O mappings for the kernel.
unsafe {
kpt.map(&from, &to, prop).unwrap();
}
}
// Map for the kernel code itself.
// TODO: set separate permissions for each segment in the kernel.
let region = regions
.iter()
.find(|r| r.typ() == MemoryRegionType::Kernel)
.unwrap();
let offset = kernel_loaded_offset();
let to =
region.base().align_down(PAGE_SIZE)..(region.base() + region.len()).align_up(PAGE_SIZE);
let from = to.start + offset..to.end + offset;
let prop = MapProperty {
perm: VmPerm::RWX,
global: true,
extension: 0,
cache: CachePolicy::Writeback,
};
// Safety: we are doing mappings for the kernel.
unsafe {
kpt.map(&from, &to, prop).unwrap();
{
let region = regions
.iter()
.find(|r| r.typ() == MemoryRegionType::Kernel)
.unwrap();
let offset = kernel_loaded_offset();
let to =
region.base().align_down(PAGE_SIZE)..(region.base() + region.len()).align_up(PAGE_SIZE);
let from = to.start + offset..to.end + offset;
let prop = PageProperty {
flags: PageFlags::RWX,
cache: CachePolicy::Writeback,
priv_flags: PrivilegedPageFlags::GLOBAL,
};
// Safety: we are doing mappings for the kernel.
unsafe {
kpt.map(&from, &to, prop).unwrap();
}
}
KERNEL_PAGE_TABLE.call_once(|| kpt);
}

View File

@ -16,6 +16,7 @@ mod io;
pub(crate) mod kspace;
mod offset;
mod options;
pub(crate) mod page_prop;
pub(crate) mod page_table;
mod space;
@ -30,12 +31,14 @@ pub use self::{
io::VmIo,
kspace::vaddr_to_paddr,
options::VmAllocOptions,
space::{VmMapOptions, VmPerm, VmSpace},
page_prop::{CachePolicy, PageFlags, PageProperty},
space::{VmMapOptions, VmSpace},
};
pub(crate) use self::{
kspace::paddr_to_vaddr, page_prop::PrivilegedPageFlags, page_table::PageTable,
};
pub(crate) use self::{kspace::paddr_to_vaddr, page_table::PageTable};
use crate::boot::memory_region::{MemoryRegion, MemoryRegionType};
/// DEPRECATED: use the property of `VmFrame` instead.
/// The size of a [`VmFrame`].
pub const PAGE_SIZE: usize = 0x1000;

View File

@ -0,0 +1,132 @@
// SPDX-License-Identifier: MPL-2.0
//! Definitions of page mapping properties.
use core::fmt::Debug;
use bitflags::bitflags;
/// The property of a mapped virtual memory page.
#[derive(Clone, Copy, Debug)]
pub struct PageProperty {
pub flags: PageFlags,
pub cache: CachePolicy,
pub(crate) priv_flags: PrivilegedPageFlags,
}
impl PageProperty {
/// Create a new `PageProperty` for a user-accessible page with the given flags and cache policy.
pub fn new(flags: PageFlags, cache: CachePolicy) -> Self {
Self {
flags,
cache,
priv_flags: PrivilegedPageFlags::USER,
}
}
/// Create a page property that represents an absent page without any mapping.
pub fn new_absent() -> Self {
Self {
flags: PageFlags::empty(),
cache: CachePolicy::Writeback,
priv_flags: PrivilegedPageFlags::empty(),
}
}
}
// TODO: Make it more abstract when supporting other architectures.
/// A type to control the cacheability of the main memory.
///
/// The type currently follows the definition as defined by the AMD64 manual.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CachePolicy {
/// Uncacheable (UC).
///
/// Reads from, and writes to, UC memory are not cacheable.
/// Reads from UC memory cannot be speculative.
/// Write-combining to UC memory is not allowed.
/// Reads from or writes to UC memory cause the write buffers to be written to memory
/// and be invalidated prior to the access to UC memory.
///
/// The UC memory type is useful for memory-mapped I/O devices
/// where strict ordering of reads and writes is important.
Uncacheable,
/// Write-Combining (WC).
///
/// Reads from, and writes to, WC memory are not cacheable.
/// Reads from WC memory can be speculative.
///
/// Writes to this memory type can be combined internally by the processor
/// and written to memory as a single write operation to reduce memory accesses.
///
/// The WC memory type is useful for graphics-display memory buffers
/// where the order of writes is not important.
WriteCombining,
/// Write-Protect (WP).
///
/// Reads from WP memory are cacheable and allocate cache lines on a read miss.
/// Reads from WP memory can be speculative.
///
/// Writes to WP memory that hit in the cache do not update the cache.
/// Instead, all writes update memory (write to memory),
/// and writes that hit in the cache invalidate the cache line.
/// Write buffering of WP memory is allowed.
///
/// The WP memory type is useful for shadowed-ROM memory
/// where updates must be immediately visible to all devices that read the shadow locations.
WriteProtected,
/// Writethrough (WT).
///
/// Reads from WT memory are cacheable and allocate cache lines on a read miss.
/// Reads from WT memory can be speculative.
///
/// All writes to WT memory update main memory,
/// and writes that hit in the cache update the cache line.
/// Writes that miss the cache do not allocate a cache line.
/// Write buffering of WT memory is allowed.
Writethrough,
/// Writeback (WB).
///
/// The WB memory is the "normal" memory. See detailed descriptions in the manual.
///
/// This type of memory provides the highest-possible performance
/// and is useful for most software and data stored in system memory (DRAM).
Writeback,
}
bitflags! {
/// Page protection permissions and access status.
pub struct PageFlags: u8 {
/// Readable.
const R = 0b00000001;
/// Writable.
const W = 0b00000010;
/// Executable.
const X = 0b00000100;
/// Readable + writable.
const RW = Self::R.bits | Self::W.bits;
/// Readable + executable.
const RX = Self::R.bits | Self::X.bits;
/// Readable + writable + executable.
const RWX = Self::R.bits | Self::W.bits | Self::X.bits;
/// Whether the memory page has been read or written (accessed).
const ACCESSED = 0b00001000;
/// Whether the memory page has been written (dirty).
const DIRTY = 0b00010000;
}
}
bitflags! {
/// Page properties that are only accessible within the framework.
pub(crate) struct PrivilegedPageFlags: u8 {
/// Accessible from user mode.
const USER = 0b00000001;
/// Global page that won't be evicted from TLB with normal TLB flush.
const GLOBAL = 0b00000010;
/// (TEE only) Whether the page is shared with the host.
/// If not set, the page is guaranteed to be confidential and not visible outside the guest.
#[cfg(feature = "intel_tdx")]
const SHARED = 0b10000000;
}
}
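A minimal usage sketch of the two constructors (assuming an in-crate caller):

    let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
    assert!(prop.flags.contains(PageFlags::RW));

    let absent = PageProperty::new_absent();
    assert!(absent.flags.is_empty());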

View File

@ -56,13 +56,12 @@ use core::{any::TypeId, ops::Range};
use align_ext::AlignExt;
use super::{
nr_ptes_per_node, page_size, pte_index, Child, KernelMode, MapInfo, MapOp, MapProperty,
PageTable, PageTableEntryTrait, PageTableError, PageTableFrame, PageTableMode,
PagingConstsTrait,
nr_ptes_per_node, page_size, pte_index, Child, KernelMode, PageTable, PageTableEntryTrait,
PageTableError, PageTableFrame, PageTableMode, PagingConstsTrait,
};
use crate::{
sync::{ArcSpinLockGuard, SpinLock},
vm::{Paddr, Vaddr, VmFrame},
vm::{Paddr, PageProperty, Vaddr, VmFrame},
};
/// The cursor for traversal over the page table.
@ -184,7 +183,7 @@ where
///
/// The caller should ensure that the virtual range being mapped does
/// not affect kernel's memory safety.
pub(crate) unsafe fn map(&mut self, frame: VmFrame, prop: MapProperty) {
pub(crate) unsafe fn map(&mut self, frame: VmFrame, prop: PageProperty) {
let end = self.va + C::BASE_PAGE_SIZE;
assert!(end <= self.barrier_va.end);
// Go down if not applicable.
@ -224,7 +223,7 @@ where
/// The caller should ensure that
/// - the range being mapped does not affect kernel's memory safety;
/// - the physical address to be mapped is valid and safe to use.
pub(crate) unsafe fn map_pa(&mut self, pa: &Range<Paddr>, prop: MapProperty) {
pub(crate) unsafe fn map_pa(&mut self, pa: &Range<Paddr>, prop: PageProperty) {
let end = self.va + pa.len();
let mut pa = pa.start;
assert!(end <= self.barrier_va.end);
@ -286,7 +285,7 @@ where
|| self.va % page_size::<C>(self.level) != 0
|| self.va + page_size::<C>(self.level) > end
{
self.level_down(Some(MapProperty::new_invalid()));
self.level_down(Some(PageProperty::new_absent()));
continue;
}
@ -313,7 +312,7 @@ where
pub(crate) unsafe fn protect(
&mut self,
len: usize,
op: impl MapOp,
mut op: impl FnMut(&mut PageProperty),
allow_protect_invalid: bool,
) -> Result<(), PageTableError> {
let end = self.va + len;
@ -333,19 +332,19 @@ where
}
let vaddr_not_fit = self.va % page_size::<C>(self.level) != 0
|| self.va + page_size::<C>(self.level) > end;
let cur_pte_info = self.read_cur_pte_info();
let protected_prop = op(cur_pte_info);
let mut pte_prop = self.read_cur_pte_prop();
op(&mut pte_prop);
// Go down if the page size is too big and we are protecting part
// of untyped huge pages.
if self.cur_child().is_untyped() && vaddr_not_fit {
self.level_down(Some(protected_prop));
self.level_down(Some(pte_prop));
continue;
} else if vaddr_not_fit {
return Err(PageTableError::ProtectingPartial);
}
let idx = self.cur_idx();
let level = self.level;
self.cur_node_mut().protect(idx, protected_prop, level);
self.cur_node_mut().protect(idx, pte_prop, level);
self.move_forward();
}
Ok(())
@ -359,14 +358,14 @@ where
loop {
let level = self.level;
let va = self.va;
let map_info = self.read_cur_pte_info();
let map_prop = self.read_cur_pte_prop();
match self.cur_child().clone() {
Child::Frame(frame) => {
self.move_forward();
return Some(PageTableQueryResult::Mapped {
va,
frame,
info: map_info,
prop: map_prop,
});
}
Child::PageTable(_) => {
@ -380,7 +379,7 @@ where
va,
pa,
len: page_size::<C>(level),
info: map_info,
prop: map_prop,
});
}
Child::None => {
@ -449,7 +448,7 @@ where
///
/// Also, the staticness of the page table is guaranteed if the caller makes sure
/// that there is a child node for the current node.
fn level_down(&mut self, prop: Option<MapProperty>) {
fn level_down(&mut self, prop: Option<PageProperty>) {
debug_assert!(self.level > 1);
// Check if the child frame exists.
let nxt_lvl_frame = {
@ -507,8 +506,8 @@ where
self.cur_node().child(self.cur_idx())
}
fn read_cur_pte_info(&self) -> MapInfo {
self.cur_node().read_pte_info(self.cur_idx())
fn read_cur_pte_prop(&self) -> PageProperty {
self.cur_node().read_pte_prop(self.cur_idx())
}
}
@ -553,13 +552,13 @@ pub(crate) enum PageTableQueryResult {
Mapped {
va: Vaddr,
frame: VmFrame,
info: MapInfo,
prop: PageProperty,
},
MappedUntyped {
va: Vaddr,
pa: Paddr,
len: usize,
info: MapInfo,
prop: PageProperty,
},
}

View File

@ -2,10 +2,10 @@
use alloc::{boxed::Box, sync::Arc};
use super::{nr_ptes_per_node, page_size, MapInfo, MapProperty, PageTableEntryTrait};
use super::{nr_ptes_per_node, page_size, PageTableEntryTrait};
use crate::{
sync::SpinLock,
vm::{Paddr, PagingConstsTrait, VmAllocOptions, VmFrame},
vm::{page_prop::PageProperty, Paddr, PagingConstsTrait, VmAllocOptions, VmFrame},
};
/// A page table frame.
@ -123,8 +123,8 @@ where
}
/// Read the mapping property from a page table entry at a given index.
pub(super) fn read_pte_info(&self, idx: usize) -> MapInfo {
self.read_pte(idx).info()
pub(super) fn read_pte_prop(&self, idx: usize) -> PageProperty {
self.read_pte(idx).prop()
}
/// Split the untracked huge page mapped at `idx` to smaller pages.
@ -134,21 +134,16 @@ where
let Child::Untracked(pa) = self.children[idx] else {
panic!("split_untracked_huge: not an untyped huge page");
};
let info = self.read_pte_info(idx);
let prop = self.read_pte_prop(idx);
let mut new_frame = Self::new();
for i in 0..nr_ptes_per_node::<C>() {
let small_pa = pa + i * page_size::<C>(cur_level - 1);
new_frame.set_child(
i,
Child::Untracked(small_pa),
Some(info.prop),
cur_level - 1 > 1,
);
new_frame.set_child(i, Child::Untracked(small_pa), Some(prop), cur_level - 1 > 1);
}
self.set_child(
idx,
Child::PageTable(Arc::new(SpinLock::new(new_frame))),
Some(info.prop),
Some(prop),
false,
);
}
@ -159,7 +154,7 @@ where
&mut self,
idx: usize,
child: Child<E, C>,
prop: Option<MapProperty>,
prop: Option<PageProperty>,
huge: bool,
) {
assert!(idx < nr_ptes_per_node::<C>());
@ -187,7 +182,7 @@ where
self.nr_valid_children += 1;
}
Child::None => {
self.write_pte(idx, E::new_invalid());
self.write_pte(idx, E::new_absent());
}
}
}
@ -198,7 +193,7 @@ where
}
/// Protect an already mapped child at a given index.
pub(super) fn protect(&mut self, idx: usize, prop: MapProperty, level: usize) {
pub(super) fn protect(&mut self, idx: usize, prop: PageProperty, level: usize) {
debug_assert!(self.children[idx].is_some());
let paddr = self.children[idx].paddr().unwrap();
// Safety: the index is within the bound and the PTE is valid.
@ -248,7 +243,7 @@ where
let pte = self.read_pte(i);
new_ptr.add(i).write(E::new(
cloned.inner.start_paddr(),
pte.info().prop,
pte.prop(),
false,
false,
));

View File

@ -3,14 +3,18 @@
use alloc::sync::Arc;
use core::{fmt::Debug, marker::PhantomData, ops::Range, panic};
use super::{paddr_to_vaddr, Paddr, PagingConstsTrait, Vaddr, VmPerm};
use pod::Pod;
use super::{
paddr_to_vaddr,
page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
Paddr, PagingConstsTrait, Vaddr,
};
use crate::{
arch::mm::{activate_page_table, PageTableEntry, PagingConsts},
sync::SpinLock,
};
mod properties;
pub use properties::*;
mod frame;
use frame::*;
mod cursor;
@ -119,7 +123,7 @@ where
cursor
.protect(
UserMode::VADDR_RANGE.len(),
perm_op(|perm| perm & !VmPerm::W),
|p: &mut PageProperty| p.flags -= PageFlags::W,
true,
)
.unwrap();
@ -137,12 +141,7 @@ where
if frame.nr_valid_children() != 0 {
let cloned = frame.clone();
let pt = Child::PageTable(Arc::new(SpinLock::new(cloned)));
new_root_frame.set_child(
i,
pt,
Some(root_frame.read_pte_info(i).prop),
false,
);
new_root_frame.set_child(i, pt, Some(root_frame.read_pte_prop(i)), false);
}
}
Child::None => {}
@ -156,7 +155,7 @@ where
new_root_frame.set_child(
i,
root_frame.child(i).clone(),
Some(root_frame.read_pte_info(i).prop),
Some(root_frame.read_pte_prop(i)),
false,
)
}
@ -186,7 +185,7 @@ where
new_root_frame.set_child(
i,
root_frame.child(i).clone(),
Some(root_frame.read_pte_info(i).prop),
Some(root_frame.read_pte_prop(i)),
false,
)
}
@ -215,11 +214,10 @@ where
root_frame.set_child(
i,
Child::PageTable(frame),
Some(MapProperty {
perm: VmPerm::RWX,
global: true,
extension: 0,
cache: CachePolicy::Uncacheable,
Some(PageProperty {
flags: PageFlags::RWX,
cache: CachePolicy::Writeback,
priv_flags: PrivilegedPageFlags::GLOBAL,
}),
false,
)
@ -250,7 +248,7 @@ where
&self,
vaddr: &Range<Vaddr>,
paddr: &Range<Paddr>,
prop: MapProperty,
prop: PageProperty,
) -> Result<(), PageTableError> {
self.cursor_mut(vaddr)?.map_pa(paddr, prop);
Ok(())
@ -264,7 +262,7 @@ where
pub(crate) unsafe fn protect(
&self,
vaddr: &Range<Vaddr>,
op: impl MapOp,
op: impl FnMut(&mut PageProperty),
) -> Result<(), PageTableError> {
self.cursor_mut(vaddr)?
.protect(vaddr.len(), op, true)
@ -277,7 +275,7 @@ where
/// Note that this function may fail to reflect an accurate result if there are
/// cursors concurrently accessing the same virtual address range, just like what
/// happens for the hardware MMU walk.
pub(crate) fn query(&self, vaddr: Vaddr) -> Option<(Paddr, MapInfo)> {
pub(crate) fn query(&self, vaddr: Vaddr) -> Option<(Paddr, PageProperty)> {
// Safety: The root frame is a valid page table frame so the address is valid.
unsafe { page_walk::<E, C>(self.root_paddr(), vaddr) }
}
@ -344,7 +342,7 @@ where
pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>(
root_paddr: Paddr,
vaddr: Vaddr,
) -> Option<(Paddr, MapInfo)> {
) -> Option<(Paddr, PageProperty)> {
let mut cur_level = C::NR_LEVELS;
let mut cur_pte = {
let frame_addr = paddr_to_vaddr(root_paddr);
@ -354,7 +352,7 @@ pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>(
};
while cur_level > 1 {
if !cur_pte.is_valid() {
if !cur_pte.is_present() {
return None;
}
if cur_pte.is_huge() {
@ -370,12 +368,40 @@ pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>(
};
}
if cur_pte.is_valid() {
if cur_pte.is_present() {
Some((
cur_pte.paddr() + (vaddr & (page_size::<C>(cur_level) - 1)),
cur_pte.info(),
cur_pte.prop(),
))
} else {
None
}
}
/// The interface for defining architecture-specific page table entries.
pub(crate) trait PageTableEntryTrait: Clone + Copy + Sized + Pod + Debug {
/// Create a new page table entry that indicates an absent page, i.e., a page without a mapping.
///
/// Note that the current implementation requires an all-zero PTE to be an absent PTE.
fn new_absent() -> Self;
/// Whether the page table entry is present, i.e., holds a valid mapping.
fn is_present(&self) -> bool;
/// Create a new PTE with the given physical address and flags.
/// The huge flag indicates that the PTE maps a huge page.
/// The last flag indicates that the PTE is the last level page table.
/// If the huge and last flags are both false, the PTE maps a page
/// table node.
fn new(paddr: Paddr, prop: PageProperty, huge: bool, last: bool) -> Self;
/// Get the physical address from the PTE.
/// The physical address recorded in the PTE is either:
/// - the physical address of the next level page table;
/// - or the physical address of the page frame it maps to.
fn paddr(&self) -> Paddr;
fn prop(&self) -> PageProperty;
/// Whether the PTE maps a huge page (as opposed to a page table frame).
fn is_huge(&self) -> bool;
}

View File

@ -1,258 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
use core::fmt::Debug;
use pod::Pod;
use crate::vm::{Paddr, VmPerm};
bitflags::bitflags! {
/// The status of a memory mapping recorded by the hardware.
pub struct MapStatus: u8 {
const ACCESSED = 0b0000_0001;
const DIRTY = 0b0000_0010;
}
}
// TODO: Make it more abstract when supporting other architectures.
/// A type to control the cacheability of the main memory.
///
/// The type currently follows the definition as defined by the AMD64 manual.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CachePolicy {
/// Uncacheable (UC).
///
/// Reads from, and writes to, UC memory are not cacheable.
/// Reads from UC memory cannot be speculative.
/// Write-combining to UC memory is not allowed.
/// Reads from or writes to UC memory cause the write buffers to be written to memory
/// and be invalidated prior to the access to UC memory.
///
/// The UC memory type is useful for memory-mapped I/O devices
/// where strict ordering of reads and writes is important.
Uncacheable,
/// Write-Combining (WC).
///
/// Reads from, and writes to, WC memory are not cacheable.
/// Reads from WC memory can be speculative.
///
/// Writes to this memory type can be combined internally by the processor
/// and written to memory as a single write operation to reduce memory accesses.
///
/// The WC memory type is useful for graphics-display memory buffers
/// where the order of writes is not important.
WriteCombining,
/// Write-Protect (WP).
///
/// Reads from WP memory are cacheable and allocate cache lines on a read miss.
/// Reads from WP memory can be speculative.
///
/// Writes to WP memory that hit in the cache do not update the cache.
/// Instead, all writes update memory (write to memory),
/// and writes that hit in the cache invalidate the cache line.
/// Write buffering of WP memory is allowed.
///
/// The WP memory type is useful for shadowed-ROM memory
/// where updates must be immediately visible to all devices that read the shadow locations.
WriteProtected,
/// Writethrough (WT).
///
/// Reads from WT memory are cacheable and allocate cache lines on a read miss.
/// Reads from WT memory can be speculative.
///
/// All writes to WT memory update main memory,
/// and writes that hit in the cache update the cache line.
/// Writes that miss the cache do not allocate a cache line.
/// Write buffering of WT memory is allowed.
Writethrough,
/// Writeback (WB).
///
/// The WB memory is the "normal" memory. See detailed descriptions in the manual.
///
/// This type of memory provides the highest-possible performance
/// and is useful for most software and data stored in system memory (DRAM).
Writeback,
}
#[derive(Clone, Copy, Debug)]
pub struct MapProperty {
pub perm: VmPerm,
/// Global.
/// A global page is not evicted from the TLB when TLB is flushed.
pub(crate) global: bool,
/// The properties of a memory mapping that is used and defined as flags in PTE
/// in specific architectures on an ad hoc basis. The logics provided by the
/// page table module will not be affected by this field.
pub(crate) extension: u64,
pub(crate) cache: CachePolicy,
}
/// Any functions that could be used to modify the map property of a memory mapping.
///
/// To protect a virtual address range, you can either directly use a `MapProperty` object,
///
/// ```rust
/// let page_table = KERNEL_PAGE_TABLE.get().unwrap();
/// let prop = MapProperty {
/// perm: VmPerm::R,
/// global: true,
/// extension: 0,
/// cache: CachePolicy::Writeback,
/// };
/// page_table.protect(0..PAGE_SIZE, prop);
/// ```
///
/// use a map operation
///
/// ```rust
/// let page_table = KERNEL_PAGE_TABLE.get().unwrap();
/// page_table.map(0..PAGE_SIZE, cache_policy_op(CachePolicy::Writeback));
/// page_table.map(0..PAGE_SIZE, perm_op(|perm| perm | VmPerm::R));
/// ```
///
/// or even customize a map operation using a closure
///
/// ```rust
/// let page_table = KERNEL_PAGE_TABLE.get().unwrap();
/// page_table.map(0..PAGE_SIZE, |info| {
/// assert!(info.prop.perm.contains(VmPerm::R));
/// MapProperty {
/// perm: info.prop.perm | VmPerm::W,
/// global: info.prop.global,
/// extension: info.prop.extension,
/// cache: info.prop.cache,
/// }
/// });
/// ```
pub trait MapOp: Fn(MapInfo) -> MapProperty {}
impl<F> MapOp for F where F: Fn(MapInfo) -> MapProperty {}
// These implementations allow a property or permission to be used as an
// overriding map operation. Other usages seems pointless.
impl FnOnce<(MapInfo,)> for MapProperty {
type Output = MapProperty;
extern "rust-call" fn call_once(self, _: (MapInfo,)) -> MapProperty {
self
}
}
impl FnMut<(MapInfo,)> for MapProperty {
extern "rust-call" fn call_mut(&mut self, _: (MapInfo,)) -> MapProperty {
*self
}
}
impl Fn<(MapInfo,)> for MapProperty {
extern "rust-call" fn call(&self, _: (MapInfo,)) -> MapProperty {
*self
}
}
impl FnOnce<(MapInfo,)> for VmPerm {
type Output = MapProperty;
extern "rust-call" fn call_once(self, info: (MapInfo,)) -> MapProperty {
MapProperty {
perm: self,
..info.0.prop
}
}
}
impl FnMut<(MapInfo,)> for VmPerm {
extern "rust-call" fn call_mut(&mut self, info: (MapInfo,)) -> MapProperty {
MapProperty {
perm: *self,
..info.0.prop
}
}
}
impl Fn<(MapInfo,)> for VmPerm {
extern "rust-call" fn call(&self, info: (MapInfo,)) -> MapProperty {
MapProperty {
perm: *self,
..info.0.prop
}
}
}
/// A life saver for creating a map operation that sets the cache policy.
pub fn cache_policy_op(cache: CachePolicy) -> impl MapOp {
move |info| MapProperty {
perm: info.prop.perm,
global: info.prop.global,
extension: info.prop.extension,
cache,
}
}
/// A life saver for creating a map operation that adjusts the permission.
pub fn perm_op(op: impl Fn(VmPerm) -> VmPerm) -> impl MapOp {
move |info| MapProperty {
perm: op(info.prop.perm),
global: info.prop.global,
extension: info.prop.extension,
cache: info.prop.cache,
}
}
impl MapProperty {
pub fn new_general(perm: VmPerm) -> Self {
Self {
perm,
global: false,
extension: 0,
cache: CachePolicy::Writeback,
}
}
pub fn new_invalid() -> Self {
Self {
perm: VmPerm::empty(),
global: false,
extension: 0,
cache: CachePolicy::Uncacheable,
}
}
}
#[derive(Clone, Copy, Debug)]
pub struct MapInfo {
pub prop: MapProperty,
pub status: MapStatus,
}
impl MapInfo {
pub fn contains(&self, perm: VmPerm) -> bool {
self.prop.perm.contains(perm)
}
pub fn accessed(&self) -> bool {
self.status.contains(MapStatus::ACCESSED)
}
pub fn dirty(&self) -> bool {
self.status.contains(MapStatus::DIRTY)
}
}
pub trait PageTableEntryTrait: Clone + Copy + Sized + Pod + Debug {
/// Create a new invalid page table flags that causes page faults
/// when the MMU meets them.
fn new_invalid() -> Self;
/// If the flags are valid.
/// Note that the invalid PTE may be _valid_ in representation, but
/// just causing page faults when the MMU meets them.
fn is_valid(&self) -> bool;
/// Create a new PTE with the given physical address and flags.
/// The huge flag indicates that the PTE maps a huge page.
/// The last flag indicates that the PTE is the last level page table.
/// If the huge and last flags are both false, the PTE maps a page
/// table frame.
fn new(paddr: Paddr, prop: MapProperty, huge: bool, last: bool) -> Self;
/// Get the physical address from the PTE.
/// The physical address recorded in the PTE is either:
/// - the physical address of the next level page table;
/// - or the physical address of the page frame it maps to.
fn paddr(&self) -> Paddr;
fn info(&self) -> MapInfo;
/// If the PTE maps a huge page or a page table frame.
fn is_huge(&self) -> bool;
}

View File

@ -1,7 +1,11 @@
// SPDX-License-Identifier: MPL-2.0
use super::*;
use crate::vm::{kspace::LINEAR_MAPPING_BASE_VADDR, space::VmPerm, VmAllocOptions};
use crate::vm::{
kspace::LINEAR_MAPPING_BASE_VADDR,
page_prop::{CachePolicy, PageFlags},
VmAllocOptions,
};
const PAGE_SIZE: usize = 4096;
@ -26,7 +30,7 @@ fn test_map_unmap() {
let from = PAGE_SIZE..PAGE_SIZE * 2;
let frame = VmAllocOptions::new(1).alloc_single().unwrap();
let start_paddr = frame.start_paddr();
let prop = MapProperty::new_general(VmPerm::RW);
let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
unsafe { pt.cursor_mut(&from).unwrap().map(frame.clone(), prop) };
assert_eq!(pt.query(from.start + 10).unwrap().0, start_paddr + 10);
unsafe { pt.unmap(&from).unwrap() };
@ -59,7 +63,7 @@ fn test_user_copy_on_write() {
let from = PAGE_SIZE..PAGE_SIZE * 2;
let frame = VmAllocOptions::new(1).alloc_single().unwrap();
let start_paddr = frame.start_paddr();
let prop = MapProperty::new_general(VmPerm::RW);
let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
unsafe { pt.cursor_mut(&from).unwrap().map(frame.clone(), prop) };
assert_eq!(pt.query(from.start + 10).unwrap().0, start_paddr + 10);
unsafe { pt.unmap(&from).unwrap() };
@ -93,23 +97,23 @@ fn test_base_protect_query() {
let from_ppn = 1..1000;
let from = PAGE_SIZE * from_ppn.start..PAGE_SIZE * from_ppn.end;
let to = PAGE_SIZE * 1000..PAGE_SIZE * 1999;
let prop = MapProperty::new_general(VmPerm::RW);
let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
unsafe { pt.map(&from, &to, prop).unwrap() };
for (qr, i) in pt.cursor(&from).unwrap().zip(from_ppn) {
let Qr::MappedUntyped { va, pa, len, info } = qr else {
let Qr::MappedUntyped { va, pa, len, prop } = qr else {
panic!("Expected MappedUntyped, got {:#x?}", qr);
};
assert_eq!(info.prop.perm, VmPerm::RW);
assert_eq!(info.prop.cache, CachePolicy::Writeback);
assert_eq!(prop.flags, PageFlags::RW);
assert_eq!(prop.cache, CachePolicy::Writeback);
assert_eq!(va..va + len, i * PAGE_SIZE..(i + 1) * PAGE_SIZE);
}
let prot = PAGE_SIZE * 18..PAGE_SIZE * 20;
unsafe { pt.protect(&prot, perm_op(|p| p - VmPerm::W)).unwrap() };
unsafe { pt.protect(&prot, |p| p.flags -= PageFlags::W).unwrap() };
for (qr, i) in pt.cursor(&prot).unwrap().zip(18..20) {
let Qr::MappedUntyped { va, pa, len, info } = qr else {
let Qr::MappedUntyped { va, pa, len, prop } = qr else {
panic!("Expected MappedUntyped, got {:#x?}", qr);
};
assert_eq!(info.prop.perm, VmPerm::R);
assert_eq!(prop.flags, PageFlags::R);
assert_eq!(va..va + len, i * PAGE_SIZE..(i + 1) * PAGE_SIZE);
}
}
@ -137,14 +141,14 @@ fn test_large_protect_query() {
// Thus all mappings except the last few pages are mapped in 2M huge pages
let from = PAGE_SIZE * from_ppn.start..PAGE_SIZE * from_ppn.end;
let to = PAGE_SIZE * to_ppn.start..PAGE_SIZE * to_ppn.end;
let prop = MapProperty::new_general(VmPerm::RW);
let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
unsafe { pt.map(&from, &to, prop).unwrap() };
for (qr, i) in pt.cursor(&from).unwrap().zip(0..512 + 2 + 2) {
let Qr::MappedUntyped { va, pa, len, info } = qr else {
let Qr::MappedUntyped { va, pa, len, prop } = qr else {
panic!("Expected MappedUntyped, got {:#x?}", qr);
};
assert_eq!(info.prop.perm, VmPerm::RW);
assert_eq!(info.prop.cache, CachePolicy::Writeback);
assert_eq!(prop.flags, PageFlags::RW);
assert_eq!(prop.cache, CachePolicy::Writeback);
if i < 512 + 2 {
assert_eq!(va, from.start + i * PAGE_SIZE * 512);
assert_eq!(va + len, from.start + (i + 1) * PAGE_SIZE * 512);
@ -161,23 +165,23 @@ fn test_large_protect_query() {
}
let ppn = from_ppn.start + 18..from_ppn.start + 20;
let va = PAGE_SIZE * ppn.start..PAGE_SIZE * ppn.end;
unsafe { pt.protect(&va, perm_op(|p| p - VmPerm::W)).unwrap() };
unsafe { pt.protect(&va, |p| p.flags -= PageFlags::W).unwrap() };
for (qr, i) in pt
.cursor(&(va.start - PAGE_SIZE..va.start))
.unwrap()
.zip(ppn.start - 1..ppn.start)
{
let Qr::MappedUntyped { va, pa, len, info } = qr else {
let Qr::MappedUntyped { va, pa, len, prop } = qr else {
panic!("Expected MappedUntyped, got {:#x?}", qr);
};
assert_eq!(info.prop.perm, VmPerm::RW);
assert_eq!(prop.flags, PageFlags::RW);
assert_eq!(va..va + len, i * PAGE_SIZE..(i + 1) * PAGE_SIZE);
}
for (qr, i) in pt.cursor(&va).unwrap().zip(ppn.clone()) {
let Qr::MappedUntyped { va, pa, len, info } = qr else {
let Qr::MappedUntyped { va, pa, len, prop } = qr else {
panic!("Expected MappedUntyped, got {:#x?}", qr);
};
assert_eq!(info.prop.perm, VmPerm::R);
assert_eq!(prop.flags, PageFlags::R);
assert_eq!(va..va + len, i * PAGE_SIZE..(i + 1) * PAGE_SIZE);
}
for (qr, i) in pt
@ -185,10 +189,10 @@ fn test_large_protect_query() {
.unwrap()
.zip(ppn.end..ppn.end + 1)
{
let Qr::MappedUntyped { va, pa, len, info } = qr else {
let Qr::MappedUntyped { va, pa, len, prop } = qr else {
panic!("Expected MappedUntyped, got {:#x?}", qr);
};
assert_eq!(info.prop.perm, VmPerm::RW);
assert_eq!(prop.flags, PageFlags::RW);
assert_eq!(va..va + len, i * PAGE_SIZE..(i + 1) * PAGE_SIZE);
}
}

View File

@ -3,22 +3,19 @@
use core::ops::Range;
use align_ext::AlignExt;
use bitflags::bitflags;
use super::{
is_page_aligned,
kspace::KERNEL_PAGE_TABLE,
page_table::{
MapInfo, MapOp, PageTable, PageTableMode, PageTableQueryResult as PtQr,
PageTableQueryResult, UserMode,
},
PagingConstsTrait, VmFrameVec, VmIo, PAGE_SIZE,
page_table::{PageTable, PageTableMode, UserMode},
CachePolicy, PageFlags, PageProperty, PagingConstsTrait, PrivilegedPageFlags, VmFrameVec, VmIo,
PAGE_SIZE,
};
use crate::{
arch::mm::{PageTableEntry, PagingConsts},
prelude::*,
vm::{
page_table::{CachePolicy, Cursor, MapProperty},
page_table::{Cursor, PageTableQueryResult as PtQr},
VmFrame, MAX_USERSPACE_VADDR,
},
Error,
@ -82,18 +79,17 @@ impl VmSpace {
// If overwrite is forbidden, we should check if there are existing mappings
if !options.can_overwrite {
while let Some(qr) = cursor.query() {
if matches!(qr, PageTableQueryResult::Mapped { .. }) {
if matches!(qr, PtQr::Mapped { .. }) {
return Err(Error::MapAlreadyMappedVaddr);
}
}
cursor.jump(va_range.start);
}
let prop = MapProperty {
perm: options.perm,
global: false,
extension: 0,
let prop = PageProperty {
flags: options.flags,
cache: CachePolicy::Writeback,
priv_flags: PrivilegedPageFlags::USER,
};
for frame in frames.into_iter() {
@ -118,11 +114,11 @@ impl VmSpace {
/// Query about the mapping information about a byte in virtual memory.
/// This is more handy than [`query_range`], but less efficient if you want
/// to query in a batch.
pub fn query(&self, vaddr: Vaddr) -> Result<Option<MapInfo>> {
pub fn query(&self, vaddr: Vaddr) -> Result<Option<PageProperty>> {
if !(0..MAX_USERSPACE_VADDR).contains(&vaddr) {
return Err(Error::AccessDenied);
}
Ok(self.pt.query(vaddr).map(|(_pa, info)| info))
Ok(self.pt.query(vaddr).map(|(_pa, prop)| prop))
}
/// Unmaps the physical memory pages within the VM address range.
@ -160,10 +156,13 @@ impl VmSpace {
/// The method panics when the virtual address is not aligned to the base page
/// size.
///
/// It is guaranteed that the operation is called once for each valid
/// page found in the range.
///
/// TODO: It returns an error when an invalid operation occurs, such as protecting
/// part of a huge page, but the effects are not reverted, leaving us
/// in a bad state.
pub fn protect(&self, range: &Range<Vaddr>, op: impl MapOp) -> Result<()> {
pub fn protect(&self, range: &Range<Vaddr>, op: impl FnMut(&mut PageProperty)) -> Result<()> {
if !is_page_aligned(range.start) || !is_page_aligned(range.end) {
return Err(Error::InvalidArgs);
}
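As a usage sketch (assuming a `vm_space: VmSpace` and a page-aligned `range`), revoking write access now passes a closure that mutates the `PageProperty` in place:

    vm_space.protect(&range, |p| p.flags -= PageFlags::W)?;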
@ -217,8 +216,8 @@ impl VmIo for VmSpace {
for qr in self.query_range(&(vaddr..vaddr + range_end))? {
match qr {
VmQueryResult::NotMapped { .. } => return Err(Error::AccessDenied),
VmQueryResult::Mapped { info, .. } => {
if !info.prop.perm.contains(VmPerm::W) {
VmQueryResult::Mapped { prop, .. } => {
if !prop.flags.contains(PageFlags::W) {
return Err(Error::AccessDenied);
}
}
@ -239,8 +238,8 @@ pub struct VmMapOptions {
addr: Option<Vaddr>,
/// map align
align: usize,
/// permission
perm: VmPerm,
/// page permissions and status
flags: PageFlags,
/// can overwrite
can_overwrite: bool,
}
@ -251,7 +250,7 @@ impl VmMapOptions {
Self {
addr: None,
align: PagingConsts::BASE_PAGE_SIZE,
perm: VmPerm::empty(),
flags: PageFlags::empty(),
can_overwrite: false,
}
}
@ -271,8 +270,8 @@ impl VmMapOptions {
/// the mapping can be read, written, or executed.
///
/// The default value of this option is read-only.
pub fn perm(&mut self, perm: VmPerm) -> &mut Self {
self.perm = perm;
pub fn flags(&mut self, flags: PageFlags) -> &mut Self {
self.flags = flags;
self
}
@ -304,40 +303,6 @@ impl Default for VmMapOptions {
}
}
bitflags! {
/// Virtual memory protection permissions.
pub struct VmPerm: u8 {
/// Readable.
const R = 0b00000001;
/// Writable.
const W = 0b00000010;
/// Executable.
const X = 0b00000100;
/// User accessible.
const U = 0b00001000;
/// Readable + writable.
const RW = Self::R.bits | Self::W.bits;
/// Readable + execuable.
const RX = Self::R.bits | Self::X.bits;
/// Readable + writable + executable.
const RWX = Self::R.bits | Self::W.bits | Self::X.bits;
/// Readable + writable + user.
const RWU = Self::R.bits | Self::W.bits | Self::U.bits;
/// Readable + execuable + user.
const RXU = Self::R.bits | Self::X.bits | Self::U.bits;
/// Readable + writable + executable + user.
const RWXU = Self::R.bits | Self::W.bits | Self::X.bits | Self::U.bits;
}
}
impl TryFrom<u64> for VmPerm {
type Error = Error;
fn try_from(value: u64) -> Result<Self> {
VmPerm::from_bits(value as u8).ok_or(Error::InvalidArgs)
}
}
/// The iterator for querying over the VM space without modifying it.
pub struct VmQueryIter<'a> {
cursor: Cursor<'a, UserMode, PageTableEntry, PagingConsts>,
@ -351,7 +316,7 @@ pub enum VmQueryResult {
Mapped {
va: Vaddr,
frame: VmFrame,
info: MapInfo,
prop: PageProperty,
},
}
@ -361,7 +326,7 @@ impl Iterator for VmQueryIter<'_> {
fn next(&mut self) -> Option<Self::Item> {
self.cursor.next().map(|ptqr| match ptqr {
PtQr::NotMapped { va, len } => VmQueryResult::NotMapped { va, len },
PtQr::Mapped { va, frame, info } => VmQueryResult::Mapped { va, frame, info },
PtQr::Mapped { va, frame, prop } => VmQueryResult::Mapped { va, frame, prop },
// It is not possible to map untyped memory in user space.
PtQr::MappedUntyped { .. } => unreachable!(),
})

View File

@ -4,10 +4,7 @@
//! When creating a process from an ELF file, we use the elf_load_info to construct the VmSpace
use align_ext::AlignExt;
use aster_frame::{
task::Task,
vm::{VmIo, VmPerm},
};
use aster_frame::{task::Task, vm::VmIo};
use aster_rights::{Full, Rights};
use xmas_elf::program::{self, ProgramHeader64};
@ -262,7 +259,7 @@ fn map_segment_vmo(
root_vmar: &Vmar<Full>,
base_addr: Vaddr,
) -> Result<()> {
let perms = VmPerms::from(parse_segment_perm(program_header.flags));
let perms = parse_segment_perm(program_header.flags);
let offset = (program_header.virtual_addr as Vaddr).align_down(PAGE_SIZE);
trace!(
"map segment vmo: virtual addr = 0x{:x}, size = 0x{:x}, perms = {:?}",
@ -359,16 +356,16 @@ fn init_segment_vmo(program_header: &ProgramHeader64, elf_file: &Dentry) -> Resu
Ok((segment_vmo.to_dyn(), anonymous_map_size))
}
fn parse_segment_perm(flags: xmas_elf::program::Flags) -> VmPerm {
let mut vm_perm = VmPerm::empty();
fn parse_segment_perm(flags: xmas_elf::program::Flags) -> VmPerms {
let mut vm_perm = VmPerms::empty();
if flags.is_read() {
vm_perm |= VmPerm::R;
vm_perm |= VmPerms::READ;
}
if flags.is_write() {
vm_perm |= VmPerm::W;
vm_perm |= VmPerms::WRITE;
}
if flags.is_execute() {
vm_perm |= VmPerm::X;
vm_perm |= VmPerms::EXEC;
}
vm_perm
}

View File

@ -3,7 +3,6 @@
//! This mod defines mmap flags and the handler to syscall mmap
use align_ext::AlignExt;
use aster_frame::vm::VmPerm;
use aster_rights::Rights;
use super::SyscallReturn;
@ -27,7 +26,7 @@ pub fn sys_mmap(
offset: u64,
) -> Result<SyscallReturn> {
log_syscall_entry!(SYS_MMAP);
let perms = VmPerm::try_from(perms).unwrap();
let perms = VmPerms::from_posix_prot_bits(perms as u32).unwrap();
let option = MMapOptions::try_from(flags as u32)?;
let res = do_sys_mmap(
addr as usize,
@ -43,14 +42,14 @@ pub fn sys_mmap(
fn do_sys_mmap(
addr: Vaddr,
len: usize,
vm_perm: VmPerm,
vm_perms: VmPerms,
option: MMapOptions,
fd: FileDesc,
offset: usize,
) -> Result<Vaddr> {
debug!(
"addr = 0x{:x}, len = 0x{:x}, perms = {:?}, option = {:?}, fd = {}, offset = 0x{:x}",
addr, len, vm_perm, option, fd, offset
addr, len, vm_perms, option, fd, offset
);
let len = len.align_up(PAGE_SIZE);
@ -58,7 +57,6 @@ fn do_sys_mmap(
if offset % PAGE_SIZE != 0 {
return_errno_with_message!(Errno::EINVAL, "mmap only support page-aligned offset");
}
let perms = VmPerms::from(vm_perm);
let vmo = if option.flags.contains(MMapFlags::MAP_ANONYMOUS) {
if offset != 0 {
@ -72,7 +70,7 @@ fn do_sys_mmap(
let current = current!();
let root_vmar = current.root_vmar();
let vm_map_options = {
let mut options = root_vmar.new_map(vmo.to_dyn(), perms)?;
let mut options = root_vmar.new_map(vmo.to_dyn(), vm_perms)?;
let flags = option.flags;
if flags.contains(MMapFlags::MAP_FIXED) {
options = options.offset(addr).can_overwrite(true);

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: MPL-2.0
use aster_frame::vm::VmPerm;
use aster_frame::vm::PageFlags;
use aster_rights::Rights;
use bitflags::bitflags;
@ -16,6 +16,12 @@ bitflags! {
}
}
impl VmPerms {
pub fn from_posix_prot_bits(bits: u32) -> Option<Self> {
VmPerms::from_bits(bits)
}
}
impl From<Rights> for VmPerms {
fn from(rights: Rights) -> VmPerms {
let mut vm_perm = VmPerms::empty();
@ -48,34 +54,34 @@ impl From<VmPerms> for Rights {
}
}
impl From<VmPerm> for VmPerms {
fn from(perm: VmPerm) -> Self {
impl From<PageFlags> for VmPerms {
fn from(flags: PageFlags) -> Self {
let mut perms = VmPerms::empty();
if perm.contains(VmPerm::R) {
if flags.contains(PageFlags::R) {
perms |= VmPerms::READ;
}
if perm.contains(VmPerm::W) {
if flags.contains(PageFlags::W) {
perms |= VmPerms::WRITE;
}
if perm.contains(VmPerm::X) {
if flags.contains(PageFlags::X) {
perms |= VmPerms::EXEC;
}
perms
}
}
impl From<VmPerms> for VmPerm {
fn from(perms: VmPerms) -> Self {
let mut perm = VmPerm::U;
if perms.contains(VmPerms::READ) {
perm |= VmPerm::R;
impl From<VmPerms> for PageFlags {
fn from(val: VmPerms) -> Self {
let mut flags = PageFlags::empty();
if val.contains(VmPerms::READ) {
flags |= PageFlags::R;
}
if perms.contains(VmPerms::WRITE) {
perm |= VmPerm::W;
if val.contains(VmPerms::WRITE) {
flags |= PageFlags::W;
}
if perms.contains(VmPerms::EXEC) {
perm |= VmPerm::X;
if val.contains(VmPerms::EXEC) {
flags |= PageFlags::X;
}
perm
flags
}
}
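A round-trip sketch of these conversions (illustrative):

    let perms = VmPerms::READ | VmPerms::WRITE;
    let flags = PageFlags::from(perms);
    assert_eq!(flags, PageFlags::RW);
    assert_eq!(VmPerms::from(flags), perms);

Note that, unlike the old `VmPerm` conversion, user accessibility is no longer encoded in `PageFlags`; it is now carried by `PrivilegedPageFlags::USER`, which `VmSpace::map` sets unconditionally.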

View File

@ -48,7 +48,7 @@ impl Vmar<Rights> {
///
/// This method requires the following access rights:
/// 1. The VMAR contains the rights corresponding to the memory permissions of
/// the mapping. For example, if `perms` contains `VmPerm::WRITE`,
/// the mapping. For example, if `perms` contains `VmPerms::WRITE`,
/// then the VMAR must have the Write right.
/// 2. Similarly, the VMO contains the rights corresponding to the memory
/// permissions of the mapping.

View File

@ -53,7 +53,7 @@ impl<R: TRights> Vmar<TRightSet<R>> {
///
/// This method requires the following access rights:
/// 1. The VMAR contains the rights corresponding to the memory permissions of
/// the mapping. For example, if `perms` contains `VmPerm::WRITE`,
/// the mapping. For example, if `perms` contains `VmPerms::WRITE`,
/// then the VMAR must have the Write right.
/// 2. Similarly, the VMO contains the rights corresponding to the memory
/// permissions of the mapping.
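Both doc-comment updates above (for the `Rights`-based VMAR and the `TRights`-based one) state the same invariant: the rights implied by the requested `VmPerms` must be held by the VMAR and by the VMO. A hedged sketch of the VMO half of that check, built from the `From<VmPerms> for Rights` conversion in this commit and the `check_rights` call that the mapping code already makes; the exact error behavior of `check_rights` is assumed here:

use aster_rights::Rights;

// `Vmo`, `VmPerms`, and `Result` are the crate-local types used throughout this diff.
// Reject a mapping whose requested permissions exceed the rights the VMO holds.
fn check_vmo_covers_perms(vmo: &Vmo<Rights>, perms: VmPerms) -> Result<()> {
    vmo.check_rights(Rights::from(perms))
}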

View File

@ -2,7 +2,7 @@
use core::ops::Range;
use aster_frame::vm::{VmFrame, VmFrameVec, VmIo, VmMapOptions, VmPerm, VmSpace};
use aster_frame::vm::{PageFlags, VmFrame, VmFrameVec, VmIo, VmMapOptions, VmSpace};
use super::{interval::Interval, is_intersected, Vmar, Vmar_};
use crate::{
@ -51,9 +51,9 @@ struct VmMappingInner {
is_destroyed: bool,
/// The pages already mapped. The key is the page index in vmo.
mapped_pages: BTreeSet<usize>,
/// The permission of pages in the mapping.
/// All pages within the same VmMapping have the same permission.
perm: VmPerm,
/// The permissions of pages in the mapping.
/// All pages within the same VmMapping have the same permissions.
perms: VmPerms,
}
impl Interval<usize> for Arc<VmMapping> {
@ -95,7 +95,7 @@ impl VmMapping {
map_to_addr,
is_destroyed: false,
mapped_pages: BTreeSet::new(),
perm: VmPerm::from(perms),
perms,
};
Ok(Self {
@ -113,15 +113,15 @@ impl VmMapping {
fn clone_partial(
&self,
range: Range<usize>,
new_perm: Option<VmPerm>,
new_perms: Option<VmPerms>,
) -> Result<Arc<VmMapping>> {
let partial_mapping = Arc::new(self.try_clone()?);
// Adjust the mapping range and the permission.
{
let mut inner = partial_mapping.inner.lock();
inner.shrink_to(range);
if let Some(perm) = new_perm {
inner.perm = perm;
if let Some(perms) = new_perms {
inner.perms = perms;
}
}
Ok(partial_mapping)
@ -174,8 +174,8 @@ impl VmMapping {
// TODO: the current logic is vulnerable to TOCTTOU attacks, since the permission may change after the check.
let page_idx_range = get_page_idx_range(&(vmo_read_offset..vmo_read_offset + buf.len()));
self.check_page_idx_range(&page_idx_range)?;
let read_perm = VmPerm::R;
self.check_perm(&read_perm)?;
let read_perms = VmPerms::READ;
self.check_perms(&read_perms)?;
self.vmo.read_bytes(vmo_read_offset, buf)?;
Ok(())
@ -186,8 +186,8 @@ impl VmMapping {
let page_idx_range = get_page_idx_range(&(vmo_write_offset..vmo_write_offset + buf.len()));
self.check_page_idx_range(&page_idx_range)?;
let write_perm = VmPerm::W;
self.check_perm(&write_perm)?;
let write_perms = VmPerms::WRITE;
self.check_perms(&write_perms)?;
let mut page_addr =
self.map_to_addr() - self.vmo_offset() + page_idx_range.start * PAGE_SIZE;
@ -200,7 +200,7 @@ impl VmMapping {
// page fault before writing at the VMO to guarantee the consistency between VMO and the page table.
let need_page_fault = vm_space
.query(page_addr)?
.is_some_and(|info| !info.contains(VmPerm::W));
.is_some_and(|prop| !prop.flags.contains(PageFlags::W));
if need_page_fault {
self.handle_page_fault(page_addr, false, true)?;
}
@ -239,8 +239,8 @@ impl VmMapping {
self.vmo.check_rights(Rights::READ)?;
}
let required_perm = if write { VmPerm::W } else { VmPerm::R };
self.check_perm(&required_perm)?;
let required_perm = if write { VmPerms::WRITE } else { VmPerms::READ };
self.check_perms(&required_perm)?;
let frame = self.vmo.get_committed_frame(page_idx, write)?;
@ -257,7 +257,7 @@ impl VmMapping {
/// it should not be called during the direct iteration of the `vm_mappings`.
pub(super) fn protect(&self, new_perms: VmPerms, range: Range<usize>) -> Result<()> {
// If `new_perms` is equal to `old_perms`, `protect()` will not modify any permission in the VmMapping.
let old_perms = VmPerms::from(self.inner.lock().perm);
let old_perms = self.inner.lock().perms;
if old_perms == new_perms {
return Ok(());
}
@ -265,7 +265,7 @@ impl VmMapping {
let rights = Rights::from(new_perms);
self.vmo().check_rights(rights)?;
// Protect the permissions recorded in the VmMapping.
self.protect_with_subdivision(&range, VmPerm::from(new_perms))?;
self.protect_with_subdivision(&range, new_perms)?;
// Protect permission in the VmSpace.
let vmar = self.parent.upgrade().unwrap();
let vm_space = vmar.vm_space();
@ -291,7 +291,7 @@ impl VmMapping {
map_to_addr: inner.map_to_addr,
is_destroyed: inner.is_destroyed,
mapped_pages: BTreeSet::new(),
perm: inner.perm,
perms: inner.perms,
}
};
@ -321,12 +321,16 @@ impl VmMapping {
/// Generally, this function is only used in the `protect()` method.
/// This method modifies the parent `Vmar` in the end if subdividing is required.
/// It removes the current mapping and adds the split mappings to the Vmar.
fn protect_with_subdivision(&self, intersect_range: &Range<usize>, perm: VmPerm) -> Result<()> {
fn protect_with_subdivision(
&self,
intersect_range: &Range<usize>,
perms: VmPerms,
) -> Result<()> {
let mut additional_mappings = Vec::new();
let range = self.range();
// Condition 4, the `additional_mappings` will be empty.
if range.start == intersect_range.start && range.end == intersect_range.end {
self.inner.lock().perm = perm;
self.inner.lock().perms = perms;
return Ok(());
}
// Condition 1 or 3, which needs an additional new VmMapping with range (range.start..intersect_range.start)
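To make the subdivision conditions concrete: protecting `intersect_range` inside an existing mapping leaves at most a left remainder that keeps the old permissions, the protected middle, and a right remainder. A small sketch of just the range arithmetic (illustrative only; the real code materializes each piece with `clone_partial`, as shown below):

use core::ops::Range;

// Split `range` around `mid`: the optional left/right pieces keep the old
// permissions, while `mid` itself receives the new ones.
fn split_for_protect(
    range: &Range<usize>,
    mid: &Range<usize>,
) -> (Option<Range<usize>>, Range<usize>, Option<Range<usize>>) {
    let left = (range.start < mid.start).then(|| range.start..mid.start);
    let right = (mid.end < range.end).then(|| mid.end..range.end);
    (left, mid.clone(), right)
}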
@ -342,7 +346,7 @@ impl VmMapping {
additional_mappings.push(additional_right_mapping);
}
// The protected VmMapping must exist and its range is `intersect_range`.
let protected_mapping = self.clone_partial(intersect_range.clone(), Some(perm))?;
let protected_mapping = self.clone_partial(intersect_range.clone(), Some(perms))?;
// Begin to modify the `Vmar`.
let vmar = self.parent.upgrade().unwrap();
@ -427,8 +431,8 @@ impl VmMapping {
self.inner.lock().trim_right(vm_space, vaddr)
}
fn check_perm(&self, perm: &VmPerm) -> Result<()> {
self.inner.lock().check_perm(perm)
fn check_perms(&self, perms: &VmPerms) -> Result<()> {
self.inner.lock().check_perms(perms)
}
fn check_page_idx_range(&self, page_idx_range: &Range<usize>) -> Result<()> {
@ -447,19 +451,19 @@ impl VmMappingInner {
) -> Result<()> {
let map_addr = self.page_map_addr(page_idx);
let vm_perm = {
let mut perm = self.perm;
let vm_perms = {
let mut perms = self.perms;
if is_readonly {
debug_assert!(vmo.is_cow_vmo());
perm -= VmPerm::W;
perms -= VmPerms::WRITE;
}
perm
perms
};
let vm_map_options = {
let mut options = VmMapOptions::new();
options.addr(Some(map_addr));
options.perm(vm_perm);
options.flags(vm_perms.into());
options
};
@ -514,13 +518,13 @@ impl VmMappingInner {
debug_assert!(range.end % PAGE_SIZE == 0);
let start_page = (range.start - self.map_to_addr + self.vmo_offset) / PAGE_SIZE;
let end_page = (range.end - self.map_to_addr + self.vmo_offset) / PAGE_SIZE;
let perm = VmPerm::from(perms);
let flags: PageFlags = perms.into();
for page_idx in start_page..end_page {
let page_addr = self.page_map_addr(page_idx);
if vm_space.query(page_addr)?.is_some() {
// If the page is already mapped, we will modify the page table
let page_range = page_addr..(page_addr + PAGE_SIZE);
vm_space.protect(&page_range, perm)?;
vm_space.protect(&page_range, |p| p.flags = flags)?;
}
}
Ok(())
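The `protect` call above shows the shape of the new `VmSpace` API: rather than passing a permission value, the caller passes a closure that edits the queried page property in place. A minimal sketch of another use of the same pattern, assuming only the signature implied by the call site (a virtual-address range plus a closure over the property); stripping the write bit is an illustrative use case, not one taken from this commit:

use core::ops::Range;

use aster_frame::vm::{PageFlags, Vaddr, VmSpace};

// Make every already-mapped page in `range` read-only, e.g. before sharing it
// copy-on-write. `-=` clears the W bit, assuming `PageFlags` is the usual
// bitflags-style type.
fn make_read_only(vm_space: &VmSpace, range: Range<Vaddr>) {
    vm_space
        .protect(&range, |prop| prop.flags -= PageFlags::W)
        .unwrap();
}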
@ -581,8 +585,8 @@ impl VmMappingInner {
self.map_to_addr..self.map_to_addr + self.map_size
}
fn check_perm(&self, perm: &VmPerm) -> Result<()> {
if !self.perm.contains(*perm) {
fn check_perms(&self, perms: &VmPerms) -> Result<()> {
if !self.perms.contains(*perms) {
return_errno_with_message!(Errno::EACCES, "perm check fails");
}
Ok(())
@ -617,7 +621,7 @@ impl<R1, R2> VmarMapOptions<R1, R2> {
/// permissions.
///
/// The VMO must have access rights that correspond to the memory
/// access permissions. For example, if `perms` contains `VmPerm::Write`,
/// access permissions. For example, if `perms` contains `VmPerms::WRITE`,
/// then `vmo.rights()` should contain `Rights::WRITE`.
pub fn new(parent: Vmar<R1>, vmo: Vmo<R2>, perms: VmPerms) -> Self {
let size = vmo.size();