Introduce DynPage and make page table map it

Committed by: Tate, Hongliang Tian
Parent: 40c32b5ff5
Commit: cab348349e

Cargo.lock (generated): 75 lines changed
@@ -126,6 +126,9 @@ dependencies = [
  "linux-boot-params",
  "log",
  "multiboot2",
+ "num",
+ "num-derive",
+ "num-traits",
  "pod",
  "rsdp",
  "spin 0.9.8",
@@ -1053,6 +1056,78 @@ dependencies = [
  "syn 1.0.109",
 ]
 
+[[package]]
+name = "num"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23"
+dependencies = [
+ "num-complex",
+ "num-integer",
+ "num-iter",
+ "num-rational",
+ "num-traits",
+]
+
+[[package]]
+name = "num-complex"
+version = "0.4.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "num-derive"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.49",
+]
+
+[[package]]
+name = "num-integer"
+version = "0.1.46"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "num-iter"
+version = "0.1.45"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf"
+dependencies = [
+ "autocfg",
+ "num-integer",
+ "num-traits",
+]
+
+[[package]]
+name = "num-rational"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824"
+dependencies = [
+ "num-integer",
+ "num-traits",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
+dependencies = [
+ "autocfg",
+]
+
 [[package]]
 name = "once_cell"
 version = "1.18.0"
@@ -26,6 +26,9 @@ ktest = { path = "../libs/ktest" }
 id-alloc = { path = "../libs/id-alloc" }
 lazy_static = { version = "1.0", features = ["spin_no_std"] }
 log = "0.4"
+num = { version = "0.4", default-features = false }
+num-derive = { version = "0.4", default-features = false }
+num-traits = { version = "0.2", default-features = false }
 pod = { git = "https://github.com/asterinas/pod", rev = "d7dba56" }
 spin = "0.9.4"
 static_assertions = "1.1.0"
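The three new `num` crates back the `FromPrimitive` derive that this commit puts on `PageUsage`: the meta slot stores a page's usage as a raw `u8`, and the derive generates the fallible decoding back into the enum. In miniature (the variant values here are illustrative, not the kernel's actual discriminants):

    use num_derive::FromPrimitive;

    #[repr(u8)]
    #[derive(Debug, FromPrimitive, PartialEq)]
    enum Usage {
        Unused = 0,
        Frame = 1,
    }

    fn decode(raw: u8) -> Option<Usage> {
        // `from_u8` is provided through `num_traits::FromPrimitive`, which the
        // derive implements; unknown raw values decode to `None`.
        num::FromPrimitive::from_u8(raw)
    }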
@@ -74,7 +74,7 @@ impl PageTableEntry {
 }
 
 impl PageTableEntryTrait for PageTableEntry {
-    fn new_frame(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self {
+    fn new_page(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self {
         let mut pte = Self(paddr as u64 & Self::PHYS_MASK | PageTableFlags::LAST_PAGE.bits());
         pte.set_prop(prop);
         pte
@@ -144,7 +144,7 @@ impl PageTableEntryTrait for PageTableEntry {
         self.0 & PageTableFlags::PRESENT.bits() != 0
     }
 
-    fn new_frame(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self {
+    fn new_page(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self {
         let mut pte = Self(
             paddr & Self::PHYS_ADDR_MASK
                 | ((level != 1) as usize) << PageTableFlags::HUGE.bits().ilog2(),
@@ -19,12 +19,12 @@ pub use segment::Segment;
 
 use super::page::{
     meta::{FrameMeta, MetaSlot, PageMeta, PageUsage},
-    Page,
+    DynPage, Page,
 };
 use crate::{
     mm::{
         io::{VmIo, VmReader, VmWriter},
-        paddr_to_vaddr, HasPaddr, Paddr, PagingLevel, PAGE_SIZE,
+        paddr_to_vaddr, HasPaddr, Paddr, PAGE_SIZE,
     },
     Error, Result,
 };
@@ -43,24 +43,6 @@ pub struct Frame {
     page: Page<FrameMeta>,
 }
 
-impl From<Page<FrameMeta>> for Frame {
-    fn from(page: Page<FrameMeta>) -> Self {
-        Self { page }
-    }
-}
-
-impl From<Frame> for Page<FrameMeta> {
-    fn from(frame: Frame) -> Self {
-        frame.page
-    }
-}
-
-impl HasPaddr for Frame {
-    fn paddr(&self) -> Paddr {
-        self.start_paddr()
-    }
-}
-
 impl Frame {
     /// Returns the physical address of the page frame.
     pub fn start_paddr(&self) -> Paddr {
@@ -72,20 +54,9 @@ impl Frame {
         self.start_paddr() + PAGE_SIZE
     }
 
-    /// Gets the paging level of the frame.
-    ///
-    /// This is the level of the page table entry that maps the frame,
-    /// which determines the size of the frame.
-    ///
-    /// Currently, the level is always 1, which means the frame is a regular
-    /// page frame.
-    pub(crate) fn level(&self) -> PagingLevel {
-        1
-    }
-
     /// Returns the size of the frame
     pub const fn size(&self) -> usize {
-        PAGE_SIZE
+        self.page.size()
     }
 
     /// Returns a raw pointer to the starting virtual address of the frame.
@@ -110,6 +81,36 @@ impl Frame {
     }
 }
 
+impl From<Page<FrameMeta>> for Frame {
+    fn from(page: Page<FrameMeta>) -> Self {
+        Self { page }
+    }
+}
+
+impl TryFrom<DynPage> for Frame {
+    type Error = DynPage;
+
+    /// Try converting a [`DynPage`] into the statically-typed [`Frame`].
+    ///
+    /// If the dynamic page is not used as an untyped page frame, it will
+    /// return the dynamic page itself as is.
+    fn try_from(page: DynPage) -> core::result::Result<Self, Self::Error> {
+        page.try_into().map(|p: Page<FrameMeta>| p.into())
+    }
+}
+
+impl From<Frame> for Page<FrameMeta> {
+    fn from(frame: Frame) -> Self {
+        frame.page
+    }
+}
+
+impl HasPaddr for Frame {
+    fn paddr(&self) -> Paddr {
+        self.start_paddr()
+    }
+}
+
 impl<'a> Frame {
     /// Returns a reader to read data from it.
     pub fn reader(&'a self) -> VmReader<'a> {
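At a call site, the fallible conversion reads as below (a sketch: `dyn_page` stands for any `DynPage`, e.g. one taken out of a page table query, and the two handler functions are placeholders):

    match Frame::try_from(dyn_page) {
        // The dynamic page really is an untyped page frame.
        Ok(frame) => handle_frame(frame),
        // Any other usage: the handle comes back intact, so a failed
        // conversion never leaks a reference count.
        Err(dyn_page) => handle_other(dyn_page),
    }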
@@ -152,8 +153,8 @@ impl PageMeta for FrameMeta {
     const USAGE: PageUsage = PageUsage::Frame;
 
     fn on_drop(_page: &mut Page<Self>) {
-        // Nothing should be done so far since the dropping the page would
-        // take all cared.
+        // Nothing should be done so far since dropping the page would
+        // have taken care of everything.
     }
 }
@@ -37,6 +37,7 @@ pub mod mapping {
 
 use alloc::vec::Vec;
 use core::{
+    marker::PhantomData,
     mem::{size_of, ManuallyDrop},
     ops::Range,
     panic,
@@ -44,20 +45,22 @@ use core::{
 };
 
 use log::info;
+use num_derive::FromPrimitive;
 use static_assertions::const_assert_eq;
 
-use super::Page;
+use super::{allocator, Page};
 use crate::{
     arch::mm::{PageTableEntry, PagingConsts},
     mm::{
-        kspace::BOOT_PAGE_TABLE, paddr_to_vaddr, page::allocator::PAGE_ALLOCATOR, page_size,
-        page_table::PageTableEntryTrait, CachePolicy, Paddr, PageFlags, PageProperty,
-        PagingConstsTrait, PagingLevel, PrivilegedPageFlags, PAGE_SIZE,
+        kspace::BOOT_PAGE_TABLE, paddr_to_vaddr, page_size, page_table::PageTableEntryTrait,
+        CachePolicy, Paddr, PageFlags, PageProperty, PagingConstsTrait, PagingLevel,
+        PrivilegedPageFlags, Vaddr, PAGE_SIZE,
     },
 };
 
 /// Represents the usage of a page.
 #[repr(u8)]
+#[derive(Debug, FromPrimitive, PartialEq)]
 pub enum PageUsage {
     // The zero variant is reserved for the unused type. Only an unused page
     // can be designated for one of the other purposes.
@@ -95,8 +98,7 @@ pub(in crate::mm) struct MetaSlot {
 
 pub(super) union MetaSlotInner {
     _frame: ManuallyDrop<FrameMeta>,
-    // Make sure the the generic parameters don't effect the memory layout.
-    _pt: ManuallyDrop<PageTablePageMeta<PageTableEntry, PagingConsts>>,
+    _pt: ManuallyDrop<PageTablePageMeta>,
 }
 
 // Currently the sizes of the `MetaSlotInner` union variants are no larger
@@ -121,6 +123,38 @@ pub trait PageMeta: Default + Sync + private::Sealed + Sized {
     fn on_drop(page: &mut Page<Self>);
 }
 
+/// An internal routine in dropping implementations.
+///
+/// # Safety
+///
+/// The caller should ensure that the pointer points to a page's metadata slot. The
+/// handle must be the last handle to the page, and the page must be about to be
+/// dropped, as the metadata slot becomes uninitialized after this operation.
+pub(super) unsafe fn drop_as_last<M: PageMeta>(ptr: *const MetaSlot) {
+    // This would be guaranteed as a safety requirement.
+    debug_assert_eq!((*ptr).ref_count.load(Ordering::Relaxed), 0);
+    // Let the custom dropper handle the drop.
+    let mut page = Page::<M> {
+        ptr,
+        _marker: PhantomData,
+    };
+    M::on_drop(&mut page);
+    let _ = ManuallyDrop::new(page);
+    // Drop the metadata.
+    core::ptr::drop_in_place(ptr as *mut M);
+    // No handles means no usage. This also releases the page as unused for further
+    // calls to `Page::from_unused`.
+    (*ptr).usage.store(0, Ordering::Release);
+    // Deallocate the page.
+    // This returns the page to the allocator for further use. It is done after
+    // the release of the metadata to avoid re-allocation before the metadata
+    // is reset.
+    allocator::PAGE_ALLOCATOR.get().unwrap().lock().dealloc(
+        mapping::meta_to_page::<PagingConsts>(ptr as Vaddr) / PAGE_SIZE,
+        1,
+    );
+}
+
 mod private {
     pub trait Sealed {}
 }
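The ordering inside `drop_as_last` is deliberate: the metadata is destroyed first, then the slot's `usage` is released, and only then does the physical page go back to the allocator, so a racing `Page::from_unused` can never claim a half-reset slot. The same protocol in a minimal standalone model (names are illustrative, not this module's API):

    use std::sync::atomic::{AtomicU8, Ordering};

    struct Slot {
        usage: AtomicU8,
    }

    fn release_slot(slot: &Slot, dealloc: impl FnOnce()) {
        // 1. The per-slot metadata would be dropped here (elided in this model).
        // 2. Publish the slot as unused. `Release` makes the reset visible to
        //    whoever later claims the slot with an acquiring CAS.
        slot.usage.store(0, Ordering::Release);
        // 3. Only now may the physical page be handed back for re-allocation.
        dealloc();
    }

    fn claim_slot(slot: &Slot, new_usage: u8) -> bool {
        // Mirrors `Page::from_unused`: only a currently-unused slot may be claimed.
        slot.usage
            .compare_exchange(0, new_usage, Ordering::SeqCst, Ordering::Relaxed)
            .is_ok()
    }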
@@ -141,10 +175,14 @@ pub struct FrameMeta {
 
 impl Sealed for FrameMeta {}
 
+/// The metadata of any kind of page table pages.
+/// Make sure the generic parameters don't affect the memory layout.
 #[derive(Debug, Default)]
 #[repr(C)]
-pub struct PageTablePageMeta<E: PageTableEntryTrait, C: PagingConstsTrait>
-where
+pub struct PageTablePageMeta<
+    E: PageTableEntryTrait = PageTableEntry,
+    C: PagingConstsTrait = PagingConsts,
+> where
     [(); C::NR_LEVELS as usize]:,
 {
     pub lock: AtomicU8,
@@ -230,7 +268,13 @@ pub(crate) fn init() -> Vec<Range<Paddr>> {
 
 fn alloc_meta_pages(nframes: usize) -> Vec<Paddr> {
     let mut meta_pages = Vec::new();
-    let start_frame = PAGE_ALLOCATOR.get().unwrap().lock().alloc(nframes).unwrap() * PAGE_SIZE;
+    let start_frame = allocator::PAGE_ALLOCATOR
+        .get()
+        .unwrap()
+        .lock()
+        .alloc(nframes)
+        .unwrap()
+        * PAGE_SIZE;
     // Zero them out as initialization.
     let vaddr = paddr_to_vaddr(start_frame) as *mut u8;
     unsafe { core::ptr::write_bytes(vaddr, 0, PAGE_SIZE * nframes) };
@@ -20,18 +20,19 @@ pub(in crate::mm) mod meta;
 
 use core::{
     marker::PhantomData,
+    mem::ManuallyDrop,
     panic,
     sync::atomic::{AtomicU32, AtomicUsize, Ordering},
 };
 
-use meta::{mapping, MetaSlot, PageMeta};
+use meta::{mapping, FrameMeta, MetaSlot, PageMeta, PageUsage};
 
-use super::PAGE_SIZE;
+use super::{Frame, PagingLevel, PAGE_SIZE};
 use crate::mm::{Paddr, PagingConsts, Vaddr};
 
 static MAX_PADDR: AtomicUsize = AtomicUsize::new(0);
 
-/// Representing a page that has a statically-known usage purpose,
-/// whose metadata is represented by `M`.
+/// A page with a statically-known usage, whose metadata is represented by `M`.
 #[derive(Debug)]
 pub struct Page<M: PageMeta> {
     pub(super) ptr: *const MetaSlot,
@@ -55,15 +56,17 @@ impl<M: PageMeta> Page<M> {
         let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
         let ptr = vaddr as *const MetaSlot;
 
         // SAFETY: The aligned pointer points to an initialized `MetaSlot`.
         let usage = unsafe { &(*ptr).usage };
-        let get_ref_count = unsafe { &(*ptr).ref_count };
+        // SAFETY: The aligned pointer points to an initialized `MetaSlot`.
+        let ref_count = unsafe { &(*ptr).ref_count };
 
         usage
             .compare_exchange(0, M::USAGE as u8, Ordering::SeqCst, Ordering::Relaxed)
             .expect("page already in use when trying to get a new handle");
 
-        let old_get_ref_count = get_ref_count.fetch_add(1, Ordering::Relaxed);
-        debug_assert!(old_get_ref_count == 0);
+        let old_ref_count = ref_count.fetch_add(1, Ordering::Relaxed);
+        debug_assert_eq!(old_ref_count, 0);
 
         // Initialize the metadata
         // SAFETY: The pointer points to the first byte of the `MetaSlot`
@@ -84,6 +87,7 @@ impl<M: PageMeta> Page<M> {
     /// A physical address to the page is returned in case the page needs to be
     /// restored using [`Page::from_raw`] later. This is useful when some architectural
     /// data structures need to hold the page handle such as the page table.
+    #[allow(unused)]
     pub(in crate::mm) fn into_raw(self) -> Paddr {
         let paddr = self.paddr();
         core::mem::forget(self);
@@ -117,6 +121,22 @@ impl<M: PageMeta> Page<M> {
         mapping::meta_to_page::<PagingConsts>(self.ptr as Vaddr)
     }
 
+    /// Get the paging level of this page.
+    ///
+    /// This is the level of the page table entry that maps the frame,
+    /// which determines the size of the frame.
+    ///
+    /// Currently, the level is always 1, which means the frame is a regular
+    /// page frame.
+    pub const fn level(&self) -> PagingLevel {
+        1
+    }
+
+    /// Size of this page in bytes.
+    pub const fn size(&self) -> usize {
+        PAGE_SIZE
+    }
+
     /// Get the metadata of this page.
     pub fn meta(&self) -> &M {
         unsafe { &*(self.ptr as *const M) }
@@ -148,28 +168,157 @@ impl<M: PageMeta> Clone for Page<M> {
 
 impl<M: PageMeta> Drop for Page<M> {
     fn drop(&mut self) {
-        if self.get_ref_count().fetch_sub(1, Ordering::Release) == 1 {
+        let last_ref_cnt = self.get_ref_count().fetch_sub(1, Ordering::Release);
+        debug_assert!(last_ref_cnt > 0);
+        if last_ref_cnt == 1 {
             // A fence is needed here with the same reasons stated in the implementation of
             // `Arc::drop`: <https://doc.rust-lang.org/std/sync/struct.Arc.html#method.drop>.
             core::sync::atomic::fence(Ordering::Acquire);
-            // Let the custom dropper handle the drop.
-            M::on_drop(self);
-            // Drop the metadata.
+            // SAFETY: this is the last reference and is about to be dropped.
             unsafe {
-                core::ptr::drop_in_place(self.ptr as *mut M);
+                meta::drop_as_last::<M>(self.ptr);
             }
-            // No handles means no usage. This also releases the page as unused for further
-            // calls to `Page::from_unused`.
-            unsafe { &*self.ptr }.usage.store(0, Ordering::Release);
-            // Deallocate the page.
-            // It would return the page to the allocator for further use. This would be done
-            // after the release of the metadata to avoid re-allocation before the metadata
-            // is reset.
-            allocator::PAGE_ALLOCATOR
-                .get()
-                .unwrap()
-                .lock()
-                .dealloc(self.paddr() / PAGE_SIZE, 1);
-        };
+        }
     }
 }
+
+/// A page with a dynamically-known usage.
+///
+/// It can also be used when the user does not care about the usage of the page.
+#[derive(Debug)]
+pub struct DynPage {
+    ptr: *const MetaSlot,
+}
+
+unsafe impl Send for DynPage {}
+unsafe impl Sync for DynPage {}
+
+impl DynPage {
+    /// Forget the handle to the page.
+    ///
+    /// This is the same as [`Page::into_raw`].
+    ///
+    /// This will result in the page being leaked without calling the custom dropper.
+    ///
+    /// A physical address to the page is returned in case the page needs to be
+    /// restored using [`Self::from_raw`] later.
+    #[allow(unused)]
+    pub(in crate::mm) fn into_raw(self) -> Paddr {
+        let paddr = self.paddr();
+        core::mem::forget(self);
+        paddr
+    }
+
+    /// Restore a forgotten page from a physical address.
+    ///
+    /// # Safety
+    ///
+    /// The safety concerns are the same as [`Page::from_raw`].
+    pub(in crate::mm) unsafe fn from_raw(paddr: Paddr) -> Self {
+        let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
+        let ptr = vaddr as *const MetaSlot;
+
+        Self { ptr }
+    }
+
+    /// Get the physical address of the start of the page.
+    pub fn paddr(&self) -> Paddr {
+        mapping::meta_to_page::<PagingConsts>(self.ptr as Vaddr)
+    }
+
+    /// Get the paging level of this page.
+    pub fn level(&self) -> PagingLevel {
+        1
+    }
+
+    /// Size of this page in bytes.
+    pub fn size(&self) -> usize {
+        PAGE_SIZE
+    }
+
+    /// Get the usage of the page.
+    pub fn usage(&self) -> PageUsage {
+        // SAFETY: the structure is safely created with a pointer that points
+        // to initialized [`MetaSlot`] memory.
+        let usage_raw = unsafe { (*self.ptr).usage.load(Ordering::Relaxed) };
+        num::FromPrimitive::from_u8(usage_raw).unwrap()
+    }
+
+    fn get_ref_count(&self) -> &AtomicU32 {
+        unsafe { &(*self.ptr).ref_count }
+    }
+}
+
+impl<M: PageMeta> TryFrom<DynPage> for Page<M> {
+    type Error = DynPage;
+
+    /// Try converting a [`DynPage`] into the statically-typed [`Page`].
+    ///
+    /// If the usage of the page is not the same as the expected usage, it will
+    /// return the dynamic page itself as is.
+    fn try_from(dyn_page: DynPage) -> Result<Self, Self::Error> {
+        if dyn_page.usage() == M::USAGE {
+            let result = Page {
+                ptr: dyn_page.ptr,
+                _marker: PhantomData,
+            };
+            let _ = ManuallyDrop::new(dyn_page);
+            Ok(result)
+        } else {
+            Err(dyn_page)
+        }
+    }
+}
+
+impl<M: PageMeta> From<Page<M>> for DynPage {
+    fn from(page: Page<M>) -> Self {
+        let result = Self { ptr: page.ptr };
+        let _ = ManuallyDrop::new(page);
+        result
+    }
+}
+
+impl From<Frame> for DynPage {
+    fn from(frame: Frame) -> Self {
+        Page::<FrameMeta>::from(frame).into()
+    }
+}
+
+impl Clone for DynPage {
+    fn clone(&self) -> Self {
+        self.get_ref_count().fetch_add(1, Ordering::Relaxed);
+        Self { ptr: self.ptr }
+    }
+}
+
+impl Drop for DynPage {
+    fn drop(&mut self) {
+        let last_ref_cnt = self.get_ref_count().fetch_sub(1, Ordering::Release);
+        debug_assert!(last_ref_cnt > 0);
+        if last_ref_cnt == 1 {
+            // A fence is needed here with the same reasons stated in the implementation of
+            // `Arc::drop`: <https://doc.rust-lang.org/std/sync/struct.Arc.html#method.drop>.
+            core::sync::atomic::fence(Ordering::Acquire);
+            // Drop the page and its metadata according to its usage.
+            // SAFETY: all `drop_as_last` calls in the match arms operate on a last,
+            // about-to-be-dropped page reference.
+            unsafe {
+                match self.usage() {
+                    PageUsage::Frame => {
+                        meta::drop_as_last::<meta::FrameMeta>(self.ptr);
+                    }
+                    PageUsage::PageTable => {
+                        meta::drop_as_last::<meta::PageTablePageMeta>(self.ptr);
+                    }
+                    // The following pages don't have metadata and can't be dropped.
+                    PageUsage::Unused
+                    | PageUsage::Reserved
+                    | PageUsage::Kernel
+                    | PageUsage::Meta => {
+                        panic!("dropping a dynamic page with usage {:?}", self.usage());
+                    }
+                }
+            }
+        }
+    }
+}
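Taken together, the conversions let a typed handle survive type erasure and come back; a sketch (assuming some `typed: Page<FrameMeta>` is in hand):

    let dyn_page: DynPage = typed.into();            // moves the handle; refcount unchanged
    assert_eq!(dyn_page.usage(), PageUsage::Frame);  // the usage travels with the meta slot
    let typed_again: Page<FrameMeta> = dyn_page
        .try_into()
        .expect("usage still matches FrameMeta");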
@@ -79,10 +79,10 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
         if pte.is_present() {
             panic!("mapping an already mapped page in the boot page table");
         }
-        unsafe { pte_ptr.write(E::new_frame(to * C::BASE_PAGE_SIZE, 1, prop)) };
+        unsafe { pte_ptr.write(E::new_page(to * C::BASE_PAGE_SIZE, 1, prop)) };
     }
 
-    /// Maps a base page to a frame.
+    /// Sets the protections of a base page mapping.
     ///
     /// This function may split a huge page into base pages, causing page allocations
     /// if the original mapping is a huge page.
@@ -117,7 +117,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
                 let nxt_ptr =
                     unsafe { (paddr_to_vaddr(frame * C::BASE_PAGE_SIZE) as *mut E).add(i) };
                 unsafe {
-                    nxt_ptr.write(E::new_frame(
+                    nxt_ptr.write(E::new_page(
                         huge_pa + i * C::BASE_PAGE_SIZE,
                         level - 1,
                         pte.prop(),
@@ -140,7 +140,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
         }
         let mut prop = pte.prop();
         op(&mut prop);
-        unsafe { pte_ptr.write(E::new_frame(pte.paddr(), 1, prop)) };
+        unsafe { pte_ptr.write(E::new_page(pte.paddr(), 1, prop)) };
     }
 
     fn alloc_frame(&mut self) -> FrameNumber {
@@ -58,7 +58,7 @@ use super::{
     page_size, pte_index, Child, KernelMode, PageTable, PageTableEntryTrait, PageTableError,
     PageTableMode, PageTableNode, PagingConstsTrait, PagingLevel,
 };
-use crate::mm::{Frame, Paddr, PageProperty, Vaddr};
+use crate::mm::{page::DynPage, Paddr, PageProperty, Vaddr};
 
 #[derive(Clone, Debug)]
 pub(crate) enum PageTableQueryResult {
@@ -68,7 +68,7 @@ pub(crate) enum PageTableQueryResult {
     },
     Mapped {
         va: Vaddr,
-        frame: Frame,
+        page: DynPage,
         prop: PageProperty,
     },
     #[allow(dead_code)]
@@ -190,10 +190,10 @@ where
             }
 
             match self.cur_child() {
-                Child::Frame(frame) => {
+                Child::Page(page) => {
                     return Some(PageTableQueryResult::Mapped {
                         va,
-                        frame,
+                        page,
                         prop: pte.prop(),
                     });
                 }
@@ -214,8 +214,8 @@ where
 
     /// Traverses forward in the current level to the next PTE.
     ///
-    /// If reached the end of a page table node, it leads itself up to the next frame of the parent
-    /// frame if possible.
+    /// If it reaches the end of a page table node, it goes up to the next page of the parent
+    /// page if possible.
     fn move_forward(&mut self) {
         let page_size = page_size::<C>(self.level);
         let next_va = self.va.align_down(page_size) + page_size;
@@ -225,7 +225,7 @@ where
         self.va = next_va;
     }
 
-    /// Goes up a level. We release the current frame if it has no mappings since the cursor only moves
+    /// Goes up a level. We release the current page if it has no mappings since the cursor only moves
     /// forward. And if needed we will do the final cleanup using this method after re-walk when the
     /// cursor is dropped.
     ///
@@ -241,9 +241,9 @@ where
     fn level_down(&mut self) {
         debug_assert!(self.level > 1);
 
-        if let Child::PageTable(nxt_lvl_frame) = self.cur_child() {
+        if let Child::PageTable(nxt_lvl_ptn) = self.cur_child() {
             self.level -= 1;
-            self.guards[(C::NR_LEVELS - self.level) as usize] = Some(nxt_lvl_frame.lock());
+            self.guards[(C::NR_LEVELS - self.level) as usize] = Some(nxt_lvl_ptn.lock());
         } else {
             panic!("Trying to level down when it is not mapped to a page table");
         }
@@ -359,21 +359,21 @@ where
         }
     }
 
-    /// Maps the range starting from the current address to a [`Frame`].
+    /// Maps the range starting from the current address to a [`DynPage`].
     ///
     /// # Panics
     ///
     /// This function will panic if
     /// - the virtual address range to be mapped is out of the range;
-    /// - the alignment of the frame is not satisfied by the virtual address;
+    /// - the alignment of the page is not satisfied by the virtual address;
     /// - it is already mapped to a huge page while the caller wants to map a smaller one.
     ///
    /// # Safety
     ///
     /// The caller should ensure that the virtual range being mapped does
     /// not affect kernel's memory safety.
-    pub(crate) unsafe fn map(&mut self, frame: Frame, prop: PageProperty) {
-        let end = self.0.va + frame.size();
+    pub(crate) unsafe fn map(&mut self, page: DynPage, prop: PageProperty) {
+        let end = self.0.va + page.size();
         assert!(end <= self.0.barrier_va.end);
         debug_assert!(self.0.in_tracked_range());
 
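With the parameter widened from `Frame` to `DynPage`, call sites convert typed handles at the boundary; schematically (a sketch with `pt`, `frame`, and `prop` assumed in scope):

    let mut cursor = pt.cursor_mut(&(PAGE_SIZE..PAGE_SIZE * 2)).unwrap();
    // `Frame` (like any `Page<M>`) converts into a `DynPage` via `From`.
    // SAFETY: assumes this user-space range cannot break kernel memory safety.
    unsafe { cursor.map(frame.into(), prop) };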
@@ -392,12 +392,11 @@ where
                 }
                 continue;
             }
-            debug_assert_eq!(self.0.level, frame.level());
+            debug_assert_eq!(self.0.level, page.level());
 
             // Map the current page.
             let idx = self.0.cur_idx();
-            self.cur_node_mut().set_child_frame(idx, frame, prop);
-
+            self.cur_node_mut().set_child_page(idx, page, prop);
             self.0.move_forward();
         }
 
@@ -602,17 +601,16 @@ where
 
     /// Goes down a level assuming the current slot is absent.
     ///
-    /// This method will create a new child frame and go down to it.
+    /// This method will create a new child page table node and go down to it.
     fn level_down_create(&mut self) {
         debug_assert!(self.0.level > 1);
 
-        let new_frame = PageTableNode::<E, C>::alloc(self.0.level - 1);
+        let new_node = PageTableNode::<E, C>::alloc(self.0.level - 1);
         let idx = self.0.cur_idx();
         let is_tracked = self.0.in_tracked_range();
         self.cur_node_mut()
-            .set_child_pt(idx, new_frame.clone_raw(), is_tracked);
+            .set_child_pt(idx, new_node.clone_raw(), is_tracked);
         self.0.level -= 1;
-        self.0.guards[(C::NR_LEVELS - self.0.level) as usize] = Some(new_frame);
+        self.0.guards[(C::NR_LEVELS - self.0.level) as usize] = Some(new_node);
     }
 
     /// Goes down a level assuming the current slot is an untracked huge page.
@@ -625,11 +623,11 @@ where
         let idx = self.0.cur_idx();
         self.cur_node_mut().split_untracked_huge(idx);
 
-        let Child::PageTable(new_frame) = self.0.cur_child() else {
+        let Child::PageTable(new_node) = self.0.cur_child() else {
             unreachable!();
         };
         self.0.level -= 1;
-        self.0.guards[(C::NR_LEVELS - self.0.level) as usize] = Some(new_frame.lock());
+        self.0.guards[(C::NR_LEVELS - self.0.level) as usize] = Some(new_node.lock());
     }
 
     fn cur_node_mut(&mut self) -> &mut PageTableNode<E, C> {
@@ -61,7 +61,7 @@ impl PageTableMode for KernelMode {
 
 // Here are some const values that are determined by the paging constants.
 
-/// The number of virtual address bits used to index a PTE in a frame.
+/// The number of virtual address bits used to index a PTE in a page.
 const fn nr_pte_index_bits<C: PagingConstsTrait>() -> usize {
     nr_subpage_per_huge::<C>().ilog2() as usize
 }
@@ -73,7 +73,7 @@ const fn pte_index<C: PagingConstsTrait>(va: Vaddr, level: PagingLevel) -> usize
 }
 
 /// A handle to a page table.
-/// A page table can track the lifetime of the mapped physical frames.
+/// A page table can track the lifetime of the mapped physical pages.
 #[derive(Debug)]
 pub(crate) struct PageTable<
     M: PageTableMode,
@@ -113,18 +113,18 @@ impl PageTable<UserMode> {
                 .unwrap();
         };
 
-        let root_frame = cursor.leak_root_guard().unwrap();
+        let root_node = cursor.leak_root_guard().unwrap();
 
         const NR_PTES_PER_NODE: usize = nr_subpage_per_huge::<PagingConsts>();
-        let new_root_frame = unsafe {
-            root_frame.make_copy(
+        let new_root_node = unsafe {
+            root_node.make_copy(
                 0..NR_PTES_PER_NODE / 2,
                 NR_PTES_PER_NODE / 2..NR_PTES_PER_NODE,
             )
         };
 
         PageTable::<UserMode> {
-            root: new_root_frame.into_raw(),
+            root: new_root_node.into_raw(),
             _phantom: PhantomData,
         }
     }
@@ -139,14 +139,14 @@ impl PageTable<KernelMode> {
     /// Then, one can use a user page table to call [`fork_copy_on_write`], creating
     /// other child page tables.
     pub(crate) fn create_user_page_table(&self) -> PageTable<UserMode> {
-        let root_frame = self.root.clone_shallow().lock();
+        let root_node = self.root.clone_shallow().lock();
 
         const NR_PTES_PER_NODE: usize = nr_subpage_per_huge::<PagingConsts>();
-        let new_root_frame =
-            unsafe { root_frame.make_copy(0..0, NR_PTES_PER_NODE / 2..NR_PTES_PER_NODE) };
+        let new_root_node =
+            unsafe { root_node.make_copy(0..0, NR_PTES_PER_NODE / 2..NR_PTES_PER_NODE) };
 
         PageTable::<UserMode> {
-            root: new_root_frame.into_raw(),
+            root: new_root_node.into_raw(),
             _phantom: PhantomData,
         }
     }
@@ -166,11 +166,11 @@ impl PageTable<KernelMode> {
         let end = root_index.end;
         debug_assert!(end <= NR_PTES_PER_NODE);
 
-        let mut root_frame = self.root.clone_shallow().lock();
+        let mut root_node = self.root.clone_shallow().lock();
         for i in start..end {
-            if !root_frame.read_pte(i).is_present() {
-                let frame = PageTableNode::alloc(PagingConsts::NR_LEVELS - 1);
-                root_frame.set_child_pt(i, frame.into_raw(), i < NR_PTES_PER_NODE * 3 / 4);
+            if !root_node.read_pte(i).is_present() {
+                let node = PageTableNode::alloc(PagingConsts::NR_LEVELS - 1);
+                root_node.set_child_pt(i, node.into_raw(), i < NR_PTES_PER_NODE * 3 / 4);
             }
         }
     }
@@ -232,7 +232,7 @@ where
     /// cursors concurrently accessing the same virtual address range, just like what
     /// happens for the hardware MMU walk.
     pub(crate) fn query(&self, vaddr: Vaddr) -> Option<(Paddr, PageProperty)> {
-        // SAFETY: The root frame is a valid page table node so the address is valid.
+        // SAFETY: The root node is a valid page table node so the address is valid.
         unsafe { page_walk::<E, C>(self.root_paddr(), vaddr) }
     }
 
@@ -283,7 +283,7 @@ where
     /// Because neither the hardware MMU nor the software page walk method
     /// would get the locks of the page table while reading, they can enter
     /// a to-be-recycled page table node and read the page table entries
-    /// after the frame is recycled and reused.
+    /// after the node is recycled and reused.
     ///
     /// To mitigate this problem, the page table nodes are by default not
     /// actively recycled, until we find an appropriate solution.
@@ -297,10 +297,10 @@ pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>(
 
     let mut cur_level = C::NR_LEVELS;
     let mut cur_pte = {
-        let frame_addr = paddr_to_vaddr(root_paddr);
+        let node_addr = paddr_to_vaddr(root_paddr);
         let offset = pte_index::<C>(vaddr, cur_level);
         // SAFETY: The offset does not exceed the value of PAGE_SIZE.
-        unsafe { (frame_addr as *const E).add(offset).read() }
+        unsafe { (node_addr as *const E).add(offset).read() }
     };
 
     while cur_level > 1 {
@@ -315,10 +315,10 @@ pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>(
 
         cur_level -= 1;
         cur_pte = {
-            let frame_addr = paddr_to_vaddr(cur_pte.paddr());
+            let node_addr = paddr_to_vaddr(cur_pte.paddr());
             let offset = pte_index::<C>(vaddr, cur_level);
             // SAFETY: The offset does not exceed the value of PAGE_SIZE.
-            unsafe { (frame_addr as *const E).add(offset).read() }
+            unsafe { (node_addr as *const E).add(offset).read() }
         };
     }
 
@@ -348,8 +348,8 @@ pub(crate) trait PageTableEntryTrait:
     /// If the flags are present with valid mappings.
     fn is_present(&self) -> bool;
 
-    /// Create a new PTE with the given physical address and flags that map to a frame.
-    fn new_frame(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self;
+    /// Create a new PTE with the given physical address and flags that map to a page.
+    fn new_page(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self;
 
     /// Create a new PTE that maps to a child page table.
     fn new_pt(paddr: Paddr) -> Self;
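The renamed constructor is now the single leaf-mapping entry point for every level, base pages and huge pages alike. A toy software-only implementation, with a bit layout invented purely for illustration:

    /// A 64-bit toy PTE: bit 0 = present, bit 7 = huge/leaf marker,
    /// bits 12..52 = physical page number. Not any real architecture's layout.
    #[derive(Clone, Copy)]
    struct ToyPte(u64);

    impl ToyPte {
        const PHYS_MASK: u64 = 0x000f_ffff_ffff_f000;

        fn new_page(paddr: usize, level: u8) -> Self {
            let mut bits = (paddr as u64 & Self::PHYS_MASK) | 0b1; // present
            if level > 1 {
                bits |= 1 << 7; // a leaf above level 1 is a huge page
            }
            ToyPte(bits)
        }
    }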
@@ -357,7 +357,7 @@ pub(crate) trait PageTableEntryTrait:
     /// Get the physical address from the PTE.
     /// The physical address recorded in the PTE is either:
     /// - the physical address of the next level page table;
-    /// - or the physical address of the page frame it maps to.
+    /// - or the physical address of the page it maps to.
     fn paddr(&self) -> Paddr;
 
     fn prop(&self) -> PageProperty;
@@ -6,7 +6,7 @@
 //! documentations. It is essentially a page that contains page table entries (PTEs) that map
 //! to child page tables nodes or mapped pages.
 //!
-//! This module leverages the frame metadata to manage the page table frames, which makes it
+//! This module leverages the page metadata to manage the page table pages, which makes it
 //! easier to provide the following guarantees:
 //!
 //! The page table node is not freed when it is still in use by:
@@ -14,12 +14,12 @@
 //! - or a handle to a page table node,
 //! - or a processor.
 //!
-//! This is implemented by using a reference counter in the frame metadata. If the above
+//! This is implemented by using a reference counter in the page metadata. If the above
 //! conditions are not met, the page table node is ensured to be freed upon dropping the last
 //! reference.
 //!
 //! One can acquire exclusive access to a page table node using merely the physical address of
-//! the page table node. This is implemented by a lock in the frame metadata. Here the
+//! the page table node. This is implemented by a lock in the page metadata. Here the
 //! exclusiveness is only ensured for kernel code, and the processor's MMU is able to access the
 //! page table node while a lock is held. So the modification to the PTEs should be done after
 //! the initialization of the entity that the PTE points to. This is taken care of in this module.
@@ -33,12 +33,12 @@ use crate::{
     mm::{
         paddr_to_vaddr,
         page::{
-            allocator::PAGE_ALLOCATOR,
-            meta::{FrameMeta, PageMeta, PageTablePageMeta, PageUsage},
-            Page,
+            self,
+            meta::{PageMeta, PageTablePageMeta, PageUsage},
+            DynPage, Page,
         },
         page_prop::PageProperty,
-        Frame, Paddr, PagingConstsTrait, PagingLevel, PAGE_SIZE,
+        Paddr, PagingConstsTrait, PagingLevel, PAGE_SIZE,
     },
 };
 
@@ -49,7 +49,7 @@ use crate::{
 /// the page table node and subsequent children will be freed.
 ///
 /// Only the CPU or a PTE can access a page table node using a raw handle. To access the page
-/// table frame from the kernel code, use the handle [`PageTableNode`].
+/// table node from the kernel code, use the handle [`PageTableNode`].
 #[derive(Debug)]
 pub(super) struct RawPageTableNode<E: PageTableEntryTrait, C: PagingConstsTrait>
 where
@@ -181,7 +181,7 @@ where
 /// The page table node can own a set of handles to children, ensuring that the children
 /// don't outlive the page table node. Cloning a page table node will create a deep copy
 /// of the page table. Dropping the page table node will also drop all handles if the page
-/// table frame has no references. You can set the page table node as a child of another
+/// table node has no references. You can set the page table node as a child of another
 /// page table node.
 #[derive(Debug)]
 pub(super) struct PageTableNode<
@@ -200,8 +200,8 @@ where
     [(); C::NR_LEVELS as usize]:,
 {
     PageTable(RawPageTableNode<E, C>),
-    Frame(Frame),
-    /// Frames not tracked by handles.
+    Page(DynPage),
+    /// Pages not tracked by handles.
     Untracked(Paddr),
     None,
 }
@@ -216,8 +216,7 @@ where
     /// set the lock bit for performance as it is exclusive and unlocking is an
     /// extra unnecessary expensive operation.
     pub(super) fn alloc(level: PagingLevel) -> Self {
-        let page_paddr = PAGE_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap() * PAGE_SIZE;
-        let mut page = Page::<PageTablePageMeta<E, C>>::from_unused(page_paddr);
+        let mut page = page::allocator::alloc_single::<PageTablePageMeta<E, C>>().unwrap();
 
         // The lock is initialized as held.
         page.meta().lock.store(1, Ordering::Relaxed);
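`alloc_single` is a new helper in `page::allocator` whose body is not part of this diff; judging from its call sites here and in the tests below, it plausibly wraps the raw frame allocator together with `Page::from_unused`, roughly:

    // Hedged reconstruction from the call sites in this diff, not the actual body.
    pub(crate) fn alloc_single<M: PageMeta>() -> Option<Page<M>> {
        let paddr = PAGE_ALLOCATOR.get()?.lock().alloc(1)? * PAGE_SIZE;
        Some(Page::from_unused(paddr))
    }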
@@ -291,9 +290,9 @@ where
                     // SAFETY: The physical address is recorded in a valid PTE
                     // which would be cast from a handle. We are incrementing
                     // the reference count so we restore and forget a cloned one.
-                    let page = unsafe { Page::<FrameMeta>::from_raw(paddr) };
+                    let page = unsafe { DynPage::from_raw(paddr) };
                     core::mem::forget(page.clone());
-                    Child::Frame(page.into())
+                    Child::Page(page)
                 } else {
                     Child::Untracked(paddr)
                 }
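The `from_raw`/`clone`/`forget` dance is the standard idiom for bumping the reference count of a handle that a raw pointer (here, a PTE) logically owns; it is the same trick `Arc::increment_strong_count` uses in the standard library:

    use std::sync::Arc;

    // Schematic, with `Arc` standing in for `DynPage`.
    unsafe fn clone_from_raw<T>(ptr: *const T) -> Arc<T> {
        // Reconstruct the handle that the raw pointer logically owns...
        let restored = Arc::from_raw(ptr);
        // ...clone it (refcount +1), then forget the restored handle so the raw
        // pointer keeps its original ownership. Net effect: one new handle.
        let cloned = restored.clone();
        std::mem::forget(restored);
        cloned
    }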
@@ -306,10 +305,10 @@ where
     /// For indexes in `deep`, the children are deep copied and this function will be recursively called.
     /// For indexes in `shallow`, the children are shallow copied as new references.
     ///
-    /// You cannot shallow copy a child that is mapped to a frame. Deep copying a frame child will not
-    /// copy the mapped frame but will copy the handle to the frame.
+    /// You cannot shallow copy a child that is mapped to a page. Deep copying a page child will not
+    /// copy the mapped page but will copy the handle to the page.
     ///
-    /// You can neither deep copy nor shallow copy a child that is mapped to an untracked page.
+    /// You can neither deep copy nor shallow copy a child that is mapped to an untracked page.
     ///
     /// The ranges must be disjoint.
     pub(super) unsafe fn make_copy(&self, deep: Range<usize>, shallow: Range<usize>) -> Self {
@@ -317,18 +316,18 @@ where
         debug_assert!(shallow.end <= nr_subpage_per_huge::<C>());
         debug_assert!(deep.end <= shallow.start || deep.start >= shallow.end);
 
-        let mut new_frame = Self::alloc(self.level());
+        let mut new_pt = Self::alloc(self.level());
 
         for i in deep {
             match self.child(i, true) {
                 Child::PageTable(pt) => {
                     let guard = pt.clone_shallow().lock();
                     let new_child = guard.make_copy(0..nr_subpage_per_huge::<C>(), 0..0);
-                    new_frame.set_child_pt(i, new_child.into_raw(), /*meaningless*/ true);
+                    new_pt.set_child_pt(i, new_child.into_raw(), true);
                 }
-                Child::Frame(frame) => {
+                Child::Page(page) => {
                     let prop = self.read_pte_prop(i);
-                    new_frame.set_child_frame(i, frame.clone(), prop);
+                    new_pt.set_child_page(i, page.clone(), prop);
                 }
                 Child::None => {}
                 Child::Untracked(_) => {
@@ -341,16 +340,16 @@ where
             debug_assert_eq!(self.level(), C::NR_LEVELS);
             match self.child(i, /*meaningless*/ true) {
                 Child::PageTable(pt) => {
-                    new_frame.set_child_pt(i, pt.clone_shallow(), /*meaningless*/ true);
+                    new_pt.set_child_pt(i, pt.clone_shallow(), /*meaningless*/ true);
                 }
                 Child::None => {}
-                Child::Frame(_) | Child::Untracked(_) => {
+                Child::Page(_) | Child::Untracked(_) => {
                     unreachable!();
                 }
             }
         }
 
-        new_frame
+        new_pt
     }
 
     /// Removes a child if the child at the given index is present.
@@ -377,19 +376,19 @@ where
         let _ = ManuallyDrop::new(pt);
     }
 
-    /// Map a frame at a given index.
-    pub(super) fn set_child_frame(&mut self, idx: usize, frame: Frame, prop: PageProperty) {
+    /// Map a page at a given index.
+    pub(super) fn set_child_page(&mut self, idx: usize, page: DynPage, prop: PageProperty) {
         // They should be ensured by the cursor.
         debug_assert!(idx < nr_subpage_per_huge::<C>());
-        debug_assert_eq!(frame.level(), self.level());
+        debug_assert_eq!(page.level(), self.level());
 
-        let pte = Some(E::new_frame(frame.start_paddr(), self.level(), prop));
+        let pte = Some(E::new_page(page.paddr(), self.level(), prop));
         self.overwrite_pte(idx, pte, true);
         // The ownership is transferred to a raw PTE. Don't drop the handle.
-        let _ = ManuallyDrop::new(frame);
+        let _ = ManuallyDrop::new(page);
     }
 
-    /// Sets an untracked child frame at a given index.
+    /// Sets an untracked child page at a given index.
     ///
     /// # Safety
     ///
@@ -398,7 +397,7 @@ where
         // It should be ensured by the cursor.
         debug_assert!(idx < nr_subpage_per_huge::<C>());
 
-        let pte = Some(E::new_frame(pa, self.level(), prop));
+        let pte = Some(E::new_page(pa, self.level(), prop));
         self.overwrite_pte(idx, pte, false);
     }
 
@@ -418,15 +417,15 @@ where
         };
         let prop = self.read_pte_prop(idx);
 
-        let mut new_frame = PageTableNode::<E, C>::alloc(self.level() - 1);
+        let mut new_page = PageTableNode::<E, C>::alloc(self.level() - 1);
         for i in 0..nr_subpage_per_huge::<C>() {
             let small_pa = pa + i * page_size::<C>(self.level() - 1);
             // SAFETY: the index is within the bound, and both the physical address
             // and the property are valid.
-            unsafe { new_frame.set_child_untracked(i, small_pa, prop) };
+            unsafe { new_page.set_child_untracked(i, small_pa, prop) };
         }
 
-        self.set_child_pt(idx, new_frame.into_raw(), false);
+        self.set_child_pt(idx, new_page.into_raw(), false);
     }
 
     /// Protects an already mapped child at a given index.
@@ -486,7 +485,7 @@ where
             drop(Page::<PageTablePageMeta<E, C>>::from_raw(paddr));
         } else if in_tracked_range {
             // This is a frame.
-            drop(Page::<FrameMeta>::from_raw(paddr));
+            drop(DynPage::from_raw(paddr));
         }
     }
 
@@ -544,11 +543,11 @@ where
                 // page table node.
                 drop(unsafe { Page::<Self>::from_raw(pte.paddr()) });
             } else {
-                // This is a frame. You cannot drop a page table node that maps to
-                // untracked frames. This must be verified.
+                // This is a page. You cannot drop a page table node that maps to
+                // untracked pages. This must be verified.
                 // SAFETY: The physical address must be cast from a handle to a
-                // frame.
-                drop(unsafe { Page::<FrameMeta>::from_raw(pte.paddr()) });
+                // page.
+                drop(unsafe { DynPage::from_raw(pte.paddr()) });
             }
         }
     }
@@ -5,8 +5,8 @@ use core::mem::ManuallyDrop;
 
 use super::*;
 use crate::mm::{
     kspace::LINEAR_MAPPING_BASE_VADDR,
+    page::{allocator, meta::FrameMeta},
     page_prop::{CachePolicy, PageFlags},
-    FrameAllocOptions,
 };
 
 const PAGE_SIZE: usize = 4096;
@@ -17,7 +17,6 @@ fn test_range_check() {
     let good_va = 0..PAGE_SIZE;
     let bad_va = 0..PAGE_SIZE + 1;
     let bad_va2 = LINEAR_MAPPING_BASE_VADDR..LINEAR_MAPPING_BASE_VADDR + PAGE_SIZE;
-    let to = FrameAllocOptions::new(1).alloc().unwrap();
     assert!(pt.cursor_mut(&good_va).is_ok());
     assert!(pt.cursor_mut(&bad_va).is_err());
     assert!(pt.cursor_mut(&bad_va2).is_err());
@@ -31,10 +30,10 @@ fn test_tracked_map_unmap() {
     let pt = PageTable::<UserMode>::empty();
 
     let from = PAGE_SIZE..PAGE_SIZE * 2;
-    let frame = FrameAllocOptions::new(1).alloc_single().unwrap();
-    let start_paddr = frame.start_paddr();
+    let page = allocator::alloc_single::<FrameMeta>().unwrap();
+    let start_paddr = page.paddr();
     let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
-    unsafe { pt.cursor_mut(&from).unwrap().map(frame.clone(), prop) };
+    unsafe { pt.cursor_mut(&from).unwrap().map(page.into(), prop) };
     assert_eq!(pt.query(from.start + 10).unwrap().0, start_paddr + 10);
     unsafe { pt.unmap(&from).unwrap() };
     assert!(pt.query(from.start + 10).is_none());
@@ -75,14 +74,14 @@ fn test_untracked_map_unmap() {
 fn test_user_copy_on_write() {
     let pt = PageTable::<UserMode>::empty();
     let from = PAGE_SIZE..PAGE_SIZE * 2;
-    let frame = FrameAllocOptions::new(1).alloc_single().unwrap();
-    let start_paddr = frame.start_paddr();
+    let page = allocator::alloc_single::<FrameMeta>().unwrap();
+    let start_paddr = page.paddr();
     let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
-    unsafe { pt.cursor_mut(&from).unwrap().map(frame.clone(), prop) };
+    unsafe { pt.cursor_mut(&from).unwrap().map(page.clone().into(), prop) };
     assert_eq!(pt.query(from.start + 10).unwrap().0, start_paddr + 10);
     unsafe { pt.unmap(&from).unwrap() };
     assert!(pt.query(from.start + 10).is_none());
-    unsafe { pt.cursor_mut(&from).unwrap().map(frame.clone(), prop) };
+    unsafe { pt.cursor_mut(&from).unwrap().map(page.clone().into(), prop) };
     assert_eq!(pt.query(from.start + 10).unwrap().0, start_paddr + 10);
 
     let child_pt = pt.fork_copy_on_write();
@@ -103,7 +102,7 @@ fn test_user_copy_on_write() {
         sibling_pt
             .cursor_mut(&from)
             .unwrap()
-            .map(frame.clone(), prop)
+            .map(page.clone().into(), prop)
     };
     assert_eq!(
         sibling_pt.query(from.start + 10).unwrap().0,
@@ -131,30 +130,30 @@ fn test_base_protect_query() {
 
     let from_ppn = 1..1000;
     let from = PAGE_SIZE * from_ppn.start..PAGE_SIZE * from_ppn.end;
-    let to = FrameAllocOptions::new(999).alloc().unwrap();
+    let to = allocator::alloc::<FrameMeta>(999 * PAGE_SIZE).unwrap();
     let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
     unsafe {
         let mut cursor = pt.cursor_mut(&from).unwrap();
-        for frame in to {
-            cursor.map(frame.clone(), prop);
+        for page in to {
+            cursor.map(page.clone().into(), prop);
         }
     }
     for (qr, i) in pt.cursor(&from).unwrap().zip(from_ppn) {
-        let Qr::Mapped { va, frame, prop } = qr else {
+        let Qr::Mapped { va, page, prop } = qr else {
            panic!("Expected Mapped, got {:#x?}", qr);
         };
         assert_eq!(prop.flags, PageFlags::RW);
         assert_eq!(prop.cache, CachePolicy::Writeback);
-        assert_eq!(va..va + frame.size(), i * PAGE_SIZE..(i + 1) * PAGE_SIZE);
+        assert_eq!(va..va + page.size(), i * PAGE_SIZE..(i + 1) * PAGE_SIZE);
     }
     let prot = PAGE_SIZE * 18..PAGE_SIZE * 20;
     unsafe { pt.protect(&prot, |p| p.flags -= PageFlags::W).unwrap() };
     for (qr, i) in pt.cursor(&prot).unwrap().zip(18..20) {
-        let Qr::Mapped { va, frame, prop } = qr else {
+        let Qr::Mapped { va, page, prop } = qr else {
             panic!("Expected Mapped, got {:#x?}", qr);
         };
         assert_eq!(prop.flags, PageFlags::R);
-        assert_eq!(va..va + frame.size(), i * PAGE_SIZE..(i + 1) * PAGE_SIZE);
+        assert_eq!(va..va + page.size(), i * PAGE_SIZE..(i + 1) * PAGE_SIZE);
     }
 }
 
@@ -185,12 +184,14 @@ fn test_untracked_large_protect_query() {
     let from =
         UNTRACKED_OFFSET + PAGE_SIZE * from_ppn.start..UNTRACKED_OFFSET + PAGE_SIZE * from_ppn.end;
     let to = PAGE_SIZE * to_ppn.start..PAGE_SIZE * to_ppn.end;
+    let mapped_pa_of_va = |va: Vaddr| va - (from.start - to.start);
     let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
     unsafe { pt.map(&from, &to, prop).unwrap() };
     for (qr, i) in pt.cursor(&from).unwrap().zip(0..512 + 2 + 2) {
         let Qr::MappedUntracked { va, pa, len, prop } = qr else {
             panic!("Expected MappedUntracked, got {:#x?}", qr);
         };
+        assert_eq!(pa, mapped_pa_of_va(va));
         assert_eq!(prop.flags, PageFlags::RW);
         assert_eq!(prop.cache, CachePolicy::Writeback);
         if i < 512 + 2 {
@@ -218,6 +219,7 @@ fn test_untracked_large_protect_query() {
         let Qr::MappedUntracked { va, pa, len, prop } = qr else {
             panic!("Expected MappedUntracked, got {:#x?}", qr);
         };
+        assert_eq!(pa, mapped_pa_of_va(va));
         assert_eq!(prop.flags, PageFlags::RW);
         let va = va - UNTRACKED_OFFSET;
         assert_eq!(va..va + len, i * PAGE_SIZE..(i + 1) * PAGE_SIZE);
@@ -226,6 +228,7 @@ fn test_untracked_large_protect_query() {
         let Qr::MappedUntracked { va, pa, len, prop } = qr else {
             panic!("Expected MappedUntracked, got {:#x?}", qr);
         };
+        assert_eq!(pa, mapped_pa_of_va(va));
         assert_eq!(prop.flags, PageFlags::R);
         let va = va - UNTRACKED_OFFSET;
         assert_eq!(va..va + len, i * PAGE_SIZE..(i + 1) * PAGE_SIZE);
@@ -238,6 +241,7 @@ fn test_untracked_large_protect_query() {
         let Qr::MappedUntracked { va, pa, len, prop } = qr else {
             panic!("Expected MappedUntracked, got {:#x?}", qr);
         };
+        assert_eq!(pa, mapped_pa_of_va(va));
         assert_eq!(prop.flags, PageFlags::RW);
         let va = va - UNTRACKED_OFFSET;
         assert_eq!(va..va + len, i * PAGE_SIZE..(i + 1) * PAGE_SIZE);
@@ -105,7 +105,7 @@ impl VmSpace {
         for frame in frames.into_iter() {
             // SAFETY: mapping in the user space with `Frame` is safe.
             unsafe {
-                cursor.map(frame, prop);
+                cursor.map(frame.into(), prop);
             }
         }
 
@@ -312,7 +312,11 @@ impl Iterator for VmQueryIter<'_> {
     fn next(&mut self) -> Option<Self::Item> {
         self.cursor.next().map(|ptqr| match ptqr {
             PtQr::NotMapped { va, len } => VmQueryResult::NotMapped { va, len },
-            PtQr::Mapped { va, frame, prop } => VmQueryResult::Mapped { va, frame, prop },
+            PtQr::Mapped { va, page, prop } => VmQueryResult::Mapped {
+                va,
+                frame: page.try_into().unwrap(),
+                prop,
+            },
             // It is not possible to map untyped memory in user space.
             PtQr::MappedUntracked { .. } => unreachable!(),
         })