Fortify the metadata memory model and adjust the Page casting APIs

Zhang Junyang 2024-05-27 09:10:09 +00:00 committed by Tate, Hongliang Tian
parent 14e1b1a9fc
commit 3579d88aa6
18 changed files with 139 additions and 1397 deletions

View File

@ -56,7 +56,7 @@ use aster_frame::cpu::UserContext;
use aster_frame::prelude::*;
use aster_frame::task::{Task, TaskOptions};
use aster_frame::user::{ReturnReason, UserMode, UserSpace};
use aster_frame::vm::{PageFlags, PAGE_SIZE, Vaddr, VmAllocOptions, VmIo, VmMapOptions, VmSpace};
use aster_frame::mm::{PageFlags, PAGE_SIZE, Vaddr, VmAllocOptions, VmIo, VmMapOptions, VmSpace};
/// The kernel's boot and initialization process is managed by Asterinas Framework.
/// After the process is done, the kernel's execution environment

View File

@ -20,7 +20,7 @@ impl PageTableMode for DeviceMode {
const VADDR_RANGE: Range<Vaddr> = 0..0x1_0000_0000;
}
#[derive(Clone, Debug)]
#[derive(Clone, Debug, Default)]
pub(super) struct PagingConsts {}
impl PagingConstsTrait for PagingConsts {
@ -62,7 +62,7 @@ bitflags::bitflags! {
}
}
#[derive(Debug, Clone, Copy, Pod)]
#[derive(Debug, Clone, Copy, Pod, Default)]
#[repr(C)]
pub struct PageTableEntry(u64);
@ -86,10 +86,6 @@ impl PageTableEntryTrait for PageTableEntry {
(self.0 & Self::PHYS_MASK) as usize
}
fn new_absent() -> Self {
Self(0)
}
fn is_present(&self) -> bool {
self.0 & (PageTableFlags::READABLE | PageTableFlags::WRITABLE).bits() != 0
}

View File

@ -14,7 +14,7 @@ use crate::mm::{
pub(crate) const NR_ENTRIES_PER_PAGE: usize = 512;
#[derive(Clone, Debug)]
#[derive(Clone, Debug, Default)]
pub struct PagingConsts {}
impl PagingConstsTrait for PagingConsts {
@ -87,7 +87,7 @@ pub(crate) fn tlb_flush_all_including_global() {
}
}
#[derive(Clone, Copy, Pod)]
#[derive(Clone, Copy, Pod, Default)]
#[repr(C)]
pub struct PageTableEntry(usize);
@ -138,10 +138,6 @@ macro_rules! parse_flags {
}
impl PageTableEntryTrait for PageTableEntry {
fn new_absent() -> Self {
Self(0)
}
fn is_present(&self) -> bool {
self.0 & PageTableFlags::PRESENT.bits() != 0
}

View File

@ -17,12 +17,12 @@ use crate::arch::{
use crate::{
cpu::{CpuException, PageFaultErrorCode, PAGE_FAULT},
cpu_local,
trap::call_irq_callback_functions,
vm::{
mm::{
kspace::{KERNEL_PAGE_TABLE, LINEAR_MAPPING_BASE_VADDR, LINEAR_MAPPING_VADDR_RANGE},
page_prop::{CachePolicy, PageProperty},
PageFlags, PrivilegedPageFlags as PrivFlags, PAGE_SIZE,
},
trap::call_irq_callback_functions,
};
cpu_local! {

View File

@ -4,5 +4,3 @@
extern crate xarray as xarray_crate;
pub use xarray_crate::{Cursor, CursorMut, XArray, XMark};
pub use crate::mm::page::VmFrameRef;

View File

@ -190,8 +190,8 @@ pub fn init_kernel_page_table(
};
let mut cursor = kpt.cursor_mut(&from).unwrap();
for frame_paddr in to.step_by(PAGE_SIZE) {
let page = Page::<KernelMeta>::from_unused(frame_paddr).unwrap();
let paddr = page.forget();
let page = Page::<KernelMeta>::from_unused(frame_paddr);
let paddr = page.into_raw();
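// `into_raw` intentionally leaks the handle here: the kernel mapping is
// never torn down, so no `Page` handle needs to outlive this loop.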
// SAFETY: we are doing mappings for the kernel.
unsafe {
cursor.map_pa(&(paddr..paddr + PAGE_SIZE), prop);

View File

@ -46,7 +46,7 @@ pub type PagingLevel = u8;
/// A minimal set of constants that determines the paging system.
/// This provides an abstraction over most paging modes in common architectures.
pub(crate) trait PagingConstsTrait: Clone + Debug + 'static {
pub(crate) trait PagingConstsTrait: Clone + Debug + Default + Sync + 'static {
/// The smallest page size.
/// This is also the page size at level 1 page tables.
const BASE_PAGE_SIZE: usize;

View File

@ -22,9 +22,8 @@ pub(crate) fn alloc(nframes: usize) -> Option<VmFrameVec> {
let mut vector = Vec::new();
for i in 0..nframes {
let paddr = (start + i) * PAGE_SIZE;
// SAFETY: The frame index is valid.
let frame = Frame {
page: Page::<FrameMeta>::from_unused(paddr).unwrap(),
page: Page::<FrameMeta>::from_unused(paddr),
};
vector.push(frame);
}
@ -36,7 +35,7 @@ pub(crate) fn alloc_single() -> Option<Frame> {
FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).map(|idx| {
let paddr = idx * PAGE_SIZE;
Frame {
page: Page::<FrameMeta>::from_unused(paddr).unwrap(),
page: Page::<FrameMeta>::from_unused(paddr),
}
})
}

View File

@ -4,7 +4,7 @@ use core::mem::ManuallyDrop;
use super::{
allocator,
meta::{FrameMeta, PageMeta, PageUsage},
meta::{FrameMeta, MetaSlot, PageMeta, PageUsage},
Page,
};
use crate::{
@ -15,7 +15,7 @@ use crate::{
Error, Result,
};
/// A handle to a page frame.
/// A handle to a physical memory page of untyped memory.
///
/// An instance of `Frame` is a handle to a page frame (a physical memory
/// page). A cloned `Frame` refers to the same page frame as the original.
@ -129,13 +129,15 @@ impl PageMeta for FrameMeta {
use core::{marker::PhantomData, ops::Deref};
/// `VmFrameRef` is a struct that can work as `&'a Frame`.
pub struct VmFrameRef<'a> {
/// `FrameRef` is a struct that can work as `&'a Frame`.
///
/// This is solely useful for [`crate::collections::xarray`].
pub struct FrameRef<'a> {
inner: ManuallyDrop<Frame>,
_marker: PhantomData<&'a Frame>,
}
impl<'a> Deref for VmFrameRef<'a> {
impl<'a> Deref for FrameRef<'a> {
type Target = Frame;
fn deref(&self) -> &Self::Target {
@ -143,18 +145,23 @@ impl<'a> Deref for VmFrameRef<'a> {
}
}
// SAFETY: `Frame` is essentially an `*const FrameMeta` that could be used as a `*const` pointer.
// SAFETY: `Frame` is essentially a `*const MetaSlot` that could be used as a `*const` pointer.
// The pointer is also aligned to 4.
unsafe impl xarray::ItemEntry for Frame {
type Ref<'a> = VmFrameRef<'a> where Self: 'a;
type Ref<'a> = FrameRef<'a> where Self: 'a;
fn into_raw(self) -> *const () {
self.page.forget() as *const ()
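// Hand the raw `MetaSlot` pointer to the XArray and leak the handle,
// so the entry keeps owning one reference count.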
let ptr = self.page.ptr;
core::mem::forget(self);
ptr as *const ()
}
unsafe fn from_raw(raw: *const ()) -> Self {
Self {
page: Page::<FrameMeta>::restore(raw as Paddr),
page: Page::<FrameMeta> {
ptr: raw as *mut MetaSlot,
_marker: PhantomData,
},
}
}

View File

@ -1,6 +1,15 @@
// SPDX-License-Identifier: MPL-2.0
//! Metadata management of pages.
//!
//! You can picture a globally shared, static, gigantic array of metadata initialized for each page.
//! An entry in the array is called a `MetaSlot`, which contains the metadata of a page. There would
//! be a dedicated small "heap" space in each slot for dynamic metadata. You can store anything as the
//! metadata of a page as long as it's [`Sync`].
//!
//! At the implementation level, the slots are placed in metadata pages mapped to a certain virtual
//! address. This is faster, simpler, safer, and more versatile than an actual static array
//! implementation.
pub mod mapping {
//! The metadata of each physical page is linearly mapped to fixed virtual addresses
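A hedged sketch of the address arithmetic this linear mapping implies, assuming a hypothetical `FRAME_METADATA_BASE` constant and illustrative helper names (the crate's actual constants and functions may differ):

```rust
// Illustrative only: maps a physical page address to the virtual address
// of its `MetaSlot`, and back, via simple linear arithmetic.
const FRAME_METADATA_BASE: usize = 0xffff_a000_0000_0000; // hypothetical base
const PAGE_SIZE: usize = 4096;
const SLOT_SIZE: usize = 16; // `MetaSlot` is const-asserted to be 16 bytes

/// Physical page address -> virtual address of its metadata slot.
const fn page_to_meta(paddr: usize) -> usize {
    FRAME_METADATA_BASE + (paddr / PAGE_SIZE) * SLOT_SIZE
}

/// Virtual address of a metadata slot -> physical page address.
const fn meta_to_page(vaddr: usize) -> usize {
    (vaddr - FRAME_METADATA_BASE) / SLOT_SIZE * PAGE_SIZE
}
```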
@ -53,8 +62,8 @@ use crate::{
/// Represents the usage of a page.
#[repr(u8)]
pub enum PageUsage {
// The zero variant is reserved for the unused type. A page can only
// be designated for one of the Other purposes if it is unused.
// The zero variant is reserved for the unused type. Only an unused page
// can be designated for one of the other purposes.
Unused = 0,
/// The page is reserved or unusable. The kernel should not touch it.
Reserved = 1,
@ -76,14 +85,11 @@ pub enum PageUsage {
pub(super) struct MetaSlot {
/// The metadata of the page.
///
/// It is placed at the first field to save memory if the metadata ends
/// with an alignment not fitting a `u64`.
///
/// The implementation may cast a `*const MetaSlot` to a `*const PageMeta`.
_inner: MetaSlotInner,
/// To store [`PageUsage`].
pub(super) usage: AtomicU8,
pub(super) refcnt: AtomicU32,
pub(super) ref_count: AtomicU32,
}
pub(super) union MetaSlotInner {
@ -109,7 +115,7 @@ const_assert_eq!(size_of::<MetaSlot>(), 16);
/// If a page type needs specific drop behavior, it should specify it
/// when implementing this trait. When we drop the last handle to
/// this page, the `on_drop` method will be called.
pub trait PageMeta: private::Sealed + Sized {
pub trait PageMeta: Default + Sync + private::Sealed + Sized {
const USAGE: PageUsage;
fn on_drop(page: &mut Page<Self>);
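Both the metadata initialization in `try_from_unused` and the `meta()` accessors rely on casting a slot pointer to the concrete metadata type, which is sound only while the metadata sits at the start of the `#[repr(C)]` slot. A standalone sketch of that layout assumption, with illustrative names rather than the crate's:

```rust
use core::sync::atomic::{AtomicU32, AtomicU8};

// Stand-ins for `MetaSlot` and a concrete `PageMeta` type; illustrative only.
#[repr(C)]
struct Slot {
    inner: [u8; 8], // the per-page metadata "heap", like `MetaSlotInner`
    usage: AtomicU8,
    ref_count: AtomicU32,
}

#[repr(C)]
struct Meta(u64); // a concrete metadata type occupying the `inner` bytes

fn meta_of(slot: *const Slot) -> *const Meta {
    // Sound only because `inner` is the first field of a `#[repr(C)]` struct,
    // so a pointer to the slot is also a pointer to the metadata.
    slot as *const Meta
}
```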
@ -123,13 +129,13 @@ mod private {
use private::Sealed;
#[derive(Debug)]
#[derive(Debug, Default)]
#[repr(C)]
pub struct FrameMeta {}
impl Sealed for FrameMeta {}
#[derive(Debug)]
#[derive(Debug, Default)]
#[repr(C)]
pub struct SegmentHeadMeta {
/// Length of the segment in bytes.
@ -144,9 +150,9 @@ impl From<Page<FrameMeta>> for Page<SegmentHeadMeta> {
// and a frame handle. However, `Vmo` holds a frame handle while block IO needs a
// segment handle from the same page.
// A segment cannot be mapped. So we have to introduce this enforcement soon:
// assert_eq!(page.ref_count(), 1);
// assert_eq!(page.count(), 1);
unsafe {
let mut head = Page::<SegmentHeadMeta>::restore(page.forget());
let mut head = Page::<SegmentHeadMeta>::from_raw(page.into_raw());
(*head.ptr)
.usage
.store(PageUsage::SegmentHead as u8, Ordering::Relaxed);
@ -156,7 +162,7 @@ impl From<Page<FrameMeta>> for Page<SegmentHeadMeta> {
}
}
#[derive(Debug)]
#[derive(Debug, Default)]
#[repr(C)]
pub struct PageTablePageMeta<E: PageTableEntryTrait, C: PagingConstsTrait>
where
@ -173,6 +179,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> Sealed for PageTablePageMeta<
{
}
#[derive(Debug, Default)]
#[repr(C)]
pub struct MetaPageMeta {}
@ -184,6 +191,7 @@ impl PageMeta for MetaPageMeta {
}
}
#[derive(Debug, Default)]
#[repr(C)]
pub struct KernelMeta {}
@ -232,7 +240,7 @@ pub(crate) fn init(boot_pt: &mut BootPageTable) -> Vec<Range<Paddr>> {
meta_pages
.into_iter()
.map(|paddr| {
let pa = Page::<MetaPageMeta>::from_unused(paddr).unwrap().forget();
let pa = Page::<MetaPageMeta>::from_unused(paddr).into_raw();
pa..pa + PAGE_SIZE
})
.collect()

View File

@ -11,19 +11,20 @@
//! address space of the users are backed by frames.
pub(crate) mod allocator;
pub(in crate::mm) mod meta;
use meta::{mapping, MetaSlot, PageMeta};
mod frame;
pub use frame::{Frame, VmFrameRef};
mod vm_frame_vec;
pub use vm_frame_vec::{FrameVecIter, VmFrameVec};
pub(in crate::mm) mod meta;
mod segment;
mod vm_frame_vec;
use core::{
marker::PhantomData,
sync::atomic::{AtomicU32, AtomicUsize, Ordering},
};
pub use frame::Frame;
use meta::{mapping, MetaSlot, PageMeta};
pub use segment::Segment;
pub use vm_frame_vec::{FrameVecIter, VmFrameVec};
use super::PAGE_SIZE;
use crate::mm::{paddr_to_vaddr, Paddr, PagingConsts, Vaddr};
@ -53,8 +54,32 @@ pub enum PageHandleError {
}
impl<M: PageMeta> Page<M> {
/// Convert an unused page to a `Page` handle for a specific usage.
pub(in crate::mm) fn from_unused(paddr: Paddr) -> Result<Self, PageHandleError> {
/// Get a `Page` handle with a specific usage from a raw, unused page.
///
/// If the provided physical address is invalid or not aligned, this
/// function will panic.
///
/// If the provided page is already in use, this function will block
/// until the page is released. This is a workaround since the page
/// allocator is decoupled from metadata management, so a page could be
/// handed out again by the allocator before all its metadata is reset.
///
/// TODO: redesign the page allocator to be aware of metadata management.
pub fn from_unused(paddr: Paddr) -> Self {
loop {
match Self::try_from_unused(paddr) {
Ok(page) => return page,
Err(PageHandleError::InUse) => {
// Wait for the page to be released.
core::hint::spin_loop();
}
Err(e) => panic!("Failed to get a page handle: {:?}", e),
}
}
}
/// Get a `Page` handle with a specific usage from a raw, unused page.
pub(in crate::mm) fn try_from_unused(paddr: Paddr) -> Result<Self, PageHandleError> {
if paddr % PAGE_SIZE != 0 {
return Err(PageHandleError::NotAligned);
}
@ -66,12 +91,17 @@ impl<M: PageMeta> Page<M> {
let ptr = vaddr as *const MetaSlot;
let usage = unsafe { &(*ptr).usage };
let refcnt = unsafe { &(*ptr).refcnt };
let ref_count = unsafe { &(*ptr).ref_count };
usage
.compare_exchange(0, M::USAGE as u8, Ordering::SeqCst, Ordering::Relaxed)
.map_err(|_| PageHandleError::InUse)?;
refcnt.fetch_add(1, Ordering::Relaxed);
let old_ref_count = ref_count.fetch_add(1, Ordering::Relaxed);
debug_assert!(old_ref_count == 0);
// Initialize the metadata
unsafe { (ptr as *mut M).write(M::default()) }
Ok(Self {
ptr,
@ -82,7 +112,11 @@ impl<M: PageMeta> Page<M> {
/// Forget the handle to the page.
///
/// This will result in the page being leaked without calling the custom dropper.
pub fn forget(self) -> Paddr {
///
/// The physical address of the page is returned in case the page needs to be
/// restored using [`Page::from_raw`] later. This is useful when architectural
/// data structures, such as page tables, need to hold the page handle.
pub(in crate::mm) fn into_raw(self) -> Paddr {
let paddr = self.paddr();
core::mem::forget(self);
paddr
@ -93,14 +127,14 @@ impl<M: PageMeta> Page<M> {
/// # Safety
///
/// The caller should only restore a `Page` that was previously forgotten using
/// [`Page::forget`].
/// [`Page::into_raw`].
///
/// The restoring operation should only be done once for a forgotten
/// `Page`. Otherwise, a double free will happen.
///
/// Also, the caller must ensure that the usage of the page is correct.
/// This function performs no checks on the usage.
pub(in crate::mm) unsafe fn restore(paddr: Paddr) -> Self {
pub(in crate::mm) unsafe fn from_raw(paddr: Paddr) -> Self {
let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
let ptr = vaddr as *const MetaSlot;
@ -110,33 +144,12 @@ impl<M: PageMeta> Page<M> {
}
}
/// Clone a `Page` handle from a forgotten `Page` as a physical address.
///
/// This is similar to [`Page::restore`], but it also increments the reference count
/// and the forgotten page will be still leaked unless restored later.
///
/// # Safety
///
/// The safety requirements are the same as [`Page::restore`].
pub(in crate::mm) unsafe fn clone_restore(paddr: &Paddr) -> Self {
let vaddr = mapping::page_to_meta::<PagingConsts>(*paddr);
let ptr = vaddr as *const MetaSlot;
let refcnt = unsafe { &(*ptr).refcnt };
refcnt.fetch_add(1, Ordering::Relaxed);
Self {
ptr,
_marker: PhantomData,
}
}
/// Get the physical address.
pub fn paddr(&self) -> Paddr {
mapping::meta_to_page::<PagingConsts>(self.ptr as Vaddr)
}
/// Get the reference count of this page.
/// Load the current reference count of this page.
///
/// # Safety
///
@ -144,8 +157,8 @@ impl<M: PageMeta> Page<M> {
/// Another thread can change the reference count at any time, including
/// potentially between calling this method and the action depending on the
/// result.
fn ref_count(&self) -> u32 {
self.refcnt().load(Ordering::Relaxed)
pub fn count(&self) -> u32 {
self.get_ref_count().load(Ordering::Relaxed)
}
/// Get the metadata of this page.
@ -162,14 +175,14 @@ impl<M: PageMeta> Page<M> {
unsafe { &mut *(self.ptr as *mut M) }
}
fn refcnt(&self) -> &AtomicU32 {
unsafe { &(*self.ptr).refcnt }
fn get_ref_count(&self) -> &AtomicU32 {
unsafe { &(*self.ptr).ref_count }
}
}
impl<M: PageMeta> Clone for Page<M> {
fn clone(&self) -> Self {
self.refcnt().fetch_add(1, Ordering::Relaxed);
self.get_ref_count().fetch_add(1, Ordering::Relaxed);
Self {
ptr: self.ptr,
_marker: PhantomData,
@ -179,13 +192,18 @@ impl<M: PageMeta> Clone for Page<M> {
impl<M: PageMeta> Drop for Page<M> {
fn drop(&mut self) {
if self.refcnt().fetch_sub(1, Ordering::Release) == 1 {
if self.get_ref_count().fetch_sub(1, Ordering::Release) == 1 {
// A fence is needed here with the same reasons stated in the implementation of
// `Arc::drop`: <https://doc.rust-lang.org/std/sync/struct.Arc.html#method.drop>.
core::sync::atomic::fence(Ordering::Acquire);
// Let the custom dropper handle the drop.
M::on_drop(self);
// No handles means no usage.
// Drop the metadata.
unsafe {
core::ptr::drop_in_place(self.ptr as *mut M);
}
// No handles means no usage. This also marks the page as unused, releasing
// it for further calls to `Page::from_unused`.
unsafe { &*self.ptr }.usage.store(0, Ordering::Release);
};
}
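Putting the adjusted APIs together, a hedged usage sketch of the handle lifecycle; `SOME_PADDR` is a hypothetical page-aligned, unused physical address, and the snippet illustrates the intended flow rather than code from this commit:

```rust
// Claim the page; blocks (spinning) if the slot is still marked in use.
let page = Page::<KernelMeta>::from_unused(SOME_PADDR);

// Leak the handle into a raw physical address, e.g. to store in a PTE.
let raw = page.into_raw();

// SAFETY: `raw` came from `into_raw` above and is restored exactly once.
let page = unsafe { Page::<KernelMeta>::from_raw(raw) };

// Dropping the last handle runs `on_drop`, drops the metadata in place,
// and stores `usage = 0`, releasing the slot for `from_unused` again.
drop(page);
```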

View File

@ -50,7 +50,7 @@ impl Segment {
/// The given range of page frames must not have been allocated before,
/// as part of either a `Frame` or `Segment`.
pub(crate) unsafe fn new(paddr: Paddr, nframes: usize) -> Self {
let mut head = Page::<SegmentHeadMeta>::from_unused(paddr).unwrap();
let mut head = Page::<SegmentHeadMeta>::from_unused(paddr);
head.meta_mut().seg_len = (nframes * PAGE_SIZE) as u64;
Self {
head_page: head,

View File

@ -68,7 +68,7 @@ where
/// Convert a raw handle to an accessible handle by acquiring the lock.
pub(super) fn lock(self) -> PageTableNode<E, C> {
let page = unsafe { Page::<PageTablePageMeta<E, C>>::restore(self.paddr()) };
let page = unsafe { Page::<PageTablePageMeta<E, C>>::from_raw(self.paddr()) };
debug_assert!(page.meta().level == self.level);
// Acquire the lock.
while page
@ -86,7 +86,10 @@ where
/// Create a copy of the handle.
pub(super) fn copy_handle(&self) -> Self {
core::mem::forget(unsafe { Page::<PageTablePageMeta<E, C>>::clone_restore(&self.paddr()) });
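// Bump the reference count: restore a transient handle, clone it, then
// forget both so neither destructor runs to decrement the count again.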
let page = unsafe { Page::<PageTablePageMeta<E, C>>::from_raw(self.paddr()) };
let inc_ref = page.clone();
core::mem::forget(page);
core::mem::forget(inc_ref);
Self {
raw: self.raw,
level: self.level,
@ -95,7 +98,7 @@ where
}
pub(super) fn nr_valid_children(&self) -> u16 {
let page = unsafe { Page::<PageTablePageMeta<E, C>>::restore(self.paddr()) };
let page = unsafe { Page::<PageTablePageMeta<E, C>>::from_raw(self.paddr()) };
page.meta().nr_children
}
@ -131,7 +134,7 @@ where
// Increment the reference count of the current page table.
let page = unsafe { Page::<PageTablePageMeta<E, C>>::restore(self.paddr()) };
let page = unsafe { Page::<PageTablePageMeta<E, C>>::from_raw(self.paddr()) };
core::mem::forget(page.clone());
core::mem::forget(page);
@ -163,7 +166,7 @@ where
[(); C::NR_LEVELS as usize]:,
{
fn drop(&mut self) {
drop(unsafe { Page::<PageTablePageMeta<E, C>>::restore(self.paddr()) });
drop(unsafe { Page::<PageTablePageMeta<E, C>>::from_raw(self.paddr()) });
}
}
@ -208,7 +211,7 @@ where
/// extra unnecessary expensive operation.
pub(super) fn alloc(level: PagingLevel) -> Self {
let frame = FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap() * PAGE_SIZE;
let mut page = Page::<PageTablePageMeta<E, C>>::from_unused(frame).unwrap();
let mut page = Page::<PageTablePageMeta<E, C>>::from_unused(frame);
// The lock is initialized as held.
page.meta().lock.store(1, Ordering::Relaxed);
@ -258,16 +261,17 @@ where
} else {
let paddr = pte.paddr();
if !pte.is_last(self.level()) {
core::mem::forget(unsafe {
Page::<PageTablePageMeta<E, C>>::clone_restore(&paddr)
});
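// The same clone-and-forget trick as in `copy_handle`: bump the child
// page table's reference count without running any destructor.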
let node = unsafe { Page::<PageTablePageMeta<E, C>>::from_raw(paddr) };
let inc_ref = node.clone();
core::mem::forget(node);
core::mem::forget(inc_ref);
Child::PageTable(RawPageTableNode {
raw: paddr,
level: self.level() - 1,
_phantom: PhantomData,
})
} else if tracked {
let page = unsafe { Page::<FrameMeta>::restore(paddr) };
let page = unsafe { Page::<FrameMeta>::from_raw(paddr) };
core::mem::forget(page.clone());
Child::Frame(Frame { page })
} else {
@ -446,10 +450,10 @@ where
let paddr = existing_pte.paddr();
if !existing_pte.is_last(self.level()) {
// This is a page table.
drop(unsafe { Page::<PageTablePageMeta<E, C>>::restore(paddr) });
drop(unsafe { Page::<PageTablePageMeta<E, C>>::from_raw(paddr) });
} else if !in_untracked_range {
// This is a frame.
drop(unsafe { Page::<FrameMeta>::restore(paddr) });
drop(unsafe { Page::<FrameMeta>::from_raw(paddr) });
}
if pte.is_none() {
@ -497,11 +501,11 @@ where
// Just restore the handle and drop the handle.
if !pte.is_last(level) {
// This is a page table.
drop(unsafe { Page::<Self>::restore(pte.paddr()) });
drop(unsafe { Page::<Self>::from_raw(pte.paddr()) });
} else {
// This is a frame. You cannot drop a page table node that maps to
// untracked frames. This must be verified.
drop(unsafe { Page::<FrameMeta>::restore(pte.paddr()) });
drop(unsafe { Page::<FrameMeta>::from_raw(pte.paddr()) });
}
}
}

View File

@ -322,11 +322,18 @@ pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>(
}
/// The interface for defining architecture-specific page table entries.
pub(crate) trait PageTableEntryTrait: Clone + Copy + Sized + Pod + Debug {
///
/// Note that a default PTE should be a PTE that points to nothing.
pub(crate) trait PageTableEntryTrait:
Clone + Copy + Debug + Default + Pod + Sized + Sync
{
/// Create a new invalid page table entry that indicates an absent page.
///
/// Note that the implementation currently requires an all-zero PTE to be an absent PTE.
fn new_absent() -> Self;
fn new_absent() -> Self {
Self::default()
}
/// Whether the entry is present with a valid mapping.
fn is_present(&self) -> bool;
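With `Default` in the bound set, implementors get `new_absent` for free. A minimal standalone sketch of the pattern, using illustrative types rather than the crate's:

```rust
// "Default value = absent entry": mirrors the trait's new default method.
trait EntryLike: Clone + Copy + Default {
    fn new_absent() -> Self {
        Self::default() // an all-zero entry points to nothing
    }
    fn is_present(&self) -> bool;
}

#[derive(Clone, Copy, Default)]
struct ExamplePte(u64);

impl EntryLike for ExamplePte {
    fn is_present(&self) -> bool {
        self.0 & 1 != 0 // bit 0 as an illustrative "present" flag
    }
}

fn main() {
    assert!(!ExamplePte::new_absent().is_present());
}
```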

View File

@ -114,7 +114,7 @@ fn test_user_copy_on_write() {
type Qr = PageTableQueryResult;
#[derive(Clone, Debug)]
#[derive(Clone, Debug, Default)]
struct BasePagingConsts {}
impl PagingConstsTrait for BasePagingConsts {
@ -158,7 +158,7 @@ fn test_base_protect_query() {
}
}
#[derive(Clone, Debug)]
#[derive(Clone, Debug, Default)]
struct VeryHugePagingConsts {}
impl PagingConstsTrait for VeryHugePagingConsts {

View File

@ -1,760 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
use alloc::vec;
use core::{
marker::PhantomData,
ops::{BitAnd, BitOr, Not, Range},
};
use pod::Pod;
use super::{frame_allocator, HasPaddr, VmIo};
use crate::{prelude::*, vm::PAGE_SIZE, Error};
/// A collection of page frames (physical memory pages).
///
/// For the most parts, `VmFrameVec` is like `Vec<VmFrame>`. But the
/// implementation may or may not be based on `Vec`. Having a dedicated
/// type to represent a series of page frames is convenient because,
/// more often than not, one needs to operate on a batch of frames rather
/// than a single frame.
#[derive(Debug, Clone)]
pub struct VmFrameVec(pub(crate) Vec<VmFrame>);
impl VmFrameVec {
pub fn get(&self, index: usize) -> Option<&VmFrame> {
self.0.get(index)
}
/// Returns an empty `VmFrameVec`.
pub fn empty() -> Self {
Self(Vec::new())
}
pub fn new_with_capacity(capacity: usize) -> Self {
Self(Vec::with_capacity(capacity))
}
/// Pushes a new frame to the collection.
pub fn push(&mut self, new_frame: VmFrame) {
self.0.push(new_frame);
}
/// Pop a frame from the collection.
pub fn pop(&mut self) -> Option<VmFrame> {
self.0.pop()
}
/// Removes a frame at a position.
pub fn remove(&mut self, at: usize) -> VmFrame {
self.0.remove(at)
}
/// Append some frames.
pub fn append(&mut self, more: &mut VmFrameVec) -> Result<()> {
self.0.append(&mut more.0);
Ok(())
}
/// Truncate some frames.
///
/// If `new_len >= self.len()`, then this method has no effect.
pub fn truncate(&mut self, new_len: usize) {
if new_len >= self.0.len() {
return;
}
self.0.truncate(new_len)
}
/// Returns an iterator
pub fn iter(&self) -> core::slice::Iter<'_, VmFrame> {
self.0.iter()
}
/// Returns the number of frames.
pub fn len(&self) -> usize {
self.0.len()
}
/// Returns whether the frame collection is empty.
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Returns the number of bytes.
///
/// This method is equivalent to `self.len() * PAGE_SIZE`.
pub fn nbytes(&self) -> usize {
self.0.len() * PAGE_SIZE
}
pub fn from_one_frame(frame: VmFrame) -> Self {
Self(vec![frame])
}
}
impl IntoIterator for VmFrameVec {
type Item = VmFrame;
type IntoIter = alloc::vec::IntoIter<Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
impl VmIo for VmFrameVec {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
if max_offset > self.nbytes() {
return Err(Error::InvalidArgs);
}
let num_unread_pages = offset / PAGE_SIZE;
let mut start = offset % PAGE_SIZE;
let mut buf_writer: VmWriter = buf.into();
for frame in self.0.iter().skip(num_unread_pages) {
let read_len = frame.reader().skip(start).read(&mut buf_writer);
if read_len == 0 {
break;
}
start = 0;
}
Ok(())
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
if max_offset > self.nbytes() {
return Err(Error::InvalidArgs);
}
let num_unwrite_pages = offset / PAGE_SIZE;
let mut start = offset % PAGE_SIZE;
let mut buf_reader: VmReader = buf.into();
for frame in self.0.iter().skip(num_unwrite_pages) {
let write_len = frame.writer().skip(start).write(&mut buf_reader);
if write_len == 0 {
break;
}
start = 0;
}
Ok(())
}
}
/// An iterator for frames.
pub struct VmFrameVecIter<'a> {
frames: &'a VmFrameVec,
current: usize,
// more...
}
impl<'a> VmFrameVecIter<'a> {
pub fn new(frames: &'a VmFrameVec) -> Self {
Self { frames, current: 0 }
}
}
impl<'a> Iterator for VmFrameVecIter<'a> {
type Item = &'a VmFrame;
fn next(&mut self) -> Option<Self::Item> {
if self.current >= self.frames.0.len() {
return None;
}
Some(self.frames.0.get(self.current).unwrap())
}
}
bitflags::bitflags! {
pub(crate) struct VmFrameFlags : usize {
const NEED_DEALLOC = 1 << 63;
}
}
#[derive(Debug)]
/// A handle to a page frame.
///
/// An instance of `VmFrame` is a handle to a page frame (a physical memory
/// page). A cloned `VmFrame` refers to the same page frame as the original.
/// As the original and cloned instances point to the same physical address,
/// they are treated as equal to each other. Behind the scenes,
/// a reference counter is maintained for each page frame so that
/// when all instances of `VmFrame` that refer to the
/// same page frame are dropped, the page frame will be freed.
/// Free page frames are allocated in bulk by `VmFrameVec::allocate`.
pub struct VmFrame {
pub(crate) frame_index: Arc<Paddr>,
}
impl Clone for VmFrame {
fn clone(&self) -> Self {
Self {
frame_index: self.frame_index.clone(),
}
}
}
impl HasPaddr for VmFrame {
fn paddr(&self) -> Paddr {
self.start_paddr()
}
}
impl VmFrame {
/// Creates a new VmFrame.
///
/// # Safety
///
/// The given physical address must be valid for use.
pub(crate) unsafe fn new(paddr: Paddr, flags: VmFrameFlags) -> Self {
assert_eq!(paddr % PAGE_SIZE, 0);
Self {
frame_index: Arc::new((paddr / PAGE_SIZE).bitor(flags.bits)),
}
}
/// Returns the physical address of the page frame.
pub fn start_paddr(&self) -> Paddr {
self.frame_index() * PAGE_SIZE
}
pub fn end_paddr(&self) -> Paddr {
(self.frame_index() + 1) * PAGE_SIZE
}
fn need_dealloc(&self) -> bool {
(*self.frame_index & VmFrameFlags::NEED_DEALLOC.bits()) != 0
}
fn frame_index(&self) -> usize {
(*self.frame_index).bitand(VmFrameFlags::all().bits().not())
}
pub fn as_ptr(&self) -> *const u8 {
super::paddr_to_vaddr(self.start_paddr()) as *const u8
}
pub fn as_mut_ptr(&self) -> *mut u8 {
super::paddr_to_vaddr(self.start_paddr()) as *mut u8
}
pub fn copy_from_frame(&self, src: &VmFrame) {
if Arc::ptr_eq(&self.frame_index, &src.frame_index) {
return;
}
// SAFETY: `src` and `dst` do not overlap.
unsafe {
core::ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), PAGE_SIZE);
}
}
}
impl<'a> VmFrame {
/// Returns a reader to read data from it.
pub fn reader(&'a self) -> VmReader<'a> {
// SAFETY: the memory of the page is contiguous and is valid during `'a`.
unsafe { VmReader::from_raw_parts(self.as_ptr(), PAGE_SIZE) }
}
/// Returns a writer to write data into it.
pub fn writer(&'a self) -> VmWriter<'a> {
// SAFETY: the memory of the page is contiguous and is valid during `'a`.
unsafe { VmWriter::from_raw_parts_mut(self.as_mut_ptr(), PAGE_SIZE) }
}
}
impl VmIo for VmFrame {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
if max_offset > PAGE_SIZE {
return Err(Error::InvalidArgs);
}
let len = self.reader().skip(offset).read(&mut buf.into());
debug_assert!(len == buf.len());
Ok(())
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
if max_offset > PAGE_SIZE {
return Err(Error::InvalidArgs);
}
let len = self.writer().skip(offset).write(&mut buf.into());
debug_assert!(len == buf.len());
Ok(())
}
}
impl Drop for VmFrame {
fn drop(&mut self) {
if self.need_dealloc() && Arc::strong_count(&self.frame_index) == 1 {
// SAFETY: the frame index is valid.
unsafe {
frame_allocator::dealloc_single(self.frame_index());
}
}
}
}
/// A handle to a contiguous range of page frames (physical memory pages).
///
/// The biggest difference between `VmSegment` and `VmFrameVec` is that
/// the page frames must be contiguous for `VmSegment`.
///
/// A cloned `VmSegment` refers to the same page frames as the original.
/// As the original and cloned instances point to the same physical address,
/// they are treated as equal to each other.
///
/// # Example
///
/// ```rust
/// let vm_segment = VmAllocOptions::new(2)
/// .is_contiguous(true)
/// .alloc_contiguous()?;
/// vm_segment.write_bytes(0, buf)?;
/// ```
#[derive(Debug, Clone)]
pub struct VmSegment {
inner: VmSegmentInner,
range: Range<usize>,
}
#[derive(Debug, Clone)]
struct VmSegmentInner {
start_frame_index: Arc<Paddr>,
nframes: usize,
}
impl VmSegmentInner {
/// Creates the inner part of 'VmSegment'.
///
/// # Safety
///
/// The constructor of 'VmSegment' ensures the safety.
unsafe fn new(paddr: Paddr, nframes: usize, flags: VmFrameFlags) -> Self {
assert_eq!(paddr % PAGE_SIZE, 0);
Self {
start_frame_index: Arc::new((paddr / PAGE_SIZE).bitor(flags.bits)),
nframes,
}
}
fn start_frame_index(&self) -> usize {
(*self.start_frame_index).bitand(VmFrameFlags::all().bits().not())
}
fn start_paddr(&self) -> Paddr {
self.start_frame_index() * PAGE_SIZE
}
}
impl HasPaddr for VmSegment {
fn paddr(&self) -> Paddr {
self.start_paddr()
}
}
impl VmSegment {
/// Creates a new `VmSegment`.
///
/// # Safety
///
/// The given range of page frames must be contiguous and valid for use.
/// The given range of page frames must not have been allocated before,
/// as part of either a `VmFrame` or `VmSegment`.
pub(crate) unsafe fn new(paddr: Paddr, nframes: usize, flags: VmFrameFlags) -> Self {
Self {
inner: VmSegmentInner::new(paddr, nframes, flags),
range: 0..nframes,
}
}
/// Returns a part of the `VmSegment`.
///
/// # Panic
///
/// If `range` is not within the range of this `VmSegment`,
/// then the method panics.
pub fn range(&self, range: Range<usize>) -> Self {
let orig_range = &self.range;
let adj_range = (range.start + orig_range.start)..(range.end + orig_range.start);
assert!(!adj_range.is_empty() && adj_range.end <= orig_range.end);
Self {
inner: self.inner.clone(),
range: adj_range,
}
}
/// Returns the start physical address.
pub fn start_paddr(&self) -> Paddr {
self.start_frame_index() * PAGE_SIZE
}
/// Returns the end physical address.
pub fn end_paddr(&self) -> Paddr {
(self.start_frame_index() + self.nframes()) * PAGE_SIZE
}
/// Returns the number of page frames.
pub fn nframes(&self) -> usize {
self.range.len()
}
/// Returns the number of bytes.
pub fn nbytes(&self) -> usize {
self.nframes() * PAGE_SIZE
}
fn need_dealloc(&self) -> bool {
(*self.inner.start_frame_index & VmFrameFlags::NEED_DEALLOC.bits()) != 0
}
fn start_frame_index(&self) -> usize {
self.inner.start_frame_index() + self.range.start
}
pub fn as_ptr(&self) -> *const u8 {
super::paddr_to_vaddr(self.start_paddr()) as *const u8
}
pub fn as_mut_ptr(&self) -> *mut u8 {
super::paddr_to_vaddr(self.start_paddr()) as *mut u8
}
}
impl<'a> VmSegment {
/// Returns a reader to read data from it.
pub fn reader(&'a self) -> VmReader<'a> {
// SAFETY: the memory of the page frames is contiguous and is valid during `'a`.
unsafe { VmReader::from_raw_parts(self.as_ptr(), self.nbytes()) }
}
/// Returns a writer to write data into it.
pub fn writer(&'a self) -> VmWriter<'a> {
// SAFETY: the memory of the page frames is contiguous and is valid during `'a`.
unsafe { VmWriter::from_raw_parts_mut(self.as_mut_ptr(), self.nbytes()) }
}
}
impl VmIo for VmSegment {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
if max_offset > self.nbytes() {
return Err(Error::InvalidArgs);
}
let len = self.reader().skip(offset).read(&mut buf.into());
debug_assert!(len == buf.len());
Ok(())
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
if max_offset > self.nbytes() {
return Err(Error::InvalidArgs);
}
let len = self.writer().skip(offset).write(&mut buf.into());
debug_assert!(len == buf.len());
Ok(())
}
}
impl Drop for VmSegment {
fn drop(&mut self) {
if self.need_dealloc() && Arc::strong_count(&self.inner.start_frame_index) == 1 {
// SAFETY: the range of contiguous page frames is valid.
unsafe {
frame_allocator::dealloc_contiguous(
self.inner.start_frame_index(),
self.inner.nframes,
);
}
}
}
}
impl From<VmFrame> for VmSegment {
fn from(frame: VmFrame) -> Self {
Self {
inner: VmSegmentInner {
start_frame_index: frame.frame_index.clone(),
nframes: 1,
},
range: 0..1,
}
}
}
/// VmReader is a reader for reading data from a contiguous range of memory.
///
/// # Example
///
/// ```rust
/// impl VmIo for VmFrame {
/// fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
/// if buf.len() + offset > PAGE_SIZE {
/// return Err(Error::InvalidArgs);
/// }
/// let len = self.reader().skip(offset).read(&mut buf.into());
/// debug_assert!(len == buf.len());
/// Ok(())
/// }
/// }
/// ```
pub struct VmReader<'a> {
cursor: *const u8,
end: *const u8,
phantom: PhantomData<&'a [u8]>,
}
impl<'a> VmReader<'a> {
/// Constructs a VmReader from a pointer and a length.
///
/// # Safety
///
/// User must ensure the memory from `ptr` to `ptr.add(len)` is contiguous.
/// User must ensure the memory is valid during the entire period of `'a`.
pub const unsafe fn from_raw_parts(ptr: *const u8, len: usize) -> Self {
Self {
cursor: ptr,
end: ptr.add(len),
phantom: PhantomData,
}
}
/// Returns the number of bytes for the remaining data.
pub const fn remain(&self) -> usize {
// SAFETY: the end is equal to or greater than the cursor.
unsafe { self.end.sub_ptr(self.cursor) }
}
/// Returns the cursor pointer, which refers to the address of the next byte to read.
pub const fn cursor(&self) -> *const u8 {
self.cursor
}
/// Returns if it has remaining data to read.
pub const fn has_remain(&self) -> bool {
self.remain() > 0
}
/// Limits the length of remaining data.
///
/// This method ensures the postcondition of `self.remain() <= max_remain`.
pub const fn limit(mut self, max_remain: usize) -> Self {
if max_remain < self.remain() {
// SAFETY: the new end is less than the old end.
unsafe { self.end = self.cursor.add(max_remain) };
}
self
}
/// Skips the first `nbytes` bytes of data.
/// The length of remaining data is decreased accordingly.
///
/// # Panic
///
/// If `nbytes` is greater than `self.remain()`, then the method panics.
pub fn skip(mut self, nbytes: usize) -> Self {
assert!(nbytes <= self.remain());
// SAFETY: the new cursor is less than or equal to the end.
unsafe { self.cursor = self.cursor.add(nbytes) };
self
}
/// Reads all data into the writer until one of the two conditions is met:
/// 1. The reader has no remaining data.
/// 2. The writer has no available space.
///
/// Returns the number of bytes read.
///
/// It pulls that number of bytes of data from the reader and
/// fills the writer with the same number of bytes.
pub fn read(&mut self, writer: &mut VmWriter<'_>) -> usize {
let copy_len = self.remain().min(writer.avail());
if copy_len == 0 {
return 0;
}
// SAFETY: the memory range is valid since `copy_len` is the minimum
// of the reader's remaining data and the writer's available space.
unsafe {
core::ptr::copy(self.cursor, writer.cursor, copy_len);
self.cursor = self.cursor.add(copy_len);
writer.cursor = writer.cursor.add(copy_len);
}
copy_len
}
/// Read a value of `Pod` type.
///
/// # Panic
///
/// If the length of the `Pod` type exceeds `self.remain()`, then this method will panic.
pub fn read_val<T: Pod>(&mut self) -> T {
assert!(self.remain() >= core::mem::size_of::<T>());
let mut val = T::new_uninit();
let mut writer = VmWriter::from(val.as_bytes_mut());
let read_len = self.read(&mut writer);
val
}
}
impl<'a> From<&'a [u8]> for VmReader<'a> {
fn from(slice: &'a [u8]) -> Self {
// SAFETY: the range of memory is contiguous and is valid during `'a`.
unsafe { Self::from_raw_parts(slice.as_ptr(), slice.len()) }
}
}
/// VmWriter is a writer for writing data to a contiguous range of memory.
///
/// # Example
///
/// ```rust
/// impl VmIo for VmFrame {
/// fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
/// if buf.len() + offset > PAGE_SIZE {
/// return Err(Error::InvalidArgs);
/// }
/// let len = self.writer().skip(offset).write(&mut buf.into());
/// debug_assert!(len == buf.len());
/// Ok(())
/// }
/// }
/// ```
pub struct VmWriter<'a> {
cursor: *mut u8,
end: *mut u8,
phantom: PhantomData<&'a mut [u8]>,
}
impl<'a> VmWriter<'a> {
/// Constructs a VmWriter from a pointer and a length.
///
/// # Safety
///
/// User must ensure the memory from `ptr` to `ptr.add(len)` is contiguous.
/// User must ensure the memory is valid during the entire period of `'a`.
pub const unsafe fn from_raw_parts_mut(ptr: *mut u8, len: usize) -> Self {
Self {
cursor: ptr,
end: ptr.add(len),
phantom: PhantomData,
}
}
/// Returns the number of bytes for the available space.
pub const fn avail(&self) -> usize {
// SAFETY: the end is equal to or greater than the cursor.
unsafe { self.end.sub_ptr(self.cursor) }
}
/// Returns the cursor pointer, which refers to the address of the next byte to write.
pub const fn cursor(&self) -> *mut u8 {
self.cursor
}
/// Returns if it has available space to write.
pub const fn has_avail(&self) -> bool {
self.avail() > 0
}
/// Limits the length of available space.
///
/// This method ensures the postcondition of `self.avail() <= max_avail`.
pub const fn limit(mut self, max_avail: usize) -> Self {
if max_avail < self.avail() {
// SAFETY: the new end is less than the old end.
unsafe { self.end = self.cursor.add(max_avail) };
}
self
}
/// Skips the first `nbytes` bytes of data.
/// The length of available space is decreased accordingly.
///
/// # Panic
///
/// If `nbytes` is greater than `self.avail()`, then the method panics.
pub fn skip(mut self, nbytes: usize) -> Self {
assert!(nbytes <= self.avail());
// SAFETY: the new cursor is less than or equal to the end.
unsafe { self.cursor = self.cursor.add(nbytes) };
self
}
/// Writes data from the reader until one of the two conditions is met:
/// 1. The writer has no available space.
/// 2. The reader has no remaining data.
///
/// Returns the number of bytes written.
///
/// It pulls that number of bytes of data from the reader and
/// fills the writer with the same number of bytes.
pub fn write(&mut self, reader: &mut VmReader<'_>) -> usize {
let copy_len = self.avail().min(reader.remain());
if copy_len == 0 {
return 0;
}
// SAFETY: the memory range is valid since `copy_len` is the minimum
// of the reader's remaining data and the writer's available space.
unsafe {
core::ptr::copy(reader.cursor, self.cursor, copy_len);
self.cursor = self.cursor.add(copy_len);
reader.cursor = reader.cursor.add(copy_len);
}
copy_len
}
/// Fills the available space by repeating `value`.
///
/// Returns the number of values written.
///
/// # Panic
///
/// The size of the available space must be a multiple of the size of `value`.
/// Otherwise, the method would panic.
pub fn fill<T: Pod>(&mut self, value: T) -> usize {
let avail = self.avail();
assert!((self.cursor as *mut T).is_aligned());
assert!(avail % core::mem::size_of::<T>() == 0);
let written_num = avail / core::mem::size_of::<T>();
for i in 0..written_num {
// SAFETY: `written_num` is calculated by the avail size and the size of the type `T`,
// hence the `add` operation and `write` operation are valid and will only manipulate
// the memory managed by this writer.
unsafe {
(self.cursor as *mut T).add(i).write(value);
}
}
// The available space has been filled so this cursor can be moved to the end.
self.cursor = self.end;
written_num
}
}
impl<'a> From<&'a mut [u8]> for VmWriter<'a> {
fn from(slice: &'a mut [u8]) -> Self {
// SAFETY: the range of memory is contiguous and is valid during `'a`.
unsafe { Self::from_raw_parts_mut(slice.as_mut_ptr(), slice.len()) }
}
}

View File

@ -1,531 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
pub(crate) mod allocator;
pub(in crate::vm) mod meta;
use alloc::vec;
use core::{
mem::ManuallyDrop,
ops::Range,
sync::atomic::{self, Ordering},
};
use meta::{FrameMetaRef, FrameType};
use crate::{
prelude::*,
vm::{HasPaddr, PagingLevel, VmIo, VmReader, VmWriter, PAGE_SIZE},
Error,
};
/// A collection of base page frames (regular physical memory pages).
///
/// For the most parts, `VmFrameVec` is like `Vec<VmFrame>`. But the
/// implementation may or may not be based on `Vec`. Having a dedicated
/// type to represent a series of page frames is convenient because,
/// more often than not, one needs to operate on a batch of frames rather
/// than a single frame.
#[derive(Debug, Clone)]
pub struct VmFrameVec(pub(crate) Vec<VmFrame>);
impl VmFrameVec {
pub fn get(&self, index: usize) -> Option<&VmFrame> {
self.0.get(index)
}
/// Returns an empty `VmFrameVec`.
pub fn empty() -> Self {
Self(Vec::new())
}
pub fn new_with_capacity(capacity: usize) -> Self {
Self(Vec::with_capacity(capacity))
}
/// Pushes a new frame to the collection.
pub fn push(&mut self, new_frame: VmFrame) {
self.0.push(new_frame);
}
/// Pop a frame from the collection.
pub fn pop(&mut self) -> Option<VmFrame> {
self.0.pop()
}
/// Removes a frame at a position.
pub fn remove(&mut self, at: usize) -> VmFrame {
self.0.remove(at)
}
/// Append some frames.
pub fn append(&mut self, more: &mut VmFrameVec) -> Result<()> {
self.0.append(&mut more.0);
Ok(())
}
/// Truncate some frames.
///
/// If `new_len >= self.len()`, then this method has no effect.
pub fn truncate(&mut self, new_len: usize) {
if new_len >= self.0.len() {
return;
}
self.0.truncate(new_len)
}
/// Returns an iterator
pub fn iter(&self) -> core::slice::Iter<'_, VmFrame> {
self.0.iter()
}
/// Returns the number of frames.
pub fn len(&self) -> usize {
self.0.len()
}
/// Returns whether the frame collection is empty.
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Returns the number of bytes.
///
/// This method is equivalent to `self.len() * PAGE_SIZE`.
pub fn nbytes(&self) -> usize {
self.0.len() * PAGE_SIZE
}
pub fn from_one_frame(frame: VmFrame) -> Self {
Self(vec![frame])
}
}
impl IntoIterator for VmFrameVec {
type Item = VmFrame;
type IntoIter = alloc::vec::IntoIter<Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
impl VmIo for VmFrameVec {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
if max_offset > self.nbytes() {
return Err(Error::InvalidArgs);
}
let num_unread_pages = offset / PAGE_SIZE;
let mut start = offset % PAGE_SIZE;
let mut buf_writer: VmWriter = buf.into();
for frame in self.0.iter().skip(num_unread_pages) {
let read_len = frame.reader().skip(start).read(&mut buf_writer);
if read_len == 0 {
break;
}
start = 0;
}
Ok(())
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
if max_offset > self.nbytes() {
return Err(Error::InvalidArgs);
}
let num_unwrite_pages = offset / PAGE_SIZE;
let mut start = offset % PAGE_SIZE;
let mut buf_reader: VmReader = buf.into();
for frame in self.0.iter().skip(num_unwrite_pages) {
let write_len = frame.writer().skip(start).write(&mut buf_reader);
if write_len == 0 {
break;
}
start = 0;
}
Ok(())
}
}
/// An iterator for frames.
pub struct FrameVecIter<'a> {
frames: &'a VmFrameVec,
current: usize,
}
impl<'a> FrameVecIter<'a> {
pub fn new(frames: &'a VmFrameVec) -> Self {
Self { frames, current: 0 }
}
}
impl<'a> Iterator for FrameVecIter<'a> {
type Item = &'a VmFrame;
fn next(&mut self) -> Option<Self::Item> {
if self.current >= self.frames.0.len() {
return None;
}
Some(self.frames.0.get(self.current).unwrap())
}
}
#[derive(Debug)]
/// A handle to a page frame.
///
/// The referenced page frame could either be huge or regular, which can be
/// told by the [`VmFrame::size`] method. It is ensured that there would be
/// only one TLB entry for such a frame if it is mapped to a virtual address
/// and the architecture supports huge TLB entries.
///
/// An instance of `VmFrame` is a handle to a page frame (a physical memory
/// page). A cloned `VmFrame` refers to the same page frame as the original.
/// As the original and cloned instances point to the same physical address,
/// they are treated as equal to each other. Behind the scenes, a reference
/// counter is maintained for each page frame so that when all instances of
/// `VmFrame` that refer to the same page frame are dropped, the page frame
/// will be globally freed.
pub struct VmFrame {
pub(crate) meta: FrameMetaRef,
}
unsafe impl Send for VmFrame {}
unsafe impl Sync for VmFrame {}
impl Clone for VmFrame {
fn clone(&self) -> Self {
self.meta.counter32_1.fetch_add(1, Ordering::Relaxed);
Self { meta: self.meta }
}
}
impl HasPaddr for VmFrame {
fn paddr(&self) -> Paddr {
self.start_paddr()
}
}
impl VmFrame {
/// Creates a new `VmFrame` from the given physical address and level.
///
/// # Panic
///
/// The function panics if the given frame is not free or is managed
/// by a non-free super page.
///
/// # Safety
///
/// The caller must ensure that the given physical address is valid, and
/// the page is free thus not accessed by any other objects or handles.
pub(crate) unsafe fn from_free_raw(paddr: Paddr, level: PagingLevel) -> Self {
let mut meta = FrameMetaRef::from_raw(paddr, level);
assert!(matches!(meta.frame_type, FrameType::Free));
meta.deref_mut().frame_type = FrameType::Anonymous;
meta.counter32_1.fetch_add(1, Ordering::Relaxed);
Self { meta }
}
/// Returns the physical address of the page frame.
pub fn start_paddr(&self) -> Paddr {
self.meta.paddr()
}
pub fn size(&self) -> usize {
self.meta.size()
}
pub fn level(&self) -> PagingLevel {
self.meta.level()
}
pub fn end_paddr(&self) -> Paddr {
self.start_paddr() + self.size()
}
pub fn as_ptr(&self) -> *const u8 {
super::paddr_to_vaddr(self.start_paddr()) as *const u8
}
pub fn as_mut_ptr(&self) -> *mut u8 {
super::paddr_to_vaddr(self.start_paddr()) as *mut u8
}
pub fn copy_from(&self, src: &VmFrame) {
if self.meta == src.meta {
return;
}
if self.size() != src.size() {
panic!("The size of the source frame is different from the destination frame");
}
// SAFETY: the source and the destination do not overlap.
unsafe {
core::ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.size());
}
}
}
impl<'a> VmFrame {
/// Returns a reader to read data from it.
pub fn reader(&'a self) -> VmReader<'a> {
// SAFETY: the memory of the page is contiguous and is valid during `'a`.
unsafe { VmReader::from_raw_parts(self.as_ptr(), self.size()) }
}
/// Returns a writer to write data into it.
pub fn writer(&'a self) -> VmWriter<'a> {
// SAFETY: the memory of the page is contiguous and is valid during `'a`.
unsafe { VmWriter::from_raw_parts_mut(self.as_mut_ptr(), self.size()) }
}
}
impl VmIo for VmFrame {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
if max_offset > self.size() {
return Err(Error::InvalidArgs);
}
let len = self.reader().skip(offset).read(&mut buf.into());
debug_assert!(len == buf.len());
Ok(())
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
if max_offset > self.size() {
return Err(Error::InvalidArgs);
}
let len = self.writer().skip(offset).write(&mut buf.into());
debug_assert!(len == buf.len());
Ok(())
}
}
impl Drop for VmFrame {
fn drop(&mut self) {
if self.meta.counter32_1.fetch_sub(1, Ordering::Release) == 1 {
// A fence is needed here with the same reasons stated in the implementation of
// `Arc::drop`: <https://doc.rust-lang.org/std/sync/struct.Arc.html#method.drop>.
atomic::fence(Ordering::Acquire);
// SAFETY: the reference counter is 1 before being decremented, so this is the only
// (exclusive) handle.
unsafe { self.meta.deref_mut().frame_type = FrameType::Free };
// SAFETY: the page frame is valid.
unsafe {
allocator::dealloc_contiguous(self.paddr() / PAGE_SIZE, self.size() / PAGE_SIZE);
}
}
}
}
/// A handle to a contiguous range of page frames (physical memory pages).
///
/// The biggest difference between `VmSegment` and `VmFrameVec` is that
/// the page frames must be contiguous for `VmSegment`.
///
/// A cloned `VmSegment` refers to the same page frames as the original.
/// As the original and cloned instances point to the same physical address,
/// they are treated as equal to each other.
///
/// # Example
///
/// ```rust
/// let vm_segment = VmAllocOptions::new(2)
/// .is_contiguous(true)
/// .alloc_contiguous()?;
/// vm_segment.write_bytes(0, buf)?;
/// ```
#[derive(Debug, Clone)]
pub struct VmSegment {
inner: VmSegmentInner,
range: Range<usize>,
}
unsafe impl Send for VmSegment {}
unsafe impl Sync for VmSegment {}
#[derive(Debug)]
struct VmSegmentInner {
meta: FrameMetaRef,
nframes: usize,
}
impl Clone for VmSegmentInner {
fn clone(&self) -> Self {
self.meta.counter32_1.fetch_add(1, Ordering::Relaxed);
Self {
meta: self.meta,
nframes: self.nframes,
}
}
}
impl VmSegmentInner {
/// Creates the inner part of 'VmSegment'.
///
/// # Safety
///
/// The constructor of 'VmSegment' ensures the safety.
unsafe fn new(paddr: Paddr, nframes: usize) -> Self {
assert_eq!(paddr % PAGE_SIZE, 0);
let mut meta = FrameMetaRef::from_raw(paddr, 1);
assert!(matches!(meta.frame_type, FrameType::Free));
meta.deref_mut().frame_type = FrameType::Anonymous;
meta.counter32_1.fetch_add(1, Ordering::Relaxed);
Self { meta, nframes }
}
fn start_frame_index(&self) -> usize {
self.start_paddr() / PAGE_SIZE
}
fn start_paddr(&self) -> Paddr {
self.meta.paddr()
}
}
impl HasPaddr for VmSegment {
fn paddr(&self) -> Paddr {
self.start_paddr()
}
}
impl VmSegment {
/// Creates a new `VmSegment`.
///
/// # Safety
///
/// The given range of page frames must be contiguous and valid for use.
/// The given range of page frames must not have been allocated before,
/// as part of either a `VmFrame` or `VmSegment`.
pub(crate) unsafe fn new(paddr: Paddr, nframes: usize) -> Self {
Self {
inner: VmSegmentInner::new(paddr, nframes),
range: 0..nframes,
}
}
/// Returns a part of the `VmSegment`.
///
/// # Panic
///
/// If `range` is not within the range of this `VmSegment`,
/// then the method panics.
pub fn range(&self, range: Range<usize>) -> Self {
let orig_range = &self.range;
let adj_range = (range.start + orig_range.start)..(range.end + orig_range.start);
assert!(!adj_range.is_empty() && adj_range.end <= orig_range.end);
Self {
inner: self.inner.clone(),
range: adj_range,
}
}
/// Returns the start physical address.
pub fn start_paddr(&self) -> Paddr {
self.start_frame_index() * PAGE_SIZE
}
/// Returns the end physical address.
pub fn end_paddr(&self) -> Paddr {
(self.start_frame_index() + self.nframes()) * PAGE_SIZE
}
/// Returns the number of page frames.
pub fn nframes(&self) -> usize {
self.range.len()
}
/// Returns the number of bytes.
pub fn nbytes(&self) -> usize {
self.nframes() * PAGE_SIZE
}
fn start_frame_index(&self) -> usize {
self.inner.start_frame_index() + self.range.start
}
pub fn as_ptr(&self) -> *const u8 {
super::paddr_to_vaddr(self.start_paddr()) as *const u8
}
pub fn as_mut_ptr(&self) -> *mut u8 {
super::paddr_to_vaddr(self.start_paddr()) as *mut u8
}
}
impl<'a> VmSegment {
/// Returns a reader to read data from it.
pub fn reader(&'a self) -> VmReader<'a> {
// SAFETY: the memory of the page frames is contiguous and is valid during `'a`.
unsafe { VmReader::from_raw_parts(self.as_ptr(), self.nbytes()) }
}
/// Returns a writer to write data into it.
pub fn writer(&'a self) -> VmWriter<'a> {
// SAFETY: the memory of the page frames is contiguous and is valid during `'a`.
unsafe { VmWriter::from_raw_parts_mut(self.as_mut_ptr(), self.nbytes()) }
}
}
impl VmIo for VmSegment {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
if max_offset > self.nbytes() {
return Err(Error::InvalidArgs);
}
let len = self.reader().skip(offset).read(&mut buf.into());
debug_assert!(len == buf.len());
Ok(())
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
if max_offset > self.nbytes() {
return Err(Error::InvalidArgs);
}
let len = self.writer().skip(offset).write(&mut buf.into());
debug_assert!(len == buf.len());
Ok(())
}
}
impl Drop for VmSegment {
fn drop(&mut self) {
if self.inner.meta.counter32_1.fetch_sub(1, Ordering::Release) == 1 {
// A fence is needed here with the same reasons stated in the implementation of
// `Arc::drop`: <https://doc.rust-lang.org/std/sync/struct.Arc.html#method.drop>.
atomic::fence(Ordering::Acquire);
// SAFETY: the reference counter is 1 before being decremented, so this is the only
// (exclusive) handle.
unsafe { self.inner.meta.deref_mut().frame_type = FrameType::Free };
// SAFETY: the range of contiguous page frames is valid.
unsafe {
allocator::dealloc_contiguous(self.inner.start_frame_index(), self.inner.nframes);
}
}
}
}
impl From<VmFrame> for VmSegment {
fn from(frame: VmFrame) -> Self {
let segment = Self {
inner: VmSegmentInner {
meta: frame.meta,
nframes: 1,
},
range: 0..1,
};
let _ = ManuallyDrop::new(frame);
segment
}
}

View File

@ -32,7 +32,7 @@ pub fn sys_sched_getaffinity(
// Placeholder for future implementation.
}
_ => {
match process_table::get_process(&pid) {
match process_table::get_process(pid) {
Some(_process) => { /* Placeholder if process-specific logic needed */ }
None => return Err(Error::with_message(Errno::ESRCH, "process does not exist")),
}