Let frame on-drop callbacks take readers

This commit is contained in:
Zhang Junyang
2024-12-24 14:33:29 +08:00
committed by Tate, Hongliang Tian
parent 14308f81b6
commit ef6ff7ad84
3 changed files with 26 additions and 23 deletions

View File

@ -52,7 +52,8 @@ use crate::{
arch::mm::PagingConsts,
mm::{
kspace::LINEAR_MAPPING_BASE_VADDR, paddr_to_vaddr, page_size, page_table::boot_pt,
CachePolicy, Paddr, PageFlags, PageProperty, PrivilegedPageFlags, Vaddr, PAGE_SIZE,
CachePolicy, Infallible, Paddr, PageFlags, PageProperty, PrivilegedPageFlags, Vaddr,
VmReader, PAGE_SIZE,
},
panic::abort,
};
@ -114,9 +115,14 @@ const_assert_eq!(size_of::<MetaSlot>(), META_SLOT_SIZE);
/// The implemented structure must have a size less than or equal to
/// [`PAGE_METADATA_MAX_SIZE`] and an alignment less than or equal to
/// [`PAGE_METADATA_MAX_ALIGN`].
///
/// The implementer of the `on_drop` method should ensure that the frame is
/// safe to be read.
pub unsafe trait FrameMeta: Any + Send + Sync + Debug + 'static {
/// Called when the last handle to the page is dropped.
fn on_drop(&mut self, _paddr: Paddr) {}
fn on_drop(&mut self, reader: &mut VmReader<Infallible>) {
let _ = reader;
}
}
/// Makes a structure usable as a page metadata.
@ -126,14 +132,13 @@ pub unsafe trait FrameMeta: Any + Send + Sync + Debug + 'static {
/// compile-time checks.
#[macro_export]
macro_rules! impl_frame_meta_for {
($($t:ty),*) => {
$(
// Implement without specifying the drop behavior.
($t:ty) => {
use static_assertions::const_assert;
const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_SIZE);
const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_ALIGN);
// SAFETY: The size and alignment of the structure are checked.
unsafe impl $crate::mm::frame::meta::FrameMeta for $t {}
)*
};
}
@ -182,12 +187,17 @@ pub(super) unsafe fn drop_last_in_place(ptr: *mut MetaSlot) {
let meta_ptr: *mut dyn FrameMeta = core::ptr::from_raw_parts_mut(ptr, vtable_ptr);
    // SAFETY: The implementer of the frame metadata decides whether the frame
    // is safe to be read.
let mut reader =
unsafe { VmReader::from_kernel_space(paddr_to_vaddr(paddr) as *const u8, PAGE_SIZE) };
// SAFETY: `ptr` points to the metadata storage which is valid to be mutably borrowed under
// `vtable_ptr` because the metadata is valid, the vtable is correct, and we have the exclusive
// access to the page metadata.
unsafe {
// Invoke the custom `on_drop` handler.
(*meta_ptr).on_drop(paddr);
(*meta_ptr).on_drop(&mut reader);
// Drop the page metadata.
core::ptr::drop_in_place(meta_ptr);
}

View File

@ -4,7 +4,7 @@ use core::{fmt::Debug, marker::PhantomData, ops::Range};
use super::{
nr_subpage_per_huge, page_prop::PageProperty, page_size, Paddr, PagingConstsTrait, PagingLevel,
Vaddr,
PodOnce, Vaddr,
};
use crate::{
arch::mm::{PageTableEntry, PagingConsts},
@ -339,7 +339,7 @@ pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>(
///
/// Note that a default PTE should be a PTE that points to nothing.
pub trait PageTableEntryTrait:
Clone + Copy + Debug + Default + Pod + Sized + Send + Sync + 'static
Clone + Copy + Debug + Default + Pod + PodOnce + Sized + Send + Sync + 'static
{
/// Create a set of new invalid page table flags that indicates an absent page.
///

View File

@ -41,7 +41,7 @@ use crate::{
arch::mm::{PageTableEntry, PagingConsts},
mm::{
frame::{self, inc_page_ref_count, meta::FrameMeta, Frame},
paddr_to_vaddr, Paddr, PagingConstsTrait, PagingLevel, PAGE_SIZE,
paddr_to_vaddr, Infallible, Paddr, PagingConstsTrait, PagingLevel, VmReader, PAGE_SIZE,
},
};
@ -410,7 +410,7 @@ unsafe impl<E: PageTableEntryTrait, C: PagingConstsTrait> FrameMeta for PageTabl
where
[(); C::NR_LEVELS as usize]:,
{
fn on_drop(&mut self, paddr: Paddr) {
fn on_drop(&mut self, reader: &mut VmReader<Infallible>) {
let nr_children = self.nr_children.get_mut();
if *nr_children == 0 {
@ -421,14 +421,7 @@ where
let is_tracked = self.is_tracked;
// Drop the children.
for i in 0..nr_subpage_per_huge::<C>() {
// SAFETY: The index is within the bound and PTE is plain-old-data. The
// address is aligned as well. We also have an exclusive access ensured
// by reference counting.
let pte_ptr = unsafe { (paddr_to_vaddr(paddr) as *const E).add(i) };
// SAFETY: The pointer is valid and the PTE is plain-old-data.
let pte = unsafe { pte_ptr.read() };
while let Ok(pte) = reader.read_once::<E>() {
// Here if we use directly `Child::from_pte` we would experience a
// 50% increase in the overhead of the `drop` function. It seems that
// Rust is very conservative about inlining and optimizing dead code