Manage frame tracking outside the page table

committed by: Tate, Hongliang Tian
parent: 2c917ba383
commit: e78927b449
@@ -21,7 +21,7 @@ use core::{
 use align_ext::AlignExt;
 use aster_rights::Full;
 use ostd::{
-    mm::{vm_space::VmItem, UntypedMem, VmIo, MAX_USERSPACE_VADDR},
+    mm::{UntypedMem, VmIo, MAX_USERSPACE_VADDR},
     task::disable_preempt,
 };

@@ -394,7 +394,7 @@ impl InitStackReader<'_> {
             &preempt_guard,
             &(page_base_addr..page_base_addr + PAGE_SIZE),
         )?;
-        let VmItem::Mapped { frame, .. } = cursor.query()? else {
+        let (_, Some((frame, _))) = cursor.query()? else {
             return_errno_with_message!(Errno::EACCES, "Page not accessible");
         };

@@ -422,7 +422,7 @@ impl InitStackReader<'_> {
             &preempt_guard,
             &(page_base_addr..page_base_addr + PAGE_SIZE),
         )?;
-        let VmItem::Mapped { frame, .. } = cursor.query()? else {
+        let (_, Some((frame, _))) = cursor.query()? else {
             return_errno_with_message!(Errno::EACCES, "Page not accessible");
         };

@@ -466,7 +466,7 @@ impl InitStackReader<'_> {
             &preempt_guard,
             &(page_base_addr..page_base_addr + PAGE_SIZE),
         )?;
-        let VmItem::Mapped { frame, .. } = cursor.query()? else {
+        let (_, Some((frame, _))) = cursor.query()? else {
             return_errno_with_message!(Errno::EACCES, "Page not accessible");
         };
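Note: the recurring call-site change in this commit is that cursor queries no longer return a `VmItem` enum but a pair of the queried virtual range and an optional mapped item. The following is a self-contained toy sketch of that calling convention with stand-in types (the `Frame` and `PageProperty` shown here are illustrative, not the actual ostd types):

// Toy model of the new `query()` return shape and the let-else destructuring
// used throughout this commit. Illustration only; not the ostd API.
use core::ops::Range;

#[derive(Debug, Clone)]
struct Frame { start_paddr: usize }
#[derive(Debug, Clone, Copy)]
struct PageProperty;

type Vaddr = usize;
/// The queried VA range plus the mapped item, if any.
type QueryResult = (Range<Vaddr>, Option<(Frame, PageProperty)>);

fn read_mapped_frame(query: QueryResult) -> Result<Frame, &'static str> {
    // Old style matched `VmItem::Mapped { frame, .. }`; the new style
    // destructures the tuple and treats `None` as "not mapped".
    let (_va, Some((frame, _prop))) = query else {
        return Err("Page not accessible");
    };
    Ok(frame)
}

fn main() {
    let mapped: QueryResult = (0x1000..0x2000, Some((Frame { start_paddr: 0x8000 }, PageProperty)));
    let unmapped: QueryResult = (0x2000..0x3000, None);
    assert_eq!(read_mapped_frame(mapped).unwrap().start_paddr, 0x8000);
    assert!(read_mapped_frame(unmapped).is_err());
}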
@@ -14,9 +14,7 @@ use aster_rights::Rights;
 use ostd::{
     cpu::CpuId,
     mm::{
-        tlb::TlbFlushOp,
-        vm_space::{CursorMut, VmItem},
-        PageFlags, PageProperty, VmSpace, MAX_USERSPACE_VADDR,
+        tlb::TlbFlushOp, vm_space::CursorMut, PageFlags, PageProperty, VmSpace, MAX_USERSPACE_VADDR,
     },
     task::disable_preempt,
 };

@@ -544,15 +542,10 @@ fn cow_copy_pt(src: &mut CursorMut<'_>, dst: &mut CursorMut<'_>, size: usize) ->
         };
 
         while let Some(mapped_va) = src.find_next(remain_size) {
-            let VmItem::Mapped {
-                va,
-                frame,
-                mut prop,
-            } = src.query().unwrap()
-            else {
+            let (va, Some((frame, mut prop))) = src.query().unwrap() else {
                 panic!("Found mapped page but query failed");
             };
-            debug_assert_eq!(mapped_va, va);
+            debug_assert_eq!(mapped_va, va.start);
 
             src.protect_next(end_va - mapped_va, op).unwrap();
 
@@ -911,7 +904,7 @@ mod test {
         // Confirms the initial mapping.
         assert!(matches!(
            vm_space.cursor(&preempt_guard, &map_range).unwrap().query().unwrap(),
-            VmItem::Mapped { va, frame, prop } if va == map_range.start && frame.start_paddr() == start_paddr && prop.flags == PageFlags::RW
+            (va, Some((frame, prop))) if va.start == map_range.start && frame.start_paddr() == start_paddr && prop.flags == PageFlags::RW
         ));
 
         // Creates a child page table with copy-on-write protection.

@@ -926,7 +919,7 @@ mod test {
         // Confirms that parent and child VAs map to the same physical address.
         {
             let child_map_frame_addr = {
-                let VmItem::Mapped { frame, .. } = child_space
+                let (_, Some((frame, _))) = child_space
                     .cursor(&preempt_guard, &map_range)
                     .unwrap()
                     .query()

@@ -937,7 +930,7 @@ mod test {
                 frame.start_paddr()
             };
             let parent_map_frame_addr = {
-                let VmItem::Mapped { frame, .. } = vm_space
+                let (_, Some((frame, _))) = vm_space
                     .cursor(&preempt_guard, &map_range)
                     .unwrap()
                     .query()

@@ -960,7 +953,7 @@ mod test {
         // Confirms that the child VA remains mapped.
         assert!(matches!(
             child_space.cursor(&preempt_guard, &map_range).unwrap().query().unwrap(),
-            VmItem::Mapped { va, frame, prop } if va == map_range.start && frame.start_paddr() == start_paddr && prop.flags == PageFlags::R
+            (va, Some((frame, prop))) if va.start == map_range.start && frame.start_paddr() == start_paddr && prop.flags == PageFlags::R
         ));
 
         // Creates a sibling page table (from the now-modified parent).

@@ -981,7 +974,7 @@ mod test {
                 .unwrap()
                 .query()
                 .unwrap(),
-            VmItem::NotMapped { .. }
+            (_, None)
         ));
 
         // Drops the parent page table.

@@ -990,7 +983,7 @@ mod test {
         // Confirms that the child VA remains mapped after the parent is dropped.
         assert!(matches!(
             child_space.cursor(&preempt_guard, &map_range).unwrap().query().unwrap(),
-            VmItem::Mapped { va, frame, prop } if va == map_range.start && frame.start_paddr() == start_paddr && prop.flags == PageFlags::R
+            (va, Some((frame, prop))) if va.start == map_range.start && frame.start_paddr() == start_paddr && prop.flags == PageFlags::R
         ));
 
         // Unmaps the range from the child.

@@ -1008,7 +1001,7 @@ mod test {
         // Confirms that the sibling mapping points back to the original frame's physical address.
         assert!(matches!(
             sibling_space.cursor(&preempt_guard, &map_range).unwrap().query().unwrap(),
-            VmItem::Mapped { va, frame, prop } if va == map_range.start && frame.start_paddr() == start_paddr && prop.flags == PageFlags::RW
+            (va, Some((frame, prop))) if va.start == map_range.start && frame.start_paddr() == start_paddr && prop.flags == PageFlags::RW
         ));
 
         // Confirms that the child remains unmapped.

@@ -1018,7 +1011,7 @@ mod test {
                 .unwrap()
                 .query()
                 .unwrap(),
-            VmItem::NotMapped { .. }
+            (_, None)
         ));
     }
 }
@@ -9,8 +9,7 @@ use core::{
 use align_ext::AlignExt;
 use ostd::{
     mm::{
-        tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, FrameAllocOptions, PageFlags, PageProperty,
-        UFrame, VmSpace,
+        tlb::TlbFlushOp, CachePolicy, FrameAllocOptions, PageFlags, PageProperty, UFrame, VmSpace,
     },
     task::disable_preempt,
 };

@@ -170,7 +169,7 @@ impl VmMapping {
             &preempt_guard,
             &(page_aligned_addr..page_aligned_addr + PAGE_SIZE),
         )?;
-        if let VmItem::Mapped { .. } = cursor.query().unwrap() {
+        if let (_, Some((_, _))) = cursor.query().unwrap() {
             return Ok(rss_increment);
         }
     }

@@ -186,16 +185,13 @@ impl VmMapping {
             &(page_aligned_addr..page_aligned_addr + PAGE_SIZE),
         )?;
 
-        match cursor.query().unwrap() {
-            VmItem::Mapped {
-                va,
-                frame,
-                mut prop,
-            } => {
+        let (va, item) = cursor.query().unwrap();
+        match item {
+            Some((frame, mut prop)) => {
                 if VmPerms::from(prop.flags).contains(page_fault_info.required_perms) {
                     // The page fault is already handled maybe by other threads.
                     // Just flush the TLB and return.
-                    TlbFlushOp::Address(va).perform_on_current();
+                    TlbFlushOp::Range(va).perform_on_current();
                     return Ok(0);
                 }
                 assert!(is_write);

@@ -217,7 +213,7 @@ impl VmMapping {
 
                 if self.is_shared || only_reference {
                     cursor.protect_next(PAGE_SIZE, |p| p.flags |= new_flags);
-                    cursor.flusher().issue_tlb_flush(TlbFlushOp::Address(va));
+                    cursor.flusher().issue_tlb_flush(TlbFlushOp::Range(va));
                     cursor.flusher().dispatch_tlb_flush();
                 } else {
                     let new_frame = duplicate_frame(&frame)?;

@@ -227,7 +223,7 @@ impl VmMapping {
                 }
                 cursor.flusher().sync_tlb_flush();
             }
-            VmItem::NotMapped { .. } => {
+            None => {
                 // Map a new frame to the page fault address.
                 let (frame, is_readonly) = match self.prepare_page(address, is_write) {
                     Ok((frame, is_readonly)) => (frame, is_readonly),

@@ -338,7 +334,7 @@ impl VmMapping {
         let operate =
             move |commit_fn: &mut dyn FnMut()
                   -> core::result::Result<UFrame, VmoCommitError>| {
-                if let VmItem::NotMapped { .. } = cursor.query().unwrap() {
+                if let (_, None) = cursor.query().unwrap() {
                     // We regard all the surrounding pages as accessed, no matter
                     // if it is really so. Then the hardware won't bother to update
                     // the accessed bit of the page table on following accesses.
@@ -14,7 +14,7 @@ use crate::{
     mm::{
         dma::Daddr,
         page_prop::{CachePolicy, PageProperty, PrivilegedPageFlags as PrivFlags},
-        page_table::{PageTableError, PageTableItem},
+        page_table::PageTableError,
         Frame, FrameAllocOptions, Paddr, PageFlags, PageTable, VmIo, PAGE_SIZE,
     },
     task::disable_preempt,

@@ -308,7 +308,6 @@ impl ContextTable {
         );
 
         let from = daddr..daddr + PAGE_SIZE;
-        let to = paddr..paddr + PAGE_SIZE;
         let prop = PageProperty {
             flags: PageFlags::RW,
             cache: CachePolicy::Uncacheable,

@@ -316,8 +315,11 @@ impl ContextTable {
         };
 
         let pt = self.get_or_create_page_table(device);
+        let preempt_guard = disable_preempt();
+        let mut cursor = pt.cursor_mut(&preempt_guard, &from).unwrap();
 
         // SAFETY: The safety is upheld by the caller.
-        unsafe { pt.map(&from, &to, prop).unwrap() };
+        unsafe { cursor.map((paddr, 1, prop)).unwrap() };
 
         Ok(())
     }

@@ -336,8 +338,8 @@ impl ContextTable {
             .unwrap();
 
         // SAFETY: This unmaps a page from the context table, which is always safe.
-        let item = unsafe { cursor.take_next(PAGE_SIZE) };
-        debug_assert!(matches!(item, PageTableItem::MappedUntracked { .. }));
+        let frag = unsafe { cursor.take_next(PAGE_SIZE) };
+        debug_assert!(frag.is_some());
 
         Ok(())
     }
@@ -19,13 +19,25 @@ use crate::{
 #[derive(Clone, Debug)]
 pub(crate) struct IommuPtConfig {}
 
-impl PageTableConfig for IommuPtConfig {
+// SAFETY: `item_into_raw` and `item_from_raw` are implemented correctly,
+unsafe impl PageTableConfig for IommuPtConfig {
     /// From section 3.6 in "Intel(R) Virtualization Technology for Directed I/O",
     /// only low canonical addresses can be used.
     const TOP_LEVEL_INDEX_RANGE: Range<usize> = 0..256;
 
     type E = PageTableEntry;
     type C = PagingConsts;
 
+    /// All mappings are untracked.
+    type Item = (Paddr, PagingLevel, PageProperty);
+
+    fn item_into_raw(item: Self::Item) -> (Paddr, PagingLevel, PageProperty) {
+        item
+    }
+
+    unsafe fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item {
+        (paddr, level, prop)
+    }
 }
 
 #[derive(Clone, Debug, Default)]
@@ -38,6 +38,7 @@ use crate::{
         page_prop::{CachePolicy, PageProperty},
         PageFlags, PrivilegedPageFlags as PrivFlags, MAX_USERSPACE_VADDR, PAGE_SIZE,
     },
+    task::disable_preempt,
     trap::call_irq_callback_functions,
 };
 
@@ -322,6 +323,8 @@ fn handle_user_page_fault(f: &mut TrapFrame, page_fault_addr: u64) {
 /// FIXME: this is a hack because we don't allocate kernel space for IO memory. We are currently
 /// using the linear mapping for IO memory. This is not a good practice.
 fn handle_kernel_page_fault(f: &TrapFrame, page_fault_vaddr: u64) {
+    let preempt_guard = disable_preempt();
+
     let error_code = PageFaultErrorCode::from_bits_truncate(f.error_code);
     debug!(
         "kernel page fault: address {:?}, error code {:?}",

@@ -362,23 +365,20 @@ fn handle_kernel_page_fault(f: &TrapFrame, page_fault_vaddr: u64) {
     } else {
         PrivFlags::GLOBAL
     });
+    let prop = PageProperty {
+        flags: PageFlags::RW,
+        cache: CachePolicy::Uncacheable,
+        priv_flags,
+    };
+
+    let mut cursor = page_table
+        .cursor_mut(&preempt_guard, &(vaddr..vaddr + PAGE_SIZE))
+        .unwrap();
 
     // SAFETY:
     // 1. We have checked that the page fault address falls within the address range of the direct
     //    mapping of physical memory.
     // 2. We map the address to the correct physical page with the correct flags, where the
     //    correctness follows the semantics of the direct mapping of physical memory.
-    unsafe {
-        page_table
-            .map(
-                &(vaddr..vaddr + PAGE_SIZE),
-                &(paddr..paddr + PAGE_SIZE),
-                PageProperty {
-                    flags: PageFlags::RW,
-                    cache: CachePolicy::Uncacheable,
-                    priv_flags,
-                },
-            )
-            .unwrap();
-    }
+    unsafe { cursor.map(crate::mm::kspace::MappedItem::Untracked(paddr, 1, prop)) }.unwrap();
 }
@@ -155,7 +155,9 @@ mod test {
 
     #[ktest]
     fn conflict_region() {
-        let io_mem_region_a = 0x4000_0000..0x4200_0000;
+        let max_paddr = 0x100_000_000_000; // 16 TB
+
+        let io_mem_region_a = max_paddr..max_paddr + 0x200_0000;
         let io_mem_region_b =
             (io_mem_region_a.end + PAGE_SIZE)..(io_mem_region_a.end + 10 * PAGE_SIZE);
         let range = vec![io_mem_region_a.clone(), io_mem_region_b.clone()];
@@ -12,7 +12,7 @@ pub(super) use self::allocator::init;
 pub(crate) use self::allocator::IoMemAllocatorBuilder;
 use crate::{
     mm::{
-        kspace::kvirt_area::{KVirtArea, Untracked},
+        kspace::kvirt_area::KVirtArea,
         page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
         FallibleVmRead, FallibleVmWrite, HasPaddr, Infallible, Paddr, PodOnce, VmIo, VmIoOnce,
         VmReader, VmWriter, PAGE_SIZE,

@@ -24,7 +24,7 @@ use crate::{
 /// I/O memory.
 #[derive(Debug, Clone)]
 pub struct IoMem {
-    kvirt_area: Arc<KVirtArea<Untracked>>,
+    kvirt_area: Arc<KVirtArea>,
     // The actually used range for MMIO is `kvirt_area.start + offset..kvirt_area.start + offset + limit`
     offset: usize,
     limit: usize,

@@ -86,6 +86,9 @@ impl IoMem {
         let first_page_start = range.start.align_down(PAGE_SIZE);
         let last_page_end = range.end.align_up(PAGE_SIZE);
 
+        let frames_range = first_page_start..last_page_end;
+        let area_size = frames_range.len();
+
         #[cfg(target_arch = "x86_64")]
         let priv_flags = crate::arch::if_tdx_enabled!({
             assert!(

@@ -95,7 +98,7 @@ impl IoMem {
                 range.end,
             );
 
-            let pages = (last_page_end - first_page_start) / PAGE_SIZE;
+            let num_pages = area_size / PAGE_SIZE;
             // SAFETY:
             // - The range `first_page_start..last_page_end` is always page aligned.
             // - FIXME: We currently do not limit the I/O memory allocator with the maximum GPA,

@@ -105,7 +108,7 @@ impl IoMem {
             // - The caller guarantees that operations on the I/O memory do not have any side
             //   effects that may cause soundness problems, so the pages can safely be viewed as
             //   untyped memory.
-            unsafe { crate::arch::tdx_guest::unprotect_gpa_range(first_page_start, pages).unwrap() };
+            unsafe { crate::arch::tdx_guest::unprotect_gpa_range(first_page_start, num_pages).unwrap() };
 
             PrivilegedPageFlags::SHARED
         } else {

@@ -120,19 +123,12 @@ impl IoMem {
             priv_flags,
         };
 
-        // SAFETY: The caller of `IoMem::new()` and the constructor of `new_kvirt_area` has ensured the
-        // safety of this mapping.
-        let new_kvirt_area = unsafe {
-            KVirtArea::<Untracked>::map_untracked_pages(
-                last_page_end - first_page_start,
-                0,
-                first_page_start..last_page_end,
-                prop,
-            )
-        };
+        // SAFETY: The caller of `IoMem::new()` ensures that the given
+        // physical address range is I/O memory, so it is safe to map.
+        let kva = unsafe { KVirtArea::map_untracked_frames(area_size, 0, frames_range, prop) };
 
         Self {
-            kvirt_area: Arc::new(new_kvirt_area),
+            kvirt_area: Arc::new(kva),
             offset: range.start - first_page_start,
             limit: range.len(),
             pa: range.start,
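Note: `IoMem::new()` maps a whole number of pages even when the requested MMIO range is not page-aligned, and then remembers the byte offset of the real range inside the mapped area. A small self-contained sketch of that alignment arithmetic (the `PAGE_SIZE`, `align_down`, and `align_up` helpers are re-implemented here purely for illustration; the real code uses the `align_ext` helpers):

// Toy worked example of the page-alignment bookkeeping done by IoMem::new().
const PAGE_SIZE: usize = 4096;

fn align_down(x: usize, align: usize) -> usize { x & !(align - 1) }
fn align_up(x: usize, align: usize) -> usize { (x + align - 1) & !(align - 1) }

fn main() {
    // An example MMIO range that starts in the middle of a page.
    let range = 0xfed0_0400..0xfed0_0c00;

    let first_page_start = align_down(range.start, PAGE_SIZE); // 0xfed0_0000
    let last_page_end = align_up(range.end, PAGE_SIZE); // 0xfed0_1000

    let area_size = last_page_end - first_page_start; // one whole page here
    let num_pages = area_size / PAGE_SIZE;
    let offset = range.start - first_page_start; // where the MMIO bytes start in the area
    let limit = range.end - range.start; // how many bytes are actually usable

    assert_eq!((area_size, num_pages), (PAGE_SIZE, 1));
    assert_eq!((offset, limit), (0x400, 0x800));
}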
@@ -154,7 +154,8 @@ impl Drop for DmaCoherentInner {
             DmaType::Iommu => {
                 for i in 0..frame_count {
                     let paddr = start_paddr + (i * PAGE_SIZE);
-                    iommu::unmap(paddr).unwrap();
+                    iommu::unmap(paddr as Daddr).unwrap();
+                    // FIXME: After dropping it could be reused. IOTLB needs to be flushed.
                 }
             }
         }
@@ -193,7 +193,8 @@ impl Drop for DmaStreamInner {
             DmaType::Iommu => {
                 for i in 0..frame_count {
                     let paddr = start_paddr + (i * PAGE_SIZE);
-                    iommu::unmap(paddr).unwrap();
+                    iommu::unmap(paddr as Daddr).unwrap();
+                    // FIXME: After dropping it could be reused. IOTLB needs to be flushed.
                 }
             }
         }
@@ -35,7 +35,7 @@ mod dma_coherent {
         assert_eq!(dma_coherent.nbytes(), PAGE_SIZE);
         let page_table = KERNEL_PAGE_TABLE.get().unwrap();
         let vaddr = paddr_to_vaddr(segment.start_paddr());
-        assert!(page_table.query(vaddr).unwrap().1.cache == CachePolicy::Uncacheable);
+        assert!(page_table.page_walk(vaddr).unwrap().1.cache == CachePolicy::Uncacheable);
     }
 
     #[ktest]
@@ -550,11 +550,16 @@ fn alloc_meta_frames(tot_nr_frames: usize) -> (usize, Paddr) {
     (nr_meta_pages, start_paddr)
 }
 
-/// The metadata of physical pages that cannot be allocated for general use.
+/// Unusable memory metadata. Cannot be used for any purposes.
 #[derive(Debug)]
 pub struct UnusableMemoryMeta;
 impl_frame_meta_for!(UnusableMemoryMeta);
 
+/// Reserved memory metadata. Maybe later used as I/O memory.
+#[derive(Debug)]
+pub struct ReservedMemoryMeta;
+impl_frame_meta_for!(ReservedMemoryMeta);
+
 /// The metadata of physical pages that contains the kernel itself.
 #[derive(Debug, Default)]
 pub struct KernelMeta;

@@ -580,12 +585,12 @@ fn mark_unusable_ranges() {
     {
         match region.typ() {
             MemoryRegionType::BadMemory => mark_ranges!(region, UnusableMemoryMeta),
-            MemoryRegionType::Unknown => mark_ranges!(region, UnusableMemoryMeta),
+            MemoryRegionType::Unknown => mark_ranges!(region, ReservedMemoryMeta),
             MemoryRegionType::NonVolatileSleep => mark_ranges!(region, UnusableMemoryMeta),
-            MemoryRegionType::Reserved => mark_ranges!(region, UnusableMemoryMeta),
+            MemoryRegionType::Reserved => mark_ranges!(region, ReservedMemoryMeta),
             MemoryRegionType::Kernel => mark_ranges!(region, KernelMeta),
             MemoryRegionType::Module => mark_ranges!(region, UnusableMemoryMeta),
-            MemoryRegionType::Framebuffer => mark_ranges!(region, UnusableMemoryMeta),
+            MemoryRegionType::Framebuffer => mark_ranges!(region, ReservedMemoryMeta),
             MemoryRegionType::Reclaimable => mark_ranges!(region, UnusableMemoryMeta),
             MemoryRegionType::Usable => {} // By default it is initialized as usable.
         }
@@ -91,6 +91,13 @@ impl<M: AnyFrameMeta + ?Sized> core::fmt::Debug for Frame<M> {
     }
 }
 
+impl<M: AnyFrameMeta + ?Sized> PartialEq for Frame<M> {
+    fn eq(&self, other: &Self) -> bool {
+        self.start_paddr() == other.start_paddr()
+    }
+}
+impl<M: AnyFrameMeta + ?Sized> Eq for Frame<M> {}
+
 impl<M: AnyFrameMeta> Frame<M> {
     /// Gets a [`Frame`] with a specific usage from a raw, unused page.
     ///

@@ -205,6 +212,8 @@ impl<M: AnyFrameMeta + ?Sized> Frame<M> {
     /// Also, the caller ensures that the usage of the frame is correct. There's
     /// no checking of the usage in this function.
     pub(in crate::mm) unsafe fn from_raw(paddr: Paddr) -> Self {
+        debug_assert!(paddr < max_paddr());
+
         let vaddr = mapping::frame_to_meta::<PagingConsts>(paddr);
         let ptr = vaddr as *const MetaSlot;
 
@@ -2,54 +2,28 @@
 
 //! Kernel virtual memory allocation
 
-use core::{marker::PhantomData, ops::Range};
+use core::ops::Range;
 
-use super::{KERNEL_PAGE_TABLE, TRACKED_MAPPED_PAGES_RANGE, VMALLOC_VADDR_RANGE};
+use super::{KERNEL_PAGE_TABLE, VMALLOC_VADDR_RANGE};
 use crate::{
     mm::{
         frame::{meta::AnyFrameMeta, Frame},
+        kspace::{KernelPtConfig, MappedItem},
         page_prop::PageProperty,
-        page_table::PageTableItem,
+        page_table::largest_pages,
         Paddr, Vaddr, PAGE_SIZE,
     },
     task::disable_preempt,
     util::range_alloc::RangeAllocator,
 };
 
-static KVIRT_AREA_TRACKED_ALLOCATOR: RangeAllocator =
-    RangeAllocator::new(TRACKED_MAPPED_PAGES_RANGE);
-static KVIRT_AREA_UNTRACKED_ALLOCATOR: RangeAllocator = RangeAllocator::new(VMALLOC_VADDR_RANGE);
+static KVIRT_AREA_ALLOCATOR: RangeAllocator = RangeAllocator::new(VMALLOC_VADDR_RANGE);
 
-#[derive(Debug)]
-pub struct Tracked;
-#[derive(Debug)]
-pub struct Untracked;
-
-pub trait AllocatorSelector {
-    fn select_allocator() -> &'static RangeAllocator;
-}
-
-impl AllocatorSelector for Tracked {
-    fn select_allocator() -> &'static RangeAllocator {
-        &KVIRT_AREA_TRACKED_ALLOCATOR
-    }
-}
-
-impl AllocatorSelector for Untracked {
-    fn select_allocator() -> &'static RangeAllocator {
-        &KVIRT_AREA_UNTRACKED_ALLOCATOR
-    }
-}
-
-/// Kernel Virtual Area.
+/// Kernel virtual area.
 ///
-/// A tracked kernel virtual area ([`KVirtArea<Tracked>`]) manages a range of
-/// memory in [`TRACKED_MAPPED_PAGES_RANGE`]. It can map a portion or the
-/// entirety of its virtual memory pages to frames tracked with metadata.
-///
-/// An untracked kernel virtual area ([`KVirtArea<Untracked>`]) manages a range
-/// of memory in [`VMALLOC_VADDR_RANGE`]. It can map a portion or the entirety
-/// of virtual memory to physical addresses not tracked with metadata.
+/// A kernel virtual area manages a range of memory in [`VMALLOC_VADDR_RANGE`].
+/// It can map a portion or the entirety of its virtual memory pages to
+/// physical memory, whether tracked with metadata or not.
 ///
 /// It is the caller's responsibility to ensure TLB coherence before using the
 /// mapped virtual address on a certain CPU.

@@ -59,12 +33,11 @@ impl AllocatorSelector for Untracked {
 // `KVirtArea`. However, `IoMem` need some non trivial refactoring to support
 // being implemented on a `!Send` and `!Sync` guard.
 #[derive(Debug)]
-pub struct KVirtArea<M: AllocatorSelector + 'static> {
+pub struct KVirtArea {
     range: Range<Vaddr>,
-    phantom: PhantomData<M>,
 }
 
-impl<M: AllocatorSelector + 'static> KVirtArea<M> {
+impl KVirtArea {
     pub fn start(&self) -> Vaddr {
         self.range.start
     }

@@ -83,7 +56,7 @@ impl<M: AllocatorSelector + 'static> KVirtArea<M> {
     }
 
     #[cfg(ktest)]
-    fn query_page(&self, addr: Vaddr) -> PageTableItem {
+    pub fn query(&self, addr: Vaddr) -> Option<super::MappedItem> {
         use align_ext::AlignExt;
 
         assert!(self.start() <= addr && self.end() >= addr);

@@ -92,12 +65,10 @@ impl<M: AllocatorSelector + 'static> KVirtArea<M> {
         let page_table = KERNEL_PAGE_TABLE.get().unwrap();
         let preempt_guard = disable_preempt();
         let mut cursor = page_table.cursor(&preempt_guard, &vaddr).unwrap();
-        cursor.query().unwrap()
+        cursor.query().unwrap().1
     }
-}
 
-impl KVirtArea<Tracked> {
-    /// Create a kernel virtual area and map pages into it.
+    /// Create a kernel virtual area and map tracked pages into it.
     ///
     /// The created virtual area will have a size of `area_size`, and the pages
     /// will be mapped starting from `map_offset` in the area.

@@ -108,66 +79,43 @@ impl KVirtArea<Tracked> {
     /// - the area size is not a multiple of [`PAGE_SIZE`];
     /// - the map offset is not aligned to [`PAGE_SIZE`];
     /// - the map offset plus the size of the pages exceeds the area size.
-    pub fn map_pages<T: AnyFrameMeta>(
+    pub fn map_frames<T: AnyFrameMeta>(
         area_size: usize,
         map_offset: usize,
-        pages: impl Iterator<Item = Frame<T>>,
+        frames: impl Iterator<Item = Frame<T>>,
         prop: PageProperty,
     ) -> Self {
         assert!(area_size % PAGE_SIZE == 0);
         assert!(map_offset % PAGE_SIZE == 0);
-        let range = Tracked::select_allocator().alloc(area_size).unwrap();
+
+        let range = KVIRT_AREA_ALLOCATOR.alloc(area_size).unwrap();
         let cursor_range = range.start + map_offset..range.end;
+
         let page_table = KERNEL_PAGE_TABLE.get().unwrap();
         let preempt_guard = disable_preempt();
         let mut cursor = page_table
             .cursor_mut(&preempt_guard, &cursor_range)
             .unwrap();
-        for page in pages.into_iter() {
-            // SAFETY: The constructor of the `KVirtArea<Tracked>` structure
-            // has already ensured that this mapping does not affect kernel's
-            // memory safety.
-            if let Some(_old) = unsafe { cursor.map(page.into(), prop) } {
-                panic!("Pages mapped in a newly allocated `KVirtArea`");
-            }
-        }
-        Self {
-            range,
-            phantom: PhantomData,
-        }
-    }
 
-    /// Gets the mapped tracked page.
-    ///
-    /// This function returns None if the address is not mapped (`NotMapped`),
-    /// while panics if the address is mapped to a `MappedUntracked` or `PageTableNode` page.
-    #[cfg(ktest)]
-    pub fn get_page(&self, addr: Vaddr) -> Option<Frame<dyn AnyFrameMeta>> {
-        let query_result = self.query_page(addr);
-        match query_result {
-            PageTableItem::Mapped {
-                va: _,
-                page,
-                prop: _,
-            } => Some(page),
-            PageTableItem::NotMapped { .. } => None,
-            _ => {
-                panic!(
-                    "Found '{:?}' mapped into tracked `KVirtArea`, expected `Mapped`",
-                    query_result
-                );
-            }
-        }
-    }
-}
+        for frame in frames.into_iter() {
+            // SAFETY: The constructor of the `KVirtArea` has already ensured
+            // that this mapping does not affect kernel's memory safety.
+            unsafe { cursor.map(MappedItem::Tracked(frame.into(), prop)) }
+                .expect("Failed to map frame in a new `KVirtArea`");
+        }
+
+        Self { range }
+    }
 
-impl KVirtArea<Untracked> {
     /// Creates a kernel virtual area and maps untracked frames into it.
     ///
     /// The created virtual area will have a size of `area_size`, and the
     /// physical addresses will be mapped starting from `map_offset` in
     /// the area.
     ///
     /// You can provide a `0..0` physical range to create a virtual area without
     /// mapping any physical memory.
     ///
     /// # Panics
     ///
     /// This function panics if

@@ -175,8 +123,9 @@ impl KVirtArea<Untracked> {
     /// - the map offset is not aligned to [`PAGE_SIZE`];
     /// - the provided physical range is not aligned to [`PAGE_SIZE`];
     /// - the map offset plus the length of the physical range exceeds the
-    ///   area size.
-    pub unsafe fn map_untracked_pages(
+    ///   area size;
+    /// - the provided physical range contains tracked physical addresses.
+    pub unsafe fn map_untracked_frames(
         area_size: usize,
         map_offset: usize,
         pa_range: Range<Paddr>,

@@ -187,50 +136,29 @@ impl KVirtArea<Untracked> {
         assert!(area_size % PAGE_SIZE == 0);
         assert!(map_offset % PAGE_SIZE == 0);
         assert!(map_offset + pa_range.len() <= area_size);
-        let range = Untracked::select_allocator().alloc(area_size).unwrap();
+
+        let range = KVIRT_AREA_ALLOCATOR.alloc(area_size).unwrap();
 
         if !pa_range.is_empty() {
-            let va_range = range.start + map_offset..range.start + map_offset + pa_range.len();
+            let len = pa_range.len();
+            let va_range = range.start + map_offset..range.start + map_offset + len;
 
             let page_table = KERNEL_PAGE_TABLE.get().unwrap();
             let preempt_guard = disable_preempt();
             let mut cursor = page_table.cursor_mut(&preempt_guard, &va_range).unwrap();
-            // SAFETY: The caller of `map_untracked_pages` has ensured the safety of this mapping.
-            unsafe {
-                cursor.map_pa(&pa_range, prop);
-            }
-        }
-        Self {
-            range,
-            phantom: PhantomData,
-        }
-    }
 
-    /// Gets the mapped untracked page.
-    ///
-    /// This function returns None if the address is not mapped (`NotMapped`),
-    /// while panics if the address is mapped to a `Mapped` or `PageTableNode` page.
-    #[cfg(ktest)]
-    pub fn get_untracked_page(&self, addr: Vaddr) -> Option<(Paddr, usize)> {
-        let query_result = self.query_page(addr);
-        match query_result {
-            PageTableItem::MappedUntracked {
-                va: _,
-                pa,
-                len,
-                prop: _,
-            } => Some((pa, len)),
-            PageTableItem::NotMapped { .. } => None,
-            _ => {
-                panic!(
-                    "Found '{:?}' mapped into untracked `KVirtArea`, expected `MappedUntracked`",
-                    query_result
-                );
-            }
-        }
+            for (pa, level) in largest_pages::<KernelPtConfig>(va_range.start, pa_range.start, len)
+            {
+                // SAFETY: The caller of `map_untracked_frames` has ensured the safety of this mapping.
+                let _ = unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) };
+            }
+        }
+
+        Self { range }
     }
 }
 
-impl<M: AllocatorSelector + 'static> Drop for KVirtArea<M> {
+impl Drop for KVirtArea {
     fn drop(&mut self) {
         // 1. unmap all mapped pages.
         let page_table = KERNEL_PAGE_TABLE.get().unwrap();

@@ -238,17 +166,13 @@ impl<M: AllocatorSelector + 'static> Drop for KVirtArea<M> {
         let preempt_guard = disable_preempt();
         let mut cursor = page_table.cursor_mut(&preempt_guard, &range).unwrap();
         loop {
-            let result = unsafe { cursor.take_next(self.end() - cursor.virt_addr()) };
-            if matches!(&result, PageTableItem::NotMapped { .. }) {
+            // SAFETY: The range is under `KVirtArea` so it is safe to unmap.
+            let Some(frag) = (unsafe { cursor.take_next(self.end() - cursor.virt_addr()) }) else {
                 break;
-            }
-            // Dropping previously mapped pages is fine since accessing with
-            // the virtual addresses in another CPU while we are dropping is
-            // not allowed.
-            drop(result);
+            };
+            drop(frag);
         }
         // 2. free the virtual block
-        let allocator = M::select_allocator();
-        allocator.free(range);
+        KVIRT_AREA_ALLOCATOR.free(range);
     }
 }
@@ -11,16 +11,14 @@
 //!
 //! ```text
 //! +-+ <- the highest used address (0xffff_ffff_ffff_0000)
-//! | | For the kernel code, 1 GiB. Mapped frames are tracked.
+//! | | For the kernel code, 1 GiB.
 //! +-+ <- 0xffff_ffff_8000_0000
 //! | |
 //! | | Unused hole.
 //! +-+ <- 0xffff_e100_0000_0000
-//! | | For frame metadata, 1 TiB. Mapped frames are untracked.
+//! | | For frame metadata, 1 TiB.
 //! +-+ <- 0xffff_e000_0000_0000
-//! | | For [`KVirtArea<Tracked>`], 16 TiB. Mapped pages are tracked with handles.
-//! +-+ <- 0xffff_d000_0000_0000
-//! | | For [`KVirtArea<Untracked>`], 16 TiB. Mapped pages are untracked.
+//! | | For [`KVirtArea`], 32 TiB.
 //! +-+ <- the middle of the higher half (0xffff_c000_0000_0000)
 //! | |
 //! | |

@@ -47,16 +45,17 @@ mod test;
 
 use super::{
     frame::{
-        meta::{mapping, KernelMeta, MetaPageMeta},
-        Frame, Segment,
+        meta::{mapping, AnyFrameMeta, MetaPageMeta},
+        Segment,
     },
     page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
     page_table::{PageTable, PageTableConfig},
-    Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE,
+    Frame, Paddr, PagingConstsTrait, Vaddr,
 };
 use crate::{
     arch::mm::{PageTableEntry, PagingConsts},
     boot::memory_region::MemoryRegionType,
+    mm::{page_table::largest_pages, PagingLevel},
     task::disable_preempt,
 };
 

@@ -90,12 +89,8 @@ const FRAME_METADATA_BASE_VADDR: Vaddr = 0xffff_e000_0000_0000 << ADDR_WIDTH_SHIFT
 pub(in crate::mm) const FRAME_METADATA_RANGE: Range<Vaddr> =
     FRAME_METADATA_BASE_VADDR..FRAME_METADATA_CAP_VADDR;
 
-const TRACKED_MAPPED_PAGES_BASE_VADDR: Vaddr = 0xffff_d000_0000_0000 << ADDR_WIDTH_SHIFT;
-pub const TRACKED_MAPPED_PAGES_RANGE: Range<Vaddr> =
-    TRACKED_MAPPED_PAGES_BASE_VADDR..FRAME_METADATA_BASE_VADDR;
-
 const VMALLOC_BASE_VADDR: Vaddr = 0xffff_c000_0000_0000 << ADDR_WIDTH_SHIFT;
-pub const VMALLOC_VADDR_RANGE: Range<Vaddr> = VMALLOC_BASE_VADDR..TRACKED_MAPPED_PAGES_BASE_VADDR;
+pub const VMALLOC_VADDR_RANGE: Range<Vaddr> = VMALLOC_BASE_VADDR..FRAME_METADATA_BASE_VADDR;
 
 /// The base address of the linear mapping of all physical
 /// memory in the kernel address space.

@@ -108,29 +103,61 @@ pub fn paddr_to_vaddr(pa: Paddr) -> usize {
     pa + LINEAR_MAPPING_BASE_VADDR
 }
 
-/// Returns whether the given address should be mapped as tracked.
-///
-/// About what is tracked mapping, see [`crate::mm::frame::meta::MapTrackingStatus`].
-pub(crate) fn should_map_as_tracked(addr: Vaddr) -> bool {
-    !(LINEAR_MAPPING_VADDR_RANGE.contains(&addr) || VMALLOC_VADDR_RANGE.contains(&addr))
-}
-
-#[derive(Clone, Debug)]
-pub(crate) struct KernelPtConfig {}
-
-impl PageTableConfig for KernelPtConfig {
-    const TOP_LEVEL_INDEX_RANGE: Range<usize> = 256..512;
-
-    type E = PageTableEntry;
-    type C = PagingConsts;
-}
-
 /// The kernel page table instance.
 ///
 /// It manages the kernel mapping of all address spaces by sharing the kernel part. And it
 /// is unlikely to be activated.
 pub static KERNEL_PAGE_TABLE: Once<PageTable<KernelPtConfig>> = Once::new();
 
+#[derive(Clone, Debug)]
+pub(crate) struct KernelPtConfig {}
+
+// We use the first available PTE bit to mark the frame as tracked.
+// SAFETY: `item_into_raw` and `item_from_raw` are implemented correctly,
+unsafe impl PageTableConfig for KernelPtConfig {
+    const TOP_LEVEL_INDEX_RANGE: Range<usize> = 256..512;
+    const TOP_LEVEL_CAN_UNMAP: bool = false;
+
+    type E = PageTableEntry;
+    type C = PagingConsts;
+
+    type Item = MappedItem;
+
+    fn item_into_raw(item: Self::Item) -> (Paddr, PagingLevel, PageProperty) {
+        match item {
+            MappedItem::Tracked(frame, mut prop) => {
+                debug_assert!(!prop.flags.contains(PageFlags::AVAIL1));
+                prop.flags |= PageFlags::AVAIL1;
+                let level = frame.map_level();
+                let paddr = frame.into_raw();
+                (paddr, level, prop)
+            }
+            MappedItem::Untracked(pa, level, mut prop) => {
+                debug_assert!(!prop.flags.contains(PageFlags::AVAIL1));
+                prop.flags -= PageFlags::AVAIL1;
+                (pa, level, prop)
+            }
+        }
+    }
+
+    unsafe fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item {
+        if prop.flags.contains(PageFlags::AVAIL1) {
+            debug_assert_eq!(level, 1);
+            // SAFETY: The caller ensures safety.
+            let frame = unsafe { Frame::<dyn AnyFrameMeta>::from_raw(paddr) };
+            MappedItem::Tracked(frame, prop)
+        } else {
+            MappedItem::Untracked(paddr, level, prop)
+        }
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub(crate) enum MappedItem {
+    Tracked(Frame<dyn AnyFrameMeta>, PageProperty),
+    Untracked(Paddr, PagingLevel, PageProperty),
+}
+
 /// Initializes the kernel page table.
 ///
 /// This function should be called after:
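Note: the kernel page table distinguishes tracked frames from untracked physical ranges by stashing a software-available PTE flag (`AVAIL1`) when an item is lowered into its raw `(paddr, level, prop)` form, and reading it back when the item is reconstructed. Below is a self-contained toy model of that round trip (plain integer bit flags and a plain enum; it illustrates the encoding idea only, not the actual ostd types):

// Toy model of the AVAIL1-bit tracked/untracked encoding; illustration only.
const AVAIL1: u8 = 0b0000_0100; // a software-available flag bit (toy value)

#[derive(Clone, Debug, PartialEq, Eq)]
enum MappedItem {
    Tracked { paddr: usize },              // frame ownership tracked by metadata
    Untracked { paddr: usize, level: u8 }, // raw physical range, possibly a huge page
}

// Lower an item into the raw (paddr, level, flags) triple stored in a PTE.
fn item_into_raw(item: MappedItem, mut flags: u8) -> (usize, u8, u8) {
    match item {
        MappedItem::Tracked { paddr } => {
            flags |= AVAIL1; // mark "this PTE owns a tracked frame"
            (paddr, 1, flags) // tracked mappings are always level-1 here
        }
        MappedItem::Untracked { paddr, level } => {
            flags &= !AVAIL1; // make sure the mark is clear
            (paddr, level, flags)
        }
    }
}

// Rebuild the item from the raw triple by checking the mark.
fn item_from_raw(paddr: usize, level: u8, flags: u8) -> MappedItem {
    if flags & AVAIL1 != 0 {
        assert_eq!(level, 1);
        MappedItem::Tracked { paddr }
    } else {
        MappedItem::Untracked { paddr, level }
    }
}

fn main() {
    let (pa, lvl, fl) = item_into_raw(MappedItem::Tracked { paddr: 0x8000 }, 0);
    assert_eq!(item_from_raw(pa, lvl, fl), MappedItem::Tracked { paddr: 0x8000 });

    let (pa, lvl, fl) = item_into_raw(MappedItem::Untracked { paddr: 0x20_0000, level: 2 }, 0);
    assert_eq!(item_from_raw(pa, lvl, fl), MappedItem::Untracked { paddr: 0x20_0000, level: 2 });
}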
@@ -150,15 +177,16 @@ pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
     {
         let max_paddr = crate::mm::frame::max_paddr();
         let from = LINEAR_MAPPING_BASE_VADDR..LINEAR_MAPPING_BASE_VADDR + max_paddr;
-        let to = 0..max_paddr;
         let prop = PageProperty {
             flags: PageFlags::RW,
             cache: CachePolicy::Writeback,
             priv_flags: PrivilegedPageFlags::GLOBAL,
         };
+        let mut cursor = kpt.cursor_mut(&preempt_guard, &from).unwrap();
+        for (pa, level) in largest_pages::<KernelPtConfig>(from.start, 0, max_paddr) {
             // SAFETY: we are doing the linear mapping for the kernel.
-        unsafe {
-            kpt.map(&from, &to, prop).unwrap();
+            unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) }
+                .expect("Kernel linear address space is mapped twice");
         }
     }
 
@@ -172,28 +200,16 @@ pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
             priv_flags: PrivilegedPageFlags::GLOBAL,
         };
         let mut cursor = kpt.cursor_mut(&preempt_guard, &from).unwrap();
-        for meta_page in meta_pages {
-            // SAFETY: we are doing the metadata mappings for the kernel.
-            unsafe {
-                let _old = cursor.map(meta_page.into(), prop);
-            }
-        }
-    }
-
-    // Map for the I/O area.
-    // TODO: we need to have an allocator to allocate kernel space for
-    // the I/O areas, rather than doing it using the linear mappings.
+        // We use untracked mapping so that we can benefit from huge pages.
+        // We won't unmap them anyway, so there's no leaking problem yet.
+        // TODO: support tracked huge page mapping.
+        let pa_range = meta_pages.into_raw();
+        for (pa, level) in
+            largest_pages::<KernelPtConfig>(from.start, pa_range.start, pa_range.len())
+        {
-        let to = 0x8_0000_0000..0x9_0000_0000;
-        let from = LINEAR_MAPPING_BASE_VADDR + to.start..LINEAR_MAPPING_BASE_VADDR + to.end;
-        let prop = PageProperty {
-            flags: PageFlags::RW,
-            cache: CachePolicy::Uncacheable,
-            priv_flags: PrivilegedPageFlags::GLOBAL,
-        };
-        // SAFETY: we are doing I/O mappings for the kernel.
-        unsafe {
-            kpt.map(&from, &to, prop).unwrap();
+            // SAFETY: We are doing the metadata mappings for the kernel.
+            unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) }
+                .expect("Frame metadata address space is mapped twice");
         }
     }
 
@@ -206,21 +222,17 @@ pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
         .find(|r| r.typ() == MemoryRegionType::Kernel)
         .unwrap();
     let offset = kernel_loaded_offset();
-    let to = region.base()..region.end();
-    let from = to.start + offset..to.end + offset;
+    let from = region.base() + offset..region.end() + offset;
     let prop = PageProperty {
         flags: PageFlags::RWX,
         cache: CachePolicy::Writeback,
         priv_flags: PrivilegedPageFlags::GLOBAL,
     };
     let mut cursor = kpt.cursor_mut(&preempt_guard, &from).unwrap();
-    for frame_paddr in to.step_by(PAGE_SIZE) {
-        // SAFETY: They were initialized at `super::frame::meta::init`.
-        let page = unsafe { Frame::<KernelMeta>::from_raw(frame_paddr) };
-        // SAFETY: we are doing mappings for the kernel.
-        unsafe {
-            let _old = cursor.map(page.into(), prop);
-        }
+    for (pa, level) in largest_pages::<KernelPtConfig>(from.start, region.base(), from.len()) {
+        // SAFETY: we are doing the kernel code mapping.
+        unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) }
+            .expect("Kernel code mapped twice");
     }
 }
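Note: several of the hunks above replace a bulk `map(&va_range, &pa_range, prop)` call with a loop over `largest_pages::<KernelPtConfig>(va, pa, len)`, which yields `(paddr, level)` chunks so that each chunk can use the largest page size that the current alignment and remaining length allow. A self-contained sketch of that chunking logic follows (the page sizes, level numbering, and return shape here are assumptions for illustration; the real `largest_pages` lives in ostd's page-table module):

// Toy version of a largest-pages chunking iterator; illustration only.

/// Page size of a mapping at `level` (level 1 = 4 KiB, level 2 = 2 MiB, level 3 = 1 GiB).
fn page_size(level: u32) -> usize {
    4096 << (9 * (level - 1))
}

/// Split `[pa, pa + len)` mapped at `[va, va + len)` into (paddr, level) chunks,
/// always picking the largest level whose page size divides the current va and pa
/// and still fits in the remaining length.
fn largest_pages(mut va: usize, mut pa: usize, mut len: usize) -> Vec<(usize, u32)> {
    assert_eq!(va % 4096, 0);
    assert_eq!(pa % 4096, 0);
    assert_eq!(len % 4096, 0);

    let mut chunks = Vec::new();
    while len > 0 {
        let mut level = 3; // highest level considered in this sketch
        while level > 1
            && (va % page_size(level) != 0 || pa % page_size(level) != 0 || len < page_size(level))
        {
            level -= 1;
        }
        chunks.push((pa, level));
        let size = page_size(level);
        va += size;
        pa += size;
        len -= size;
    }
    chunks
}

fn main() {
    // Mapping 4 MiB + 8 KiB starting at 2 MiB-aligned addresses:
    // expect two 2 MiB chunks followed by two 4 KiB chunks.
    let chunks = largest_pages(0xffff_8000_0020_0000, 0x20_0000, 4 * 1024 * 1024 + 8192);
    assert_eq!(
        chunks,
        vec![(0x20_0000, 2), (0x40_0000, 2), (0x60_0000, 1), (0x60_1000, 1)]
    );
    println!("{chunks:?}");
}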
@@ -2,17 +2,21 @@
 
 use crate::{
     mm::{
+        frame::max_paddr,
         kspace::{
-            kvirt_area::{KVirtArea, Tracked, Untracked},
-            paddr_to_vaddr, should_map_as_tracked, LINEAR_MAPPING_BASE_VADDR,
-            TRACKED_MAPPED_PAGES_RANGE, VMALLOC_VADDR_RANGE,
+            kvirt_area::KVirtArea, paddr_to_vaddr, MappedItem, LINEAR_MAPPING_BASE_VADDR,
+            VMALLOC_VADDR_RANGE,
         },
-        page_prop::PageProperty,
+        page_prop::{CachePolicy, PageFlags, PageProperty},
         Frame, FrameAllocOptions, Paddr, PAGE_SIZE,
     },
     prelude::*,
 };
 
+fn default_prop() -> PageProperty {
+    PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback)
+}
+
 #[ktest]
 fn kvirt_area_tracked_map_pages() {
     let size = 2 * PAGE_SIZE;

@@ -21,28 +25,7 @@ fn kvirt_area_tracked_map_pages() {
         .unwrap();
     let start_paddr = frames.start_paddr();
 
-    let kvirt_area =
-        KVirtArea::<Tracked>::map_pages(size, 0, frames.into_iter(), PageProperty::new_absent());
-
-    assert_eq!(kvirt_area.len(), size);
-    assert!(kvirt_area.start() >= TRACKED_MAPPED_PAGES_RANGE.start);
-    assert!(kvirt_area.end() <= TRACKED_MAPPED_PAGES_RANGE.end);
-
-    for i in 0..2 {
-        let addr = kvirt_area.start() + i * PAGE_SIZE;
-        let page = kvirt_area.get_page(addr).unwrap();
-        assert_eq!(page.start_paddr(), start_paddr + (i * PAGE_SIZE));
-    }
-}
-
-#[ktest]
-fn kvirt_area_untracked_map_pages() {
-    let size = 2 * PAGE_SIZE;
-    let pa_range = 0..2 * PAGE_SIZE as Paddr;
-
-    let kvirt_area = unsafe {
-        KVirtArea::<Untracked>::map_untracked_pages(size, 0, pa_range, PageProperty::new_absent())
-    };
+    let kvirt_area = KVirtArea::map_frames(size, 0, frames.into_iter(), default_prop());
 
     assert_eq!(kvirt_area.len(), size);
     assert!(kvirt_area.start() >= VMALLOC_VADDR_RANGE.start);

@@ -50,9 +33,35 @@ fn kvirt_area_untracked_map_pages() {
 
     for i in 0..2 {
         let addr = kvirt_area.start() + i * PAGE_SIZE;
-        let (pa, len) = kvirt_area.get_untracked_page(addr).unwrap();
-        assert_eq!(pa, (i * PAGE_SIZE) as Paddr);
-        assert_eq!(len, PAGE_SIZE);
+        let MappedItem::Tracked(page, _) = kvirt_area.query(addr).unwrap() else {
+            panic!("Expected a tracked page");
+        };
+        assert_eq!(page.start_paddr(), start_paddr + (i * PAGE_SIZE));
+    }
+}
+
+#[ktest]
+fn kvirt_area_untracked_map_pages() {
+    let max_paddr = max_paddr();
+
+    let size = 2 * PAGE_SIZE;
+    let pa_range = max_paddr..max_paddr + 2 * PAGE_SIZE as Paddr;
+
+    let kvirt_area =
+        unsafe { KVirtArea::map_untracked_frames(size, 0, pa_range.clone(), default_prop()) };
+
+    assert_eq!(kvirt_area.len(), size);
+    assert!(kvirt_area.start() >= VMALLOC_VADDR_RANGE.start);
+    assert!(kvirt_area.end() <= VMALLOC_VADDR_RANGE.end);
+
+    for i in 0..2 {
+        let addr = kvirt_area.start() + i * PAGE_SIZE;
+
+        let MappedItem::Untracked(pa, level, _) = kvirt_area.query(addr).unwrap() else {
+            panic!("Expected a untracked page");
+        };
+        assert_eq!(pa, pa_range.start + (i * PAGE_SIZE) as Paddr);
+        assert_eq!(level, 1);
     }
 }
 
@@ -63,37 +72,30 @@ fn kvirt_area_tracked_drop() {
         .alloc_segment_with(2, |_| ())
         .unwrap();
 
-    let kvirt_area =
-        KVirtArea::<Tracked>::map_pages(size, 0, frames.into_iter(), PageProperty::new_absent());
+    let kvirt_area = KVirtArea::map_frames(size, 0, frames.into_iter(), default_prop());
 
     drop(kvirt_area);
 
     // After dropping, the virtual address range should be freed and no longer mapped.
-    let kvirt_area = KVirtArea::<Tracked>::map_pages(
-        size,
-        0,
-        core::iter::empty::<Frame<()>>(),
-        PageProperty::new_absent(),
-    );
-    assert!(kvirt_area.get_page(kvirt_area.start()).is_none());
+    let kvirt_area =
+        KVirtArea::map_frames(size, 0, core::iter::empty::<Frame<()>>(), default_prop());
+    assert_eq!(kvirt_area.query(kvirt_area.start()), None);
 }
 
 #[ktest]
 fn kvirt_area_untracked_drop() {
-    let size = 2 * PAGE_SIZE;
-    let pa_range = 0..2 * PAGE_SIZE as Paddr;
+    let max_paddr = max_paddr();
 
-    let kvirt_area = unsafe {
-        KVirtArea::<Untracked>::map_untracked_pages(size, 0, pa_range, PageProperty::new_absent())
-    };
+    let size = 2 * PAGE_SIZE;
+    let pa_range = max_paddr..max_paddr + 2 * PAGE_SIZE as Paddr;
+
+    let kvirt_area = unsafe { KVirtArea::map_untracked_frames(size, 0, pa_range, default_prop()) };
 
     drop(kvirt_area);
 
     // After dropping, the virtual address range should be freed and no longer mapped.
-    let kvirt_area = unsafe {
-        KVirtArea::<Untracked>::map_untracked_pages(size, 0, 0..0, PageProperty::new_absent())
-    };
-    assert!(kvirt_area.get_untracked_page(kvirt_area.start()).is_none());
+    let kvirt_area = unsafe { KVirtArea::map_untracked_frames(size, 0, 0..0, default_prop()) };
+    assert!(kvirt_area.query(kvirt_area.start()).is_none());
 }
 
 #[ktest]

@@ -103,12 +105,3 @@ fn manual_paddr_to_vaddr() {
 
     assert_eq!(va, LINEAR_MAPPING_BASE_VADDR + pa);
 }
-
-#[ktest]
-fn map_as_tracked() {
-    let tracked_addr = TRACKED_MAPPED_PAGES_RANGE.start;
-    let untracked_addr = VMALLOC_VADDR_RANGE.start;
-
-    assert!(should_map_as_tracked(tracked_addr));
-    assert!(!should_map_as_tracked(untracked_addr));
-}
@@ -11,7 +11,7 @@ use crate::{
     mm::{
         nr_subpage_per_huge, paddr_to_vaddr,
         page_table::{
-            load_pte, page_size, pte_index, Child, MapTrackingStatus, PageTable, PageTableConfig,
+            load_pte, page_size, pte_index, ChildRef, PageTable, PageTableConfig,
             PageTableEntryTrait, PageTableGuard, PageTableNodeRef, PagingConstsTrait, PagingLevel,
         },
         Vaddr,

@@ -23,7 +23,6 @@ pub(super) fn lock_range<'rcu, C: PageTableConfig>(
     pt: &'rcu PageTable<C>,
     guard: &'rcu dyn InAtomicMode,
     va: &Range<Vaddr>,
-    new_pt_is_tracked: MapTrackingStatus,
 ) -> Cursor<'rcu, C> {
     // The re-try loop of finding the sub-tree root.
     //

@@ -32,9 +31,7 @@ pub(super) fn lock_range<'rcu, C: PageTableConfig>(
     // sub-tree will not see the current state and will not change the current
     // state, breaking serializability.
     let mut subtree_root = loop {
-        if let Some(subtree_root) =
-            try_traverse_and_lock_subtree_root(pt, guard, va, new_pt_is_tracked)
-        {
+        if let Some(subtree_root) = try_traverse_and_lock_subtree_root(pt, guard, va) {
             break subtree_root;
         }
     };

@@ -93,7 +90,6 @@ fn try_traverse_and_lock_subtree_root<'rcu, C: PageTableConfig>(
     pt: &PageTable<C>,
     guard: &'rcu dyn InAtomicMode,
     va: &Range<Vaddr>,
-    new_pt_is_tracked: MapTrackingStatus,
 ) -> Option<PageTableGuard<'rcu, C>> {
     let mut cur_node_guard: Option<PageTableGuard<C>> = None;
     let mut cur_pt_addr = pt.root.start_paddr();

@@ -137,11 +133,11 @@ fn try_traverse_and_lock_subtree_root<'rcu, C: PageTableConfig>(
 
         let mut cur_entry = pt_guard.entry(start_idx);
         if cur_entry.is_none() {
-            let allocated_guard = cur_entry.alloc_if_none(guard, new_pt_is_tracked).unwrap();
+            let allocated_guard = cur_entry.alloc_if_none(guard).unwrap();
             cur_pt_addr = allocated_guard.start_paddr();
             cur_node_guard = Some(allocated_guard);
         } else if cur_entry.is_node() {
-            let Child::PageTableRef(pt) = cur_entry.to_ref() else {
+            let ChildRef::PageTable(pt) = cur_entry.to_ref() else {
                 unreachable!();
             };
             cur_pt_addr = pt.start_paddr();

@@ -187,7 +183,7 @@ fn dfs_acquire_lock<C: PageTableConfig>(
     for i in idx_range {
         let child = cur_node.entry(i);
         match child.to_ref() {
-            Child::PageTableRef(pt) => {
+            ChildRef::PageTable(pt) => {
                 let mut pt_guard = pt.lock(guard);
                 let child_node_va = cur_node_va + i * page_size::<C>(cur_level);
                 let child_node_va_end = child_node_va + page_size::<C>(cur_level);

@@ -196,7 +192,7 @@ fn dfs_acquire_lock<C: PageTableConfig>(
                 dfs_acquire_lock(guard, &mut pt_guard, child_node_va, va_start..va_end);
                 let _ = ManuallyDrop::new(pt_guard);
             }
-            Child::None | Child::Frame(_, _) | Child::Untracked(_, _, _) | Child::PageTable(_) => {}
+            ChildRef::None | ChildRef::Frame(_, _, _) => {}
         }
     }
 }

@@ -222,7 +218,7 @@ unsafe fn dfs_release_lock<'rcu, C: PageTableConfig>(
     for i in idx_range.rev() {
         let child = cur_node.entry(i);
         match child.to_ref() {
-            Child::PageTableRef(pt) => {
+            ChildRef::PageTable(pt) => {
                 // SAFETY: The caller ensures that the node is locked and the new guard is unique.
                 let child_node = unsafe { pt.make_guard_unchecked(guard) };
                 let child_node_va = cur_node_va + i * page_size::<C>(cur_level);

@@ -233,7 +229,7 @@ unsafe fn dfs_release_lock<'rcu, C: PageTableConfig>(
                 // guards are forgotten.
                 unsafe { dfs_release_lock(guard, child_node, child_node_va, va_start..va_end) };
             }
-            Child::None | Child::Frame(_, _) | Child::Untracked(_, _, _) | Child::PageTable(_) => {}
+            ChildRef::None | ChildRef::Frame(_, _, _) => {}
         }
     }
 }

@@ -241,10 +237,10 @@ unsafe fn dfs_release_lock<'rcu, C: PageTableConfig>(
 /// Marks all the nodes in the sub-tree rooted at the node as stray, and
 /// returns the num of pages mapped within the sub-tree.
 ///
-/// This function must be called upon the node after the node is removed
-/// from the parent page table.
+/// It must be called upon the node after the node is removed from the parent
+/// page table. It also unlocks the nodes in the sub-tree.
 ///
-/// This function also unlocks the nodes in the sub-tree.
+/// This function returns the number of physical frames mapped in the sub-tree.
 ///
 /// # Safety
 ///

@@ -263,23 +259,23 @@ pub(super) unsafe fn dfs_mark_stray_and_unlock<C: PageTableConfig>(
         return sub_tree.nr_children() as usize;
     }
 
-    let mut num_pages = 0;
+    let mut num_frames = 0;
 
     for i in (0..nr_subpage_per_huge::<C>()).rev() {
         let child = sub_tree.entry(i);
         match child.to_ref() {
-            Child::PageTableRef(pt) => {
+            ChildRef::PageTable(pt) => {
                 // SAFETY: The caller ensures that the node is locked and the new guard is unique.
                 let locked_pt = unsafe { pt.make_guard_unchecked(rcu_guard) };
                 // SAFETY: The caller ensures that all the nodes in the sub-tree are locked and all
                 // guards are forgotten.
-                num_pages += unsafe { dfs_mark_stray_and_unlock(rcu_guard, locked_pt) };
+                num_frames += unsafe { dfs_mark_stray_and_unlock(rcu_guard, locked_pt) };
             }
-            Child::None | Child::Frame(_, _) | Child::Untracked(_, _, _) | Child::PageTable(_) => {}
+            ChildRef::None | ChildRef::Frame(_, _, _) => {}
        }
     }
 
-    num_pages
+    num_frames
 }
 
 fn dfs_get_idx_range<C: PagingConstsTrait>(
@ -29,19 +29,19 @@
|
||||
|
||||
mod locking;
|
||||
|
||||
use core::{any::TypeId, fmt::Debug, marker::PhantomData, mem::ManuallyDrop, ops::Range};
|
||||
use core::{fmt::Debug, marker::PhantomData, mem::ManuallyDrop, ops::Range};
|
||||
|
||||
use align_ext::AlignExt;
|
||||
|
||||
use super::{
|
||||
is_valid_range, page_size, pte_index, Child, Entry, KernelPtConfig, MapTrackingStatus,
|
||||
PageTable, PageTableConfig, PageTableError, PageTableGuard, PagingConstsTrait, PagingLevel,
|
||||
UserPtConfig,
|
||||
page_size, pte_index, Child, ChildRef, Entry, PageTable, PageTableConfig, PageTableError,
|
||||
PageTableGuard, PagingConstsTrait, PagingLevel,
|
||||
};
|
||||
use crate::{
|
||||
mm::{
|
||||
frame::{meta::AnyFrameMeta, Frame},
|
||||
Paddr, PageProperty, Vaddr,
|
||||
page_table::is_valid_range,
|
||||
PageProperty, Vaddr,
|
||||
},
|
||||
task::atomic_mode::InAtomicMode,
|
||||
};
|
||||
@ -54,7 +54,7 @@ use crate::{
|
||||
/// A cursor is able to move to the next slot, to read page properties,
|
||||
/// and even to jump to a virtual address directly.
|
||||
#[derive(Debug)]
|
||||
pub struct Cursor<'rcu, C: PageTableConfig> {
|
||||
pub(crate) struct Cursor<'rcu, C: PageTableConfig> {
|
||||
/// The current path of the cursor.
|
||||
///
|
||||
/// The level 1 page table lock guard is at index 0, and the level N page
|
||||
@ -78,34 +78,37 @@ pub struct Cursor<'rcu, C: PageTableConfig> {
|
||||
/// The maximum value of `PagingConstsTrait::NR_LEVELS`.
|
||||
const MAX_NR_LEVELS: usize = 4;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum PageTableItem {
|
||||
NotMapped {
|
||||
va: Vaddr,
|
||||
len: usize,
|
||||
},
|
||||
Mapped {
|
||||
va: Vaddr,
|
||||
page: Frame<dyn AnyFrameMeta>,
|
||||
prop: PageProperty,
|
||||
},
|
||||
MappedUntracked {
|
||||
va: Vaddr,
|
||||
pa: Paddr,
|
||||
len: usize,
|
||||
prop: PageProperty,
|
||||
},
|
||||
/// This item can only show up as a return value of `take_next`. The caller
|
||||
/// is responsible to free the page table node after TLB coherence.
|
||||
/// FIXME: Separate into another type rather than `PageTableItem`?
|
||||
/// A fragment of a page table that can be taken out of the page table.
|
||||
#[derive(Debug)]
|
||||
#[must_use]
|
||||
pub(crate) enum PageTableFrag<C: PageTableConfig> {
|
||||
/// A mapped page table item.
|
||||
Mapped { va: Vaddr, item: C::Item },
|
||||
/// A sub-tree of a page table that is taken out of the page table.
|
||||
///
|
||||
/// The caller is responsible for dropping it after TLB coherence.
|
||||
StrayPageTable {
|
||||
pt: Frame<dyn AnyFrameMeta>,
|
||||
va: Vaddr,
|
||||
len: usize,
|
||||
num_pages: usize,
|
||||
num_frames: usize,
|
||||
},
|
||||
}
|
||||
|
||||
impl<C: PageTableConfig> PageTableFrag<C> {
|
||||
#[cfg(ktest)]
|
||||
pub(crate) fn va_range(&self) -> Range<Vaddr> {
|
||||
match self {
|
||||
PageTableFrag::Mapped { va, item } => {
|
||||
let (pa, level, prop) = C::item_into_raw(item.clone());
|
||||
drop(unsafe { C::item_from_raw(pa, level, prop) });
|
||||
*va..*va + page_size::<C>(level)
|
||||
}
|
||||
PageTableFrag::StrayPageTable { va, len, .. } => *va..*va + *len,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'rcu, C: PageTableConfig> Cursor<'rcu, C> {
|
||||
/// Creates a cursor claiming exclusive access over the given range.
|
||||
///
|
||||
@ -125,17 +128,20 @@ impl<'rcu, C: PageTableConfig> Cursor<'rcu, C> {
|
||||
}
|
||||
|
||||
const { assert!(C::NR_LEVELS as usize <= MAX_NR_LEVELS) };
|
||||
let new_pt_is_tracked = if should_map_as_tracked::<C>(va.start) {
|
||||
MapTrackingStatus::Tracked
|
||||
} else {
|
||||
MapTrackingStatus::Untracked
|
||||
};
|
||||
|
||||
Ok(locking::lock_range(pt, guard, va, new_pt_is_tracked))
|
||||
Ok(locking::lock_range(pt, guard, va))
|
||||
}
|
||||
|
||||
/// Gets the information of the current slot.
|
||||
pub fn query(&mut self) -> Result<PageTableItem, PageTableError> {
|
||||
/// Gets the current virtual address.
|
||||
pub fn virt_addr(&self) -> Vaddr {
|
||||
self.va
|
||||
}
|
||||
|
||||
/// Queries the mapping at the current virtual address.
|
||||
///
|
||||
/// If the cursor is pointing to a valid virtual address that is locked,
|
||||
/// it will return the virtual address range and the item at that slot.
|
||||
pub fn query(&mut self) -> Result<PagesState<C>, PageTableError> {
|
||||
if self.va >= self.barrier_va.end {
|
||||
return Err(PageTableError::InvalidVaddr(self.va));
|
||||
}
|
||||
@ -143,85 +149,95 @@ impl<'rcu, C: PageTableConfig> Cursor<'rcu, C> {
|
||||
let rcu_guard = self.rcu_guard;
|
||||
|
||||
loop {
|
||||
let cur_va = self.va;
|
||||
let level = self.level;
|
||||
let va = self.va;
|
||||
|
||||
let entry = self.cur_entry();
|
||||
|
||||
match entry.to_ref() {
|
||||
Child::PageTableRef(pt) => {
|
||||
let cur_child = self.cur_entry().to_ref();
|
||||
let item = match cur_child {
|
||||
ChildRef::PageTable(pt) => {
|
||||
// SAFETY: The `pt` must be locked and no other guards exist.
|
||||
let guard = unsafe { pt.make_guard_unchecked(rcu_guard) };
|
||||
self.push_level(guard);
|
||||
continue;
|
||||
}
|
||||
Child::PageTable(_) => {
|
||||
unreachable!();
|
||||
}
|
||||
Child::None => {
|
||||
return Ok(PageTableItem::NotMapped {
|
||||
va,
|
||||
len: page_size::<C>(level),
|
||||
});
|
||||
}
|
||||
Child::Frame(page, prop) => {
|
||||
return Ok(PageTableItem::Mapped { va, page, prop });
|
||||
}
|
||||
Child::Untracked(pa, plevel, prop) => {
|
||||
debug_assert_eq!(plevel, level);
|
||||
return Ok(PageTableItem::MappedUntracked {
|
||||
va,
|
||||
pa,
|
||||
len: page_size::<C>(level),
|
||||
prop,
|
||||
});
|
||||
}
|
||||
ChildRef::None => None,
|
||||
ChildRef::Frame(pa, ch_level, prop) => {
|
||||
debug_assert_eq!(ch_level, level);
|
||||
|
||||
// SAFETY:
|
||||
// This is part of (if `split_huge` happens) a page table item mapped
|
||||
// with a previous call to `C::item_into_raw`, where:
|
||||
// - The physical address and the paging level match it;
|
||||
// - The item part is still mapped so we don't take its ownership.
|
||||
//
|
||||
// For page table configs that require the `AVAIL1` flag to be kept
|
||||
// (currently, only kernel page tables), the callers of the unsafe
|
||||
// `protect_next` method uphold this invariant.
|
||||
let item = ManuallyDrop::new(unsafe { C::item_from_raw(pa, level, prop) });
|
||||
// TODO: Provide a `PageTableItemRef` to reduce copies.
|
||||
Some((*item).clone())
|
||||
}
|
||||
};
|
||||
|
||||
return Ok((cur_va..cur_va + page_size::<C>(level), item));
|
||||
}
|
||||
}
|
||||
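The reworked `query` returns a plain `(Range<Vaddr>, Option<C::Item>)` tuple instead of an enum. A minimal standalone sketch of consuming that shape, with a toy `Item` standing in for `C::Item`:

```rust
// Minimal model of the new query result: a VA range plus an optional item.
type Vaddr = usize;

#[derive(Debug)]
struct Item {
    paddr: usize, // stand-in for a tracked frame handle
}

type PagesState = (core::ops::Range<Vaddr>, Option<Item>);

fn describe(state: PagesState) -> String {
    match state {
        (va, Some(item)) => format!("{:#x?} -> {:#x}", va, item.paddr),
        (va, None) => format!("{:#x?} not mapped", va),
    }
}

fn main() {
    println!("{}", describe((0x4000..0x5000, Some(Item { paddr: 0x8000 }))));
    println!("{}", describe((0x5000..0x6000, None)));
}
```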
|
||||
/// Moves the cursor forward to the next fragment in the range.
|
||||
/// Moves the cursor forward to the next mapped virtual address.
|
||||
///
|
||||
/// If there is mapped virtual address or child page table following the
|
||||
/// current address within next `len` bytes, it will return that address.
|
||||
/// In this case, the cursor will stop at the mapped address.
|
||||
/// If there is a mapped virtual address following the current address within
|
||||
/// the next `len` bytes, it will return that mapped address. In this case,
|
||||
/// the cursor will stop at the mapped address.
|
||||
///
|
||||
/// Otherwise, it will return `None`, and the cursor may stop at any
|
||||
/// address after `len` bytes.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if the length is longer than the remaining range of the cursor.
|
||||
/// Panics if:
|
||||
/// - the length is longer than the remaining range of the cursor;
|
||||
/// - the length is not page-aligned.
|
||||
pub fn find_next(&mut self, len: usize) -> Option<Vaddr> {
|
||||
self.find_next_impl(len, true, false)
|
||||
self.find_next_impl(len, false, false)
|
||||
}
|
||||
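A typical caller alternates `find_next`, `query`, and `move_forward` to visit only the mapped slots of a range. The standalone sketch below imitates that walk over a `BTreeMap`; `ToyCursor` is an illustrative stand-in, not the real cursor.

```rust
use std::collections::BTreeMap;

// A toy "cursor" over a sorted map of base-page mappings, imitating the
// find_next/query walk (the real cursor walks page table nodes).
struct ToyCursor {
    va: usize,
    end: usize,
    map: BTreeMap<usize, usize>, // va -> pa
}

impl ToyCursor {
    // Returns the next mapped VA within `len` bytes, stopping there,
    // or None (the cursor may then rest anywhere past the range).
    fn find_next(&mut self, len: usize) -> Option<usize> {
        let end = (self.va + len).min(self.end);
        let (&va, _) = self.map.range(self.va..end).next()?;
        self.va = va;
        Some(va)
    }
    fn query(&self) -> (std::ops::Range<usize>, Option<usize>) {
        (self.va..self.va + 0x1000, self.map.get(&self.va).copied())
    }
    fn move_forward(&mut self) {
        self.va += 0x1000;
    }
}

fn main() {
    let map = BTreeMap::from([(0x3000, 0x8000), (0x6000, 0x9000)]);
    let mut c = ToyCursor { va: 0x0, end: 0x10000, map };
    while let Some(va) = c.find_next(c.end - c.va) {
        let (range, pa) = c.query();
        println!("{va:#x}: {range:?} -> {pa:?}");
        c.move_forward();
    }
}
```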
|
||||
/// Moves the cursor forward to the next fragment in the range.
|
||||
///
|
||||
/// `find_leaf` specifies whether the cursor should only stop at leaf node
|
||||
/// entries that are mapped. If not specified, it can stop at an entry at
|
||||
/// any level.
|
||||
/// See [`Self::find_next`] for more details. Other than the semantics
|
||||
/// provided by [`Self::find_next`], this method also supports finding non-
|
||||
/// leaf entries and splitting huge pages if necessary.
|
||||
///
|
||||
/// `find_unmap_subtree` specifies whether the cursor should stop at the
|
||||
/// highest possible level for unmapping. If `false`, the cursor will only
|
||||
/// stop at leaf entries.
|
||||
///
|
||||
/// `split_huge` specifies whether the cursor should split huge pages when
|
||||
/// it finds a huge page that is mapped over the required range (`len`).
|
||||
///
|
||||
/// See [`Self::find_next`] for more details.
|
||||
fn find_next_impl(&mut self, len: usize, find_leaf: bool, split_huge: bool) -> Option<Vaddr> {
|
||||
fn find_next_impl(
|
||||
&mut self,
|
||||
len: usize,
|
||||
find_unmap_subtree: bool,
|
||||
split_huge: bool,
|
||||
) -> Option<Vaddr> {
|
||||
assert_eq!(len % C::BASE_PAGE_SIZE, 0);
|
||||
let end = self.va + len;
|
||||
assert!(end <= self.barrier_va.end);
|
||||
debug_assert_eq!(end % C::BASE_PAGE_SIZE, 0);
|
||||
|
||||
let rcu_guard = self.rcu_guard;
|
||||
|
||||
while self.va < end {
|
||||
let cur_va = self.va;
|
||||
let cur_page_size = page_size::<C>(self.level);
|
||||
let next_va = self.cur_va_range().end;
|
||||
let cur_entry_fits_range = cur_va % cur_page_size == 0 && next_va <= end;
|
||||
let mut cur_entry = self.cur_entry();
|
||||
let cur_va_range = self.cur_va_range();
|
||||
let cur_entry_fits_range = cur_va == cur_va_range.start && cur_va_range.end <= end;
|
||||
|
||||
let mut cur_entry = self.cur_entry();
|
||||
match cur_entry.to_ref() {
|
||||
Child::PageTableRef(pt) => {
|
||||
if !find_leaf && cur_entry_fits_range {
|
||||
ChildRef::PageTable(pt) => {
|
||||
if find_unmap_subtree
|
||||
&& cur_entry_fits_range
|
||||
&& (C::TOP_LEVEL_CAN_UNMAP || self.level != C::NR_LEVELS)
|
||||
{
|
||||
return Some(cur_va);
|
||||
}
|
||||
|
||||
@ -237,23 +253,17 @@ impl<'rcu, C: PageTableConfig> Cursor<'rcu, C> {
|
||||
}
|
||||
continue;
|
||||
}
|
||||
Child::PageTable(_) => {
|
||||
unreachable!();
|
||||
}
|
||||
Child::None => {
|
||||
ChildRef::None => {
|
||||
self.move_forward();
|
||||
continue;
|
||||
}
|
||||
Child::Frame(_, _) => {
|
||||
return Some(cur_va);
|
||||
}
|
||||
Child::Untracked(_, _, _) => {
|
||||
ChildRef::Frame(_, _, _) => {
|
||||
if cur_entry_fits_range || !split_huge {
|
||||
return Some(cur_va);
|
||||
}
|
||||
|
||||
let split_child = cur_entry
|
||||
.split_if_untracked_huge(rcu_guard)
|
||||
.split_if_mapped_huge(rcu_guard)
|
||||
.expect("The entry must be a huge page");
|
||||
self.push_level(split_child);
|
||||
continue;
|
||||
@ -297,10 +307,6 @@ impl<'rcu, C: PageTableConfig> Cursor<'rcu, C> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn virt_addr(&self) -> Vaddr {
|
||||
self.va
|
||||
}
|
||||
|
||||
/// Traverses forward to the end of [`Self::cur_va_range`].
|
||||
///
|
||||
/// If reached the end of the current page table node, it (recursively)
|
||||
@ -351,8 +357,13 @@ impl<C: PageTableConfig> Drop for Cursor<'_, C> {
|
||||
}
|
||||
}
|
||||
|
||||
/// The state of virtual pages represented by a page table.
|
||||
///
|
||||
/// This is the return type of the [`Cursor::query`] method.
|
||||
pub type PagesState<C> = (Range<Vaddr>, Option<<C as PageTableConfig>::Item>);
|
||||
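Since the cursor's `Iterator` implementation yields the same `PagesState` tuples, callers can use ordinary iterator adapters over a walk. A small standalone sketch, with a bare physical address standing in for the item type:

```rust
// The cursor's Iterator impl yields the same (range, item) tuples as query().
// Plain tuples stand in here for the OSTD item type.
type Vaddr = usize;
type PagesState = (std::ops::Range<Vaddr>, Option<u64 /* paddr stand-in */>);

fn mapped_bytes(states: impl Iterator<Item = PagesState>) -> usize {
    states
        .filter(|(_, item)| item.is_some())
        .map(|(va, _)| va.end - va.start)
        .sum()
}

fn main() {
    let walk = vec![
        (0x0000..0x1000, None),
        (0x1000..0x2000, Some(0x8000)),
        (0x2000..0x4000, Some(0x10000)), // a multi-page slot
    ];
    assert_eq!(mapped_bytes(walk.into_iter()), 0x3000);
    println!("ok");
}
```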
|
||||
impl<C: PageTableConfig> Iterator for Cursor<'_, C> {
|
||||
type Item = PageTableItem;
|
||||
type Item = PagesState<C>;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
let result = self.query();
|
||||
@ -370,7 +381,7 @@ impl<C: PageTableConfig> Iterator for Cursor<'_, C> {
|
||||
/// in a page table can only be accessed by one cursor, regardless of the
|
||||
/// mutability of the cursor.
|
||||
#[derive(Debug)]
|
||||
pub struct CursorMut<'rcu, C: PageTableConfig>(Cursor<'rcu, C>);
|
||||
pub(crate) struct CursorMut<'rcu, C: PageTableConfig>(Cursor<'rcu, C>);
|
||||
|
||||
impl<'rcu, C: PageTableConfig> CursorMut<'rcu, C> {
|
||||
/// Creates a cursor claiming exclusive access over the given range.
|
||||
@ -410,187 +421,100 @@ impl<'rcu, C: PageTableConfig> CursorMut<'rcu, C> {
|
||||
self.0.virt_addr()
|
||||
}
|
||||
|
||||
/// Gets the information of the current slot.
|
||||
pub fn query(&mut self) -> Result<PageTableItem, PageTableError> {
|
||||
/// Queries the mapping at the current virtual address.
|
||||
///
|
||||
/// If the cursor is pointing to a valid virtual address that is locked,
|
||||
/// it will return the virtual address range and the item at that slot.
|
||||
pub fn query(&mut self) -> Result<PagesState<C>, PageTableError> {
|
||||
self.0.query()
|
||||
}
|
||||
|
||||
/// Maps the range starting from the current address to a [`Frame<dyn AnyFrameMeta>`].
|
||||
/// Maps the item starting from the current address to a physical address range.
|
||||
///
|
||||
/// It returns the previously mapped [`Frame<dyn AnyFrameMeta>`] if that exists.
|
||||
/// If the current address has already mapped pages, it will do a re-map,
|
||||
/// taking out the old physical address and replacing it with the new one.
|
||||
/// This function will return [`Err`] with a [`PageTableFrag`] holding the
|
||||
/// item that was just unmapped. The caller should drop it after TLB coherence.
|
||||
///
|
||||
/// If there are no mapped pages in the specified virtual address range,
|
||||
/// the function will return [`None`].
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if
|
||||
/// - the virtual address range to be mapped is out of the range;
|
||||
/// - the alignment of the page is not satisfied by the virtual address;
|
||||
/// - it is already mapped to a huge page while the caller wants to map a smaller one.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// The caller should ensure that the virtual range being mapped does
|
||||
/// not affect kernel's memory safety.
|
||||
pub unsafe fn map(
|
||||
&mut self,
|
||||
frame: Frame<dyn AnyFrameMeta>,
|
||||
prop: PageProperty,
|
||||
) -> Option<Frame<dyn AnyFrameMeta>> {
|
||||
let end = self.0.va + frame.size();
|
||||
assert!(end <= self.0.barrier_va.end);
|
||||
|
||||
let rcu_guard = self.0.rcu_guard;
|
||||
|
||||
// Go down if not applicable.
|
||||
while self.0.level > frame.map_level()
|
||||
|| self.0.va % page_size::<C>(self.0.level) != 0
|
||||
|| self.0.va + page_size::<C>(self.0.level) > end
|
||||
{
|
||||
debug_assert!(should_map_as_tracked::<C>(self.0.va));
|
||||
let mut cur_entry = self.0.cur_entry();
|
||||
match cur_entry.to_ref() {
|
||||
Child::PageTableRef(pt) => {
|
||||
// SAFETY: The `pt` must be locked and no other guards exist.
|
||||
let guard = unsafe { pt.make_guard_unchecked(rcu_guard) };
|
||||
self.0.push_level(guard);
|
||||
}
|
||||
Child::PageTable(_) => {
|
||||
unreachable!();
|
||||
}
|
||||
Child::None => {
|
||||
let child_guard = cur_entry
|
||||
.alloc_if_none(rcu_guard, MapTrackingStatus::Tracked)
|
||||
.unwrap();
|
||||
self.0.push_level(child_guard);
|
||||
}
|
||||
Child::Frame(_, _) => {
|
||||
panic!("Mapping a smaller frame in an already mapped huge page");
|
||||
}
|
||||
Child::Untracked(_, _, _) => {
|
||||
panic!("Mapping a tracked page in an untracked range");
|
||||
}
|
||||
}
|
||||
continue;
|
||||
}
|
||||
debug_assert_eq!(self.0.level, frame.map_level());
|
||||
|
||||
// Map the current page.
|
||||
let mut cur_entry = self.0.cur_entry();
|
||||
let old = cur_entry.replace(Child::Frame(frame, prop));
|
||||
|
||||
let old_frame = match old {
|
||||
Child::Frame(old_page, _) => Some(old_page),
|
||||
Child::None => None,
|
||||
Child::PageTable(_) => {
|
||||
todo!("Dropping page table nodes while mapping requires TLB flush")
|
||||
}
|
||||
Child::Untracked(_, _, _) => panic!("Mapping a tracked page in an untracked range"),
|
||||
Child::PageTableRef(_) => unreachable!(),
|
||||
};
|
||||
|
||||
self.0.move_forward();
|
||||
|
||||
old_frame
|
||||
}
|
||||
|
||||
/// Maps the range starting from the current address to a physical address range.
|
||||
///
|
||||
/// The function will map as many huge pages as possible, and it will split
|
||||
/// the huge pages into smaller pages if necessary. If the input range is
|
||||
/// large, the resulting mappings may look like this (if very huge pages
|
||||
/// supported):
|
||||
///
|
||||
/// ```text
|
||||
/// start end
|
||||
/// |----|----------------|--------------------------------|----|----|
|
||||
/// base huge very huge base base
|
||||
/// 4KiB 2MiB 1GiB 4KiB 4KiB
|
||||
/// ```
|
||||
///
|
||||
/// In practice, using this method is not suggested, for the sake of safety and conciseness.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if
|
||||
/// - the virtual address range to be mapped is out of the range.
|
||||
/// - the virtual address range to be mapped is out of the locked range;
|
||||
/// - the current virtual address is not aligned to the page size of the
|
||||
/// item to be mapped;
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// The caller should ensure that
|
||||
/// - the range being mapped does not affect kernel's memory safety;
|
||||
/// - the physical address to be mapped is valid and safe to use;
|
||||
/// - it is allowed to map untracked pages in this virtual address range.
|
||||
pub unsafe fn map_pa(&mut self, pa: &Range<Paddr>, prop: PageProperty) {
|
||||
let end = self.0.va + pa.len();
|
||||
pub unsafe fn map(&mut self, item: C::Item) -> Result<(), PageTableFrag<C>> {
|
||||
assert!(self.0.va < self.0.barrier_va.end);
|
||||
let (pa, level, prop) = C::item_into_raw(item);
|
||||
assert!(level <= C::HIGHEST_TRANSLATION_LEVEL);
|
||||
let size = page_size::<C>(level);
|
||||
assert_eq!(self.0.va % size, 0);
|
||||
let end = self.0.va + size;
|
||||
assert!(end <= self.0.barrier_va.end);
|
||||
|
||||
let rcu_guard = self.0.rcu_guard;
|
||||
|
||||
let mut pa = pa.start;
|
||||
while self.0.va < end {
|
||||
// We ensure not mapping in reserved kernel shared tables or releasing it.
|
||||
// Although it may be an invariant for all architectures and will be optimized
|
||||
// out by the compiler since `C::NR_LEVELS - 1 > C::HIGHEST_TRANSLATION_LEVEL`.
|
||||
let is_kernel_shared_node = TypeId::of::<C>() == TypeId::of::<KernelPtConfig>()
|
||||
&& self.0.level >= C::NR_LEVELS - 1;
|
||||
if self.0.level > C::HIGHEST_TRANSLATION_LEVEL
|
||||
|| is_kernel_shared_node
|
||||
|| self.0.va % page_size::<C>(self.0.level) != 0
|
||||
|| self.0.va + page_size::<C>(self.0.level) > end
|
||||
|| pa % page_size::<C>(self.0.level) != 0
|
||||
{
|
||||
// Adjust ourselves to the level of the item.
|
||||
while self.0.level != level {
|
||||
if self.0.level < level {
|
||||
self.0.pop_level();
|
||||
continue;
|
||||
}
|
||||
// We are at a higher level, go down.
|
||||
let mut cur_entry = self.0.cur_entry();
|
||||
match cur_entry.to_ref() {
|
||||
Child::PageTableRef(pt) => {
|
||||
ChildRef::PageTable(pt) => {
|
||||
// SAFETY: The `pt` must be locked and no other guards exist.
|
||||
let guard = unsafe { pt.make_guard_unchecked(rcu_guard) };
|
||||
self.0.push_level(guard);
|
||||
let pt_guard = unsafe { pt.make_guard_unchecked(rcu_guard) };
|
||||
self.0.push_level(pt_guard);
|
||||
}
|
||||
Child::PageTable(_) => {
|
||||
unreachable!();
|
||||
}
|
||||
Child::None => {
|
||||
let child_guard = cur_entry
|
||||
.alloc_if_none(rcu_guard, MapTrackingStatus::Untracked)
|
||||
.unwrap();
|
||||
ChildRef::None => {
|
||||
let child_guard = cur_entry.alloc_if_none(rcu_guard).unwrap();
|
||||
self.0.push_level(child_guard);
|
||||
}
|
||||
Child::Frame(_, _) => {
|
||||
panic!("Mapping a smaller page in an already mapped huge page");
|
||||
}
|
||||
Child::Untracked(_, _, _) => {
|
||||
let split_child = cur_entry.split_if_untracked_huge(rcu_guard).unwrap();
|
||||
ChildRef::Frame(_, _, _) => {
|
||||
let split_child = cur_entry.split_if_mapped_huge(rcu_guard).unwrap();
|
||||
self.0.push_level(split_child);
|
||||
}
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// Map the current page.
|
||||
debug_assert!(!should_map_as_tracked::<C>(self.0.va));
|
||||
let level = self.0.level;
|
||||
let mut cur_entry = self.0.cur_entry();
|
||||
let _ = cur_entry.replace(Child::Untracked(pa, level, prop));
|
||||
let frag = self.replace_cur_entry(Child::Frame(pa, level, prop));
|
||||
|
||||
// Move forward.
|
||||
pa += page_size::<C>(level);
|
||||
self.0.move_forward();
|
||||
|
||||
if let Some(frag) = frag {
|
||||
Err(frag)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
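Under the new contract, mapping over an occupied slot surfaces the displaced mapping as an `Err` value instead of silently dropping it. A toy model of that remap flow, where a `HashMap` plays the page table and `Frag` is illustrative only:

```rust
// A toy remap flow mirroring the new `map` contract: mapping over an existing
// entry yields Err(old_fragment), which the caller keeps until TLB flush.
use std::collections::HashMap;

type Vaddr = usize;

#[derive(Debug)]
struct Frag {
    va: Vaddr,
    old_paddr: u64,
}

struct ToySpace {
    slots: HashMap<Vaddr, u64>, // va -> pa, one base page per slot
}

impl ToySpace {
    fn map(&mut self, va: Vaddr, pa: u64) -> Result<(), Frag> {
        match self.slots.insert(va, pa) {
            None => Ok(()),
            Some(old_paddr) => Err(Frag { va, old_paddr }),
        }
    }
}

fn main() {
    let mut space = ToySpace { slots: HashMap::new() };
    assert!(space.map(0x1000, 0x8000).is_ok());
    // Re-mapping the same VA returns the displaced mapping instead of
    // silently dropping it.
    let old = space.map(0x1000, 0x9000).unwrap_err();
    println!("must flush {:#x} before freeing {:#x}", old.va, old.old_paddr);
}
```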
|
||||
/// Find and remove the first page in the cursor's following range.
|
||||
/// Finds and removes the first page table fragment in the following range.
|
||||
///
|
||||
/// The range to be found in is the current virtual address with the
|
||||
/// provided length.
|
||||
///
|
||||
/// The function stops and yields the page if it has actually removed a
|
||||
/// page, no matter if the following pages are also required to be unmapped.
|
||||
/// The returned page is the virtual page that existed before the removal
|
||||
/// but having just been unmapped.
|
||||
/// The function stops and yields the fragment if it has actually removed a
|
||||
/// fragment, no matter whether the following pages also need to be
|
||||
/// unmapped. The returned virtual address is that of the page that existed
|
||||
/// before the removal but has just been unmapped.
|
||||
///
|
||||
/// It also makes the cursor move forward to the next page after the
|
||||
/// removed one, when an actual page is removed. If no mapped pages exist
|
||||
/// in the following range, the cursor will stop at the end of the range
|
||||
/// and return [`PageTableItem::NotMapped`].
|
||||
/// and return [`None`].
|
||||
///
|
||||
/// The caller should handle TLB coherence if necessary, using the returned
|
||||
/// virtual address range.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
@ -599,60 +523,17 @@ impl<'rcu, C: PageTableConfig> CursorMut<'rcu, C> {
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if the end range covers a part of a huge page
|
||||
/// and the next page is that huge page.
|
||||
pub unsafe fn take_next(&mut self, len: usize) -> PageTableItem {
|
||||
if self.0.find_next_impl(len, false, true).is_none() {
|
||||
return PageTableItem::NotMapped { va: self.0.va, len };
|
||||
};
|
||||
/// Panics if:
|
||||
/// - the length is longer than the remaining range of the cursor;
|
||||
/// - the length is not page-aligned.
|
||||
pub unsafe fn take_next(&mut self, len: usize) -> Option<PageTableFrag<C>> {
|
||||
self.0.find_next_impl(len, true, true)?;
|
||||
|
||||
let rcu_guard = self.0.rcu_guard;
|
||||
|
||||
// Unmap the current page and return it.
|
||||
let mut cur_entry = self.0.cur_entry();
|
||||
let old = cur_entry.replace(Child::None);
|
||||
let item = match old {
|
||||
Child::Frame(page, prop) => PageTableItem::Mapped {
|
||||
va: self.0.va,
|
||||
page,
|
||||
prop,
|
||||
},
|
||||
Child::Untracked(pa, level, prop) => {
|
||||
debug_assert_eq!(level, self.0.level);
|
||||
PageTableItem::MappedUntracked {
|
||||
va: self.0.va,
|
||||
pa,
|
||||
len: page_size::<C>(level),
|
||||
prop,
|
||||
}
|
||||
}
|
||||
Child::PageTable(pt) => {
|
||||
assert!(
|
||||
!(TypeId::of::<C>() == TypeId::of::<KernelPtConfig>()
|
||||
&& self.0.level == C::NR_LEVELS),
|
||||
"Unmapping shared kernel page table nodes"
|
||||
);
|
||||
|
||||
// SAFETY: The `pt` must be locked and no other guards exist.
|
||||
let locked_pt = unsafe { pt.borrow().make_guard_unchecked(rcu_guard) };
|
||||
// SAFETY:
|
||||
// - We checked that we are not unmapping shared kernel page table nodes.
|
||||
// - We must have locked the entire sub-tree since the range is locked.
|
||||
let num_pages = unsafe { locking::dfs_mark_stray_and_unlock(rcu_guard, locked_pt) };
|
||||
|
||||
PageTableItem::StrayPageTable {
|
||||
pt: (*pt).clone().into(),
|
||||
va: self.0.va,
|
||||
len: page_size::<C>(self.0.level),
|
||||
num_pages,
|
||||
}
|
||||
}
|
||||
Child::None | Child::PageTableRef(_) => unreachable!(),
|
||||
};
|
||||
let frag = self.replace_cur_entry(Child::None);
|
||||
|
||||
self.0.move_forward();
|
||||
|
||||
item
|
||||
frag
|
||||
}
|
||||
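Unmapping a range therefore becomes a loop that calls `take_next` until it returns `None`, flushing the TLB before each returned fragment is finally released. A standalone sketch of that loop shape, with a `BTreeMap` standing in for the page table:

```rust
// A toy "unmap everything in range" loop shaped like the take_next contract:
// each call removes at most one fragment and returns None when nothing is left.
use std::collections::BTreeMap;

struct ToyUnmapper {
    map: BTreeMap<usize, u64>, // va -> pa
    va: usize,
    end: usize,
}

impl ToyUnmapper {
    fn take_next(&mut self, len: usize) -> Option<(usize, u64)> {
        let end = (self.va + len).min(self.end);
        let (&va, _) = self.map.range(self.va..end).next()?;
        let pa = self.map.remove(&va).unwrap();
        self.va = va + 0x1000; // move past the removed page
        Some((va, pa))
    }
}

fn main() {
    let mut u = ToyUnmapper {
        map: BTreeMap::from([(0x1000, 0x8000u64), (0x3000, 0x9000)]),
        va: 0,
        end: 0x10000,
    };
    while let Some((va, pa)) = u.take_next(u.end - u.va) {
        // In the kernel, TLB invalidation for `va` happens before the
        // fragment (here just `pa`) is finally released.
        println!("unmapped {va:#x} -> {pa:#x}");
    }
}
```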
|
||||
/// Applies the operation to the next slot of mapping within the range.
|
||||
@ -670,36 +551,80 @@ impl<'rcu, C: PageTableConfig> CursorMut<'rcu, C> {
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// The caller should ensure that the range being protected with the
|
||||
/// operation does not affect kernel's memory safety.
|
||||
/// The caller should ensure that:
|
||||
/// - the range being protected with the operation does not affect
|
||||
/// kernel's memory safety;
|
||||
/// - the privileged flag `AVAIL1` should not be altered if in the kernel
|
||||
/// page table (the restriction may be lifted in the future).
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if:
|
||||
/// - the range to be protected is out of the range where the cursor
|
||||
/// is required to operate;
|
||||
/// - the specified virtual address range only covers a part of a page.
|
||||
/// Panics if:
|
||||
/// - the length is longer than the remaining range of the cursor;
|
||||
/// - the length is not page-aligned.
|
||||
pub unsafe fn protect_next(
|
||||
&mut self,
|
||||
len: usize,
|
||||
op: &mut impl FnMut(&mut PageProperty),
|
||||
) -> Option<Range<Vaddr>> {
|
||||
self.0.find_next_impl(len, true, true)?;
|
||||
self.0.find_next_impl(len, false, true)?;
|
||||
|
||||
let mut cur_entry = self.0.cur_entry();
|
||||
|
||||
// Protect the current page.
|
||||
cur_entry.protect(op);
|
||||
self.0.cur_entry().protect(op);
|
||||
|
||||
let protected_va = self.0.cur_va_range();
|
||||
|
||||
self.0.move_forward();
|
||||
|
||||
Some(protected_va)
|
||||
}
|
||||
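The protection callback mutates the page property in place, for example to strip the write bit over a range. A standalone sketch with a toy flag set; the bit layout is made up for illustration and is not the OSTD `PageFlags`:

```rust
// Toy page properties: bit 0 = R, bit 1 = W, bit 2 = X. The closure-based
// protect mirrors the shape of protect_next's `op` parameter.
#[derive(Clone, Copy, Debug)]
struct Prop {
    flags: u8,
}

const W: u8 = 0b010;

fn protect_range(props: &mut [Prop], op: &mut impl FnMut(&mut Prop)) {
    for p in props.iter_mut() {
        op(p); // protect_next would also skip unmapped slots and report the VA range
    }
}

fn main() {
    let mut props = [Prop { flags: 0b111 }, Prop { flags: 0b011 }];
    // Make the whole range read-only.
    protect_range(&mut props, &mut |p| p.flags &= !W);
    assert!(props.iter().all(|p| p.flags & W == 0));
    println!("{props:?}");
}
```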
|
||||
fn replace_cur_entry(&mut self, new_child: Child<C>) -> Option<PageTableFrag<C>> {
|
||||
let rcu_guard = self.0.rcu_guard;
|
||||
|
||||
let va = self.0.va;
|
||||
let level = self.0.level;
|
||||
|
||||
let old = self.0.cur_entry().replace(new_child);
|
||||
match old {
|
||||
Child::None => None,
|
||||
Child::Frame(pa, ch_level, prop) => {
|
||||
debug_assert_eq!(ch_level, level);
|
||||
|
||||
// SAFETY:
|
||||
// This is part of (if `split_huge` happens) a page table item mapped
|
||||
// with a previous call to `C::item_into_raw`, where:
|
||||
// - The physical address and the paging level match it;
|
||||
// - The item part is now unmapped so we can take its ownership.
|
||||
//
|
||||
// For page table configs that require the `AVAIL1` flag to be kept
|
||||
// (currently, only kernel page tables), the callers of the unsafe
|
||||
// `protect_next` method uphold this invariant.
|
||||
let item = unsafe { C::item_from_raw(pa, level, prop) };
|
||||
Some(PageTableFrag::Mapped { va, item })
|
||||
}
|
||||
Child::PageTable(pt) => {
|
||||
debug_assert_eq!(pt.level(), level - 1);
|
||||
|
||||
if !C::TOP_LEVEL_CAN_UNMAP && level == C::NR_LEVELS {
|
||||
let _ = ManuallyDrop::new(pt); // leak it to make shared PTs stay `'static`.
|
||||
panic!("Unmapping shared kernel page table nodes");
|
||||
}
|
||||
|
||||
fn should_map_as_tracked<C: PageTableConfig>(va: Vaddr) -> bool {
|
||||
TypeId::of::<C>() == TypeId::of::<KernelPtConfig>()
|
||||
&& crate::mm::kspace::should_map_as_tracked(va)
|
||||
|| TypeId::of::<C>() == TypeId::of::<UserPtConfig>()
|
||||
// SAFETY: We must have locked this node.
|
||||
let locked_pt = unsafe { pt.borrow().make_guard_unchecked(rcu_guard) };
|
||||
// SAFETY:
|
||||
// - We checked that we are not unmapping shared kernel page table nodes.
|
||||
// - We must have locked the entire sub-tree since the range is locked.
|
||||
let num_frames =
|
||||
unsafe { locking::dfs_mark_stray_and_unlock(rcu_guard, locked_pt) };
|
||||
|
||||
Some(PageTableFrag::StrayPageTable {
|
||||
pt: (*pt).clone().into(),
|
||||
va,
|
||||
len: page_size::<C>(self.0.level),
|
||||
num_frames,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -20,9 +20,10 @@ use crate::{
|
||||
|
||||
mod node;
|
||||
use node::*;
|
||||
pub mod cursor;
|
||||
pub(crate) use cursor::PageTableItem;
|
||||
pub use cursor::{Cursor, CursorMut};
|
||||
mod cursor;
|
||||
|
||||
pub(crate) use cursor::{Cursor, CursorMut, PageTableFrag};
|
||||
|
||||
#[cfg(ktest)]
|
||||
mod test;
|
||||
|
||||
@ -46,7 +47,18 @@ pub enum PageTableError {
|
||||
/// - the trackedness of physical mappings;
|
||||
/// - the PTE layout;
|
||||
/// - the number of page table levels, etc.
|
||||
pub(crate) trait PageTableConfig: Clone + Debug + Send + Sync + 'static {
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// The implementor must ensure that the `item_into_raw` and `item_from_raw`
|
||||
/// are implemented correctly so that:
|
||||
/// - `item_into_raw` consumes the ownership of the item;
|
||||
/// - if the provided raw form matches the item that was consumed by
|
||||
/// `item_into_raw`, `item_from_raw` restores the exact item that was
|
||||
/// consumed by `item_into_raw`.
|
||||
pub(crate) unsafe trait PageTableConfig:
|
||||
Clone + Debug + Send + Sync + 'static
|
||||
{
|
||||
/// The index range at the top level (`C::NR_LEVELS`) page table.
|
||||
///
|
||||
/// When configured with this value, the [`PageTable`] instance will only
|
||||
@ -55,8 +67,65 @@ pub(crate) trait PageTableConfig: Clone + Debug + Send + Sync + 'static {
|
||||
/// specified by the hardware MMU (limited by `C::ADDRESS_WIDTH`).
|
||||
const TOP_LEVEL_INDEX_RANGE: Range<usize>;
|
||||
|
||||
/// If we can remove the top-level page table entries.
|
||||
///
|
||||
/// This is for the kernel page table, whose second-top-level page
|
||||
/// tables need `'static` lifetime to be shared with user page tables.
|
||||
/// Other page tables do not need to set this to `false`.
|
||||
const TOP_LEVEL_CAN_UNMAP: bool = true;
|
||||
|
||||
/// The type of the page table entry.
|
||||
type E: PageTableEntryTrait;
|
||||
|
||||
/// The paging constants.
|
||||
type C: PagingConstsTrait;
|
||||
|
||||
/// The item that can be mapped into the virtual memory space using the
|
||||
/// page table.
|
||||
///
|
||||
/// Usually, this item is a [`crate::mm::Frame`], which we call a "tracked"
|
||||
/// frame. The page table can also do "untracked" mappings that only map
|
||||
/// to certain physical addresses without tracking the ownership of the
|
||||
/// mapped physical frame. The user of the page table APIs can choose by
|
||||
/// defining this type and the corresponding methods [`item_into_raw`] and
|
||||
/// [`item_from_raw`].
|
||||
///
|
||||
/// [`item_from_raw`]: PageTableConfig::item_from_raw
|
||||
/// [`item_into_raw`]: PageTableConfig::item_into_raw
|
||||
type Item: Clone;
|
||||
|
||||
/// Consumes the item and returns the physical address, the paging level,
|
||||
/// and the page property.
|
||||
///
|
||||
/// The ownership of the item will be consumed, i.e., the item will be
|
||||
/// forgotten after this function is called.
|
||||
fn item_into_raw(item: Self::Item) -> (Paddr, PagingLevel, PageProperty);
|
||||
|
||||
/// Restores the item from the physical address and the paging level.
|
||||
///
|
||||
/// There could be transformations after [`PageTableConfig::item_into_raw`]
|
||||
/// and before [`PageTableConfig::item_from_raw`], which include:
|
||||
/// - splitting and coalescing the items, for example, splitting one item
|
||||
/// into 512 `level - 1` items with contiguous physical addresses;
|
||||
/// - protecting the items, for example, changing the page property.
|
||||
///
|
||||
/// Splitting and coalescing maintains ownership rules, i.e., if one
|
||||
/// physical address is within the range of one item, after splitting/
|
||||
/// coalescing, there should be exactly one item that contains the address.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// The caller must ensure that:
|
||||
/// - the physical address and the paging level represent a page table
|
||||
/// item or part of it (as described above);
|
||||
/// - either the ownership of the item is properly transferred to the
|
||||
/// return value, or the return value is wrapped in a
|
||||
/// [`core::mem::ManuallyDrop`] that won't outlive the original item.
|
||||
///
|
||||
/// A concrete trait implementation may require the caller to ensure that
|
||||
/// - the [`super::PageFlags::AVAIL1`] flag is the same as that returned
|
||||
/// from [`PageTableConfig::item_into_raw`].
|
||||
unsafe fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item;
|
||||
}
|
||||
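The safety contract boils down to a strict ownership round trip: `item_into_raw` forgets the handle, and `item_from_raw` may rebuild it exactly once from the same raw form. A standalone sketch using `Arc` as a stand-in for a tracked frame handle (levels and properties omitted):

```rust
use std::sync::Arc;

// A tracked "frame" handle; the Arc's strong count plays the role of the
// frame reference count.
struct Page {
    data: [u8; 16],
}

// into_raw: consume ownership, hand back a raw form (a raw pointer here,
// where the real trait hands back a physical address, level, and property).
fn item_into_raw(item: Arc<Page>) -> *const Page {
    Arc::into_raw(item)
}

// from_raw: restore exactly the item that was consumed. Calling this twice
// for one into_raw, or with a made-up pointer, is undefined behavior --
// the same single-use rule the trait's safety section spells out.
unsafe fn item_from_raw(raw: *const Page) -> Arc<Page> {
    unsafe { Arc::from_raw(raw) }
}

fn main() {
    let page = Arc::new(Page { data: [0xAB; 16] });
    let raw = item_into_raw(Arc::clone(&page));
    assert_eq!(Arc::strong_count(&page), 2); // the "page table" owns one count
    // SAFETY: `raw` came from exactly one `item_into_raw` call above.
    let restored = unsafe { item_from_raw(raw) };
    assert_eq!(restored.data[0], 0xAB);
    assert_eq!(Arc::strong_count(&page), 2); // ownership moved back intact
}
```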
|
||||
// Implement it so that we can comfortably use low level functions
|
||||
@ -70,6 +139,62 @@ impl<C: PageTableConfig> PagingConstsTrait for C {
|
||||
const VA_SIGN_EXT: bool = C::C::VA_SIGN_EXT;
|
||||
}
|
||||
|
||||
/// Splits the address range into largest page table items.
|
||||
///
|
||||
/// Each of the returned items is a tuple of the physical address and the
|
||||
/// paging level. It is helpful when you want to map a physical address range
|
||||
/// into the provided virtual address.
|
||||
///
|
||||
/// For example, on x86-64, `C: PageTableConfig` may specify level 1 page as
|
||||
/// 4KiB, level 2 page as 2MiB, and level 3 page as 1GiB. Suppose that the
|
||||
/// supplied physical address range is from `0x3fdff000` to `0x80002000`,
|
||||
/// and the virtual address is also `0x3fdff000`, the following 5 items will
|
||||
/// be returned:
|
||||
///
|
||||
/// ```text
|
||||
/// 0x3fdff000 0x80002000
|
||||
/// start end
|
||||
/// |----|----------------|--------------------------------|----|----|
|
||||
/// 4KiB 2MiB 1GiB 4KiB 4KiB
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if:
|
||||
/// - any of `va`, `pa`, or `len` is not aligned to the base page size;
|
||||
/// - the range `va..(va + len)` is not valid for the page table.
|
||||
pub(crate) fn largest_pages<C: PageTableConfig>(
|
||||
mut va: Vaddr,
|
||||
mut pa: Paddr,
|
||||
mut len: usize,
|
||||
) -> impl Iterator<Item = (Paddr, PagingLevel)> {
|
||||
assert_eq!(va % C::BASE_PAGE_SIZE, 0);
|
||||
assert_eq!(pa % C::BASE_PAGE_SIZE, 0);
|
||||
assert_eq!(len % C::BASE_PAGE_SIZE, 0);
|
||||
assert!(is_valid_range::<C>(&(va..(va + len))));
|
||||
|
||||
core::iter::from_fn(move || {
|
||||
if len == 0 {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut level = C::HIGHEST_TRANSLATION_LEVEL;
|
||||
while page_size::<C>(level) > len
|
||||
|| va % page_size::<C>(level) != 0
|
||||
|| pa % page_size::<C>(level) != 0
|
||||
{
|
||||
level -= 1;
|
||||
}
|
||||
|
||||
let item_start = pa;
|
||||
va += page_size::<C>(level);
|
||||
pa += page_size::<C>(level);
|
||||
len -= page_size::<C>(level);
|
||||
|
||||
Some((item_start, level))
|
||||
})
|
||||
}
|
||||
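For the range used in the doc comment above, the greedy split produces exactly the 4 KiB / 2 MiB / 1 GiB / 4 KiB / 4 KiB sequence shown in the diagram. A standalone re-implementation of the same loop with fixed x86-64-style page sizes, for illustration only:

```rust
// Greedy split of a physical range into the largest naturally aligned pages,
// mirroring the loop above with fixed sizes (4 KiB / 2 MiB / 1 GiB).
const SIZES: [usize; 3] = [1 << 30, 2 << 20, 4 << 10]; // largest first

fn largest_pages(mut va: usize, mut pa: usize, mut len: usize) -> Vec<(usize, usize)> {
    assert_eq!(va % 4096, 0);
    assert_eq!(pa % 4096, 0);
    assert_eq!(len % 4096, 0);
    let mut out = Vec::new();
    while len > 0 {
        // Pick the largest size that fits and is aligned on both VA and PA;
        // the 4 KiB base page always qualifies.
        let size = *SIZES
            .iter()
            .find(|&&s| s <= len && va % s == 0 && pa % s == 0)
            .unwrap();
        out.push((pa, size));
        va += size;
        pa += size;
        len -= size;
    }
    out
}

fn main() {
    // The example from the doc comment: 0x3fdff000..0x80002000 mapped at the
    // same VA splits into 4 KiB + 2 MiB + 1 GiB + 4 KiB + 4 KiB pieces.
    for (pa, size) in largest_pages(0x3fdf_f000, 0x3fdf_f000, 0x80002000 - 0x3fdff000) {
        println!("pa {pa:#x}, size {size:#x}");
    }
}
```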
|
||||
/// Gets the managed virtual addresses range for the page table.
|
||||
///
|
||||
/// It returns a [`RangeInclusive`] because the end address, if being
|
||||
@ -104,27 +229,27 @@ const fn vaddr_range<C: PageTableConfig>() -> RangeInclusive<Vaddr> {
|
||||
let mut start = pt_va_range_start::<C>();
|
||||
let mut end = pt_va_range_end::<C>();
|
||||
|
||||
if C::VA_SIGN_EXT {
|
||||
const {
|
||||
assert!(
|
||||
sign_bit_of_va::<C>(pt_va_range_start::<C>())
|
||||
== sign_bit_of_va::<C>(pt_va_range_end::<C>())
|
||||
!C::VA_SIGN_EXT
|
||||
|| sign_bit_of_va::<C>(pt_va_range_start::<C>())
|
||||
== sign_bit_of_va::<C>(pt_va_range_end::<C>()),
|
||||
"The sign bit of both range endpoints must be the same if sign extension is enabled"
|
||||
)
|
||||
}
|
||||
|
||||
if sign_bit_of_va::<C>(pt_va_range_start::<C>()) {
|
||||
if C::VA_SIGN_EXT && sign_bit_of_va::<C>(pt_va_range_start::<C>()) {
|
||||
start |= !0 ^ ((1 << C::ADDRESS_WIDTH) - 1);
|
||||
end |= !0 ^ ((1 << C::ADDRESS_WIDTH) - 1);
|
||||
}
|
||||
}
|
||||
|
||||
start..=end
|
||||
}
|
||||
|
||||
/// Check if the given range is covered by the valid range of the page table.
|
||||
/// Checks if the given range is covered by the valid range of the page table.
|
||||
const fn is_valid_range<C: PageTableConfig>(r: &Range<Vaddr>) -> bool {
|
||||
let va_range = vaddr_range::<C>();
|
||||
*va_range.start() <= r.start && (r.end == 0 || r.end - 1 <= *va_range.end())
|
||||
(r.start == 0 && r.end == 0) || (*va_range.start() <= r.start && r.end - 1 <= *va_range.end())
|
||||
}
|
||||
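The sign-extension step fills the upper bits above `ADDRESS_WIDTH` so that upper-half addresses stay canonical. A small standalone check of that computation for a 48-bit address width; the values are chosen for illustration:

```rust
// Sign-extending a 48-bit virtual address the way `vaddr_range` does:
// if the top in-width bit is set, fill the upper bits of the full word.
const ADDRESS_WIDTH: u32 = 48;

fn sign_extend(va: u64) -> u64 {
    let sign_bit = (va >> (ADDRESS_WIDTH - 1)) & 1 != 0;
    if sign_bit {
        va | (!0u64 ^ ((1u64 << ADDRESS_WIDTH) - 1))
    } else {
        va
    }
}

fn main() {
    // The upper half of a 48-bit space starts at 0x8000_0000_0000 and becomes
    // 0xffff_8000_0000_0000 after extension; lower-half addresses are unchanged.
    assert_eq!(sign_extend(0x0000_8000_0000_0000), 0xffff_8000_0000_0000);
    assert_eq!(sign_extend(0x0000_7fff_ffff_f000), 0x0000_7fff_ffff_f000);
    println!("ok");
}
```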
|
||||
// Here are some const values that are determined by the paging constants.
|
||||
@ -177,16 +302,7 @@ impl PageTable<KernelPtConfig> {
|
||||
|
||||
for i in KernelPtConfig::TOP_LEVEL_INDEX_RANGE {
|
||||
let mut root_entry = root_node.entry(i);
|
||||
let is_tracked = if super::kspace::should_map_as_tracked(
|
||||
i * page_size::<PagingConsts>(PagingConsts::NR_LEVELS - 1),
|
||||
) {
|
||||
MapTrackingStatus::Tracked
|
||||
} else {
|
||||
MapTrackingStatus::Untracked
|
||||
};
|
||||
let _ = root_entry
|
||||
.alloc_if_none(&preempt_guard, is_tracked)
|
||||
.unwrap();
|
||||
let _ = root_entry.alloc_if_none(&preempt_guard).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
@ -198,17 +314,24 @@ impl PageTable<KernelPtConfig> {
|
||||
/// This should be the only way to create the user page table, that is to
|
||||
/// duplicate the kernel page table with all the kernel mappings shared.
|
||||
pub(in crate::mm) fn create_user_page_table(&'static self) -> PageTable<UserPtConfig> {
|
||||
let new_root =
|
||||
PageTableNode::alloc(PagingConsts::NR_LEVELS, MapTrackingStatus::NotApplicable);
|
||||
let new_root = PageTableNode::alloc(PagingConsts::NR_LEVELS);
|
||||
|
||||
let preempt_guard = disable_preempt();
|
||||
let mut root_node = self.root.borrow().lock(&preempt_guard);
|
||||
let mut new_node = new_root.borrow().lock(&preempt_guard);
|
||||
|
||||
const {
|
||||
assert!(!KernelPtConfig::TOP_LEVEL_CAN_UNMAP);
|
||||
assert!(
|
||||
UserPtConfig::TOP_LEVEL_INDEX_RANGE.end
|
||||
<= KernelPtConfig::TOP_LEVEL_INDEX_RANGE.start
|
||||
);
|
||||
}
|
||||
|
||||
for i in KernelPtConfig::TOP_LEVEL_INDEX_RANGE {
|
||||
let root_entry = root_node.entry(i);
|
||||
let child = root_entry.to_ref();
|
||||
let Child::PageTableRef(pt) = child else {
|
||||
let ChildRef::PageTable(pt) = child else {
|
||||
panic!("The kernel page table doesn't contain shared nodes");
|
||||
};
|
||||
|
||||
@ -218,7 +341,8 @@ impl PageTable<KernelPtConfig> {
|
||||
// See also `<PageTablePageMeta as AnyFrameMeta>::on_drop`.
|
||||
let pt_addr = pt.start_paddr();
|
||||
let pte = PageTableEntry::new_pt(pt_addr);
|
||||
// SAFETY: The index is within the bounds and the new PTE is compatible.
|
||||
// SAFETY: The index is within the bounds and the level of the new
|
||||
// PTE matches the node.
|
||||
unsafe { new_node.write_pte(i, pte) };
|
||||
}
|
||||
drop(new_node);
|
||||
@ -257,7 +381,7 @@ impl<C: PageTableConfig> PageTable<C> {
|
||||
/// Useful for the IOMMU page tables only.
|
||||
pub fn empty() -> Self {
|
||||
PageTable {
|
||||
root: PageTableNode::<C>::alloc(C::NR_LEVELS, MapTrackingStatus::NotApplicable),
|
||||
root: PageTableNode::<C>::alloc(C::NR_LEVELS),
|
||||
}
|
||||
}
|
||||
|
||||
@ -275,26 +399,13 @@ impl<C: PageTableConfig> PageTable<C> {
|
||||
self.root.start_paddr()
|
||||
}
|
||||
|
||||
pub unsafe fn map(
|
||||
&self,
|
||||
vaddr: &Range<Vaddr>,
|
||||
paddr: &Range<Paddr>,
|
||||
prop: PageProperty,
|
||||
) -> Result<(), PageTableError> {
|
||||
let preempt_guard = disable_preempt();
|
||||
let mut cursor = self.cursor_mut(&preempt_guard, vaddr)?;
|
||||
// SAFETY: The safety is upheld by the caller.
|
||||
unsafe { cursor.map_pa(paddr, prop) };
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Query about the mapping of a single byte at the given virtual address.
|
||||
///
|
||||
/// Note that this function may fail to reflect an accurate result if there are
|
||||
/// cursors concurrently accessing the same virtual address range, just like what
|
||||
/// happens for the hardware MMU walk.
|
||||
#[cfg(ktest)]
|
||||
pub fn query(&self, vaddr: Vaddr) -> Option<(Paddr, PageProperty)> {
|
||||
pub fn page_walk(&self, vaddr: Vaddr) -> Option<(Paddr, PageProperty)> {
|
||||
// SAFETY: The root node is a valid page table node so the address is valid.
|
||||
unsafe { page_walk::<C>(self.root_paddr(), vaddr) }
|
||||
}
|
||||
|
@ -2,106 +2,53 @@
|
||||
|
||||
//! This module specifies the type of the children of a page table node.
|
||||
|
||||
use core::{mem::ManuallyDrop, panic};
|
||||
use core::mem::ManuallyDrop;
|
||||
|
||||
use super::{MapTrackingStatus, PageTableEntryTrait, PageTableNode};
|
||||
use super::{PageTableEntryTrait, PageTableNode, PageTableNodeRef};
|
||||
use crate::{
|
||||
mm::{
|
||||
frame::{inc_frame_ref_count, meta::AnyFrameMeta, Frame},
|
||||
page_prop::PageProperty,
|
||||
page_table::{PageTableConfig, PageTableNodeRef},
|
||||
Paddr, PagingLevel,
|
||||
},
|
||||
mm::{page_prop::PageProperty, page_table::PageTableConfig, Paddr, PagingLevel},
|
||||
sync::RcuDrop,
|
||||
};
|
||||
|
||||
/// A child of a page table node.
|
||||
// TODO: Distinguish between the reference and the owning child.
|
||||
/// A page table entry that owns the child of a page table node if present.
|
||||
#[derive(Debug)]
|
||||
pub(in crate::mm) enum Child<'a, C: PageTableConfig> {
|
||||
/// An owning handle to a raw page table node.
|
||||
pub(in crate::mm) enum Child<C: PageTableConfig> {
|
||||
/// A child page table node.
|
||||
PageTable(RcuDrop<PageTableNode<C>>),
|
||||
/// A reference of a child page table node.
|
||||
PageTableRef(PageTableNodeRef<'a, C>),
|
||||
/// A mapped frame.
|
||||
Frame(Frame<dyn AnyFrameMeta>, PageProperty),
|
||||
/// Mapped frames that are not tracked by handles.
|
||||
Untracked(Paddr, PagingLevel, PageProperty),
|
||||
/// Physical address of a mapped physical frame.
|
||||
///
|
||||
/// It is associated with the virtual page property and the level of the
|
||||
/// mapping node, which decides the size of the frame.
|
||||
Frame(Paddr, PagingLevel, PageProperty),
|
||||
None,
|
||||
}
|
||||
|
||||
impl<C: PageTableConfig> Child<'_, C> {
|
||||
/// Returns whether the child does not map to anything.
|
||||
impl<C: PageTableConfig> Child<C> {
|
||||
/// Returns whether the child is not present.
|
||||
pub(in crate::mm) fn is_none(&self) -> bool {
|
||||
matches!(self, Child::None)
|
||||
}
|
||||
|
||||
/// Returns whether the child is compatible with the given node.
|
||||
///
|
||||
/// In other words, it checks whether the child can be a child of a node
|
||||
/// with the given level and tracking status.
|
||||
pub(super) fn is_compatible(
|
||||
&self,
|
||||
node_level: PagingLevel,
|
||||
is_tracked: MapTrackingStatus,
|
||||
) -> bool {
|
||||
match self {
|
||||
Child::PageTable(pt) => node_level == pt.level() + 1,
|
||||
Child::PageTableRef(_) => false,
|
||||
Child::Frame(p, _) => {
|
||||
node_level == p.map_level() && is_tracked == MapTrackingStatus::Tracked
|
||||
}
|
||||
Child::Untracked(_, level, _) => {
|
||||
node_level == *level && is_tracked == MapTrackingStatus::Untracked
|
||||
}
|
||||
Child::None => true,
|
||||
}
|
||||
}
|
||||
|
||||
/// Converts a child into an owning PTE.
|
||||
///
|
||||
/// By conversion it loses information about whether the page is tracked
|
||||
/// or not. Also it loses the level information. However, the returned PTE
|
||||
/// takes the ownership (reference count) of the child.
|
||||
///
|
||||
/// Usually this is for recording the PTE into a page table node. When the
|
||||
/// child is needed again by reading the PTE of a page table node, extra
|
||||
/// information should be provided using the [`Child::from_pte`] method.
|
||||
pub(super) fn into_pte(self) -> C::E {
|
||||
match self {
|
||||
Child::PageTable(pt) => {
|
||||
let pt = ManuallyDrop::new(pt);
|
||||
C::E::new_pt(pt.start_paddr())
|
||||
Child::PageTable(node) => {
|
||||
let paddr = node.start_paddr();
|
||||
let _ = ManuallyDrop::new(node);
|
||||
C::E::new_pt(paddr)
|
||||
}
|
||||
Child::PageTableRef(_) => {
|
||||
panic!("`PageTableRef` should not be converted to PTE");
|
||||
}
|
||||
Child::Frame(page, prop) => {
|
||||
let level = page.map_level();
|
||||
C::E::new_page(page.into_raw(), level, prop)
|
||||
}
|
||||
Child::Untracked(pa, level, prop) => C::E::new_page(pa, level, prop),
|
||||
Child::Frame(paddr, level, prop) => C::E::new_page(paddr, level, prop),
|
||||
Child::None => C::E::new_absent(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Converts a PTE back to a child.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// The provided PTE must be originated from [`Child::into_pte`]. And the
|
||||
/// provided information (level and tracking status) must be the same with
|
||||
/// the lost information during the conversion. Strictly speaking, the
|
||||
/// provided arguments must be compatible with the original child (
|
||||
/// specified by [`Child::is_compatible`]).
|
||||
/// The provided PTE must be the output of [`Self::into_pte`], and the PTE:
|
||||
/// - must not be used to create a [`Child`] twice;
|
||||
/// - must not be referenced by a living [`ChildRef`].
|
||||
///
|
||||
/// This method should be only used no more than once for a PTE that has
|
||||
/// been converted from a child using the [`Child::into_pte`] method.
|
||||
pub(super) unsafe fn from_pte(
|
||||
pte: C::E,
|
||||
level: PagingLevel,
|
||||
is_tracked: MapTrackingStatus,
|
||||
) -> Self {
|
||||
/// The level must match the original level of the child.
|
||||
pub(super) unsafe fn from_pte(pte: C::E, level: PagingLevel) -> Self {
|
||||
if !pte.is_present() {
|
||||
return Child::None;
|
||||
}
|
||||
@ -109,66 +56,56 @@ impl<C: PageTableConfig> Child<'_, C> {
|
||||
let paddr = pte.paddr();
|
||||
|
||||
if !pte.is_last(level) {
|
||||
// SAFETY: The physical address points to a valid page table node
|
||||
// at the given level.
|
||||
let pt = unsafe { PageTableNode::from_raw(paddr) };
|
||||
debug_assert_eq!(pt.level(), level - 1);
|
||||
return Child::PageTable(RcuDrop::new(pt));
|
||||
// SAFETY: The caller ensures that this node was created by
|
||||
// `into_pte`, so that restoring the forgotten reference is safe.
|
||||
let node = unsafe { PageTableNode::from_raw(paddr) };
|
||||
debug_assert_eq!(node.level(), level - 1);
|
||||
return Child::PageTable(RcuDrop::new(node));
|
||||
}
|
||||
|
||||
match is_tracked {
|
||||
MapTrackingStatus::Tracked => {
|
||||
// SAFETY: The physical address points to a valid page.
|
||||
let page = unsafe { Frame::<dyn AnyFrameMeta>::from_raw(paddr) };
|
||||
Child::Frame(page, pte.prop())
|
||||
}
|
||||
MapTrackingStatus::Untracked => Child::Untracked(paddr, level, pte.prop()),
|
||||
MapTrackingStatus::NotApplicable => panic!("Invalid tracking status"),
|
||||
Child::Frame(paddr, level, pte.prop())
|
||||
}
|
||||
}
|
||||
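The key point of `into_pte`/`from_pte` is that the PTE encoding drops the paging level, so the caller of `from_pte` must re-supply the level it knows from the containing node. A toy encoding showing this round trip; the bit layout is invented for illustration and is not the real PTE format:

```rust
// A toy PTE encoding that, like the real one, does not record the paging
// level: the caller of `from_pte` must supply the level of the node that
// holds the PTE.
const PRESENT: u64 = 1 << 0;
const LAST: u64 = 1 << 7; // "maps a page" vs. "points to a child table"

#[derive(Debug, PartialEq)]
enum Child {
    None,
    PageTable { paddr: u64, level: u8 },
    Frame { paddr: u64, level: u8 },
}

// Consumes the child; the level information is lost in the encoding.
fn into_pte(child: Child) -> u64 {
    match child {
        Child::None => 0,
        Child::PageTable { paddr, .. } => paddr | PRESENT,
        Child::Frame { paddr, .. } => paddr | PRESENT | LAST,
    }
}

// `level` is the level of the node holding the PTE, exactly the extra
// information the real `Child::from_pte` requires.
fn from_pte(pte: u64, level: u8) -> Child {
    if pte & PRESENT == 0 {
        return Child::None;
    }
    let paddr = pte & !(PRESENT | LAST);
    if pte & LAST == 0 {
        Child::PageTable { paddr, level: level - 1 }
    } else {
        Child::Frame { paddr, level }
    }
}

fn main() {
    let pte = into_pte(Child::Frame { paddr: 0x8000, level: 2 });
    // The level is re-supplied by the caller, not read from the PTE.
    assert_eq!(from_pte(pte, 2), Child::Frame { paddr: 0x8000, level: 2 });
    println!("pte = {pte:#x}");
}
```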
|
||||
/// Gains an extra reference to the child.
|
||||
/// A reference to the child of a page table node.
|
||||
#[derive(Debug)]
|
||||
pub(in crate::mm) enum ChildRef<'a, C: PageTableConfig> {
|
||||
/// A child page table node.
|
||||
PageTable(PageTableNodeRef<'a, C>),
|
||||
/// Physical address of a mapped physical frame.
|
||||
///
|
||||
/// If the child is a frame, it increases the reference count of the frame.
|
||||
///
|
||||
/// If the child is a page table node, it returns a [`PageTableNodeRef`],
|
||||
/// thus not affecting the reference count of the page table node.
|
||||
/// It is associated with the virtual page property and the level of the
|
||||
/// mapping node, which decides the size of the frame.
|
||||
Frame(Paddr, PagingLevel, PageProperty),
|
||||
None,
|
||||
}
|
||||
|
||||
impl<C: PageTableConfig> ChildRef<'_, C> {
|
||||
/// Converts a PTE to a child.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// The provided PTE must be originated from [`Child::into_pte`], which is
|
||||
/// the same requirement as the [`Child::from_pte`] method.
|
||||
/// The PTE must be the output of a [`Child::into_pte`], where the child
|
||||
/// outlives the reference created by this function.
|
||||
///
|
||||
/// This method must not be used with a PTE that has been restored to a
|
||||
/// child using the [`Child::from_pte`] method.
|
||||
pub(super) unsafe fn ref_from_pte(
|
||||
pte: &C::E,
|
||||
level: PagingLevel,
|
||||
is_tracked: MapTrackingStatus,
|
||||
) -> Self {
|
||||
/// The provided level must be the same with the level of the page table
|
||||
/// node that contains this PTE.
|
||||
pub(super) unsafe fn from_pte(pte: &C::E, level: PagingLevel) -> Self {
|
||||
if !pte.is_present() {
|
||||
return Child::None;
|
||||
return ChildRef::None;
|
||||
}
|
||||
|
||||
let paddr = pte.paddr();
|
||||
|
||||
if !pte.is_last(level) {
|
||||
// SAFETY: If the caller ensures that the PTE is from a `Child`,
|
||||
// restoring the reference is safe.
|
||||
return Child::PageTableRef(unsafe { PageTableNodeRef::borrow_paddr(paddr) });
|
||||
// SAFETY: The caller ensures that the lifetime of the child is
|
||||
// contained by the residing node, and the physical address is
|
||||
// valid since the entry is present.
|
||||
let node = unsafe { PageTableNodeRef::borrow_paddr(paddr) };
|
||||
debug_assert_eq!(node.level(), level - 1);
|
||||
return ChildRef::PageTable(node);
|
||||
}
|
||||
|
||||
match is_tracked {
|
||||
MapTrackingStatus::Tracked => {
|
||||
// SAFETY: The physical address is valid and the PTE already owns
|
||||
// the reference to the page.
|
||||
unsafe { inc_frame_ref_count(paddr) };
|
||||
// SAFETY: The physical address points to a valid page.
|
||||
let page = unsafe { Frame::<dyn AnyFrameMeta>::from_raw(paddr) };
|
||||
Child::Frame(page, pte.prop())
|
||||
}
|
||||
MapTrackingStatus::Untracked => Child::Untracked(paddr, level, pte.prop()),
|
||||
MapTrackingStatus::NotApplicable => panic!("Invalid tracking status"),
|
||||
}
|
||||
ChildRef::Frame(paddr, level, pte.prop())
|
||||
}
|
||||
}
|
||||
|
@ -4,7 +4,7 @@
|
||||
|
||||
use core::mem::ManuallyDrop;
|
||||
|
||||
use super::{Child, MapTrackingStatus, PageTableEntryTrait, PageTableGuard, PageTableNode};
|
||||
use super::{Child, ChildRef, PageTableEntryTrait, PageTableGuard, PageTableNode};
|
||||
use crate::{
|
||||
mm::{
|
||||
nr_subpage_per_huge,
|
||||
@ -50,10 +50,11 @@ impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
|
||||
}
|
||||
|
||||
/// Gets a reference to the child.
|
||||
pub(in crate::mm) fn to_ref(&self) -> Child<'rcu, C> {
|
||||
// SAFETY: The entry structure represents an existent entry with the
|
||||
// right node information.
|
||||
unsafe { Child::ref_from_pte(&self.pte, self.node.level(), self.node.is_tracked()) }
|
||||
pub(in crate::mm) fn to_ref(&self) -> ChildRef<'rcu, C> {
|
||||
// SAFETY:
|
||||
// - The PTE outlives the reference (since we have `&self`).
|
||||
// - The level matches the current node.
|
||||
unsafe { ChildRef::from_pte(&self.pte, self.node.level()) }
|
||||
}
|
||||
|
||||
/// Operates on the mapping properties of the entry.
|
||||
@ -77,7 +78,7 @@ impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
|
||||
// SAFETY:
|
||||
// 1. The index is within the bounds.
|
||||
// 2. We replace the PTE with a new one, which differs only in
|
||||
// `PageProperty`, so it is still compatible with the current
|
||||
// `PageProperty`, so the level still matches the current
|
||||
// page table node.
|
||||
unsafe { self.node.write_pte(self.idx, self.pte) };
|
||||
}
|
||||
@ -88,16 +89,23 @@ impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// The method panics if the given child is not compatible with the node.
|
||||
/// The compatibility is specified by the [`Child::is_compatible`].
|
||||
pub(in crate::mm) fn replace(&mut self, new_child: Child<'rcu, C>) -> Child<'rcu, C> {
|
||||
assert!(new_child.is_compatible(self.node.level(), self.node.is_tracked()));
|
||||
/// The method panics if the level of the new child does not match the
|
||||
/// current node.
|
||||
pub(in crate::mm) fn replace(&mut self, new_child: Child<C>) -> Child<C> {
|
||||
match &new_child {
|
||||
Child::PageTable(node) => {
|
||||
assert_eq!(node.level(), self.node.level() - 1);
|
||||
}
|
||||
Child::Frame(_, level, _) => {
|
||||
assert_eq!(*level, self.node.level());
|
||||
}
|
||||
Child::None => {}
|
||||
}
|
||||
|
||||
// SAFETY: The entry structure represents an existent entry with the
|
||||
// right node information. The old PTE is overwritten by the new child
|
||||
// so that it is not used anymore.
|
||||
let old_child =
|
||||
unsafe { Child::from_pte(self.pte, self.node.level(), self.node.is_tracked()) };
|
||||
// SAFETY:
|
||||
// - The PTE is not referenced by other `ChildRef`s (since we have `&mut self`).
|
||||
// - The level matches the current node.
|
||||
let old_child = unsafe { Child::from_pte(self.pte, self.node.level()) };
|
||||
|
||||
if old_child.is_none() && !new_child.is_none() {
|
||||
*self.node.nr_children_mut() += 1;
|
||||
@ -109,7 +117,7 @@ impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
|
||||
|
||||
// SAFETY:
|
||||
// 1. The index is within the bounds.
|
||||
// 2. The new PTE is compatible with the page table node, as asserted above.
|
||||
// 2. The new PTE is a valid child whose level matches the current page table node.
|
||||
unsafe { self.node.write_pte(self.idx, new_pte) };
|
||||
|
||||
self.pte = new_pte;
|
||||
@ -124,21 +132,20 @@ impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
|
||||
pub(in crate::mm::page_table) fn alloc_if_none(
|
||||
&mut self,
|
||||
guard: &'rcu dyn InAtomicMode,
|
||||
new_pt_is_tracked: MapTrackingStatus,
|
||||
) -> Option<PageTableGuard<'rcu, C>> {
|
||||
if !(self.is_none() && self.node.level() > 1) {
|
||||
return None;
|
||||
}
|
||||
|
||||
let level = self.node.level();
|
||||
let new_page = PageTableNode::<C>::alloc(level - 1, new_pt_is_tracked);
|
||||
let new_page = PageTableNode::<C>::alloc(level - 1);
|
||||
|
||||
let paddr = new_page.start_paddr();
|
||||
let _ = ManuallyDrop::new(new_page.borrow().lock(guard));
|
||||
|
||||
// SAFETY:
|
||||
// 1. The index is within the bounds.
|
||||
// 2. The new PTE is compatible with the page table node.
|
||||
// 2. The new PTE is a valid child whose level matches the current page table node.
|
||||
unsafe {
|
||||
self.node.write_pte(
|
||||
self.idx,
|
||||
@ -155,37 +162,34 @@ impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
|
||||
Some(unsafe { pt_ref.make_guard_unchecked(guard) })
|
||||
}
|
||||
|
||||
/// Splits the entry to smaller pages if it maps to a untracked huge page.
|
||||
/// Splits the entry to smaller pages if it maps to a huge page.
|
||||
///
|
||||
/// If the entry does map to a untracked huge page, it is split into smaller
|
||||
/// pages mapped by a child page table node. The new child page table node
|
||||
/// If the entry does map to a huge page, it is split into smaller pages
|
||||
/// mapped by a child page table node. The new child page table node
|
||||
/// is returned.
|
||||
///
|
||||
/// If the entry does not map to a untracked huge page, the method returns
|
||||
/// `None`.
|
||||
pub(in crate::mm::page_table) fn split_if_untracked_huge(
|
||||
pub(in crate::mm::page_table) fn split_if_mapped_huge(
|
||||
&mut self,
|
||||
guard: &'rcu dyn InAtomicMode,
|
||||
) -> Option<PageTableGuard<'rcu, C>> {
|
||||
let level = self.node.level();
|
||||
|
||||
if !(self.pte.is_last(level)
|
||||
&& level > 1
|
||||
&& self.node.is_tracked() == MapTrackingStatus::Untracked)
|
||||
{
|
||||
if !(self.pte.is_last(level) && level > 1) {
|
||||
return None;
|
||||
}
|
||||
|
||||
let pa = self.pte.paddr();
|
||||
let prop = self.pte.prop();
|
||||
|
||||
let new_page = PageTableNode::<C>::alloc(level - 1, MapTrackingStatus::Untracked);
|
||||
let new_page = PageTableNode::<C>::alloc(level - 1);
|
||||
let mut pt_lock_guard = new_page.borrow().lock(guard);
|
||||
|
||||
for i in 0..nr_subpage_per_huge::<C>() {
|
||||
let small_pa = pa + i * page_size::<C>(level - 1);
|
||||
let mut entry = pt_lock_guard.entry(i);
|
||||
let old = entry.replace(Child::Untracked(small_pa, level - 1, prop));
|
||||
let old = entry.replace(Child::Frame(small_pa, level - 1, prop));
|
||||
debug_assert!(old.is_none());
|
||||
}
|
||||
|
||||
@ -194,7 +198,7 @@ impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
|
||||
|
||||
// SAFETY:
|
||||
// 1. The index is within the bounds.
|
||||
// 2. The new PTE is compatible with the page table node.
|
||||
// 2. The new PTE is a valid child whose level matches the current page table node.
|
||||
unsafe {
|
||||
self.node.write_pte(
|
||||
self.idx,
|
||||
|
@ -35,7 +35,10 @@ use core::{
|
||||
sync::atomic::{AtomicU8, Ordering},
|
||||
};
|
||||
|
||||
pub(in crate::mm) use self::{child::Child, entry::Entry};
|
||||
pub(in crate::mm) use self::{
|
||||
child::{Child, ChildRef},
|
||||
entry::Entry,
|
||||
};
|
||||
use super::{nr_subpage_per_huge, PageTableConfig, PageTableEntryTrait};
|
||||
use crate::{
|
||||
mm::{
|
||||
@ -63,13 +66,9 @@ impl<C: PageTableConfig> PageTableNode<C> {
|
||||
self.meta().level
|
||||
}
|
||||
|
||||
pub(super) fn is_tracked(&self) -> MapTrackingStatus {
|
||||
self.meta().is_tracked
|
||||
}
|
||||
|
||||
/// Allocates a new empty page table node.
|
||||
pub(super) fn alloc(level: PagingLevel, is_tracked: MapTrackingStatus) -> Self {
|
||||
let meta = PageTablePageMeta::new(level, is_tracked);
|
||||
pub(super) fn alloc(level: PagingLevel) -> Self {
|
||||
let meta = PageTablePageMeta::new(level);
|
||||
let frame = FrameAllocOptions::new()
|
||||
.zeroed(true)
|
||||
.alloc_frame_with(meta)
|
||||
@ -233,8 +232,8 @@ impl<'rcu, C: PageTableConfig> PageTableGuard<'rcu, C> {
|
||||
///
|
||||
/// The caller must ensure that:
|
||||
/// 1. The index must be within the bound;
|
||||
/// 2. The PTE must represent a child compatible with this page table node
|
||||
/// (see [`Child::is_compatible`]).
|
||||
/// 2. The PTE must represent a valid [`Child`] whose level is compatible
|
||||
/// with the page table node.
|
||||
pub(super) unsafe fn write_pte(&mut self, idx: usize, pte: C::E) {
|
||||
debug_assert!(idx < nr_subpage_per_huge::<C>());
|
||||
let ptr = paddr_to_vaddr(self.start_paddr()) as *mut C::E;
|
||||
@ -282,35 +281,16 @@ pub(in crate::mm) struct PageTablePageMeta<C: PageTableConfig> {
|
||||
pub level: PagingLevel,
|
||||
/// The lock for the page table page.
|
||||
pub lock: AtomicU8,
|
||||
/// Whether the pages mapped by the node is tracked.
|
||||
pub is_tracked: MapTrackingStatus,
|
||||
_phantom: core::marker::PhantomData<C>,
|
||||
}
|
||||
|
||||
/// Describe if the physical address recorded in this page table refers to a
|
||||
/// page tracked by metadata.
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
#[repr(u8)]
|
||||
pub(in crate::mm) enum MapTrackingStatus {
|
||||
/// The page table node cannot contain references to any pages. It can only
|
||||
/// contain references to child page table nodes.
|
||||
NotApplicable,
|
||||
/// The mapped pages are not tracked by metadata. If any child page table
|
||||
/// nodes exist, they should also be tracked.
|
||||
Untracked,
|
||||
/// The mapped pages are tracked by metadata. If any child page table nodes
|
||||
/// exist, they should also be tracked.
|
||||
Tracked,
|
||||
}
|
||||
|
||||
impl<C: PageTableConfig> PageTablePageMeta<C> {
|
||||
pub fn new(level: PagingLevel, is_tracked: MapTrackingStatus) -> Self {
|
||||
pub fn new(level: PagingLevel) -> Self {
|
||||
Self {
|
||||
nr_children: SyncUnsafeCell::new(0),
|
||||
stray: SyncUnsafeCell::new(false),
|
||||
level,
|
||||
lock: AtomicU8::new(0),
|
||||
is_tracked,
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
@ -327,7 +307,6 @@ unsafe impl<C: PageTableConfig> AnyFrameMeta for PageTablePageMeta<C> {
|
||||
}
|
||||
|
||||
let level = self.level;
|
||||
let is_tracked = self.is_tracked;
|
||||
|
||||
// Drop the children.
|
||||
let range = if level == C::NR_LEVELS {
|
||||
@ -348,10 +327,10 @@ unsafe impl<C: PageTableConfig> AnyFrameMeta for PageTablePageMeta<C> {
|
||||
// SAFETY: The PTE points to a page table node. The ownership
|
||||
// of the child is transferred to the child then dropped.
|
||||
drop(unsafe { Frame::<Self>::from_raw(paddr) });
|
||||
} else if is_tracked == MapTrackingStatus::Tracked {
|
||||
// SAFETY: The PTE points to a tracked page. The ownership
|
||||
// of the child is transferred to the child then dropped.
|
||||
drop(unsafe { Frame::<dyn AnyFrameMeta>::from_raw(paddr) });
|
||||
} else {
|
||||
// SAFETY: The PTE points to a mapped item. The ownership
|
||||
// of the item is transferred here then dropped.
|
||||
drop(unsafe { C::item_from_raw(paddr, level, pte.prop()) });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
File diff suppressed because it is too large
@ -9,7 +9,7 @@ use crate::{
mm::{
io::{VmIo, VmReader, VmWriter},
tlb::TlbFlushOp,
vm_space::{get_activated_vm_space, VmItem},
vm_space::get_activated_vm_space,
CachePolicy, FallibleVmRead, FallibleVmWrite, FrameAllocOptions, PageFlags, PageProperty,
UFrame, VmSpace,
},
@ -513,10 +513,7 @@ mod vmspace {
let mut cursor = vmspace
.cursor(&preempt_guard, &range)
.expect("Failed to create cursor");
assert_eq!(
cursor.next(),
Some(VmItem::NotMapped { va: 0, len: 0x1000 })
);
assert_eq!(cursor.next(), Some((0..0x1000, None)));
}

/// Maps and unmaps a single page using `CursorMut`.
@ -533,13 +530,7 @@ mod vmspace {
.cursor_mut(&preempt_guard, &range)
.expect("Failed to create mutable cursor");
// Initially, the page should not be mapped.
assert_eq!(
cursor_mut.query().unwrap(),
VmItem::NotMapped {
va: range.start,
len: range.start + 0x1000
}
);
assert_eq!(cursor_mut.query().unwrap(), (range.clone(), None));
// Maps a frame.
cursor_mut.map(frame.clone(), prop);
}
@ -552,11 +543,7 @@ mod vmspace {
assert_eq!(cursor.virt_addr(), range.start);
assert_eq!(
cursor.query().unwrap(),
VmItem::Mapped {
va: range.start,
frame,
prop
}
(range.clone(), Some((frame.clone(), prop)))
);
}

@ -572,13 +559,7 @@ mod vmspace {
let mut cursor = vmspace
.cursor(&preempt_guard, &range)
.expect("Failed to create cursor");
assert_eq!(
cursor.query().unwrap(),
VmItem::NotMapped {
va: range.start,
len: range.start + 0x1000
}
);
assert_eq!(cursor.query().unwrap(), (range, None));
}

/// Maps a page twice and unmaps twice using `CursorMut`.
@ -603,11 +584,7 @@ mod vmspace {
.expect("Failed to create cursor");
assert_eq!(
cursor.query().unwrap(),
VmItem::Mapped {
va: range.start,
frame: frame.clone(),
prop
}
(range.clone(), Some((frame.clone(), prop)))
);
}

@ -624,11 +601,7 @@ mod vmspace {
.expect("Failed to create cursor");
assert_eq!(
cursor.query().unwrap(),
VmItem::Mapped {
va: range.start,
frame,
prop
}
(range.clone(), Some((frame.clone(), prop)))
);
}

@ -642,13 +615,7 @@ mod vmspace {
let mut cursor = vmspace
.cursor(&preempt_guard, &range)
.expect("Failed to create cursor");
assert_eq!(
cursor.query().unwrap(),
VmItem::NotMapped {
va: range.start,
len: range.start + 0x1000
}
);
assert_eq!(cursor.query().unwrap(), (range, None));
}

/// Unmaps twice using `CursorMut`.
@ -684,13 +651,7 @@ mod vmspace {
let mut cursor = vmspace
.cursor(&preempt_guard, &range)
.expect("Failed to create cursor");
assert_eq!(
cursor.query().unwrap(),
VmItem::NotMapped {
va: range.start,
len: range.start + 0x1000
}
);
assert_eq!(cursor.query().unwrap(), (range, None));
}

/// Activates and deactivates the `VmSpace` in single-CPU scenarios.
@ -700,12 +661,12 @@ mod vmspace {

// Activates the VmSpace.
vmspace.activate();
assert_eq!(get_activated_vm_space().unwrap(), Arc::as_ptr(&vmspace));
assert_eq!(get_activated_vm_space(), Arc::as_ptr(&vmspace));

// Deactivates the VmSpace.
let vmspace2 = Arc::new(VmSpace::new());
vmspace2.activate();
assert_eq!(get_activated_vm_space().unwrap(), Arc::as_ptr(&vmspace2));
assert_eq!(get_activated_vm_space(), Arc::as_ptr(&vmspace2));
}

/// Tests the `flusher` method of `CursorMut`.
@ -730,12 +691,8 @@ mod vmspace {
.cursor(&preempt_guard, &range)
.expect("Failed to create cursor");
assert_eq!(
cursor.next(),
Some(VmItem::Mapped {
va: 0x4000,
frame: frame.clone(),
prop: PageProperty::new_user(PageFlags::R, CachePolicy::Writeback),
})
cursor.next().unwrap(),
(range.clone(), Some((frame.clone(), prop)))
);
}

@ -754,12 +711,14 @@ mod vmspace {
.cursor(&preempt_guard, &range)
.expect("Failed to create cursor");
assert_eq!(
cursor.next(),
Some(VmItem::Mapped {
va: 0x4000,
frame,
prop: PageProperty::new_user(PageFlags::R, CachePolicy::Writeback),
})
cursor.next().unwrap(),
(
range.clone(),
Some((
frame.clone(),
PageProperty::new_user(PageFlags::R, CachePolicy::Writeback)
))
)
);
}
}
@ -848,12 +807,14 @@ mod vmspace {
assert!(cursor.jump(range.start).is_ok());
let item = cursor.next();
assert_eq!(
item,
Some(VmItem::Mapped {
va: 0x6000,
frame,
prop: PageProperty::new_user(PageFlags::R, CachePolicy::Writeback),
})
item.unwrap(),
(
range.clone(),
Some((
frame.clone(),
PageProperty::new_user(PageFlags::R, CachePolicy::Writeback)
))
)
);

// Confirms no additional items.
@ -885,18 +846,20 @@ mod vmspace {
.cursor(&preempt_guard, &range)
.expect("Failed to create cursor");
assert_eq!(
cursor.next(),
Some(VmItem::Mapped {
va: 0x7000,
frame,
prop: PageProperty::new_user(PageFlags::R, CachePolicy::Writeback),
})
cursor.next().unwrap(),
(
range.clone(),
Some((
frame.clone(),
PageProperty::new_user(PageFlags::R, CachePolicy::Writeback)
))
)
);
}

/// Attempts to map unaligned lengths and expects a panic.
#[ktest]
#[should_panic(expected = "assertion failed: len % super::PAGE_SIZE == 0")]
#[should_panic]
fn unaligned_unmap_panics() {
let vmspace = VmSpace::new();
let range = 0xA000..0xB000;

@ -11,7 +11,6 @@

use core::{ops::Range, sync::atomic::Ordering};

use super::page_table::PageTableConfig;
use crate::{
arch::mm::{current_page_table_paddr, PageTableEntry, PagingConsts},
cpu::{AtomicCpuSet, CpuSet, PinCurrentCpu},
@ -19,9 +18,10 @@ use crate::{
mm::{
io::Fallible,
kspace::KERNEL_PAGE_TABLE,
page_table::{self, PageTable, PageTableItem},
page_table::{self, PageTable, PageTableConfig, PageTableFrag},
tlb::{TlbFlushOp, TlbFlusher},
PageProperty, UFrame, VmReader, VmWriter, MAX_USERSPACE_VADDR,
AnyUFrameMeta, Frame, PageProperty, PagingLevel, UFrame, VmReader, VmWriter,
MAX_USERSPACE_VADDR,
},
prelude::*,
task::{atomic_mode::AsAtomicModeGuard, disable_preempt, DisabledPreemptGuard},
@ -202,19 +202,20 @@ impl Default for VmSpace {
pub struct Cursor<'a>(page_table::Cursor<'a, UserPtConfig>);

impl Iterator for Cursor<'_> {
type Item = VmItem;
type Item = (Range<Vaddr>, Option<MappedItem>);

fn next(&mut self) -> Option<Self::Item> {
self.0.next().map(|item| item.try_into().unwrap())
self.0.next()
}
}

impl Cursor<'_> {
/// Query about the current slot.
/// Queries the mapping at the current virtual address.
///
/// This function won't bring the cursor to the next slot.
pub fn query(&mut self) -> Result<VmItem> {
Ok(self.0.query().map(|item| item.try_into().unwrap())?)
/// If the cursor is pointing to a valid virtual address that is locked,
/// it will return the virtual address range and the mapped item.
pub fn query(&mut self) -> Result<(Range<Vaddr>, Option<MappedItem>)> {
Ok(self.0.query()?)
}

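For callers, the change means `query` and `next` no longer yield a `VmItem` enum but a `(Range<Vaddr>, Option<MappedItem>)` tuple, where `None` marks an unmapped range. A small self-contained sketch of consuming that shape, with simplified stand-in types rather than the real `UFrame` and `PageProperty`:

use std::ops::Range;

type Vaddr = usize;

#[derive(Clone, Copy, Debug, PartialEq)]
struct PageProperty; // stand-in

#[derive(Clone, Debug, PartialEq)]
struct UFrame(usize); // stand-in carrying a fake start paddr

type MappedItem = (UFrame, PageProperty);

fn mapped_paddr(query_result: (Range<Vaddr>, Option<MappedItem>)) -> Option<usize> {
    let (va, mapped) = query_result;
    match mapped {
        Some((frame, _prop)) => {
            debug_assert_eq!(va.end - va.start, 0x1000); // base-page slot in this model
            Some(frame.0)
        }
        None => None, // the whole `va` range is unmapped
    }
}

fn main() {
    assert_eq!(mapped_paddr((0x4000..0x5000, None)), None);
    let item = (UFrame(0x8000), PageProperty);
    assert_eq!(mapped_paddr((0x4000..0x5000, Some(item))), Some(0x8000));
}
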
/// Moves the cursor forward to the next mapped virtual address.
@ -225,6 +226,10 @@ impl Cursor<'_> {
///
/// Otherwise, it will return `None`. And the cursor may stop at any
/// address after `len` bytes.
///
/// # Panics
///
/// Panics if the length is longer than the remaining range of the cursor.
pub fn find_next(&mut self, len: usize) -> Option<Vaddr> {
self.0.find_next(len)
}
@ -253,16 +258,14 @@ pub struct CursorMut<'a> {
}

impl<'a> CursorMut<'a> {
/// Query about the current slot.
/// Queries the mapping at the current virtual address.
///
/// This is the same as [`Cursor::query`].
///
/// This function won't bring the cursor to the next slot.
pub fn query(&mut self) -> Result<VmItem> {
Ok(self
.pt_cursor
.query()
.map(|item| item.try_into().unwrap())?)
/// If the cursor is pointing to a valid virtual address that is locked,
/// it will return the virtual address range and the mapped item.
pub fn query(&mut self) -> Result<(Range<Vaddr>, Option<MappedItem>)> {
Ok(self.pt_cursor.query()?)
}

/// Moves the cursor forward to the next mapped virtual address.
@ -295,14 +298,25 @@ impl<'a> CursorMut<'a> {
/// This method will bring the cursor to the next slot after the modification.
pub fn map(&mut self, frame: UFrame, prop: PageProperty) {
let start_va = self.virt_addr();
// SAFETY: It is safe to map untyped memory into the userspace.
let old = unsafe { self.pt_cursor.map(frame.into(), prop) };
let item = (frame, prop);

if let Some(old) = old {
// SAFETY: It is safe to map untyped memory into the userspace.
let Err(frag) = (unsafe { self.pt_cursor.map(item) }) else {
return; // No mapping exists at the current address.
};

match frag {
PageTableFrag::Mapped { va, item } => {
debug_assert_eq!(va, start_va);
let (old_frame, _) = item;
self.flusher
.issue_tlb_flush_with(TlbFlushOp::Address(start_va), old);
.issue_tlb_flush_with(TlbFlushOp::Address(start_va), old_frame.into());
self.flusher.dispatch_tlb_flush();
}
PageTableFrag::StrayPageTable { .. } => {
panic!("`UFrame` is base page sized but re-mapping out a child PT");
}
}
}

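The new `map` contract is worth spelling out: on success nothing comes back, while mapping over an occupied slot returns the replaced fragment so the caller can flush the TLB before the old frame is dropped. A minimal model of that pattern, using a plain `HashMap` and stand-in types (not the real cursor API):

use std::collections::HashMap;

type Vaddr = usize;
type Frame = u64; // stand-in for an owned frame handle

struct Slots(HashMap<Vaddr, Frame>);

impl Slots {
    /// Returns `Err(old_frame)` if something was already mapped at `va`.
    fn map(&mut self, va: Vaddr, frame: Frame) -> Result<(), Frame> {
        match self.0.insert(va, frame) {
            None => Ok(()),
            Some(old) => Err(old),
        }
    }
}

fn main() {
    let mut slots = Slots(HashMap::new());
    assert_eq!(slots.map(0x4000, 1), Ok(()));
    // Re-mapping yields the old frame; a real caller would issue a TLB flush
    // for 0x4000 before letting it drop.
    assert_eq!(slots.map(0x4000, 2), Err(1));
}
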
/// Clears the mapping starting from the current slot,
@ -320,34 +334,33 @@ impl<'a> CursorMut<'a> {
/// splitting the operation into multiple small ones.
///
/// # Panics
///
/// This method will panic if `len` is not page-aligned.
/// Panics if:
/// - the length is longer than the remaining range of the cursor;
/// - the length is not page-aligned.
pub fn unmap(&mut self, len: usize) -> usize {
assert!(len % super::PAGE_SIZE == 0);
let end_va = self.virt_addr() + len;
let mut num_unmapped: usize = 0;
loop {
// SAFETY: It is safe to un-map memory in the userspace.
let result = unsafe { self.pt_cursor.take_next(end_va - self.virt_addr()) };
match result {
PageTableItem::Mapped { va, page, .. } => {
let Some(frag) = (unsafe { self.pt_cursor.take_next(end_va - self.virt_addr()) })
else {
break; // No more mappings in the range.
};

match frag {
PageTableFrag::Mapped { va, item, .. } => {
let (frame, _) = item;
num_unmapped += 1;
self.flusher
.issue_tlb_flush_with(TlbFlushOp::Address(va), page);
.issue_tlb_flush_with(TlbFlushOp::Address(va), frame.into());
}
PageTableItem::NotMapped { .. } => {
break;
}
PageTableItem::MappedUntracked { .. } => {
panic!("found untracked memory mapped into `VmSpace`");
}
PageTableItem::StrayPageTable {
PageTableFrag::StrayPageTable {
pt,
va,
len,
num_pages,
num_frames,
} => {
num_unmapped += num_pages;
num_unmapped += num_frames;
self.flusher
.issue_tlb_flush_with(TlbFlushOp::Range(va..va + len), pt);
}
@ -355,6 +368,7 @@ impl<'a> CursorMut<'a> {
}

self.flusher.dispatch_tlb_flush();

num_unmapped
}

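The reworked `unmap` drains the range by repeatedly taking the next mapped fragment until none remains, counting frames as it goes. A self-contained sketch of that draining loop with stand-in types; the real code additionally batches TLB flushes and handles stray page tables:

use std::collections::BTreeMap;
use std::ops::Range;

type Vaddr = usize;
type Frame = u64;
const PAGE_SIZE: usize = 0x1000;

struct Cursor {
    map: BTreeMap<Vaddr, Frame>,
    va: Vaddr,
}

impl Cursor {
    /// Removes and returns the next mapping within `len` bytes, if any.
    fn take_next(&mut self, len: usize) -> Option<(Vaddr, Frame)> {
        let end = self.va + len;
        let (&va, _) = self.map.range(self.va..end).next()?;
        let frame = self.map.remove(&va).unwrap();
        self.va = va + PAGE_SIZE; // the cursor moves past the taken slot
        Some((va, frame))
    }
}

fn unmap(cursor: &mut Cursor, range: Range<Vaddr>) -> usize {
    cursor.va = range.start;
    let mut num_unmapped = 0;
    while let Some((_va, _frame)) = cursor.take_next(range.end - cursor.va) {
        // A real implementation would queue a TLB flush for `_va` here and
        // keep `_frame` alive until the flush is dispatched.
        num_unmapped += 1;
    }
    num_unmapped
}

fn main() {
    let mut cursor = Cursor { map: BTreeMap::from([(0x4000, 1), (0x6000, 2)]), va: 0 };
    assert_eq!(unmap(&mut cursor, 0x4000..0x8000), 2);
    assert!(cursor.map.is_empty());
}
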
@ -377,10 +391,7 @@ impl<'a> CursorMut<'a> {
///
/// # Panics
///
/// This function will panic if:
/// - the range to be protected is out of the range where the cursor
/// is required to operate;
/// - the specified virtual address range only covers a part of a page.
/// Panics if the length is longer than the remaining range of the cursor.
pub fn protect_next(
&mut self,
len: usize,
@ -402,88 +413,36 @@ cpu_local_cell! {
}

#[cfg(ktest)]
pub(crate) fn get_activated_vm_space() -> Option<*const VmSpace> {
let ptr = ACTIVATED_VM_SPACE.load();
if ptr.is_null() {
None
} else {
// SAFETY: The pointer is only set to a valid `Arc` pointer.
Some(ptr)
}
pub(super) fn get_activated_vm_space() -> *const VmSpace {
ACTIVATED_VM_SPACE.load()
}

/// The result of a query over the VM space.
#[derive(Debug)]
pub enum VmItem {
/// The current slot is not mapped.
NotMapped {
/// The virtual address of the slot.
va: Vaddr,
/// The length of the slot.
len: usize,
},
/// The current slot is mapped.
Mapped {
/// The virtual address of the slot.
va: Vaddr,
/// The mapped frame.
frame: UFrame,
/// The property of the slot.
prop: PageProperty,
},
}

impl PartialEq for VmItem {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
// The `len` varies, so we only compare `va`.
(VmItem::NotMapped { va: va1, len: _ }, VmItem::NotMapped { va: va2, len: _ }) => {
va1 == va2
}
(
VmItem::Mapped {
va: va1,
frame: frame1,
prop: prop1,
},
VmItem::Mapped {
va: va2,
frame: frame2,
prop: prop2,
},
) => va1 == va2 && frame1.start_paddr() == frame2.start_paddr() && prop1 == prop2,
_ => false,
}
}
}

impl TryFrom<PageTableItem> for VmItem {
type Error = &'static str;

fn try_from(item: PageTableItem) -> core::result::Result<Self, Self::Error> {
match item {
PageTableItem::NotMapped { va, len } => Ok(VmItem::NotMapped { va, len }),
PageTableItem::Mapped { va, page, prop } => Ok(VmItem::Mapped {
va,
frame: page
.try_into()
.map_err(|_| "Found typed memory mapped into `VmSpace`")?,
prop,
}),
PageTableItem::MappedUntracked { .. } => {
Err("Found untracked memory mapped into `VmSpace`")
}
PageTableItem::StrayPageTable { .. } => Err("Stray page table cannot be query results"),
}
}
}
/// The item that can be mapped into the [`VmSpace`].
pub type MappedItem = (UFrame, PageProperty);

#[derive(Clone, Debug)]
pub(crate) struct UserPtConfig {}

impl PageTableConfig for UserPtConfig {
// SAFETY: `item_into_raw` and `item_from_raw` are implemented correctly,
unsafe impl PageTableConfig for UserPtConfig {
const TOP_LEVEL_INDEX_RANGE: Range<usize> = 0..256;

type E = PageTableEntry;
type C = PagingConsts;

type Item = MappedItem;

fn item_into_raw(item: Self::Item) -> (Paddr, PagingLevel, PageProperty) {
let (frame, prop) = item;
let level = frame.map_level();
let paddr = frame.into_raw();
(paddr, level, prop)
}

unsafe fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item {
debug_assert_eq!(level, 1);
// SAFETY: The caller ensures safety.
let frame = unsafe { Frame::<dyn AnyUFrameMeta>::from_raw(paddr) };
(frame, prop)
}
}

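With items and their raw round trip defined per configuration, whether mappings are "tracked" is no longer a property stored in the page table node; it follows from what the config's `Item` type owns. A hypothetical config whose items are bare `(paddr, level, prop)` triples, with nothing to reference-count and nothing to drop, illustrates the point. The trait below is a simplified model, not the real `PageTableConfig`:

type Paddr = usize;
type PagingLevel = u8;

#[derive(Clone, Copy, Debug, PartialEq)]
struct PageProperty; // stand-in

trait PageTableConfigModel {
    type Item;
    fn item_into_raw(item: Self::Item) -> (Paddr, PagingLevel, PageProperty);
    unsafe fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item;
}

/// Items are bare physical ranges: nothing is refcounted, nothing to drop.
struct UntrackedConfig;

impl PageTableConfigModel for UntrackedConfig {
    type Item = (Paddr, PagingLevel, PageProperty);

    fn item_into_raw(item: Self::Item) -> (Paddr, PagingLevel, PageProperty) {
        item
    }

    unsafe fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item {
        (paddr, level, prop)
    }
}

fn main() {
    let raw = UntrackedConfig::item_into_raw((0x8000, 1, PageProperty));
    let item = unsafe { UntrackedConfig::item_from_raw(raw.0, raw.1, raw.2) };
    assert_eq!(item, (0x8000, 1, PageProperty));
}
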
@ -7,7 +7,7 @@ use crate::{
cpu::{AtomicCpuSet, CpuSet, PinCurrentCpu},
impl_frame_meta_for,
mm::{
kspace::kvirt_area::{KVirtArea, Tracked},
kspace::kvirt_area::KVirtArea,
page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
FrameAllocOptions, PAGE_SIZE,
},
@ -34,7 +34,7 @@ pub static KERNEL_STACK_SIZE: usize = STACK_SIZE_IN_PAGES as usize * PAGE_SIZE;
#[derive(Debug)]
#[expect(dead_code)]
pub struct KernelStack {
kvirt_area: KVirtArea<Tracked>,
kvirt_area: KVirtArea,
tlb_coherent: AtomicCpuSet,
end_vaddr: Vaddr,
has_guard_page: bool,
@ -63,7 +63,7 @@ impl KernelStack {
cache: CachePolicy::Writeback,
priv_flags: PrivilegedPageFlags::empty(),
};
let new_kvirt_area = KVirtArea::<Tracked>::map_pages(
let new_kvirt_area = KVirtArea::map_frames(
KERNEL_STACK_SIZE + 4 * PAGE_SIZE,
2 * PAGE_SIZE,
pages.into_iter(),