Implement a new set of physical page APIs

Zhang Junyang
2024-12-24 18:20:55 +08:00
committed by Tate, Hongliang Tian
parent 6e1c36965a
commit cdac59beda
56 changed files with 882 additions and 995 deletions

View File

@ -15,7 +15,7 @@ use crate::{
dma::Daddr,
page_prop::{CachePolicy, PageProperty, PrivilegedPageFlags as PrivFlags},
page_table::{PageTableError, PageTableItem},
FrameAllocOptions, Paddr, PageFlags, PageTable, UntypedFrame, VmIo, PAGE_SIZE,
Frame, FrameAllocOptions, Paddr, PageFlags, PageTable, VmIo, PAGE_SIZE,
},
};
@ -38,7 +38,7 @@ impl RootEntry {
pub struct RootTable {
/// Total 256 bus, each entry is 128 bits.
root_frame: UntypedFrame,
root_frame: Frame<()>,
// TODO: Use radix tree instead.
context_tables: BTreeMap<Paddr, ContextTable>,
}
@ -57,7 +57,7 @@ impl RootTable {
pub(super) fn new() -> Self {
Self {
root_frame: FrameAllocOptions::new(1).alloc_single().unwrap(),
root_frame: FrameAllocOptions::new().alloc_frame().unwrap(),
context_tables: BTreeMap::new(),
}
}
@ -236,14 +236,14 @@ pub enum AddressWidth {
pub struct ContextTable {
/// Total 32 devices, each device has 8 functions.
entries_frame: UntypedFrame,
entries_frame: Frame<()>,
page_tables: BTreeMap<Paddr, PageTable<DeviceMode, PageTableEntry, PagingConsts>>,
}
impl ContextTable {
fn new() -> Self {
Self {
entries_frame: FrameAllocOptions::new(1).alloc_single().unwrap(),
entries_frame: FrameAllocOptions::new().alloc_frame().unwrap(),
page_tables: BTreeMap::new(),
}
}
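For reference, a minimal sketch of how single-frame call sites change under the new API (inside OSTD, where these items come from `crate::mm`; `unwrap` used for brevity):

// Old: the frame count went into the constructor, and `alloc_single`
// returned an `UntypedFrame`:
//     let root_frame = FrameAllocOptions::new(1).alloc_single().unwrap();
// New: the builder takes no count; `alloc_frame` returns a `Frame<()>`,
// an untyped frame carrying the unit metadata, zero-filled by default.
let root_frame: Frame<()> = FrameAllocOptions::new().alloc_frame().unwrap();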

View File

@ -9,7 +9,7 @@ use int_to_c_enum::TryFromInt;
use super::IrtEntryHandle;
use crate::{
mm::{paddr_to_vaddr, FrameAllocOptions, UntypedSegment, PAGE_SIZE},
mm::{paddr_to_vaddr, FrameAllocOptions, Segment, PAGE_SIZE},
sync::{LocalIrqDisabled, SpinLock},
};
@ -23,7 +23,7 @@ enum ExtendedInterruptMode {
pub struct IntRemappingTable {
size: u16,
extended_interrupt_mode: ExtendedInterruptMode,
frames: UntypedSegment,
frames: Segment<()>,
/// The global allocator for Interrupt remapping entry.
allocator: SpinLock<IdAlloc, LocalIrqDisabled>,
handles: Vec<Arc<SpinLock<IrtEntryHandle, LocalIrqDisabled>>>,
@ -35,12 +35,11 @@ impl IntRemappingTable {
Some(self.handles.get(id).unwrap().clone())
}
/// Creates an Interrupt Remapping Table with one UntypedFrame (default).
/// Creates an Interrupt Remapping Table with one DynUFrame (default).
pub(super) fn new() -> Self {
const DEFAULT_PAGES: usize = 1;
let segment = FrameAllocOptions::new(DEFAULT_PAGES)
.is_contiguous(true)
.alloc_contiguous()
let segment = FrameAllocOptions::new()
.alloc_segment(DEFAULT_PAGES)
.unwrap();
let entry_number = (DEFAULT_PAGES * PAGE_SIZE / size_of::<u128>()) as u16;
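The multi-frame path changes the same way; a minimal sketch reusing the `DEFAULT_PAGES` constant above:

// Old: FrameAllocOptions::new(DEFAULT_PAGES).is_contiguous(true).alloc_contiguous()
// New: contiguity is implied, and the frame count moves to the call site.
let segment: Segment<()> = FrameAllocOptions::new().alloc_segment(DEFAULT_PAGES).unwrap();
assert_eq!(segment.size(), DEFAULT_PAGES * PAGE_SIZE);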

View File

@ -3,12 +3,12 @@
use core::mem::size_of;
use crate::{
mm::{FrameAllocOptions, UntypedSegment, VmIo, PAGE_SIZE},
mm::{FrameAllocOptions, Segment, VmIo, PAGE_SIZE},
prelude::Paddr,
};
pub struct Queue {
segment: UntypedSegment,
segment: Segment<()>,
queue_size: usize,
tail: usize,
}
@ -38,9 +38,8 @@ impl Queue {
pub(super) fn new() -> Self {
const DEFAULT_PAGES: usize = 1;
let segment = FrameAllocOptions::new(DEFAULT_PAGES)
.is_contiguous(true)
.alloc_contiguous()
let segment = FrameAllocOptions::new()
.alloc_segment(DEFAULT_PAGES)
.unwrap();
Self {
segment,

View File

@ -10,11 +10,7 @@ use spin::Once;
use crate::{
arch::boot::smp::{bringup_all_aps, get_num_processors},
cpu,
mm::{
frame::{self, Segment},
kspace::KernelMeta,
paddr_to_vaddr, PAGE_SIZE,
},
mm::{frame::Segment, kspace::KernelMeta, paddr_to_vaddr, FrameAllocOptions, PAGE_SIZE},
task::Task,
};
@ -62,14 +58,17 @@ pub fn boot_all_aps() {
AP_BOOT_INFO.call_once(|| {
let mut per_ap_info = BTreeMap::new();
// Use two pages to place stack pointers of all APs, thus support up to 1024 APs.
let boot_stack_array =
frame::allocator::alloc_contiguous(2 * PAGE_SIZE, |_| KernelMeta::default()).unwrap();
let boot_stack_array = FrameAllocOptions::new()
.zeroed(false)
.alloc_segment_with(2, |_| KernelMeta)
.unwrap();
assert!(num_cpus < 1024);
for ap in 1..num_cpus {
let boot_stack_pages =
frame::allocator::alloc_contiguous(AP_BOOT_STACK_SIZE, |_| KernelMeta::default())
.unwrap();
let boot_stack_pages = FrameAllocOptions::new()
.zeroed(false)
.alloc_segment_with(AP_BOOT_STACK_SIZE / PAGE_SIZE, |_| KernelMeta)
.unwrap();
let boot_stack_ptr = paddr_to_vaddr(boot_stack_pages.end_paddr());
let stack_array_ptr = paddr_to_vaddr(boot_stack_array.start_paddr()) as *mut u64;
// SAFETY: The `stack_array_ptr` is valid and aligned.
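Note that `alloc_segment_with` counts frames rather than bytes, which is why the byte sizes above are divided by `PAGE_SIZE`. A minimal sketch of the same pattern (inside OSTD, with a hypothetical 64-KiB stack size and `KernelMeta` as the unit metadata struct introduced by this commit):

let stack_nbytes = 64 * 1024; // hypothetical, page-aligned stack size
let stack = FrameAllocOptions::new()
    .zeroed(false) // boot stacks do not need the default zero fill
    .alloc_segment_with(stack_nbytes / PAGE_SIZE, |_paddr| KernelMeta)
    .unwrap();
let stack_top = paddr_to_vaddr(stack.end_paddr());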

View File

@ -43,11 +43,7 @@ use spin::Once;
use crate::{
arch,
mm::{
frame::{self, Segment},
kspace::KernelMeta,
paddr_to_vaddr, PAGE_SIZE,
},
mm::{frame::Segment, kspace::KernelMeta, paddr_to_vaddr, FrameAllocOptions, PAGE_SIZE},
};
// These symbols are provided by the linker script.
@ -99,7 +95,10 @@ pub unsafe fn init_on_bsp() {
for _ in 1..num_cpus {
let ap_pages = {
let nbytes = (bsp_end_va - bsp_base_va).align_up(PAGE_SIZE);
frame::allocator::alloc_contiguous(nbytes, |_| KernelMeta::default()).unwrap()
FrameAllocOptions::new()
.zeroed(false)
.alloc_segment_with(nbytes / PAGE_SIZE, |_| KernelMeta)
.unwrap()
};
let ap_pages_ptr = paddr_to_vaddr(ap_pages.start_paddr()) as *mut u8;

View File

@ -13,7 +13,8 @@ use crate::{
io::VmIoOnce,
kspace::{paddr_to_vaddr, KERNEL_PAGE_TABLE},
page_prop::CachePolicy,
HasPaddr, Infallible, Paddr, PodOnce, UntypedSegment, VmIo, VmReader, VmWriter, PAGE_SIZE,
DynUSegment, HasPaddr, Infallible, Paddr, PodOnce, UntypedMem, VmIo, VmReader, VmWriter,
PAGE_SIZE,
},
prelude::*,
};
@ -38,27 +39,27 @@ pub struct DmaCoherent {
#[derive(Debug)]
struct DmaCoherentInner {
vm_segment: UntypedSegment,
segment: DynUSegment,
start_daddr: Daddr,
is_cache_coherent: bool,
}
impl DmaCoherent {
/// Creates a coherent DMA mapping backed by `vm_segment`.
/// Creates a coherent DMA mapping backed by `segment`.
///
/// The `is_cache_coherent` argument specifies whether
/// the target device that the DMA mapping is prepared for
/// can access the main memory in a CPU cache coherent way
/// or not.
///
/// The method fails if any part of the given `vm_segment`
/// The method fails if any part of the given `segment`
/// already belongs to a DMA mapping.
pub fn map(
vm_segment: UntypedSegment,
segment: DynUSegment,
is_cache_coherent: bool,
) -> core::result::Result<Self, DmaError> {
let frame_count = vm_segment.nbytes() / PAGE_SIZE;
let start_paddr = vm_segment.start_paddr();
let frame_count = segment.size() / PAGE_SIZE;
let start_paddr = segment.start_paddr();
if !check_and_insert_dma_mapping(start_paddr, frame_count) {
return Err(DmaError::AlreadyMapped);
}
@ -93,7 +94,7 @@ impl DmaCoherent {
DmaType::Iommu => {
for i in 0..frame_count {
let paddr = start_paddr + (i * PAGE_SIZE);
// SAFETY: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `vm_segment`.
// SAFETY: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `segment`.
unsafe {
iommu::map(paddr as Daddr, paddr).unwrap();
}
@ -103,7 +104,7 @@ impl DmaCoherent {
};
Ok(Self {
inner: Arc::new(DmaCoherentInner {
vm_segment,
segment,
start_daddr,
is_cache_coherent,
}),
@ -112,7 +113,7 @@ impl DmaCoherent {
/// Returns the number of bytes in the DMA mapping.
pub fn nbytes(&self) -> usize {
self.inner.vm_segment.nbytes()
self.inner.segment.size()
}
}
@ -123,16 +124,16 @@ impl HasDaddr for DmaCoherent {
}
impl Deref for DmaCoherent {
type Target = UntypedSegment;
type Target = DynUSegment;
fn deref(&self) -> &Self::Target {
&self.inner.vm_segment
&self.inner.segment
}
}
impl Drop for DmaCoherentInner {
fn drop(&mut self) {
let frame_count = self.vm_segment.nbytes() / PAGE_SIZE;
let start_paddr = self.vm_segment.start_paddr();
let frame_count = self.segment.size() / PAGE_SIZE;
let start_paddr = self.segment.start_paddr();
// Ensure that the addresses used later will not overflow
start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
match dma_type() {
@ -173,43 +174,39 @@ impl Drop for DmaCoherentInner {
impl VmIo for DmaCoherent {
fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> {
self.inner.vm_segment.read(offset, writer)
self.inner.segment.read(offset, writer)
}
fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> {
self.inner.vm_segment.write(offset, reader)
self.inner.segment.write(offset, reader)
}
}
impl VmIoOnce for DmaCoherent {
fn read_once<T: PodOnce>(&self, offset: usize) -> Result<T> {
self.inner.vm_segment.reader().skip(offset).read_once()
self.inner.segment.reader().skip(offset).read_once()
}
fn write_once<T: PodOnce>(&self, offset: usize, new_val: &T) -> Result<()> {
self.inner
.vm_segment
.writer()
.skip(offset)
.write_once(new_val)
self.inner.segment.writer().skip(offset).write_once(new_val)
}
}
impl<'a> DmaCoherent {
/// Returns a reader to read data from it.
pub fn reader(&'a self) -> VmReader<'a, Infallible> {
self.inner.vm_segment.reader()
self.inner.segment.reader()
}
/// Returns a writer to write data into it.
pub fn writer(&'a self) -> VmWriter<'a, Infallible> {
self.inner.vm_segment.writer()
self.inner.segment.writer()
}
}
impl HasPaddr for DmaCoherent {
fn paddr(&self) -> Paddr {
self.inner.vm_segment.start_paddr()
self.inner.segment.start_paddr()
}
}
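With `map` now taking a `DynUSegment`, callers allocate a typed `Segment<()>` and convert it via `From`; a minimal sketch mirroring the tests below (assuming the frame allocator and DMA subsystem are initialized):

let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();
// `Segment<()>` converts into the `DynUSegment` that `map` expects,
// because the unit metadata is untyped.
let dma_coherent = DmaCoherent::map(segment.into(), true).unwrap();
assert_eq!(dma_coherent.nbytes(), PAGE_SIZE);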
@ -222,46 +219,42 @@ mod test {
#[ktest]
fn map_with_coherent_device() {
let vm_segment = FrameAllocOptions::new(1)
.is_contiguous(true)
.alloc_contiguous()
let segment = FrameAllocOptions::new()
.alloc_segment_with(1, |_| ())
.unwrap();
let dma_coherent = DmaCoherent::map(vm_segment.clone(), true).unwrap();
assert!(dma_coherent.paddr() == vm_segment.paddr());
let dma_coherent = DmaCoherent::map(segment.clone().into(), true).unwrap();
assert!(dma_coherent.paddr() == segment.start_paddr());
}
#[ktest]
fn map_with_incoherent_device() {
let vm_segment = FrameAllocOptions::new(1)
.is_contiguous(true)
.alloc_contiguous()
let segment = FrameAllocOptions::new()
.alloc_segment_with(1, |_| ())
.unwrap();
let dma_coherent = DmaCoherent::map(vm_segment.clone(), false).unwrap();
assert!(dma_coherent.paddr() == vm_segment.paddr());
let dma_coherent = DmaCoherent::map(segment.clone().into(), false).unwrap();
assert!(dma_coherent.paddr() == segment.start_paddr());
let page_table = KERNEL_PAGE_TABLE.get().unwrap();
let vaddr = paddr_to_vaddr(vm_segment.paddr());
let vaddr = paddr_to_vaddr(segment.start_paddr());
assert!(page_table.query(vaddr).unwrap().1.cache == CachePolicy::Uncacheable);
}
#[ktest]
fn duplicate_map() {
let vm_segment_parent = FrameAllocOptions::new(2)
.is_contiguous(true)
.alloc_contiguous()
let segment = FrameAllocOptions::new()
.alloc_segment_with(2, |_| ())
.unwrap();
let vm_segment_child = vm_segment_parent.slice(&(0..PAGE_SIZE));
let _dma_coherent_parent = DmaCoherent::map(vm_segment_parent, false);
let dma_coherent_child = DmaCoherent::map(vm_segment_child, false);
let segment_child = segment.slice(&(0..PAGE_SIZE));
let _dma_coherent_parent = DmaCoherent::map(segment.into(), false);
let dma_coherent_child = DmaCoherent::map(segment_child.into(), false);
assert!(dma_coherent_child.is_err());
}
#[ktest]
fn read_and_write() {
let vm_segment = FrameAllocOptions::new(2)
.is_contiguous(true)
.alloc_contiguous()
let segment = FrameAllocOptions::new()
.alloc_segment_with(2, |_| ())
.unwrap();
let dma_coherent = DmaCoherent::map(vm_segment, false).unwrap();
let dma_coherent = DmaCoherent::map(segment.into(), false).unwrap();
let buf_write = vec![1u8; 2 * PAGE_SIZE];
dma_coherent.write_bytes(0, &buf_write).unwrap();
@ -272,11 +265,10 @@ mod test {
#[ktest]
fn reader_and_writer() {
let vm_segment = FrameAllocOptions::new(2)
.is_contiguous(true)
.alloc_contiguous()
let segment = FrameAllocOptions::new()
.alloc_segment_with(2, |_| ())
.unwrap();
let dma_coherent = DmaCoherent::map(vm_segment, false).unwrap();
let dma_coherent = DmaCoherent::map(segment.into(), false).unwrap();
let buf_write = vec![1u8; PAGE_SIZE];
let mut writer = dma_coherent.writer();

View File

@ -11,7 +11,7 @@ use crate::{
error::Error,
mm::{
dma::{dma_type, Daddr, DmaType},
HasPaddr, Infallible, Paddr, UntypedSegment, VmIo, VmReader, VmWriter, PAGE_SIZE,
DynUSegment, HasPaddr, Infallible, Paddr, UntypedMem, VmIo, VmReader, VmWriter, PAGE_SIZE,
},
};
@ -34,7 +34,7 @@ pub struct DmaStream {
#[derive(Debug)]
struct DmaStreamInner {
vm_segment: UntypedSegment,
segment: DynUSegment,
start_daddr: Daddr,
/// TODO: remove this field when on x86.
#[allow(unused)]
@ -55,16 +55,16 @@ pub enum DmaDirection {
}
impl DmaStream {
/// Establishes DMA stream mapping for a given [`UntypedSegment`].
/// Establishes DMA stream mapping for a given [`DynUSegment`].
///
/// The method fails if the segment already belongs to a DMA mapping.
pub fn map(
vm_segment: UntypedSegment,
segment: DynUSegment,
direction: DmaDirection,
is_cache_coherent: bool,
) -> Result<Self, DmaError> {
let frame_count = vm_segment.nbytes() / PAGE_SIZE;
let start_paddr = vm_segment.start_paddr();
let frame_count = segment.size() / PAGE_SIZE;
let start_paddr = segment.start_paddr();
if !check_and_insert_dma_mapping(start_paddr, frame_count) {
return Err(DmaError::AlreadyMapped);
}
@ -88,7 +88,7 @@ impl DmaStream {
DmaType::Iommu => {
for i in 0..frame_count {
let paddr = start_paddr + (i * PAGE_SIZE);
// SAFETY: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `vm_segment`.
// SAFETY: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `segment`.
unsafe {
iommu::map(paddr as Daddr, paddr).unwrap();
}
@ -99,7 +99,7 @@ impl DmaStream {
Ok(Self {
inner: Arc::new(DmaStreamInner {
vm_segment,
segment,
start_daddr,
is_cache_coherent,
direction,
@ -107,24 +107,24 @@ impl DmaStream {
})
}
/// Gets the underlying [`UntypedSegment`].
/// Gets the underlying [`DynUSegment`].
///
/// Usually, the CPU side should not access the memory
/// after the DMA mapping is established because
/// there is a chance that the device is updating
/// the memory. Do this at your own risk.
pub fn vm_segment(&self) -> &UntypedSegment {
&self.inner.vm_segment
pub fn segment(&self) -> &DynUSegment {
&self.inner.segment
}
/// Returns the number of frames.
pub fn nframes(&self) -> usize {
self.inner.vm_segment.nbytes() / PAGE_SIZE
self.inner.segment.size() / PAGE_SIZE
}
/// Returns the number of bytes.
pub fn nbytes(&self) -> usize {
self.inner.vm_segment.nbytes()
self.inner.segment.size()
}
/// Returns the DMA direction.
@ -156,7 +156,7 @@ impl DmaStream {
if self.inner.is_cache_coherent {
return Ok(());
}
let start_va = crate::mm::paddr_to_vaddr(self.inner.vm_segment.paddr()) as *const u8;
let start_va = crate::mm::paddr_to_vaddr(self.inner.segment.paddr()) as *const u8;
// TODO: Query the CPU for the cache line size via CPUID, we use 64 bytes as the cache line size here.
for i in _byte_range.step_by(64) {
// TODO: Call the cache line flush command in the corresponding architecture.
@ -176,8 +176,8 @@ impl HasDaddr for DmaStream {
impl Drop for DmaStreamInner {
fn drop(&mut self) {
let frame_count = self.vm_segment.nbytes() / PAGE_SIZE;
let start_paddr = self.vm_segment.start_paddr();
let frame_count = self.segment.size() / PAGE_SIZE;
let start_paddr = self.segment.start_paddr();
// Ensure that the addresses used later will not overflow
start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
match dma_type() {
@ -211,7 +211,7 @@ impl VmIo for DmaStream {
if self.inner.direction == DmaDirection::ToDevice {
return Err(Error::AccessDenied);
}
self.inner.vm_segment.read(offset, writer)
self.inner.segment.read(offset, writer)
}
/// Writes data from the buffer.
@ -219,7 +219,7 @@ impl VmIo for DmaStream {
if self.inner.direction == DmaDirection::FromDevice {
return Err(Error::AccessDenied);
}
self.inner.vm_segment.write(offset, reader)
self.inner.segment.write(offset, reader)
}
}
@ -229,7 +229,7 @@ impl<'a> DmaStream {
if self.inner.direction == DmaDirection::ToDevice {
return Err(Error::AccessDenied);
}
Ok(self.inner.vm_segment.reader())
Ok(self.inner.segment.reader())
}
/// Returns a writer to write data into it.
@ -237,13 +237,13 @@ impl<'a> DmaStream {
if self.inner.direction == DmaDirection::FromDevice {
return Err(Error::AccessDenied);
}
Ok(self.inner.vm_segment.writer())
Ok(self.inner.segment.writer())
}
}
impl HasPaddr for DmaStream {
fn paddr(&self) -> Paddr {
self.inner.vm_segment.start_paddr()
self.inner.segment.start_paddr()
}
}
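The streaming counterpart follows the same conversion, with the direction deciding which of `reader`/`writer` are permitted; a minimal sketch (assumptions as above):

let segment = FrameAllocOptions::new().alloc_segment(2).unwrap();
let stream = DmaStream::map(segment.into(), DmaDirection::FromDevice, false).unwrap();
// A `FromDevice` mapping may be read by the CPU but not written.
assert!(stream.reader().is_ok());
assert!(stream.writer().is_err());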
@ -373,36 +373,35 @@ mod test {
#[ktest]
fn streaming_map() {
let vm_segment = FrameAllocOptions::new(1)
.is_contiguous(true)
.alloc_contiguous()
let segment = FrameAllocOptions::new()
.alloc_segment_with(1, |_| ())
.unwrap();
let dma_stream =
DmaStream::map(vm_segment.clone(), DmaDirection::Bidirectional, true).unwrap();
assert!(dma_stream.paddr() == vm_segment.paddr());
DmaStream::map(segment.clone().into(), DmaDirection::Bidirectional, true).unwrap();
assert!(dma_stream.paddr() == segment.start_paddr());
}
#[ktest]
fn duplicate_map() {
let vm_segment_parent = FrameAllocOptions::new(2)
.is_contiguous(true)
.alloc_contiguous()
let segment_parent = FrameAllocOptions::new()
.alloc_segment_with(2, |_| ())
.unwrap();
let vm_segment_child = vm_segment_parent.slice(&(0..PAGE_SIZE));
let segment_child = segment_parent.slice(&(0..PAGE_SIZE));
let dma_stream_parent =
DmaStream::map(vm_segment_parent, DmaDirection::Bidirectional, false);
let dma_stream_child = DmaStream::map(vm_segment_child, DmaDirection::Bidirectional, false);
DmaStream::map(segment_parent.into(), DmaDirection::Bidirectional, false);
let dma_stream_child =
DmaStream::map(segment_child.into(), DmaDirection::Bidirectional, false);
assert!(dma_stream_parent.is_ok());
assert!(dma_stream_child.is_err());
}
#[ktest]
fn read_and_write() {
let vm_segment = FrameAllocOptions::new(2)
.is_contiguous(true)
.alloc_contiguous()
let segment = FrameAllocOptions::new()
.alloc_segment_with(2, |_| ())
.unwrap();
let dma_stream = DmaStream::map(vm_segment, DmaDirection::Bidirectional, false).unwrap();
let dma_stream =
DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap();
let buf_write = vec![1u8; 2 * PAGE_SIZE];
dma_stream.write_bytes(0, &buf_write).unwrap();
@ -414,11 +413,11 @@ mod test {
#[ktest]
fn reader_and_writer() {
let vm_segment = FrameAllocOptions::new(2)
.is_contiguous(true)
.alloc_contiguous()
let segment = FrameAllocOptions::new()
.alloc_segment_with(2, |_| ())
.unwrap();
let dma_stream = DmaStream::map(vm_segment, DmaDirection::Bidirectional, false).unwrap();
let dma_stream =
DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap();
let buf_write = vec![1u8; PAGE_SIZE];
let mut writer = dma_stream.writer().unwrap();

View File

@ -13,10 +13,135 @@ use spin::Once;
use super::{meta::FrameMeta, segment::Segment, Frame};
use crate::{
boot::memory_region::MemoryRegionType,
mm::{Paddr, PAGE_SIZE},
error::Error,
mm::{paddr_to_vaddr, Paddr, PAGE_SIZE},
prelude::*,
sync::SpinLock,
};
/// Options for allocating physical memory frames.
pub struct FrameAllocOptions {
zeroed: bool,
}
impl Default for FrameAllocOptions {
fn default() -> Self {
Self::new()
}
}
impl FrameAllocOptions {
/// Creates new options for allocating physical memory frames.
pub fn new() -> Self {
Self { zeroed: true }
}
/// Sets whether the allocated frames should be initialized with zeros.
///
/// If `zeroed` is `true`, the allocated frames are filled with zeros.
/// If not, the allocated frames may contain stale (and possibly sensitive) data,
/// and the caller should clear them before sharing them with other components.
///
/// By default, the frames are zero-initialized.
pub fn zeroed(&mut self, zeroed: bool) -> &mut Self {
self.zeroed = zeroed;
self
}
/// Allocates a single untyped frame without metadata.
pub fn alloc_frame(&self) -> Result<Frame<()>> {
self.alloc_frame_with(())
}
/// Allocates a single frame with additional metadata.
pub fn alloc_frame_with<M: FrameMeta>(&self, metadata: M) -> Result<Frame<M>> {
let frame = PAGE_ALLOCATOR
.get()
.unwrap()
.disable_irq()
.lock()
.alloc(1)
.map(|idx| {
let paddr = idx * PAGE_SIZE;
Frame::from_unused(paddr, metadata)
})
.ok_or(Error::NoMemory)?;
if self.zeroed {
let addr = paddr_to_vaddr(frame.start_paddr()) as *mut u8;
// SAFETY: The newly allocated frame is guaranteed to be valid.
unsafe { core::ptr::write_bytes(addr, 0, PAGE_SIZE) }
}
Ok(frame)
}
/// Allocates a contiguous range of untyped frames without metadata.
pub fn alloc_segment(&self, nframes: usize) -> Result<Segment<()>> {
self.alloc_segment_with(nframes, |_| ())
}
/// Allocates a contiguous range of frames with additional metadata.
///
/// The returned [`Segment`] contains at least one frame. The method returns
/// an error if the number of frames is zero.
pub fn alloc_segment_with<M: FrameMeta, F>(
&self,
nframes: usize,
metadata_fn: F,
) -> Result<Segment<M>>
where
F: FnMut(Paddr) -> M,
{
if nframes == 0 {
return Err(Error::InvalidArgs);
}
let segment = PAGE_ALLOCATOR
.get()
.unwrap()
.disable_irq()
.lock()
.alloc(nframes)
.map(|start| {
Segment::from_unused(
start * PAGE_SIZE..start * PAGE_SIZE + nframes * PAGE_SIZE,
metadata_fn,
)
})
.ok_or(Error::NoMemory)?;
if self.zeroed {
let addr = paddr_to_vaddr(segment.start_paddr()) as *mut u8;
// SAFETY: The newly allocated segment is guaranteed to be valid.
unsafe { core::ptr::write_bytes(addr, 0, nframes * PAGE_SIZE) }
}
Ok(segment)
}
}
#[cfg(ktest)]
#[ktest]
fn test_alloc_dealloc() {
// Here we allocate and deallocate frames in random orders to test the allocator.
// We expect the test to fail if the underlying implementation panics.
let single_options = FrameAllocOptions::new();
let mut contiguous_options = FrameAllocOptions::new();
contiguous_options.zeroed(false);
let mut remember_vec = Vec::new();
for _ in 0..10 {
for i in 0..10 {
let single_frame = single_options.alloc_frame().unwrap();
if i % 3 == 0 {
remember_vec.push(single_frame);
}
}
let contiguous_segment = contiguous_options.alloc_segment(10).unwrap();
drop(contiguous_segment);
remember_vec.pop();
}
}
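A minimal sketch of the closure-based variant: `metadata_fn` is called once per frame with that frame's physical address, similar to `core::array::from_fn` (the `paddrs` vector is purely illustrative):

let mut paddrs = Vec::new();
let segment = FrameAllocOptions::new()
    .alloc_segment_with(3, |paddr| {
        // Record the address; the unit return value keeps the frames untyped.
        paddrs.push(paddr);
    })
    .unwrap();
assert_eq!(paddrs.len(), 3);
assert_eq!(segment.size(), 3 * PAGE_SIZE);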
/// FrameAllocator with a counter for allocated memory
pub(in crate::mm) struct CountingFrameAllocator {
allocator: FrameAllocator,
@ -59,45 +184,6 @@ impl CountingFrameAllocator {
pub(in crate::mm) static PAGE_ALLOCATOR: Once<SpinLock<CountingFrameAllocator>> = Once::new();
/// Allocate a single page.
///
/// The metadata of the page is initialized with the given metadata.
pub(crate) fn alloc_single<M: FrameMeta>(metadata: M) -> Option<Frame<M>> {
PAGE_ALLOCATOR
.get()
.unwrap()
.disable_irq()
.lock()
.alloc(1)
.map(|idx| {
let paddr = idx * PAGE_SIZE;
Frame::from_unused(paddr, metadata)
})
}
/// Allocate a contiguous range of pages of a given length in bytes.
///
/// The caller must provide a closure to initialize metadata for all the pages.
/// The closure receives the physical address of the page and returns the
/// metadata, which is similar to [`core::array::from_fn`].
///
/// # Panics
///
/// The function panics if the length is not base-page-aligned.
pub(crate) fn alloc_contiguous<M: FrameMeta, F>(len: usize, metadata_fn: F) -> Option<Segment<M>>
where
F: FnMut(Paddr) -> M,
{
assert!(len % PAGE_SIZE == 0);
PAGE_ALLOCATOR
.get()
.unwrap()
.disable_irq()
.lock()
.alloc(len / PAGE_SIZE)
.map(|start| Segment::from_unused(start * PAGE_SIZE..start * PAGE_SIZE + len, metadata_fn))
}
pub(crate) fn init() {
let regions = crate::boot::memory_regions();
let mut total: usize = 0;

View File

@ -60,7 +60,7 @@ use crate::{
/// The maximum number of bytes of the metadata of a page.
pub const PAGE_METADATA_MAX_SIZE: usize =
META_SLOT_SIZE - size_of::<AtomicU32>() - size_of::<FrameMetaVtablePtr>();
META_SLOT_SIZE - size_of::<bool>() - size_of::<AtomicU32>() - size_of::<FrameMetaVtablePtr>();
/// The maximum alignment in bytes of the metadata of a page.
pub const PAGE_METADATA_MAX_ALIGN: usize = align_of::<MetaSlot>();
@ -77,19 +77,24 @@ pub(in crate::mm) struct MetaSlot {
/// at most `PAGE_METADATA_ALIGN` bytes of alignment;
/// - the subsequent fields can utilize the padding of the
/// reference count to save space.
storage: UnsafeCell<[u8; PAGE_METADATA_MAX_SIZE]>,
///
/// Don't access this field through a reference to the slot.
_storage: UnsafeCell<[u8; PAGE_METADATA_MAX_SIZE]>,
/// The reference count of the page.
///
/// Specifically, the reference count has the following meaning:
/// * `REF_COUNT_UNUSED`: The page is not in use.
/// * `0`: The page is being constructed ([`Page::from_unused`])
/// - `REF_COUNT_UNUSED`: The page is not in use.
/// - `0`: The page is being constructed ([`Frame::from_unused`])
/// or destructured ([`drop_last_in_place`]).
/// * `1..REF_COUNT_MAX`: The page is in use.
/// * `REF_COUNT_MAX..REF_COUNT_UNUSED`: Illegal values to
/// - `1..REF_COUNT_MAX`: The page is in use.
/// - `REF_COUNT_MAX..REF_COUNT_UNUSED`: Illegal values to
/// prevent the reference count from overflowing. Otherwise,
/// overflowing the reference count will cause soundness issue.
///
/// [`Frame::from_unused`]: super::Frame::from_unused
//
// Other than this field the fields should be `MaybeUninit`.
// See initialization in `alloc_meta_pages`.
pub(super) ref_count: AtomicU32,
/// The virtual table that indicates the type of the metadata.
pub(super) vtable_ptr: UnsafeCell<MaybeUninit<FrameMetaVtablePtr>>,
@ -123,6 +128,16 @@ pub unsafe trait FrameMeta: Any + Send + Sync + Debug + 'static {
fn on_drop(&mut self, reader: &mut VmReader<Infallible>) {
let _ = reader;
}
/// Whether the metadata's associated frame is untyped.
///
/// If a type implements [`UFrameMeta`], this should be `true`.
/// Otherwise, it should be `false`.
///
/// [`UFrameMeta`]: super::untyped::UFrameMeta
fn is_untyped(&self) -> bool {
false
}
}
/// Makes a structure usable as a page metadata.
@ -202,7 +217,7 @@ pub(super) unsafe fn drop_last_in_place(ptr: *mut MetaSlot) {
core::ptr::drop_in_place(meta_ptr);
}
// `Release` pairs with the `Acquire` in `Page::from_unused` and ensures `drop_in_place` won't
// `Release` pairs with the `Acquire` in `Frame::from_unused` and ensures `drop_in_place` won't
// be reordered after this memory store.
slot.ref_count.store(REF_COUNT_UNUSED, Ordering::Release);
@ -280,20 +295,15 @@ fn alloc_meta_pages(num_pages: usize) -> (usize, Paddr) {
* PAGE_SIZE;
let slots = paddr_to_vaddr(start_paddr) as *mut MetaSlot;
for i in 0..num_pages {
// SAFETY: The memory is successfully allocated with `num_pages` slots so the index must be
// within the range.
let slot = unsafe { slots.add(i) };
// SAFETY: The memory is just allocated so we have exclusive access and it's valid for
// writing.
unsafe {
slot.write(MetaSlot {
storage: UnsafeCell::new([0; PAGE_METADATA_MAX_SIZE]),
ref_count: AtomicU32::new(REF_COUNT_UNUSED),
vtable_ptr: UnsafeCell::new(MaybeUninit::uninit()),
});
}
// Fill the metadata pages with a byte pattern of `REF_COUNT_UNUSED`.
debug_assert_eq!(REF_COUNT_UNUSED.to_ne_bytes(), [0xff, 0xff, 0xff, 0xff]);
// SAFETY: `slots` and the length is a valid region for the metadata pages
// that are going to be treated as metadata slots. The byte pattern is
// valid as the initial value of the reference count (other fields are
// either not accessed or `MaybeUninit`).
unsafe {
core::ptr::write_bytes(slots as *mut u8, 0xff, num_pages * size_of::<MetaSlot>());
}
(num_meta_pages, start_paddr)

View File

@ -16,12 +16,11 @@
pub mod allocator;
pub mod meta;
mod segment;
pub mod segment;
pub mod untyped;
use core::{
marker::PhantomData,
mem::ManuallyDrop,
sync::atomic::{AtomicU32, AtomicUsize, Ordering},
};
@ -29,14 +28,14 @@ use meta::{
mapping, FrameMeta, MetaSlot, PAGE_METADATA_MAX_ALIGN, PAGE_METADATA_MAX_SIZE, REF_COUNT_UNUSED,
};
pub use segment::Segment;
use untyped::UntypedMeta;
use untyped::{DynUFrame, UFrameMeta};
use super::{PagingLevel, UntypedFrame, PAGE_SIZE};
use super::{PagingLevel, PAGE_SIZE};
use crate::mm::{Paddr, PagingConsts, Vaddr};
static MAX_PADDR: AtomicUsize = AtomicUsize::new(0);
/// A page with a statically-known usage, whose metadata is represented by `M`.
/// A physical memory frame with a statically-known usage, whose metadata is represented by `M`.
#[derive(Debug)]
#[repr(transparent)]
pub struct Frame<M: FrameMeta + ?Sized> {
@ -44,6 +43,13 @@ pub struct Frame<M: FrameMeta + ?Sized> {
pub(super) _marker: PhantomData<M>,
}
/// A physical memory frame with a dynamically-known usage.
///
/// The usage of this frame will not change while this object is alive, but the
/// usage is not known at compile time. A [`DynFrame`] parameter accepts frames
/// with any type of metadata.
pub type DynFrame = Frame<dyn FrameMeta>;
unsafe impl<M: FrameMeta + ?Sized> Send for Frame<M> {}
unsafe impl<M: FrameMeta + ?Sized> Sync for Frame<M> {}
@ -79,7 +85,8 @@ impl<M: FrameMeta> Frame<M> {
.compare_exchange(REF_COUNT_UNUSED, 0, Ordering::Acquire, Ordering::Relaxed)
.expect("Frame already in use when trying to get a new handle");
// SAFETY: We have exclusive access to the page metadata.
// SAFETY: We have exclusive access to the page metadata. These fields are mutably
// borrowed only once.
let vtable_ptr = unsafe { &mut *slot.vtable_ptr.get() };
vtable_ptr.write(core::ptr::metadata(&metadata as &dyn FrameMeta));
@ -114,7 +121,7 @@ impl<M: FrameMeta> Frame<M> {
impl<M: FrameMeta + ?Sized> Frame<M> {
/// Get the physical address.
pub fn paddr(&self) -> Paddr {
pub fn start_paddr(&self) -> Paddr {
mapping::meta_to_page::<PagingConsts>(self.ptr as Vaddr)
}
@ -183,7 +190,7 @@ impl<M: FrameMeta + ?Sized> Frame<M> {
/// data structures need to hold the page handle such as the page table.
#[allow(unused)]
pub(in crate::mm) fn into_raw(self) -> Paddr {
let paddr = self.paddr();
let paddr = self.start_paddr();
core::mem::forget(self);
paddr
}
@ -256,12 +263,8 @@ impl<M: FrameMeta> TryFrom<Frame<dyn FrameMeta>> for Frame<M> {
/// return the dynamic page itself as is.
fn try_from(dyn_frame: Frame<dyn FrameMeta>) -> Result<Self, Self::Error> {
if (dyn_frame.dyn_meta() as &dyn core::any::Any).is::<M>() {
let result = Frame {
ptr: dyn_frame.ptr,
_marker: PhantomData,
};
let _ = ManuallyDrop::new(dyn_frame);
Ok(result)
// SAFETY: The metadata is coerceable and the struct is transmutable.
Ok(unsafe { core::mem::transmute::<Frame<dyn FrameMeta>, Frame<M>>(dyn_frame) })
} else {
Err(dyn_frame)
}
@ -270,18 +273,46 @@ impl<M: FrameMeta> TryFrom<Frame<dyn FrameMeta>> for Frame<M> {
impl<M: FrameMeta> From<Frame<M>> for Frame<dyn FrameMeta> {
fn from(frame: Frame<M>) -> Self {
let result = Self {
ptr: frame.ptr,
_marker: PhantomData,
};
let _ = ManuallyDrop::new(frame);
result
// SAFETY: The metadata is coerceable and the struct is transmutable.
unsafe { core::mem::transmute(frame) }
}
}
impl From<UntypedFrame> for Frame<dyn FrameMeta> {
fn from(frame: UntypedFrame) -> Self {
Frame::<UntypedMeta>::from(frame).into()
impl<M: UFrameMeta> From<Frame<M>> for DynUFrame {
fn from(frame: Frame<M>) -> Self {
// SAFETY: The metadata is coerceable and the struct is transmutable.
unsafe { core::mem::transmute(frame) }
}
}
impl<M: UFrameMeta> From<&Frame<M>> for &DynUFrame {
fn from(frame: &Frame<M>) -> Self {
// SAFETY: The metadata is coerceable and the struct is transmutable.
unsafe { core::mem::transmute(frame) }
}
}
impl From<DynUFrame> for Frame<dyn FrameMeta> {
fn from(frame: DynUFrame) -> Self {
// SAFETY: The metadata is coerceable and the struct is transmutable.
unsafe { core::mem::transmute(frame) }
}
}
impl TryFrom<Frame<dyn FrameMeta>> for DynUFrame {
type Error = Frame<dyn FrameMeta>;
/// Tries to convert a [`Frame<dyn FrameMeta>`] into a [`DynUFrame`].
///
/// If the frame is not untyped, it returns the dynamic frame itself as is.
fn try_from(dyn_frame: Frame<dyn FrameMeta>) -> Result<Self, Self::Error> {
if dyn_frame.dyn_meta().is_untyped() {
// SAFETY: The metadata is coerceable and the struct is transmutable.
Ok(unsafe { core::mem::transmute::<Frame<dyn FrameMeta>, DynUFrame>(dyn_frame) })
} else {
Err(dyn_frame)
}
}
}
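The `From`/`TryFrom` conversions above are zero-cost transmutes over the same `repr(transparent)` pointer; a brief sketch of erasing and recovering the untyped view (assuming an initialized frame allocator):

let frame: Frame<()> = FrameAllocOptions::new().alloc_frame().unwrap();
// Upcast to a dynamically typed untyped frame, then erase "untyped" as well.
let uframe: DynUFrame = frame.into();
let any_frame: Frame<dyn FrameMeta> = uframe.into();
// Downcasts check the metadata at runtime and hand the frame back on failure.
assert!(DynUFrame::try_from(any_frame).is_ok());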

View File

@ -2,11 +2,10 @@
//! A contiguous range of pages.
use alloc::vec::Vec;
use core::{mem::ManuallyDrop, ops::Range};
use super::{inc_page_ref_count, meta::FrameMeta, Frame};
use crate::mm::{Paddr, PAGE_SIZE};
use crate::mm::{Paddr, UFrameMeta, PAGE_SIZE};
/// A contiguous range of homogeneous physical memory pages.
///
@ -21,11 +20,30 @@ use crate::mm::{Paddr, PAGE_SIZE};
/// All the metadata of the pages are homogeneous, i.e., they are of the same
/// type.
#[derive(Debug)]
#[repr(transparent)]
pub struct Segment<M: FrameMeta + ?Sized> {
range: Range<Paddr>,
_marker: core::marker::PhantomData<M>,
}
/// A contiguous range of homogeneous physical memory frames that have any metadata.
///
/// In other words, the metadata of the frames is of the same type, but that type
/// is not known at compile time. A [`DynSegment`] parameter accepts segments
/// with any type of metadata.
///
/// The usage of the segment will not change while this object is alive.
pub type DynSegment = Segment<dyn FrameMeta>;
/// A contiguous range of homogeneous untyped physical memory pages that have any metadata.
///
/// In other words, the metadata of the frames is of the same type and is untyped,
/// but the concrete metadata type is not known at compile time. A
/// [`DynUSegment`] parameter accepts any untyped segment.
///
/// The usage of the segment will not change while this object is alive.
pub type DynUSegment = Segment<dyn UFrameMeta>;
impl<M: FrameMeta + ?Sized> Drop for Segment<M> {
fn drop(&mut self) {
for paddr in self.range.clone().step_by(PAGE_SIZE) {
@ -89,7 +107,7 @@ impl<M: FrameMeta + ?Sized> Segment<M> {
}
/// Gets the length in bytes of the contiguous pages.
pub fn nbytes(&self) -> usize {
pub fn size(&self) -> usize {
self.range.end - self.range.start
}
@ -104,7 +122,7 @@ impl<M: FrameMeta + ?Sized> Segment<M> {
/// not base-page-aligned.
pub fn split(self, offset: usize) -> (Self, Self) {
assert!(offset % PAGE_SIZE == 0);
assert!(0 < offset && offset < self.nbytes());
assert!(0 < offset && offset < self.size());
let old = ManuallyDrop::new(self);
let at = old.range.start + offset;
@ -152,7 +170,7 @@ impl<M: FrameMeta + ?Sized> Segment<M> {
impl<M: FrameMeta + ?Sized> From<Frame<M>> for Segment<M> {
fn from(page: Frame<M>) -> Self {
let pa = page.paddr();
let pa = page.start_paddr();
let _ = ManuallyDrop::new(page);
Self {
range: pa..pa + PAGE_SIZE,
@ -161,22 +179,6 @@ impl<M: FrameMeta + ?Sized> From<Frame<M>> for Segment<M> {
}
}
impl<M: FrameMeta + ?Sized> From<Segment<M>> for Vec<Frame<M>> {
fn from(pages: Segment<M>) -> Self {
let vector = pages
.range
.clone()
.step_by(PAGE_SIZE)
.map(|i|
// SAFETY: for each page there would be a forgotten handle
// when creating the `Segment` object.
unsafe { Frame::<M>::from_raw(i) })
.collect();
let _ = ManuallyDrop::new(pages);
vector
}
}
impl<M: FrameMeta + ?Sized> Iterator for Segment<M> {
type Item = Frame<M>;
@ -194,3 +196,83 @@ impl<M: FrameMeta + ?Sized> Iterator for Segment<M> {
}
}
}
impl<M: FrameMeta> From<Segment<M>> for DynSegment {
fn from(seg: Segment<M>) -> Self {
let seg = ManuallyDrop::new(seg);
Self {
range: seg.range.clone(),
_marker: core::marker::PhantomData,
}
}
}
impl<M: FrameMeta> TryFrom<DynSegment> for Segment<M> {
type Error = DynSegment;
fn try_from(seg: DynSegment) -> core::result::Result<Self, Self::Error> {
// SAFETY: for each page there would be a forgotten handle
// when creating the `Segment` object.
let first_frame = unsafe { Frame::<dyn FrameMeta>::from_raw(seg.range.start) };
let first_frame = ManuallyDrop::new(first_frame);
if !(first_frame.dyn_meta() as &dyn core::any::Any).is::<M>() {
return Err(seg);
}
// Since segments are homogeneous, we can safely assume that the rest
// of the frames are of the same type. We just debug-check here.
#[cfg(debug_assertions)]
{
for paddr in seg.range.clone().step_by(PAGE_SIZE) {
let frame = unsafe { Frame::<dyn FrameMeta>::from_raw(paddr) };
let frame = ManuallyDrop::new(frame);
debug_assert!((frame.dyn_meta() as &dyn core::any::Any).is::<M>());
}
}
// SAFETY: The metadata is coerceable and the struct is transmutable.
Ok(unsafe { core::mem::transmute::<DynSegment, Segment<M>>(seg) })
}
}
impl<M: UFrameMeta> From<Segment<M>> for DynUSegment {
fn from(seg: Segment<M>) -> Self {
// SAFETY: The metadata is coerceable and the struct is transmutable.
unsafe { core::mem::transmute(seg) }
}
}
impl<M: UFrameMeta> From<&Segment<M>> for &DynUSegment {
fn from(seg: &Segment<M>) -> Self {
// SAFETY: The metadata is coerceable and the struct is transmutable.
unsafe { core::mem::transmute(seg) }
}
}
impl TryFrom<DynSegment> for DynUSegment {
type Error = DynSegment;
/// Tries to convert a [`DynSegment`] into a [`DynUSegment`].
///
/// If the segment is not untyped, it returns the dynamic segment itself as is.
fn try_from(seg: DynSegment) -> core::result::Result<Self, Self::Error> {
// SAFETY: for each page there would be a forgotten handle
// when creating the `Segment` object.
let first_frame = unsafe { Frame::<dyn FrameMeta>::from_raw(seg.range.start) };
let first_frame = ManuallyDrop::new(first_frame);
if !first_frame.dyn_meta().is_untyped() {
return Err(seg);
}
// Since segments are homogeneous, we can safely assume that the rest
// of the frames are of the same type. We just debug-check here.
#[cfg(debug_assertions)]
{
for paddr in seg.range.clone().step_by(PAGE_SIZE) {
let frame = unsafe { Frame::<dyn FrameMeta>::from_raw(paddr) };
let frame = ManuallyDrop::new(frame);
debug_assert!(frame.dyn_meta().is_untyped());
}
}
// SAFETY: The metadata is coerceable and the struct is transmutable.
Ok(unsafe { core::mem::transmute::<DynSegment, DynUSegment>(seg) })
}
}
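Segments follow the same scheme; a minimal sketch slicing a typed segment and passing the slice on as the `DynUSegment` that the DMA code now expects:

let segment: Segment<()> = FrameAllocOptions::new().alloc_segment(2).unwrap();
// An extra handle to the first page; reference counts are incremented.
let first_page = segment.slice(&(0..PAGE_SIZE));
// `Segment<()>` converts into `DynUSegment` because `()` is untyped metadata.
let useg: DynUSegment = first_page.into();
assert_eq!(useg.size(), PAGE_SIZE);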

View File

@ -0,0 +1,197 @@
// SPDX-License-Identifier: MPL-2.0
//! Untyped physical memory management.
//!
//! A frame is a special page that is _untyped_ memory.
//! It is used to store data irrelevant to the integrity of the kernel.
//! All pages mapped to the virtual address space of the users are backed by
//! frames. Frames, with all the properties of pages, can additionally be safely
//! read and written by the kernel or the user.
use super::{meta::FrameMeta, Frame, Segment};
use crate::{
mm::{
io::{FallibleVmRead, FallibleVmWrite, VmIo, VmReader, VmWriter},
paddr_to_vaddr, Infallible,
},
Error, Result,
};
/// The metadata of untyped frame.
///
/// If a structure `M` implements [`UFrameMeta`], it can be used as the
/// metadata of a type of untyped frames [`Frame<M>`]. All frames of such type
/// will be accessible as untyped memory.
pub trait UFrameMeta: FrameMeta {}
/// An untyped frame with any metadata.
///
/// The usage of this frame will not change while this object is alive.
/// The metadata of the frame is not known at compile time, but the frame must
/// be an untyped one. A [`DynUFrame`] parameter accepts a frame with any type
/// of untyped metadata.
pub type DynUFrame = Frame<dyn UFrameMeta>;
/// Makes a structure usable as untyped frame metadata.
///
/// Directly implementing [`FrameMeta`] is not safe since the size and
/// alignment must be checked. This macro provides a safe way to implement both
/// [`FrameMeta`] and [`UFrameMeta`] with compile-time checks.
///
/// If this macro is used for built-in typed frame metadata, it won't compile.
#[macro_export]
macro_rules! impl_untyped_frame_meta_for {
// Implement without specifying the drop behavior.
($t:ty) => {
use static_assertions::const_assert;
const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_SIZE);
const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_ALIGN);
// SAFETY: The size and alignment of the structure are checked.
unsafe impl $crate::mm::frame::meta::FrameMeta for $t {
fn is_untyped(&self) -> bool {
true
}
}
impl $crate::mm::frame::untyped::UFrameMeta for $t {}
};
// Implement with a customized drop function.
($t:ty, $body:expr) => {
use static_assertions::const_assert;
const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_SIZE);
const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_ALIGN);
// SAFETY: The size and alignment of the structure are checked.
// Outside OSTD the user cannot implement an `on_drop` method for typed
// frames, and untyped frames can be safely read.
unsafe impl $crate::mm::frame::meta::FrameMeta for $t {
fn on_drop(&mut self, reader: &mut $crate::mm::VmReader<$crate::mm::Infallible>) {
$body
}
fn is_untyped(&self) -> bool {
true
}
}
impl $crate::mm::frame::untyped::UFrameMeta for $t {}
};
}
// A special case of untyped metadata is the unit type.
impl_untyped_frame_meta_for!(());
/// A physical memory range that is untyped.
///
/// Untyped frames or segments can be safely read and written by the kernel or
/// the user.
pub trait UntypedMem {
/// Borrows a reader that can read the untyped memory.
fn reader(&self) -> VmReader<'_, Infallible>;
/// Borrows a writer that can write the untyped memory.
fn writer(&self) -> VmWriter<'_, Infallible>;
}
macro_rules! impl_untyped_for {
($t:ident) => {
impl<UM: UFrameMeta + ?Sized> UntypedMem for $t<UM> {
fn reader(&self) -> VmReader<'_, Infallible> {
let ptr = paddr_to_vaddr(self.start_paddr()) as *const u8;
// SAFETY: Only untyped frames are allowed to be read.
unsafe { VmReader::from_kernel_space(ptr, self.size()) }
}
fn writer(&self) -> VmWriter<'_, Infallible> {
let ptr = paddr_to_vaddr(self.start_paddr()) as *mut u8;
// SAFETY: Only untyped frames are allowed to be written.
unsafe { VmWriter::from_kernel_space(ptr, self.size()) }
}
}
impl<UM: UFrameMeta + ?Sized> VmIo for $t<UM> {
fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> {
let read_len = writer.avail().min(self.size().saturating_sub(offset));
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(read_len).ok_or(Error::Overflow)?;
if max_offset > self.size() {
return Err(Error::InvalidArgs);
}
let len = self
.reader()
.skip(offset)
.read_fallible(writer)
.map_err(|(e, _)| e)?;
debug_assert!(len == read_len);
Ok(())
}
fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> {
let write_len = reader.remain().min(self.size().saturating_sub(offset));
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(write_len).ok_or(Error::Overflow)?;
if max_offset > self.size() {
return Err(Error::InvalidArgs);
}
let len = self
.writer()
.skip(offset)
.write_fallible(reader)
.map_err(|(e, _)| e)?;
debug_assert!(len == write_len);
Ok(())
}
}
};
}
impl_untyped_for!(Frame);
impl_untyped_for!(Segment);
// Here are implementations for `xarray`.
use core::{marker::PhantomData, mem::ManuallyDrop, ops::Deref};
/// `FrameRef` is a struct that can work as `&'a Frame<M>`.
///
/// This is solely useful for [`crate::collections::xarray`].
pub struct FrameRef<'a, M: UFrameMeta + ?Sized> {
inner: ManuallyDrop<Frame<M>>,
_marker: PhantomData<&'a Frame<M>>,
}
impl<M: UFrameMeta + ?Sized> Deref for FrameRef<'_, M> {
type Target = Frame<M>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
// SAFETY: `Frame` is essentially an `*const MetaSlot` that could be used as a `*const` pointer.
// The pointer is also aligned to 4.
unsafe impl<M: UFrameMeta + ?Sized> xarray::ItemEntry for Frame<M> {
type Ref<'a>
= FrameRef<'a, M>
where
Self: 'a;
fn into_raw(self) -> *const () {
let ptr = self.ptr;
let _ = ManuallyDrop::new(self);
ptr as *const ()
}
unsafe fn from_raw(raw: *const ()) -> Self {
Self {
ptr: raw as *const _,
_marker: PhantomData,
}
}
unsafe fn raw_as_ref<'a>(raw: *const ()) -> Self::Ref<'a> {
Self::Ref {
inner: ManuallyDrop::new(Frame {
ptr: raw as *const _,
_marker: PhantomData,
}),
_marker: PhantomData,
}
}
}
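A minimal sketch of wiring up a new untyped metadata type with the macro and reading the frame back through the `UntypedMem`/`VmIo` impls (inside OSTD; `BufferMeta` is a hypothetical name):

// Hypothetical metadata for frames backing an in-kernel buffer.
#[derive(Debug)]
struct BufferMeta;
impl_untyped_frame_meta_for!(BufferMeta);

let frame = FrameAllocOptions::new().alloc_frame_with(BufferMeta).unwrap();
// Untyped frames expose `reader`/`writer` and therefore the `VmIo` helpers.
frame.writer().fill(0xabu8);
let mut byte = [0u8; 1];
frame.read_bytes(0, &mut byte).unwrap();
assert_eq!(byte[0], 0xab);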

View File

@ -1,236 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! Untyped physical memory management.
//!
//! A frame is a special page that is _untyped_ memory.
//! It is used to store data irrelevant to the integrity of the kernel.
//! All pages mapped to the virtual address space of the users are backed by
//! frames. Frames, with all the properties of pages, can additionally be safely
//! read and written by the kernel or the user.
pub mod options;
mod segment;
use core::mem::ManuallyDrop;
pub use segment::UntypedSegment;
use super::{
meta::{impl_frame_meta_for, FrameMeta, MetaSlot},
Frame,
};
use crate::{
mm::{
io::{FallibleVmRead, FallibleVmWrite, VmIo, VmReader, VmWriter},
paddr_to_vaddr, HasPaddr, Infallible, Paddr, PAGE_SIZE,
},
Error, Result,
};
/// A handle to a physical memory page of untyped memory.
///
/// An instance of `UntypedFrame` is a handle to a page frame (a physical memory
/// page). A cloned `UntypedFrame` refers to the same page frame as the original.
/// As the original and cloned instances point to the same physical address,
/// they are treated as equal to each other. Behind the scene, a reference
/// counter is maintained for each page frame so that when all instances of
/// `UntypedFrame` that refer to the same page frame are dropped, the page frame
/// will be globally freed.
#[derive(Debug, Clone)]
pub struct UntypedFrame {
page: Frame<UntypedMeta>,
}
impl UntypedFrame {
/// Returns the physical address of the page frame.
pub fn start_paddr(&self) -> Paddr {
self.page.paddr()
}
/// Returns the end physical address of the page frame.
pub fn end_paddr(&self) -> Paddr {
self.start_paddr() + PAGE_SIZE
}
/// Returns the size of the frame
pub const fn size(&self) -> usize {
self.page.size()
}
/// Returns a raw pointer to the starting virtual address of the frame.
pub fn as_ptr(&self) -> *const u8 {
paddr_to_vaddr(self.start_paddr()) as *const u8
}
/// Returns a mutable raw pointer to the starting virtual address of the frame.
pub fn as_mut_ptr(&self) -> *mut u8 {
paddr_to_vaddr(self.start_paddr()) as *mut u8
}
/// Copies the content of `src` to the frame.
pub fn copy_from(&self, src: &UntypedFrame) {
if self.paddr() == src.paddr() {
return;
}
// SAFETY: the source and the destination does not overlap.
unsafe {
core::ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.size());
}
}
/// Get the reference count of the frame.
///
/// It returns the number of all references to the page, including all the
/// existing page handles ([`UntypedFrame`]) and all the mappings in the page
/// table that points to the page.
///
/// # Safety
///
/// The function is safe to call, but using it requires extra care. The
/// reference count can be changed by other threads at any time including
/// potentially between calling this method and acting on the result.
pub fn reference_count(&self) -> u32 {
self.page.reference_count()
}
}
impl From<Frame<UntypedMeta>> for UntypedFrame {
fn from(page: Frame<UntypedMeta>) -> Self {
Self { page }
}
}
impl TryFrom<Frame<dyn FrameMeta>> for UntypedFrame {
type Error = Frame<dyn FrameMeta>;
/// Try converting a [`Frame<dyn FrameMeta>`] into the statically-typed [`UntypedFrame`].
///
/// If the dynamic page is not used as an untyped page frame, it will
/// return the dynamic page itself as is.
fn try_from(page: Frame<dyn FrameMeta>) -> core::result::Result<Self, Self::Error> {
page.try_into().map(|p: Frame<UntypedMeta>| p.into())
}
}
impl From<UntypedFrame> for Frame<UntypedMeta> {
fn from(frame: UntypedFrame) -> Self {
frame.page
}
}
impl HasPaddr for UntypedFrame {
fn paddr(&self) -> Paddr {
self.start_paddr()
}
}
impl<'a> UntypedFrame {
/// Returns a reader to read data from it.
pub fn reader(&'a self) -> VmReader<'a, Infallible> {
// SAFETY:
// - The memory range points to untyped memory.
// - The frame is alive during the lifetime `'a`.
// - Using `VmReader` and `VmWriter` is the only way to access the frame.
unsafe { VmReader::from_kernel_space(self.as_ptr(), self.size()) }
}
/// Returns a writer to write data into it.
pub fn writer(&'a self) -> VmWriter<'a, Infallible> {
// SAFETY:
// - The memory range points to untyped memory.
// - The frame is alive during the lifetime `'a`.
// - Using `VmReader` and `VmWriter` is the only way to access the frame.
unsafe { VmWriter::from_kernel_space(self.as_mut_ptr(), self.size()) }
}
}
impl VmIo for UntypedFrame {
fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> {
let read_len = writer.avail().min(self.size().saturating_sub(offset));
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(read_len).ok_or(Error::Overflow)?;
if max_offset > self.size() {
return Err(Error::InvalidArgs);
}
let len = self
.reader()
.skip(offset)
.read_fallible(writer)
.map_err(|(e, _)| e)?;
debug_assert!(len == read_len);
Ok(())
}
fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> {
let write_len = reader.remain().min(self.size().saturating_sub(offset));
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(write_len).ok_or(Error::Overflow)?;
if max_offset > self.size() {
return Err(Error::InvalidArgs);
}
let len = self
.writer()
.skip(offset)
.write_fallible(reader)
.map_err(|(e, _)| e)?;
debug_assert!(len == write_len);
Ok(())
}
}
/// Metadata for a frame.
#[derive(Debug, Default)]
pub struct UntypedMeta {}
impl_frame_meta_for!(UntypedMeta);
// Here are implementations for `xarray`.
use core::{marker::PhantomData, ops::Deref};
/// `FrameRef` is a struct that can work as `&'a UntypedFrame`.
///
/// This is solely useful for [`crate::collections::xarray`].
pub struct FrameRef<'a> {
inner: ManuallyDrop<UntypedFrame>,
_marker: PhantomData<&'a UntypedFrame>,
}
impl Deref for FrameRef<'_> {
type Target = UntypedFrame;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
// SAFETY: `UntypedFrame` is essentially an `*const MetaSlot` that could be used as a `*const` pointer.
// The pointer is also aligned to 4.
unsafe impl xarray::ItemEntry for UntypedFrame {
type Ref<'a>
= FrameRef<'a>
where
Self: 'a;
fn into_raw(self) -> *const () {
let ptr = self.page.ptr;
core::mem::forget(self);
ptr as *const ()
}
unsafe fn from_raw(raw: *const ()) -> Self {
Self {
page: Frame::<UntypedMeta> {
ptr: raw as *mut MetaSlot,
_marker: PhantomData,
},
}
}
unsafe fn raw_as_ref<'a>(raw: *const ()) -> Self::Ref<'a> {
Self::Ref {
inner: ManuallyDrop::new(UntypedFrame::from_raw(raw)),
_marker: PhantomData,
}
}
}

View File

@ -1,112 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! Options for allocating frames
use super::{UntypedFrame, UntypedSegment};
use crate::{
mm::{frame, frame::untyped::UntypedMeta, PAGE_SIZE},
prelude::*,
Error,
};
/// Options for allocating physical memory pages (or frames).
///
/// All allocated frames are safe to use in the sense that they are
/// not _typed memory_. We define typed memory as the memory that
/// may store Rust objects or affect Rust memory safety, e.g.,
/// the code and data segments of the OS kernel, the stack and heap
/// allocated for the OS kernel.
pub struct FrameAllocOptions {
nframes: usize,
is_contiguous: bool,
uninit: bool,
}
impl FrameAllocOptions {
/// Creates new options for allocating the specified number of frames.
pub fn new(nframes: usize) -> Self {
Self {
nframes,
is_contiguous: false,
uninit: false,
}
}
/// Sets whether the allocated frames should be contiguous.
///
/// The default value is `false`.
pub fn is_contiguous(&mut self, is_contiguous: bool) -> &mut Self {
self.is_contiguous = is_contiguous;
self
}
/// Sets whether the allocated frames should be uninitialized.
///
/// If `uninit` is set as `false`, the frame will be zeroed once allocated.
/// If `uninit` is set as `true`, the frame will **NOT** be zeroed and should *NOT* be read before writing.
///
/// The default value is false.
pub fn uninit(&mut self, uninit: bool) -> &mut Self {
self.uninit = uninit;
self
}
/// Allocates a single page frame according to the given options.
pub fn alloc_single(&self) -> Result<UntypedFrame> {
if self.nframes != 1 {
return Err(Error::InvalidArgs);
}
let page = frame::allocator::alloc_single(UntypedMeta::default()).ok_or(Error::NoMemory)?;
let frame = UntypedFrame { page };
if !self.uninit {
frame.writer().fill(0);
}
Ok(frame)
}
/// Allocates a contiguous range of page frames according to the given options.
///
/// The returned [`UntypedSegment`] contains at least one page frame.
pub fn alloc_contiguous(&self) -> Result<UntypedSegment> {
// It's no use to checking `self.is_contiguous` here.
if self.nframes == 0 {
return Err(Error::InvalidArgs);
}
let segment: UntypedSegment =
frame::allocator::alloc_contiguous(self.nframes * PAGE_SIZE, |_| {
UntypedMeta::default()
})
.ok_or(Error::NoMemory)?
.into();
if !self.uninit {
segment.writer().fill(0);
}
Ok(segment)
}
}
#[cfg(ktest)]
#[ktest]
fn test_alloc_dealloc() {
// Here we allocate and deallocate frames in random orders to test the allocator.
// We expect the test to fail if the underlying implementation panics.
let single_options = FrameAllocOptions::new(1);
let mut contiguous_options = FrameAllocOptions::new(10);
contiguous_options.is_contiguous(true);
let mut remember_vec = Vec::new();
for _ in 0..10 {
for i in 0..10 {
let single_frame = single_options.alloc_single().unwrap();
if i % 3 == 0 {
remember_vec.push(single_frame);
}
}
let contiguous_segment = contiguous_options.alloc_contiguous().unwrap();
drop(contiguous_segment);
remember_vec.pop();
}
}

View File

@ -1,177 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! A contiguous segment of untyped memory pages.
use core::ops::Range;
use crate::{
mm::{
frame::{untyped::UntypedMeta, Segment},
io::{FallibleVmRead, FallibleVmWrite},
HasPaddr, Infallible, Paddr, UntypedFrame, VmIo, VmReader, VmWriter,
},
Error, Result,
};
/// A contiguous segment of untyped memory pages.
///
/// A [`UntypedSegment`] object is a handle to a contiguous range of untyped memory
/// pages, and the underlying pages can be shared among multiple threads.
/// [`UntypedSegment::slice`] can be used to clone a slice of the segment (also can be
/// used to clone the entire range). Reference counts are maintained for each
/// page in the segment. So cloning the handle may not be cheap as it
/// increments the reference count of all the cloned pages.
///
/// Other [`UntypedFrame`] handles can also refer to the pages in the segment. And
/// the segment can be iterated over to get all the frames in it.
///
/// To allocate a segment, use [`crate::mm::FrameAllocator`].
///
/// # Example
///
/// ```rust
/// let vm_segment = FrameAllocOptions::new(2)
/// .is_contiguous(true)
/// .alloc_contiguous()?;
/// vm_segment.write_bytes(0, buf)?;
/// ```
#[derive(Debug)]
pub struct UntypedSegment {
pages: Segment<UntypedMeta>,
}
impl HasPaddr for UntypedSegment {
fn paddr(&self) -> Paddr {
self.pages.start_paddr()
}
}
impl Clone for UntypedSegment {
fn clone(&self) -> Self {
Self {
pages: self.pages.clone(),
}
}
}
impl UntypedSegment {
/// Returns the start physical address.
pub fn start_paddr(&self) -> Paddr {
self.pages.start_paddr()
}
/// Returns the end physical address.
pub fn end_paddr(&self) -> Paddr {
self.pages.end_paddr()
}
/// Returns the number of bytes in it.
pub fn nbytes(&self) -> usize {
self.pages.nbytes()
}
/// Split the segment into two at the given byte offset from the start.
///
/// The resulting segments cannot be empty. So the byte offset cannot be
/// neither zero nor the length of the segment.
///
/// # Panics
///
/// The function panics if the byte offset is out of bounds, at either ends, or
/// not base-page-aligned.
pub fn split(self, offset: usize) -> (Self, Self) {
let (left, right) = self.pages.split(offset);
(Self { pages: left }, Self { pages: right })
}
/// Get an extra handle to the segment in the byte range.
///
/// The sliced byte range in indexed by the offset from the start of the
/// segment. The resulting segment holds extra reference counts.
///
/// # Panics
///
/// The function panics if the byte range is out of bounds, or if any of
/// the ends of the byte range is not base-page aligned.
pub fn slice(&self, range: &Range<usize>) -> Self {
Self {
pages: self.pages.slice(range),
}
}
/// Gets a [`VmReader`] to read from the segment from the beginning to the end.
pub fn reader(&self) -> VmReader<'_, Infallible> {
let ptr = super::paddr_to_vaddr(self.start_paddr()) as *const u8;
// SAFETY:
// - The memory range points to untyped memory.
// - The segment is alive during the lifetime `'a`.
// - Using `VmReader` and `VmWriter` is the only way to access the segment.
unsafe { VmReader::from_kernel_space(ptr, self.nbytes()) }
}
/// Gets a [`VmWriter`] to write to the segment from the beginning to the end.
pub fn writer(&self) -> VmWriter<'_, Infallible> {
let ptr = super::paddr_to_vaddr(self.start_paddr()) as *mut u8;
// SAFETY:
// - The memory range points to untyped memory.
// - The segment is alive during the lifetime `'a`.
// - Using `VmReader` and `VmWriter` is the only way to access the segment.
unsafe { VmWriter::from_kernel_space(ptr, self.nbytes()) }
}
}
impl From<UntypedFrame> for UntypedSegment {
fn from(frame: UntypedFrame) -> Self {
Self {
pages: Segment::from(frame.page),
}
}
}
impl From<Segment<UntypedMeta>> for UntypedSegment {
fn from(pages: Segment<UntypedMeta>) -> Self {
Self { pages }
}
}
impl VmIo for UntypedSegment {
fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> {
let read_len = writer.avail();
// Do a bounds check, taking potential integer overflow into account
let max_offset = offset.checked_add(read_len).ok_or(Error::Overflow)?;
if max_offset > self.nbytes() {
return Err(Error::InvalidArgs);
}
let len = self
.reader()
.skip(offset)
.read_fallible(writer)
.map_err(|(e, _)| e)?;
debug_assert!(len == read_len);
Ok(())
}
fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> {
let write_len = reader.remain();
// Do a bounds check, taking potential integer overflow into account
let max_offset = offset.checked_add(write_len).ok_or(Error::Overflow)?;
if max_offset > self.nbytes() {
return Err(Error::InvalidArgs);
}
let len = self
.writer()
.skip(offset)
.write_fallible(reader)
.map_err(|(e, _)| e)?;
debug_assert!(len == write_len);
Ok(())
}
}
impl Iterator for UntypedSegment {
type Item = UntypedFrame;
fn next(&mut self) -> Option<Self::Item> {
self.pages.next().map(|page| UntypedFrame { page })
}
}
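
The following minimal sketch ties the pieces above together: allocation via the `FrameAllocOptions` builder used in the doc example, a `VmIo` write, slicing, splitting, and per-frame iteration. It is written as crate-internal code, elides error handling with `unwrap`, and is an illustration rather than a verbatim test from this commit.

```rust
use crate::mm::{FrameAllocOptions, VmIo, PAGE_SIZE};

fn untyped_segment_demo() {
    // Allocate two physically contiguous untyped pages.
    let segment = FrameAllocOptions::new(2)
        .is_contiguous(true)
        .alloc_contiguous()
        .unwrap();

    // Write into the segment through its `VmIo` implementation.
    let buf = [0xAAu8; PAGE_SIZE];
    segment.write_bytes(0, &buf).unwrap();

    // `slice` takes an extra reference-counted handle to a sub-range;
    // both ends must be base-page-aligned.
    let first_page = segment.slice(&(0..PAGE_SIZE));
    assert_eq!(first_page.nbytes(), PAGE_SIZE);

    // `split` consumes the handle and yields two non-empty halves.
    let (left, right) = segment.split(PAGE_SIZE);
    assert_eq!(left.nbytes(), PAGE_SIZE);

    // Iterating a segment hands out one `UntypedFrame` per page.
    assert_eq!(right.count(), 1);
}
```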

View File

@ -7,11 +7,11 @@
//! The core virtual memory (VM) access APIs provided by this module are [`VmReader`] and
//! [`VmWriter`], which allow for writing to or reading from a region of memory _safely_.
//! `VmReader` and `VmWriter` objects can be constructed from memory regions of either typed memory
//! (e.g., `&[u8]`) or untyped memory (e.g, [`UntypedFrame`]). Behind the scene, `VmReader` and `VmWriter`
//! (e.g., `&[u8]`) or untyped memory (e.g, [`DynUFrame`]). Behind the scene, `VmReader` and `VmWriter`
//! must be constructed via their [`from_user_space`] and [`from_kernel_space`] methods, whose
//! safety depends on whether the given memory regions are _valid_ or not.
//!
//! [`UntypedFrame`]: crate::mm::UntypedFrame
//! [`DynUFrame`]: crate::mm::DynUFrame
//! [`from_user_space`]: `VmReader::from_user_space`
//! [`from_kernel_space`]: `VmReader::from_kernel_space`
//!
@ -58,7 +58,7 @@ use crate::{
};
/// A trait that enables reading/writing data from/to a VM object,
/// e.g., [`UntypedSegment`], [`Vec<UntypedFrame>`] and [`UntypedFrame`].
/// e.g., [`DynUSegment`], [`Vec<DynUFrame>`] and [`DynUFrame`].
///
/// # Concurrency
///
@ -67,8 +67,8 @@ use crate::{
/// desire predictability or atomicity, the users should add extra mechanism
/// for such properties.
///
/// [`UntypedSegment`]: crate::mm::UntypedSegment
/// [`UntypedFrame`]: crate::mm::UntypedFrame
/// [`DynUSegment`]: crate::mm::DynUSegment
/// [`DynUFrame`]: crate::mm::DynUFrame
pub trait VmIo: Send + Sync {
/// Reads requested data at a specified offset into a given `VmWriter`.
///
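
To make the trait's role concrete, here is a small, hedged sketch that drives `VmIo` generically over byte buffers. It assumes the byte-oriented convenience methods `write_bytes`/`read_bytes` (only `write_bytes` appears elsewhere in this document) and assumes that the untyped frame returned by the new `FrameAllocOptions::new().alloc_frame()` implements `VmIo`; neither assumption is established by this hunk itself.

```rust
use crate::mm::{FrameAllocOptions, VmIo};

/// Round-trips a few bytes through any `VmIo` object at `offset`.
fn roundtrip<T: VmIo>(io: &T, offset: usize) {
    let src = [1u8, 2, 3, 4];
    io.write_bytes(offset, &src).unwrap();

    let mut dst = [0u8; 4];
    io.read_bytes(offset, &mut dst).unwrap();
    assert_eq!(src, dst);
}

fn vmio_demo() {
    // Assumed: the untyped frame handed out by the new allocator API
    // implements `VmIo`, as the updated doc comment above suggests.
    let frame = FrameAllocOptions::new().alloc_frame().unwrap();

    // Offsets past the end return `Err(InvalidArgs)` rather than panicking,
    // thanks to the overflow-aware bounds checks shown earlier.
    roundtrip(&frame, 128);
}
```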

View File

@ -164,7 +164,7 @@ pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
// Map the metadata pages.
{
let start_va = mapping::page_to_meta::<PagingConsts>(0);
let from = start_va..start_va + meta_pages.nbytes();
let from = start_va..start_va + meta_pages.size();
let prop = PageProperty {
flags: PageFlags::RW,
cache: CachePolicy::Writeback,
@ -214,7 +214,7 @@ pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
};
let mut cursor = kpt.cursor_mut(&from).unwrap();
for frame_paddr in to.step_by(PAGE_SIZE) {
let page = Frame::<KernelMeta>::from_unused(frame_paddr, KernelMeta::default());
let page = Frame::<KernelMeta>::from_unused(frame_paddr, KernelMeta);
// SAFETY: we are doing mappings for the kernel.
unsafe {
let _old = cursor.map(page.into(), prop);
@ -249,6 +249,6 @@ pub unsafe fn activate_kernel_page_table() {
/// The metadata of pages that contains the kernel itself.
#[derive(Debug, Default)]
pub struct KernelMeta {}
pub struct KernelMeta;
impl_frame_meta_for!(KernelMeta);
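
The `KernelMeta {}` to `KernelMeta;` change reflects the commit-wide pattern of declaring per-frame metadata as small marker types. The sketch below shows that pattern with a hypothetical `MyDriverMeta` marker and the new typed allocation entry point `alloc_frame_with`; the exact trait bounds behind `impl_frame_meta_for!` are not visible in this diff, so treat the details as illustrative.

```rust
use crate::{
    impl_frame_meta_for,
    mm::{Frame, FrameAllocOptions},
};

/// Hypothetical per-frame metadata marker, for illustration only.
#[derive(Debug, Default)]
pub struct MyDriverMeta;
impl_frame_meta_for!(MyDriverMeta);

fn alloc_typed_frame() -> Frame<MyDriverMeta> {
    // `alloc_frame_with` attaches the metadata value to the frame, so the
    // handle's type records what the frame is used for.
    FrameAllocOptions::new()
        .zeroed(true)
        .alloc_frame_with(MyDriverMeta)
        .unwrap()
}
```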

View File

@ -24,7 +24,12 @@ use core::{fmt::Debug, ops::Range};
pub use self::{
dma::{Daddr, DmaCoherent, DmaDirection, DmaStream, DmaStreamSlice, HasDaddr},
frame::untyped::{options::FrameAllocOptions, UntypedFrame, UntypedSegment},
frame::{
allocator::FrameAllocOptions,
segment::{DynSegment, DynUSegment, Segment},
untyped::{DynUFrame, UFrameMeta, UntypedMem},
DynFrame, Frame,
},
io::{
Fallible, FallibleVmRead, FallibleVmWrite, Infallible, PodOnce, VmIo, VmIoOnce, VmReader,
VmWriter,

View File

@ -250,7 +250,7 @@ fn test_boot_pt_map_protect() {
mm::{CachePolicy, FrameAllocOptions, PageFlags},
};
let root_frame = FrameAllocOptions::new(1).alloc_single().unwrap();
let root_frame = FrameAllocOptions::new().alloc_frame().unwrap();
let root_paddr = root_frame.start_paddr();
let mut boot_pt = BootPageTable::<PageTableEntry, PagingConsts> {

View File

@ -40,8 +40,9 @@ use super::{nr_subpage_per_huge, PageTableEntryTrait};
use crate::{
arch::mm::{PageTableEntry, PagingConsts},
mm::{
frame::{self, inc_page_ref_count, meta::FrameMeta, Frame},
paddr_to_vaddr, Infallible, Paddr, PagingConstsTrait, PagingLevel, VmReader, PAGE_SIZE,
frame::{inc_page_ref_count, meta::FrameMeta, Frame},
paddr_to_vaddr, FrameAllocOptions, Infallible, Paddr, PagingConstsTrait, PagingLevel,
VmReader,
},
};
@ -260,13 +261,11 @@ where
/// extra unnecessary expensive operation.
pub(super) fn alloc(level: PagingLevel, is_tracked: MapTrackingStatus) -> Self {
let meta = PageTablePageMeta::new_locked(level, is_tracked);
let page = frame::allocator::alloc_single::<PageTablePageMeta<E, C>>(meta).unwrap();
// Zero out the page table node.
let ptr = paddr_to_vaddr(page.paddr()) as *mut u8;
// SAFETY: The page is exclusively owned here. Pointers are valid also.
// We rely on the fact that 0 represents an absent entry to speed up `memset`.
unsafe { core::ptr::write_bytes(ptr, 0, PAGE_SIZE) };
let page = FrameAllocOptions::new()
.zeroed(true)
.alloc_frame_with(meta)
.expect("Failed to allocate a page table node");
// The allocated frame is zeroed. Make sure zero is absent PTE.
debug_assert!(E::new_absent().as_bytes().iter().all(|&b| b == 0));
Self { page }
@ -281,7 +280,7 @@ where
// SAFETY: The provided physical address is valid and the level is
// correct. The reference count is not changed.
unsafe { RawPageTableNode::from_raw_parts(this.page.paddr(), this.page.meta().level) }
unsafe { RawPageTableNode::from_raw_parts(this.page.start_paddr(), this.page.meta().level) }
}
/// Gets a raw handle while still preserving the original handle.
@ -290,7 +289,7 @@ where
// SAFETY: The provided physical address is valid and the level is
// correct. The reference count is increased by one.
unsafe { RawPageTableNode::from_raw_parts(page.paddr(), page.meta().level) }
unsafe { RawPageTableNode::from_raw_parts(page.start_paddr(), page.meta().level) }
}
/// Gets the number of valid PTEs in the node.
@ -310,7 +309,7 @@ where
/// The caller must ensure that the index is within the bound.
unsafe fn read_pte(&self, idx: usize) -> E {
debug_assert!(idx < nr_subpage_per_huge::<C>());
let ptr = paddr_to_vaddr(self.page.paddr()) as *const E;
let ptr = paddr_to_vaddr(self.page.start_paddr()) as *const E;
// SAFETY: The index is within the bound and the PTE is plain-old-data.
unsafe { ptr.add(idx).read() }
}
@ -330,7 +329,7 @@ where
/// (see [`Child::is_compatible`]).
unsafe fn write_pte(&mut self, idx: usize, pte: E) {
debug_assert!(idx < nr_subpage_per_huge::<C>());
let ptr = paddr_to_vaddr(self.page.paddr()) as *mut E;
let ptr = paddr_to_vaddr(self.page.start_paddr()) as *mut E;
// SAFETY: The index is within the bound and the PTE is plain-old-data.
unsafe { ptr.add(idx).write(pte) }
}
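
The comment and `debug_assert!` above encode the invariant that makes a `zeroed(true)` allocation sufficient: the PTE type's "absent" encoding must be the all-zero bit pattern, otherwise a freshly zeroed frame would not be a valid empty page-table node. A self-contained toy illustration of that check (no real `PageTableEntry` involved):

```rust
/// A toy PTE used only to illustrate the invariant the page-table code
/// relies on: "absent" must encode as all-zero bytes, so a zero-filled
/// frame is already a valid, empty page-table node.
#[derive(Clone, Copy)]
#[repr(transparent)]
struct ToyPte(u64);

impl ToyPte {
    const fn new_absent() -> Self {
        ToyPte(0)
    }
    fn as_bytes(&self) -> [u8; 8] {
        self.0.to_ne_bytes()
    }
}

fn check_zero_means_absent() {
    // Mirrors the `debug_assert!` in `alloc` above: if this ever failed,
    // handing out zeroed frames as empty nodes would be unsound.
    debug_assert!(ToyPte::new_absent().as_bytes().iter().all(|&b| b == 0));
}
```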

View File

@ -5,10 +5,9 @@ use core::mem::ManuallyDrop;
use super::*;
use crate::{
mm::{
frame::{allocator, untyped::UntypedMeta},
kspace::LINEAR_MAPPING_BASE_VADDR,
page_prop::{CachePolicy, PageFlags},
MAX_USERSPACE_VADDR,
FrameAllocOptions, MAX_USERSPACE_VADDR,
},
prelude::*,
};
@ -31,8 +30,8 @@ fn test_tracked_map_unmap() {
let pt = PageTable::<UserMode>::empty();
let from = PAGE_SIZE..PAGE_SIZE * 2;
let page = allocator::alloc_single(UntypedMeta::default()).unwrap();
let start_paddr = page.paddr();
let page = FrameAllocOptions::new().alloc_frame().unwrap();
let start_paddr = page.start_paddr();
let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
unsafe { pt.cursor_mut(&from).unwrap().map(page.into(), prop) };
assert_eq!(pt.query(from.start + 10).unwrap().0, start_paddr + 10);
@ -87,8 +86,8 @@ fn test_user_copy_on_write() {
let pt = PageTable::<UserMode>::empty();
let from = PAGE_SIZE..PAGE_SIZE * 2;
let page = allocator::alloc_single(UntypedMeta::default()).unwrap();
let start_paddr = page.paddr();
let page = FrameAllocOptions::new().alloc_frame().unwrap();
let start_paddr = page.start_paddr();
let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
unsafe { pt.cursor_mut(&from).unwrap().map(page.clone().into(), prop) };
assert_eq!(pt.query(from.start + 10).unwrap().0, start_paddr + 10);
@ -172,7 +171,7 @@ fn test_base_protect_query() {
let from_ppn = 1..1000;
let from = PAGE_SIZE * from_ppn.start..PAGE_SIZE * from_ppn.end;
let to = allocator::alloc_contiguous(999 * PAGE_SIZE, |_| UntypedMeta::default()).unwrap();
let to = FrameAllocOptions::new().alloc_segment(999).unwrap();
let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
unsafe {
let mut cursor = pt.cursor_mut(&from).unwrap();

View File

@ -22,7 +22,7 @@ use crate::{
kspace::KERNEL_PAGE_TABLE,
page_table::{self, PageTable, PageTableItem, UserMode},
tlb::{TlbFlushOp, TlbFlusher, FLUSH_ALL_RANGE_THRESHOLD},
PageProperty, UntypedFrame, VmReader, VmWriter, MAX_USERSPACE_VADDR,
DynUFrame, PageProperty, VmReader, VmWriter, MAX_USERSPACE_VADDR,
},
prelude::*,
sync::{PreemptDisabled, RwLock, RwLockReadGuard},
@ -40,7 +40,7 @@ use crate::{
///
/// A newly-created `VmSpace` is not backed by any physical memory pages. To
/// provide memory pages for a `VmSpace`, one can allocate and map physical
/// memory ([`UntypedFrame`]s) to the `VmSpace` using the cursor.
/// memory ([`DynUFrame`]s) to the `VmSpace` using the cursor.
///
/// A `VmSpace` can also attach a page fault handler, which will be invoked to
/// handle page faults generated from user space.
@ -323,7 +323,7 @@ impl CursorMut<'_, '_> {
/// Map a frame into the current slot.
///
/// This method will bring the cursor to the next slot after the modification.
pub fn map(&mut self, frame: UntypedFrame, prop: PageProperty) {
pub fn map(&mut self, frame: DynUFrame, prop: PageProperty) {
let start_va = self.virt_addr();
// SAFETY: It is safe to map untyped memory into the userspace.
let old = unsafe { self.pt_cursor.map(frame.into(), prop) };
@ -475,7 +475,7 @@ pub enum VmItem {
/// The virtual address of the slot.
va: Vaddr,
/// The mapped frame.
frame: UntypedFrame,
frame: DynUFrame,
/// The property of the slot.
prop: PageProperty,
},
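
The doc comment above describes the intended workflow, but only `CursorMut::map` is visible in this hunk. A heavily hedged sketch of that workflow follows; it assumes `VmSpace::cursor_mut(&range)` exists with the obvious signature and that the untyped frame converts into `DynUFrame` via `Into`, neither of which is shown here.

```rust
use crate::mm::{
    CachePolicy, FrameAllocOptions, PageFlags, PageProperty, VmSpace, PAGE_SIZE,
};

fn map_one_page(vm_space: &VmSpace) {
    // Allocate an untyped frame and convert it into the `DynUFrame`
    // that `CursorMut::map` accepts (conversion assumed).
    let frame = FrameAllocOptions::new().alloc_frame().unwrap();
    let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);

    // Assumed API: obtain a mutable cursor over one user-space page and
    // map the frame into the slot the cursor points at.
    let range = PAGE_SIZE..2 * PAGE_SIZE;
    let mut cursor = vm_space.cursor_mut(&range).unwrap();
    cursor.map(frame.into(), prop);
}
```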

View File

@ -14,6 +14,6 @@ pub use ostd_macros::ktest;
pub use crate::{
early_print as print, early_println as println,
mm::{Paddr, Vaddr},
mm::{Paddr, UntypedMem, Vaddr},
panic::abort,
};

View File

@ -3,10 +3,9 @@
use crate::{
impl_frame_meta_for,
mm::{
frame::allocator,
kspace::kvirt_area::{KVirtArea, Tracked},
page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
PAGE_SIZE,
FrameAllocOptions, PAGE_SIZE,
},
prelude::*,
};
@ -36,7 +35,7 @@ pub struct KernelStack {
}
#[derive(Debug, Default)]
struct KernelStackMeta {}
struct KernelStackMeta;
impl_frame_meta_for!(KernelStackMeta);
@ -47,8 +46,9 @@ impl KernelStack {
let mut new_kvirt_area = KVirtArea::<Tracked>::new(KERNEL_STACK_SIZE + 4 * PAGE_SIZE);
let mapped_start = new_kvirt_area.range().start + 2 * PAGE_SIZE;
let mapped_end = mapped_start + KERNEL_STACK_SIZE;
let pages =
allocator::alloc_contiguous(KERNEL_STACK_SIZE, |_| KernelStackMeta::default()).unwrap();
let pages = FrameAllocOptions::new()
.zeroed(false)
.alloc_segment_with(KERNEL_STACK_SIZE / PAGE_SIZE, |_| KernelStackMeta)?;
let prop = PageProperty {
flags: PageFlags::RW,
cache: CachePolicy::Writeback,