diff --git a/kernel/comps/block/src/bio.rs b/kernel/comps/block/src/bio.rs index 181ec16cd..f18759e5b 100644 --- a/kernel/comps/block/src/bio.rs +++ b/kernel/comps/block/src/bio.rs @@ -5,8 +5,8 @@ use bitvec::array::BitArray; use int_to_c_enum::TryFromInt; use ostd::{ mm::{ - DmaDirection, DmaStream, DmaStreamSlice, FrameAllocOptions, Infallible, UntypedSegment, - VmIo, VmReader, VmWriter, + DmaDirection, DmaStream, DmaStreamSlice, DynUSegment, FrameAllocOptions, Infallible, VmIo, + VmReader, VmWriter, }, sync::{SpinLock, WaitQueue}, Error, @@ -426,11 +426,11 @@ impl<'a> BioSegment { let bio_segment_inner = target_pool(direction) .and_then(|pool| pool.alloc(nblocks, offset_within_first_block, len)) .unwrap_or_else(|| { - let segment = FrameAllocOptions::new(nblocks) - .uninit(true) - .alloc_contiguous() + let segment = FrameAllocOptions::new() + .zeroed(false) + .alloc_segment(nblocks) .unwrap(); - let dma_stream = DmaStream::map(segment, direction.into(), false).unwrap(); + let dma_stream = DmaStream::map(segment.into(), direction.into(), false).unwrap(); BioSegmentInner { dma_slice: DmaStreamSlice::new(dma_stream, offset_within_first_block, len), from_pool: false, @@ -442,9 +442,9 @@ impl<'a> BioSegment { } } - /// Constructs a new `BioSegment` with a given `UntypedSegment` and the bio direction. - pub fn new_from_segment(segment: UntypedSegment, direction: BioDirection) -> Self { - let len = segment.nbytes(); + /// Constructs a new `BioSegment` with a given `DynUSegment` and the bio direction. + pub fn new_from_segment(segment: DynUSegment, direction: BioDirection) -> Self { + let len = segment.size(); let dma_stream = DmaStream::map(segment, direction.into(), false).unwrap(); Self { inner: Arc::new(BioSegmentInner { @@ -481,8 +481,8 @@ impl<'a> BioSegment { /// Returns the inner VM segment. #[cfg(ktest)] - pub fn inner_segment(&self) -> &UntypedSegment { - self.inner.dma_slice.stream().vm_segment() + pub fn inner_segment(&self) -> &DynUSegment { + self.inner.dma_slice.stream().segment() } /// Returns a reader to read data from it. 
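The hunks above capture the core of this API migration: `FrameAllocOptions` no longer takes the frame count in `new()` (it moves to the `alloc_segment(nblocks)` call), `uninit(true)` becomes `zeroed(false)`, and `DmaStream::map` now accepts a metadata-erased `DynUSegment`, so the typed `Segment<()>` returned by the allocator is converted with `.into()` at the call site. A minimal sketch of the new allocation path follows; the helper name and parameters are hypothetical, and the direction is passed as a `DmaDirection` directly rather than converted from a `BioDirection` as above.

```rust
use ostd::mm::{DmaDirection, DmaStream, FrameAllocOptions};

// Hypothetical helper mirroring the fallback path in `BioSegment::alloc` above.
fn alloc_dma_backing(nblocks: usize, direction: DmaDirection) -> DmaStream {
    // The frame count moves from `FrameAllocOptions::new(nblocks)` to the
    // `alloc_segment(nblocks)` call; `zeroed(false)` skips zero-filling,
    // as the old `uninit(true)` did.
    let segment = FrameAllocOptions::new()
        .zeroed(false)
        .alloc_segment(nblocks)
        .unwrap();
    // `alloc_segment` yields a typed `Segment<()>`; `.into()` erases the
    // metadata type into the `DynUSegment` that `DmaStream::map` expects.
    DmaStream::map(segment.into(), direction, false).unwrap()
}
```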
@@ -560,11 +560,11 @@ impl BioSegmentPool { pub fn new(direction: BioDirection) -> Self { let total_blocks = POOL_DEFAULT_NBLOCKS; let pool = { - let segment = FrameAllocOptions::new(total_blocks) - .uninit(true) - .alloc_contiguous() + let segment = FrameAllocOptions::new() + .zeroed(false) + .alloc_segment(total_blocks) .unwrap(); - DmaStream::map(segment, direction.into(), false).unwrap() + DmaStream::map(segment.into(), direction.into(), false).unwrap() }; let manager = SpinLock::new(PoolSlotManager { occupied: BitArray::ZERO, diff --git a/kernel/comps/network/src/buffer.rs b/kernel/comps/network/src/buffer.rs index b2ebea896..8c4f64499 100644 --- a/kernel/comps/network/src/buffer.rs +++ b/kernel/comps/network/src/buffer.rs @@ -34,10 +34,10 @@ impl TxBuffer { let dma_stream = if let Some(stream) = pool.lock().pop_front() { stream } else { - let segment = FrameAllocOptions::new(TX_BUFFER_LEN / PAGE_SIZE) - .alloc_contiguous() + let segment = FrameAllocOptions::new() + .alloc_segment(TX_BUFFER_LEN / PAGE_SIZE) .unwrap(); - DmaStream::map(segment, DmaDirection::ToDevice, false).unwrap() + DmaStream::map(segment.into(), DmaDirection::ToDevice, false).unwrap() }; let tx_buffer = { diff --git a/kernel/comps/network/src/dma_pool.rs b/kernel/comps/network/src/dma_pool.rs index fe74b13e9..fe34ac5e9 100644 --- a/kernel/comps/network/src/dma_pool.rs +++ b/kernel/comps/network/src/dma_pool.rs @@ -152,9 +152,9 @@ impl DmaPage { pool: Weak, ) -> Result { let dma_stream = { - let segment = FrameAllocOptions::new(1).alloc_contiguous()?; + let segment = FrameAllocOptions::new().alloc_segment(1)?; - DmaStream::map(segment, direction, is_cache_coherent) + DmaStream::map(segment.into(), direction, is_cache_coherent) .map_err(|_| ostd::Error::AccessDenied)? }; diff --git a/kernel/comps/virtio/src/device/block/device.rs b/kernel/comps/virtio/src/device/block/device.rs index ddcca3af4..78f871e49 100644 --- a/kernel/comps/virtio/src/device/block/device.rs +++ b/kernel/comps/virtio/src/device/block/device.rs @@ -141,13 +141,13 @@ impl DeviceInner { let queue = VirtQueue::new(0, Self::QUEUE_SIZE, transport.as_mut()) .expect("create virtqueue failed"); let block_requests = { - let vm_segment = FrameAllocOptions::new(1).alloc_contiguous().unwrap(); - DmaStream::map(vm_segment, DmaDirection::Bidirectional, false).unwrap() + let segment = FrameAllocOptions::new().alloc_segment(1).unwrap(); + DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap() }; assert!(Self::QUEUE_SIZE as usize * REQ_SIZE <= block_requests.nbytes()); let block_responses = { - let vm_segment = FrameAllocOptions::new(1).alloc_contiguous().unwrap(); - DmaStream::map(vm_segment, DmaDirection::Bidirectional, false).unwrap() + let segment = FrameAllocOptions::new().alloc_segment(1).unwrap(); + DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap() }; assert!(Self::QUEUE_SIZE as usize * RESP_SIZE <= block_responses.nbytes()); @@ -261,11 +261,11 @@ impl DeviceInner { }; const MAX_ID_LENGTH: usize = 20; let device_id_stream = { - let segment = FrameAllocOptions::new(1) - .uninit(true) - .alloc_contiguous() + let segment = FrameAllocOptions::new() + .zeroed(false) + .alloc_segment(1) .unwrap(); - DmaStream::map(segment, DmaDirection::FromDevice, false).unwrap() + DmaStream::map(segment.into(), DmaDirection::FromDevice, false).unwrap() }; let device_id_slice = DmaStreamSlice::new(&device_id_stream, 0, MAX_ID_LENGTH); let outputs = vec![&device_id_slice, &resp_slice]; diff --git 
a/kernel/comps/virtio/src/device/console/device.rs b/kernel/comps/virtio/src/device/console/device.rs index efa9aa312..b04d0040b 100644 --- a/kernel/comps/virtio/src/device/console/device.rs +++ b/kernel/comps/virtio/src/device/console/device.rs @@ -87,13 +87,13 @@ impl ConsoleDevice { SpinLock::new(VirtQueue::new(TRANSMIT0_QUEUE_INDEX, 2, transport.as_mut()).unwrap()); let send_buffer = { - let vm_segment = FrameAllocOptions::new(1).alloc_contiguous().unwrap(); - DmaStream::map(vm_segment, DmaDirection::ToDevice, false).unwrap() + let segment = FrameAllocOptions::new().alloc_segment(1).unwrap(); + DmaStream::map(segment.into(), DmaDirection::ToDevice, false).unwrap() }; let receive_buffer = { - let vm_segment = FrameAllocOptions::new(1).alloc_contiguous().unwrap(); - DmaStream::map(vm_segment, DmaDirection::FromDevice, false).unwrap() + let segment = FrameAllocOptions::new().alloc_segment(1).unwrap(); + DmaStream::map(segment.into(), DmaDirection::FromDevice, false).unwrap() }; let device = Arc::new(Self { diff --git a/kernel/comps/virtio/src/device/input/device.rs b/kernel/comps/virtio/src/device/input/device.rs index 6b76cccee..971fbf692 100644 --- a/kernel/comps/virtio/src/device/input/device.rs +++ b/kernel/comps/virtio/src/device/input/device.rs @@ -261,14 +261,14 @@ impl EventTable { fn new(num_events: usize) -> Self { assert!(num_events * mem::size_of::() <= PAGE_SIZE); - let vm_segment = FrameAllocOptions::new(1).alloc_contiguous().unwrap(); + let segment = FrameAllocOptions::new().alloc_segment(1).unwrap(); let default_event = VirtioInputEvent::default(); let iter = iter::repeat(&default_event).take(EVENT_SIZE); - let nr_written = vm_segment.write_vals(0, iter, 0).unwrap(); + let nr_written = segment.write_vals(0, iter, 0).unwrap(); assert_eq!(nr_written, EVENT_SIZE); - let stream = DmaStream::map(vm_segment, DmaDirection::FromDevice, false).unwrap(); + let stream = DmaStream::map(segment.into(), DmaDirection::FromDevice, false).unwrap(); Self { stream, num_events } } diff --git a/kernel/comps/virtio/src/queue.rs b/kernel/comps/virtio/src/queue.rs index 9bc92b16a..6112ac10c 100644 --- a/kernel/comps/virtio/src/queue.rs +++ b/kernel/comps/virtio/src/queue.rs @@ -76,7 +76,7 @@ impl VirtQueue { } let (descriptor_ptr, avail_ring_ptr, used_ring_ptr) = if transport.is_legacy_version() { - // Currently, we use one UntypedFrame to place the descriptors and available rings, one UntypedFrame to place used rings + // Currently, we use one DynUFrame to place the descriptors and available rings, one DynUFrame to place used rings // because the virtio-mmio legacy required the address to be continuous. The max queue size is 128. 
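Concretely, the legacy branch continued below allocates the descriptor table and both rings as one physically contiguous `Segment<()>` and then splits it, since legacy virtio-mmio requires the queue memory to be contiguous. A simplified sketch of that pattern, assuming `split` consumes the segment and returns the two halves as the destructuring below suggests; the names and the page-granular split offset are illustrative, not the exact alignment constants used below.

```rust
use ostd::mm::{FrameAllocOptions, Segment, PAGE_SIZE};

// Allocate a contiguous run of frames and split it in two: one part for the
// descriptor table plus available ring, the other for the used ring.
fn alloc_legacy_queue_memory(
    total_frames: usize,
    first_part_frames: usize,
) -> (Segment<()>, Segment<()>) {
    let contiguous = FrameAllocOptions::new()
        .alloc_segment(total_frames)
        .unwrap();
    // `split` divides the segment at the given byte offset into two
    // independently usable segments over the same allocation.
    contiguous.split(first_part_frames * PAGE_SIZE)
}
```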
if size > 128 { return Err(QueueError::InvalidArgs); @@ -89,8 +89,8 @@ impl VirtQueue { let align_size = VirtioPciLegacyTransport::QUEUE_ALIGN_SIZE; let total_frames = VirtioPciLegacyTransport::calc_virtqueue_size_aligned(queue_size) / align_size; - let continue_segment = FrameAllocOptions::new(total_frames) - .alloc_contiguous() + let continue_segment = FrameAllocOptions::new() + .alloc_segment(total_frames) .unwrap(); let avial_size = size_of::() * (3 + queue_size); @@ -99,12 +99,12 @@ impl VirtQueue { continue_segment.split(seg1_frames * align_size) }; let desc_frame_ptr: SafePtr = - SafePtr::new(DmaCoherent::map(seg1, true).unwrap(), 0); + SafePtr::new(DmaCoherent::map(seg1.into(), true).unwrap(), 0); let mut avail_frame_ptr: SafePtr = desc_frame_ptr.clone().cast(); avail_frame_ptr.byte_add(desc_size); let used_frame_ptr: SafePtr = - SafePtr::new(DmaCoherent::map(seg2, true).unwrap(), 0); + SafePtr::new(DmaCoherent::map(seg2.into(), true).unwrap(), 0); (desc_frame_ptr, avail_frame_ptr, used_frame_ptr) } else { if size > 256 { @@ -112,18 +112,27 @@ impl VirtQueue { } ( SafePtr::new( - DmaCoherent::map(FrameAllocOptions::new(1).alloc_contiguous().unwrap(), true) - .unwrap(), + DmaCoherent::map( + FrameAllocOptions::new().alloc_segment(1).unwrap().into(), + true, + ) + .unwrap(), 0, ), SafePtr::new( - DmaCoherent::map(FrameAllocOptions::new(1).alloc_contiguous().unwrap(), true) - .unwrap(), + DmaCoherent::map( + FrameAllocOptions::new().alloc_segment(1).unwrap().into(), + true, + ) + .unwrap(), 0, ), SafePtr::new( - DmaCoherent::map(FrameAllocOptions::new(1).alloc_contiguous().unwrap(), true) - .unwrap(), + DmaCoherent::map( + FrameAllocOptions::new().alloc_segment(1).unwrap().into(), + true, + ) + .unwrap(), 0, ), ) diff --git a/kernel/libs/aster-util/src/safe_ptr.rs b/kernel/libs/aster-util/src/safe_ptr.rs index cf7574a8b..89b1c26ff 100644 --- a/kernel/libs/aster-util/src/safe_ptr.rs +++ b/kernel/libs/aster-util/src/safe_ptr.rs @@ -54,7 +54,7 @@ use ostd::{ /// /// The generic parameter `M` of `SafePtr<_, M, _>` must implement the `VmIo` /// trait. The most important `VmIo` types are `Vmar`, `Vmo`, `IoMem`, and -/// `UntypedFrame`. The blanket implementations of `VmIo` also include pointer-like +/// `DynUFrame`. The blanket implementations of `VmIo` also include pointer-like /// types that refer to a `VmIo` type. Some examples are `&Vmo`, `Box`, /// and `Arc`. /// diff --git a/kernel/libs/aster-util/src/segment_slice.rs b/kernel/libs/aster-util/src/segment_slice.rs index f72ea90cd..e0971b97d 100644 --- a/kernel/libs/aster-util/src/segment_slice.rs +++ b/kernel/libs/aster-util/src/segment_slice.rs @@ -2,41 +2,41 @@ // SPDX-License-Identifier: MPL-2.0 -//! Provides [`SegmentSlice`] for quick duplication and slicing over [`UntypedSegment`]. +//! Provides [`SegmentSlice`] for quick duplication and slicing over [`DynUSegment`]. use alloc::sync::Arc; use core::ops::Range; use ostd::{ mm::{ - FallibleVmRead, FallibleVmWrite, Infallible, Paddr, UntypedFrame, UntypedSegment, VmIo, - VmReader, VmWriter, PAGE_SIZE, + DynUFrame, DynUSegment, FallibleVmRead, FallibleVmWrite, Infallible, Paddr, UntypedMem, + VmIo, VmReader, VmWriter, PAGE_SIZE, }, Error, Result, }; -/// A reference to a slice of a [`UntypedSegment`]. +/// A reference to a slice of a [`DynUSegment`]. /// /// Cloning a [`SegmentSlice`] is cheap, as it only increments one reference -/// count. While cloning a [`UntypedSegment`] will increment the reference count of +/// count. 
While cloning a [`DynUSegment`] will increment the reference count of /// many underlying pages. /// /// The downside is that the [`SegmentSlice`] requires heap allocation. Also, -/// if any [`SegmentSlice`] of the original [`UntypedSegment`] is alive, all pages in -/// the original [`UntypedSegment`], including the pages that are not referenced, will +/// if any [`SegmentSlice`] of the original [`DynUSegment`] is alive, all pages in +/// the original [`DynUSegment`], including the pages that are not referenced, will /// not be freed. #[derive(Debug, Clone)] pub struct SegmentSlice { - inner: Arc, + inner: Arc, range: Range, } impl SegmentSlice { - /// Returns a part of the `UntypedSegment`. + /// Returns a part of the `DynUSegment`. /// /// # Panics /// - /// If `range` is not within the range of this `UntypedSegment`, + /// If `range` is not within the range of this `DynUSegment`, /// then the method panics. pub fn range(&self, range: Range) -> Self { let orig_range = &self.range; @@ -124,9 +124,9 @@ impl VmIo for SegmentSlice { } } -impl From for SegmentSlice { - fn from(segment: UntypedSegment) -> Self { - let range = 0..segment.nbytes() / PAGE_SIZE; +impl From for SegmentSlice { + fn from(segment: DynUSegment) -> Self { + let range = 0..segment.size() / PAGE_SIZE; Self { inner: Arc::new(segment), range, @@ -134,7 +134,7 @@ impl From for SegmentSlice { } } -impl From for UntypedSegment { +impl From for DynUSegment { fn from(slice: SegmentSlice) -> Self { let start = slice.range.start * PAGE_SIZE; let end = slice.range.end * PAGE_SIZE; @@ -142,8 +142,8 @@ impl From for UntypedSegment { } } -impl From for SegmentSlice { - fn from(frame: UntypedFrame) -> Self { - SegmentSlice::from(UntypedSegment::from(frame)) +impl From for SegmentSlice { + fn from(frame: DynUFrame) -> Self { + SegmentSlice::from(DynUSegment::from(frame)) } } diff --git a/kernel/src/device/tdxguest/mod.rs b/kernel/src/device/tdxguest/mod.rs index 181ae8736..b2f2f722b 100644 --- a/kernel/src/device/tdxguest/mod.rs +++ b/kernel/src/device/tdxguest/mod.rs @@ -88,11 +88,8 @@ fn handle_get_report(arg: usize) -> Result { let user_space = CurrentUserSpace::new(¤t_task); let user_request: TdxReportRequest = user_space.read_val(arg)?; - let vm_segment = FrameAllocOptions::new(2) - .is_contiguous(true) - .alloc_contiguous() - .unwrap(); - let dma_coherent = DmaCoherent::map(vm_segment, false).unwrap(); + let segment = FrameAllocOptions::new().alloc_segment(2).unwrap(); + let dma_coherent = DmaCoherent::map(segment.into(), false).unwrap(); dma_coherent .write_bytes(0, &user_request.report_data) .unwrap(); diff --git a/kernel/src/fs/exfat/fs.rs b/kernel/src/fs/exfat/fs.rs index 26e3ab840..fc9313209 100644 --- a/kernel/src/fs/exfat/fs.rs +++ b/kernel/src/fs/exfat/fs.rs @@ -12,7 +12,7 @@ use aster_block::{ }; use hashbrown::HashMap; use lru::LruCache; -use ostd::mm::UntypedFrame; +use ostd::mm::DynUFrame; pub(super) use ostd::mm::VmIo; use super::{ @@ -368,7 +368,7 @@ impl ExfatFS { } impl PageCacheBackend for ExfatFS { - fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result { + fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result { if self.fs_size() < idx * PAGE_SIZE { return_errno_with_message!(Errno::EINVAL, "invalid read size") } @@ -380,7 +380,7 @@ impl PageCacheBackend for ExfatFS { Ok(waiter) } - fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result { + fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result { if self.fs_size() < idx * PAGE_SIZE { 
return_errno_with_message!(Errno::EINVAL, "invalid write size") } diff --git a/kernel/src/fs/exfat/inode.rs b/kernel/src/fs/exfat/inode.rs index c72c22b0c..d1497fc2f 100644 --- a/kernel/src/fs/exfat/inode.rs +++ b/kernel/src/fs/exfat/inode.rs @@ -13,7 +13,7 @@ use aster_block::{ BLOCK_SIZE, }; use aster_rights::Full; -use ostd::mm::{UntypedFrame, VmIo}; +use ostd::mm::{DynUFrame, VmIo}; use super::{ constants::*, @@ -135,7 +135,7 @@ struct ExfatInodeInner { } impl PageCacheBackend for ExfatInode { - fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result { + fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result { let inner = self.inner.read(); if inner.size < idx * PAGE_SIZE { return_errno_with_message!(Errno::EINVAL, "Invalid read size") @@ -150,7 +150,7 @@ impl PageCacheBackend for ExfatInode { Ok(waiter) } - fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result { + fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result { let inner = self.inner.read(); let sector_size = inner.fs().sector_size(); diff --git a/kernel/src/fs/exfat/mod.rs b/kernel/src/fs/exfat/mod.rs index 03d46090a..0457eb49f 100644 --- a/kernel/src/fs/exfat/mod.rs +++ b/kernel/src/fs/exfat/mod.rs @@ -22,7 +22,7 @@ mod test { BlockDevice, BlockDeviceMeta, }; use ostd::{ - mm::{FrameAllocOptions, UntypedSegment, VmIo, PAGE_SIZE}, + mm::{FrameAllocOptions, Segment, VmIo, PAGE_SIZE}, prelude::*, }; use rand::{rngs::SmallRng, RngCore, SeedableRng}; @@ -40,15 +40,15 @@ mod test { /// Followings are implementations of memory simulated block device pub const SECTOR_SIZE: usize = 512; - struct ExfatMemoryBioQueue(UntypedSegment); + struct ExfatMemoryBioQueue(Segment<()>); impl ExfatMemoryBioQueue { - pub fn new(segment: UntypedSegment) -> Self { + pub fn new(segment: Segment<()>) -> Self { ExfatMemoryBioQueue(segment) } pub fn sectors_count(&self) -> usize { - self.0.nbytes() / SECTOR_SIZE + self.0.size() / SECTOR_SIZE } } @@ -57,7 +57,7 @@ mod test { } impl ExfatMemoryDisk { - pub fn new(segment: UntypedSegment) -> Self { + pub fn new(segment: Segment<()>) -> Self { ExfatMemoryDisk { queue: ExfatMemoryBioQueue::new(segment), } @@ -111,20 +111,20 @@ mod test { static EXFAT_IMAGE: &[u8] = include_bytes!("../../../../test/build/exfat.img"); /// Read exfat disk image - fn new_vm_segment_from_image() -> UntypedSegment { - let vm_segment = FrameAllocOptions::new(EXFAT_IMAGE.len().div_ceil(PAGE_SIZE)) - .uninit(true) - .alloc_contiguous() + fn new_vm_segment_from_image() -> Segment<()> { + let segment = FrameAllocOptions::new() + .zeroed(false) + .alloc_segment(EXFAT_IMAGE.len().div_ceil(PAGE_SIZE)) .unwrap(); - vm_segment.write_bytes(0, EXFAT_IMAGE).unwrap(); - vm_segment + segment.write_bytes(0, EXFAT_IMAGE).unwrap(); + segment } // Generate a simulated exfat file system fn load_exfat() -> Arc { - let vm_segment = new_vm_segment_from_image(); - let disk = ExfatMemoryDisk::new(vm_segment); + let segment = new_vm_segment_from_image(); + let disk = ExfatMemoryDisk::new(segment); let mount_option = ExfatMountOptions::default(); let fs = ExfatFS::open(Arc::new(disk), mount_option); assert!(fs.is_ok(), "Fs failed to init:{:?}", fs.unwrap_err()); diff --git a/kernel/src/fs/ext2/block_group.rs b/kernel/src/fs/ext2/block_group.rs index 9fd28ea9f..fa2c6c4d0 100644 --- a/kernel/src/fs/ext2/block_group.rs +++ b/kernel/src/fs/ext2/block_group.rs @@ -28,7 +28,7 @@ struct BlockGroupImpl { impl BlockGroup { /// Loads and constructs a block group. 
pub fn load( - group_descriptors_segment: &UntypedSegment, + group_descriptors_segment: &DynUSegment, idx: usize, block_device: &dyn BlockDevice, super_block: &SuperBlock, @@ -318,7 +318,7 @@ impl Debug for BlockGroup { } impl PageCacheBackend for BlockGroupImpl { - fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result { + fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result { let bid = self.inode_table_bid + idx as Ext2Bid; let bio_segment = BioSegment::new_from_segment(frame.clone().into(), BioDirection::FromDevice); @@ -328,7 +328,7 @@ impl PageCacheBackend for BlockGroupImpl { .read_blocks_async(bid, bio_segment) } - fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result { + fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result { let bid = self.inode_table_bid + idx as Ext2Bid; let bio_segment = BioSegment::new_from_segment(frame.clone().into(), BioDirection::ToDevice); diff --git a/kernel/src/fs/ext2/fs.rs b/kernel/src/fs/ext2/fs.rs index 036d66bd4..a8c36b25c 100644 --- a/kernel/src/fs/ext2/fs.rs +++ b/kernel/src/fs/ext2/fs.rs @@ -23,7 +23,7 @@ pub struct Ext2 { blocks_per_group: Ext2Bid, inode_size: usize, block_size: usize, - group_descriptors_segment: UntypedSegment, + group_descriptors_segment: DynUSegment, self_ref: Weak, } @@ -46,11 +46,11 @@ impl Ext2 { let npages = ((super_block.block_groups_count() as usize) * core::mem::size_of::()) .div_ceil(BLOCK_SIZE); - let segment = FrameAllocOptions::new(npages) - .uninit(true) - .alloc_contiguous()?; + let segment = FrameAllocOptions::new() + .zeroed(false) + .alloc_segment(npages)?; let bio_segment = - BioSegment::new_from_segment(segment.clone(), BioDirection::FromDevice); + BioSegment::new_from_segment(segment.clone().into(), BioDirection::FromDevice); match block_device.read_blocks(super_block.group_descriptors_bid(0), bio_segment)? 
{ BioStatus::Complete => (), err_status => { @@ -63,7 +63,7 @@ impl Ext2 { // Load the block groups information let load_block_groups = |fs: Weak, block_device: &dyn BlockDevice, - group_descriptors_segment: &UntypedSegment| + group_descriptors_segment: &DynUSegment| -> Result> { let block_groups_count = super_block.block_groups_count() as usize; let mut block_groups = Vec::with_capacity(block_groups_count); @@ -88,12 +88,12 @@ impl Ext2 { block_groups: load_block_groups( weak_ref.clone(), block_device.as_ref(), - &group_descriptors_segment, + (&group_descriptors_segment).into(), ) .unwrap(), block_device, super_block: RwMutex::new(Dirty::new(super_block)), - group_descriptors_segment, + group_descriptors_segment: group_descriptors_segment.into(), self_ref: weak_ref.clone(), }); Ok(ext2) diff --git a/kernel/src/fs/ext2/indirect_block_cache.rs b/kernel/src/fs/ext2/indirect_block_cache.rs index 51125febe..a54d5d5d7 100644 --- a/kernel/src/fs/ext2/indirect_block_cache.rs +++ b/kernel/src/fs/ext2/indirect_block_cache.rs @@ -42,8 +42,10 @@ impl IndirectBlockCache { let fs = self.fs(); let load_block = || -> Result { let mut block = IndirectBlock::alloc_uninit()?; - let bio_segment = - BioSegment::new_from_segment(block.frame.clone().into(), BioDirection::FromDevice); + let bio_segment = BioSegment::new_from_segment( + Segment::<()>::from(block.frame.clone()).into(), + BioDirection::FromDevice, + ); fs.read_blocks(bid, bio_segment)?; block.state = State::UpToDate; Ok(block) @@ -61,8 +63,10 @@ impl IndirectBlockCache { let fs = self.fs(); let load_block = || -> Result { let mut block = IndirectBlock::alloc_uninit()?; - let bio_segment = - BioSegment::new_from_segment(block.frame.clone().into(), BioDirection::FromDevice); + let bio_segment = BioSegment::new_from_segment( + Segment::<()>::from(block.frame.clone()).into(), + BioDirection::FromDevice, + ); fs.read_blocks(bid, bio_segment)?; block.state = State::UpToDate; Ok(block) @@ -109,7 +113,7 @@ impl IndirectBlockCache { let (bid, block) = self.cache.pop_lru().unwrap(); if block.is_dirty() { let bio_segment = BioSegment::new_from_segment( - block.frame.clone().into(), + Segment::<()>::from(block.frame.clone()).into(), BioDirection::ToDevice, ); bio_waiter.concat(self.fs().write_blocks_async(bid, bio_segment)?); @@ -132,7 +136,7 @@ impl IndirectBlockCache { /// Represents a single indirect block buffer cached by the `IndirectCache`. #[derive(Clone, Debug)] pub struct IndirectBlock { - frame: UntypedFrame, + frame: Frame<()>, state: State, } @@ -140,7 +144,7 @@ impl IndirectBlock { /// Allocates an uninitialized block whose bytes are to be populated with /// data loaded from the disk. fn alloc_uninit() -> Result { - let frame = FrameAllocOptions::new(1).uninit(true).alloc_single()?; + let frame = FrameAllocOptions::new().zeroed(false).alloc_frame()?; Ok(Self { frame, state: State::Uninit, @@ -149,7 +153,7 @@ impl IndirectBlock { /// Allocates a new block with its bytes initialized to zero. 
pub fn alloc() -> Result { - let frame = FrameAllocOptions::new(1).alloc_single()?; + let frame = FrameAllocOptions::new().alloc_frame()?; Ok(Self { frame, state: State::Dirty, diff --git a/kernel/src/fs/ext2/inode.rs b/kernel/src/fs/ext2/inode.rs index dae0202a9..3ee65f555 100644 --- a/kernel/src/fs/ext2/inode.rs +++ b/kernel/src/fs/ext2/inode.rs @@ -1733,7 +1733,7 @@ impl InodeImpl { writer: &mut VmWriter, ) -> Result; pub fn read_blocks(&self, bid: Ext2Bid, nblocks: usize, writer: &mut VmWriter) -> Result<()>; - pub fn read_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result; + pub fn read_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result; pub fn write_blocks_async( &self, bid: Ext2Bid, @@ -1741,7 +1741,7 @@ impl InodeImpl { reader: &mut VmReader, ) -> Result; pub fn write_blocks(&self, bid: Ext2Bid, nblocks: usize, reader: &mut VmReader) -> Result<()>; - pub fn write_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result; + pub fn write_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result; } /// Manages the inode blocks and block I/O operations. @@ -1789,7 +1789,7 @@ impl InodeBlockManager { } } - pub fn read_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result { + pub fn read_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result { let mut bio_waiter = BioWaiter::new(); for dev_range in DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)? { @@ -1834,7 +1834,7 @@ impl InodeBlockManager { } } - pub fn write_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result { + pub fn write_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result { let mut bio_waiter = BioWaiter::new(); for dev_range in DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)? { @@ -1858,12 +1858,12 @@ impl InodeBlockManager { } impl PageCacheBackend for InodeBlockManager { - fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result { + fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result { let bid = idx as Ext2Bid; self.read_block_async(bid, frame) } - fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result { + fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result { let bid = idx as Ext2Bid; self.write_block_async(bid, frame) } diff --git a/kernel/src/fs/ext2/prelude.rs b/kernel/src/fs/ext2/prelude.rs index 0ec1f3d8a..588d7ff9c 100644 --- a/kernel/src/fs/ext2/prelude.rs +++ b/kernel/src/fs/ext2/prelude.rs @@ -13,7 +13,7 @@ pub(super) use aster_block::{ }; pub(super) use aster_rights::Full; pub(super) use ostd::{ - mm::{FrameAllocOptions, UntypedFrame, UntypedSegment, VmIo}, + mm::{DynUFrame, DynUSegment, Frame, FrameAllocOptions, Segment, VmIo}, sync::{RwMutex, RwMutexReadGuard, RwMutexWriteGuard}, }; pub(super) use static_assertions::const_assert; diff --git a/kernel/src/fs/ramfs/fs.rs b/kernel/src/fs/ramfs/fs.rs index 5fcbd9ada..f4569f7dc 100644 --- a/kernel/src/fs/ramfs/fs.rs +++ b/kernel/src/fs/ramfs/fs.rs @@ -11,7 +11,7 @@ use aster_rights::Full; use aster_util::slot_vec::SlotVec; use hashbrown::HashMap; use ostd::{ - mm::{UntypedFrame, VmIo}, + mm::{DynUFrame, UntypedMem, VmIo}, sync::{PreemptDisabled, RwLockWriteGuard}, }; @@ -484,7 +484,7 @@ impl RamInode { } impl PageCacheBackend for RamInode { - fn read_page_async(&self, _idx: usize, frame: &UntypedFrame) -> Result { + fn read_page_async(&self, _idx: usize, frame: &DynUFrame) -> Result { // Initially, any block/page in a RamFs inode contains all zeros frame .writer() @@ -494,7 +494,7 @@ impl PageCacheBackend for RamInode { 
Ok(BioWaiter::new()) } - fn write_page_async(&self, _idx: usize, _frame: &UntypedFrame) -> Result { + fn write_page_async(&self, _idx: usize, _frame: &DynUFrame) -> Result { // do nothing Ok(BioWaiter::new()) } diff --git a/kernel/src/fs/utils/page_cache.rs b/kernel/src/fs/utils/page_cache.rs index 8f4ca844d..92a15c055 100644 --- a/kernel/src/fs/utils/page_cache.rs +++ b/kernel/src/fs/utils/page_cache.rs @@ -8,7 +8,7 @@ use align_ext::AlignExt; use aster_block::bio::{BioStatus, BioWaiter}; use aster_rights::Full; use lru::LruCache; -use ostd::mm::{FrameAllocOptions, UntypedFrame, VmIo}; +use ostd::mm::{DynUFrame, Frame, FrameAllocOptions, VmIo}; use crate::{ prelude::*, @@ -305,7 +305,7 @@ impl ReadaheadState { }; for async_idx in window.readahead_range() { let mut async_page = Page::alloc()?; - let pg_waiter = backend.read_page_async(async_idx, async_page.frame())?; + let pg_waiter = backend.read_page_async(async_idx, async_page.frame().into())?; if pg_waiter.nreqs() > 0 { self.waiter.concat(pg_waiter); } else { @@ -361,7 +361,7 @@ impl PageCacheManager { for idx in page_idx_range.start..page_idx_range.end { if let Some(page) = pages.peek(&idx) { if *page.state() == PageState::Dirty && idx < backend_npages { - let waiter = backend.write_page_async(idx, page.frame())?; + let waiter = backend.write_page_async(idx, page.frame().into())?; bio_waiter.concat(waiter); } } @@ -381,7 +381,7 @@ impl PageCacheManager { Ok(()) } - fn ondemand_readahead(&self, idx: usize) -> Result { + fn ondemand_readahead(&self, idx: usize) -> Result { let mut pages = self.pages.lock(); let mut ra_state = self.ra_state.lock(); let backend = self.backend(); @@ -410,7 +410,7 @@ impl PageCacheManager { // Conducts the sync read operation. let page = if idx < backend.npages() { let mut page = Page::alloc()?; - backend.read_page(idx, page.frame())?; + backend.read_page(idx, page.frame().into())?; page.set_state(PageState::UpToDate); page } else { @@ -425,7 +425,7 @@ impl PageCacheManager { ra_state.conduct_readahead(&mut pages, backend)?; } ra_state.set_prev_page(idx); - Ok(frame) + Ok(frame.into()) } } @@ -438,7 +438,7 @@ impl Debug for PageCacheManager { } impl Pager for PageCacheManager { - fn commit_page(&self, idx: usize) -> Result { + fn commit_page(&self, idx: usize) -> Result { self.ondemand_readahead(idx) } @@ -461,7 +461,7 @@ impl Pager for PageCacheManager { return Ok(()); }; if idx < backend.npages() { - backend.write_page(idx, page.frame())?; + backend.write_page(idx, page.frame().into())?; } } } @@ -469,25 +469,31 @@ impl Pager for PageCacheManager { Ok(()) } - fn commit_overwrite(&self, idx: usize) -> Result { + fn commit_overwrite(&self, idx: usize) -> Result { if let Some(page) = self.pages.lock().get(&idx) { - return Ok(page.frame.clone()); + return Ok(page.frame.clone().into()); } let page = Page::alloc_zero()?; - Ok(self.pages.lock().get_or_insert(idx, || page).frame.clone()) + Ok(self + .pages + .lock() + .get_or_insert(idx, || page) + .frame + .clone() + .into()) } } #[derive(Debug)] struct Page { - frame: UntypedFrame, + frame: Frame<()>, state: PageState, } impl Page { pub fn alloc() -> Result { - let frame = FrameAllocOptions::new(1).uninit(true).alloc_single()?; + let frame = FrameAllocOptions::new().zeroed(false).alloc_frame()?; Ok(Self { frame, state: PageState::Uninit, @@ -495,14 +501,14 @@ impl Page { } pub fn alloc_zero() -> Result { - let frame = FrameAllocOptions::new(1).alloc_single()?; + let frame = FrameAllocOptions::new().alloc_frame()?; Ok(Self { frame, state: PageState::Dirty, }) } 
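In the page-cache hunks above and below, cached pages keep the typed `Frame<()>` while `PageCacheBackend` now speaks in metadata-erased `DynUFrame`s; call sites bridge the two with a reference-level `.into()`, avoiding a clone of the frame on every I/O submission. A sketch of that boundary (the function is illustrative only, relying on the reference conversion the call sites such as `read_page_async(idx, page.frame().into())` imply):

```rust
use ostd::mm::{DynUFrame, Frame};

// Borrow a typed frame as a metadata-erased one at the trait boundary.
fn as_untyped(frame: &Frame<()>) -> &DynUFrame {
    frame.into()
}
```

Owned conversions (`frame.clone().into()`, producing a `DynUFrame` by value) appear where the cache hands out pages, as in `commit_overwrite` above.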
- pub fn frame(&self) -> &UntypedFrame { + pub fn frame(&self) -> &Frame<()> { &self.frame } @@ -531,16 +537,16 @@ enum PageState { /// This trait represents the backend for the page cache. pub trait PageCacheBackend: Sync + Send { /// Reads a page from the backend asynchronously. - fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result; + fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result; /// Writes a page to the backend asynchronously. - fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result; + fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result; /// Returns the number of pages in the backend. fn npages(&self) -> usize; } impl dyn PageCacheBackend { /// Reads a page from the backend synchronously. - fn read_page(&self, idx: usize, frame: &UntypedFrame) -> Result<()> { + fn read_page(&self, idx: usize, frame: &DynUFrame) -> Result<()> { let waiter = self.read_page_async(idx, frame)?; match waiter.wait() { Some(BioStatus::Complete) => Ok(()), @@ -548,7 +554,7 @@ impl dyn PageCacheBackend { } } /// Writes a page to the backend synchronously. - fn write_page(&self, idx: usize, frame: &UntypedFrame) -> Result<()> { + fn write_page(&self, idx: usize, frame: &DynUFrame) -> Result<()> { let waiter = self.write_page_async(idx, frame)?; match waiter.wait() { Some(BioStatus::Complete) => Ok(()), diff --git a/kernel/src/process/process_vm/init_stack/mod.rs b/kernel/src/process/process_vm/init_stack/mod.rs index 99fe1cfae..1dc2b26a1 100644 --- a/kernel/src/process/process_vm/init_stack/mod.rs +++ b/kernel/src/process/process_vm/init_stack/mod.rs @@ -20,7 +20,7 @@ use core::{ use align_ext::AlignExt; use aster_rights::Full; -use ostd::mm::{vm_space::VmItem, VmIo, VmSpace, MAX_USERSPACE_VADDR}; +use ostd::mm::{vm_space::VmItem, UntypedMem, VmIo, VmSpace, MAX_USERSPACE_VADDR}; use self::aux_vec::{AuxKey, AuxVec}; use crate::{ diff --git a/kernel/src/process/program_loader/elf/load_elf.rs b/kernel/src/process/program_loader/elf/load_elf.rs index 711931b63..4ca0c5002 100644 --- a/kernel/src/process/program_loader/elf/load_elf.rs +++ b/kernel/src/process/program_loader/elf/load_elf.rs @@ -306,7 +306,7 @@ fn map_segment_vmo( new_frame }; let head_idx = segment_offset / PAGE_SIZE; - segment_vmo.replace(new_frame, head_idx)?; + segment_vmo.replace(new_frame.into(), head_idx)?; } // Tail padding. @@ -324,7 +324,7 @@ fn map_segment_vmo( }; let tail_idx = (segment_offset + tail_padding_offset) / PAGE_SIZE; - segment_vmo.replace(new_frame, tail_idx).unwrap(); + segment_vmo.replace(new_frame.into(), tail_idx).unwrap(); } let perms = parse_segment_perm(program_header.flags); diff --git a/kernel/src/util/ring_buffer.rs b/kernel/src/util/ring_buffer.rs index 4daf9ce2d..e42d41bed 100644 --- a/kernel/src/util/ring_buffer.rs +++ b/kernel/src/util/ring_buffer.rs @@ -8,12 +8,12 @@ use core::{ use align_ext::AlignExt; use inherit_methods_macro::inherit_methods; -use ostd::mm::{FrameAllocOptions, UntypedSegment, VmIo}; +use ostd::mm::{FrameAllocOptions, Segment, UntypedMem, VmIo}; use super::{MultiRead, MultiWrite}; use crate::prelude::*; -/// A lock-free SPSC FIFO ring buffer backed by a [`UntypedSegment`]. +/// A lock-free SPSC FIFO ring buffer backed by a [`Segment<()>`]. /// /// The ring buffer supports `push`/`pop` any `T: Pod` items, also /// supports `write`/`read` any bytes data based on [`VmReader`]/[`VmWriter`]. 
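The ring buffer's backing store, allocated in the next hunk, is sized by rounding the byte capacity up to whole pages. A sketch of that arithmetic under the new allocator API, with `item_size` standing in for `size_of::<T>()`:

```rust
use align_ext::AlignExt;
use ostd::mm::{FrameAllocOptions, Segment, PAGE_SIZE};

// Compute the page count for `capacity` items and allocate the backing
// segment uninitialized, as `RingBuffer::new` does below.
fn alloc_ring_backing(capacity: usize, item_size: usize) -> Segment<()> {
    let nframes = capacity
        .saturating_mul(item_size)
        .align_up(PAGE_SIZE)
        / PAGE_SIZE;
    FrameAllocOptions::new()
        .zeroed(false) // ring contents are always written before being read
        .alloc_segment(nframes)
        .unwrap()
}
```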
@@ -46,7 +46,7 @@ use crate::prelude::*; /// } /// ``` pub struct RingBuffer { - segment: UntypedSegment, + segment: Segment<()>, capacity: usize, tail: AtomicUsize, head: AtomicUsize, @@ -78,9 +78,9 @@ impl RingBuffer { "capacity must be a power of two" ); let nframes = capacity.saturating_mul(Self::T_SIZE).align_up(PAGE_SIZE) / PAGE_SIZE; - let segment = FrameAllocOptions::new(nframes) - .uninit(true) - .alloc_contiguous() + let segment = FrameAllocOptions::new() + .zeroed(false) + .alloc_segment(nframes) .unwrap(); Self { segment, diff --git a/kernel/src/vdso.rs b/kernel/src/vdso.rs index fcfa8f07e..ddcdc8de8 100644 --- a/kernel/src/vdso.rs +++ b/kernel/src/vdso.rs @@ -21,7 +21,7 @@ use aster_rights::Rights; use aster_time::{read_monotonic_time, Instant}; use aster_util::coeff::Coeff; use ostd::{ - mm::{UntypedFrame, VmIo, PAGE_SIZE}, + mm::{DynUFrame, VmIo, PAGE_SIZE}, sync::SpinLock, Pod, }; @@ -199,9 +199,9 @@ struct Vdso { data: SpinLock, /// The VMO of the entire VDSO, including the library text and the VDSO data. vmo: Arc, - /// The `UntypedFrame` that contains the VDSO data. This frame is contained in and + /// The `DynUFrame` that contains the VDSO data. This frame is contained in and /// will not be removed from the VDSO VMO. - data_frame: UntypedFrame, + data_frame: DynUFrame, } /// A `SpinLock` for the `seq` field in `VdsoData`. diff --git a/kernel/src/vm/util.rs b/kernel/src/vm/util.rs index c5ccc5439..26983fb01 100644 --- a/kernel/src/vm/util.rs +++ b/kernel/src/vm/util.rs @@ -1,12 +1,14 @@ // SPDX-License-Identifier: MPL-2.0 -use ostd::mm::{FrameAllocOptions, UntypedFrame}; +use ostd::mm::{DynUFrame, Frame, FrameAllocOptions, UntypedMem}; use crate::prelude::*; -/// Creates a new `UntypedFrame` and initializes it with the contents of the `src`. -pub fn duplicate_frame(src: &UntypedFrame) -> Result { - let new_frame = FrameAllocOptions::new(1).uninit(true).alloc_single()?; - new_frame.copy_from(src); +/// Creates a new `Frame<()>` and initializes it with the contents of the `src`. +/// +/// Note that it only duplicates the contents not the metadata. +pub fn duplicate_frame(src: &DynUFrame) -> Result> { + let new_frame = FrameAllocOptions::new().zeroed(false).alloc_frame()?; + new_frame.writer().write(&mut src.reader()); Ok(new_frame) } diff --git a/kernel/src/vm/vmar/vm_mapping.rs b/kernel/src/vm/vmar/vm_mapping.rs index 4528888bb..7bb56b37d 100644 --- a/kernel/src/vm/vmar/vm_mapping.rs +++ b/kernel/src/vm/vmar/vm_mapping.rs @@ -8,8 +8,8 @@ use core::{ use align_ext::AlignExt; use ostd::mm::{ - tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, FrameAllocOptions, PageFlags, PageProperty, - UntypedFrame, VmSpace, + tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, DynUFrame, FrameAllocOptions, PageFlags, + PageProperty, VmSpace, }; use super::interval_set::Interval; @@ -186,7 +186,7 @@ impl VmMapping { } else { let new_frame = duplicate_frame(&frame)?; prop.flags |= new_flags; - cursor.map(new_frame, prop); + cursor.map(new_frame.into(), prop); } } VmItem::NotMapped { .. 
} => { @@ -216,17 +216,17 @@ impl VmMapping { Ok(()) } - fn prepare_page(&self, page_fault_addr: Vaddr, write: bool) -> Result<(UntypedFrame, bool)> { + fn prepare_page(&self, page_fault_addr: Vaddr, write: bool) -> Result<(DynUFrame, bool)> { let mut is_readonly = false; let Some(vmo) = &self.vmo else { - return Ok((FrameAllocOptions::new(1).alloc_single()?, is_readonly)); + return Ok((FrameAllocOptions::new().alloc_frame()?.into(), is_readonly)); }; let page_offset = page_fault_addr.align_down(PAGE_SIZE) - self.map_to_addr; let Ok(page) = vmo.get_committed_frame(page_offset) else { if !self.is_shared { // The page index is outside the VMO. This is only allowed in private mapping. - return Ok((FrameAllocOptions::new(1).alloc_single()?, is_readonly)); + return Ok((FrameAllocOptions::new().alloc_frame()?.into(), is_readonly)); } else { return_errno_with_message!( Errno::EFAULT, @@ -237,7 +237,7 @@ impl VmMapping { if !self.is_shared && write { // Write access to private VMO-backed mapping. Performs COW directly. - Ok((duplicate_frame(&page)?, is_readonly)) + Ok((duplicate_frame(&page)?.into(), is_readonly)) } else { // Operations to shared mapping or read access to private VMO-backed mapping. // If read access to private VMO-backed mapping triggers a page fault, @@ -264,7 +264,7 @@ impl VmMapping { let vm_perms = self.perms - VmPerms::WRITE; let mut cursor = vm_space.cursor_mut(&(start_addr..end_addr))?; - let operate = move |commit_fn: &mut dyn FnMut() -> Result| { + let operate = move |commit_fn: &mut dyn FnMut() -> Result| { if let VmItem::NotMapped { .. } = cursor.query().unwrap() { // We regard all the surrounding pages as accessed, no matter // if it is really so. Then the hardware won't bother to update @@ -432,7 +432,7 @@ impl MappedVmo { /// /// If the VMO has not committed a frame at this index, it will commit /// one first and return it. - fn get_committed_frame(&self, page_offset: usize) -> Result { + fn get_committed_frame(&self, page_offset: usize) -> Result { debug_assert!(page_offset < self.range.len()); debug_assert!(page_offset % PAGE_SIZE == 0); self.vmo.commit_page(self.range.start + page_offset) @@ -444,7 +444,7 @@ impl MappedVmo { /// perform other operations. fn operate_on_range(&self, range: &Range, operate: F) -> Result<()> where - F: FnMut(&mut dyn FnMut() -> Result) -> Result<()>, + F: FnMut(&mut dyn FnMut() -> Result) -> Result<()>, { debug_assert!(range.start < self.range.len()); debug_assert!(range.end <= self.range.len()); diff --git a/kernel/src/vm/vmo/dyn_cap.rs b/kernel/src/vm/vmo/dyn_cap.rs index f38d31e5a..bb72cd741 100644 --- a/kernel/src/vm/vmo/dyn_cap.rs +++ b/kernel/src/vm/vmo/dyn_cap.rs @@ -3,14 +3,14 @@ use core::ops::Range; use aster_rights::{Rights, TRights}; -use ostd::mm::{UntypedFrame, VmIo}; +use ostd::mm::{DynUFrame, VmIo}; use super::{CommitFlags, Vmo, VmoRightsOp}; use crate::prelude::*; impl Vmo { /// Commits a page at specific offset - pub fn commit_page(&self, offset: usize) -> Result { + pub fn commit_page(&self, offset: usize) -> Result { self.check_rights(Rights::WRITE)?; self.0.commit_page(offset) } @@ -39,7 +39,7 @@ impl Vmo { /// perform other operations. pub(in crate::vm) fn operate_on_range(&self, range: &Range, operate: F) -> Result<()> where - F: FnMut(&mut dyn FnMut() -> Result) -> Result<()>, + F: FnMut(&mut dyn FnMut() -> Result) -> Result<()>, { self.check_rights(Rights::WRITE)?; self.0 @@ -112,7 +112,7 @@ impl Vmo { /// # Access rights /// /// The method requires the Write right. 
- pub fn replace(&self, page: UntypedFrame, page_idx: usize) -> Result<()> { + pub fn replace(&self, page: DynUFrame, page_idx: usize) -> Result<()> { self.check_rights(Rights::WRITE)?; self.0.replace(page, page_idx) } diff --git a/kernel/src/vm/vmo/mod.rs b/kernel/src/vm/vmo/mod.rs index 0d5e8cf49..708d55542 100644 --- a/kernel/src/vm/vmo/mod.rs +++ b/kernel/src/vm/vmo/mod.rs @@ -11,7 +11,7 @@ use align_ext::AlignExt; use aster_rights::Rights; use ostd::{ collections::xarray::{CursorMut, XArray}, - mm::{FrameAllocOptions, UntypedFrame, VmReader, VmWriter}, + mm::{DynUFrame, FrameAllocOptions, UntypedMem, VmReader, VmWriter}, }; use crate::prelude::*; @@ -66,8 +66,8 @@ pub use pager::Pager; /// # Implementation /// /// `Vmo` provides high-level APIs for address space management by wrapping -/// around its low-level counterpart [`ostd::mm::UntypedFrame`]. -/// Compared with `UntypedFrame`, +/// around its low-level counterpart [`ostd::mm::DynUFrame`]. +/// Compared with `DynUFrame`, /// `Vmo` is easier to use (by offering more powerful APIs) and /// harder to misuse (thanks to its nature of being capability). #[derive(Debug)] @@ -125,12 +125,12 @@ bitflags! { } } -/// `Pages` is the struct that manages the `UntypedFrame`s stored in `Vmo_`. +/// `Pages` is the struct that manages the `DynUFrame`s stored in `Vmo_`. pub(super) enum Pages { /// `Pages` that cannot be resized. This kind of `Pages` will have a constant size. - Nonresizable(Mutex>, usize), + Nonresizable(Mutex>, usize), /// `Pages` that can be resized and have a variable size. - Resizable(Mutex<(XArray, usize)>), + Resizable(Mutex<(XArray, usize)>), } impl Clone for Pages { @@ -149,7 +149,7 @@ impl Clone for Pages { impl Pages { fn with(&self, func: F) -> R where - F: FnOnce(&mut XArray, usize) -> R, + F: FnOnce(&mut XArray, usize) -> R, { match self { Self::Nonresizable(pages, size) => func(&mut pages.lock(), *size), @@ -201,28 +201,28 @@ impl CommitFlags { } impl Vmo_ { - /// Prepares a new `UntypedFrame` for the target index in pages, returns this new frame. - fn prepare_page(&self, page_idx: usize) -> Result { + /// Prepares a new `DynUFrame` for the target index in pages, returns this new frame. + fn prepare_page(&self, page_idx: usize) -> Result { match &self.pager { - None => Ok(FrameAllocOptions::new(1).alloc_single()?), + None => Ok(FrameAllocOptions::new().alloc_frame()?.into()), Some(pager) => pager.commit_page(page_idx), } } - /// Prepares a new `UntypedFrame` for the target index in the VMO, returns this new frame. - fn prepare_overwrite(&self, page_idx: usize) -> Result { + /// Prepares a new `DynUFrame` for the target index in the VMO, returns this new frame. + fn prepare_overwrite(&self, page_idx: usize) -> Result { if let Some(pager) = &self.pager { pager.commit_overwrite(page_idx) } else { - Ok(FrameAllocOptions::new(1).alloc_single()?) + Ok(FrameAllocOptions::new().alloc_frame()?.into()) } } fn commit_with_cursor( &self, - cursor: &mut CursorMut<'_, UntypedFrame>, + cursor: &mut CursorMut<'_, DynUFrame>, commit_flags: CommitFlags, - ) -> Result { + ) -> Result { let new_page = { if let Some(committed_page) = cursor.load() { // Fast path: return the page directly. @@ -241,7 +241,7 @@ impl Vmo_ { /// Commits the page corresponding to the target offset in the VMO and return that page. /// If the current offset has already been committed, the page will be returned directly. 
- pub fn commit_page(&self, offset: usize) -> Result { + pub fn commit_page(&self, offset: usize) -> Result { let page_idx = offset / PAGE_SIZE; self.pages.with(|pages, size| { if offset >= size { @@ -279,7 +279,7 @@ impl Vmo_ { commit_flags: CommitFlags, ) -> Result<()> where - F: FnMut(&mut dyn FnMut() -> Result) -> Result<()>, + F: FnMut(&mut dyn FnMut() -> Result) -> Result<()>, { self.pages.with(|pages, size| { if range.end > size { @@ -315,7 +315,7 @@ impl Vmo_ { let read_range = offset..(offset + read_len); let mut read_offset = offset % PAGE_SIZE; - let read = move |commit_fn: &mut dyn FnMut() -> Result| { + let read = move |commit_fn: &mut dyn FnMut() -> Result| { let frame = commit_fn()?; frame.reader().skip(read_offset).read_fallible(writer)?; read_offset = 0; @@ -331,7 +331,7 @@ impl Vmo_ { let write_range = offset..(offset + write_len); let mut write_offset = offset % PAGE_SIZE; - let mut write = move |commit_fn: &mut dyn FnMut() -> Result| { + let mut write = move |commit_fn: &mut dyn FnMut() -> Result| { let frame = commit_fn()?; frame.writer().skip(write_offset).write_fallible(reader)?; write_offset = 0; @@ -401,7 +401,7 @@ impl Vmo_ { Ok(()) } - fn decommit_pages(&self, pages: &mut XArray, range: Range) -> Result<()> { + fn decommit_pages(&self, pages: &mut XArray, range: Range) -> Result<()> { let page_idx_range = get_page_idx_range(&range); let mut cursor = pages.cursor_mut(page_idx_range.start as u64); for page_idx in page_idx_range { @@ -426,7 +426,7 @@ impl Vmo_ { self.flags } - fn replace(&self, page: UntypedFrame, page_idx: usize) -> Result<()> { + fn replace(&self, page: DynUFrame, page_idx: usize) -> Result<()> { self.pages.with(|pages, size| { if page_idx >= size / PAGE_SIZE { return_errno_with_message!(Errno::EINVAL, "the page index is outside of the vmo"); diff --git a/kernel/src/vm/vmo/options.rs b/kernel/src/vm/vmo/options.rs index db8e7ed52..77a418669 100644 --- a/kernel/src/vm/vmo/options.rs +++ b/kernel/src/vm/vmo/options.rs @@ -8,7 +8,7 @@ use align_ext::AlignExt; use aster_rights::{Rights, TRightSet, TRights}; use ostd::{ collections::xarray::XArray, - mm::{FrameAllocOptions, UntypedFrame}, + mm::{DynUFrame, DynUSegment, FrameAllocOptions}, }; use super::{Pager, Pages, Vmo, VmoFlags}; @@ -137,13 +137,11 @@ fn alloc_vmo_(size: usize, flags: VmoFlags, pager: Option>) -> Re }) } -fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result> { +fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result> { if flags.contains(VmoFlags::CONTIGUOUS) { // if the vmo is continuous, we need to allocate frames for the vmo let frames_num = size / PAGE_SIZE; - let segment = FrameAllocOptions::new(frames_num) - .is_contiguous(true) - .alloc_contiguous()?; + let segment: DynUSegment = FrameAllocOptions::new().alloc_segment(frames_num)?.into(); let mut committed_pages = XArray::new(); let mut cursor = committed_pages.cursor_mut(0); for frame in segment { diff --git a/kernel/src/vm/vmo/pager.rs b/kernel/src/vm/vmo/pager.rs index 2dc4555a6..38cac55b1 100644 --- a/kernel/src/vm/vmo/pager.rs +++ b/kernel/src/vm/vmo/pager.rs @@ -1,6 +1,6 @@ // SPDX-License-Identifier: MPL-2.0 -use ostd::mm::UntypedFrame; +use ostd::mm::DynUFrame; use crate::prelude::*; @@ -26,7 +26,7 @@ pub trait Pager: Send + Sync { /// whatever frame that may or may not be the same as the last time. /// /// It is up to the pager to decide the range of valid indices. 
- fn commit_page(&self, idx: usize) -> Result; + fn commit_page(&self, idx: usize) -> Result; /// Notify the pager that the frame at a specified index has been updated. /// @@ -54,5 +54,5 @@ pub trait Pager: Send + Sync { /// Ask the pager to provide a frame at a specified index. /// Notify the pager that the frame will be fully overwritten soon, so pager can /// choose not to initialize it. - fn commit_overwrite(&self, idx: usize) -> Result; + fn commit_overwrite(&self, idx: usize) -> Result; } diff --git a/kernel/src/vm/vmo/static_cap.rs b/kernel/src/vm/vmo/static_cap.rs index e0b246922..fde449dd4 100644 --- a/kernel/src/vm/vmo/static_cap.rs +++ b/kernel/src/vm/vmo/static_cap.rs @@ -4,14 +4,14 @@ use core::ops::Range; use aster_rights::{Dup, Rights, TRightSet, TRights, Write}; use aster_rights_proc::require; -use ostd::mm::{UntypedFrame, VmIo}; +use ostd::mm::{DynUFrame, VmIo}; use super::{CommitFlags, Vmo, VmoRightsOp}; use crate::prelude::*; impl Vmo> { /// Commits a page at specific offset. - pub fn commit_page(&self, offset: usize) -> Result { + pub fn commit_page(&self, offset: usize) -> Result { self.check_rights(Rights::WRITE)?; self.0.commit_page(offset) } @@ -41,7 +41,7 @@ impl Vmo> { #[require(R > Write)] pub(in crate::vm) fn operate_on_range(&self, range: &Range, operate: F) -> Result<()> where - F: FnMut(&mut dyn FnMut() -> Result) -> Result<()>, + F: FnMut(&mut dyn FnMut() -> Result) -> Result<()>, { self.0 .operate_on_range(range, operate, CommitFlags::empty()) @@ -114,7 +114,7 @@ impl Vmo> { /// /// The method requires the Write right. #[require(R > Write)] - pub fn replace(&self, page: UntypedFrame, page_idx: usize) -> Result<()> { + pub fn replace(&self, page: DynUFrame, page_idx: usize) -> Result<()> { self.0.replace(page, page_idx) } diff --git a/osdk/tests/examples_in_book/write_a_kernel_in_100_lines_templates/lib.rs b/osdk/tests/examples_in_book/write_a_kernel_in_100_lines_templates/lib.rs index 5939cedf8..21c69f9e9 100644 --- a/osdk/tests/examples_in_book/write_a_kernel_in_100_lines_templates/lib.rs +++ b/osdk/tests/examples_in_book/write_a_kernel_in_100_lines_templates/lib.rs @@ -36,11 +36,11 @@ pub fn main() { fn create_user_space(program: &[u8]) -> UserSpace { let nbytes = program.len().align_up(PAGE_SIZE); let user_pages = { - let segment = FrameAllocOptions::new(nbytes / PAGE_SIZE) - .alloc_contiguous() + let segment = FrameAllocOptions::new() + .alloc_segment(nbytes / PAGE_SIZE) .unwrap(); // Physical memory pages can be only accessed - // via the `UntypedFrame` or `UntypedSegment` abstraction. + // via the `DynUFrame` or `DynUSegment` abstraction. 
segment.write_bytes(0, program).unwrap(); segment }; @@ -54,7 +54,7 @@ fn create_user_space(program: &[u8]) -> UserSpace { let mut cursor = vm_space.cursor_mut(&(MAP_ADDR..MAP_ADDR + nbytes)).unwrap(); let map_prop = PageProperty::new(PageFlags::RWX, CachePolicy::Writeback); for frame in user_pages { - cursor.map(frame, map_prop); + cursor.map(frame.into(), map_prop); } drop(cursor); Arc::new(vm_space) diff --git a/ostd/src/arch/x86/iommu/dma_remapping/context_table.rs b/ostd/src/arch/x86/iommu/dma_remapping/context_table.rs index 98a7ac24b..d0caf484e 100644 --- a/ostd/src/arch/x86/iommu/dma_remapping/context_table.rs +++ b/ostd/src/arch/x86/iommu/dma_remapping/context_table.rs @@ -15,7 +15,7 @@ use crate::{ dma::Daddr, page_prop::{CachePolicy, PageProperty, PrivilegedPageFlags as PrivFlags}, page_table::{PageTableError, PageTableItem}, - FrameAllocOptions, Paddr, PageFlags, PageTable, UntypedFrame, VmIo, PAGE_SIZE, + Frame, FrameAllocOptions, Paddr, PageFlags, PageTable, VmIo, PAGE_SIZE, }, }; @@ -38,7 +38,7 @@ impl RootEntry { pub struct RootTable { /// Total 256 bus, each entry is 128 bits. - root_frame: UntypedFrame, + root_frame: Frame<()>, // TODO: Use radix tree instead. context_tables: BTreeMap, } @@ -57,7 +57,7 @@ impl RootTable { pub(super) fn new() -> Self { Self { - root_frame: FrameAllocOptions::new(1).alloc_single().unwrap(), + root_frame: FrameAllocOptions::new().alloc_frame().unwrap(), context_tables: BTreeMap::new(), } } @@ -236,14 +236,14 @@ pub enum AddressWidth { pub struct ContextTable { /// Total 32 devices, each device has 8 functions. - entries_frame: UntypedFrame, + entries_frame: Frame<()>, page_tables: BTreeMap>, } impl ContextTable { fn new() -> Self { Self { - entries_frame: FrameAllocOptions::new(1).alloc_single().unwrap(), + entries_frame: FrameAllocOptions::new().alloc_frame().unwrap(), page_tables: BTreeMap::new(), } } diff --git a/ostd/src/arch/x86/iommu/interrupt_remapping/table.rs b/ostd/src/arch/x86/iommu/interrupt_remapping/table.rs index 41b6efbc3..00e61d6ef 100644 --- a/ostd/src/arch/x86/iommu/interrupt_remapping/table.rs +++ b/ostd/src/arch/x86/iommu/interrupt_remapping/table.rs @@ -9,7 +9,7 @@ use int_to_c_enum::TryFromInt; use super::IrtEntryHandle; use crate::{ - mm::{paddr_to_vaddr, FrameAllocOptions, UntypedSegment, PAGE_SIZE}, + mm::{paddr_to_vaddr, FrameAllocOptions, Segment, PAGE_SIZE}, sync::{LocalIrqDisabled, SpinLock}, }; @@ -23,7 +23,7 @@ enum ExtendedInterruptMode { pub struct IntRemappingTable { size: u16, extended_interrupt_mode: ExtendedInterruptMode, - frames: UntypedSegment, + frames: Segment<()>, /// The global allocator for Interrupt remapping entry. allocator: SpinLock, handles: Vec>>, @@ -35,12 +35,11 @@ impl IntRemappingTable { Some(self.handles.get(id).unwrap().clone()) } - /// Creates an Interrupt Remapping Table with one UntypedFrame (default). + /// Creates an Interrupt Remapping Table with one DynUFrame (default). 
pub(super) fn new() -> Self { const DEFAULT_PAGES: usize = 1; - let segment = FrameAllocOptions::new(DEFAULT_PAGES) - .is_contiguous(true) - .alloc_contiguous() + let segment = FrameAllocOptions::new() + .alloc_segment(DEFAULT_PAGES) .unwrap(); let entry_number = (DEFAULT_PAGES * PAGE_SIZE / size_of::()) as u16; diff --git a/ostd/src/arch/x86/iommu/invalidate/queue.rs b/ostd/src/arch/x86/iommu/invalidate/queue.rs index 4a4fc3818..6adbabb54 100644 --- a/ostd/src/arch/x86/iommu/invalidate/queue.rs +++ b/ostd/src/arch/x86/iommu/invalidate/queue.rs @@ -3,12 +3,12 @@ use core::mem::size_of; use crate::{ - mm::{FrameAllocOptions, UntypedSegment, VmIo, PAGE_SIZE}, + mm::{FrameAllocOptions, Segment, VmIo, PAGE_SIZE}, prelude::Paddr, }; pub struct Queue { - segment: UntypedSegment, + segment: Segment<()>, queue_size: usize, tail: usize, } @@ -38,9 +38,8 @@ impl Queue { pub(super) fn new() -> Self { const DEFAULT_PAGES: usize = 1; - let segment = FrameAllocOptions::new(DEFAULT_PAGES) - .is_contiguous(true) - .alloc_contiguous() + let segment = FrameAllocOptions::new() + .alloc_segment(DEFAULT_PAGES) .unwrap(); Self { segment, diff --git a/ostd/src/boot/smp.rs b/ostd/src/boot/smp.rs index 543e8d371..512816b53 100644 --- a/ostd/src/boot/smp.rs +++ b/ostd/src/boot/smp.rs @@ -10,11 +10,7 @@ use spin::Once; use crate::{ arch::boot::smp::{bringup_all_aps, get_num_processors}, cpu, - mm::{ - frame::{self, Segment}, - kspace::KernelMeta, - paddr_to_vaddr, PAGE_SIZE, - }, + mm::{frame::Segment, kspace::KernelMeta, paddr_to_vaddr, FrameAllocOptions, PAGE_SIZE}, task::Task, }; @@ -62,14 +58,17 @@ pub fn boot_all_aps() { AP_BOOT_INFO.call_once(|| { let mut per_ap_info = BTreeMap::new(); // Use two pages to place stack pointers of all APs, thus support up to 1024 APs. - let boot_stack_array = - frame::allocator::alloc_contiguous(2 * PAGE_SIZE, |_| KernelMeta::default()).unwrap(); + let boot_stack_array = FrameAllocOptions::new() + .zeroed(false) + .alloc_segment_with(2, |_| KernelMeta) + .unwrap(); assert!(num_cpus < 1024); for ap in 1..num_cpus { - let boot_stack_pages = - frame::allocator::alloc_contiguous(AP_BOOT_STACK_SIZE, |_| KernelMeta::default()) - .unwrap(); + let boot_stack_pages = FrameAllocOptions::new() + .zeroed(false) + .alloc_segment_with(AP_BOOT_STACK_SIZE / PAGE_SIZE, |_| KernelMeta) + .unwrap(); let boot_stack_ptr = paddr_to_vaddr(boot_stack_pages.end_paddr()); let stack_array_ptr = paddr_to_vaddr(boot_stack_array.start_paddr()) as *mut u64; // SAFETY: The `stack_array_ptr` is valid and aligned. diff --git a/ostd/src/cpu/local/mod.rs b/ostd/src/cpu/local/mod.rs index ed05248d6..bd122cef0 100644 --- a/ostd/src/cpu/local/mod.rs +++ b/ostd/src/cpu/local/mod.rs @@ -43,11 +43,7 @@ use spin::Once; use crate::{ arch, - mm::{ - frame::{self, Segment}, - kspace::KernelMeta, - paddr_to_vaddr, PAGE_SIZE, - }, + mm::{frame::Segment, kspace::KernelMeta, paddr_to_vaddr, FrameAllocOptions, PAGE_SIZE}, }; // These symbols are provided by the linker script. 
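The SMP and CPU-local hunks replace the ad-hoc `frame::allocator::alloc_contiguous(nbytes, |_| KernelMeta::default())` with the same `FrameAllocOptions` builder, now parameterized by metadata: `alloc_segment_with(nframes, |_| KernelMeta)` tags every frame and returns a typed `Segment<KernelMeta>`. Note the unit change as well: the old helper took a byte count, the new one takes a frame count. A minimal in-crate sketch (paths are `ostd`-internal, mirroring the imports above; the helper name is hypothetical):

```rust
use crate::mm::{frame::Segment, kspace::KernelMeta, FrameAllocOptions, PAGE_SIZE};

// Allocate `nbytes` of kernel memory, tagging each frame with `KernelMeta`.
// The caller converts bytes to frames, as `init_on_bsp` does below.
fn alloc_kernel_pages(nbytes: usize) -> Segment<KernelMeta> {
    FrameAllocOptions::new()
        .zeroed(false)
        .alloc_segment_with(nbytes / PAGE_SIZE, |_| KernelMeta)
        .unwrap()
}
```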
@@ -99,7 +95,10 @@ pub unsafe fn init_on_bsp() { for _ in 1..num_cpus { let ap_pages = { let nbytes = (bsp_end_va - bsp_base_va).align_up(PAGE_SIZE); - frame::allocator::alloc_contiguous(nbytes, |_| KernelMeta::default()).unwrap() + FrameAllocOptions::new() + .zeroed(false) + .alloc_segment_with(nbytes / PAGE_SIZE, |_| KernelMeta) + .unwrap() }; let ap_pages_ptr = paddr_to_vaddr(ap_pages.start_paddr()) as *mut u8; diff --git a/ostd/src/mm/dma/dma_coherent.rs b/ostd/src/mm/dma/dma_coherent.rs index 872b868f4..b9233ddd0 100644 --- a/ostd/src/mm/dma/dma_coherent.rs +++ b/ostd/src/mm/dma/dma_coherent.rs @@ -13,7 +13,8 @@ use crate::{ io::VmIoOnce, kspace::{paddr_to_vaddr, KERNEL_PAGE_TABLE}, page_prop::CachePolicy, - HasPaddr, Infallible, Paddr, PodOnce, UntypedSegment, VmIo, VmReader, VmWriter, PAGE_SIZE, + DynUSegment, HasPaddr, Infallible, Paddr, PodOnce, UntypedMem, VmIo, VmReader, VmWriter, + PAGE_SIZE, }, prelude::*, }; @@ -38,27 +39,27 @@ pub struct DmaCoherent { #[derive(Debug)] struct DmaCoherentInner { - vm_segment: UntypedSegment, + segment: DynUSegment, start_daddr: Daddr, is_cache_coherent: bool, } impl DmaCoherent { - /// Creates a coherent DMA mapping backed by `vm_segment`. + /// Creates a coherent DMA mapping backed by `segment`. /// /// The `is_cache_coherent` argument specifies whether /// the target device that the DMA mapping is prepared for /// can access the main memory in a CPU cache coherent way /// or not. /// - /// The method fails if any part of the given `vm_segment` + /// The method fails if any part of the given `segment` /// already belongs to a DMA mapping. pub fn map( - vm_segment: UntypedSegment, + segment: DynUSegment, is_cache_coherent: bool, ) -> core::result::Result { - let frame_count = vm_segment.nbytes() / PAGE_SIZE; - let start_paddr = vm_segment.start_paddr(); + let frame_count = segment.size() / PAGE_SIZE; + let start_paddr = segment.start_paddr(); if !check_and_insert_dma_mapping(start_paddr, frame_count) { return Err(DmaError::AlreadyMapped); } @@ -93,7 +94,7 @@ impl DmaCoherent { DmaType::Iommu => { for i in 0..frame_count { let paddr = start_paddr + (i * PAGE_SIZE); - // SAFETY: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `vm_segment`. + // SAFETY: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `segment`. unsafe { iommu::map(paddr as Daddr, paddr).unwrap(); } @@ -103,7 +104,7 @@ impl DmaCoherent { }; Ok(Self { inner: Arc::new(DmaCoherentInner { - vm_segment, + segment, start_daddr, is_cache_coherent, }), @@ -112,7 +113,7 @@ impl DmaCoherent { /// Returns the number of bytes in the DMA mapping. 
pub fn nbytes(&self) -> usize { - self.inner.vm_segment.nbytes() + self.inner.segment.size() } } @@ -123,16 +124,16 @@ impl HasDaddr for DmaCoherent { } impl Deref for DmaCoherent { - type Target = UntypedSegment; + type Target = DynUSegment; fn deref(&self) -> &Self::Target { - &self.inner.vm_segment + &self.inner.segment } } impl Drop for DmaCoherentInner { fn drop(&mut self) { - let frame_count = self.vm_segment.nbytes() / PAGE_SIZE; - let start_paddr = self.vm_segment.start_paddr(); + let frame_count = self.segment.size() / PAGE_SIZE; + let start_paddr = self.segment.start_paddr(); // Ensure that the addresses used later will not overflow start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap(); match dma_type() { @@ -173,43 +174,39 @@ impl Drop for DmaCoherentInner { impl VmIo for DmaCoherent { fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> { - self.inner.vm_segment.read(offset, writer) + self.inner.segment.read(offset, writer) } fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> { - self.inner.vm_segment.write(offset, reader) + self.inner.segment.write(offset, reader) } } impl VmIoOnce for DmaCoherent { fn read_once(&self, offset: usize) -> Result { - self.inner.vm_segment.reader().skip(offset).read_once() + self.inner.segment.reader().skip(offset).read_once() } fn write_once(&self, offset: usize, new_val: &T) -> Result<()> { - self.inner - .vm_segment - .writer() - .skip(offset) - .write_once(new_val) + self.inner.segment.writer().skip(offset).write_once(new_val) } } impl<'a> DmaCoherent { /// Returns a reader to read data from it. pub fn reader(&'a self) -> VmReader<'a, Infallible> { - self.inner.vm_segment.reader() + self.inner.segment.reader() } /// Returns a writer to write data into it. pub fn writer(&'a self) -> VmWriter<'a, Infallible> { - self.inner.vm_segment.writer() + self.inner.segment.writer() } } impl HasPaddr for DmaCoherent { fn paddr(&self) -> Paddr { - self.inner.vm_segment.start_paddr() + self.inner.segment.start_paddr() } } @@ -222,46 +219,42 @@ mod test { #[ktest] fn map_with_coherent_device() { - let vm_segment = FrameAllocOptions::new(1) - .is_contiguous(true) - .alloc_contiguous() + let segment = FrameAllocOptions::new() + .alloc_segment_with(1, |_| ()) .unwrap(); - let dma_coherent = DmaCoherent::map(vm_segment.clone(), true).unwrap(); - assert!(dma_coherent.paddr() == vm_segment.paddr()); + let dma_coherent = DmaCoherent::map(segment.clone().into(), true).unwrap(); + assert!(dma_coherent.paddr() == segment.start_paddr()); } #[ktest] fn map_with_incoherent_device() { - let vm_segment = FrameAllocOptions::new(1) - .is_contiguous(true) - .alloc_contiguous() + let segment = FrameAllocOptions::new() + .alloc_segment_with(1, |_| ()) .unwrap(); - let dma_coherent = DmaCoherent::map(vm_segment.clone(), false).unwrap(); - assert!(dma_coherent.paddr() == vm_segment.paddr()); + let dma_coherent = DmaCoherent::map(segment.clone().into(), false).unwrap(); + assert!(dma_coherent.paddr() == segment.start_paddr()); let page_table = KERNEL_PAGE_TABLE.get().unwrap(); - let vaddr = paddr_to_vaddr(vm_segment.paddr()); + let vaddr = paddr_to_vaddr(segment.start_paddr()); assert!(page_table.query(vaddr).unwrap().1.cache == CachePolicy::Uncacheable); } #[ktest] fn duplicate_map() { - let vm_segment_parent = FrameAllocOptions::new(2) - .is_contiguous(true) - .alloc_contiguous() + let segment = FrameAllocOptions::new() + .alloc_segment_with(2, |_| ()) .unwrap(); - let vm_segment_child = vm_segment_parent.slice(&(0..PAGE_SIZE)); - let 
_dma_coherent_parent = DmaCoherent::map(vm_segment_parent, false); - let dma_coherent_child = DmaCoherent::map(vm_segment_child, false); + let segment_child = segment.slice(&(0..PAGE_SIZE)); + let _dma_coherent_parent = DmaCoherent::map(segment.into(), false); + let dma_coherent_child = DmaCoherent::map(segment_child.into(), false); assert!(dma_coherent_child.is_err()); } #[ktest] fn read_and_write() { - let vm_segment = FrameAllocOptions::new(2) - .is_contiguous(true) - .alloc_contiguous() + let segment = FrameAllocOptions::new() + .alloc_segment_with(2, |_| ()) .unwrap(); - let dma_coherent = DmaCoherent::map(vm_segment, false).unwrap(); + let dma_coherent = DmaCoherent::map(segment.into(), false).unwrap(); let buf_write = vec![1u8; 2 * PAGE_SIZE]; dma_coherent.write_bytes(0, &buf_write).unwrap(); @@ -272,11 +265,10 @@ mod test { #[ktest] fn reader_and_writer() { - let vm_segment = FrameAllocOptions::new(2) - .is_contiguous(true) - .alloc_contiguous() + let segment = FrameAllocOptions::new() + .alloc_segment_with(2, |_| ()) .unwrap(); - let dma_coherent = DmaCoherent::map(vm_segment, false).unwrap(); + let dma_coherent = DmaCoherent::map(segment.into(), false).unwrap(); let buf_write = vec![1u8; PAGE_SIZE]; let mut writer = dma_coherent.writer(); diff --git a/ostd/src/mm/dma/dma_stream.rs b/ostd/src/mm/dma/dma_stream.rs index 389f74dd4..ace44c5ee 100644 --- a/ostd/src/mm/dma/dma_stream.rs +++ b/ostd/src/mm/dma/dma_stream.rs @@ -11,7 +11,7 @@ use crate::{ error::Error, mm::{ dma::{dma_type, Daddr, DmaType}, - HasPaddr, Infallible, Paddr, UntypedSegment, VmIo, VmReader, VmWriter, PAGE_SIZE, + DynUSegment, HasPaddr, Infallible, Paddr, UntypedMem, VmIo, VmReader, VmWriter, PAGE_SIZE, }, }; @@ -34,7 +34,7 @@ pub struct DmaStream { #[derive(Debug)] struct DmaStreamInner { - vm_segment: UntypedSegment, + segment: DynUSegment, start_daddr: Daddr, /// TODO: remove this field when on x86. #[allow(unused)] @@ -55,16 +55,16 @@ pub enum DmaDirection { } impl DmaStream { - /// Establishes DMA stream mapping for a given [`UntypedSegment`]. + /// Establishes DMA stream mapping for a given [`DynUSegment`]. /// /// The method fails if the segment already belongs to a DMA mapping. pub fn map( - vm_segment: UntypedSegment, + segment: DynUSegment, direction: DmaDirection, is_cache_coherent: bool, ) -> Result { - let frame_count = vm_segment.nbytes() / PAGE_SIZE; - let start_paddr = vm_segment.start_paddr(); + let frame_count = segment.size() / PAGE_SIZE; + let start_paddr = segment.start_paddr(); if !check_and_insert_dma_mapping(start_paddr, frame_count) { return Err(DmaError::AlreadyMapped); } @@ -88,7 +88,7 @@ impl DmaStream { DmaType::Iommu => { for i in 0..frame_count { let paddr = start_paddr + (i * PAGE_SIZE); - // SAFETY: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `vm_segment`. + // SAFETY: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `segment`. unsafe { iommu::map(paddr as Daddr, paddr).unwrap(); } @@ -99,7 +99,7 @@ impl DmaStream { Ok(Self { inner: Arc::new(DmaStreamInner { - vm_segment, + segment, start_daddr, is_cache_coherent, direction, @@ -107,24 +107,24 @@ impl DmaStream { }) } - /// Gets the underlying [`UntypedSegment`]. + /// Gets the underlying [`DynUSegment`]. /// /// Usually, the CPU side should not access the memory /// after the DMA mapping is established because /// there is a chance that the device is updating /// the memory. Do this at your own risk. 
- pub fn vm_segment(&self) -> &UntypedSegment { - &self.inner.vm_segment + pub fn segment(&self) -> &DynUSegment { + &self.inner.segment } /// Returns the number of frames. pub fn nframes(&self) -> usize { - self.inner.vm_segment.nbytes() / PAGE_SIZE + self.inner.segment.size() / PAGE_SIZE } /// Returns the number of bytes. pub fn nbytes(&self) -> usize { - self.inner.vm_segment.nbytes() + self.inner.segment.size() } /// Returns the DMA direction. @@ -156,7 +156,7 @@ impl DmaStream { if self.inner.is_cache_coherent { return Ok(()); } - let start_va = crate::mm::paddr_to_vaddr(self.inner.vm_segment.paddr()) as *const u8; + let start_va = crate::mm::paddr_to_vaddr(self.inner.segment.paddr()) as *const u8; // TODO: Query the CPU for the cache line size via CPUID, we use 64 bytes as the cache line size here. for i in _byte_range.step_by(64) { // TODO: Call the cache line flush command in the corresponding architecture. @@ -176,8 +176,8 @@ impl HasDaddr for DmaStream { impl Drop for DmaStreamInner { fn drop(&mut self) { - let frame_count = self.vm_segment.nbytes() / PAGE_SIZE; - let start_paddr = self.vm_segment.start_paddr(); + let frame_count = self.segment.size() / PAGE_SIZE; + let start_paddr = self.segment.start_paddr(); // Ensure that the addresses used later will not overflow start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap(); match dma_type() { @@ -211,7 +211,7 @@ impl VmIo for DmaStream { if self.inner.direction == DmaDirection::ToDevice { return Err(Error::AccessDenied); } - self.inner.vm_segment.read(offset, writer) + self.inner.segment.read(offset, writer) } /// Writes data from the buffer. @@ -219,7 +219,7 @@ impl VmIo for DmaStream { if self.inner.direction == DmaDirection::FromDevice { return Err(Error::AccessDenied); } - self.inner.vm_segment.write(offset, reader) + self.inner.segment.write(offset, reader) } } @@ -229,7 +229,7 @@ impl<'a> DmaStream { if self.inner.direction == DmaDirection::ToDevice { return Err(Error::AccessDenied); } - Ok(self.inner.vm_segment.reader()) + Ok(self.inner.segment.reader()) } /// Returns a writer to write data into it. 
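[Editor's note] With `map` now taking a `DynUSegment`, every call site gains an `.into()`, and the accessor and size renames (`vm_segment()` → `segment()`, `nbytes()` → `size()`) follow the same scheme as for frames. A sketch of the updated mapping flow (illustrative; `?` assumes an OSTD `Result` context):

```rust
use ostd::mm::{DmaDirection, DmaStream, FrameAllocOptions, VmIo};

let segment = FrameAllocOptions::new().alloc_segment(1)?;
// `Segment<()>` coerces into the `DynUSegment` that `map` now takes.
let stream = DmaStream::map(segment.into(), DmaDirection::ToDevice, false)
    .expect("the segment must not already belong to a DMA mapping");
stream.write_bytes(0, &[0xAB; 8])?; // CPU fills the buffer...
stream.sync(0..8)?; // ...and flushes the cache lines before the device reads
```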
@@ -237,13 +237,13 @@ impl<'a> DmaStream {
         if self.inner.direction == DmaDirection::FromDevice {
             return Err(Error::AccessDenied);
         }
-        Ok(self.inner.vm_segment.writer())
+        Ok(self.inner.segment.writer())
     }
 }
 
 impl HasPaddr for DmaStream {
     fn paddr(&self) -> Paddr {
-        self.inner.vm_segment.start_paddr()
+        self.inner.segment.start_paddr()
     }
 }
 
@@ -373,36 +373,35 @@ mod test {
 
     #[ktest]
     fn streaming_map() {
-        let vm_segment = FrameAllocOptions::new(1)
-            .is_contiguous(true)
-            .alloc_contiguous()
+        let segment = FrameAllocOptions::new()
+            .alloc_segment_with(1, |_| ())
             .unwrap();
         let dma_stream =
-            DmaStream::map(vm_segment.clone(), DmaDirection::Bidirectional, true).unwrap();
-        assert!(dma_stream.paddr() == vm_segment.paddr());
+            DmaStream::map(segment.clone().into(), DmaDirection::Bidirectional, true).unwrap();
+        assert!(dma_stream.paddr() == segment.start_paddr());
     }
 
     #[ktest]
     fn duplicate_map() {
-        let vm_segment_parent = FrameAllocOptions::new(2)
-            .is_contiguous(true)
-            .alloc_contiguous()
+        let segment_parent = FrameAllocOptions::new()
+            .alloc_segment_with(2, |_| ())
             .unwrap();
-        let vm_segment_child = vm_segment_parent.slice(&(0..PAGE_SIZE));
+        let segment_child = segment_parent.slice(&(0..PAGE_SIZE));
         let dma_stream_parent =
-            DmaStream::map(vm_segment_parent, DmaDirection::Bidirectional, false);
-        let dma_stream_child = DmaStream::map(vm_segment_child, DmaDirection::Bidirectional, false);
+            DmaStream::map(segment_parent.into(), DmaDirection::Bidirectional, false);
+        let dma_stream_child =
+            DmaStream::map(segment_child.into(), DmaDirection::Bidirectional, false);
         assert!(dma_stream_parent.is_ok());
         assert!(dma_stream_child.is_err());
     }
 
     #[ktest]
     fn read_and_write() {
-        let vm_segment = FrameAllocOptions::new(2)
-            .is_contiguous(true)
-            .alloc_contiguous()
+        let segment = FrameAllocOptions::new()
+            .alloc_segment_with(2, |_| ())
             .unwrap();
-        let dma_stream = DmaStream::map(vm_segment, DmaDirection::Bidirectional, false).unwrap();
+        let dma_stream =
+            DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap();
 
         let buf_write = vec![1u8; 2 * PAGE_SIZE];
         dma_stream.write_bytes(0, &buf_write).unwrap();
@@ -414,11 +413,11 @@ mod test {
 
     #[ktest]
     fn reader_and_writer() {
-        let vm_segment = FrameAllocOptions::new(2)
-            .is_contiguous(true)
-            .alloc_contiguous()
+        let segment = FrameAllocOptions::new()
+            .alloc_segment_with(2, |_| ())
             .unwrap();
-        let dma_stream = DmaStream::map(vm_segment, DmaDirection::Bidirectional, false).unwrap();
+        let dma_stream =
+            DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap();
 
         let buf_write = vec![1u8; PAGE_SIZE];
         let mut writer = dma_stream.writer().unwrap();
diff --git a/ostd/src/mm/frame/allocator.rs b/ostd/src/mm/frame/allocator.rs
index 5913444fe..4ea06ee12 100644
--- a/ostd/src/mm/frame/allocator.rs
+++ b/ostd/src/mm/frame/allocator.rs
@@ -13,10 +13,135 @@ use spin::Once;
 use super::{meta::FrameMeta, segment::Segment, Frame};
 use crate::{
     boot::memory_region::MemoryRegionType,
-    mm::{Paddr, PAGE_SIZE},
+    error::Error,
+    mm::{paddr_to_vaddr, Paddr, PAGE_SIZE},
+    prelude::*,
     sync::SpinLock,
 };
 
+/// Options for allocating physical memory frames.
+pub struct FrameAllocOptions {
+    zeroed: bool,
+}
+
+impl Default for FrameAllocOptions {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl FrameAllocOptions {
+    /// Creates new options for allocating frames.
+    pub fn new() -> Self {
+        Self { zeroed: true }
+    }
+
+    /// Sets whether the allocated frames should be initialized with zeros.
+    ///
+    /// If `zeroed` is `true`, the allocated frames are filled with zeros.
+    /// If not, the allocated frames may contain stale (potentially sensitive)
+    /// data, and the caller should clear them before sharing them with other
+    /// components.
+    ///
+    /// By default, the frames are zero-initialized.
+    pub fn zeroed(&mut self, zeroed: bool) -> &mut Self {
+        self.zeroed = zeroed;
+        self
+    }
+
+    /// Allocates a single untyped frame without metadata.
+    pub fn alloc_frame(&self) -> Result<Frame<()>> {
+        self.alloc_frame_with(())
+    }
+
+    /// Allocates a single frame with additional metadata.
+    pub fn alloc_frame_with<M: FrameMeta>(&self, metadata: M) -> Result<Frame<M>> {
+        let frame = PAGE_ALLOCATOR
+            .get()
+            .unwrap()
+            .disable_irq()
+            .lock()
+            .alloc(1)
+            .map(|idx| {
+                let paddr = idx * PAGE_SIZE;
+                Frame::from_unused(paddr, metadata)
+            })
+            .ok_or(Error::NoMemory)?;
+
+        if self.zeroed {
+            let addr = paddr_to_vaddr(frame.start_paddr()) as *mut u8;
+            // SAFETY: The newly allocated frame is guaranteed to be valid.
+            unsafe { core::ptr::write_bytes(addr, 0, PAGE_SIZE) }
+        }
+
+        Ok(frame)
+    }
+
+    /// Allocates a contiguous range of untyped frames without metadata.
+    pub fn alloc_segment(&self, nframes: usize) -> Result<Segment<()>> {
+        self.alloc_segment_with(nframes, |_| ())
+    }
+
+    /// Allocates a contiguous range of frames with additional metadata.
+    ///
+    /// The returned [`Segment`] contains at least one frame. The method returns
+    /// an error if the number of frames is zero.
+    pub fn alloc_segment_with<M: FrameMeta, F>(
+        &self,
+        nframes: usize,
+        metadata_fn: F,
+    ) -> Result<Segment<M>>
+    where
+        F: FnMut(Paddr) -> M,
+    {
+        if nframes == 0 {
+            return Err(Error::InvalidArgs);
+        }
+        let segment = PAGE_ALLOCATOR
+            .get()
+            .unwrap()
+            .disable_irq()
+            .lock()
+            .alloc(nframes)
+            .map(|start| {
+                Segment::from_unused(
+                    start * PAGE_SIZE..start * PAGE_SIZE + nframes * PAGE_SIZE,
+                    metadata_fn,
+                )
+            })
+            .ok_or(Error::NoMemory)?;
+
+        if self.zeroed {
+            let addr = paddr_to_vaddr(segment.start_paddr()) as *mut u8;
+            // SAFETY: The newly allocated segment is guaranteed to be valid.
+            unsafe { core::ptr::write_bytes(addr, 0, nframes * PAGE_SIZE) }
+        }
+
+        Ok(segment)
+    }
+}
+
+#[cfg(ktest)]
+#[ktest]
+fn test_alloc_dealloc() {
+    // Here we allocate and deallocate frames in random orders to test the allocator.
+    // We expect the test to fail if the underlying implementation panics.
+    let single_options = FrameAllocOptions::new();
+    let mut contiguous_options = FrameAllocOptions::new();
+    contiguous_options.zeroed(false);
+    let mut remember_vec = Vec::new();
+    for _ in 0..10 {
+        for i in 0..10 {
+            let single_frame = single_options.alloc_frame().unwrap();
+            if i % 3 == 0 {
+                remember_vec.push(single_frame);
+            }
+        }
+        let contiguous_segment = contiguous_options.alloc_segment(10).unwrap();
+        drop(contiguous_segment);
+        remember_vec.pop();
+    }
+}
+
 /// FrameAllocator with a counter for allocated memory
 pub(in crate::mm) struct CountingFrameAllocator {
     allocator: FrameAllocator,
@@ -59,45 +184,6 @@ impl CountingFrameAllocator {
 
 pub(in crate::mm) static PAGE_ALLOCATOR: Once<SpinLock<CountingFrameAllocator>> = Once::new();
 
-/// Allocate a single page.
-///
-/// The metadata of the page is initialized with the given metadata.
-pub(crate) fn alloc_single<M: FrameMeta>(metadata: M) -> Option<Frame<M>> {
-    PAGE_ALLOCATOR
-        .get()
-        .unwrap()
-        .disable_irq()
-        .lock()
-        .alloc(1)
-        .map(|idx| {
-            let paddr = idx * PAGE_SIZE;
-            Frame::from_unused(paddr, metadata)
-        })
-}
-
-/// Allocate a contiguous range of pages of a given length in bytes.
-///
-/// The caller must provide a closure to initialize metadata for all the pages.
-/// The closure receives the physical address of the page and returns the -/// metadata, which is similar to [`core::array::from_fn`]. -/// -/// # Panics -/// -/// The function panics if the length is not base-page-aligned. -pub(crate) fn alloc_contiguous(len: usize, metadata_fn: F) -> Option> -where - F: FnMut(Paddr) -> M, -{ - assert!(len % PAGE_SIZE == 0); - PAGE_ALLOCATOR - .get() - .unwrap() - .disable_irq() - .lock() - .alloc(len / PAGE_SIZE) - .map(|start| Segment::from_unused(start * PAGE_SIZE..start * PAGE_SIZE + len, metadata_fn)) -} - pub(crate) fn init() { let regions = crate::boot::memory_regions(); let mut total: usize = 0; diff --git a/ostd/src/mm/frame/meta.rs b/ostd/src/mm/frame/meta.rs index 55185034c..4200fa721 100644 --- a/ostd/src/mm/frame/meta.rs +++ b/ostd/src/mm/frame/meta.rs @@ -60,7 +60,7 @@ use crate::{ /// The maximum number of bytes of the metadata of a page. pub const PAGE_METADATA_MAX_SIZE: usize = - META_SLOT_SIZE - size_of::() - size_of::(); + META_SLOT_SIZE - size_of::() - size_of::() - size_of::(); /// The maximum alignment in bytes of the metadata of a page. pub const PAGE_METADATA_MAX_ALIGN: usize = align_of::(); @@ -77,19 +77,24 @@ pub(in crate::mm) struct MetaSlot { /// at most `PAGE_METADATA_ALIGN` bytes of alignment; /// - the subsequent fields can utilize the padding of the /// reference count to save space. - storage: UnsafeCell<[u8; PAGE_METADATA_MAX_SIZE]>, + /// + /// Don't access this field by a reference to the slot. + _storage: UnsafeCell<[u8; PAGE_METADATA_MAX_SIZE]>, /// The reference count of the page. /// /// Specifically, the reference count has the following meaning: - /// * `REF_COUNT_UNUSED`: The page is not in use. - /// * `0`: The page is being constructed ([`Page::from_unused`]) + /// - `REF_COUNT_UNUSED`: The page is not in use. + /// - `0`: The page is being constructed ([`Frame::from_unused`]) /// or destructured ([`drop_last_in_place`]). - /// * `1..REF_COUNT_MAX`: The page is in use. - /// * `REF_COUNT_MAX..REF_COUNT_UNUSED`: Illegal values to + /// - `1..REF_COUNT_MAX`: The page is in use. + /// - `REF_COUNT_MAX..REF_COUNT_UNUSED`: Illegal values to /// prevent the reference count from overflowing. Otherwise, /// overflowing the reference count will cause soundness issue. /// /// [`Frame::from_unused`]: super::Frame::from_unused + // + // Other than this field the fields should be `MaybeUninit`. + // See initialization in `alloc_meta_pages`. pub(super) ref_count: AtomicU32, /// The virtual table that indicates the type of the metadata. pub(super) vtable_ptr: UnsafeCell>, @@ -123,6 +128,16 @@ pub unsafe trait FrameMeta: Any + Send + Sync + Debug + 'static { fn on_drop(&mut self, reader: &mut VmReader) { let _ = reader; } + + /// Whether the metadata's associated frame is untyped. + /// + /// If a type implements [`UFrameMeta`], this should be `true`. + /// Otherwise, it should be `false`. + /// + /// [`UFrameMeta`]: super::untyped::UFrameMeta + fn is_untyped(&self) -> bool { + false + } } /// Makes a structure usable as a page metadata. @@ -202,7 +217,7 @@ pub(super) unsafe fn drop_last_in_place(ptr: *mut MetaSlot) { core::ptr::drop_in_place(meta_ptr); } - // `Release` pairs with the `Acquire` in `Page::from_unused` and ensures `drop_in_place` won't + // `Release` pairs with the `Acquire` in `Frame::from_unused` and ensures `drop_in_place` won't // be reordered after this memory store. 
slot.ref_count.store(REF_COUNT_UNUSED, Ordering::Release); @@ -280,20 +295,15 @@ fn alloc_meta_pages(num_pages: usize) -> (usize, Paddr) { * PAGE_SIZE; let slots = paddr_to_vaddr(start_paddr) as *mut MetaSlot; - for i in 0..num_pages { - // SAFETY: The memory is successfully allocated with `num_pages` slots so the index must be - // within the range. - let slot = unsafe { slots.add(i) }; - // SAFETY: The memory is just allocated so we have exclusive access and it's valid for - // writing. - unsafe { - slot.write(MetaSlot { - storage: UnsafeCell::new([0; PAGE_METADATA_MAX_SIZE]), - ref_count: AtomicU32::new(REF_COUNT_UNUSED), - vtable_ptr: UnsafeCell::new(MaybeUninit::uninit()), - }); - } + // Fill the metadata pages with a byte pattern of `REF_COUNT_UNUSED`. + debug_assert_eq!(REF_COUNT_UNUSED.to_ne_bytes(), [0xff, 0xff, 0xff, 0xff]); + // SAFETY: `slots` and the length is a valid region for the metadata pages + // that are going to be treated as metadata slots. The byte pattern is + // valid as the initial value of the reference count (other fields are + // either not accessed or `MaybeUninit`). + unsafe { + core::ptr::write_bytes(slots as *mut u8, 0xff, num_pages * size_of::()); } (num_meta_pages, start_paddr) diff --git a/ostd/src/mm/frame/mod.rs b/ostd/src/mm/frame/mod.rs index 003965c80..8fb688dd8 100644 --- a/ostd/src/mm/frame/mod.rs +++ b/ostd/src/mm/frame/mod.rs @@ -16,12 +16,11 @@ pub mod allocator; pub mod meta; -mod segment; +pub mod segment; pub mod untyped; use core::{ marker::PhantomData, - mem::ManuallyDrop, sync::atomic::{AtomicU32, AtomicUsize, Ordering}, }; @@ -29,14 +28,14 @@ use meta::{ mapping, FrameMeta, MetaSlot, PAGE_METADATA_MAX_ALIGN, PAGE_METADATA_MAX_SIZE, REF_COUNT_UNUSED, }; pub use segment::Segment; -use untyped::UntypedMeta; +use untyped::{DynUFrame, UFrameMeta}; -use super::{PagingLevel, UntypedFrame, PAGE_SIZE}; +use super::{PagingLevel, PAGE_SIZE}; use crate::mm::{Paddr, PagingConsts, Vaddr}; static MAX_PADDR: AtomicUsize = AtomicUsize::new(0); -/// A page with a statically-known usage, whose metadata is represented by `M`. +/// A physical memory frame with a statically-known usage, whose metadata is represented by `M`. #[derive(Debug)] #[repr(transparent)] pub struct Frame { @@ -44,6 +43,13 @@ pub struct Frame { pub(super) _marker: PhantomData, } +/// A physical memory frame with a dynamically-known usage. +/// +/// The usage of this frame will not be changed while this object is alive. But the +/// usage is not known at compile time. An [`DynFrame`] as a parameter accepts any +/// type of frames. +pub type DynFrame = Frame; + unsafe impl Send for Frame {} unsafe impl Sync for Frame {} @@ -79,7 +85,8 @@ impl Frame { .compare_exchange(REF_COUNT_UNUSED, 0, Ordering::Acquire, Ordering::Relaxed) .expect("Frame already in use when trying to get a new handle"); - // SAFETY: We have exclusive access to the page metadata. + // SAFETY: We have exclusive access to the page metadata. These fields are mutably + // borrowed only once. let vtable_ptr = unsafe { &mut *slot.vtable_ptr.get() }; vtable_ptr.write(core::ptr::metadata(&metadata as &dyn FrameMeta)); @@ -114,7 +121,7 @@ impl Frame { impl Frame { /// Get the physical address. - pub fn paddr(&self) -> Paddr { + pub fn start_paddr(&self) -> Paddr { mapping::meta_to_page::(self.ptr as Vaddr) } @@ -183,7 +190,7 @@ impl Frame { /// data structures need to hold the page handle such as the page table. 
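[Editor's note] Together with the `paddr()` → `start_paddr()` rename above, the conversions introduced just below make metadata erasure explicit. A sketch of a round trip through the dynamic frame types (illustrative; `()` works because the unit metadata is registered as untyped later in this patch):

```rust
use ostd::mm::{DynFrame, DynUFrame, Frame, FrameAllocOptions};

let frame: Frame<()> = FrameAllocOptions::new().alloc_frame()?;
let paddr = frame.start_paddr(); // renamed from `paddr()` in this patch

// Erase the metadata type: `()` is untyped, so `Frame<()>` -> `DynUFrame`.
let untyped: DynUFrame = frame.into();
// Recover the concrete type via the fallible downcast on `DynFrame`.
let frame: Frame<()> = DynFrame::from(untyped)
    .try_into()
    .expect("the metadata is known to be `()`");
assert_eq!(frame.start_paddr(), paddr);
```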
     #[allow(unused)]
     pub(in crate::mm) fn into_raw(self) -> Paddr {
-        let paddr = self.paddr();
+        let paddr = self.start_paddr();
         core::mem::forget(self);
         paddr
     }
@@ -256,12 +263,8 @@ impl<M: FrameMeta> TryFrom<Frame<dyn FrameMeta>> for Frame<M> {
     /// return the dynamic page itself as is.
     fn try_from(dyn_frame: Frame<dyn FrameMeta>) -> Result<Self, Self::Error> {
         if (dyn_frame.dyn_meta() as &dyn core::any::Any).is::<M>() {
-            let result = Frame {
-                ptr: dyn_frame.ptr,
-                _marker: PhantomData,
-            };
-            let _ = ManuallyDrop::new(dyn_frame);
-            Ok(result)
+            // SAFETY: The metadata is coerceable and the struct is transmutable.
+            Ok(unsafe { core::mem::transmute::<Frame<dyn FrameMeta>, Frame<M>>(dyn_frame) })
         } else {
             Err(dyn_frame)
         }
@@ -270,18 +273,46 @@ impl<M: FrameMeta> TryFrom<Frame<dyn FrameMeta>> for Frame<M> {
 
 impl<M: FrameMeta> From<Frame<M>> for Frame<dyn FrameMeta> {
     fn from(frame: Frame<M>) -> Self {
-        let result = Self {
-            ptr: frame.ptr,
-            _marker: PhantomData,
-        };
-        let _ = ManuallyDrop::new(frame);
-        result
+        // SAFETY: The metadata is coerceable and the struct is transmutable.
+        unsafe { core::mem::transmute(frame) }
     }
 }
 
-impl From<UntypedFrame> for Frame<dyn FrameMeta> {
-    fn from(frame: UntypedFrame) -> Self {
-        Frame::<UntypedMeta>::from(frame).into()
+impl<M: UFrameMeta> From<Frame<M>> for DynUFrame {
+    fn from(frame: Frame<M>) -> Self {
+        // SAFETY: The metadata is coerceable and the struct is transmutable.
+        unsafe { core::mem::transmute(frame) }
+    }
+}
+
+impl<M: UFrameMeta> From<&Frame<M>> for &DynUFrame {
+    fn from(frame: &Frame<M>) -> Self {
+        // SAFETY: The metadata is coerceable and the struct is transmutable.
+        unsafe { core::mem::transmute(frame) }
+    }
+}
+
+impl From<DynUFrame> for Frame<dyn FrameMeta> {
+    fn from(frame: DynUFrame) -> Self {
+        // SAFETY: The metadata is coerceable and the struct is transmutable.
+        unsafe { core::mem::transmute(frame) }
+    }
+}
+
+impl TryFrom<Frame<dyn FrameMeta>> for DynUFrame {
+    type Error = Frame<dyn FrameMeta>;
+
+    /// Try converting a [`Frame<dyn FrameMeta>`] into [`DynUFrame`].
+    ///
+    /// If the usage of the frame is not the same as the expected usage, it will
+    /// return the dynamic frame itself as is.
+    fn try_from(dyn_frame: Frame<dyn FrameMeta>) -> Result<Self, Self::Error> {
+        if dyn_frame.dyn_meta().is_untyped() {
+            // SAFETY: The metadata is coerceable and the struct is transmutable.
+            Ok(unsafe { core::mem::transmute::<Frame<dyn FrameMeta>, DynUFrame>(dyn_frame) })
+        } else {
+            Err(dyn_frame)
+        }
     }
 }
diff --git a/ostd/src/mm/frame/segment.rs b/ostd/src/mm/frame/segment.rs
index 6c7b15f68..77da52213 100644
--- a/ostd/src/mm/frame/segment.rs
+++ b/ostd/src/mm/frame/segment.rs
@@ -2,11 +2,10 @@
 
 //! A contiguous range of pages.
 
-use alloc::vec::Vec;
 use core::{mem::ManuallyDrop, ops::Range};
 
 use super::{inc_page_ref_count, meta::FrameMeta, Frame};
-use crate::mm::{Paddr, PAGE_SIZE};
+use crate::mm::{Paddr, UFrameMeta, PAGE_SIZE};
 
 /// A contiguous range of homogeneous physical memory pages.
 ///
@@ -21,11 +20,30 @@ use crate::mm::{Paddr, PAGE_SIZE};
 /// All the metadata of the pages are homogeneous, i.e., they are of the same
 /// type.
 #[derive(Debug)]
+#[repr(transparent)]
 pub struct Segment<M: FrameMeta + ?Sized> {
     range: Range<Paddr>,
     _marker: core::marker::PhantomData<M>,
 }
 
+/// A contiguous range of homogeneous physical memory frames that have any metadata.
+///
+/// In other words, the metadata of the frames is of the same type, but the type
+/// is not known at compile time. A [`DynSegment`] as a parameter accepts any
+/// type of segment.
+///
+/// The usage of the frames will not be changed while this object is alive.
+pub type DynSegment = Segment<dyn FrameMeta>;
+
+/// A contiguous range of homogeneous untyped physical memory pages that have any metadata.
+///
+/// In other words, the metadata of the frames is of the same type, and the
+/// frames are untyped, but the type of metadata is not known at compile time.
+/// A [`DynUSegment`] as a parameter accepts any untyped segment.
+///
+/// The usage of the frames will not be changed while this object is alive.
+pub type DynUSegment = Segment<dyn UFrameMeta>;
+
 impl<M: FrameMeta + ?Sized> Drop for Segment<M> {
     fn drop(&mut self) {
         for paddr in self.range.clone().step_by(PAGE_SIZE) {
@@ -89,7 +107,7 @@ impl<M: FrameMeta + ?Sized> Segment<M> {
     }
 
     /// Gets the length in bytes of the contiguous pages.
-    pub fn nbytes(&self) -> usize {
+    pub fn size(&self) -> usize {
         self.range.end - self.range.start
     }
 
@@ -104,7 +122,7 @@ impl<M: FrameMeta + ?Sized> Segment<M> {
     /// not base-page-aligned.
     pub fn split(self, offset: usize) -> (Self, Self) {
         assert!(offset % PAGE_SIZE == 0);
-        assert!(0 < offset && offset < self.nbytes());
+        assert!(0 < offset && offset < self.size());
 
         let old = ManuallyDrop::new(self);
         let at = old.range.start + offset;
@@ -152,7 +170,7 @@ impl<M: FrameMeta + ?Sized> Segment<M> {
 
 impl<M: FrameMeta + ?Sized> From<Frame<M>> for Segment<M> {
     fn from(page: Frame<M>) -> Self {
-        let pa = page.paddr();
+        let pa = page.start_paddr();
         let _ = ManuallyDrop::new(page);
         Self {
             range: pa..pa + PAGE_SIZE,
@@ -161,22 +179,6 @@ impl<M: FrameMeta + ?Sized> From<Frame<M>> for Segment<M> {
     }
 }
 
-impl<M: FrameMeta> From<Segment<M>> for Vec<Frame<M>> {
-    fn from(pages: Segment<M>) -> Self {
-        let vector = pages
-            .range
-            .clone()
-            .step_by(PAGE_SIZE)
-            .map(|i|
-                // SAFETY: for each page there would be a forgotten handle
-                // when creating the `Segment` object.
-                unsafe { Frame::<M>::from_raw(i) })
-            .collect();
-        let _ = ManuallyDrop::new(pages);
-        vector
-    }
-}
-
 impl<M: FrameMeta + ?Sized> Iterator for Segment<M> {
     type Item = Frame<M>;
 
@@ -194,3 +196,83 @@ impl<M: FrameMeta + ?Sized> Iterator for Segment<M> {
         }
     }
 }
+
+impl<M: FrameMeta> From<Segment<M>> for DynSegment {
+    fn from(seg: Segment<M>) -> Self {
+        let seg = ManuallyDrop::new(seg);
+        Self {
+            range: seg.range.clone(),
+            _marker: core::marker::PhantomData,
+        }
+    }
+}
+
+impl<M: FrameMeta> TryFrom<DynSegment> for Segment<M> {
+    type Error = DynSegment;
+
+    fn try_from(seg: DynSegment) -> core::result::Result<Self, Self::Error> {
+        // SAFETY: for each page there would be a forgotten handle
+        // when creating the `Segment` object.
+        let first_frame = unsafe { Frame::<dyn FrameMeta>::from_raw(seg.range.start) };
+        let first_frame = ManuallyDrop::new(first_frame);
+        if !(first_frame.dyn_meta() as &dyn core::any::Any).is::<M>() {
+            return Err(seg);
+        }
+        // Since segments are homogeneous, we can safely assume that the rest
+        // of the frames are of the same type. We just debug-check here.
+        #[cfg(debug_assertions)]
+        {
+            for paddr in seg.range.clone().step_by(PAGE_SIZE) {
+                let frame = unsafe { Frame::<dyn FrameMeta>::from_raw(paddr) };
+                let frame = ManuallyDrop::new(frame);
+                debug_assert!((frame.dyn_meta() as &dyn core::any::Any).is::<M>());
+            }
+        }
+        // SAFETY: The metadata is coerceable and the struct is transmutable.
+        Ok(unsafe { core::mem::transmute::<DynSegment, Segment<M>>(seg) })
+    }
+}
+
+impl<M: UFrameMeta> From<Segment<M>> for DynUSegment {
+    fn from(seg: Segment<M>) -> Self {
+        // SAFETY: The metadata is coerceable and the struct is transmutable.
+        unsafe { core::mem::transmute(seg) }
+    }
+}
+
+impl<M: UFrameMeta> From<&Segment<M>> for &DynUSegment {
+    fn from(seg: &Segment<M>) -> Self {
+        // SAFETY: The metadata is coerceable and the struct is transmutable.
+        unsafe { core::mem::transmute(seg) }
+    }
+}
+
+impl TryFrom<DynSegment> for DynUSegment {
+    type Error = DynSegment;
+
+    /// Try converting a [`DynSegment`] into [`DynUSegment`].
+    ///
+    /// If the usage of the segment is not the same as the expected usage, it
+    /// will return the dynamic segment itself as is.
+    fn try_from(seg: DynSegment) -> core::result::Result<Self, Self::Error> {
+        // SAFETY: for each page there would be a forgotten handle
+        // when creating the `Segment` object.
+ let first_frame = unsafe { Frame::::from_raw(seg.range.start) }; + let first_frame = ManuallyDrop::new(first_frame); + if !first_frame.dyn_meta().is_untyped() { + return Err(seg); + } + // Since segments are homogeneous, we can safely assume that the rest + // of the frames are of the same type. We just debug-check here. + #[cfg(debug_assertions)] + { + for paddr in seg.range.clone().step_by(PAGE_SIZE) { + let frame = unsafe { Frame::::from_raw(paddr) }; + let frame = ManuallyDrop::new(frame); + debug_assert!(frame.dyn_meta().is_untyped()); + } + } + // SAFETY: The metadata is coerceable and the struct is transmutable. + Ok(unsafe { core::mem::transmute::(seg) }) + } +} diff --git a/ostd/src/mm/frame/untyped.rs b/ostd/src/mm/frame/untyped.rs new file mode 100644 index 000000000..1ecb860ff --- /dev/null +++ b/ostd/src/mm/frame/untyped.rs @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: MPL-2.0 + +//! Untyped physical memory management. +//! +//! A frame is a special page that is _untyped_ memory. +//! It is used to store data irrelevant to the integrity of the kernel. +//! All pages mapped to the virtual address space of the users are backed by +//! frames. Frames, with all the properties of pages, can additionally be safely +//! read and written by the kernel or the user. + +use super::{meta::FrameMeta, Frame, Segment}; +use crate::{ + mm::{ + io::{FallibleVmRead, FallibleVmWrite, VmIo, VmReader, VmWriter}, + paddr_to_vaddr, Infallible, + }, + Error, Result, +}; + +/// The metadata of untyped frame. +/// +/// If a structure `M` implements [`UFrameMeta`], it can be used as the +/// metadata of a type of untyped frames [`Frame`]. All frames of such type +/// will be accessible as untyped memory. +pub trait UFrameMeta: FrameMeta {} + +/// An untyped frame with any metadata. +/// +/// The usage of this frame will not be changed while this object is alive. +/// The metadata of the frame is not known at compile time but the frame must +/// be an untyped one. An [`DynUFrame`] as a parameter accepts any type of +/// untyped frame metadata. +pub type DynUFrame = Frame; + +/// Makes a structure usable as untyped frame metadata. +/// +/// Directly implementing [`FrameMeta`] is not safe since the size and +/// alignment must be checked. This macro provides a safe way to implement both +/// [`FrameMeta`] and [`UFrameMeta`] with compile-time checks. +/// +/// If this macro is used for built-in typed frame metadata, it won't compile. +#[macro_export] +macro_rules! impl_untyped_frame_meta_for { + // Implement without specifying the drop behavior. + ($t:ty) => { + use static_assertions::const_assert; + const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_SIZE); + const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_ALIGN); + // SAFETY: The size and alignment of the structure are checked. + unsafe impl $crate::mm::frame::meta::FrameMeta for $t { + fn is_untyped(&self) -> bool { + true + } + } + impl $crate::mm::frame::untyped::UFrameMeta for $t {} + }; + // Implement with a customized drop function. + ($t:ty, $body:expr) => { + use static_assertions::const_assert; + const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_SIZE); + const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_ALIGN); + // SAFETY: The size and alignment of the structure are checked. + // Outside OSTD the user cannot implement a `on_drop` method for typed + // frames. And untyped frames can be safely read. 
+ unsafe impl $crate::mm::frame::meta::FrameMeta for $t { + fn on_drop(&mut self, reader: &mut $crate::mm::VmReader<$crate::mm::Infallible>) { + $body + } + + fn is_untyped(&self) -> bool { + true + } + } + impl $crate::mm::frame::untyped::UFrameMeta for $t {} + }; +} + +// A special case of untyped metadata is the unit type. +impl_untyped_frame_meta_for!(()); + +/// A physical memory range that is untyped. +/// +/// Untyped frames or segments can be safely read and written by the kernel or +/// the user. +pub trait UntypedMem { + /// Borrows a reader that can read the untyped memory. + fn reader(&self) -> VmReader<'_, Infallible>; + /// Borrows a writer that can write the untyped memory. + fn writer(&self) -> VmWriter<'_, Infallible>; +} + +macro_rules! impl_untyped_for { + ($t:ident) => { + impl UntypedMem for $t { + fn reader(&self) -> VmReader<'_, Infallible> { + let ptr = paddr_to_vaddr(self.start_paddr()) as *const u8; + // SAFETY: Only untyped frames are allowed to be read. + unsafe { VmReader::from_kernel_space(ptr, self.size()) } + } + + fn writer(&self) -> VmWriter<'_, Infallible> { + let ptr = paddr_to_vaddr(self.start_paddr()) as *mut u8; + // SAFETY: Only untyped frames are allowed to be written. + unsafe { VmWriter::from_kernel_space(ptr, self.size()) } + } + } + + impl VmIo for $t { + fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> { + let read_len = writer.avail().min(self.size().saturating_sub(offset)); + // Do bound check with potential integer overflow in mind + let max_offset = offset.checked_add(read_len).ok_or(Error::Overflow)?; + if max_offset > self.size() { + return Err(Error::InvalidArgs); + } + let len = self + .reader() + .skip(offset) + .read_fallible(writer) + .map_err(|(e, _)| e)?; + debug_assert!(len == read_len); + Ok(()) + } + + fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> { + let write_len = reader.remain().min(self.size().saturating_sub(offset)); + // Do bound check with potential integer overflow in mind + let max_offset = offset.checked_add(write_len).ok_or(Error::Overflow)?; + if max_offset > self.size() { + return Err(Error::InvalidArgs); + } + let len = self + .writer() + .skip(offset) + .write_fallible(reader) + .map_err(|(e, _)| e)?; + debug_assert!(len == write_len); + Ok(()) + } + } + }; +} + +impl_untyped_for!(Frame); +impl_untyped_for!(Segment); + +// Here are implementations for `xarray`. + +use core::{marker::PhantomData, mem::ManuallyDrop, ops::Deref}; + +/// `FrameRef` is a struct that can work as `&'a Frame`. +/// +/// This is solely useful for [`crate::collections::xarray`]. +pub struct FrameRef<'a, M: UFrameMeta + ?Sized> { + inner: ManuallyDrop>, + _marker: PhantomData<&'a Frame>, +} + +impl Deref for FrameRef<'_, M> { + type Target = Frame; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +// SAFETY: `Frame` is essentially an `*const MetaSlot` that could be used as a `*const` pointer. +// The pointer is also aligned to 4. 
+unsafe impl xarray::ItemEntry for Frame { + type Ref<'a> + = FrameRef<'a, M> + where + Self: 'a; + + fn into_raw(self) -> *const () { + let ptr = self.ptr; + let _ = ManuallyDrop::new(self); + ptr as *const () + } + + unsafe fn from_raw(raw: *const ()) -> Self { + Self { + ptr: raw as *const _, + _marker: PhantomData, + } + } + + unsafe fn raw_as_ref<'a>(raw: *const ()) -> Self::Ref<'a> { + Self::Ref { + inner: ManuallyDrop::new(Frame { + ptr: raw as *const _, + _marker: PhantomData, + }), + _marker: PhantomData, + } + } +} diff --git a/ostd/src/mm/frame/untyped/mod.rs b/ostd/src/mm/frame/untyped/mod.rs deleted file mode 100644 index 7760b260a..000000000 --- a/ostd/src/mm/frame/untyped/mod.rs +++ /dev/null @@ -1,236 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//! Untyped physical memory management. -//! -//! A frame is a special page that is _untyped_ memory. -//! It is used to store data irrelevant to the integrity of the kernel. -//! All pages mapped to the virtual address space of the users are backed by -//! frames. Frames, with all the properties of pages, can additionally be safely -//! read and written by the kernel or the user. - -pub mod options; -mod segment; - -use core::mem::ManuallyDrop; - -pub use segment::UntypedSegment; - -use super::{ - meta::{impl_frame_meta_for, FrameMeta, MetaSlot}, - Frame, -}; -use crate::{ - mm::{ - io::{FallibleVmRead, FallibleVmWrite, VmIo, VmReader, VmWriter}, - paddr_to_vaddr, HasPaddr, Infallible, Paddr, PAGE_SIZE, - }, - Error, Result, -}; - -/// A handle to a physical memory page of untyped memory. -/// -/// An instance of `UntypedFrame` is a handle to a page frame (a physical memory -/// page). A cloned `UntypedFrame` refers to the same page frame as the original. -/// As the original and cloned instances point to the same physical address, -/// they are treated as equal to each other. Behind the scene, a reference -/// counter is maintained for each page frame so that when all instances of -/// `UntypedFrame` that refer to the same page frame are dropped, the page frame -/// will be globally freed. -#[derive(Debug, Clone)] -pub struct UntypedFrame { - page: Frame, -} - -impl UntypedFrame { - /// Returns the physical address of the page frame. - pub fn start_paddr(&self) -> Paddr { - self.page.paddr() - } - - /// Returns the end physical address of the page frame. - pub fn end_paddr(&self) -> Paddr { - self.start_paddr() + PAGE_SIZE - } - - /// Returns the size of the frame - pub const fn size(&self) -> usize { - self.page.size() - } - - /// Returns a raw pointer to the starting virtual address of the frame. - pub fn as_ptr(&self) -> *const u8 { - paddr_to_vaddr(self.start_paddr()) as *const u8 - } - - /// Returns a mutable raw pointer to the starting virtual address of the frame. - pub fn as_mut_ptr(&self) -> *mut u8 { - paddr_to_vaddr(self.start_paddr()) as *mut u8 - } - - /// Copies the content of `src` to the frame. - pub fn copy_from(&self, src: &UntypedFrame) { - if self.paddr() == src.paddr() { - return; - } - // SAFETY: the source and the destination does not overlap. - unsafe { - core::ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.size()); - } - } - - /// Get the reference count of the frame. - /// - /// It returns the number of all references to the page, including all the - /// existing page handles ([`UntypedFrame`]) and all the mappings in the page - /// table that points to the page. - /// - /// # Safety - /// - /// The function is safe to call, but using it requires extra care. 
The - /// reference count can be changed by other threads at any time including - /// potentially between calling this method and acting on the result. - pub fn reference_count(&self) -> u32 { - self.page.reference_count() - } -} - -impl From> for UntypedFrame { - fn from(page: Frame) -> Self { - Self { page } - } -} - -impl TryFrom> for UntypedFrame { - type Error = Frame; - - /// Try converting a [`Frame`] into the statically-typed [`UntypedFrame`]. - /// - /// If the dynamic page is not used as an untyped page frame, it will - /// return the dynamic page itself as is. - fn try_from(page: Frame) -> core::result::Result { - page.try_into().map(|p: Frame| p.into()) - } -} - -impl From for Frame { - fn from(frame: UntypedFrame) -> Self { - frame.page - } -} - -impl HasPaddr for UntypedFrame { - fn paddr(&self) -> Paddr { - self.start_paddr() - } -} - -impl<'a> UntypedFrame { - /// Returns a reader to read data from it. - pub fn reader(&'a self) -> VmReader<'a, Infallible> { - // SAFETY: - // - The memory range points to untyped memory. - // - The frame is alive during the lifetime `'a`. - // - Using `VmReader` and `VmWriter` is the only way to access the frame. - unsafe { VmReader::from_kernel_space(self.as_ptr(), self.size()) } - } - - /// Returns a writer to write data into it. - pub fn writer(&'a self) -> VmWriter<'a, Infallible> { - // SAFETY: - // - The memory range points to untyped memory. - // - The frame is alive during the lifetime `'a`. - // - Using `VmReader` and `VmWriter` is the only way to access the frame. - unsafe { VmWriter::from_kernel_space(self.as_mut_ptr(), self.size()) } - } -} - -impl VmIo for UntypedFrame { - fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> { - let read_len = writer.avail().min(self.size().saturating_sub(offset)); - // Do bound check with potential integer overflow in mind - let max_offset = offset.checked_add(read_len).ok_or(Error::Overflow)?; - if max_offset > self.size() { - return Err(Error::InvalidArgs); - } - let len = self - .reader() - .skip(offset) - .read_fallible(writer) - .map_err(|(e, _)| e)?; - debug_assert!(len == read_len); - Ok(()) - } - - fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> { - let write_len = reader.remain().min(self.size().saturating_sub(offset)); - // Do bound check with potential integer overflow in mind - let max_offset = offset.checked_add(write_len).ok_or(Error::Overflow)?; - if max_offset > self.size() { - return Err(Error::InvalidArgs); - } - let len = self - .writer() - .skip(offset) - .write_fallible(reader) - .map_err(|(e, _)| e)?; - debug_assert!(len == write_len); - Ok(()) - } -} - -/// Metadata for a frame. -#[derive(Debug, Default)] -pub struct UntypedMeta {} - -impl_frame_meta_for!(UntypedMeta); - -// Here are implementations for `xarray`. - -use core::{marker::PhantomData, ops::Deref}; - -/// `FrameRef` is a struct that can work as `&'a UntypedFrame`. -/// -/// This is solely useful for [`crate::collections::xarray`]. -pub struct FrameRef<'a> { - inner: ManuallyDrop, - _marker: PhantomData<&'a UntypedFrame>, -} - -impl Deref for FrameRef<'_> { - type Target = UntypedFrame; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -// SAFETY: `UntypedFrame` is essentially an `*const MetaSlot` that could be used as a `*const` pointer. -// The pointer is also aligned to 4. 
-unsafe impl xarray::ItemEntry for UntypedFrame { - type Ref<'a> - = FrameRef<'a> - where - Self: 'a; - - fn into_raw(self) -> *const () { - let ptr = self.page.ptr; - core::mem::forget(self); - ptr as *const () - } - - unsafe fn from_raw(raw: *const ()) -> Self { - Self { - page: Frame:: { - ptr: raw as *mut MetaSlot, - _marker: PhantomData, - }, - } - } - - unsafe fn raw_as_ref<'a>(raw: *const ()) -> Self::Ref<'a> { - Self::Ref { - inner: ManuallyDrop::new(UntypedFrame::from_raw(raw)), - _marker: PhantomData, - } - } -} diff --git a/ostd/src/mm/frame/untyped/options.rs b/ostd/src/mm/frame/untyped/options.rs deleted file mode 100644 index 966210fd8..000000000 --- a/ostd/src/mm/frame/untyped/options.rs +++ /dev/null @@ -1,112 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//! Options for allocating frames - -use super::{UntypedFrame, UntypedSegment}; -use crate::{ - mm::{frame, frame::untyped::UntypedMeta, PAGE_SIZE}, - prelude::*, - Error, -}; - -/// Options for allocating physical memory pages (or frames). -/// -/// All allocated frames are safe to use in the sense that they are -/// not _typed memory_. We define typed memory as the memory that -/// may store Rust objects or affect Rust memory safety, e.g., -/// the code and data segments of the OS kernel, the stack and heap -/// allocated for the OS kernel. -pub struct FrameAllocOptions { - nframes: usize, - is_contiguous: bool, - uninit: bool, -} - -impl FrameAllocOptions { - /// Creates new options for allocating the specified number of frames. - pub fn new(nframes: usize) -> Self { - Self { - nframes, - is_contiguous: false, - uninit: false, - } - } - - /// Sets whether the allocated frames should be contiguous. - /// - /// The default value is `false`. - pub fn is_contiguous(&mut self, is_contiguous: bool) -> &mut Self { - self.is_contiguous = is_contiguous; - self - } - - /// Sets whether the allocated frames should be uninitialized. - /// - /// If `uninit` is set as `false`, the frame will be zeroed once allocated. - /// If `uninit` is set as `true`, the frame will **NOT** be zeroed and should *NOT* be read before writing. - /// - /// The default value is false. - pub fn uninit(&mut self, uninit: bool) -> &mut Self { - self.uninit = uninit; - self - } - - /// Allocates a single page frame according to the given options. - pub fn alloc_single(&self) -> Result { - if self.nframes != 1 { - return Err(Error::InvalidArgs); - } - - let page = frame::allocator::alloc_single(UntypedMeta::default()).ok_or(Error::NoMemory)?; - let frame = UntypedFrame { page }; - if !self.uninit { - frame.writer().fill(0); - } - - Ok(frame) - } - - /// Allocates a contiguous range of page frames according to the given options. - /// - /// The returned [`UntypedSegment`] contains at least one page frame. - pub fn alloc_contiguous(&self) -> Result { - // It's no use to checking `self.is_contiguous` here. - if self.nframes == 0 { - return Err(Error::InvalidArgs); - } - - let segment: UntypedSegment = - frame::allocator::alloc_contiguous(self.nframes * PAGE_SIZE, |_| { - UntypedMeta::default() - }) - .ok_or(Error::NoMemory)? - .into(); - if !self.uninit { - segment.writer().fill(0); - } - - Ok(segment) - } -} - -#[cfg(ktest)] -#[ktest] -fn test_alloc_dealloc() { - // Here we allocate and deallocate frames in random orders to test the allocator. - // We expect the test to fail if the underlying implementation panics. 
- let single_options = FrameAllocOptions::new(1); - let mut contiguous_options = FrameAllocOptions::new(10); - contiguous_options.is_contiguous(true); - let mut remember_vec = Vec::new(); - for _ in 0..10 { - for i in 0..10 { - let single_frame = single_options.alloc_single().unwrap(); - if i % 3 == 0 { - remember_vec.push(single_frame); - } - } - let contiguous_segment = contiguous_options.alloc_contiguous().unwrap(); - drop(contiguous_segment); - remember_vec.pop(); - } -} diff --git a/ostd/src/mm/frame/untyped/segment.rs b/ostd/src/mm/frame/untyped/segment.rs deleted file mode 100644 index 977f5035a..000000000 --- a/ostd/src/mm/frame/untyped/segment.rs +++ /dev/null @@ -1,177 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//! A contiguous segment of untyped memory pages. - -use core::ops::Range; - -use crate::{ - mm::{ - frame::{untyped::UntypedMeta, Segment}, - io::{FallibleVmRead, FallibleVmWrite}, - HasPaddr, Infallible, Paddr, UntypedFrame, VmIo, VmReader, VmWriter, - }, - Error, Result, -}; - -/// A contiguous segment of untyped memory pages. -/// -/// A [`UntypedSegment`] object is a handle to a contiguous range of untyped memory -/// pages, and the underlying pages can be shared among multiple threads. -/// [`UntypedSegment::slice`] can be used to clone a slice of the segment (also can be -/// used to clone the entire range). Reference counts are maintained for each -/// page in the segment. So cloning the handle may not be cheap as it -/// increments the reference count of all the cloned pages. -/// -/// Other [`UntypedFrame`] handles can also refer to the pages in the segment. And -/// the segment can be iterated over to get all the frames in it. -/// -/// To allocate a segment, use [`crate::mm::FrameAllocator`]. -/// -/// # Example -/// -/// ```rust -/// let vm_segment = FrameAllocOptions::new(2) -/// .is_contiguous(true) -/// .alloc_contiguous()?; -/// vm_segment.write_bytes(0, buf)?; -/// ``` -#[derive(Debug)] -pub struct UntypedSegment { - pages: Segment, -} - -impl HasPaddr for UntypedSegment { - fn paddr(&self) -> Paddr { - self.pages.start_paddr() - } -} - -impl Clone for UntypedSegment { - fn clone(&self) -> Self { - Self { - pages: self.pages.clone(), - } - } -} - -impl UntypedSegment { - /// Returns the start physical address. - pub fn start_paddr(&self) -> Paddr { - self.pages.start_paddr() - } - - /// Returns the end physical address. - pub fn end_paddr(&self) -> Paddr { - self.pages.end_paddr() - } - - /// Returns the number of bytes in it. - pub fn nbytes(&self) -> usize { - self.pages.nbytes() - } - - /// Split the segment into two at the given byte offset from the start. - /// - /// The resulting segments cannot be empty. So the byte offset cannot be - /// neither zero nor the length of the segment. - /// - /// # Panics - /// - /// The function panics if the byte offset is out of bounds, at either ends, or - /// not base-page-aligned. - pub fn split(self, offset: usize) -> (Self, Self) { - let (left, right) = self.pages.split(offset); - (Self { pages: left }, Self { pages: right }) - } - - /// Get an extra handle to the segment in the byte range. - /// - /// The sliced byte range in indexed by the offset from the start of the - /// segment. The resulting segment holds extra reference counts. - /// - /// # Panics - /// - /// The function panics if the byte range is out of bounds, or if any of - /// the ends of the byte range is not base-page aligned. 
- pub fn slice(&self, range: &Range) -> Self { - Self { - pages: self.pages.slice(range), - } - } - - /// Gets a [`VmReader`] to read from the segment from the beginning to the end. - pub fn reader(&self) -> VmReader<'_, Infallible> { - let ptr = super::paddr_to_vaddr(self.start_paddr()) as *const u8; - // SAFETY: - // - The memory range points to untyped memory. - // - The segment is alive during the lifetime `'a`. - // - Using `VmReader` and `VmWriter` is the only way to access the segment. - unsafe { VmReader::from_kernel_space(ptr, self.nbytes()) } - } - - /// Gets a [`VmWriter`] to write to the segment from the beginning to the end. - pub fn writer(&self) -> VmWriter<'_, Infallible> { - let ptr = super::paddr_to_vaddr(self.start_paddr()) as *mut u8; - // SAFETY: - // - The memory range points to untyped memory. - // - The segment is alive during the lifetime `'a`. - // - Using `VmReader` and `VmWriter` is the only way to access the segment. - unsafe { VmWriter::from_kernel_space(ptr, self.nbytes()) } - } -} - -impl From for UntypedSegment { - fn from(frame: UntypedFrame) -> Self { - Self { - pages: Segment::from(frame.page), - } - } -} - -impl From> for UntypedSegment { - fn from(pages: Segment) -> Self { - Self { pages } - } -} - -impl VmIo for UntypedSegment { - fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> { - let read_len = writer.avail(); - // Do bound check with potential integer overflow in mind - let max_offset = offset.checked_add(read_len).ok_or(Error::Overflow)?; - if max_offset > self.nbytes() { - return Err(Error::InvalidArgs); - } - let len = self - .reader() - .skip(offset) - .read_fallible(writer) - .map_err(|(e, _)| e)?; - debug_assert!(len == read_len); - Ok(()) - } - - fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> { - let write_len = reader.remain(); - // Do bound check with potential integer overflow in mind - let max_offset = offset.checked_add(reader.remain()).ok_or(Error::Overflow)?; - if max_offset > self.nbytes() { - return Err(Error::InvalidArgs); - } - let len = self - .writer() - .skip(offset) - .write_fallible(reader) - .map_err(|(e, _)| e)?; - debug_assert!(len == write_len); - Ok(()) - } -} - -impl Iterator for UntypedSegment { - type Item = UntypedFrame; - - fn next(&mut self) -> Option { - self.pages.next().map(|page| UntypedFrame { page }) - } -} diff --git a/ostd/src/mm/io.rs b/ostd/src/mm/io.rs index dcd20525a..23083ddb7 100644 --- a/ostd/src/mm/io.rs +++ b/ostd/src/mm/io.rs @@ -7,11 +7,11 @@ //! The core virtual memory (VM) access APIs provided by this module are [`VmReader`] and //! [`VmWriter`], which allow for writing to or reading from a region of memory _safely_. //! `VmReader` and `VmWriter` objects can be constructed from memory regions of either typed memory -//! (e.g., `&[u8]`) or untyped memory (e.g, [`UntypedFrame`]). Behind the scene, `VmReader` and `VmWriter` +//! (e.g., `&[u8]`) or untyped memory (e.g, [`DynUFrame`]). Behind the scene, `VmReader` and `VmWriter` //! must be constructed via their [`from_user_space`] and [`from_kernel_space`] methods, whose //! safety depends on whether the given memory regions are _valid_ or not. //! -//! [`UntypedFrame`]: crate::mm::UntypedFrame +//! [`DynUFrame`]: crate::mm::DynUFrame //! [`from_user_space`]: `VmReader::from_user_space` //! [`from_kernel_space`]: `VmReader::from_kernel_space` //! @@ -58,7 +58,7 @@ use crate::{ }; /// A trait that enables reading/writing data from/to a VM object, -/// e.g., [`UntypedSegment`], [`Vec`] and [`UntypedFrame`]. 
+/// e.g., [`DynUSegment`], [`Vec`] and [`DynUFrame`]. /// /// # Concurrency /// @@ -67,8 +67,8 @@ use crate::{ /// desire predictability or atomicity, the users should add extra mechanism /// for such properties. /// -/// [`UntypedSegment`]: crate::mm::UntypedSegment -/// [`UntypedFrame`]: crate::mm::UntypedFrame +/// [`DynUSegment`]: crate::mm::DynUSegment +/// [`DynUFrame`]: crate::mm::DynUFrame pub trait VmIo: Send + Sync { /// Reads requested data at a specified offset into a given `VmWriter`. /// diff --git a/ostd/src/mm/kspace/mod.rs b/ostd/src/mm/kspace/mod.rs index bc3e7d2a1..197e1ee66 100644 --- a/ostd/src/mm/kspace/mod.rs +++ b/ostd/src/mm/kspace/mod.rs @@ -164,7 +164,7 @@ pub fn init_kernel_page_table(meta_pages: Segment) { // Map the metadata pages. { let start_va = mapping::page_to_meta::(0); - let from = start_va..start_va + meta_pages.nbytes(); + let from = start_va..start_va + meta_pages.size(); let prop = PageProperty { flags: PageFlags::RW, cache: CachePolicy::Writeback, @@ -214,7 +214,7 @@ pub fn init_kernel_page_table(meta_pages: Segment) { }; let mut cursor = kpt.cursor_mut(&from).unwrap(); for frame_paddr in to.step_by(PAGE_SIZE) { - let page = Frame::::from_unused(frame_paddr, KernelMeta::default()); + let page = Frame::::from_unused(frame_paddr, KernelMeta); // SAFETY: we are doing mappings for the kernel. unsafe { let _old = cursor.map(page.into(), prop); @@ -249,6 +249,6 @@ pub unsafe fn activate_kernel_page_table() { /// The metadata of pages that contains the kernel itself. #[derive(Debug, Default)] -pub struct KernelMeta {} +pub struct KernelMeta; impl_frame_meta_for!(KernelMeta); diff --git a/ostd/src/mm/mod.rs b/ostd/src/mm/mod.rs index f6a61e55f..638a6b942 100644 --- a/ostd/src/mm/mod.rs +++ b/ostd/src/mm/mod.rs @@ -24,7 +24,12 @@ use core::{fmt::Debug, ops::Range}; pub use self::{ dma::{Daddr, DmaCoherent, DmaDirection, DmaStream, DmaStreamSlice, HasDaddr}, - frame::untyped::{options::FrameAllocOptions, UntypedFrame, UntypedSegment}, + frame::{ + allocator::FrameAllocOptions, + segment::{DynSegment, DynUSegment, Segment}, + untyped::{DynUFrame, UFrameMeta, UntypedMem}, + DynFrame, Frame, + }, io::{ Fallible, FallibleVmRead, FallibleVmWrite, Infallible, PodOnce, VmIo, VmIoOnce, VmReader, VmWriter, diff --git a/ostd/src/mm/page_table/boot_pt.rs b/ostd/src/mm/page_table/boot_pt.rs index c26cca65b..fe1710579 100644 --- a/ostd/src/mm/page_table/boot_pt.rs +++ b/ostd/src/mm/page_table/boot_pt.rs @@ -250,7 +250,7 @@ fn test_boot_pt_map_protect() { mm::{CachePolicy, FrameAllocOptions, PageFlags}, }; - let root_frame = FrameAllocOptions::new(1).alloc_single().unwrap(); + let root_frame = FrameAllocOptions::new().alloc_frame().unwrap(); let root_paddr = root_frame.start_paddr(); let mut boot_pt = BootPageTable:: { diff --git a/ostd/src/mm/page_table/node/mod.rs b/ostd/src/mm/page_table/node/mod.rs index 0209d51e9..0439c263b 100644 --- a/ostd/src/mm/page_table/node/mod.rs +++ b/ostd/src/mm/page_table/node/mod.rs @@ -40,8 +40,9 @@ use super::{nr_subpage_per_huge, PageTableEntryTrait}; use crate::{ arch::mm::{PageTableEntry, PagingConsts}, mm::{ - frame::{self, inc_page_ref_count, meta::FrameMeta, Frame}, - paddr_to_vaddr, Infallible, Paddr, PagingConstsTrait, PagingLevel, VmReader, PAGE_SIZE, + frame::{inc_page_ref_count, meta::FrameMeta, Frame}, + paddr_to_vaddr, FrameAllocOptions, Infallible, Paddr, PagingConstsTrait, PagingLevel, + VmReader, }, }; @@ -260,13 +261,11 @@ where /// extra unnecessary expensive operation. 
diff --git a/ostd/src/mm/page_table/node/mod.rs b/ostd/src/mm/page_table/node/mod.rs
index 0209d51e9..0439c263b 100644
--- a/ostd/src/mm/page_table/node/mod.rs
+++ b/ostd/src/mm/page_table/node/mod.rs
@@ -40,8 +40,9 @@ use super::{nr_subpage_per_huge, PageTableEntryTrait};
 use crate::{
     arch::mm::{PageTableEntry, PagingConsts},
     mm::{
-        frame::{self, inc_page_ref_count, meta::FrameMeta, Frame},
-        paddr_to_vaddr, Infallible, Paddr, PagingConstsTrait, PagingLevel, VmReader, PAGE_SIZE,
+        frame::{inc_page_ref_count, meta::FrameMeta, Frame},
+        paddr_to_vaddr, FrameAllocOptions, Infallible, Paddr, PagingConstsTrait, PagingLevel,
+        VmReader,
     },
 };
 
@@ -260,13 +261,11 @@ where
     /// extra unnecessary expensive operation.
     pub(super) fn alloc(level: PagingLevel, is_tracked: MapTrackingStatus) -> Self {
         let meta = PageTablePageMeta::new_locked(level, is_tracked);
-        let page = frame::allocator::alloc_single::<PageTablePageMeta<E, C>>(meta).unwrap();
-
-        // Zero out the page table node.
-        let ptr = paddr_to_vaddr(page.paddr()) as *mut u8;
-        // SAFETY: The page is exclusively owned here. Pointers are valid also.
-        // We rely on the fact that 0 represents an absent entry to speed up `memset`.
-        unsafe { core::ptr::write_bytes(ptr, 0, PAGE_SIZE) };
+        let page = FrameAllocOptions::new()
+            .zeroed(true)
+            .alloc_frame_with(meta)
+            .expect("Failed to allocate a page table node");
+        // The allocated frame is zeroed. Make sure zero is absent PTE.
         debug_assert!(E::new_absent().as_bytes().iter().all(|&b| b == 0));
 
         Self { page }
@@ -281,7 +280,7 @@ where
 
         // SAFETY: The provided physical address is valid and the level is
         // correct. The reference count is not changed.
-        unsafe { RawPageTableNode::from_raw_parts(this.page.paddr(), this.page.meta().level) }
+        unsafe { RawPageTableNode::from_raw_parts(this.page.start_paddr(), this.page.meta().level) }
     }
 
     /// Gets a raw handle while still preserving the original handle.
@@ -290,7 +289,7 @@ where
 
         // SAFETY: The provided physical address is valid and the level is
         // correct. The reference count is increased by one.
-        unsafe { RawPageTableNode::from_raw_parts(page.paddr(), page.meta().level) }
+        unsafe { RawPageTableNode::from_raw_parts(page.start_paddr(), page.meta().level) }
     }
 
     /// Gets the number of valid PTEs in the node.
@@ -310,7 +309,7 @@ where
     /// The caller must ensure that the index is within the bound.
     unsafe fn read_pte(&self, idx: usize) -> E {
         debug_assert!(idx < nr_subpage_per_huge::<C>());
-        let ptr = paddr_to_vaddr(self.page.paddr()) as *const E;
+        let ptr = paddr_to_vaddr(self.page.start_paddr()) as *const E;
         // SAFETY: The index is within the bound and the PTE is plain-old-data.
         unsafe { ptr.add(idx).read() }
     }
@@ -330,7 +329,7 @@ where
     /// (see [`Child::is_compatible`]).
     unsafe fn write_pte(&mut self, idx: usize, pte: E) {
         debug_assert!(idx < nr_subpage_per_huge::<C>());
-        let ptr = paddr_to_vaddr(self.page.paddr()) as *mut E;
+        let ptr = paddr_to_vaddr(self.page.start_paddr()) as *mut E;
         // SAFETY: The index is within the bound and the PTE is plain-old-data.
         unsafe { ptr.add(idx).write(pte) }
     }
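The rewritten `alloc` above trades the manual `write_bytes` memset for `zeroed(true)` on the allocator, which is only sound because an absent PTE is encoded as all-zero bytes; the retained `debug_assert!` pins that invariant down. A standalone, runnable model of the check, with a hypothetical `FakePte` standing in for the generic `E: PageTableEntryTrait`:

#[derive(Clone, Copy, Default)]
#[repr(transparent)]
struct FakePte(u64);

impl FakePte {
    fn new_absent() -> Self {
        Self(0) // absent entries are encoded as zero
    }
    fn as_bytes(self) -> [u8; 8] {
        self.0.to_ne_bytes()
    }
}

fn main() {
    // Mirrors `debug_assert!(E::new_absent().as_bytes().iter().all(|&b| b == 0))`:
    // if this holds, a freshly zeroed frame is already a valid empty node.
    assert!(FakePte::new_absent().as_bytes().iter().all(|&b| b == 0));
}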
diff --git a/ostd/src/mm/page_table/test.rs b/ostd/src/mm/page_table/test.rs
index ecf2ae0c7..06283e6b5 100644
--- a/ostd/src/mm/page_table/test.rs
+++ b/ostd/src/mm/page_table/test.rs
@@ -5,10 +5,9 @@ use core::mem::ManuallyDrop;
 use super::*;
 use crate::{
     mm::{
-        frame::{allocator, untyped::UntypedMeta},
         kspace::LINEAR_MAPPING_BASE_VADDR,
         page_prop::{CachePolicy, PageFlags},
-        MAX_USERSPACE_VADDR,
+        FrameAllocOptions, MAX_USERSPACE_VADDR,
     },
     prelude::*,
 };
@@ -31,8 +30,8 @@ fn test_tracked_map_unmap() {
     let pt = PageTable::<UserMode>::empty();
 
     let from = PAGE_SIZE..PAGE_SIZE * 2;
-    let page = allocator::alloc_single(UntypedMeta::default()).unwrap();
-    let start_paddr = page.paddr();
+    let page = FrameAllocOptions::new().alloc_frame().unwrap();
+    let start_paddr = page.start_paddr();
     let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
     unsafe { pt.cursor_mut(&from).unwrap().map(page.into(), prop) };
     assert_eq!(pt.query(from.start + 10).unwrap().0, start_paddr + 10);
@@ -87,8 +86,8 @@ fn test_user_copy_on_write() {
     let pt = PageTable::<UserMode>::empty();
 
     let from = PAGE_SIZE..PAGE_SIZE * 2;
-    let page = allocator::alloc_single(UntypedMeta::default()).unwrap();
-    let start_paddr = page.paddr();
+    let page = FrameAllocOptions::new().alloc_frame().unwrap();
+    let start_paddr = page.start_paddr();
     let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
     unsafe { pt.cursor_mut(&from).unwrap().map(page.clone().into(), prop) };
     assert_eq!(pt.query(from.start + 10).unwrap().0, start_paddr + 10);
@@ -172,7 +171,7 @@ fn test_base_protect_query() {
 
     let from_ppn = 1..1000;
     let from = PAGE_SIZE * from_ppn.start..PAGE_SIZE * from_ppn.end;
-    let to = allocator::alloc_contiguous(999 * PAGE_SIZE, |_| UntypedMeta::default()).unwrap();
+    let to = FrameAllocOptions::new().alloc_segment(999).unwrap();
     let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
     unsafe {
         let mut cursor = pt.cursor_mut(&from).unwrap();
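The test updates above show the replacement idiom for metadata-free test allocations: `FrameAllocOptions::new().alloc_frame()` instead of `allocator::alloc_single(UntypedMeta::default())`, with `start_paddr()` as the renamed address accessor. A hedged sketch of a similar ktest (it assumes the ostd test harness and imports; `alloc_and_inspect` is a made-up example, not part of this patch):

use ostd::{
    mm::{FrameAllocOptions, PAGE_SIZE},
    prelude::ktest,
};

#[ktest]
fn alloc_and_inspect() {
    // One metadata-free frame; `alloc_frame` replaces `allocator::alloc_single`.
    let frame = FrameAllocOptions::new().alloc_frame().unwrap();
    // `start_paddr` is the renamed accessor (formerly `paddr`); frames are page-aligned.
    assert_eq!(frame.start_paddr() % PAGE_SIZE, 0);
}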
diff --git a/ostd/src/mm/vm_space.rs b/ostd/src/mm/vm_space.rs
index 1c330fbfe..e14c901e2 100644
--- a/ostd/src/mm/vm_space.rs
+++ b/ostd/src/mm/vm_space.rs
@@ -22,7 +22,7 @@ use crate::{
         kspace::KERNEL_PAGE_TABLE,
         page_table::{self, PageTable, PageTableItem, UserMode},
         tlb::{TlbFlushOp, TlbFlusher, FLUSH_ALL_RANGE_THRESHOLD},
-        PageProperty, UntypedFrame, VmReader, VmWriter, MAX_USERSPACE_VADDR,
+        DynUFrame, PageProperty, VmReader, VmWriter, MAX_USERSPACE_VADDR,
     },
     prelude::*,
     sync::{PreemptDisabled, RwLock, RwLockReadGuard},
@@ -40,7 +40,7 @@ use crate::{
 ///
 /// A newly-created `VmSpace` is not backed by any physical memory pages. To
 /// provide memory pages for a `VmSpace`, one can allocate and map physical
-/// memory ([`UntypedFrame`]s) to the `VmSpace` using the cursor.
+/// memory ([`DynUFrame`]s) to the `VmSpace` using the cursor.
 ///
 /// A `VmSpace` can also attach a page fault handler, which will be invoked to
 /// handle page faults generated from user space.
@@ -323,7 +323,7 @@ impl CursorMut<'_, '_> {
     /// Map a frame into the current slot.
     ///
    /// This method will bring the cursor to the next slot after the modification.
-    pub fn map(&mut self, frame: UntypedFrame, prop: PageProperty) {
+    pub fn map(&mut self, frame: DynUFrame, prop: PageProperty) {
         let start_va = self.virt_addr();
         // SAFETY: It is safe to map untyped memory into the userspace.
         let old = unsafe { self.pt_cursor.map(frame.into(), prop) };
@@ -475,7 +475,7 @@ pub enum VmItem {
         /// The virtual address of the slot.
         va: Vaddr,
         /// The mapped frame.
-        frame: UntypedFrame,
+        frame: DynUFrame,
         /// The property of the slot.
         prop: PageProperty,
     },
diff --git a/ostd/src/prelude.rs b/ostd/src/prelude.rs
index b11fe5ad1..443296493 100644
--- a/ostd/src/prelude.rs
+++ b/ostd/src/prelude.rs
@@ -14,6 +14,6 @@ pub use ostd_macros::ktest;
 pub use crate::{
     early_print as print,
     early_println as println,
-    mm::{Paddr, Vaddr},
+    mm::{Paddr, UntypedMem, Vaddr},
     panic::abort,
 };
diff --git a/ostd/src/task/kernel_stack.rs b/ostd/src/task/kernel_stack.rs
index b25f2adb8..c89497d22 100644
--- a/ostd/src/task/kernel_stack.rs
+++ b/ostd/src/task/kernel_stack.rs
@@ -3,10 +3,9 @@
 use crate::{
     impl_frame_meta_for,
     mm::{
-        frame::allocator,
         kspace::kvirt_area::{KVirtArea, Tracked},
         page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
-        PAGE_SIZE,
+        FrameAllocOptions, PAGE_SIZE,
     },
     prelude::*,
 };
@@ -36,7 +35,7 @@ pub struct KernelStack {
 }
 
 #[derive(Debug, Default)]
-struct KernelStackMeta {}
+struct KernelStackMeta;
 
 impl_frame_meta_for!(KernelStackMeta);
 
@@ -47,8 +46,9 @@ impl KernelStack {
         let mut new_kvirt_area = KVirtArea::<Tracked>::new(KERNEL_STACK_SIZE + 4 * PAGE_SIZE);
         let mapped_start = new_kvirt_area.range().start + 2 * PAGE_SIZE;
         let mapped_end = mapped_start + KERNEL_STACK_SIZE;
-        let pages =
-            allocator::alloc_contiguous(KERNEL_STACK_SIZE, |_| KernelStackMeta::default()).unwrap();
+        let pages = FrameAllocOptions::new()
+            .zeroed(false)
+            .alloc_segment_with(KERNEL_STACK_SIZE / PAGE_SIZE, |_| KernelStackMeta)?;
         let prop = PageProperty {
             flags: PageFlags::RW,
             cache: CachePolicy::Writeback,