Implement a new set of physical page APIs

Zhang Junyang
2024-12-24 18:20:55 +08:00
committed by Tate, Hongliang Tian
parent 6e1c36965a
commit cdac59beda
56 changed files with 882 additions and 995 deletions

View File

@ -88,11 +88,8 @@ fn handle_get_report(arg: usize) -> Result<i32> {
let user_space = CurrentUserSpace::new(&current_task);
let user_request: TdxReportRequest = user_space.read_val(arg)?;
let vm_segment = FrameAllocOptions::new(2)
.is_contiguous(true)
.alloc_contiguous()
.unwrap();
let dma_coherent = DmaCoherent::map(vm_segment, false).unwrap();
let segment = FrameAllocOptions::new().alloc_segment(2).unwrap();
let dma_coherent = DmaCoherent::map(segment.into(), false).unwrap();
dma_coherent
.write_bytes(0, &user_request.report_data)
.unwrap();
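This first hunk shows the shape of the migration that repeats throughout the commit: `FrameAllocOptions::new()` no longer takes a frame count, `alloc_frame()`/`alloc_segment(n)` replace `alloc_single()`/`alloc_contiguous()`, the typed results (`Frame<()>`, `Segment<()>`) convert into the dynamically typed untyped forms (`DynUFrame`, `DynUSegment`) via `.into()`, and `.zeroed(false)` replaces the old `.uninit(true)`. A minimal sketch of the allocation above; the `DmaCoherent` import path is an assumption, everything else mirrors the diff:

```rust
use ostd::mm::{DmaCoherent, FrameAllocOptions};

// Sketch of the new calls used above (the `DmaCoherent` path is assumed).
fn alloc_dma_report_buffer() -> DmaCoherent {
    // Old: FrameAllocOptions::new(2).is_contiguous(true).alloc_contiguous()
    // New: the frame count moves from the constructor to the allocation call.
    let segment = FrameAllocOptions::new().alloc_segment(2).unwrap();
    // `Segment<()>` converts into the dynamically typed `DynUSegment` that
    // `DmaCoherent::map` accepts; `false` is passed through as in the caller above.
    DmaCoherent::map(segment.into(), false).unwrap()
}
```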

View File

@ -12,7 +12,7 @@ use aster_block::{
};
use hashbrown::HashMap;
use lru::LruCache;
use ostd::mm::UntypedFrame;
use ostd::mm::DynUFrame;
pub(super) use ostd::mm::VmIo;
use super::{
@ -368,7 +368,7 @@ impl ExfatFS {
}
impl PageCacheBackend for ExfatFS {
fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
if self.fs_size() < idx * PAGE_SIZE {
return_errno_with_message!(Errno::EINVAL, "invalid read size")
}
@ -380,7 +380,7 @@ impl PageCacheBackend for ExfatFS {
Ok(waiter)
}
fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
if self.fs_size() < idx * PAGE_SIZE {
return_errno_with_message!(Errno::EINVAL, "invalid write size")
}

View File

@ -13,7 +13,7 @@ use aster_block::{
BLOCK_SIZE,
};
use aster_rights::Full;
use ostd::mm::{UntypedFrame, VmIo};
use ostd::mm::{DynUFrame, VmIo};
use super::{
constants::*,
@ -135,7 +135,7 @@ struct ExfatInodeInner {
}
impl PageCacheBackend for ExfatInode {
fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
let inner = self.inner.read();
if inner.size < idx * PAGE_SIZE {
return_errno_with_message!(Errno::EINVAL, "Invalid read size")
@ -150,7 +150,7 @@ impl PageCacheBackend for ExfatInode {
Ok(waiter)
}
fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
let inner = self.inner.read();
let sector_size = inner.fs().sector_size();

View File

@ -22,7 +22,7 @@ mod test {
BlockDevice, BlockDeviceMeta,
};
use ostd::{
mm::{FrameAllocOptions, UntypedSegment, VmIo, PAGE_SIZE},
mm::{FrameAllocOptions, Segment, VmIo, PAGE_SIZE},
prelude::*,
};
use rand::{rngs::SmallRng, RngCore, SeedableRng};
@ -40,15 +40,15 @@ mod test {
/// The following are implementations of a memory-simulated block device
pub const SECTOR_SIZE: usize = 512;
struct ExfatMemoryBioQueue(UntypedSegment);
struct ExfatMemoryBioQueue(Segment<()>);
impl ExfatMemoryBioQueue {
pub fn new(segment: UntypedSegment) -> Self {
pub fn new(segment: Segment<()>) -> Self {
ExfatMemoryBioQueue(segment)
}
pub fn sectors_count(&self) -> usize {
self.0.nbytes() / SECTOR_SIZE
self.0.size() / SECTOR_SIZE
}
}
@ -57,7 +57,7 @@ mod test {
}
impl ExfatMemoryDisk {
pub fn new(segment: UntypedSegment) -> Self {
pub fn new(segment: Segment<()>) -> Self {
ExfatMemoryDisk {
queue: ExfatMemoryBioQueue::new(segment),
}
@ -111,20 +111,20 @@ mod test {
static EXFAT_IMAGE: &[u8] = include_bytes!("../../../../test/build/exfat.img");
/// Read exfat disk image
fn new_vm_segment_from_image() -> UntypedSegment {
let vm_segment = FrameAllocOptions::new(EXFAT_IMAGE.len().div_ceil(PAGE_SIZE))
.uninit(true)
.alloc_contiguous()
fn new_vm_segment_from_image() -> Segment<()> {
let segment = FrameAllocOptions::new()
.zeroed(false)
.alloc_segment(EXFAT_IMAGE.len().div_ceil(PAGE_SIZE))
.unwrap();
vm_segment.write_bytes(0, EXFAT_IMAGE).unwrap();
vm_segment
segment.write_bytes(0, EXFAT_IMAGE).unwrap();
segment
}
// Generate a simulated exfat file system
fn load_exfat() -> Arc<ExfatFS> {
let vm_segment = new_vm_segment_from_image();
let disk = ExfatMemoryDisk::new(vm_segment);
let segment = new_vm_segment_from_image();
let disk = ExfatMemoryDisk::new(segment);
let mount_option = ExfatMountOptions::default();
let fs = ExfatFS::open(Arc::new(disk), mount_option);
assert!(fs.is_ok(), "Fs failed to init:{:?}", fs.unwrap_err());
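The test helper now keeps a typed `Segment<()>` and queries its length with `size()` rather than `nbytes()`; zero-filling is skipped because the disk image immediately overwrites the pages. A hedged sketch of the same pattern, with the helper name and the final assertion as illustrative additions:

```rust
use ostd::mm::{FrameAllocOptions, Segment, VmIo, PAGE_SIZE};

// Illustrative helper: back an in-memory "disk" with a Segment<()> that holds
// an image; `image` stands in for the real EXFAT_IMAGE bytes.
fn segment_from_image(image: &[u8]) -> Segment<()> {
    let segment = FrameAllocOptions::new()
        .zeroed(false) // the image overwrites every byte that will be read
        .alloc_segment(image.len().div_ceil(PAGE_SIZE))
        .unwrap();
    segment.write_bytes(0, image).unwrap();
    // `size()` replaces the old `nbytes()` and is a whole number of pages.
    assert!(segment.size() >= image.len());
    segment
}
```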

View File

@ -28,7 +28,7 @@ struct BlockGroupImpl {
impl BlockGroup {
/// Loads and constructs a block group.
pub fn load(
group_descriptors_segment: &UntypedSegment,
group_descriptors_segment: &DynUSegment,
idx: usize,
block_device: &dyn BlockDevice,
super_block: &SuperBlock,
@ -318,7 +318,7 @@ impl Debug for BlockGroup {
}
impl PageCacheBackend for BlockGroupImpl {
fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
let bid = self.inode_table_bid + idx as Ext2Bid;
let bio_segment =
BioSegment::new_from_segment(frame.clone().into(), BioDirection::FromDevice);
@ -328,7 +328,7 @@ impl PageCacheBackend for BlockGroupImpl {
.read_blocks_async(bid, bio_segment)
}
fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
let bid = self.inode_table_bid + idx as Ext2Bid;
let bio_segment =
BioSegment::new_from_segment(frame.clone().into(), BioDirection::ToDevice);

View File

@ -23,7 +23,7 @@ pub struct Ext2 {
blocks_per_group: Ext2Bid,
inode_size: usize,
block_size: usize,
group_descriptors_segment: UntypedSegment,
group_descriptors_segment: DynUSegment,
self_ref: Weak<Self>,
}
@ -46,11 +46,11 @@ impl Ext2 {
let npages = ((super_block.block_groups_count() as usize)
* core::mem::size_of::<RawGroupDescriptor>())
.div_ceil(BLOCK_SIZE);
let segment = FrameAllocOptions::new(npages)
.uninit(true)
.alloc_contiguous()?;
let segment = FrameAllocOptions::new()
.zeroed(false)
.alloc_segment(npages)?;
let bio_segment =
BioSegment::new_from_segment(segment.clone(), BioDirection::FromDevice);
BioSegment::new_from_segment(segment.clone().into(), BioDirection::FromDevice);
match block_device.read_blocks(super_block.group_descriptors_bid(0), bio_segment)? {
BioStatus::Complete => (),
err_status => {
@ -63,7 +63,7 @@ impl Ext2 {
// Load the block groups information
let load_block_groups = |fs: Weak<Ext2>,
block_device: &dyn BlockDevice,
group_descriptors_segment: &UntypedSegment|
group_descriptors_segment: &DynUSegment|
-> Result<Vec<BlockGroup>> {
let block_groups_count = super_block.block_groups_count() as usize;
let mut block_groups = Vec::with_capacity(block_groups_count);
@ -88,12 +88,12 @@ impl Ext2 {
block_groups: load_block_groups(
weak_ref.clone(),
block_device.as_ref(),
&group_descriptors_segment,
(&group_descriptors_segment).into(),
)
.unwrap(),
block_device,
super_block: RwMutex::new(Dirty::new(super_block)),
group_descriptors_segment,
group_descriptors_segment: group_descriptors_segment.into(),
self_ref: weak_ref.clone(),
});
Ok(ext2)
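The group-descriptor storage becomes a `DynUSegment`, and the loader converts at the boundaries: `(&segment).into()` lends a `&Segment<()>` as a `&DynUSegment`, while the plain `.into()` consumes the typed segment for storage. A small sketch of both conversions, assuming the `From`/`Into` impls behave as the calls above suggest:

```rust
use ostd::mm::{DynUSegment, FrameAllocOptions, Segment};

// Illustrative function showing the two conversions used above.
fn conversions(npages: usize) -> DynUSegment {
    let segment: Segment<()> = FrameAllocOptions::new()
        .zeroed(false)
        .alloc_segment(npages)
        .unwrap();
    // Borrowed form: lend a `&Segment<()>` where a `&DynUSegment` is expected.
    let _borrowed: &DynUSegment = (&segment).into();
    // Owned form: consume the typed segment and keep the type-erased one.
    segment.into()
}
```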

View File

@ -42,8 +42,10 @@ impl IndirectBlockCache {
let fs = self.fs();
let load_block = || -> Result<IndirectBlock> {
let mut block = IndirectBlock::alloc_uninit()?;
let bio_segment =
BioSegment::new_from_segment(block.frame.clone().into(), BioDirection::FromDevice);
let bio_segment = BioSegment::new_from_segment(
Segment::<()>::from(block.frame.clone()).into(),
BioDirection::FromDevice,
);
fs.read_blocks(bid, bio_segment)?;
block.state = State::UpToDate;
Ok(block)
@ -61,8 +63,10 @@ impl IndirectBlockCache {
let fs = self.fs();
let load_block = || -> Result<IndirectBlock> {
let mut block = IndirectBlock::alloc_uninit()?;
let bio_segment =
BioSegment::new_from_segment(block.frame.clone().into(), BioDirection::FromDevice);
let bio_segment = BioSegment::new_from_segment(
Segment::<()>::from(block.frame.clone()).into(),
BioDirection::FromDevice,
);
fs.read_blocks(bid, bio_segment)?;
block.state = State::UpToDate;
Ok(block)
@ -109,7 +113,7 @@ impl IndirectBlockCache {
let (bid, block) = self.cache.pop_lru().unwrap();
if block.is_dirty() {
let bio_segment = BioSegment::new_from_segment(
block.frame.clone().into(),
Segment::<()>::from(block.frame.clone()).into(),
BioDirection::ToDevice,
);
bio_waiter.concat(self.fs().write_blocks_async(bid, bio_segment)?);
@ -132,7 +136,7 @@ impl IndirectBlockCache {
/// Represents a single indirect block buffer cached by the `IndirectCache`.
#[derive(Clone, Debug)]
pub struct IndirectBlock {
frame: UntypedFrame,
frame: Frame<()>,
state: State,
}
@ -140,7 +144,7 @@ impl IndirectBlock {
/// Allocates an uninitialized block whose bytes are to be populated with
/// data loaded from the disk.
fn alloc_uninit() -> Result<Self> {
let frame = FrameAllocOptions::new(1).uninit(true).alloc_single()?;
let frame = FrameAllocOptions::new().zeroed(false).alloc_frame()?;
Ok(Self {
frame,
state: State::Uninit,
@ -149,7 +153,7 @@ impl IndirectBlock {
/// Allocates a new block with its bytes initialized to zero.
pub fn alloc() -> Result<Self> {
let frame = FrameAllocOptions::new(1).alloc_single()?;
let frame = FrameAllocOptions::new().alloc_frame()?;
Ok(Self {
frame,
state: State::Dirty,
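A single cached `Frame<()>` must now be wrapped into a segment before it can back block I/O: `Segment::<()>::from(frame.clone())` builds a one-frame segment, and the trailing `.into()` erases its metadata type for `BioSegment::new_from_segment`. A sketch of that conversion chain; the helper name and the exact `aster_block` import paths are assumptions:

```rust
use aster_block::bio::{BioDirection, BioSegment};
use ostd::mm::{Frame, Segment};

// Illustrative helper mirroring the calls in the cache above.
fn frame_to_bio(frame: &Frame<()>) -> BioSegment {
    // Frame<()> -> Segment<()> (a one-frame segment) -> DynUSegment.
    let one_frame_segment = Segment::<()>::from(frame.clone());
    BioSegment::new_from_segment(one_frame_segment.into(), BioDirection::FromDevice)
}
```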

View File

@ -1733,7 +1733,7 @@ impl InodeImpl {
writer: &mut VmWriter,
) -> Result<BioWaiter>;
pub fn read_blocks(&self, bid: Ext2Bid, nblocks: usize, writer: &mut VmWriter) -> Result<()>;
pub fn read_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result<BioWaiter>;
pub fn read_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result<BioWaiter>;
pub fn write_blocks_async(
&self,
bid: Ext2Bid,
@ -1741,7 +1741,7 @@ impl InodeImpl {
reader: &mut VmReader,
) -> Result<BioWaiter>;
pub fn write_blocks(&self, bid: Ext2Bid, nblocks: usize, reader: &mut VmReader) -> Result<()>;
pub fn write_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result<BioWaiter>;
pub fn write_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result<BioWaiter>;
}
/// Manages the inode blocks and block I/O operations.
@ -1789,7 +1789,7 @@ impl InodeBlockManager {
}
}
pub fn read_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result<BioWaiter> {
pub fn read_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result<BioWaiter> {
let mut bio_waiter = BioWaiter::new();
for dev_range in DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)? {
@ -1834,7 +1834,7 @@ impl InodeBlockManager {
}
}
pub fn write_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result<BioWaiter> {
pub fn write_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result<BioWaiter> {
let mut bio_waiter = BioWaiter::new();
for dev_range in DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)? {
@ -1858,12 +1858,12 @@ impl InodeBlockManager {
}
impl PageCacheBackend for InodeBlockManager {
fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
let bid = idx as Ext2Bid;
self.read_block_async(bid, frame)
}
fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
let bid = idx as Ext2Bid;
self.write_block_async(bid, frame)
}

View File

@ -13,7 +13,7 @@ pub(super) use aster_block::{
};
pub(super) use aster_rights::Full;
pub(super) use ostd::{
mm::{FrameAllocOptions, UntypedFrame, UntypedSegment, VmIo},
mm::{DynUFrame, DynUSegment, Frame, FrameAllocOptions, Segment, VmIo},
sync::{RwMutex, RwMutexReadGuard, RwMutexWriteGuard},
};
pub(super) use static_assertions::const_assert;

View File

@ -11,7 +11,7 @@ use aster_rights::Full;
use aster_util::slot_vec::SlotVec;
use hashbrown::HashMap;
use ostd::{
mm::{UntypedFrame, VmIo},
mm::{DynUFrame, UntypedMem, VmIo},
sync::{PreemptDisabled, RwLockWriteGuard},
};
@ -484,7 +484,7 @@ impl RamInode {
}
impl PageCacheBackend for RamInode {
fn read_page_async(&self, _idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
fn read_page_async(&self, _idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
// Initially, any block/page in a RamFs inode contains all zeros
frame
.writer()
@ -494,7 +494,7 @@ impl PageCacheBackend for RamInode {
Ok(BioWaiter::new())
}
fn write_page_async(&self, _idx: usize, _frame: &UntypedFrame) -> Result<BioWaiter> {
fn write_page_async(&self, _idx: usize, _frame: &DynUFrame) -> Result<BioWaiter> {
// do nothing
Ok(BioWaiter::new())
}

View File

@ -8,7 +8,7 @@ use align_ext::AlignExt;
use aster_block::bio::{BioStatus, BioWaiter};
use aster_rights::Full;
use lru::LruCache;
use ostd::mm::{FrameAllocOptions, UntypedFrame, VmIo};
use ostd::mm::{DynUFrame, Frame, FrameAllocOptions, VmIo};
use crate::{
prelude::*,
@ -305,7 +305,7 @@ impl ReadaheadState {
};
for async_idx in window.readahead_range() {
let mut async_page = Page::alloc()?;
let pg_waiter = backend.read_page_async(async_idx, async_page.frame())?;
let pg_waiter = backend.read_page_async(async_idx, async_page.frame().into())?;
if pg_waiter.nreqs() > 0 {
self.waiter.concat(pg_waiter);
} else {
@ -361,7 +361,7 @@ impl PageCacheManager {
for idx in page_idx_range.start..page_idx_range.end {
if let Some(page) = pages.peek(&idx) {
if *page.state() == PageState::Dirty && idx < backend_npages {
let waiter = backend.write_page_async(idx, page.frame())?;
let waiter = backend.write_page_async(idx, page.frame().into())?;
bio_waiter.concat(waiter);
}
}
@ -381,7 +381,7 @@ impl PageCacheManager {
Ok(())
}
fn ondemand_readahead(&self, idx: usize) -> Result<UntypedFrame> {
fn ondemand_readahead(&self, idx: usize) -> Result<DynUFrame> {
let mut pages = self.pages.lock();
let mut ra_state = self.ra_state.lock();
let backend = self.backend();
@ -410,7 +410,7 @@ impl PageCacheManager {
// Conducts the sync read operation.
let page = if idx < backend.npages() {
let mut page = Page::alloc()?;
backend.read_page(idx, page.frame())?;
backend.read_page(idx, page.frame().into())?;
page.set_state(PageState::UpToDate);
page
} else {
@ -425,7 +425,7 @@ impl PageCacheManager {
ra_state.conduct_readahead(&mut pages, backend)?;
}
ra_state.set_prev_page(idx);
Ok(frame)
Ok(frame.into())
}
}
@ -438,7 +438,7 @@ impl Debug for PageCacheManager {
}
impl Pager for PageCacheManager {
fn commit_page(&self, idx: usize) -> Result<UntypedFrame> {
fn commit_page(&self, idx: usize) -> Result<DynUFrame> {
self.ondemand_readahead(idx)
}
@ -461,7 +461,7 @@ impl Pager for PageCacheManager {
return Ok(());
};
if idx < backend.npages() {
backend.write_page(idx, page.frame())?;
backend.write_page(idx, page.frame().into())?;
}
}
}
@ -469,25 +469,31 @@ impl Pager for PageCacheManager {
Ok(())
}
fn commit_overwrite(&self, idx: usize) -> Result<UntypedFrame> {
fn commit_overwrite(&self, idx: usize) -> Result<DynUFrame> {
if let Some(page) = self.pages.lock().get(&idx) {
return Ok(page.frame.clone());
return Ok(page.frame.clone().into());
}
let page = Page::alloc_zero()?;
Ok(self.pages.lock().get_or_insert(idx, || page).frame.clone())
Ok(self
.pages
.lock()
.get_or_insert(idx, || page)
.frame
.clone()
.into())
}
}
#[derive(Debug)]
struct Page {
frame: UntypedFrame,
frame: Frame<()>,
state: PageState,
}
impl Page {
pub fn alloc() -> Result<Self> {
let frame = FrameAllocOptions::new(1).uninit(true).alloc_single()?;
let frame = FrameAllocOptions::new().zeroed(false).alloc_frame()?;
Ok(Self {
frame,
state: PageState::Uninit,
@ -495,14 +501,14 @@ impl Page {
}
pub fn alloc_zero() -> Result<Self> {
let frame = FrameAllocOptions::new(1).alloc_single()?;
let frame = FrameAllocOptions::new().alloc_frame()?;
Ok(Self {
frame,
state: PageState::Dirty,
})
}
pub fn frame(&self) -> &UntypedFrame {
pub fn frame(&self) -> &Frame<()> {
&self.frame
}
@ -531,16 +537,16 @@ enum PageState {
/// This trait represents the backend for the page cache.
pub trait PageCacheBackend: Sync + Send {
/// Reads a page from the backend asynchronously.
fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter>;
fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter>;
/// Writes a page to the backend asynchronously.
fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter>;
fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter>;
/// Returns the number of pages in the backend.
fn npages(&self) -> usize;
}
impl dyn PageCacheBackend {
/// Reads a page from the backend synchronously.
fn read_page(&self, idx: usize, frame: &UntypedFrame) -> Result<()> {
fn read_page(&self, idx: usize, frame: &DynUFrame) -> Result<()> {
let waiter = self.read_page_async(idx, frame)?;
match waiter.wait() {
Some(BioStatus::Complete) => Ok(()),
@ -548,7 +554,7 @@ impl dyn PageCacheBackend {
}
}
/// Writes a page to the backend synchronously.
fn write_page(&self, idx: usize, frame: &UntypedFrame) -> Result<()> {
fn write_page(&self, idx: usize, frame: &DynUFrame) -> Result<()> {
let waiter = self.write_page_async(idx, frame)?;
match waiter.wait() {
Some(BioStatus::Complete) => Ok(()),
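With the trait now taking `&DynUFrame`, backends touch page contents through the `UntypedMem` reader/writer interface rather than through a concrete frame type. Below is a hedged sketch of a purely in-memory backend in the spirit of the RamFs one above; the struct, its `Segment<()>` storage, and the exact `VmReader`/`VmWriter` signatures are assumptions, while the trait methods and the copy pattern follow this commit:

```rust
use aster_block::bio::BioWaiter;
use ostd::mm::{DynUFrame, Segment, UntypedMem, PAGE_SIZE};

use crate::prelude::*; // assumed: provides `Result` as in the surrounding file

/// An illustrative, purely in-memory page cache backend.
struct MemBackend {
    storage: Segment<()>,
}

impl PageCacheBackend for MemBackend {
    fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
        // Copy one page out of the backing segment into the supplied frame
        // through the `UntypedMem` reader/writer interface.
        let mut reader = self.storage.reader();
        reader.skip(idx * PAGE_SIZE);
        frame.writer().write(&mut reader);
        // Nothing here is actually asynchronous, so return an empty waiter.
        Ok(BioWaiter::new())
    }

    fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
        // The reverse direction: copy the frame's bytes back into the segment.
        let mut writer = self.storage.writer();
        writer.skip(idx * PAGE_SIZE);
        writer.write(&mut frame.reader());
        Ok(BioWaiter::new())
    }

    fn npages(&self) -> usize {
        self.storage.size() / PAGE_SIZE
    }
}
```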

View File

@ -20,7 +20,7 @@ use core::{
use align_ext::AlignExt;
use aster_rights::Full;
use ostd::mm::{vm_space::VmItem, VmIo, VmSpace, MAX_USERSPACE_VADDR};
use ostd::mm::{vm_space::VmItem, UntypedMem, VmIo, VmSpace, MAX_USERSPACE_VADDR};
use self::aux_vec::{AuxKey, AuxVec};
use crate::{

View File

@ -306,7 +306,7 @@ fn map_segment_vmo(
new_frame
};
let head_idx = segment_offset / PAGE_SIZE;
segment_vmo.replace(new_frame, head_idx)?;
segment_vmo.replace(new_frame.into(), head_idx)?;
}
// Tail padding.
@ -324,7 +324,7 @@ fn map_segment_vmo(
};
let tail_idx = (segment_offset + tail_padding_offset) / PAGE_SIZE;
segment_vmo.replace(new_frame, tail_idx).unwrap();
segment_vmo.replace(new_frame.into(), tail_idx).unwrap();
}
let perms = parse_segment_perm(program_header.flags);

View File

@ -8,12 +8,12 @@ use core::{
use align_ext::AlignExt;
use inherit_methods_macro::inherit_methods;
use ostd::mm::{FrameAllocOptions, UntypedSegment, VmIo};
use ostd::mm::{FrameAllocOptions, Segment, UntypedMem, VmIo};
use super::{MultiRead, MultiWrite};
use crate::prelude::*;
/// A lock-free SPSC FIFO ring buffer backed by a [`UntypedSegment`].
/// A lock-free SPSC FIFO ring buffer backed by a [`Segment<()>`].
///
/// The ring buffer supports `push`/`pop` any `T: Pod` items, also
/// supports `write`/`read` any bytes data based on [`VmReader`]/[`VmWriter`].
@ -46,7 +46,7 @@ use crate::prelude::*;
/// }
/// ```
pub struct RingBuffer<T> {
segment: UntypedSegment,
segment: Segment<()>,
capacity: usize,
tail: AtomicUsize,
head: AtomicUsize,
@ -78,9 +78,9 @@ impl<T> RingBuffer<T> {
"capacity must be a power of two"
);
let nframes = capacity.saturating_mul(Self::T_SIZE).align_up(PAGE_SIZE) / PAGE_SIZE;
let segment = FrameAllocOptions::new(nframes)
.uninit(true)
.alloc_contiguous()
let segment = FrameAllocOptions::new()
.zeroed(false)
.alloc_segment(nframes)
.unwrap();
Self {
segment,
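The ring buffer's backing store is a typed `Segment<()>` allocated without zero-filling, since every byte is written before it is ever read. A small illustrative helper (not part of the commit) that pulls the sizing arithmetic and the new allocation call together:

```rust
use align_ext::AlignExt;
use ostd::{
    mm::{FrameAllocOptions, Segment, PAGE_SIZE},
    Pod,
};

// Illustrative: size a backing segment for `capacity` items of a `Pod` type.
fn alloc_ring_storage<T: Pod>(capacity: usize) -> Segment<()> {
    let nbytes = capacity.saturating_mul(core::mem::size_of::<T>());
    let nframes = nbytes.align_up(PAGE_SIZE) / PAGE_SIZE;
    FrameAllocOptions::new()
        .zeroed(false) // every byte is written before it is read
        .alloc_segment(nframes)
        .unwrap()
}
```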

View File

@ -21,7 +21,7 @@ use aster_rights::Rights;
use aster_time::{read_monotonic_time, Instant};
use aster_util::coeff::Coeff;
use ostd::{
mm::{UntypedFrame, VmIo, PAGE_SIZE},
mm::{DynUFrame, VmIo, PAGE_SIZE},
sync::SpinLock,
Pod,
};
@ -199,9 +199,9 @@ struct Vdso {
data: SpinLock<VdsoData>,
/// The VMO of the entire VDSO, including the library text and the VDSO data.
vmo: Arc<Vmo>,
/// The `UntypedFrame` that contains the VDSO data. This frame is contained in and
/// The `DynUFrame` that contains the VDSO data. This frame is contained in and
/// will not be removed from the VDSO VMO.
data_frame: UntypedFrame,
data_frame: DynUFrame,
}
/// A `SpinLock` for the `seq` field in `VdsoData`.

View File

@ -1,12 +1,14 @@
// SPDX-License-Identifier: MPL-2.0
use ostd::mm::{FrameAllocOptions, UntypedFrame};
use ostd::mm::{DynUFrame, Frame, FrameAllocOptions, UntypedMem};
use crate::prelude::*;
/// Creates a new `UntypedFrame` and initializes it with the contents of the `src`.
pub fn duplicate_frame(src: &UntypedFrame) -> Result<UntypedFrame> {
let new_frame = FrameAllocOptions::new(1).uninit(true).alloc_single()?;
new_frame.copy_from(src);
/// Creates a new `Frame<()>` and initializes it with the contents of the `src`.
///
/// Note that it only duplicates the contents, not the metadata.
pub fn duplicate_frame(src: &DynUFrame) -> Result<Frame<()>> {
let new_frame = FrameAllocOptions::new().zeroed(false).alloc_frame()?;
new_frame.writer().write(&mut src.reader());
Ok(new_frame)
}

View File

@ -8,8 +8,8 @@ use core::{
use align_ext::AlignExt;
use ostd::mm::{
tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, FrameAllocOptions, PageFlags, PageProperty,
UntypedFrame, VmSpace,
tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, DynUFrame, FrameAllocOptions, PageFlags,
PageProperty, VmSpace,
};
use super::interval_set::Interval;
@ -186,7 +186,7 @@ impl VmMapping {
} else {
let new_frame = duplicate_frame(&frame)?;
prop.flags |= new_flags;
cursor.map(new_frame, prop);
cursor.map(new_frame.into(), prop);
}
}
VmItem::NotMapped { .. } => {
@ -216,17 +216,17 @@ impl VmMapping {
Ok(())
}
fn prepare_page(&self, page_fault_addr: Vaddr, write: bool) -> Result<(UntypedFrame, bool)> {
fn prepare_page(&self, page_fault_addr: Vaddr, write: bool) -> Result<(DynUFrame, bool)> {
let mut is_readonly = false;
let Some(vmo) = &self.vmo else {
return Ok((FrameAllocOptions::new(1).alloc_single()?, is_readonly));
return Ok((FrameAllocOptions::new().alloc_frame()?.into(), is_readonly));
};
let page_offset = page_fault_addr.align_down(PAGE_SIZE) - self.map_to_addr;
let Ok(page) = vmo.get_committed_frame(page_offset) else {
if !self.is_shared {
// The page index is outside the VMO. This is only allowed in private mapping.
return Ok((FrameAllocOptions::new(1).alloc_single()?, is_readonly));
return Ok((FrameAllocOptions::new().alloc_frame()?.into(), is_readonly));
} else {
return_errno_with_message!(
Errno::EFAULT,
@ -237,7 +237,7 @@ impl VmMapping {
if !self.is_shared && write {
// Write access to private VMO-backed mapping. Performs COW directly.
Ok((duplicate_frame(&page)?, is_readonly))
Ok((duplicate_frame(&page)?.into(), is_readonly))
} else {
// Operations to shared mapping or read access to private VMO-backed mapping.
// If read access to private VMO-backed mapping triggers a page fault,
@ -264,7 +264,7 @@ impl VmMapping {
let vm_perms = self.perms - VmPerms::WRITE;
let mut cursor = vm_space.cursor_mut(&(start_addr..end_addr))?;
let operate = move |commit_fn: &mut dyn FnMut() -> Result<UntypedFrame>| {
let operate = move |commit_fn: &mut dyn FnMut() -> Result<DynUFrame>| {
if let VmItem::NotMapped { .. } = cursor.query().unwrap() {
// We regard all the surrounding pages as accessed, no matter
// if it is really so. Then the hardware won't bother to update
@ -432,7 +432,7 @@ impl MappedVmo {
///
/// If the VMO has not committed a frame at this index, it will commit
/// one first and return it.
fn get_committed_frame(&self, page_offset: usize) -> Result<UntypedFrame> {
fn get_committed_frame(&self, page_offset: usize) -> Result<DynUFrame> {
debug_assert!(page_offset < self.range.len());
debug_assert!(page_offset % PAGE_SIZE == 0);
self.vmo.commit_page(self.range.start + page_offset)
@ -444,7 +444,7 @@ impl MappedVmo {
/// perform other operations.
fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()>
where
F: FnMut(&mut dyn FnMut() -> Result<UntypedFrame>) -> Result<()>,
F: FnMut(&mut dyn FnMut() -> Result<DynUFrame>) -> Result<()>,
{
debug_assert!(range.start < self.range.len());
debug_assert!(range.end <= self.range.len());
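Both page-fault paths above end in a `DynUFrame`: an anonymous fault allocates a fresh `Frame<()>` and erases its type with `.into()`, while a write fault on a private VMO-backed page copies the committed frame with `duplicate_frame` first. A hedged sketch of that decision, assuming `duplicate_frame` and the crate's `Result` are in scope as in the surrounding module:

```rust
use ostd::mm::{DynUFrame, FrameAllocOptions};

// Illustrative decision helper; `duplicate_frame` and `Result` are assumed to
// be in scope as in the surrounding module.
fn prepare_anonymous_or_cow(cow_source: Option<&DynUFrame>) -> Result<DynUFrame> {
    match cow_source {
        // Write fault on a private, VMO-backed page: copy the contents first.
        Some(src) => Ok(duplicate_frame(src)?.into()),
        // No backing VMO page: a fresh zero-filled anonymous frame suffices.
        None => Ok(FrameAllocOptions::new().alloc_frame()?.into()),
    }
}
```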

View File

@ -3,14 +3,14 @@
use core::ops::Range;
use aster_rights::{Rights, TRights};
use ostd::mm::{UntypedFrame, VmIo};
use ostd::mm::{DynUFrame, VmIo};
use super::{CommitFlags, Vmo, VmoRightsOp};
use crate::prelude::*;
impl Vmo<Rights> {
/// Commits a page at specific offset
pub fn commit_page(&self, offset: usize) -> Result<UntypedFrame> {
pub fn commit_page(&self, offset: usize) -> Result<DynUFrame> {
self.check_rights(Rights::WRITE)?;
self.0.commit_page(offset)
}
@ -39,7 +39,7 @@ impl Vmo<Rights> {
/// perform other operations.
pub(in crate::vm) fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()>
where
F: FnMut(&mut dyn FnMut() -> Result<UntypedFrame>) -> Result<()>,
F: FnMut(&mut dyn FnMut() -> Result<DynUFrame>) -> Result<()>,
{
self.check_rights(Rights::WRITE)?;
self.0
@ -112,7 +112,7 @@ impl Vmo<Rights> {
/// # Access rights
///
/// The method requires the Write right.
pub fn replace(&self, page: UntypedFrame, page_idx: usize) -> Result<()> {
pub fn replace(&self, page: DynUFrame, page_idx: usize) -> Result<()> {
self.check_rights(Rights::WRITE)?;
self.0.replace(page, page_idx)
}
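Callers of the rights-checked VMO API now receive and hand back `DynUFrame`s. A short usage sketch, assuming a `Vmo<Rights>` handle and the module-local imports above; only `commit_page` and `replace` come from this diff:

```rust
use aster_rights::Rights;
use ostd::mm::{DynUFrame, PAGE_SIZE};

// Illustrative caller; `Vmo` and `Result` are assumed to be in scope as in
// this module (`use super::Vmo;` plus the crate prelude).
fn patch_page(vmo: &Vmo<Rights>, offset: usize, replacement: DynUFrame) -> Result<()> {
    // Both calls require the Write right and fail otherwise.
    let _committed: DynUFrame = vmo.commit_page(offset)?;
    vmo.replace(replacement, offset / PAGE_SIZE)?;
    Ok(())
}
```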

View File

@ -11,7 +11,7 @@ use align_ext::AlignExt;
use aster_rights::Rights;
use ostd::{
collections::xarray::{CursorMut, XArray},
mm::{FrameAllocOptions, UntypedFrame, VmReader, VmWriter},
mm::{DynUFrame, FrameAllocOptions, UntypedMem, VmReader, VmWriter},
};
use crate::prelude::*;
@ -66,8 +66,8 @@ pub use pager::Pager;
/// # Implementation
///
/// `Vmo` provides high-level APIs for address space management by wrapping
/// around its low-level counterpart [`ostd::mm::UntypedFrame`].
/// Compared with `UntypedFrame`,
/// around its low-level counterpart [`ostd::mm::DynUFrame`].
/// Compared with `DynUFrame`,
/// `Vmo` is easier to use (by offering more powerful APIs) and
/// harder to misuse (thanks to its nature of being a capability).
#[derive(Debug)]
@ -125,12 +125,12 @@ bitflags! {
}
}
/// `Pages` is the struct that manages the `UntypedFrame`s stored in `Vmo_`.
/// `Pages` is the struct that manages the `DynUFrame`s stored in `Vmo_`.
pub(super) enum Pages {
/// `Pages` that cannot be resized. This kind of `Pages` will have a constant size.
Nonresizable(Mutex<XArray<UntypedFrame>>, usize),
Nonresizable(Mutex<XArray<DynUFrame>>, usize),
/// `Pages` that can be resized and have a variable size.
Resizable(Mutex<(XArray<UntypedFrame>, usize)>),
Resizable(Mutex<(XArray<DynUFrame>, usize)>),
}
impl Clone for Pages {
@ -149,7 +149,7 @@ impl Clone for Pages {
impl Pages {
fn with<R, F>(&self, func: F) -> R
where
F: FnOnce(&mut XArray<UntypedFrame>, usize) -> R,
F: FnOnce(&mut XArray<DynUFrame>, usize) -> R,
{
match self {
Self::Nonresizable(pages, size) => func(&mut pages.lock(), *size),
@ -201,28 +201,28 @@ impl CommitFlags {
}
impl Vmo_ {
/// Prepares a new `UntypedFrame` for the target index in pages, returns this new frame.
fn prepare_page(&self, page_idx: usize) -> Result<UntypedFrame> {
/// Prepares a new `DynUFrame` for the target index in pages, returns this new frame.
fn prepare_page(&self, page_idx: usize) -> Result<DynUFrame> {
match &self.pager {
None => Ok(FrameAllocOptions::new(1).alloc_single()?),
None => Ok(FrameAllocOptions::new().alloc_frame()?.into()),
Some(pager) => pager.commit_page(page_idx),
}
}
/// Prepares a new `UntypedFrame` for the target index in the VMO, returns this new frame.
fn prepare_overwrite(&self, page_idx: usize) -> Result<UntypedFrame> {
/// Prepares a new `DynUFrame` for the target index in the VMO, returns this new frame.
fn prepare_overwrite(&self, page_idx: usize) -> Result<DynUFrame> {
if let Some(pager) = &self.pager {
pager.commit_overwrite(page_idx)
} else {
Ok(FrameAllocOptions::new(1).alloc_single()?)
Ok(FrameAllocOptions::new().alloc_frame()?.into())
}
}
fn commit_with_cursor(
&self,
cursor: &mut CursorMut<'_, UntypedFrame>,
cursor: &mut CursorMut<'_, DynUFrame>,
commit_flags: CommitFlags,
) -> Result<UntypedFrame> {
) -> Result<DynUFrame> {
let new_page = {
if let Some(committed_page) = cursor.load() {
// Fast path: return the page directly.
@ -241,7 +241,7 @@ impl Vmo_ {
/// Commits the page corresponding to the target offset in the VMO and return that page.
/// If the current offset has already been committed, the page will be returned directly.
pub fn commit_page(&self, offset: usize) -> Result<UntypedFrame> {
pub fn commit_page(&self, offset: usize) -> Result<DynUFrame> {
let page_idx = offset / PAGE_SIZE;
self.pages.with(|pages, size| {
if offset >= size {
@ -279,7 +279,7 @@ impl Vmo_ {
commit_flags: CommitFlags,
) -> Result<()>
where
F: FnMut(&mut dyn FnMut() -> Result<UntypedFrame>) -> Result<()>,
F: FnMut(&mut dyn FnMut() -> Result<DynUFrame>) -> Result<()>,
{
self.pages.with(|pages, size| {
if range.end > size {
@ -315,7 +315,7 @@ impl Vmo_ {
let read_range = offset..(offset + read_len);
let mut read_offset = offset % PAGE_SIZE;
let read = move |commit_fn: &mut dyn FnMut() -> Result<UntypedFrame>| {
let read = move |commit_fn: &mut dyn FnMut() -> Result<DynUFrame>| {
let frame = commit_fn()?;
frame.reader().skip(read_offset).read_fallible(writer)?;
read_offset = 0;
@ -331,7 +331,7 @@ impl Vmo_ {
let write_range = offset..(offset + write_len);
let mut write_offset = offset % PAGE_SIZE;
let mut write = move |commit_fn: &mut dyn FnMut() -> Result<UntypedFrame>| {
let mut write = move |commit_fn: &mut dyn FnMut() -> Result<DynUFrame>| {
let frame = commit_fn()?;
frame.writer().skip(write_offset).write_fallible(reader)?;
write_offset = 0;
@ -401,7 +401,7 @@ impl Vmo_ {
Ok(())
}
fn decommit_pages(&self, pages: &mut XArray<UntypedFrame>, range: Range<usize>) -> Result<()> {
fn decommit_pages(&self, pages: &mut XArray<DynUFrame>, range: Range<usize>) -> Result<()> {
let page_idx_range = get_page_idx_range(&range);
let mut cursor = pages.cursor_mut(page_idx_range.start as u64);
for page_idx in page_idx_range {
@ -426,7 +426,7 @@ impl Vmo_ {
self.flags
}
fn replace(&self, page: UntypedFrame, page_idx: usize) -> Result<()> {
fn replace(&self, page: DynUFrame, page_idx: usize) -> Result<()> {
self.pages.with(|pages, size| {
if page_idx >= size / PAGE_SIZE {
return_errno_with_message!(Errno::EINVAL, "the page index is outside of the vmo");

View File

@ -8,7 +8,7 @@ use align_ext::AlignExt;
use aster_rights::{Rights, TRightSet, TRights};
use ostd::{
collections::xarray::XArray,
mm::{FrameAllocOptions, UntypedFrame},
mm::{DynUFrame, DynUSegment, FrameAllocOptions},
};
use super::{Pager, Pages, Vmo, VmoFlags};
@ -137,13 +137,11 @@ fn alloc_vmo_(size: usize, flags: VmoFlags, pager: Option<Arc<dyn Pager>>) -> Re
})
}
fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<UntypedFrame>> {
fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<DynUFrame>> {
if flags.contains(VmoFlags::CONTIGUOUS) {
// if the vmo is contiguous, we need to allocate frames for the vmo
let frames_num = size / PAGE_SIZE;
let segment = FrameAllocOptions::new(frames_num)
.is_contiguous(true)
.alloc_contiguous()?;
let segment: DynUSegment = FrameAllocOptions::new().alloc_segment(frames_num)?.into();
let mut committed_pages = XArray::new();
let mut cursor = committed_pages.cursor_mut(0);
for frame in segment {

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: MPL-2.0
use ostd::mm::UntypedFrame;
use ostd::mm::DynUFrame;
use crate::prelude::*;
@ -26,7 +26,7 @@ pub trait Pager: Send + Sync {
/// whatever frame that may or may not be the same as the last time.
///
/// It is up to the pager to decide the range of valid indices.
fn commit_page(&self, idx: usize) -> Result<UntypedFrame>;
fn commit_page(&self, idx: usize) -> Result<DynUFrame>;
/// Notify the pager that the frame at a specified index has been updated.
///
@ -54,5 +54,5 @@ pub trait Pager: Send + Sync {
/// Ask the pager to provide a frame at a specified index.
/// Notify the pager that the frame will be fully overwritten soon, so the pager can
/// choose not to initialize it.
fn commit_overwrite(&self, idx: usize) -> Result<UntypedFrame>;
fn commit_overwrite(&self, idx: usize) -> Result<DynUFrame>;
}

View File

@ -4,14 +4,14 @@ use core::ops::Range;
use aster_rights::{Dup, Rights, TRightSet, TRights, Write};
use aster_rights_proc::require;
use ostd::mm::{UntypedFrame, VmIo};
use ostd::mm::{DynUFrame, VmIo};
use super::{CommitFlags, Vmo, VmoRightsOp};
use crate::prelude::*;
impl<R: TRights> Vmo<TRightSet<R>> {
/// Commits a page at specific offset.
pub fn commit_page(&self, offset: usize) -> Result<UntypedFrame> {
pub fn commit_page(&self, offset: usize) -> Result<DynUFrame> {
self.check_rights(Rights::WRITE)?;
self.0.commit_page(offset)
}
@ -41,7 +41,7 @@ impl<R: TRights> Vmo<TRightSet<R>> {
#[require(R > Write)]
pub(in crate::vm) fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()>
where
F: FnMut(&mut dyn FnMut() -> Result<UntypedFrame>) -> Result<()>,
F: FnMut(&mut dyn FnMut() -> Result<DynUFrame>) -> Result<()>,
{
self.0
.operate_on_range(range, operate, CommitFlags::empty())
@ -114,7 +114,7 @@ impl<R: TRights> Vmo<TRightSet<R>> {
///
/// The method requires the Write right.
#[require(R > Write)]
pub fn replace(&self, page: UntypedFrame, page_idx: usize) -> Result<()> {
pub fn replace(&self, page: DynUFrame, page_idx: usize) -> Result<()> {
self.0.replace(page, page_idx)
}