Mirror of https://github.com/asterinas/asterinas.git (synced 2025-06-18 12:06:43 +00:00)

Commit 14e1b1a9fc: Rename various concepts related to memory management
Committed by: Tate, Hongliang Tian
Parent commit: 03a39c94ca
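In short, this commit moves the memory-management APIs of `aster_frame` from the `vm` module to a new `mm` module, drops the `Vm` prefix from the page-level types (`VmFrame` becomes `Frame`, `VmSegment` becomes `Segment`), and updates doc examples from the old `aster_std` crate name to `aster_nix`. A minimal before/after sketch of a typical call site follows; the `zero_single_frame` helper is illustrative only and not part of the diff, while the import paths, the allocation call, and `writer().fill(0)` are taken from the hunks below.

// Before this commit:
//     use aster_frame::vm::{VmAllocOptions, VmFrame, VmIo, VmSegment};
// After this commit the same items live under `mm`, with shorter type names
// (VmFrame -> Frame, VmSegment -> Segment):
use aster_frame::mm::{Frame, VmAllocOptions};

// Hypothetical helper showing the renamed type in a signature; readers and
// writers (`VmReader`/`VmWriter`) keep their names under the new module.
fn zero_single_frame() -> Frame {
    let frame: Frame = VmAllocOptions::new(1).alloc_single().unwrap();
    frame.writer().fill(0);
    frame
}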
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: MPL-2.0
 pub use aster_frame::arch::console;
-use aster_frame::vm::VmReader;
+use aster_frame::mm::VmReader;
 use spin::Once;
 use crate::{

@@ -2,7 +2,7 @@
 use core::ops::Range;
-use aster_frame::vm::VmIo;
+use aster_frame::mm::VmIo;
 use aster_rights::Full;
 use super::{

@@ -3,8 +3,8 @@
 use core::{num::NonZeroUsize, ops::Range, sync::atomic::AtomicU64};
 use aster_block::{bio::BioWaiter, id::BlockId, BlockDevice};
-use aster_frame::vm::VmFrame;
-pub(super) use aster_frame::vm::VmIo;
+use aster_frame::mm::Frame;
+pub(super) use aster_frame::mm::VmIo;
 use hashbrown::HashMap;
 use lru::LruCache;

@@ -361,7 +361,7 @@ impl ExfatFS {
 }
 impl PageCacheBackend for ExfatFS {
-fn read_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
+fn read_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> {
 if self.fs_size() < idx * PAGE_SIZE {
 return_errno_with_message!(Errno::EINVAL, "invalid read size")
 }

@@ -371,7 +371,7 @@ impl PageCacheBackend for ExfatFS {
 Ok(waiter)
 }
-fn write_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
+fn write_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> {
 if self.fs_size() < idx * PAGE_SIZE {
 return_errno_with_message!(Errno::EINVAL, "invalid write size")
 }

@@ -9,7 +9,7 @@ use aster_block::{
 id::{Bid, BlockId},
 BLOCK_SIZE,
 };
-use aster_frame::vm::{VmAllocOptions, VmFrame, VmIo};
+use aster_frame::mm::{Frame, VmAllocOptions, VmIo};
 use aster_rights::Full;
 use super::{

@@ -132,7 +132,7 @@ struct ExfatInodeInner {
 }
 impl PageCacheBackend for ExfatInode {
-fn read_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
+fn read_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> {
 let inner = self.inner.read();
 if inner.size < idx * PAGE_SIZE {
 return_errno_with_message!(Errno::EINVAL, "Invalid read size")

@@ -145,7 +145,7 @@ impl PageCacheBackend for ExfatInode {
 Ok(waiter)
 }
-fn write_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
+fn write_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> {
 let inner = self.inner.read();
 let sector_size = inner.fs().sector_size();

@@ -21,7 +21,7 @@ mod test {
 bio::{BioEnqueueError, BioStatus, BioType, SubmittedBio},
 BlockDevice,
 };
-use aster_frame::vm::{VmAllocOptions, VmIo, VmSegment};
+use aster_frame::mm::{Segment, VmAllocOptions, VmIo};
 use rand::{rngs::SmallRng, RngCore, SeedableRng};
 use crate::{

@@ -37,10 +37,10 @@ mod test {
 /// Followings are implementations of memory simulated block device
 pub const SECTOR_SIZE: usize = 512;
-struct ExfatMemoryBioQueue(VmSegment);
+struct ExfatMemoryBioQueue(Segment);
 impl ExfatMemoryBioQueue {
-pub fn new(segment: VmSegment) -> Self {
+pub fn new(segment: Segment) -> Self {
 ExfatMemoryBioQueue(segment)
 }

@@ -54,7 +54,7 @@ mod test {
 }
 impl ExfatMemoryDisk {
-pub fn new(segment: VmSegment) -> Self {
+pub fn new(segment: Segment) -> Self {
 ExfatMemoryDisk {
 queue: ExfatMemoryBioQueue::new(segment),
 }

@@ -100,7 +100,7 @@ mod test {
 static EXFAT_IMAGE: &[u8] = include_bytes!("../../../../../regression/build/exfat.img");
 /// Read exfat disk image
-fn new_vm_segment_from_image() -> VmSegment {
+fn new_vm_segment_from_image() -> Segment {
 let vm_segment = {
 VmAllocOptions::new(EXFAT_IMAGE.len() / PAGE_SIZE)
 .is_contiguous(true)
@@ -28,7 +28,7 @@ struct BlockGroupImpl {
 impl BlockGroup {
 /// Loads and constructs a block group.
 pub fn load(
-group_descriptors_segment: &VmSegment,
+group_descriptors_segment: &Segment,
 idx: usize,
 block_device: &dyn BlockDevice,
 super_block: &SuperBlock,

@@ -318,12 +318,12 @@ impl Debug for BlockGroup {
 }
 impl PageCacheBackend for BlockGroupImpl {
-fn read_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
+fn read_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> {
 let bid = self.inode_table_bid + idx as Ext2Bid;
 self.fs.upgrade().unwrap().read_block_async(bid, frame)
 }
-fn write_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
+fn write_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> {
 let bid = self.inode_table_bid + idx as Ext2Bid;
 self.fs.upgrade().unwrap().write_block_async(bid, frame)
 }

@@ -21,7 +21,7 @@ pub struct Ext2 {
 blocks_per_group: Ext2Bid,
 inode_size: usize,
 block_size: usize,
-group_descriptors_segment: VmSegment,
+group_descriptors_segment: Segment,
 self_ref: Weak<Self>,
 }

@@ -55,7 +55,7 @@ impl Ext2 {
 // Load the block groups information
 let load_block_groups = |fs: Weak<Ext2>,
 block_device: &dyn BlockDevice,
-group_descriptors_segment: &VmSegment|
+group_descriptors_segment: &Segment|
 -> Result<Vec<BlockGroup>> {
 let block_groups_count = super_block.block_groups_count() as usize;
 let mut block_groups = Vec::with_capacity(block_groups_count);

@@ -291,7 +291,7 @@ impl Ext2 {
 }
 /// Reads contiguous blocks starting from the `bid` synchronously.
-pub(super) fn read_blocks(&self, bid: Ext2Bid, segment: &VmSegment) -> Result<()> {
+pub(super) fn read_blocks(&self, bid: Ext2Bid, segment: &Segment) -> Result<()> {
 let status = self
 .block_device
 .read_blocks_sync(Bid::new(bid as u64), segment)?;

@@ -302,7 +302,7 @@ impl Ext2 {
 }
 /// Reads one block indicated by the `bid` synchronously.
-pub(super) fn read_block(&self, bid: Ext2Bid, frame: &VmFrame) -> Result<()> {
+pub(super) fn read_block(&self, bid: Ext2Bid, frame: &Frame) -> Result<()> {
 let status = self
 .block_device
 .read_block_sync(Bid::new(bid as u64), frame)?;

@@ -313,13 +313,13 @@ impl Ext2 {
 }
 /// Reads one block indicated by the `bid` asynchronously.
-pub(super) fn read_block_async(&self, bid: Ext2Bid, frame: &VmFrame) -> Result<BioWaiter> {
+pub(super) fn read_block_async(&self, bid: Ext2Bid, frame: &Frame) -> Result<BioWaiter> {
 let waiter = self.block_device.read_block(Bid::new(bid as u64), frame)?;
 Ok(waiter)
 }
 /// Writes contiguous blocks starting from the `bid` synchronously.
-pub(super) fn write_blocks(&self, bid: Ext2Bid, segment: &VmSegment) -> Result<()> {
+pub(super) fn write_blocks(&self, bid: Ext2Bid, segment: &Segment) -> Result<()> {
 let status = self
 .block_device
 .write_blocks_sync(Bid::new(bid as u64), segment)?;

@@ -330,7 +330,7 @@ impl Ext2 {
 }
 /// Writes one block indicated by the `bid` synchronously.
-pub(super) fn write_block(&self, bid: Ext2Bid, frame: &VmFrame) -> Result<()> {
+pub(super) fn write_block(&self, bid: Ext2Bid, frame: &Frame) -> Result<()> {
 let status = self
 .block_device
 .write_block_sync(Bid::new(bid as u64), frame)?;

@@ -341,7 +341,7 @@ impl Ext2 {
 }
 /// Writes one block indicated by the `bid` asynchronously.
-pub(super) fn write_block_async(&self, bid: Ext2Bid, frame: &VmFrame) -> Result<BioWaiter> {
+pub(super) fn write_block_async(&self, bid: Ext2Bid, frame: &Frame) -> Result<BioWaiter> {
 let waiter = self.block_device.write_block(Bid::new(bid as u64), frame)?;
 Ok(waiter)
 }
@@ -124,7 +124,7 @@ impl IndirectBlockCache {
 /// Represents a single indirect block buffer cached by the `IndirectCache`.
 #[derive(Clone, Debug)]
 pub struct IndirectBlock {
-frame: VmFrame,
+frame: Frame,
 state: State,
 }

@@ -838,7 +838,7 @@ impl InodeImpl_ {
 self.inode().fs()
 }
-pub fn read_block_async(&self, bid: Ext2Bid, block: &VmFrame) -> Result<BioWaiter> {
+pub fn read_block_async(&self, bid: Ext2Bid, block: &Frame) -> Result<BioWaiter> {
 if bid >= self.desc.blocks_count() {
 return_errno!(Errno::EINVAL);
 }

@@ -852,14 +852,14 @@ impl InodeImpl_ {
 self.fs().read_block_async(device_range.start, block)
 }
-pub fn read_block_sync(&self, bid: Ext2Bid, block: &VmFrame) -> Result<()> {
+pub fn read_block_sync(&self, bid: Ext2Bid, block: &Frame) -> Result<()> {
 match self.read_block_async(bid, block)?.wait() {
 Some(BioStatus::Complete) => Ok(()),
 _ => return_errno!(Errno::EIO),
 }
 }
-pub fn write_block_async(&self, bid: Ext2Bid, block: &VmFrame) -> Result<BioWaiter> {
+pub fn write_block_async(&self, bid: Ext2Bid, block: &Frame) -> Result<BioWaiter> {
 if bid >= self.desc.blocks_count() {
 return_errno!(Errno::EINVAL);
 }

@@ -872,7 +872,7 @@ impl InodeImpl_ {
 Ok(waiter)
 }
-pub fn write_block_sync(&self, bid: Ext2Bid, block: &VmFrame) -> Result<()> {
+pub fn write_block_sync(&self, bid: Ext2Bid, block: &Frame) -> Result<()> {
 match self.write_block_async(bid, block)?.wait() {
 Some(BioStatus::Complete) => Ok(()),
 _ => return_errno!(Errno::EIO),

@@ -1525,19 +1525,19 @@ impl InodeImpl {
 self.0.read().desc.ctime
 }
-pub fn read_block_sync(&self, bid: Ext2Bid, block: &VmFrame) -> Result<()> {
+pub fn read_block_sync(&self, bid: Ext2Bid, block: &Frame) -> Result<()> {
 self.0.read().read_block_sync(bid, block)
 }
-pub fn read_block_async(&self, bid: Ext2Bid, block: &VmFrame) -> Result<BioWaiter> {
+pub fn read_block_async(&self, bid: Ext2Bid, block: &Frame) -> Result<BioWaiter> {
 self.0.read().read_block_async(bid, block)
 }
-pub fn write_block_sync(&self, bid: Ext2Bid, block: &VmFrame) -> Result<()> {
+pub fn write_block_sync(&self, bid: Ext2Bid, block: &Frame) -> Result<()> {
 self.0.read().write_block_sync(bid, block)
 }
-pub fn write_block_async(&self, bid: Ext2Bid, block: &VmFrame) -> Result<BioWaiter> {
+pub fn write_block_async(&self, bid: Ext2Bid, block: &Frame) -> Result<BioWaiter> {
 self.0.read().write_block_async(bid, block)
 }

@@ -1612,12 +1612,12 @@ impl InodeImpl {
 }
 impl PageCacheBackend for InodeImpl {
-fn read_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
+fn read_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> {
 let bid = idx as Ext2Bid;
 self.read_block_async(bid, frame)
 }
-fn write_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
+fn write_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> {
 let bid = idx as Ext2Bid;
 self.write_block_async(bid, frame)
 }

@@ -12,8 +12,8 @@ pub(super) use aster_block::{
 BlockDevice, BLOCK_SIZE,
 };
 pub(super) use aster_frame::{
+mm::{Frame, Segment, VmAllocOptions, VmIo},
 sync::{RwMutex, RwMutexReadGuard, RwMutexWriteGuard},
-vm::{VmAllocOptions, VmFrame, VmIo, VmSegment},
 };
 pub(super) use aster_rights::Full;
 pub(super) use static_assertions::const_assert;

@@ -7,8 +7,8 @@ use core::{
 use aster_block::bio::BioWaiter;
 use aster_frame::{
+mm::{Frame, VmIo},
 sync::RwMutexWriteGuard,
-vm::{VmFrame, VmIo},
 };
 use aster_rights::Full;
 use aster_util::slot_vec::SlotVec;

@@ -436,13 +436,13 @@ impl RamInode {
 }
 impl PageCacheBackend for RamInode {
-fn read_page(&self, _idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
+fn read_page(&self, _idx: usize, frame: &Frame) -> Result<BioWaiter> {
 // Initially, any block/page in a RamFs inode contains all zeros
 frame.writer().fill(0);
 Ok(BioWaiter::new())
 }
-fn write_page(&self, _idx: usize, _frame: &VmFrame) -> Result<BioWaiter> {
+fn write_page(&self, _idx: usize, _frame: &Frame) -> Result<BioWaiter> {
 // do nothing
 Ok(BioWaiter::new())
 }
@@ -3,7 +3,7 @@
 use core::ops::Range;
 use aster_block::bio::{BioStatus, BioWaiter};
-use aster_frame::vm::{VmAllocOptions, VmFrame};
+use aster_frame::mm::{Frame, VmAllocOptions};
 use aster_rights::Full;
 use lru::LruCache;

@@ -151,7 +151,7 @@ impl Debug for PageCacheManager {
 }
 impl Pager for PageCacheManager {
-fn commit_page(&self, idx: usize) -> Result<VmFrame> {
+fn commit_page(&self, idx: usize) -> Result<Frame> {
 if let Some(page) = self.pages.lock().get(&idx) {
 return Ok(page.frame.clone());
 }

@@ -202,7 +202,7 @@ impl Pager for PageCacheManager {
 #[derive(Debug)]
 struct Page {
-frame: VmFrame,
+frame: Frame,
 state: PageState,
 }

@@ -223,7 +223,7 @@ impl Page {
 })
 }
-pub fn frame(&self) -> &VmFrame {
+pub fn frame(&self) -> &Frame {
 &self.frame
 }

@@ -252,16 +252,16 @@ enum PageState {
 /// This trait represents the backend for the page cache.
 pub trait PageCacheBackend: Sync + Send {
 /// Reads a page from the backend asynchronously.
-fn read_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter>;
+fn read_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter>;
 /// Writes a page to the backend asynchronously.
-fn write_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter>;
+fn write_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter>;
 /// Returns the number of pages in the backend.
 fn npages(&self) -> usize;
 }
 impl dyn PageCacheBackend {
 /// Reads a page from the backend synchronously.
-fn read_page_sync(&self, idx: usize, frame: &VmFrame) -> Result<()> {
+fn read_page_sync(&self, idx: usize, frame: &Frame) -> Result<()> {
 let waiter = self.read_page(idx, frame)?;
 match waiter.wait() {
 Some(BioStatus::Complete) => Ok(()),

@@ -269,7 +269,7 @@ impl dyn PageCacheBackend {
 }
 }
 /// Writes a page to the backend synchronously.
-fn write_page_sync(&self, idx: usize, frame: &VmFrame) -> Result<()> {
+fn write_page_sync(&self, idx: usize, frame: &Frame) -> Result<()> {
 let waiter = self.write_page(idx, frame)?;
 match waiter.wait() {
 Some(BioStatus::Complete) => Ok(()),
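For context, the `PageCacheBackend` trait above is the interface that the exFAT, ext2, and RamFs hunks in this commit implement against the renamed `Frame` type. A minimal sketch of an implementation, modeled on the RamFs backend shown earlier in this diff; the `ZeroBackend` type is hypothetical, and `Result` is the crate-local alias used throughout these hunks.

use aster_block::bio::BioWaiter;
use aster_frame::mm::Frame;

// Hypothetical in-memory backend: every page reads back as zeros and writes
// are dropped, mirroring the RamFs implementation in this commit.
struct ZeroBackend {
    npages: usize,
}

impl PageCacheBackend for ZeroBackend {
    fn read_page(&self, _idx: usize, frame: &Frame) -> Result<BioWaiter> {
        frame.writer().fill(0);
        Ok(BioWaiter::new())
    }

    fn write_page(&self, _idx: usize, _frame: &Frame) -> Result<BioWaiter> {
        // Nothing to persist for a purely in-memory backend.
        Ok(BioWaiter::new())
    }

    fn npages(&self) -> usize {
        self.npages
    }
}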
@@ -14,8 +14,8 @@ pub(crate) use alloc::{
 pub(crate) use core::{any::Any, ffi::CStr, fmt::Debug};
 pub(crate) use aster_frame::{
+mm::{Vaddr, PAGE_SIZE},
 sync::{Mutex, MutexGuard, RwLock, RwMutex, SpinLock, SpinLockGuard},
-vm::{Vaddr, PAGE_SIZE},
 };
 pub(crate) use bitflags::bitflags;
 pub(crate) use int_to_c_enum::TryFromInt;

@@ -4,8 +4,8 @@ use core::sync::atomic::Ordering;
 use aster_frame::{
 cpu::UserContext,
+mm::VmIo,
 user::{UserContextApi, UserSpace},
-vm::VmIo,
 };
 use aster_rights::Full;

@@ -17,7 +17,7 @@ use core::{
 };
 use align_ext::AlignExt;
-use aster_frame::vm::{VmIo, MAX_USERSPACE_VADDR};
+use aster_frame::mm::{VmIo, MAX_USERSPACE_VADDR};
 use aster_rights::{Full, Rights};
 use self::aux_vec::{AuxKey, AuxVec};

@@ -4,7 +4,7 @@
 //! When create a process from elf file, we will use the elf_load_info to construct the VmSpace
 use align_ext::AlignExt;
-use aster_frame::{task::Task, vm::VmIo};
+use aster_frame::{mm::VmIo, task::Task};
 use aster_rights::{Full, Rights};
 use xmas_elf::program::{self, ProgramHeader64};

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0
-use aster_frame::vm::VmIo;
+use aster_frame::mm::VmIo;
 use super::SyscallReturn;
 use crate::{

@@ -2,7 +2,7 @@
 use core::mem;
-use aster_frame::vm::VmIo;
+use aster_frame::mm::VmIo;
 use aster_rights::Full;
 use crate::{prelude::*, vm::vmar::Vmar};

@@ -2,7 +2,7 @@
 use core::time::Duration;
-use aster_frame::vm::VmIo;
+use aster_frame::mm::VmIo;
 use aster_rights::Full;
 use crate::{

@@ -15,8 +15,8 @@ use alloc::{boxed::Box, sync::Arc};
 use core::{mem::ManuallyDrop, time::Duration};
 use aster_frame::{
+mm::{Frame, VmIo, PAGE_SIZE},
 sync::SpinLock,
-vm::{VmFrame, VmIo, PAGE_SIZE},
 };
 use aster_rights::Rights;
 use aster_time::{read_monotonic_time, Instant};

@@ -196,9 +196,9 @@ struct Vdso {
 data: SpinLock<VdsoData>,
 /// The vmo of the entire VDSO, including the library text and the VDSO data.
 vmo: Arc<Vmo>,
-/// The `VmFrame` that contains the VDSO data. This frame is contained in and
+/// The `Frame` that contains the VDSO data. This frame is contained in and
 /// will not be removed from the VDSO vmo.
-data_frame: VmFrame,
+data_frame: Frame,
 }
 /// A `SpinLock` for the `seq` field in `VdsoData`.

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0
-use aster_frame::vm::PageFlags;
+use aster_frame::mm::PageFlags;
 use aster_rights::Rights;
 use bitflags::bitflags;

@@ -2,7 +2,7 @@
 use core::ops::Range;
-use aster_frame::vm::VmIo;
+use aster_frame::mm::VmIo;
 use aster_rights::Rights;
 use super::{

@@ -26,8 +26,8 @@ impl Vmar<Rights> {
 /// # Example
 ///
 /// ```
-/// use aster_std::prelude::*;
-/// use aster_std::vm::{PAGE_SIZE, Vmar, VmoOptions};
+/// use aster_nix::prelude::*;
+/// use aster_nix::vm::{PAGE_SIZE, Vmar, VmoOptions};
 ///
 /// let vmar = Vmar::new().unwrap();
 /// let vmo = VmoOptions::new(PAGE_SIZE).alloc().unwrap();
@@ -11,7 +11,7 @@ pub mod vm_mapping;
 use core::{cmp::min, ops::Range};
 use align_ext::AlignExt;
-use aster_frame::vm::{VmSpace, MAX_USERSPACE_VADDR};
+use aster_frame::mm::{VmSpace, MAX_USERSPACE_VADDR};
 use aster_rights::Rights;
 use self::{

@@ -39,15 +39,6 @@ use crate::{prelude::*, vm::perms::VmPerms};
 ///
 /// VMARs are implemented with two flavors of capabilities:
 /// the dynamic one (`Vmar<Rights>`) and the static one (`Vmar<R: TRights>).
-///
-/// # Implementation
-///
-/// `Vmar` provides high-level APIs for address space management by wrapping
-/// around its low-level counterpart `_frame::vm::VmFrames`.
-/// Compared with `VmFrames`,
-/// `Vmar` is easier to use (by offering more powerful APIs) and
-/// harder to misuse (thanks to its nature of being capability).
-///
 pub struct Vmar<R = Rights>(Arc<Vmar_>, R);
 pub trait VmarRightsOp {

@@ -2,7 +2,7 @@
 //! Options for allocating child VMARs.
-use aster_frame::{vm::PAGE_SIZE, Error, Result};
+use aster_frame::{mm::PAGE_SIZE, Error, Result};
 use super::Vmar;

@@ -14,7 +14,7 @@ use super::Vmar;
 /// A child VMAR created from a parent VMAR of _dynamic_ capability is also a
 /// _dynamic_ capability.
 /// ```
-/// use aster_std::vm::{PAGE_SIZE, Vmar};
+/// use aster_nix::vm::{PAGE_SIZE, Vmar};
 ///
 /// let parent_vmar = Vmar::new();
 /// let child_size = 10 * PAGE_SIZE;

@@ -29,8 +29,8 @@ use super::Vmar;
 /// A child VMAR created from a parent VMAR of _static_ capability is also a
 /// _static_ capability.
 /// ```
-/// use aster_std::prelude::*;
-/// use aster_std::vm::{PAGE_SIZE, Vmar};
+/// use aster_nix::prelude::*;
+/// use aster_nix::vm::{PAGE_SIZE, Vmar};
 ///
 /// let parent_vmar: Vmar<Full> = Vmar::new();
 /// let child_size = 10 * PAGE_SIZE;

@@ -135,7 +135,7 @@ impl<R> VmarChildOptions<R> {
 #[cfg(ktest)]
 mod test {
-use aster_frame::vm::VmIo;
+use aster_frame::mm::VmIo;
 use aster_rights::Full;
 use super::*;
@@ -2,7 +2,7 @@
 use core::ops::Range;
-use aster_frame::vm::VmIo;
+use aster_frame::mm::VmIo;
 use aster_rights::{Dup, Rights, TRightSet, TRights};
 use aster_rights_proc::require;

@@ -31,8 +31,8 @@ impl<R: TRights> Vmar<TRightSet<R>> {
 /// # Example
 ///
 /// ```
-/// use aster_std::prelude::*;
-/// use aster_std::vm::{PAGE_SIZE, Vmar, VmoOptions};
+/// use aster_nix::prelude::*;
+/// use aster_nix::vm::{PAGE_SIZE, Vmar, VmoOptions};
 ///
 /// let vmar = Vmar::<RightsWrapper<Full>>::new().unwrap();
 /// let vmo = VmoOptions::new(PAGE_SIZE).alloc().unwrap();

@@ -2,7 +2,7 @@
 use core::ops::Range;
-use aster_frame::vm::{PageFlags, VmFrame, VmFrameVec, VmIo, VmMapOptions, VmSpace};
+use aster_frame::mm::{Frame, PageFlags, VmFrameVec, VmIo, VmMapOptions, VmSpace};
 use super::{interval::Interval, is_intersected, Vmar, Vmar_};
 use crate::{

@@ -143,7 +143,7 @@ impl VmMapping {
 pub(super) fn map_one_page(
 &self,
 page_idx: usize,
-frame: VmFrame,
+frame: Frame,
 is_readonly: bool,
 ) -> Result<()> {
 let parent = self.parent.upgrade().unwrap();

@@ -458,7 +458,7 @@ impl VmMappingInner {
 vmo: &Vmo<Rights>,
 vm_space: &VmSpace,
 page_idx: usize,
-frame: VmFrame,
+frame: Frame,
 is_readonly: bool,
 ) -> Result<()> {
 let map_addr = self.page_map_addr(page_idx);

@@ -2,7 +2,7 @@
 use core::ops::Range;
-use aster_frame::vm::{VmFrame, VmIo};
+use aster_frame::mm::{Frame, VmIo};
 use aster_rights::{Rights, TRights};
 use super::{

@@ -68,7 +68,7 @@ impl Vmo<Rights> {
 }
 /// commit a page at specific offset
-pub fn commit_page(&self, offset: usize) -> Result<VmFrame> {
+pub fn commit_page(&self, offset: usize) -> Result<Frame> {
 self.check_rights(Rights::WRITE)?;
 self.0.commit_page(offset, false)
 }
@@ -7,7 +7,7 @@ use core::ops::Range;
 use align_ext::AlignExt;
 use aster_frame::{
 collections::xarray::{CursorMut, XArray, XMark},
-vm::{VmAllocOptions, VmFrame, VmReader, VmWriter},
+mm::{Frame, VmAllocOptions, VmReader, VmWriter},
 };
 use aster_rights::Rights;

@@ -139,8 +139,8 @@ pub(super) enum VmoMark {
 /// The VMO whose `pages` is marked as `CowVmo` may require a Copy-On-Write (COW) operation
 /// when performing a write action.
 CowVmo,
-/// Marks used for the `VmFrame` stored within the pages marked as `CowVmo`,
-/// `VmFrame`s marked as `ExclusivePage` are newly created through the COW mechanism
+/// Marks used for the `Frame` stored within the pages marked as `CowVmo`,
+/// `Frame`s marked as `ExclusivePage` are newly created through the COW mechanism
 /// and do not require further COW operations.
 ExclusivePage,
 }

@@ -154,19 +154,19 @@ impl From<VmoMark> for XMark {
 }
 }
-/// `Pages` is the struct that manages the `VmFrame`s stored in `Vmo_`.
+/// `Pages` is the struct that manages the `Frame`s stored in `Vmo_`.
 pub(super) enum Pages {
 /// `Pages` that cannot be resized. This kind of `Pages` will have a constant size.
-Nonresizable(Arc<Mutex<XArray<VmFrame, VmoMark>>>, usize),
+Nonresizable(Arc<Mutex<XArray<Frame, VmoMark>>>, usize),
 /// `Pages` that can be resized and have a variable size, and such `Pages` cannot
 /// be shared between different VMOs.
-Resizable(Mutex<(XArray<VmFrame, VmoMark>, usize)>),
+Resizable(Mutex<(XArray<Frame, VmoMark>, usize)>),
 }
 impl Pages {
 fn with<R, F>(&self, func: F) -> R
 where
-F: FnOnce(&mut XArray<VmFrame, VmoMark>, usize) -> R,
+F: FnOnce(&mut XArray<Frame, VmoMark>, usize) -> R,
 {
 match self {
 Self::Nonresizable(pages, size) => func(&mut pages.lock(), *size),

@@ -194,14 +194,14 @@ pub(super) struct Vmo_ {
 pages: Pages,
 }
-fn clone_page(page: &VmFrame) -> Result<VmFrame> {
+fn clone_page(page: &Frame) -> Result<Frame> {
 let new_page = VmAllocOptions::new(1).alloc_single()?;
 new_page.copy_from(page);
 Ok(new_page)
 }
 impl Vmo_ {
-/// Prepare a new `VmFrame` for the target index in pages, returning the new page as well as
+/// Prepare a new `Frame` for the target index in pages, returning the new page as well as
 /// whether this page needs to be marked as exclusive.
 ///
 /// Based on the type of VMO and the impending operation on the prepared page, there are 3 conditions:

@@ -216,7 +216,7 @@ impl Vmo_ {
 page_idx: usize,
 is_cow_vmo: bool,
 will_write: bool,
-) -> Result<(VmFrame, bool)> {
+) -> Result<(Frame, bool)> {
 let (page, should_mark_exclusive) = match &self.pager {
 None => {
 // Condition 1. The new anonymous page only need to be marked as `ExclusivePage`

@@ -227,8 +227,8 @@ impl Vmo_ {
 let page = pager.commit_page(page_idx)?;
 // The prerequisite for triggering the COW mechanism here is that the current
 // VMO requires COW and the prepared page is about to undergo a write operation.
-// At this point, the `VmFrame` obtained from the pager needs to be cloned to
-// avoid subsequent modifications affecting the content of the `VmFrame` in the pager.
+// At this point, the `Frame` obtained from the pager needs to be cloned to
+// avoid subsequent modifications affecting the content of the `Frame` in the pager.
 let trigger_cow = is_cow_vmo && will_write;
 if trigger_cow {
 // Condition 3.

@@ -244,10 +244,10 @@ impl Vmo_ {
 fn commit_with_cursor(
 &self,
-cursor: &mut CursorMut<'_, VmFrame, VmoMark>,
+cursor: &mut CursorMut<'_, Frame, VmoMark>,
 is_cow_vmo: bool,
 will_write: bool,
-) -> Result<VmFrame> {
+) -> Result<Frame> {
 let (new_page, is_exclusive) = {
 let is_exclusive = cursor.is_marked(VmoMark::ExclusivePage);
 if let Some(committed_page) = cursor.load() {

@@ -276,7 +276,7 @@ impl Vmo_ {
 /// Commit the page corresponding to the target offset in the VMO and return that page.
 /// If the current offset has already been committed, the page will be returned directly.
 /// During the commit process, the Copy-On-Write (COW) mechanism may be triggered depending on the circumstances.
-pub fn commit_page(&self, offset: usize, will_write: bool) -> Result<VmFrame> {
+pub fn commit_page(&self, offset: usize, will_write: bool) -> Result<Frame> {
 let page_idx = offset / PAGE_SIZE + self.page_idx_offset;
 self.pages.with(|pages, size| {
 let is_cow_vmo = pages.is_marked(VmoMark::CowVmo);

@@ -310,7 +310,7 @@ impl Vmo_ {
 will_write: bool,
 ) -> Result<()>
 where
-F: FnMut(VmFrame),
+F: FnMut(Frame),
 {
 self.pages.with(|pages, size| {
 if range.end > size {

@@ -348,7 +348,7 @@ impl Vmo_ {
 let mut read_offset = offset % PAGE_SIZE;
 let mut buf_writer: VmWriter = buf.into();
-let read = move |page: VmFrame| {
+let read = move |page: Frame| {
 page.reader().skip(read_offset).read(&mut buf_writer);
 read_offset = 0;
 };

@@ -363,7 +363,7 @@ impl Vmo_ {
 let mut write_offset = offset % PAGE_SIZE;
 let mut buf_reader: VmReader = buf.into();
-let write = move |page: VmFrame| {
+let write = move |page: Frame| {
 page.writer().skip(write_offset).write(&mut buf_reader);
 write_offset = 0;
 };

@@ -518,7 +518,7 @@ impl Vmo_ {
 fn decommit_pages(
 &self,
-pages: &mut XArray<VmFrame, VmoMark>,
+pages: &mut XArray<Frame, VmoMark>,
 range: Range<usize>,
 ) -> Result<()> {
 let raw_page_idx_range = get_page_idx_range(&range);

@@ -575,7 +575,7 @@ impl<R> Vmo<R> {
 self.0.is_page_committed(page_idx)
 }
-pub fn get_committed_frame(&self, page_idx: usize, write_page: bool) -> Result<VmFrame> {
+pub fn get_committed_frame(&self, page_idx: usize, write_page: bool) -> Result<Frame> {
 self.0.commit_page(page_idx * PAGE_SIZE, write_page)
 }
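As context for the doc comments above: a page obtained from the pager is cloned only when the VMO is marked `CowVmo` and the caller is about to write, so the pager's copy stays untouched, and the clone is marked `ExclusivePage`. A condensed, hypothetical restatement of that decision follows; the clone logic is the `clone_page` helper shown verbatim in this hunk, the `prepare_cow_page` function itself is illustrative only.

use aster_frame::mm::{Frame, VmAllocOptions};

// Hypothetical restatement of the COW decision in `prepare_page`/`commit_page`.
// Returns the page to install plus whether it must be marked `ExclusivePage`.
fn prepare_cow_page(page: Frame, is_cow_vmo: bool, will_write: bool) -> Result<(Frame, bool)> {
    let trigger_cow = is_cow_vmo && will_write;
    if trigger_cow {
        // Same as `clone_page` above: allocate a fresh frame and copy into it.
        let new_page = VmAllocOptions::new(1).alloc_single()?;
        new_page.copy_from(&page);
        Ok((new_page, true))
    } else {
        Ok((page, false))
    }
}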
@@ -7,7 +7,7 @@ use core::{marker::PhantomData, ops::Range};
 use align_ext::AlignExt;
 use aster_frame::{
 collections::xarray::XArray,
-vm::{VmAllocOptions, VmFrame},
+mm::{Frame, VmAllocOptions},
 };
 use aster_rights::{Dup, Rights, TRightSet, TRights, Write};
 use aster_rights_proc::require;

@@ -22,7 +22,7 @@ use crate::{prelude::*, vm::vmo::Vmo_};
 ///
 /// Creating a VMO as a _dynamic_ capability with full access rights:
 /// ```
-/// use aster_std::vm::{PAGE_SIZE, VmoOptions};
+/// use aster_nix::vm::{PAGE_SIZE, VmoOptions};
 ///
 /// let vmo = VmoOptions::new(PAGE_SIZE)
 /// .alloc()

@@ -31,8 +31,8 @@ use crate::{prelude::*, vm::vmo::Vmo_};
 ///
 /// Creating a VMO as a _static_ capability with all access rights:
 /// ```
-/// use aster_std::prelude::*;
-/// use aster_std::vm::{PAGE_SIZE, VmoOptions};
+/// use aster_nix::prelude::*;
+/// use aster_nix::vm::{PAGE_SIZE, VmoOptions};
 ///
 /// let vmo = VmoOptions::<Full>::new(PAGE_SIZE)
 /// .alloc()

@@ -43,7 +43,7 @@ use crate::{prelude::*, vm::vmo::Vmo_};
 /// physically contiguous:
 ///
 /// ```
-/// use aster_std::vm::{PAGE_SIZE, VmoOptions, VmoFlags};
+/// use aster_nix::vm::{PAGE_SIZE, VmoOptions, VmoFlags};
 ///
 /// let vmo = VmoOptions::new(10 * PAGE_SIZE)
 /// .flags(VmoFlags::RESIZABLE)

@@ -140,7 +140,7 @@ fn alloc_vmo_(size: usize, flags: VmoFlags, pager: Option<Arc<dyn Pager>>) -> Re
 })
 }
-fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<VmFrame, VmoMark>> {
+fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<Frame, VmoMark>> {
 if flags.contains(VmoFlags::CONTIGUOUS) {
 // if the vmo is continuous, we need to allocate frames for the vmo
 let frames_num = size / PAGE_SIZE;

@@ -168,7 +168,7 @@ fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<
 /// A child VMO created from a parent VMO of _dynamic_ capability is also a
 /// _dynamic_ capability.
 /// ```
-/// use aster_std::vm::{PAGE_SIZE, VmoOptions};
+/// use aster_nix::vm::{PAGE_SIZE, VmoOptions};
 ///
 /// let parent_vmo = VmoOptions::new(PAGE_SIZE)
 /// .alloc()

@@ -182,8 +182,8 @@ fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<
 /// A child VMO created from a parent VMO of _static_ capability is also a
 /// _static_ capability.
 /// ```
-/// use aster_std::prelude::*;
-/// use aster_std::vm::{PAGE_SIZE, VmoOptions, VmoChildOptions};
+/// use aster_nix::prelude::*;
+/// use aster_nix::vm::{PAGE_SIZE, VmoOptions, VmoChildOptions};
 ///
 /// let parent_vmo: Vmo<Full> = VmoOptions::new(PAGE_SIZE)
 /// .alloc()

@@ -200,7 +200,7 @@ fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<
 /// right regardless of whether the parent is writable or not.
 ///
 /// ```
-/// use aster_std::vm::{PAGE_SIZE, VmoOptions, VmoChildOptions};
+/// use aster_nix::vm::{PAGE_SIZE, VmoOptions, VmoChildOptions};
 ///
 /// let parent_vmo = VmoOptions::new(PAGE_SIZE)
 /// .alloc()

@@ -215,7 +215,7 @@ fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<
 /// The above rule for COW VMO children also applies to static capabilities.
 ///
 /// ```
-/// use aster_std::vm::{PAGE_SIZE, VmoOptions, VmoChildOptions};
+/// use aster_nix::vm::{PAGE_SIZE, VmoOptions, VmoChildOptions};
 ///
 /// let parent_vmo = VmoOptions::<TRights![Read, Dup]>::new(PAGE_SIZE)
 /// .alloc()

@@ -231,7 +231,7 @@ fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<
 /// Note that a slice VMO child and its parent cannot not be resizable.
 ///
 /// ```rust
-/// use aster_std::vm::{PAGE_SIZE, VmoOptions};
+/// use aster_nix::vm::{PAGE_SIZE, VmoOptions};
 ///
 /// let parent_vmo = VmoOptions::new(PAGE_SIZE)
 /// .alloc()

@@ -474,7 +474,7 @@ impl VmoChildType for VmoCowChild {}
 #[cfg(ktest)]
 mod test {
-use aster_frame::vm::VmIo;
+use aster_frame::mm::VmIo;
 use aster_rights::Full;
 use super::*;
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0
-use aster_frame::vm::VmFrame;
+use aster_frame::mm::Frame;
 use crate::prelude::*;

@@ -26,7 +26,7 @@ pub trait Pager: Send + Sync {
 /// whatever frame that may or may not be the same as the last time.
 ///
 /// It is up to the pager to decide the range of valid indices.
-fn commit_page(&self, idx: usize) -> Result<VmFrame>;
+fn commit_page(&self, idx: usize) -> Result<Frame>;
 /// Notify the pager that the frame at a specified index has been updated.
 ///

@@ -2,7 +2,7 @@
 use core::ops::Range;
-use aster_frame::vm::{VmFrame, VmIo};
+use aster_frame::mm::{Frame, VmIo};
 use aster_rights::{Dup, Rights, TRightSet, TRights, Write};
 use aster_rights_proc::require;

@@ -68,7 +68,7 @@ impl<R: TRights> Vmo<TRightSet<R>> {
 }
 /// commit a page at specific offset
-pub fn commit_page(&self, offset: usize) -> Result<VmFrame> {
+pub fn commit_page(&self, offset: usize) -> Result<Frame> {
 self.check_rights(Rights::WRITE)?;
 self.0.commit_page(offset, false)
 }
@@ -2,8 +2,8 @@
 use align_ext::AlignExt;
 use aster_frame::{
+mm::{Frame, Segment, VmReader, VmWriter},
 sync::WaitQueue,
-vm::{VmFrame, VmReader, VmSegment, VmWriter},
 };
 use int_to_c_enum::TryFromInt;

@@ -359,7 +359,7 @@ pub enum BioStatus {
 #[derive(Debug, Clone)]
 pub struct BioSegment {
 /// The contiguous pages on which this segment resides.
-pages: VmSegment,
+pages: Segment,
 /// The starting offset (in bytes) within the first page.
 /// The offset should always be aligned to the sector size and
 /// must not exceed the size of a single page.

@@ -373,8 +373,8 @@ pub struct BioSegment {
 const SECTOR_SIZE: u16 = super::SECTOR_SIZE as u16;
 impl<'a> BioSegment {
-/// Constructs a new `BioSegment` from `VmSegment`.
-pub fn from_segment(segment: VmSegment, offset: usize, len: usize) -> Self {
+/// Constructs a new `BioSegment` from `Segment`.
+pub fn from_segment(segment: Segment, offset: usize, len: usize) -> Self {
 assert!(offset + len <= segment.nbytes());
 Self {

@@ -384,12 +384,12 @@ impl<'a> BioSegment {
 }
 }
-/// Constructs a new `BioSegment` from `VmFrame`.
-pub fn from_frame(frame: VmFrame, offset: usize, len: usize) -> Self {
+/// Constructs a new `BioSegment` from `Frame`.
+pub fn from_frame(frame: Frame, offset: usize, len: usize) -> Self {
 assert!(offset + len <= super::BLOCK_SIZE);
 Self {
-pages: VmSegment::from(frame),
+pages: Segment::from(frame),
 offset: AlignedUsize::<SECTOR_SIZE>::new(offset).unwrap(),
 len: AlignedUsize::<SECTOR_SIZE>::new(len).unwrap(),
 }

@@ -411,7 +411,7 @@ impl<'a> BioSegment {
 }
 /// Returns the contiguous pages on which this segment resides.
-pub fn pages(&self) -> &VmSegment {
+pub fn pages(&self) -> &Segment {
 &self.pages
 }
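A brief usage sketch for the renamed constructors above: the helper function is hypothetical, while the allocation call and the `from_frame(frame, 0, BLOCK_SIZE)` pattern are the ones used by `create_bio_from_frame` later in this diff; the module paths for `BioSegment` and `BLOCK_SIZE` are assumed from the imports shown in these hunks.

use aster_block::{bio::BioSegment, BLOCK_SIZE};
use aster_frame::mm::{Frame, VmAllocOptions};

// Hypothetical: wrap one freshly allocated frame as a block-sized BioSegment.
fn frame_bio_segment() -> BioSegment {
    let frame: Frame = VmAllocOptions::new(1).alloc_single().unwrap();
    BioSegment::from_frame(frame, 0, BLOCK_SIZE)
}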
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0
-use aster_frame::vm::{VmAllocOptions, VmFrame, VmIo, VmSegment};
+use aster_frame::mm::{Frame, Segment, VmAllocOptions, VmIo};
 use super::{
 bio::{Bio, BioEnqueueError, BioSegment, BioStatus, BioType, BioWaiter, SubmittedBio},

@@ -16,7 +16,7 @@ impl dyn BlockDevice {
 pub fn read_blocks_sync(
 &self,
 bid: Bid,
-segment: &VmSegment,
+segment: &Segment,
 ) -> Result<BioStatus, BioEnqueueError> {
 let bio = create_bio_from_segment(BioType::Read, bid, segment);
 let status = bio.submit_sync(self)?;

@@ -24,20 +24,20 @@ impl dyn BlockDevice {
 }
 /// Asynchronously reads contiguous blocks starting from the `bid`.
-pub fn read_blocks(&self, bid: Bid, segment: &VmSegment) -> Result<BioWaiter, BioEnqueueError> {
+pub fn read_blocks(&self, bid: Bid, segment: &Segment) -> Result<BioWaiter, BioEnqueueError> {
 let bio = create_bio_from_segment(BioType::Read, bid, segment);
 bio.submit(self)
 }
 /// Synchronously reads one block indicated by the `bid`.
-pub fn read_block_sync(&self, bid: Bid, frame: &VmFrame) -> Result<BioStatus, BioEnqueueError> {
+pub fn read_block_sync(&self, bid: Bid, frame: &Frame) -> Result<BioStatus, BioEnqueueError> {
 let bio = create_bio_from_frame(BioType::Read, bid, frame);
 let status = bio.submit_sync(self)?;
 Ok(status)
 }
 /// Asynchronously reads one block indicated by the `bid`.
-pub fn read_block(&self, bid: Bid, frame: &VmFrame) -> Result<BioWaiter, BioEnqueueError> {
+pub fn read_block(&self, bid: Bid, frame: &Frame) -> Result<BioWaiter, BioEnqueueError> {
 let bio = create_bio_from_frame(BioType::Read, bid, frame);
 bio.submit(self)
 }

@@ -46,7 +46,7 @@ impl dyn BlockDevice {
 pub fn write_blocks_sync(
 &self,
 bid: Bid,
-segment: &VmSegment,
+segment: &Segment,
 ) -> Result<BioStatus, BioEnqueueError> {
 let bio = create_bio_from_segment(BioType::Write, bid, segment);
 let status = bio.submit_sync(self)?;

@@ -54,28 +54,20 @@ impl dyn BlockDevice {
 }
 /// Asynchronously writes contiguous blocks starting from the `bid`.
-pub fn write_blocks(
-&self,
-bid: Bid,
-segment: &VmSegment,
-) -> Result<BioWaiter, BioEnqueueError> {
+pub fn write_blocks(&self, bid: Bid, segment: &Segment) -> Result<BioWaiter, BioEnqueueError> {
 let bio = create_bio_from_segment(BioType::Write, bid, segment);
 bio.submit(self)
 }
 /// Synchronously writes one block indicated by the `bid`.
-pub fn write_block_sync(
-&self,
-bid: Bid,
-frame: &VmFrame,
-) -> Result<BioStatus, BioEnqueueError> {
+pub fn write_block_sync(&self, bid: Bid, frame: &Frame) -> Result<BioStatus, BioEnqueueError> {
 let bio = create_bio_from_frame(BioType::Write, bid, frame);
 let status = bio.submit_sync(self)?;
 Ok(status)
 }
 /// Asynchronously writes one block indicated by the `bid`.
-pub fn write_block(&self, bid: Bid, frame: &VmFrame) -> Result<BioWaiter, BioEnqueueError> {
+pub fn write_block(&self, bid: Bid, frame: &Frame) -> Result<BioWaiter, BioEnqueueError> {
 let bio = create_bio_from_frame(BioType::Write, bid, frame);
 bio.submit(self)
 }

@@ -202,7 +194,7 @@ impl dyn BlockDevice {
 }
 // TODO: Maybe we should have a builder for `Bio`.
-fn create_bio_from_segment(type_: BioType, bid: Bid, segment: &VmSegment) -> Bio {
+fn create_bio_from_segment(type_: BioType, bid: Bid, segment: &Segment) -> Bio {
 let bio_segment = BioSegment::from_segment(segment.clone(), 0, segment.nbytes());
 Bio::new(
 type_,

@@ -213,7 +205,7 @@ fn create_bio_from_segment(type_: BioType, bid: Bid, segment: &VmSegment) -> Bio
 }
 // TODO: Maybe we should have a builder for `Bio`.
-fn create_bio_from_frame(type_: BioType, bid: Bid, frame: &VmFrame) -> Bio {
+fn create_bio_from_frame(type_: BioType, bid: Bid, frame: &Frame) -> Bio {
 let bio_segment = BioSegment::from_frame(frame.clone(), 0, BLOCK_SIZE);
 Bio::new(
 type_,
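As a usage note, callers of the `dyn BlockDevice` helpers above allocate a `Frame` or `Segment` through `VmAllocOptions` and pass it in. A hedged sketch follows; the helper function and its error handling are illustrative, while the method signatures are the ones shown in this hunk.

use aster_block::{id::Bid, BlockDevice};
use aster_frame::mm::{Frame, VmAllocOptions};

// Hypothetical helper: synchronously read block 0 of `device` into a new frame.
fn read_first_block(device: &dyn BlockDevice) -> Frame {
    let frame: Frame = VmAllocOptions::new(1).alloc_single().unwrap();
    device
        .read_block_sync(Bid::new(0), &frame)
        .expect("block read failed");
    frame
}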
@@ -49,7 +49,7 @@ use self::{
 prelude::*,
 };
-pub const BLOCK_SIZE: usize = aster_frame::vm::PAGE_SIZE;
+pub const BLOCK_SIZE: usize = aster_frame::mm::PAGE_SIZE;
 pub const SECTOR_SIZE: usize = 512;
 pub trait BlockDevice: Send + Sync + Any + Debug {

@@ -10,7 +10,7 @@ extern crate alloc;
 use alloc::{collections::BTreeMap, fmt::Debug, string::String, sync::Arc, vec::Vec};
 use core::any::Any;
-use aster_frame::{sync::SpinLock, vm::VmReader};
+use aster_frame::{mm::VmReader, sync::SpinLock};
 use component::{init_component, ComponentInitError};
 use spin::Once;

@@ -16,8 +16,8 @@ use core::{
 use aster_frame::{
 boot,
 io_mem::IoMem,
+mm::{VmIo, PAGE_SIZE},
 sync::SpinLock,
-vm::{VmIo, PAGE_SIZE},
 };
 use component::{init_component, ComponentInitError};
 use font8x8::UnicodeFonts;

@@ -39,7 +39,7 @@ pub(crate) fn init() {
 let mut writer = {
 let framebuffer = boot::framebuffer_arg();
 let mut size = 0;
-for i in aster_frame::vm::FRAMEBUFFER_REGIONS.get().unwrap().iter() {
+for i in aster_frame::mm::FRAMEBUFFER_REGIONS.get().unwrap().iter() {
 size = i.len();
 }

@@ -4,8 +4,8 @@ use alloc::{collections::LinkedList, sync::Arc};
 use align_ext::AlignExt;
 use aster_frame::{
+mm::{Daddr, DmaDirection, DmaStream, HasDaddr, VmAllocOptions, VmReader, VmWriter, PAGE_SIZE},
 sync::SpinLock,
-vm::{Daddr, DmaDirection, DmaStream, HasDaddr, VmAllocOptions, VmReader, VmWriter, PAGE_SIZE},
 };
 use pod::Pod;
 use spin::Once;

@@ -9,8 +9,8 @@ use alloc::{
 use core::ops::Range;
 use aster_frame::{
+mm::{Daddr, DmaDirection, DmaStream, HasDaddr, VmAllocOptions, VmReader, VmWriter, PAGE_SIZE},
 sync::{RwLock, SpinLock},
-vm::{Daddr, DmaDirection, DmaStream, HasDaddr, VmAllocOptions, VmReader, VmWriter, PAGE_SIZE},
 };
 use bitvec::{array::BitArray, prelude::Lsb0};
 use ktest::ktest;

@@ -2,7 +2,7 @@
 use alloc::vec;
-use aster_frame::vm::VmWriter;
+use aster_frame::mm::VmWriter;
 use smoltcp::{phy, time::Instant};
 use crate::{buffer::RxBuffer, AnyNetworkDevice};

@@ -9,9 +9,9 @@ use aster_block::{
 };
 use aster_frame::{
 io_mem::IoMem,
+mm::{DmaDirection, DmaStream, DmaStreamSlice, VmAllocOptions, VmIo},
 sync::SpinLock,
 trap::TrapFrame,
-vm::{DmaDirection, DmaStream, DmaStreamSlice, VmAllocOptions, VmIo},
 };
 use aster_util::safe_ptr::SafePtr;
 use id_alloc::IdAlloc;

@@ -6,9 +6,9 @@ use core::hint::spin_loop;
 use aster_console::{AnyConsoleDevice, ConsoleCallback};
 use aster_frame::{
 io_mem::IoMem,
+mm::{DmaDirection, DmaStream, DmaStreamSlice, VmAllocOptions, VmReader},
 sync::{RwLock, SpinLock},
 trap::TrapFrame,
-vm::{DmaDirection, DmaStream, DmaStreamSlice, VmAllocOptions, VmReader},
 };
 use aster_util::safe_ptr::SafePtr;
 use log::debug;

@@ -10,10 +10,10 @@ use core::{fmt::Debug, iter, mem};
 use aster_frame::{
 io_mem::IoMem,
+mm::{DmaDirection, DmaStream, HasDaddr, VmAllocOptions, VmIo, PAGE_SIZE},
 offset_of,
 sync::{RwLock, SpinLock},
 trap::TrapFrame,
-vm::{DmaDirection, DmaStream, HasDaddr, VmAllocOptions, VmIo, PAGE_SIZE},
 };
 use aster_input::{
 key::{Key, KeyStatus},
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0
-use aster_frame::vm::{DmaCoherent, DmaStream, DmaStreamSlice, HasDaddr};
+use aster_frame::mm::{DmaCoherent, DmaStream, DmaStreamSlice, HasDaddr};
 use aster_network::{DmaSegment, RxBuffer, TxBuffer};
 /// A DMA-capable buffer.

@@ -10,8 +10,8 @@ use core::{
 use aster_frame::{
 io_mem::IoMem,
+mm::{DmaCoherent, VmAllocOptions},
 offset_of,
-vm::{DmaCoherent, VmAllocOptions},
 };
 use aster_rights::{Dup, TRightSet, TRights, Write};
 use aster_util::{field_ptr, safe_ptr::SafePtr};

@@ -74,7 +74,7 @@ impl VirtQueue {
 let (descriptor_ptr, avail_ring_ptr, used_ring_ptr) = if transport.is_legacy_version() {
 // FIXME: How about pci legacy?
-// Currently, we use one VmFrame to place the descriptors and avaliable rings, one VmFrame to place used rings
+// Currently, we use one Frame to place the descriptors and avaliable rings, one Frame to place used rings
 // because the virtio-mmio legacy required the address to be continuous. The max queue size is 128.
 if size > 128 {
 return Err(QueueError::InvalidArgs);

@@ -9,10 +9,10 @@ use aster_frame::{
 device::{MmioCommonDevice, VirtioMmioVersion},
 },
 io_mem::IoMem,
+mm::{DmaCoherent, PAGE_SIZE},
 offset_of,
 sync::RwLock,
 trap::IrqCallbackFunction,
-vm::{DmaCoherent, PAGE_SIZE},
 };
 use aster_rights::{ReadOp, WriteOp};
 use aster_util::{field_ptr, safe_ptr::SafePtr};

@@ -3,7 +3,7 @@
 use alloc::boxed::Box;
 use core::fmt::Debug;
-use aster_frame::{io_mem::IoMem, trap::IrqCallbackFunction, vm::DmaCoherent};
+use aster_frame::{io_mem::IoMem, mm::DmaCoherent, trap::IrqCallbackFunction};
 use aster_util::safe_ptr::SafePtr;
 use self::{mmio::virtio_mmio_init, pci::virtio_pci_init};

@@ -11,9 +11,9 @@ use aster_frame::{
 BusProbeError,
 },
 io_mem::IoMem,
+mm::DmaCoherent,
 offset_of,
 trap::IrqCallbackFunction,
-vm::DmaCoherent,
 };
 use aster_util::{field_ptr, safe_ptr::SafePtr};
 use log::{info, warn};

@@ -3,7 +3,7 @@
 use core::{fmt::Debug, marker::PhantomData};
 use aster_frame::{
-vm::{Daddr, DmaStream, HasDaddr, HasPaddr, Paddr, VmIo},
+mm::{Daddr, DmaStream, HasDaddr, HasPaddr, Paddr, VmIo},
 Result,
 };
 use aster_rights::{Dup, Exec, Full, Read, Signal, TRightSet, TRights, Write};

@@ -56,7 +56,7 @@ pub use typeflags_util::SetContain;
 ///
 /// The generic parameter `M` of `SafePtr<_, M, _>` must implement the `VmIo`
 /// trait. The most important `VmIo` types are `Vmar`, `Vmo`, `IoMem`, and
-/// `VmFrame`. The blanket implementations of `VmIo` also include pointer-like
+/// `Frame`. The blanket implementations of `VmIo` also include pointer-like
 /// types that refer to a `VmIo` type. Some examples are `&Vmo`, `Box<Vmar>`,
 /// and `Arc<IoMem>`.
 ///

@@ -382,7 +382,7 @@ impl<T, M: Debug, R> Debug for SafePtr<T, M, R> {
 macro_rules! field_ptr {
 ($ptr:expr, $type:ty, $($field:tt)+) => {{
 use aster_frame::offset_of;
-use aster_frame::vm::VmIo;
+use aster_frame::mm::VmIo;
 use aster_rights::Dup;
 use aster_rights::TRightSet;
 use aster_rights::TRights;