Mirror of https://github.com/asterinas/asterinas.git (synced 2025-06-21 16:33:24 +00:00)

commit 909639fd70 (parent d930829866)
committed by: Tate, Hongliang Tian

    Separate SegmentSlice from Segment
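In short: the cheap, `Arc`-based duplication and sub-range views move out of `ostd`'s `Segment` and into the new `aster_util::segment_slice::SegmentSlice` type, while `Segment` becomes a plain handle over contiguous pages whose clone bumps one reference count per page. A minimal sketch of the resulting usage, based only on the APIs introduced in the diff below (kernel context assumed, not part of the commit):

    use aster_util::segment_slice::SegmentSlice;
    use ostd::mm::FrameAllocOptions;

    // Allocate two contiguous untyped pages as a `Segment`.
    let segment = FrameAllocOptions::new(2).alloc_contiguous().unwrap();

    // Wrap it once; each further clone is a single `Arc` refcount bump,
    // no matter how many pages the segment covers.
    let slice = SegmentSlice::from(segment);
    let another_handle = slice.clone();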
@@ -1,9 +1,10 @@
 // SPDX-License-Identifier: MPL-2.0
 
 use align_ext::AlignExt;
+use aster_util::segment_slice::SegmentSlice;
 use int_to_c_enum::TryFromInt;
 use ostd::{
-    mm::{Frame, Infallible, Segment, VmReader, VmWriter},
+    mm::{Frame, Infallible, VmReader, VmWriter},
     sync::WaitQueue,
 };
 
@@ -366,7 +367,7 @@ pub enum BioStatus {
 #[derive(Debug, Clone)]
 pub struct BioSegment {
     /// The contiguous pages on which this segment resides.
-    pages: Segment,
+    pages: SegmentSlice,
     /// The starting offset (in bytes) within the first page.
     /// The offset should always be aligned to the sector size and
     /// must not exceed the size of a single page.
@@ -381,9 +382,8 @@ const SECTOR_SIZE: u16 = super::SECTOR_SIZE as u16;
 
 impl<'a> BioSegment {
     /// Constructs a new `BioSegment` from `Segment`.
-    pub fn from_segment(segment: Segment, offset: usize, len: usize) -> Self {
+    pub fn from_segment(segment: SegmentSlice, offset: usize, len: usize) -> Self {
         assert!(offset + len <= segment.nbytes());
-
         Self {
             pages: segment.range(frame_range(&(offset..offset + len))),
             offset: AlignedUsize::<SECTOR_SIZE>::new(offset % super::BLOCK_SIZE).unwrap(),
@@ -396,7 +396,7 @@ impl<'a> BioSegment {
         assert!(offset + len <= super::BLOCK_SIZE);
 
         Self {
-            pages: Segment::from(frame),
+            pages: SegmentSlice::from(frame),
            offset: AlignedUsize::<SECTOR_SIZE>::new(offset).unwrap(),
            len: AlignedUsize::<SECTOR_SIZE>::new(len).unwrap(),
        }
@@ -418,7 +418,7 @@ impl<'a> BioSegment {
     }
 
     /// Returns the contiguous pages on which this segment resides.
-    pub fn pages(&self) -> &Segment {
+    pub fn pages(&self) -> &SegmentSlice {
         &self.pages
     }
 
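Since `BioSegment` now stores a `SegmentSlice`, duplicating a `BioSegment` along the I/O submission path costs one `Arc` refcount bump rather than per-page bookkeeping. A sketch of constructing one over freshly allocated pages, following the call sites later in this diff (`.into()` converts a `Segment` into a `SegmentSlice`; `BLOCK_SIZE` is the block layer's constant):

    let segment = FrameAllocOptions::new(1)
        .uninit(true)
        .alloc_contiguous()
        .unwrap();
    let bio_segment = BioSegment::from_segment(segment.into(), 0, BLOCK_SIZE);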
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: MPL-2.0
 
+use aster_util::segment_slice::SegmentSlice;
 use ostd::mm::{
-    FallibleVmRead, FallibleVmWrite, Frame, FrameAllocOptions, Segment, VmIo, VmReader, VmWriter,
+    FallibleVmRead, FallibleVmWrite, Frame, FrameAllocOptions, VmIo, VmReader, VmWriter,
 };
 
 use super::{
@@ -16,7 +17,11 @@ use crate::prelude::*;
 // TODO: Add API to submit bio with multiple segments in scatter/gather manner.
 impl dyn BlockDevice {
     /// Synchronously reads contiguous blocks starting from the `bid`.
-    pub fn read_blocks(&self, bid: Bid, segment: &Segment) -> Result<BioStatus, BioEnqueueError> {
+    pub fn read_blocks(
+        &self,
+        bid: Bid,
+        segment: &SegmentSlice,
+    ) -> Result<BioStatus, BioEnqueueError> {
         let bio = create_bio_from_segment(BioType::Read, bid, segment);
         let status = bio.submit_and_wait(self)?;
         Ok(status)
@@ -26,7 +31,7 @@ impl dyn BlockDevice {
     pub fn read_blocks_async(
         &self,
         bid: Bid,
-        segment: &Segment,
+        segment: &SegmentSlice,
     ) -> Result<BioWaiter, BioEnqueueError> {
         let bio = create_bio_from_segment(BioType::Read, bid, segment);
         bio.submit(self)
@@ -46,7 +51,11 @@ impl dyn BlockDevice {
     }
 
     /// Synchronously writes contiguous blocks starting from the `bid`.
-    pub fn write_blocks(&self, bid: Bid, segment: &Segment) -> Result<BioStatus, BioEnqueueError> {
+    pub fn write_blocks(
+        &self,
+        bid: Bid,
+        segment: &SegmentSlice,
+    ) -> Result<BioStatus, BioEnqueueError> {
         let bio = create_bio_from_segment(BioType::Write, bid, segment);
         let status = bio.submit_and_wait(self)?;
         Ok(status)
@@ -56,7 +65,7 @@ impl dyn BlockDevice {
     pub fn write_blocks_async(
         &self,
         bid: Bid,
-        segment: &Segment,
+        segment: &SegmentSlice,
     ) -> Result<BioWaiter, BioEnqueueError> {
         let bio = create_bio_from_segment(BioType::Write, bid, segment);
         bio.submit(self)
@@ -96,7 +105,8 @@ impl VmIo for dyn BlockDevice {
             let segment = FrameAllocOptions::new(num_blocks as usize)
                 .uninit(true)
                 .alloc_contiguous()?;
-            let bio_segment = BioSegment::from_segment(segment, offset % BLOCK_SIZE, read_len);
+            let bio_segment =
+                BioSegment::from_segment(segment.into(), offset % BLOCK_SIZE, read_len);
 
             (
                 Bio::new(
@@ -147,7 +157,7 @@ impl VmIo for dyn BlockDevice {
                 .skip(offset % BLOCK_SIZE)
                 .write_fallible(reader)
                 .map_err(|(e, _)| e)?;
-            let bio_segment = BioSegment::from_segment(segment, offset % BLOCK_SIZE, len);
+            let bio_segment = BioSegment::from_segment(segment.into(), offset % BLOCK_SIZE, len);
             Bio::new(
                 BioType::Write,
                 Sid::from_offset(offset),
@@ -188,7 +198,7 @@ impl dyn BlockDevice {
                 .writer()
                 .skip(offset % BLOCK_SIZE)
                 .write(&mut buf.into());
-            let bio_segment = BioSegment::from_segment(segment, offset % BLOCK_SIZE, len);
+            let bio_segment = BioSegment::from_segment(segment.into(), offset % BLOCK_SIZE, len);
             Bio::new(
                 BioType::Write,
                 Sid::from_offset(offset),
@@ -203,7 +213,7 @@ impl dyn BlockDevice {
 }
 
 // TODO: Maybe we should have a builder for `Bio`.
-fn create_bio_from_segment(type_: BioType, bid: Bid, segment: &Segment) -> Bio {
+fn create_bio_from_segment(type_: BioType, bid: Bid, segment: &SegmentSlice) -> Bio {
     let bio_segment = BioSegment::from_segment(segment.clone(), 0, segment.nbytes());
     Bio::new(
         type_,
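A sketch of driving the widened `read_blocks` signature from a caller's side; `block_device: &dyn BlockDevice` is an assumed binding, and the `.into()` follows the ext2 call sites later in this diff:

    let segment: SegmentSlice = FrameAllocOptions::new(1)
        .uninit(true)
        .alloc_contiguous()
        .unwrap()
        .into();
    match block_device.read_blocks(Bid::new(0), &segment) {
        Ok(BioStatus::Complete) => { /* the block's bytes are now in `segment` */ }
        _ => { /* enqueue failure or I/O error */ }
    }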
@@ -152,9 +152,9 @@ impl DmaPage {
         pool: Weak<DmaPool>,
     ) -> Result<Self, ostd::Error> {
         let dma_stream = {
-            let vm_segment = FrameAllocOptions::new(1).alloc_contiguous()?;
+            let segment = FrameAllocOptions::new(1).alloc_contiguous()?;
 
-            DmaStream::map(vm_segment, direction, is_cache_coherent)
+            DmaStream::map(segment, direction, is_cache_coherent)
                 .map_err(|_| ostd::Error::AccessDenied)?
         };
 
@@ -419,7 +419,8 @@ impl DeviceInner {
             .flat_map(|bio| {
                 bio.segments().iter().map(|segment| {
                     let dma_stream =
-                        DmaStream::map(segment.pages().clone(), dma_direction, false).unwrap();
+                        DmaStream::map(segment.pages().clone().into(), dma_direction, false)
+                            .unwrap();
                     (dma_stream, segment.offset(), segment.nbytes())
                 })
             })
@@ -81,10 +81,8 @@ impl VirtQueue {
         let desc_size = size_of::<Descriptor>() * size as usize;
 
         let (seg1, seg2) = {
-            let continue_segment = FrameAllocOptions::new(2).alloc_contiguous().unwrap();
-            let seg1 = continue_segment.range(0..1);
-            let seg2 = continue_segment.range(1..2);
-            (seg1, seg2)
+            let segment = FrameAllocOptions::new(2).alloc_contiguous().unwrap();
+            segment.split(ostd::mm::PAGE_SIZE)
         };
         let desc_frame_ptr: SafePtr<Descriptor, DmaCoherent> =
             SafePtr::new(DmaCoherent::map(seg1, true).unwrap(), 0);
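The virtqueue hunk above is the first user of the new `Segment::split`: one two-page allocation is carved into two single-page segments that can then be DMA-mapped independently. The pattern in isolation (a sketch, not part of the commit):

    use ostd::mm::{FrameAllocOptions, PAGE_SIZE};

    let segment = FrameAllocOptions::new(2).alloc_contiguous().unwrap();
    // `split` consumes the segment; the offset must be page-aligned and
    // strictly inside it, so each half is non-empty.
    let (seg1, seg2) = segment.split(PAGE_SIZE);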
@@ -10,5 +10,6 @@ extern crate alloc;
 pub mod coeff;
 pub mod dup;
 pub mod safe_ptr;
+pub mod segment_slice;
 pub mod slot_vec;
 pub mod union_read_ptr;
kernel/libs/aster-util/src/segment_slice.rs (new file, 148 lines added)
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: MPL-2.0
+
+// SPDX-License-Identifier: MPL-2.0
+
+//! Provides [`SegmentSlice`] for quick duplication and slicing over [`Segment`].
+
+use alloc::sync::Arc;
+use core::ops::Range;
+
+use ostd::{
+    mm::{
+        FallibleVmRead, FallibleVmWrite, Frame, Infallible, Paddr, Segment, VmIo, VmReader,
+        VmWriter, PAGE_SIZE,
+    },
+    Error, Result,
+};
+
+/// A reference to a slice of a [`Segment`].
+///
+/// Cloning a [`SegmentSlice`] is cheap, as it only increments one reference
+/// count. While cloning a [`Segment`] will increment the reference count of
+/// many underlying pages.
+///
+/// The downside is that the [`SegmentSlice`] requires heap allocation. Also,
+/// if any [`SegmentSlice`] of the original [`Segment`] is alive, all pages in
+/// the original [`Segment`], including the pages that are not referenced, will
+/// not be freed.
+#[derive(Debug, Clone)]
+pub struct SegmentSlice {
+    inner: Arc<Segment>,
+    range: Range<usize>,
+}
+
+impl SegmentSlice {
+    /// Returns a part of the `Segment`.
+    ///
+    /// # Panics
+    ///
+    /// If `range` is not within the range of this `Segment`,
+    /// then the method panics.
+    pub fn range(&self, range: Range<usize>) -> Self {
+        let orig_range = &self.range;
+        let adj_range = (range.start + orig_range.start)..(range.end + orig_range.start);
+        assert!(!adj_range.is_empty() && adj_range.end <= orig_range.end);
+
+        Self {
+            inner: self.inner.clone(),
+            range: adj_range,
+        }
+    }
+
+    /// Returns the start physical address.
+    pub fn start_paddr(&self) -> Paddr {
+        self.start_frame_index() * PAGE_SIZE
+    }
+
+    /// Returns the end physical address.
+    pub fn end_paddr(&self) -> Paddr {
+        (self.start_frame_index() + self.nframes()) * PAGE_SIZE
+    }
+
+    /// Returns the number of page frames.
+    pub fn nframes(&self) -> usize {
+        self.range.len()
+    }
+
+    /// Returns the number of bytes.
+    pub fn nbytes(&self) -> usize {
+        self.nframes() * PAGE_SIZE
+    }
+
+    /// Gets a reader for the slice.
+    pub fn reader(&self) -> VmReader<'_, Infallible> {
+        self.inner
+            .reader()
+            .skip(self.start_paddr() - self.inner.start_paddr())
+            .limit(self.nbytes())
+    }
+
+    /// Gets a writer for the slice.
+    pub fn writer(&self) -> VmWriter<'_, Infallible> {
+        self.inner
+            .writer()
+            .skip(self.start_paddr() - self.inner.start_paddr())
+            .limit(self.nbytes())
+    }
+
+    fn start_frame_index(&self) -> usize {
+        self.inner.start_paddr() / PAGE_SIZE + self.range.start
+    }
+}
+
+impl VmIo for SegmentSlice {
+    fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> {
+        let read_len = writer.avail();
+        // Do bound check with potential integer overflow in mind
+        let max_offset = offset.checked_add(read_len).ok_or(Error::Overflow)?;
+        if max_offset > self.nbytes() {
+            return Err(Error::InvalidArgs);
+        }
+        let len = self
+            .reader()
+            .skip(offset)
+            .read_fallible(writer)
+            .map_err(|(e, _)| e)?;
+        debug_assert!(len == read_len);
+        Ok(())
+    }
+
+    fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> {
+        let write_len = reader.remain();
+        // Do bound check with potential integer overflow in mind
+        let max_offset = offset.checked_add(reader.remain()).ok_or(Error::Overflow)?;
+        if max_offset > self.nbytes() {
+            return Err(Error::InvalidArgs);
+        }
+        let len = self
+            .writer()
+            .skip(offset)
+            .write_fallible(reader)
+            .map_err(|(e, _)| e)?;
+        debug_assert!(len == write_len);
+        Ok(())
+    }
+}
+
+impl From<Segment> for SegmentSlice {
+    fn from(segment: Segment) -> Self {
+        Self {
+            inner: Arc::new(segment),
+            range: 0..1,
+        }
+    }
+}
+
+impl From<SegmentSlice> for Segment {
+    fn from(slice: SegmentSlice) -> Self {
+        let start = slice.range.start * PAGE_SIZE;
+        let end = slice.range.end * PAGE_SIZE;
+        slice.inner.slice(&(start..end))
+    }
+}
+
+impl From<Frame> for SegmentSlice {
+    fn from(frame: Frame) -> Self {
+        SegmentSlice::from(Segment::from(frame))
+    }
+}
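A sketch of the new type in use, relying only on the API added above (`write_bytes` and `read_bytes` come with the `VmIo` implementation; kernel context assumed):

    let segment = FrameAllocOptions::new(1).alloc_contiguous().unwrap();
    let slice = SegmentSlice::from(segment);
    let handle = slice.clone(); // one `Arc` bump, no per-page refcount traffic

    slice.write_bytes(0, &[0xffu8; 16]).unwrap();
    let mut buf = [0u8; 16];
    handle.read_bytes(0, &mut buf).unwrap(); // both views share the same pages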
@@ -22,7 +22,7 @@ mod test {
         BlockDevice, BlockDeviceMeta,
     };
     use ostd::{
-        mm::{FrameAllocOptions, Segment, VmIo},
+        mm::{FrameAllocOptions, Segment, VmIo, PAGE_SIZE},
         prelude::*,
     };
     use rand::{rngs::SmallRng, RngCore, SeedableRng};
@@ -48,7 +48,7 @@ mod test {
         }
 
         pub fn sectors_count(&self) -> usize {
-            self.0.nframes() * (PAGE_SIZE / SECTOR_SIZE)
+            self.0.nbytes() / SECTOR_SIZE
         }
     }
 
@@ -111,13 +111,10 @@ mod test {
 
     /// Read exfat disk image
     fn new_vm_segment_from_image() -> Segment {
-        let vm_segment = {
-            FrameAllocOptions::new(EXFAT_IMAGE.len() / PAGE_SIZE)
-                .is_contiguous(true)
-                .uninit(true)
-                .alloc_contiguous()
-                .unwrap()
-        };
+        let vm_segment = FrameAllocOptions::new(EXFAT_IMAGE.len().div_ceil(PAGE_SIZE))
+            .uninit(true)
+            .alloc_contiguous()
+            .unwrap();
 
         vm_segment.write_bytes(0, EXFAT_IMAGE).unwrap();
         vm_segment
@@ -28,7 +28,7 @@ struct BlockGroupImpl {
 impl BlockGroup {
     /// Loads and constructs a block group.
     pub fn load(
-        group_descriptors_segment: &Segment,
+        group_descriptors_segment: &SegmentSlice,
         idx: usize,
         block_device: &dyn BlockDevice,
         super_block: &SuperBlock,
@@ -23,7 +23,7 @@ pub struct Ext2 {
     blocks_per_group: Ext2Bid,
     inode_size: usize,
     block_size: usize,
-    group_descriptors_segment: Segment,
+    group_descriptors_segment: SegmentSlice,
     self_ref: Weak<Self>,
 }
 
@@ -48,7 +48,8 @@ impl Ext2 {
             .div_ceil(BLOCK_SIZE);
         let segment = FrameAllocOptions::new(npages)
             .uninit(true)
-            .alloc_contiguous()?;
+            .alloc_contiguous()?
+            .into();
         match block_device.read_blocks(super_block.group_descriptors_bid(0), &segment)? {
             BioStatus::Complete => (),
             err_status => {
@@ -61,7 +62,7 @@ impl Ext2 {
         // Load the block groups information
         let load_block_groups = |fs: Weak<Ext2>,
                                  block_device: &dyn BlockDevice,
-                                 group_descriptors_segment: &Segment|
+                                 group_descriptors_segment: &SegmentSlice|
          -> Result<Vec<BlockGroup>> {
             let block_groups_count = super_block.block_groups_count() as usize;
             let mut block_groups = Vec::with_capacity(block_groups_count);
@@ -297,7 +298,7 @@ impl Ext2 {
     }
 
     /// Reads contiguous blocks starting from the `bid` synchronously.
-    pub(super) fn read_blocks(&self, bid: Ext2Bid, segment: &Segment) -> Result<()> {
+    pub(super) fn read_blocks(&self, bid: Ext2Bid, segment: &SegmentSlice) -> Result<()> {
         let status = self
             .block_device
             .read_blocks(Bid::new(bid as u64), segment)?;
@@ -308,7 +309,11 @@ impl Ext2 {
     }
 
     /// Reads contiguous blocks starting from the `bid` asynchronously.
-    pub(super) fn read_blocks_async(&self, bid: Ext2Bid, segment: &Segment) -> Result<BioWaiter> {
+    pub(super) fn read_blocks_async(
+        &self,
+        bid: Ext2Bid,
+        segment: &SegmentSlice,
+    ) -> Result<BioWaiter> {
         let waiter = self
             .block_device
             .read_blocks_async(Bid::new(bid as u64), segment)?;
@@ -333,7 +338,7 @@ impl Ext2 {
     }
 
     /// Writes contiguous blocks starting from the `bid` synchronously.
-    pub(super) fn write_blocks(&self, bid: Ext2Bid, segment: &Segment) -> Result<()> {
+    pub(super) fn write_blocks(&self, bid: Ext2Bid, segment: &SegmentSlice) -> Result<()> {
         let status = self
             .block_device
             .write_blocks(Bid::new(bid as u64), segment)?;
@@ -344,7 +349,11 @@ impl Ext2 {
     }
 
     /// Writes contiguous blocks starting from the `bid` asynchronously.
-    pub(super) fn write_blocks_async(&self, bid: Ext2Bid, segment: &Segment) -> Result<BioWaiter> {
+    pub(super) fn write_blocks_async(
+        &self,
+        bid: Ext2Bid,
+        segment: &SegmentSlice,
+    ) -> Result<BioWaiter> {
         let waiter = self
             .block_device
             .write_blocks_async(Bid::new(bid as u64), segment)?;
@@ -944,7 +944,8 @@ impl Inner {
         let buf_nblocks = read_len / BLOCK_SIZE;
         let segment = FrameAllocOptions::new(buf_nblocks)
             .uninit(true)
-            .alloc_contiguous()?;
+            .alloc_contiguous()?
+            .into();
 
         self.inode_impl.read_blocks(start_bid, &segment)?;
         segment.read(0, writer)?;
@@ -986,7 +987,7 @@ impl Inner {
                .uninit(true)
                .alloc_contiguous()?;
            segment.write(0, reader)?;
-            segment
+            segment.into()
         };
 
         self.inode_impl.write_blocks(start_bid, &segment)?;
@@ -1128,7 +1129,7 @@ impl InodeImpl_ {
         self.inode().fs()
     }
 
-    pub fn read_blocks_async(&self, bid: Ext2Bid, blocks: &Segment) -> Result<BioWaiter> {
+    pub fn read_blocks_async(&self, bid: Ext2Bid, blocks: &SegmentSlice) -> Result<BioWaiter> {
         let nblocks = blocks.nframes();
         let mut segments = Vec::new();
 
@@ -1183,14 +1184,14 @@ impl InodeImpl_ {
         Ok(bio_waiter)
     }
 
-    pub fn read_blocks(&self, bid: Ext2Bid, blocks: &Segment) -> Result<()> {
+    pub fn read_blocks(&self, bid: Ext2Bid, blocks: &SegmentSlice) -> Result<()> {
         match self.read_blocks_async(bid, blocks)?.wait() {
             Some(BioStatus::Complete) => Ok(()),
             _ => return_errno!(Errno::EIO),
         }
     }
 
-    pub fn write_blocks_async(&self, bid: Ext2Bid, blocks: &Segment) -> Result<BioWaiter> {
+    pub fn write_blocks_async(&self, bid: Ext2Bid, blocks: &SegmentSlice) -> Result<BioWaiter> {
         let nblocks = blocks.nframes();
         let mut bio_waiter = BioWaiter::new();
 
@@ -1214,7 +1215,7 @@ impl InodeImpl_ {
         Ok(bio_waiter)
     }
 
-    pub fn write_blocks(&self, bid: Ext2Bid, blocks: &Segment) -> Result<()> {
+    pub fn write_blocks(&self, bid: Ext2Bid, blocks: &SegmentSlice) -> Result<()> {
         match self.write_blocks_async(bid, blocks)?.wait() {
             Some(BioStatus::Complete) => Ok(()),
             _ => return_errno!(Errno::EIO),
@@ -1873,20 +1874,20 @@ impl InodeImpl {
     }
 
     /// Reads one or multiple blocks to the segment start from `bid` asynchronously.
-    pub fn read_blocks_async(&self, bid: Ext2Bid, blocks: &Segment) -> Result<BioWaiter> {
+    pub fn read_blocks_async(&self, bid: Ext2Bid, blocks: &SegmentSlice) -> Result<BioWaiter> {
         self.0.read().read_blocks_async(bid, blocks)
     }
 
-    pub fn read_blocks(&self, bid: Ext2Bid, blocks: &Segment) -> Result<()> {
+    pub fn read_blocks(&self, bid: Ext2Bid, blocks: &SegmentSlice) -> Result<()> {
         self.0.read().read_blocks(bid, blocks)
     }
 
     /// Writes one or multiple blocks from the segment start from `bid` asynchronously.
-    pub fn write_blocks_async(&self, bid: Ext2Bid, blocks: &Segment) -> Result<BioWaiter> {
+    pub fn write_blocks_async(&self, bid: Ext2Bid, blocks: &SegmentSlice) -> Result<BioWaiter> {
         self.0.read().write_blocks_async(bid, blocks)
     }
 
-    pub fn write_blocks(&self, bid: Ext2Bid, blocks: &Segment) -> Result<()> {
+    pub fn write_blocks(&self, bid: Ext2Bid, blocks: &SegmentSlice) -> Result<()> {
         self.0.read().write_blocks(bid, blocks)
     }
 
@@ -1962,7 +1963,9 @@ impl InodeImpl {
 
         // TODO: If we can persist the `blocks_hole_desc`, Can we avoid zeroing all the holes on the device?
         debug_assert!(max_batch_len > 0);
-        let zeroed_segment = FrameAllocOptions::new(max_batch_len).alloc_contiguous()?;
+        let zeroed_segment: SegmentSlice = FrameAllocOptions::new(max_batch_len)
+            .alloc_contiguous()?
+            .into();
         for (start_bid, batch_len) in data_hole_batches {
             inner.write_blocks(start_bid, &zeroed_segment.range(0..batch_len))?;
         }
@@ -2001,12 +2004,12 @@ impl InodeImpl {
 impl PageCacheBackend for InodeImpl {
     fn read_page_async(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> {
         let bid = idx as Ext2Bid;
-        self.read_blocks_async(bid, &Segment::from(frame.clone()))
+        self.read_blocks_async(bid, &SegmentSlice::from(frame.clone()))
     }
 
     fn write_page_async(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> {
         let bid = idx as Ext2Bid;
-        self.write_blocks_async(bid, &Segment::from(frame.clone()))
+        self.write_blocks_async(bid, &SegmentSlice::from(frame.clone()))
     }
 
     fn npages(&self) -> usize {
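The page-cache hunks boil down to wrapping a single cached `Frame` into a one-frame slice before handing it to the block layer. A sketch of that conversion chain, with `frame`, `bid`, and `inode` as assumed bindings from the surrounding code:

    // `Frame` -> `Segment` -> `SegmentSlice`, via the `From` impls added by
    // this commit; refcount bookkeeping is handled along the way.
    let one_frame = SegmentSlice::from(frame.clone());
    debug_assert_eq!(one_frame.nframes(), 1);
    let _waiter = inode.read_blocks_async(bid, &one_frame); // -> Result<BioWaiter>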
@@ -12,8 +12,9 @@ pub(super) use aster_block::{
     BlockDevice, BLOCK_SIZE,
 };
 pub(super) use aster_rights::Full;
+pub(super) use aster_util::segment_slice::SegmentSlice;
 pub(super) use ostd::{
-    mm::{Frame, FrameAllocOptions, Segment, VmIo},
+    mm::{Frame, FrameAllocOptions, VmIo},
     sync::{RwMutex, RwMutexReadGuard, RwMutexWriteGuard},
 };
 pub(super) use static_assertions::const_assert;
@@ -35,13 +35,15 @@ pub fn main() {
 }
 
 fn create_user_space(program: &[u8]) -> UserSpace {
-    let nframes = program.len().align_up(PAGE_SIZE) / PAGE_SIZE;
+    let nbytes = program.len().align_up(PAGE_SIZE);
     let user_pages = {
-        let vm_frames = FrameAllocOptions::new(nframes).alloc().unwrap();
+        let segment = FrameAllocOptions::new(nbytes / PAGE_SIZE)
+            .alloc_contiguous()
+            .unwrap();
         // Physical memory pages can be only accessed
-        // via the Frame abstraction.
-        vm_frames.write_bytes(0, program).unwrap();
-        vm_frames
+        // via the `Frame` or `Segment` abstraction.
+        segment.write_bytes(0, program).unwrap();
+        segment
     };
     let user_address_space = {
         const MAP_ADDR: Vaddr = 0x0040_0000; // The map addr for statically-linked executable
@@ -50,9 +52,7 @@ fn create_user_space(program: &[u8]) -> UserSpace {
         // created and manipulated safely through
         // the `VmSpace` abstraction.
         let vm_space = VmSpace::new();
-        let mut cursor = vm_space
-            .cursor_mut(&(MAP_ADDR..MAP_ADDR + nframes * PAGE_SIZE))
-            .unwrap();
+        let mut cursor = vm_space.cursor_mut(&(MAP_ADDR..MAP_ADDR + nbytes)).unwrap();
         let map_prop = PageProperty::new(PageFlags::RWX, CachePolicy::Writeback);
         for frame in user_pages {
             cursor.map(frame, map_prop);
@@ -57,7 +57,7 @@ impl DmaCoherent {
         vm_segment: Segment,
         is_cache_coherent: bool,
     ) -> core::result::Result<Self, DmaError> {
-        let frame_count = vm_segment.nframes();
+        let frame_count = vm_segment.nbytes() / PAGE_SIZE;
         let start_paddr = vm_segment.start_paddr();
         if !check_and_insert_dma_mapping(start_paddr, frame_count) {
             return Err(DmaError::AlreadyMapped);
@@ -109,6 +109,11 @@ impl DmaCoherent {
             }),
         })
     }
+
+    /// Returns the number of bytes in the DMA mapping.
+    pub fn nbytes(&self) -> usize {
+        self.inner.vm_segment.nbytes()
+    }
 }
 
 impl HasDaddr for DmaCoherent {
@@ -126,7 +131,7 @@ impl Deref for DmaCoherent {
 
 impl Drop for DmaCoherentInner {
     fn drop(&mut self) {
-        let frame_count = self.vm_segment.nframes();
+        let frame_count = self.vm_segment.nbytes() / PAGE_SIZE;
         let start_paddr = self.vm_segment.start_paddr();
         // Ensure that the addresses used later will not overflow
         start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
@@ -244,7 +249,7 @@ mod test {
             .is_contiguous(true)
            .alloc_contiguous()
            .unwrap();
-        let vm_segment_child = vm_segment_parent.range(0..1);
+        let vm_segment_child = vm_segment_parent.slice(&(0..PAGE_SIZE));
         let _dma_coherent_parent = DmaCoherent::map(vm_segment_parent, false);
         let dma_coherent_child = DmaCoherent::map(vm_segment_child, false);
         assert!(dma_coherent_child.is_err());
@@ -63,7 +63,7 @@ impl DmaStream {
         direction: DmaDirection,
         is_cache_coherent: bool,
     ) -> Result<Self, DmaError> {
-        let frame_count = vm_segment.nframes();
+        let frame_count = vm_segment.nbytes() / PAGE_SIZE;
         let start_paddr = vm_segment.start_paddr();
         if !check_and_insert_dma_mapping(start_paddr, frame_count) {
             return Err(DmaError::AlreadyMapped);
@@ -119,7 +119,7 @@ impl DmaStream {
 
     /// Returns the number of frames
     pub fn nframes(&self) -> usize {
-        self.inner.vm_segment.nframes()
+        self.inner.vm_segment.nbytes() / PAGE_SIZE
     }
 
     /// Returns the number of bytes
@@ -171,7 +171,7 @@ impl HasDaddr for DmaStream {
 
 impl Drop for DmaStreamInner {
     fn drop(&mut self) {
-        let frame_count = self.vm_segment.nframes();
+        let frame_count = self.vm_segment.nbytes() / PAGE_SIZE;
         let start_paddr = self.vm_segment.start_paddr();
         // Ensure that the addresses used later will not overflow
         start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
@@ -333,7 +333,7 @@ mod test {
            .is_contiguous(true)
            .alloc_contiguous()
            .unwrap();
-        let vm_segment_child = vm_segment_parent.range(0..1);
+        let vm_segment_child = vm_segment_parent.slice(&(0..PAGE_SIZE));
         let dma_stream_parent =
             DmaStream::map(vm_segment_parent, DmaDirection::Bidirectional, false);
         let dma_stream_child = DmaStream::map(vm_segment_child, DmaDirection::Bidirectional, false);
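In both DMA test hunks, the frame-indexed `Segment::range` call is replaced by the byte-indexed `Segment::slice`. The difference, sketched outside the tests:

    let parent = FrameAllocOptions::new(2)
        .is_contiguous(true)
        .alloc_contiguous()
        .unwrap();
    // Formerly `parent.range(0..1)` (frame indexes); now byte indexes.
    // The child takes extra reference counts on the sliced page.
    let child = parent.slice(&(0..PAGE_SIZE));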
@@ -9,23 +9,20 @@
 //! read and written by the kernel or the user.
 
 pub mod options;
-pub mod segment;
+mod segment;
 
 use core::mem::ManuallyDrop;
 
 pub use segment::Segment;
 
-use super::{
-    page::{
-        meta::{FrameMeta, MetaSlot, PageMeta, PageUsage},
-        DynPage, Page,
-    },
-    Infallible,
+use super::page::{
+    meta::{FrameMeta, MetaSlot, PageMeta, PageUsage},
+    DynPage, Page,
 };
 use crate::{
     mm::{
         io::{FallibleVmRead, FallibleVmWrite, VmIo, VmReader, VmWriter},
-        paddr_to_vaddr, HasPaddr, Paddr, PAGE_SIZE,
+        paddr_to_vaddr, HasPaddr, Infallible, Paddr, PAGE_SIZE,
     },
     Error, Result,
 };
@@ -181,54 +178,6 @@ impl VmIo for Frame {
     }
 }
 
-impl VmIo for alloc::vec::Vec<Frame> {
-    fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> {
-        // Do bound check with potential integer overflow in mind
-        let max_offset = offset.checked_add(writer.avail()).ok_or(Error::Overflow)?;
-        if max_offset > self.len() * PAGE_SIZE {
-            return Err(Error::InvalidArgs);
-        }
-
-        let num_skip_pages = offset / PAGE_SIZE;
-        let mut start = offset % PAGE_SIZE;
-        for frame in self.iter().skip(num_skip_pages) {
-            let read_len = frame
-                .reader()
-                .skip(start)
-                .read_fallible(writer)
-                .map_err(|(e, _)| e)?;
-            if read_len == 0 {
-                break;
-            }
-            start = 0;
-        }
-        Ok(())
-    }
-
-    fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> {
-        // Do bound check with potential integer overflow in mind
-        let max_offset = offset.checked_add(reader.remain()).ok_or(Error::Overflow)?;
-        if max_offset > self.len() * PAGE_SIZE {
-            return Err(Error::InvalidArgs);
-        }
-
-        let num_skip_pages = offset / PAGE_SIZE;
-        let mut start = offset % PAGE_SIZE;
-        for frame in self.iter().skip(num_skip_pages) {
-            let write_len = frame
-                .writer()
-                .skip(start)
-                .write_fallible(reader)
-                .map_err(|(e, _)| e)?;
-            if write_len == 0 {
-                break;
-            }
-            start = 0;
-        }
-        Ok(())
-    }
-}
-
 impl PageMeta for FrameMeta {
     const USAGE: PageUsage = PageUsage::Frame;
 
@@ -1,27 +1,33 @@
 // SPDX-License-Identifier: MPL-2.0
 
-//! A contiguous range of page frames.
+//! A contiguous segment of untyped memory pages.
 
-use alloc::sync::Arc;
 use core::ops::Range;
 
-use super::Frame;
 use crate::{
     mm::{
-        page::{cont_pages::ContPages, meta::FrameMeta, Page},
-        FallibleVmRead, FallibleVmWrite, HasPaddr, Infallible, Paddr, VmIo, VmReader, VmWriter,
-        PAGE_SIZE,
+        io::{FallibleVmRead, FallibleVmWrite},
+        page::{meta::FrameMeta, ContPages},
+        Frame, HasPaddr, Infallible, Paddr, VmIo, VmReader, VmWriter,
     },
     Error, Result,
 };
 
-/// A handle to a contiguous range of page frames (physical memory pages).
+/// A contiguous segment of untyped memory pages.
 ///
-/// A cloned `Segment` refers to the same page frames as the original.
-/// As the original and cloned instances point to the same physical address,
-/// they are treated as equal to each other.
+/// A [`Segment`] object is a handle to a contiguous range of untyped memory
+/// pages, and the underlying pages can be shared among multiple threads.
+/// [`Segment::slice`] can be used to clone a slice of the segment (also can be
+/// used to clone the entire range). Reference counts are maintained for each
+/// page in the segment. So cloning the handle may not be cheap as it
+/// increments the reference count of all the cloned pages.
 ///
-/// #Example
+/// Other [`Frame`] handles can also refer to the pages in the segment. And
+/// the segment can be iterated over to get all the frames in it.
+///
+/// To allocate a segment, use [`crate::mm::FrameAllocator`].
+///
+/// # Example
 ///
 /// ```rust
 /// let vm_segment = FrameAllocOptions::new(2)
@@ -29,88 +35,102 @@ use crate::{
 ///     .alloc_contiguous()?;
 /// vm_segment.write_bytes(0, buf)?;
 /// ```
-#[derive(Debug, Clone)]
+#[derive(Debug)]
 pub struct Segment {
-    inner: Arc<ContPages<FrameMeta>>,
-    range: Range<usize>,
+    pages: ContPages<FrameMeta>,
 }
 
 impl HasPaddr for Segment {
     fn paddr(&self) -> Paddr {
-        self.start_paddr()
+        self.pages.start_paddr()
+    }
+}
+
+impl Clone for Segment {
+    fn clone(&self) -> Self {
+        Self {
+            pages: self.pages.clone(),
+        }
     }
 }
 
 impl Segment {
-    /// Returns a part of the `Segment`.
-    ///
-    /// # Panics
-    ///
-    /// If `range` is not within the range of this `Segment`,
-    /// then the method panics.
-    pub fn range(&self, range: Range<usize>) -> Self {
-        let orig_range = &self.range;
-        let adj_range = (range.start + orig_range.start)..(range.end + orig_range.start);
-        assert!(!adj_range.is_empty() && adj_range.end <= orig_range.end);
-
-        Self {
-            inner: self.inner.clone(),
-            range: adj_range,
-        }
-    }
-
     /// Returns the start physical address.
     pub fn start_paddr(&self) -> Paddr {
-        self.start_frame_index() * PAGE_SIZE
+        self.pages.start_paddr()
     }
 
     /// Returns the end physical address.
     pub fn end_paddr(&self) -> Paddr {
-        (self.start_frame_index() + self.nframes()) * PAGE_SIZE
+        self.pages.end_paddr()
     }
 
-    /// Returns the number of page frames.
-    pub fn nframes(&self) -> usize {
-        self.range.len()
-    }
-
-    /// Returns the number of bytes.
+    /// Returns the number of bytes in it.
     pub fn nbytes(&self) -> usize {
-        self.nframes() * PAGE_SIZE
+        self.pages.nbytes()
     }
 
-    fn start_frame_index(&self) -> usize {
-        self.inner.start_paddr() / PAGE_SIZE + self.range.start
+    /// Split the segment into two at the given byte offset from the start.
+    ///
+    /// The resulting segments cannot be empty. So the byte offset cannot be
+    /// neither zero nor the length of the segment.
+    ///
+    /// # Panics
+    ///
+    /// The function panics if the byte offset is out of bounds, at either ends, or
+    /// not base-page-aligned.
+    pub fn split(self, offset: usize) -> (Self, Self) {
+        let (left, right) = self.pages.split(offset);
+        (Self { pages: left }, Self { pages: right })
     }
 
-    /// Returns a raw pointer to the starting virtual address of the `Segment`.
-    pub fn as_ptr(&self) -> *const u8 {
-        super::paddr_to_vaddr(self.start_paddr()) as *const u8
+    /// Get an extra handle to the segment in the byte range.
+    ///
+    /// The sliced byte range in indexed by the offset from the start of the
+    /// segment. The resulting segment holds extra reference counts.
+    ///
+    /// # Panics
+    ///
+    /// The function panics if the byte range is out of bounds, or if any of
+    /// the ends of the byte range is not base-page aligned.
+    pub fn slice(&self, range: &Range<usize>) -> Self {
+        Self {
+            pages: self.pages.slice(range),
+        }
     }
 
-    /// Returns a mutable raw pointer to the starting virtual address of the `Segment`.
-    pub fn as_mut_ptr(&self) -> *mut u8 {
-        super::paddr_to_vaddr(self.start_paddr()) as *mut u8
-    }
-}
-
-impl<'a> Segment {
-    /// Returns a reader to read data from it.
-    pub fn reader(&'a self) -> VmReader<'a, Infallible> {
+    /// Gets a [`VmReader`] to read from the segment from the beginning to the end.
+    pub fn reader(&self) -> VmReader<'_, Infallible> {
+        let ptr = super::paddr_to_vaddr(self.start_paddr()) as *const u8;
         // SAFETY:
         // - The memory range points to untyped memory.
         // - The segment is alive during the lifetime `'a`.
         // - Using `VmReader` and `VmWriter` is the only way to access the segment.
-        unsafe { VmReader::from_kernel_space(self.as_ptr(), self.nbytes()) }
+        unsafe { VmReader::from_kernel_space(ptr, self.nbytes()) }
     }
 
-    /// Returns a writer to write data into it.
-    pub fn writer(&'a self) -> VmWriter<'a, Infallible> {
+    /// Gets a [`VmWriter`] to write to the segment from the beginning to the end.
+    pub fn writer(&self) -> VmWriter<'_, Infallible> {
+        let ptr = super::paddr_to_vaddr(self.start_paddr()) as *mut u8;
         // SAFETY:
         // - The memory range points to untyped memory.
         // - The segment is alive during the lifetime `'a`.
         // - Using `VmReader` and `VmWriter` is the only way to access the segment.
-        unsafe { VmWriter::from_kernel_space(self.as_mut_ptr(), self.nbytes()) }
+        unsafe { VmWriter::from_kernel_space(ptr, self.nbytes()) }
+    }
+}
+
+impl From<Frame> for Segment {
+    fn from(frame: Frame) -> Self {
+        Self {
+            pages: ContPages::from(frame.page),
+        }
+    }
+}
+
+impl From<ContPages<FrameMeta>> for Segment {
+    fn from(pages: ContPages<FrameMeta>) -> Self {
+        Self { pages }
     }
 }
 
@@ -148,21 +168,10 @@ impl VmIo for Segment {
     }
 }
 
-impl From<Frame> for Segment {
-    fn from(frame: Frame) -> Self {
-        Self {
-            inner: Arc::new(Page::<FrameMeta>::from(frame).into()),
-            range: 0..1,
-        }
-    }
-}
+impl Iterator for Segment {
+    type Item = Frame;
 
-impl From<ContPages<FrameMeta>> for Segment {
-    fn from(cont_pages: ContPages<FrameMeta>) -> Self {
-        let len = cont_pages.len();
-        Self {
-            inner: Arc::new(cont_pages),
-            range: 0..len / PAGE_SIZE,
-        }
+    fn next(&mut self) -> Option<Self::Item> {
+        self.pages.next().map(|page| Frame { page })
     }
 }
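`Segment` now iterates over its frames by value, which is what lets the `create_user_space` hunk earlier write `for frame in user_pages`. A sketch:

    let segment = FrameAllocOptions::new(2).alloc_contiguous().unwrap();
    for frame in segment {
        // Each iteration yields an owned `Frame`, transferring the page
        // handle out of the segment one page at a time.
        let _paddr = frame.start_paddr();
    }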
@@ -5,7 +5,7 @@
 use alloc::vec::Vec;
 use core::{mem::ManuallyDrop, ops::Range};
 
-use super::{meta::PageMeta, Page};
+use super::{inc_page_ref_count, meta::PageMeta, Page};
 use crate::mm::{Paddr, PAGE_SIZE};
 
 /// A contiguous range of physical memory pages.
@@ -25,16 +25,31 @@ pub struct ContPages<M: PageMeta> {
 
 impl<M: PageMeta> Drop for ContPages<M> {
     fn drop(&mut self) {
-        for i in self.range.clone().step_by(PAGE_SIZE) {
+        for paddr in self.range.clone().step_by(PAGE_SIZE) {
             // SAFETY: for each page there would be a forgotten handle
             // when creating the `ContPages` object.
-            drop(unsafe { Page::<M>::from_raw(i) });
+            drop(unsafe { Page::<M>::from_raw(paddr) });
+        }
+    }
+}
+
+impl<M: PageMeta> Clone for ContPages<M> {
+    fn clone(&self) -> Self {
+        for paddr in self.range.clone().step_by(PAGE_SIZE) {
+            // SAFETY: for each page there would be a forgotten handle
+            // when creating the `ContPages` object, so we already have
+            // reference counts for the pages.
+            unsafe { inc_page_ref_count(paddr) };
+        }
+        Self {
+            range: self.range.clone(),
+            _marker: core::marker::PhantomData,
         }
     }
 }
 
 impl<M: PageMeta> ContPages<M> {
-    /// Create a new `ContPages` from unused pages.
+    /// Creates a new `ContPages` from unused pages.
     ///
     /// The caller must provide a closure to initialize metadata for all the pages.
     /// The closure receives the physical address of the page and returns the
@@ -49,8 +64,8 @@ impl<M: PageMeta> ContPages<M> {
     where
         F: FnMut(Paddr) -> M,
     {
-        for i in range.clone().step_by(PAGE_SIZE) {
-            let _ = ManuallyDrop::new(Page::<M>::from_unused(i, metadata_fn(i)));
+        for paddr in range.clone().step_by(PAGE_SIZE) {
+            let _ = ManuallyDrop::new(Page::<M>::from_unused(paddr, metadata_fn(paddr)));
         }
         Self {
             range,
@@ -58,20 +73,76 @@ impl<M: PageMeta> ContPages<M> {
         }
     }
 
-    /// Get the start physical address of the contiguous pages.
+    /// Gets the start physical address of the contiguous pages.
     pub fn start_paddr(&self) -> Paddr {
         self.range.start
     }
 
-    /// Get the end physical address of the contiguous pages.
+    /// Gets the end physical address of the contiguous pages.
     pub fn end_paddr(&self) -> Paddr {
         self.range.end
     }
 
-    /// Get the length in bytes of the contiguous pages.
-    pub fn len(&self) -> usize {
+    /// Gets the length in bytes of the contiguous pages.
+    pub fn nbytes(&self) -> usize {
         self.range.end - self.range.start
     }
+
+    /// Splits the pages into two at the given byte offset from the start.
+    ///
+    /// The resulting pages cannot be empty. So the offset cannot be neither
+    /// zero nor the length of the pages.
+    ///
+    /// # Panics
+    ///
+    /// The function panics if the offset is out of bounds, at either ends, or
+    /// not base-page-aligned.
+    pub fn split(self, offset: usize) -> (Self, Self) {
+        assert!(offset % PAGE_SIZE == 0);
+        assert!(0 < offset && offset < self.nbytes());
+
+        let old = ManuallyDrop::new(self);
+        let at = old.range.start + offset;
+
+        (
+            Self {
+                range: old.range.start..at,
+                _marker: core::marker::PhantomData,
+            },
+            Self {
+                range: at..old.range.end,
+                _marker: core::marker::PhantomData,
+            },
+        )
+    }
+
+    /// Gets an extra handle to the pages in the byte offset range.
+    ///
+    /// The sliced byte offset range in indexed by the offset from the start of
+    /// the contiguous pages. The resulting pages holds extra reference counts.
+    ///
+    /// # Panics
+    ///
+    /// The function panics if the byte offset range is out of bounds, or if
+    /// any of the ends of the byte offset range is not base-page aligned.
+    pub fn slice(&self, range: &Range<usize>) -> Self {
+        assert!(range.start % PAGE_SIZE == 0 && range.end % PAGE_SIZE == 0);
+        let start = self.range.start + range.start;
+        let end = self.range.start + range.end;
+        assert!(start <= end && end <= self.range.end);
+
+        for paddr in (start..end).step_by(PAGE_SIZE) {
+            // SAFETY: We already have reference counts for the pages since
+            // for each page there would be a forgotten handle when creating
+            // the `ContPages` object.
+            unsafe { inc_page_ref_count(paddr) };
+        }
+
+        Self {
+            range: start..end,
+            _marker: core::marker::PhantomData,
+        }
+    }
 }
 
 impl<M: PageMeta> From<Page<M>> for ContPages<M> {
@@ -100,3 +171,21 @@ impl<M: PageMeta> From<ContPages<M>> for Vec<Page<M>> {
         vector
     }
 }
+
+impl<M: PageMeta> Iterator for ContPages<M> {
+    type Item = Page<M>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.range.start < self.range.end {
+            // SAFETY: each page in the range would be a handle forgotten
+            // when creating the `ContPages` object.
+            let page = unsafe { Page::<M>::from_raw(self.range.start) };
+            self.range.start += PAGE_SIZE;
+            // The end cannot be non-page-aligned.
+            debug_assert!(self.range.start <= self.range.end);
+            Some(page)
+        } else {
+            None
+        }
+    }
+}
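The reference-count discipline behind all of the above lives in `ContPages`: construction forgets one handle per page, `clone` and `slice` re-increment per page, and `drop` or iteration releases per page. This is exactly why `Segment::clone` is now O(n) in pages while `SegmentSlice::clone` stays O(1). Sketched at the `Segment` level:

    let segment = FrameAllocOptions::new(3).alloc_contiguous().unwrap(); // 3 pages
    let copy = segment.clone(); // three `inc_page_ref_count` calls underneath
    drop(copy);                 // three per-page handle drops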
@@ -15,7 +15,7 @@
 //! the handle only a pointer to the metadata.
 
 pub mod allocator;
-pub mod cont_pages;
+mod cont_pages;
 pub mod meta;
 
 use core::{