Mirror of https://github.com/asterinas/asterinas.git (synced 2025-06-10 05:46:48 +00:00)

Implement a new set of physical page APIs

commit cdac59beda
parent 6e1c36965a
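The hunks below repeat one mechanical migration across many call sites. As a minimal sketch of the before/after pattern — assuming only the calls visible in this diff; the function and its error handling are illustrative, not part of the commit:

```rust
use ostd::mm::{DmaDirection, DmaStream, FrameAllocOptions};

// Old API (removed below):
//     FrameAllocOptions::new(nframes).uninit(true).alloc_contiguous() // -> UntypedSegment
//     FrameAllocOptions::new(1).alloc_single()                        // -> UntypedFrame
//     DmaStream::map(segment, direction, is_cache_coherent)
//
// New API (added below): the builder no longer takes a frame count, and the
// type-erased `UntypedFrame`/`UntypedSegment` are renamed to `DynUFrame`/
// `DynUSegment`, with `Frame<()>`/`Segment<()>` as the concrete untyped forms.
fn alloc_dma_buffer(nframes: usize) -> Result<DmaStream, ostd::Error> {
    let segment = FrameAllocOptions::new()
        .zeroed(false) // replaces `.uninit(true)`: skip zero-filling
        .alloc_segment(nframes)?; // replaces `.alloc_contiguous()`
    // `DmaStream::map` now takes the type-erased segment, hence `.into()`.
    DmaStream::map(segment.into(), DmaDirection::ToDevice, false)
        .map_err(|_| ostd::Error::AccessDenied)
}
```

Two accessor renames recur as well: `nbytes()` becomes `size()` on frames and segments, and `DmaStream::vm_segment()` becomes `DmaStream::segment()`.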
@@ -5,8 +5,8 @@ use bitvec::array::BitArray;
 use int_to_c_enum::TryFromInt;
 use ostd::{
     mm::{
-        DmaDirection, DmaStream, DmaStreamSlice, FrameAllocOptions, Infallible, UntypedSegment,
-        VmIo, VmReader, VmWriter,
+        DmaDirection, DmaStream, DmaStreamSlice, DynUSegment, FrameAllocOptions, Infallible, VmIo,
+        VmReader, VmWriter,
     },
     sync::{SpinLock, WaitQueue},
     Error,
@@ -426,11 +426,11 @@ impl<'a> BioSegment {
         let bio_segment_inner = target_pool(direction)
             .and_then(|pool| pool.alloc(nblocks, offset_within_first_block, len))
             .unwrap_or_else(|| {
-                let segment = FrameAllocOptions::new(nblocks)
-                    .uninit(true)
-                    .alloc_contiguous()
+                let segment = FrameAllocOptions::new()
+                    .zeroed(false)
+                    .alloc_segment(nblocks)
                     .unwrap();
-                let dma_stream = DmaStream::map(segment, direction.into(), false).unwrap();
+                let dma_stream = DmaStream::map(segment.into(), direction.into(), false).unwrap();
                 BioSegmentInner {
                     dma_slice: DmaStreamSlice::new(dma_stream, offset_within_first_block, len),
                     from_pool: false,
@@ -442,9 +442,9 @@ impl<'a> BioSegment {
         }
     }

-    /// Constructs a new `BioSegment` with a given `UntypedSegment` and the bio direction.
-    pub fn new_from_segment(segment: UntypedSegment, direction: BioDirection) -> Self {
-        let len = segment.nbytes();
+    /// Constructs a new `BioSegment` with a given `DynUSegment` and the bio direction.
+    pub fn new_from_segment(segment: DynUSegment, direction: BioDirection) -> Self {
+        let len = segment.size();
         let dma_stream = DmaStream::map(segment, direction.into(), false).unwrap();
         Self {
             inner: Arc::new(BioSegmentInner {
@@ -481,8 +481,8 @@ impl<'a> BioSegment {

     /// Returns the inner VM segment.
     #[cfg(ktest)]
-    pub fn inner_segment(&self) -> &UntypedSegment {
-        self.inner.dma_slice.stream().vm_segment()
+    pub fn inner_segment(&self) -> &DynUSegment {
+        self.inner.dma_slice.stream().segment()
     }

     /// Returns a reader to read data from it.
@@ -560,11 +560,11 @@ impl BioSegmentPool {
     pub fn new(direction: BioDirection) -> Self {
         let total_blocks = POOL_DEFAULT_NBLOCKS;
         let pool = {
-            let segment = FrameAllocOptions::new(total_blocks)
-                .uninit(true)
-                .alloc_contiguous()
+            let segment = FrameAllocOptions::new()
+                .zeroed(false)
+                .alloc_segment(total_blocks)
                 .unwrap();
-            DmaStream::map(segment, direction.into(), false).unwrap()
+            DmaStream::map(segment.into(), direction.into(), false).unwrap()
         };
         let manager = SpinLock::new(PoolSlotManager {
             occupied: BitArray::ZERO,
@@ -34,10 +34,10 @@ impl TxBuffer {
         let dma_stream = if let Some(stream) = pool.lock().pop_front() {
             stream
         } else {
-            let segment = FrameAllocOptions::new(TX_BUFFER_LEN / PAGE_SIZE)
-                .alloc_contiguous()
+            let segment = FrameAllocOptions::new()
+                .alloc_segment(TX_BUFFER_LEN / PAGE_SIZE)
                 .unwrap();
-            DmaStream::map(segment, DmaDirection::ToDevice, false).unwrap()
+            DmaStream::map(segment.into(), DmaDirection::ToDevice, false).unwrap()
         };

         let tx_buffer = {
@@ -152,9 +152,9 @@ impl DmaPage {
         pool: Weak<DmaPool>,
     ) -> Result<Self, ostd::Error> {
         let dma_stream = {
-            let segment = FrameAllocOptions::new(1).alloc_contiguous()?;
+            let segment = FrameAllocOptions::new().alloc_segment(1)?;

-            DmaStream::map(segment, direction, is_cache_coherent)
+            DmaStream::map(segment.into(), direction, is_cache_coherent)
                 .map_err(|_| ostd::Error::AccessDenied)?
         };

@@ -141,13 +141,13 @@ impl DeviceInner {
         let queue = VirtQueue::new(0, Self::QUEUE_SIZE, transport.as_mut())
             .expect("create virtqueue failed");
         let block_requests = {
-            let vm_segment = FrameAllocOptions::new(1).alloc_contiguous().unwrap();
-            DmaStream::map(vm_segment, DmaDirection::Bidirectional, false).unwrap()
+            let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();
+            DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap()
         };
         assert!(Self::QUEUE_SIZE as usize * REQ_SIZE <= block_requests.nbytes());
         let block_responses = {
-            let vm_segment = FrameAllocOptions::new(1).alloc_contiguous().unwrap();
-            DmaStream::map(vm_segment, DmaDirection::Bidirectional, false).unwrap()
+            let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();
+            DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap()
         };
         assert!(Self::QUEUE_SIZE as usize * RESP_SIZE <= block_responses.nbytes());

@@ -261,11 +261,11 @@ impl DeviceInner {
         };
         const MAX_ID_LENGTH: usize = 20;
         let device_id_stream = {
-            let segment = FrameAllocOptions::new(1)
-                .uninit(true)
-                .alloc_contiguous()
+            let segment = FrameAllocOptions::new()
+                .zeroed(false)
+                .alloc_segment(1)
                 .unwrap();
-            DmaStream::map(segment, DmaDirection::FromDevice, false).unwrap()
+            DmaStream::map(segment.into(), DmaDirection::FromDevice, false).unwrap()
         };
         let device_id_slice = DmaStreamSlice::new(&device_id_stream, 0, MAX_ID_LENGTH);
         let outputs = vec![&device_id_slice, &resp_slice];
@@ -87,13 +87,13 @@ impl ConsoleDevice {
             SpinLock::new(VirtQueue::new(TRANSMIT0_QUEUE_INDEX, 2, transport.as_mut()).unwrap());

         let send_buffer = {
-            let vm_segment = FrameAllocOptions::new(1).alloc_contiguous().unwrap();
-            DmaStream::map(vm_segment, DmaDirection::ToDevice, false).unwrap()
+            let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();
+            DmaStream::map(segment.into(), DmaDirection::ToDevice, false).unwrap()
         };

         let receive_buffer = {
-            let vm_segment = FrameAllocOptions::new(1).alloc_contiguous().unwrap();
-            DmaStream::map(vm_segment, DmaDirection::FromDevice, false).unwrap()
+            let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();
+            DmaStream::map(segment.into(), DmaDirection::FromDevice, false).unwrap()
         };

         let device = Arc::new(Self {
@@ -261,14 +261,14 @@ impl EventTable {
     fn new(num_events: usize) -> Self {
         assert!(num_events * mem::size_of::<VirtioInputEvent>() <= PAGE_SIZE);

-        let vm_segment = FrameAllocOptions::new(1).alloc_contiguous().unwrap();
+        let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();

         let default_event = VirtioInputEvent::default();
         let iter = iter::repeat(&default_event).take(EVENT_SIZE);
-        let nr_written = vm_segment.write_vals(0, iter, 0).unwrap();
+        let nr_written = segment.write_vals(0, iter, 0).unwrap();
         assert_eq!(nr_written, EVENT_SIZE);

-        let stream = DmaStream::map(vm_segment, DmaDirection::FromDevice, false).unwrap();
+        let stream = DmaStream::map(segment.into(), DmaDirection::FromDevice, false).unwrap();
         Self { stream, num_events }
     }

@@ -76,7 +76,7 @@ impl VirtQueue {
         }

         let (descriptor_ptr, avail_ring_ptr, used_ring_ptr) = if transport.is_legacy_version() {
-            // Currently, we use one UntypedFrame to place the descriptors and available rings, one UntypedFrame to place used rings
+            // Currently, we use one DynUFrame to place the descriptors and available rings, one DynUFrame to place used rings
             // because the virtio-mmio legacy required the address to be continuous. The max queue size is 128.
             if size > 128 {
                 return Err(QueueError::InvalidArgs);
@@ -89,8 +89,8 @@ impl VirtQueue {
             let align_size = VirtioPciLegacyTransport::QUEUE_ALIGN_SIZE;
             let total_frames =
                 VirtioPciLegacyTransport::calc_virtqueue_size_aligned(queue_size) / align_size;
-            let continue_segment = FrameAllocOptions::new(total_frames)
-                .alloc_contiguous()
+            let continue_segment = FrameAllocOptions::new()
+                .alloc_segment(total_frames)
                 .unwrap();

             let avial_size = size_of::<u16>() * (3 + queue_size);
@@ -99,12 +99,12 @@ impl VirtQueue {
                 continue_segment.split(seg1_frames * align_size)
             };
             let desc_frame_ptr: SafePtr<Descriptor, DmaCoherent> =
-                SafePtr::new(DmaCoherent::map(seg1, true).unwrap(), 0);
+                SafePtr::new(DmaCoherent::map(seg1.into(), true).unwrap(), 0);
             let mut avail_frame_ptr: SafePtr<AvailRing, DmaCoherent> =
                 desc_frame_ptr.clone().cast();
             avail_frame_ptr.byte_add(desc_size);
             let used_frame_ptr: SafePtr<UsedRing, DmaCoherent> =
-                SafePtr::new(DmaCoherent::map(seg2, true).unwrap(), 0);
+                SafePtr::new(DmaCoherent::map(seg2.into(), true).unwrap(), 0);
             (desc_frame_ptr, avail_frame_ptr, used_frame_ptr)
         } else {
             if size > 256 {
@@ -112,18 +112,27 @@ impl VirtQueue {
             }
             (
                 SafePtr::new(
-                    DmaCoherent::map(FrameAllocOptions::new(1).alloc_contiguous().unwrap(), true)
-                        .unwrap(),
+                    DmaCoherent::map(
+                        FrameAllocOptions::new().alloc_segment(1).unwrap().into(),
+                        true,
+                    )
+                    .unwrap(),
                     0,
                 ),
                 SafePtr::new(
-                    DmaCoherent::map(FrameAllocOptions::new(1).alloc_contiguous().unwrap(), true)
-                        .unwrap(),
+                    DmaCoherent::map(
+                        FrameAllocOptions::new().alloc_segment(1).unwrap().into(),
+                        true,
+                    )
+                    .unwrap(),
                     0,
                 ),
                 SafePtr::new(
-                    DmaCoherent::map(FrameAllocOptions::new(1).alloc_contiguous().unwrap(), true)
-                        .unwrap(),
+                    DmaCoherent::map(
+                        FrameAllocOptions::new().alloc_segment(1).unwrap().into(),
+                        true,
+                    )
+                    .unwrap(),
                     0,
                 ),
             )
@@ -54,7 +54,7 @@ use ostd::{
 ///
 /// The generic parameter `M` of `SafePtr<_, M, _>` must implement the `VmIo`
 /// trait. The most important `VmIo` types are `Vmar`, `Vmo`, `IoMem`, and
-/// `UntypedFrame`. The blanket implementations of `VmIo` also include pointer-like
+/// `DynUFrame`. The blanket implementations of `VmIo` also include pointer-like
 /// types that refer to a `VmIo` type. Some examples are `&Vmo`, `Box<Vmar>`,
 /// and `Arc<IoMem>`.
 ///
@@ -2,41 +2,41 @@

 // SPDX-License-Identifier: MPL-2.0

-//! Provides [`SegmentSlice`] for quick duplication and slicing over [`UntypedSegment`].
+//! Provides [`SegmentSlice`] for quick duplication and slicing over [`DynUSegment`].

 use alloc::sync::Arc;
 use core::ops::Range;

 use ostd::{
     mm::{
-        FallibleVmRead, FallibleVmWrite, Infallible, Paddr, UntypedFrame, UntypedSegment, VmIo,
-        VmReader, VmWriter, PAGE_SIZE,
+        DynUFrame, DynUSegment, FallibleVmRead, FallibleVmWrite, Infallible, Paddr, UntypedMem,
+        VmIo, VmReader, VmWriter, PAGE_SIZE,
     },
     Error, Result,
 };

-/// A reference to a slice of a [`UntypedSegment`].
+/// A reference to a slice of a [`DynUSegment`].
 ///
 /// Cloning a [`SegmentSlice`] is cheap, as it only increments one reference
-/// count. While cloning a [`UntypedSegment`] will increment the reference count of
+/// count. While cloning a [`DynUSegment`] will increment the reference count of
 /// many underlying pages.
 ///
 /// The downside is that the [`SegmentSlice`] requires heap allocation. Also,
-/// if any [`SegmentSlice`] of the original [`UntypedSegment`] is alive, all pages in
-/// the original [`UntypedSegment`], including the pages that are not referenced, will
+/// if any [`SegmentSlice`] of the original [`DynUSegment`] is alive, all pages in
+/// the original [`DynUSegment`], including the pages that are not referenced, will
 /// not be freed.
 #[derive(Debug, Clone)]
 pub struct SegmentSlice {
-    inner: Arc<UntypedSegment>,
+    inner: Arc<DynUSegment>,
     range: Range<usize>,
 }

 impl SegmentSlice {
-    /// Returns a part of the `UntypedSegment`.
+    /// Returns a part of the `DynUSegment`.
     ///
     /// # Panics
     ///
-    /// If `range` is not within the range of this `UntypedSegment`,
+    /// If `range` is not within the range of this `DynUSegment`,
     /// then the method panics.
     pub fn range(&self, range: Range<usize>) -> Self {
         let orig_range = &self.range;
@@ -124,9 +124,9 @@ impl VmIo for SegmentSlice {
     }
 }

-impl From<UntypedSegment> for SegmentSlice {
-    fn from(segment: UntypedSegment) -> Self {
-        let range = 0..segment.nbytes() / PAGE_SIZE;
+impl From<DynUSegment> for SegmentSlice {
+    fn from(segment: DynUSegment) -> Self {
+        let range = 0..segment.size() / PAGE_SIZE;
         Self {
             inner: Arc::new(segment),
             range,
@@ -134,7 +134,7 @@ impl From<UntypedSegment> for SegmentSlice {
     }
 }

-impl From<SegmentSlice> for UntypedSegment {
+impl From<SegmentSlice> for DynUSegment {
     fn from(slice: SegmentSlice) -> Self {
         let start = slice.range.start * PAGE_SIZE;
         let end = slice.range.end * PAGE_SIZE;
@@ -142,8 +142,8 @@ impl From<SegmentSlice> for UntypedSegment {
     }
 }

-impl From<UntypedFrame> for SegmentSlice {
-    fn from(frame: UntypedFrame) -> Self {
-        SegmentSlice::from(UntypedSegment::from(frame))
+impl From<DynUFrame> for SegmentSlice {
+    fn from(frame: DynUFrame) -> Self {
+        SegmentSlice::from(DynUSegment::from(frame))
     }
 }
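The `From` impls above make `SegmentSlice` constructible from both a `DynUFrame` and a `DynUSegment`. A minimal usage sketch, grounded only in the signatures visible in this hunk and assuming `SegmentSlice` is in scope — illustrative, not part of the commit:

```rust
use ostd::mm::{DynUFrame, FrameAllocOptions, VmIo};

fn slice_one_page() {
    // A frame becomes a one-page slice; the inner `DynUSegment` sits behind
    // a single `Arc`, so cloning or sub-slicing is cheap per the doc above.
    let frame: DynUFrame = FrameAllocOptions::new().alloc_frame().unwrap().into();
    let slice = SegmentSlice::from(frame); // via `From<DynUFrame>`
    let sub = slice.range(0..1); // page-granular; panics if out of range
    let mut buf = [0u8; 16];
    sub.read_bytes(0, &mut buf).unwrap(); // `SegmentSlice` implements `VmIo`
}
```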
@@ -88,11 +88,8 @@ fn handle_get_report(arg: usize) -> Result<i32> {
     let user_space = CurrentUserSpace::new(&current_task);
     let user_request: TdxReportRequest = user_space.read_val(arg)?;

-    let vm_segment = FrameAllocOptions::new(2)
-        .is_contiguous(true)
-        .alloc_contiguous()
-        .unwrap();
-    let dma_coherent = DmaCoherent::map(vm_segment, false).unwrap();
+    let segment = FrameAllocOptions::new().alloc_segment(2).unwrap();
+    let dma_coherent = DmaCoherent::map(segment.into(), false).unwrap();
     dma_coherent
         .write_bytes(0, &user_request.report_data)
         .unwrap();
@@ -12,7 +12,7 @@ use aster_block::{
 };
 use hashbrown::HashMap;
 use lru::LruCache;
-use ostd::mm::UntypedFrame;
+use ostd::mm::DynUFrame;
 pub(super) use ostd::mm::VmIo;

 use super::{
@@ -368,7 +368,7 @@ impl ExfatFS {
 }

 impl PageCacheBackend for ExfatFS {
-    fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
+    fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
         if self.fs_size() < idx * PAGE_SIZE {
             return_errno_with_message!(Errno::EINVAL, "invalid read size")
         }
@@ -380,7 +380,7 @@ impl PageCacheBackend for ExfatFS {
         Ok(waiter)
     }

-    fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
+    fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
         if self.fs_size() < idx * PAGE_SIZE {
             return_errno_with_message!(Errno::EINVAL, "invalid write size")
         }
@@ -13,7 +13,7 @@ use aster_block::{
     BLOCK_SIZE,
 };
 use aster_rights::Full;
-use ostd::mm::{UntypedFrame, VmIo};
+use ostd::mm::{DynUFrame, VmIo};

 use super::{
     constants::*,
@@ -135,7 +135,7 @@ struct ExfatInodeInner {
 }

 impl PageCacheBackend for ExfatInode {
-    fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
+    fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
         let inner = self.inner.read();
         if inner.size < idx * PAGE_SIZE {
             return_errno_with_message!(Errno::EINVAL, "Invalid read size")
@@ -150,7 +150,7 @@ impl PageCacheBackend for ExfatInode {
         Ok(waiter)
     }

-    fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
+    fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
         let inner = self.inner.read();
         let sector_size = inner.fs().sector_size();

@@ -22,7 +22,7 @@ mod test {
         BlockDevice, BlockDeviceMeta,
     };
     use ostd::{
-        mm::{FrameAllocOptions, UntypedSegment, VmIo, PAGE_SIZE},
+        mm::{FrameAllocOptions, Segment, VmIo, PAGE_SIZE},
         prelude::*,
     };
     use rand::{rngs::SmallRng, RngCore, SeedableRng};
@@ -40,15 +40,15 @@ mod test {

     /// Followings are implementations of memory simulated block device
     pub const SECTOR_SIZE: usize = 512;
-    struct ExfatMemoryBioQueue(UntypedSegment);
+    struct ExfatMemoryBioQueue(Segment<()>);

     impl ExfatMemoryBioQueue {
-        pub fn new(segment: UntypedSegment) -> Self {
+        pub fn new(segment: Segment<()>) -> Self {
             ExfatMemoryBioQueue(segment)
         }

         pub fn sectors_count(&self) -> usize {
-            self.0.nbytes() / SECTOR_SIZE
+            self.0.size() / SECTOR_SIZE
         }
     }

@@ -57,7 +57,7 @@ mod test {
     }

     impl ExfatMemoryDisk {
-        pub fn new(segment: UntypedSegment) -> Self {
+        pub fn new(segment: Segment<()>) -> Self {
             ExfatMemoryDisk {
                 queue: ExfatMemoryBioQueue::new(segment),
             }
@@ -111,20 +111,20 @@ mod test {
     static EXFAT_IMAGE: &[u8] = include_bytes!("../../../../test/build/exfat.img");

     /// Read exfat disk image
-    fn new_vm_segment_from_image() -> UntypedSegment {
-        let vm_segment = FrameAllocOptions::new(EXFAT_IMAGE.len().div_ceil(PAGE_SIZE))
-            .uninit(true)
-            .alloc_contiguous()
+    fn new_vm_segment_from_image() -> Segment<()> {
+        let segment = FrameAllocOptions::new()
+            .zeroed(false)
+            .alloc_segment(EXFAT_IMAGE.len().div_ceil(PAGE_SIZE))
             .unwrap();

-        vm_segment.write_bytes(0, EXFAT_IMAGE).unwrap();
-        vm_segment
+        segment.write_bytes(0, EXFAT_IMAGE).unwrap();
+        segment
     }

     // Generate a simulated exfat file system
     fn load_exfat() -> Arc<ExfatFS> {
-        let vm_segment = new_vm_segment_from_image();
-        let disk = ExfatMemoryDisk::new(vm_segment);
+        let segment = new_vm_segment_from_image();
+        let disk = ExfatMemoryDisk::new(segment);
         let mount_option = ExfatMountOptions::default();
         let fs = ExfatFS::open(Arc::new(disk), mount_option);
         assert!(fs.is_ok(), "Fs failed to init:{:?}", fs.unwrap_err());
@@ -28,7 +28,7 @@ struct BlockGroupImpl {
 impl BlockGroup {
     /// Loads and constructs a block group.
     pub fn load(
-        group_descriptors_segment: &UntypedSegment,
+        group_descriptors_segment: &DynUSegment,
         idx: usize,
         block_device: &dyn BlockDevice,
         super_block: &SuperBlock,
@@ -318,7 +318,7 @@ impl Debug for BlockGroup {
 }

 impl PageCacheBackend for BlockGroupImpl {
-    fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
+    fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
         let bid = self.inode_table_bid + idx as Ext2Bid;
         let bio_segment =
             BioSegment::new_from_segment(frame.clone().into(), BioDirection::FromDevice);
@@ -328,7 +328,7 @@ impl PageCacheBackend for BlockGroupImpl {
             .read_blocks_async(bid, bio_segment)
     }

-    fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
+    fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
         let bid = self.inode_table_bid + idx as Ext2Bid;
         let bio_segment =
             BioSegment::new_from_segment(frame.clone().into(), BioDirection::ToDevice);
@@ -23,7 +23,7 @@ pub struct Ext2 {
     blocks_per_group: Ext2Bid,
     inode_size: usize,
     block_size: usize,
-    group_descriptors_segment: UntypedSegment,
+    group_descriptors_segment: DynUSegment,
     self_ref: Weak<Self>,
 }

@@ -46,11 +46,11 @@ impl Ext2 {
         let npages = ((super_block.block_groups_count() as usize)
             * core::mem::size_of::<RawGroupDescriptor>())
         .div_ceil(BLOCK_SIZE);
-        let segment = FrameAllocOptions::new(npages)
-            .uninit(true)
-            .alloc_contiguous()?;
+        let segment = FrameAllocOptions::new()
+            .zeroed(false)
+            .alloc_segment(npages)?;
         let bio_segment =
-            BioSegment::new_from_segment(segment.clone(), BioDirection::FromDevice);
+            BioSegment::new_from_segment(segment.clone().into(), BioDirection::FromDevice);
         match block_device.read_blocks(super_block.group_descriptors_bid(0), bio_segment)? {
             BioStatus::Complete => (),
             err_status => {
@@ -63,7 +63,7 @@ impl Ext2 {
         // Load the block groups information
         let load_block_groups = |fs: Weak<Ext2>,
                                  block_device: &dyn BlockDevice,
-                                 group_descriptors_segment: &UntypedSegment|
+                                 group_descriptors_segment: &DynUSegment|
          -> Result<Vec<BlockGroup>> {
             let block_groups_count = super_block.block_groups_count() as usize;
             let mut block_groups = Vec::with_capacity(block_groups_count);
@@ -88,12 +88,12 @@ impl Ext2 {
             block_groups: load_block_groups(
                 weak_ref.clone(),
                 block_device.as_ref(),
-                &group_descriptors_segment,
+                (&group_descriptors_segment).into(),
             )
             .unwrap(),
             block_device,
             super_block: RwMutex::new(Dirty::new(super_block)),
-            group_descriptors_segment,
+            group_descriptors_segment: group_descriptors_segment.into(),
             self_ref: weak_ref.clone(),
         });
         Ok(ext2)
@@ -42,8 +42,10 @@ impl IndirectBlockCache {
         let fs = self.fs();
         let load_block = || -> Result<IndirectBlock> {
             let mut block = IndirectBlock::alloc_uninit()?;
-            let bio_segment =
-                BioSegment::new_from_segment(block.frame.clone().into(), BioDirection::FromDevice);
+            let bio_segment = BioSegment::new_from_segment(
+                Segment::<()>::from(block.frame.clone()).into(),
+                BioDirection::FromDevice,
+            );
             fs.read_blocks(bid, bio_segment)?;
             block.state = State::UpToDate;
             Ok(block)
@@ -61,8 +63,10 @@ impl IndirectBlockCache {
         let fs = self.fs();
         let load_block = || -> Result<IndirectBlock> {
             let mut block = IndirectBlock::alloc_uninit()?;
-            let bio_segment =
-                BioSegment::new_from_segment(block.frame.clone().into(), BioDirection::FromDevice);
+            let bio_segment = BioSegment::new_from_segment(
+                Segment::<()>::from(block.frame.clone()).into(),
+                BioDirection::FromDevice,
+            );
             fs.read_blocks(bid, bio_segment)?;
             block.state = State::UpToDate;
             Ok(block)
@@ -109,7 +113,7 @@ impl IndirectBlockCache {
         let (bid, block) = self.cache.pop_lru().unwrap();
         if block.is_dirty() {
             let bio_segment = BioSegment::new_from_segment(
-                block.frame.clone().into(),
+                Segment::<()>::from(block.frame.clone()).into(),
                 BioDirection::ToDevice,
             );
             bio_waiter.concat(self.fs().write_blocks_async(bid, bio_segment)?);
@@ -132,7 +136,7 @@ impl IndirectBlockCache {
 /// Represents a single indirect block buffer cached by the `IndirectCache`.
 #[derive(Clone, Debug)]
 pub struct IndirectBlock {
-    frame: UntypedFrame,
+    frame: Frame<()>,
     state: State,
 }

@@ -140,7 +144,7 @@ impl IndirectBlock {
     /// Allocates an uninitialized block whose bytes are to be populated with
     /// data loaded from the disk.
     fn alloc_uninit() -> Result<Self> {
-        let frame = FrameAllocOptions::new(1).uninit(true).alloc_single()?;
+        let frame = FrameAllocOptions::new().zeroed(false).alloc_frame()?;
         Ok(Self {
             frame,
             state: State::Uninit,
@@ -149,7 +153,7 @@ impl IndirectBlock {

     /// Allocates a new block with its bytes initialized to zero.
     pub fn alloc() -> Result<Self> {
-        let frame = FrameAllocOptions::new(1).alloc_single()?;
+        let frame = FrameAllocOptions::new().alloc_frame()?;
        Ok(Self {
             frame,
             state: State::Dirty,
@@ -1733,7 +1733,7 @@ impl InodeImpl {
         writer: &mut VmWriter,
     ) -> Result<BioWaiter>;
     pub fn read_blocks(&self, bid: Ext2Bid, nblocks: usize, writer: &mut VmWriter) -> Result<()>;
-    pub fn read_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result<BioWaiter>;
+    pub fn read_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result<BioWaiter>;
     pub fn write_blocks_async(
         &self,
         bid: Ext2Bid,
@@ -1741,7 +1741,7 @@ impl InodeImpl {
         reader: &mut VmReader,
     ) -> Result<BioWaiter>;
     pub fn write_blocks(&self, bid: Ext2Bid, nblocks: usize, reader: &mut VmReader) -> Result<()>;
-    pub fn write_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result<BioWaiter>;
+    pub fn write_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result<BioWaiter>;
 }

 /// Manages the inode blocks and block I/O operations.
@@ -1789,7 +1789,7 @@ impl InodeBlockManager {
         }
     }

-    pub fn read_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result<BioWaiter> {
+    pub fn read_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result<BioWaiter> {
         let mut bio_waiter = BioWaiter::new();

         for dev_range in DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)? {
@@ -1834,7 +1834,7 @@ impl InodeBlockManager {
         }
     }

-    pub fn write_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result<BioWaiter> {
+    pub fn write_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result<BioWaiter> {
         let mut bio_waiter = BioWaiter::new();

         for dev_range in DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)? {
@@ -1858,12 +1858,12 @@ impl InodeBlockManager {
 }

 impl PageCacheBackend for InodeBlockManager {
-    fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
+    fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
         let bid = idx as Ext2Bid;
         self.read_block_async(bid, frame)
     }

-    fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
+    fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
         let bid = idx as Ext2Bid;
         self.write_block_async(bid, frame)
     }
@@ -13,7 +13,7 @@ pub(super) use aster_block::{
 };
 pub(super) use aster_rights::Full;
 pub(super) use ostd::{
-    mm::{FrameAllocOptions, UntypedFrame, UntypedSegment, VmIo},
+    mm::{DynUFrame, DynUSegment, Frame, FrameAllocOptions, Segment, VmIo},
     sync::{RwMutex, RwMutexReadGuard, RwMutexWriteGuard},
 };
 pub(super) use static_assertions::const_assert;
@@ -11,7 +11,7 @@ use aster_rights::Full;
 use aster_util::slot_vec::SlotVec;
 use hashbrown::HashMap;
 use ostd::{
-    mm::{UntypedFrame, VmIo},
+    mm::{DynUFrame, UntypedMem, VmIo},
     sync::{PreemptDisabled, RwLockWriteGuard},
 };

@@ -484,7 +484,7 @@ impl RamInode {
 }

 impl PageCacheBackend for RamInode {
-    fn read_page_async(&self, _idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
+    fn read_page_async(&self, _idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
         // Initially, any block/page in a RamFs inode contains all zeros
         frame
             .writer()
@@ -494,7 +494,7 @@ impl PageCacheBackend for RamInode {
         Ok(BioWaiter::new())
     }

-    fn write_page_async(&self, _idx: usize, _frame: &UntypedFrame) -> Result<BioWaiter> {
+    fn write_page_async(&self, _idx: usize, _frame: &DynUFrame) -> Result<BioWaiter> {
         // do nothing
         Ok(BioWaiter::new())
     }
@@ -8,7 +8,7 @@ use align_ext::AlignExt;
 use aster_block::bio::{BioStatus, BioWaiter};
 use aster_rights::Full;
 use lru::LruCache;
-use ostd::mm::{FrameAllocOptions, UntypedFrame, VmIo};
+use ostd::mm::{DynUFrame, Frame, FrameAllocOptions, VmIo};

 use crate::{
     prelude::*,
@@ -305,7 +305,7 @@ impl ReadaheadState {
         };
         for async_idx in window.readahead_range() {
             let mut async_page = Page::alloc()?;
-            let pg_waiter = backend.read_page_async(async_idx, async_page.frame())?;
+            let pg_waiter = backend.read_page_async(async_idx, async_page.frame().into())?;
             if pg_waiter.nreqs() > 0 {
                 self.waiter.concat(pg_waiter);
             } else {
@@ -361,7 +361,7 @@ impl PageCacheManager {
         for idx in page_idx_range.start..page_idx_range.end {
             if let Some(page) = pages.peek(&idx) {
                 if *page.state() == PageState::Dirty && idx < backend_npages {
-                    let waiter = backend.write_page_async(idx, page.frame())?;
+                    let waiter = backend.write_page_async(idx, page.frame().into())?;
                     bio_waiter.concat(waiter);
                 }
             }
@@ -381,7 +381,7 @@ impl PageCacheManager {
         Ok(())
     }

-    fn ondemand_readahead(&self, idx: usize) -> Result<UntypedFrame> {
+    fn ondemand_readahead(&self, idx: usize) -> Result<DynUFrame> {
         let mut pages = self.pages.lock();
         let mut ra_state = self.ra_state.lock();
         let backend = self.backend();
@@ -410,7 +410,7 @@ impl PageCacheManager {
         // Conducts the sync read operation.
         let page = if idx < backend.npages() {
             let mut page = Page::alloc()?;
-            backend.read_page(idx, page.frame())?;
+            backend.read_page(idx, page.frame().into())?;
             page.set_state(PageState::UpToDate);
             page
         } else {
@@ -425,7 +425,7 @@ impl PageCacheManager {
             ra_state.conduct_readahead(&mut pages, backend)?;
         }
         ra_state.set_prev_page(idx);
-        Ok(frame)
+        Ok(frame.into())
     }
 }

@@ -438,7 +438,7 @@ impl Debug for PageCacheManager {
 }

 impl Pager for PageCacheManager {
-    fn commit_page(&self, idx: usize) -> Result<UntypedFrame> {
+    fn commit_page(&self, idx: usize) -> Result<DynUFrame> {
         self.ondemand_readahead(idx)
     }

@@ -461,7 +461,7 @@ impl Pager for PageCacheManager {
                 return Ok(());
             };
             if idx < backend.npages() {
-                backend.write_page(idx, page.frame())?;
+                backend.write_page(idx, page.frame().into())?;
             }
         }
     }
@@ -469,25 +469,31 @@ impl Pager for PageCacheManager {
         Ok(())
     }

-    fn commit_overwrite(&self, idx: usize) -> Result<UntypedFrame> {
+    fn commit_overwrite(&self, idx: usize) -> Result<DynUFrame> {
         if let Some(page) = self.pages.lock().get(&idx) {
-            return Ok(page.frame.clone());
+            return Ok(page.frame.clone().into());
         }

         let page = Page::alloc_zero()?;
-        Ok(self.pages.lock().get_or_insert(idx, || page).frame.clone())
+        Ok(self
+            .pages
+            .lock()
+            .get_or_insert(idx, || page)
+            .frame
+            .clone()
+            .into())
     }
 }

 #[derive(Debug)]
 struct Page {
-    frame: UntypedFrame,
+    frame: Frame<()>,
     state: PageState,
 }

 impl Page {
     pub fn alloc() -> Result<Self> {
-        let frame = FrameAllocOptions::new(1).uninit(true).alloc_single()?;
+        let frame = FrameAllocOptions::new().zeroed(false).alloc_frame()?;
         Ok(Self {
             frame,
             state: PageState::Uninit,
@@ -495,14 +501,14 @@ impl Page {
     }

     pub fn alloc_zero() -> Result<Self> {
-        let frame = FrameAllocOptions::new(1).alloc_single()?;
+        let frame = FrameAllocOptions::new().alloc_frame()?;
         Ok(Self {
             frame,
             state: PageState::Dirty,
         })
     }

-    pub fn frame(&self) -> &UntypedFrame {
+    pub fn frame(&self) -> &Frame<()> {
         &self.frame
     }

@@ -531,16 +537,16 @@ enum PageState {
 /// This trait represents the backend for the page cache.
 pub trait PageCacheBackend: Sync + Send {
     /// Reads a page from the backend asynchronously.
-    fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter>;
+    fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter>;
     /// Writes a page to the backend asynchronously.
-    fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter>;
+    fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter>;
     /// Returns the number of pages in the backend.
     fn npages(&self) -> usize;
 }

 impl dyn PageCacheBackend {
     /// Reads a page from the backend synchronously.
-    fn read_page(&self, idx: usize, frame: &UntypedFrame) -> Result<()> {
+    fn read_page(&self, idx: usize, frame: &DynUFrame) -> Result<()> {
         let waiter = self.read_page_async(idx, frame)?;
         match waiter.wait() {
             Some(BioStatus::Complete) => Ok(()),
@@ -548,7 +554,7 @@ impl dyn PageCacheBackend {
         }
     }
     /// Writes a page to the backend synchronously.
-    fn write_page(&self, idx: usize, frame: &UntypedFrame) -> Result<()> {
+    fn write_page(&self, idx: usize, frame: &DynUFrame) -> Result<()> {
         let waiter = self.write_page_async(idx, frame)?;
         match waiter.wait() {
             Some(BioStatus::Complete) => Ok(()),
@@ -20,7 +20,7 @@ use core::{

 use align_ext::AlignExt;
 use aster_rights::Full;
-use ostd::mm::{vm_space::VmItem, VmIo, VmSpace, MAX_USERSPACE_VADDR};
+use ostd::mm::{vm_space::VmItem, UntypedMem, VmIo, VmSpace, MAX_USERSPACE_VADDR};

 use self::aux_vec::{AuxKey, AuxVec};
 use crate::{
@@ -306,7 +306,7 @@ fn map_segment_vmo(
             new_frame
         };
         let head_idx = segment_offset / PAGE_SIZE;
-        segment_vmo.replace(new_frame, head_idx)?;
+        segment_vmo.replace(new_frame.into(), head_idx)?;
     }

     // Tail padding.
@@ -324,7 +324,7 @@ fn map_segment_vmo(
         };

         let tail_idx = (segment_offset + tail_padding_offset) / PAGE_SIZE;
-        segment_vmo.replace(new_frame, tail_idx).unwrap();
+        segment_vmo.replace(new_frame.into(), tail_idx).unwrap();
     }

     let perms = parse_segment_perm(program_header.flags);
@@ -8,12 +8,12 @@ use core::{

 use align_ext::AlignExt;
 use inherit_methods_macro::inherit_methods;
-use ostd::mm::{FrameAllocOptions, UntypedSegment, VmIo};
+use ostd::mm::{FrameAllocOptions, Segment, UntypedMem, VmIo};

 use super::{MultiRead, MultiWrite};
 use crate::prelude::*;

-/// A lock-free SPSC FIFO ring buffer backed by a [`UntypedSegment`].
+/// A lock-free SPSC FIFO ring buffer backed by a [`Segment<()>`].
 ///
 /// The ring buffer supports `push`/`pop` any `T: Pod` items, also
 /// supports `write`/`read` any bytes data based on [`VmReader`]/[`VmWriter`].
@@ -46,7 +46,7 @@ use crate::prelude::*;
 /// }
 /// ```
 pub struct RingBuffer<T> {
-    segment: UntypedSegment,
+    segment: Segment<()>,
     capacity: usize,
     tail: AtomicUsize,
     head: AtomicUsize,
@@ -78,9 +78,9 @@ impl<T> RingBuffer<T> {
             "capacity must be a power of two"
         );
         let nframes = capacity.saturating_mul(Self::T_SIZE).align_up(PAGE_SIZE) / PAGE_SIZE;
-        let segment = FrameAllocOptions::new(nframes)
-            .uninit(true)
-            .alloc_contiguous()
+        let segment = FrameAllocOptions::new()
+            .zeroed(false)
+            .alloc_segment(nframes)
             .unwrap();
         Self {
             segment,
@@ -21,7 +21,7 @@ use aster_rights::Rights;
 use aster_time::{read_monotonic_time, Instant};
 use aster_util::coeff::Coeff;
 use ostd::{
-    mm::{UntypedFrame, VmIo, PAGE_SIZE},
+    mm::{DynUFrame, VmIo, PAGE_SIZE},
     sync::SpinLock,
     Pod,
 };
@@ -199,9 +199,9 @@ struct Vdso {
     data: SpinLock<VdsoData>,
     /// The VMO of the entire VDSO, including the library text and the VDSO data.
     vmo: Arc<Vmo>,
-    /// The `UntypedFrame` that contains the VDSO data. This frame is contained in and
+    /// The `DynUFrame` that contains the VDSO data. This frame is contained in and
     /// will not be removed from the VDSO VMO.
-    data_frame: UntypedFrame,
+    data_frame: DynUFrame,
 }

 /// A `SpinLock` for the `seq` field in `VdsoData`.
@@ -1,12 +1,14 @@
 // SPDX-License-Identifier: MPL-2.0

-use ostd::mm::{FrameAllocOptions, UntypedFrame};
+use ostd::mm::{DynUFrame, Frame, FrameAllocOptions, UntypedMem};

 use crate::prelude::*;

-/// Creates a new `UntypedFrame` and initializes it with the contents of the `src`.
-pub fn duplicate_frame(src: &UntypedFrame) -> Result<UntypedFrame> {
-    let new_frame = FrameAllocOptions::new(1).uninit(true).alloc_single()?;
-    new_frame.copy_from(src);
+/// Creates a new `Frame<()>` and initializes it with the contents of the `src`.
+///
+/// Note that it only duplicates the contents not the metadata.
+pub fn duplicate_frame(src: &DynUFrame) -> Result<Frame<()>> {
+    let new_frame = FrameAllocOptions::new().zeroed(false).alloc_frame()?;
+    new_frame.writer().write(&mut src.reader());
     Ok(new_frame)
 }
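The rewritten `duplicate_frame` above copies through the `reader()`/`writer()` pair (from the `UntypedMem` trait now in the imports) and returns a concrete `Frame<()>`. A hedged sketch of a caller, mirroring the `prepare_page` call sites in the hunks below — the helper name is hypothetical:

```rust
use ostd::mm::DynUFrame;

// Hypothetical COW-style call site: duplicate a committed page's contents,
// then erase the fresh `Frame<()>` back to a `DynUFrame` for mapping.
fn cow_copy(src: &DynUFrame) -> Result<DynUFrame> {
    let copy = duplicate_frame(src)?; // contents copied; metadata is fresh
    Ok(copy.into()) // the callers below likewise pass `duplicate_frame(&page)?.into()`
}
```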
|
@ -8,8 +8,8 @@ use core::{
|
|||||||
|
|
||||||
use align_ext::AlignExt;
|
use align_ext::AlignExt;
|
||||||
use ostd::mm::{
|
use ostd::mm::{
|
||||||
tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, FrameAllocOptions, PageFlags, PageProperty,
|
tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, DynUFrame, FrameAllocOptions, PageFlags,
|
||||||
UntypedFrame, VmSpace,
|
PageProperty, VmSpace,
|
||||||
};
|
};
|
||||||
|
|
||||||
use super::interval_set::Interval;
|
use super::interval_set::Interval;
|
||||||
@@ -186,7 +186,7 @@ impl VmMapping {
                } else {
                    let new_frame = duplicate_frame(&frame)?;
                    prop.flags |= new_flags;
-                   cursor.map(new_frame, prop);
+                   cursor.map(new_frame.into(), prop);
                }
            }
            VmItem::NotMapped { .. } => {
@@ -216,17 +216,17 @@ impl VmMapping {
        Ok(())
    }

-   fn prepare_page(&self, page_fault_addr: Vaddr, write: bool) -> Result<(UntypedFrame, bool)> {
+   fn prepare_page(&self, page_fault_addr: Vaddr, write: bool) -> Result<(DynUFrame, bool)> {
        let mut is_readonly = false;
        let Some(vmo) = &self.vmo else {
-           return Ok((FrameAllocOptions::new(1).alloc_single()?, is_readonly));
+           return Ok((FrameAllocOptions::new().alloc_frame()?.into(), is_readonly));
        };

        let page_offset = page_fault_addr.align_down(PAGE_SIZE) - self.map_to_addr;
        let Ok(page) = vmo.get_committed_frame(page_offset) else {
            if !self.is_shared {
                // The page index is outside the VMO. This is only allowed in private mapping.
-               return Ok((FrameAllocOptions::new(1).alloc_single()?, is_readonly));
+               return Ok((FrameAllocOptions::new().alloc_frame()?.into(), is_readonly));
            } else {
                return_errno_with_message!(
                    Errno::EFAULT,
@@ -237,7 +237,7 @@ impl VmMapping {

        if !self.is_shared && write {
            // Write access to private VMO-backed mapping. Performs COW directly.
-           Ok((duplicate_frame(&page)?, is_readonly))
+           Ok((duplicate_frame(&page)?.into(), is_readonly))
        } else {
            // Operations to shared mapping or read access to private VMO-backed mapping.
            // If read access to private VMO-backed mapping triggers a page fault,
@@ -264,7 +264,7 @@ impl VmMapping {

        let vm_perms = self.perms - VmPerms::WRITE;
        let mut cursor = vm_space.cursor_mut(&(start_addr..end_addr))?;
-       let operate = move |commit_fn: &mut dyn FnMut() -> Result<UntypedFrame>| {
+       let operate = move |commit_fn: &mut dyn FnMut() -> Result<DynUFrame>| {
            if let VmItem::NotMapped { .. } = cursor.query().unwrap() {
                // We regard all the surrounding pages as accessed, no matter
                // if it is really so. Then the hardware won't bother to update
@@ -432,7 +432,7 @@ impl MappedVmo {
    ///
    /// If the VMO has not committed a frame at this index, it will commit
    /// one first and return it.
-   fn get_committed_frame(&self, page_offset: usize) -> Result<UntypedFrame> {
+   fn get_committed_frame(&self, page_offset: usize) -> Result<DynUFrame> {
        debug_assert!(page_offset < self.range.len());
        debug_assert!(page_offset % PAGE_SIZE == 0);
        self.vmo.commit_page(self.range.start + page_offset)
@@ -444,7 +444,7 @@ impl MappedVmo {
    /// perform other operations.
    fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()>
    where
-       F: FnMut(&mut dyn FnMut() -> Result<UntypedFrame>) -> Result<()>,
+       F: FnMut(&mut dyn FnMut() -> Result<DynUFrame>) -> Result<()>,
    {
        debug_assert!(range.start < self.range.len());
        debug_assert!(range.end <= self.range.len());
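
The pattern in the hunks above: `VmSpace` cursors now accept the type-erased `DynUFrame`, so typed frames produced by `duplicate_frame` or the allocator are converted with `.into()` at the call site. A minimal sketch of that conversion — assumed caller code, not part of this commit; `cursor` and `prop` are as in the surrounding context:

    // Sketch: allocate an untyped frame and map it through a cursor.
    let frame = FrameAllocOptions::new().alloc_frame()?; // Frame<()>
    let dyn_frame: DynUFrame = frame.into(); // erase the metadata type
    cursor.map(dyn_frame, prop);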

@@ -3,14 +3,14 @@

 use core::ops::Range;

 use aster_rights::{Rights, TRights};
-use ostd::mm::{UntypedFrame, VmIo};
+use ostd::mm::{DynUFrame, VmIo};

 use super::{CommitFlags, Vmo, VmoRightsOp};
 use crate::prelude::*;

 impl Vmo<Rights> {
     /// Commits a page at specific offset
-    pub fn commit_page(&self, offset: usize) -> Result<UntypedFrame> {
+    pub fn commit_page(&self, offset: usize) -> Result<DynUFrame> {
         self.check_rights(Rights::WRITE)?;
         self.0.commit_page(offset)
     }
@@ -39,7 +39,7 @@ impl Vmo<Rights> {
     /// perform other operations.
     pub(in crate::vm) fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()>
     where
-        F: FnMut(&mut dyn FnMut() -> Result<UntypedFrame>) -> Result<()>,
+        F: FnMut(&mut dyn FnMut() -> Result<DynUFrame>) -> Result<()>,
     {
         self.check_rights(Rights::WRITE)?;
         self.0
@@ -112,7 +112,7 @@ impl Vmo<Rights> {
     /// # Access rights
     ///
     /// The method requires the Write right.
-    pub fn replace(&self, page: UntypedFrame, page_idx: usize) -> Result<()> {
+    pub fn replace(&self, page: DynUFrame, page_idx: usize) -> Result<()> {
         self.check_rights(Rights::WRITE)?;
         self.0.replace(page, page_idx)
     }

@@ -11,7 +11,7 @@ use align_ext::AlignExt;
 use aster_rights::Rights;
 use ostd::{
     collections::xarray::{CursorMut, XArray},
-    mm::{FrameAllocOptions, UntypedFrame, VmReader, VmWriter},
+    mm::{DynUFrame, FrameAllocOptions, UntypedMem, VmReader, VmWriter},
 };

 use crate::prelude::*;
@@ -66,8 +66,8 @@ pub use pager::Pager;
 /// # Implementation
 ///
 /// `Vmo` provides high-level APIs for address space management by wrapping
-/// around its low-level counterpart [`ostd::mm::UntypedFrame`].
-/// Compared with `UntypedFrame`,
+/// around its low-level counterpart [`ostd::mm::DynUFrame`].
+/// Compared with `DynUFrame`,
 /// `Vmo` is easier to use (by offering more powerful APIs) and
 /// harder to misuse (thanks to its nature of being capability).
 #[derive(Debug)]
@@ -125,12 +125,12 @@ bitflags! {
     }
 }

-/// `Pages` is the struct that manages the `UntypedFrame`s stored in `Vmo_`.
+/// `Pages` is the struct that manages the `DynUFrame`s stored in `Vmo_`.
 pub(super) enum Pages {
     /// `Pages` that cannot be resized. This kind of `Pages` will have a constant size.
-    Nonresizable(Mutex<XArray<UntypedFrame>>, usize),
+    Nonresizable(Mutex<XArray<DynUFrame>>, usize),
     /// `Pages` that can be resized and have a variable size.
-    Resizable(Mutex<(XArray<UntypedFrame>, usize)>),
+    Resizable(Mutex<(XArray<DynUFrame>, usize)>),
 }

 impl Clone for Pages {
@@ -149,7 +149,7 @@ impl Clone for Pages {
 impl Pages {
     fn with<R, F>(&self, func: F) -> R
     where
-        F: FnOnce(&mut XArray<UntypedFrame>, usize) -> R,
+        F: FnOnce(&mut XArray<DynUFrame>, usize) -> R,
     {
         match self {
             Self::Nonresizable(pages, size) => func(&mut pages.lock(), *size),
@@ -201,28 +201,28 @@ impl CommitFlags {
 }

 impl Vmo_ {
-    /// Prepares a new `UntypedFrame` for the target index in pages, returns this new frame.
-    fn prepare_page(&self, page_idx: usize) -> Result<UntypedFrame> {
+    /// Prepares a new `DynUFrame` for the target index in pages, returns this new frame.
+    fn prepare_page(&self, page_idx: usize) -> Result<DynUFrame> {
         match &self.pager {
-            None => Ok(FrameAllocOptions::new(1).alloc_single()?),
+            None => Ok(FrameAllocOptions::new().alloc_frame()?.into()),
             Some(pager) => pager.commit_page(page_idx),
         }
     }

-    /// Prepares a new `UntypedFrame` for the target index in the VMO, returns this new frame.
-    fn prepare_overwrite(&self, page_idx: usize) -> Result<UntypedFrame> {
+    /// Prepares a new `DynUFrame` for the target index in the VMO, returns this new frame.
+    fn prepare_overwrite(&self, page_idx: usize) -> Result<DynUFrame> {
         if let Some(pager) = &self.pager {
             pager.commit_overwrite(page_idx)
         } else {
-            Ok(FrameAllocOptions::new(1).alloc_single()?)
+            Ok(FrameAllocOptions::new().alloc_frame()?.into())
         }
     }

     fn commit_with_cursor(
         &self,
-        cursor: &mut CursorMut<'_, UntypedFrame>,
+        cursor: &mut CursorMut<'_, DynUFrame>,
         commit_flags: CommitFlags,
-    ) -> Result<UntypedFrame> {
+    ) -> Result<DynUFrame> {
         let new_page = {
             if let Some(committed_page) = cursor.load() {
                 // Fast path: return the page directly.
@@ -241,7 +241,7 @@ impl Vmo_ {

     /// Commits the page corresponding to the target offset in the VMO and return that page.
     /// If the current offset has already been committed, the page will be returned directly.
-    pub fn commit_page(&self, offset: usize) -> Result<UntypedFrame> {
+    pub fn commit_page(&self, offset: usize) -> Result<DynUFrame> {
         let page_idx = offset / PAGE_SIZE;
         self.pages.with(|pages, size| {
             if offset >= size {
@@ -279,7 +279,7 @@ impl Vmo_ {
         commit_flags: CommitFlags,
     ) -> Result<()>
     where
-        F: FnMut(&mut dyn FnMut() -> Result<UntypedFrame>) -> Result<()>,
+        F: FnMut(&mut dyn FnMut() -> Result<DynUFrame>) -> Result<()>,
     {
         self.pages.with(|pages, size| {
             if range.end > size {
@@ -315,7 +315,7 @@ impl Vmo_ {
         let read_range = offset..(offset + read_len);
         let mut read_offset = offset % PAGE_SIZE;

-        let read = move |commit_fn: &mut dyn FnMut() -> Result<UntypedFrame>| {
+        let read = move |commit_fn: &mut dyn FnMut() -> Result<DynUFrame>| {
             let frame = commit_fn()?;
             frame.reader().skip(read_offset).read_fallible(writer)?;
             read_offset = 0;
@@ -331,7 +331,7 @@ impl Vmo_ {
         let write_range = offset..(offset + write_len);
         let mut write_offset = offset % PAGE_SIZE;

-        let mut write = move |commit_fn: &mut dyn FnMut() -> Result<UntypedFrame>| {
+        let mut write = move |commit_fn: &mut dyn FnMut() -> Result<DynUFrame>| {
             let frame = commit_fn()?;
             frame.writer().skip(write_offset).write_fallible(reader)?;
             write_offset = 0;
@@ -401,7 +401,7 @@ impl Vmo_ {
         Ok(())
     }

-    fn decommit_pages(&self, pages: &mut XArray<UntypedFrame>, range: Range<usize>) -> Result<()> {
+    fn decommit_pages(&self, pages: &mut XArray<DynUFrame>, range: Range<usize>) -> Result<()> {
         let page_idx_range = get_page_idx_range(&range);
         let mut cursor = pages.cursor_mut(page_idx_range.start as u64);
         for page_idx in page_idx_range {
@@ -426,7 +426,7 @@ impl Vmo_ {
         self.flags
     }

-    fn replace(&self, page: UntypedFrame, page_idx: usize) -> Result<()> {
+    fn replace(&self, page: DynUFrame, page_idx: usize) -> Result<()> {
         self.pages.with(|pages, size| {
             if page_idx >= size / PAGE_SIZE {
                 return_errno_with_message!(Errno::EINVAL, "the page index is outside of the vmo");
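
Taken together, the `Vmo_` changes mean every commit path now hands back a `DynUFrame`, whose bytes are accessed through the `UntypedMem` reader/writer APIs imported above. A hedged sketch of a caller of `operate_on_range` — illustrative only; `vmo` and the surrounding error type are assumptions:

    // Sketch: touch (commit) every page of the first 16 KiB of a VMO.
    vmo.operate_on_range(&(0..4 * PAGE_SIZE), |commit_fn| {
        let frame: DynUFrame = commit_fn()?; // committed on demand
        debug_assert_eq!(frame.size(), PAGE_SIZE);
        Ok(())
    })?;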

@@ -8,7 +8,7 @@ use align_ext::AlignExt;
 use aster_rights::{Rights, TRightSet, TRights};
 use ostd::{
     collections::xarray::XArray,
-    mm::{FrameAllocOptions, UntypedFrame},
+    mm::{DynUFrame, DynUSegment, FrameAllocOptions},
 };

 use super::{Pager, Pages, Vmo, VmoFlags};
@@ -137,13 +137,11 @@ fn alloc_vmo_(size: usize, flags: VmoFlags, pager: Option<Arc<dyn Pager>>) -> Re
     })
 }

-fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<UntypedFrame>> {
+fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<DynUFrame>> {
     if flags.contains(VmoFlags::CONTIGUOUS) {
         // if the vmo is continuous, we need to allocate frames for the vmo
         let frames_num = size / PAGE_SIZE;
-        let segment = FrameAllocOptions::new(frames_num)
-            .is_contiguous(true)
-            .alloc_contiguous()?;
+        let segment: DynUSegment = FrameAllocOptions::new().alloc_segment(frames_num)?.into();
         let mut committed_pages = XArray::new();
         let mut cursor = committed_pages.cursor_mut(0);
         for frame in segment {
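
`committed_pages_if_continuous` also relies on a property worth calling out: a `DynUSegment` iterates into per-page `DynUFrame`s, which is what the `for frame in segment` loop above consumes. The same idiom as a standalone sketch (assumed caller code):

    // Sketch: allocate four contiguous pages, then visit them frame by frame.
    let segment: DynUSegment = FrameAllocOptions::new().alloc_segment(4)?.into();
    for frame in segment {
        // each `frame` is a `DynUFrame` covering one page of the range
    }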

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0

-use ostd::mm::UntypedFrame;
+use ostd::mm::DynUFrame;

 use crate::prelude::*;

@@ -26,7 +26,7 @@ pub trait Pager: Send + Sync {
     /// whatever frame that may or may not be the same as the last time.
     ///
     /// It is up to the pager to decide the range of valid indices.
-    fn commit_page(&self, idx: usize) -> Result<UntypedFrame>;
+    fn commit_page(&self, idx: usize) -> Result<DynUFrame>;

     /// Notify the pager that the frame at a specified index has been updated.
     ///
@@ -54,5 +54,5 @@ pub trait Pager: Send + Sync {
     /// Ask the pager to provide a frame at a specified index.
     /// Notify the pager that the frame will be fully overwritten soon, so pager can
     /// choose not to initialize it.
-    fn commit_overwrite(&self, idx: usize) -> Result<UntypedFrame>;
+    fn commit_overwrite(&self, idx: usize) -> Result<DynUFrame>;
 }
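
Under the new signatures, both `Pager` commit methods return `DynUFrame`; the two flavors differ only in whether zeroing can be skipped. A hedged sketch with hypothetical helpers (not from this commit):

    // A pager-provided frame that must be zeroed before exposure.
    fn commit_zeroed() -> Result<DynUFrame> {
        Ok(FrameAllocOptions::new().alloc_frame()?.into())
    }

    // The caller promises to overwrite every byte, so the zero fill is skipped.
    fn commit_for_overwrite() -> Result<DynUFrame> {
        Ok(FrameAllocOptions::new().zeroed(false).alloc_frame()?.into())
    }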

@@ -4,14 +4,14 @@ use core::ops::Range;

 use aster_rights::{Dup, Rights, TRightSet, TRights, Write};
 use aster_rights_proc::require;
-use ostd::mm::{UntypedFrame, VmIo};
+use ostd::mm::{DynUFrame, VmIo};

 use super::{CommitFlags, Vmo, VmoRightsOp};
 use crate::prelude::*;

 impl<R: TRights> Vmo<TRightSet<R>> {
     /// Commits a page at specific offset.
-    pub fn commit_page(&self, offset: usize) -> Result<UntypedFrame> {
+    pub fn commit_page(&self, offset: usize) -> Result<DynUFrame> {
         self.check_rights(Rights::WRITE)?;
         self.0.commit_page(offset)
     }
@@ -41,7 +41,7 @@ impl<R: TRights> Vmo<TRightSet<R>> {
     #[require(R > Write)]
     pub(in crate::vm) fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()>
     where
-        F: FnMut(&mut dyn FnMut() -> Result<UntypedFrame>) -> Result<()>,
+        F: FnMut(&mut dyn FnMut() -> Result<DynUFrame>) -> Result<()>,
     {
         self.0
             .operate_on_range(range, operate, CommitFlags::empty())
@@ -114,7 +114,7 @@ impl<R: TRights> Vmo<TRightSet<R>> {
     ///
     /// The method requires the Write right.
     #[require(R > Write)]
-    pub fn replace(&self, page: UntypedFrame, page_idx: usize) -> Result<()> {
+    pub fn replace(&self, page: DynUFrame, page_idx: usize) -> Result<()> {
         self.0.replace(page, page_idx)
     }


@@ -36,11 +36,11 @@ pub fn main() {
 fn create_user_space(program: &[u8]) -> UserSpace {
     let nbytes = program.len().align_up(PAGE_SIZE);
     let user_pages = {
-        let segment = FrameAllocOptions::new(nbytes / PAGE_SIZE)
-            .alloc_contiguous()
+        let segment = FrameAllocOptions::new()
+            .alloc_segment(nbytes / PAGE_SIZE)
             .unwrap();
         // Physical memory pages can be only accessed
-        // via the `UntypedFrame` or `UntypedSegment` abstraction.
+        // via the `DynUFrame` or `DynUSegment` abstraction.
         segment.write_bytes(0, program).unwrap();
         segment
     };
@@ -54,7 +54,7 @@ fn create_user_space(program: &[u8]) -> UserSpace {
     let mut cursor = vm_space.cursor_mut(&(MAP_ADDR..MAP_ADDR + nbytes)).unwrap();
     let map_prop = PageProperty::new(PageFlags::RWX, CachePolicy::Writeback);
     for frame in user_pages {
-        cursor.map(frame, map_prop);
+        cursor.map(frame.into(), map_prop);
     }
     drop(cursor);
     Arc::new(vm_space)

@@ -15,7 +15,7 @@ use crate::{
         dma::Daddr,
         page_prop::{CachePolicy, PageProperty, PrivilegedPageFlags as PrivFlags},
         page_table::{PageTableError, PageTableItem},
-        FrameAllocOptions, Paddr, PageFlags, PageTable, UntypedFrame, VmIo, PAGE_SIZE,
+        Frame, FrameAllocOptions, Paddr, PageFlags, PageTable, VmIo, PAGE_SIZE,
     },
 };

@@ -38,7 +38,7 @@ impl RootEntry {

 pub struct RootTable {
     /// Total 256 bus, each entry is 128 bits.
-    root_frame: UntypedFrame,
+    root_frame: Frame<()>,
     // TODO: Use radix tree instead.
     context_tables: BTreeMap<Paddr, ContextTable>,
 }
@@ -57,7 +57,7 @@ impl RootTable {

     pub(super) fn new() -> Self {
         Self {
-            root_frame: FrameAllocOptions::new(1).alloc_single().unwrap(),
+            root_frame: FrameAllocOptions::new().alloc_frame().unwrap(),
             context_tables: BTreeMap::new(),
         }
     }
@@ -236,14 +236,14 @@ pub enum AddressWidth {

 pub struct ContextTable {
     /// Total 32 devices, each device has 8 functions.
-    entries_frame: UntypedFrame,
+    entries_frame: Frame<()>,
     page_tables: BTreeMap<Paddr, PageTable<DeviceMode, PageTableEntry, PagingConsts>>,
 }

 impl ContextTable {
     fn new() -> Self {
         Self {
-            entries_frame: FrameAllocOptions::new(1).alloc_single().unwrap(),
+            entries_frame: FrameAllocOptions::new().alloc_frame().unwrap(),
             page_tables: BTreeMap::new(),
         }
     }

@@ -9,7 +9,7 @@ use int_to_c_enum::TryFromInt;

 use super::IrtEntryHandle;
 use crate::{
-    mm::{paddr_to_vaddr, FrameAllocOptions, UntypedSegment, PAGE_SIZE},
+    mm::{paddr_to_vaddr, FrameAllocOptions, Segment, PAGE_SIZE},
     sync::{LocalIrqDisabled, SpinLock},
 };

@@ -23,7 +23,7 @@ enum ExtendedInterruptMode {
 pub struct IntRemappingTable {
     size: u16,
     extended_interrupt_mode: ExtendedInterruptMode,
-    frames: UntypedSegment,
+    frames: Segment<()>,
     /// The global allocator for Interrupt remapping entry.
     allocator: SpinLock<IdAlloc, LocalIrqDisabled>,
     handles: Vec<Arc<SpinLock<IrtEntryHandle, LocalIrqDisabled>>>,
@@ -35,12 +35,11 @@ impl IntRemappingTable {
         Some(self.handles.get(id).unwrap().clone())
     }

-    /// Creates an Interrupt Remapping Table with one UntypedFrame (default).
+    /// Creates an Interrupt Remapping Table with one DynUFrame (default).
     pub(super) fn new() -> Self {
         const DEFAULT_PAGES: usize = 1;
-        let segment = FrameAllocOptions::new(DEFAULT_PAGES)
-            .is_contiguous(true)
-            .alloc_contiguous()
+        let segment = FrameAllocOptions::new()
+            .alloc_segment(DEFAULT_PAGES)
             .unwrap();
         let entry_number = (DEFAULT_PAGES * PAGE_SIZE / size_of::<u128>()) as u16;

@@ -3,12 +3,12 @@
 use core::mem::size_of;

 use crate::{
-    mm::{FrameAllocOptions, UntypedSegment, VmIo, PAGE_SIZE},
+    mm::{FrameAllocOptions, Segment, VmIo, PAGE_SIZE},
     prelude::Paddr,
 };

 pub struct Queue {
-    segment: UntypedSegment,
+    segment: Segment<()>,
     queue_size: usize,
     tail: usize,
 }
@@ -38,9 +38,8 @@ impl Queue {

     pub(super) fn new() -> Self {
         const DEFAULT_PAGES: usize = 1;
-        let segment = FrameAllocOptions::new(DEFAULT_PAGES)
-            .is_contiguous(true)
-            .alloc_contiguous()
+        let segment = FrameAllocOptions::new()
+            .alloc_segment(DEFAULT_PAGES)
             .unwrap();
         Self {
             segment,

@@ -10,11 +10,7 @@ use spin::Once;
 use crate::{
     arch::boot::smp::{bringup_all_aps, get_num_processors},
     cpu,
-    mm::{
-        frame::{self, Segment},
-        kspace::KernelMeta,
-        paddr_to_vaddr, PAGE_SIZE,
-    },
+    mm::{frame::Segment, kspace::KernelMeta, paddr_to_vaddr, FrameAllocOptions, PAGE_SIZE},
     task::Task,
 };

@@ -62,14 +58,17 @@ pub fn boot_all_aps() {
     AP_BOOT_INFO.call_once(|| {
         let mut per_ap_info = BTreeMap::new();
         // Use two pages to place stack pointers of all APs, thus support up to 1024 APs.
-        let boot_stack_array =
-            frame::allocator::alloc_contiguous(2 * PAGE_SIZE, |_| KernelMeta::default()).unwrap();
+        let boot_stack_array = FrameAllocOptions::new()
+            .zeroed(false)
+            .alloc_segment_with(2, |_| KernelMeta)
+            .unwrap();
         assert!(num_cpus < 1024);

         for ap in 1..num_cpus {
-            let boot_stack_pages =
-                frame::allocator::alloc_contiguous(AP_BOOT_STACK_SIZE, |_| KernelMeta::default())
-                    .unwrap();
+            let boot_stack_pages = FrameAllocOptions::new()
+                .zeroed(false)
+                .alloc_segment_with(AP_BOOT_STACK_SIZE / PAGE_SIZE, |_| KernelMeta)
+                .unwrap();
             let boot_stack_ptr = paddr_to_vaddr(boot_stack_pages.end_paddr());
             let stack_array_ptr = paddr_to_vaddr(boot_stack_array.start_paddr()) as *mut u64;
             // SAFETY: The `stack_array_ptr` is valid and aligned.
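
The boot-stack hunks also show the metadata-aware allocation path: `alloc_segment_with` takes a frame count (no longer a byte length, as the removed `alloc_contiguous` did) plus a closure mapping each frame's physical address to its metadata, here the kernel-reserved `KernelMeta`. The call shape as a sketch (`N_PAGES` is a placeholder, not a real constant):

    let stack = FrameAllocOptions::new()
        .zeroed(false) // boot stacks need no zero fill
        .alloc_segment_with(N_PAGES, |_paddr| KernelMeta)
        .unwrap();
    let stack_top = paddr_to_vaddr(stack.end_paddr());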

@@ -43,11 +43,7 @@ use spin::Once;

 use crate::{
     arch,
-    mm::{
-        frame::{self, Segment},
-        kspace::KernelMeta,
-        paddr_to_vaddr, PAGE_SIZE,
-    },
+    mm::{frame::Segment, kspace::KernelMeta, paddr_to_vaddr, FrameAllocOptions, PAGE_SIZE},
 };

 // These symbols are provided by the linker script.
@@ -99,7 +95,10 @@ pub unsafe fn init_on_bsp() {
     for _ in 1..num_cpus {
         let ap_pages = {
             let nbytes = (bsp_end_va - bsp_base_va).align_up(PAGE_SIZE);
-            frame::allocator::alloc_contiguous(nbytes, |_| KernelMeta::default()).unwrap()
+            FrameAllocOptions::new()
+                .zeroed(false)
+                .alloc_segment_with(nbytes / PAGE_SIZE, |_| KernelMeta)
+                .unwrap()
         };
         let ap_pages_ptr = paddr_to_vaddr(ap_pages.start_paddr()) as *mut u8;

@@ -13,7 +13,8 @@ use crate::{
         io::VmIoOnce,
         kspace::{paddr_to_vaddr, KERNEL_PAGE_TABLE},
         page_prop::CachePolicy,
-        HasPaddr, Infallible, Paddr, PodOnce, UntypedSegment, VmIo, VmReader, VmWriter, PAGE_SIZE,
+        DynUSegment, HasPaddr, Infallible, Paddr, PodOnce, UntypedMem, VmIo, VmReader, VmWriter,
+        PAGE_SIZE,
     },
     prelude::*,
 };
@@ -38,27 +39,27 @@ pub struct DmaCoherent {

 #[derive(Debug)]
 struct DmaCoherentInner {
-    vm_segment: UntypedSegment,
+    segment: DynUSegment,
     start_daddr: Daddr,
     is_cache_coherent: bool,
 }

 impl DmaCoherent {
-    /// Creates a coherent DMA mapping backed by `vm_segment`.
+    /// Creates a coherent DMA mapping backed by `segment`.
     ///
     /// The `is_cache_coherent` argument specifies whether
     /// the target device that the DMA mapping is prepared for
     /// can access the main memory in a CPU cache coherent way
     /// or not.
     ///
-    /// The method fails if any part of the given `vm_segment`
+    /// The method fails if any part of the given `segment`
     /// already belongs to a DMA mapping.
     pub fn map(
-        vm_segment: UntypedSegment,
+        segment: DynUSegment,
         is_cache_coherent: bool,
     ) -> core::result::Result<Self, DmaError> {
-        let frame_count = vm_segment.nbytes() / PAGE_SIZE;
-        let start_paddr = vm_segment.start_paddr();
+        let frame_count = segment.size() / PAGE_SIZE;
+        let start_paddr = segment.start_paddr();
         if !check_and_insert_dma_mapping(start_paddr, frame_count) {
             return Err(DmaError::AlreadyMapped);
         }
@@ -93,7 +94,7 @@ impl DmaCoherent {
             DmaType::Iommu => {
                 for i in 0..frame_count {
                     let paddr = start_paddr + (i * PAGE_SIZE);
-                    // SAFETY: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `vm_segment`.
+                    // SAFETY: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `segment`.
                     unsafe {
                         iommu::map(paddr as Daddr, paddr).unwrap();
                     }
@@ -103,7 +104,7 @@ impl DmaCoherent {
         };
         Ok(Self {
             inner: Arc::new(DmaCoherentInner {
-                vm_segment,
+                segment,
                 start_daddr,
                 is_cache_coherent,
             }),
@@ -112,7 +113,7 @@ impl DmaCoherent {

     /// Returns the number of bytes in the DMA mapping.
     pub fn nbytes(&self) -> usize {
-        self.inner.vm_segment.nbytes()
+        self.inner.segment.size()
     }
 }

@@ -123,16 +124,16 @@ impl HasDaddr for DmaCoherent {
 }

 impl Deref for DmaCoherent {
-    type Target = UntypedSegment;
+    type Target = DynUSegment;
     fn deref(&self) -> &Self::Target {
-        &self.inner.vm_segment
+        &self.inner.segment
     }
 }

 impl Drop for DmaCoherentInner {
     fn drop(&mut self) {
-        let frame_count = self.vm_segment.nbytes() / PAGE_SIZE;
-        let start_paddr = self.vm_segment.start_paddr();
+        let frame_count = self.segment.size() / PAGE_SIZE;
+        let start_paddr = self.segment.start_paddr();
         // Ensure that the addresses used later will not overflow
         start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
         match dma_type() {
@@ -173,43 +174,39 @@ impl Drop for DmaCoherentInner {

 impl VmIo for DmaCoherent {
     fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> {
-        self.inner.vm_segment.read(offset, writer)
+        self.inner.segment.read(offset, writer)
     }

     fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> {
-        self.inner.vm_segment.write(offset, reader)
+        self.inner.segment.write(offset, reader)
     }
 }

 impl VmIoOnce for DmaCoherent {
     fn read_once<T: PodOnce>(&self, offset: usize) -> Result<T> {
-        self.inner.vm_segment.reader().skip(offset).read_once()
+        self.inner.segment.reader().skip(offset).read_once()
     }

     fn write_once<T: PodOnce>(&self, offset: usize, new_val: &T) -> Result<()> {
-        self.inner
-            .vm_segment
-            .writer()
-            .skip(offset)
-            .write_once(new_val)
+        self.inner.segment.writer().skip(offset).write_once(new_val)
     }
 }

 impl<'a> DmaCoherent {
     /// Returns a reader to read data from it.
     pub fn reader(&'a self) -> VmReader<'a, Infallible> {
-        self.inner.vm_segment.reader()
+        self.inner.segment.reader()
     }

     /// Returns a writer to write data into it.
     pub fn writer(&'a self) -> VmWriter<'a, Infallible> {
-        self.inner.vm_segment.writer()
+        self.inner.segment.writer()
     }
 }

 impl HasPaddr for DmaCoherent {
     fn paddr(&self) -> Paddr {
-        self.inner.vm_segment.start_paddr()
+        self.inner.segment.start_paddr()
     }
 }

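
A usage sketch of the renamed `DmaCoherent` API, mirroring the tests that follow (assumed caller code; the `.into()` erases the segment's metadata type into `DynUSegment`):

    let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();
    let dma = DmaCoherent::map(segment.into(), true).unwrap();
    dma.write_once(0, &0xdead_beef_u32).unwrap();
    let val: u32 = dma.read_once(0).unwrap();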
@@ -222,46 +219,42 @@ mod test {

     #[ktest]
     fn map_with_coherent_device() {
-        let vm_segment = FrameAllocOptions::new(1)
-            .is_contiguous(true)
-            .alloc_contiguous()
+        let segment = FrameAllocOptions::new()
+            .alloc_segment_with(1, |_| ())
             .unwrap();
-        let dma_coherent = DmaCoherent::map(vm_segment.clone(), true).unwrap();
-        assert!(dma_coherent.paddr() == vm_segment.paddr());
+        let dma_coherent = DmaCoherent::map(segment.clone().into(), true).unwrap();
+        assert!(dma_coherent.paddr() == segment.start_paddr());
     }

     #[ktest]
     fn map_with_incoherent_device() {
-        let vm_segment = FrameAllocOptions::new(1)
-            .is_contiguous(true)
-            .alloc_contiguous()
+        let segment = FrameAllocOptions::new()
+            .alloc_segment_with(1, |_| ())
             .unwrap();
-        let dma_coherent = DmaCoherent::map(vm_segment.clone(), false).unwrap();
-        assert!(dma_coherent.paddr() == vm_segment.paddr());
+        let dma_coherent = DmaCoherent::map(segment.clone().into(), false).unwrap();
+        assert!(dma_coherent.paddr() == segment.start_paddr());
         let page_table = KERNEL_PAGE_TABLE.get().unwrap();
-        let vaddr = paddr_to_vaddr(vm_segment.paddr());
+        let vaddr = paddr_to_vaddr(segment.start_paddr());
         assert!(page_table.query(vaddr).unwrap().1.cache == CachePolicy::Uncacheable);
     }

     #[ktest]
     fn duplicate_map() {
-        let vm_segment_parent = FrameAllocOptions::new(2)
-            .is_contiguous(true)
-            .alloc_contiguous()
+        let segment = FrameAllocOptions::new()
+            .alloc_segment_with(2, |_| ())
             .unwrap();
-        let vm_segment_child = vm_segment_parent.slice(&(0..PAGE_SIZE));
-        let _dma_coherent_parent = DmaCoherent::map(vm_segment_parent, false);
-        let dma_coherent_child = DmaCoherent::map(vm_segment_child, false);
+        let segment_child = segment.slice(&(0..PAGE_SIZE));
+        let _dma_coherent_parent = DmaCoherent::map(segment.into(), false);
+        let dma_coherent_child = DmaCoherent::map(segment_child.into(), false);
         assert!(dma_coherent_child.is_err());
     }

     #[ktest]
     fn read_and_write() {
-        let vm_segment = FrameAllocOptions::new(2)
-            .is_contiguous(true)
-            .alloc_contiguous()
+        let segment = FrameAllocOptions::new()
+            .alloc_segment_with(2, |_| ())
             .unwrap();
-        let dma_coherent = DmaCoherent::map(vm_segment, false).unwrap();
+        let dma_coherent = DmaCoherent::map(segment.into(), false).unwrap();

         let buf_write = vec![1u8; 2 * PAGE_SIZE];
         dma_coherent.write_bytes(0, &buf_write).unwrap();
@@ -272,11 +265,10 @@ mod test {

     #[ktest]
     fn reader_and_writer() {
-        let vm_segment = FrameAllocOptions::new(2)
-            .is_contiguous(true)
-            .alloc_contiguous()
+        let segment = FrameAllocOptions::new()
+            .alloc_segment_with(2, |_| ())
             .unwrap();
-        let dma_coherent = DmaCoherent::map(vm_segment, false).unwrap();
+        let dma_coherent = DmaCoherent::map(segment.into(), false).unwrap();

         let buf_write = vec![1u8; PAGE_SIZE];
         let mut writer = dma_coherent.writer();

@@ -11,7 +11,7 @@ use crate::{
     error::Error,
     mm::{
         dma::{dma_type, Daddr, DmaType},
-        HasPaddr, Infallible, Paddr, UntypedSegment, VmIo, VmReader, VmWriter, PAGE_SIZE,
+        DynUSegment, HasPaddr, Infallible, Paddr, UntypedMem, VmIo, VmReader, VmWriter, PAGE_SIZE,
     },
 };

@@ -34,7 +34,7 @@ pub struct DmaStream {

 #[derive(Debug)]
 struct DmaStreamInner {
-    vm_segment: UntypedSegment,
+    segment: DynUSegment,
     start_daddr: Daddr,
     /// TODO: remove this field when on x86.
     #[allow(unused)]
@@ -55,16 +55,16 @@ pub enum DmaDirection {
 }

 impl DmaStream {
-    /// Establishes DMA stream mapping for a given [`UntypedSegment`].
+    /// Establishes DMA stream mapping for a given [`DynUSegment`].
     ///
     /// The method fails if the segment already belongs to a DMA mapping.
     pub fn map(
-        vm_segment: UntypedSegment,
+        segment: DynUSegment,
         direction: DmaDirection,
         is_cache_coherent: bool,
     ) -> Result<Self, DmaError> {
-        let frame_count = vm_segment.nbytes() / PAGE_SIZE;
-        let start_paddr = vm_segment.start_paddr();
+        let frame_count = segment.size() / PAGE_SIZE;
+        let start_paddr = segment.start_paddr();
         if !check_and_insert_dma_mapping(start_paddr, frame_count) {
             return Err(DmaError::AlreadyMapped);
         }
@@ -88,7 +88,7 @@ impl DmaStream {
             DmaType::Iommu => {
                 for i in 0..frame_count {
                     let paddr = start_paddr + (i * PAGE_SIZE);
-                    // SAFETY: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `vm_segment`.
+                    // SAFETY: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `segment`.
                     unsafe {
                         iommu::map(paddr as Daddr, paddr).unwrap();
                     }
@@ -99,7 +99,7 @@ impl DmaStream {

         Ok(Self {
             inner: Arc::new(DmaStreamInner {
-                vm_segment,
+                segment,
                 start_daddr,
                 is_cache_coherent,
                 direction,
@@ -107,24 +107,24 @@ impl DmaStream {
         })
     }

-    /// Gets the underlying [`UntypedSegment`].
+    /// Gets the underlying [`DynUSegment`].
     ///
     /// Usually, the CPU side should not access the memory
     /// after the DMA mapping is established because
     /// there is a chance that the device is updating
     /// the memory. Do this at your own risk.
-    pub fn vm_segment(&self) -> &UntypedSegment {
-        &self.inner.vm_segment
+    pub fn segment(&self) -> &DynUSegment {
+        &self.inner.segment
    }

     /// Returns the number of frames.
     pub fn nframes(&self) -> usize {
-        self.inner.vm_segment.nbytes() / PAGE_SIZE
+        self.inner.segment.size() / PAGE_SIZE
     }

     /// Returns the number of bytes.
     pub fn nbytes(&self) -> usize {
-        self.inner.vm_segment.nbytes()
+        self.inner.segment.size()
     }

     /// Returns the DMA direction.
@@ -156,7 +156,7 @@ impl DmaStream {
         if self.inner.is_cache_coherent {
             return Ok(());
         }
-        let start_va = crate::mm::paddr_to_vaddr(self.inner.vm_segment.paddr()) as *const u8;
+        let start_va = crate::mm::paddr_to_vaddr(self.inner.segment.paddr()) as *const u8;
         // TODO: Query the CPU for the cache line size via CPUID, we use 64 bytes as the cache line size here.
         for i in _byte_range.step_by(64) {
             // TODO: Call the cache line flush command in the corresponding architecture.
@@ -176,8 +176,8 @@ impl HasDaddr for DmaStream {

 impl Drop for DmaStreamInner {
     fn drop(&mut self) {
-        let frame_count = self.vm_segment.nbytes() / PAGE_SIZE;
-        let start_paddr = self.vm_segment.start_paddr();
+        let frame_count = self.segment.size() / PAGE_SIZE;
+        let start_paddr = self.segment.start_paddr();
         // Ensure that the addresses used later will not overflow
         start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
         match dma_type() {
@@ -211,7 +211,7 @@ impl VmIo for DmaStream {
         if self.inner.direction == DmaDirection::ToDevice {
             return Err(Error::AccessDenied);
         }
-        self.inner.vm_segment.read(offset, writer)
+        self.inner.segment.read(offset, writer)
     }

     /// Writes data from the buffer.
@@ -219,7 +219,7 @@ impl VmIo for DmaStream {
         if self.inner.direction == DmaDirection::FromDevice {
             return Err(Error::AccessDenied);
         }
-        self.inner.vm_segment.write(offset, reader)
+        self.inner.segment.write(offset, reader)
     }
 }

@@ -229,7 +229,7 @@ impl<'a> DmaStream {
         if self.inner.direction == DmaDirection::ToDevice {
             return Err(Error::AccessDenied);
         }
-        Ok(self.inner.vm_segment.reader())
+        Ok(self.inner.segment.reader())
     }

     /// Returns a writer to write data into it.
@@ -237,13 +237,13 @@ impl<'a> DmaStream {
         if self.inner.direction == DmaDirection::FromDevice {
             return Err(Error::AccessDenied);
         }
-        Ok(self.inner.vm_segment.writer())
+        Ok(self.inner.segment.writer())
     }
 }

 impl HasPaddr for DmaStream {
     fn paddr(&self) -> Paddr {
-        self.inner.vm_segment.start_paddr()
+        self.inner.segment.start_paddr()
     }
 }

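
For `DmaStream` the same renaming applies, and the direction checks in the `VmIo` impls above gate every access. A sketch of the round trip (assumed caller code, not from this commit):

    let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();
    let stream =
        DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap();
    stream.write_bytes(0, &[0xffu8; 8]).unwrap();
    stream.sync(0..8).unwrap(); // flush cache lines for the device, if needed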
@@ -373,36 +373,35 @@ mod test {

     #[ktest]
     fn streaming_map() {
-        let vm_segment = FrameAllocOptions::new(1)
-            .is_contiguous(true)
-            .alloc_contiguous()
+        let segment = FrameAllocOptions::new()
+            .alloc_segment_with(1, |_| ())
             .unwrap();
         let dma_stream =
-            DmaStream::map(vm_segment.clone(), DmaDirection::Bidirectional, true).unwrap();
-        assert!(dma_stream.paddr() == vm_segment.paddr());
+            DmaStream::map(segment.clone().into(), DmaDirection::Bidirectional, true).unwrap();
+        assert!(dma_stream.paddr() == segment.start_paddr());
     }

     #[ktest]
     fn duplicate_map() {
-        let vm_segment_parent = FrameAllocOptions::new(2)
-            .is_contiguous(true)
-            .alloc_contiguous()
+        let segment_parent = FrameAllocOptions::new()
+            .alloc_segment_with(2, |_| ())
             .unwrap();
-        let vm_segment_child = vm_segment_parent.slice(&(0..PAGE_SIZE));
+        let segment_child = segment_parent.slice(&(0..PAGE_SIZE));
         let dma_stream_parent =
-            DmaStream::map(vm_segment_parent, DmaDirection::Bidirectional, false);
-        let dma_stream_child = DmaStream::map(vm_segment_child, DmaDirection::Bidirectional, false);
+            DmaStream::map(segment_parent.into(), DmaDirection::Bidirectional, false);
+        let dma_stream_child =
+            DmaStream::map(segment_child.into(), DmaDirection::Bidirectional, false);
         assert!(dma_stream_parent.is_ok());
         assert!(dma_stream_child.is_err());
     }

     #[ktest]
     fn read_and_write() {
-        let vm_segment = FrameAllocOptions::new(2)
-            .is_contiguous(true)
-            .alloc_contiguous()
+        let segment = FrameAllocOptions::new()
+            .alloc_segment_with(2, |_| ())
             .unwrap();
-        let dma_stream = DmaStream::map(vm_segment, DmaDirection::Bidirectional, false).unwrap();
+        let dma_stream =
+            DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap();

         let buf_write = vec![1u8; 2 * PAGE_SIZE];
         dma_stream.write_bytes(0, &buf_write).unwrap();
@@ -414,11 +413,11 @@ mod test {

     #[ktest]
     fn reader_and_writer() {
-        let vm_segment = FrameAllocOptions::new(2)
-            .is_contiguous(true)
-            .alloc_contiguous()
+        let segment = FrameAllocOptions::new()
+            .alloc_segment_with(2, |_| ())
             .unwrap();
-        let dma_stream = DmaStream::map(vm_segment, DmaDirection::Bidirectional, false).unwrap();
+        let dma_stream =
+            DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap();

         let buf_write = vec![1u8; PAGE_SIZE];
         let mut writer = dma_stream.writer().unwrap();

@@ -13,10 +13,135 @@ use spin::Once;
 use super::{meta::FrameMeta, segment::Segment, Frame};
 use crate::{
     boot::memory_region::MemoryRegionType,
-    mm::{Paddr, PAGE_SIZE},
+    error::Error,
+    mm::{paddr_to_vaddr, Paddr, PAGE_SIZE},
+    prelude::*,
     sync::SpinLock,
 };

+/// Options for allocating physical memory frames.
+pub struct FrameAllocOptions {
+    zeroed: bool,
+}
+
+impl Default for FrameAllocOptions {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl FrameAllocOptions {
+    /// Creates new options for allocating the specified number of frames.
+    pub fn new() -> Self {
+        Self { zeroed: true }
+    }
+
+    /// Sets whether the allocated frames should be initialized with zeros.
+    ///
+    /// If `zeroed` is `true`, the allocated frames are filled with zeros.
+    /// If not, the allocated frames will contain sensitive data and the caller
+    /// should clear them before sharing them with other components.
+    ///
+    /// By default, the frames are zero-initialized.
+    pub fn zeroed(&mut self, zeroed: bool) -> &mut Self {
+        self.zeroed = zeroed;
+        self
+    }
+
+    /// Allocates a single untyped frame without metadata.
+    pub fn alloc_frame(&self) -> Result<Frame<()>> {
+        self.alloc_frame_with(())
+    }
+
+    /// Allocates a single frame with additional metadata.
+    pub fn alloc_frame_with<M: FrameMeta>(&self, metadata: M) -> Result<Frame<M>> {
+        let frame = PAGE_ALLOCATOR
+            .get()
+            .unwrap()
+            .disable_irq()
+            .lock()
+            .alloc(1)
+            .map(|idx| {
+                let paddr = idx * PAGE_SIZE;
+                Frame::from_unused(paddr, metadata)
+            })
+            .ok_or(Error::NoMemory)?;
+
+        if self.zeroed {
+            let addr = paddr_to_vaddr(frame.start_paddr()) as *mut u8;
+            // SAFETY: The newly allocated frame is guaranteed to be valid.
+            unsafe { core::ptr::write_bytes(addr, 0, PAGE_SIZE) }
+        }
+
+        Ok(frame)
+    }
+
+    /// Allocates a contiguous range of untyped frames without metadata.
+    pub fn alloc_segment(&self, nframes: usize) -> Result<Segment<()>> {
+        self.alloc_segment_with(nframes, |_| ())
+    }
+
+    /// Allocates a contiguous range of frames with additional metadata.
+    ///
+    /// The returned [`Segment`] contains at least one frame. The method returns
+    /// an error if the number of frames is zero.
+    pub fn alloc_segment_with<M: FrameMeta, F>(
+        &self,
+        nframes: usize,
+        metadata_fn: F,
+    ) -> Result<Segment<M>>
+    where
+        F: FnMut(Paddr) -> M,
+    {
+        if nframes == 0 {
+            return Err(Error::InvalidArgs);
+        }
+        let segment = PAGE_ALLOCATOR
+            .get()
+            .unwrap()
+            .disable_irq()
+            .lock()
+            .alloc(nframes)
+            .map(|start| {
+                Segment::from_unused(
+                    start * PAGE_SIZE..start * PAGE_SIZE + nframes * PAGE_SIZE,
+                    metadata_fn,
+                )
+            })
+            .ok_or(Error::NoMemory)?;
+
+        if self.zeroed {
+            let addr = paddr_to_vaddr(segment.start_paddr()) as *mut u8;
+            // SAFETY: The newly allocated segment is guaranteed to be valid.
+            unsafe { core::ptr::write_bytes(addr, 0, nframes * PAGE_SIZE) }
+        }
+
+        Ok(segment)
+    }
+}
+
+#[cfg(ktest)]
+#[ktest]
+fn test_alloc_dealloc() {
+    // Here we allocate and deallocate frames in random orders to test the allocator.
+    // We expect the test to fail if the underlying implementation panics.
+    let single_options = FrameAllocOptions::new();
+    let mut contiguous_options = FrameAllocOptions::new();
+    contiguous_options.zeroed(false);
+    let mut remember_vec = Vec::new();
+    for _ in 0..10 {
+        for i in 0..10 {
+            let single_frame = single_options.alloc_frame().unwrap();
+            if i % 3 == 0 {
+                remember_vec.push(single_frame);
+            }
+        }
+        let contiguous_segment = contiguous_options.alloc_segment(10).unwrap();
+        drop(contiguous_segment);
+        remember_vec.pop();
+    }
+}
+
 /// FrameAllocator with a counter for allocated memory
 pub(in crate::mm) struct CountingFrameAllocator {
     allocator: FrameAllocator,
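
The new `FrameAllocOptions` above replaces the free allocator functions removed in the next hunk. End-to-end usage as a sketch (the size assertion assumes `Segment::size` reports bytes, as the rest of this commit does):

    let frame: Frame<()> = FrameAllocOptions::new().alloc_frame().unwrap(); // zeroed by default
    let segment: Segment<()> = FrameAllocOptions::new()
        .zeroed(false) // callers that overwrite everything can skip the fill
        .alloc_segment(4)
        .unwrap();
    assert_eq!(segment.size(), 4 * PAGE_SIZE);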
@@ -59,45 +184,6 @@ impl CountingFrameAllocator {

 pub(in crate::mm) static PAGE_ALLOCATOR: Once<SpinLock<CountingFrameAllocator>> = Once::new();

-/// Allocate a single page.
-///
-/// The metadata of the page is initialized with the given metadata.
-pub(crate) fn alloc_single<M: FrameMeta>(metadata: M) -> Option<Frame<M>> {
-    PAGE_ALLOCATOR
-        .get()
-        .unwrap()
-        .disable_irq()
-        .lock()
-        .alloc(1)
-        .map(|idx| {
-            let paddr = idx * PAGE_SIZE;
-            Frame::from_unused(paddr, metadata)
-        })
-}
-
-/// Allocate a contiguous range of pages of a given length in bytes.
-///
-/// The caller must provide a closure to initialize metadata for all the pages.
-/// The closure receives the physical address of the page and returns the
-/// metadata, which is similar to [`core::array::from_fn`].
-///
-/// # Panics
-///
-/// The function panics if the length is not base-page-aligned.
-pub(crate) fn alloc_contiguous<M: FrameMeta, F>(len: usize, metadata_fn: F) -> Option<Segment<M>>
-where
-    F: FnMut(Paddr) -> M,
-{
-    assert!(len % PAGE_SIZE == 0);
-    PAGE_ALLOCATOR
-        .get()
-        .unwrap()
-        .disable_irq()
-        .lock()
-        .alloc(len / PAGE_SIZE)
-        .map(|start| Segment::from_unused(start * PAGE_SIZE..start * PAGE_SIZE + len, metadata_fn))
-}
-
 pub(crate) fn init() {
     let regions = crate::boot::memory_regions();
     let mut total: usize = 0;

@@ -60,7 +60,7 @@ use crate::{

 /// The maximum number of bytes of the metadata of a page.
 pub const PAGE_METADATA_MAX_SIZE: usize =
-    META_SLOT_SIZE - size_of::<AtomicU32>() - size_of::<FrameMetaVtablePtr>();
+    META_SLOT_SIZE - size_of::<bool>() - size_of::<AtomicU32>() - size_of::<FrameMetaVtablePtr>();
 /// The maximum alignment in bytes of the metadata of a page.
 pub const PAGE_METADATA_MAX_ALIGN: usize = align_of::<MetaSlot>();

@ -77,19 +77,24 @@ pub(in crate::mm) struct MetaSlot {
|
|||||||
/// at most `PAGE_METADATA_ALIGN` bytes of alignment;
|
/// at most `PAGE_METADATA_ALIGN` bytes of alignment;
|
||||||
/// - the subsequent fields can utilize the padding of the
|
/// - the subsequent fields can utilize the padding of the
|
||||||
/// reference count to save space.
|
/// reference count to save space.
|
||||||
storage: UnsafeCell<[u8; PAGE_METADATA_MAX_SIZE]>,
|
///
|
||||||
|
/// Don't access this field by a reference to the slot.
|
||||||
|
_storage: UnsafeCell<[u8; PAGE_METADATA_MAX_SIZE]>,
|
||||||
/// The reference count of the page.
|
/// The reference count of the page.
|
||||||
///
|
///
|
||||||
/// Specifically, the reference count has the following meaning:
|
/// Specifically, the reference count has the following meaning:
|
||||||
/// * `REF_COUNT_UNUSED`: The page is not in use.
|
/// - `REF_COUNT_UNUSED`: The page is not in use.
|
||||||
/// * `0`: The page is being constructed ([`Page::from_unused`])
|
/// - `0`: The page is being constructed ([`Frame::from_unused`])
|
||||||
/// or destructured ([`drop_last_in_place`]).
|
/// or destructured ([`drop_last_in_place`]).
|
||||||
/// * `1..REF_COUNT_MAX`: The page is in use.
|
/// - `1..REF_COUNT_MAX`: The page is in use.
|
||||||
/// * `REF_COUNT_MAX..REF_COUNT_UNUSED`: Illegal values to
|
/// - `REF_COUNT_MAX..REF_COUNT_UNUSED`: Illegal values to
|
||||||
/// prevent the reference count from overflowing. Otherwise,
|
/// prevent the reference count from overflowing. Otherwise,
|
||||||
/// overflowing the reference count will cause soundness issue.
|
/// overflowing the reference count will cause soundness issue.
|
||||||
///
|
///
|
||||||
/// [`Frame::from_unused`]: super::Frame::from_unused
|
/// [`Frame::from_unused`]: super::Frame::from_unused
|
||||||
|
//
|
||||||
|
// Other than this field the fields should be `MaybeUninit`.
|
||||||
|
// See initialization in `alloc_meta_pages`.
|
||||||
pub(super) ref_count: AtomicU32,
|
pub(super) ref_count: AtomicU32,
|
||||||
/// The virtual table that indicates the type of the metadata.
|
/// The virtual table that indicates the type of the metadata.
|
||||||
pub(super) vtable_ptr: UnsafeCell<MaybeUninit<FrameMetaVtablePtr>>,
|
pub(super) vtable_ptr: UnsafeCell<MaybeUninit<FrameMetaVtablePtr>>,
|
||||||
@ -123,6 +128,16 @@ pub unsafe trait FrameMeta: Any + Send + Sync + Debug + 'static {
|
|||||||
fn on_drop(&mut self, reader: &mut VmReader<Infallible>) {
|
fn on_drop(&mut self, reader: &mut VmReader<Infallible>) {
|
||||||
let _ = reader;
|
let _ = reader;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Whether the metadata's associated frame is untyped.
|
||||||
|
///
|
||||||
|
/// If a type implements [`UFrameMeta`], this should be `true`.
|
||||||
|
/// Otherwise, it should be `false`.
|
||||||
|
///
|
||||||
|
/// [`UFrameMeta`]: super::untyped::UFrameMeta
|
||||||
|
fn is_untyped(&self) -> bool {
|
||||||
|
false
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Makes a structure usable as a page metadata.
|
/// Makes a structure usable as a page metadata.
|
||||||
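Editor's note: the new `is_untyped` hook defaults to `false`, so existing typed metadata is untouched by this commit. A sketch of the split from the metadata side, assuming the crate's existing `impl_frame_meta_for!` macro is in scope (`MyTypedMeta` is a hypothetical example type):

    // Hypothetical metadata for a kernel-internal (typed) frame.
    #[derive(Debug, Default)]
    struct MyTypedMeta;

    impl_frame_meta_for!(MyTypedMeta); // keeps the default `is_untyped() == false`

Untyped metadata instead goes through `impl_untyped_frame_meta_for!` (added later in this commit), which overrides `is_untyped` to return `true`.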
@@ -202,7 +217,7 @@ pub(super) unsafe fn drop_last_in_place(ptr: *mut MetaSlot) {
     core::ptr::drop_in_place(meta_ptr);
 }

-    // `Release` pairs with the `Acquire` in `Page::from_unused` and ensures `drop_in_place` won't
+    // `Release` pairs with the `Acquire` in `Frame::from_unused` and ensures `drop_in_place` won't
     // be reordered after this memory store.
     slot.ref_count.store(REF_COUNT_UNUSED, Ordering::Release);

@@ -280,20 +295,15 @@ fn alloc_meta_pages(num_pages: usize) -> (usize, Paddr) {
         * PAGE_SIZE;

     let slots = paddr_to_vaddr(start_paddr) as *mut MetaSlot;
-    for i in 0..num_pages {
-        // SAFETY: The memory is successfully allocated with `num_pages` slots so the index must be
-        // within the range.
-        let slot = unsafe { slots.add(i) };
-
-        // SAFETY: The memory is just allocated so we have exclusive access and it's valid for
-        // writing.
-        unsafe {
-            slot.write(MetaSlot {
-                storage: UnsafeCell::new([0; PAGE_METADATA_MAX_SIZE]),
-                ref_count: AtomicU32::new(REF_COUNT_UNUSED),
-                vtable_ptr: UnsafeCell::new(MaybeUninit::uninit()),
-            });
-        }
+    // Fill the metadata pages with a byte pattern of `REF_COUNT_UNUSED`.
+    debug_assert_eq!(REF_COUNT_UNUSED.to_ne_bytes(), [0xff, 0xff, 0xff, 0xff]);
+    // SAFETY: `slots` and the length is a valid region for the metadata pages
+    // that are going to be treated as metadata slots. The byte pattern is
+    // valid as the initial value of the reference count (other fields are
+    // either not accessed or `MaybeUninit`).
+    unsafe {
+        core::ptr::write_bytes(slots as *mut u8, 0xff, num_pages * size_of::<MetaSlot>());
     }

     (num_meta_pages, start_paddr)
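Editor's note: why a single `write_bytes` suffices deserves a remark. `REF_COUNT_UNUSED` is all-ones in every byte, so saturating the slot region with `0xff` makes each `ref_count` field read as "unused" regardless of where it sits inside `MetaSlot`. A standalone sketch of the invariant (the constant's value is an assumption consistent with the `debug_assert_eq!` above):

    const REF_COUNT_UNUSED: u32 = u32::MAX; // assumed, matching the debug_assert above

    fn main() {
        // Every byte of the pattern is 0xff, in any endianness.
        assert_eq!(REF_COUNT_UNUSED.to_ne_bytes(), [0xff; 4]);
    }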
@@ -16,12 +16,11 @@

 pub mod allocator;
 pub mod meta;
-mod segment;
+pub mod segment;
 pub mod untyped;

 use core::{
     marker::PhantomData,
-    mem::ManuallyDrop,
     sync::atomic::{AtomicU32, AtomicUsize, Ordering},
 };

@@ -29,14 +28,14 @@ use meta::{
     mapping, FrameMeta, MetaSlot, PAGE_METADATA_MAX_ALIGN, PAGE_METADATA_MAX_SIZE, REF_COUNT_UNUSED,
 };
 pub use segment::Segment;
-use untyped::UntypedMeta;
+use untyped::{DynUFrame, UFrameMeta};

-use super::{PagingLevel, UntypedFrame, PAGE_SIZE};
+use super::{PagingLevel, PAGE_SIZE};
 use crate::mm::{Paddr, PagingConsts, Vaddr};

 static MAX_PADDR: AtomicUsize = AtomicUsize::new(0);

-/// A page with a statically-known usage, whose metadata is represented by `M`.
+/// A physical memory frame with a statically-known usage, whose metadata is represented by `M`.
 #[derive(Debug)]
 #[repr(transparent)]
 pub struct Frame<M: FrameMeta + ?Sized> {
@@ -44,6 +43,13 @@ pub struct Frame<M: FrameMeta + ?Sized> {
     pub(super) _marker: PhantomData<M>,
 }

+/// A physical memory frame with a dynamically-known usage.
+///
+/// The usage of this frame will not be changed while this object is alive. But the
+/// usage is not known at compile time. A [`DynFrame`] as a parameter accepts any
+/// type of frames.
+pub type DynFrame = Frame<dyn FrameMeta>;
+
 unsafe impl<M: FrameMeta + ?Sized> Send for Frame<M> {}

 unsafe impl<M: FrameMeta + ?Sized> Sync for Frame<M> {}
@@ -79,7 +85,8 @@ impl<M: FrameMeta> Frame<M> {
             .compare_exchange(REF_COUNT_UNUSED, 0, Ordering::Acquire, Ordering::Relaxed)
             .expect("Frame already in use when trying to get a new handle");

-        // SAFETY: We have exclusive access to the page metadata.
+        // SAFETY: We have exclusive access to the page metadata. These fields are mutably
+        // borrowed only once.
         let vtable_ptr = unsafe { &mut *slot.vtable_ptr.get() };
         vtable_ptr.write(core::ptr::metadata(&metadata as &dyn FrameMeta));

@@ -114,7 +121,7 @@ impl<M: FrameMeta> Frame<M> {

 impl<M: FrameMeta + ?Sized> Frame<M> {
     /// Get the physical address.
-    pub fn paddr(&self) -> Paddr {
+    pub fn start_paddr(&self) -> Paddr {
         mapping::meta_to_page::<PagingConsts>(self.ptr as Vaddr)
     }

@@ -183,7 +190,7 @@ impl<M: FrameMeta + ?Sized> Frame<M> {
     /// data structures need to hold the page handle such as the page table.
     #[allow(unused)]
     pub(in crate::mm) fn into_raw(self) -> Paddr {
-        let paddr = self.paddr();
+        let paddr = self.start_paddr();
         core::mem::forget(self);
         paddr
     }
@@ -256,12 +263,8 @@ impl<M: FrameMeta> TryFrom<Frame<dyn FrameMeta>> for Frame<M> {
     /// return the dynamic page itself as is.
     fn try_from(dyn_frame: Frame<dyn FrameMeta>) -> Result<Self, Self::Error> {
         if (dyn_frame.dyn_meta() as &dyn core::any::Any).is::<M>() {
-            let result = Frame {
-                ptr: dyn_frame.ptr,
-                _marker: PhantomData,
-            };
-            let _ = ManuallyDrop::new(dyn_frame);
-            Ok(result)
+            // SAFETY: The metadata is coerceable and the struct is transmutable.
+            Ok(unsafe { core::mem::transmute::<Frame<dyn FrameMeta>, Frame<M>>(dyn_frame) })
         } else {
             Err(dyn_frame)
         }
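Editor's note: a sketch of how the fallible downcast reads at a call site (the concrete metadata type `KernelMeta` is just an example stand-in):

    fn try_downcast(any_frame: Frame<dyn FrameMeta>) {
        match Frame::<KernelMeta>::try_from(any_frame) {
            // The metadata really is `KernelMeta`; we now hold a typed handle.
            Ok(typed) => drop(typed),
            // Wrong metadata type; the dynamic handle is returned unconsumed.
            Err(still_dyn) => drop(still_dyn),
        }
    }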
@@ -270,18 +273,46 @@ impl<M: FrameMeta> TryFrom<Frame<dyn FrameMeta>> for Frame<M> {

 impl<M: FrameMeta> From<Frame<M>> for Frame<dyn FrameMeta> {
     fn from(frame: Frame<M>) -> Self {
-        let result = Self {
-            ptr: frame.ptr,
-            _marker: PhantomData,
-        };
-        let _ = ManuallyDrop::new(frame);
-        result
+        // SAFETY: The metadata is coerceable and the struct is transmutable.
+        unsafe { core::mem::transmute(frame) }
     }
 }

-impl From<UntypedFrame> for Frame<dyn FrameMeta> {
-    fn from(frame: UntypedFrame) -> Self {
-        Frame::<UntypedMeta>::from(frame).into()
+impl<M: UFrameMeta> From<Frame<M>> for DynUFrame {
+    fn from(frame: Frame<M>) -> Self {
+        // SAFETY: The metadata is coerceable and the struct is transmutable.
+        unsafe { core::mem::transmute(frame) }
+    }
+}
+
+impl<M: UFrameMeta> From<&Frame<M>> for &DynUFrame {
+    fn from(frame: &Frame<M>) -> Self {
+        // SAFETY: The metadata is coerceable and the struct is transmutable.
+        unsafe { core::mem::transmute(frame) }
+    }
+}
+
+impl From<DynUFrame> for Frame<dyn FrameMeta> {
+    fn from(frame: DynUFrame) -> Self {
+        // SAFETY: The metadata is coerceable and the struct is transmutable.
+        unsafe { core::mem::transmute(frame) }
+    }
+}
+
+impl TryFrom<Frame<dyn FrameMeta>> for DynUFrame {
+    type Error = Frame<dyn FrameMeta>;
+
+    /// Try converting a [`Frame<dyn FrameMeta>`] into [`DynUFrame`].
+    ///
+    /// If the usage of the page is not the same as the expected usage, it will
+    /// return the dynamic page itself as is.
+    fn try_from(dyn_frame: Frame<dyn FrameMeta>) -> Result<Self, Self::Error> {
+        if dyn_frame.dyn_meta().is_untyped() {
+            // SAFETY: The metadata is coerceable and the struct is transmutable.
+            Ok(unsafe { core::mem::transmute::<Frame<dyn FrameMeta>, DynUFrame>(dyn_frame) })
+        } else {
+            Err(dyn_frame)
+        }
     }
 }
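Editor's note: taken together, the frame conversions now form a small lattice. A sketch with the types introduced in this commit (the function is purely illustrative):

    fn conversions<M: UFrameMeta>(typed: Frame<M>) {
        // Infallible widening: concrete untyped metadata -> any untyped metadata.
        let u: DynUFrame = typed.into();
        // Infallible widening: any untyped frame -> fully dynamic frame.
        let dynamic: Frame<dyn FrameMeta> = u.into();
        // Fallible narrowing back, guarded by `is_untyped()`.
        let _ = DynUFrame::try_from(dynamic);
    }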
@@ -2,11 +2,10 @@

 //! A contiguous range of pages.

-use alloc::vec::Vec;
 use core::{mem::ManuallyDrop, ops::Range};

 use super::{inc_page_ref_count, meta::FrameMeta, Frame};
-use crate::mm::{Paddr, PAGE_SIZE};
+use crate::mm::{Paddr, UFrameMeta, PAGE_SIZE};

 /// A contiguous range of homogeneous physical memory pages.
 ///
@@ -21,11 +20,30 @@ use crate::mm::{Paddr, PAGE_SIZE};
 /// All the metadata of the pages are homogeneous, i.e., they are of the same
 /// type.
 #[derive(Debug)]
+#[repr(transparent)]
 pub struct Segment<M: FrameMeta + ?Sized> {
     range: Range<Paddr>,
     _marker: core::marker::PhantomData<M>,
 }

+/// A contiguous range of homogeneous physical memory frames that have any metadata.
+///
+/// In other words, the metadata of the frames are of the same type but the type
+/// is not known at compile time. A [`DynSegment`] as a parameter accepts any
+/// type of segments.
+///
+/// The usage of this frame will not be changed while this object is alive.
+pub type DynSegment = Segment<dyn FrameMeta>;
+
+/// A contiguous range of homogeneous untyped physical memory pages that have any metadata.
+///
+/// In other words, the metadata of the frames are of the same type, and they
+/// are untyped, but the type of metadata is not known at compile time. A
+/// [`DynUSegment`] as a parameter accepts any untyped segments.
+///
+/// The usage of this frame will not be changed while this object is alive.
+pub type DynUSegment = Segment<dyn UFrameMeta>;
+
 impl<M: FrameMeta + ?Sized> Drop for Segment<M> {
     fn drop(&mut self) {
         for paddr in self.range.clone().step_by(PAGE_SIZE) {
@@ -89,7 +107,7 @@ impl<M: FrameMeta + ?Sized> Segment<M> {
     }

     /// Gets the length in bytes of the contiguous pages.
-    pub fn nbytes(&self) -> usize {
+    pub fn size(&self) -> usize {
         self.range.end - self.range.start
     }

@@ -104,7 +122,7 @@ impl<M: FrameMeta + ?Sized> Segment<M> {
     /// not base-page-aligned.
     pub fn split(self, offset: usize) -> (Self, Self) {
         assert!(offset % PAGE_SIZE == 0);
-        assert!(0 < offset && offset < self.nbytes());
+        assert!(0 < offset && offset < self.size());

         let old = ManuallyDrop::new(self);
         let at = old.range.start + offset;
@@ -152,7 +170,7 @@ impl<M: FrameMeta + ?Sized> Segment<M> {

 impl<M: FrameMeta + ?Sized> From<Frame<M>> for Segment<M> {
     fn from(page: Frame<M>) -> Self {
-        let pa = page.paddr();
+        let pa = page.start_paddr();
         let _ = ManuallyDrop::new(page);
         Self {
             range: pa..pa + PAGE_SIZE,
@@ -161,22 +179,6 @@ impl<M: FrameMeta + ?Sized> From<Frame<M>> for Segment<M> {
     }
 }

-impl<M: FrameMeta + ?Sized> From<Segment<M>> for Vec<Frame<M>> {
-    fn from(pages: Segment<M>) -> Self {
-        let vector = pages
-            .range
-            .clone()
-            .step_by(PAGE_SIZE)
-            .map(|i|
-                // SAFETY: for each page there would be a forgotten handle
-                // when creating the `Segment` object.
-                unsafe { Frame::<M>::from_raw(i) })
-            .collect();
-        let _ = ManuallyDrop::new(pages);
-        vector
-    }
-}
-
 impl<M: FrameMeta + ?Sized> Iterator for Segment<M> {
     type Item = Frame<M>;

@@ -194,3 +196,83 @@ impl<M: FrameMeta + ?Sized> Iterator for Segment<M> {
     }
 }

+impl<M: FrameMeta> From<Segment<M>> for DynSegment {
+    fn from(seg: Segment<M>) -> Self {
+        let seg = ManuallyDrop::new(seg);
+        Self {
+            range: seg.range.clone(),
+            _marker: core::marker::PhantomData,
+        }
+    }
+}
+
+impl<M: FrameMeta> TryFrom<DynSegment> for Segment<M> {
+    type Error = DynSegment;
+
+    fn try_from(seg: DynSegment) -> core::result::Result<Self, Self::Error> {
+        // SAFETY: for each page there would be a forgotten handle
+        // when creating the `Segment` object.
+        let first_frame = unsafe { Frame::<dyn FrameMeta>::from_raw(seg.range.start) };
+        let first_frame = ManuallyDrop::new(first_frame);
+        if !(first_frame.dyn_meta() as &dyn core::any::Any).is::<M>() {
+            return Err(seg);
+        }
+        // Since segments are homogeneous, we can safely assume that the rest
+        // of the frames are of the same type. We just debug-check here.
+        #[cfg(debug_assertions)]
+        {
+            for paddr in seg.range.clone().step_by(PAGE_SIZE) {
+                let frame = unsafe { Frame::<dyn FrameMeta>::from_raw(paddr) };
+                let frame = ManuallyDrop::new(frame);
+                debug_assert!((frame.dyn_meta() as &dyn core::any::Any).is::<M>());
+            }
+        }
+        // SAFETY: The metadata is coerceable and the struct is transmutable.
+        Ok(unsafe { core::mem::transmute::<DynSegment, Segment<M>>(seg) })
+    }
+}
+
+impl<M: UFrameMeta> From<Segment<M>> for DynUSegment {
+    fn from(seg: Segment<M>) -> Self {
+        // SAFETY: The metadata is coerceable and the struct is transmutable.
+        unsafe { core::mem::transmute(seg) }
+    }
+}
+
+impl<M: UFrameMeta> From<&Segment<M>> for &DynUSegment {
+    fn from(seg: &Segment<M>) -> Self {
+        // SAFETY: The metadata is coerceable and the struct is transmutable.
+        unsafe { core::mem::transmute(seg) }
+    }
+}
+
+impl TryFrom<DynSegment> for DynUSegment {
+    type Error = DynSegment;
+
+    /// Try converting a [`DynSegment`] into [`DynUSegment`].
+    ///
+    /// If the usage of the page is not the same as the expected usage, it will
+    /// return the dynamic page itself as is.
+    fn try_from(seg: DynSegment) -> core::result::Result<Self, Self::Error> {
+        // SAFETY: for each page there would be a forgotten handle
+        // when creating the `Segment` object.
+        let first_frame = unsafe { Frame::<dyn FrameMeta>::from_raw(seg.range.start) };
+        let first_frame = ManuallyDrop::new(first_frame);
+        if !first_frame.dyn_meta().is_untyped() {
+            return Err(seg);
+        }
+        // Since segments are homogeneous, we can safely assume that the rest
+        // of the frames are of the same type. We just debug-check here.
+        #[cfg(debug_assertions)]
+        {
+            for paddr in seg.range.clone().step_by(PAGE_SIZE) {
+                let frame = unsafe { Frame::<dyn FrameMeta>::from_raw(paddr) };
+                let frame = ManuallyDrop::new(frame);
+                debug_assert!(frame.dyn_meta().is_untyped());
+            }
+        }
+        // SAFETY: The metadata is coerceable and the struct is transmutable.
+        Ok(unsafe { core::mem::transmute::<DynSegment, DynUSegment>(seg) })
+    }
+}
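Editor's note: with `From<Segment<M>> for Vec<Frame<M>>` removed, splitting a segment into frames now goes through its `Iterator` impl. A sketch of the replacement:

    fn into_frames<M: FrameMeta>(seg: Segment<M>) -> alloc::vec::Vec<Frame<M>> {
        // `Segment` yields one `Frame` per page; collecting replaces the
        // removed `Vec<Frame<M>>: From<Segment<M>>` conversion.
        seg.collect()
    }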
ostd/src/mm/frame/untyped.rs (new file, 197 lines)
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: MPL-2.0
+
+//! Untyped physical memory management.
+//!
+//! A frame is a special page that is _untyped_ memory.
+//! It is used to store data irrelevant to the integrity of the kernel.
+//! All pages mapped to the virtual address space of the users are backed by
+//! frames. Frames, with all the properties of pages, can additionally be safely
+//! read and written by the kernel or the user.
+
+use super::{meta::FrameMeta, Frame, Segment};
+use crate::{
+    mm::{
+        io::{FallibleVmRead, FallibleVmWrite, VmIo, VmReader, VmWriter},
+        paddr_to_vaddr, Infallible,
+    },
+    Error, Result,
+};
+
+/// The metadata of untyped frame.
+///
+/// If a structure `M` implements [`UFrameMeta`], it can be used as the
+/// metadata of a type of untyped frames [`Frame<M>`]. All frames of such type
+/// will be accessible as untyped memory.
+pub trait UFrameMeta: FrameMeta {}
+
+/// An untyped frame with any metadata.
+///
+/// The usage of this frame will not be changed while this object is alive.
+/// The metadata of the frame is not known at compile time but the frame must
+/// be an untyped one. A [`DynUFrame`] as a parameter accepts any type of
+/// untyped frame metadata.
+pub type DynUFrame = Frame<dyn UFrameMeta>;
+
+/// Makes a structure usable as untyped frame metadata.
+///
+/// Directly implementing [`FrameMeta`] is not safe since the size and
+/// alignment must be checked. This macro provides a safe way to implement both
+/// [`FrameMeta`] and [`UFrameMeta`] with compile-time checks.
+///
+/// If this macro is used for built-in typed frame metadata, it won't compile.
+#[macro_export]
+macro_rules! impl_untyped_frame_meta_for {
+    // Implement without specifying the drop behavior.
+    ($t:ty) => {
+        use static_assertions::const_assert;
+        const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_SIZE);
+        const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_ALIGN);
+        // SAFETY: The size and alignment of the structure are checked.
+        unsafe impl $crate::mm::frame::meta::FrameMeta for $t {
+            fn is_untyped(&self) -> bool {
+                true
+            }
+        }
+        impl $crate::mm::frame::untyped::UFrameMeta for $t {}
+    };
+    // Implement with a customized drop function.
+    ($t:ty, $body:expr) => {
+        use static_assertions::const_assert;
+        const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_SIZE);
+        const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_ALIGN);
+        // SAFETY: The size and alignment of the structure are checked.
+        // Outside OSTD the user cannot implement a `on_drop` method for typed
+        // frames. And untyped frames can be safely read.
+        unsafe impl $crate::mm::frame::meta::FrameMeta for $t {
+            fn on_drop(&mut self, reader: &mut $crate::mm::VmReader<$crate::mm::Infallible>) {
+                $body
+            }
+
+            fn is_untyped(&self) -> bool {
+                true
+            }
+        }
+        impl $crate::mm::frame::untyped::UFrameMeta for $t {}
+    };
+}
+
+// A special case of untyped metadata is the unit type.
+impl_untyped_frame_meta_for!(());
+
+/// A physical memory range that is untyped.
+///
+/// Untyped frames or segments can be safely read and written by the kernel or
+/// the user.
+pub trait UntypedMem {
+    /// Borrows a reader that can read the untyped memory.
+    fn reader(&self) -> VmReader<'_, Infallible>;
+    /// Borrows a writer that can write the untyped memory.
+    fn writer(&self) -> VmWriter<'_, Infallible>;
+}
+
+macro_rules! impl_untyped_for {
+    ($t:ident) => {
+        impl<UM: UFrameMeta + ?Sized> UntypedMem for $t<UM> {
+            fn reader(&self) -> VmReader<'_, Infallible> {
+                let ptr = paddr_to_vaddr(self.start_paddr()) as *const u8;
+                // SAFETY: Only untyped frames are allowed to be read.
+                unsafe { VmReader::from_kernel_space(ptr, self.size()) }
+            }
+
+            fn writer(&self) -> VmWriter<'_, Infallible> {
+                let ptr = paddr_to_vaddr(self.start_paddr()) as *mut u8;
+                // SAFETY: Only untyped frames are allowed to be written.
+                unsafe { VmWriter::from_kernel_space(ptr, self.size()) }
+            }
+        }
+
+        impl<UM: UFrameMeta + ?Sized> VmIo for $t<UM> {
+            fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> {
+                let read_len = writer.avail().min(self.size().saturating_sub(offset));
+                // Do bound check with potential integer overflow in mind
+                let max_offset = offset.checked_add(read_len).ok_or(Error::Overflow)?;
+                if max_offset > self.size() {
+                    return Err(Error::InvalidArgs);
+                }
+                let len = self
+                    .reader()
+                    .skip(offset)
+                    .read_fallible(writer)
+                    .map_err(|(e, _)| e)?;
+                debug_assert!(len == read_len);
+                Ok(())
+            }
+
+            fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> {
+                let write_len = reader.remain().min(self.size().saturating_sub(offset));
+                // Do bound check with potential integer overflow in mind
+                let max_offset = offset.checked_add(write_len).ok_or(Error::Overflow)?;
+                if max_offset > self.size() {
+                    return Err(Error::InvalidArgs);
+                }
+                let len = self
+                    .writer()
+                    .skip(offset)
+                    .write_fallible(reader)
+                    .map_err(|(e, _)| e)?;
+                debug_assert!(len == write_len);
+                Ok(())
+            }
+        }
+    };
+}
+
+impl_untyped_for!(Frame);
+impl_untyped_for!(Segment);
+
+// Here are implementations for `xarray`.
+
+use core::{marker::PhantomData, mem::ManuallyDrop, ops::Deref};
+
+/// `FrameRef` is a struct that can work as `&'a Frame<M>`.
+///
+/// This is solely useful for [`crate::collections::xarray`].
+pub struct FrameRef<'a, M: UFrameMeta + ?Sized> {
+    inner: ManuallyDrop<Frame<M>>,
+    _marker: PhantomData<&'a Frame<M>>,
+}
+
+impl<M: UFrameMeta + ?Sized> Deref for FrameRef<'_, M> {
+    type Target = Frame<M>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+
+// SAFETY: `Frame` is essentially an `*const MetaSlot` that could be used as a `*const` pointer.
+// The pointer is also aligned to 4.
+unsafe impl<M: UFrameMeta + ?Sized> xarray::ItemEntry for Frame<M> {
+    type Ref<'a>
+        = FrameRef<'a, M>
+    where
+        Self: 'a;
+
+    fn into_raw(self) -> *const () {
+        let ptr = self.ptr;
+        let _ = ManuallyDrop::new(self);
+        ptr as *const ()
+    }
+
+    unsafe fn from_raw(raw: *const ()) -> Self {
+        Self {
+            ptr: raw as *const _,
+            _marker: PhantomData,
+        }
+    }
+
+    unsafe fn raw_as_ref<'a>(raw: *const ()) -> Self::Ref<'a> {
+        Self::Ref {
+            inner: ManuallyDrop::new(Frame {
+                ptr: raw as *const _,
+                _marker: PhantomData,
+            }),
+            _marker: PhantomData,
+        }
+    }
+}
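Editor's note: two usage sketches for the new module. First, registering a hypothetical user-defined untyped metadata type with the macro; second, byte I/O through the `VmIo` impl defined above (`write_bytes`/`read_bytes` are the trait's convenience methods; error handling is abbreviated):

    // Hypothetical metadata type; the macro verifies size/alignment at
    // compile time and marks it as untyped.
    #[derive(Debug, Default)]
    struct PageCacheMeta;

    impl_untyped_frame_meta_for!(PageCacheMeta);

    fn roundtrip(frame: &DynUFrame) -> ostd::Result<()> {
        // `VmIo` is implemented for any `Frame<UM>`/`Segment<UM>` with
        // untyped metadata, so plain byte I/O works on both.
        frame.write_bytes(0, b"hello")?;
        let mut buf = [0u8; 5];
        frame.read_bytes(0, &mut buf)?;
        debug_assert_eq!(&buf, b"hello");
        Ok(())
    }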
(deleted file)
@@ -1,236 +0,0 @@
-// SPDX-License-Identifier: MPL-2.0
-
-//! Untyped physical memory management.
-//!
-//! A frame is a special page that is _untyped_ memory.
-//! It is used to store data irrelevant to the integrity of the kernel.
-//! All pages mapped to the virtual address space of the users are backed by
-//! frames. Frames, with all the properties of pages, can additionally be safely
-//! read and written by the kernel or the user.
-
-pub mod options;
-mod segment;
-
-use core::mem::ManuallyDrop;
-
-pub use segment::UntypedSegment;
-
-use super::{
-    meta::{impl_frame_meta_for, FrameMeta, MetaSlot},
-    Frame,
-};
-use crate::{
-    mm::{
-        io::{FallibleVmRead, FallibleVmWrite, VmIo, VmReader, VmWriter},
-        paddr_to_vaddr, HasPaddr, Infallible, Paddr, PAGE_SIZE,
-    },
-    Error, Result,
-};
-
-/// A handle to a physical memory page of untyped memory.
-///
-/// An instance of `UntypedFrame` is a handle to a page frame (a physical memory
-/// page). A cloned `UntypedFrame` refers to the same page frame as the original.
-/// As the original and cloned instances point to the same physical address,
-/// they are treated as equal to each other. Behind the scene, a reference
-/// counter is maintained for each page frame so that when all instances of
-/// `UntypedFrame` that refer to the same page frame are dropped, the page frame
-/// will be globally freed.
-#[derive(Debug, Clone)]
-pub struct UntypedFrame {
-    page: Frame<UntypedMeta>,
-}
-
-impl UntypedFrame {
-    /// Returns the physical address of the page frame.
-    pub fn start_paddr(&self) -> Paddr {
-        self.page.paddr()
-    }
-
-    /// Returns the end physical address of the page frame.
-    pub fn end_paddr(&self) -> Paddr {
-        self.start_paddr() + PAGE_SIZE
-    }
-
-    /// Returns the size of the frame
-    pub const fn size(&self) -> usize {
-        self.page.size()
-    }
-
-    /// Returns a raw pointer to the starting virtual address of the frame.
-    pub fn as_ptr(&self) -> *const u8 {
-        paddr_to_vaddr(self.start_paddr()) as *const u8
-    }
-
-    /// Returns a mutable raw pointer to the starting virtual address of the frame.
-    pub fn as_mut_ptr(&self) -> *mut u8 {
-        paddr_to_vaddr(self.start_paddr()) as *mut u8
-    }
-
-    /// Copies the content of `src` to the frame.
-    pub fn copy_from(&self, src: &UntypedFrame) {
-        if self.paddr() == src.paddr() {
-            return;
-        }
-        // SAFETY: the source and the destination does not overlap.
-        unsafe {
-            core::ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.size());
-        }
-    }
-
-    /// Get the reference count of the frame.
-    ///
-    /// It returns the number of all references to the page, including all the
-    /// existing page handles ([`UntypedFrame`]) and all the mappings in the page
-    /// table that points to the page.
-    ///
-    /// # Safety
-    ///
-    /// The function is safe to call, but using it requires extra care. The
-    /// reference count can be changed by other threads at any time including
-    /// potentially between calling this method and acting on the result.
-    pub fn reference_count(&self) -> u32 {
-        self.page.reference_count()
-    }
-}
-
-impl From<Frame<UntypedMeta>> for UntypedFrame {
-    fn from(page: Frame<UntypedMeta>) -> Self {
-        Self { page }
-    }
-}
-
-impl TryFrom<Frame<dyn FrameMeta>> for UntypedFrame {
-    type Error = Frame<dyn FrameMeta>;
-
-    /// Try converting a [`Frame<dyn FrameMeta>`] into the statically-typed [`UntypedFrame`].
-    ///
-    /// If the dynamic page is not used as an untyped page frame, it will
-    /// return the dynamic page itself as is.
-    fn try_from(page: Frame<dyn FrameMeta>) -> core::result::Result<Self, Self::Error> {
-        page.try_into().map(|p: Frame<UntypedMeta>| p.into())
-    }
-}
-
-impl From<UntypedFrame> for Frame<UntypedMeta> {
-    fn from(frame: UntypedFrame) -> Self {
-        frame.page
-    }
-}
-
-impl HasPaddr for UntypedFrame {
-    fn paddr(&self) -> Paddr {
-        self.start_paddr()
-    }
-}
-
-impl<'a> UntypedFrame {
-    /// Returns a reader to read data from it.
-    pub fn reader(&'a self) -> VmReader<'a, Infallible> {
-        // SAFETY:
-        // - The memory range points to untyped memory.
-        // - The frame is alive during the lifetime `'a`.
-        // - Using `VmReader` and `VmWriter` is the only way to access the frame.
-        unsafe { VmReader::from_kernel_space(self.as_ptr(), self.size()) }
-    }
-
-    /// Returns a writer to write data into it.
-    pub fn writer(&'a self) -> VmWriter<'a, Infallible> {
-        // SAFETY:
-        // - The memory range points to untyped memory.
-        // - The frame is alive during the lifetime `'a`.
-        // - Using `VmReader` and `VmWriter` is the only way to access the frame.
-        unsafe { VmWriter::from_kernel_space(self.as_mut_ptr(), self.size()) }
-    }
-}
-
-impl VmIo for UntypedFrame {
-    fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> {
-        let read_len = writer.avail().min(self.size().saturating_sub(offset));
-        // Do bound check with potential integer overflow in mind
-        let max_offset = offset.checked_add(read_len).ok_or(Error::Overflow)?;
-        if max_offset > self.size() {
-            return Err(Error::InvalidArgs);
-        }
-        let len = self
-            .reader()
-            .skip(offset)
-            .read_fallible(writer)
-            .map_err(|(e, _)| e)?;
-        debug_assert!(len == read_len);
-        Ok(())
-    }
-
-    fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> {
-        let write_len = reader.remain().min(self.size().saturating_sub(offset));
-        // Do bound check with potential integer overflow in mind
-        let max_offset = offset.checked_add(write_len).ok_or(Error::Overflow)?;
-        if max_offset > self.size() {
-            return Err(Error::InvalidArgs);
-        }
-        let len = self
-            .writer()
-            .skip(offset)
-            .write_fallible(reader)
-            .map_err(|(e, _)| e)?;
-        debug_assert!(len == write_len);
-        Ok(())
-    }
-}
-
-/// Metadata for a frame.
-#[derive(Debug, Default)]
-pub struct UntypedMeta {}
-
-impl_frame_meta_for!(UntypedMeta);
-
-// Here are implementations for `xarray`.
-
-use core::{marker::PhantomData, ops::Deref};
-
-/// `FrameRef` is a struct that can work as `&'a UntypedFrame`.
-///
-/// This is solely useful for [`crate::collections::xarray`].
-pub struct FrameRef<'a> {
-    inner: ManuallyDrop<UntypedFrame>,
-    _marker: PhantomData<&'a UntypedFrame>,
-}
-
-impl Deref for FrameRef<'_> {
-    type Target = UntypedFrame;
-
-    fn deref(&self) -> &Self::Target {
-        &self.inner
-    }
-}
-
-// SAFETY: `UntypedFrame` is essentially an `*const MetaSlot` that could be used as a `*const` pointer.
-// The pointer is also aligned to 4.
-unsafe impl xarray::ItemEntry for UntypedFrame {
-    type Ref<'a>
-        = FrameRef<'a>
-    where
-        Self: 'a;
-
-    fn into_raw(self) -> *const () {
-        let ptr = self.page.ptr;
-        core::mem::forget(self);
-        ptr as *const ()
-    }
-
-    unsafe fn from_raw(raw: *const ()) -> Self {
-        Self {
-            page: Frame::<UntypedMeta> {
-                ptr: raw as *mut MetaSlot,
-                _marker: PhantomData,
-            },
-        }
-    }
-
-    unsafe fn raw_as_ref<'a>(raw: *const ()) -> Self::Ref<'a> {
-        Self::Ref {
-            inner: ManuallyDrop::new(UntypedFrame::from_raw(raw)),
-            _marker: PhantomData,
-        }
-    }
-}
(deleted file)
@@ -1,112 +0,0 @@
-// SPDX-License-Identifier: MPL-2.0
-
-//! Options for allocating frames
-
-use super::{UntypedFrame, UntypedSegment};
-use crate::{
-    mm::{frame, frame::untyped::UntypedMeta, PAGE_SIZE},
-    prelude::*,
-    Error,
-};
-
-/// Options for allocating physical memory pages (or frames).
-///
-/// All allocated frames are safe to use in the sense that they are
-/// not _typed memory_. We define typed memory as the memory that
-/// may store Rust objects or affect Rust memory safety, e.g.,
-/// the code and data segments of the OS kernel, the stack and heap
-/// allocated for the OS kernel.
-pub struct FrameAllocOptions {
-    nframes: usize,
-    is_contiguous: bool,
-    uninit: bool,
-}
-
-impl FrameAllocOptions {
-    /// Creates new options for allocating the specified number of frames.
-    pub fn new(nframes: usize) -> Self {
-        Self {
-            nframes,
-            is_contiguous: false,
-            uninit: false,
-        }
-    }
-
-    /// Sets whether the allocated frames should be contiguous.
-    ///
-    /// The default value is `false`.
-    pub fn is_contiguous(&mut self, is_contiguous: bool) -> &mut Self {
-        self.is_contiguous = is_contiguous;
-        self
-    }
-
-    /// Sets whether the allocated frames should be uninitialized.
-    ///
-    /// If `uninit` is set as `false`, the frame will be zeroed once allocated.
-    /// If `uninit` is set as `true`, the frame will **NOT** be zeroed and should *NOT* be read before writing.
-    ///
-    /// The default value is false.
-    pub fn uninit(&mut self, uninit: bool) -> &mut Self {
-        self.uninit = uninit;
-        self
-    }
-
-    /// Allocates a single page frame according to the given options.
-    pub fn alloc_single(&self) -> Result<UntypedFrame> {
-        if self.nframes != 1 {
-            return Err(Error::InvalidArgs);
-        }
-
-        let page = frame::allocator::alloc_single(UntypedMeta::default()).ok_or(Error::NoMemory)?;
-        let frame = UntypedFrame { page };
-        if !self.uninit {
-            frame.writer().fill(0);
-        }
-
-        Ok(frame)
-    }
-
-    /// Allocates a contiguous range of page frames according to the given options.
-    ///
-    /// The returned [`UntypedSegment`] contains at least one page frame.
-    pub fn alloc_contiguous(&self) -> Result<UntypedSegment> {
-        // It's no use to checking `self.is_contiguous` here.
-        if self.nframes == 0 {
-            return Err(Error::InvalidArgs);
-        }
-
-        let segment: UntypedSegment =
-            frame::allocator::alloc_contiguous(self.nframes * PAGE_SIZE, |_| {
-                UntypedMeta::default()
-            })
-            .ok_or(Error::NoMemory)?
-            .into();
-        if !self.uninit {
-            segment.writer().fill(0);
-        }
-
-        Ok(segment)
-    }
-}
-
-#[cfg(ktest)]
-#[ktest]
-fn test_alloc_dealloc() {
-    // Here we allocate and deallocate frames in random orders to test the allocator.
-    // We expect the test to fail if the underlying implementation panics.
-    let single_options = FrameAllocOptions::new(1);
-    let mut contiguous_options = FrameAllocOptions::new(10);
-    contiguous_options.is_contiguous(true);
-    let mut remember_vec = Vec::new();
-    for _ in 0..10 {
-        for i in 0..10 {
-            let single_frame = single_options.alloc_single().unwrap();
-            if i % 3 == 0 {
-                remember_vec.push(single_frame);
-            }
-        }
-        let contiguous_segment = contiguous_options.alloc_contiguous().unwrap();
-        drop(contiguous_segment);
-        remember_vec.pop();
-    }
-}
(deleted file)
@@ -1,177 +0,0 @@
-// SPDX-License-Identifier: MPL-2.0
-
-//! A contiguous segment of untyped memory pages.
-
-use core::ops::Range;
-
-use crate::{
-    mm::{
-        frame::{untyped::UntypedMeta, Segment},
-        io::{FallibleVmRead, FallibleVmWrite},
-        HasPaddr, Infallible, Paddr, UntypedFrame, VmIo, VmReader, VmWriter,
-    },
-    Error, Result,
-};
-
-/// A contiguous segment of untyped memory pages.
-///
-/// A [`UntypedSegment`] object is a handle to a contiguous range of untyped memory
-/// pages, and the underlying pages can be shared among multiple threads.
-/// [`UntypedSegment::slice`] can be used to clone a slice of the segment (also can be
-/// used to clone the entire range). Reference counts are maintained for each
-/// page in the segment. So cloning the handle may not be cheap as it
-/// increments the reference count of all the cloned pages.
-///
-/// Other [`UntypedFrame`] handles can also refer to the pages in the segment. And
-/// the segment can be iterated over to get all the frames in it.
-///
-/// To allocate a segment, use [`crate::mm::FrameAllocator`].
-///
-/// # Example
-///
-/// ```rust
-/// let vm_segment = FrameAllocOptions::new(2)
-///     .is_contiguous(true)
-///     .alloc_contiguous()?;
-/// vm_segment.write_bytes(0, buf)?;
-/// ```
-#[derive(Debug)]
-pub struct UntypedSegment {
-    pages: Segment<UntypedMeta>,
-}
-
-impl HasPaddr for UntypedSegment {
-    fn paddr(&self) -> Paddr {
-        self.pages.start_paddr()
-    }
-}
-
-impl Clone for UntypedSegment {
-    fn clone(&self) -> Self {
-        Self {
-            pages: self.pages.clone(),
-        }
-    }
-}
-
-impl UntypedSegment {
-    /// Returns the start physical address.
-    pub fn start_paddr(&self) -> Paddr {
-        self.pages.start_paddr()
-    }
-
-    /// Returns the end physical address.
-    pub fn end_paddr(&self) -> Paddr {
-        self.pages.end_paddr()
-    }
-
-    /// Returns the number of bytes in it.
-    pub fn nbytes(&self) -> usize {
-        self.pages.nbytes()
-    }
-
-    /// Split the segment into two at the given byte offset from the start.
-    ///
-    /// The resulting segments cannot be empty. So the byte offset cannot be
-    /// neither zero nor the length of the segment.
-    ///
-    /// # Panics
-    ///
-    /// The function panics if the byte offset is out of bounds, at either ends, or
-    /// not base-page-aligned.
-    pub fn split(self, offset: usize) -> (Self, Self) {
-        let (left, right) = self.pages.split(offset);
-        (Self { pages: left }, Self { pages: right })
-    }
-
-    /// Get an extra handle to the segment in the byte range.
-    ///
-    /// The sliced byte range in indexed by the offset from the start of the
-    /// segment. The resulting segment holds extra reference counts.
-    ///
-    /// # Panics
-    ///
-    /// The function panics if the byte range is out of bounds, or if any of
-    /// the ends of the byte range is not base-page aligned.
-    pub fn slice(&self, range: &Range<usize>) -> Self {
-        Self {
-            pages: self.pages.slice(range),
-        }
-    }
-
-    /// Gets a [`VmReader`] to read from the segment from the beginning to the end.
-    pub fn reader(&self) -> VmReader<'_, Infallible> {
-        let ptr = super::paddr_to_vaddr(self.start_paddr()) as *const u8;
-        // SAFETY:
-        // - The memory range points to untyped memory.
-        // - The segment is alive during the lifetime `'a`.
-        // - Using `VmReader` and `VmWriter` is the only way to access the segment.
-        unsafe { VmReader::from_kernel_space(ptr, self.nbytes()) }
-    }
-
-    /// Gets a [`VmWriter`] to write to the segment from the beginning to the end.
-    pub fn writer(&self) -> VmWriter<'_, Infallible> {
-        let ptr = super::paddr_to_vaddr(self.start_paddr()) as *mut u8;
-        // SAFETY:
-        // - The memory range points to untyped memory.
-        // - The segment is alive during the lifetime `'a`.
-        // - Using `VmReader` and `VmWriter` is the only way to access the segment.
-        unsafe { VmWriter::from_kernel_space(ptr, self.nbytes()) }
-    }
-}
-
-impl From<UntypedFrame> for UntypedSegment {
-    fn from(frame: UntypedFrame) -> Self {
-        Self {
-            pages: Segment::from(frame.page),
-        }
-    }
-}
-
-impl From<Segment<UntypedMeta>> for UntypedSegment {
-    fn from(pages: Segment<UntypedMeta>) -> Self {
-        Self { pages }
-    }
-}
-
-impl VmIo for UntypedSegment {
-    fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> {
-        let read_len = writer.avail();
-        // Do bound check with potential integer overflow in mind
-        let max_offset = offset.checked_add(read_len).ok_or(Error::Overflow)?;
-        if max_offset > self.nbytes() {
-            return Err(Error::InvalidArgs);
-        }
-        let len = self
-            .reader()
-            .skip(offset)
-            .read_fallible(writer)
-            .map_err(|(e, _)| e)?;
-        debug_assert!(len == read_len);
-        Ok(())
-    }
-
-    fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> {
-        let write_len = reader.remain();
-        // Do bound check with potential integer overflow in mind
-        let max_offset = offset.checked_add(reader.remain()).ok_or(Error::Overflow)?;
-        if max_offset > self.nbytes() {
-            return Err(Error::InvalidArgs);
-        }
-        let len = self
-            .writer()
-            .skip(offset)
-            .write_fallible(reader)
-            .map_err(|(e, _)| e)?;
-        debug_assert!(len == write_len);
-        Ok(())
-    }
-}
-
-impl Iterator for UntypedSegment {
-    type Item = UntypedFrame;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        self.pages.next().map(|page| UntypedFrame { page })
-    }
-}
@@ -7,11 +7,11 @@
 //! The core virtual memory (VM) access APIs provided by this module are [`VmReader`] and
 //! [`VmWriter`], which allow for writing to or reading from a region of memory _safely_.
 //! `VmReader` and `VmWriter` objects can be constructed from memory regions of either typed memory
-//! (e.g., `&[u8]`) or untyped memory (e.g, [`UntypedFrame`]). Behind the scene, `VmReader` and `VmWriter`
+//! (e.g., `&[u8]`) or untyped memory (e.g., [`DynUFrame`]). Behind the scene, `VmReader` and `VmWriter`
 //! must be constructed via their [`from_user_space`] and [`from_kernel_space`] methods, whose
 //! safety depends on whether the given memory regions are _valid_ or not.
 //!
-//! [`UntypedFrame`]: crate::mm::UntypedFrame
+//! [`DynUFrame`]: crate::mm::DynUFrame
 //! [`from_user_space`]: `VmReader::from_user_space`
 //! [`from_kernel_space`]: `VmReader::from_kernel_space`
 //!
@@ -58,7 +58,7 @@ use crate::{
 };

 /// A trait that enables reading/writing data from/to a VM object,
-/// e.g., [`UntypedSegment`], [`Vec<UntypedFrame>`] and [`UntypedFrame`].
+/// e.g., [`DynUSegment`], [`Vec<DynUFrame>`] and [`DynUFrame`].
 ///
 /// # Concurrency
 ///
@@ -67,8 +67,8 @@ use crate::{
 /// desire predictability or atomicity, the users should add extra mechanism
 /// for such properties.
 ///
-/// [`UntypedSegment`]: crate::mm::UntypedSegment
-/// [`UntypedFrame`]: crate::mm::UntypedFrame
+/// [`DynUSegment`]: crate::mm::DynUSegment
+/// [`DynUFrame`]: crate::mm::DynUFrame
 pub trait VmIo: Send + Sync {
     /// Reads requested data at a specified offset into a given `VmWriter`.
     ///
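Editor's note: a sketch of generic code written against `VmIo` after the rename; `read_val` is assumed here to be one of the trait's typed convenience readers, and error handling is elided:

    fn read_u64_at(obj: &impl VmIo, offset: usize) -> ostd::Result<u64> {
        // Works uniformly for `DynUFrame`, `DynUSegment`, and other VM objects.
        obj.read_val::<u64>(offset)
    }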
@@ -164,7 +164,7 @@ pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
     // Map the metadata pages.
     {
         let start_va = mapping::page_to_meta::<PagingConsts>(0);
-        let from = start_va..start_va + meta_pages.nbytes();
+        let from = start_va..start_va + meta_pages.size();
         let prop = PageProperty {
             flags: PageFlags::RW,
             cache: CachePolicy::Writeback,
@@ -214,7 +214,7 @@ pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
         };
         let mut cursor = kpt.cursor_mut(&from).unwrap();
         for frame_paddr in to.step_by(PAGE_SIZE) {
-            let page = Frame::<KernelMeta>::from_unused(frame_paddr, KernelMeta::default());
+            let page = Frame::<KernelMeta>::from_unused(frame_paddr, KernelMeta);
             // SAFETY: we are doing mappings for the kernel.
             unsafe {
                 let _old = cursor.map(page.into(), prop);
@@ -249,6 +249,6 @@ pub unsafe fn activate_kernel_page_table() {

 /// The metadata of pages that contains the kernel itself.
 #[derive(Debug, Default)]
-pub struct KernelMeta {}
+pub struct KernelMeta;

 impl_frame_meta_for!(KernelMeta);
@@ -24,7 +24,12 @@ use core::{fmt::Debug, ops::Range};

 pub use self::{
     dma::{Daddr, DmaCoherent, DmaDirection, DmaStream, DmaStreamSlice, HasDaddr},
-    frame::untyped::{options::FrameAllocOptions, UntypedFrame, UntypedSegment},
+    frame::{
+        allocator::FrameAllocOptions,
+        segment::{DynSegment, DynUSegment, Segment},
+        untyped::{DynUFrame, UFrameMeta, UntypedMem},
+        DynFrame, Frame,
+    },
     io::{
         Fallible, FallibleVmRead, FallibleVmWrite, Infallible, PodOnce, VmIo, VmIoOnce, VmReader,
         VmWriter,
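Editor's note: for downstream crates, the visible effect of this hunk is the new set of names re-exported under `ostd::mm`. A sketch of a typical import after the change:

    use ostd::mm::{
        DynFrame, DynSegment, DynUFrame, DynUSegment, Frame, FrameAllocOptions,
        Segment, UFrameMeta, UntypedMem, VmIo,
    };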
@@ -250,7 +250,7 @@ fn test_boot_pt_map_protect() {
         mm::{CachePolicy, FrameAllocOptions, PageFlags},
     };

-    let root_frame = FrameAllocOptions::new(1).alloc_single().unwrap();
+    let root_frame = FrameAllocOptions::new().alloc_frame().unwrap();
     let root_paddr = root_frame.start_paddr();

     let mut boot_pt = BootPageTable::<PageTableEntry, PagingConsts> {
@@ -40,8 +40,9 @@ use super::{nr_subpage_per_huge, PageTableEntryTrait};
 use crate::{
     arch::mm::{PageTableEntry, PagingConsts},
     mm::{
-        frame::{self, inc_page_ref_count, meta::FrameMeta, Frame},
-        paddr_to_vaddr, Infallible, Paddr, PagingConstsTrait, PagingLevel, VmReader, PAGE_SIZE,
+        frame::{inc_page_ref_count, meta::FrameMeta, Frame},
+        paddr_to_vaddr, FrameAllocOptions, Infallible, Paddr, PagingConstsTrait, PagingLevel,
+        VmReader,
     },
 };
@@ -260,13 +261,11 @@ where
     /// extra unnecessary expensive operation.
     pub(super) fn alloc(level: PagingLevel, is_tracked: MapTrackingStatus) -> Self {
         let meta = PageTablePageMeta::new_locked(level, is_tracked);
-        let page = frame::allocator::alloc_single::<PageTablePageMeta<E, C>>(meta).unwrap();
-        // Zero out the page table node.
-        let ptr = paddr_to_vaddr(page.paddr()) as *mut u8;
-        // SAFETY: The page is exclusively owned here. Pointers are valid also.
-        // We rely on the fact that 0 represents an absent entry to speed up `memset`.
-        unsafe { core::ptr::write_bytes(ptr, 0, PAGE_SIZE) };
+        let page = FrameAllocOptions::new()
+            .zeroed(true)
+            .alloc_frame_with(meta)
+            .expect("Failed to allocate a page table node");
+        // The allocated frame is zeroed. Make sure zero is absent PTE.
         debug_assert!(E::new_absent().as_bytes().iter().all(|&b| b == 0));

         Self { page }
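The manual `write_bytes` memset disappears because zero-filling is now a first-class allocator option; the `debug_assert!` keeps the load-bearing assumption (an all-zero PTE decodes as absent) visible. A sketch of the idiom in isolation, assuming `UntypedMem` supplies a `reader()` and that `VmWriter` converts from a byte slice:

```rust
use ostd::mm::{FrameAllocOptions, UntypedMem, VmWriter};

// Sketch: `zeroed(true)` asks the allocator for pre-zeroed memory, so no
// follow-up `core::ptr::write_bytes` is needed.
let frame = FrameAllocOptions::new().zeroed(true).alloc_frame().unwrap();
let mut buf = [0xffu8; 8];
frame.reader().read(&mut VmWriter::from(&mut buf[..]));
assert!(buf.iter().all(|&b| b == 0)); // the allocator already zeroed the frame
```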
@@ -281,7 +280,7 @@ where

         // SAFETY: The provided physical address is valid and the level is
         // correct. The reference count is not changed.
-        unsafe { RawPageTableNode::from_raw_parts(this.page.paddr(), this.page.meta().level) }
+        unsafe { RawPageTableNode::from_raw_parts(this.page.start_paddr(), this.page.meta().level) }
     }

     /// Gets a raw handle while still preserving the original handle.
@@ -290,7 +289,7 @@ where

         // SAFETY: The provided physical address is valid and the level is
         // correct. The reference count is increased by one.
-        unsafe { RawPageTableNode::from_raw_parts(page.paddr(), page.meta().level) }
+        unsafe { RawPageTableNode::from_raw_parts(page.start_paddr(), page.meta().level) }
     }

     /// Gets the number of valid PTEs in the node.
@@ -310,7 +309,7 @@ where
     /// The caller must ensure that the index is within the bound.
     unsafe fn read_pte(&self, idx: usize) -> E {
         debug_assert!(idx < nr_subpage_per_huge::<C>());
-        let ptr = paddr_to_vaddr(self.page.paddr()) as *const E;
+        let ptr = paddr_to_vaddr(self.page.start_paddr()) as *const E;
         // SAFETY: The index is within the bound and the PTE is plain-old-data.
         unsafe { ptr.add(idx).read() }
     }
@@ -330,7 +329,7 @@ where
     /// (see [`Child::is_compatible`]).
     unsafe fn write_pte(&mut self, idx: usize, pte: E) {
         debug_assert!(idx < nr_subpage_per_huge::<C>());
-        let ptr = paddr_to_vaddr(self.page.paddr()) as *mut E;
+        let ptr = paddr_to_vaddr(self.page.start_paddr()) as *mut E;
         // SAFETY: The index is within the bound and the PTE is plain-old-data.
         unsafe { ptr.add(idx).write(pte) }
     }
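Every `paddr()` call site in the node code becomes `start_paddr()`, which better reflects that a frame spans a range of physical memory and the accessor returns its first byte's address. A sketch of the typical pattern, written as if inside `ostd` since `paddr_to_vaddr` is crate-internal:

```rust
// Sketch: computing a kernel-space pointer to a frame's contents.
use crate::mm::{paddr_to_vaddr, FrameAllocOptions};

let frame = FrameAllocOptions::new().alloc_frame().unwrap();
let va = paddr_to_vaddr(frame.start_paddr()); // was `frame.paddr()`
let ptr = va as *const u8;
let _ = ptr;
```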
@@ -5,10 +5,9 @@ use core::mem::ManuallyDrop;
 use super::*;
 use crate::{
     mm::{
-        frame::{allocator, untyped::UntypedMeta},
         kspace::LINEAR_MAPPING_BASE_VADDR,
         page_prop::{CachePolicy, PageFlags},
-        MAX_USERSPACE_VADDR,
+        FrameAllocOptions, MAX_USERSPACE_VADDR,
     },
     prelude::*,
 };
@@ -31,8 +30,8 @@ fn test_tracked_map_unmap() {
     let pt = PageTable::<UserMode>::empty();

     let from = PAGE_SIZE..PAGE_SIZE * 2;
-    let page = allocator::alloc_single(UntypedMeta::default()).unwrap();
-    let start_paddr = page.paddr();
+    let page = FrameAllocOptions::new().alloc_frame().unwrap();
+    let start_paddr = page.start_paddr();
     let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
     unsafe { pt.cursor_mut(&from).unwrap().map(page.into(), prop) };
     assert_eq!(pt.query(from.start + 10).unwrap().0, start_paddr + 10);
@@ -87,8 +86,8 @@ fn test_user_copy_on_write() {

     let pt = PageTable::<UserMode>::empty();
     let from = PAGE_SIZE..PAGE_SIZE * 2;
-    let page = allocator::alloc_single(UntypedMeta::default()).unwrap();
-    let start_paddr = page.paddr();
+    let page = FrameAllocOptions::new().alloc_frame().unwrap();
+    let start_paddr = page.start_paddr();
     let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
     unsafe { pt.cursor_mut(&from).unwrap().map(page.clone().into(), prop) };
     assert_eq!(pt.query(from.start + 10).unwrap().0, start_paddr + 10);
@@ -172,7 +171,7 @@ fn test_base_protect_query() {

     let from_ppn = 1..1000;
     let from = PAGE_SIZE * from_ppn.start..PAGE_SIZE * from_ppn.end;
-    let to = allocator::alloc_contiguous(999 * PAGE_SIZE, |_| UntypedMeta::default()).unwrap();
+    let to = FrameAllocOptions::new().alloc_segment(999).unwrap();
     let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
     unsafe {
         let mut cursor = pt.cursor_mut(&from).unwrap();
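Note the unit change hiding in this hunk: the old `alloc_contiguous` took a length in bytes (`999 * PAGE_SIZE`), while the new `alloc_segment` takes a length in frames (`999`). A sketch making the equivalence explicit, assuming `size()` reports the segment's byte length as in the `DynUSegment` usage elsewhere in this commit:

```rust
use ostd::mm::{FrameAllocOptions, PAGE_SIZE};

// 999 frames == 999 * PAGE_SIZE bytes; the segment is physically contiguous.
let to = FrameAllocOptions::new().alloc_segment(999).unwrap();
assert_eq!(to.size(), 999 * PAGE_SIZE);
```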
@@ -22,7 +22,7 @@ use crate::{
         kspace::KERNEL_PAGE_TABLE,
         page_table::{self, PageTable, PageTableItem, UserMode},
         tlb::{TlbFlushOp, TlbFlusher, FLUSH_ALL_RANGE_THRESHOLD},
-        PageProperty, UntypedFrame, VmReader, VmWriter, MAX_USERSPACE_VADDR,
+        DynUFrame, PageProperty, VmReader, VmWriter, MAX_USERSPACE_VADDR,
     },
     prelude::*,
     sync::{PreemptDisabled, RwLock, RwLockReadGuard},
@@ -40,7 +40,7 @@ use crate::{
 ///
 /// A newly-created `VmSpace` is not backed by any physical memory pages. To
 /// provide memory pages for a `VmSpace`, one can allocate and map physical
-/// memory ([`UntypedFrame`]s) to the `VmSpace` using the cursor.
+/// memory ([`DynUFrame`]s) to the `VmSpace` using the cursor.
 ///
 /// A `VmSpace` can also attach a page fault handler, which will be invoked to
 /// handle page faults generated from user space.
@@ -323,7 +323,7 @@ impl CursorMut<'_, '_> {
     /// Map a frame into the current slot.
     ///
     /// This method will bring the cursor to the next slot after the modification.
-    pub fn map(&mut self, frame: UntypedFrame, prop: PageProperty) {
+    pub fn map(&mut self, frame: DynUFrame, prop: PageProperty) {
         let start_va = self.virt_addr();
         // SAFETY: It is safe to map untyped memory into the userspace.
         let old = unsafe { self.pt_cursor.map(frame.into(), prop) };
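With the parameter type now `DynUFrame`, any untyped frame converts into the argument via `Into`. A hedged sketch of mapping user memory through the cursor; `vm_space` is assumed to be an existing `VmSpace`, and error handling is elided:

```rust
use ostd::mm::{CachePolicy, FrameAllocOptions, PageFlags, PageProperty, PAGE_SIZE};

let frame = FrameAllocOptions::new().alloc_frame().unwrap();
let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
// Map one page at virtual address PAGE_SIZE; the cursor advances past the slot.
let mut cursor = vm_space.cursor_mut(&(PAGE_SIZE..PAGE_SIZE * 2)).unwrap();
cursor.map(frame.into(), prop); // `Frame<_>` converts into `DynUFrame`
```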
@@ -475,7 +475,7 @@ pub enum VmItem {
         /// The virtual address of the slot.
         va: Vaddr,
         /// The mapped frame.
-        frame: UntypedFrame,
+        frame: DynUFrame,
         /// The property of the slot.
         prop: PageProperty,
     },
@@ -14,6 +14,6 @@ pub use ostd_macros::ktest;

 pub use crate::{
     early_print as print, early_println as println,
-    mm::{Paddr, Vaddr},
+    mm::{Paddr, UntypedMem, Vaddr},
     panic::abort,
 };
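`UntypedMem` joins the prelude so kernel tests can touch frame contents without an extra import. A sketch under the assumption that the trait supplies `reader()`/`writer()` accessors returning infallible `VmReader`/`VmWriter`s:

```rust
use ostd::prelude::*; // brings `UntypedMem` into scope
use ostd::mm::FrameAllocOptions;

let frame = FrameAllocOptions::new().alloc_frame().unwrap();
frame.writer().fill(0u8); // `writer()` is provided by `UntypedMem`
```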
@@ -3,10 +3,9 @@
 use crate::{
     impl_frame_meta_for,
     mm::{
-        frame::allocator,
         kspace::kvirt_area::{KVirtArea, Tracked},
         page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
-        PAGE_SIZE,
+        FrameAllocOptions, PAGE_SIZE,
     },
     prelude::*,
 };
@@ -36,7 +35,7 @@ pub struct KernelStack {
 }

 #[derive(Debug, Default)]
-struct KernelStackMeta {}
+struct KernelStackMeta;

 impl_frame_meta_for!(KernelStackMeta);

@@ -47,8 +46,9 @@ impl KernelStack {
         let mut new_kvirt_area = KVirtArea::<Tracked>::new(KERNEL_STACK_SIZE + 4 * PAGE_SIZE);
         let mapped_start = new_kvirt_area.range().start + 2 * PAGE_SIZE;
         let mapped_end = mapped_start + KERNEL_STACK_SIZE;
-        let pages =
-            allocator::alloc_contiguous(KERNEL_STACK_SIZE, |_| KernelStackMeta::default()).unwrap();
+        let pages = FrameAllocOptions::new()
+            .zeroed(false)
+            .alloc_segment_with(KERNEL_STACK_SIZE / PAGE_SIZE, |_| KernelStackMeta)?;
         let prop = PageProperty {
             flags: PageFlags::RW,
             cache: CachePolicy::Writeback,
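Two things change here besides the builder: the length is again counted in frames (`KERNEL_STACK_SIZE / PAGE_SIZE`) rather than bytes, and the failure now propagates with `?` instead of panicking via `unwrap()`. A sketch of the per-frame-metadata variant, whose closure argument is assumed to be the frame's index within the segment:

```rust
// Sketch: allocate an uninitialized, physically contiguous segment where
// each frame carries a `KernelStackMeta` value.
let pages = FrameAllocOptions::new()
    .zeroed(false) // stack memory need not be zero-filled
    .alloc_segment_with(KERNEL_STACK_SIZE / PAGE_SIZE, |_idx| KernelStackMeta)?;
```

Propagating the error lets `KernelStack::new` report out-of-memory to its caller instead of bringing the kernel down.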