Implement a new set of physical page APIs

Author:       Zhang Junyang
Date:         2024-12-24 18:20:55 +08:00
Committed by: Tate, Hongliang Tian
Parent:       6e1c36965a
Commit:       cdac59beda
56 changed files with 882 additions and 995 deletions
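The hunks below all follow one migration pattern: the old `FrameAllocOptions::new(nblocks).uninit(true).alloc_contiguous()` chain becomes `FrameAllocOptions::new().zeroed(false).alloc_segment(nblocks)`, and the resulting typed segment is converted with `.into()` before being handed to `DmaStream::map`, whose untyped-segment parameter is now `DynUSegment` rather than `UntypedSegment`. The sketch below contrasts the two forms, inferred only from the hunks in this commit; the `alloc_dma_buffer` helper, its signature, and the exact error handling are illustrative assumptions, not part of the change.

```rust
use ostd::mm::{DmaDirection, DmaStream, FrameAllocOptions};

// Old API (removed by this commit), shown for comparison:
//
//     let segment = FrameAllocOptions::new(nblocks)   // block count in the constructor
//         .uninit(true)                               // skip zero-filling
//         .alloc_contiguous()?;                       // yields an `UntypedSegment`
//     let stream = DmaStream::map(segment, DmaDirection::ToDevice, false)?;

// New API (introduced here): the builder is parameterless, zero-filling is
// disabled with `zeroed(false)`, the count moves to `alloc_segment()`, and
// the typed segment is converted into a `DynUSegment` via `.into()`.
fn alloc_dma_buffer(nblocks: usize) -> Result<DmaStream, ostd::Error> {
    let segment = FrameAllocOptions::new()
        .zeroed(false)
        .alloc_segment(nblocks)?;
    DmaStream::map(segment.into(), DmaDirection::ToDevice, false)
        .map_err(|_| ostd::Error::AccessDenied) // mirrors the dma_pool call site below
}
```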

View File

@@ -5,8 +5,8 @@ use bitvec::array::BitArray;
use int_to_c_enum::TryFromInt;
use ostd::{
mm::{
-DmaDirection, DmaStream, DmaStreamSlice, FrameAllocOptions, Infallible, UntypedSegment,
-VmIo, VmReader, VmWriter,
+DmaDirection, DmaStream, DmaStreamSlice, DynUSegment, FrameAllocOptions, Infallible, VmIo,
+VmReader, VmWriter,
},
sync::{SpinLock, WaitQueue},
Error,
@@ -426,11 +426,11 @@ impl<'a> BioSegment {
let bio_segment_inner = target_pool(direction)
.and_then(|pool| pool.alloc(nblocks, offset_within_first_block, len))
.unwrap_or_else(|| {
-let segment = FrameAllocOptions::new(nblocks)
-.uninit(true)
-.alloc_contiguous()
+let segment = FrameAllocOptions::new()
+.zeroed(false)
+.alloc_segment(nblocks)
.unwrap();
-let dma_stream = DmaStream::map(segment, direction.into(), false).unwrap();
+let dma_stream = DmaStream::map(segment.into(), direction.into(), false).unwrap();
BioSegmentInner {
dma_slice: DmaStreamSlice::new(dma_stream, offset_within_first_block, len),
from_pool: false,
@@ -442,9 +442,9 @@ impl<'a> BioSegment {
}
}
-/// Constructs a new `BioSegment` with a given `UntypedSegment` and the bio direction.
-pub fn new_from_segment(segment: UntypedSegment, direction: BioDirection) -> Self {
-let len = segment.nbytes();
+/// Constructs a new `BioSegment` with a given `DynUSegment` and the bio direction.
+pub fn new_from_segment(segment: DynUSegment, direction: BioDirection) -> Self {
+let len = segment.size();
let dma_stream = DmaStream::map(segment, direction.into(), false).unwrap();
Self {
inner: Arc::new(BioSegmentInner {
@@ -481,8 +481,8 @@ impl<'a> BioSegment {
/// Returns the inner VM segment.
#[cfg(ktest)]
-pub fn inner_segment(&self) -> &UntypedSegment {
-self.inner.dma_slice.stream().vm_segment()
+pub fn inner_segment(&self) -> &DynUSegment {
+self.inner.dma_slice.stream().segment()
}
/// Returns a reader to read data from it.
@@ -560,11 +560,11 @@ impl BioSegmentPool {
pub fn new(direction: BioDirection) -> Self {
let total_blocks = POOL_DEFAULT_NBLOCKS;
let pool = {
-let segment = FrameAllocOptions::new(total_blocks)
-.uninit(true)
-.alloc_contiguous()
+let segment = FrameAllocOptions::new()
+.zeroed(false)
+.alloc_segment(total_blocks)
.unwrap();
-DmaStream::map(segment, direction.into(), false).unwrap()
+DmaStream::map(segment.into(), direction.into(), false).unwrap()
};
let manager = SpinLock::new(PoolSlotManager {
occupied: BitArray::ZERO,
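
For reference, here is a condensed sketch of the two `BioSegment` construction paths touched above: the pool-miss fallback and `new_from_segment`. It only restates what the hunks show; the helper names and the `DmaStreamSlice<DmaStream>` return type are assumptions made for illustration.

```rust
use ostd::mm::{DmaDirection, DmaStream, DmaStreamSlice, DynUSegment, FrameAllocOptions};

// Pool-miss fallback: allocate an uninitialized segment, map it for DMA, and
// expose only the requested byte window through a `DmaStreamSlice`.
fn alloc_fallback_slice(nblocks: usize, offset: usize, len: usize) -> DmaStreamSlice<DmaStream> {
    let segment = FrameAllocOptions::new()
        .zeroed(false) // the block layer overwrites the buffer anyway
        .alloc_segment(nblocks)
        .unwrap();
    let dma_stream = DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap();
    DmaStreamSlice::new(dma_stream, offset, len)
}

// Wrapping an existing `DynUSegment`: note the renamed accessor (`size()`
// replaces `nbytes()`) and that no `.into()` is needed, since the value is
// already a `DynUSegment`.
fn wrap_existing_segment(segment: DynUSegment, direction: DmaDirection) -> DmaStreamSlice<DmaStream> {
    let len = segment.size();
    let dma_stream = DmaStream::map(segment, direction, false).unwrap();
    DmaStreamSlice::new(dma_stream, 0, len) // cover the whole segment
}
```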

View File

@@ -34,10 +34,10 @@ impl TxBuffer {
let dma_stream = if let Some(stream) = pool.lock().pop_front() {
stream
} else {
-let segment = FrameAllocOptions::new(TX_BUFFER_LEN / PAGE_SIZE)
-.alloc_contiguous()
+let segment = FrameAllocOptions::new()
+.alloc_segment(TX_BUFFER_LEN / PAGE_SIZE)
.unwrap();
-DmaStream::map(segment, DmaDirection::ToDevice, false).unwrap()
+DmaStream::map(segment.into(), DmaDirection::ToDevice, false).unwrap()
};
let tx_buffer = {

View File

@@ -152,9 +152,9 @@ impl DmaPage {
pool: Weak<DmaPool>,
) -> Result<Self, ostd::Error> {
let dma_stream = {
-let segment = FrameAllocOptions::new(1).alloc_contiguous()?;
+let segment = FrameAllocOptions::new().alloc_segment(1)?;
-DmaStream::map(segment, direction, is_cache_coherent)
+DmaStream::map(segment.into(), direction, is_cache_coherent)
.map_err(|_| ostd::Error::AccessDenied)?
};

View File

@@ -141,13 +141,13 @@ impl DeviceInner {
let queue = VirtQueue::new(0, Self::QUEUE_SIZE, transport.as_mut())
.expect("create virtqueue failed");
let block_requests = {
-let vm_segment = FrameAllocOptions::new(1).alloc_contiguous().unwrap();
-DmaStream::map(vm_segment, DmaDirection::Bidirectional, false).unwrap()
+let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();
+DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap()
};
assert!(Self::QUEUE_SIZE as usize * REQ_SIZE <= block_requests.nbytes());
let block_responses = {
-let vm_segment = FrameAllocOptions::new(1).alloc_contiguous().unwrap();
-DmaStream::map(vm_segment, DmaDirection::Bidirectional, false).unwrap()
+let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();
+DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap()
};
assert!(Self::QUEUE_SIZE as usize * RESP_SIZE <= block_responses.nbytes());
@@ -261,11 +261,11 @@ impl DeviceInner {
};
const MAX_ID_LENGTH: usize = 20;
let device_id_stream = {
-let segment = FrameAllocOptions::new(1)
-.uninit(true)
-.alloc_contiguous()
+let segment = FrameAllocOptions::new()
+.zeroed(false)
+.alloc_segment(1)
.unwrap();
-DmaStream::map(segment, DmaDirection::FromDevice, false).unwrap()
+DmaStream::map(segment.into(), DmaDirection::FromDevice, false).unwrap()
};
let device_id_slice = DmaStreamSlice::new(&device_id_stream, 0, MAX_ID_LENGTH);
let outputs = vec![&device_id_slice, &resp_slice];

View File

@@ -87,13 +87,13 @@ impl ConsoleDevice {
SpinLock::new(VirtQueue::new(TRANSMIT0_QUEUE_INDEX, 2, transport.as_mut()).unwrap());
let send_buffer = {
-let vm_segment = FrameAllocOptions::new(1).alloc_contiguous().unwrap();
-DmaStream::map(vm_segment, DmaDirection::ToDevice, false).unwrap()
+let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();
+DmaStream::map(segment.into(), DmaDirection::ToDevice, false).unwrap()
};
let receive_buffer = {
-let vm_segment = FrameAllocOptions::new(1).alloc_contiguous().unwrap();
-DmaStream::map(vm_segment, DmaDirection::FromDevice, false).unwrap()
+let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();
+DmaStream::map(segment.into(), DmaDirection::FromDevice, false).unwrap()
};
let device = Arc::new(Self {

View File

@@ -261,14 +261,14 @@ impl EventTable {
fn new(num_events: usize) -> Self {
assert!(num_events * mem::size_of::<VirtioInputEvent>() <= PAGE_SIZE);
-let vm_segment = FrameAllocOptions::new(1).alloc_contiguous().unwrap();
+let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();
let default_event = VirtioInputEvent::default();
let iter = iter::repeat(&default_event).take(EVENT_SIZE);
-let nr_written = vm_segment.write_vals(0, iter, 0).unwrap();
+let nr_written = segment.write_vals(0, iter, 0).unwrap();
assert_eq!(nr_written, EVENT_SIZE);
-let stream = DmaStream::map(vm_segment, DmaDirection::FromDevice, false).unwrap();
+let stream = DmaStream::map(segment.into(), DmaDirection::FromDevice, false).unwrap();
Self { stream, num_events }
}
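
The event-table hunk above also shows that the new segment type keeps the `write_vals` bulk-write interface (presumably via the `VmIo` trait imported in the first file): the buffer is seeded with default events before it is mapped as a device-writable stream. Below is a minimal sketch of that order of operations, with a plain `u64` standing in for `VirtioInputEvent`; the helper name and the `ostd::mm::PAGE_SIZE` import path are assumptions.

```rust
use ostd::mm::{DmaDirection, DmaStream, FrameAllocOptions, VmIo, PAGE_SIZE};

// Seed a freshly allocated frame with default records via `write_vals`, then
// map it for the device to write into (FromDevice).
fn new_prefilled_device_buffer(num_slots: usize) -> DmaStream {
    assert!(num_slots * core::mem::size_of::<u64>() <= PAGE_SIZE);
    let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();
    // Write the initial contents while the CPU still owns the memory,
    // i.e. before the DMA mapping is established.
    let zero = 0u64;
    let iter = core::iter::repeat(&zero).take(num_slots);
    let nr_written = segment.write_vals(0, iter, 0).unwrap();
    assert_eq!(nr_written, num_slots);
    DmaStream::map(segment.into(), DmaDirection::FromDevice, false).unwrap()
}
```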

View File

@@ -76,7 +76,7 @@ impl VirtQueue {
}
let (descriptor_ptr, avail_ring_ptr, used_ring_ptr) = if transport.is_legacy_version() {
-// Currently, we use one UntypedFrame to place the descriptors and available rings, one UntypedFrame to place used rings
+// Currently, we use one DynUFrame to place the descriptors and available rings, one DynUFrame to place used rings
// because the virtio-mmio legacy required the address to be continuous. The max queue size is 128.
if size > 128 {
return Err(QueueError::InvalidArgs);
@@ -89,8 +89,8 @@ impl VirtQueue {
let align_size = VirtioPciLegacyTransport::QUEUE_ALIGN_SIZE;
let total_frames =
VirtioPciLegacyTransport::calc_virtqueue_size_aligned(queue_size) / align_size;
-let continue_segment = FrameAllocOptions::new(total_frames)
-.alloc_contiguous()
+let continue_segment = FrameAllocOptions::new()
+.alloc_segment(total_frames)
.unwrap();
let avial_size = size_of::<u16>() * (3 + queue_size);
@@ -99,12 +99,12 @@ impl VirtQueue {
continue_segment.split(seg1_frames * align_size)
};
let desc_frame_ptr: SafePtr<Descriptor, DmaCoherent> =
-SafePtr::new(DmaCoherent::map(seg1, true).unwrap(), 0);
+SafePtr::new(DmaCoherent::map(seg1.into(), true).unwrap(), 0);
let mut avail_frame_ptr: SafePtr<AvailRing, DmaCoherent> =
desc_frame_ptr.clone().cast();
avail_frame_ptr.byte_add(desc_size);
let used_frame_ptr: SafePtr<UsedRing, DmaCoherent> =
-SafePtr::new(DmaCoherent::map(seg2, true).unwrap(), 0);
+SafePtr::new(DmaCoherent::map(seg2.into(), true).unwrap(), 0);
(desc_frame_ptr, avail_frame_ptr, used_frame_ptr)
} else {
if size > 256 {
@@ -112,18 +112,27 @@ impl VirtQueue {
}
(
SafePtr::new(
-DmaCoherent::map(FrameAllocOptions::new(1).alloc_contiguous().unwrap(), true)
-.unwrap(),
+DmaCoherent::map(
+FrameAllocOptions::new().alloc_segment(1).unwrap().into(),
+true,
+)
+.unwrap(),
0,
),
SafePtr::new(
-DmaCoherent::map(FrameAllocOptions::new(1).alloc_contiguous().unwrap(), true)
-.unwrap(),
+DmaCoherent::map(
+FrameAllocOptions::new().alloc_segment(1).unwrap().into(),
+true,
+)
+.unwrap(),
0,
),
SafePtr::new(
-DmaCoherent::map(FrameAllocOptions::new(1).alloc_contiguous().unwrap(), true)
-.unwrap(),
+DmaCoherent::map(
+FrameAllocOptions::new().alloc_segment(1).unwrap().into(),
+true,
+)
+.unwrap(),
0,
),
)
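
The legacy-virtqueue path above is the one place where a multi-frame allocation is split and each half receives its own `DmaCoherent` mapping, because the legacy transport needs the descriptor table, available ring, and used ring to sit in physically contiguous memory. Below is a condensed sketch of that layout logic; the helper name is invented, `PAGE_SIZE` stands in for the transport's `QUEUE_ALIGN_SIZE`, and the assumption that `split` takes a byte offset and returns both halves is inferred from the surrounding context lines, so treat the exact signatures as approximate.

```rust
use ostd::mm::{DmaCoherent, FrameAllocOptions, PAGE_SIZE};

// One contiguous allocation backs the whole legacy queue: the first part
// holds the descriptor table plus the available ring, the second the used
// ring. Each part is mapped cache-coherently on its own.
fn map_legacy_queue(total_frames: usize, seg1_frames: usize) -> (DmaCoherent, DmaCoherent) {
    let contiguous = FrameAllocOptions::new()
        .alloc_segment(total_frames)
        .unwrap();
    // Split at a frame boundary so both halves stay physically contiguous.
    let (seg1, seg2) = contiguous.split(seg1_frames * PAGE_SIZE);
    let desc_and_avail = DmaCoherent::map(seg1.into(), true).unwrap();
    let used_ring = DmaCoherent::map(seg2.into(), true).unwrap();
    (desc_and_avail, used_ring)
}
```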