Add VmSegment and rewrite the vm allocation code with VmAllocOptions

Author: LI Qing
Date: 2023-10-30 15:37:59 +08:00
Committed by: Tate, Hongliang Tian
Parent: b0b25f9282
Commit: 4c72f5b7fa
12 changed files with 447 additions and 210 deletions
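In short, allocation now goes through `VmAllocOptions` itself instead of `VmFrameVec::allocate`, and physically contiguous ranges come back as the new `VmSegment` handle. A minimal sketch of the new call sites, distilled from the diffs below (the `?` operator assumes a `Result`-returning context):

    use jinux_frame::vm::{VmAllocOptions, VmFrame, VmFrameVec, VmSegment};

    // One page frame, zeroed by default (`uninit` defaults to false).
    let frame: VmFrame = VmAllocOptions::new(1).alloc_single()?;

    // A batch of frames that need not be physically contiguous.
    let frames: VmFrameVec = VmAllocOptions::new(4).alloc()?;

    // A physically contiguous range, returned as the new `VmSegment` type.
    let segment: VmSegment = VmAllocOptions::new(4)
        .is_contiguous(true)
        .alloc_contiguous()?;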

View File

@@ -8,7 +8,7 @@ use crate::{
    bus::pci::PciDeviceLocation,
    vm::{
        page_table::{PageTableConfig, PageTableError},
-        Paddr, PageTable, Vaddr, VmAllocOptions, VmFrame, VmFrameVec, VmIo,
+        Paddr, PageTable, Vaddr, VmAllocOptions, VmFrame, VmIo,
    },
};

@@ -48,10 +48,7 @@ pub enum ContextTableError {
impl RootTable {
    pub fn new() -> Self {
        Self {
-            root_frame: VmFrameVec::allocate(VmAllocOptions::new(1).uninit(false))
-                .unwrap()
-                .pop()
-                .unwrap(),
+            root_frame: VmAllocOptions::new(1).alloc_single().unwrap(),
            context_tables: BTreeMap::new(),
        }
    }

@@ -236,10 +233,7 @@ pub struct ContextTable {
impl ContextTable {
    fn new() -> Self {
        Self {
-            entries_frame: VmFrameVec::allocate(VmAllocOptions::new(1).uninit(false))
-                .unwrap()
-                .pop()
-                .unwrap(),
+            entries_frame: VmAllocOptions::new(1).alloc_single().unwrap(),
            page_tables: BTreeMap::new(),
        }
    }

View File

@@ -2,7 +2,7 @@ use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE};
use crate::cpu::CpuSet;
use crate::prelude::*;
use crate::user::UserSpace;
-use crate::vm::{VmAllocOptions, VmFrameVec};
+use crate::vm::{VmAllocOptions, VmSegment};
use spin::{Mutex, MutexGuard};
use intrusive_collections::intrusive_adapter;

@@ -38,20 +38,20 @@ extern "C" {
}

pub struct KernelStack {
-    frame: VmFrameVec,
+    segment: VmSegment,
}

impl KernelStack {
    pub fn new() -> Result<Self> {
        Ok(Self {
-            frame: VmFrameVec::allocate(
-                VmAllocOptions::new(KERNEL_STACK_SIZE / PAGE_SIZE).is_contiguous(true),
-            )?,
+            segment: VmAllocOptions::new(KERNEL_STACK_SIZE / PAGE_SIZE)
+                .is_contiguous(true)
+                .alloc_contiguous()?,
        })
    }

    pub fn end_paddr(&self) -> Paddr {
-        self.frame.get(self.frame.len() - 1).unwrap().end_paddr()
+        self.segment.end_paddr()
    }
}

View File

@@ -2,7 +2,7 @@ use alloc::vec;
use core::{
    iter::Iterator,
    marker::PhantomData,
-    ops::{BitAnd, BitOr, Not},
+    ops::{BitAnd, BitOr, Not, Range},
};

use crate::{arch::iommu, config::PAGE_SIZE, prelude::*, Error};
@@ -18,56 +18,9 @@ use super::{Paddr, VmIo};
/// more often than not, one needs to operate on a batch of frames rather
/// a single frame.
#[derive(Debug, Clone)]
-pub struct VmFrameVec(Vec<VmFrame>);
+pub struct VmFrameVec(pub(crate) Vec<VmFrame>);

impl VmFrameVec {
-    /// Allocate a collection of free frames according to the given options.
-    ///
-    /// All returned frames are safe to use in the sense that they are
-    /// not _typed memory_. We define typed memory as the memory that
-    /// may store Rust objects or affect Rust memory safety, e.g.,
-    /// the code and data segments of the OS kernel, the stack and heap
-    /// allocated for the OS kernel.
-    ///
-    /// For more information, see `VmAllocOptions`.
-    pub fn allocate(options: &VmAllocOptions) -> Result<Self> {
-        let page_size = options.page_size;
-        let mut flags = VmFrameFlags::empty();
-        if options.can_dma {
-            flags.insert(VmFrameFlags::CAN_DMA);
-        }
-        let mut frames = if options.is_contiguous {
-            frame_allocator::alloc_continuous(options.page_size, flags).ok_or(Error::NoMemory)?
-        } else {
-            let mut frame_list = Vec::new();
-            for _ in 0..page_size {
-                frame_list.push(frame_allocator::alloc(flags).ok_or(Error::NoMemory)?);
-            }
-            frame_list
-        };
-        if options.can_dma {
-            for frame in frames.iter_mut() {
-                // Safety: The address is controlled by frame allocator.
-                unsafe {
-                    if let Err(err) = iommu::map(frame.start_paddr(), frame) {
-                        match err {
-                            // do nothing
-                            iommu::IommuError::NoIommu => {}
-                            iommu::IommuError::ModificationError(err) => {
-                                panic!("iommu map error:{:?}", err)
-                            }
-                        }
-                    }
-                }
-            }
-        }
-        let frame_vec = Self(frames);
-        if !options.uninit {
-            frame_vec.zero();
-        }
-        Ok(frame_vec)
-    }
-
    pub fn get(&self, index: usize) -> Option<&VmFrame> {
        self.0.get(index)
    }
@@ -216,58 +169,6 @@ impl<'a> Iterator for VmFrameVecIter<'a> {
    }
}

-/// Options for allocating physical memory pages (or frames).
-/// See `VmFrameVec::alloc`.
-pub struct VmAllocOptions {
-    page_size: usize,
-    is_contiguous: bool,
-    uninit: bool,
-    can_dma: bool,
-}
-
-impl VmAllocOptions {
-    /// Creates new options for allocating the specified number of frames.
-    pub fn new(len: usize) -> Self {
-        Self {
-            page_size: len,
-            is_contiguous: false,
-            uninit: false,
-            can_dma: false,
-        }
-    }
-
-    /// Sets whether the allocated frames should be contiguous.
-    ///
-    /// If the physical address is set, then the frames must be contiguous.
-    ///
-    /// The default value is `false`.
-    pub fn is_contiguous(&mut self, is_contiguous: bool) -> &mut Self {
-        self.is_contiguous = is_contiguous;
-        self
-    }
-
-    /// Sets whether the allocated frames should be uninitialized.
-    ///
-    /// If `uninit` is set as `false`, the frame will be zeroed once allocated.
-    /// If `uninit` is set as `true`, the frame will **NOT** be zeroed and should *NOT* be read before writing.
-    ///
-    /// The default value is false.
-    pub fn uninit(&mut self, uninit: bool) -> &mut Self {
-        self.uninit = uninit;
-        self
-    }
-
-    /// Sets whether the pages can be accessed by devices through
-    /// Direct Memory Access (DMA).
-    ///
-    /// In a TEE environment, DMAable pages are untrusted pages shared with
-    /// the VMM.
-    pub fn can_dma(&mut self, can_dma: bool) -> &mut Self {
-        self.can_dma = can_dma;
-        self
-    }
-}
-
bitflags::bitflags! {
    pub(crate) struct VmFrameFlags : usize{
        const NEED_DEALLOC = 1 << 63;
@@ -326,15 +227,10 @@ impl VmFrame {
        (self.frame_index() + 1) * PAGE_SIZE
    }

-    /// fill the frame with zero
+    /// Fills the frame with zero.
    pub fn zero(&self) {
-        unsafe {
-            core::ptr::write_bytes(
-                super::paddr_to_vaddr(self.start_paddr()) as *mut u8,
-                0,
-                PAGE_SIZE,
-            )
-        }
+        // Safety: The range of memory is valid for writes of one page data.
+        unsafe { core::ptr::write_bytes(self.as_mut_ptr(), 0, PAGE_SIZE) }
    }

    /// Returns whether the page frame is accessible by DMA.
@@ -423,7 +319,208 @@ impl Drop for VmFrame {
            }
            // Safety: the frame index is valid.
            unsafe {
-                frame_allocator::dealloc(self.frame_index());
+                frame_allocator::dealloc_single(self.frame_index());
            }
        }
    }
}

/// A handle to a contiguous range of page frames (physical memory pages).
///
/// The biggest difference between `VmSegment` and `VmFrameVec` is that
/// the page frames must be contiguous for `VmSegment`.
///
/// A cloned `VmSegment` refers to the same page frames as the original.
/// As the original and cloned instances point to the same physical address,
/// they are treated as equal to each other.
///
/// # Example
///
/// ```rust
/// let vm_segment = VmAllocOptions::new(2)
/// .is_contiguous(true)
/// .alloc_contiguous()?;
/// vm_segment.write_bytes(0, buf)?;
/// ```
#[derive(Debug, Clone)]
pub struct VmSegment {
inner: Arc<Inner>,
range: Range<usize>,
}
#[derive(Debug)]
struct Inner {
start_frame_index: Paddr,
nframes: usize,
}
impl Inner {
/// Creates the inner part of 'VmSegment'.
///
/// # Safety
///
/// The constructor of 'VmSegment' ensures the safety.
unsafe fn new(paddr: Paddr, nframes: usize, flags: VmFrameFlags) -> Self {
assert_eq!(paddr % PAGE_SIZE, 0);
Self {
start_frame_index: (paddr / PAGE_SIZE).bitor(flags.bits),
nframes,
}
}
fn start_frame_index(&self) -> usize {
self.start_frame_index
.bitand(VmFrameFlags::all().bits().not())
}
fn start_paddr(&self) -> Paddr {
self.start_frame_index() * PAGE_SIZE
}
}
impl HasPaddr for VmSegment {
fn paddr(&self) -> Paddr {
self.start_paddr()
}
}
impl VmSegment {
/// Creates a new `VmSegment`.
///
/// # Safety
///
/// The given range of page frames must be contiguous and valid for use.
/// The given range of page frames must not have been allocated before,
/// as part of either a `VmFrame` or `VmSegment`.
pub(crate) unsafe fn new(paddr: Paddr, nframes: usize, flags: VmFrameFlags) -> Self {
Self {
inner: Arc::new(Inner::new(paddr, nframes, flags)),
range: 0..nframes,
}
}
/// Returns a part of the `VmSegment`.
///
/// # Panic
///
/// If `range` is not within the range of this `VmSegment`,
/// then the method panics.
pub fn range(&self, range: Range<usize>) -> Self {
let orig_range = &self.range;
let adj_range = (range.start + orig_range.start)..(range.end + orig_range.start);
assert!(!adj_range.is_empty() && adj_range.end <= orig_range.end);
Self {
inner: self.inner.clone(),
range: adj_range,
}
}
/// Returns the start physical address.
pub fn start_paddr(&self) -> Paddr {
self.start_frame_index() * PAGE_SIZE
}
/// Returns the end physical address.
pub fn end_paddr(&self) -> Paddr {
(self.start_frame_index() + self.nframes()) * PAGE_SIZE
}
/// Returns the number of page frames.
pub fn nframes(&self) -> usize {
self.range.len()
}
/// Returns the number of bytes.
pub fn nbytes(&self) -> usize {
self.nframes() * PAGE_SIZE
}
/// Fills the page frames with zero.
pub fn zero(&self) {
// Safety: The range of memory is valid for writes of `self.nbytes()` data.
unsafe { core::ptr::write_bytes(self.as_mut_ptr(), 0, self.nbytes()) }
}
/// Returns whether the page frames are accessible by DMA.
///
/// In a TEE environment, DMAable pages are untrusted pages shared with
/// the VMM.
pub fn can_dma(&self) -> bool {
(self.inner.start_frame_index & VmFrameFlags::CAN_DMA.bits()) != 0
}
fn need_dealloc(&self) -> bool {
(self.inner.start_frame_index & VmFrameFlags::NEED_DEALLOC.bits()) != 0
}
fn start_frame_index(&self) -> usize {
self.inner.start_frame_index() + self.range.start
}
pub fn as_ptr(&self) -> *const u8 {
super::paddr_to_vaddr(self.start_paddr()) as *const u8
}
pub fn as_mut_ptr(&self) -> *mut u8 {
super::paddr_to_vaddr(self.start_paddr()) as *mut u8
}
}
impl<'a> VmSegment {
/// Returns a reader to read data from it.
pub fn reader(&'a self) -> VmReader<'a> {
// Safety: the memory of the page frames is contiguous and is valid during `'a`.
unsafe { VmReader::from_raw_parts(self.as_ptr(), self.nbytes()) }
}
/// Returns a writer to write data into it.
pub fn writer(&'a self) -> VmWriter<'a> {
// Safety: the memory of the page frames is contiguous and is valid during `'a`.
unsafe { VmWriter::from_raw_parts_mut(self.as_mut_ptr(), self.nbytes()) }
}
}
impl VmIo for VmSegment {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
if buf.len() + offset > self.nbytes() {
return Err(Error::InvalidArgs);
}
let len = self.reader().skip(offset).read(&mut buf.into());
debug_assert!(len == buf.len());
Ok(())
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
if buf.len() + offset > self.nbytes() {
return Err(Error::InvalidArgs);
}
let len = self.writer().skip(offset).write(&mut buf.into());
debug_assert!(len == buf.len());
Ok(())
}
}
impl Drop for VmSegment {
fn drop(&mut self) {
if self.need_dealloc() && Arc::strong_count(&self.inner) == 1 {
if self.can_dma() {
if let Err(err) = iommu::unmap(self.inner.start_paddr()) {
match err {
// do nothing
iommu::IommuError::NoIommu => {}
iommu::IommuError::ModificationError(err) => {
panic!("iommu map error:{:?}", err)
}
}
}
}
// Safety: the range of contiguous page frames is valid.
unsafe {
frame_allocator::dealloc_contiguous(
self.inner.start_frame_index(),
self.inner.nframes,
);
            }
        }
    }
}
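The doc comment above covers allocation; the sketch below is a hedged illustration (written as if inside the `jinux_frame::vm` module with the crate's `Result` in scope, and assuming 4 KiB pages) of how the sub-range and I/O methods introduced here compose:

    use crate::vm::{VmAllocOptions, VmIo};

    fn vm_segment_demo() -> Result<()> {
        // Two contiguous frames, zeroed on allocation (`uninit` defaults to false).
        let segment = VmAllocOptions::new(2)
            .is_contiguous(true)
            .alloc_contiguous()?;

        // `range()` narrows the view; the returned handle clones the inner `Arc`,
        // so the frames are deallocated only when the last handle is dropped.
        let second_page = segment.range(1..2);
        assert_eq!(second_page.start_paddr(), segment.start_paddr() + 4096);

        // `VmSegment` implements `VmIo`, so accesses are bounds-checked against `nbytes()`.
        segment.write_bytes(0, &[0xff; 8])?;
        let mut buf = [0u8; 8];
        second_page.read_bytes(0, &mut buf)?;

        Ok(())
    }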

View File

@@ -7,29 +7,21 @@ use spin::Once;
use crate::boot::memory_region::{MemoryRegion, MemoryRegionType};
use crate::{config::PAGE_SIZE, sync::SpinLock};

-use super::{frame::VmFrameFlags, VmFrame};
+use super::{frame::VmFrameFlags, VmFrame, VmFrameVec, VmSegment};

pub(super) static FRAME_ALLOCATOR: Once<SpinLock<FrameAllocator>> = Once::new();

-pub(crate) fn alloc(flags: VmFrameFlags) -> Option<VmFrame> {
+pub(crate) fn alloc(nframes: usize, flags: VmFrameFlags) -> Option<VmFrameVec> {
    FRAME_ALLOCATOR
        .get()
        .unwrap()
        .lock()
-        .alloc(1)
-        .map(|pa| unsafe { VmFrame::new(pa * PAGE_SIZE, flags.union(VmFrameFlags::NEED_DEALLOC)) })
-}
-
-pub(crate) fn alloc_continuous(frame_count: usize, flags: VmFrameFlags) -> Option<Vec<VmFrame>> {
-    FRAME_ALLOCATOR
-        .get()
-        .unwrap()
-        .lock()
-        .alloc(frame_count)
+        .alloc(nframes)
        .map(|start| {
            let mut vector = Vec::new();
+            // Safety: The frame index is valid.
            unsafe {
-                for i in 0..frame_count {
+                for i in 0..nframes {
                    let frame = VmFrame::new(
                        (start + i) * PAGE_SIZE,
                        flags.union(VmFrameFlags::NEED_DEALLOC),
@@ -37,26 +29,57 @@ pub(crate) fn alloc_continuous(frame_count: usize, flags: VmFrameFlags) -> Optio
                    vector.push(frame);
                }
            }
-            vector
+            VmFrameVec(vector)
        })
}

-pub(crate) fn alloc_zero(flags: VmFrameFlags) -> Option<VmFrame> {
-    let frame = alloc(flags)?;
-    frame.zero();
-    Some(frame)
+pub(crate) fn alloc_single(flags: VmFrameFlags) -> Option<VmFrame> {
+    FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).map(|idx|
+        // Safety: The frame index is valid.
+        unsafe { VmFrame::new(idx * PAGE_SIZE, flags.union(VmFrameFlags::NEED_DEALLOC)) })
}

-/// Dealloc a frame.
+pub(crate) fn alloc_contiguous(nframes: usize, flags: VmFrameFlags) -> Option<VmSegment> {
+    FRAME_ALLOCATOR
+        .get()
+        .unwrap()
+        .lock()
+        .alloc(nframes)
+        .map(|start|
+            // Safety: The range of page frames is contiguous and valid.
+            unsafe {
+                VmSegment::new(
+                    start * PAGE_SIZE,
+                    nframes,
+                    flags.union(VmFrameFlags::NEED_DEALLOC),
+                )
+        })
+}
+
+/// Deallocate a frame.
///
/// # Safety
///
/// User should ensure the index is valid
///
-pub(crate) unsafe fn dealloc(index: usize) {
+pub(crate) unsafe fn dealloc_single(index: usize) {
    FRAME_ALLOCATOR.get().unwrap().lock().dealloc(index, 1);
}
+
+/// Deallocate a contiguous range of page frames.
+///
+/// # Safety
+///
+/// User should ensure the range of page frames is valid.
+///
+pub(crate) unsafe fn dealloc_contiguous(start_index: usize, nframes: usize) {
+    FRAME_ALLOCATOR
+        .get()
+        .unwrap()
+        .lock()
+        .dealloc(start_index, nframes);
+}

pub(crate) fn init(regions: &[MemoryRegion]) {
    let mut allocator = FrameAllocator::<32>::new();
    for region in regions.iter() {
View File

@@ -1,19 +1,14 @@
-use super::{
-    frame::VmFrameFlags,
-    page_table::{PageTable, PageTableConfig},
-};
+use super::page_table::{PageTable, PageTableConfig};
use crate::{
    arch::mm::{PageTableEntry, PageTableFlags},
    config::{PAGE_SIZE, PHYS_OFFSET},
    vm::is_page_aligned,
-    vm::{VmFrame, VmFrameVec, VmReader, VmWriter},
+    vm::{VmAllocOptions, VmFrame, VmFrameVec, VmReader, VmWriter},
};
use crate::{prelude::*, Error};
use alloc::collections::{btree_map::Entry, BTreeMap};
use core::fmt;
-
-use super::frame_allocator;

#[derive(Debug)]
pub struct MapArea {
    pub flags: PageTableFlags,

@@ -32,7 +27,7 @@ impl Clone for MapArea {
    fn clone(&self) -> Self {
        let mut mapper = BTreeMap::new();
        for (&va, old) in &self.mapper {
-            let new = frame_allocator::alloc(VmFrameFlags::empty()).unwrap();
+            let new = VmAllocOptions::new(1).uninit(true).alloc_single().unwrap();
            new.copy_from_frame(old);
            mapper.insert(va, new.clone());
        }

@@ -97,7 +92,7 @@ impl MapArea {
        match self.mapper.entry(va) {
            Entry::Occupied(e) => e.get().start_paddr(),
            Entry::Vacant(e) => e
-                .insert(frame_allocator::alloc_zero(VmFrameFlags::empty()).unwrap())
+                .insert(VmAllocOptions::new(1).alloc_single().unwrap())
                .start_paddr(),
        }
    }

View File

@@ -12,13 +12,15 @@ pub(crate) mod heap_allocator;
mod io;
mod memory_set;
mod offset;
+mod options;
pub(crate) mod page_table;
mod space;

use crate::config::{KERNEL_OFFSET, PAGE_SIZE, PHYS_OFFSET};

-pub use self::frame::{VmAllocOptions, VmFrame, VmFrameVec, VmFrameVecIter, VmReader, VmWriter};
+pub use self::frame::{VmFrame, VmFrameVec, VmFrameVecIter, VmReader, VmSegment, VmWriter};
pub use self::io::VmIo;
+pub use self::options::VmAllocOptions;
pub use self::space::{VmMapOptions, VmPerm, VmSpace};
pub use self::{

View File

@@ -0,0 +1,159 @@
use crate::{arch::iommu, prelude::*, Error};
use super::{frame::VmFrameFlags, frame_allocator, VmFrame, VmFrameVec, VmSegment};
/// Options for allocating physical memory pages (or frames).
///
/// All allocated frames are safe to use in the sense that they are
/// not _typed memory_. We define typed memory as the memory that
/// may store Rust objects or affect Rust memory safety, e.g.,
/// the code and data segments of the OS kernel, the stack and heap
/// allocated for the OS kernel.
pub struct VmAllocOptions {
nframes: usize,
is_contiguous: bool,
uninit: bool,
can_dma: bool,
}
impl VmAllocOptions {
/// Creates new options for allocating the specified number of frames.
pub fn new(nframes: usize) -> Self {
Self {
nframes,
is_contiguous: false,
uninit: false,
can_dma: false,
}
}
/// Sets whether the allocated frames should be contiguous.
///
/// The default value is `false`.
pub fn is_contiguous(&mut self, is_contiguous: bool) -> &mut Self {
self.is_contiguous = is_contiguous;
self
}
/// Sets whether the allocated frames should be uninitialized.
///
/// If `uninit` is set as `false`, the frame will be zeroed once allocated.
/// If `uninit` is set as `true`, the frame will **NOT** be zeroed and should *NOT* be read before writing.
///
/// The default value is false.
pub fn uninit(&mut self, uninit: bool) -> &mut Self {
self.uninit = uninit;
self
}
/// Sets whether the pages can be accessed by devices through
/// Direct Memory Access (DMA).
///
/// In a TEE environment, DMAable pages are untrusted pages shared with
/// the VMM.
pub fn can_dma(&mut self, can_dma: bool) -> &mut Self {
self.can_dma = can_dma;
self
}
/// Allocate a collection of page frames according to the given options.
pub fn alloc(&self) -> Result<VmFrameVec> {
let flags = self.flags();
let frames = if self.is_contiguous {
frame_allocator::alloc(self.nframes, flags).ok_or(Error::NoMemory)?
} else {
let mut frame_list = Vec::new();
for _ in 0..self.nframes {
frame_list.push(frame_allocator::alloc_single(flags).ok_or(Error::NoMemory)?);
}
VmFrameVec(frame_list)
};
if self.can_dma {
for frame in frames.0.iter() {
// Safety: the frame is controlled by frame allocator
unsafe { map_frame(frame) };
}
}
if !self.uninit {
frames.zero();
}
Ok(frames)
}
/// Allocate a single page frame according to the given options.
pub fn alloc_single(&self) -> Result<VmFrame> {
if self.nframes != 1 {
return Err(Error::InvalidArgs);
}
let frame = frame_allocator::alloc_single(self.flags()).ok_or(Error::NoMemory)?;
if self.can_dma {
// Safety: the frame is controlled by frame allocator
unsafe { map_frame(&frame) };
}
if !self.uninit {
frame.zero();
}
Ok(frame)
}
/// Allocate a contiguous range of page frames according to the given options.
///
/// The returned `VmSegment` contains at least one page frame.
pub fn alloc_contiguous(&self) -> Result<VmSegment> {
if !self.is_contiguous || self.nframes == 0 {
return Err(Error::InvalidArgs);
}
let segment =
frame_allocator::alloc_contiguous(self.nframes, self.flags()).ok_or(Error::NoMemory)?;
if self.can_dma {
// Safety: the segment is controlled by frame allocator
unsafe { map_segment(&segment) };
}
if !self.uninit {
segment.zero();
}
Ok(segment)
}
fn flags(&self) -> VmFrameFlags {
let mut flags = VmFrameFlags::empty();
if self.can_dma {
flags.insert(VmFrameFlags::CAN_DMA);
}
flags
}
}
/// Iommu map for the `VmFrame`.
///
/// # Safety
///
/// The address should be controlled by frame allocator.
unsafe fn map_frame(frame: &VmFrame) {
let Err(err) = iommu::map(frame.start_paddr(), frame) else {
return;
};
match err {
// do nothing
iommu::IommuError::NoIommu => {}
iommu::IommuError::ModificationError(err) => {
panic!("iommu map error:{:?}", err)
}
}
}
/// Iommu map for the `VmSegment`.
///
/// # Safety
///
/// The address should be controlled by frame allocator.
unsafe fn map_segment(segment: &VmSegment) {
// TODO: Support to map a VmSegment.
panic!("VmSegment do not support DMA");
}
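The three `alloc*` methods enforce slightly different preconditions. A quick sketch of that contract, written as if inside the `jinux_frame` crate (the assertions simply mirror the checks in the code above):

    use crate::vm::VmAllocOptions;
    use crate::Error;

    fn alloc_options_contract() {
        // `alloc_single` insists on exactly one frame...
        assert!(matches!(
            VmAllocOptions::new(2).alloc_single(),
            Err(Error::InvalidArgs)
        ));

        // ...and `alloc_contiguous` rejects options that never asked for contiguity.
        assert!(matches!(
            VmAllocOptions::new(2).alloc_contiguous(),
            Err(Error::InvalidArgs)
        ));

        // DMA-able frames get IOMMU-mapped at allocation time; per the TODO in
        // `map_segment`, a DMA-able `VmSegment` is not supported yet and panics.
    }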

View File

@@ -1,7 +1,4 @@
-use super::{
-    frame::VmFrameFlags,
-    frame_allocator, paddr_to_vaddr, VmAllocOptions, VmFrameVec, {Paddr, Vaddr},
-};
+use super::{paddr_to_vaddr, Paddr, Vaddr, VmAllocOptions};
use crate::{
    arch::mm::{is_kernel_vaddr, is_user_vaddr, tlb_flush, PageTableEntry},
    config::{ENTRY_COUNT, PAGE_SIZE},

@@ -127,7 +124,7 @@ pub struct PageTable<T: PageTableEntryTrait, M = UserMode> {
impl<T: PageTableEntryTrait> PageTable<T, UserMode> {
    pub fn new(config: PageTableConfig) -> Self {
-        let root_frame = frame_allocator::alloc_zero(VmFrameFlags::empty()).unwrap();
+        let root_frame = VmAllocOptions::new(1).alloc_single().unwrap();
        Self {
            root_paddr: root_frame.start_paddr(),
            tables: vec![root_frame],

@@ -279,10 +276,7 @@ impl<T: PageTableEntryTrait, M> PageTable<T, M> {
            return None;
        }
        // Create next table
-        let frame = VmFrameVec::allocate(VmAllocOptions::new(1).uninit(false))
-            .unwrap()
-            .pop()
-            .unwrap();
+        let frame = VmAllocOptions::new(1).alloc_single().unwrap();
        // Default flags: read, write, user, present
        let flags = T::F::new()
            .set_present(true)

View File

@@ -11,7 +11,7 @@ use core::{
use jinux_frame::{
    io_mem::IoMem,
    offset_of,
-    vm::{HasPaddr, VmAllocOptions, VmFrame, VmFrameVec},
+    vm::{HasPaddr, VmAllocOptions, VmFrame},
};
use jinux_rights::{Dup, TRightSet, TRights, Write};
use jinux_util::{field_ptr, safe_ptr::SafePtr};

@@ -79,12 +79,10 @@ impl VirtQueue {
        let desc_size = size_of::<Descriptor>() * size as usize;

        let (page1, page2) = {
-            let mut continue_pages = VmFrameVec::allocate(
-                VmAllocOptions::new(2)
-                    .uninit(false)
-                    .can_dma(true)
-                    .is_contiguous(true),
-            )
-            .unwrap();
+            let mut continue_pages = VmAllocOptions::new(2)
+                .can_dma(true)
+                .is_contiguous(true)
+                .alloc()
+                .unwrap();
            let page1 = continue_pages.pop().unwrap();
            let page2 = continue_pages.pop().unwrap();

@@ -105,24 +103,15 @@
        }
        (
            SafePtr::new(
-                VmFrameVec::allocate(VmAllocOptions::new(1).uninit(false).can_dma(true))
-                    .unwrap()
-                    .pop()
-                    .unwrap(),
+                VmAllocOptions::new(1).can_dma(true).alloc_single().unwrap(),
                0,
            ),
            SafePtr::new(
-                VmFrameVec::allocate(VmAllocOptions::new(1).uninit(false).can_dma(true))
-                    .unwrap()
-                    .pop()
-                    .unwrap(),
+                VmAllocOptions::new(1).can_dma(true).alloc_single().unwrap(),
                0,
            ),
            SafePtr::new(
-                VmFrameVec::allocate(VmAllocOptions::new(1).uninit(false).can_dma(true))
-                    .unwrap()
-                    .pop()
-                    .unwrap(),
+                VmAllocOptions::new(1).can_dma(true).alloc_single().unwrap(),
                0,
            ),
        )

View File

@@ -4,7 +4,7 @@ use crate::vm::vmo::{get_page_idx_range, Pager, Vmo, VmoFlags, VmoOptions};
use jinux_rights::Full;

use core::ops::Range;
-use jinux_frame::vm::{VmAllocOptions, VmFrame, VmFrameVec};
+use jinux_frame::vm::{VmAllocOptions, VmFrame};
use lru::LruCache;

pub struct PageCache {

@@ -160,12 +160,7 @@ struct Page {
impl Page {
    pub fn alloc() -> Result<Self> {
-        let frame = {
-            let mut vm_alloc_option = VmAllocOptions::new(1);
-            vm_alloc_option.uninit(true);
-            let mut frames = VmFrameVec::allocate(&vm_alloc_option)?;
-            frames.pop().unwrap()
-        };
+        let frame = VmAllocOptions::new(1).uninit(true).alloc_single()?;
        Ok(Self {
            frame,
            state: PageState::Uninit,

@@ -173,11 +168,7 @@ impl Page {
    }

    pub fn alloc_zero() -> Result<Self> {
-        let frame = {
-            let vm_alloc_option = VmAllocOptions::new(1);
-            let mut frames = VmFrameVec::allocate(&vm_alloc_option)?;
-            frames.pop().unwrap()
-        };
+        let frame = VmAllocOptions::new(1).alloc_single()?;
        Ok(Self {
            frame,
            state: PageState::Dirty,

View File

@@ -153,10 +153,7 @@ impl VmoInner {
            return Ok(());
        }
        let frame = match &self.pager {
-            None => {
-                let vm_alloc_option = VmAllocOptions::new(1);
-                VmFrameVec::allocate(&vm_alloc_option)?.pop().unwrap()
-            }
+            None => VmAllocOptions::new(1).alloc_single()?,
            Some(pager) => pager.commit_page(offset)?,
        };
        self.insert_frame(page_idx, frame);

@@ -205,8 +202,7 @@ impl VmoInner {
        if page_idx >= inherited_frames.len() {
            if self.is_cow {
-                let options = VmAllocOptions::new(1);
-                return Ok(VmFrameVec::allocate(&options)?.pop().unwrap());
+                return Ok(VmAllocOptions::new(1).alloc_single()?);
            }
            return_errno_with_message!(Errno::EINVAL, "the page is not inherited from parent");
        }

@@ -217,10 +213,7 @@ impl VmoInner {
            return Ok(inherited_frame);
        }

-        let frame = {
-            let options = VmAllocOptions::new(1);
-            VmFrameVec::allocate(&options)?.pop().unwrap()
-        };
+        let frame = VmAllocOptions::new(1).alloc_single()?;
        frame.copy_from_frame(&inherited_frame);
        Ok(frame)
    }

View File

@@ -4,7 +4,7 @@ use core::marker::PhantomData;
use core::ops::Range;

use align_ext::AlignExt;
-use jinux_frame::vm::{VmAllocOptions, VmFrame, VmFrameVec};
+use jinux_frame::vm::{VmAllocOptions, VmFrame};
use jinux_rights_proc::require;
use typeflags_util::{SetExtend, SetExtendOp};

@@ -143,9 +143,9 @@ fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<BTreeMa
    if flags.contains(VmoFlags::CONTIGUOUS) {
        // if the vmo is continuous, we need to allocate frames for the vmo
        let frames_num = size / PAGE_SIZE;
-        let mut vm_alloc_option = VmAllocOptions::new(frames_num);
-        vm_alloc_option.is_contiguous(true);
-        let frames = VmFrameVec::allocate(&vm_alloc_option)?;
+        let frames = VmAllocOptions::new(frames_num)
+            .is_contiguous(true)
+            .alloc()?;
        let mut committed_pages = BTreeMap::new();
        for (idx, frame) in frames.into_iter().enumerate() {
            committed_pages.insert(idx * PAGE_SIZE, frame);