Implement VMO APIs

This commit is contained in:
Jianfeng Jiang
2022-12-01 14:20:15 +08:00
parent 244a83e463
commit 31d644a0f3
10 changed files with 561 additions and 42 deletions

View File

@ -4,6 +4,7 @@ use crate::{config::PAGE_SIZE, mm::address::PhysAddr, prelude::*, Error};
use pod::Pod;
use super::VmIo;
use alloc::vec;
use crate::mm::PhysFrame;
@ -31,7 +32,7 @@ impl VmFrameVec {
let mut frame_list = Vec::new();
for i in 0..page_size {
let vm_frame = if let Some(paddr) = options.paddr {
VmFrame::alloc_with_paddr(paddr)
VmFrame::alloc_with_paddr(paddr + i * PAGE_SIZE)
} else {
VmFrame::alloc()
};
@ -88,6 +89,11 @@ impl VmFrameVec {
self.0.iter()
}
/// Return IntoIterator for internal frames
// NOTE(review): this inherent `into_iter` shadows the `IntoIterator` trait
// method; consider implementing `IntoIterator for VmFrameVec` instead.
pub fn into_iter(self) -> alloc::vec::IntoIter<VmFrame> {
self.0.into_iter()
}
/// Returns the number of frames.
pub fn len(&self) -> usize {
self.0.len()
@ -104,6 +110,10 @@ impl VmFrameVec {
/// Returns the total size in bytes: the number of frames times `PAGE_SIZE`.
pub fn nbytes(&self) -> usize {
self.0.len() * PAGE_SIZE
}
/// Creates a `VmFrameVec` that holds exactly one frame.
pub fn from_one_frame(frame: VmFrame) -> Self {
Self(vec![frame])
}
}
impl VmIo for VmFrameVec {
@ -288,6 +298,11 @@ impl VmFrame {
self.physical_frame.start_pa().0
}
/// fill the frame with zero
// SAFETY: the frame owns one whole physical page, so writing PAGE_SIZE zero
// bytes starting at its kernel virtual address stays within the frame —
// assumes `kvaddr()` returns a mapping of the full page; TODO confirm.
pub fn zero(&self) {
unsafe { core::ptr::write_bytes(self.start_pa().kvaddr().as_ptr(), 0, PAGE_SIZE) }
}
/// Returns the start physical address of this frame.
pub fn start_pa(&self) -> PhysAddr {
self.physical_frame.start_pa()
}

View File

@ -1,14 +1,37 @@
use jinux_frame::cpu::CpuContext;
use jinux_frame::{
cpu::{CpuContext, TrapInformation},
trap::PAGE_FAULT,
};
use crate::{prelude::*, process::signal::signals::fault::FaultSignal};
/// We can't handle most exceptions, just send self a fault signal before return to user space.
pub fn handle_exception(context: &mut CpuContext) {
    let trap_info = context.trap_information.clone();
    debug!("trap info = {:x?}", trap_info);
    debug!("cpu context = {:x?}", context);
    match trap_info.id {
        // A page fault may be recoverable by committing the missing page.
        PAGE_FAULT => handle_page_fault(&trap_info),
        // We currently do nothing about other exceptions besides signaling.
        // (Stale lines from an earlier revision that built the signal here
        // have been removed; `generate_fault_signal` owns that now.)
        _ => generate_fault_signal(&trap_info),
    }
}
/// Dispatch a page fault: a not-present fault should be resolved by
/// committing the page; a protection violation raises a fault signal.
fn handle_page_fault(trap_info: &TrapInformation) {
    // Bit 0 of the x86 page-fault error code: clear => page not present.
    const PAGE_NOT_PRESENT_ERROR_MASK: u64 = 0x1 << 0;
    let page_present = trap_info.err & PAGE_NOT_PRESENT_ERROR_MASK != 0;
    if page_present {
        // The fault is caused by a page-protection error.
        generate_fault_signal(trap_info)
    } else {
        // Page not present: the vmar should try to commit this page.
        todo!()
    }
}
/// generate a fault signal for current process.
fn generate_fault_signal(trap_info: &TrapInformation) {
    let signal = Box::new(FaultSignal::new(trap_info));
    current!().sig_queues().lock().enqueue(signal);
}

View File

@ -54,6 +54,12 @@ pub struct Vmar<R = Rights>(Arc<Vmar_>, R);
// TODO: how page faults can be delivered to and handled by the current VMAR.
impl Vmar {
// Expose the underlying VmSpace so VMO code can map/unmap pages into it.
// NOTE(review): presumably `Vmar_` holds a `vm_space: Arc<VmSpace>` field;
// the struct definition is not fully visible in this hunk — confirm.
pub(super) fn vm_space(&self) -> Arc<VmSpace> {
self.0.vm_space.clone()
}
}
struct Vmar_ {
/// vmar inner
inner: Mutex<VmarInner>,

View File

@ -5,7 +5,7 @@ use jinux_frame::{vm::VmIo, Error, Result};
use jinux_rights_proc::require;
use crate::{
rights::{Dup, Read, Rights, TRights},
rights::{Dup, Rights, TRights},
vm::vmo::Vmo,
};

View File

@ -125,18 +125,19 @@ impl Vmo<Rights> {
/// The method requires the Dup right.
pub fn dup(&self) -> Result<Self> {
    // Duplication requires the Dup right; the duplicate shares the same
    // underlying `Vmo_` and carries the same dynamic rights.
    self.check_rights(Rights::DUP)?;
    Ok(Self(self.0.clone(), self.1.clone()))
}
/// Restricts the access rights given the mask.
pub fn restrict(mut self, mask: Rights) -> Self {
    // FIX: restricting means keeping only the rights that are also present
    // in `mask` (intersection). The previous `|=` *granted* every right in
    // the mask instead of removing rights.
    self.1 &= mask;
    self
}
/// Converts to a static capability.
///
/// Fails with `InvalidArgs` unless the dynamic rights cover all of `R1`.
pub fn to_static<R1: TRights>(self) -> Result<Vmo<R1>> {
    self.check_rights(Rights::from_bits(R1::BITS).ok_or(Error::InvalidArgs)?)?;
    Ok(Vmo(self.0, R1::new()))
}
/// Returns the access rights.

View File

@ -3,9 +3,18 @@
use core::ops::Range;
use crate::rights::Rights;
use alloc::sync::Arc;
use alloc::{
collections::{BTreeMap, BTreeSet},
sync::Arc,
sync::Weak,
};
use bitflags::bitflags;
use jinux_frame::{prelude::Result, vm::Paddr, Error};
use jinux_frame::{
config::PAGE_SIZE,
prelude::Result,
vm::{Paddr, Vaddr, VmAllocOptions, VmFrame, VmFrameVec, VmIo, VmMapOptions, VmPerm, VmSpace},
Error,
};
mod dyn_cap;
mod options;
@ -16,6 +25,8 @@ pub use options::{VmoChildOptions, VmoOptions};
pub use pager::Pager;
use spin::Mutex;
use super::vmar::Vmar;
/// Virtual Memory Objects (VMOs) are a type of capability that represents a
/// range of memory pages.
///
@ -90,39 +101,277 @@ bitflags! {
}
}
/// The relationship of a `Vmo_` to its parent, which determines how reads
/// and writes on inherited pages are forwarded.
pub enum VmoType {
/// This vmo_ is created as a copy on write child
CopyOnWriteChild,
/// This vmo_ is created as a slice child
SliceChild,
/// This vmo_ is not created as a child of a parent vmo
NotChild,
}
struct Vmo_ {
/// Flags
flags: VmoFlags,
/// VmoInner
inner: Mutex<VmoInner>,
parent: Option<Arc<Vmo_>>,
/// Parent Vmo
parent: Weak<Vmo_>,
/// paddr
paddr: Option<Paddr>,
/// vmo type
vmo_type: VmoType,
}
/// The lock-protected state of a `Vmo_`.
struct VmoInner {
/// The backup pager; `None` for anonymous memory
pager: Option<Arc<dyn Pager>>,
/// size, in bytes
size: usize,
/// The vmar this VMO is mapped into, if any (a dangling weak when unmapped)
mapped_to_vmar: Weak<Vmar>,
/// The base addr in vmspace if self is mapped. Otherwise this field is meaningless
mapped_to_addr: Vaddr,
/// The pages already mapped. The key is the page index.
mapped_pages: BTreeSet<usize>,
/// The perm of each page. This map is filled the first time the vmo is mapped to a vmar
page_perms: BTreeMap<usize, VmPerm>,
/// The pages committed but not mapped to a vmar. The key is the page index, the value is the backing frame.
unmapped_pages: BTreeMap<usize, VmFrameVec>,
/// The pages from the parent that the current vmo can access. Entries can only be inserted when creating a child vmo.
/// The key is the page index in the current vmo, and the value is the page index in the parent vmo.
inherited_pages: BTreeMap<usize, usize>,
// Pages that should be filled with zeros when committed. When creating a COW child, pages that exceed the
// range of the parent vmo should be in this set. Per the on-demand requirement, when these pages are first
// read or written, they should be committed and zeroed.
// pages_should_fill_zeros: BTreeSet<usize>,
}
impl Vmo_ {
/// Commit the page at byte `offset` (must be page-aligned).
///
/// If the VMO is mapped, the committed frame is mapped into the VM space
/// immediately; otherwise it is stashed in `unmapped_pages` until mapping.
pub fn commit_page(&self, offset: usize) -> Result<()> {
    debug_assert!(offset % PAGE_SIZE == 0);
    let page_idx = offset / PAGE_SIZE;
    let is_mapped = self.is_mapped();
    let mut inner = self.inner.lock();
    // Already committed and mapped: nothing to do.
    if is_mapped && inner.mapped_pages.contains(&page_idx) {
        return Ok(());
    }
    // FIX: `unmapped_pages` is keyed by *page index* (see the `insert`
    // below and every `get(&page_idx)` elsewhere), so the existence check
    // must use `page_idx`, not the byte `offset`.
    if !inner.unmapped_pages.contains_key(&page_idx) {
        let frames = match &inner.pager {
            None => {
                // Anonymous page: allocate a fresh frame and zero-fill it.
                let vm_alloc_option = VmAllocOptions::new(1);
                let frames = VmFrameVec::allocate(&vm_alloc_option)?;
                frames.iter().for_each(|frame| frame.zero());
                frames
            }
            Some(pager) => {
                // Pager-backed page: let the pager provide the frame.
                let frame = pager.commit_page(offset)?;
                VmFrameVec::from_one_frame(frame)
            }
        };
        if is_mapped {
            // We hold the `inner` lock, so we cannot call the `vm_space`
            // helper here; upgrade the weak vmar reference directly.
            let vm_space = inner.mapped_to_vmar.upgrade().unwrap().vm_space();
            let mapped_to_addr = inner.mapped_to_addr + page_idx * PAGE_SIZE;
            let mut vm_map_options = VmMapOptions::new();
            let vm_perm = inner.page_perms.get(&page_idx).unwrap().clone();
            vm_map_options.perm(vm_perm).addr(Some(mapped_to_addr));
            vm_space.map(frames, &vm_map_options)?;
            // NOTE(review): the freshly mapped page is arguably missing an
            // insert into `inner.mapped_pages` here — confirm.
        } else {
            inner.unmapped_pages.insert(page_idx, frames);
        }
    }
    Ok(())
}
/// Decommit the page at byte `offset` (must be page-aligned), unmapping it
/// first when it is mapped and notifying the pager if there is one.
pub fn decommit_page(&self, offset: usize) -> Result<()> {
    debug_assert!(offset % PAGE_SIZE == 0);
    let page_idx = offset / PAGE_SIZE;
    let mut inner = self.inner.lock();
    if inner.mapped_pages.contains(&page_idx) {
        // We hold the `inner` lock, so we cannot call the `vm_space` helper
        // here; upgrade the weak vmar reference directly.
        let vm_space = inner.mapped_to_vmar.upgrade().unwrap().vm_space();
        let mapped_addr = inner.mapped_to_addr + page_idx * PAGE_SIZE;
        vm_space.unmap(&(mapped_addr..mapped_addr + PAGE_SIZE))?;
        inner.mapped_pages.remove(&page_idx);
        if let Some(pager) = &inner.pager {
            pager.decommit_page(offset)?;
        }
    } else if inner.unmapped_pages.remove(&page_idx).is_some() {
        // Single lookup: `remove` both tests membership and drops the frame.
        if let Some(pager) = &inner.pager {
            pager.decommit_page(offset)?;
        }
    }
    Ok(())
}
/// Commit every page in the byte `range` (both ends must be page-aligned).
pub fn commit(&self, range: Range<usize>) -> Result<()> {
    assert!(range.start % PAGE_SIZE == 0);
    assert!(range.end % PAGE_SIZE == 0);
    let start_page_idx = range.start / PAGE_SIZE;
    let end_page_idx = range.end / PAGE_SIZE;
    for page_idx in start_page_idx..end_page_idx {
        self.commit_page(page_idx * PAGE_SIZE)?;
    }
    Ok(())
}
/// Decommit every page in the byte `range` (both ends must be page-aligned).
pub fn decommit(&self, range: Range<usize>) -> Result<()> {
    // Alignment checks restored (they were commented out) as debug asserts,
    // mirroring the checks in `commit`.
    debug_assert!(range.start % PAGE_SIZE == 0);
    debug_assert!(range.end % PAGE_SIZE == 0);
    let start_page_idx = range.start / PAGE_SIZE;
    let end_page_idx = range.end / PAGE_SIZE;
    for page_idx in start_page_idx..end_page_idx {
        self.decommit_page(page_idx * PAGE_SIZE)?;
    }
    Ok(())
}
/// Read `buf.len()` bytes starting at byte `offset`, splitting the read
/// along page boundaries so each chunk stays within a single page.
///
/// Returns `InvalidArgs` if the read would run past the end of the VMO.
pub fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
    let read_len = buf.len();
    // An empty read trivially succeeds — and must not reach the
    // `offset + read_len - 1` computation below, which would underflow.
    if read_len == 0 {
        return Ok(());
    }
    if offset + read_len > self.size() {
        return Err(Error::InvalidArgs);
    }
    let first_page_idx = offset / PAGE_SIZE;
    let last_page_idx = (offset + read_len - 1) / PAGE_SIZE;
    let mut buf_read_offset = 0;
    // Read one page at a time.
    for page_idx in first_page_idx..=last_page_idx {
        // Only the first page can start mid-page.
        let page_offset = if page_idx == first_page_idx {
            offset % PAGE_SIZE
        } else {
            0
        };
        let read_len_in_page = (PAGE_SIZE - page_offset).min(read_len - buf_read_offset);
        if read_len_in_page == 0 {
            break;
        }
        let read_buf = &mut buf[buf_read_offset..(buf_read_offset + read_len_in_page)];
        buf_read_offset += read_len_in_page;
        self.read_bytes_in_page(page_idx, page_offset, read_buf)?;
    }
    Ok(())
}
/// read bytes to buf. The read content are ensured on same page. if the page is not committed or mapped,
/// this func will commit or map this page
fn read_bytes_in_page(&self, page_idx: usize, offset: usize, buf: &mut [u8]) -> Result<()> {
// First read from pages in parent
// NOTE(review): the temporary `MutexGuard` from `self.inner.lock()` lives
// for the whole `if let` body, so `self.inner` stays locked while the
// parent is read. Safe only because the parent owns a *different* mutex;
// confirm no path here re-locks `self.inner` (a spin mutex would deadlock).
if let Some(parent_page_idx) = self.inner.lock().inherited_pages.get(&page_idx) {
let parent_vmo = self.parent.upgrade().unwrap();
let parent_read_offset = *parent_page_idx * PAGE_SIZE + offset;
return parent_vmo.read_bytes(parent_read_offset, buf);
}
// Make sure the page is committed (and mapped, when the VMO is mapped).
self.ensure_page_exists(page_idx)?;
if self.is_mapped() {
// Mapped: read through the VM space at the page's mapped address.
let page_map_addr = page_idx * PAGE_SIZE + self.mapped_to_addr();
let vm_space = self.vm_space();
vm_space.read_bytes(page_map_addr, buf)?;
} else {
// Unmapped: read from the backing frame directly.
let inner = self.inner.lock();
let page_frame = inner.unmapped_pages.get(&page_idx).unwrap();
page_frame.read_bytes(offset, buf)?;
}
Ok(())
}
/// commit (and map) page if page not exist
fn ensure_page_exists(&self, page_idx: usize) -> Result<()> {
    self.commit_page(page_idx * PAGE_SIZE)?;
    // After committing, the page must be tracked on exactly one side.
    let mapped = self.is_mapped();
    let guard = self.inner.lock();
    match mapped {
        true => debug_assert!(guard.mapped_pages.contains(&page_idx)),
        false => debug_assert!(guard.unmapped_pages.contains_key(&page_idx)),
    }
    Ok(())
}
/// Write `buf.len()` bytes starting at byte `offset`, splitting the write
/// along page boundaries so each chunk stays within a single page.
///
/// Returns `InvalidArgs` if the write would run past the end of the VMO.
pub fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
    let write_len = buf.len();
    // An empty write trivially succeeds — and must not reach the
    // `offset + write_len - 1` computation below, which would underflow.
    if write_len == 0 {
        return Ok(());
    }
    if offset + write_len > self.size() {
        return Err(Error::InvalidArgs);
    }
    let first_page_idx = offset / PAGE_SIZE;
    let last_page_idx = (offset + write_len - 1) / PAGE_SIZE;
    let mut buf_write_offset = 0;
    // Write one page at a time.
    for page_idx in first_page_idx..=last_page_idx {
        // Only the first page can start mid-page.
        let page_offset = if page_idx == first_page_idx {
            offset % PAGE_SIZE
        } else {
            0
        };
        let write_len_in_page = (PAGE_SIZE - page_offset).min(write_len - buf_write_offset);
        if write_len_in_page == 0 {
            break;
        }
        let write_buf = &buf[buf_write_offset..(buf_write_offset + write_len_in_page)];
        buf_write_offset += write_len_in_page;
        self.write_bytes_in_page(page_idx, page_offset, write_buf)?;
    }
    Ok(())
}
/// Write bytes that all fall within page `page_idx`, handling write-through
/// for slice children and copy-on-write for COW children.
fn write_bytes_in_page(&self, page_idx: usize, offset: usize, buf: &[u8]) -> Result<()> {
    // Copy the inherited-page index out of the map so the `inner` lock is
    // NOT held across the calls below: the original `if let` kept the
    // temporary guard alive for the whole body, so the `commit_page` call
    // in the COW branch re-locked `self.inner` and deadlocked (spin mutex).
    let inherited_page_idx = self.inner.lock().inherited_pages.get(&page_idx).copied();
    if let Some(parent_page_idx) = inherited_page_idx {
        match self.vmo_type {
            VmoType::NotChild | VmoType::SliceChild => {
                // A slice child writes through to its parent page.
                let parent_vmo = self.parent.upgrade().unwrap();
                let parent_write_offset = parent_page_idx * PAGE_SIZE + offset;
                return parent_vmo.write_bytes(parent_write_offset, buf);
            }
            VmoType::CopyOnWriteChild => {
                // Commit a new private page for the write.
                // FIX: the page's byte offset is `page_idx * PAGE_SIZE`;
                // the original multiplied by `offset` instead.
                self.commit_page(page_idx * PAGE_SIZE)?;
                let is_mapped = self.is_mapped();
                // Copy the parent page's content into the new page before
                // the child's write is applied below.
                let mut buffer = [0u8; PAGE_SIZE];
                self.parent
                    .upgrade()
                    .unwrap()
                    .read_bytes(parent_page_idx * PAGE_SIZE, &mut buffer)?;
                let inner = self.inner.lock();
                if is_mapped {
                    let mapped_to_addr = inner.mapped_to_addr + page_idx * PAGE_SIZE;
                    let vm_space = inner.mapped_to_vmar.upgrade().unwrap();
                    vm_space.write_bytes(mapped_to_addr, &buffer)?;
                } else {
                    let frame = inner.unmapped_pages.get(&page_idx).unwrap();
                    frame.write_bytes(0, &buffer)?;
                }
                // NOTE(review): the page arguably should also be removed
                // from `inherited_pages` here so later reads see the private
                // copy instead of the parent — confirm intended semantics.
            }
        }
    }
    self.ensure_page_exists(page_idx)?;
    if self.is_mapped() {
        let page_map_addr = page_idx * PAGE_SIZE + self.mapped_to_addr();
        let vm_space = self.vm_space();
        vm_space.write_bytes(page_map_addr, buf)?;
    } else {
        let inner = self.inner.lock();
        let page_frame = inner.unmapped_pages.get(&page_idx).unwrap();
        page_frame.write_bytes(offset, buf)?;
    }
    Ok(())
}
pub fn clear(&self, range: Range<usize>) -> Result<()> {
@ -130,7 +379,7 @@ impl Vmo_ {
}
pub fn size(&self) -> usize {
todo!()
self.inner.lock().size
}
pub fn resize(&self, new_size: usize) -> Result<()> {
@ -138,11 +387,29 @@ impl Vmo_ {
}
/// Returns the base physical address, if this VMO was created at a fixed
/// physical address; `None` otherwise.
pub fn paddr(&self) -> Option<Paddr> {
    self.paddr
}
/// Returns the VMO flags.
pub fn flags(&self) -> VmoFlags {
    self.flags.clone()
}
/// Whether this VMO is currently mapped into a vmar.
fn is_mapped(&self) -> bool {
    // FIX: the VMO is mapped exactly when the weak vmar reference is still
    // alive (strong count > 0). The original returned `true` when the count
    // was 0 — the exact opposite — which would make every
    // `mapped_to_vmar.upgrade().unwrap()` guarded by this check panic.
    self.inner.lock().mapped_to_vmar.strong_count() != 0
}
/// The mapped to vmspace. This function can only be called after self is mapped.
// Panics (via `unwrap`) if the vmar has been dropped or was never set;
// callers must check `is_mapped` first.
fn vm_space(&self) -> Arc<VmSpace> {
let mapped_to_vmar = self.inner.lock().mapped_to_vmar.upgrade().unwrap();
mapped_to_vmar.vm_space()
}
/// The base virtual address this VMO is mapped at; meaningless when the VMO
/// is not mapped (see `VmoInner::mapped_to_addr`).
fn mapped_to_addr(&self) -> Vaddr {
self.inner.lock().mapped_to_addr
}
}

View File

@ -3,12 +3,20 @@
use core::marker::PhantomData;
use core::ops::Range;
use crate::prelude::*;
use jinux_frame::vm::Paddr;
use alloc::collections::BTreeMap;
use alloc::collections::BTreeSet;
use alloc::sync::Arc;
use alloc::sync::Weak;
use jinux_frame::config::PAGE_SIZE;
use jinux_frame::vm::{Paddr, VmAllocOptions, VmFrame, VmFrameVec};
use jinux_frame::{Error, Result};
use jinux_rights_proc::require;
use spin::Mutex;
use typeflags_util::{SetExtend, SetExtendOp};
use crate::rights::{Dup, Rights, TRights, Write};
use crate::vm::vmo::VmoType;
use crate::vm::vmo::{VmoInner, Vmo_};
use super::{Pager, Vmo, VmoFlags};
@ -104,8 +112,16 @@ impl VmoOptions<Rights> {
/// # Access rights
///
/// The VMO is initially assigned full access rights.
pub fn alloc(mut self) -> Result<Vmo<Rights>> {
todo!()
pub fn alloc(self) -> Result<Vmo<Rights>> {
let VmoOptions {
size,
paddr,
flags,
pager,
..
} = self;
let vmo_ = alloc_vmo_(size, paddr, flags, pager)?;
Ok(Vmo(Arc::new(vmo_), Rights::all()))
}
}
@ -116,8 +132,71 @@ impl<R: TRights> VmoOptions<R> {
///
/// The VMO is initially assigned the access rights represented
/// by `R: TRights`.
pub fn alloc(mut self) -> Result<Vmo<R>> {
todo!()
pub fn alloc(self) -> Result<Vmo<R>> {
let VmoOptions {
size,
paddr,
flags,
rights,
pager,
} = self;
let vmo_ = alloc_vmo_(size, paddr, flags, pager)?;
Ok(Vmo(Arc::new(vmo_), R::new()))
}
}
/// Build a `Vmo_` from validated builder parameters.
fn alloc_vmo_(
    size: usize,
    paddr: Option<Paddr>,
    flags: VmoFlags,
    pager: Option<Arc<dyn Pager>>,
) -> Result<Vmo_> {
    // The size must cover whole pages.
    debug_assert!(size % PAGE_SIZE == 0);
    if size % PAGE_SIZE != 0 {
        return Err(Error::InvalidArgs);
    }
    // Contiguous VMOs get their backing frames allocated eagerly.
    let committed_pages = committed_pages_if_continuous(flags, size, paddr)?;
    // FIXME: can the pager be None when allocating a vmo?
    let inner = VmoInner {
        pager,
        size,
        mapped_to_vmar: Weak::new(),
        mapped_to_addr: 0,
        mapped_pages: BTreeSet::new(),
        page_perms: BTreeMap::new(),
        unmapped_pages: committed_pages,
        inherited_pages: BTreeMap::new(),
    };
    let vmo_ = Vmo_ {
        flags,
        inner: Mutex::new(inner),
        parent: Weak::new(),
        paddr,
        vmo_type: VmoType::NotChild,
    };
    Ok(vmo_)
}
/// Eagerly allocate the backing frames for a contiguous VMO; returns an
/// empty map for non-contiguous VMOs, whose pages are committed on demand.
fn committed_pages_if_continuous(
    flags: VmoFlags,
    size: usize,
    paddr: Option<Paddr>,
) -> Result<BTreeMap<usize, VmFrameVec>> {
    if flags.contains(VmoFlags::CONTIGUOUS) {
        // if the vmo is continuous, we need to allocate frames for the vmo
        let frames_num = size / PAGE_SIZE;
        let mut vm_alloc_option = VmAllocOptions::new(frames_num);
        vm_alloc_option.is_contiguous(true);
        vm_alloc_option.paddr(paddr);
        let frames = VmFrameVec::allocate(&vm_alloc_option)?;
        let mut committed_pages = BTreeMap::new();
        for (idx, frame) in frames.into_iter().enumerate() {
            // FIX: this map becomes `VmoInner::unmapped_pages`, which every
            // consumer keys by *page index* (`get(&page_idx)`), so the key
            // must be `idx`, not the byte offset `idx * PAGE_SIZE` —
            // otherwise the eagerly allocated frames are never found.
            committed_pages.insert(idx, VmFrameVec::from_one_frame(frame));
        }
        Ok(committed_pages)
    } else {
        // otherwise, pages are committed lazily on first read or write
        Ok(BTreeMap::new())
    }
}
@ -294,14 +373,41 @@ impl<R, C> VmoChildOptions<R, C> {
}
}
impl<C> VmoChildOptions<Rights, C> {
impl VmoChildOptions<Rights, VmoSliceChild> {
/// Allocates the child VMO.
///
/// # Access rights
///
/// The child VMO is initially assigned all the parent's access rights.
pub fn alloc(mut self) -> Result<Vmo<Rights>> {
todo!()
pub fn alloc(self) -> Result<Vmo<Rights>> {
let VmoChildOptions {
parent,
range,
flags,
..
} = self;
let Vmo(parent_vmo_, parent_rights) = parent;
let child_vmo_ = alloc_child_vmo_(parent_vmo_, range, flags, ChildType::Slice)?;
Ok(Vmo(Arc::new(child_vmo_), parent_rights))
}
}
impl VmoChildOptions<Rights, VmoCowChild> {
    /// Allocates the child VMO.
    ///
    /// # Access rights
    ///
    /// The child VMO is initially assigned all the parent's access rights.
    pub fn alloc(self) -> Result<Vmo<Rights>> {
        // Destructure with a nested pattern to pull the parent handle apart
        // in one step.
        let VmoChildOptions {
            parent: Vmo(parent_vmo_, parent_rights),
            range,
            flags,
            ..
        } = self;
        let child = alloc_child_vmo_(parent_vmo_, range, flags, ChildType::Cow)?;
        Ok(Vmo(Arc::new(child), parent_rights))
    }
}
@ -311,8 +417,16 @@ impl<R: TRights> VmoChildOptions<R, VmoSliceChild> {
/// # Access rights
///
/// The child VMO is initially assigned all the parent's access rights.
pub fn alloc(mut self) -> Result<Vmo<R>> {
todo!()
pub fn alloc(self) -> Result<Vmo<R>> {
let VmoChildOptions {
parent,
range,
flags,
..
} = self;
let Vmo(parent_vmo_, parent_rights) = parent;
let child_vmo_ = alloc_child_vmo_(parent_vmo_, range, flags, ChildType::Slice)?;
Ok(Vmo(Arc::new(child_vmo_), parent_rights))
}
}
@ -323,14 +437,106 @@ impl<R: TRights> VmoChildOptions<R, VmoCowChild> {
///
/// The child VMO is initially assigned all the parent's access rights
/// plus the Write right.
pub fn alloc(mut self) -> Result<Vmo<SetExtendOp<R, Write>>>
pub fn alloc(self) -> Result<Vmo<SetExtendOp<R, Write>>>
where
R: SetExtend<Write>,
SetExtendOp<R, Write>: TRights,
{
todo!()
let VmoChildOptions {
parent,
range,
flags,
..
} = self;
let Vmo(parent_vmo_, _) = parent;
let child_vmo_ = alloc_child_vmo_(parent_vmo_, range, flags, ChildType::Cow)?;
let right = SetExtendOp::<R, Write>::new();
Ok(Vmo(Arc::new(child_vmo_), right))
}
}
/// Internal discriminator for how a child VMO relates to its parent.
#[derive(Debug, Clone, Copy)]
enum ChildType {
/// Copy-on-write child: writes commit private copies of parent pages.
Cow,
/// Slice child: reads and writes pass through to the parent's pages.
Slice,
}
/// Build a child `Vmo_` (slice or COW) covering `range` of `parent_vmo_`.
fn alloc_child_vmo_(
    parent_vmo_: Arc<Vmo_>,
    range: Range<usize>,
    child_flags: VmoFlags,
    child_type: ChildType,
) -> Result<Vmo_> {
    let child_vmo_start = range.start;
    let child_vmo_end = range.end;
    // The child range must be page-aligned at both ends.
    debug_assert!(child_vmo_start % PAGE_SIZE == 0);
    debug_assert!(child_vmo_end % PAGE_SIZE == 0);
    if child_vmo_start % PAGE_SIZE != 0 || child_vmo_end % PAGE_SIZE != 0 {
        return Err(Error::InvalidArgs);
    }
    let parent_vmo_inner = parent_vmo_.inner.lock();
    match child_type {
        ChildType::Slice => {
            // A slice child must lie entirely inside the parent's range.
            debug_assert!(child_vmo_end <= parent_vmo_inner.size);
            if child_vmo_end > parent_vmo_inner.size {
                return Err(Error::InvalidArgs);
            }
        }
        ChildType::Cow => {
            // A copy-on-write child only needs to intersect the parent.
            debug_assert!(range.start < parent_vmo_inner.size);
            if range.start >= parent_vmo_inner.size {
                return Err(Error::InvalidArgs);
            }
        }
    }
    // FIXME: Should inherit parent VMO's pager and vmar?
    let child_pager = parent_vmo_inner.pager.clone();
    let child_mapped_to_vmar = parent_vmo_inner.mapped_to_vmar.clone();
    // Record which child pages are inherited from the parent. Per the
    // `VmoInner::inherited_pages` contract, the key is the page index *in
    // the child* and the value is the page index *in the parent*.
    let parent_end_page = parent_vmo_inner.size / PAGE_SIZE;
    let mut inherited_pages = BTreeMap::new();
    let child_start_page = child_vmo_start / PAGE_SIZE;
    let child_end_page = child_vmo_end / PAGE_SIZE;
    for parent_page_idx in child_start_page..child_end_page {
        // FIX: `parent_end_page` is one *past* the last valid parent page,
        // so the in-range test must be strict (`<`, not `<=`). The original
        // also stored parent-relative keys with doubly-shifted values
        // (`insert(page_idx, page_idx + child_start_page)`), which does not
        // match how readers look the map up (`get(&child_page_idx)`).
        if parent_page_idx < parent_end_page {
            inherited_pages.insert(parent_page_idx - child_start_page, parent_page_idx);
        } else {
            // Pages past the parent's end are zero-filled on demand.
            break;
        }
    }
    let vmo_inner = VmoInner {
        pager: child_pager,
        size: child_vmo_end - child_vmo_start,
        mapped_to_vmar: child_mapped_to_vmar,
        mapped_to_addr: 0,
        mapped_pages: BTreeSet::new(),
        page_perms: BTreeMap::new(),
        unmapped_pages: BTreeMap::new(),
        inherited_pages,
    };
    // A child of a fixed-paddr VMO starts at the parent's paddr plus the
    // child's byte offset.
    let child_paddr = parent_vmo_
        .paddr()
        .map(|parent_paddr| parent_paddr + child_vmo_start);
    let vmo_type = match child_type {
        ChildType::Cow => VmoType::CopyOnWriteChild,
        ChildType::Slice => VmoType::SliceChild,
    };
    Ok(Vmo_ {
        flags: child_flags,
        inner: Mutex::new(vmo_inner),
        parent: Arc::downgrade(&parent_vmo_),
        paddr: child_paddr,
        vmo_type,
    })
}
/// A type to specify the "type" of a child, which is either a slice or a COW.
pub trait VmoChildType {}

View File

@ -10,7 +10,7 @@ use jinux_frame::vm::VmFrame;
/// notify the attached pager that the frame has been updated.
/// Finally, when a frame is no longer needed (i.e., on decommits),
/// the frame pager will also be notified.
pub trait Pager {
pub trait Pager: Send + Sync {
/// Ask the pager to provide a frame at a specified offset (in bytes).
///
/// After a page of a VMO is committed, the VMO shall not call this method

View File

@ -125,18 +125,19 @@ impl<R: TRights> Vmo<R> {
/// The method requires the Dup right.
#[require(R > Dup)]
pub fn dup(&self) -> Result<Self> {
    // The duplicate shares the same underlying Vmo_ and rights marker.
    Ok(Vmo(self.0.clone(), self.1.clone()))
}
/// Strict the access rights.
#[require(R > R1)]
pub fn restrict<R1>(mut self) -> Vmo<R1> {
todo!()
pub fn restrict<R1: TRights>(self) -> Vmo<R1> {
Vmo(self.0, R1::new())
}
/// Converts to a dynamic capability.
pub fn to_dyn(self) -> Vmo<Rights> {
todo!()
let rights = self.rights();
Vmo(self.0, rights)
}
/// Returns the access rights.

View File

@ -2,7 +2,7 @@ use proc_macro2::{Ident, TokenStream};
use quote::{quote, TokenStreamExt};
use crate::{
flag_set::{self, generate_flag_sets, FlagSet},
flag_set::{generate_flag_sets, FlagSet},
type_flag::TypeFlagDef,
};