implement vmar apis

This commit is contained in:
Jianfeng Jiang 2022-11-30 14:20:06 +08:00
parent 3de0c49020
commit 244a83e463
14 changed files with 505 additions and 72 deletions

View File

@ -1,7 +1,7 @@
#![allow(non_camel_case_types)] #![allow(non_camel_case_types)]
use core::mem; use core::mem;
use jinux_frame::{cpu::GpRegs, offset_of}; use jinux_frame::cpu::GpRegs;
use jinux_util::{read_union_fields, union_read_ptr::UnionReadPtr}; use jinux_util::{read_union_fields, union_read_ptr::UnionReadPtr};
use crate::{prelude::*, process::Pid}; use crate::{prelude::*, process::Pid};

View File

@ -1,8 +1,6 @@
use core::ops::Range;
use alloc::sync::Arc; use alloc::sync::Arc;
use jinux_frame::prelude::Result; use core::ops::Range;
use jinux_frame::{vm::VmIo, Error}; use jinux_frame::{vm::VmIo, Error, Result};
use crate::{rights::Rights, vm::vmo::Vmo}; use crate::{rights::Rights, vm::vmo::Vmo};
@ -13,8 +11,8 @@ use super::{
impl Vmar<Rights> { impl Vmar<Rights> {
/// Creates a root VMAR. /// Creates a root VMAR.
pub fn new() -> Result<Self> { pub fn new_root() -> Result<Self> {
let inner = Arc::new(Vmar_::new()?); let inner = Arc::new(Vmar_::new_root()?);
let rights = Rights::all(); let rights = Rights::all();
let new_self = Self(inner, rights); let new_self = Self(inner, rights);
Ok(new_self) Ok(new_self)
@ -135,7 +133,7 @@ impl Vmar<Rights> {
/// The method requires the Dup right. /// The method requires the Dup right.
pub fn dup(&self) -> Result<Self> { pub fn dup(&self) -> Result<Self> {
self.check_rights(Rights::DUP)?; self.check_rights(Rights::DUP)?;
todo!() Ok(Vmar(self.0.clone(), self.1.clone()))
} }
/// Returns the access rights. /// Returns the access rights.

View File

@ -5,15 +5,24 @@ mod options;
mod static_cap; mod static_cap;
use crate::rights::Rights; use crate::rights::Rights;
use alloc::collections::BTreeMap;
use alloc::sync::Arc; use alloc::sync::Arc;
use alloc::sync::Weak;
use alloc::vec::Vec;
use bitflags::bitflags; use bitflags::bitflags;
use jinux_frame::config::PAGE_SIZE;
// use jinux_frame::vm::VmPerm;
use core::ops::Range; use core::ops::Range;
use jinux_frame::prelude::Result;
use jinux_frame::vm::Vaddr; use jinux_frame::vm::Vaddr;
use jinux_frame::vm::VmIo;
// use jinux_frame::vm::VmPerm;
use jinux_frame::vm::VmSpace; use jinux_frame::vm::VmSpace;
use jinux_frame::Error; use jinux_frame::AlignExt;
use jinux_frame::{Error, Result};
use spin::Mutex; use spin::Mutex;
use super::vmo::Vmo;
/// Virtual Memory Address Regions (VMARs) are a type of capability that manages /// Virtual Memory Address Regions (VMARs) are a type of capability that manages
/// user address spaces. /// user address spaces.
/// ///
@ -46,27 +55,109 @@ pub struct Vmar<R = Rights>(Arc<Vmar_>, R);
// TODO: how page faults can be delivered to and handled by the current VMAR. // TODO: how page faults can be delivered to and handled by the current VMAR.
struct Vmar_ { struct Vmar_ {
inner: Mutex<Inner>, /// vmar inner
// The offset relative to the root VMAR inner: Mutex<VmarInner>,
/// The offset relative to the root VMAR
base: Vaddr, base: Vaddr,
parent: Option<Arc<Vmar_>>, /// The total size of the VMAR in bytes
size: usize,
/// The attached vmspace
vm_space: Arc<VmSpace>,
/// The parent vmar. If points to none, this is a root vmar
parent: Weak<Vmar_>,
} }
struct Inner { /// FIXME: How can a vmar have its child vmar and vmos with its rights?
struct VmarInner {
/// Whether the vmar is destroyed
is_destroyed: bool, is_destroyed: bool,
vm_space: VmSpace, /// The child vmars. The key is offset relative to root VMAR
//... child_vmar_s: BTreeMap<Vaddr, Arc<Vmar_>>,
/// The mapped vmos. The key is offset relative to root VMAR
mapped_vmos: BTreeMap<Vaddr, Arc<Vmo>>,
/// Free ranges that can be used for creating child vmar or mapping vmos
free_regions: BTreeMap<Vaddr, FreeRegion>,
} }
pub const ROOT_VMAR_HIGHEST_ADDR: Vaddr = 0x1000_0000_0000;
impl Vmar_ { impl Vmar_ {
pub fn new() -> Result<Self> { pub fn new_root() -> Result<Self> {
todo!() let mut free_regions = BTreeMap::new();
let root_region = FreeRegion::new(0..ROOT_VMAR_HIGHEST_ADDR);
free_regions.insert(root_region.start(), root_region);
let vmar_inner = VmarInner {
is_destroyed: false,
child_vmar_s: BTreeMap::new(),
mapped_vmos: BTreeMap::new(),
free_regions,
};
let vmar_ = Vmar_ {
inner: Mutex::new(vmar_inner),
vm_space: Arc::new(VmSpace::new()),
base: 0,
size: ROOT_VMAR_HIGHEST_ADDR,
parent: Weak::new(),
};
Ok(vmar_)
} }
pub fn protect(&self, perms: VmPerms, range: Range<usize>) -> Result<()> { pub fn protect(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
todo!() assert!(range.start % PAGE_SIZE == 0);
assert!(range.end % PAGE_SIZE == 0);
self.check_protected_range(&range)?;
self.do_protect_inner(perms, range)?;
Ok(())
} }
/// Does the real protection work. The caller must have verified (via
/// `check_protected_range`) that the whole `range` is mapped, i.e. every
/// byte of `range` belongs either to a mapped VMO or to a child VMAR.
fn do_protect_inner(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
    // Apply the new permissions to every mapped VMO that overlaps `range`.
    for (vmo_base, mapped_vmo) in &self.inner.lock().mapped_vmos {
        let vmo_range = *vmo_base..(*vmo_base + mapped_vmo.size());
        if is_intersected(&range, &vmo_range) {
            // `intersected_range` will be used once protecting a mapped
            // VMO is actually implemented below.
            let intersected_range = get_intersected_range(&range, &vmo_range);
            // TODO: How to protect a mapped vmo?
            todo!()
        }
    }
    // Recurse into every child VMAR that overlaps `range`. Only the
    // intersected part is forwarded; the child takes its own `inner`
    // lock inside the recursive call.
    for (_, child_vmar_) in &self.inner.lock().child_vmar_s {
        let child_vmar_range = child_vmar_.range();
        if is_intersected(&range, &child_vmar_range) {
            let intersected_range = get_intersected_range(&range, &child_vmar_range);
            child_vmar_.do_protect_inner(perms, intersected_range)?;
        }
    }
    Ok(())
}
/// Ensures that the whole protected range is mapped, that is to say,
/// backed by a VMO (directly or inside a child VMAR).
///
/// Internally, we check whether the range intersects any free region,
/// recursing into child VMARs. If it intersects a free region, part of
/// the range is unmapped and `InvalidArgs` is returned.
fn check_protected_range(&self, protected_range: &Range<usize>) -> Result<()> {
    // The protected range should be within self's range.
    assert!(self.base <= protected_range.start);
    assert!(protected_range.end <= self.base + self.size);
    // The protected range should not intersect with any free region.
    for free_region in self.inner.lock().free_regions.values() {
        if is_intersected(&free_region.range, protected_range) {
            return Err(Error::InvalidArgs);
        }
    }
    // If the protected range intersects with a child vmar_, the child
    // vmar_ is responsible for checking the intersected part.
    for child_vmar_ in self.inner.lock().child_vmar_s.values() {
        let child_range = child_vmar_.range();
        if is_intersected(&child_range, protected_range) {
            let intersected_range = get_intersected_range(&child_range, protected_range);
            child_vmar_.check_protected_range(&intersected_range)?;
        }
    }
    Ok(())
}
pub fn destroy_all(&self) -> Result<()> { pub fn destroy_all(&self) -> Result<()> {
todo!() todo!()
} }
@ -76,11 +167,146 @@ impl Vmar_ {
} }
pub fn read(&self, offset: usize, buf: &mut [u8]) -> Result<()> { pub fn read(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
todo!() let read_start = self.base + offset;
let read_end = buf.len() + read_start;
// if the read range is in child vmar
for (child_vmar_base, child_vmar) in &self.inner.lock().child_vmar_s {
let child_vmar_end = *child_vmar_base + child_vmar.size;
if *child_vmar_base <= read_start && read_end <= child_vmar_end {
let child_offset = read_start - *child_vmar_base;
return child_vmar.read(child_offset, buf);
}
}
// if the read range is in mapped vmo
for (vmo_base, vmo) in &self.inner.lock().mapped_vmos {
let vmo_end = *vmo_base + vmo.size();
if *vmo_base <= read_start && read_end <= vmo_end {
let vmo_offset = read_start - *vmo_base;
return vmo.read_bytes(vmo_offset, buf);
}
}
// FIXME: should reads from a free (unmapped) region be allowed?
// for (_, free_region) in &self.inner.lock().free_regions {
// let (region_start, region_end) = free_region.range();
// if region_start <= read_start && read_end <= region_end {
// return self.vm_space.read_bytes(read_start, buf);
// }
// }
// FIXME: If the read range is across different vmos or child vmars, should we directly return error?
Err(Error::AccessDenied)
} }
pub fn write(&self, offset: usize, buf: &[u8]) -> Result<()> { pub fn write(&self, offset: usize, buf: &[u8]) -> Result<()> {
todo!() let write_start = self.base + offset;
let write_end = buf.len() + write_start;
// if the write range is in child vmar
for (child_vmar_base, child_vmar) in &self.inner.lock().child_vmar_s {
let child_vmar_end = *child_vmar_base + child_vmar.size;
if *child_vmar_base <= write_start && write_end <= child_vmar_end {
let child_offset = write_start - *child_vmar_base;
return child_vmar.write(child_offset, buf);
}
}
// if the write range is in mapped vmo
for (vmo_base, vmo) in &self.inner.lock().mapped_vmos {
let vmo_end = *vmo_base + vmo.size();
if *vmo_base <= write_start && write_end <= vmo_end {
let vmo_offset = write_start - *vmo_base;
return vmo.write_bytes(vmo_offset, buf);
}
}
// if the write range is in free region
// FIXME: should we write the free region?
// for (_, free_region) in &self.inner.lock().free_regions {
// let (region_start, region_end) = free_region.range();
// if region_start <= write_start && write_end <= region_end {
// return self.vm_space.write_bytes(write_start, buf);
// }
// }
// FIXME: If the write range is across different vmos or child vmars, should we directly return error?
Err(Error::AccessDenied)
}
/// allocate a child vmar_.
pub fn alloc_child_vmar(
self: &Arc<Self>,
child_vmar_offset: Option<usize>,
child_vmar_size: usize,
align: usize,
) -> Result<Arc<Vmar_>> {
match self.find_free_region_for_child_vmar(child_vmar_offset, child_vmar_size, align) {
None => return Err(Error::InvalidArgs),
Some((region_base, child_vmar_offset)) => {
// This unwrap should never fails
let free_region = self.inner.lock().free_regions.remove(&region_base).unwrap();
let child_range = child_vmar_offset..(child_vmar_offset + child_vmar_size);
let regions_after_allocation = free_region.allocate_range(child_range.clone());
regions_after_allocation.into_iter().for_each(|region| {
self.inner
.lock()
.free_regions
.insert(region.start(), region);
});
let child_region = FreeRegion::new(child_range);
let mut child_regions = BTreeMap::new();
child_regions.insert(child_region.start(), child_region);
let child_vmar_inner = VmarInner {
is_destroyed: false,
child_vmar_s: BTreeMap::new(),
mapped_vmos: BTreeMap::new(),
free_regions: child_regions,
};
let child_vmar_ = Arc::new(Vmar_ {
inner: Mutex::new(child_vmar_inner),
base: child_vmar_offset,
size: child_vmar_size,
vm_space: self.vm_space.clone(),
parent: Arc::downgrade(self),
});
self.inner
.lock()
.child_vmar_s
.insert(child_vmar_.base, child_vmar_.clone());
Ok(child_vmar_)
}
}
}
/// Searches the free regions for a place to put a child VMAR.
///
/// Returns `(region base addr, child real offset)` for the first free
/// region that fits, or `None` if no region can hold the child.
fn find_free_region_for_child_vmar(
    &self,
    child_vmar_offset: Option<Vaddr>,
    child_vmar_size: usize,
    align: usize,
) -> Option<(Vaddr, Vaddr)> {
    for (region_base, free_region) in &self.inner.lock().free_regions {
        match child_vmar_offset {
            // A fixed offset was requested: the free region must cover the
            // whole range starting at that offset.
            Some(fixed_offset) => {
                if *region_base <= fixed_offset
                    && fixed_offset + child_vmar_size <= free_region.end()
                {
                    return Some((*region_base, fixed_offset));
                }
            }
            // No fixed offset: take the first region whose aligned start
            // leaves enough room for the child. This is a simple first-fit
            // strategy; a smarter placement policy could be used instead.
            None => {
                let aligned_start = free_region.start().align_up(align);
                let aligned_end = aligned_start + child_vmar_size;
                if free_region.start() <= aligned_start && aligned_end <= free_region.end() {
                    return Some((*region_base, aligned_start));
                }
            }
        }
    }
    None
}
fn range(&self) -> Range<usize> {
self.base..(self.base + self.size)
} }
} }
@ -91,6 +317,11 @@ impl<R> Vmar<R> {
pub fn base(&self) -> Vaddr { pub fn base(&self) -> Vaddr {
self.0.base self.0.base
} }
/// The size of the vmar in bytes.
pub fn size(&self) -> usize {
self.0.size
}
} }
bitflags! { bitflags! {
@ -107,12 +338,87 @@ bitflags! {
impl From<Rights> for VmPerms { impl From<Rights> for VmPerms {
fn from(rights: Rights) -> VmPerms { fn from(rights: Rights) -> VmPerms {
todo!() let mut vm_perm = VmPerms::empty();
if rights.contains(Rights::READ) {
vm_perm |= VmPerms::READ;
}
if rights.contains(Rights::WRITE) {
vm_perm |= VmPerms::WRITE;
}
if rights.contains(Rights::EXEC) {
vm_perm |= VmPerms::EXEC;
}
vm_perm
} }
} }
impl From<VmPerms> for Rights { impl From<VmPerms> for Rights {
fn from(vm_perms: VmPerms) -> Rights { fn from(vm_perms: VmPerms) -> Rights {
todo!() let mut rights = Rights::empty();
if vm_perms.contains(VmPerms::READ) {
rights |= Rights::READ;
}
if vm_perms.contains(VmPerms::WRITE) {
rights |= Rights::WRITE;
}
if vm_perms.contains(VmPerms::EXEC) {
rights |= Rights::EXEC;
}
rights
} }
} }
/// A contiguous region of virtual addresses that is not yet used by any
/// child VMAR or mapped VMO.
pub struct FreeRegion {
    // The region's address range, relative to the root VMAR.
    range: Range<Vaddr>,
}

impl FreeRegion {
    /// Creates a free region covering `range`.
    pub fn new(range: Range<Vaddr>) -> Self {
        Self { range }
    }

    /// The inclusive start address of the region.
    pub fn start(&self) -> Vaddr {
        self.range.start
    }

    /// The exclusive end address of the region.
    pub fn end(&self) -> Vaddr {
        self.range.end
    }

    /// The size of the region in bytes.
    pub fn size(&self) -> usize {
        self.range.end - self.range.start
    }

    /// The address range covered by the region.
    pub fn range(&self) -> &Range<usize> {
        &self.range
    }

    /// Carves `allocate_range` out of this free region.
    ///
    /// The caller must ensure that `allocate_range` is fully contained in
    /// this region before calling this function. Since the allocated range
    /// may sit in the middle of the region, the leftover space is returned
    /// as at most two new free regions (the parts before and after the
    /// allocated range).
    pub fn allocate_range(&self, allocate_range: Range<Vaddr>) -> Vec<FreeRegion> {
        debug_assert!(self.range.start <= allocate_range.start);
        debug_assert!(allocate_range.end <= self.range.end);
        let mut leftover = Vec::new();
        if self.range.start < allocate_range.start {
            leftover.push(FreeRegion::new(self.range.start..allocate_range.start));
        }
        if allocate_range.end < self.range.end {
            leftover.push(FreeRegion::new(allocate_range.end..self.range.end));
        }
        leftover
    }
}
/// Determines whether two ranges overlap.
fn is_intersected(range1: &Range<usize>, range2: &Range<usize>) -> bool {
    let overlap_start = range1.start.max(range2.start);
    let overlap_end = range1.end.min(range2.end);
    overlap_start < overlap_end
}
/// Returns the overlapping part of two ranges.
/// The caller must ensure the two ranges actually overlap.
fn get_intersected_range(range1: &Range<usize>, range2: &Range<usize>) -> Range<usize> {
    let start = range1.start.max(range2.start);
    let end = range1.end.min(range2.end);
    debug_assert!(start < end);
    start..end
}

View File

@ -1,7 +1,7 @@
//! Options for allocating child VMARs and creating mappings. //! Options for allocating child VMARs and creating mappings.
use jinux_frame::prelude::Result;
use jinux_frame::{config::PAGE_SIZE, vm::Vaddr}; use jinux_frame::{config::PAGE_SIZE, vm::Vaddr};
use jinux_frame::{Error, Result};
use crate::vm::vmo::Vmo; use crate::vm::vmo::Vmo;
@ -27,7 +27,7 @@ use super::{VmPerms, Vmar};
/// assert!(child_vmar.size() == child_size); /// assert!(child_vmar.size() == child_size);
/// ``` /// ```
/// ///
/// A child VMO created from a parent VMO of _static_ capability is also a /// A child VMAR created from a parent VMAR of _static_ capability is also a
/// _static_ capability. /// _static_ capability.
/// ``` /// ```
/// use jinux_std::prelude::*; /// use jinux_std::prelude::*;
@ -45,8 +45,8 @@ use super::{VmPerms, Vmar};
pub struct VmarChildOptions<R> { pub struct VmarChildOptions<R> {
parent: Vmar<R>, parent: Vmar<R>,
size: usize, size: usize,
offset: usize, offset: Option<usize>,
align: usize, align: Option<usize>,
} }
impl<R> VmarChildOptions<R> { impl<R> VmarChildOptions<R> {
@ -58,8 +58,8 @@ impl<R> VmarChildOptions<R> {
Self { Self {
parent, parent,
size, size,
offset: 0, offset: None,
align: PAGE_SIZE, align: None,
} }
} }
@ -69,7 +69,8 @@ impl<R> VmarChildOptions<R> {
/// ///
/// The alignment must be a power of two and a multiple of the page size. /// The alignment must be a power of two and a multiple of the page size.
pub fn align(mut self, align: usize) -> Self { pub fn align(mut self, align: usize) -> Self {
todo!() self.align = Some(align);
self
} }
/// Sets the offset of the child VMAR. /// Sets the offset of the child VMAR.
@ -84,7 +85,8 @@ impl<R> VmarChildOptions<R> {
/// ///
/// The offset must be page-aligned. /// The offset must be page-aligned.
pub fn offset(mut self, offset: usize) -> Self { pub fn offset(mut self, offset: usize) -> Self {
todo!() self.offset = Some(offset);
self
} }
/// Allocates the child VMAR according to the specified options. /// Allocates the child VMAR according to the specified options.
@ -94,8 +96,39 @@ impl<R> VmarChildOptions<R> {
/// # Access rights /// # Access rights
/// ///
/// The child VMAR is initially assigned all the parent's access rights. /// The child VMAR is initially assigned all the parent's access rights.
pub fn alloc(mut self) -> Result<Vmar<R>> { pub fn alloc(self) -> Result<Vmar<R>> {
todo!() // check align
let align = if let Some(align) = self.align {
if align % PAGE_SIZE != 0 || !align.is_power_of_two() {
return Err(Error::InvalidArgs);
}
align
} else {
PAGE_SIZE
};
// check size
if self.size % align != 0 {
return Err(Error::InvalidArgs);
}
// check offset
let root_vmar_offset = if let Some(offset) = self.offset {
if offset % PAGE_SIZE != 0 {
return Err(Error::InvalidArgs);
}
let root_vmar_offset = offset + self.parent.base();
if root_vmar_offset % align != 0 {
return Err(Error::InvalidArgs);
}
Some(root_vmar_offset)
} else {
None
};
let child_vmar_ = self
.parent
.0
.alloc_child_vmar(root_vmar_offset, self.size, align)?;
let child_vmar = Vmar(child_vmar_, self.parent.1);
Ok(child_vmar)
} }
} }

View File

@ -1,8 +1,7 @@
use core::ops::Range; use core::ops::Range;
use alloc::sync::Arc; use alloc::sync::Arc;
use jinux_frame::prelude::Result; use jinux_frame::{vm::VmIo, Error, Result};
use jinux_frame::{vm::VmIo, Error};
use jinux_rights_proc::require; use jinux_rights_proc::require;
use crate::{ use crate::{
@ -21,8 +20,8 @@ impl<R: TRights> Vmar<R> {
/// # Access rights /// # Access rights
/// ///
/// A root VMAR is initially given full access rights. /// A root VMAR is initially given full access rights.
pub fn new() -> Result<Self> { pub fn new_root() -> Result<Self> {
let inner = Arc::new(Vmar_::new()?); let inner = Arc::new(Vmar_::new_root()?);
let rights = R::new(); let rights = R::new();
let new_self = Self(inner, rights); let new_self = Self(inner, rights);
Ok(new_self) Ok(new_self)
@ -141,13 +140,13 @@ impl<R: TRights> Vmar<R> {
/// The method requires the Dup right. /// The method requires the Dup right.
#[require(R > Dup)] #[require(R > Dup)]
pub fn dup(&self) -> Result<Self> { pub fn dup(&self) -> Result<Self> {
todo!() Ok(Vmar(self.0.clone(), self.1))
} }
/// Strict the access rights. /// Strict the access rights.
#[require(R > R1)] #[require(R > R1)]
pub fn restrict<R1>(mut self) -> Vmo<R1> { pub fn restrict<R1: TRights>(self) -> Vmar<R1> {
todo!() Vmar(self.0, R1::new())
} }
/// Returns the access rights. /// Returns the access rights.

View File

@ -71,7 +71,7 @@ use spin::Mutex;
/// `Vmo` is easier to use (by offering more powerful APIs) and /// `Vmo` is easier to use (by offering more powerful APIs) and
/// harder to misuse (thanks to its nature of being capability). /// harder to misuse (thanks to its nature of being capability).
/// ///
pub struct Vmo<R>(Arc<Vmo_>, R); pub struct Vmo<R = Rights>(Arc<Vmo_>, R);
bitflags! { bitflags! {
/// VMO flags. /// VMO flags.

View File

@ -3,12 +3,12 @@
use core::marker::PhantomData; use core::marker::PhantomData;
use core::ops::Range; use core::ops::Range;
use alloc::sync::Arc; use crate::prelude::*;
use jinux_frame::prelude::Result;
use jinux_frame::vm::Paddr; use jinux_frame::vm::Paddr;
use jinux_rights_proc::require; use jinux_rights_proc::require;
use typeflags_util::{SetExtend, SetExtendOp};
use crate::rights::{Dup, Rights, TRights}; use crate::rights::{Dup, Rights, TRights, Write};
use super::{Pager, Vmo, VmoFlags}; use super::{Pager, Vmo, VmoFlags};
@ -50,8 +50,8 @@ pub struct VmoOptions<R = Rights> {
size: usize, size: usize,
paddr: Option<Paddr>, paddr: Option<Paddr>,
flags: VmoFlags, flags: VmoFlags,
rights: R, rights: Option<R>,
// supplier: Option<Arc<dyn FrameSupplier>>, pager: Option<Arc<dyn Pager>>,
} }
impl<R> VmoOptions<R> { impl<R> VmoOptions<R> {
@ -60,7 +60,13 @@ impl<R> VmoOptions<R> {
/// ///
/// The size of the VMO will be rounded up to align with the page size. /// The size of the VMO will be rounded up to align with the page size.
pub fn new(size: usize) -> Self { pub fn new(size: usize) -> Self {
todo!() Self {
size,
paddr: None,
flags: VmoFlags::empty(),
rights: None,
pager: None,
}
} }
/// Sets the starting physical address of the VMO. /// Sets the starting physical address of the VMO.
@ -70,7 +76,9 @@ impl<R> VmoOptions<R> {
/// If this option is set, then the underlying pages of VMO must be contiguous. /// If this option is set, then the underlying pages of VMO must be contiguous.
/// So `VmoFlags::IS_CONTIGUOUS` will be set automatically. /// So `VmoFlags::IS_CONTIGUOUS` will be set automatically.
pub fn paddr(mut self, paddr: Paddr) -> Self { pub fn paddr(mut self, paddr: Paddr) -> Self {
todo!() self.paddr = Some(paddr);
self.flags |= VmoFlags::CONTIGUOUS;
self
} }
/// Sets the VMO flags. /// Sets the VMO flags.
@ -79,12 +87,14 @@ impl<R> VmoOptions<R> {
/// ///
/// For more information about the flags, see `VmoFlags`. /// For more information about the flags, see `VmoFlags`.
pub fn flags(mut self, flags: VmoFlags) -> Self { pub fn flags(mut self, flags: VmoFlags) -> Self {
todo!() self.flags = flags;
self
} }
/// Sets the pager of the VMO. /// Sets the pager of the VMO.
pub fn pager(mut self, pager: Arc<dyn Pager>) -> Self { pub fn pager(mut self, pager: Arc<dyn Pager>) -> Self {
todo!() self.pager = Some(pager);
self
} }
} }
@ -181,7 +191,7 @@ impl<R: TRights> VmoOptions<R> {
/// Note that a slice VMO child and its parent cannot be resizable. /// Note that a slice VMO child and its parent cannot be resizable.
/// ///
/// ```rust /// ```rust
/// use _std::vm::{PAGE_SIZE, VmoOptions}; /// use jinux_std::vm::{PAGE_SIZE, VmoOptions};
/// ///
/// let parent_vmo = VmoOptions::new(PAGE_SIZE) /// let parent_vmo = VmoOptions::new(PAGE_SIZE)
/// .alloc() /// .alloc()
@ -313,28 +323,12 @@ impl<R: TRights> VmoChildOptions<R, VmoCowChild> {
/// ///
/// The child VMO is initially assigned all the parent's access rights /// The child VMO is initially assigned all the parent's access rights
/// plus the Write right. /// plus the Write right.
pub fn alloc<R1>(mut self) -> Result<Vmo<R1>> pub fn alloc(mut self) -> Result<Vmo<SetExtendOp<R, Write>>>
where where
R1: TRights, // TODO: R1 must contain the Write right. To do so at the type level, R: SetExtend<Write>,
// we need to implement a type-level operator
// (say, `TRightsExtend(L, F)`)
// that may extend a list (`L`) of type-level flags with an extra flag `F`.
// TRightsExtend<R, Write>
{ {
todo!() todo!()
} }
// original:
// pub fn alloc<R1>(mut self) -> Result<Vmo<R1>>
// where
// // TODO: R1 must contain the Write right. To do so at the type level,
// // we need to implement a type-level operator
// // (say, `TRightsExtend(L, F)`)
// // that may extend a list (`L`) of type-level flags with an extra flag `F`.
// R1: R // TRightsExtend<R, Write>
// {
// todo!()
// }
} }
/// A type to specify the "type" of a child, which is either a slice or a COW. /// A type to specify the "type" of a child, which is either a slice or a COW.

View File

@ -0,0 +1,15 @@
use crate::{Cons, Nil};

/// A type-level operation that extends a set with another item.
///
/// If the set already contains the item, `Output` is the original set.
/// Otherwise, `Output` is the new set with the item added. Implementations
/// must preserve the canonical item order when extending a set. (The impls
/// for non-empty sets are generated by the `typeflags` proc macro; only the
/// empty-set case is written by hand below.)
pub trait SetExtend<T> {
    type Output;
}

/// Convenient alias for the `Output` type of [`SetExtend`].
pub type SetExtendOp<Set, T> = <Set as SetExtend<T>>::Output;

// Extending the empty set always produces a one-item set.
impl<T> SetExtend<T> for Nil {
    type Output = Cons<T, Nil>;
}

View File

@ -5,11 +5,13 @@
#![no_std] #![no_std]
pub mod assert; pub mod assert;
pub mod bool; pub mod bool;
pub mod extend;
pub mod if_; pub mod if_;
pub mod same; pub mod same;
pub mod set; pub mod set;
pub use crate::bool::{And, AndOp, False, IsFalse, IsTrue, Not, NotOp, Or, OrOp, True}; pub use crate::bool::{And, AndOp, False, IsFalse, IsTrue, Not, NotOp, Or, OrOp, True};
pub use crate::extend::{SetExtend, SetExtendOp};
pub use crate::same::{SameAs, SameAsOp}; pub use crate::same::{SameAs, SameAsOp};
pub use crate::set::{Cons, Nil, Set, SetContain, SetContainOp, SetInclude, SetIncludeOp}; pub use crate::set::{Cons, Nil, Set, SetContain, SetContainOp, SetInclude, SetIncludeOp};
pub use assert::AssertTypeSame; pub use assert::AssertTypeSame;

View File

@ -13,6 +13,7 @@ use core::ops::BitOr as Or;
pub trait Set {} pub trait Set {}
/// An non-empty type-level set. /// An non-empty type-level set.
#[derive(Debug, Clone, Copy)]
pub struct Cons<T, S: Set>(PhantomData<(T, S)>); pub struct Cons<T, S: Set>(PhantomData<(T, S)>);
impl<T, S: Set> Cons<T, S> { impl<T, S: Set> Cons<T, S> {
@ -22,6 +23,7 @@ impl<T, S: Set> Cons<T, S> {
} }
/// An empty type-level set. /// An empty type-level set.
#[derive(Debug, Clone, Copy)]
pub struct Nil; pub struct Nil;
impl<T, S: Set> Set for Cons<T, S> {} impl<T, S: Set> Set for Cons<T, S> {}

View File

@ -11,6 +11,7 @@ const SET_NAME: &'static str = "::typeflags_util::Cons";
/// A flag set represents the combination of different flag items. /// A flag set represents the combination of different flag items.
/// e.g. [Read, Write], [Read], [] are all flag sets. /// e.g. [Read, Write], [Read], [] are all flag sets.
/// The order of flag items does not matter. So flag sets with the same sets of items should be viewed as the same set. /// The order of flag items does not matter. So flag sets with the same sets of items should be viewed as the same set.
#[derive(Debug)]
pub struct FlagSet { pub struct FlagSet {
items: Vec<FlagItem>, items: Vec<FlagItem>,
} }
@ -107,6 +108,23 @@ impl FlagSet {
} }
} }
/// Returns whether this flag set contains a flag item whose identifier
/// has the same name as `type_ident`.
pub fn contains_type(&self, type_ident: &Ident) -> bool {
    let type_name = type_ident.to_string();
    // Compare by name: idents from different call sites have different
    // spans, so direct `Ident` equality would be too strict here.
    self.items
        .iter()
        .any(|item| item.ident.to_string() == type_name)
}
/// Returns whether this flag set is a superset of `other_set`,
/// i.e. contains every flag item of `other_set`.
pub fn contains_set(&self, other_set: &FlagSet) -> bool {
    other_set
        .items
        .iter()
        .all(|item| self.contains_type(&item.ident))
}
/// The token stream inside macro definition. We will generate a token stream for each permutation of items /// The token stream inside macro definition. We will generate a token stream for each permutation of items
/// since the user may use arbitrary order of items in macro. /// since the user may use arbitrary order of items in macro.
pub fn macro_item_tokens(&self) -> Vec<TokenStream> { pub fn macro_item_tokens(&self) -> Vec<TokenStream> {
@ -135,6 +153,14 @@ pub struct FlagItem {
val: Expr, val: Expr,
} }
/// Manual `Debug` impl that shows only the identifier, which is the
/// informative part for diagnostics; the `val` expression is omitted.
impl core::fmt::Debug for FlagItem {
    // Use `core::fmt` consistently instead of mixing it with `std::fmt`.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("FlagItem")
            .field("ident", &self.ident.to_string())
            .finish()
    }
}
/// generate all possible flag sets /// generate all possible flag sets
pub fn generate_flag_sets(type_flag_def: &TypeFlagDef) -> Vec<FlagSet> { pub fn generate_flag_sets(type_flag_def: &TypeFlagDef) -> Vec<FlagSet> {
let flag_items = type_flag_def let flag_items = type_flag_def

View File

@ -43,6 +43,10 @@
//! assert_type_same!(SetIncludeOp<R, W>, False); //! assert_type_same!(SetIncludeOp<R, W>, False);
//! assert_type_same!(SetIncludeOp<W, O>, True); //! assert_type_same!(SetIncludeOp<W, O>, True);
//! assert_type_same!(SetIncludeOp<O, R>, False); //! assert_type_same!(SetIncludeOp<O, R>, False);
//! assert_type_same!(SetExtendOp<O, Read>, R);
//! assert_type_same!(SetExtendOp<R, Write>, RW);
//! assert_type_same!(SetExtendOp<R, Read>, R);
//! assert_type_same!(SetExtendOp<W, Read>, RW);
//! ``` //! ```
#![feature(proc_macro_diagnostic)] #![feature(proc_macro_diagnostic)]

View File

@ -77,7 +77,7 @@ impl TypeFlagDef {
let type_ = self.type_.clone(); let type_ = self.type_.clone();
quote!( quote!(
#(#attributes)* #(#attributes)*
#vis trait #ident : Sync + Send{ #vis trait #ident : Sync + Send + Copy + Clone{
const BITS: #type_; const BITS: #type_;
fn new() -> Self; fn new() -> Self;
@ -128,6 +128,7 @@ impl TypeFlagItem {
let ident = self.ident.clone(); let ident = self.ident.clone();
quote!( quote!(
#(#attributes)* #(#attributes)*
#[derive(Copy, Clone, Debug)]
#vis struct #ident {} #vis struct #ident {}
) )
} }

View File

@ -1,8 +1,8 @@
use proc_macro2::TokenStream; use proc_macro2::{Ident, TokenStream};
use quote::{quote, TokenStreamExt}; use quote::{quote, TokenStreamExt};
use crate::{ use crate::{
flag_set::{generate_flag_sets, FlagSet}, flag_set::{self, generate_flag_sets, FlagSet},
type_flag::TypeFlagDef, type_flag::TypeFlagDef,
}; };
@ -23,6 +23,9 @@ pub fn expand_type_flag(type_flags_def: &TypeFlagDef) -> TokenStream {
all_tokens.append_all(impl_main_trait_tokens); all_tokens.append_all(impl_main_trait_tokens);
}); });
let impl_set_entend_tokens = impl_set_extend(type_flags_def, &flag_sets);
all_tokens.append_all(impl_set_entend_tokens);
let export_declarive_macro_tokens = export_declarive_macro(type_flags_def, &flag_sets); let export_declarive_macro_tokens = export_declarive_macro(type_flags_def, &flag_sets);
all_tokens.append_all(export_declarive_macro_tokens); all_tokens.append_all(export_declarive_macro_tokens);
@ -73,6 +76,56 @@ pub fn impl_same_as(type_flags_def: &TypeFlagDef) -> TokenStream {
all_tokens all_tokens
} }
/// Generates `impl ::typeflags_util::SetExtend<Item> for FlagSet` blocks
/// for every (non-empty flag set, flag item) pair.
///
/// Extending a set with an item it already contains yields the set itself;
/// otherwise the output is the unique superset that adds exactly that item.
pub fn impl_set_extend(type_flags_def: &TypeFlagDef, flag_sets: &[FlagSet]) -> TokenStream {
    let mut all_tokens = TokenStream::new();
    let type_idents: Vec<_> = type_flags_def
        .items_iter()
        .map(|type_flag_item| type_flag_item.ident())
        .collect();
    for flag_set in flag_sets {
        // The `SetExtend` impl for the empty set (`Nil`) is written by hand
        // in typeflags-util, so skip it here.
        if flag_set.len() == 0 {
            continue;
        }
        for type_ident in &type_idents {
            let flag_set_tokens = flag_set.type_name_tokens();
            let output_set_tokens = if flag_set.contains_type(type_ident) {
                // The flag set already contains the type: extending is a no-op.
                flag_set_tokens.clone()
            } else {
                // Find the superset that adds exactly this type. Since the
                // generated flag sets cover all permutations, it must exist.
                extent_one_type(type_ident, flag_set, flag_sets)
                    .expect("the generated flag sets should cover every extension")
                    .type_name_tokens()
            };
            all_tokens.append_all(quote!(
                impl ::typeflags_util::SetExtend<#type_ident> for #flag_set_tokens {
                    type Output = #output_set_tokens;
                }
            ));
        }
    }
    all_tokens
}
/// Finds, among `sets`, a set that contains both `type_ident` and every
/// item of `flag_set` — i.e. the result of extending `flag_set` with
/// `type_ident`. Returns `None` if no such set exists.
/// (NOTE: "extent" in the name looks like a typo for "extend"; kept as-is
/// because callers reference this name.)
fn extent_one_type<'a>(
    type_ident: &Ident,
    flag_set: &'a FlagSet,
    sets: &'a [FlagSet],
) -> Option<&'a FlagSet> {
    for candidate in sets {
        if candidate.contains_type(type_ident) && candidate.contains_set(flag_set) {
            return Some(candidate);
        }
    }
    None
}
/// export the declarive macro /// export the declarive macro
pub fn export_declarive_macro(type_flags_def: &TypeFlagDef, flag_sets: &[FlagSet]) -> TokenStream { pub fn export_declarive_macro(type_flags_def: &TypeFlagDef, flag_sets: &[FlagSet]) -> TokenStream {
let macro_ident = type_flags_def.trait_ident(); let macro_ident = type_flags_def.trait_ident();