Refactor Vmar and VmMapping.

Co-authored-by: Zhang Junyang <junyang@stu.pku.edu.cn>
Author: Wang Siyuan
Date: 2024-11-09 01:44:11 +00:00
Committed by: Tate, Hongliang Tian
Parent: e453649d78
Commit: 4ea3e49788
12 changed files with 782 additions and 1272 deletions

View File

@ -34,21 +34,33 @@ impl Heap {
}
}
/// Inits and maps the heap Vmo
pub(super) fn alloc_and_map_vmo(&self, root_vmar: &Vmar<Full>) -> Result<()> {
/// Initializes and maps the heap virtual memory.
pub(super) fn alloc_and_map_vm(&self, root_vmar: &Vmar<Full>) -> Result<()> {
let vmar_map_options = {
let perms = VmPerms::READ | VmPerms::WRITE;
root_vmar
// FIXME: Our current implementation of mapping resize cannot move
// existing mappings within the new range, which may cause the resize
// operation to fail. Therefore, if there are already mappings within
// the heap expansion range, the brk operation will fail.
.new_map(PAGE_SIZE, perms)
.unwrap()
.offset(self.base)
};
vmar_map_options.build()?;
// If we touch another mapped range when we are trying to expand the
// heap, we fail.
//
// So a simple solution is to reserve enough space for the heap by
// mapping without any permissions and allow it to be overwritten
// later by `brk`. New mappings from `mmap` that overlap this range
// may be moved to another place.
let vmar_reserve_options = {
let perms = VmPerms::empty();
root_vmar
.new_map(USER_HEAP_SIZE_LIMIT - PAGE_SIZE, perms)
.unwrap()
.offset(self.base + PAGE_SIZE)
};
vmar_reserve_options.build()?;
self.set_uninitialized();
Ok(())
}
@ -63,14 +75,24 @@ impl Heap {
return_errno_with_message!(Errno::ENOMEM, "heap size limit was met.");
}
let current_heap_end = self.current_heap_end.load(Ordering::Acquire);
if new_heap_end <= current_heap_end {
// FIXME: should we allow shrink current user heap?
return Ok(current_heap_end);
}
let old_size = (current_heap_end - self.base).align_up(PAGE_SIZE);
let new_size = (new_heap_end - self.base).align_up(PAGE_SIZE);
let current_heap_end = current_heap_end.align_up(PAGE_SIZE);
let new_heap_end = new_heap_end.align_up(PAGE_SIZE);
// Remove the reserved space.
root_vmar.remove_mapping(current_heap_end..new_heap_end)?;
let old_size = current_heap_end - self.base;
let new_size = new_heap_end - self.base;
// Expand the heap.
root_vmar.resize_mapping(self.base, old_size, new_size)?;
self.current_heap_end.store(new_heap_end, Ordering::Release);
Ok(new_heap_end)
}
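
As a sanity check on the arithmetic above, here is a minimal, self-contained sketch of what one `brk` call works out under the new reserve-then-resize scheme. It assumes 4 KiB pages; `align_up` is a local helper standing in for the `align_ext` crate, and `brk_plan` plus the example addresses are invented for illustration, not kernel API.

```rust
use std::ops::Range;

const PAGE_SIZE: usize = 4096;

/// Rounds `addr` up to the next multiple of `align` (a power of two).
fn align_up(addr: usize, align: usize) -> usize {
    (addr + align - 1) & !(align - 1)
}

/// Given the heap base, the current heap end, and the requested new end,
/// computes (reserved range to remove, old mapping size, new mapping size),
/// mirroring the page-aligned bookkeeping done by `brk` above.
fn brk_plan(base: usize, current_end: usize, new_end: usize) -> (Range<usize>, usize, usize) {
    let current_end = align_up(current_end, PAGE_SIZE);
    let new_end = align_up(new_end, PAGE_SIZE);
    // The no-permission reservation between the two page-aligned ends is
    // released first, then the heap mapping is resized to cover it.
    (current_end..new_end, current_end - base, new_end - base)
}

fn main() {
    let base = 0x1000_0000;
    let (reserved, old_size, new_size) = brk_plan(base, base + 0x1000, base + 0x2800);
    assert_eq!(reserved, 0x1000_1000..0x1000_3000);
    assert_eq!((old_size, new_size), (0x1000, 0x3000));
    println!("remove {reserved:#x?}, resize {old_size:#x} -> {new_size:#x}");
}
```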

View File

@ -84,7 +84,7 @@ impl ProcessVm {
let root_vmar = Vmar::<Full>::new_root();
let init_stack = InitStack::new();
let heap = Heap::new();
heap.alloc_and_map_vmo(&root_vmar).unwrap();
heap.alloc_and_map_vm(&root_vmar).unwrap();
Self {
root_vmar,
heap,
@ -136,6 +136,6 @@ impl ProcessVm {
/// Clears existing mappings and then maps stack and heap vmo.
pub(super) fn clear_and_map(&self) {
self.root_vmar.clear().unwrap();
self.heap.alloc_and_map_vmo(&self.root_vmar).unwrap();
self.heap.alloc_and_map_vm(&self.root_vmar).unwrap();
}
}

View File

@ -53,7 +53,7 @@ pub fn sys_madvise(
fn madv_free(start: Vaddr, end: Vaddr, ctx: &Context) -> Result<()> {
let root_vmar = ctx.process.root_vmar();
let advised_range = start..end;
let _ = root_vmar.destroy(advised_range);
let _ = root_vmar.remove_mapping(advised_range);
Ok(())
}

View File

@ -25,6 +25,6 @@ pub fn sys_munmap(addr: Vaddr, len: usize, ctx: &Context) -> Result<SyscallRetur
"integer overflow when (addr + len)",
))?;
debug!("unmap range = 0x{:x} - 0x{:x}", addr, end);
root_vmar.destroy(addr..end)?;
root_vmar.remove_mapping(addr..end)?;
Ok(SyscallReturn::Return(0))
}

View File

@ -4,9 +4,7 @@ use core::ops::Range;
use aster_rights::Rights;
use super::{
options::VmarChildOptions, vm_mapping::VmarMapOptions, VmPerms, Vmar, VmarRightsOp, Vmar_,
};
use super::{vm_mapping::VmarMapOptions, VmPerms, Vmar, VmarRightsOp, Vmar_};
use crate::{
prelude::*, thread::exception::PageFaultInfo, vm::page_fault_handler::PageFaultHandler,
};
@ -64,31 +62,7 @@ impl Vmar<Rights> {
Ok(VmarMapOptions::new(dup_self, size, perms))
}
/// Creates a new child VMAR through a set of VMAR child options.
///
/// # Example
///
/// ```
/// let parent = Vmar::new().unwrap();
/// let child_size = 10 * PAGE_SIZE;
/// let child = parent.new_child(child_size).alloc().unwrap();
/// assert!(child.size() == child_size);
/// ```
///
/// For more details on the available options, see `VmarChildOptions`.
///
/// # Access rights
///
/// This method requires the Dup right.
///
/// The new VMAR child will be of the same capability class and
/// access rights as the parent.
pub fn new_child(&self, size: usize) -> Result<VmarChildOptions<Rights>> {
let dup_self = self.dup()?;
Ok(VmarChildOptions::new(dup_self, size))
}
/// Change the permissions of the memory mappings in the specified range.
/// Changes the permissions of the memory mappings in the specified range.
///
/// The range's start and end addresses must be page-aligned.
/// Also, the range must be completely mapped.
@ -102,24 +76,22 @@ impl Vmar<Rights> {
self.0.protect(perms, range)
}
/// clear all mappings and children vmars.
/// Clears all mappings.
///
/// After being cleared, this vmar will become an empty vmar
pub fn clear(&self) -> Result<()> {
self.0.clear_root_vmar()
}
/// Destroy all mappings and children VMARs that fall within the specified
/// Destroys all mappings that fall within the specified
/// range in bytes.
///
/// The range's start and end addresses must be page-aligned.
///
/// Mappings may fall partially within the range; only the overlapped
/// portions of the mappings are unmapped.
/// As for children VMARs, they must be fully within the range.
/// All children VMARs that fall within the range get their `destroy` methods
/// called.
pub fn destroy(&self, range: Range<usize>) -> Result<()> {
self.0.destroy(range)
pub fn remove_mapping(&self, range: Range<usize>) -> Result<()> {
self.0.remove_mapping(range)
}
/// Duplicates the capability.

View File

@ -1,26 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! Intervals and interval sets used in VMARs.
use core::ops::Range;
/// An interval is associated with a range of values (`T`).
pub trait Interval<T> {
/// Returns the range of the interval.
fn range(&self) -> Range<T>;
}
/// A collection that contains intervals as items. In particular,
/// the collection allows one to retrieve interval items that intersect with
/// a point of value or range of values.
pub trait IntervalSet<'a, T> {
type Item: Interval<T> + 'a;
/// Find the interval items that overlap with a specific range.
fn find(&'a self, range: &Range<T>) -> impl IntoIterator<Item = &'a Self::Item> + 'a;
/// Finds one interval item that contains the point.
///
/// If there are multiple such items, then an arbitrary one is returned.
fn find_one(&'a self, point: &T) -> Option<&'a Self::Item>;
}

View File

@ -0,0 +1,334 @@
// SPDX-License-Identifier: MPL-2.0
//! Intervals and interval sets used in VMARs.
use alloc::collections::btree_map::{BTreeMap, Cursor, CursorMut};
use core::ops::Range;
/// The interval of an item in an interval set.
///
/// All items in the interval set must have a range.
pub trait Interval<K: Clone> {
/// Returns the range of the interval.
fn range(&self) -> Range<K>;
}
/// A collection that contains non-overlapping intervals as items.
///
/// In particular, the collection allows one to retrieve interval items that
/// intersect with a point of value or range of values.
#[derive(Debug)]
pub struct IntervalSet<K, V>
where
K: Clone + Ord,
V: Interval<K>,
{
btree: BTreeMap<K, V>,
}
impl<K, V> Default for IntervalSet<K, V>
where
K: Clone + Ord,
V: Interval<K>,
{
fn default() -> Self {
Self::new()
}
}
#[allow(dead_code)]
impl<K, V> IntervalSet<K, V>
where
K: Clone + Ord,
V: Interval<K>,
{
/// Creates a new interval set.
pub const fn new() -> Self {
Self {
btree: BTreeMap::new(),
}
}
/// Inserts an interval item into the interval set.
pub fn insert(&mut self, item: V) {
let range = item.range();
self.btree.insert(range.start, item);
}
/// Removes an interval item from the interval set.
pub fn remove(&mut self, key: &K) -> Option<V> {
self.btree.remove(key)
}
/// Returns an iterator over the interval items in the interval set.
pub fn iter(&self) -> impl DoubleEndedIterator<Item = &V> {
self.btree.values()
}
/// Finds an interval item that contains the given point.
///
/// If no such item exists, returns [`None`]. Otherwise, returns the item
/// that contains the point.
pub fn find_one(&self, point: &K) -> Option<&V> {
let cursor = self.btree.lower_bound(core::ops::Bound::Excluded(point));
// There's one previous element and one following element that may
// contain the point. If neither does, no other item can contain it.
if let Some((_, v)) = cursor.peek_prev() {
if v.range().end > *point {
return Some(v);
}
} else if let Some((_, v)) = cursor.peek_next() {
if v.range().start <= *point {
return Some(v);
}
}
None
}
/// Finds all interval items that intersect with the given range.
pub fn find<'a>(&'a self, range: &Range<K>) -> IntervalIter<'a, K, V> {
let cursor = self
.btree
.lower_bound(core::ops::Bound::Excluded(&range.start));
IntervalIter {
cursor,
range: range.clone(),
peeked_prev: false,
}
}
/// Takes an interval item that contains the given point.
///
/// If no such item exists, returns [`None`]. Otherwise, returns the item
/// that contains the point.
pub fn take_one(&mut self, point: &K) -> Option<V> {
let mut cursor = self
.btree
.lower_bound_mut(core::ops::Bound::Excluded(point));
// There's one previous element and one following element that may
// contain the point. If neither does, no other item can contain it.
if let Some((_, v)) = cursor.peek_prev() {
if v.range().end > *point {
return Some(cursor.remove_prev().unwrap().1);
}
} else if let Some((_, v)) = cursor.peek_next() {
if v.range().start <= *point {
return Some(cursor.remove_next().unwrap().1);
}
}
None
}
/// Takes all interval items that intersect with the given range.
///
/// This method returns a draining iterator that removes the items from the
/// interval set.
pub fn take<'a>(&'a mut self, range: &Range<K>) -> IntervalDrain<'a, K, V> {
let cursor = self
.btree
.lower_bound_mut(core::ops::Bound::Excluded(&range.start));
IntervalDrain {
cursor,
range: range.clone(),
drained_prev: false,
}
}
/// Clears the interval set, removing all intervals.
pub fn clear(&mut self) {
self.btree.clear();
}
}
/// An iterator that iterates over intervals in an interval set.
#[derive(Debug)]
pub struct IntervalIter<'a, K, V>
where
K: Clone + Ord,
V: Interval<K>,
{
cursor: Cursor<'a, K, V>,
range: Range<K>,
peeked_prev: bool,
}
impl<'a, K, V> Iterator for IntervalIter<'a, K, V>
where
K: Clone + Ord,
V: Interval<K>,
{
type Item = &'a V;
fn next(&mut self) -> Option<Self::Item> {
// There's one previous element that may intersect with the range.
if !self.peeked_prev {
self.peeked_prev = true;
if let Some((_, v)) = self.cursor.peek_prev() {
if v.range().end > self.range.start {
return Some(v);
}
}
}
// Find all intersected elements following it.
if let Some((_, v)) = self.cursor.next() {
if v.range().start >= self.range.end {
return None;
}
return Some(v);
}
None
}
}
/// A draining iterator that iterates over intervals in an interval set.
#[derive(Debug)]
pub struct IntervalDrain<'a, K, V>
where
K: Clone + Ord,
V: Interval<K>,
{
cursor: CursorMut<'a, K, V>,
range: Range<K>,
drained_prev: bool,
}
impl<K, V> Iterator for IntervalDrain<'_, K, V>
where
K: Clone + Ord,
V: Interval<K>,
{
type Item = V;
fn next(&mut self) -> Option<Self::Item> {
// There's one previous element that may intersect with the range.
if !self.drained_prev {
self.drained_prev = true;
if let Some((_, v)) = self.cursor.peek_prev() {
if v.range().end > self.range.start {
return Some(self.cursor.remove_prev().unwrap().1);
}
}
}
// Find all intersected elements following it.
if let Some((_, v)) = self.cursor.peek_next() {
if v.range().start >= self.range.end {
return None;
}
return Some(self.cursor.remove_next().unwrap().1);
}
None
}
}
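
The lookups above lean on the unstable `btree_cursors` API. As a rough stable-Rust illustration of the same idea: because the intervals never overlap, only the last interval starting at or before a point can contain it, so a `BTreeMap::range` query plus one predecessor check is enough. The sketch below re-declares a local `Interval` trait and is a standalone model, not the kernel code:

```rust
use std::collections::BTreeMap;
use std::ops::Range;

/// Local stand-in for the `Interval` trait used by the interval set.
trait Interval<K: Clone> {
    fn range(&self) -> Range<K>;
}

impl Interval<u32> for Range<u32> {
    fn range(&self) -> Range<u32> {
        self.clone()
    }
}

/// Finds one non-overlapping interval containing `point`: only the last
/// interval starting at or before `point` can contain it, which is what the
/// cursor's `peek_prev` exploits.
fn find_one<V: Interval<u32>>(map: &BTreeMap<u32, V>, point: u32) -> Option<&V> {
    map.range(..=point)
        .next_back()
        .map(|(_, v)| v)
        .filter(|v| v.range().end > point)
}

/// Finds all intervals intersecting `range`: the predecessor of `range.start`
/// plus every interval starting inside `range`.
fn find<'a, V: Interval<u32>>(map: &'a BTreeMap<u32, V>, range: Range<u32>) -> Vec<&'a V> {
    let mut out: Vec<&V> = map
        .range(..range.start)
        .next_back()
        .map(|(_, v)| v)
        .filter(|v| v.range().end > range.start)
        .into_iter()
        .collect();
    out.extend(map.range(range.start..range.end).map(|(_, v)| v));
    out
}

fn main() {
    let mut map = BTreeMap::new();
    for iv in [10u32..20, 30..40, 40..50] {
        map.insert(iv.start, iv);
    }
    assert_eq!(find_one(&map, 15), Some(&(10..20)));
    assert!(find_one(&map, 25).is_none());
    assert_eq!(find(&map, 35..45), vec![&(30..40), &(40..50)]);
}
```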
#[cfg(ktest)]
mod tests {
use alloc::{vec, vec::Vec};
use core::ops::Range;
use ostd::prelude::ktest;
use super::*;
#[derive(Clone, Debug, PartialEq)]
struct TestInterval {
range: Range<i32>,
}
impl Interval<i32> for TestInterval {
fn range(&self) -> Range<i32> {
self.range.clone()
}
}
#[ktest]
fn test_insert_and_find_one() {
let mut set = IntervalSet::new();
let interval = TestInterval { range: 10..20 };
set.insert(interval.clone());
assert_eq!(set.find_one(&15), Some(&interval));
assert_eq!(set.find_one(&25), None);
}
#[ktest]
fn test_remove() {
let mut set = IntervalSet::new();
let interval = TestInterval { range: 10..20 };
set.insert(interval.clone());
assert_eq!(set.remove(&10), Some(interval));
assert_eq!(set.remove(&10), None);
}
#[ktest]
fn test_iter() {
let mut set = IntervalSet::new();
let interval1 = TestInterval { range: 10..20 };
let interval2 = TestInterval { range: 30..40 };
set.insert(interval1.clone());
set.insert(interval2.clone());
let intervals: Vec<&TestInterval> = set.iter().collect();
assert_eq!(intervals, vec![&interval1, &interval2]);
}
#[ktest]
fn test_find() {
let mut set = IntervalSet::new();
let interval1 = TestInterval { range: 10..20 };
let interval2 = TestInterval { range: 30..40 };
let interval3 = TestInterval { range: 40..50 };
let interval4 = TestInterval { range: 80..90 };
set.insert(interval1.clone());
set.insert(interval2.clone());
set.insert(interval3.clone());
set.insert(interval4.clone());
let found: Vec<&TestInterval> = set.find(&(35..50)).collect();
assert_eq!(found, vec![&interval2, &interval3]);
}
#[ktest]
fn test_take_one() {
let mut set = IntervalSet::new();
let interval1 = TestInterval { range: 10..20 };
let interval2 = TestInterval { range: 20..30 };
set.insert(interval1.clone());
set.insert(interval2.clone());
assert_eq!(set.take_one(&15), Some(interval1));
assert_eq!(set.take_one(&15), None);
}
#[ktest]
fn test_take() {
let mut set = IntervalSet::new();
let interval1 = TestInterval { range: 10..20 };
let interval2 = TestInterval { range: 30..40 };
let interval3 = TestInterval { range: 45..50 };
let interval4 = TestInterval { range: 60..70 };
set.insert(interval1.clone());
set.insert(interval2.clone());
set.insert(interval3.clone());
set.insert(interval4.clone());
let taken: Vec<TestInterval> = set.take(&(35..45)).collect();
assert_eq!(taken, vec![interval2]);
}
#[ktest]
fn test_clear() {
let mut set = IntervalSet::new();
let interval1 = TestInterval { range: 10..20 };
let interval2 = TestInterval { range: 20..30 };
set.insert(interval1);
set.insert(interval2);
set.clear();
assert!(set.iter().next().is_none());
}
}

View File

@ -1,12 +1,9 @@
// SPDX-License-Identifier: MPL-2.0
#![allow(unused_variables)]
//! Virtual Memory Address Regions (VMARs).
mod dyn_cap;
mod interval;
mod options;
mod interval_set;
mod static_cap;
pub mod vm_mapping;
@ -20,7 +17,7 @@ use ostd::{
};
use self::{
interval::{Interval, IntervalSet},
interval_set::{Interval, IntervalSet},
vm_mapping::VmMapping,
};
use super::page_fault_handler::PageFaultHandler;
@ -39,8 +36,7 @@ use crate::{
/// whose semantics are explained below.
///
/// The semantics of each access rights for VMARs are described below:
/// * The Dup right allows duplicating a VMAR and creating children out of
/// a VMAR.
/// * The Dup right allows duplicating a VMAR.
/// * The Read, Write, Exec rights allow creating memory mappings with
/// readable, writable, and executable access permissions, respectively.
/// * The Read and Write rights allow the VMAR to be read from and written to
@ -77,7 +73,6 @@ impl<R> VmarRightsOp for Vmar<R> {
}
}
// TODO: how page faults can be delivered to and handled by the current VMAR.
impl<R> PageFaultHandler for Vmar<R> {
default fn handle_page_fault(&self, _page_fault_info: &PageFaultInfo) -> Result<()> {
unimplemented!()
@ -90,16 +85,19 @@ impl<R> Vmar<R> {
self.0.vm_space()
}
/// Resizes the original mapping `map_addr..map_addr + old_size` to `map_addr..map_addr + new_size`.
/// Resizes the original mapping.
///
/// The range of the original mapping does not have to correspond to the entire `VmMapping`,
/// but it must ensure that all existing ranges have a mapping. Otherwise, this method will return `Err`.
/// If the new mapping size is smaller than the original mapping size, the extra part will be unmapped.
/// If the new mapping is larger than the old mapping and the extra part overlaps with existing mapping,
/// resizing will fail and return `Err`.
/// The range of the mapping goes from `map_addr..map_addr + old_size` to
/// `map_addr..map_addr + new_size`.
///
/// TODO: implement `remap` function to handle the case of overlapping mappings.
/// If the overlapping mappings are not fixed, they can be moved to make the resizing mapping successful.
/// The range of the original mapping does not have to correspond to a single
/// whole [`VmMapping`], but the entire range must already be mapped.
/// Otherwise, this method will return `Err`.
///
/// If the new mapping size is smaller than the original mapping size, the
/// extra part will be unmapped. If the new mapping is larger than the old
/// mapping and the extra part overlaps with existing mapping, resizing
/// will fail and return `Err`.
pub fn resize_mapping(&self, map_addr: Vaddr, old_size: usize, new_size: usize) -> Result<()> {
self.0.resize_mapping(map_addr, old_size, new_size)
}
@ -114,71 +112,112 @@ pub(super) struct Vmar_ {
size: usize,
/// The attached `VmSpace`
vm_space: Arc<VmSpace>,
/// The parent VMAR. If points to none, this is a root VMAR
parent: Weak<Vmar_>,
}
struct VmarInner {
/// Whether the VMAR is destroyed
is_destroyed: bool,
/// The child VMARs. The key is offset relative to root VMAR
child_vmar_s: BTreeMap<Vaddr, Arc<Vmar_>>,
/// The mapped VMOs. The key is offset relative to root VMAR
vm_mappings: BTreeMap<Vaddr, Arc<VmMapping>>,
/// Free regions that can be used for creating child VMAR or mapping VMOs
free_regions: BTreeMap<Vaddr, FreeRegion>,
/// The mapped pages and associated metadata.
vm_mappings: IntervalSet<Vaddr, VmMapping>,
}
impl VmarInner {
const fn new() -> Self {
Self {
is_destroyed: false,
child_vmar_s: BTreeMap::new(),
vm_mappings: BTreeMap::new(),
free_regions: BTreeMap::new(),
vm_mappings: IntervalSet::new(),
}
}
/// Finds a free region for child `Vmar` or `VmMapping`.
/// Returns (region base addr, child real offset).
fn find_free_region(
&mut self,
child_offset: Option<Vaddr>,
child_size: usize,
align: usize,
) -> Result<(Vaddr, Vaddr)> {
if let Some(child_vmar_offset) = child_offset {
// if the offset is set, we should find a free region can satisfy both the offset and size
let child_vmar_range = child_vmar_offset..(child_vmar_offset + child_size);
for free_region in self.free_regions.find(&child_vmar_range) {
let free_region_range = free_region.range();
if free_region_range.start <= child_vmar_range.start
&& child_vmar_range.end <= free_region_range.end
/// Allocates a free region for mapping with a specific offset and size.
///
/// If the provided range is already occupied, this method returns an error.
fn alloc_free_region_exact(&mut self, offset: Vaddr, size: usize) -> Result<Range<Vaddr>> {
if self
.vm_mappings
.find(&(offset..offset + size))
.next()
.is_some()
{
return Ok((free_region_range.start, child_vmar_offset));
return_errno_with_message!(Errno::EACCES, "Requested region is already occupied");
}
Ok(offset..(offset + size))
}
/// Allocates a free region for mapping with a specific offset and size.
///
/// If the provided range is already occupied, this function truncates all
/// the mappings that intersect with the range.
fn alloc_free_region_exact_truncate(
&mut self,
vm_space: &VmSpace,
offset: Vaddr,
size: usize,
) -> Result<Range<Vaddr>> {
let range = offset..offset + size;
let mut mappings_to_remove = Vec::new();
for vm_mapping in self.vm_mappings.find(&range) {
mappings_to_remove.push(vm_mapping.map_to_addr());
}
for vm_mapping_addr in mappings_to_remove {
let vm_mapping = self.vm_mappings.remove(&vm_mapping_addr).unwrap();
let vm_mapping_range = vm_mapping.range();
let intersected_range = get_intersected_range(&range, &vm_mapping_range);
let (left, taken, right) = vm_mapping.split_range(&intersected_range)?;
if let Some(left) = left {
self.vm_mappings.insert(left);
}
if let Some(right) = right {
self.vm_mappings.insert(right);
}
taken.unmap(vm_space)?;
}
Ok(offset..(offset + size))
}
/// Allocates a free region for mapping.
///
/// If no such region is found, this method returns an error.
fn alloc_free_region(&mut self, size: usize, align: usize) -> Result<Range<Vaddr>> {
// Fast path: there is still room between the highest mapping and the end of the address space.
let highest_occupied = self
.vm_mappings
.iter()
.next_back()
.map_or(ROOT_VMAR_LOWEST_ADDR, |vm_mapping| vm_mapping.range().end);
// FIXME: The up-align may overflow.
let last_occupied_aligned = highest_occupied.align_up(align);
if let Some(last) = last_occupied_aligned.checked_add(size) {
if last <= ROOT_VMAR_CAP_ADDR {
return Ok(last_occupied_aligned..last);
}
}
} else {
// Else, we find a free region that can satisfy the length and align requirement.
// Here, we use a simple brute-force algorithm to find the first free range that can satisfy.
// FIXME: A randomized algorithm may be more efficient.
for (region_base, free_region) in &self.free_regions {
let region_start = free_region.start();
let region_end = free_region.end();
let child_vmar_real_start = region_start.align_up(align);
let child_vmar_real_end =
child_vmar_real_start
.checked_add(child_size)
.ok_or(Error::with_message(
Errno::ENOMEM,
"integer overflow when (child_vmar_real_start + child_size)",
))?;
if region_start <= child_vmar_real_start && child_vmar_real_end <= region_end {
return Ok((*region_base, child_vmar_real_start));
// Slow path: search for a free region between existing mappings.
// Here, we use a simple brute-force FIRST-FIT algorithm.
// Allocate as low as possible to reduce fragmentation.
let mut last_end: Vaddr = ROOT_VMAR_LOWEST_ADDR;
for vm_mapping in self.vm_mappings.iter() {
let range = vm_mapping.range();
debug_assert!(range.start >= last_end);
debug_assert!(range.end <= highest_occupied);
let last_aligned = last_end.align_up(align);
let needed_end = last_aligned
.checked_add(size)
.ok_or(Error::new(Errno::ENOMEM))?;
if needed_end <= range.start {
return Ok(last_aligned..needed_end);
}
last_end = range.end;
}
}
return_errno_with_message!(Errno::EACCES, "Cannot find free region for child")
return_errno_with_message!(Errno::ENOMEM, "Cannot find free region for mapping");
}
}
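
The allocation above is a plain first-fit scan: try to append after the highest mapping, otherwise walk the gaps from the lowest address up. Below is a self-contained sketch of the same strategy over a sorted slice of occupied ranges; the address constants and the `align_up` helper are illustrative stand-ins for `ROOT_VMAR_LOWEST_ADDR`, `ROOT_VMAR_CAP_ADDR`, and the `align_ext` crate.

```rust
use std::ops::Range;

const LOWEST_ADDR: usize = 0x1_0000;  // stand-in for ROOT_VMAR_LOWEST_ADDR
const CAP_ADDR: usize = 0x8000_0000;  // stand-in for ROOT_VMAR_CAP_ADDR (shrunk for the example)
const PAGE_SIZE: usize = 4096;

fn align_up(addr: usize, align: usize) -> usize {
    (addr + align - 1) & !(align - 1)
}

/// First-fit allocation; `occupied` must be sorted by start and non-overlapping.
fn alloc_free_region(occupied: &[Range<usize>], size: usize, align: usize) -> Option<Range<usize>> {
    // Fast path: append after the highest occupied range if there is room.
    let highest = occupied.last().map_or(LOWEST_ADDR, |r| r.end);
    let start = align_up(highest, align);
    if let Some(end) = start.checked_add(size) {
        if end <= CAP_ADDR {
            return Some(start..end);
        }
    }
    // Slow path: scan the gaps between mappings, lowest address first.
    let mut last_end = LOWEST_ADDR;
    for r in occupied {
        let start = align_up(last_end, align);
        let end = start.checked_add(size)?;
        if end <= r.start {
            return Some(start..end);
        }
        last_end = r.end;
    }
    None
}

fn main() {
    let occupied = [0x1_0000..0x3_0000, 0x5_0000..0x6_0000];
    // Plenty of room above the last mapping, so the fast path wins.
    assert_eq!(alloc_free_region(&occupied, PAGE_SIZE, PAGE_SIZE), Some(0x6_0000..0x6_1000));
    // When the top of the address space is taken, fall back to the gap in the middle.
    let full_top = [0x1_0000..0x3_0000, 0x5_0000..CAP_ADDR];
    assert_eq!(alloc_free_region(&full_top, 0x1_0000, PAGE_SIZE), Some(0x3_0000..0x4_0000));
}
```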
@ -197,104 +236,63 @@ impl Interval<usize> for Arc<Vmar_> {
}
impl Vmar_ {
fn new(
inner: VmarInner,
vm_space: Arc<VmSpace>,
base: usize,
size: usize,
parent: Option<&Arc<Vmar_>>,
) -> Arc<Self> {
let parent = if let Some(parent) = parent {
Arc::downgrade(parent)
} else {
Weak::new()
};
fn new(inner: VmarInner, vm_space: Arc<VmSpace>, base: usize, size: usize) -> Arc<Self> {
Arc::new(Vmar_ {
inner: RwMutex::new(inner),
base,
size,
vm_space,
parent,
})
}
fn new_root() -> Arc<Self> {
let mut free_regions = BTreeMap::new();
let root_region = FreeRegion::new(ROOT_VMAR_LOWEST_ADDR..ROOT_VMAR_CAP_ADDR);
free_regions.insert(root_region.start(), root_region);
let vmar_inner = VmarInner {
is_destroyed: false,
child_vmar_s: BTreeMap::new(),
vm_mappings: BTreeMap::new(),
free_regions,
vm_mappings: IntervalSet::new(),
};
let mut vm_space = VmSpace::new();
vm_space.register_page_fault_handler(handle_page_fault_wrapper);
Vmar_::new(vmar_inner, Arc::new(vm_space), 0, ROOT_VMAR_CAP_ADDR, None)
}
fn is_root_vmar(&self) -> bool {
self.parent.upgrade().is_none()
Vmar_::new(vmar_inner, Arc::new(vm_space), 0, ROOT_VMAR_CAP_ADDR)
}
fn protect(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
assert!(range.start % PAGE_SIZE == 0);
assert!(range.end % PAGE_SIZE == 0);
self.ensure_range_mapped(&range)?;
self.do_protect_inner(perms, range)?;
Ok(())
}
// Do real protect. The protected range is ensured to be mapped.
fn do_protect_inner(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
let protect_mappings: Vec<Arc<VmMapping>> = {
let inner = self.inner.read();
inner
.vm_mappings
.find(&range)
.into_iter()
.cloned()
.collect()
};
let mut inner = self.inner.write();
let vm_space = self.vm_space();
for vm_mapping in protect_mappings {
let vm_mapping_range =
vm_mapping.map_to_addr()..(vm_mapping.map_to_addr() + vm_mapping.map_size());
let mut protect_mappings = Vec::new();
for vm_mapping in inner.vm_mappings.find(&range) {
protect_mappings.push((vm_mapping.map_to_addr(), vm_mapping.perms()));
}
for (vm_mapping_addr, vm_mapping_perms) in protect_mappings {
if perms == vm_mapping_perms {
continue;
}
let vm_mapping = inner.vm_mappings.remove(&vm_mapping_addr).unwrap();
let vm_mapping_range = vm_mapping.range();
let intersected_range = get_intersected_range(&range, &vm_mapping_range);
vm_mapping.protect(perms, intersected_range)?;
// Protects part of the taken `VmMapping`.
let (left, taken, right) = vm_mapping.split_range(&intersected_range)?;
let taken = taken.protect(vm_space.as_ref(), perms);
inner.vm_mappings.insert(taken);
// And put the rest back.
if let Some(left) = left {
inner.vm_mappings.insert(left);
}
for child_vmar_ in self.inner.read().child_vmar_s.find(&range) {
let child_vmar_range = child_vmar_.range();
debug_assert!(is_intersected(&child_vmar_range, &range));
let intersected_range = get_intersected_range(&range, &child_vmar_range);
child_vmar_.do_protect_inner(perms, intersected_range)?;
if let Some(right) = right {
inner.vm_mappings.insert(right);
}
Ok(())
}
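
`split_range` is what lets per-range `protect` (and later `remove_mapping`) operate on whole-mapping objects: it carves a mapping into an optional left remainder, the intersected middle, and an optional right remainder, and only the middle gets re-protected or unmapped. Here is a sketch of that splitting over plain address ranges; the real method also splits the backing VMO bookkeeping, which is omitted.

```rust
use std::ops::Range;

/// Splits `mapping` at `mid`, which must be a sub-range of `mapping`.
/// Returns (left remainder, middle, right remainder).
fn split_range(
    mapping: Range<usize>,
    mid: Range<usize>,
) -> (Option<Range<usize>>, Range<usize>, Option<Range<usize>>) {
    assert!(mapping.start <= mid.start && mid.end <= mapping.end);
    let left = (mapping.start < mid.start).then(|| mapping.start..mid.start);
    let right = (mid.end < mapping.end).then(|| mid.end..mapping.end);
    (left, mid, right)
}

fn main() {
    // Protecting the middle page of a three-page mapping leaves two remainders.
    let (l, m, r) = split_range(0x1000..0x4000, 0x2000..0x3000);
    assert_eq!((l, m, r), (Some(0x1000..0x2000), 0x2000..0x3000, Some(0x3000..0x4000)));
    // Protecting a prefix leaves only a right remainder.
    let (l, m, r) = split_range(0x1000..0x4000, 0x1000..0x2000);
    assert_eq!((l, m, r), (None, 0x1000..0x2000, Some(0x2000..0x4000)));
}
```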
/// Ensure the whole protected range is mapped.
/// Internally, we check whether the range intersects any free region recursively.
/// If so, the range is not fully mapped.
fn ensure_range_mapped(&self, range: &Range<usize>) -> Result<()> {
// The protected range should be in self's range
assert!(self.base <= range.start);
assert!(range.end <= self.base + self.size);
// The protected range should not intersect with any free region
let inner = self.inner.read();
if inner.free_regions.find(range).into_iter().next().is_some() {
return_errno_with_message!(Errno::EACCES, "protected range is not fully mapped");
}
// if the protected range intersects with child `Vmar_`, child `Vmar_` is responsible to do the check.
for child_vmar_ in inner.child_vmar_s.find(range) {
let child_vmar_range = child_vmar_.range();
debug_assert!(is_intersected(&child_vmar_range, range));
let intersected_range = get_intersected_range(range, &child_vmar_range);
child_vmar_.ensure_range_mapped(&intersected_range)?;
}
Ok(())
@ -308,15 +306,10 @@ impl Vmar_ {
}
let inner = self.inner.read();
if let Some(child_vmar) = inner.child_vmar_s.find_one(&address) {
debug_assert!(child_vmar.range().contains(&address));
return child_vmar.handle_page_fault(page_fault_info);
}
// FIXME: If multiple VMOs are mapped to the addr, should we allow all VMOs to handle page fault?
if let Some(vm_mapping) = inner.vm_mappings.find_one(&address) {
debug_assert!(vm_mapping.range().contains(&address));
return vm_mapping.handle_page_fault(page_fault_info);
return vm_mapping.handle_page_fault(&self.vm_space, page_fault_info);
}
return_errno_with_message!(Errno::EACCES, "page fault addr is not in current vmar");
@ -324,72 +317,20 @@ impl Vmar_ {
/// Clears all content of the root VMAR.
fn clear_root_vmar(&self) -> Result<()> {
debug_assert!(self.is_root_vmar());
if !self.is_root_vmar() {
return_errno_with_message!(Errno::EACCES, "The vmar is not root vmar");
}
self.clear_vm_space();
let mut inner = self.inner.write();
inner.child_vmar_s.clear();
inner.vm_mappings.clear();
inner.free_regions.clear();
let root_region = FreeRegion::new(ROOT_VMAR_LOWEST_ADDR..ROOT_VMAR_CAP_ADDR);
inner.free_regions.insert(root_region.start(), root_region);
Ok(())
}
fn clear_vm_space(&self) {
self.vm_space.clear().unwrap();
}
pub fn destroy(&self, range: Range<usize>) -> Result<()> {
self.check_destroy_range(&range)?;
let mut inner = self.inner.write();
let mut free_regions = BTreeMap::new();
for child_vmar_ in inner.child_vmar_s.find(&range) {
let child_vmar_range = child_vmar_.range();
debug_assert!(is_intersected(&child_vmar_range, &range));
let free_region = FreeRegion::new(child_vmar_range);
free_regions.insert(free_region.start(), free_region);
}
inner
.child_vmar_s
.retain(|_, child_vmar_| !child_vmar_.is_destroyed());
let mut mappings_to_remove = LinkedList::new();
let mut mappings_to_append = LinkedList::new();
for vm_mapping in inner.vm_mappings.find(&range) {
let vm_mapping_range = vm_mapping.range();
debug_assert!(is_intersected(&vm_mapping_range, &range));
let intersected_range = get_intersected_range(&vm_mapping_range, &range);
vm_mapping.trim_mapping(
&intersected_range,
&mut mappings_to_remove,
&mut mappings_to_append,
)?;
let free_region = FreeRegion::new(intersected_range);
free_regions.insert(free_region.start(), free_region);
}
for mapping in mappings_to_remove {
inner.vm_mappings.remove(&mapping);
}
for (map_to_addr, mapping) in mappings_to_append {
inner.vm_mappings.insert(map_to_addr, mapping);
}
inner
.vm_mappings
.retain(|_, vm_mapping| !vm_mapping.is_destroyed());
inner.free_regions.append(&mut free_regions);
drop(inner);
self.merge_continuous_regions();
inner.vm_mappings.clear();
Ok(())
}
pub fn remove_mapping(&self, range: Range<usize>) -> Result<()> {
let mut inner = self.inner.write();
inner.alloc_free_region_exact_truncate(&self.vm_space, range.start, range.len())?;
Ok(())
}
// Splits and unmaps the found mapping if the new size is smaller.
// Enlarges the last mapping if the new size is larger.
fn resize_mapping(&self, map_addr: Vaddr, old_size: usize, new_size: usize) -> Result<()> {
debug_assert!(map_addr % PAGE_SIZE == 0);
debug_assert!(old_size % PAGE_SIZE == 0);
@ -405,149 +346,28 @@ impl Vmar_ {
let old_map_end = map_addr + old_size;
let new_map_end = map_addr + new_size;
self.ensure_range_mapped(&(map_addr..old_map_end))?;
if new_size < old_size {
self.destroy(new_map_end..old_map_end)?;
self.remove_mapping(new_map_end..old_map_end)?;
return Ok(());
}
let last_mapping = {
let inner = self.inner.read();
inner
.vm_mappings
.find_one(&(old_map_end - 1))
.unwrap()
.clone()
};
let mut inner = self.inner.write();
let last_mapping = inner.vm_mappings.find_one(&(old_map_end - 1)).unwrap();
let last_mapping_addr = last_mapping.map_to_addr();
let last_mapping = inner.vm_mappings.remove(&last_mapping_addr).unwrap();
let extra_mapping_start = last_mapping.map_end();
let free_region = self.allocate_free_region_for_mapping(
new_map_end - extra_mapping_start,
Some(extra_mapping_start),
PAGE_SIZE,
false,
)?;
last_mapping.enlarge(new_map_end - extra_mapping_start);
inner.alloc_free_region_exact(extra_mapping_start, new_map_end - extra_mapping_start)?;
let last_mapping = last_mapping.enlarge(new_map_end - extra_mapping_start);
inner.vm_mappings.insert(last_mapping);
Ok(())
}
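
In outline, a resize first requires the whole old range to be mapped, then either trims the tail or grows the mapping that ends at the old end, provided the extension area is free. A condensed sketch of that decision follows; `plan_resize`, `ResizePlan`, and the `is_free` closure are invented for illustration and are not part of the kernel API.

```rust
use std::ops::Range;

#[derive(Debug, PartialEq)]
enum ResizePlan {
    Shrink { unmap: Range<usize> },
    Grow { extend_over: Range<usize> },
    Noop,
}

/// Decides what a resize from `old_size` to `new_size` at `addr` has to do.
/// `is_free` reports whether a range contains no other mapping.
fn plan_resize(
    addr: usize,
    old_size: usize,
    new_size: usize,
    is_free: impl Fn(Range<usize>) -> bool,
) -> Result<ResizePlan, &'static str> {
    let (old_end, new_end) = (addr + old_size, addr + new_size);
    if new_size == old_size {
        return Ok(ResizePlan::Noop);
    }
    if new_size < old_size {
        // Trimming never conflicts with neighbours.
        return Ok(ResizePlan::Shrink { unmap: new_end..old_end });
    }
    if is_free(old_end..new_end) {
        Ok(ResizePlan::Grow { extend_over: old_end..new_end })
    } else {
        Err("extension overlaps an existing mapping")
    }
}

fn main() {
    let occupied = [0x7000..0x8000];
    let is_free = |r: Range<usize>| occupied.iter().all(|o| o.end <= r.start || r.end <= o.start);
    assert_eq!(
        plan_resize(0x1000, 0x2000, 0x4000, &is_free),
        Ok(ResizePlan::Grow { extend_over: 0x3000..0x5000 })
    );
    assert!(plan_resize(0x1000, 0x2000, 0x8000, &is_free).is_err());
}
```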
fn check_destroy_range(&self, range: &Range<usize>) -> Result<()> {
debug_assert!(range.start % PAGE_SIZE == 0);
debug_assert!(range.end % PAGE_SIZE == 0);
let inner = self.inner.read();
for child_vmar_ in inner.child_vmar_s.find(range) {
let child_vmar_range = child_vmar_.range();
debug_assert!(is_intersected(&child_vmar_range, range));
if range.start <= child_vmar_range.start && child_vmar_range.end <= range.end {
// Child vmar is totally in the range.
continue;
}
return_errno_with_message!(
Errno::EACCES,
"Child vmar is partly intersected with destroyed range"
);
}
Ok(())
}
fn is_destroyed(&self) -> bool {
self.inner.read().is_destroyed
}
fn merge_continuous_regions(&self) {
let mut new_free_regions = BTreeMap::new();
let mut inner = self.inner.write();
let keys = inner.free_regions.keys().cloned().collect::<Vec<_>>();
for key in keys {
if let Some(mut free_region) = inner.free_regions.remove(&key) {
let mut region_end = free_region.end();
while let Some(another_region) = inner.free_regions.remove(&region_end) {
free_region.merge_other_region(&another_region);
region_end = another_region.end();
}
new_free_regions.insert(free_region.start(), free_region);
}
}
inner.free_regions.clear();
inner.free_regions.append(&mut new_free_regions);
}
/// Allocate a child `Vmar_`.
pub fn alloc_child_vmar(
self: &Arc<Self>,
child_vmar_offset: Option<usize>,
child_vmar_size: usize,
align: usize,
) -> Result<Arc<Vmar_>> {
let (region_base, child_vmar_offset) =
self.inner
.write()
.find_free_region(child_vmar_offset, child_vmar_size, align)?;
// This unwrap should never fails
let free_region = self
.inner
.write()
.free_regions
.remove(&region_base)
.unwrap();
let child_range = child_vmar_offset..(child_vmar_offset + child_vmar_size);
let regions_after_allocation = free_region.allocate_range(child_range.clone());
regions_after_allocation.into_iter().for_each(|region| {
self.inner
.write()
.free_regions
.insert(region.start(), region);
});
let child_region = FreeRegion::new(child_range);
let mut child_regions = BTreeMap::new();
child_regions.insert(child_region.start(), child_region);
let child_vmar_inner = VmarInner {
is_destroyed: false,
child_vmar_s: BTreeMap::new(),
vm_mappings: BTreeMap::new(),
free_regions: child_regions,
};
let child_vmar_ = Vmar_::new(
child_vmar_inner,
self.vm_space.clone(),
child_vmar_offset,
child_vmar_size,
Some(self),
);
self.inner
.write()
.child_vmar_s
.insert(child_vmar_.base, child_vmar_.clone());
Ok(child_vmar_)
}
fn check_overwrite(&self, mapping_range: Range<usize>, can_overwrite: bool) -> Result<()> {
let inner = self.inner.read();
if inner
.child_vmar_s
.find(&mapping_range)
.into_iter()
.next()
.is_some()
{
return_errno_with_message!(
Errno::EACCES,
"mapping range overlapped with child vmar range"
);
}
if !can_overwrite
&& inner
.vm_mappings
.find(&mapping_range)
.into_iter()
.next()
.is_some()
{
if !can_overwrite && inner.vm_mappings.find(&mapping_range).next().is_some() {
return_errno_with_message!(
Errno::EACCES,
"mapping range overlapped with another mapping"
@ -563,11 +383,8 @@ impl Vmar_ {
}
/// Maps a `VmMapping` to this VMAR.
fn add_mapping(&self, mapping: Arc<VmMapping>) {
self.inner
.write()
.vm_mappings
.insert(mapping.map_to_addr(), mapping);
fn add_mapping(&self, mapping: VmMapping) {
self.inner.write().vm_mappings.insert(mapping);
}
fn allocate_free_region_for_mapping(
@ -580,129 +397,56 @@ impl Vmar_ {
trace!("allocate free region, map_size = 0x{:x}, offset = {:x?}, align = 0x{:x}, can_overwrite = {}", map_size, offset, align, can_overwrite);
if can_overwrite {
let mut inner = self.inner.write();
// If can overwrite, the offset is ensured not to be `None`.
let offset = offset.ok_or(Error::with_message(
Errno::EINVAL,
"offset cannot be None since can overwrite is set",
))?;
let map_range = offset..(offset + map_size);
// If can overwrite, the mapping can cross multiple free regions. We will split each free regions that intersect with the mapping.
let mut split_regions = Vec::new();
for free_region in inner.free_regions.find(&map_range) {
let free_region_range = free_region.range();
if is_intersected(&free_region_range, &map_range) {
split_regions.push(free_region_range.start);
}
}
for region_base in split_regions {
let free_region = inner.free_regions.remove(&region_base).unwrap();
let intersected_range = get_intersected_range(&free_region.range(), &map_range);
let regions_after_split = free_region.allocate_range(intersected_range);
regions_after_split.into_iter().for_each(|region| {
inner.free_regions.insert(region.start(), region);
});
}
drop(inner);
self.trim_existing_mappings(map_range)?;
self.inner.write().alloc_free_region_exact_truncate(
&self.vm_space,
offset,
map_size,
)?;
Ok(offset)
} else if let Some(offset) = offset {
self.inner
.write()
.alloc_free_region_exact(offset, map_size)?;
Ok(offset)
} else {
// Otherwise, the mapping in a single region.
let mut inner = self.inner.write();
let (free_region_base, offset) = inner.find_free_region(offset, map_size, align)?;
let free_region = inner.free_regions.remove(&free_region_base).unwrap();
let mapping_range = offset..(offset + map_size);
let intersected_range = get_intersected_range(&free_region.range(), &mapping_range);
let regions_after_split = free_region.allocate_range(intersected_range);
regions_after_split.into_iter().for_each(|region| {
inner.free_regions.insert(region.start(), region);
});
Ok(offset)
let free_region = self.inner.write().alloc_free_region(map_size, align)?;
Ok(free_region.start)
}
}
fn trim_existing_mappings(&self, trim_range: Range<usize>) -> Result<()> {
let mut inner = self.inner.write();
let mut mappings_to_remove = LinkedList::new();
let mut mappings_to_append = LinkedList::new();
for vm_mapping in inner.vm_mappings.find(&trim_range) {
vm_mapping.trim_mapping(
&trim_range,
&mut mappings_to_remove,
&mut mappings_to_append,
)?;
}
for map_addr in mappings_to_remove {
inner.vm_mappings.remove(&map_addr);
}
for (map_addr, mapping) in mappings_to_append {
inner.vm_mappings.insert(map_addr, mapping);
}
Ok(())
}
pub(super) fn new_fork_root(self: &Arc<Self>) -> Result<Arc<Self>> {
if self.parent.upgrade().is_some() {
return_errno_with_message!(Errno::EINVAL, "can only dup cow vmar for root vmar");
}
self.new_fork(None)
}
/// Creates a new fork VMAR with Copy-On-Write (COW) mechanism.
fn new_fork(&self, parent: Option<&Arc<Vmar_>>) -> Result<Arc<Self>> {
let new_vmar_ = {
let vmar_inner = VmarInner::new();
// If this is not a root `Vmar`, we clone the `VmSpace` from parent.
//
// If this is a root `Vmar`, we leverage Copy-On-Write (COW) mechanism to
// clone the `VmSpace` to the child.
let vm_space = if let Some(parent) = parent {
parent.vm_space().clone()
} else {
let mut new_space = VmSpace::new();
new_space.register_page_fault_handler(handle_page_fault_wrapper);
Arc::new(new_space)
};
Vmar_::new(vmar_inner, vm_space, self.base, self.size, parent)
Vmar_::new(vmar_inner, Arc::new(new_space), self.base, self.size)
};
{
let inner = self.inner.read();
let mut new_inner = new_vmar_.inner.write();
// Clone free regions.
for (free_region_base, free_region) in &inner.free_regions {
new_inner
.free_regions
.insert(*free_region_base, free_region.clone());
}
// Clone child vmars.
for (child_vmar_base, child_vmar_) in &inner.child_vmar_s {
let new_child_vmar = child_vmar_.new_fork(Some(&new_vmar_))?;
new_inner
.child_vmar_s
.insert(*child_vmar_base, new_child_vmar);
}
// Clone mappings.
{
let new_vmspace = new_vmar_.vm_space();
let range = self.base..(self.base + self.size);
let mut new_cursor = new_vmspace.cursor_mut(&range).unwrap();
let cur_vmspace = self.vm_space();
let mut cur_cursor = cur_vmspace.cursor_mut(&range).unwrap();
for (vm_mapping_base, vm_mapping) in &inner.vm_mappings {
for vm_mapping in inner.vm_mappings.iter() {
let base = vm_mapping.map_to_addr();
// Clone the `VmMapping` to the new VMAR.
let new_mapping = Arc::new(vm_mapping.new_fork(&new_vmar_)?);
new_inner.vm_mappings.insert(*vm_mapping_base, new_mapping);
let new_mapping = vm_mapping.new_fork()?;
new_inner.vm_mappings.insert(new_mapping);
// Protect the mapping and copy to the new page table for COW.
cur_cursor.jump(*vm_mapping_base).unwrap();
new_cursor.jump(*vm_mapping_base).unwrap();
cur_cursor.jump(base).unwrap();
new_cursor.jump(base).unwrap();
let mut op = |page: &mut PageProperty| {
page.flags -= PageFlags::W;
};
@ -712,8 +456,6 @@ impl Vmar_ {
cur_cursor.flusher().dispatch_tlb_flush();
}
drop(new_inner);
Ok(new_vmar_)
}
}
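
The fork path write-protects every mapped page in both the parent's and the child's page tables, so the first write from either side faults and triggers a copy. Below is a toy model of that copy-on-write protocol with no real page tables; `Page`, `fork`, and `handle_write_fault` are made-up stand-ins that use `Rc` to share frames.

```rust
use std::rc::Rc;

/// A made-up page: shared backing data plus a per-mapping writable flag.
#[derive(Clone)]
struct Page {
    frame: Rc<Vec<u8>>,
    writable: bool,
}

/// Forking clones the mappings and drops the write permission on both sides,
/// mirroring `page.flags -= PageFlags::W` in the commit.
fn fork(parent: &mut Vec<Page>) -> Vec<Page> {
    for page in parent.iter_mut() {
        page.writable = false;
    }
    parent.clone() // the derived `Clone` shares the frames via `Rc` instead of copying them
}

/// A write fault on a read-only shared frame copies it before re-enabling writes.
fn handle_write_fault(page: &mut Page) {
    if !page.writable {
        page.frame = Rc::new((*page.frame).clone());
        page.writable = true;
    }
}

fn main() {
    let mut parent = vec![Page { frame: Rc::new(vec![1, 2, 3]), writable: true }];
    let mut child = fork(&mut parent);
    assert!(Rc::ptr_eq(&parent[0].frame, &child[0].frame)); // still shared after fork
    handle_write_fault(&mut child[0]);
    Rc::get_mut(&mut child[0].frame).unwrap()[0] = 9;
    assert_eq!(parent[0].frame[0], 1); // the parent's copy is untouched
}
```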
@ -740,59 +482,6 @@ impl<R> Vmar<R> {
}
}
#[derive(Debug, Clone)]
pub struct FreeRegion {
range: Range<Vaddr>,
}
impl Interval<usize> for FreeRegion {
fn range(&self) -> Range<usize> {
self.range.clone()
}
}
impl FreeRegion {
pub fn new(range: Range<Vaddr>) -> Self {
Self { range }
}
pub fn start(&self) -> Vaddr {
self.range.start
}
pub fn end(&self) -> Vaddr {
self.range.end
}
pub fn size(&self) -> usize {
self.range.end - self.range.start
}
/// Allocates a range in this free region.
///
/// The range is ensured to be contained in current region before call this function.
/// The return vector contains regions that are not allocated. Since the `allocate_range` can be
/// in the middle of a free region, the original region may be split as at most two regions.
pub fn allocate_range(&self, allocate_range: Range<Vaddr>) -> Vec<FreeRegion> {
let mut res = Vec::new();
if self.range.start < allocate_range.start {
let free_region = FreeRegion::new(self.range.start..allocate_range.start);
res.push(free_region);
}
if allocate_range.end < self.range.end {
let free_region = FreeRegion::new(allocate_range.end..self.range.end);
res.push(free_region);
}
res
}
pub fn merge_other_region(&mut self, other_region: &FreeRegion) {
assert!(self.range.end == other_region.range.start);
assert!(self.range.start < other_region.range.end);
self.range = self.range.start..other_region.range.end
}
}
/// Determines whether two ranges are intersected.
/// returns false if one of the ranges has a length of 0
pub fn is_intersected(range1: &Range<usize>, range2: &Range<usize>) -> bool {
@ -805,42 +494,3 @@ pub fn get_intersected_range(range1: &Range<usize>, range2: &Range<usize>) -> Ra
debug_assert!(is_intersected(range1, range2));
range1.start.max(range2.start)..range1.end.min(range2.end)
}
impl<'a, V: Interval<Vaddr> + 'a> IntervalSet<'a, Vaddr> for BTreeMap<Vaddr, V> {
type Item = V;
fn find(&'a self, range: &Range<Vaddr>) -> impl IntoIterator<Item = &'a Self::Item> + 'a {
let mut res = Vec::new();
let mut cursor = self.lower_bound(core::ops::Bound::Excluded(&range.start));
// There's one previous element that may intersect with the range.
if let Some((_, v)) = cursor.peek_prev() {
if v.range().end > range.start {
res.push(v);
}
}
// Find all intersected elements following it.
while let Some((_, v)) = cursor.next() {
if v.range().start >= range.end {
break;
}
res.push(v);
}
res
}
fn find_one(&'a self, point: &Vaddr) -> Option<&'a Self::Item> {
let cursor = self.lower_bound(core::ops::Bound::Excluded(point));
// There's one previous element and one following element that may
// contain the point. If they don't, there's no other chances.
if let Some((_, v)) = cursor.peek_prev() {
if v.range().end > *point {
return Some(v);
}
} else if let Some((_, v)) = cursor.peek_next() {
if v.range().start <= *point {
return Some(v);
}
}
None
}
}

View File

@ -1,219 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! Options for allocating child VMARs.
use ostd::{mm::PAGE_SIZE, Error, Result};
use super::Vmar;
/// Options for allocating a child VMAR, which must not overlap with any
/// existing mappings or child VMARs.
///
/// # Examples
///
/// A child VMAR created from a parent VMAR of _dynamic_ capability is also a
/// _dynamic_ capability.
/// ```
/// use aster_nix::vm::{PAGE_SIZE, Vmar};
///
/// let parent_vmar = Vmar::new();
/// let child_size = 10 * PAGE_SIZE;
/// let child_vmar = parent_vmar
/// .new_child(child_size)
/// .alloc()
/// .unwrap();
/// assert!(child_vmar.rights() == parent_vmo.rights());
/// assert!(child_vmar.size() == child_size);
/// ```
///
/// A child VMAR created from a parent VMAR of _static_ capability is also a
/// _static_ capability.
/// ```
/// use aster_nix::prelude::*;
/// use aster_nix::vm::{PAGE_SIZE, Vmar};
///
/// let parent_vmar: Vmar<Full> = Vmar::new();
/// let child_size = 10 * PAGE_SIZE;
/// let child_vmar = parent_vmar
/// .new_child(child_size)
/// .alloc()
/// .unwrap();
/// assert!(child_vmar.rights() == parent_vmo.rights());
/// assert!(child_vmar.size() == child_size);
/// ```
pub struct VmarChildOptions<R> {
parent: Vmar<R>,
size: usize,
offset: Option<usize>,
align: Option<usize>,
}
impl<R> VmarChildOptions<R> {
/// Creates a default set of options with the specified size of the VMAR
/// (in bytes).
///
/// The size of the VMAR will be rounded up to align with the page size.
pub fn new(parent: Vmar<R>, size: usize) -> Self {
Self {
parent,
size,
offset: None,
align: None,
}
}
/// Set the alignment of the child VMAR.
///
/// By default, the alignment is the page size.
///
/// The alignment must be a power of two and a multiple of the page size.
pub fn align(mut self, align: usize) -> Self {
self.align = Some(align);
self
}
/// Sets the offset of the child VMAR.
///
/// If not set, the system will choose an offset automatically.
///
/// The offset must satisfy the alignment requirement.
/// Also, the child VMAR's range `[offset, offset + size)` must be within
/// the VMAR.
///
/// If not specified,
///
/// The offset must be page-aligned.
pub fn offset(mut self, offset: usize) -> Self {
self.offset = Some(offset);
self
}
/// Allocates the child VMAR according to the specified options.
///
/// The new child VMAR
///
/// # Access rights
///
/// The child VMAR is initially assigned all the parent's access rights.
pub fn alloc(self) -> Result<Vmar<R>> {
// check align
let align = if let Some(align) = self.align {
debug_assert!(align % PAGE_SIZE == 0);
debug_assert!(align.is_power_of_two());
if align % PAGE_SIZE != 0 || !align.is_power_of_two() {
return Err(Error::InvalidArgs);
}
align
} else {
PAGE_SIZE
};
// check size
if self.size % align != 0 {
return Err(Error::InvalidArgs);
}
// check offset
let root_vmar_offset = if let Some(offset) = self.offset {
if offset % PAGE_SIZE != 0 {
return Err(Error::InvalidArgs);
}
let root_vmar_offset = offset + self.parent.base();
if root_vmar_offset % align != 0 {
return Err(Error::InvalidArgs);
}
Some(root_vmar_offset)
} else {
None
};
let child_vmar_ = self
.parent
.0
.alloc_child_vmar(root_vmar_offset, self.size, align)?;
let child_vmar = Vmar(child_vmar_, self.parent.1);
Ok(child_vmar)
}
}
#[cfg(ktest)]
mod test {
use aster_rights::Full;
use ostd::prelude::*;
use super::*;
use crate::vm::{
page_fault_handler::PageFaultHandler,
perms::VmPerms,
vmar::{PageFaultInfo, ROOT_VMAR_CAP_ADDR},
vmo::{VmoOptions, VmoRightsOp},
};
#[ktest]
fn root_vmar() {
let vmar = Vmar::<Full>::new_root();
assert!(vmar.size() == ROOT_VMAR_CAP_ADDR);
}
#[ktest]
fn child_vmar() {
let root_vmar = Vmar::<Full>::new_root();
let root_vmar_dup = root_vmar.dup().unwrap();
let child_vmar = VmarChildOptions::new(root_vmar_dup, 10 * PAGE_SIZE)
.alloc()
.unwrap();
assert!(child_vmar.size() == 10 * PAGE_SIZE);
let root_vmar_dup = root_vmar.dup().unwrap();
let second_child = VmarChildOptions::new(root_vmar_dup, 9 * PAGE_SIZE)
.alloc()
.unwrap();
let root_vmar_dup = root_vmar.dup().unwrap();
assert!(VmarChildOptions::new(root_vmar_dup, 9 * PAGE_SIZE)
.offset(11 * PAGE_SIZE)
.alloc()
.is_err());
}
#[ktest]
fn map_vmo() {
let root_vmar = Vmar::<Full>::new_root();
let vmo = VmoOptions::<Full>::new(PAGE_SIZE).alloc().unwrap().to_dyn();
let perms = VmPerms::READ | VmPerms::WRITE;
let map_offset = 0x1000_0000;
let vmo_dup = vmo.dup().unwrap();
root_vmar
.new_map(PAGE_SIZE, perms)
.unwrap()
.vmo(vmo_dup)
.offset(map_offset)
.build()
.unwrap();
}
#[ktest]
fn handle_page_fault() {
const OFFSET: usize = 0x1000_0000;
let root_vmar = Vmar::<Full>::new_root();
// the page is not mapped by a vmo
assert!(root_vmar
.handle_page_fault(&PageFaultInfo {
address: OFFSET,
required_perms: VmPerms::WRITE,
})
.is_err());
// the page is mapped READ
let vmo = VmoOptions::<Full>::new(PAGE_SIZE).alloc().unwrap().to_dyn();
let perms = VmPerms::READ;
let vmo_dup = vmo.dup().unwrap();
root_vmar
.new_map(PAGE_SIZE, perms)
.unwrap()
.vmo(vmo_dup)
.offset(OFFSET)
.build()
.unwrap();
root_vmar
.handle_page_fault(&PageFaultInfo {
address: OFFSET,
required_perms: VmPerms::READ,
})
.unwrap();
}
}

View File

@ -2,12 +2,10 @@
use core::ops::Range;
use aster_rights::{Dup, Rights, TRightSet, TRights};
use aster_rights::{Dup, Rights, TRightSet, TRights, Write};
use aster_rights_proc::require;
use super::{
options::VmarChildOptions, vm_mapping::VmarMapOptions, VmPerms, Vmar, VmarRightsOp, Vmar_,
};
use super::{vm_mapping::VmarMapOptions, VmPerms, Vmar, VmarRightsOp, Vmar_};
use crate::{
prelude::*, thread::exception::PageFaultInfo, vm::page_fault_handler::PageFaultHandler,
};
@ -74,31 +72,6 @@ impl<R: TRights> Vmar<TRightSet<R>> {
Ok(VmarMapOptions::new(dup_self, size, perms))
}
/// Creates a new child VMAR through a set of VMAR child options.
///
/// # Example
///
/// ```
/// let parent = Vmar::new().unwrap();
/// let child_size = 10 * PAGE_SIZE;
/// let child = parent.new_child(child_size).alloc().unwrap();
/// assert!(child.size() == child_size);
/// ```
///
/// For more details on the available options, see `VmarChildOptions`.
///
/// # Access rights
///
/// This method requires the Dup right.
///
/// The new VMAR child will be of the same capability class and
/// access rights as the parent.
#[require(R > Dup)]
pub fn new_child(&self, size: usize) -> Result<VmarChildOptions<TRightSet<R>>> {
let dup_self = self.dup()?;
Ok(VmarChildOptions::new(dup_self, size))
}
/// Change the permissions of the memory mappings in the specified range.
///
/// The range's start and end addresses must be page-aligned.
@ -113,27 +86,26 @@ impl<R: TRights> Vmar<TRightSet<R>> {
self.0.protect(perms, range)
}
/// clear all mappings and children vmars.
/// Clears all mappings.
///
/// After being cleared, this vmar will become an empty vmar
pub fn clear(&self) -> Result<()> {
self.0.clear_root_vmar()
}
/// Destroy all mappings and children VMARs that fall within the specified
/// Destroys all mappings that fall within the specified
/// range in bytes.
///
/// The range's start and end addresses must be page-aligned.
///
/// Mappings may fall partially within the range; only the overlapped
/// portions of the mappings are unmapped.
/// As for children VMARs, they must be fully within the range.
/// All children VMARs that fall within the range get their `destroy` methods
/// called.
pub fn destroy(&self, range: Range<usize>) -> Result<()> {
self.0.destroy(range)
#[require(R > Write)]
pub fn remove_mapping(&self, range: Range<usize>) -> Result<()> {
self.0.remove_mapping(range)
}
/// Duplicate the capability.
/// Duplicates the capability.
///
/// # Access rights
///
@ -155,7 +127,7 @@ impl<R: TRights> Vmar<TRightSet<R>> {
Ok(Vmar(vmar_, TRightSet(R::new())))
}
/// Strict the access rights.
/// Restricts the access rights.
#[require(R > R1)]
pub fn restrict<R1: TRights>(self) -> Vmar<R1> {
Vmar(self.0, R1::new())

View File

@ -1,24 +1,19 @@
// SPDX-License-Identifier: MPL-2.0
#![allow(dead_code)]
#![allow(unused_variables)]
use core::{
cmp::{max, min},
num::NonZeroUsize,
ops::Range,
};
use align_ext::AlignExt;
use aster_rights::Rights;
use ostd::{
mm::{
use ostd::mm::{
tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, Frame, FrameAllocOptions, PageFlags,
PageProperty, VmSpace,
},
sync::RwLockReadGuard,
};
use super::{interval::Interval, is_intersected, Vmar, Vmar_};
use super::{interval_set::Interval, Vmar};
use crate::{
prelude::*,
thread::exception::PageFaultInfo,
@ -29,71 +24,59 @@ use crate::{
},
};
/// A `VmMapping` represents mapping a range of physical pages into a `Vmar`.
/// Mapping a range of physical pages into a `Vmar`.
///
/// A `VmMapping` can bind with a `Vmo` which can provide physical pages for mapping.
/// Otherwise, it must be an anonymous mapping and will map any empty physical page.
/// A `VmMapping` binding with a `Vmo` is called VMO-backed mapping. Generally, a VMO-backed
/// mapping is a file-backed mapping. Yet there are also some situations where specific pages
/// that are not in a file need to be mapped. e.g:
/// A `VmMapping` can bind with a `Vmo` which can provide physical pages for
/// mapping. Otherwise, it must be an anonymous mapping and will map any empty
/// physical page. A `VmMapping` binding with a `Vmo` is called VMO-backed
/// mapping. Generally, a VMO-backed mapping is a file-backed mapping. Yet
/// there are also some situations where specific pages that are not in a file
/// need to be mapped. e.g:
/// - Mappings to the VDSO data.
/// - Shared anonymous mappings. because the mapped pages need to be retained and shared with
/// other processes.
/// - Shared anonymous mappings, because the mapped pages need to be retained
/// and shared with other processes.
///
/// Such mappings will also be VMO-backed mappings.
///
/// This type controls the actual mapping in the [`VmSpace`]. It is a linear
/// type and cannot be [`Drop`]. To remove a mapping, use [`Self::unmap`].
#[derive(Debug)]
pub(super) struct VmMapping {
inner: RwLock<VmMappingInner>,
/// The parent VMAR. The parent should always point to a valid VMAR.
parent: Weak<Vmar_>,
/// Specific physical pages that need to be mapped.
/// If this field is `None`, it means that the mapping is
/// an independent anonymous mapping.
vmo: Option<MappedVmo>,
/// Whether the mapping is shared.
/// The updates to a shared mapping are visible among processes.
/// or are carried through to the underlying file for
/// file-backed shared mappings.
is_shared: bool,
/// Whether the mapping needs to handle surrounding pages when handling page fault.
handle_page_faults_around: bool,
}
impl VmMapping {
pub fn try_clone(&self) -> Result<Self> {
let inner = self.inner.read().clone();
let vmo = self.vmo.as_ref().map(|vmo| vmo.dup()).transpose()?;
Ok(Self {
inner: RwLock::new(inner),
parent: self.parent.clone(),
vmo,
is_shared: self.is_shared,
handle_page_faults_around: self.handle_page_faults_around,
})
}
}
#[derive(Clone)]
struct VmMappingInner {
/// For the VMO-backed mapping, this field indicates the map offset of the VMO in bytes.
vmo_offset: Option<usize>,
/// The size of mapping, in bytes. The map size can even be larger than the size of VMO.
/// Those pages outside VMO range cannot be read or write.
map_size: usize,
/// The size of mapping, in bytes. The map size can even be larger than the
/// size of the VMO. Those pages outside the VMO range cannot be read or written.
///
/// Zero-sized mappings are not allowed, so this field is always non-zero.
map_size: NonZeroUsize,
/// The base address relative to the root VMAR where the VMO is mapped.
map_to_addr: Vaddr,
/// is destroyed
is_destroyed: bool,
/// Specific physical pages that need to be mapped. If this field is
/// `None`, it means that the mapping is an independent anonymous mapping.
///
/// The start of the virtual address maps to the start of the range
/// specified in [`MappedVmo`].
vmo: Option<MappedVmo>,
/// Whether the mapping is shared.
///
/// The updates to a shared mapping are visible among processes, or carried
/// through to the underlying file for file-backed shared mappings.
is_shared: bool,
/// Whether the mapping needs to handle surrounding pages when handling
/// page fault.
handle_page_faults_around: bool,
/// The permissions of pages in the mapping.
///
/// All pages within the same `VmMapping` have the same permissions.
perms: VmPerms,
}
impl Interval<usize> for Arc<VmMapping> {
fn range(&self) -> Range<usize> {
self.map_to_addr()..self.map_to_addr() + self.map_size()
impl Interval<Vaddr> for VmMapping {
fn range(&self) -> Range<Vaddr> {
self.map_to_addr..self.map_to_addr + self.map_size.get()
}
}
/***************************** Basic methods *********************************/
impl VmMapping {
pub fn build_mapping<R1, R2>(option: VmarMapOptions<R1, R2>) -> Result<Self> {
let VmarMapOptions {
@ -118,120 +101,90 @@ impl VmMapping {
map_to_addr + size
);
let (vmo, vmo_offset) = {
if let Some(vmo) = vmo {
(
Some(MappedVmo::new(vmo.to_dyn(), vmo_offset..vmo_limit)),
Some(vmo_offset.align_up(PAGE_SIZE)),
)
} else {
(None, None)
}
};
let vm_mapping_inner = VmMappingInner {
vmo_offset,
map_size: size,
map_to_addr,
is_destroyed: false,
perms,
};
let vmo = vmo.map(|vmo| MappedVmo::new(vmo.to_dyn(), vmo_offset..vmo_limit));
Ok(Self {
inner: RwLock::new(vm_mapping_inner),
parent: Arc::downgrade(&parent_vmar),
vmo,
is_shared,
handle_page_faults_around,
map_size: NonZeroUsize::new(size).unwrap(),
map_to_addr,
perms,
})
}
/// Builds a new VmMapping based on part of current `VmMapping`.
/// The mapping range of the new mapping must be contained in the full mapping.
///
/// Note: Since such new mappings will intersect with the current mapping,
/// making sure that when adding the new mapping into a Vmar, the current mapping in the Vmar will be removed.
fn clone_partial(
&self,
range: Range<usize>,
new_perms: Option<VmPerms>,
) -> Result<Arc<VmMapping>> {
let partial_mapping = Arc::new(self.try_clone()?);
// Adjust the mapping range and the permission.
{
let mut inner = partial_mapping.inner.write();
inner.shrink_to(range);
if let Some(perms) = new_perms {
inner.perms = perms;
}
}
Ok(partial_mapping)
}
pub fn vmo(&self) -> Option<&MappedVmo> {
self.vmo.as_ref()
pub(super) fn new_fork(&self) -> Result<VmMapping> {
Ok(VmMapping {
vmo: self.vmo.as_ref().map(|vmo| vmo.dup()).transpose()?,
..*self
})
}
/// Returns the mapping's start address.
pub fn map_to_addr(&self) -> Vaddr {
self.inner.read().map_to_addr
self.map_to_addr
}
/// Returns the mapping's end address.
pub fn map_end(&self) -> Vaddr {
let inner = self.inner.read();
inner.map_to_addr + inner.map_size
self.map_to_addr + self.map_size.get()
}
/// Returns the mapping's size.
pub fn map_size(&self) -> usize {
self.inner.read().map_size
self.map_size.get()
}
/// Unmaps pages in the range
pub fn unmap(&self, range: &Range<usize>, may_destroy: bool) -> Result<()> {
let parent = self.parent.upgrade().unwrap();
let vm_space = parent.vm_space();
self.inner.write().unmap(vm_space, range, may_destroy)
/// Returns the permissions of pages in the mapping.
pub fn perms(&self) -> VmPerms {
self.perms
}
}
pub fn is_destroyed(&self) -> bool {
self.inner.read().is_destroyed
}
/****************************** Page faults **********************************/
/// Returns whether the mapping is a shared mapping.
pub fn is_shared(&self) -> bool {
self.is_shared
impl VmMapping {
pub fn handle_page_fault(
&self,
vm_space: &VmSpace,
page_fault_info: &PageFaultInfo,
) -> Result<()> {
if !self.perms.contains(page_fault_info.required_perms) {
trace!(
"self.perms {:?}, page_fault_info.required_perms {:?}, self.range {:?}",
self.perms,
page_fault_info.required_perms,
self.range()
);
return_errno_with_message!(Errno::EACCES, "perm check fails");
}
pub fn enlarge(&self, extra_size: usize) {
self.inner.write().map_size += extra_size;
}
pub fn handle_page_fault(&self, page_fault_info: &PageFaultInfo) -> Result<()> {
self.check_perms(&page_fault_info.required_perms)?;
let address = page_fault_info.address;
let page_aligned_addr = address.align_down(PAGE_SIZE);
let is_write = page_fault_info.required_perms.contains(VmPerms::WRITE);
if !is_write && self.vmo.is_some() && self.handle_page_faults_around {
self.handle_page_faults_around(address)?;
self.handle_page_faults_around(vm_space, address)?;
return Ok(());
}
let root_vmar = self.parent.upgrade().unwrap();
let mut cursor = root_vmar
.vm_space()
.cursor_mut(&(page_aligned_addr..page_aligned_addr + PAGE_SIZE))?;
let mut cursor =
vm_space.cursor_mut(&(page_aligned_addr..page_aligned_addr + PAGE_SIZE))?;
match cursor.query().unwrap() {
VmItem::Mapped {
va,
frame,
mut prop,
} if is_write => {
} => {
if VmPerms::from(prop.flags).contains(page_fault_info.required_perms) {
// The page fault is already handled maybe by other threads.
// Just flush the TLB and return.
TlbFlushOp::Address(va).perform_on_current();
return Ok(());
}
assert!(is_write);
// Perform COW if it is a write access to a shared mapping.
// Skip if the page fault is already handled.
@ -258,24 +211,19 @@ impl VmMapping {
cursor.map(new_frame, prop);
}
}
VmItem::Mapped { .. } => {
panic!("non-COW page fault should not happen on mapped address")
}
VmItem::NotMapped { .. } => {
// Map a new frame to the page fault address.
let inner = self.inner.read();
let (frame, is_readonly) = self.prepare_page(&inner, address, is_write)?;
let (frame, is_readonly) = self.prepare_page(address, is_write)?;
let vm_perms = {
let mut perms = inner.perms;
let mut perms = self.perms;
if is_readonly {
// COW pages are forced to be read-only.
perms -= VmPerms::WRITE;
}
perms
};
drop(inner);
let mut page_flags = vm_perms.into();
page_flags |= PageFlags::ACCESSED;
@ -287,25 +235,17 @@ impl VmMapping {
cursor.map(frame, map_prop);
}
}
Ok(())
}
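To follow the fault-handling flow above, here is a minimal standalone sketch of the two permission decisions involved: rejecting a fault that the mapping does not allow, and mapping a prepared COW page without WRITE so that the next write faults again and gets its own copy. The `Perms` type and its constants are made up for illustration; the kernel uses `VmPerms` and `PageFlags`.
// A toy permission set, used only for this illustration.
#[derive(Clone, Copy, PartialEq, Debug)]
struct Perms(u8);
impl Perms {
    const READ: Perms = Perms(0b001);
    const WRITE: Perms = Perms(0b010);
    fn contains(self, other: Perms) -> bool {
        self.0 & other.0 == other.0
    }
    fn without(self, other: Perms) -> Perms {
        Perms(self.0 & !other.0)
    }
}
fn main() {
    let mapping_perms = Perms(Perms::READ.0 | Perms::WRITE.0);
    let required = Perms::WRITE;
    // Step 1: the fault is rejected unless the mapping grants the required perms.
    assert!(mapping_perms.contains(required));
    // Step 2: a freshly prepared COW page is mapped without WRITE, so the next
    // write access faults again and is resolved by copying the frame.
    let cow_page_perms = mapping_perms.without(Perms::WRITE);
    assert!(!cow_page_perms.contains(Perms::WRITE));
    println!("COW page mapped with {:?}", cow_page_perms);
}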
fn prepare_page(
&self,
mapping_inner: &RwLockReadGuard<VmMappingInner>,
page_fault_addr: Vaddr,
write: bool,
) -> Result<(Frame, bool)> {
fn prepare_page(&self, page_fault_addr: Vaddr, write: bool) -> Result<(Frame, bool)> {
let mut is_readonly = false;
let Some(vmo) = &self.vmo else {
return Ok((FrameAllocOptions::new(1).alloc_single()?, is_readonly));
};
let vmo_offset =
mapping_inner.vmo_offset.unwrap() + page_fault_addr - mapping_inner.map_to_addr;
let page_idx = vmo_offset / PAGE_SIZE;
let Ok(page) = vmo.get_committed_frame(page_idx) else {
let page_offset = page_fault_addr.align_down(PAGE_SIZE) - self.map_to_addr;
let Ok(page) = vmo.get_committed_frame(page_offset) else {
if !self.is_shared {
// The page offset is outside the VMO. This is only allowed in a private mapping.
return Ok((FrameAllocOptions::new(1).alloc_single()?, is_readonly));
@ -330,28 +270,24 @@ impl VmMapping {
}
}
fn handle_page_faults_around(&self, page_fault_addr: Vaddr) -> Result<()> {
fn handle_page_faults_around(&self, vm_space: &VmSpace, page_fault_addr: Vaddr) -> Result<()> {
const SURROUNDING_PAGE_NUM: usize = 16;
const SURROUNDING_PAGE_ADDR_MASK: usize = !(SURROUNDING_PAGE_NUM * PAGE_SIZE - 1);
let inner = self.inner.read();
let vmo_offset = inner.vmo_offset.unwrap();
let vmo = self.vmo().unwrap();
let vmo = self.vmo.as_ref().unwrap();
let around_page_addr = page_fault_addr & SURROUNDING_PAGE_ADDR_MASK;
let valid_size = min(vmo.size().saturating_sub(vmo_offset), inner.map_size);
let size = min(vmo.size(), self.map_size.get());
let start_addr = max(around_page_addr, inner.map_to_addr);
let start_addr = max(around_page_addr, self.map_to_addr);
let end_addr = min(
start_addr + SURROUNDING_PAGE_NUM * PAGE_SIZE,
inner.map_to_addr + valid_size,
self.map_to_addr + size,
);
let vm_perms = inner.perms - VmPerms::WRITE;
let parent = self.parent.upgrade().unwrap();
let vm_space = parent.vm_space();
let vm_perms = self.perms - VmPerms::WRITE;
let mut cursor = vm_space.cursor_mut(&(start_addr..end_addr))?;
let operate = move |commit_fn: &mut dyn FnMut() -> Result<Frame>| {
if let VmItem::NotMapped { va, len } = cursor.query().unwrap() {
if let VmItem::NotMapped { .. } = cursor.query().unwrap() {
// We regard all the surrounding pages as accessed, whether or not
// they really are. Then the hardware won't bother to update the
// accessed bit of the page table on subsequent accesses.
@ -368,205 +304,119 @@ impl VmMapping {
Ok(())
};
let start_offset = vmo_offset + start_addr - inner.map_to_addr;
let end_offset = vmo_offset + end_addr - inner.map_to_addr;
let start_offset = start_addr - self.map_to_addr;
let end_offset = end_addr - self.map_to_addr;
vmo.operate_on_range(&(start_offset..end_offset), operate)?;
Ok(())
}
}
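The window computed above is plain mask-and-clamp arithmetic. A standalone sketch with made-up addresses, assuming 4 KiB pages:
const PAGE_SIZE: usize = 4096;
const SURROUNDING_PAGE_NUM: usize = 16;
const SURROUNDING_PAGE_ADDR_MASK: usize = !(SURROUNDING_PAGE_NUM * PAGE_SIZE - 1);
// Computes the fault-around window for a fault at `fault_addr` in a mapping of
// `map_size` bytes starting at `map_start`, backed by `backing_size` bytes.
fn fault_around_window(
    fault_addr: usize,
    map_start: usize,
    map_size: usize,
    backing_size: usize,
) -> (usize, usize) {
    // Round the faulting address down to a 16-page-aligned block.
    let around = fault_addr & SURROUNDING_PAGE_ADDR_MASK;
    // Clamp the window so it stays inside both the mapping and its backing object.
    let size = backing_size.min(map_size);
    let start = around.max(map_start);
    let end = (start + SURROUNDING_PAGE_NUM * PAGE_SIZE).min(map_start + size);
    (start, end)
}
fn main() {
    let (start, end) = fault_around_window(0x40_7000, 0x40_2000, 0x20_000, 0x20_000);
    // 0x40_7000 rounds down to 0x40_0000, which is then clamped up to the mapping start.
    assert_eq!(start, 0x40_2000);
    assert_eq!(end, 0x41_2000);
    println!("fault-around window: {:#x}..{:#x}", start, end);
}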
/// Protects a specified range of pages in the mapping with the target perms.
/// This `VmMapping` will be split if needed to maintain the property that all
/// pages within one `VmMapping` share the same permissions.
/**************************** Transformations ********************************/
impl VmMapping {
/// Enlarges the mapping by `extra_size` bytes to the high end.
pub fn enlarge(self, extra_size: usize) -> Self {
Self {
map_size: NonZeroUsize::new(self.map_size.get() + extra_size).unwrap(),
..self
}
}
/// Splits the mapping at the specified address.
///
/// Since this method will modify the `vm_mappings` in the vmar,
/// it should not be called during the direct iteration of the `vm_mappings`.
pub(super) fn protect(&self, new_perms: VmPerms, range: Range<usize>) -> Result<()> {
// If `new_perms` is equal to `old_perms`, `protect()` will not modify any permission in the VmMapping.
let old_perms = self.inner.read().perms;
if old_perms == new_perms {
return Ok(());
/// The address must be within the mapping and page-aligned. The address
/// must not be either the start or the end of the mapping.
fn split(self, at: Vaddr) -> Result<(Self, Self)> {
debug_assert!(self.map_to_addr < at && at < self.map_end());
debug_assert!(at % PAGE_SIZE == 0);
let (mut l_vmo, mut r_vmo) = (None, None);
if let Some(vmo) = self.vmo {
let at_offset = vmo.range.start + at - self.map_to_addr;
let l_range = vmo.range.start..at_offset;
let r_range = at_offset..vmo.range.end;
l_vmo = Some(MappedVmo::new(vmo.vmo.dup()?, l_range));
r_vmo = Some(MappedVmo::new(vmo.vmo.dup()?, r_range));
}
// Protect permission for the perm in the VmMapping.
self.protect_with_subdivision(&range, new_perms)?;
// Protect permission in the VmSpace.
let vmar = self.parent.upgrade().unwrap();
let vm_space = vmar.vm_space();
self.inner.write().protect(vm_space, new_perms, range)?;
let left_size = at - self.map_to_addr;
let right_size = self.map_size.get() - left_size;
let left = Self {
map_to_addr: self.map_to_addr,
map_size: NonZeroUsize::new(left_size).unwrap(),
vmo: l_vmo,
..self
};
let right = Self {
map_to_addr: at,
map_size: NonZeroUsize::new(right_size).unwrap(),
vmo: r_vmo,
..self
};
Ok(())
Ok((left, right))
}
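The bookkeeping done by `split` is plain arithmetic over the virtual range and the backing window. A standalone check with made-up numbers (illustration only; the real method also duplicates the VMO capability):
use std::ops::Range;
// Splits a mapping described by (start address, size, backing window) at `at`,
// returning the (address, size, window) triples of the left and right halves.
fn split_window(
    map_to_addr: usize,
    map_size: usize,
    vmo_window: Range<usize>,
    at: usize,
) -> ((usize, usize, Range<usize>), (usize, usize, Range<usize>)) {
    let at_offset = vmo_window.start + (at - map_to_addr);
    let left = (map_to_addr, at - map_to_addr, vmo_window.start..at_offset);
    let right = (at, map_to_addr + map_size - at, at_offset..vmo_window.end);
    (left, right)
}
fn main() {
    // A 4-page mapping at 0x10000 backed by a window starting one page into the VMO.
    let (l, r) = split_window(0x10000, 0x4000, 0x1000..0x5000, 0x12000);
    assert_eq!(l, (0x10000, 0x2000, 0x1000..0x3000));
    assert_eq!(r, (0x12000, 0x2000, 0x3000..0x5000));
}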
pub(super) fn new_fork(&self, new_parent: &Arc<Vmar_>) -> Result<VmMapping> {
let new_inner = self.inner.read().clone();
Ok(VmMapping {
inner: RwLock::new(new_inner),
parent: Arc::downgrade(new_parent),
vmo: self.vmo.as_ref().map(|vmo| vmo.dup()).transpose()?,
is_shared: self.is_shared,
handle_page_faults_around: self.handle_page_faults_around,
})
}
pub fn range(&self) -> Range<usize> {
self.map_to_addr()..self.map_to_addr() + self.map_size()
}
/// Protects the current `VmMapping` to enforce new permissions within a specified range.
///
/// Due to the property of `VmMapping`, this operation may require subdividing the current
/// `VmMapping`. In that case, it will generate a new `VmMapping` with the specified `perms` to protect the
/// target range, as well as additional `VmMapping`s to preserve the mappings in the remaining ranges.
/// Splits the mapping by the specified range.
///
/// There are four conditions:
/// 1. |--------old perm--------| -> |-old-| + |------new------|
/// 2. |--------old perm--------| -> |-new-| + |------old------|
/// 3. |--------old perm--------| -> |-old-| + |-new-| + |-old-|
/// 4. |--------old perm--------| -> |---------new perm--------|
/// 1. |-outside `range`-| + |------------within `range`------------|
/// 2. |------------within `range`------------| + |-outside `range`-|
/// 3. |-outside `range`-| + |-within `range`-| + |-outside `range`-|
/// 4. |----------------------within `range` -----------------------|
///
/// Generally, this function is only used in the `protect()` method.
/// This method modifies the parent `Vmar` at the end if subdividing is required.
/// It removes the current mapping and adds the split mappings to the Vmar.
fn protect_with_subdivision(
&self,
intersect_range: &Range<usize>,
perms: VmPerms,
) -> Result<()> {
let mut additional_mappings = Vec::new();
/// Returns (left outside, within, right outside) if successful.
///
/// # Panics
///
/// Panics if the mapping does not contain the range, or if the start or
/// end of the range is not page-aligned.
pub fn split_range(self, range: &Range<Vaddr>) -> Result<(Option<Self>, Self, Option<Self>)> {
let mapping_range = self.range();
if range.start <= mapping_range.start && mapping_range.end <= range.end {
// Condition 4.
return Ok((None, self, None));
} else if mapping_range.start < range.start {
let (left, within) = self.split(range.start).unwrap();
if range.end < mapping_range.end {
// Condition 3.
let (within, right) = within.split(range.end).unwrap();
return Ok((Some(left), within, Some(right)));
} else {
// Condition 1.
return Ok((Some(left), within, None));
}
} else if mapping_range.contains(&range.end) {
// Condition 2.
let (within, right) = self.split(range.end).unwrap();
return Ok((None, within, Some(right)));
}
panic!("The mapping does not contain the splitting range.");
}
}
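To make the condition analysis above concrete, here is a standalone model that performs the same three-way classification on plain `Range<usize>` values (illustration only, not the kernel type):
use std::ops::Range;
// Returns (left outside, within, right outside) for a mapping split by `range`.
fn split_range(
    mapping: Range<usize>,
    range: &Range<usize>,
) -> (Option<Range<usize>>, Range<usize>, Option<Range<usize>>) {
    // The mapping must intersect the splitting range.
    assert!(mapping.start < range.end && range.start < mapping.end);
    let left = (mapping.start < range.start).then(|| mapping.start..range.start);
    let right = (range.end < mapping.end).then(|| range.end..mapping.end);
    let within = mapping.start.max(range.start)..mapping.end.min(range.end);
    (left, within, right)
}
fn main() {
    // Condition 3: the range lies strictly inside the mapping.
    let (l, w, r) = split_range(0x1000..0x8000, &(0x3000..0x5000));
    assert_eq!((l, w, r), (Some(0x1000..0x3000), 0x3000..0x5000, Some(0x5000..0x8000)));
    // Condition 4: the range covers the whole mapping, so nothing is split off.
    let (l, w, r) = split_range(0x3000..0x5000, &(0x1000..0x8000));
    assert_eq!((l, w, r), (None, 0x3000..0x5000, None));
}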
/************************** VM Space operations ******************************/
impl VmMapping {
/// Unmaps the mapping from the VM space.
pub(super) fn unmap(self, vm_space: &VmSpace) -> Result<()> {
let range = self.range();
// Condition 4, the `additional_mappings` will be empty.
if range.start == intersect_range.start && range.end == intersect_range.end {
self.inner.write().perms = perms;
return Ok(());
}
// Condition 1 or 3, which needs an additional new VmMapping with range (range.start..intersect_range.start)
if range.start < intersect_range.start {
let additional_left_mapping =
self.clone_partial(range.start..intersect_range.start, None)?;
additional_mappings.push(additional_left_mapping);
}
// Condition 2 or 3, which needs an additional new VmMapping with range (intersect_range.end..range.end).
if range.end > intersect_range.end {
let additional_right_mapping =
self.clone_partial(intersect_range.end..range.end, None)?;
additional_mappings.push(additional_right_mapping);
}
// The protected VmMapping must exist and its range is `intersect_range`.
let protected_mapping = self.clone_partial(intersect_range.clone(), Some(perms))?;
// Begin to modify the `Vmar`.
let vmar = self.parent.upgrade().unwrap();
let mut vmar_inner = vmar.inner.write();
// Remove the original mapping.
vmar_inner.vm_mappings.remove(&self.map_to_addr());
// Add protected mappings to the vmar.
vmar_inner
.vm_mappings
.insert(protected_mapping.map_to_addr(), protected_mapping);
// Add additional mappings to the vmar.
for mapping in additional_mappings {
vmar_inner
.vm_mappings
.insert(mapping.map_to_addr(), mapping);
}
let mut cursor = vm_space.cursor_mut(&range)?;
cursor.unmap(range.len());
Ok(())
}
/// Trims a range from the mapping.
/// There are several cases:
/// 1. The trim range is totally inside the mapping. Then the mapping will be split into two mappings.
/// 2. The trim range covers the mapping. Then the mapping will be destroyed.
/// 3. The trim range partly overlaps with the mapping, on the left or the right. Only the overlapped part is trimmed.
/// If we create a mapping with a new map addr, we will add it to `mappings_to_append`.
/// If the mapping at a map addr will no longer exist, the map addr will be added to `mappings_to_remove`.
/// Otherwise, we will directly modify `self`.
pub fn trim_mapping(
self: &Arc<Self>,
trim_range: &Range<usize>,
mappings_to_remove: &mut LinkedList<Vaddr>,
mappings_to_append: &mut LinkedList<(Vaddr, Arc<VmMapping>)>,
) -> Result<()> {
let map_to_addr = self.map_to_addr();
let map_size = self.map_size();
/// Changes the perms of the mapping.
pub(super) fn protect(self, vm_space: &VmSpace, perms: VmPerms) -> Self {
let range = self.range();
if !is_intersected(&range, trim_range) {
return Ok(());
}
if trim_range.start <= map_to_addr && trim_range.end >= map_to_addr + map_size {
// Fast path: the whole mapping was trimmed.
self.unmap(trim_range, true)?;
mappings_to_remove.push_back(map_to_addr);
return Ok(());
}
if trim_range.start <= range.start {
mappings_to_remove.push_back(map_to_addr);
if trim_range.end <= range.end {
// Overlap vm_mapping from left.
let new_map_addr = self.trim_left(trim_range.end)?;
mappings_to_append.push_back((new_map_addr, self.clone()));
} else {
// The mapping was totally destroyed.
}
} else {
if trim_range.end <= range.end {
// The trim range was totally inside the old mapping.
let another_mapping = Arc::new(self.try_clone()?);
let another_map_to_addr = another_mapping.trim_left(trim_range.end)?;
mappings_to_append.push_back((another_map_to_addr, another_mapping));
} else {
// Overlap vm_mapping from right.
}
self.trim_right(trim_range.start)?;
}
Ok(())
}
/// Trims the mapping from the left to a new address.
fn trim_left(&self, vaddr: Vaddr) -> Result<Vaddr> {
let vmar = self.parent.upgrade().unwrap();
let vm_space = vmar.vm_space();
self.inner.write().trim_left(vm_space, vaddr)
}
/// Trims the mapping from the right to a new address.
fn trim_right(&self, vaddr: Vaddr) -> Result<Vaddr> {
let vmar = self.parent.upgrade().unwrap();
let vm_space = vmar.vm_space();
self.inner.write().trim_right(vm_space, vaddr)
}
fn check_perms(&self, perms: &VmPerms) -> Result<()> {
self.inner.read().check_perms(perms)
}
}
impl VmMappingInner {
/// Unmaps pages in the range.
fn unmap(&mut self, vm_space: &VmSpace, range: &Range<usize>, may_destroy: bool) -> Result<()> {
let map_addr = range.start.align_down(PAGE_SIZE);
let map_end = range.end.align_up(PAGE_SIZE);
let map_range = map_addr..map_end;
let mut cursor = vm_space.cursor_mut(&map_range)?;
cursor.unmap(map_range.len());
if may_destroy && map_range == self.range() {
self.is_destroyed = true;
}
Ok(())
}
pub(super) fn protect(
&mut self,
vm_space: &VmSpace,
perms: VmPerms,
range: Range<usize>,
) -> Result<()> {
debug_assert!(range.start % PAGE_SIZE == 0);
debug_assert!(range.end % PAGE_SIZE == 0);
let mut cursor = vm_space.cursor_mut(&range).unwrap();
let op = |p: &mut PageProperty| p.flags = perms.into();
while cursor.virt_addr() < range.end {
if let Some(va) = cursor.protect_next(range.end - cursor.virt_addr(), op) {
@ -576,66 +426,8 @@ impl VmMappingInner {
}
}
cursor.flusher().dispatch_tlb_flush();
Ok(())
}
/// Trims the mapping from the left to a new address.
fn trim_left(&mut self, vm_space: &VmSpace, vaddr: Vaddr) -> Result<Vaddr> {
trace!(
"trim left: range: {:x?}, vaddr = 0x{:x}",
self.range(),
vaddr
);
debug_assert!(vaddr >= self.map_to_addr && vaddr <= self.map_to_addr + self.map_size);
debug_assert!(vaddr % PAGE_SIZE == 0);
let trim_size = vaddr - self.map_to_addr;
self.unmap(vm_space, &(self.map_to_addr..vaddr), true)?;
self.map_to_addr = vaddr;
self.vmo_offset = self.vmo_offset.map(|vmo_offset| vmo_offset + trim_size);
self.map_size -= trim_size;
Ok(self.map_to_addr)
}
/// Trims the mapping from the right to a new address.
fn trim_right(&mut self, vm_space: &VmSpace, vaddr: Vaddr) -> Result<Vaddr> {
trace!(
"trim right: range: {:x?}, vaddr = 0x{:x}",
self.range(),
vaddr
);
debug_assert!(vaddr >= self.map_to_addr && vaddr <= self.map_to_addr + self.map_size);
debug_assert!(vaddr % PAGE_SIZE == 0);
self.unmap(vm_space, &(vaddr..self.map_to_addr + self.map_size), true)?;
self.map_size = vaddr - self.map_to_addr;
Ok(self.map_to_addr)
}
/// Shrinks the current `VmMapping` to the new range.
/// The new range must be contained in the old range.
fn shrink_to(&mut self, new_range: Range<usize>) {
debug_assert!(self.map_to_addr <= new_range.start);
debug_assert!(self.map_to_addr + self.map_size >= new_range.end);
self.vmo_offset = self
.vmo_offset
.map(|vmo_offset| vmo_offset + new_range.start - self.map_to_addr);
self.map_to_addr = new_range.start;
self.map_size = new_range.end - new_range.start;
}
fn range(&self) -> Range<usize> {
self.map_to_addr..self.map_to_addr + self.map_size
}
fn check_perms(&self, perms: &VmPerms) -> Result<()> {
if !self.perms.contains(*perms) {
return_errno_with_message!(Errno::EACCES, "perm check fails");
}
Ok(())
Self { perms, ..self }
}
}
@ -778,7 +570,7 @@ impl<R1, R2> VmarMapOptions<R1, R2> {
pub fn build(self) -> Result<Vaddr> {
self.check_options()?;
let parent_vmar = self.parent.0.clone();
let vm_mapping = Arc::new(VmMapping::build_mapping(self)?);
let vm_mapping = VmMapping::build_mapping(self)?;
let map_to_addr = vm_mapping.map_to_addr();
parent_vmar.add_mapping(vm_mapping);
Ok(map_to_addr)
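As a usage-level illustration of the path that ends in `build()`, the sketch below shows how a caller might create an anonymous read/write mapping through `VmarMapOptions`. It is a sketch only: the helper name is made up, imports are omitted, and it is meant to be read in the context of this crate rather than compiled standalone.
// Sketch: assumes a `Vmar<Full>` (e.g. a process's root VMAR) is available and
// that `VmPerms`, `Vaddr`, and the crate's `Result` alias are in scope.
fn map_anonymous_rw(root_vmar: &Vmar<Full>, size: usize) -> Result<Vaddr> {
    let perms = VmPerms::READ | VmPerms::WRITE;
    // `new_map` returns a `VmarMapOptions` builder; `build` runs the checks
    // above, inserts the new `VmMapping` into the parent VMAR, and returns
    // the chosen mapping address.
    root_vmar.new_map(size, perms)?.build()
}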
@ -850,6 +642,7 @@ impl<R1, R2> VmarMapOptions<R1, R2> {
/// A wrapper that represents a mapped [`Vmo`] and provides the functionalities
/// required by mappings backed by the VMO.
#[derive(Debug)]
pub(super) struct MappedVmo {
vmo: Vmo,
/// Represents the accessible range in the VMO for mappings.
@ -862,26 +655,34 @@ impl MappedVmo {
Self { vmo, range }
}
/// Gets the committed frame at the input `page_idx` in the mapped VMO.
fn size(&self) -> usize {
self.range.len()
}
/// Gets the committed frame at the input offset in the mapped VMO.
///
/// If the VMO has not committed a frame at this offset, it will commit
/// one first and return it.
pub fn get_committed_frame(&self, page_idx: usize) -> Result<Frame> {
debug_assert!(self.range.contains(&(page_idx * PAGE_SIZE)));
self.vmo.commit_page(page_idx * PAGE_SIZE)
fn get_committed_frame(&self, page_offset: usize) -> Result<Frame> {
debug_assert!(page_offset < self.range.len());
debug_assert!(page_offset % PAGE_SIZE == 0);
self.vmo.commit_page(self.range.start + page_offset)
}
/// Traverses the indices within a specified range of a VMO sequentially.
///
/// For each index position, you have the option to commit the page as well as
/// perform other operations.
fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()>
where
F: FnMut(&mut dyn FnMut() -> Result<Frame>) -> Result<()>,
{
debug_assert!(self.range.start <= range.start && self.range.end >= range.end);
debug_assert!(range.start < self.range.len());
debug_assert!(range.end <= self.range.len());
self.vmo.operate_on_range(range, operate)
let range = self.range.start + range.start..self.range.start + range.end;
self.vmo.operate_on_range(&range, operate)
}
/// Duplicates the capability.
@ -891,9 +692,4 @@ impl MappedVmo {
range: self.range.clone(),
})
}
/// Returns the size (in bytes) of a VMO.
pub fn size(&self) -> usize {
self.vmo.size()
}
}
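The translation `MappedVmo` performs is a single shift by `range.start` of its accessible window. A standalone illustration with made-up page numbers, assuming 4 KiB pages:
use std::ops::Range;
const PAGE_SIZE: usize = 4096;
// A toy stand-in for the accessible window that `MappedVmo` carves out of a VMO.
struct Window {
    range: Range<usize>, // the accessible range inside the VMO, in bytes
}
impl Window {
    // Translates a mapping-relative page offset into an offset in the VMO.
    fn vmo_offset(&self, page_offset: usize) -> usize {
        debug_assert!(page_offset % PAGE_SIZE == 0);
        debug_assert!(page_offset < self.range.len());
        self.range.start + page_offset
    }
}
fn main() {
    // A mapping whose window starts 3 pages into the VMO.
    let w = Window { range: 3 * PAGE_SIZE..11 * PAGE_SIZE };
    // The second page of the mapping corresponds to the fifth page of the VMO.
    assert_eq!(w.vmo_offset(PAGE_SIZE), 4 * PAGE_SIZE);
}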
View File
@ -70,7 +70,7 @@ pub use pager::Pager;
/// Compared with `Frame`,
/// `Vmo` is easier to use (by offering more powerful APIs) and
/// harder to misuse (thanks to its nature of being a capability).
///
#[derive(Debug)]
pub struct Vmo<R = Rights>(pub(super) Arc<Vmo_>, R);
/// Functions exist both for static capability and dynamic capability
@ -176,6 +176,15 @@ pub(super) struct Vmo_ {
pages: Pages,
}
impl Debug for Vmo_ {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("Vmo_")
.field("flags", &self.flags)
.field("size", &self.size())
.finish()
}
}
bitflags! {
/// Commit Flags.
pub struct CommitFlags: u8 {