diff --git a/kernel/aster-nix/src/syscall/madvise.rs b/kernel/aster-nix/src/syscall/madvise.rs
index db99f37c3..6254cf3e7 100644
--- a/kernel/aster-nix/src/syscall/madvise.rs
+++ b/kernel/aster-nix/src/syscall/madvise.rs
@@ -29,11 +29,10 @@ fn madv_dontneed(start: Vaddr, len: usize) -> Result<()> {
     debug_assert!(len % PAGE_SIZE == 0);
     let current = current!();
     let root_vmar = current.root_vmar();
-    let vm_mapping = root_vmar.get_vm_mapping(start)?;
-    // ensure the range is totally in the mapping
-    debug_assert!(vm_mapping.map_to_addr() <= start);
-    debug_assert!(start + len <= vm_mapping.map_to_addr() + vm_mapping.map_size());
-    vm_mapping.unmap_and_decommit(start..(start + len))
+    let advised_range = start..start + len;
+    // `destroy()` interface may require adjustment and replacement afterwards.
+    let _ = root_vmar.destroy(advised_range);
+    Ok(())
 }
 
 #[repr(i32)]
diff --git a/kernel/aster-nix/src/vm/vmar/mod.rs b/kernel/aster-nix/src/vm/vmar/mod.rs
index 9794c5040..e2e07874a 100644
--- a/kernel/aster-nix/src/vm/vmar/mod.rs
+++ b/kernel/aster-nix/src/vm/vmar/mod.rs
@@ -181,18 +181,26 @@ impl Vmar_ {
         Ok(())
     }
 
-    // do real protect. The protected range is ensured to be mapped.
+    // Do real protect. The protected range is ensured to be mapped.
     fn do_protect_inner(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
-        let inner = self.inner.lock();
+        let protect_mappings: Vec<Arc<VmMapping>> = {
+            let inner = self.inner.lock();
+            inner
+                .vm_mappings
+                .find(&range)
+                .into_iter()
+                .cloned()
+                .collect()
+        };
 
-        for vm_mapping in inner.vm_mappings.find(&range) {
-            let vm_mapping_range = vm_mapping.range();
-            debug_assert!(is_intersected(&vm_mapping_range, &range));
+        for vm_mapping in protect_mappings {
+            let vm_mapping_range =
+                vm_mapping.map_to_addr()..(vm_mapping.map_to_addr() + vm_mapping.map_size());
             let intersected_range = get_intersected_range(&range, &vm_mapping_range);
             vm_mapping.protect(perms, intersected_range)?;
         }
 
-        for child_vmar_ in inner.child_vmar_s.find(&range) {
+        for child_vmar_ in self.inner.lock().child_vmar_s.find(&range) {
             let child_vmar_range = child_vmar_.range();
             debug_assert!(is_intersected(&child_vmar_range, &range));
             let intersected_range = get_intersected_range(&range, &child_vmar_range);
@@ -202,7 +210,7 @@ impl Vmar_ {
         Ok(())
     }
 
-    /// ensure the whole protected range is mapped, that is to say, backed up by a VMO.
+    /// Ensure the whole protected range is mapped, that is to say, backed up by a VMO.
     /// Internally, we check whether the range intersects any free region recursively.
     /// If so, the range is not fully mapped.
     fn check_protected_range(&self, protected_range: &Range<usize>) -> Result<()> {
@@ -265,7 +273,7 @@ impl Vmar_ {
         return_errno_with_message!(Errno::EACCES, "page fault addr is not in current vmar");
     }
 
-    /// clear all content of the root vmar
+    /// Clear all content of the root vmar
     pub fn clear_root_vmar(&self) -> Result<()> {
         debug_assert!(self.is_root_vmar());
         if !self.is_root_vmar() {
@@ -366,7 +374,7 @@ impl Vmar_ {
             let child_vmar_range = child_vmar_.range();
             debug_assert!(is_intersected(&child_vmar_range, range));
             if range.start <= child_vmar_range.start && child_vmar_range.end <= range.end {
-                // child vmar is totolly in the range
+                // Child vmar is totally in the range.
                 continue;
             }
             return_errno_with_message!(
@@ -404,7 +412,7 @@ impl Vmar_ {
         let read_start = self.base + offset;
         let read_end = buf.len() + read_start;
         let read_range = read_start..read_end;
-        // if the read range is in child vmar
+        // If the read range is in child vmar.
         let inner = self.inner.lock();
         for child_vmar_ in inner.child_vmar_s.find(&read_range) {
             let child_vmar_range = child_vmar_.range();
@@ -414,7 +422,7 @@ impl Vmar_ {
             }
         }
 
-        // if the read range is in mapped vmo
+        // If the read range is in mapped vmo.
         for vm_mapping in inner.vm_mappings.find(&read_range) {
             let vm_mapping_range = vm_mapping.range();
             if vm_mapping_range.start <= read_start && read_end <= vm_mapping_range.end {
@@ -439,7 +447,7 @@ impl Vmar_ {
             .ok_or_else(|| Error::with_message(Errno::EFAULT, "Arithmetic Overflow"))?;
         let write_range = write_start..write_end;
 
-        // if the write range is in child vmar
+        // If the write range is in child vmar.
         let inner = self.inner.lock();
         for child_vmar_ in inner.child_vmar_s.find(&write_range) {
             let child_vmar_range = child_vmar_.range();
@@ -449,7 +457,7 @@ impl Vmar_ {
             }
         }
 
-        // if the write range is in mapped vmo
+        // If the write range is in mapped vmo.
         for vm_mapping in inner.vm_mappings.find(&write_range) {
             let vm_mapping_range = vm_mapping.range();
             if vm_mapping_range.start <= write_start && write_end <= vm_mapping_range.end {
@@ -462,7 +470,7 @@ impl Vmar_ {
         return_errno_with_message!(Errno::EACCES, "write range is not backed up by a vmo");
     }
 
-    /// allocate a child vmar_.
+    /// Allocate a child vmar_.
     pub fn alloc_child_vmar(
         self: &Arc<Self>,
         child_vmar_offset: Option<usize>,
@@ -504,8 +512,8 @@ impl Vmar_ {
         Ok(child_vmar_)
     }
 
-    /// find a free region for child vmar or vmo.
-    /// returns (region base addr, child real offset)
+    /// Find a free region for child vmar or vmo.
+    /// Returns (region base addr, child real offset).
     fn find_free_region_for_child(
         &self,
         child_offset: Option<Vaddr>,
@@ -526,7 +534,7 @@ impl Vmar_ {
                 }
             }
         } else {
-            // else, we find a free region that can satisfy the length and align requirement.
+            // Else, we find a free region that can satisfy the length and align requirement.
             // Here, we use a simple brute-force algorithm to find the first free range that can satisfy.
             // FIXME: A randomized algorithm may be more efficient.
             for (region_base, free_region) in &inner.free_regions {
@@ -568,12 +576,12 @@ impl Vmar_ {
         Ok(())
     }
 
-    /// returns the attached vm_space
+    /// Returns the attached `VmSpace`.
     pub(super) fn vm_space(&self) -> &VmSpace {
         &self.vm_space
    }
 
-    /// map a vmo to this vmar
+    /// Map a vmo to this vmar.
     pub fn add_mapping(&self, mapping: Arc<VmMapping>) {
         self.inner
             .lock()
@@ -589,18 +597,18 @@ impl Vmar_ {
         align: usize,
         can_overwrite: bool,
     ) -> Result<Vaddr> {
-        trace!("allocate free region, vmo_size = 0x{:x}, map_size = 0x{:x}, offset = {:x?}, align = 0x{:x}, can_ovewrite = {}", vmo_size, size, offset, align, can_overwrite);
+        trace!("allocate free region, vmo_size = 0x{:x}, map_size = 0x{:x}, offset = {:x?}, align = 0x{:x}, can_overwrite = {}", vmo_size, size, offset, align, can_overwrite);
         let map_size = size.max(vmo_size);
 
         if can_overwrite {
             let mut inner = self.inner.lock();
-            // if can_overwrite, the offset is ensured not to be None
+            // If can_overwrite, the offset is ensured not to be None.
             let offset = offset.ok_or(Error::with_message(
                 Errno::EINVAL,
                 "offset cannot be None since can overwrite is set",
             ))?;
             let map_range = offset..(offset + map_size);
-            // If can overwrite, the vmo can cross multiple free regions. We will split each free regions that intersect with the vmo
+            // If can overwrite, the vmo can cross multiple free regions. We will split each free regions that intersect with the vmo.
             let mut split_regions = Vec::new();
 
             for free_region in inner.free_regions.find(&map_range) {
@@ -622,7 +630,7 @@ impl Vmar_ {
             self.trim_existing_mappings(map_range)?;
             Ok(offset)
         } else {
-            // Otherwise, the vmo in a single region
+            // Otherwise, the vmo in a single region.
            let (free_region_base, offset) =
                self.find_free_region_for_child(offset, map_size, align)?;
            let mut inner = self.inner.lock();
@@ -666,7 +674,7 @@ impl Vmar_ {
         self.new_cow(None)
     }
 
-    /// Create a new vmar by creating cow child for all mapped vmos
+    /// Create a new vmar by creating cow child for all mapped vmos.
     fn new_cow(&self, parent: Option<&Arc<Vmar_>>) -> Result<Arc<Self>> {
         let new_vmar_ = {
             let vmar_inner = VmarInner::new();
@@ -681,7 +689,7 @@ impl Vmar_ {
         };
 
         let inner = self.inner.lock();
-        // clone free regions
+        // Clone free regions.
         for (free_region_base, free_region) in &inner.free_regions {
             new_vmar_
                 .inner
@@ -690,7 +698,7 @@ impl Vmar_ {
                 .insert(*free_region_base, free_region.clone());
         }
 
-        // clone child vmars
+        // Clone child vmars.
         for (child_vmar_base, child_vmar_) in &inner.child_vmar_s {
             let new_child_vmar = child_vmar_.new_cow(Some(&new_vmar_))?;
             new_vmar_
@@ -700,7 +708,7 @@ impl Vmar_ {
                 .insert(*child_vmar_base, new_child_vmar);
         }
 
-        // clone vm mappings
+        // Clone vm mappings.
         for (vm_mapping_base, vm_mapping) in &inner.vm_mappings {
             let new_mapping = Arc::new(vm_mapping.new_cow(&new_vmar_)?);
             new_vmar_
@@ -739,7 +747,8 @@ impl Vmar {
         self.0.size
     }
 
-    /// get a mapped vmo
+    /// Get mapped vmo at given offset.
+    /// TODO: improve the searching algorithm.
     pub fn get_vm_mapping(&self, offset: Vaddr) -> Result<Arc<VmMapping>> {
         let rights = Rights::all();
         self.check_rights(rights)?;
@@ -775,7 +784,7 @@ impl FreeRegion {
         self.range.end - self.range.start
     }
 
-    /// allocate a range in this free region.
+    /// Allocate a range in this free region.
     /// The range is ensured to be contained in current region before call this function.
     /// The return vector contains regions that are not allocated. Since the allocate_range can be
     /// in the middle of a free region, the original region may be split as at most two regions.
@@ -799,13 +808,13 @@ impl FreeRegion {
     }
 }
 
-/// determine whether two ranges are intersected.
-/// returns zero if one of the ranges has a length of 0
+/// Determine whether two ranges are intersected.
+/// returns false if one of the ranges has a length of 0
 pub fn is_intersected(range1: &Range<usize>, range2: &Range<usize>) -> bool {
     range1.start.max(range2.start) < range1.end.min(range2.end)
 }
 
-/// get the intersection range of two ranges.
+/// Get the intersection range of two ranges.
 /// The two ranges should be ensured to be intersected.
 pub fn get_intersected_range(range1: &Range<usize>, range2: &Range<usize>) -> Range<usize> {
     debug_assert!(is_intersected(range1, range2));
diff --git a/kernel/aster-nix/src/vm/vmar/vm_mapping.rs b/kernel/aster-nix/src/vm/vmar/vm_mapping.rs
index fe935d0b3..c22c76b02 100644
--- a/kernel/aster-nix/src/vm/vmar/vm_mapping.rs
+++ b/kernel/aster-nix/src/vm/vmar/vm_mapping.rs
@@ -17,7 +17,7 @@ use crate::{
 /// A VmMapping represents mapping a vmo into a vmar.
 /// A vmar can has multiple VmMappings, which means multiple vmos are mapped to a vmar.
 /// A vmo can also contain multiple VmMappings, which means a vmo can be mapped to multiple vmars.
-/// The reltionship between Vmar and Vmo is M:N.
+/// The relationship between Vmar and Vmo is M:N.
 pub struct VmMapping {
     inner: Mutex<VmMappingInner>,
     /// The parent vmar. The parent should always point to a valid vmar.
@@ -51,10 +51,9 @@ struct VmMappingInner {
     is_destroyed: bool,
     /// The pages already mapped. The key is the page index in vmo.
     mapped_pages: BTreeSet<usize>,
-    /// The permission of each page. The key is the page index in vmo.
-    /// This map can be filled when mapping a vmo to vmar and can be modified when call mprotect.
-    /// We keep the options in case the page is not committed(or create copy on write mappings) and will further need these options.
-    page_perms: BTreeMap<usize, VmPerm>,
+    /// The permission of pages in the mapping.
+    /// All pages within the same VmMapping have the same permission.
+    perm: VmPerm,
 }
 
 impl Interval<usize> for Arc<VmMapping> {
@@ -90,23 +89,13 @@ impl VmMapping {
             map_to_addr + size
         );
 
-        let page_perms = {
-            let mut page_perms = BTreeMap::new();
-            let perm = VmPerm::from(perms);
-            let page_idx_range = get_page_idx_range(&(vmo_offset..vmo_offset + size));
-            for page_idx in page_idx_range {
-                page_perms.insert(page_idx, perm);
-            }
-            page_perms
-        };
-
         let vm_mapping_inner = VmMappingInner {
             vmo_offset,
             map_size: size,
             map_to_addr,
             is_destroyed: false,
             mapped_pages: BTreeSet::new(),
-            page_perms,
+            perm: VmPerm::from(perms),
         };
 
         Ok(Self {
@@ -116,6 +105,28 @@ impl VmMapping {
         })
     }
 
+    /// Build a new VmMapping based on part of current `VmMapping`.
+    /// The mapping range of the new mapping must be contained in the full mapping.
+    ///
+    /// Note: Since such new mappings will intersect with the current mapping,
+    /// making sure that when adding the new mapping into a Vmar, the current mapping in the Vmar will be removed.
+    fn clone_partial(
+        &self,
+        range: Range<usize>,
+        new_perm: Option<VmPerm>,
+    ) -> Result<Arc<VmMapping>> {
+        let partial_mapping = Arc::new(self.try_clone()?);
+        // Adjust the mapping range and the permission.
+        {
+            let mut inner = partial_mapping.inner.lock();
+            inner.shrink_to(range);
+            if let Some(perm) = new_perm {
+                inner.perm = perm;
+            }
+        }
+        Ok(partial_mapping)
+    }
+
     pub fn vmo(&self) -> &Vmo {
         &self.vmo
     }
@@ -190,17 +201,6 @@ impl VmMapping {
         self.inner.lock().unmap(vm_space, range, may_destroy)
     }
 
-    pub fn unmap_and_decommit(&self, range: Range<usize>) -> Result<()> {
-        self.unmap(&range, false)?;
-        let vmo_range = {
-            let map_to_addr = self.map_to_addr();
-            let vmo_offset = self.vmo_offset();
-            (range.start - map_to_addr + vmo_offset)..(range.end - map_to_addr + vmo_offset)
-        };
-        self.vmo.decommit(vmo_range)?;
-        Ok(())
-    }
-
     pub fn is_destroyed(&self) -> bool {
         self.inner.lock().is_destroyed
     }
@@ -233,12 +233,28 @@ impl VmMapping {
         self.map_one_page(page_idx, frame, is_readonly)
     }
 
-    pub(super) fn protect(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
-        let rights = Rights::from(perms);
+    /// Protect a specified range of pages in the mapping to the target perms.
+    /// The VmMapping will split to maintain its property.
+    ///
+    /// Since this method will modify the `vm_mappings` in the vmar,
+    /// it should not be called during the direct iteration of the `vm_mappings`.
+    pub(super) fn protect(&self, new_perms: VmPerms, range: Range<usize>) -> Result<()> {
+        // If `new_perms` is equal to `old_perms`, `protect()` will not modify any permission in the VmMapping.
+        let old_perms = VmPerms::from(self.inner.lock().perm);
+        if old_perms == new_perms {
+            return Ok(());
+        }
+
+        let rights = Rights::from(new_perms);
         self.vmo().check_rights(rights)?;
+        // Protect permission for the perm in the VmMapping.
+        self.protect_with_subdivision(&range, VmPerm::from(new_perms))?;
+        // Protect permission in the VmSpace.
         let vmar = self.parent.upgrade().unwrap();
         let vm_space = vmar.vm_space();
-        self.inner.lock().protect(vm_space, perms, range)
+        self.inner.lock().protect(vm_space, new_perms, range)?;
+
+        Ok(())
     }
 
     pub(super) fn new_cow(&self, new_parent: &Arc<Vmar_>) -> Result<VmMapping> {
@@ -258,7 +274,7 @@ impl VmMapping {
                 map_to_addr: inner.map_to_addr,
                 is_destroyed: inner.is_destroyed,
                 mapped_pages: BTreeSet::new(),
-                page_perms: inner.page_perms.clone(),
+                perm: inner.perm,
             }
         };
 
@@ -273,6 +289,63 @@ impl VmMapping {
         self.map_to_addr()..self.map_to_addr() + self.map_size()
     }
 
+    /// Protect the current `VmMapping` to enforce new permissions within a specified range.
+    ///
+    /// Due to the property of `VmMapping`, this operation may require subdividing the current
+    /// `VmMapping`. In this condition, it will generate a new `VmMapping` with the specified `perm` to protect the
+    /// target range, as well as additional `VmMappings` to preserve the mappings in the remaining ranges.
+    ///
+    /// There are four conditions:
+    /// 1. |--------old perm--------| -> |-old-| + |------new------|
+    /// 2. |--------old perm--------| -> |-new-| + |------old------|
+    /// 3. |--------old perm--------| -> |-old-| + |-new-| + |-old-|
+    /// 4. |--------old perm--------| -> |---------new perm--------|
+    ///
+    /// Generally, this function is only used in `protect()` method.
+    /// This method modifies the parent `Vmar` in the end if subdividing is required.
+    /// It removes current mapping and add splitted mapping to the Vmar.
+    fn protect_with_subdivision(&self, intersect_range: &Range<usize>, perm: VmPerm) -> Result<()> {
+        let mut additional_mappings = Vec::new();
+        let range = self.range();
+        // Condition 4, the `additional_mappings` will be empty.
+        if range.start == intersect_range.start && range.end == intersect_range.end {
+            self.inner.lock().perm = perm;
+            return Ok(());
+        }
+        // Condition 1 or 3, which needs an additional new VmMapping with range (range.start..intersect_range.start)
+        if range.start < intersect_range.start {
+            let additional_left_mapping =
+                self.clone_partial(range.start..intersect_range.start, None)?;
+            additional_mappings.push(additional_left_mapping);
+        }
+        // Condition 2 or 3, which needs an additional new VmMapping with range (intersect_range.end..range.end).
+        if range.end > intersect_range.end {
+            let additional_right_mapping =
+                self.clone_partial(intersect_range.end..range.end, None)?;
+            additional_mappings.push(additional_right_mapping);
+        }
+        // The protected VmMapping must exist and its range is `intersect_range`.
+        let protected_mapping = self.clone_partial(intersect_range.clone(), Some(perm))?;
+
+        // Begin to modify the `Vmar`.
+        let vmar = self.parent.upgrade().unwrap();
+        let mut vmar_inner = vmar.inner.lock();
+        // Remove the original mapping.
+        vmar_inner.vm_mappings.remove(&self.map_to_addr());
+        // Add protected mappings to the vmar.
+        vmar_inner
+            .vm_mappings
+            .insert(protected_mapping.map_to_addr(), protected_mapping);
+        // Add additional mappings to the vmar.
+        for mapping in additional_mappings {
+            vmar_inner
+                .vm_mappings
+                .insert(mapping.map_to_addr(), mapping);
+        }
+
+        Ok(())
+    }
+
     /// Trim a range from the mapping.
     /// There are several cases.
     /// 1. the trim_range is totally in the mapping. Then the mapping will split as two mappings.
@@ -294,7 +367,7 @@ impl VmMapping {
             return Ok(());
         }
         if trim_range.start <= map_to_addr && trim_range.end >= map_to_addr + map_size {
-            // fast path: the whole mapping was trimed
+            // Fast path: the whole mapping was trimed.
             self.unmap(trim_range, true)?;
             mappings_to_remove.insert(map_to_addr);
             return Ok(());
         }
@@ -302,20 +375,20 @@ impl VmMapping {
         if trim_range.start <= range.start {
             mappings_to_remove.insert(map_to_addr);
             if trim_range.end <= range.end {
-                // overlap vm_mapping from left
+                // Overlap vm_mapping from left.
                 let new_map_addr = self.trim_left(trim_range.end)?;
                 mappings_to_append.insert(new_map_addr, self.clone());
             } else {
-                // the mapping was totally destroyed
+                // The mapping was totally destroyed.
             }
         } else {
             if trim_range.end <= range.end {
-                // the trim range was totally inside the old mapping
+                // The trim range was totally inside the old mapping.
                 let another_mapping = Arc::new(self.try_clone()?);
                 let another_map_to_addr = another_mapping.trim_left(trim_range.end)?;
                 mappings_to_append.insert(another_map_to_addr, another_mapping);
            } else {
-                // overlap vm_mapping from right
+                // Overlap vm_mapping from right.
            }
            self.trim_right(trim_range.start)?;
        }
@@ -323,14 +396,14 @@ impl VmMapping {
         Ok(())
     }
 
-    /// trim the mapping from left to a new address.
+    /// Trim the mapping from left to a new address.
     fn trim_left(&self, vaddr: Vaddr) -> Result<Vaddr> {
         let vmar = self.parent.upgrade().unwrap();
         let vm_space = vmar.vm_space();
         self.inner.lock().trim_left(vm_space, vaddr)
     }
 
-    /// trim the mapping from right to a new address.
+    /// Trim the mapping from right to a new address.
     fn trim_right(&self, vaddr: Vaddr) -> Result<Vaddr> {
         let vmar = self.parent.upgrade().unwrap();
         let vm_space = vmar.vm_space();
@@ -354,7 +427,7 @@ impl VmMappingInner {
         let map_addr = self.page_map_addr(page_idx);
 
         let vm_perm = {
-            let mut perm = *self.page_perms.get(&page_idx).unwrap();
+            let mut perm = self.perm;
             if is_readonly {
                 debug_assert!(vmo.is_cow_child());
                 perm -= VmPerm::W;
@@ -369,7 +442,7 @@ impl VmMappingInner {
             options
         };
 
-        // cow child allows unmapping the mapped page
+        // Cow child allows unmapping the mapped page.
         if vmo.is_cow_child() && vm_space.is_mapped(map_addr) {
             vm_space.unmap(&(map_addr..(map_addr + PAGE_SIZE))).unwrap();
         }
@@ -389,7 +462,7 @@ impl VmMappingInner {
         Ok(())
     }
 
-    /// Unmap pages in the range
+    /// Unmap pages in the range.
     fn unmap(&mut self, vm_space: &VmSpace, range: &Range<usize>, may_destroy: bool) -> Result<()> {
         let map_to_addr = self.map_to_addr;
         let vmo_map_range = (range.start - map_to_addr + self.vmo_offset)
@@ -405,7 +478,7 @@ impl VmMappingInner {
     }
 
     fn page_map_addr(&self, page_idx: usize) -> usize {
-        page_idx * PAGE_SIZE - self.vmo_offset + self.map_to_addr
+        page_idx * PAGE_SIZE + self.map_to_addr - self.vmo_offset
     }
 
     pub(super) fn protect(
@@ -420,11 +493,9 @@ impl VmMappingInner {
         let end_page = (range.end - self.map_to_addr + self.vmo_offset) / PAGE_SIZE;
         let perm = VmPerm::from(perms);
         for page_idx in start_page..end_page {
-            self.page_perms.insert(page_idx, perm);
             let page_addr = self.page_map_addr(page_idx);
             if vm_space.is_mapped(page_addr) {
-                // if the page is already mapped, we will modify page table
-                let perm = VmPerm::from(perms);
+                // If the page is already mapped, we will modify page table
                 let page_range = page_addr..(page_addr + PAGE_SIZE);
                 vm_space.protect(&page_range, perm)?;
             }
@@ -432,7 +503,7 @@ impl VmMappingInner {
         Ok(())
     }
 
-    /// trim the mapping from left to a new address.
+    /// Trim the mapping from left to a new address.
     fn trim_left(&mut self, vm_space: &VmSpace, vaddr: Vaddr) -> Result<Vaddr> {
         trace!(
             "trim left: range: {:x?}, vaddr = 0x{:x}",
@@ -448,7 +519,6 @@ impl VmMappingInner {
         self.vmo_offset += trim_size;
         self.map_size -= trim_size;
         for page_idx in old_vmo_offset / PAGE_SIZE..self.vmo_offset / PAGE_SIZE {
-            self.page_perms.remove(&page_idx);
             if self.mapped_pages.remove(&page_idx) {
                 let _ = self.unmap_one_page(vm_space, page_idx);
             }
@@ -456,7 +526,7 @@ impl VmMappingInner {
         Ok(self.map_to_addr)
     }
 
-    /// trim the mapping from right to a new address.
+    /// Trim the mapping from right to a new address.
     fn trim_right(&mut self, vm_space: &VmSpace, vaddr: Vaddr) -> Result<Vaddr> {
         trace!(
             "trim right: range: {:x?}, vaddr = 0x{:x}",
@@ -468,24 +538,34 @@ impl VmMappingInner {
         let page_idx_range = (vaddr - self.map_to_addr + self.vmo_offset) / PAGE_SIZE
             ..(self.map_size + self.vmo_offset) / PAGE_SIZE;
         for page_idx in page_idx_range {
-            self.page_perms.remove(&page_idx);
             let _ = self.unmap_one_page(vm_space, page_idx);
         }
         self.map_size = vaddr - self.map_to_addr;
         Ok(self.map_to_addr)
     }
 
+    /// Shrink the current `VmMapping` to the new range.
+    /// The new range must be contained in the old range.
+    fn shrink_to(&mut self, new_range: Range<usize>) {
+        debug_assert!(self.map_to_addr <= new_range.start);
+        debug_assert!(self.map_to_addr + self.map_size >= new_range.end);
+        self.vmo_offset += new_range.start - self.map_to_addr;
+        self.map_to_addr = new_range.start;
+        self.map_size = new_range.end - new_range.start;
+    }
+
     fn range(&self) -> Range<Vaddr> {
         self.map_to_addr..self.map_to_addr + self.map_size
     }
 
     fn check_perm(&self, page_idx: &usize, perm: &VmPerm) -> Result<()> {
-        let page_perm = self
-            .page_perms
-            .get(page_idx)
-            .ok_or(Error::with_message(Errno::EINVAL, "invalid page idx"))?;
-
-        if !page_perm.contains(*perm) {
+        // Check if the page is in current VmMapping.
+        if page_idx * PAGE_SIZE < self.vmo_offset
+            || (page_idx + 1) * PAGE_SIZE > self.vmo_offset + self.map_size
+        {
+            return_errno_with_message!(Errno::EINVAL, "invalid page idx");
+        }
+        if !self.perm.contains(*perm) {
             return_errno_with_message!(Errno::EACCES, "perm check fails");
         }
 
@@ -609,9 +689,9 @@ impl VmarMapOptions {
         Ok(map_to_addr)
     }
 
-    /// check whether all options are valid
+    /// Check whether all options are valid.
     fn check_options(&self) -> Result<()> {
-        // check align
+        // Check align.
         debug_assert!(self.align % PAGE_SIZE == 0);
         debug_assert!(self.align.is_power_of_two());
         if self.align % PAGE_SIZE != 0 || !self.align.is_power_of_two() {
@@ -632,16 +712,16 @@ impl VmarMapOptions {
         Ok(())
     }
 
-    /// check whether the vmperm is subset of vmo rights
+    /// Check whether the vmperm is subset of vmo rights.
     fn check_perms(&self) -> Result<()> {
         let perm_rights = Rights::from(self.perms);
         self.vmo.check_rights(perm_rights)
     }
 
-    /// check whether the vmo will overwrite with any existing vmo or vmar
+    /// Check whether the vmo will overwrite with any existing vmo or vmar.
     fn check_overwrite(&self) -> Result<()> {
         if self.can_overwrite {
-            // if can_overwrite is set, the offset cannot be None
+            // If `can_overwrite` is set, the offset cannot be None.
             debug_assert!(self.offset.is_some());
             if self.offset.is_none() {
                 return_errno_with_message!(
@@ -651,12 +731,12 @@ impl VmarMapOptions {
             }
         }
         if self.offset.is_none() {
-            // if does not specify the offset, we assume the map can always find suitable free region.
+            // If does not specify the offset, we assume the map can always find suitable free region.
             // FIXME: is this always true?
             return Ok(());
         }
         let offset = self.offset.unwrap();
-        // we should spare enough space at least for the whole vmo
+        // We should spare enough space at least for the whole vmo.
         let size = self.size.max(self.vmo.size());
        let vmo_range = offset..(offset + size);
        self.parent
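Note (not part of the patch): the core idea of `protect_with_subdivision` above is that a mapping now carries a single `perm`, so protecting a sub-range must split one mapping into up to three. The following is a minimal standalone sketch of that splitting logic; `MappingSlice` and `split_for_protect` are hypothetical stand-ins for illustration only, not the kernel's types or API.

use std::ops::Range;

#[derive(Clone, Debug, PartialEq)]
struct MappingSlice {
    range: Range<usize>,
    perm: u8, // simplified permission bits, e.g. R/W/X flags
}

/// Split `old` around `target`, giving the intersected part `new_perm`.
/// Mirrors the four cases listed in the doc comment of `protect_with_subdivision`:
/// the left and right remainders (if any) keep the old permission.
fn split_for_protect(old: &MappingSlice, target: Range<usize>, new_perm: u8) -> Vec<MappingSlice> {
    debug_assert!(old.range.start <= target.start && target.end <= old.range.end);
    let mut out = Vec::new();
    // Left remainder keeps the old permission (cases 1 and 3).
    if old.range.start < target.start {
        out.push(MappingSlice { range: old.range.start..target.start, perm: old.perm });
    }
    // The protected middle part always exists and takes the new permission.
    out.push(MappingSlice { range: target.clone(), perm: new_perm });
    // Right remainder keeps the old permission (cases 2 and 3).
    if target.end < old.range.end {
        out.push(MappingSlice { range: target.end..old.range.end, perm: old.perm });
    }
    out
}

fn main() {
    let old = MappingSlice { range: 0x1000..0x5000, perm: 0b101 };
    // Case 3: protect only the middle pages; expect three slices back.
    let parts = split_for_protect(&old, 0x2000..0x3000, 0b011);
    assert_eq!(parts.len(), 3);
    assert_eq!(parts[1].range, 0x2000..0x3000);
    println!("{parts:?}");
}

In the actual patch the same shape is achieved with `clone_partial` plus `shrink_to`, and the parent `Vmar` swaps the original mapping for the resulting pieces.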