From e60b5b7649f128a4be18e5a7fa4625eab3182926 Mon Sep 17 00:00:00 2001
From: Chen Chengjun
Date: Tue, 8 Oct 2024 11:30:20 +0800
Subject: [PATCH] Refine mapping-related locks
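
Replace the mutexes that guard the mapping metadata with reader-writer
locks so that read-mostly paths no longer serialize on a single lock.
`Vmar_::inner` becomes an `RwMutex` and `VmMapping::inner` becomes an
`RwLock`; query paths such as page-fault handling, address lookups, and
permission checks take the read lock, while structural updates (mapping,
unmapping, trimming, destroying) take the write lock. Two critical
sections are also shortened: in `do_exit`, the `clear_ctid` guard is
dropped explicitly before the robust list is walked, and in the
page-table node locking routine, the page metadata is fetched once
instead of on every iteration of the spin loop.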
---
 kernel/src/process/posix_thread/exit.rs |  1 +
 kernel/src/vm/vmar/mod.rs               | 51 +++++++++++---------
 kernel/src/vm/vmar/vm_mapping.rs        | 64 ++++++++++++++-----------
 ostd/src/mm/page_table/node/mod.rs      |  4 +-
 4 files changed, 66 insertions(+), 54 deletions(-)

diff --git a/kernel/src/process/posix_thread/exit.rs b/kernel/src/process/posix_thread/exit.rs
index d81733ec7..0664cd37e 100644
--- a/kernel/src/process/posix_thread/exit.rs
+++ b/kernel/src/process/posix_thread/exit.rs
@@ -31,6 +31,7 @@ pub fn do_exit(thread: &Thread, posix_thread: &PosixThread, term_status: TermSta
         futex_wake(*clear_ctid, 1, None)?;
         *clear_ctid = 0;
     }
+    drop(clear_ctid);
 
     // exit the robust list: walk the robust list; mark futex words as dead and do futex wake
     wake_robust_list(posix_thread, tid);
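
The `exit.rs` hunk is about lock scope rather than lock type: `clear_ctid`
is a lock guard, and without the explicit `drop` it would live to the end
of the enclosing scope, keeping the lock held while the robust list is
walked. A minimal sketch of the pattern, using `std::sync::Mutex` as a
stand-in for the kernel's lock types and hypothetical names:

    use std::sync::Mutex;

    // A stand-in for the kernel's lock types; the names are hypothetical.
    fn clear_and_continue(clear_ctid: &Mutex<u64>) {
        let mut guard = clear_ctid.lock().unwrap();
        if *guard != 0 {
            *guard = 0; // the only work that actually needs the lock
        }
        drop(guard); // release here, not at the end of the enclosing scope

        walk_robust_list(); // follow-up work that must not hold the lock
    }

    fn walk_robust_list() {}
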
diff --git a/kernel/src/vm/vmar/mod.rs b/kernel/src/vm/vmar/mod.rs
index f2fb84690..73eed3282 100644
--- a/kernel/src/vm/vmar/mod.rs
+++ b/kernel/src/vm/vmar/mod.rs
@@ -107,7 +107,7 @@ impl Vmar {
 
 pub(super) struct Vmar_ {
     /// VMAR inner
-    inner: Mutex<VmarInner>,
+    inner: RwMutex<VmarInner>,
     /// The offset relative to the root VMAR
     base: Vaddr,
     /// The total size of the VMAR in bytes
@@ -211,7 +211,7 @@ impl Vmar_ {
         };
 
         Arc::new(Vmar_ {
-            inner: Mutex::new(inner),
+            inner: RwMutex::new(inner),
             base,
             size,
             vm_space,
@@ -249,7 +249,7 @@ impl Vmar_ {
     // Do real protect. The protected range is ensured to be mapped.
    fn do_protect_inner(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
         let protect_mappings: Vec<Arc<VmMapping>> = {
-            let inner = self.inner.lock();
+            let inner = self.inner.read();
             inner
                 .vm_mappings
                 .find(&range)
@@ -265,7 +265,7 @@ impl Vmar_ {
             vm_mapping.protect(perms, intersected_range)?;
         }
 
-        for child_vmar_ in self.inner.lock().child_vmar_s.find(&range) {
+        for child_vmar_ in self.inner.read().child_vmar_s.find(&range) {
            let child_vmar_range = child_vmar_.range();
             debug_assert!(is_intersected(&child_vmar_range, &range));
             let intersected_range = get_intersected_range(&range, &child_vmar_range);
@@ -284,7 +284,7 @@ impl Vmar_ {
         assert!(range.end <= self.base + self.size);
 
         // The protected range should not intersect with any free region
-        let inner = self.inner.lock();
+        let inner = self.inner.read();
         if inner.free_regions.find(range).into_iter().next().is_some() {
             return_errno_with_message!(Errno::EACCES, "protected range is not fully mapped");
         }
@@ -307,7 +307,7 @@ impl Vmar_ {
             return_errno_with_message!(Errno::EACCES, "page fault addr is not in current vmar");
         }
-        let inner = self.inner.lock();
+        let inner = self.inner.read();
         if let Some(child_vmar) = inner.child_vmar_s.find_one(&address) {
             debug_assert!(child_vmar.range().contains(&address));
             return child_vmar.handle_page_fault(page_fault_info);
         }
@@ -329,7 +329,7 @@ impl Vmar_ {
             return_errno_with_message!(Errno::EACCES, "The vmar is not root vmar");
         }
         self.clear_vm_space();
-        let mut inner = self.inner.lock();
+        let mut inner = self.inner.write();
         inner.child_vmar_s.clear();
         inner.vm_mappings.clear();
         inner.free_regions.clear();
@@ -345,7 +345,7 @@ impl Vmar_ {
 
     pub fn destroy(&self, range: Range<usize>) -> Result<()> {
         self.check_destroy_range(&range)?;
-        let mut inner = self.inner.lock();
+        let mut inner = self.inner.write();
         let mut free_regions = BTreeMap::new();
 
         for child_vmar_ in inner.child_vmar_s.find(&range) {
@@ -414,7 +414,7 @@ impl Vmar_ {
         }
 
         let last_mapping = {
-            let inner = self.inner.lock();
+            let inner = self.inner.read();
             inner
                 .vm_mappings
                 .find_one(&(old_map_end - 1))
@@ -437,7 +437,7 @@ impl Vmar_ {
         debug_assert!(range.start % PAGE_SIZE == 0);
         debug_assert!(range.end % PAGE_SIZE == 0);
 
-        let inner = self.inner.lock();
+        let inner = self.inner.read();
 
         for child_vmar_ in inner.child_vmar_s.find(range) {
             let child_vmar_range = child_vmar_.range();
@@ -456,12 +456,12 @@ impl Vmar_ {
     }
 
     fn is_destroyed(&self) -> bool {
-        self.inner.lock().is_destroyed
+        self.inner.read().is_destroyed
     }
 
     fn merge_continuous_regions(&self) {
         let mut new_free_regions = BTreeMap::new();
-        let mut inner = self.inner.lock();
+        let mut inner = self.inner.write();
         let keys = inner.free_regions.keys().cloned().collect::<Vec<_>>();
         for key in keys {
             if let Some(mut free_region) = inner.free_regions.remove(&key) {
@@ -486,15 +486,20 @@ impl Vmar_ {
     ) -> Result<Arc<Vmar_>> {
         let (region_base, child_vmar_offset) =
             self.inner
-                .lock()
+                .write()
                 .find_free_region(child_vmar_offset, child_vmar_size, align)?;
         // This unwrap should never fails
-        let free_region = self.inner.lock().free_regions.remove(&region_base).unwrap();
+        let free_region = self
+            .inner
+            .write()
+            .free_regions
+            .remove(&region_base)
+            .unwrap();
         let child_range = child_vmar_offset..(child_vmar_offset + child_vmar_size);
         let regions_after_allocation = free_region.allocate_range(child_range.clone());
         regions_after_allocation.into_iter().for_each(|region| {
             self.inner
-                .lock()
+                .write()
                 .free_regions
                 .insert(region.start(), region);
         });
@@ -515,14 +520,14 @@ impl Vmar_ {
             Some(self),
         );
         self.inner
-            .lock()
+            .write()
             .child_vmar_s
             .insert(child_vmar_.base, child_vmar_.clone());
         Ok(child_vmar_)
     }
 
     fn check_overwrite(&self, mapping_range: Range<usize>, can_overwrite: bool) -> Result<()> {
-        let inner = self.inner.lock();
+        let inner = self.inner.read();
         if inner
             .child_vmar_s
             .find(&mapping_range)
@@ -561,7 +566,7 @@ impl Vmar_ {
     /// Maps a `VmMapping` to this VMAR.
     fn add_mapping(&self, mapping: Arc<VmMapping>) {
         self.inner
-            .lock()
+            .write()
             .vm_mappings
             .insert(mapping.map_to_addr(), mapping);
     }
@@ -576,7 +581,7 @@ impl Vmar_ {
         trace!("allocate free region, map_size = 0x{:x}, offset = {:x?}, align = 0x{:x}, can_overwrite = {}", map_size, offset, align, can_overwrite);
 
         if can_overwrite {
-            let mut inner = self.inner.lock();
+            let mut inner = self.inner.write();
             // If can overwrite, the offset is ensured not to be `None`.
             let offset = offset.ok_or(Error::with_message(
                 Errno::EINVAL,
@@ -606,7 +611,7 @@ impl Vmar_ {
             Ok(offset)
         } else {
             // Otherwise, the mapping in a single region.
-            let mut inner = self.inner.lock();
+            let mut inner = self.inner.write();
             let (free_region_base, offset) = inner.find_free_region(offset, map_size, align)?;
             let free_region = inner.free_regions.remove(&free_region_base).unwrap();
             let mapping_range = offset..(offset + map_size);
@@ -620,7 +625,7 @@ impl Vmar_ {
 
     fn trim_existing_mappings(&self, trim_range: Range<usize>) -> Result<()> {
-        let mut inner = self.inner.lock();
+        let mut inner = self.inner.write();
         let mut mappings_to_remove = LinkedList::new();
         let mut mappings_to_append = LinkedList::new();
 
         for vm_mapping in inner.vm_mappings.find(&trim_range) {
@@ -666,8 +671,8 @@ impl Vmar_ {
             Vmar_::new(vmar_inner, vm_space, self.base, self.size, parent)
         };
 
-        let inner = self.inner.lock();
-        let mut new_inner = new_vmar_.inner.lock();
+        let inner = self.inner.read();
+        let mut new_inner = new_vmar_.inner.write();
 
         // Clone free regions.
         for (free_region_base, free_region) in &inner.free_regions {
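
The rationale for the `RwMutex` conversion above: page-fault handling,
range checks, and lookups only read the VMAR metadata, so they can now
proceed in parallel, and only structural updates (allocating regions,
adding or trimming mappings, destroying children) contend for exclusive
access. A minimal sketch of the same discipline, using `std::sync::RwLock`
as a stand-in for the kernel's `RwMutex` and simplified stand-in types
rather than the real `VmarInner`:

    use std::collections::BTreeMap;
    use std::ops::Range;
    use std::sync::RwLock;

    // Simplified stand-in for VmarInner; not the kernel's real type.
    struct VmarInner {
        mappings: BTreeMap<usize, Range<usize>>, // keyed by start address
    }

    struct Vmar {
        inner: RwLock<VmarInner>,
    }

    impl Vmar {
        // Query path: many readers (e.g. page faults) may run in parallel.
        fn contains(&self, addr: usize) -> bool {
            let inner = self.inner.read().unwrap();
            inner
                .mappings
                .range(..=addr)
                .next_back()
                .is_some_and(|(_, r)| r.contains(&addr))
        }

        // Update path: structural changes take the exclusive writer lock.
        fn insert(&self, range: Range<usize>) {
            let mut inner = self.inner.write().unwrap();
            inner.mappings.insert(range.start, range);
        }
    }

Note that `alloc_child_vmar` above takes the write lock even for its
initial `find_free_region` lookup, since the result immediately feeds a
structural update.
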
diff --git a/kernel/src/vm/vmar/vm_mapping.rs b/kernel/src/vm/vmar/vm_mapping.rs
index 87ed53d48..651487428 100644
--- a/kernel/src/vm/vmar/vm_mapping.rs
+++ b/kernel/src/vm/vmar/vm_mapping.rs
@@ -10,9 +10,12 @@ use core::{
 
 use align_ext::AlignExt;
 use aster_rights::Rights;
-use ostd::mm::{
-    tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, Frame, FrameAllocOptions, PageFlags,
-    PageProperty, VmSpace,
+use ostd::{
+    mm::{
+        tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, Frame, FrameAllocOptions, PageFlags,
+        PageProperty, VmSpace,
+    },
+    sync::RwLockReadGuard,
 };
 
 use super::{interval::Interval, is_intersected, Vmar, Vmar_};
@@ -39,7 +42,7 @@ use crate::{
 ///
 /// Such mappings will also be VMO-backed mappings.
 pub(super) struct VmMapping {
-    inner: Mutex<VmMappingInner>,
+    inner: RwLock<VmMappingInner>,
     /// The parent VMAR. The parent should always point to a valid VMAR.
     parent: Weak<Vmar_>,
     /// Specific physical pages that need to be mapped.
@@ -57,10 +60,10 @@
 
 impl VmMapping {
     pub fn try_clone(&self) -> Result<Self> {
-        let inner = self.inner.lock().clone();
+        let inner = self.inner.read().clone();
         let vmo = self.vmo.as_ref().map(|vmo| vmo.dup()).transpose()?;
         Ok(Self {
-            inner: Mutex::new(inner),
+            inner: RwLock::new(inner),
             parent: self.parent.clone(),
             vmo,
             is_shared: self.is_shared,
@@ -135,7 +138,7 @@ impl VmMapping {
         };
 
         Ok(Self {
-            inner: Mutex::new(vm_mapping_inner),
+            inner: RwLock::new(vm_mapping_inner),
             parent: Arc::downgrade(&parent_vmar),
             vmo,
             is_shared,
@@ -156,7 +159,7 @@ impl VmMapping {
         let partial_mapping = Arc::new(self.try_clone()?);
         // Adjust the mapping range and the permission.
         {
-            let mut inner = partial_mapping.inner.lock();
+            let mut inner = partial_mapping.inner.write();
             inner.shrink_to(range);
             if let Some(perms) = new_perms {
                 inner.perms = perms;
@@ -171,29 +174,29 @@ impl VmMapping {
 
     /// Returns the mapping's start address.
     pub fn map_to_addr(&self) -> Vaddr {
-        self.inner.lock().map_to_addr
+        self.inner.read().map_to_addr
     }
 
     /// Returns the mapping's end address.
     pub fn map_end(&self) -> Vaddr {
-        let inner = self.inner.lock();
+        let inner = self.inner.read();
         inner.map_to_addr + inner.map_size
     }
 
     /// Returns the mapping's size.
     pub fn map_size(&self) -> usize {
-        self.inner.lock().map_size
+        self.inner.read().map_size
     }
 
     /// Unmaps pages in the range
     pub fn unmap(&self, range: &Range<usize>, may_destroy: bool) -> Result<()> {
         let parent = self.parent.upgrade().unwrap();
         let vm_space = parent.vm_space();
-        self.inner.lock().unmap(vm_space, range, may_destroy)
+        self.inner.write().unmap(vm_space, range, may_destroy)
     }
 
     pub fn is_destroyed(&self) -> bool {
-        self.inner.lock().is_destroyed
+        self.inner.read().is_destroyed
     }
 
     /// Returns whether the mapping is a shared mapping.
@@ -202,7 +205,7 @@ impl VmMapping {
     }
 
     pub fn enlarge(&self, extra_size: usize) {
-        self.inner.lock().map_size += extra_size;
+        self.inner.write().map_size += extra_size;
     }
 
     pub fn handle_page_fault(&self, page_fault_info: &PageFaultInfo) -> Result<()> {
@@ -261,17 +264,19 @@ impl VmMapping {
             VmItem::NotMapped { .. } => {
                 // Map a new frame to the page fault address.
-                let inner_lock = self.inner.lock();
-                let (frame, is_readonly) = self.prepare_page(&inner_lock, address, is_write)?;
+                let inner = self.inner.read();
+                let (frame, is_readonly) = self.prepare_page(&inner, address, is_write)?;
 
                 let vm_perms = {
-                    let mut perms = inner_lock.perms;
+                    let mut perms = inner.perms;
                     if is_readonly {
                         // COW pages are forced to be read-only.
                         perms -= VmPerms::WRITE;
                     }
                     perms
                 };
 
+                drop(inner);
+
                 let mut page_flags = vm_perms.into();
                 page_flags |= PageFlags::ACCESSED;
                 if is_write {
@@ -288,7 +293,7 @@ impl VmMapping {
 
     fn prepare_page(
         &self,
-        inner_lock: &MutexGuard<VmMappingInner>,
+        mapping_inner: &RwLockReadGuard<VmMappingInner>,
         page_fault_addr: Vaddr,
         write: bool,
     ) -> Result<(Frame, bool)> {
@@ -297,7 +302,8 @@ impl VmMapping {
             return Ok((FrameAllocOptions::new(1).alloc_single()?, is_readonly));
         };
 
-        let vmo_offset = inner_lock.vmo_offset.unwrap() + page_fault_addr - inner_lock.map_to_addr;
+        let vmo_offset =
+            mapping_inner.vmo_offset.unwrap() + page_fault_addr - mapping_inner.map_to_addr;
         let page_idx = vmo_offset / PAGE_SIZE;
         let Ok(page) = vmo.get_committed_frame(page_idx) else {
             if !self.is_shared {
@@ -328,7 +334,7 @@ impl VmMapping {
         const SURROUNDING_PAGE_NUM: usize = 16;
         const SURROUNDING_PAGE_ADDR_MASK: usize = !(SURROUNDING_PAGE_NUM * PAGE_SIZE - 1);
 
-        let inner = self.inner.lock();
+        let inner = self.inner.read();
         let vmo_offset = inner.vmo_offset.unwrap();
         let vmo = self.vmo().unwrap();
         let around_page_addr = page_fault_addr & SURROUNDING_PAGE_ADDR_MASK;
@@ -376,7 +382,7 @@ impl VmMapping {
     /// it should not be called during the direct iteration of the `vm_mappings`.
     pub(super) fn protect(&self, new_perms: VmPerms, range: Range<usize>) -> Result<()> {
         // If `new_perms` is equal to `old_perms`, `protect()` will not modify any permission in the VmMapping.
-        let old_perms = self.inner.lock().perms;
+        let old_perms = self.inner.read().perms;
         if old_perms == new_perms {
             return Ok(());
         }
@@ -386,16 +392,16 @@ impl VmMapping {
         // Protect permission in the VmSpace.
         let vmar = self.parent.upgrade().unwrap();
         let vm_space = vmar.vm_space();
-        self.inner.lock().protect(vm_space, new_perms, range)?;
+        self.inner.write().protect(vm_space, new_perms, range)?;
 
         Ok(())
     }
 
     pub(super) fn new_fork(&self, new_parent: &Arc<Vmar_>) -> Result<VmMapping> {
-        let new_inner = self.inner.lock().clone();
+        let new_inner = self.inner.read().clone();
 
         Ok(VmMapping {
-            inner: Mutex::new(new_inner),
+            inner: RwLock::new(new_inner),
             parent: Arc::downgrade(new_parent),
             vmo: self.vmo.as_ref().map(|vmo| vmo.dup()).transpose()?,
             is_shared: self.is_shared,
@@ -431,7 +437,7 @@ impl VmMapping {
         let range = self.range();
         // Condition 4, the `additional_mappings` will be empty.
         if range.start == intersect_range.start && range.end == intersect_range.end {
-            self.inner.lock().perms = perms;
+            self.inner.write().perms = perms;
             return Ok(());
         }
         // Condition 1 or 3, which needs an additional new VmMapping with range (range.start..intersect_range.start)
@@ -451,7 +457,7 @@ impl VmMapping {
 
         // Begin to modify the `Vmar`.
         let vmar = self.parent.upgrade().unwrap();
-        let mut vmar_inner = vmar.inner.lock();
+        let mut vmar_inner = vmar.inner.write();
         // Remove the original mapping.
         vmar_inner.vm_mappings.remove(&self.map_to_addr());
         // Add protected mappings to the vmar.
@@ -522,18 +528,18 @@ impl VmMapping {
 
     fn trim_left(&self, vaddr: Vaddr) -> Result<Vaddr> {
         let vmar = self.parent.upgrade().unwrap();
         let vm_space = vmar.vm_space();
-        self.inner.lock().trim_left(vm_space, vaddr)
+        self.inner.write().trim_left(vm_space, vaddr)
     }
 
     /// Trims the mapping from right to a new address.
     fn trim_right(&self, vaddr: Vaddr) -> Result<Vaddr> {
         let vmar = self.parent.upgrade().unwrap();
         let vm_space = vmar.vm_space();
-        self.inner.lock().trim_right(vm_space, vaddr)
+        self.inner.write().trim_right(vm_space, vaddr)
     }
 
     fn check_perms(&self, perms: &VmPerms) -> Result<()> {
-        self.inner.lock().check_perms(perms)
+        self.inner.read().check_perms(perms)
     }
 }
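
Two details of the `VmMapping` conversion deserve attention: `prepare_page`
now borrows the caller's read guard as a `&RwLockReadGuard<VmMappingInner>`
instead of taking the lock again, and the page-fault path explicitly drops
that guard (`drop(inner)`) before the new frame is mapped into the
`VmSpace`. A minimal sketch of the guard-borrowing pattern, with simplified
hypothetical types rather than the kernel's own:

    use std::sync::{RwLock, RwLockReadGuard};

    // Simplified stand-in; not the kernel's real VmMappingInner.
    struct MappingInner {
        map_to_addr: usize,
        vmo_offset: usize,
    }

    // The helper borrows the caller's read guard, so the lock is taken
    // once and stays held across the call (the prepare_page pattern).
    fn page_offset(inner: &RwLockReadGuard<'_, MappingInner>, fault_addr: usize) -> usize {
        inner.vmo_offset + (fault_addr - inner.map_to_addr)
    }

    fn handle_fault(mapping: &RwLock<MappingInner>, fault_addr: usize) -> usize {
        let inner = mapping.read().unwrap();
        let offset = page_offset(&inner, fault_addr);
        drop(inner); // release the read lock before slower follow-up work
        offset
    }
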
diff --git a/ostd/src/mm/page_table/node/mod.rs b/ostd/src/mm/page_table/node/mod.rs
index a5e1dcb1d..80d8df824 100644
--- a/ostd/src/mm/page_table/node/mod.rs
+++ b/ostd/src/mm/page_table/node/mod.rs
@@ -81,8 +81,8 @@ where
         let page: Page<PageTablePageMeta<E, C>> = self.into();
 
         // Acquire the lock.
-        while page
-            .meta()
+        let meta = page.meta();
+        while meta
             .lock
             .compare_exchange(0, 1, Ordering::Acquire, Ordering::Relaxed)
             .is_err()
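
The page-table hunk is a small hoisting fix: `page.meta()` yields a
reference to the page's metadata, so it can be fetched once and reused,
instead of being re-evaluated on every iteration of the spin loop. A
minimal sketch of the same acquire loop, using a bare `AtomicU8` as a
stand-in for the `lock` field of the page metadata:

    use std::sync::atomic::{AtomicU8, Ordering};

    // AtomicU8 stands in for the `lock` field of the page metadata.
    fn lock(meta_lock: &AtomicU8) {
        // The shared state is bound once, outside the loop, instead of
        // being re-derived on every spin iteration.
        while meta_lock
            .compare_exchange(0, 1, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            core::hint::spin_loop();
        }
    }

    fn unlock(meta_lock: &AtomicU8) {
        meta_lock.store(0, Ordering::Release);
    }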