diff --git a/kernel/src/vm/vmar/vm_mapping.rs b/kernel/src/vm/vmar/vm_mapping.rs
index 5de086d58..a666e9229 100644
--- a/kernel/src/vm/vmar/vm_mapping.rs
+++ b/kernel/src/vm/vmar/vm_mapping.rs
@@ -167,16 +167,6 @@ impl VmMapping {
         self.vmo.as_ref()
     }
 
-    /// Adds a new committed page and map it to vmspace. If copy on write is set, it's allowed to unmap the page at the same address.
-    /// FIXME: This implementation based on the truth that we map one page at a time. If multiple pages are mapped together, this implementation may have problems
-    fn map_one_page(&self, map_addr: usize, frame: Frame, is_readonly: bool) -> Result<()> {
-        let parent = self.parent.upgrade().unwrap();
-        let vm_space = parent.vm_space();
-        self.inner
-            .lock()
-            .map_one_page(vm_space, map_addr, frame, is_readonly)
-    }
-
     /// Returns the mapping's start address.
     pub fn map_to_addr(&self) -> Vaddr {
         self.inner.lock().map_to_addr
@@ -193,11 +183,6 @@ impl VmMapping {
         self.inner.lock().map_size
     }
 
-    /// Returns the mapping's offset in the VMO.
-    pub fn vmo_offset(&self) -> Option<usize> {
-        self.inner.lock().vmo_offset
-    }
-
     /// Unmaps pages in the range
    pub fn unmap(&self, range: &Range<usize>, may_destroy: bool) -> Result<()> {
         let parent = self.parent.upgrade().unwrap();
@@ -234,21 +219,28 @@ impl VmMapping {
 
         let page_aligned_addr = page_fault_addr.align_down(PAGE_SIZE);
 
+        let root_vmar = self.parent.upgrade().unwrap();
+        let mut cursor = root_vmar
+            .vm_space()
+            .cursor_mut(&(page_aligned_addr..page_aligned_addr + PAGE_SIZE))?;
+        let current_mapping = cursor.query().unwrap();
+
+        // Perform COW if it is a write access to a shared mapping.
         if write && !not_present {
-            // Perform COW at page table.
-            let root_vmar = self.parent.upgrade().unwrap();
-            let mut cursor = root_vmar
-                .vm_space()
-                .cursor_mut(&(page_aligned_addr..page_aligned_addr + PAGE_SIZE))?;
             let VmItem::Mapped {
                 va: _,
                 frame,
                 mut prop,
-            } = cursor.query().unwrap()
+            } = current_mapping
             else {
                 return Err(Error::new(Errno::EFAULT));
             };
 
+            // Skip if the page fault is already handled.
+            if prop.flags.contains(PageFlags::W) {
+                return Ok(());
+            }
+
             if self.is_shared {
                 cursor.protect(PAGE_SIZE, |p| p.flags |= PageFlags::W);
             } else {
@@ -259,18 +251,40 @@ impl VmMapping {
             return Ok(());
         }
 
-        let (frame, is_readonly) = self.prepare_page(page_fault_addr, write)?;
+        // Map a new frame to the page fault address.
+        // Skip if the page fault is already handled.
+        if let VmItem::NotMapped { .. } = current_mapping {
+            let inner_lock = self.inner.lock();
+            let (frame, is_readonly) = self.prepare_page(&inner_lock, page_fault_addr, write)?;
 
-        self.map_one_page(page_aligned_addr, frame, is_readonly)
+            let vm_perms = {
+                let mut perms = inner_lock.perms;
+                if is_readonly {
+                    // COW pages are forced to be read-only.
+                    perms -= VmPerms::WRITE;
+                }
+                perms
+            };
+            let map_prop = PageProperty::new(vm_perms.into(), CachePolicy::Writeback);
+
+            cursor.map(frame, map_prop);
+        }
+
+        Ok(())
     }
 
-    fn prepare_page(&self, page_fault_addr: Vaddr, write: bool) -> Result<(Frame, bool)> {
+    fn prepare_page(
+        &self,
+        inner_lock: &MutexGuard<VmMappingInner>,
+        page_fault_addr: Vaddr,
+        write: bool,
+    ) -> Result<(Frame, bool)> {
         let mut is_readonly = false;
         let Some(vmo) = &self.vmo else {
             return Ok((FrameAllocOptions::new(1).alloc_single()?, is_readonly));
         };
 
-        let vmo_offset = self.vmo_offset().unwrap() + page_fault_addr - self.map_to_addr();
+        let vmo_offset = inner_lock.vmo_offset.unwrap() + page_fault_addr - inner_lock.map_to_addr;
         let page_idx = vmo_offset / PAGE_SIZE;
         let Ok(page) = vmo.get_committed_frame(page_idx) else {
             if !self.is_shared {
@@ -507,30 +521,6 @@ impl VmMapping {
 }
 
 impl VmMappingInner {
-    fn map_one_page(
-        &mut self,
-        vm_space: &VmSpace,
-        map_addr: usize,
-        frame: Frame,
-        is_readonly: bool,
-    ) -> Result<()> {
-        let map_range = map_addr..map_addr + PAGE_SIZE;
-
-        let vm_perms = {
-            let mut perms = self.perms;
-            if is_readonly {
-                // COW pages are forced to be read-only.
-                perms -= VmPerms::WRITE;
-            }
-            perms
-        };
-        let map_prop = PageProperty::new(vm_perms.into(), CachePolicy::Writeback);
-
-        let mut cursor = vm_space.cursor_mut(&map_range).unwrap();
-        cursor.map(frame, map_prop);
-        Ok(())
-    }
-
     /// Unmap pages in the range.
     fn unmap(&mut self, vm_space: &VmSpace, range: &Range<usize>, may_destroy: bool) -> Result<()> {
         let map_addr = range.start.align_down(PAGE_SIZE);
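For readers following the control flow: after this change, handle_page_fault performs a single cursor query up front and then branches on the result, reusing the same cursor for the COW permission upgrade, the frame duplication, and the fresh mapping. The sketch below models that decision tree in isolation; PageState, FaultAction, and decide_fault_action are hypothetical stand-in names for illustration and are not part of the Asterinas API.

// A minimal, self-contained sketch of the fault-handling decision tree that the
// patched VmMapping::handle_page_fault implements. All names below are
// hypothetical stand-ins, not Asterinas APIs.

/// What the VmSpace cursor reports for the faulting page (stand-in for VmItem).
#[derive(Clone, Copy)]
enum PageState {
    NotMapped,
    Mapped { writable: bool },
}

/// The action the handler takes (stand-in enum for illustration only).
#[derive(Debug, PartialEq)]
enum FaultAction {
    /// Another thread already resolved the fault; nothing left to do.
    Nothing,
    /// Shared mapping written through a read-only PTE: set the W bit in place.
    AddWritePermission,
    /// Private mapping written through a read-only PTE: copy the frame and remap it writable.
    CopyOnWrite,
    /// Not-present fault: prepare a frame (from the VMO or anonymously) and map it.
    MapNewFrame { map_readonly: bool },
}

fn decide_fault_action(state: PageState, write: bool, is_shared: bool, cow_page: bool) -> FaultAction {
    match (state, write) {
        // Write fault on a page that is already writable: handled concurrently, skip.
        (PageState::Mapped { writable: true }, true) => FaultAction::Nothing,
        // Write fault on a shared, read-only page: upgrade the permission via protect().
        (PageState::Mapped { writable: false }, true) if is_shared => FaultAction::AddWritePermission,
        // Write fault on a private, read-only page: break COW via duplicate-and-remap.
        (PageState::Mapped { writable: false }, true) => FaultAction::CopyOnWrite,
        // Read fault on a present page: likewise already handled, skip.
        (PageState::Mapped { .. }, false) => FaultAction::Nothing,
        // Not-present fault: map a new frame; COW pages are forced read-only.
        (PageState::NotMapped, _) => FaultAction::MapNewFrame { map_readonly: cow_page },
    }
}

fn main() {
    // A write to a private, read-only mapped page must trigger copy-on-write.
    assert_eq!(
        decide_fault_action(PageState::Mapped { writable: false }, true, false, true),
        FaultAction::CopyOnWrite
    );
    // A read fault on an unmapped COW page maps the frame read-only.
    assert_eq!(
        decide_fault_action(PageState::NotMapped, false, false, true),
        FaultAction::MapNewFrame { map_readonly: true }
    );
}

Querying once and reusing the cursor for every outcome is what allows the patch to delete both VmMappingInner::map_one_page and the vmo_offset accessor, and the explicit "already handled" checks make the handler tolerant of racing faults on the same page.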