mirror of https://github.com/asterinas/asterinas.git
Refine mapping-related locks
commit e60b5b7649 (parent 968389f550)
committed by Tate, Hongliang Tian
@@ -31,6 +31,7 @@ pub fn do_exit(thread: &Thread, posix_thread: &PosixThread, term_status: TermSta
         futex_wake(*clear_ctid, 1, None)?;
         *clear_ctid = 0;
     }
+    drop(clear_ctid);
     // exit the robust list: walk the robust list; mark futex words as dead and do futex wake
     wake_robust_list(posix_thread, tid);
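The added `drop(clear_ctid)` releases the guard on the clear-child-TID slot before `wake_robust_list` runs. A minimal sketch of the pattern, assuming `clear_ctid` is a lock guard; `ctid_slot` and `wake_waiters` are illustrative stand-ins built on `std::sync::Mutex`, not Asterinas APIs:

```rust
use std::sync::Mutex;

// Hypothetical stand-ins: `ctid_slot` plays the role of the locked
// clear-child-tid field, and `wake_waiters` the role of the follow-up
// wake-up work (like `wake_robust_list`).
fn exit_thread(ctid_slot: &Mutex<usize>, wake_waiters: impl Fn()) {
    let mut ctid = ctid_slot.lock().unwrap();
    *ctid = 0;
    // Release the guard before the wake-up call so the critical section
    // does not span work that may contend on related state.
    drop(ctid);
    wake_waiters();
}
```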
@@ -107,7 +107,7 @@ impl<R> Vmar<R> {

 pub(super) struct Vmar_ {
     /// VMAR inner
-    inner: Mutex<VmarInner>,
+    inner: RwMutex<VmarInner>,
     /// The offset relative to the root VMAR
     base: Vaddr,
     /// The total size of the VMAR in bytes
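This hunk is the core of the commit: `Vmar_::inner` changes from `Mutex<VmarInner>` to `RwMutex<VmarInner>` (and, further down, `VmMapping::inner` from `Mutex` to `RwLock`), so read-only paths such as address queries and page-fault lookups can proceed concurrently while mutations still take an exclusive guard. A minimal sketch of the access-pattern split, using `std::sync::RwLock` as a stand-in for OSTD's `RwMutex`; the types and methods here are illustrative only:

```rust
use std::sync::RwLock;

struct VmarInner {
    size: usize,
}

struct Vmar {
    inner: RwLock<VmarInner>,
}

impl Vmar {
    // Read-only paths now take a shared guard; many may run in parallel.
    fn size(&self) -> usize {
        self.inner.read().unwrap().size
    }

    // Mutating paths take an exclusive guard, as `.lock()` did before.
    fn grow(&self, extra: usize) {
        self.inner.write().unwrap().size += extra;
    }
}
```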
@@ -211,7 +211,7 @@ impl Vmar_ {
         };

         Arc::new(Vmar_ {
-            inner: Mutex::new(inner),
+            inner: RwMutex::new(inner),
             base,
             size,
             vm_space,
@@ -249,7 +249,7 @@ impl Vmar_ {
     // Do real protect. The protected range is ensured to be mapped.
     fn do_protect_inner(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
         let protect_mappings: Vec<Arc<VmMapping>> = {
-            let inner = self.inner.lock();
+            let inner = self.inner.read();
             inner
                 .vm_mappings
                 .find(&range)
@@ -265,7 +265,7 @@ impl Vmar_ {
             vm_mapping.protect(perms, intersected_range)?;
         }

-        for child_vmar_ in self.inner.lock().child_vmar_s.find(&range) {
+        for child_vmar_ in self.inner.read().child_vmar_s.find(&range) {
             let child_vmar_range = child_vmar_.range();
             debug_assert!(is_intersected(&child_vmar_range, &range));
             let intersected_range = get_intersected_range(&range, &child_vmar_range);
@@ -284,7 +284,7 @@ impl Vmar_ {
         assert!(range.end <= self.base + self.size);

         // The protected range should not intersect with any free region
-        let inner = self.inner.lock();
+        let inner = self.inner.read();
         if inner.free_regions.find(range).into_iter().next().is_some() {
             return_errno_with_message!(Errno::EACCES, "protected range is not fully mapped");
         }
@@ -307,7 +307,7 @@ impl Vmar_ {
             return_errno_with_message!(Errno::EACCES, "page fault addr is not in current vmar");
         }

-        let inner = self.inner.lock();
+        let inner = self.inner.read();
         if let Some(child_vmar) = inner.child_vmar_s.find_one(&address) {
             debug_assert!(child_vmar.range().contains(&address));
             return child_vmar.handle_page_fault(page_fault_info);
@@ -329,7 +329,7 @@ impl Vmar_ {
             return_errno_with_message!(Errno::EACCES, "The vmar is not root vmar");
         }
         self.clear_vm_space();
-        let mut inner = self.inner.lock();
+        let mut inner = self.inner.write();
         inner.child_vmar_s.clear();
         inner.vm_mappings.clear();
         inner.free_regions.clear();
@@ -345,7 +345,7 @@ impl Vmar_ {

     pub fn destroy(&self, range: Range<usize>) -> Result<()> {
         self.check_destroy_range(&range)?;
-        let mut inner = self.inner.lock();
+        let mut inner = self.inner.write();
         let mut free_regions = BTreeMap::new();

         for child_vmar_ in inner.child_vmar_s.find(&range) {
@@ -414,7 +414,7 @@ impl Vmar_ {
         }

         let last_mapping = {
-            let inner = self.inner.lock();
+            let inner = self.inner.read();
             inner
                 .vm_mappings
                 .find_one(&(old_map_end - 1))
@@ -437,7 +437,7 @@ impl Vmar_ {
         debug_assert!(range.start % PAGE_SIZE == 0);
         debug_assert!(range.end % PAGE_SIZE == 0);

-        let inner = self.inner.lock();
+        let inner = self.inner.read();

         for child_vmar_ in inner.child_vmar_s.find(range) {
             let child_vmar_range = child_vmar_.range();
@@ -456,12 +456,12 @@ impl Vmar_ {
     }

     fn is_destroyed(&self) -> bool {
-        self.inner.lock().is_destroyed
+        self.inner.read().is_destroyed
     }

     fn merge_continuous_regions(&self) {
         let mut new_free_regions = BTreeMap::new();
-        let mut inner = self.inner.lock();
+        let mut inner = self.inner.write();
         let keys = inner.free_regions.keys().cloned().collect::<Vec<_>>();
         for key in keys {
             if let Some(mut free_region) = inner.free_regions.remove(&key) {
@@ -486,15 +486,20 @@ impl Vmar_ {
     ) -> Result<Arc<Vmar_>> {
         let (region_base, child_vmar_offset) =
             self.inner
-                .lock()
+                .write()
                 .find_free_region(child_vmar_offset, child_vmar_size, align)?;
         // This unwrap should never fails
-        let free_region = self.inner.lock().free_regions.remove(&region_base).unwrap();
+        let free_region = self
+            .inner
+            .write()
+            .free_regions
+            .remove(&region_base)
+            .unwrap();
         let child_range = child_vmar_offset..(child_vmar_offset + child_vmar_size);
         let regions_after_allocation = free_region.allocate_range(child_range.clone());
         regions_after_allocation.into_iter().for_each(|region| {
             self.inner
-                .lock()
+                .write()
                 .free_regions
                 .insert(region.start(), region);
         });
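Note that this hunk acquires the write lock several times in sequence: once for `find_free_region`, once to remove the chosen region, and once per re-inserted remainder. Each release opens a window for other writers between the steps, which the surrounding logic has to tolerate. For contrast, a sketch of doing the lookup-and-remove under a single guard; `Inner` and `take_region` are illustrative names over a simplified `BTreeMap` table, not the kernel's types:

```rust
use std::collections::BTreeMap;
use std::sync::RwLock;

// Simplified stand-in for the free-region table: base address -> size.
struct Inner {
    free_regions: BTreeMap<usize, usize>,
}

// Holding one `write()` guard across both steps keeps them atomic with
// respect to other writers; re-acquiring between steps does not.
fn take_region(inner: &RwLock<Inner>, base: usize) -> Option<(usize, usize)> {
    let mut guard = inner.write().unwrap();
    let size = guard.free_regions.remove(&base)?;
    Some((base, size))
}
```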
@@ -515,14 +520,14 @@ impl Vmar_ {
             Some(self),
         );
         self.inner
-            .lock()
+            .write()
             .child_vmar_s
             .insert(child_vmar_.base, child_vmar_.clone());
         Ok(child_vmar_)
     }

     fn check_overwrite(&self, mapping_range: Range<usize>, can_overwrite: bool) -> Result<()> {
-        let inner = self.inner.lock();
+        let inner = self.inner.read();
         if inner
             .child_vmar_s
             .find(&mapping_range)
@@ -561,7 +566,7 @@ impl Vmar_ {
     /// Maps a `VmMapping` to this VMAR.
     fn add_mapping(&self, mapping: Arc<VmMapping>) {
         self.inner
-            .lock()
+            .write()
             .vm_mappings
             .insert(mapping.map_to_addr(), mapping);
     }
@@ -576,7 +581,7 @@ impl Vmar_ {
         trace!("allocate free region, map_size = 0x{:x}, offset = {:x?}, align = 0x{:x}, can_overwrite = {}", map_size, offset, align, can_overwrite);

         if can_overwrite {
-            let mut inner = self.inner.lock();
+            let mut inner = self.inner.write();
             // If can overwrite, the offset is ensured not to be `None`.
             let offset = offset.ok_or(Error::with_message(
                 Errno::EINVAL,
@@ -606,7 +611,7 @@ impl Vmar_ {
             Ok(offset)
         } else {
             // Otherwise, the mapping in a single region.
-            let mut inner = self.inner.lock();
+            let mut inner = self.inner.write();
             let (free_region_base, offset) = inner.find_free_region(offset, map_size, align)?;
             let free_region = inner.free_regions.remove(&free_region_base).unwrap();
             let mapping_range = offset..(offset + map_size);
@@ -620,7 +625,7 @@ impl Vmar_ {
     }

     fn trim_existing_mappings(&self, trim_range: Range<usize>) -> Result<()> {
-        let mut inner = self.inner.lock();
+        let mut inner = self.inner.write();
         let mut mappings_to_remove = LinkedList::new();
         let mut mappings_to_append = LinkedList::new();
         for vm_mapping in inner.vm_mappings.find(&trim_range) {
@@ -666,8 +671,8 @@ impl Vmar_ {
             Vmar_::new(vmar_inner, vm_space, self.base, self.size, parent)
         };

-        let inner = self.inner.lock();
-        let mut new_inner = new_vmar_.inner.lock();
+        let inner = self.inner.read();
+        let mut new_inner = new_vmar_.inner.write();

         // Clone free regions.
         for (free_region_base, free_region) in &inner.free_regions {
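The hunk above (the fork path) holds a shared guard on the source VMAR while holding an exclusive guard on the newly built one; since the new VMAR is not yet shared with anyone, taking both at once cannot deadlock. A minimal sketch of that shape, with illustrative names over `std::sync::RwLock`:

```rust
use std::collections::BTreeMap;
use std::sync::RwLock;

struct Inner {
    free_regions: BTreeMap<usize, usize>,
}

// Copy state from an existing VMAR into a brand-new one: a shared guard
// suffices on the source, while the destination takes an exclusive guard.
fn clone_into(src: &RwLock<Inner>, dst: &RwLock<Inner>) {
    let src_inner = src.read().unwrap();
    let mut dst_inner = dst.write().unwrap();
    for (base, size) in &src_inner.free_regions {
        dst_inner.free_regions.insert(*base, *size);
    }
}
```

The remaining hunks apply the same treatment to the `VmMapping` side, starting with its imports.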
@@ -10,9 +10,12 @@ use core::{

 use align_ext::AlignExt;
 use aster_rights::Rights;
-use ostd::mm::{
-    tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, Frame, FrameAllocOptions, PageFlags,
-    PageProperty, VmSpace,
+use ostd::{
+    mm::{
+        tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, Frame, FrameAllocOptions, PageFlags,
+        PageProperty, VmSpace,
+    },
+    sync::RwLockReadGuard,
 };

 use super::{interval::Interval, is_intersected, Vmar, Vmar_};
@@ -39,7 +42,7 @@ use crate::{
 ///
 /// Such mappings will also be VMO-backed mappings.
 pub(super) struct VmMapping {
-    inner: Mutex<VmMappingInner>,
+    inner: RwLock<VmMappingInner>,
     /// The parent VMAR. The parent should always point to a valid VMAR.
     parent: Weak<Vmar_>,
     /// Specific physical pages that need to be mapped.
@@ -57,10 +60,10 @@ pub(super) struct VmMapping {

 impl VmMapping {
     pub fn try_clone(&self) -> Result<Self> {
-        let inner = self.inner.lock().clone();
+        let inner = self.inner.read().clone();
         let vmo = self.vmo.as_ref().map(|vmo| vmo.dup()).transpose()?;
         Ok(Self {
-            inner: Mutex::new(inner),
+            inner: RwLock::new(inner),
             parent: self.parent.clone(),
             vmo,
             is_shared: self.is_shared,
@@ -135,7 +138,7 @@ impl VmMapping {
         };

         Ok(Self {
-            inner: Mutex::new(vm_mapping_inner),
+            inner: RwLock::new(vm_mapping_inner),
             parent: Arc::downgrade(&parent_vmar),
             vmo,
             is_shared,
@@ -156,7 +159,7 @@ impl VmMapping {
         let partial_mapping = Arc::new(self.try_clone()?);
         // Adjust the mapping range and the permission.
         {
-            let mut inner = partial_mapping.inner.lock();
+            let mut inner = partial_mapping.inner.write();
             inner.shrink_to(range);
             if let Some(perms) = new_perms {
                 inner.perms = perms;
@@ -171,29 +174,29 @@ impl VmMapping {

     /// Returns the mapping's start address.
     pub fn map_to_addr(&self) -> Vaddr {
-        self.inner.lock().map_to_addr
+        self.inner.read().map_to_addr
     }

     /// Returns the mapping's end address.
     pub fn map_end(&self) -> Vaddr {
-        let inner = self.inner.lock();
+        let inner = self.inner.read();
         inner.map_to_addr + inner.map_size
     }

     /// Returns the mapping's size.
     pub fn map_size(&self) -> usize {
-        self.inner.lock().map_size
+        self.inner.read().map_size
     }

     /// Unmaps pages in the range
     pub fn unmap(&self, range: &Range<usize>, may_destroy: bool) -> Result<()> {
         let parent = self.parent.upgrade().unwrap();
         let vm_space = parent.vm_space();
-        self.inner.lock().unmap(vm_space, range, may_destroy)
+        self.inner.write().unmap(vm_space, range, may_destroy)
     }

     pub fn is_destroyed(&self) -> bool {
-        self.inner.lock().is_destroyed
+        self.inner.read().is_destroyed
     }

     /// Returns whether the mapping is a shared mapping.
@@ -202,7 +205,7 @@ impl VmMapping {
     }

     pub fn enlarge(&self, extra_size: usize) {
-        self.inner.lock().map_size += extra_size;
+        self.inner.write().map_size += extra_size;
     }

     pub fn handle_page_fault(&self, page_fault_info: &PageFaultInfo) -> Result<()> {
@@ -261,17 +264,19 @@ impl VmMapping {
             VmItem::NotMapped { .. } => {
                 // Map a new frame to the page fault address.

-                let inner_lock = self.inner.lock();
-                let (frame, is_readonly) = self.prepare_page(&inner_lock, address, is_write)?;
+                let inner = self.inner.read();
+                let (frame, is_readonly) = self.prepare_page(&inner, address, is_write)?;

                 let vm_perms = {
-                    let mut perms = inner_lock.perms;
+                    let mut perms = inner.perms;
                     if is_readonly {
                         // COW pages are forced to be read-only.
                         perms -= VmPerms::WRITE;
                     }
                     perms
                 };
+                drop(inner);

                 let mut page_flags = vm_perms.into();
                 page_flags |= PageFlags::ACCESSED;
                 if is_write {
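Besides switching the fault path to a shared guard, the hunk adds an explicit `drop(inner)` once the needed permissions have been copied out, so the guard is not held across the actual mapping work. A sketch of the shape, with illustrative names (`MappingInner`, `map_page`) over `std::sync::RwLock`:

```rust
use std::sync::RwLock;

struct MappingInner {
    perms: u8, // simplified stand-in for `VmPerms`
}

// Copy out what the fault handler needs under a short-lived read guard,
// then release it before the (potentially slow) mapping work so other
// faults on the same mapping are not serialized behind this one.
fn handle_fault(inner: &RwLock<MappingInner>, map_page: impl Fn(u8)) {
    let guard = inner.read().unwrap();
    let perms = guard.perms;
    drop(guard); // mirrors the `drop(inner)` added above
    map_page(perms);
}
```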
@@ -288,7 +293,7 @@ impl VmMapping {

     fn prepare_page(
         &self,
-        inner_lock: &MutexGuard<VmMappingInner>,
+        mapping_inner: &RwLockReadGuard<VmMappingInner>,
         page_fault_addr: Vaddr,
         write: bool,
     ) -> Result<(Frame, bool)> {
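`prepare_page` now takes a reference to the read guard itself rather than a `MutexGuard`: the parameter type documents, and the compiler enforces, that the caller still holds the lock for the duration of the call. A sketch of the idiom using `std::sync::RwLockReadGuard` (field and function names are illustrative):

```rust
use std::sync::{RwLock, RwLockReadGuard};

struct MappingInner {
    vmo_offset: usize,
}

// Taking `&RwLockReadGuard` rather than `&MappingInner` means this helper
// can only ever be called on locked state; the guard derefs to the inner
// data transparently.
fn page_offset(inner: &RwLockReadGuard<'_, MappingInner>, addr: usize, base: usize) -> usize {
    inner.vmo_offset + addr - base
}

fn caller(lock: &RwLock<MappingInner>) -> usize {
    let guard = lock.read().unwrap();
    page_offset(&guard, 0x2000, 0x1000)
}
```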
@@ -297,7 +302,8 @@ impl VmMapping {
             return Ok((FrameAllocOptions::new(1).alloc_single()?, is_readonly));
         };

-        let vmo_offset = inner_lock.vmo_offset.unwrap() + page_fault_addr - inner_lock.map_to_addr;
+        let vmo_offset =
+            mapping_inner.vmo_offset.unwrap() + page_fault_addr - mapping_inner.map_to_addr;
         let page_idx = vmo_offset / PAGE_SIZE;
         let Ok(page) = vmo.get_committed_frame(page_idx) else {
             if !self.is_shared {
@@ -328,7 +334,7 @@ impl VmMapping {
         const SURROUNDING_PAGE_NUM: usize = 16;
         const SURROUNDING_PAGE_ADDR_MASK: usize = !(SURROUNDING_PAGE_NUM * PAGE_SIZE - 1);

-        let inner = self.inner.lock();
+        let inner = self.inner.read();
         let vmo_offset = inner.vmo_offset.unwrap();
         let vmo = self.vmo().unwrap();
         let around_page_addr = page_fault_addr & SURROUNDING_PAGE_ADDR_MASK;
@@ -376,7 +382,7 @@ impl VmMapping {
     /// it should not be called during the direct iteration of the `vm_mappings`.
     pub(super) fn protect(&self, new_perms: VmPerms, range: Range<usize>) -> Result<()> {
         // If `new_perms` is equal to `old_perms`, `protect()` will not modify any permission in the VmMapping.
-        let old_perms = self.inner.lock().perms;
+        let old_perms = self.inner.read().perms;
         if old_perms == new_perms {
             return Ok(());
         }
@@ -386,16 +392,16 @@ impl VmMapping {
         // Protect permission in the VmSpace.
         let vmar = self.parent.upgrade().unwrap();
         let vm_space = vmar.vm_space();
-        self.inner.lock().protect(vm_space, new_perms, range)?;
+        self.inner.write().protect(vm_space, new_perms, range)?;

         Ok(())
     }

     pub(super) fn new_fork(&self, new_parent: &Arc<Vmar_>) -> Result<VmMapping> {
-        let new_inner = self.inner.lock().clone();
+        let new_inner = self.inner.read().clone();

         Ok(VmMapping {
-            inner: Mutex::new(new_inner),
+            inner: RwLock::new(new_inner),
             parent: Arc::downgrade(new_parent),
             vmo: self.vmo.as_ref().map(|vmo| vmo.dup()).transpose()?,
             is_shared: self.is_shared,
|
|||||||
let range = self.range();
|
let range = self.range();
|
||||||
// Condition 4, the `additional_mappings` will be empty.
|
// Condition 4, the `additional_mappings` will be empty.
|
||||||
if range.start == intersect_range.start && range.end == intersect_range.end {
|
if range.start == intersect_range.start && range.end == intersect_range.end {
|
||||||
self.inner.lock().perms = perms;
|
self.inner.write().perms = perms;
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
// Condition 1 or 3, which needs an additional new VmMapping with range (range.start..intersect_range.start)
|
// Condition 1 or 3, which needs an additional new VmMapping with range (range.start..intersect_range.start)
|
||||||
@@ -451,7 +457,7 @@ impl VmMapping {

         // Begin to modify the `Vmar`.
         let vmar = self.parent.upgrade().unwrap();
-        let mut vmar_inner = vmar.inner.lock();
+        let mut vmar_inner = vmar.inner.write();
         // Remove the original mapping.
         vmar_inner.vm_mappings.remove(&self.map_to_addr());
         // Add protected mappings to the vmar.
@@ -522,18 +528,18 @@ impl VmMapping {
     fn trim_left(&self, vaddr: Vaddr) -> Result<Vaddr> {
         let vmar = self.parent.upgrade().unwrap();
         let vm_space = vmar.vm_space();
-        self.inner.lock().trim_left(vm_space, vaddr)
+        self.inner.write().trim_left(vm_space, vaddr)
     }

     /// Trims the mapping from right to a new address.
     fn trim_right(&self, vaddr: Vaddr) -> Result<Vaddr> {
         let vmar = self.parent.upgrade().unwrap();
         let vm_space = vmar.vm_space();
-        self.inner.lock().trim_right(vm_space, vaddr)
+        self.inner.write().trim_right(vm_space, vaddr)
     }

     fn check_perms(&self, perms: &VmPerms) -> Result<()> {
-        self.inner.lock().check_perms(perms)
+        self.inner.read().check_perms(perms)
     }
 }
@@ -81,8 +81,8 @@ where
         let page: Page<PageTablePageMeta<E, C>> = self.into();

         // Acquire the lock.
-        while page
-            .meta()
+        let meta = page.meta();
+        while meta
             .lock
             .compare_exchange(0, 1, Ordering::Acquire, Ordering::Relaxed)
             .is_err()
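This last hunk only hoists `page.meta()` into a local before the spin loop; the lock itself is unchanged: a spinlock built on an atomic lock word, where 0 means unlocked and 1 means held, acquired with a compare-exchange. A minimal sketch of that acquire/release pair (the `Meta` type is illustrative):

```rust
use std::sync::atomic::{AtomicU8, Ordering};

struct Meta {
    lock: AtomicU8, // 0 = unlocked, 1 = held
}

// Spin until the compare-exchange flips the lock word from 0 to 1.
// `Acquire` on success orders the critical section after the acquisition.
fn acquire(meta: &Meta) {
    while meta
        .lock
        .compare_exchange(0, 1, Ordering::Acquire, Ordering::Relaxed)
        .is_err()
    {
        std::hint::spin_loop();
    }
}

// `Release` makes the critical section's writes visible to the next owner.
fn release(meta: &Meta) {
    meta.lock.store(0, Ordering::Release);
}
```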