Mirror of https://github.com/asterinas/asterinas.git, synced 2025-06-27 03:13:23 +00:00

Refactor Vmar and VmMapping.

Co-authored-by: Zhang Junyang <junyang@stu.pku.edu.cn>
Committed by: Tate, Hongliang Tian
parent e453649d78 · commit 4ea3e49788
@@ -34,21 +34,33 @@ impl Heap {
         }
     }
 
-    /// Inits and maps the heap Vmo
-    pub(super) fn alloc_and_map_vmo(&self, root_vmar: &Vmar<Full>) -> Result<()> {
+    /// Initializes and maps the heap virtual memory.
+    pub(super) fn alloc_and_map_vm(&self, root_vmar: &Vmar<Full>) -> Result<()> {
         let vmar_map_options = {
             let perms = VmPerms::READ | VmPerms::WRITE;
             root_vmar
                 // FIXME: Our current implementation of mapping resize cannot move
                 // existing mappings within the new range, which may cause the resize
                 // operation to fail. Therefore, if there are already mappings within
                 // the heap expansion range, the brk operation will fail.
                 .new_map(PAGE_SIZE, perms)
                 .unwrap()
                 .offset(self.base)
         };
         vmar_map_options.build()?;
 
         // If we touch another mapped range when we are trying to expand the
         // heap, we fail.
         //
         // So a simple solution is to reserve enough space for the heap by
         // mapping without any permissions and allow it to be overwritten
         // later by `brk`. New mappings from `mmap` that overlaps this range
         // may be moved to another place.
         let vmar_reserve_options = {
             let perms = VmPerms::empty();
             root_vmar
                 .new_map(USER_HEAP_SIZE_LIMIT - PAGE_SIZE, perms)
                 .unwrap()
                 .offset(self.base + PAGE_SIZE)
         };
         vmar_reserve_options.build()?;
 
         self.set_uninitialized();
         Ok(())
     }
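The reserve-then-commit idea in the new `alloc_and_map_vm` (map one read/write page at the heap base, then cover the rest of the heap range with an empty-permission mapping that later `brk` calls overwrite) can be tried out in user space with plain POSIX calls. The sketch below is only an analogue, not Asterinas code: it assumes the `libc` crate, `ToyHeap` and its constants are made up for illustration, and `mmap`/`mprotect` stand in for the kernel's `Vmar` API.

// User-space analogue of the reserve-then-commit heap strategy.
// Assumes the `libc` crate; the constants are illustrative, not Asterinas'.
use std::io;

const PAGE_SIZE: usize = 4096;
const HEAP_SIZE_LIMIT: usize = 16 * 1024 * 1024;

struct ToyHeap {
    base: *mut libc::c_void,
    committed: usize, // bytes currently usable (read/write)
}

impl ToyHeap {
    /// Reserves the whole heap range with PROT_NONE and commits one page,
    /// mirroring the PAGE_SIZE mapping plus the empty-permission reservation.
    fn new() -> io::Result<Self> {
        let base = unsafe {
            libc::mmap(
                std::ptr::null_mut(),
                HEAP_SIZE_LIMIT,
                libc::PROT_NONE, // reserved, but not yet usable
                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
                -1,
                0,
            )
        };
        if base == libc::MAP_FAILED {
            return Err(io::Error::last_os_error());
        }
        if unsafe { libc::mprotect(base, PAGE_SIZE, libc::PROT_READ | libc::PROT_WRITE) } != 0 {
            return Err(io::Error::last_os_error());
        }
        Ok(Self { base, committed: PAGE_SIZE })
    }

    /// brk-like growth: turns more of the reserved range into usable memory.
    /// Shrinking is ignored, as in the kernel code above.
    fn brk(&mut self, new_size: usize) -> io::Result<usize> {
        if new_size <= self.committed {
            return Ok(self.committed);
        }
        if new_size > HEAP_SIZE_LIMIT {
            return Err(io::Error::from_raw_os_error(libc::ENOMEM));
        }
        let new_size = new_size.next_multiple_of(PAGE_SIZE);
        if unsafe { libc::mprotect(self.base, new_size, libc::PROT_READ | libc::PROT_WRITE) } != 0 {
            return Err(io::Error::last_os_error());
        }
        self.committed = new_size;
        Ok(self.committed)
    }
}

fn main() -> io::Result<()> {
    let mut heap = ToyHeap::new()?;
    // Growing never collides with other mappings: the range is already ours.
    println!("heap end: {}", heap.brk(3 * PAGE_SIZE + 123)?);
    Ok(())
}

The property the kernel comment relies on is the same: because the whole heap range is claimed up front, later growth can only ever overwrite the reserved region, never an unrelated mapping.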
@@ -63,14 +75,24 @@ impl Heap {
             return_errno_with_message!(Errno::ENOMEM, "heap size limit was met.");
         }
         let current_heap_end = self.current_heap_end.load(Ordering::Acquire);
 
         if new_heap_end <= current_heap_end {
             // FIXME: should we allow shrink current user heap?
             return Ok(current_heap_end);
         }
-        let old_size = (current_heap_end - self.base).align_up(PAGE_SIZE);
-        let new_size = (new_heap_end - self.base).align_up(PAGE_SIZE);
+        let current_heap_end = current_heap_end.align_up(PAGE_SIZE);
+        let new_heap_end = new_heap_end.align_up(PAGE_SIZE);
+
+        // Remove the reserved space.
+        root_vmar.remove_mapping(current_heap_end..new_heap_end)?;
+
+        let old_size = current_heap_end - self.base;
+        let new_size = new_heap_end - self.base;
+
+        // Expand the heap.
+        root_vmar.resize_mapping(self.base, old_size, new_size)?;
 
         self.current_heap_end.store(new_heap_end, Ordering::Release);
         Ok(new_heap_end)
     }
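To make the arithmetic in the rewritten `brk` path concrete, here is a stand-alone sketch with made-up numbers (the base address and the requested heap end are assumptions, and the free function `align_up` merely mimics the page-alignment step): both ends are rounded up to page boundaries, the gap between them is the reserved range handed to `remove_mapping`, and the heap mapping then grows from `old_size` to `new_size`.

// Worked example of the brk arithmetic above, with made-up numbers.
const PAGE_SIZE: usize = 4096;

/// Stand-in for the page-alignment helper used in the kernel code.
fn align_up(value: usize, align: usize) -> usize {
    value.next_multiple_of(align)
}

fn main() {
    let base = 0x1000_0000usize; // hypothetical, page-aligned heap base
    let current_heap_end = base + 5000; // heap currently ends mid-page
    let new_heap_end = base + 20000; // end requested by brk()

    // Round both ends up to page boundaries, as the new code does.
    let current_heap_end = align_up(current_heap_end, PAGE_SIZE); // base + 8192
    let new_heap_end = align_up(new_heap_end, PAGE_SIZE); // base + 20480

    // This is the reserved, permission-less range that gets unmapped first.
    println!("remove reserved range {:#x}..{:#x}", current_heap_end, new_heap_end);

    // Then the read/write heap mapping is resized from old_size to new_size.
    let old_size = current_heap_end - base; // 8192 bytes  (2 pages)
    let new_size = new_heap_end - base; // 20480 bytes (5 pages)
    println!("resize heap mapping: {old_size} -> {new_size} bytes");
}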
@@ -84,7 +84,7 @@ impl ProcessVm {
         let root_vmar = Vmar::<Full>::new_root();
         let init_stack = InitStack::new();
         let heap = Heap::new();
-        heap.alloc_and_map_vmo(&root_vmar).unwrap();
+        heap.alloc_and_map_vm(&root_vmar).unwrap();
         Self {
             root_vmar,
             heap,
@@ -136,6 +136,6 @@ impl ProcessVm {
     /// Clears existing mappings and then maps stack and heap vmo.
     pub(super) fn clear_and_map(&self) {
         self.root_vmar.clear().unwrap();
-        self.heap.alloc_and_map_vmo(&self.root_vmar).unwrap();
+        self.heap.alloc_and_map_vm(&self.root_vmar).unwrap();
     }
 }