fix mapping cow vmo's parent's pages with write access

Author: Jianfeng Jiang
Date: 2023-03-29 05:25:13 -04:00
Committed by: Tate, Hongliang Tian
Parent: d19dc09563
Commit: 7bee03f91f

7 changed files with 37 additions and 28 deletions

View File

@@ -6,7 +6,7 @@ SCRIPT_DIR=/scripts
 cd ${SCRIPT_DIR}/..
 echo "Running tests......"
-tests="hello_world/hello_world fork/fork execve/execve fork_c/fork signal_c/signal_test pthread/pthread_test"
+tests="hello_world/hello_world fork/fork execve/execve fork_c/fork signal_c/signal_test pthread/pthread_test hello_pie/hello"
 for testcase in ${tests}
 do

View File

@@ -56,7 +56,7 @@ impl Pager for PageCacheManager {
             page.frame()
         } else {
             let page = if offset < self.backed_inode.upgrade().unwrap().metadata().size {
-                let mut page = Page::alloc()?;
+                let mut page = Page::alloc_zero()?;
                 self.backed_inode
                     .upgrade()
                     .unwrap()
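A minimal, standalone sketch of why the page cache switches to a zeroed allocation: when the file ends mid-page, the bytes past EOF must read back as zeros rather than stale frame contents. `fill_tail_page` below is a hypothetical helper, not the kernel's Page API.

const PAGE_SIZE: usize = 4096;

fn fill_tail_page(file_bytes: &[u8], offset: usize) -> [u8; PAGE_SIZE] {
    let mut page = [0u8; PAGE_SIZE]; // analogous to Page::alloc_zero()
    let end = file_bytes.len().min(offset + PAGE_SIZE);
    if offset < end {
        page[..end - offset].copy_from_slice(&file_bytes[offset..end]);
    }
    page
}

fn main() {
    let file = vec![0xAB_u8; 5000]; // file size is not page-aligned
    let page = fill_tail_page(&file, 4096);
    assert_eq!(page[..904], file[4096..]); // 5000 - 4096 = 904 valid bytes
    assert!(page[904..].iter().all(|&b| b == 0)); // the rest must read as zero
}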

View File

@@ -124,6 +124,6 @@ fn mmap_filebacked_vmo(
         vm_map_options = vm_map_options.offset(addr).can_overwrite(true);
     }
     let map_addr = vm_map_options.build()?;
-    debug!("map addr = 0x{:x}", map_addr);
+    trace!("map range = 0x{:x} - 0x{:x}", map_addr, map_addr + len);
     Ok(map_addr)
 }

View File

@@ -1,3 +1,5 @@
+use jinux_frame::AlignExt;
 use crate::{log_syscall_entry, prelude::*};
 use crate::syscall::SYS_MPROTECT;
@@ -15,6 +17,8 @@ pub fn sys_mprotect(addr: Vaddr, len: usize, perms: u64) -> Result<SyscallReturn
     );
     let current = current!();
     let root_vmar = current.root_vmar();
+    debug_assert!(addr % PAGE_SIZE == 0);
+    let len = len.align_up(PAGE_SIZE);
     let range = addr..(addr + len);
     root_vmar.protect(vm_perms, range)?;
     Ok(SyscallReturn::Return(0))
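A minimal sketch of the page rounding added above; this local `align_up` is a stand-in for jinux_frame::AlignExt::align_up, assumed here to round up to the next multiple of a power-of-two alignment.

const PAGE_SIZE: usize = 4096;

fn align_up(x: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (x + align - 1) & !(align - 1)
}

fn main() {
    // mprotect must cover whole pages, so a short length still protects the
    // entire page that contains it.
    assert_eq!(align_up(1, PAGE_SIZE), PAGE_SIZE);
    assert_eq!(align_up(PAGE_SIZE, PAGE_SIZE), PAGE_SIZE); // already aligned: unchanged
    assert_eq!(align_up(PAGE_SIZE + 1, PAGE_SIZE), 2 * PAGE_SIZE);
}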

View File

@@ -27,7 +27,7 @@ pub fn do_sys_writev(fd: u64, io_vec_ptr: Vaddr, io_vec_count: usize) -> Result<
     );
     let mut write_len = 0;
     for i in 0..io_vec_count {
-        let io_vec = read_val_from_user::<IoVec>(io_vec_ptr + i * 8)?;
+        let io_vec = read_val_from_user::<IoVec>(io_vec_ptr + i * 16)?;
         let base = io_vec.base;
         let len = io_vec.len;
         let mut buffer = vec![0u8; len];
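A standalone sketch of why the stride changes from 8 to 16: each userspace iovec entry holds a base pointer and a length, 8 bytes each on x86-64, so the i-th entry starts 16 * i bytes past io_vec_ptr. The struct below only mirrors the kernel's IoVec fields for illustration; its exact field types are an assumption.

use core::mem::size_of;

#[repr(C)]
struct IoVec {
    base: usize, // user buffer address
    len: usize,  // buffer length in bytes
}

fn main() {
    assert_eq!(size_of::<IoVec>(), 16);
    // With a stride of 8, entry 1 would be read starting at the middle of
    // entry 0 (its `len` field), yielding a bogus base pointer.
    let io_vec_ptr: usize = 0x7000_0000; // hypothetical user address
    let entry_1_addr = io_vec_ptr + size_of::<IoVec>();
    assert_eq!(entry_1_addr, io_vec_ptr + 16);
}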

View File

@ -322,6 +322,7 @@ impl Vmar_ {
.vm_mappings .vm_mappings
.retain(|_, vm_mapping| !vm_mapping.is_destroyed()); .retain(|_, vm_mapping| !vm_mapping.is_destroyed());
inner.free_regions.append(&mut free_regions); inner.free_regions.append(&mut free_regions);
drop(inner);
self.merge_continuous_regions(); self.merge_continuous_regions();
Ok(()) Ok(())
} }
@ -691,7 +692,6 @@ impl Vmar_ {
/// get mapped vmo at given offset /// get mapped vmo at given offset
pub fn get_vm_mapping(&self, offset: Vaddr) -> Result<Arc<VmMapping>> { pub fn get_vm_mapping(&self, offset: Vaddr) -> Result<Arc<VmMapping>> {
debug!("get vm mapping, offset = 0x{:x}", offset);
for (vm_mapping_base, vm_mapping) in &self.inner.lock().vm_mappings { for (vm_mapping_base, vm_mapping) in &self.inner.lock().vm_mappings {
if *vm_mapping_base <= offset && offset < *vm_mapping_base + vm_mapping.map_size() { if *vm_mapping_base <= offset && offset < *vm_mapping_base + vm_mapping.map_size() {
return Ok(vm_mapping.clone()); return Ok(vm_mapping.clone());
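A standalone sketch of what the added `drop(inner)` guards against, assuming merge_continuous_regions() re-locks `self.inner` (which the placement of the drop suggests): with a non-reentrant lock, calling it while the guard is still alive would deadlock or panic. The struct below is a simplified stand-in, not the kernel's Vmar_.

use std::sync::Mutex;

struct Vmar {
    inner: Mutex<Vec<u64>>, // placeholder for the real inner state
}

impl Vmar {
    fn clear_destroyed_and_merge(&self) {
        let mut inner = self.inner.lock().unwrap();
        inner.retain(|&r| r != 0); // mutate while holding the guard
        drop(inner); // release the lock *before* a method that re-locks it
        self.merge_continuous_regions();
    }

    fn merge_continuous_regions(&self) {
        let _inner = self.inner.lock().unwrap(); // locks inner again
        // merging logic elided
    }
}

fn main() {
    let vmar = Vmar { inner: Mutex::new(vec![1, 0, 2]) };
    vmar.clear_destroyed_and_merge(); // without drop(inner), this would not return
    assert_eq!(*vmar.inner.lock().unwrap(), vec![1, 2]);
}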

View File

@@ -94,21 +94,7 @@ impl VmMapping {
         }
         let vm_space = parent_vmar.vm_space();
-        let mut mapped_pages = BTreeSet::new();
-        let mapped_page_idx_range = get_page_idx_range(&(vmo_offset..vmo_offset + real_map_size));
-        let start_page_idx = mapped_page_idx_range.start;
-        for page_idx in mapped_page_idx_range {
-            let mut vm_map_options = VmMapOptions::new();
-            let page_map_addr = map_to_addr + (page_idx - start_page_idx) * PAGE_SIZE;
-            vm_map_options.addr(Some(page_map_addr));
-            vm_map_options.perm(perm.clone());
-            vm_map_options.can_overwrite(can_overwrite);
-            vm_map_options.align(align);
-            if let Ok(frames) = vmo.get_backup_frame(page_idx, false, false) {
-                vm_space.map(frames, &vm_map_options)?;
-                mapped_pages.insert(page_idx);
-            }
-        }
+        let mapped_pages = BTreeSet::new();
         let vm_mapping_inner = VmMappingInner {
             vmo_offset,
@@ -131,11 +117,20 @@
     /// Add a new committed page and map it to vmspace. If copy on write is set, it's allowed to unmap the page at the same address.
     /// FIXME: This implementation based on the truth that we map one page at a time. If multiple pages are mapped together, this implementation may have problems
-    pub(super) fn map_one_page(&self, page_idx: usize, frames: VmFrameVec) -> Result<()> {
+    pub(super) fn map_one_page(
+        &self,
+        page_idx: usize,
+        frames: VmFrameVec,
+        forbid_write_access: bool,
+    ) -> Result<()> {
         let parent = self.parent.upgrade().unwrap();
         let vm_space = parent.vm_space();
         let map_addr = page_idx * PAGE_SIZE + self.map_to_addr();
-        let vm_perm = self.inner.lock().page_perms.get(&page_idx).unwrap().clone();
+        let mut vm_perm = self.inner.lock().page_perms.get(&page_idx).unwrap().clone();
+        if forbid_write_access {
+            debug_assert!(self.vmo.is_cow_child());
+            vm_perm -= VmPerm::W;
+        }
         let mut vm_map_options = VmMapOptions::new();
         vm_map_options.addr(Some(map_addr));
         vm_map_options.perm(vm_perm.clone());
@@ -234,7 +229,13 @@
         // get the backup frame for page
         let frames = self.vmo.get_backup_frame(page_idx, write, true)?;
         // map the page
-        self.map_one_page(page_idx, frames)
+        if self.vmo.is_cow_child() && !write {
+            // Since the read access triggers page fault, the child vmo does not commit a new frame ever.
+            // Note the frame is from parent, so the map must forbid write access.
+            self.map_one_page(page_idx, frames, true)
+        } else {
+            self.map_one_page(page_idx, frames, false)
+        }
     }

     pub(super) fn protect(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
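A standalone sketch of the decision the page-fault path above encodes; the enum and function are illustrative stand-ins, not the kernel's VmPerm/VmMapping API. A COW child that faults on a read is handed the parent's frame, so the mapping must drop write permission; the next write then faults again and commits a private copy.

#[derive(Debug, PartialEq)]
enum WriteAccess {
    Allowed,
    Forbidden, // the frame still belongs to the parent VMO
}

fn cow_map_write_access(is_cow_child: bool, write_fault: bool) -> WriteAccess {
    if is_cow_child && !write_fault {
        // Read fault on a COW child: no private frame has been committed, so
        // mapping the parent's frame writable would let the child corrupt the
        // parent's page.
        WriteAccess::Forbidden
    } else {
        // Either not a COW child, or a write fault that already committed a
        // private frame for the child: the mapping may keep write permission.
        WriteAccess::Allowed
    }
}

fn main() {
    assert_eq!(cow_map_write_access(true, false), WriteAccess::Forbidden);
    assert_eq!(cow_map_write_access(true, true), WriteAccess::Allowed);
    assert_eq!(cow_map_write_access(false, false), WriteAccess::Allowed);
}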
@@ -267,15 +268,18 @@
         let VmMapping { inner, parent, vmo } = self;
         let parent_vmo = vmo.dup().unwrap();
         let vmo_size = parent_vmo.size();
-        debug!("fork vmo in forkmapping, parent size = 0x{:x}", vmo_size);
-        let child_vmo = VmoChildOptions::new_cow(parent_vmo, 0..vmo_size).alloc()?;
-        let parent_vmar = new_parent.upgrade().unwrap();
-        let vm_space = parent_vmar.vm_space();
         let vmo_offset = self.vmo_offset();
         let map_to_addr = self.map_to_addr();
         let map_size = self.map_size();
+        debug!(
+            "fork vmo, parent size = 0x{:x}, map_to_addr = 0x{:x}",
+            vmo_size, map_to_addr
+        );
+        let child_vmo = VmoChildOptions::new_cow(parent_vmo, 0..vmo_size).alloc()?;
+        let parent_vmar = new_parent.upgrade().unwrap();
+        let vm_space = parent_vmar.vm_space();
         let real_map_size = self.map_size().min(child_vmo.size());
         let page_idx_range = get_page_idx_range(&(vmo_offset..vmo_offset + real_map_size));
         let start_page_idx = page_idx_range.start;
@@ -340,6 +344,7 @@
             // fast path: the whole mapping was trimed
             self.unmap(trim_range, true)?;
             mappings_to_remove.insert(map_to_addr);
+            return Ok(());
         }
         let vm_mapping_left = range.start;
         let vm_mapping_right = range.end;