map segments with vnode

Jianfeng Jiang 2023-03-31 01:52:00 -04:00 committed by Tate, Hongliang Tian
parent 7bee03f91f
commit 9ae2ca4c02
10 changed files with 98 additions and 136 deletions

View File

@ -1,6 +1,6 @@
use super::Inode;
use crate::prelude::*;
use crate::rights::Rights;
use crate::rights::Full;
use crate::vm::vmo::{Pager, Vmo, VmoFlags, VmoOptions};
use core::ops::Range;
@ -8,21 +8,21 @@ use jinux_frame::vm::{VmAllocOptions, VmFrame, VmFrameVec};
use lru::LruCache;
pub struct PageCache {
pages: Vmo,
pages: Vmo<Full>,
manager: Arc<PageCacheManager>,
}
impl PageCache {
pub fn new(inode: &Arc<dyn Inode>) -> Result<Self> {
let manager = Arc::new(PageCacheManager::new(Arc::downgrade(inode)));
let pages = VmoOptions::<Rights>::new(inode.len())
let pages = VmoOptions::<Full>::new(inode.len())
.flags(VmoFlags::RESIZABLE)
.pager(manager.clone())
.alloc()?;
Ok(Self { pages, manager })
}
pub fn pages(&self) -> &Vmo {
pub fn pages(&self) -> &Vmo<Full> {
&self.pages
}

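Note: the page cache's VMO is now statically typed with full rights. A minimal sketch (hypothetical helper, not part of this commit) of reading through the cache, assuming the crate prelude is in scope and that Vmo<Full> implements VmIo, as the read_bytes/write_bytes calls later in this diff suggest:

use jinux_frame::vm::VmIo;

// Hypothetical helper; assumes the PageCache type above and Vmo<Full>: VmIo.
fn read_through_cache(cache: &PageCache, offset: usize, buf: &mut [u8]) -> Result<()> {
    // Absent pages are committed on demand by the PageCacheManager pager.
    cache.pages().read_bytes(offset, buf)?;
    Ok(())
}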
View File

@ -1,11 +1,7 @@
use super::{DirentVisitor, FsFlags, Inode, InodeMode, InodeType, Metadata, PageCache};
use crate::prelude::*;
<<<<<<< HEAD
=======
use crate::rights::Full;
use crate::vm::vmo::{Vmo, VmoFlags, VmoOptions};
>>>>>>> support file-backed mmap
use crate::vm::vmo::Vmo;
use alloc::string::String;
use core::time::Duration;
use jinux_frame::vm::VmIo;
@ -23,8 +19,12 @@ struct Inner {
}
impl Vnode {
pub fn page_cache(&self) -> Vmo<Full> {
self.inner.read().page_cache.dup().unwrap()
pub fn page_cache(&self) -> Option<Vmo<Full>> {
self.inner
.read()
.page_cache
.as_ref()
.map(|page_cache| page_cache.pages().dup().unwrap())
}
pub fn new(inode: Arc<dyn Inode>) -> Result<Self> {

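Note: since page_cache() now returns an Option, callers must handle vnodes that have no page cache. A sketch of the calling convention (inside a function returning Result), mirroring the ok_or pattern this commit adds in sys_mmap:

// Sketch only; the Errno choice follows the mmap change later in this diff.
let page_cache_vmo = vnode
    .page_cache()
    .ok_or(Error::with_message(Errno::EBADF, "vnode has no page cache"))?;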
View File

@ -1,85 +0,0 @@
use crate::fs::file_handle::FileHandle;
use crate::fs::utils::SeekFrom;
use crate::prelude::*;
use crate::vm::vmar::{get_intersected_range, is_intersected};
use align_ext::AlignExt;
use jinux_frame::vm::{VmAllocOptions, VmFrameVec, VmIo};
use xmas_elf::program::ProgramHeader64;
use crate::vm::vmo::Pager;
// use super::load_elf::ElfSegment;
/// The pager behind an ELF segment
pub struct ElfSegmentPager {
/// The pager size
pager_size: usize,
/// The backing file
file: Arc<FileHandle>,
/// The segment offset in the backing file
file_offset: usize,
/// The segment size in the backing file
file_size: usize,
/// The offset of the segment data within the first page.
/// The pager always starts at a page-aligned address, while the segment data may start at any address,
/// so the offset is the segment data's start address % PAGE_SIZE.
page_offset: usize,
}
impl ElfSegmentPager {
pub fn new(file: Arc<FileHandle>, program_header: &ProgramHeader64) -> Self {
let ph_start = program_header.virtual_addr as Vaddr;
let ph_end = ph_start + program_header.mem_size as Vaddr;
let start = ph_start.align_down(PAGE_SIZE);
let end = ph_end.align_up(PAGE_SIZE);
let pager_size = end - start;
let offset = ph_start % PAGE_SIZE;
Self {
pager_size,
file,
file_offset: program_header.offset as usize,
file_size: program_header.file_size as usize,
page_offset: offset,
}
}
}
impl Pager for ElfSegmentPager {
fn commit_page(&self, offset: usize) -> Result<jinux_frame::vm::VmFrame> {
if offset >= self.pager_size {
return_errno_with_message!(Errno::EINVAL, "offset exceeds pager size");
}
let vm_alloc_option = VmAllocOptions::new(1);
let mut vm_frames = VmFrameVec::allocate(&vm_alloc_option)?;
vm_frames.zero();
let page_start = offset.align_down(PAGE_SIZE);
let page_end = page_start + PAGE_SIZE;
let page_range = page_start..page_end;
let segment_range = self.page_offset..self.page_offset + self.file_size;
if is_intersected(&page_range, &segment_range) {
let intersected_range = get_intersected_range(&page_range, &segment_range);
let segment_from_file_range = (intersected_range.start - self.page_offset)
..(intersected_range.end - self.page_offset);
let mut segment_data = vec![0u8; segment_from_file_range.len()];
self.file.seek(SeekFrom::Start(
self.file_offset + segment_from_file_range.start,
))?;
self.file.read(&mut segment_data)?;
let write_offset = intersected_range.start % PAGE_SIZE;
vm_frames.write_bytes(write_offset, &segment_data)?;
}
let vm_frame = vm_frames.pop().unwrap();
Ok(vm_frame)
}
fn update_page(&self, offset: usize) -> Result<()> {
unimplemented!()
}
fn decommit_page(&self, offset: usize) -> Result<()> {
unimplemented!()
}
}

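Note: the core of the removed pager is the page/segment intersection in commit_page. A standalone check of that arithmetic with hypothetical numbers (none taken from the commit), assuming PAGE_SIZE = 4096:

const PAGE_SIZE: usize = 4096;

fn main() {
    // Hypothetical segment: data starts 0x100 bytes into the pager
    // and has 0x1800 bytes of file-backed content.
    let page_offset = 0x100usize;
    let file_size = 0x1800usize;
    let segment_range = page_offset..page_offset + file_size; // 0x100..0x1900

    // Committing the first page (offset 0):
    let page_range = 0..PAGE_SIZE; // 0x0..0x1000
    let start = page_range.start.max(segment_range.start); // 0x100
    let end = page_range.end.min(segment_range.end); // 0x1000

    // File bytes 0x0..0xf00 land at frame offset 0x100; the rest stays zeroed.
    assert_eq!(start - page_offset, 0x0);
    assert_eq!(end - page_offset, 0xf00);
}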
View File

@ -1,26 +1,21 @@
//! This module is used to parse ELF file content to get elf_load_info.
//! When creating a process from an ELF file, we use the elf_load_info to construct the VmSpace.
use crate::fs::file_handle::FileHandle;
use crate::fs::fs_resolver::{FsPath, FsResolver, AT_FDCWD};
use crate::fs::utils::AccessMode;
use crate::fs::utils::Dentry;
use crate::process::program_loader::elf::init_stack::{init_aux_vec, InitStack};
use crate::vm::perms::VmPerms;
use crate::vm::vmo::VmoRightsOp;
use crate::{
prelude::*,
rights::Full,
vm::{
vmar::Vmar,
vmo::{Pager, Vmo, VmoOptions},
},
vm::{vmar::Vmar, vmo::Vmo},
};
use align_ext::AlignExt;
use jinux_frame::vm::VmPerm;
use xmas_elf::program::{self, ProgramHeader64};
use super::elf_file::Elf;
use super::elf_segment_pager::ElfSegmentPager;
/// Load the ELF to the root vmar. This function will
/// 1. read the vaddr of each segment to get all ELF pages.
@ -29,7 +24,7 @@ use super::elf_segment_pager::ElfSegmentPager;
pub fn load_elf_to_root_vmar(
root_vmar: &Vmar<Full>,
file_header: &[u8],
elf_file: Arc<FileHandle>,
elf_file: Arc<Dentry>,
fs_resolver: &FsResolver,
argv: Vec<CString>,
envp: Vec<CString>,
@ -42,7 +37,7 @@ pub fn load_elf_to_root_vmar(
} else {
None
};
let map_addr = map_segment_vmos(&elf, root_vmar, elf_file)?;
let map_addr = map_segment_vmos(&elf, root_vmar, &elf_file)?;
let mut aux_vec = init_aux_vec(&elf, map_addr)?;
let mut init_stack = InitStack::new_default_config(argv, envp);
init_stack.init(root_vmar, &elf, &ldso_load_info, &mut aux_vec)?;
@ -73,12 +68,13 @@ fn load_ldso_for_shared_object(
if let Ok(ldso_path) = elf.ldso_path(file_header) && elf.is_shared_object() {
trace!("ldso_path = {:?}", ldso_path);
let fs_path = FsPath::new(AT_FDCWD, &ldso_path)?;
let ldso_file = fs_resolver.open(&fs_path, AccessMode::O_RDONLY as u32, 0)?;
let ldso_file = fs_resolver.lookup(&fs_path)?;
let vnode = ldso_file.vnode();
let mut buf = Box::new([0u8; PAGE_SIZE]);
let ldso_header = ldso_file.read(&mut *buf)?;
let ldso_header = vnode.read_at(0, &mut *buf)?;
let ldso_elf = Elf::parse_elf(&*buf)?;
let ldso_file = Arc::new(FileHandle::new_inode_handle(ldso_file));
let map_addr = map_segment_vmos(&ldso_elf, root_vmar, ldso_file)?.unwrap();
// let ldso_file = Arc::new(FileHandle::new_inode_handle(ldso_file));
let map_addr = map_segment_vmos(&ldso_elf, root_vmar, &ldso_file)?.unwrap();
return Ok(LdsoLoadInfo::new(ldso_elf.entry_point() + map_addr, map_addr));
}
// There are three reasons that an executable may lack ldso_path,
@ -137,7 +133,7 @@ impl ElfLoadInfo {
pub fn map_segment_vmos(
elf: &Elf,
root_vmar: &Vmar<Full>,
elf_file: Arc<FileHandle>,
elf_file: &Dentry,
) -> Result<Option<Vaddr>> {
// All segments of the shared object must be mapped to a contiguous vm range
// to ensure that the relative offset of each segment does not change.
@ -151,12 +147,12 @@ pub fn map_segment_vmos(
.get_type()
.map_err(|_| Error::with_message(Errno::ENOEXEC, "parse program header type fails"))?;
if type_ == program::Type::Load {
let vmo = init_segment_vmo(program_header, elf_file.clone())?;
let vmo = init_segment_vmo(program_header, elf_file)?;
map_segment_vmo(
program_header,
vmo,
root_vmar,
elf_file.clone(),
// elf_file.clone(),
&file_map_addr,
)?;
}
@ -180,7 +176,7 @@ fn map_segment_vmo(
program_header: &ProgramHeader64,
vmo: Vmo,
root_vmar: &Vmar<Full>,
elf_file: Arc<FileHandle>,
// elf_file: Arc<FileHandle>,
file_map_addr: &Option<Vaddr>,
) -> Result<()> {
let perms = VmPerms::from(parse_segment_perm(program_header.flags)?);
@ -188,8 +184,8 @@ fn map_segment_vmo(
let offset = (program_header.virtual_addr as Vaddr).align_down(PAGE_SIZE);
trace!(
"map segment vmo: virtual addr = 0x{:x}, size = 0x{:x}, perms = {:?}",
program_header.virtual_addr,
program_header.file_size,
offset,
program_header.mem_size,
perms
);
let mut vm_map_options = root_vmar.new_map(vmo, perms)?;
@ -204,14 +200,58 @@ fn map_segment_vmo(
}
/// Create a VMO for each segment.
fn init_segment_vmo(program_header: &ProgramHeader64, elf_file: Arc<FileHandle>) -> Result<Vmo> {
let vmo_start = (program_header.virtual_addr as Vaddr).align_down(PAGE_SIZE);
let vmo_end = (program_header.virtual_addr as Vaddr + program_header.mem_size as Vaddr)
fn init_segment_vmo(program_header: &ProgramHeader64, elf_file: &Dentry) -> Result<Vmo> {
trace!(
"mem range = 0x{:x} - 0x{:x}, mem_size = 0x{:x}",
program_header.virtual_addr,
program_header.virtual_addr + program_header.mem_size,
program_header.mem_size
);
trace!(
"file range = 0x{:x} - 0x{:x}, file_size = 0x{:x}",
program_header.offset,
program_header.offset + program_header.file_size,
program_header.file_size
);
let file_offset = program_header.offset as usize;
let virtual_addr = program_header.virtual_addr as usize;
debug_assert!(file_offset % PAGE_SIZE == virtual_addr % PAGE_SIZE);
let child_vmo_offset = file_offset.align_down(PAGE_SIZE);
let map_start = (program_header.virtual_addr as usize).align_down(PAGE_SIZE);
let map_end = (program_header.virtual_addr as usize + program_header.mem_size as usize)
.align_up(PAGE_SIZE);
let segment_len = vmo_end - vmo_start;
let pager = Arc::new(ElfSegmentPager::new(elf_file, &program_header)) as Arc<dyn Pager>;
let vmo_alloc_options: VmoOptions<Full> = VmoOptions::new(segment_len).pager(pager);
Ok(vmo_alloc_options.alloc()?.to_dyn())
let vmo_size = map_end - map_start;
debug_assert!(vmo_size >= (program_header.file_size as usize).align_up(PAGE_SIZE));
let vnode = elf_file.vnode();
let page_cache_vmo = vnode.page_cache().ok_or(Error::with_message(
Errno::ENOENT,
"executable has no page cache",
))?;
let segment_vmo = page_cache_vmo
.new_cow_child(child_vmo_offset..child_vmo_offset + vmo_size)?
.alloc()?;
// Write zeros as padding. There are two kinds: head padding and tail padding.
// Head padding: if the segment's virtual address is not page-aligned,
// then the bytes in the first page before the segment start should be zeroed.
// Tail padding: if the segment's mem_size is larger than its file_size,
// then the bytes not backed by file content should be zeros (usually the .data/.bss sections).
// FIXME: Head padding may be removed.
// Head padding.
let page_offset = file_offset % PAGE_SIZE;
if page_offset != 0 {
let buffer = vec![0u8; page_offset];
segment_vmo.write_bytes(0, &buffer)?;
}
// Tail padding.
let tail_padding_offset = program_header.file_size as usize + page_offset;
if vmo_size > tail_padding_offset {
let buffer = vec![0u8; vmo_size - tail_padding_offset];
segment_vmo.write_bytes(tail_padding_offset, &buffer)?;
}
Ok(segment_vmo.to_dyn())
}
fn parse_segment_perm(flags: xmas_elf::program::Flags) -> Result<VmPerm> {

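Note: the padding arithmetic in init_segment_vmo is easy to verify in isolation. A standalone sketch with hypothetical segment numbers (not taken from any real binary), assuming PAGE_SIZE = 4096:

const PAGE_SIZE: usize = 4096;

fn main() {
    // Hypothetical LOAD segment: starts 0x100 bytes into a page, with
    // 0x2000 bytes of file content and 0x3000 bytes in memory (.bss tail).
    let file_offset = 0x10100usize;
    let virtual_addr = 0x40100usize;
    let (file_size, mem_size) = (0x2000usize, 0x3000usize);
    assert_eq!(file_offset % PAGE_SIZE, virtual_addr % PAGE_SIZE);

    // align_down / align_up, written out with bit masks.
    let child_vmo_offset = file_offset & !(PAGE_SIZE - 1); // 0x10000
    let map_start = virtual_addr & !(PAGE_SIZE - 1); // 0x40000
    let map_end = (virtual_addr + mem_size + PAGE_SIZE - 1) & !(PAGE_SIZE - 1); // 0x44000
    let vmo_size = map_end - map_start;
    assert_eq!(vmo_size, 0x4000);
    let _ = child_vmo_offset;

    // Head padding: the 0x100 bytes before the segment in the first page.
    let page_offset = file_offset % PAGE_SIZE;
    assert_eq!(page_offset, 0x100);

    // Tail padding: everything past the file-backed bytes, i.e. 0x2100..0x4000.
    let tail_padding_offset = file_size + page_offset;
    assert_eq!(vmo_size - tail_padding_offset, 0x1f00);
}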
View File

@ -1,6 +1,5 @@
mod aux_vec;
mod elf_file;
mod elf_segment_pager;
mod init_stack;
mod load_elf;

View File

@ -1,9 +1,7 @@
pub mod elf;
mod shebang;
use crate::fs::file_handle::FileHandle;
use crate::fs::fs_resolver::{FsPath, FsResolver, AT_FDCWD};
use crate::fs::utils::AccessMode;
use crate::prelude::*;
use crate::rights::Full;
use crate::vm::vmar::Vmar;
@ -34,12 +32,13 @@ pub fn load_program_to_root_vmar(
executable_path
};
let fs_path = FsPath::new(AT_FDCWD, &executable_path)?;
let abs_path = fs_resolver.lookup(&fs_path)?.abs_path();
let file = fs_resolver.open(&fs_path, AccessMode::O_RDONLY as u32, 0)?;
let elf_file = fs_resolver.lookup(&fs_path)?;
let abs_path = elf_file.abs_path();
let vnode = elf_file.vnode();
let file_header = {
// read the first page of the file as the file header
let mut file_header_buffer = Box::new([0u8; PAGE_SIZE]);
file.read(&mut *file_header_buffer)?;
vnode.read_at(0, &mut *file_header_buffer)?;
file_header_buffer
};
if let Some(mut new_argv) = parse_shebang_line(&*file_header)? {
@ -58,7 +57,8 @@ pub fn load_program_to_root_vmar(
);
}
let elf_file = Arc::new(FileHandle::new_inode_handle(file));
debug!("load executable, path = {}", executable_path);
load_elf_to_root_vmar(root_vmar, &*file_header, elf_file, fs_resolver, argv, envp)
let elf_load_info =
load_elf_to_root_vmar(root_vmar, &*file_header, elf_file, fs_resolver, argv, envp)?;
Ok((abs_path, elf_load_info))
}

View File

@ -38,7 +38,7 @@ pub fn sys_execve(
// destroy the contents of the root vmar
let root_vmar = current.root_vmar();
root_vmar.clear()?;
current.user_vm().set_default();
current.user_vm().set_default()?;
// load elf content to new vm space
let fs_resolver = &*current.fs().read();
debug!("load program to root vmar");

View File

@ -107,7 +107,10 @@ fn mmap_filebacked_vmo(
let fs_resolver = current.fs().read();
let dentry = fs_resolver.lookup_from_fd(fd)?;
let vnode = dentry.vnode();
let page_cache_vmo = vnode.page_cache();
let page_cache_vmo = vnode.page_cache().ok_or(Error::with_message(
Errno::EBADF,
"File does not have page cache",
))?;
let vmo = if flags.contains(MMapFlags::MAP_PRIVATE) {
// map private

View File

@ -334,10 +334,10 @@ impl Vmo_ {
let frames = inner.committed_pages.get(&page_idx).unwrap().clone();
if let Some(parent_page_idx) = inner.inherited_pages.parent_page_idx(page_idx) {
// copy contents of parent to the frame
let mut tmp_buffer = [0u8; PAGE_SIZE];
let mut tmp_buffer = Box::new([0u8; PAGE_SIZE]);
let parent = self.parent.upgrade().unwrap();
parent.read_bytes(parent_page_idx * PAGE_SIZE, &mut tmp_buffer)?;
frames.write_bytes(0, &tmp_buffer)?;
parent.read_bytes(parent_page_idx * PAGE_SIZE, &mut *tmp_buffer)?;
frames.write_bytes(0, &*tmp_buffer)?;
} else {
frames.zero();
}

View File

@ -487,7 +487,12 @@ fn alloc_child_vmo_(
}
let parent_page_idx_offset = range.start / PAGE_SIZE;
let inherited_end = range.end.min(parent_vmo_size);
let inherited_end_page_idx = inherited_end / PAGE_SIZE + 1;
let cow_size = if inherited_end >= range.start {
inherited_end - range.start
} else {
0
};
let inherited_end_page_idx = cow_size / PAGE_SIZE;
let inherited_pages = InheritedPages::new(0..inherited_end_page_idx, parent_page_idx_offset);
let vmo_inner = VmoInner {
pager: None,
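Note: the corrected inherited_end_page_idx computation deserves a worked example. A standalone check with hypothetical numbers (assuming PAGE_SIZE = 4096) that contrasts it with the off-by-one formula it replaces:

const PAGE_SIZE: usize = 4096;

fn main() {
    // Hypothetical COW child: the parent has 2 committed pages, and the
    // child covers parent page 1 plus two pages past the parent's end.
    let parent_vmo_size = 2 * PAGE_SIZE;
    let range = PAGE_SIZE..4 * PAGE_SIZE;

    let _parent_page_idx_offset = range.start / PAGE_SIZE; // child page 0 maps to parent page 1
    let inherited_end = range.end.min(parent_vmo_size); // 8192
    let cow_size = inherited_end.saturating_sub(range.start); // 4096

    // New formula: exactly one child page (index 0) inherits from the parent.
    assert_eq!(cow_size / PAGE_SIZE, 1);
    // Old formula: an absolute parent index plus one, i.e. 3, which would
    // wrongly mark child pages 0..3 as inherited.
    assert_eq!(inherited_end / PAGE_SIZE + 1, 3);
}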