refactor current implementations with vmar and vmo

Jianfeng Jiang
2022-12-07 19:22:37 +08:00
parent 52531c6cb6
commit 174fdc07ea
88 changed files with 1766 additions and 1362 deletions

View File

@ -2,5 +2,5 @@
We don't include the source code of busybox here since the source code is really large. Busybox can be compiled with the following commands.
After downloading the source code of busybox 1.35.0 and unzipping it, cd to the busybox directory.
1. ```make defconfig #set config to default```
2. change the line in .config, `#CONFIG_STATIC is not set` => `CONFIG_STATIC=y`. We need a static-compiled busybox
1. `make defconfig`. This sets all config options to their defaults.
2. Change the line in .config, `#CONFIG_STATIC is not set` => `CONFIG_STATIC=y`. We need a statically linked busybox binary since we do not support dynamic linking yet.

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cea612414dc19fcd03b563607ea9a453a3d3390b9f3b229ef8e56b08e4d4c8c5
oid sha256:9bc1642390b9dc38ecc058240e529401c38aa8bb9a86bad3615e4bdad505fa8c
size 9592

View File

@ -6,6 +6,7 @@ use core::mem::MaybeUninit;
use crate::debug;
use crate::trap::{CalleeRegs, CallerRegs, SyscallFrame, TrapFrame};
use crate::x86_64_util::rdfsbase;
use pod::Pod;
/// Defines a CPU-local variable.
@ -156,7 +157,7 @@ impl From<TrapFrame> for CpuContext {
rip: trap.rip,
rflag: trap.rflags,
},
fs_base: 0,
fs_base: rdfsbase(),
fp_regs: FpRegs::default(),
trap_information: TrapInformation {
cr2: trap.cr2,

View File

@ -1,5 +1,6 @@
use super::{page_table::PageTable, *};
use crate::prelude::*;
use crate::vm::VmIo;
use crate::{
config::PAGE_SIZE,
mm::address::is_aligned,

View File

@ -7,5 +7,6 @@ pub(crate) use alloc::sync::Arc;
pub(crate) use alloc::vec::Vec;
pub(crate) use core::any::Any;
pub use crate::debug;
pub(crate) use crate::util::AlignExt;
pub use crate::vm::{Paddr, Vaddr};

View File

@ -38,7 +38,7 @@ __trap_entry:
push r12
push rbp
push rbx
mov rdi, 0
# mov rdi, 0
push rdi
save
# save cr2

View File

@ -245,7 +245,7 @@ impl VmAllocOptions {
}
}
#[derive(Debug, Clone)]
#[derive(Debug)]
/// A handle to a page frame.
///
/// An instance of `VmFrame` is a handle to a page frame (a physical memory
@ -260,6 +260,14 @@ pub struct VmFrame {
pub(crate) physical_frame: Arc<PhysFrame>,
}
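// Note: cloning a `VmFrame` only clones the `Arc`, so both handles refer to the same physical frame; no frame memory is copied.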
impl Clone for VmFrame {
fn clone(&self) -> Self {
Self {
physical_frame: self.physical_frame.clone(),
}
}
}
impl VmFrame {
/// Creates a new VmFrame.
///

View File

@ -22,15 +22,17 @@ use super::VmIo;
/// A newly-created `VmSpace` is not backed by any physical memory pages.
/// To provide memory pages for a `VmSpace`, one can allocate and map
/// physical memory (`VmFrames`) to the `VmSpace`.
#[derive(Debug, Clone)]
pub struct VmSpace {
memory_set: Mutex<MemorySet>,
memory_set: Arc<Mutex<MemorySet>>,
}
impl VmSpace {
/// Creates a new VM address space.
pub fn new() -> Self {
Self {
memory_set: Mutex::new(MemorySet::new()),
memory_set: Arc::new(Mutex::new(MemorySet::new())),
}
}
/// Activate the page table, load root physical address to cr3
@ -55,6 +57,7 @@ impl VmSpace {
if options.addr.is_none() {
return Err(Error::InvalidArgs);
}
// debug!("map to vm space: 0x{:x}", options.addr.unwrap());
self.memory_set.lock().map(MapArea::new(
VirtAddr(options.addr.unwrap()),
frames.len() * PAGE_SIZE,
@ -108,15 +111,6 @@ impl Default for VmSpace {
}
}
impl Clone for VmSpace {
fn clone(&self) -> Self {
let memory_set = self.memory_set.lock().clone();
VmSpace {
memory_set: Mutex::new(memory_set),
}
}
}
impl VmIo for VmSpace {
fn read_bytes(&self, vaddr: usize, buf: &mut [u8]) -> Result<()> {
self.memory_set.lock().read_bytes(vaddr, buf)
@ -133,8 +127,12 @@ impl VmIo for VmSpace {
pub struct VmMapOptions {
/// start virtual address
addr: Option<Vaddr>,
/// map align
align: usize,
/// permission
perm: VmPerm,
/// can overwrite
can_overwrite: bool,
}
impl VmMapOptions {
@ -142,7 +140,9 @@ impl VmMapOptions {
pub fn new() -> Self {
Self {
addr: None,
align: PAGE_SIZE,
perm: VmPerm::empty(),
can_overwrite: false,
}
}
@ -153,7 +153,8 @@ impl VmMapOptions {
///
/// The default value of this option is the page size.
pub fn align(&mut self, align: usize) -> &mut Self {
todo!()
self.align = align;
self
}
/// Sets the permissions of the mapping, which affects whether
@ -182,7 +183,8 @@ impl VmMapOptions {
///
/// The default value of this option is `false`.
pub fn can_overwrite(&mut self, can_overwrite: bool) -> &mut Self {
todo!()
self.can_overwrite = can_overwrite;
self
}
}
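For context, a minimal sketch of how the now-implemented builder methods compose at a call site. This is not part of the commit; the surrounding function and fixed address are placeholders, with each option method used as it is elsewhere in this diff.

fn map_one_page(vm_space: &VmSpace, addr: Vaddr) -> Result<()> {
    // Back the mapping with a single freshly allocated physical frame.
    let frames = VmFrameVec::allocate(&VmAllocOptions::new(1))?;
    let mut options = VmMapOptions::new();
    options.addr(Some(addr)); // map at a fixed virtual address
    options.perm(VmPerm::R | VmPerm::W); // readable and writable
    options.align(PAGE_SIZE); // keep the default page alignment
    options.can_overwrite(false); // fail rather than clobber an existing mapping
    vm_space.map(frames, &options)?;
    Ok(())
}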

View File

@ -211,6 +211,20 @@ impl From<core::ffi::FromBytesWithNulError> for Error {
}
}
impl From<Error> for jinux_frame::Error {
fn from(error: Error) -> Self {
match error.errno {
Errno::EACCES => jinux_frame::Error::AccessDenied,
Errno::EIO => jinux_frame::Error::IoError,
Errno::ENOMEM => jinux_frame::Error::NoMemory,
Errno::EFAULT => jinux_frame::Error::PageFault,
Errno::EINVAL => jinux_frame::Error::InvalidArgs,
Errno::EBUSY => jinux_frame::Error::NotEnoughResources,
_ => jinux_frame::Error::InvalidArgs,
}
}
}
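This `From` impl lets the `?` operator convert the kernel library's `Errno`-based errors into `jinux_frame::Error` automatically. A hedged sketch; the helper below is hypothetical:

// A hypothetical helper that returns this crate's Errno-based `Result`.
fn check_access() -> Result<()> {
    return_errno_with_message!(Errno::EACCES, "permission denied")
}

// A caller returning the frame's error type can use plain `?`;
// the `From` impl above maps EACCES to `jinux_frame::Error::AccessDenied`.
fn frame_level_op() -> core::result::Result<(), jinux_frame::Error> {
    check_access()?;
    Ok(())
}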
impl From<alloc::ffi::NulError> for Error {
fn from(_: alloc::ffi::NulError) -> Self {
Error::with_message(Errno::E2BIG, "Cannot find null in cstring")

View File

@ -18,7 +18,7 @@ macro_rules! define_fcntl_cmd {
fn try_from(value: i32) -> Result<Self> {
match value {
$($name => Ok(FcntlCmd::$name),)*
_ => return_errno!(Errno::EINVAL),
_ => return_errno_with_message!(Errno::EINVAL, "Unknown fcntl cmd"),
}
}
}

View File

@ -60,7 +60,9 @@ impl FileTable {
self.table.remove(&fd);
}
pub fn get_file(&self, fd: FileDescripter) -> Option<&Arc<dyn File>> {
self.table.get(&fd)
pub fn get_file(&self, fd: FileDescripter) -> Result<&Arc<dyn File>> {
self.table
.get(&fd)
.ok_or(Error::with_message(Errno::EBADF, "fd does not exist"))
}
}
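With `get_file` returning a `Result`, call sites can propagate a missing fd as `EBADF` via `?` instead of unwrapping an `Option`. A minimal sketch; the `write` method on `File` is an assumption for illustration, not shown in this diff:

fn write_to_fd(file_table: &FileTable, fd: FileDescripter, buf: &[u8]) -> Result<usize> {
    // `?` surfaces EBADF ("fd does not exist") to the caller.
    let file = file_table.get_file(fd)?;
    file.write(buf) // assumed `File` method
}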

View File

@ -35,7 +35,6 @@ extern crate alloc;
pub mod driver;
pub mod error;
pub mod fs;
mod memory;
pub mod prelude;
mod process;
pub mod rights;

View File

@ -1,55 +0,0 @@
use crate::prelude::*;
use jinux_frame::vm::VmIo;
use pod::Pod;
pub mod vm_page;
/// Copy bytes from the user space of the current process. The number of bytes copied is the length of dest.
pub fn read_bytes_from_user(src: Vaddr, dest: &mut [u8]) -> Result<()> {
let current = current!();
let vm_space = current.vm_space().ok_or(Error::with_message(
Errno::ESRCH,
"[Internal error]Current should have vm space to copy bytes from user",
))?;
vm_space.read_bytes(src, dest)?;
Ok(())
}
/// Copy a value (Plain Old Data type) from the user space of the current process.
pub fn read_val_from_user<T: Pod>(src: Vaddr) -> Result<T> {
let current = current!();
let vm_space = current.vm_space().ok_or(Error::with_message(
Errno::ESRCH,
"[Internal error]Current should have vm space to copy val from user",
))?;
Ok(vm_space.read_val(src)?)
}
/// Write bytes to the user space of the current process. The number of bytes written is the length of src.
pub fn write_bytes_to_user(dest: Vaddr, src: &[u8]) -> Result<()> {
let current = current!();
let vm_space = current.vm_space().ok_or(Error::with_message(
Errno::ESRCH,
"[Internal error]Current should have vm space to write bytes to user",
))?;
vm_space.write_bytes(dest, src)?;
Ok(())
}
/// Write a value (Plain Old Data type) to the user space of the current process.
pub fn write_val_to_user<T: Pod>(dest: Vaddr, val: &T) -> Result<()> {
let current = current!();
let vm_space = current.vm_space().ok_or(Error::with_message(
Errno::ESRCH,
"[Internal error]Current should have vm space to write val to user",
))?;
vm_space.write_val(dest, val)?;
Ok(())
}
/// Read a C string from user space; the length of the string should not exceed max_len (including the null byte).
pub fn read_cstring_from_user(addr: Vaddr, max_len: usize) -> Result<CString> {
let mut buffer = vec![0u8; max_len];
read_bytes_from_user(addr, &mut buffer)?;
Ok(CString::from(CStr::from_bytes_until_nul(&buffer)?))
}

View File

@ -1,160 +0,0 @@
//! A Page in virtual address space
use crate::prelude::*;
use core::ops::Range;
use jinux_frame::vm::{VmAllocOptions, VmFrameVec, VmIo, VmMapOptions, VmPerm, VmSpace};
/// A set of **CONTIGUOUS** virtual pages in VmSpace
pub struct VmPageRange {
start_page: VmPage,
end_page: VmPage,
}
impl VmPageRange {
/// create a set of pages containing virtual address range [a, b)
pub const fn new_range(vaddr_range: Range<Vaddr>) -> Self {
let start_page = VmPage::containing_address(vaddr_range.start);
let end_page = VmPage::containing_address(vaddr_range.end - 1);
Self {
start_page,
end_page,
}
}
pub const fn new_page_range(start_page: VmPage, end_page: VmPage) -> Self {
Self {
start_page,
end_page,
}
}
/// returns the page containing the specific vaddr
pub const fn containing_address(vaddr: Vaddr) -> Self {
let page = VmPage::containing_address(vaddr);
Self {
start_page: page,
end_page: page,
}
}
pub const fn start_address(&self) -> Vaddr {
self.start_page.start_address()
}
/// the address right after the end page
pub const fn end_address(&self) -> Vaddr {
self.end_page.start_address() + PAGE_SIZE
}
/// allocate a set of physical frames and map self to frames
pub fn map(&mut self, vm_space: &VmSpace, vm_perm: VmPerm) {
let options = VmAllocOptions::new(self.len());
let frames = VmFrameVec::allocate(&options).expect("allocate frame error");
self.map_to(vm_space, frames, vm_perm);
}
/// map self to a set of zeroed frames
pub fn map_zeroed(&self, vm_space: &VmSpace, vm_perm: VmPerm) {
let options = VmAllocOptions::new(self.len());
let frames = VmFrameVec::allocate(&options).expect("allocate frame error");
let buffer = vec![0u8; self.nbytes()];
self.map_to(vm_space, frames, vm_perm);
vm_space
.write_bytes(self.start_address(), &buffer)
.expect("write zero failed");
// frames.write_bytes(0, &buffer).expect("write zero failed");
}
/// map self to a set of frames
pub fn map_to(&self, vm_space: &VmSpace, frames: VmFrameVec, vm_perm: VmPerm) {
assert_eq!(self.len(), frames.len());
let mut vm_map_options = VmMapOptions::new();
vm_map_options.addr(Some(self.start_address()));
vm_map_options.perm(vm_perm);
vm_space.map(frames, &vm_map_options).expect("map failed");
}
pub fn unmap(&mut self, vm_space: &VmSpace) {
vm_space
.unmap(&(self.start_address()..self.end_address()))
.expect("unmap failed");
}
pub fn is_mapped(&self, vm_space: &VmSpace) -> bool {
todo!()
}
pub fn iter(&self) -> VmPageIter<'_> {
VmPageIter {
current: self.start_page,
page_range: self,
}
}
/// return the number of virtual pages
pub const fn len(&self) -> usize {
self.end_page.vpn - self.start_page.vpn + 1
}
pub const fn nbytes(&self) -> usize {
self.len() * PAGE_SIZE
}
}
pub struct VmPageIter<'a> {
current: VmPage,
page_range: &'a VmPageRange,
}
impl<'a> Iterator for VmPageIter<'a> {
type Item = VmPage;
fn next(&mut self) -> Option<Self::Item> {
let next_page = if self.current <= self.page_range.end_page {
Some(self.current)
} else {
None
};
self.current = self.current.next_page();
next_page
}
}
/// A Virtual Page
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct VmPage {
/// Virtual Page Number
vpn: usize,
}
impl VmPage {
pub const fn containing_address(vaddr: Vaddr) -> Self {
Self {
vpn: vaddr / PAGE_SIZE,
}
}
pub const fn start_address(&self) -> Vaddr {
self.vpn * PAGE_SIZE
}
pub const fn next_page(&self) -> VmPage {
VmPage { vpn: self.vpn + 1 }
}
/// Check whether current page is mapped
pub fn is_mapped(&self, vm_space: &VmSpace) -> bool {
vm_space.is_mapped(self.start_address())
}
pub fn map_page(&self, vm_space: &VmSpace, vm_perm: VmPerm) -> Result<()> {
let vm_alloc_option = VmAllocOptions::new(1);
let vm_frame = VmFrameVec::allocate(&vm_alloc_option)?;
let mut vm_map_options = VmMapOptions::new();
vm_map_options.addr(Some(self.start_address()));
vm_map_options.perm(vm_perm);
vm_space.map(vm_frame, &vm_map_options)?;
Ok(())
}
}

View File

@ -2,6 +2,7 @@
pub(crate) use alloc::boxed::Box;
pub(crate) use alloc::collections::BTreeMap;
pub(crate) use alloc::collections::BTreeSet;
pub(crate) use alloc::collections::LinkedList;
pub(crate) use alloc::collections::VecDeque;
pub(crate) use alloc::ffi::CString;

View File

@ -96,18 +96,9 @@ pub fn clone_child(parent_context: CpuContext, clone_args: CloneArgs) -> Result<
let child_pid = new_pid();
let current = Process::current();
// child process vm space
// FIXME: COPY ON WRITE can be used here
let parent_vm_space = current
.vm_space()
.expect("User task should always have vm space");
let child_vm_space = parent_vm_space.clone();
debug_check_clone_vm_space(parent_vm_space, &child_vm_space);
let child_file_name = match current.filename() {
None => None,
Some(filename) => Some(filename.clone()),
};
// child process vmar
let parent_root_vmar = current.root_vmar().unwrap();
let child_root_vmar = current.root_vmar().unwrap().fork_vmar()?;
// child process user_vm
let child_user_vm = match current.user_vm() {
@ -115,18 +106,16 @@ pub fn clone_child(parent_context: CpuContext, clone_args: CloneArgs) -> Result<
Some(user_vm) => Some(user_vm.clone()),
};
// child process cpu context
// child process user space
let mut child_cpu_context = parent_context.clone();
debug!("parent context: {:x?}", parent_context);
debug!("parent gp_regs: {:x?}", child_cpu_context.gp_regs);
child_cpu_context.gp_regs.rax = 0; // Set return value of child process
let child_vm_space = child_root_vmar.vm_space().clone();
let child_user_space = Arc::new(UserSpace::new(child_vm_space, child_cpu_context));
debug!("before spawn child task");
debug!("current pid: {}", current.pid());
debug!("child process pid: {}", child_pid);
debug!("rip = 0x{:x}", child_cpu_context.gp_regs.rip);
let child_file_name = match current.filename() {
None => None,
Some(filename) => Some(filename.clone()),
};
let child_file_table = current.file_table.lock().clone();
// inherit parent's sig disposition
@ -145,6 +134,7 @@ pub fn clone_child(parent_context: CpuContext, clone_args: CloneArgs) -> Result<
child_file_name,
child_user_vm,
Some(child_user_space),
Some(child_root_vmar),
None,
child_file_table,
child_sig_dispositions,
@ -189,12 +179,11 @@ fn clone_child_clear_tid(child_process: &Arc<Process>) -> Result<()> {
}
fn clone_child_set_tid(child_process: &Arc<Process>, clone_args: CloneArgs) -> Result<()> {
debug!("clone child set tid");
let child_pid = child_process.pid();
let child_vm = child_process
.vm_space()
let child_vmar = child_process
.root_vmar()
.ok_or_else(|| Error::new(Errno::ECHILD))?;
child_vm.write_val(clone_args.child_tidptr, &child_pid)?;
child_vmar.write_val(clone_args.child_tidptr, &child_pid)?;
Ok(())
}

View File

@ -1,339 +0,0 @@
//! This module is used to parse ELF file content to get elf_load_info.
//! When creating a process from an ELF file, we will use the elf_load_info to construct the VmSpace
use crate::{
memory::vm_page::{VmPage, VmPageRange},
prelude::*,
};
use core::{cmp::Ordering, ops::Range};
use jinux_frame::vm::{VmAllocOptions, VmFrameVec, VmIo, VmPerm, VmSpace};
use xmas_elf::{
header,
program::{self, ProgramHeader, ProgramHeader64, SegmentData},
ElfFile,
};
use super::init_stack::InitStack;
pub struct ElfLoadInfo<'a> {
segments: Vec<ElfSegment<'a>>,
init_stack: InitStack,
elf_header_info: ElfHeaderInfo,
}
pub struct ElfSegment<'a> {
range: Range<Vaddr>,
data: &'a [u8],
type_: program::Type,
vm_perm: VmPerm,
}
#[derive(Debug, Clone, Copy, Default)]
/// Info parsed from elf header. The entry point is used to set rip
/// The other info is used to set auxv vectors.
pub struct ElfHeaderInfo {
/// the entry point of the elf
pub entry_point: Vaddr,
/// program header table offset
pub ph_off: u64,
/// number of program headers
pub ph_num: u16,
/// The size of a program header
pub ph_ent: usize,
}
impl<'a> ElfSegment<'a> {
fn parse_elf_segment(segment: ProgramHeader<'a>, elf_file: &ElfFile<'a>) -> Result<Self> {
let start = segment.virtual_addr() as Vaddr;
let end = start + segment.mem_size() as Vaddr;
let type_ = match segment.get_type() {
Err(error_msg) => return_errno_with_message!(Errno::ENOEXEC, error_msg),
Ok(type_) => type_,
};
let data = read_segment_data(segment, elf_file)?;
let vm_perm = Self::parse_segment_perm(segment)?;
Ok(Self {
range: start..end,
type_,
data,
vm_perm,
})
}
pub fn parse_segment_perm(segment: ProgramHeader<'a>) -> Result<VmPerm> {
let flags = segment.flags();
if !flags.is_read() {
return_errno_with_message!(Errno::ENOEXEC, "unreadable segment");
}
let mut vm_perm = VmPerm::R;
if flags.is_write() {
vm_perm |= VmPerm::W;
}
if flags.is_execute() {
vm_perm |= VmPerm::X;
}
Ok(vm_perm)
}
pub fn is_loadable(&self) -> bool {
self.type_ == program::Type::Load
}
pub fn start_address(&self) -> Vaddr {
self.range.start
}
pub fn end_address(&self) -> Vaddr {
self.range.end
}
fn copy_and_map_segment(&self, vm_space: &VmSpace) -> Result<()> {
let start_address = self.start_address();
let page_mask = PAGE_SIZE - 1;
let segment_len = self.end_address() - self.start_address();
let data_len = self.data.len();
let zeroed_bytes = if segment_len > data_len {
vec![0u8; segment_len - data_len]
} else {
Vec::new()
};
// According to the Linux ABI, the first page may share a page with another segment.
// So we first check whether the first page is already mapped.
if vm_space.is_mapped(start_address) {
// The first page is mapped. This is the rare case.
let write_len_on_first_page =
(PAGE_SIZE - (start_address & page_mask)).min(self.data.len());
vm_space
.write_bytes(start_address, &self.data[..write_len_on_first_page])
.expect("Write first page failed");
let start_page = VmPage::containing_address(start_address).next_page();
let end_page = VmPage::containing_address(self.end_address());
if end_page >= start_page {
let vm_page_range = VmPageRange::new_page_range(start_page, end_page);
let page_num = vm_page_range.len();
let vm_alloc_options = VmAllocOptions::new(page_num);
let frames = VmFrameVec::allocate(&vm_alloc_options)?;
frames.write_bytes(0, &self.data[write_len_on_first_page..])?;
if zeroed_bytes.len() > 0 {
frames.write_bytes(data_len - write_len_on_first_page, &zeroed_bytes)?;
}
vm_page_range.map_to(vm_space, frames, self.vm_perm);
} else {
if zeroed_bytes.len() > 0 {
vm_space.write_bytes(start_address + data_len, &zeroed_bytes)?;
}
}
} else {
// The first page is not mapped. This is the common case.
let vm_page_range = VmPageRange::new_range(start_address..self.end_address());
let page_num = vm_page_range.len();
let vm_alloc_options = VmAllocOptions::new(page_num);
let frames = VmFrameVec::allocate(&vm_alloc_options)?;
let offset = start_address & page_mask;
// copy segment
frames.write_bytes(offset, &self.data)?;
// write zero bytes
if zeroed_bytes.len() > 0 {
let write_addr = offset + data_len;
frames.write_bytes(write_addr, &zeroed_bytes)?;
}
vm_page_range.map_to(vm_space, frames, self.vm_perm);
}
Ok(())
}
fn is_page_aligned(&self) -> bool {
self.start_address() % PAGE_SIZE == 0
}
}
impl<'a> ElfLoadInfo<'a> {
fn with_capacity(
capacity: usize,
init_stack: InitStack,
elf_header_info: ElfHeaderInfo,
) -> Self {
Self {
segments: Vec::with_capacity(capacity),
init_stack,
elf_header_info,
}
}
fn add_segment(&mut self, elf_segment: ElfSegment<'a>) {
self.segments.push(elf_segment);
}
pub fn parse_elf_data(
elf_file_content: &'a [u8],
filename: CString,
argv: Vec<CString>,
envp: Vec<CString>,
) -> Result<Self> {
let elf_file = match ElfFile::new(elf_file_content) {
Err(error_msg) => return_errno_with_message!(Errno::ENOEXEC, error_msg),
Ok(elf_file) => elf_file,
};
check_elf_header(&elf_file)?;
// parse elf header
let elf_header_info = ElfHeaderInfo::parse_elf_header(&elf_file);
// FIXME: only contains load segment?
let segments_count = elf_file.program_iter().count();
let init_stack = InitStack::new_default_config(filename, argv, envp);
let mut elf_load_info =
ElfLoadInfo::with_capacity(segments_count, init_stack, elf_header_info);
// parse each segment
for segment in elf_file.program_iter() {
let elf_segment = ElfSegment::parse_elf_segment(segment, &elf_file)?;
if elf_segment.is_loadable() {
elf_load_info.add_segment(elf_segment)
}
}
Ok(elf_load_info)
}
fn vm_page_range(&self) -> Result<VmPageRange> {
let elf_start_address = self
.segments
.iter()
.filter(|segment| segment.is_loadable())
.map(|segment| segment.start_address())
.min()
.unwrap();
let elf_end_address = self
.segments
.iter()
.filter(|segment| segment.is_loadable())
.map(|segment| segment.end_address())
.max()
.unwrap();
Ok(VmPageRange::new_range(elf_start_address..elf_end_address))
}
/// copy and map all segments
pub fn copy_and_map_segments(&self, vm_space: &VmSpace) -> Result<()> {
for segment in self.segments.iter() {
segment.copy_and_map_segment(vm_space)?;
}
Ok(())
}
pub fn init_stack(&mut self, vm_space: &VmSpace) {
self.init_stack
.init(vm_space, &self.elf_header_info)
.expect("Init User Stack failed");
}
/// This function writes the program header table to the initial stack top.
/// It must be called after the process's initial stack is initialized.
/// This information is used to set the auxv vectors.
pub fn write_program_header_table(&self, vm_space: &VmSpace, file_content: &[u8]) {
let write_len = PAGE_SIZE.min(file_content.len());
let write_content = &file_content[..write_len];
let write_addr = self.init_stack.init_stack_top() - PAGE_SIZE;
vm_space
.write_bytes(write_addr, write_content)
.expect("Write elf content failed");
}
pub fn entry_point(&self) -> u64 {
self.elf_header_info.entry_point as u64
}
pub fn user_stack_top(&self) -> u64 {
self.init_stack.user_stack_top() as u64
}
pub fn argc(&self) -> u64 {
self.init_stack.argc()
}
pub fn argv(&self) -> u64 {
self.init_stack.argv()
}
pub fn envc(&self) -> u64 {
self.init_stack.envc()
}
pub fn envp(&self) -> u64 {
self.init_stack.envp()
}
/// Read content from the vm space to ensure the ELF data is correctly copied to user space
pub fn debug_check_map_result(&self, vm_space: &VmSpace) {
for segment in self.segments.iter() {
let start_address = segment.start_address();
let len = segment.data.len();
let mut read_buffer = vec![0; len];
vm_space
.read_bytes(start_address, &mut read_buffer)
.expect("read bytes failed");
let res = segment.data.cmp(&read_buffer);
assert_eq!(res, Ordering::Equal);
}
}
}
impl ElfHeaderInfo {
fn parse_elf_header(elf_file: &ElfFile) -> Self {
let entry_point = elf_file.header.pt2.entry_point() as Vaddr;
let ph_off = elf_file.header.pt2.ph_offset();
let ph_num = elf_file.header.pt2.ph_count();
let ph_ent = core::mem::size_of::<ProgramHeader64>();
ElfHeaderInfo {
entry_point,
ph_off,
ph_num,
ph_ent,
}
}
}
fn check_elf_header(elf_file: &ElfFile) -> Result<()> {
let elf_header = elf_file.header;
// 64bit
debug_assert_eq!(elf_header.pt1.class(), header::Class::SixtyFour);
if elf_header.pt1.class() != header::Class::SixtyFour {
return_errno!(Errno::ENOEXEC);
}
// little endian
debug_assert_eq!(elf_header.pt1.data(), header::Data::LittleEndian);
if elf_header.pt1.data() != header::Data::LittleEndian {
return_errno!(Errno::ENOEXEC);
}
// system V ABI
// debug_assert_eq!(elf_header.pt1.os_abi(), header::OsAbi::SystemV);
// if elf_header.pt1.os_abi() != header::OsAbi::SystemV {
// return Error::new(Errno::ENOEXEC);
// }
// x86_64 architecture
debug_assert_eq!(
elf_header.pt2.machine().as_machine(),
header::Machine::X86_64
);
if elf_header.pt2.machine().as_machine() != header::Machine::X86_64 {
return_errno!(Errno::ENOEXEC);
}
// Executable file
debug_assert_eq!(elf_header.pt2.type_().as_type(), header::Type::Executable);
if elf_header.pt2.type_().as_type() != header::Type::Executable {
return_errno!(Errno::ENOEXEC);
}
Ok(())
}
fn read_segment_data<'a>(segment: ProgramHeader<'a>, elf_file: &ElfFile<'a>) -> Result<&'a [u8]> {
match segment.get_data(&elf_file) {
Err(msg) => return_errno_with_message!(Errno::ENOEXEC, msg),
Ok(data) => match data {
SegmentData::Note64(_, data) | SegmentData::Undefined(data) => Ok(data),
_ => return_errno_with_message!(Errno::ENOEXEC, "Unknown segment data type"),
},
}
}

View File

@ -0,0 +1,141 @@
/// A wrapper around xmas_elf's ELF parsing
use xmas_elf::{
header::{self, Header, HeaderPt1, HeaderPt2, HeaderPt2_, Machine_, Type_},
program::ProgramHeader64,
};
use crate::prelude::*;
pub struct Elf {
pub elf_header: ElfHeader,
pub program_headers: Vec<ProgramHeader64>,
}
impl Elf {
pub fn parse_elf(input: &[u8]) -> Result<Self> {
// first parse the ELF header
// The ELF header is usually 64 bytes: pt1 is 16 bytes and pt2 is 48 bytes.
// We require 128 bytes here to stay consistent with Linux implementations.
debug_assert!(input.len() >= 128);
let header =
xmas_elf::header::parse_header(input).map_err(|_| Error::new(Errno::ENOEXEC))?;
let elf_header = ElfHeader::parse_elf_header(header)?;
check_elf_header(&elf_header)?;
// then parse the program header table
// FIXME: we should acquire enough pages before parse
let ph_offset = elf_header.pt2.ph_offset;
let ph_count = elf_header.pt2.ph_count;
let ph_entry_size = elf_header.pt2.ph_entry_size;
debug_assert!(
input.len() >= ph_offset as usize + ph_count as usize * ph_entry_size as usize
);
let mut program_headers = Vec::with_capacity(ph_count as usize);
for index in 0..ph_count {
let program_header = xmas_elf::program::parse_program_header(input, header, index)
.map_err(|_| Error::new(Errno::ENOEXEC))?;
let ph64 = match program_header {
xmas_elf::program::ProgramHeader::Ph64(ph64) => ph64.clone(),
xmas_elf::program::ProgramHeader::Ph32(_) => {
return_errno_with_message!(Errno::ENOEXEC, "Not a 64-bit executable")
}
};
program_headers.push(ph64);
}
Ok(Self {
elf_header,
program_headers,
})
}
}
pub struct ElfHeader {
pub pt1: HeaderPt1,
pub pt2: HeaderPt2_64,
}
impl ElfHeader {
fn parse_elf_header(header: Header) -> Result<Self> {
let pt1 = header.pt1.clone();
let pt2 = match header.pt2 {
HeaderPt2::Header64(header_pt2) => {
let HeaderPt2_ {
type_,
machine,
version,
entry_point,
ph_offset,
sh_offset,
flags,
header_size,
ph_entry_size,
ph_count,
sh_entry_size,
sh_count,
sh_str_index,
} = header_pt2;
HeaderPt2_64 {
type_: *type_,
machine: *machine,
version: *version,
entry_point: *entry_point,
ph_offset: *ph_offset,
sh_offset: *sh_offset,
flags: *flags,
header_size: *header_size,
ph_entry_size: *ph_entry_size,
ph_count: *ph_count,
sh_entry_size: *sh_entry_size,
sh_count: *sh_count,
sh_str_index: *sh_str_index,
}
}
_ => return_errno_with_message!(Errno::ENOEXEC, "parse elf header failed"),
};
Ok(ElfHeader { pt1, pt2 })
}
}
pub struct HeaderPt2_64 {
pub type_: Type_,
pub machine: Machine_,
pub version: u32,
pub entry_point: u64,
pub ph_offset: u64,
pub sh_offset: u64,
pub flags: u32,
pub header_size: u16,
pub ph_entry_size: u16,
pub ph_count: u16,
pub sh_entry_size: u16,
pub sh_count: u16,
pub sh_str_index: u16,
}
fn check_elf_header(elf_header: &ElfHeader) -> Result<()> {
// 64bit
debug_assert_eq!(elf_header.pt1.class(), header::Class::SixtyFour);
if elf_header.pt1.class() != header::Class::SixtyFour {
return_errno_with_message!(Errno::ENOEXEC, "Not a 64-bit executable");
}
// little endian
debug_assert_eq!(elf_header.pt1.data(), header::Data::LittleEndian);
if elf_header.pt1.data() != header::Data::LittleEndian {
return_errno_with_message!(Errno::ENOEXEC, "Not little endian executable");
}
// system V ABI
// debug_assert_eq!(elf_header.pt1.os_abi(), header::OsAbi::SystemV);
// if elf_header.pt1.os_abi() != header::OsAbi::SystemV {
// return Error::new(Errno::ENOEXEC);
// }
// x86_64 architecture
debug_assert_eq!(elf_header.pt2.machine.as_machine(), header::Machine::X86_64);
if elf_header.pt2.machine.as_machine() != header::Machine::X86_64 {
return_errno_with_message!(Errno::ENOEXEC, "Not x86_64 executable");
}
// Executable file
debug_assert_eq!(elf_header.pt2.type_.as_type(), header::Type::Executable);
if elf_header.pt2.type_.as_type() != header::Type::Executable {
return_errno_with_message!(Errno::ENOEXEC, "Not executable file");
}
Ok(())
}
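A short sketch of how the parsed result can be consumed; the struct fields follow the definitions above:

fn dump_elf_info(elf_file_content: &[u8]) -> Result<()> {
    let elf = Elf::parse_elf(elf_file_content)?;
    debug!("entry point: 0x{:x}", elf.elf_header.pt2.entry_point);
    for ph in &elf.program_headers {
        // Each ProgramHeader64 records where the segment lives in the file
        // and where it should be placed in memory.
        debug!(
            "segment: vaddr = 0x{:x}, mem_size = 0x{:x}, file offset = 0x{:x}",
            ph.virtual_addr, ph.mem_size, ph.offset
        );
    }
    Ok(())
}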

View File

@ -0,0 +1,69 @@
use crate::prelude::*;
use crate::vm::vmar::{get_intersected_range, is_intersected};
use jinux_frame::vm::{VmAllocOptions, VmFrameVec, VmIo};
use jinux_frame::AlignExt;
use crate::vm::vmo::Pager;
use super::load_elf::ElfSegment;
/// The pager behind an ELF segment
pub struct ElfSegmentPager {
/// The pager size
pager_size: usize,
/// data for current segment
segment_data: &'static [u8],
/// The offset of the segment data within the pager.
/// The pager always starts at a page-aligned address, while the segment data may start at any address,
/// so the offset is the segment data's start address % PAGE_SIZE.
offset: usize,
}
impl ElfSegmentPager {
pub fn new(elf_file_content: &'static [u8], elf_segment: &ElfSegment) -> Self {
let start = elf_segment.start_address().align_down(PAGE_SIZE);
let end = elf_segment.end_address().align_up(PAGE_SIZE);
let pager_size = end - start;
let offset = elf_segment.start_address() % PAGE_SIZE;
let elf_file_segment =
&elf_file_content[elf_segment.offset..elf_segment.offset + elf_segment.file_size];
Self {
pager_size,
segment_data: elf_file_segment,
offset,
}
}
}
impl Pager for ElfSegmentPager {
fn commit_page(&self, offset: usize) -> Result<jinux_frame::vm::VmFrame> {
if offset >= self.pager_size {
return_errno_with_message!(Errno::EINVAL, "offset exceeds pager size");
}
let vm_alloc_option = VmAllocOptions::new(1);
let mut vm_frames = VmFrameVec::allocate(&vm_alloc_option)?;
vm_frames.zero();
let page_start = offset.align_down(PAGE_SIZE);
let page_end = page_start + PAGE_SIZE;
let page_range = page_start..page_end;
let data_range = self.offset..self.offset + self.segment_data.len();
if is_intersected(&page_range, &data_range) {
let intersected_range = get_intersected_range(&page_range, &data_range);
let data_write_range =
(intersected_range.start - self.offset)..(intersected_range.end - self.offset);
let write_content = &self.segment_data[data_write_range];
let write_offset = intersected_range.start % PAGE_SIZE;
vm_frames.write_bytes(write_offset, write_content)?;
}
let vm_frame = vm_frames.pop().unwrap();
Ok(vm_frame)
}
fn update_page(&self, offset: usize) -> Result<()> {
unimplemented!()
}
fn decommit_page(&self, offset: usize) -> Result<()> {
unimplemented!()
}
}
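The `is_intersected` and `get_intersected_range` helpers imported from `crate::vm::vmar` are not shown in this diff. A plausible minimal implementation for half-open ranges, consistent with how they are used above, would be:

use core::ops::Range;

/// Whether two half-open ranges overlap.
pub fn is_intersected(range1: &Range<usize>, range2: &Range<usize>) -> bool {
    range1.start < range2.end && range2.start < range1.end
}

/// The overlapping part of two ranges; only meaningful when `is_intersected` holds.
pub fn get_intersected_range(range1: &Range<usize>, range2: &Range<usize>) -> Range<usize> {
    range1.start.max(range2.start)..range1.end.min(range2.end)
}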

View File

@ -2,15 +2,21 @@
//! The process's initial stack contains the arguments, environment variables, and auxiliary vectors.
//! The data layout of the init stack can be seen in Figure 3.9 of https://uclibc.org/docs/psABI-x86_64.pdf
use crate::{memory::vm_page::VmPageRange, prelude::*};
use crate::rights::Rights;
use crate::vm::perms::VmPerms;
use crate::{
prelude::*,
rights::Full,
vm::{vmar::Vmar, vmo::VmoOptions},
};
use core::mem;
use jinux_frame::{
vm::{VmIo, VmPerm, VmSpace},
vm::{VmIo, VmPerm},
AlignExt,
};
use super::aux_vec::{AuxKey, AuxVec};
use super::elf::ElfHeaderInfo;
use super::load_elf::ElfHeaderInfo;
pub const INIT_STACK_BASE: Vaddr = 0x0000_0000_2000_0000;
pub const INIT_STACK_SIZE: usize = 0x1000 * 16; // 64KB
@ -90,10 +96,9 @@ impl InitStack {
}
/// This function only works for the first process
pub fn new_default_config(filename: CString, argv: Vec<CString>, envp: Vec<CString>) -> Self {
pub fn new_default_config(argv: Vec<CString>, envp: Vec<CString>) -> Self {
let init_stack_top = INIT_STACK_BASE - PAGE_SIZE;
let init_stack_size = INIT_STACK_SIZE;
// InitStack::new(filename, init_stack_top, init_stack_size, argv, envp)
InitStack::new(init_stack_top, init_stack_size, argv, envp)
}
@ -110,18 +115,28 @@ impl InitStack {
self.init_stack_top - self.init_stack_size
}
pub fn init(&mut self, vm_space: &VmSpace, elf_header_info: &ElfHeaderInfo) -> Result<()> {
self.map_and_zeroed(vm_space);
self.write_zero_page(vm_space); // This page is used to store the program header table
self.write_stack_content(vm_space, elf_header_info)?;
self.debug_print_stack_content(vm_space);
pub fn init(
&mut self,
root_vmar: &Vmar<Full>,
elf_header_info: &ElfHeaderInfo,
ph_addr: Vaddr,
) -> Result<()> {
self.map_and_zeroed(root_vmar)?;
self.write_stack_content(root_vmar, elf_header_info, ph_addr)?;
self.debug_print_stack_content(root_vmar);
Ok(())
}
fn map_and_zeroed(&self, vm_space: &VmSpace) {
let vm_page_range = VmPageRange::new_range(self.user_stack_bottom()..self.user_stack_top());
let vm_perm = InitStack::perm();
vm_page_range.map_zeroed(vm_space, vm_perm);
fn map_and_zeroed(&self, root_vmar: &Vmar<Full>) -> Result<()> {
let vmo_options = VmoOptions::<Rights>::new(self.init_stack_size);
let vmo = vmo_options.alloc()?;
vmo.clear(0..vmo.size())?;
let perms = VmPerms::READ | VmPerms::WRITE;
let vmar_map_options = root_vmar
.new_map(vmo, perms)?
.offset(self.user_stack_bottom());
vmar_map_options.build().unwrap();
Ok(())
}
/// Libc ABI requires 16-byte alignment of the stack entrypoint.
@ -129,60 +144,54 @@ impl InitStack {
/// to meet the requirement if necessary.
fn adjust_stack_alignment(
&mut self,
vm_space: &VmSpace,
root_vmar: &Vmar<Full>,
envp_pointers: &Vec<u64>,
argv_pointers: &Vec<u64>,
) -> Result<()> {
// ensure 8-byte alignment
self.write_u64(0, vm_space)?;
self.write_u64(0, root_vmar)?;
let auxvec_size = (self.aux_vec.table().len() + 1) * (mem::size_of::<u64>() * 2);
let envp_pointers_size = (envp_pointers.len() + 1) * mem::size_of::<u64>();
let argv_pointers_size = (argv_pointers.len() + 1) * mem::size_of::<u64>();
let argc_size = mem::size_of::<u64>();
let to_write_size = auxvec_size + envp_pointers_size + argv_pointers_size + argc_size;
if (self.pos - to_write_size) % 16 != 0 {
self.write_u64(0, vm_space)?;
self.write_u64(0, root_vmar)?;
}
Ok(())
}
fn write_zero_page(&mut self, vm_space: &VmSpace) {
self.pos -= PAGE_SIZE;
}
fn write_stack_content(
&mut self,
vm_space: &VmSpace,
root_vmar: &Vmar<Full>,
elf_header_info: &ElfHeaderInfo,
ph_addr: Vaddr,
) -> Result<()> {
// write envp string
let envp_pointers = self.write_envp_strings(vm_space)?;
let envp_pointers = self.write_envp_strings(root_vmar)?;
// write argv string
let argv_pointers = self.write_argv_strings(vm_space)?;
let argv_pointers = self.write_argv_strings(root_vmar)?;
// write random value
let random_value = generate_random_for_aux_vec();
let random_value_pointer = self.write_bytes(&random_value, vm_space)?;
let random_value_pointer = self.write_bytes(&random_value, root_vmar)?;
self.aux_vec.set(AuxKey::AT_RANDOM, random_value_pointer)?;
self.aux_vec.set(AuxKey::AT_PAGESZ, PAGE_SIZE as _)?;
self.aux_vec.set(
AuxKey::AT_PHDR,
self.init_stack_top as u64 - PAGE_SIZE as u64 + elf_header_info.ph_off,
)?;
self.aux_vec.set(AuxKey::AT_PHDR, ph_addr as u64)?;
self.aux_vec
.set(AuxKey::AT_PHNUM, elf_header_info.ph_num as u64)?;
self.aux_vec
.set(AuxKey::AT_PHENT, elf_header_info.ph_ent as u64)?;
self.adjust_stack_alignment(vm_space, &envp_pointers, &argv_pointers)?;
self.write_aux_vec(vm_space)?;
self.write_envp_pointers(vm_space, envp_pointers)?;
self.write_argv_pointers(vm_space, argv_pointers)?;
self.adjust_stack_alignment(root_vmar, &envp_pointers, &argv_pointers)?;
self.write_aux_vec(root_vmar)?;
self.write_envp_pointers(root_vmar, envp_pointers)?;
self.write_argv_pointers(root_vmar, argv_pointers)?;
// write argc
let argc = self.argc();
self.write_u64(argc, vm_space)?;
self.write_u64(argc, root_vmar)?;
Ok(())
}
fn write_envp_strings(&mut self, vm_space: &VmSpace) -> Result<Vec<u64>> {
fn write_envp_strings(&mut self, root_vmar: &Vmar<Full>) -> Result<Vec<u64>> {
let envp = self
.envp
.iter()
@ -190,13 +199,13 @@ impl InitStack {
.collect::<Vec<_>>();
let mut envp_pointers = Vec::with_capacity(envp.len());
for envp in envp.iter() {
let pointer = self.write_cstring(envp, vm_space)?;
let pointer = self.write_cstring(envp, root_vmar)?;
envp_pointers.push(pointer);
}
Ok(envp_pointers)
}
fn write_argv_strings(&mut self, vm_space: &VmSpace) -> Result<Vec<u64>> {
fn write_argv_strings(&mut self, root_vmar: &Vmar<Full>) -> Result<Vec<u64>> {
let argv = self
.argv
.iter()
@ -204,17 +213,17 @@ impl InitStack {
.collect::<Vec<_>>();
let mut argv_pointers = Vec::with_capacity(argv.len());
for argv in argv.iter().rev() {
let pointer = self.write_cstring(argv, vm_space)?;
let pointer = self.write_cstring(argv, root_vmar)?;
argv_pointers.push(pointer);
}
argv_pointers.reverse();
Ok(argv_pointers)
}
fn write_aux_vec(&mut self, vm_space: &VmSpace) -> Result<()> {
fn write_aux_vec(&mut self, root_vmar: &Vmar<Full>) -> Result<()> {
// Write the NULL auxiliary entry
self.write_u64(0, vm_space)?;
self.write_u64(AuxKey::AT_NULL as u64, vm_space)?;
self.write_u64(0, root_vmar)?;
self.write_u64(AuxKey::AT_NULL as u64, root_vmar)?;
// Write Auxiliary vectors
let aux_vec: Vec<_> = self
.aux_vec
@ -223,38 +232,38 @@ impl InitStack {
.map(|(aux_key, aux_value)| (*aux_key, *aux_value))
.collect();
for (aux_key, aux_value) in aux_vec.iter() {
self.write_u64(*aux_value, vm_space)?;
self.write_u64(*aux_key as u64, vm_space)?;
self.write_u64(*aux_value, root_vmar)?;
self.write_u64(*aux_key as u64, root_vmar)?;
}
Ok(())
}
fn write_envp_pointers(
&mut self,
vm_space: &VmSpace,
root_vmar: &Vmar<Full>,
mut envp_pointers: Vec<u64>,
) -> Result<()> {
// write NULL pointer
self.write_u64(0, vm_space)?;
self.write_u64(0, root_vmar)?;
// write envp pointers
envp_pointers.reverse();
for envp_pointer in envp_pointers {
self.write_u64(envp_pointer, vm_space)?;
self.write_u64(envp_pointer, root_vmar)?;
}
Ok(())
}
fn write_argv_pointers(
&mut self,
vm_space: &VmSpace,
root_vmar: &Vmar<Full>,
mut argv_pointers: Vec<u64>,
) -> Result<()> {
// write 0
self.write_u64(0, vm_space)?;
self.write_u64(0, root_vmar)?;
// write argv pointers
argv_pointers.reverse();
for argv_pointer in argv_pointers {
self.write_u64(argv_pointer, vm_space)?;
self.write_u64(argv_pointer, root_vmar)?;
}
Ok(())
}
@ -286,35 +295,35 @@ impl InitStack {
}
/// returns the u64 start address
fn write_u64(&mut self, val: u64, vm_space: &VmSpace) -> Result<u64> {
fn write_u64(&mut self, val: u64, root_vmar: &Vmar<Full>) -> Result<u64> {
let start_address = (self.pos - 8).align_down(8);
self.pos = start_address;
vm_space.write_val(start_address, &val)?;
root_vmar.write_val(start_address, &val)?;
Ok(self.pos as u64)
}
fn write_bytes(&mut self, bytes: &[u8], vm_space: &VmSpace) -> Result<u64> {
fn write_bytes(&mut self, bytes: &[u8], root_vmar: &Vmar<Full>) -> Result<u64> {
let len = bytes.len();
self.pos -= len;
vm_space.write_bytes(self.pos, bytes)?;
root_vmar.write_bytes(self.pos, bytes)?;
Ok(self.pos as u64)
}
/// returns the string's start address
/// the C string is written together with its terminating null byte.
fn write_cstring(&mut self, val: &CString, vm_space: &VmSpace) -> Result<u64> {
fn write_cstring(&mut self, val: &CString, root_vmar: &Vmar<Full>) -> Result<u64> {
let bytes = val.as_bytes_with_nul();
self.write_bytes(bytes, vm_space)
self.write_bytes(bytes, root_vmar)
}
pub const fn perm() -> VmPerm {
VmPerm::RWU
}
fn debug_print_stack_content(&self, vm_space: &VmSpace) {
fn debug_print_stack_content(&self, root_vmar: &Vmar<Full>) {
debug!("print stack content:");
let stack_top = self.user_stack_top();
let argc = vm_space.read_val::<u64>(stack_top).unwrap();
let argc = root_vmar.read_val::<u64>(stack_top).unwrap();
debug!("argc = {}", argc);
}
}
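Tracing the write order above (the stack grows downward, so earlier writes land at higher addresses) yields the layout below, which corresponds to Figure 3.9 of the psABI document cited at the top of the file:

// (high addresses)
//   envp strings, argv strings, AT_RANDOM bytes   <- written first
//   optional 8-byte padding for 16-byte alignment
//   auxv (key, value) pairs, ending with (AT_NULL, 0)
//   envp pointer array, NULL-terminated
//   argv pointer array, NULL-terminated
//   argc                                          <- user_stack_top(), the initial rsp
// (low addresses)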

View File

@ -0,0 +1,250 @@
//! This module is used to parse ELF file content to get elf_load_info.
//! When creating a process from an ELF file, we will use the elf_load_info to construct the VmSpace
use crate::vm::perms::VmPerms;
use crate::vm::vmo::VmoRightsOp;
use crate::{
prelude::*,
rights::Full,
vm::{
vmar::Vmar,
vmo::{Pager, Vmo, VmoOptions},
},
};
use jinux_frame::vm::VmPerm;
use jinux_frame::AlignExt;
use xmas_elf::program::{self, ProgramHeader64};
use super::elf_file::Elf;
use super::elf_segment_pager::ElfSegmentPager;
use super::init_stack::InitStack;
pub struct ElfLoadInfo {
segments: Vec<ElfSegment>,
init_stack: InitStack,
elf_header_info: ElfHeaderInfo,
}
pub struct ElfSegment {
/// The virtual address at which to put the segment.
pub virtual_addr: Vaddr,
/// The segment's size in memory, in bytes.
pub mem_size: usize,
/// The segment's offset in the original ELF file
pub offset: usize,
/// The segment's size in the original ELF file, in bytes
pub file_size: usize,
type_: program::Type,
vm_perm: VmPerm,
}
#[derive(Debug, Clone, Copy, Default)]
/// Info parsed from the ELF header. The entry point is used to set rip;
/// the other info is used to set the auxv vectors.
pub struct ElfHeaderInfo {
/// the entry point of the elf
pub entry_point: Vaddr,
/// program header table offset
pub ph_off: u64,
/// number of program headers
pub ph_num: u16,
/// The size of a program header
pub ph_ent: u16,
}
impl ElfSegment {
fn parse_elf_segment(program_header: ProgramHeader64) -> Result<Self> {
let start = program_header.virtual_addr as Vaddr;
let end = start + program_header.mem_size as Vaddr;
let type_ = program_header
.get_type()
.map_err(|_| Error::new(Errno::ENOEXEC))?;
let vm_perm = Self::parse_segment_perm(program_header.flags)?;
Ok(Self {
virtual_addr: program_header.virtual_addr as _,
mem_size: program_header.mem_size as usize,
offset: program_header.offset as usize,
file_size: program_header.file_size as usize,
type_,
vm_perm,
})
}
pub fn parse_segment_perm(flags: xmas_elf::program::Flags) -> Result<VmPerm> {
if !flags.is_read() {
return_errno_with_message!(Errno::ENOEXEC, "unreadable segment");
}
let mut vm_perm = VmPerm::R;
if flags.is_write() {
vm_perm |= VmPerm::W;
}
if flags.is_execute() {
vm_perm |= VmPerm::X;
}
Ok(vm_perm)
}
fn contains_program_headers_table(&self, ph_offset: usize) -> bool {
// the program header table is at ph_offset of the ELF file
self.offset <= ph_offset && ph_offset < self.offset + self.file_size
}
/// If this segment contains the program header table, returns the table's address.
/// Otherwise, returns None.
pub fn program_headers_table_addr(&self, ph_offset: usize) -> Option<Vaddr> {
if self.contains_program_headers_table(ph_offset) {
Some(ph_offset - self.offset + self.virtual_addr)
} else {
None
}
}
pub fn is_loadable(&self) -> bool {
self.type_ == program::Type::Load
}
pub fn start_address(&self) -> Vaddr {
self.virtual_addr
}
pub fn end_address(&self) -> Vaddr {
self.virtual_addr + self.mem_size
}
pub fn init_segment_vmo(&self, elf_file_content: &'static [u8]) -> Vmo<Full> {
let vmo_start = self.start_address().align_down(PAGE_SIZE);
let vmo_end = self.end_address().align_up(PAGE_SIZE);
let segment_len = vmo_end - vmo_start;
let pager = Arc::new(ElfSegmentPager::new(elf_file_content, self)) as Arc<dyn Pager>;
let vmo_alloc_options: VmoOptions<Full> = VmoOptions::new(segment_len).pager(pager);
vmo_alloc_options.alloc().unwrap()
}
// create a vmo for this segment and map it to the root_vmar
fn map_segment_vmo(
&self,
root_vmar: &Vmar<Full>,
elf_file_content: &'static [u8],
) -> Result<()> {
let vmo = self.init_segment_vmo(elf_file_content).to_dyn();
let perms = VmPerms::from(self.vm_perm);
// The segment may not be page-aligned
let offset = self.start_address().align_down(PAGE_SIZE);
let vm_map_options = root_vmar.new_map(vmo, perms)?.offset(offset);
let map_addr = vm_map_options.build()?;
Ok(())
}
}
impl ElfLoadInfo {
fn with_capacity(
capacity: usize,
init_stack: InitStack,
elf_header_info: ElfHeaderInfo,
) -> Self {
Self {
segments: Vec::with_capacity(capacity),
init_stack,
elf_header_info,
}
}
fn add_segment(&mut self, elf_segment: ElfSegment) {
self.segments.push(elf_segment);
}
pub fn parse_elf_data(
elf_file_content: &'static [u8],
argv: Vec<CString>,
envp: Vec<CString>,
) -> Result<Self> {
let elf_file = Elf::parse_elf(elf_file_content)?;
// parse elf header
let elf_header_info = ElfHeaderInfo::parse_elf_header(&elf_file);
// FIXME: only contains load segment?
let ph_count = elf_file.program_headers.len();
let init_stack = InitStack::new_default_config(argv, envp);
let mut elf_load_info = ElfLoadInfo::with_capacity(ph_count, init_stack, elf_header_info);
// parse each segment
for program_header in elf_file.program_headers {
let elf_segment = ElfSegment::parse_elf_segment(program_header)?;
if elf_segment.is_loadable() {
elf_load_info.add_segment(elf_segment)
}
}
Ok(elf_load_info)
}
/// init a vmo for each segment, then map the segments to the root vmar
pub fn map_segment_vmos(
&self,
root_vmar: &Vmar<Full>,
elf_file_content: &'static [u8],
) -> Result<()> {
for segment in &self.segments {
segment.map_segment_vmo(root_vmar, elf_file_content)?;
}
Ok(())
}
pub fn init_stack(&mut self, root_vmar: &Vmar<Full>, file_content: &[u8]) -> Result<()> {
let ph_addr = self.program_headers_table_addr()?;
self.init_stack
.init(root_vmar, &self.elf_header_info, ph_addr)?;
Ok(())
}
fn program_headers_table_addr(&self) -> Result<Vaddr> {
let ph_offset = self.elf_header_info.ph_off as usize;
for segment in &self.segments {
if let Some(ph_addr) = segment.program_headers_table_addr(ph_offset) {
return Ok(ph_addr);
}
}
return_errno_with_message!(
Errno::ENOEXEC,
"can not find program header table address in elf"
);
}
pub fn entry_point(&self) -> u64 {
self.elf_header_info.entry_point as u64
}
pub fn user_stack_top(&self) -> u64 {
self.init_stack.user_stack_top() as u64
}
pub fn argc(&self) -> u64 {
self.init_stack.argc()
}
pub fn argv(&self) -> u64 {
self.init_stack.argv()
}
pub fn envc(&self) -> u64 {
self.init_stack.envc()
}
pub fn envp(&self) -> u64 {
self.init_stack.envp()
}
}
impl ElfHeaderInfo {
fn parse_elf_header(elf_file: &Elf) -> Self {
let entry_point = elf_file.elf_header.pt2.entry_point as Vaddr;
let ph_off = elf_file.elf_header.pt2.ph_offset;
let ph_num = elf_file.elf_header.pt2.ph_count;
let ph_ent = elf_file.elf_header.pt2.ph_entry_size;
ElfHeaderInfo {
entry_point,
ph_off,
ph_num,
ph_ent,
}
}
}

View File

@ -1,29 +1,26 @@
pub mod aux_vec;
pub mod elf;
pub mod elf_file;
pub mod elf_segment_pager;
pub mod init_stack;
pub mod load_elf;
use jinux_frame::vm::VmSpace;
use self::load_elf::ElfLoadInfo;
use crate::{prelude::*, rights::Full, vm::vmar::Vmar};
use self::elf::ElfLoadInfo;
use crate::prelude::*;
/// load elf to a given vm_space. this function will
/// Load an ELF into the root vmar. This function will
/// 1. read the vaddr of each segment to get all elf pages.
/// 2. allocate physical frames and copy elf data to these frames
/// 3. map frames to the correct vaddr
/// 4. (allocate frames and) map the user stack
pub fn load_elf_to_vm_space<'a>(
/// 2. create a vmo for each ELF segment with a backing pager, then map the vmo to the root vmar.
/// 3. write proper content to the init stack.
pub fn load_elf_to_root_vmar(
filename: CString,
elf_file_content: &'a [u8],
vm_space: &VmSpace,
elf_file_content: &'static [u8],
root_vmar: &Vmar<Full>,
argv: Vec<CString>,
envp: Vec<CString>,
) -> Result<ElfLoadInfo<'a>> {
let mut elf_load_info = ElfLoadInfo::parse_elf_data(elf_file_content, filename, argv, envp)?;
elf_load_info.copy_and_map_segments(vm_space)?;
elf_load_info.debug_check_map_result(vm_space);
elf_load_info.init_stack(vm_space);
elf_load_info.write_program_header_table(vm_space, elf_file_content);
) -> Result<ElfLoadInfo> {
let mut elf_load_info = ElfLoadInfo::parse_elf_data(elf_file_content, argv, envp)?;
elf_load_info.map_segment_vmos(root_vmar, elf_file_content)?;
elf_load_info.init_stack(root_vmar, elf_file_content)?;
debug!("load elf succeeds.");
Ok(elf_load_info)
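Callers then seed the initial CPU context from the returned `ElfLoadInfo`. A hedged sketch: the `gp_regs.rip` field follows its use elsewhere in this commit, while `gp_regs.rsp` is assumed to exist alongside it.

fn init_user_context(elf_load_info: &ElfLoadInfo, cpu_ctx: &mut CpuContext) {
    // Start execution at the ELF entry point...
    cpu_ctx.gp_regs.rip = elf_load_info.entry_point();
    // ...with the stack pointer at the prepared init stack top (assumed field).
    cpu_ctx.gp_regs.rsp = elf_load_info.user_stack_top();
}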

View File

@ -1,14 +1,19 @@
use jinux_frame::{
cpu::{CpuContext, TrapInformation},
trap::PAGE_FAULT,
trap::*,
vm::VmIo,
};
use crate::vm::page_fault_handler::PageFaultHandler;
use crate::{prelude::*, process::signal::signals::fault::FaultSignal};
/// We can't handle most exceptions; we just send the current process a fault signal before returning to user space.
pub fn handle_exception(context: &mut CpuContext) {
let trap_info = context.trap_information.clone();
debug!("trap info = {:x?}", trap_info);
log_trap_info(&trap_info);
let current = current!();
let root_vmar = current.root_vmar().unwrap();
match trap_info.id {
PAGE_FAULT => handle_page_fault(&trap_info),
_ => {
@ -21,12 +26,28 @@ pub fn handle_exception(context: &mut CpuContext) {
fn handle_page_fault(trap_info: &TrapInformation) {
const PAGE_NOT_PRESENT_ERROR_MASK: u64 = 0x1 << 0;
const WRITE_ACCESS_MASK: u64 = 0x1 << 1;
if trap_info.err & PAGE_NOT_PRESENT_ERROR_MASK == 0 {
// TODO: If page is not present, we should ask the vmar try to commit this page
generate_fault_signal(trap_info)
let not_present = trap_info.err & PAGE_NOT_PRESENT_ERROR_MASK == 0;
let write = trap_info.err & WRITE_ACCESS_MASK != 0;
if not_present || write {
// If the page is not present or the fault was caused by a write access, we ask the vmar to try to commit this page
let current = current!();
let root_vmar = current.root_vmar().unwrap();
let page_fault_addr = trap_info.cr2 as Vaddr;
debug!(
"Page fault address: 0x{:x}, write access: {}",
page_fault_addr, write
);
if let Err(_) = root_vmar.handle_page_fault(page_fault_addr, not_present, write) {
generate_fault_signal(trap_info);
} else {
// Otherwise, the page fault is caused by page protection error.
generate_fault_signal(trap_info)
// ensure page fault is successfully handled
// FIXME: this check can be removed
let vm_space = root_vmar.vm_space();
let _: u8 = vm_space.read_val(page_fault_addr).unwrap();
}
} else {
// Otherwise, the page fault cannot be handled
generate_fault_signal(trap_info);
}
}
@ -36,3 +57,58 @@ fn generate_fault_signal(trap_info: &TrapInformation) {
let signal = Box::new(FaultSignal::new(trap_info));
current.sig_queues().lock().enqueue(signal);
}
macro_rules! log_trap_common {
($exception_name: ident, $trap_info: ident) => {
debug!(
"[Trap][{}][err = {}]",
stringify!($exception_name),
$trap_info.err
)
};
}
fn log_trap_info(trap_info: &TrapInformation) {
match trap_info.id {
DIVIDE_BY_ZERO => log_trap_common!(DIVIDE_BY_ZERO, trap_info),
DEBUG => log_trap_common!(DEBUG, trap_info),
NON_MASKABLE_INTERRUPT => log_trap_common!(NON_MASKABLE_INTERRUPT, trap_info),
BREAKPOINT => log_trap_common!(BREAKPOINT, trap_info),
OVERFLOW => log_trap_common!(OVERFLOW, trap_info),
BOUND_RANGE_EXCEEDED => log_trap_common!(BOUND_RANGE_EXCEEDED, trap_info),
INVALID_OPCODE => log_trap_common!(INVALID_OPCODE, trap_info),
DEVICE_NOT_AVAILABLE => log_trap_common!(DEVICE_NOT_AVAILABLE, trap_info),
DOUBLE_FAULT => log_trap_common!(DOUBLE_FAULT, trap_info),
COPROCESSOR_SEGMENT_OVERRUN => log_trap_common!(COPROCESSOR_SEGMENT_OVERRUN, trap_info),
INVAILD_TSS => log_trap_common!(INVAILD_TSS, trap_info),
SEGMENT_NOT_PRESENT => log_trap_common!(SEGMENT_NOT_PRESENT, trap_info),
STACK_SEGMENT_FAULT => log_trap_common!(STACK_SEGMENT_FAULT, trap_info),
GENERAL_PROTECTION_FAULT => log_trap_common!(GENERAL_PROTECTION_FAULT, trap_info),
PAGE_FAULT => {
debug!(
"[Trap][{}][page fault addr = 0x{:x}, err = {}]",
stringify!(PAGE_FAULT),
trap_info.cr2,
trap_info.err
);
}
// 15 reserved
X87_FLOATING_POINT_EXCEPTION => log_trap_common!(X87_FLOATING_POINT_EXCEPTION, trap_info),
ALIGNMENT_CHECK => log_trap_common!(ALIGNMENT_CHECK, trap_info),
MACHINE_CHECK => log_trap_common!(MACHINE_CHECK, trap_info),
SIMD_FLOATING_POINT_EXCEPTION => log_trap_common!(SIMD_FLOATING_POINT_EXCEPTION, trap_info),
VIRTUALIZATION_EXCEPTION => log_trap_common!(VIRTUALIZATION_EXCEPTION, trap_info),
CONTROL_PROTECTION_EXCEPTION => log_trap_common!(CONTROL_PROTECTION_EXCEPTION, trap_info),
HYPERVISOR_INJECTION_EXCEPTION => {
log_trap_common!(HYPERVISOR_INJECTION_EXCEPTION, trap_info)
}
VMM_COMMUNICATION_EXCEPTION => log_trap_common!(VMM_COMMUNICATION_EXCEPTION, trap_info),
SECURITY_EXCEPTION => log_trap_common!(SECURITY_EXCEPTION, trap_info),
_ => {
info!(
"[Trap][Unknown trap type][id = {}, err = {}]",
trap_info.id, trap_info.err
);
}
}
}

View File

@ -2,7 +2,6 @@ use core::sync::atomic::{AtomicI32, Ordering};
use self::name::ProcessName;
use self::process_group::ProcessGroup;
use self::process_vm::mmap_area::MmapArea;
use self::process_vm::user_heap::UserHeap;
use self::process_vm::UserVm;
use self::signal::constants::SIGCHLD;
@ -14,9 +13,11 @@ use self::status::ProcessStatus;
use self::task::create_user_task_from_elf;
use crate::fs::file_table::FileTable;
use crate::prelude::*;
use crate::rights::Full;
use crate::tty::get_console;
use crate::vm::vmar::Vmar;
use jinux_frame::sync::WaitQueue;
use jinux_frame::{task::Task, user::UserSpace, vm::VmSpace};
use jinux_frame::{task::Task, user::UserSpace};
pub mod clone;
pub mod elf;
@ -47,6 +48,7 @@ pub struct Process {
filename: Option<CString>,
user_space: Option<Arc<UserSpace>>,
user_vm: Option<UserVm>,
root_vmar: Option<Vmar<Full>>,
/// wait for child status changed
waiting_children: WaitQueue,
/// wait for io events
@ -97,6 +99,7 @@ impl Process {
exec_filename: Option<CString>,
user_vm: Option<UserVm>,
user_space: Option<Arc<UserSpace>>,
root_vmar: Option<Vmar<Full>>,
process_group: Option<Weak<ProcessGroup>>,
file_table: FileTable,
sig_dispositions: SigDispositions,
@ -104,10 +107,8 @@ impl Process {
sig_mask: SigMask,
) -> Self {
let parent = if pid == 0 {
debug!("Init process does not has parent");
None
} else {
debug!("All process except init should have parent");
let current_process = current!();
Some(Arc::downgrade(&current_process))
};
@ -125,6 +126,7 @@ impl Process {
filename: exec_filename,
user_space,
user_vm,
root_vmar,
waiting_children,
poll_queue,
exit_code: AtomicI32::new(0),
@ -189,8 +191,15 @@ impl Process {
let user_process = Arc::new_cyclic(|weak_process_ref| {
let weak_process = weak_process_ref.clone();
let cloned_filename = Some(filename.clone());
let task =
create_user_task_from_elf(filename, elf_file_content, weak_process, argv, envp);
let root_vmar = Vmar::<Full>::new_root().unwrap();
let task = create_user_task_from_elf(
&root_vmar,
filename,
elf_file_content,
weak_process,
argv,
envp,
);
let user_space = task.user_space().map(|user_space| user_space.clone());
let user_vm = UserVm::new();
let file_table = FileTable::new_with_stdio();
@ -203,6 +212,7 @@ impl Process {
cloned_filename,
Some(user_vm),
user_space,
Some(root_vmar),
None,
file_table,
sig_dispositions,
@ -239,6 +249,7 @@ impl Process {
None,
None,
None,
None,
file_table,
sig_dispositions,
sig_queues,
@ -372,19 +383,16 @@ impl Process {
self.user_space.as_ref()
}
/// returns the vm space if the process does have, otherwise None
pub fn vm_space(&self) -> Option<&VmSpace> {
match self.user_space {
None => None,
Some(ref user_space) => Some(user_space.vm_space()),
}
}
/// returns the user_vm
pub fn user_vm(&self) -> Option<&UserVm> {
self.user_vm.as_ref()
}
/// returns the root vmar
pub fn root_vmar(&self) -> Option<&Vmar<Full>> {
self.root_vmar.as_ref()
}
/// returns the user heap if the process has one, otherwise None
pub fn user_heap(&self) -> Option<&UserHeap> {
match self.user_vm {
@ -393,14 +401,6 @@ impl Process {
}
}
/// returns the mmap area if the process does have, otherwise None
pub fn mmap_area(&self) -> Option<&MmapArea> {
match self.user_vm {
None => None,
Some(ref user_vm) => Some(user_vm.mmap_area()),
}
}
/// frees the zombie child with the given pid and returns the exit code of the child process.
/// also removes the process from its process group.
pub fn reap_zombie_child(&self, pid: Pid) -> i32 {

View File

@ -1,97 +0,0 @@
use core::sync::atomic::{AtomicUsize, Ordering};
use crate::{memory::vm_page::VmPageRange, prelude::*, process::elf::init_stack::INIT_STACK_BASE};
use jinux_frame::vm::{VmPerm, VmSpace};
// The definition of MMapFlags is from occlum
bitflags! {
pub struct MMapFlags : u32 {
const MAP_FILE = 0x0;
const MAP_SHARED = 0x1;
const MAP_PRIVATE = 0x2;
const MAP_SHARED_VALIDATE = 0x3;
const MAP_TYPE = 0xf;
const MAP_FIXED = 0x10;
const MAP_ANONYMOUS = 0x20;
const MAP_GROWSDOWN = 0x100;
const MAP_DENYWRITE = 0x800;
const MAP_EXECUTABLE = 0x1000;
const MAP_LOCKED = 0x2000;
const MAP_NORESERVE = 0x4000;
const MAP_POPULATE = 0x8000;
const MAP_NONBLOCK = 0x10000;
const MAP_STACK = 0x20000;
const MAP_HUGETLB = 0x40000;
const MAP_SYNC = 0x80000;
const MAP_FIXED_NOREPLACE = 0x100000;
}
}
impl TryFrom<u64> for MMapFlags {
type Error = Error;
fn try_from(value: u64) -> Result<Self> {
MMapFlags::from_bits(value as u32)
.ok_or_else(|| Error::with_message(Errno::EINVAL, "unknown mmap flags"))
}
}
#[derive(Debug)]
pub struct MmapArea {
base_addr: Vaddr,
current: AtomicUsize,
}
impl MmapArea {
pub const fn new() -> MmapArea {
MmapArea {
base_addr: INIT_STACK_BASE,
current: AtomicUsize::new(INIT_STACK_BASE),
}
}
pub fn mmap(
&self,
len: usize,
offset: usize,
vm_perm: VmPerm,
flags: MMapFlags,
vm_space: &VmSpace,
) -> Vaddr {
// TODO: how to respect flags?
if flags.complement().contains(MMapFlags::MAP_ANONYMOUS)
| flags.complement().contains(MMapFlags::MAP_PRIVATE)
{
panic!("Unsupported mmap flags {:?} now", flags);
}
if len % PAGE_SIZE != 0 {
panic!("Mmap only support page-aligned len");
}
if offset % PAGE_SIZE != 0 {
panic!("Mmap only support page-aligned offset");
}
let current = self.current.load(Ordering::Relaxed);
let vm_page_range = VmPageRange::new_range(current..(current + len));
vm_page_range.map_zeroed(vm_space, vm_perm);
self.current.store(current + len, Ordering::Relaxed);
debug!("mmap area start: 0x{:x}, size: {}", current, len);
current
}
/// Set mmap area to the default status. i.e., point current to base.
pub fn set_default(&self) {
self.current.store(self.base_addr, Ordering::Relaxed);
}
}
impl Clone for MmapArea {
fn clone(&self) -> Self {
let current = self.current.load(Ordering::Relaxed);
Self {
base_addr: self.base_addr.clone(),
current: AtomicUsize::new(current),
}
}
}

View File

@ -0,0 +1,34 @@
use crate::prelude::*;
// The definition of MMapFlags is from occlum
bitflags! {
pub struct MMapFlags : u32 {
const MAP_FILE = 0x0;
const MAP_SHARED = 0x1;
const MAP_PRIVATE = 0x2;
const MAP_SHARED_VALIDATE = 0x3;
const MAP_TYPE = 0xf;
const MAP_FIXED = 0x10;
const MAP_ANONYMOUS = 0x20;
const MAP_GROWSDOWN = 0x100;
const MAP_DENYWRITE = 0x800;
const MAP_EXECUTABLE = 0x1000;
const MAP_LOCKED = 0x2000;
const MAP_NORESERVE = 0x4000;
const MAP_POPULATE = 0x8000;
const MAP_NONBLOCK = 0x10000;
const MAP_STACK = 0x20000;
const MAP_HUGETLB = 0x40000;
const MAP_SYNC = 0x80000;
const MAP_FIXED_NOREPLACE = 0x100000;
}
}
impl TryFrom<u64> for MMapFlags {
type Error = Error;
fn try_from(value: u64) -> Result<Self> {
MMapFlags::from_bits(value as u32)
.ok_or_else(|| Error::with_message(Errno::EINVAL, "unknown mmap flags"))
}
}
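For reference, a minimal sketch of how the raw `flags` argument of a syscall decodes into `MMapFlags` (the raw value here is illustrative; `0x22` is `MAP_PRIVATE | MAP_ANONYMOUS` per the constants above):
```rust
// Sketch: decoding raw mmap flag bits (assumes the bitflags definition above).
let raw: u64 = 0x22; // MAP_PRIVATE (0x2) | MAP_ANONYMOUS (0x20)
let flags = MMapFlags::try_from(raw).expect("valid mmap flags");
assert!(flags.contains(MMapFlags::MAP_PRIVATE));
assert!(flags.contains(MMapFlags::MAP_ANONYMOUS));
assert!(!flags.contains(MMapFlags::MAP_FIXED));
```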

View File

@ -4,10 +4,9 @@
//! So we define a UserVm struct to store such information.
//! Briefly, it contains the exact usage of each segment of the virtual address space.
pub mod mmap_area;
pub mod mmap_flags;
pub mod user_heap;
use mmap_area::MmapArea;
use user_heap::UserHeap;
/*
@ -41,30 +40,20 @@ use user_heap::UserHeap;
#[derive(Debug, Clone)]
pub struct UserVm {
user_heap: UserHeap,
mmap_area: MmapArea,
}
impl UserVm {
pub const fn new() -> Self {
let user_heap = UserHeap::new();
let mmap_area = MmapArea::new();
UserVm {
user_heap,
mmap_area,
}
UserVm { user_heap }
}
pub fn user_heap(&self) -> &UserHeap {
&self.user_heap
}
pub fn mmap_area(&self) -> &MmapArea {
&self.mmap_area
}
/// Set user vm to the init status
pub fn set_default(&self) {
self.user_heap.set_default();
self.mmap_area.set_default();
}
}

View File

@ -1,12 +1,15 @@
use core::sync::atomic::{AtomicUsize, Ordering};
use crate::vm::perms::VmPerms;
use crate::{
memory::vm_page::{VmPage, VmPageRange},
prelude::*,
rights::Rights,
vm::vmo::{VmoFlags, VmoOptions},
};
use jinux_frame::vm::{VmPerm, VmSpace};
use jinux_frame::AlignExt;
pub const USER_HEAP_BASE: Vaddr = 0x0000_0000_1000_0000;
pub const USER_HEAP_SIZE_LIMIT: usize = PAGE_SIZE * 1000;
#[derive(Debug)]
pub struct UserHeap {
@ -23,37 +26,38 @@ impl UserHeap {
}
}
pub fn brk(&self, new_heap_end: Option<Vaddr>, vm_space: &VmSpace) -> Vaddr {
pub fn brk(&self, new_heap_end: Option<Vaddr>) -> Result<Vaddr> {
let current = current!();
let root_vmar = current.root_vmar().unwrap();
match new_heap_end {
None => return self.current_heap_end.load(Ordering::Relaxed),
None => {
// create a heap vmo for current process
let perms = VmPerms::READ | VmPerms::WRITE;
let vmo_options = VmoOptions::<Rights>::new(0).flags(VmoFlags::RESIZABLE);
let heap_vmo = vmo_options.alloc().unwrap();
let vmar_map_options = root_vmar
.new_map(heap_vmo, perms)
.unwrap()
.offset(USER_HEAP_BASE)
.size(USER_HEAP_SIZE_LIMIT);
vmar_map_options.build().unwrap();
return Ok(self.current_heap_end.load(Ordering::Relaxed));
}
Some(new_heap_end) => {
let current_heap_end = self.current_heap_end.load(Ordering::Acquire);
if new_heap_end < current_heap_end {
return current_heap_end;
// FIXME: should we allow shrinking the current user heap?
return Ok(current_heap_end);
}
let new_size = (new_heap_end - self.heap_base).align_up(PAGE_SIZE);
let heap_vmo = root_vmar.get_mapped_vmo(USER_HEAP_BASE)?;
heap_vmo.resize(new_size)?;
self.current_heap_end.store(new_heap_end, Ordering::Release);
let start_page = VmPage::containing_address(current_heap_end - 1).next_page();
let end_page = VmPage::containing_address(new_heap_end);
if end_page >= start_page {
let vm_pages = VmPageRange::new_page_range(start_page, end_page);
let vm_perm = UserHeap::user_heap_perm();
vm_pages.map_zeroed(vm_space, vm_perm);
debug!(
"map address: 0x{:x} - 0x{:x}",
vm_pages.start_address(),
vm_pages.end_address()
);
}
return new_heap_end;
return Ok(new_heap_end);
}
}
}
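With this refactor, `brk(None)` lazily maps a resizable heap VMO of `USER_HEAP_SIZE_LIMIT` bytes at `USER_HEAP_BASE` and returns the current heap end, while `brk(Some(addr))` only resizes that VMO. A hedged sketch of the calling side (names as introduced in this commit; error handling elided):
```rust
// Sketch: driving UserHeap from a brk-style handler (assumes this commit's APIs).
let current = current!();
let user_heap = current.user_heap().unwrap();
let old_end = user_heap.brk(None)?;                      // map the heap VMO, read current end
let new_end = user_heap.brk(Some(old_end + PAGE_SIZE))?; // grow the heap by one page
debug_assert_eq!(new_end, old_end + PAGE_SIZE);
```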
#[inline(always)]
const fn user_heap_perm() -> VmPerm {
VmPerm::RWXU
}
/// Set the heap to its default status, i.e., point the heap end to the heap base.
pub fn set_default(&self) {
self.current_heap_end

View File

@ -15,9 +15,9 @@ use jinux_frame::{cpu::CpuContext, task::Task};
use self::c_types::siginfo_t;
use self::sig_mask::SigMask;
use self::sig_num::SigNum;
use crate::memory::{write_bytes_to_user, write_val_to_user};
use crate::process::signal::c_types::ucontext_t;
use crate::process::signal::sig_action::SigActionFlags;
use crate::util::{write_bytes_to_user, write_val_to_user};
use crate::{
prelude::*,
process::signal::sig_action::{SigAction, SigDefaultAction},

View File

@ -4,36 +4,37 @@ use jinux_frame::{
cpu::CpuContext,
task::Task,
user::{UserEvent, UserMode, UserSpace},
vm::VmSpace,
};
use crate::{
prelude::*,
process::{exception::handle_exception, signal::handle_pending_signal},
rights::Full,
vm::vmar::Vmar,
};
use crate::syscall::handle_syscall;
use super::{elf::load_elf_to_vm_space, Process};
use super::{elf::load_elf_to_root_vmar, Process};
static COUNTER: AtomicUsize = AtomicUsize::new(0);
pub fn create_user_task_from_elf(
root_vmar: &Vmar<Full>,
filename: CString,
elf_file_content: &[u8],
elf_file_content: &'static [u8],
parent: Weak<Process>,
argv: Vec<CString>,
envp: Vec<CString>,
) -> Arc<Task> {
let vm_space = VmSpace::new();
let elf_load_info = load_elf_to_vm_space(filename, elf_file_content, &vm_space, argv, envp)
let elf_load_info = load_elf_to_root_vmar(filename, elf_file_content, &root_vmar, argv, envp)
.expect("Load Elf failed");
let vm_space = root_vmar.vm_space().clone();
let mut cpu_ctx = CpuContext::default();
// set entry point
cpu_ctx.gp_regs.rip = elf_load_info.entry_point();
// set user stack
cpu_ctx.gp_regs.rsp = elf_load_info.user_stack_top();
let user_space = Arc::new(UserSpace::new(vm_space, cpu_ctx));
create_new_task(user_space, parent)
}
@ -44,8 +45,7 @@ pub fn create_new_task(userspace: Arc<UserSpace>, parent: Weak<Process>) -> Arc<
let cur = Task::current();
let user_space = cur.user_space().expect("user task should have user space");
let mut user_mode = UserMode::new(user_space);
debug!("In new task");
debug!("[new task] pid = {}", current!().pid());
debug!("In user task entry:");
debug!("[new task] rip = 0x{:x}", user_space.cpu_ctx.gp_regs.rip);
debug!("[new task] rsp = 0x{:x}", user_space.cpu_ctx.gp_regs.rsp);
debug!("[new task] rax = 0x{:x}", user_space.cpu_ctx.gp_regs.rax);
@ -72,7 +72,7 @@ pub fn create_new_task(userspace: Arc<UserSpace>, parent: Weak<Process>) -> Arc<
}
}
debug!("exit user loop");
// Work around: exit in kernel task entry may be not called. Why this will happen?
// FIXME: This is a workaround: exit in the kernel task entry may not be called. Why does this happen?
Task::current().exit();
}

View File

@ -1,8 +1,8 @@
use super::{constants::*, SyscallReturn};
use crate::{memory::read_cstring_from_user, prelude::*, syscall::SYS_ACCESS};
use crate::{log_syscall_entry, prelude::*, syscall::SYS_ACCESS, util::read_cstring_from_user};
pub fn sys_access(filename_ptr: Vaddr, file_mode: u64) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_ACCESS]", SYS_ACCESS);
log_syscall_entry!(SYS_ACCESS);
let filename = read_cstring_from_user(filename_ptr, MAX_FILENAME_LEN)?;
debug!("filename: {:?}, file_mode = {}", filename, file_mode);
// TODO: access currently does not perform any check and just returns success

View File

@ -1,7 +1,7 @@
use jinux_frame::cpu::CpuContext;
use crate::prelude::*;
use crate::syscall::SYS_ARCH_PRCTL;
use crate::{log_syscall_entry, prelude::*};
use super::SyscallReturn;
@ -29,7 +29,7 @@ impl TryFrom<u64> for ArchPrctlCode {
}
pub fn sys_arch_prctl(code: u64, addr: u64, context: &mut CpuContext) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_ARCH_PRCTL]", SYS_ARCH_PRCTL);
log_syscall_entry!(SYS_ARCH_PRCTL);
let arch_prctl_code = ArchPrctlCode::try_from(code)?;
debug!(
"arch_prctl_code: {:?}, addr = 0x{:x}",

View File

@ -1,3 +1,4 @@
use crate::log_syscall_entry;
use crate::prelude::*;
use crate::syscall::SyscallReturn;
@ -5,7 +6,7 @@ use crate::syscall::SYS_BRK;
/// expand the user heap to the new heap end; returns the new heap end if the expansion succeeds.
pub fn sys_brk(heap_end: u64) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_BRK]", SYS_BRK);
log_syscall_entry!(SYS_BRK);
let new_heap_end = if heap_end == 0 {
None
} else {
@ -13,13 +14,8 @@ pub fn sys_brk(heap_end: u64) -> Result<SyscallReturn> {
};
debug!("new heap end = {:x?}", heap_end);
let current = current!();
let user_heap = current
.user_heap()
.expect("brk should work on process with user heap");
let vm_space = current
.vm_space()
.expect("brk should work on process with user space");
let new_heap_end = user_heap.brk(new_heap_end, vm_space);
let user_heap = current.user_heap().unwrap();
let new_heap_end = user_heap.brk(new_heap_end)?;
Ok(SyscallReturn::Return(new_heap_end as _))
}

View File

@ -1,5 +1,6 @@
use jinux_frame::cpu::CpuContext;
use crate::log_syscall_entry;
use crate::process::clone::{clone_child, CloneArgs, CloneFlags};
use crate::{prelude::*, syscall::SYS_CLONE};
@ -15,11 +16,12 @@ pub fn sys_clone(
tls: usize,
parent_context: CpuContext,
) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_CLONE]", SYS_CLONE);
log_syscall_entry!(SYS_CLONE);
let clone_flags = CloneFlags::from(clone_flags);
debug!("flags = {:?}, child_stack_ptr = 0x{:x}, parent_tid_ptr = 0x{:x}, child tid ptr = 0x{:x}, tls = 0x{:x}", clone_flags, new_sp, parent_tidptr, child_tidptr, tls);
let clone_args = CloneArgs::new(new_sp, parent_tidptr, child_tidptr, tls, clone_flags);
let child_process = clone_child(parent_context, clone_args).unwrap();
let child_pid = child_process.pid();
let pid = current!().pid();
debug!("*********schedule child process, pid = {}**********", pid);

View File

@ -1,17 +1,14 @@
use super::SyscallReturn;
use super::SYS_CLOSE;
use crate::log_syscall_entry;
use crate::{fs::file::FileDescripter, prelude::*};
pub fn sys_close(fd: FileDescripter) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_CLOSE]", SYS_CLOSE);
log_syscall_entry!(SYS_CLOSE);
debug!("fd = {}", fd);
let current = current!();
let mut file_table = current.file_table().lock();
match file_table.get_file(fd) {
None => return_errno!(Errno::EBADF),
Some(_) => {
let _ = file_table.get_file(fd)?;
file_table.close_file(fd);
Ok(SyscallReturn::Return(0))
}
}
}

View File

@ -1,8 +1,9 @@
use jinux_frame::cpu::CpuContext;
use super::{constants::*, SyscallReturn};
use crate::memory::{read_cstring_from_user, read_val_from_user};
use crate::process::elf::load_elf_to_vm_space;
use crate::log_syscall_entry;
use crate::process::elf::load_elf_to_root_vmar;
use crate::util::{read_cstring_from_user, read_val_from_user};
use crate::{prelude::*, syscall::SYS_EXECVE};
pub fn sys_execve(
@ -11,7 +12,7 @@ pub fn sys_execve(
envp_ptr_ptr: Vaddr,
context: &mut CpuContext,
) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_EXECVE]", SYS_EXECVE);
log_syscall_entry!(SYS_EXECVE);
let filename = read_cstring_from_user(filename_ptr, MAX_FILENAME_LEN)?;
let argv = read_cstring_vec(argv_ptr_ptr, MAX_ARGV_NUMBER, MAX_ARG_LEN)?;
let envp = read_cstring_vec(envp_ptr_ptr, MAX_ENVP_NUMBER, MAX_ENV_LEN)?;
@ -25,17 +26,17 @@ pub fn sys_execve(
let elf_file_content = crate::user_apps::read_execve_hello_content();
let current = current!();
// Set process vm space to default
let vm_space = current
.vm_space()
// clear the root vmar
let root_vmar = current
.root_vmar()
.expect("[Internal Error] User process should have vm space");
vm_space.clear();
root_vmar.clear()?;
let user_vm = current
.user_vm()
.expect("[Internal Error] User process should have user vm");
user_vm.set_default();
// load elf content to the root vmar
let elf_load_info = load_elf_to_vm_space(filename, elf_file_content, &vm_space, argv, envp)
let elf_load_info = load_elf_to_root_vmar(filename, elf_file_content, root_vmar, argv, envp)
.expect("load elf failed");
debug!("load elf in execve succeeds");
// set signal disposition to default
@ -74,7 +75,7 @@ fn read_cstring_vec(
res.push(cstring);
}
if !find_null {
return_errno!(Errno::E2BIG);
return_errno_with_message!(Errno::E2BIG, "Cannot find null pointer in vector");
}
Ok(res)
}

View File

@ -1,9 +1,9 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::syscall::{SyscallReturn, SYS_EXIT};
pub fn sys_exit(exit_code: i32) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_EXIT]", SYS_EXIT);
log_syscall_entry!(SYS_EXIT);
current!().exit(exit_code);
Ok(SyscallReturn::Return(0))
}

View File

@ -1,10 +1,10 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::syscall::{SyscallReturn, SYS_EXIT_GROUP};
/// Exit all threads in a process.
pub fn sys_exit_group(exit_code: u64) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_EXIT_GROUP]", SYS_EXIT_GROUP);
log_syscall_entry!(SYS_EXIT_GROUP);
current!().exit(exit_code as _);
Ok(SyscallReturn::Return(0))
}

View File

@ -1,9 +1,10 @@
use super::{SyscallReturn, SYS_FCNTL};
use crate::fs::fcntl::FcntlCmd;
use crate::log_syscall_entry;
use crate::{fs::file::FileDescripter, prelude::*};
pub fn sys_fcntl(fd: FileDescripter, cmd: i32, arg: u64) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_FCNTL]", SYS_FCNTL);
log_syscall_entry!(SYS_FCNTL);
let fcntl_cmd = FcntlCmd::try_from(cmd)?;
debug!("fd = {}, cmd = {:?}, arg = {}", fd, fcntl_cmd, arg);
match fcntl_cmd {

View File

@ -1,4 +1,5 @@
use crate::{
log_syscall_entry,
prelude::*,
process::clone::{clone_child, CloneArgs},
};
@ -9,7 +10,7 @@ use crate::{process::Process, syscall::SYS_FORK};
use super::SyscallReturn;
pub fn sys_fork(parent_context: CpuContext) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_FORK]", SYS_FORK);
log_syscall_entry!(SYS_FORK);
let child_process = fork(parent_context);
Ok(SyscallReturn::Return(child_process.pid() as _))
}

View File

@ -1,19 +1,19 @@
use jinux_frame::vm::VmIo;
use crate::fs::stat::Stat;
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::syscall::{SyscallReturn, SYS_FSTAT};
pub fn sys_fstat(fd: u64, stat_buf_ptr: Vaddr) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_FSTAT]", SYS_FSTAT);
log_syscall_entry!(SYS_FSTAT);
debug!("fd = {}, stat_buf_addr = 0x{:x}", fd, stat_buf_ptr);
let current = current!();
let vm_space = current.vm_space().unwrap();
let root_vmar = current.root_vmar().unwrap();
if fd == 1 {
let stat = Stat::stdout_stat();
vm_space.write_val(stat_buf_ptr, &stat)?;
root_vmar.write_val(stat_buf_ptr, &stat)?;
return Ok(SyscallReturn::Return(0));
}
// TODO: fstat only returns fake result now

View File

@ -2,9 +2,10 @@ use core::sync::atomic::{AtomicBool, Ordering};
use crate::process::{Pid, Process};
use crate::syscall::SyscallReturn;
use crate::{memory::read_val_from_user, syscall::SYS_FUTEX};
use crate::syscall::SYS_FUTEX;
use crate::util::read_val_from_user;
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use jinux_frame::cpu::num_cpus;
type FutexBitSet = u32;
@ -22,7 +23,7 @@ pub fn sys_futex(
futex_new_addr: u64,
bitset: u64,
) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_FUTEX]", SYS_FUTEX);
log_syscall_entry!(SYS_FUTEX);
// FIXME: we currently ignore futex flags
let (futex_op, futex_flags) = futex_op_and_flags_from_u32(futex_op as _).unwrap();

View File

@ -1,11 +1,12 @@
use crate::memory::write_bytes_to_user;
use crate::log_syscall_entry;
use crate::prelude::*;
use crate::util::write_bytes_to_user;
use super::SyscallReturn;
use super::SYS_GETCWD;
pub fn sys_getcwd(buf: Vaddr, len: usize) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_GETCWD]", SYS_GETCWD);
log_syscall_entry!(SYS_GETCWD);
// TODO: getcwd only returns a fake result now
let fake_cwd = CString::new("/")?;
let bytes = fake_cwd.as_bytes_with_nul();

View File

@ -1,9 +1,9 @@
use crate::{prelude::*, syscall::SYS_GETEGID};
use crate::{log_syscall_entry, prelude::*, syscall::SYS_GETEGID};
use super::SyscallReturn;
pub fn sys_getegid() -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_GETEGID]", SYS_GETEGID);
log_syscall_entry!(SYS_GETEGID);
// TODO: getegid only returns a fake egid now
Ok(SyscallReturn::Return(0))
}

View File

@ -1,9 +1,9 @@
use crate::{prelude::*, syscall::SYS_GETEUID};
use crate::{log_syscall_entry, prelude::*, syscall::SYS_GETEUID};
use super::SyscallReturn;
pub fn sys_geteuid() -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_GETEUID]", SYS_GETEUID);
log_syscall_entry!(SYS_GETEUID);
// TODO: geteuid only returns a fake euid now
Ok(SyscallReturn::Return(0))
}

View File

@ -1,9 +1,9 @@
use crate::{prelude::*, syscall::SYS_GETGID};
use crate::{log_syscall_entry, prelude::*, syscall::SYS_GETGID};
use super::SyscallReturn;
pub fn sys_getgid() -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_GETGID]", SYS_GETGID);
log_syscall_entry!(SYS_GETGID);
// TODO: getgid only returns a fake gid now
Ok(SyscallReturn::Return(0))
}

View File

@ -1,8 +1,8 @@
use super::{SyscallReturn, SYS_GETPGRP};
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
pub fn sys_getpgrp() -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_GETPGRP]", SYS_GETPGRP);
log_syscall_entry!(SYS_GETPGRP);
let current = current!();
Ok(SyscallReturn::Return(current.pgid() as _))
}

View File

@ -1,11 +1,11 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::syscall::SYS_GETPID;
use super::SyscallReturn;
pub fn sys_getpid() -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_GETPID]", SYS_GETPID);
log_syscall_entry!(SYS_GETPID);
let pid = current!().pid();
debug!("[sys_getpid]: pid = {}", pid);
Ok(SyscallReturn::Return(pid as _))

View File

@ -1,10 +1,11 @@
use crate::log_syscall_entry;
use crate::prelude::*;
use super::SyscallReturn;
use super::SYS_GETPPID;
pub fn sys_getppid() -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_GETPPID]", SYS_GETPPID);
log_syscall_entry!(SYS_GETPPID);
let current = current!();
let parent = current.parent();
match parent {

View File

@ -1,11 +1,11 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::syscall::SYS_GETTID;
use super::SyscallReturn;
pub fn sys_gettid() -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_GETTID]", SYS_GETTID);
log_syscall_entry!(SYS_GETTID);
// For single-thread process, tid is equal to pid
let tid = current!().pid();
Ok(SyscallReturn::Return(tid as _))

View File

@ -1,9 +1,9 @@
use crate::{prelude::*, syscall::SYS_GETUID};
use crate::{log_syscall_entry, prelude::*, syscall::SYS_GETUID};
use super::SyscallReturn;
pub fn sys_getuid() -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_GETUID]", SYS_GETUID);
log_syscall_entry!(SYS_GETUID);
// TODO: getuid only returns a fake uid now
Ok(SyscallReturn::Return(0))
}

View File

@ -1,12 +1,13 @@
use crate::fs::file::FileDescripter;
use crate::fs::ioctl::IoctlCmd;
use crate::log_syscall_entry;
use crate::prelude::*;
use super::SyscallReturn;
use super::SYS_IOCTL;
pub fn sys_ioctl(fd: FileDescripter, cmd: u32, arg: Vaddr) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_IOCTL]", SYS_IOCTL);
log_syscall_entry!(SYS_IOCTL);
let ioctl_cmd = IoctlCmd::try_from(cmd)?;
debug!(
"fd = {}, ioctl_cmd = {:?}, arg = 0x{:x}",
@ -14,11 +15,7 @@ pub fn sys_ioctl(fd: FileDescripter, cmd: u32, arg: Vaddr) -> Result<SyscallRetu
);
let current = current!();
let file_table = current.file_table().lock();
match file_table.get_file(fd) {
None => return_errno_with_message!(Errno::EBADF, "Fd does not exist"),
Some(file) => {
let file = file_table.get_file(fd)?;
let res = file.ioctl(ioctl_cmd, arg)?;
return Ok(SyscallReturn::Return(res as _));
}
}
}

View File

@ -1,4 +1,4 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::process::signal::signals::user::{UserSignal, UserSignalKind};
use crate::process::{table, Process};
@ -10,7 +10,7 @@ use crate::{
use super::SyscallReturn;
pub fn sys_kill(process_filter: u64, sig_num: u64) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_KILL]", SYS_KILL);
log_syscall_entry!(SYS_KILL);
let process_filter = ProcessFilter::from_id(process_filter as _);
let sig_num = SigNum::try_from(sig_num as u8).unwrap();
debug!(
@ -49,7 +49,9 @@ fn get_processes(filter: &ProcessFilter) -> Result<Vec<Arc<Process>>> {
ProcessFilter::WithPid(pid) => {
let process = table::pid_to_process(*pid);
match process {
None => return_errno!(Errno::ESRCH),
None => {
return_errno_with_message!(Errno::ESRCH, "No such process in process table")
}
Some(process) => vec![process],
}
}

View File

@ -1,10 +1,11 @@
use crate::log_syscall_entry;
use crate::{fs::file::FileDescripter, prelude::*};
use super::SyscallReturn;
use super::SYS_LSEEK;
pub fn sys_lseek(fd: FileDescripter, offset: usize, whence: u32) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_LSEEK]", SYS_LSEEK);
log_syscall_entry!(SYS_LSEEK);
debug!("fd = {}, offset = {}, whence = {}", fd, offset, whence);
// TODO: do lseek
Ok(SyscallReturn::Return(0))

View File

@ -1,14 +1,15 @@
use crate::fs::stat::Stat;
use crate::memory::read_cstring_from_user;
use crate::memory::write_val_to_user;
use crate::log_syscall_entry;
use crate::prelude::*;
use crate::syscall::constants::MAX_FILENAME_LEN;
use crate::util::read_cstring_from_user;
use crate::util::write_val_to_user;
use super::SyscallReturn;
use super::SYS_LSTAT;
pub fn sys_lstat(filename_ptr: Vaddr, stat_buf_ptr: Vaddr) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_LSTAT]", SYS_LSTAT);
log_syscall_entry!(SYS_LSTAT);
let filename = read_cstring_from_user(filename_ptr, MAX_FILENAME_LEN)?;
debug!(
"filename = {:?}, stat_buf_ptr = 0x{:x}",

View File

@ -1,7 +1,10 @@
//! This module defines mmap flags and the handler for the mmap syscall
use crate::prelude::*;
use crate::process::process_vm::mmap_area::MMapFlags;
use crate::process::process_vm::mmap_flags::MMapFlags;
use crate::rights::Rights;
use crate::vm::perms::VmPerms;
use crate::vm::vmo::VmoOptions;
use crate::{log_syscall_entry, prelude::*};
use jinux_frame::vm::VmPerm;
use crate::syscall::SYS_MMAP;
@ -16,7 +19,7 @@ pub fn sys_mmap(
fd: u64,
offset: u64,
) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_MMAP]", SYS_MMAP);
log_syscall_entry!(SYS_MMAP);
let perms = VmPerm::try_from(perms).unwrap();
let flags = MMapFlags::try_from(flags).unwrap();
let res = do_sys_mmap(
@ -26,7 +29,7 @@ pub fn sys_mmap(
flags,
fd as usize,
offset as usize,
);
)?;
Ok(SyscallReturn::Return(res as _))
}
@ -37,25 +40,47 @@ pub fn do_sys_mmap(
flags: MMapFlags,
fd: usize,
offset: usize,
) -> Vaddr {
debug!(
) -> Result<Vaddr> {
info!(
"addr = 0x{:x}, len = 0x{:x}, perms = {:?}, flags = {:?}, fd = {}, offset = 0x{:x}",
addr, len, vm_perm, flags, fd, offset
);
if flags.contains(MMapFlags::MAP_ANONYMOUS) & !flags.contains(MMapFlags::MAP_FIXED) {
// only support map anonymous areas on **NOT** fixed addr now
if flags.contains(MMapFlags::MAP_ANONYMOUS) {
// only support mapping anonymous areas now.
mmap_anonymous_vmo(len, offset, vm_perm, flags)
} else {
panic!("Unsupported mmap flags: {:?}", flags);
}
let current = current!();
let mmap_area = current
.mmap_area()
.expect("mmap should work on process with mmap area");
let vm_space = current
.vm_space()
.expect("mmap should work on process with user space");
// current.mmap(len, vm_perm, flags, offset)
mmap_area.mmap(len, offset, vm_perm, flags, vm_space)
}
pub fn mmap_anonymous_vmo(
len: usize,
offset: usize,
vm_perm: VmPerm,
flags: MMapFlags,
) -> Result<Vaddr> {
// TODO: how to respect flags?
if flags.complement().contains(MMapFlags::MAP_ANONYMOUS)
| flags.complement().contains(MMapFlags::MAP_PRIVATE)
{
panic!("Unsupported mmap flags {:?} now", flags);
}
if len % PAGE_SIZE != 0 {
panic!("mmap only supports page-aligned len");
}
if offset % PAGE_SIZE != 0 {
panic!("mmap only supports page-aligned offset");
}
let vmo_options: VmoOptions<Rights> = VmoOptions::new(len);
let vmo = vmo_options.alloc()?;
let current = current!();
let root_vmar = current.root_vmar().unwrap();
let perms = VmPerms::from(vm_perm);
let mut vmar_map_options = root_vmar.new_map(vmo, perms)?;
if flags.contains(MMapFlags::MAP_FIXED) {
vmar_map_options = vmar_map_options.offset(offset);
}
Ok(vmar_map_options.build()?)
}
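A sketch of the resulting call path for an anonymous mapping (argument values are illustrative; the fd is ignored for anonymous mappings here):
```rust
// Sketch: one anonymous, private, kernel-placed page (assumes this commit's APIs).
let vm_perm = VmPerm::RWXU; // permission taken from this codebase; the choice is illustrative
let flags = MMapFlags::MAP_ANONYMOUS | MMapFlags::MAP_PRIVATE;
let addr = do_sys_mmap(0, PAGE_SIZE, vm_perm, flags, 0, 0)?;
debug!("anonymous page mapped at 0x{:x}", addr);
```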

View File

@ -274,3 +274,11 @@ pub fn syscall_dispatch(
_ => panic!("Unsupported syscall number: {}", syscall_number),
}
}
#[macro_export]
macro_rules! log_syscall_entry {
($syscall_name: tt) => {
let syscall_name_str = stringify!($syscall_name);
info!("[SYSCALL][id={}][{}]", $syscall_name, syscall_name_str);
};
}
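For example, `log_syscall_entry!(SYS_READ);` expands to roughly the following, so every syscall logs both its numeric id and its name:
```rust
// Rough expansion of log_syscall_entry!(SYS_READ):
let syscall_name_str = stringify!(SYS_READ); // "SYS_READ"
info!("[SYSCALL][id={}][{}]", SYS_READ, syscall_name_str);
```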

View File

@ -1,13 +1,13 @@
use jinux_frame::vm::VmPerm;
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::syscall::SYS_MPROTECT;
use super::SyscallReturn;
pub fn sys_mprotect(vaddr: u64, len: u64, perms: u64) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_MPROTECT]", SYS_MPROTECT);
log_syscall_entry!(SYS_MPROTECT);
let perms = VmPerm::try_from(perms).unwrap();
do_sys_mprotect(vaddr as Vaddr, len as usize, perms);
Ok(SyscallReturn::Return(0))

View File

@ -1,10 +1,11 @@
use crate::log_syscall_entry;
use crate::prelude::*;
use super::SyscallReturn;
use super::SYS_MUNMAP;
pub fn sys_munmap(addr: Vaddr, len: usize) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_MUNMAP]", SYS_MUNMAP);
log_syscall_entry!(SYS_MUNMAP);
debug!("addr = 0x{:x}, len = {}", addr, len);
//TODO: do munmap
Ok(SyscallReturn::Return(0))

View File

@ -1,9 +1,10 @@
use crate::fs::file::File;
use crate::fs::file::FileDescripter;
use crate::memory::read_cstring_from_user;
use crate::log_syscall_entry;
use crate::prelude::*;
use crate::syscall::constants::MAX_FILENAME_LEN;
use crate::tty::get_console;
use crate::util::read_cstring_from_user;
use super::SyscallReturn;
use super::SYS_OPENAT;
@ -16,7 +17,7 @@ pub fn sys_openat(
flags: i32,
mode: u16,
) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_OPENAT]", SYS_OPENAT);
log_syscall_entry!(SYS_OPENAT);
let pathname = read_cstring_from_user(pathname_addr, MAX_FILENAME_LEN)?;
debug!(
"dirfd = {}, pathname = {:?}, flags = {}, mode = {}",
@ -28,15 +29,15 @@ pub fn sys_openat(
// Below are three special files we encountered when running busybox ash.
// We currently only return ENOENT, which means the file does not exist.
if dirfd == AT_FDCWD && pathname == CString::new("/etc/passwd")? {
return_errno!(Errno::ENOENT);
return_errno_with_message!(Errno::ENOENT, "No such file");
}
if dirfd == AT_FDCWD && pathname == CString::new("/etc/profile")? {
return_errno!(Errno::ENOENT);
return_errno_with_message!(Errno::ENOENT, "No such file");
}
if dirfd == AT_FDCWD && pathname == CString::new("./trace")? {
return_errno!(Errno::ENOENT);
return_errno_with_message!(Errno::ENOENT, "No such file");
}
if dirfd == AT_FDCWD && pathname == CString::new("/dev/tty")? {
@ -44,7 +45,6 @@ pub fn sys_openat(
let current = current!();
let mut file_table = current.file_table().lock();
let fd = file_table.insert(tty_file);
debug!("openat fd = {}", fd);
return Ok(SyscallReturn::Return(fd as _));
}
todo!()

View File

@ -1,14 +1,15 @@
use core::time::Duration;
use crate::fs::poll::{c_pollfd, PollFd};
use crate::memory::{read_val_from_user, write_val_to_user};
use crate::log_syscall_entry;
use crate::util::{read_val_from_user, write_val_to_user};
use crate::{fs::poll::c_nfds, prelude::*};
use super::SyscallReturn;
use super::SYS_POLL;
pub fn sys_poll(fds: Vaddr, nfds: c_nfds, timeout: i32) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_POLL]", SYS_POLL);
log_syscall_entry!(SYS_POLL);
let mut read_addr = fds;
let mut pollfds = Vec::with_capacity(nfds as _);
@ -36,8 +37,8 @@ pub fn sys_poll(fds: Vaddr, nfds: c_nfds, timeout: i32) -> Result<SyscallReturn>
let file_table = current.file_table().lock();
let file = file_table.get_file(pollfd.fd);
match file {
None => return Some(Err(Error::new(Errno::EBADF))),
Some(file) => {
Err(_) => return Some(Err(Error::new(Errno::EBADF))),
Ok(file) => {
let file_events = file.poll();
let polled_events = pollfd.events.intersection(file_events);
if !polled_events.is_empty() {

View File

@ -1,12 +1,13 @@
use crate::memory::read_cstring_from_user;
use crate::memory::write_bytes_to_user;
use crate::log_syscall_entry;
use crate::prelude::*;
use crate::process::name::MAX_PROCESS_NAME_LEN;
use crate::util::read_cstring_from_user;
use crate::util::write_bytes_to_user;
use super::SyscallReturn;
use super::SYS_PRCTL;
pub fn sys_prctl(option: i32, arg2: u64, arg3: u64, arg4: u64, arg5: u64) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_PRCTL]", SYS_PRCTL);
log_syscall_entry!(SYS_PRCTL);
let prctl_cmd = PrctlCmd::from_args(option, arg2, arg3, arg4, arg5)?;
debug!("prctl cmd = {:?}", prctl_cmd);
let current = current!();

View File

@ -1,33 +1,21 @@
use crate::memory::write_bytes_to_user;
use crate::log_syscall_entry;
use crate::util::write_bytes_to_user;
use crate::{fs::file::FileDescripter, prelude::*};
use super::SyscallReturn;
use super::SYS_READ;
pub fn sys_read(fd: FileDescripter, user_buf_addr: Vaddr, buf_len: usize) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_READ]", SYS_READ);
log_syscall_entry!(SYS_READ);
debug!(
"fd = {}, user_buf_ptr = 0x{:x}, buf_len = 0x{:x}",
fd, user_buf_addr, buf_len
);
let current = current!();
let file_table = current.file_table().lock();
let file = file_table.get_file(fd);
match file {
None => return_errno!(Errno::EBADF),
Some(file) => {
let file = file_table.get_file(fd)?;
let mut read_buf = vec![0u8; buf_len];
let read_len = file.read(&mut read_buf)?;
write_bytes_to_user(user_buf_addr, &read_buf)?;
debug!(
"read_len = {}, read_buf = {:?}",
read_len,
&read_buf[..read_len]
);
// let read_str = core::str::from_utf8(&read_buf[..read_len - 1]).unwrap();
// println!("str = {}" ,read_str);
// todo!();
return Ok(SyscallReturn::Return(read_len as _));
}
}
}

View File

@ -1,9 +1,7 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::{
memory::{read_bytes_from_user, write_bytes_to_user},
syscall::SYS_READLINK,
};
use crate::syscall::SYS_READLINK;
use crate::util::{read_bytes_from_user, write_bytes_to_user};
use super::SyscallReturn;
@ -14,7 +12,7 @@ pub fn sys_readlink(
user_buf_ptr: u64,
user_buf_len: u64,
) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_READLINK]", SYS_READLINK);
log_syscall_entry!(SYS_READLINK);
let res = do_sys_readlink(
filename_ptr as Vaddr,
user_buf_ptr as Vaddr,

View File

@ -1,8 +1,9 @@
use crate::{
memory::{read_val_from_user, write_val_to_user},
log_syscall_entry,
prelude::*,
process::signal::{c_types::sigaction_t, sig_action::SigAction, sig_num::SigNum},
syscall::SYS_RT_SIGACTION,
util::{read_val_from_user, write_val_to_user},
};
use super::SyscallReturn;
@ -13,7 +14,7 @@ pub fn sys_rt_sigaction(
old_sig_action_ptr: Vaddr,
sigset_size: u64,
) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_RT_SIGACTION]", SYS_RT_SIGACTION);
log_syscall_entry!(SYS_RT_SIGACTION);
let sig_num = SigNum::try_from(sig_num)?;
let sig_action_c = read_val_from_user::<sigaction_t>(sig_action_ptr)?;
let sig_action = SigAction::try_from(sig_action_c).unwrap();

View File

@ -1,6 +1,7 @@
use jinux_frame::vm::VmIo;
use crate::{
log_syscall_entry,
prelude::*,
syscall::{SyscallReturn, SYS_RT_SIGPROCMASK},
};
@ -11,7 +12,7 @@ pub fn sys_rt_sigprocmask(
oldset_ptr: Vaddr,
sigset_size: usize,
) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_RT_SIGPROCMASK]", SYS_RT_SIGPROCMASK);
log_syscall_entry!(SYS_RT_SIGPROCMASK);
let mask_op = MaskOp::try_from(how).unwrap();
debug!(
"mask op = {:?}, set_ptr = 0x{:x}, oldset_ptr = 0x{:x}, sigset_size = {}",
@ -31,15 +32,15 @@ fn do_rt_sigprocmask(
sigset_size: usize,
) -> Result<()> {
let current = current!();
let vm_space = current.vm_space().unwrap();
let root_vmar = current.root_vmar().unwrap();
let mut sig_mask = current.sig_mask().lock();
let old_sig_mask_value = sig_mask.as_u64();
debug!("old sig mask value: 0x{:x}", old_sig_mask_value);
if oldset_ptr != 0 {
vm_space.write_val(oldset_ptr, &old_sig_mask_value)?;
root_vmar.write_val(oldset_ptr, &old_sig_mask_value)?;
}
if set_ptr != 0 {
let new_set = vm_space.read_val::<u64>(set_ptr)?;
let new_set = root_vmar.read_val::<u64>(set_ptr)?;
debug!("new set = 0x{:x}", new_set);
match mask_op {
MaskOp::Block => sig_mask.block(new_set),

View File

@ -1,10 +1,12 @@
use crate::{memory::read_val_from_user, prelude::*, process::signal::c_types::ucontext_t};
use crate::{
log_syscall_entry, prelude::*, process::signal::c_types::ucontext_t, util::read_val_from_user,
};
use jinux_frame::cpu::CpuContext;
use super::{SyscallReturn, SYS_RT_SIGRETRUN};
pub fn sys_rt_sigreturn(context: &mut CpuContext) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_RT_SIGRETURN]", SYS_RT_SIGRETRUN);
log_syscall_entry!(SYS_RT_SIGRETRUN);
let current = current!();
let sig_context = current.sig_context().lock().pop_back().unwrap();
let ucontext = read_val_from_user::<ucontext_t>(sig_context)?;

View File

@ -1,11 +1,11 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::{process::Process, syscall::SYS_SCHED_YIELD};
use super::SyscallReturn;
pub fn sys_sched_yield() -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_SCHED_YIELD]", SYS_SCHED_YIELD);
log_syscall_entry!(SYS_SCHED_YIELD);
Process::yield_now();
Ok(SyscallReturn::Return(0))
}

View File

@ -1,4 +1,5 @@
use crate::{
log_syscall_entry,
prelude::*,
process::{process_group::ProcessGroup, table, Pgid, Pid},
};
@ -6,7 +7,7 @@ use crate::{
use super::{SyscallReturn, SYS_SETPGID};
pub fn sys_setpgid(pid: Pid, pgid: Pgid) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_SETPGID]", SYS_SETPGID);
log_syscall_entry!(SYS_SETPGID);
let current = current!();
// if pid is 0, pid should be the pid of current process
let pid = if pid == 0 { current.pid() } else { pid };

View File

@ -1,4 +1,4 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::process::signal::sig_num::SigNum;
use crate::process::signal::signals::user::{UserSignal, UserSignalKind};
@ -11,7 +11,7 @@ use super::SyscallReturn;
/// Since Jinux only supports single-threaded processes now, tgkill will send the signal to the process with pid as its process id,
/// and tgid as its process group id.
pub fn sys_tgkill(tgid: Pgid, pid: Pid, sig_num: u8) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_TGKILL]", SYS_TGKILL);
log_syscall_entry!(SYS_TGKILL);
let sig_num = SigNum::from_u8(sig_num);
debug!("tgid = {}, pid = {}, sig_num = {:?}", tgid, pid, sig_num);
let target_process =

View File

@ -1,6 +1,7 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::{memory::write_val_to_user, syscall::SYS_UNAME};
use crate::syscall::SYS_UNAME;
use crate::util::write_val_to_user;
use super::SyscallReturn;
@ -59,7 +60,7 @@ fn copy_cstring_to_u8_slice(src: &CStr, dst: &mut [u8]) {
}
pub fn sys_uname(old_uname_addr: Vaddr) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_UNAME]", SYS_UNAME);
log_syscall_entry!(SYS_UNAME);
debug!("old uname addr = 0x{:x}", old_uname_addr);
write_val_to_user(old_uname_addr, &*UTS_NAME)?;
Ok(SyscallReturn::Return(0))

View File

@ -1,7 +1,8 @@
use crate::{
memory::write_val_to_user,
log_syscall_entry,
process::{process_filter::ProcessFilter, wait::wait_child_exit},
syscall::SYS_WAIT4,
util::write_val_to_user,
};
use crate::prelude::*;
@ -10,7 +11,7 @@ use crate::process::wait::WaitOptions;
use super::SyscallReturn;
pub fn sys_wait4(wait_pid: u64, exit_status_ptr: u64, wait_options: u32) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_WAIT4]", SYS_WAIT4);
log_syscall_entry!(SYS_WAIT4);
let wait_options = WaitOptions::from_bits(wait_options).expect("Unknown wait options");
debug!(
"pid = {}, exit_status_ptr = {}, wait_options: {:?}",

View File

@ -1,5 +1,5 @@
use crate::prelude::*;
use crate::process::{process_filter::ProcessFilter, wait::wait_child_exit};
use crate::{log_syscall_entry, prelude::*};
use crate::process::wait::WaitOptions;
@ -14,7 +14,7 @@ pub fn sys_waitid(
rusage_addr: u64,
) -> Result<SyscallReturn> {
// FIXME: what are infoq and rusage used for?
debug!("[syscall][id={}][SYS_WAITID]", SYS_WAITID);
log_syscall_entry!(SYS_WAITID);
let process_filter = ProcessFilter::from_which_and_id(which, upid);
let wait_options = WaitOptions::from_bits(options as u32).expect("Unknown wait options");
let (exit_code, pid) = wait_child_exit(process_filter, wait_options)?;

View File

@ -1,7 +1,8 @@
use crate::fs::file::FileDescripter;
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::{memory::read_bytes_from_user, syscall::SYS_WRITE};
use crate::syscall::SYS_WRITE;
use crate::util::read_bytes_from_user;
use super::SyscallReturn;
@ -13,8 +14,7 @@ pub fn sys_write(
user_buf_ptr: Vaddr,
user_buf_len: u64,
) -> Result<SyscallReturn> {
// only suppprt STDOUT now.
debug!("[syscall][id={}][SYS_WRITE]", SYS_WRITE);
log_syscall_entry!(SYS_WRITE);
debug!(
"fd = {}, user_buf_ptr = 0x{:x}, user_buf_len = 0x{:x}",
fd, user_buf_ptr, user_buf_len
@ -22,14 +22,9 @@ pub fn sys_write(
let current = current!();
let file_table = current.file_table().lock();
match file_table.get_file(fd) {
None => return_errno!(Errno::EBADF),
Some(file) => {
let file = file_table.get_file(fd)?;
let mut buffer = vec![0u8; user_buf_len as usize];
read_bytes_from_user(user_buf_ptr as usize, &mut buffer)?;
debug!("write buf = {:?}", buffer);
let write_len = file.write(&buffer)?;
Ok(SyscallReturn::Return(write_len as _))
}
}
}

View File

@ -1,9 +1,7 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::{
memory::{read_bytes_from_user, read_val_from_user},
syscall::SYS_WRITEV,
};
use crate::syscall::SYS_WRITEV;
use crate::util::{read_bytes_from_user, read_val_from_user};
use super::SyscallReturn;
@ -17,7 +15,7 @@ pub struct IoVec {
}
pub fn sys_writev(fd: u64, io_vec_ptr: u64, io_vec_count: u64) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_WRITEV]", SYS_WRITEV);
log_syscall_entry!(SYS_WRITEV);
let res = do_sys_writev(fd, io_vec_ptr as Vaddr, io_vec_count as usize)?;
Ok(SyscallReturn::Return(res as _))
}

View File

@ -3,8 +3,8 @@ use jinux_frame::receive_char;
use self::line_discipline::LineDiscipline;
use crate::fs::events::IoEvents;
use crate::fs::ioctl::IoctlCmd;
use crate::memory::{read_val_from_user, write_val_to_user};
use crate::process::Pgid;
use crate::util::{read_val_from_user, write_val_to_user};
use crate::{fs::file::File, prelude::*};
pub mod line_discipline;

View File

@ -1 +1,38 @@
use crate::prelude::*;
use jinux_frame::vm::VmIo;
use pod::Pod;
/// copy bytes from the user space of the current process. The number of bytes copied equals the length of dest.
pub fn read_bytes_from_user(src: Vaddr, dest: &mut [u8]) -> Result<()> {
let current = current!();
let root_vmar = current.root_vmar().unwrap();
Ok(root_vmar.read_bytes(src, dest)?)
}
/// copy a val (plain-old-data type) from the user space of the current process.
pub fn read_val_from_user<T: Pod>(src: Vaddr) -> Result<T> {
let current = current!();
let root_vmar = current.root_vmar().unwrap();
Ok(root_vmar.read_val(src)?)
}
/// write bytes to the user space of the current process. The number of bytes written equals the length of src.
pub fn write_bytes_to_user(dest: Vaddr, src: &[u8]) -> Result<()> {
let current = current!();
let root_vmar = current.root_vmar().unwrap();
Ok(root_vmar.write_bytes(dest, src)?)
}
/// write a val (plain-old-data type) to the user space of the current process.
pub fn write_val_to_user<T: Pod>(dest: Vaddr, val: &T) -> Result<()> {
let current = current!();
let root_vmar = current.root_vmar().unwrap();
Ok(root_vmar.write_val(dest, val)?)
}
/// read a cstring from user space; the length of the cstring should not exceed max_len (including the null byte)
pub fn read_cstring_from_user(addr: Vaddr, max_len: usize) -> Result<CString> {
let mut buffer = vec![0u8; max_len];
read_bytes_from_user(addr, &mut buffer)?;
Ok(CString::from(CStr::from_bytes_until_nul(&buffer)?))
}
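These helpers funnel every user-memory access through the current process's root vmar, so the vmar's rights checks apply uniformly. A hedged sketch of typical use in a syscall handler (the `TimeVal` struct and the `Pod` derive are assumptions for illustration):
```rust
// Sketch: round-tripping a user-space struct through the root vmar.
#[repr(C)]
#[derive(Clone, Copy, Pod)] // assumes a Pod derive is available from the pod crate
struct TimeVal {
    sec: i64,
    usec: i64,
}

fn copy_timeval(user_src: Vaddr, user_dst: Vaddr) -> Result<()> {
    let tv: TimeVal = read_val_from_user(user_src)?; // checked read via the root vmar
    write_val_to_user(user_dst, &tv) // checked write via the root vmar
}
```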

View File

@ -14,7 +14,7 @@
//! In Jinux, VMARs and VMOs, as well as other capabilities, are implemented
//! as zero-cost capabilities.
mod page_fault_handler;
mod perms;
mod vmar;
mod vmo;
pub mod page_fault_handler;
pub mod perms;
pub mod vmar;
pub mod vmo;

View File

@ -1,12 +1,13 @@
use jinux_frame::vm::Vaddr;
use jinux_frame::Result;
use crate::prelude::*;
/// This trait is implemented by structs that can handle a user-space page fault.
/// In the current implementation, these are vmars and vmos.
pub trait PageFaultHandler {
/// Handle a page fault at a specific addr. if write is true, means the page fault is caused by a write access,
/// Handle a page fault at a specific addr. If not_present is true, the page fault is caused by the page not being present;
/// otherwise, it is caused by a page-protection error.
/// If write is true, the page fault is caused by a write access;
/// otherwise, it is caused by a read access.
/// If the page fault can be handled successfully, this function will return Ok(()).
/// Otherwise, this function will return Err.
fn handle_page_fault(&self, page_fault_addr: Vaddr, write: bool) -> Result<()>;
fn handle_page_fault(&self, offset: Vaddr, not_present: bool, write: bool) -> Result<()>;
}

View File

@ -1,16 +1,16 @@
use alloc::sync::Arc;
use core::ops::Range;
use jinux_frame::{
vm::{Vaddr, VmIo},
Error, Result,
};
use jinux_frame::vm::{Vaddr, VmIo};
use crate::prelude::*;
use crate::{
rights::Rights,
vm::{page_fault_handler::PageFaultHandler, vmo::Vmo},
};
use super::{options::VmarChildOptions, vm_mapping::VmarMapOptions, VmPerms, Vmar, Vmar_};
use super::{
options::VmarChildOptions, vm_mapping::VmarMapOptions, VmPerms, Vmar, VmarRightsOp, Vmar_,
};
impl Vmar<Rights> {
/// Creates a root VMAR.
@ -107,6 +107,12 @@ impl Vmar<Rights> {
self.0.protect(perms, range)
}
/// clear all mappings and child vmars.
/// After being cleared, this vmar becomes an empty vmar.
pub fn clear(&self) -> Result<()> {
self.0.clear_root_vmar()
}
/// Destroy a VMAR, including all its mappings and children VMARs.
///
/// After being destroyed, the VMAR becomes useless and returns errors
@ -138,40 +144,41 @@ impl Vmar<Rights> {
self.check_rights(Rights::DUP)?;
Ok(Vmar(self.0.clone(), self.1.clone()))
}
/// Returns the access rights.
pub fn rights(&self) -> Rights {
self.1
}
fn check_rights(&self, rights: Rights) -> Result<()> {
if self.1.contains(rights) {
Ok(())
} else {
Err(Error::AccessDenied)
}
}
}
impl VmIo for Vmar<Rights> {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> jinux_frame::Result<()> {
self.check_rights(Rights::READ)?;
self.0.read(offset, buf)
self.0.read(offset, buf)?;
Ok(())
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
fn write_bytes(&self, offset: usize, buf: &[u8]) -> jinux_frame::Result<()> {
self.check_rights(Rights::WRITE)?;
self.0.write(offset, buf)
self.0.write(offset, buf)?;
Ok(())
}
}
impl PageFaultHandler for Vmar<Rights> {
fn handle_page_fault(&self, page_fault_addr: Vaddr, write: bool) -> Result<()> {
fn handle_page_fault(
&self,
page_fault_addr: Vaddr,
not_present: bool,
write: bool,
) -> Result<()> {
if write {
self.check_rights(Rights::WRITE)?;
} else {
self.check_rights(Rights::READ)?;
}
self.0.handle_page_fault(page_fault_addr, write)
self.0
.handle_page_fault(page_fault_addr, not_present, write)
}
}
impl VmarRightsOp for Vmar<Rights> {
fn rights(&self) -> Rights {
self.1
}
}

View File

@ -5,6 +5,8 @@ mod options;
mod static_cap;
pub mod vm_mapping;
use crate::prelude::*;
use crate::rights::Full;
use crate::rights::Rights;
use crate::vm::perms::VmPerms;
use alloc::collections::BTreeMap;
@ -12,16 +14,13 @@ use alloc::sync::Arc;
use alloc::sync::Weak;
use alloc::vec::Vec;
use core::ops::Range;
use jinux_frame::config::PAGE_SIZE;
use jinux_frame::vm::Vaddr;
use jinux_frame::vm::VmSpace;
use jinux_frame::AlignExt;
use jinux_frame::{Error, Result};
use spin::Mutex;
use self::vm_mapping::VmMapping;
use super::page_fault_handler::PageFaultHandler;
use super::vmo::Vmo;
/// Virtual Memory Address Regions (VMARs) are a type of capability that manages
/// user address spaces.
@ -52,13 +51,45 @@ use super::page_fault_handler::PageFaultHandler;
///
pub struct Vmar<R = Rights>(Arc<Vmar_>, R);
pub trait VmarRightsOp {
/// Returns the access rights.
fn rights(&self) -> Rights;
fn check_rights(&self, rights: Rights) -> Result<()>;
}
impl<R> VmarRightsOp for Vmar<R> {
default fn rights(&self) -> Rights {
unimplemented!()
}
default fn check_rights(&self, rights: Rights) -> Result<()> {
if self.rights().contains(rights) {
Ok(())
} else {
return_errno_with_message!(Errno::EACCES, "Rights check failed");
}
}
}
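The `default fn` items above rely on Rust's unstable specialization feature (e.g. `#![feature(specialization)]` at the crate root; the exact feature gate is an assumption), which lets concrete impls such as `Vmar<Rights>` override the blanket defaults. A self-contained sketch of the pattern:
```rust
#![feature(specialization)] // nightly-only

trait RightsOp {
    fn rights(&self) -> u32;
}

struct Cap<R>(R);

impl<R> RightsOp for Cap<R> {
    // Blanket default that concrete impls may specialize.
    default fn rights(&self) -> u32 {
        unimplemented!()
    }
}

impl RightsOp for Cap<u32> {
    // Concrete override chosen over the default by specialization.
    fn rights(&self) -> u32 {
        self.0
    }
}
```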
// TODO: how page faults can be delivered to and handled by the current VMAR.
impl<R> PageFaultHandler for Vmar<R> {
default fn handle_page_fault(&self, page_fault_addr: Vaddr, write: bool) -> Result<()> {
default fn handle_page_fault(
&self,
page_fault_addr: Vaddr,
not_present: bool,
write: bool,
) -> Result<()> {
unimplemented!()
}
}
impl<R> Vmar<R> {
/// FIXME: This function should require access control
pub fn vm_space(&self) -> &VmSpace {
self.0.vm_space()
}
}
pub(super) struct Vmar_ {
/// vmar inner
inner: Mutex<VmarInner>,
@ -67,19 +98,18 @@ pub(super) struct Vmar_ {
/// The total size of the VMAR in bytes
size: usize,
/// The attached vmspace
vm_space: Arc<VmSpace>,
vm_space: VmSpace,
/// The parent vmar. If points to none, this is a root vmar
parent: Weak<Vmar_>,
}
/// FIXME: How can a vmar have its child vmar and vmos with its rights?
struct VmarInner {
/// Whether the vmar is destroyed
is_destroyed: bool,
/// The child vmars. The key is offset relative to root VMAR
child_vmar_s: BTreeMap<Vaddr, Arc<Vmar_>>,
/// The vm mappings. The key is offset relative to root VMAR
mapped_vmos: BTreeMap<Vaddr, Arc<VmMapping>>,
vm_mappings: BTreeMap<Vaddr, Arc<VmMapping>>,
/// Free regions that can be used for creating child vmar or mapping vmos
free_regions: BTreeMap<Vaddr, FreeRegion>,
}
@ -98,12 +128,12 @@ impl Vmar_ {
let vmar_inner = VmarInner {
is_destroyed: false,
child_vmar_s: BTreeMap::new(),
mapped_vmos: BTreeMap::new(),
vm_mappings: BTreeMap::new(),
free_regions,
};
let vmar_ = Vmar_ {
inner: Mutex::new(vmar_inner),
vm_space: Arc::new(VmSpace::new()),
vm_space: VmSpace::new(),
base: 0,
size: ROOT_VMAR_HIGHEST_ADDR,
parent: Weak::new(),
@ -111,6 +141,14 @@ impl Vmar_ {
Ok(vmar_)
}
fn is_root_vmar(&self) -> bool {
if let Some(_) = self.parent.upgrade() {
false
} else {
true
}
}
pub fn protect(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
assert!(range.start % PAGE_SIZE == 0);
assert!(range.end % PAGE_SIZE == 0);
@ -121,12 +159,11 @@ impl Vmar_ {
// do real protect. The protected range is ensured to be mapped.
fn do_protect_inner(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
for (vmo_base, vm_mapping) in &self.inner.lock().mapped_vmos {
let vmo_range = *vmo_base..(*vmo_base + vm_mapping.size());
if is_intersected(&range, &vmo_range) {
let intersected_range = get_intersected_range(&range, &vmo_range);
// TODO: How to protect a mapped vmo?
todo!()
for (vm_mapping_base, vm_mapping) in &self.inner.lock().vm_mappings {
let vm_mapping_range = *vm_mapping_base..(*vm_mapping_base + vm_mapping.size());
if is_intersected(&range, &vm_mapping_range) {
let intersected_range = get_intersected_range(&range, &vm_mapping_range);
vm_mapping.protect(perms, intersected_range)?;
}
}
@ -152,7 +189,7 @@ impl Vmar_ {
// The protected range should not intersect with any free region
for (_, free_region) in &self.inner.lock().free_regions {
if is_intersected(&free_region.range, &protected_range) {
return Err(Error::InvalidArgs);
return_errno_with_message!(Errno::EACCES, "protected range is not fully mapped");
}
}
@ -169,9 +206,14 @@ impl Vmar_ {
}
/// Handle a user-space page fault; if the page fault is handled successfully, return Ok(()).
pub fn handle_page_fault(&self, page_fault_addr: Vaddr, write: bool) -> Result<()> {
pub fn handle_page_fault(
&self,
page_fault_addr: Vaddr,
not_present: bool,
write: bool,
) -> Result<()> {
if page_fault_addr < self.base || page_fault_addr >= self.base + self.size {
return Err(Error::AccessDenied);
return_errno_with_message!(Errno::EACCES, "page fault addr is not in current vmar");
}
let inner = self.inner.lock();
@ -179,28 +221,144 @@ impl Vmar_ {
if *child_vmar_base <= page_fault_addr
&& page_fault_addr < *child_vmar_base + child_vmar.size
{
return child_vmar.handle_page_fault(page_fault_addr, write);
return child_vmar.handle_page_fault(page_fault_addr, not_present, write);
}
}
// FIXME: If multiple vmos are mapped to the addr, should we allow all vmos to handle page fault?
for (vm_mapping_base, vm_mapping) in &inner.mapped_vmos {
for (vm_mapping_base, vm_mapping) in &inner.vm_mappings {
if *vm_mapping_base <= page_fault_addr
&& page_fault_addr <= *vm_mapping_base + vm_mapping.size()
{
return vm_mapping.handle_page_fault(page_fault_addr, write);
return vm_mapping.handle_page_fault(page_fault_addr, not_present, write);
}
}
return Err(Error::AccessDenied);
return_errno_with_message!(Errno::EACCES, "page fault addr is not in current vmar");
}
/// clear all content of the root vmar
pub fn clear_root_vmar(&self) -> Result<()> {
debug_assert!(self.is_root_vmar());
if !self.is_root_vmar() {
return_errno_with_message!(Errno::EACCES, "The vmar is not root vmar");
}
self.vm_space.clear();
let mut inner = self.inner.lock();
inner.child_vmar_s.clear();
inner.vm_mappings.clear();
inner.free_regions.clear();
let root_region = FreeRegion::new(ROOT_VMAR_LOWEST_ADDR..ROOT_VMAR_HIGHEST_ADDR);
inner.free_regions.insert(root_region.start(), root_region);
Ok(())
}
pub fn destroy_all(&self) -> Result<()> {
todo!()
let mut inner = self.inner.lock();
inner.is_destroyed = true;
let mut free_regions = BTreeMap::new();
for (child_vmar_base, child_vmar) in &inner.child_vmar_s {
child_vmar.destroy_all()?;
let free_region = FreeRegion::new(child_vmar.range());
free_regions.insert(free_region.start(), free_region);
}
inner.child_vmar_s.clear();
inner.free_regions.append(&mut free_regions);
for (_, vm_mapping) in &inner.vm_mappings {
vm_mapping.unmap(vm_mapping.range(), true)?;
let free_region = FreeRegion::new(vm_mapping.range());
free_regions.insert(free_region.start(), free_region);
}
inner.vm_mappings.clear();
inner.free_regions.append(&mut free_regions);
drop(inner);
self.merge_continuous_regions();
self.vm_space.clear();
Ok(())
}
pub fn destroy(&self, range: Range<usize>) -> Result<()> {
todo!()
self.check_destroy_range(&range)?;
let mut inner = self.inner.lock();
let mut free_regions = BTreeMap::new();
for (child_vmar_base, child_vmar) in &inner.child_vmar_s {
let child_vmar_range = child_vmar.range();
if is_intersected(&range, &child_vmar_range) {
child_vmar.destroy_all()?;
}
let free_region = FreeRegion::new(child_vmar_range);
free_regions.insert(free_region.start(), free_region);
}
inner
.child_vmar_s
.retain(|_, child_vmar_| !child_vmar_.is_destroyed());
for (_, vm_mapping) in &inner.vm_mappings {
let vm_mapping_range = vm_mapping.range();
if is_intersected(&vm_mapping_range, &range) {
let intersected_range = get_intersected_range(&vm_mapping_range, &range);
vm_mapping.unmap(intersected_range.clone(), true)?;
let free_region = FreeRegion::new(intersected_range);
free_regions.insert(free_region.start(), free_region);
}
}
inner
.vm_mappings
.retain(|_, vm_mapping| !vm_mapping.is_destroyed());
inner.free_regions.append(&mut free_regions);
self.merge_continuous_regions();
Ok(())
}
fn check_destroy_range(&self, range: &Range<usize>) -> Result<()> {
debug_assert!(range.start % PAGE_SIZE == 0);
debug_assert!(range.end % PAGE_SIZE == 0);
for (child_vmar_base, child_vmar) in &self.inner.lock().child_vmar_s {
let child_vmar_start = *child_vmar_base;
let child_vmar_end = child_vmar_start + child_vmar.size;
if child_vmar_end <= range.start || child_vmar_start >= range.end {
// child vmar does not intersect with range
continue;
}
if range.start <= child_vmar_start && child_vmar_end <= range.end {
// child vmar is totally within the range
continue;
}
assert!(is_intersected(range, &(child_vmar_start..child_vmar_end)));
return_errno_with_message!(
Errno::EACCES,
"Child vmar is partly intersected with destryed range"
);
}
Ok(())
}
fn is_destroyed(&self) -> bool {
self.inner.lock().is_destroyed
}
fn merge_continuous_regions(&self) {
let mut new_free_regions = BTreeMap::new();
let mut inner = self.inner.lock();
let keys = inner.free_regions.keys().cloned().collect::<Vec<_>>();
for key in keys {
if let Some(mut free_region) = inner.free_regions.remove(&key) {
let mut region_end = free_region.end();
while let Some(another_region) = inner.free_regions.remove(&region_end) {
free_region.merge_other_region(&another_region);
region_end = another_region.end();
}
new_free_regions.insert(free_region.start(), free_region);
}
}
inner.free_regions.clear();
inner.free_regions.append(&mut new_free_regions);
}
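The merge walks the map in address order and repeatedly absorbs any region whose start equals the current region's end. The same idea as a self-contained, runnable example:
```rust
use std::collections::BTreeMap;
use std::ops::Range;

// Coalesce adjacent free regions keyed by their start address.
fn merge(regions: &mut BTreeMap<usize, Range<usize>>) {
    let mut merged = BTreeMap::new();
    let keys: Vec<usize> = regions.keys().cloned().collect();
    for key in keys {
        if let Some(mut region) = regions.remove(&key) {
            // Keep absorbing the region that starts exactly at our end.
            while let Some(next) = regions.remove(&region.end) {
                region.end = next.end;
            }
            merged.insert(region.start, region);
        }
    }
    *regions = merged;
}

fn main() {
    let mut regions = BTreeMap::new();
    for r in [0..4096, 4096..8192, 16384..20480] {
        regions.insert(r.start, r);
    }
    merge(&mut regions);
    assert_eq!(regions.len(), 2); // [0..8192] and [16384..20480]
}
```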
pub fn read(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
@ -215,7 +373,7 @@ impl Vmar_ {
}
}
// if the read range is in mapped vmo
for (vm_mapping_base, vm_mapping) in &self.inner.lock().mapped_vmos {
for (vm_mapping_base, vm_mapping) in &self.inner.lock().vm_mappings {
let vm_mapping_end = *vm_mapping_base + vm_mapping.size();
if *vm_mapping_base <= read_start && read_end <= vm_mapping_end {
let vm_mapping_offset = read_start - *vm_mapping_base;
@ -224,7 +382,7 @@ impl Vmar_ {
}
// FIXME: If the read range spans different vmos or child vmars, should we return an error directly?
Err(Error::AccessDenied)
return_errno_with_message!(Errno::EACCES, "read range is not backed up by a vmo");
}
pub fn write(&self, offset: usize, buf: &[u8]) -> Result<()> {
@ -239,7 +397,7 @@ impl Vmar_ {
}
}
// if the write range is in mapped vmo
for (vm_mapping_base, vm_mapping) in &self.inner.lock().mapped_vmos {
for (vm_mapping_base, vm_mapping) in &self.inner.lock().vm_mappings {
let vm_mapping_end = *vm_mapping_base + vm_mapping.size();
if *vm_mapping_base <= write_start && write_end <= vm_mapping_end {
let vm_mapping_offset = write_start - *vm_mapping_base;
@ -248,7 +406,7 @@ impl Vmar_ {
}
// FIXME: If the write range spans different vmos or child vmars, should we return an error directly?
Err(Error::AccessDenied)
return_errno_with_message!(Errno::EACCES, "write range is not backed up by a vmo");
}
/// allocate a child vmar_.
@ -258,9 +416,8 @@ impl Vmar_ {
child_vmar_size: usize,
align: usize,
) -> Result<Arc<Vmar_>> {
match self.find_free_region_for_child(child_vmar_offset, child_vmar_size, align) {
None => return Err(Error::InvalidArgs),
Some((region_base, child_vmar_offset)) => {
let (region_base, child_vmar_offset) =
self.find_free_region_for_child(child_vmar_offset, child_vmar_size, align)?;
// This unwrap should never fail
let free_region = self.inner.lock().free_regions.remove(&region_base).unwrap();
let child_range = child_vmar_offset..(child_vmar_offset + child_vmar_size);
@ -277,7 +434,7 @@ impl Vmar_ {
let child_vmar_inner = VmarInner {
is_destroyed: false,
child_vmar_s: BTreeMap::new(),
mapped_vmos: BTreeMap::new(),
vm_mappings: BTreeMap::new(),
free_regions: child_regions,
};
let child_vmar_ = Arc::new(Vmar_ {
@ -293,8 +450,6 @@ impl Vmar_ {
.insert(child_vmar_.base, child_vmar_.clone());
Ok(child_vmar_)
}
}
}
/// find a free region for child vmar or vmo.
/// returns (region base addr, child real offset)
@ -303,14 +458,14 @@ impl Vmar_ {
child_offset: Option<Vaddr>,
child_size: usize,
align: usize,
) -> Option<(Vaddr, Vaddr)> {
) -> Result<(Vaddr, Vaddr)> {
for (region_base, free_region) in &self.inner.lock().free_regions {
if let Some(child_vmar_offset) = child_offset {
// if the offset is set, we should find a free region that can satisfy both the offset and size
if *region_base <= child_vmar_offset
&& (child_vmar_offset + child_size) <= (free_region.end())
{
return Some((*region_base, child_vmar_offset));
return Ok((*region_base, child_vmar_offset));
}
} else {
// else, we find a free region that can satisfy the length and align requirement.
@ -321,11 +476,11 @@ impl Vmar_ {
let child_vmar_real_start = region_start.align_up(align);
let child_vmar_real_end = child_vmar_real_start + child_size;
if region_start <= child_vmar_real_start && child_vmar_real_end <= region_end {
return Some((*region_base, child_vmar_real_start));
return Ok((*region_base, child_vmar_real_start));
}
}
}
None
return_errno_with_message!(Errno::EACCES, "Cannot find free region for child")
}
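When no fixed offset is requested, the search rounds each candidate region's start up to the requested alignment and checks that the child still fits. A standalone sketch of that fit test:
```rust
// Round `addr` up to the next multiple of `align` (align must be a power of two).
fn align_up(addr: usize, align: usize) -> usize {
    (addr + align - 1) & !(align - 1)
}

// Returns the child start address if `child_size` fits in `region` at `align`.
fn fit_in_region(region: std::ops::Range<usize>, child_size: usize, align: usize) -> Option<usize> {
    let start = align_up(region.start, align);
    let end = start.checked_add(child_size)?;
    (end <= region.end).then_some(start)
}

fn main() {
    // A region starting at 0x1234 can host a 0x1000-byte, 0x1000-aligned child at 0x2000.
    assert_eq!(fit_in_region(0x1234..0x4000, 0x1000, 0x1000), Some(0x2000));
    // But not a 0x3000-byte child: 0x2000 + 0x3000 = 0x5000 > 0x4000.
    assert_eq!(fit_in_region(0x1234..0x4000, 0x3000, 0x1000), None);
}
```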
fn range(&self) -> Range<usize> {
@ -337,15 +492,21 @@ impl Vmar_ {
for (_, child_vmar) in &inner.child_vmar_s {
let child_vmar_range = child_vmar.range();
if is_intersected(&vmo_range, &child_vmar_range) {
return Err(Error::InvalidArgs);
return_errno_with_message!(
Errno::EACCES,
"vmo range overlapped with child vmar range"
);
}
}
if !can_overwrite {
for (child_vmo_base, child_vmo) in &inner.mapped_vmos {
for (child_vmo_base, child_vmo) in &inner.vm_mappings {
let child_vmo_range = *child_vmo_base..*child_vmo_base + child_vmo.size();
if is_intersected(&vmo_range, &child_vmo_range) {
return Err(Error::InvalidArgs);
return_errno_with_message!(
Errno::EACCES,
"vmo range overlapped with another vmo"
);
}
}
}
@ -362,7 +523,7 @@ impl Vmar_ {
pub fn add_mapping(&self, mapping: Arc<VmMapping>) {
self.inner
.lock()
.mapped_vmos
.vm_mappings
.insert(mapping.map_to_addr(), mapping);
}
@ -375,8 +536,9 @@ impl Vmar_ {
can_overwrite: bool,
) -> Result<Vaddr> {
let allocate_size = size.max(vmo_size);
let mut inner = self.inner.lock();
if can_overwrite {
let mut inner = self.inner.lock();
// if can_overwrite is set, the offset is guaranteed not to be None
let offset = offset.unwrap();
let vmo_range = offset..(offset + allocate_size);
@ -399,9 +561,9 @@ impl Vmar_ {
return Ok(offset);
} else {
// Otherwise, the vmo resides in a single free region
match self.find_free_region_for_child(offset, allocate_size, align) {
None => return Err(Error::InvalidArgs),
Some((free_region_base, offset)) => {
let (free_region_base, offset) =
self.find_free_region_for_child(offset, allocate_size, align)?;
let mut inner = self.inner.lock();
let free_region = inner.free_regions.remove(&free_region_base).unwrap();
let vmo_range = offset..(offset + allocate_size);
let intersected_range = get_intersected_range(free_region.range(), &vmo_range);
@ -412,7 +574,78 @@ impl Vmar_ {
return Ok(offset);
}
}
/// Fork a vmar for a child process.
pub fn fork_vmar_(&self, parent: Weak<Vmar_>) -> Result<Arc<Self>> {
// create an empty vmar at first
let is_destroyed = false;
let child_vmar_s = BTreeMap::new();
let vm_mappings = BTreeMap::new();
let free_regions = BTreeMap::new();
let vmar_inner = VmarInner {
is_destroyed,
child_vmar_s,
vm_mappings,
free_regions,
};
// If this is a root vmar, we create a new vmspace
// Otherwise, we clone the vm space from parent.
let vm_space = if let Some(parent) = parent.upgrade() {
parent.vm_space().clone()
} else {
VmSpace::new()
};
let vmar_ = Vmar_ {
inner: Mutex::new(vmar_inner),
base: self.base,
size: self.size,
vm_space,
parent,
};
let new_vmar_ = Arc::new(vmar_);
let inner = self.inner.lock();
// clone free regions
for (free_region_base, free_region) in &inner.free_regions {
new_vmar_
.inner
.lock()
.free_regions
.insert(*free_region_base, free_region.clone());
}
// clone child vmars
for (child_vmar_base, child_vmar_) in &inner.child_vmar_s {
let parent_of_forked_child = Arc::downgrade(&new_vmar_);
let forked_child_vmar = child_vmar_.fork_vmar_(parent_of_forked_child)?;
new_vmar_
.inner
.lock()
.child_vmar_s
.insert(*child_vmar_base, forked_child_vmar);
}
// clone vm mappings
for (vm_mapping_base, vm_mapping) in &inner.vm_mappings {
let parent_of_forked_mapping = Arc::downgrade(&new_vmar_);
let forked_mapping = Arc::new(vm_mapping.fork_mapping(parent_of_forked_mapping)?);
new_vmar_
.inner
.lock()
.vm_mappings
.insert(*vm_mapping_base, forked_mapping);
}
Ok(new_vmar_)
}
/// Get the mapped vmo at a given offset.
pub fn get_mapped_vmo(&self, offset: Vaddr) -> Result<Vmo<Rights>> {
for (vm_mapping_base, vm_mapping) in &self.inner.lock().vm_mappings {
if *vm_mapping_base <= offset && offset < *vm_mapping_base + vm_mapping.size() {
return Ok(vm_mapping.vmo().dup()?);
}
}
return_errno_with_message!(Errno::EACCES, "No mapped vmo at this offset");
}
}
@ -428,8 +661,24 @@ impl<R> Vmar<R> {
pub fn size(&self) -> usize {
self.0.size
}
/// Fork a vmar for a child process.
pub fn fork_vmar(&self) -> Result<Vmar<Full>> {
let rights = Rights::all();
self.check_rights(rights)?;
let vmar_ = self.0.fork_vmar_(Weak::new())?;
Ok(Vmar(vmar_, Full::new()))
}
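// Usage sketch (hypothetical caller; assumes a root `Vmar<Full>` named `root_vmar`):
//
//     let child_vmar = root_vmar.fork_vmar()?;
//
// Since `fork_vmar_` is called with `Weak::new()`, the forked root gets a fresh
// VmSpace; its free regions, child vmars and vm mappings are then cloned
// recursively, with each mapping re-created as a copy-on-write child vmo.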
/// Get a mapped vmo at the given offset.
pub fn get_mapped_vmo(&self, offset: Vaddr) -> Result<Vmo<Rights>> {
let rights = Rights::all();
self.check_rights(rights)?;
self.0.get_mapped_vmo(offset)
}
}
#[derive(Debug, Clone)]
pub struct FreeRegion {
range: Range<Vaddr>,
}
@ -471,16 +720,22 @@ impl FreeRegion {
}
res
}
pub fn merge_other_region(&mut self, other_region: &FreeRegion) {
assert!(self.range.end == other_region.range.start);
assert!(self.range.start < other_region.range.end);
self.range = self.range.start..other_region.range.end
}
}
/// Determine whether two ranges intersect.
fn is_intersected(range1: &Range<usize>, range2: &Range<usize>) -> bool {
pub fn is_intersected(range1: &Range<usize>, range2: &Range<usize>) -> bool {
range1.start.max(range2.start) < range1.end.min(range2.end)
}
/// Get the intersection of two ranges.
/// The two ranges must intersect.
fn get_intersected_range(range1: &Range<usize>, range2: &Range<usize>) -> Range<usize> {
pub fn get_intersected_range(range1: &Range<usize>, range2: &Range<usize>) -> Range<usize> {
debug_assert!(is_intersected(range1, range2));
range1.start.max(range2.start)..range1.end.min(range2.end)
}
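// Example: for range1 = 0x0..0x3000 and range2 = 0x2000..0x5000, is_intersected
// returns true because max(0x0, 0x2000) = 0x2000 < min(0x3000, 0x5000) = 0x3000,
// and get_intersected_range yields 0x2000..0x3000.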

View File

@ -1,10 +1,7 @@
use core::ops::Range;
use alloc::sync::Arc;
use jinux_frame::{
vm::{Vaddr, VmIo},
Error, Result,
};
use crate::prelude::*;
use jinux_frame::vm::VmIo;
use jinux_rights_proc::require;
use crate::{
@ -12,7 +9,9 @@ use crate::{
vm::{page_fault_handler::PageFaultHandler, vmo::Vmo},
};
use super::{options::VmarChildOptions, vm_mapping::VmarMapOptions, VmPerms, Vmar, Vmar_};
use super::{
options::VmarChildOptions, vm_mapping::VmarMapOptions, VmPerms, Vmar, VmarRightsOp, Vmar_,
};
impl<R: TRights> Vmar<R> {
/// Creates a root VMAR.
@ -111,6 +110,12 @@ impl<R: TRights> Vmar<R> {
self.0.protect(perms, range)
}
/// Clear all mappings and child vmars.
/// After being cleared, this vmar becomes an empty vmar.
pub fn clear(&self) -> Result<()> {
self.0.clear_root_vmar()
}
/// Destroy a VMAR, including all its mappings and children VMARs.
///
/// After being destroyed, the VMAR becomes useless and returns errors
@ -149,39 +154,48 @@ impl<R: TRights> Vmar<R> {
Vmar(self.0, R1::new())
}
/// Returns the access rights.
pub const fn rights(&self) -> Rights {
Rights::from_bits(R::BITS).unwrap()
}
fn check_rights(&self, rights: Rights) -> Result<()> {
if self.rights().contains(rights) {
Ok(())
} else {
Err(Error::AccessDenied)
return_errno_with_message!(Errno::EACCES, "check rights failed");
}
}
}
impl<R: TRights> VmIo for Vmar<R> {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> jinux_frame::Result<()> {
self.check_rights(Rights::READ)?;
self.0.read(offset, buf)
self.0.read(offset, buf)?;
Ok(())
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
fn write_bytes(&self, offset: usize, buf: &[u8]) -> jinux_frame::Result<()> {
self.check_rights(Rights::WRITE)?;
self.0.write(offset, buf)
self.0.write(offset, buf)?;
Ok(())
}
}
impl<R: TRights> PageFaultHandler for Vmar<R> {
fn handle_page_fault(&self, page_fault_addr: Vaddr, write: bool) -> Result<()> {
fn handle_page_fault(
&self,
page_fault_addr: Vaddr,
not_present: bool,
write: bool,
) -> Result<()> {
if write {
self.check_rights(Rights::WRITE)?;
} else {
self.check_rights(Rights::READ)?;
}
self.0.handle_page_fault(page_fault_addr, write)
self.0
.handle_page_fault(page_fault_addr, not_present, write)
}
}
impl<R: TRights> VmarRightsOp for Vmar<R> {
fn rights(&self) -> Rights {
Rights::from_bits(R::BITS).unwrap()
}
}

View File

@ -1,16 +1,13 @@
use alloc::{
collections::{BTreeMap, BTreeSet},
sync::{Arc, Weak},
};
use jinux_frame::{
config::PAGE_SIZE,
vm::{Vaddr, VmFrameVec, VmIo, VmPerm},
Error,
};
use jinux_frame::{vm::VmMapOptions, Result};
use crate::prelude::*;
use core::ops::Range;
use jinux_frame::vm::VmMapOptions;
use jinux_frame::vm::{VmFrameVec, VmIo, VmPerm};
use spin::Mutex;
use crate::vm::{page_fault_handler::PageFaultHandler, vmo::Vmo};
use crate::vm::{
vmo::get_page_idx_range,
vmo::{Vmo, VmoChildOptions},
};
use super::{Vmar, Vmar_};
use crate::vm::perms::VmPerms;
@ -22,23 +19,29 @@ use crate::vm::vmo::VmoRightsOp;
/// A vmo can also contain multiple VmMappings, which means a vmo can be mapped to multiple vmars.
/// The relationship between Vmar and Vmo is M:N.
pub struct VmMapping {
inner: Mutex<VmMappingInner>,
/// The parent vmar. The parent should always point to a valid vmar.
parent: Weak<Vmar_>,
/// The mapped vmo. The mapped vmo is with dynamic capability.
vmo: Vmo<Rights>,
/// The mao offset of the vmo, in bytes.
/// The map offset of the vmo, in bytes.
vmo_offset: usize,
/// The size of the mapping, in bytes. The map size can be larger than the size of the backing vmo.
/// Pages outside the vmo range cannot be read or written.
map_size: usize,
/// The base address relative to the root vmar where the vmo is mapped.
map_to_addr: Vaddr,
}
struct VmMappingInner {
/// Whether the mapping is destroyed
is_destroyed: bool,
/// The pages already mapped. The key is the page index in vmo.
mapped_pages: Mutex<BTreeSet<usize>>,
/// The map option of each **unmapped** page. The key is the page index in vmo.
mapped_pages: BTreeSet<usize>,
/// The permission of each page. The key is the page index in vmo.
/// This map can be filled when mapping a vmo to a vmar and can be modified by calling mprotect.
/// We keep the options in case the page is not committed and will further need these options.
page_map_options: Mutex<BTreeMap<usize, VmMapOptions>>,
/// We keep the permissions in case the page is not committed (or for copy-on-write mappings) and they are needed later.
page_perms: BTreeMap<usize, VmPerm>,
}
impl VmMapping {
@ -63,84 +66,208 @@ impl VmMapping {
align,
can_overwrite,
)?;
let mut page_perms = BTreeMap::new();
let real_map_size = size.min(vmo_size);
let start_page_idx = vmo_offset / PAGE_SIZE;
let end_page_idx = (vmo_offset + real_map_size) / PAGE_SIZE;
let vm_space = parent_vmar.vm_space();
let mut page_map_options = BTreeMap::new();
let mut mapped_pages = BTreeSet::new();
let perm = VmPerm::from(perms);
for page_idx in start_page_idx..end_page_idx {
let page_idx_range = get_page_idx_range(&(vmo_offset..vmo_offset + size));
for page_idx in page_idx_range {
page_perms.insert(page_idx, perm);
}
let vm_space = parent_vmar.vm_space();
let mut mapped_pages = BTreeSet::new();
let mapped_page_idx_range = get_page_idx_range(&(vmo_offset..vmo_offset + real_map_size));
let start_page_idx = mapped_page_idx_range.start;
for page_idx in mapped_page_idx_range {
let mut vm_map_options = VmMapOptions::new();
let page_map_addr = map_to_addr + (page_idx - start_page_idx) * PAGE_SIZE;
vm_map_options.addr(Some(page_map_addr));
vm_map_options.perm(perm);
vm_map_options.perm(perm.clone());
vm_map_options.can_overwrite(can_overwrite);
vm_map_options.align(align);
if vmo.page_commited(page_idx) {
vmo.map_page(page_idx, &vm_space, vm_map_options)?;
if let Ok(frames) = vmo.get_backup_frame(page_idx, false, false) {
vm_space.map(frames, &vm_map_options)?;
mapped_pages.insert(page_idx);
} else {
// The page is not committed. We simply record the map options for later mapping.
page_map_options.insert(page_idx, vm_map_options);
}
}
let vm_mapping_inner = VmMappingInner {
is_destroyed: false,
mapped_pages,
page_perms,
};
Ok(Self {
inner: Mutex::new(vm_mapping_inner),
parent: Arc::downgrade(&parent_vmar),
vmo,
vmo_offset,
map_size: size,
map_to_addr,
mapped_pages: Mutex::new(mapped_pages),
page_map_options: Mutex::new(page_map_options),
})
}
/// Add a new committed page and map it to vmspace
pub fn map_one_page(&self, page_idx: usize, frames: VmFrameVec) -> Result<()> {
pub(super) fn vmo(&self) -> &Vmo<Rights> {
&self.vmo
}
/// Add a new committed page and map it to the vmspace. If copy-on-write is set, unmapping the page previously mapped at the same address is allowed.
/// FIXME: This implementation relies on the fact that we map one page at a time. If multiple pages are mapped together, this implementation may have problems.
pub(super) fn map_one_page(&self, page_idx: usize, frames: VmFrameVec) -> Result<()> {
let parent = self.parent.upgrade().unwrap();
let vm_space = parent.vm_space();
let map_addr = page_idx * PAGE_SIZE + self.map_to_addr;
let page_map_options_lock = self.page_map_options.lock();
let map_options = page_map_options_lock.get(&page_idx).unwrap();
vm_space.map(frames, &map_options)?;
self.mapped_pages.lock().insert(page_idx);
let vm_perm = self.inner.lock().page_perms.get(&page_idx).unwrap().clone();
let mut vm_map_options = VmMapOptions::new();
vm_map_options.addr(Some(map_addr));
vm_map_options.perm(vm_perm.clone());
// copy-on-write allows unmapping the previously mapped page
if self.vmo.is_cow_child() && vm_space.is_mapped(map_addr) {
vm_space.unmap(&(map_addr..(map_addr + PAGE_SIZE))).unwrap();
}
vm_space.map(frames, &vm_map_options)?;
self.inner.lock().mapped_pages.insert(page_idx);
Ok(())
}
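// Sequence sketch for a copy-on-write child (illustrative): if the faulting page
// already has a read-only mapping at map_addr, that mapping is removed first;
// the freshly committed frames are then mapped with the VmPerm recorded in
// page_perms, and the page index is added to mapped_pages.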
pub fn unmap_one_page(&self, page_idx: usize) -> Result<()> {
/// Unmap a page.
pub(super) fn unmap_one_page(&self, page_idx: usize) -> Result<()> {
let parent = self.parent.upgrade().unwrap();
let vm_space = parent.vm_space();
let map_addr = page_idx * PAGE_SIZE + self.map_to_addr;
let range = map_addr..(map_addr + PAGE_SIZE);
if vm_space.is_mapped(map_addr) {
vm_space.unmap(&range)?;
self.mapped_pages.lock().remove(&page_idx);
}
self.inner.lock().mapped_pages.remove(&page_idx);
Ok(())
}
pub fn map_to_addr(&self) -> Vaddr {
/// The mapping's start address.
pub(super) fn map_to_addr(&self) -> Vaddr {
self.map_to_addr
}
pub fn size(&self) -> usize {
pub(super) fn size(&self) -> usize {
self.map_size
}
pub fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
let vmo_read_offset = self.vmo_offset + offset;
self.vmo.read_bytes(vmo_read_offset, buf)
self.vmo.read_bytes(vmo_read_offset, buf)?;
Ok(())
}
pub fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
let vmo_write_offset = self.vmo_offset + offset;
self.vmo.write_bytes(vmo_write_offset, buf)
self.vmo.write_bytes(vmo_write_offset, buf)?;
Ok(())
}
pub fn handle_page_fault(&self, page_fault_addr: Vaddr, write: bool) -> Result<()> {
/// Unmap pages in the range
pub fn unmap(&self, range: Range<usize>, destroy: bool) -> Result<()> {
let vmo_map_range = (range.start - self.map_to_addr)..(range.end - self.map_to_addr);
let page_idx_range = get_page_idx_range(&vmo_map_range);
for page_idx in page_idx_range {
self.unmap_one_page(page_idx)?;
}
if destroy && range == self.range() {
self.inner.lock().is_destroyed = true;
}
Ok(())
}
pub fn is_destroyed(&self) -> bool {
self.inner.lock().is_destroyed
}
pub fn handle_page_fault(
&self,
page_fault_addr: Vaddr,
not_present: bool,
write: bool,
) -> Result<()> {
let vmo_offset = self.vmo_offset + page_fault_addr - self.map_to_addr;
self.vmo.handle_page_fault(vmo_offset, write)
if vmo_offset >= self.vmo.size() {
return_errno_with_message!(Errno::EACCES, "page fault addr is not backed up by a vmo");
}
if write {
self.vmo.check_rights(Rights::WRITE)?;
} else {
self.vmo.check_rights(Rights::READ)?;
}
// get the backup frame for the page
let page_idx = vmo_offset / PAGE_SIZE;
let frames = self.vmo.get_backup_frame(page_idx, write, true)?;
// map the page
self.map_one_page(page_idx, frames)
}
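// Worked example (hypothetical values; assumes PAGE_SIZE = 0x1000): a write fault
// at self.map_to_addr + 0x2345 with self.vmo_offset = 0 yields vmo_offset = 0x2345,
// so page_idx = 2; get_backup_frame(2, true, true) commits (or, for a
// copy-on-write child, copies) the frame, and map_one_page(2, frames) installs it.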
pub(super) fn protect(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
let rights = Rights::from(perms);
self.vmo().check_rights(rights)?;
// FIXME: should we commit and map these pages before protecting the vmspace?
let vmar = self.parent.upgrade().unwrap();
let vm_space = vmar.vm_space();
let perm = VmPerm::from(perms);
vm_space.protect(&range, perm)?;
Ok(())
}
pub(super) fn fork_mapping(&self, new_parent: Weak<Vmar_>) -> Result<VmMapping> {
let VmMapping {
inner,
parent,
vmo,
vmo_offset,
map_size,
map_to_addr,
} = self;
let parent_vmo = vmo.clone();
let vmo_size = parent_vmo.size();
let child_vmo = VmoChildOptions::new_cow(parent_vmo, 0..vmo_size).alloc()?;
let parent_vmar = new_parent.upgrade().unwrap();
let vm_space = parent_vmar.vm_space();
let real_map_size = self.size().min(child_vmo.size());
let vmo_offset = *vmo_offset;
let page_idx_range = get_page_idx_range(&(vmo_offset..vmo_offset + real_map_size));
let start_page_idx = page_idx_range.start;
let mut mapped_pages = BTreeSet::new();
for page_idx in page_idx_range {
// When mapping pages from the parent, we must forbid write access to these pages,
// so that any write access triggers a page fault and a new private page can be allocated.
let mut vm_perm = inner.lock().page_perms.get(&page_idx).unwrap().clone();
vm_perm -= VmPerm::W;
let mut vm_map_options = VmMapOptions::new();
let map_addr = (page_idx - start_page_idx) * PAGE_SIZE + self.map_to_addr;
vm_map_options.addr(Some(map_addr));
vm_map_options.perm(vm_perm);
if let Ok(frames) = child_vmo.get_backup_frame(page_idx, false, false) {
vm_space.map(frames, &vm_map_options)?;
mapped_pages.insert(page_idx);
}
}
let is_destroyed = inner.lock().is_destroyed;
let page_perms = inner.lock().page_perms.clone();
let inner = VmMappingInner {
is_destroyed,
mapped_pages,
page_perms,
};
Ok(VmMapping {
inner: Mutex::new(inner),
parent: new_parent,
vmo: child_vmo,
vmo_offset,
map_size: *map_size,
map_to_addr: *map_to_addr,
})
}
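// COW sketch: after fork_mapping, pages inherited from the parent vmo are mapped
// into the child's vm space with VmPerm::W removed, so the child's first write
// to such a page faults into handle_page_fault, and the CopyOnWriteChild branch
// of get_backup_frame commits a private copy of the parent's contents.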
pub fn range(&self) -> Range<usize> {
self.map_to_addr..self.map_to_addr + self.map_size
}
}
@ -256,7 +383,6 @@ impl<R1, R2> VmarMapOptions<R1, R2> {
let vmo_ = self.vmo.0.clone();
let vm_mapping = Arc::new(VmMapping::build_mapping(self)?);
let map_to_addr = vm_mapping.map_to_addr();
vmo_.add_mapping(Arc::downgrade(&vm_mapping));
parent_vmar.add_mapping(vm_mapping);
Ok(map_to_addr)
}
@ -267,16 +393,16 @@ impl<R1, R2> VmarMapOptions<R1, R2> {
debug_assert!(self.align % PAGE_SIZE == 0);
debug_assert!(self.align.is_power_of_two());
if self.align % PAGE_SIZE != 0 || !self.align.is_power_of_two() {
return Err(Error::InvalidArgs);
return_errno_with_message!(Errno::EINVAL, "invalid align");
}
debug_assert!(self.vmo_offset % self.align == 0);
if self.vmo_offset % self.align != 0 {
return Err(Error::InvalidArgs);
return_errno_with_message!(Errno::EINVAL, "invalid vmo offset");
}
if let Some(offset) = self.offset {
debug_assert!(offset % self.align == 0);
if offset % self.align != 0 {
return Err(Error::InvalidArgs);
return_errno_with_message!(Errno::EINVAL, "invalid offset");
}
}
self.check_perms()?;
@ -296,7 +422,10 @@ impl<R1, R2> VmarMapOptions<R1, R2> {
// if can_overwrite is set, the offset cannot be None
debug_assert!(self.offset != None);
if self.offset == None {
return Err(Error::InvalidArgs);
return_errno_with_message!(
Errno::EINVAL,
"offset can not be none when can overwrite is true"
);
}
}
if self.offset == None {

View File

@ -1,7 +1,8 @@
use core::ops::Range;
use jinux_frame::prelude::Result;
use jinux_frame::{vm::VmIo, Error};
use crate::prelude::*;
use jinux_frame::vm::VmIo;
use crate::rights::{Rights, TRights};
@ -67,6 +68,12 @@ impl Vmo<Rights> {
Ok(VmoChildOptions::new_cow(dup_self, range))
}
/// Commit a page at a specific offset.
pub fn commit_page(&self, offset: usize) -> Result<()> {
self.check_rights(Rights::WRITE)?;
self.0.commit_page(offset)
}
/// Commits the pages specified in the range (in bytes).
///
/// The range must be within the size of the VMO.
@ -137,20 +144,28 @@ impl Vmo<Rights> {
/// Converts to a static capability.
pub fn to_static<R1: TRights>(self) -> Result<Vmo<R1>> {
self.check_rights(Rights::from_bits(R1::BITS).ok_or(Error::InvalidArgs)?)?;
self.check_rights(Rights::from_bits(R1::BITS).ok_or(Error::new(Errno::EINVAL))?)?;
Ok(Vmo(self.0, R1::new()))
}
}
impl VmIo for Vmo<Rights> {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
self.check_rights(Rights::READ)?;
self.0.read_bytes(offset, buf)
impl Clone for Vmo<Rights> {
fn clone(&self) -> Self {
Self(self.0.clone(), self.1.clone())
}
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
impl VmIo for Vmo<Rights> {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> jinux_frame::Result<()> {
self.check_rights(Rights::READ)?;
self.0.read_bytes(offset, buf)?;
Ok(())
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> jinux_frame::Result<()> {
self.check_rights(Rights::WRITE)?;
self.0.write_bytes(offset, buf)
self.0.write_bytes(offset, buf)?;
Ok(())
}
}

View File

@ -3,15 +3,10 @@
use core::ops::Range;
use crate::rights::Rights;
use alloc::vec;
use alloc::{collections::BTreeMap, sync::Arc, sync::Weak, vec::Vec};
use bitflags::bitflags;
use jinux_frame::{
config::PAGE_SIZE,
prelude::Result,
vm::{Paddr, Vaddr, VmAllocOptions, VmFrameVec, VmIo, VmMapOptions, VmSpace},
Error,
};
use jinux_frame::vm::{Paddr, VmAllocOptions, VmFrameVec, VmIo};
use jinux_frame::AlignExt;
use crate::prelude::*;
mod dyn_cap;
mod options;
@ -22,9 +17,6 @@ pub use options::{VmoChildOptions, VmoOptions};
pub use pager::Pager;
use spin::Mutex;
use super::page_fault_handler::PageFaultHandler;
use super::vmar::vm_mapping::VmMapping;
/// Virtual Memory Objects (VMOs) are a type of capability that represents a
/// range of memory pages.
///
@ -92,7 +84,7 @@ pub trait VmoRightsOp {
if self.rights().contains(rights) {
Ok(())
} else {
Err(Error::AccessDenied)
return_errno_with_message!(Errno::EINVAL, "vmo rights check failed");
}
}
@ -117,17 +109,6 @@ impl<R> VmoRightsOp for Vmo<R> {
}
}
impl<R> PageFaultHandler for Vmo<R> {
default fn handle_page_fault(&self, page_fault_addr: Vaddr, write: bool) -> Result<()> {
if write {
self.check_rights(Rights::WRITE)?;
} else {
self.check_rights(Rights::READ)?;
}
self.0.handle_page_fault(page_fault_addr, write)
}
}
bitflags! {
/// VMO flags.
pub struct VmoFlags: u32 {
@ -145,6 +126,7 @@ bitflags! {
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum VmoType {
/// This vmo_ is created as a copy on write child
CopyOnWriteChild,
@ -177,8 +159,6 @@ struct VmoInner {
/// The pages from the parent that the current vmo can access. The pages can only be inherited when creating a child vmo.
/// We store the page index range.
inherited_pages: InheritedPages,
/// The current mapping on this vmo. The vmo can be mapped to multiple vmars.
mappings: Vec<Weak<VmMapping>>,
}
/// Pages inherited from parent
@ -221,7 +201,6 @@ impl InheritedPages {
impl Vmo_ {
pub fn commit_page(&self, offset: usize) -> Result<()> {
// assert!(offset % PAGE_SIZE == 0);
let page_idx = offset / PAGE_SIZE;
let mut inner = self.inner.lock();
@ -238,19 +217,12 @@ impl Vmo_ {
VmFrameVec::from_one_frame(frame)
}
};
// Update Mapping
for vm_mapping in &inner.mappings {
if let Some(vm_mapping) = vm_mapping.upgrade() {
vm_mapping.map_one_page(page_idx, frames.clone())?;
}
}
inner.committed_pages.insert(page_idx, frames);
}
Ok(())
}
pub fn decommit_page(&self, offset: usize) -> Result<()> {
// assert!(offset % PAGE_SIZE == 0);
let page_idx = offset / PAGE_SIZE;
let mut inner = self.inner.lock();
if inner.committed_pages.contains_key(&page_idx) {
@ -258,22 +230,13 @@ impl Vmo_ {
if let Some(pager) = &inner.pager {
pager.decommit_page(offset)?;
}
// Update mappings
for vm_mapping in &inner.mappings {
if let Some(vm_mapping) = vm_mapping.upgrade() {
vm_mapping.unmap_one_page(page_idx)?;
}
}
}
Ok(())
}
pub fn commit(&self, range: Range<usize>) -> Result<()> {
assert!(range.start % PAGE_SIZE == 0);
assert!(range.end % PAGE_SIZE == 0);
let start_page_idx = range.start / PAGE_SIZE;
let end_page_idx = (range.end - 1) / PAGE_SIZE;
for page_idx in start_page_idx..=end_page_idx {
let page_idx_range = get_page_idx_range(&range);
for page_idx in page_idx_range {
let offset = page_idx * PAGE_SIZE;
self.commit_page(offset)?;
}
@ -282,43 +245,60 @@ impl Vmo_ {
}
pub fn decommit(&self, range: Range<usize>) -> Result<()> {
// assert!(range.start % PAGE_SIZE == 0);
// assert!(range.end % PAGE_SIZE == 0);
let start_page_idx = range.start / PAGE_SIZE;
let end_page_idx = (range.end - 1) / PAGE_SIZE;
for page_idx in start_page_idx..=end_page_idx {
let page_idx_range = get_page_idx_range(&range);
for page_idx in page_idx_range {
let offset = page_idx * PAGE_SIZE;
self.decommit_page(offset)?;
}
Ok(())
}
/// Handle page fault.
pub fn handle_page_fault(&self, offset: usize, write: bool) -> Result<()> {
if offset >= self.size() {
return Err(Error::AccessDenied);
}
let page_idx = offset / PAGE_SIZE;
self.ensure_page_exists(page_idx, write)?;
Ok(())
}
/// Determine whether a page is committed.
pub fn page_commited(&self, page_idx: usize) -> bool {
self.inner.lock().committed_pages.contains_key(&page_idx)
}
/// Map a page to the vm space. The page is ensured to be committed before calling this function.
pub fn map_page(
pub fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
let read_len = buf.len();
debug_assert!(offset + read_len <= self.size());
if offset + read_len > self.size() {
return_errno_with_message!(Errno::EINVAL, "read range exceeds vmo size");
}
let read_range = offset..(offset + read_len);
let frames = self.ensure_all_pages_exist(read_range, false)?;
let read_offset = offset % PAGE_SIZE;
Ok(frames.read_bytes(read_offset, buf)?)
}
/// Ensure all pages inside the range are backed by vm frames; returns the frames.
fn ensure_all_pages_exist(&self, range: Range<usize>, write_page: bool) -> Result<VmFrameVec> {
let page_idx_range = get_page_idx_range(&range);
let mut frames = VmFrameVec::empty();
for page_idx in page_idx_range {
let mut page_frame = self.get_backup_frame(page_idx, write_page, true)?;
frames.append(&mut page_frame)?;
}
Ok(frames)
}
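// Example (assuming PAGE_SIZE = 0x1000): a read of 0x1800 bytes at offset 0x800
// covers the byte range 0x800..0x2000, i.e. page indices 0..2 after outward
// alignment; the loop gathers the backup frames of pages 0 and 1 into one
// VmFrameVec, and the caller reads starting at read_offset = 0x800 % PAGE_SIZE.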
/// Get the backup frame for a page. If commit_if_none is set, we commit a new page
/// if the page does not yet have a backup frame.
fn get_backup_frame(
&self,
page_idx: usize,
vm_space: &VmSpace,
map_options: VmMapOptions,
) -> Result<Vaddr> {
debug_assert!(self.page_commited(page_idx));
if !self.page_commited(page_idx) {
return Err(Error::AccessDenied);
write_page: bool,
commit_if_none: bool,
) -> Result<VmFrameVec> {
// if the page is already committed, return the committed page.
if let Some(frames) = self.inner.lock().committed_pages.get(&page_idx) {
return Ok(frames.clone());
}
match self.vmo_type {
// if the vmo is not a child, commit a new page
VmoType::NotChild => {
if commit_if_none {
self.commit_page(page_idx * PAGE_SIZE)?;
let frames = self
.inner
.lock()
@ -326,61 +306,24 @@ impl Vmo_ {
.get(&page_idx)
.unwrap()
.clone();
vm_space.map(frames, &map_options)
}
pub fn add_mapping(&self, mapping: Weak<VmMapping>) {
self.inner.lock().mappings.push(mapping);
}
pub fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
let read_len = buf.len();
debug_assert!(offset + read_len <= self.size());
if offset + read_len > self.size() {
return Err(Error::InvalidArgs);
}
let read_range = offset..(offset + read_len);
let frames = self.ensure_all_pages_exist(read_range, false)?;
let read_offset = offset % PAGE_SIZE;
frames.read_bytes(read_offset, buf)
}
/// Ensure all pages inside the range are backed by vm frames; returns the frames.
fn ensure_all_pages_exist(&self, range: Range<usize>, write_page: bool) -> Result<VmFrameVec> {
let start_page_idx = range.start / PAGE_SIZE;
let end_page_idx = (range.end - 1) / PAGE_SIZE; // The end addr is not included
let mut frames = VmFrameVec::empty();
for page_idx in start_page_idx..=end_page_idx {
let mut page_frame = self.ensure_page_exists(page_idx, write_page)?;
frames.append(&mut page_frame)?;
}
Ok(frames)
}
/// Ensure one page is backed by a vmframe; returns the vmframe.
fn ensure_page_exists(&self, page_idx: usize, write_page: bool) -> Result<VmFrameVec> {
let inner = self.inner.lock();
// if the page is already committed, return the committed page.
if inner.committed_pages.contains_key(&page_idx) {
let frames = inner.committed_pages.get(&page_idx).unwrap().clone();
return Ok(frames);
} else {
return_errno_with_message!(Errno::EINVAL, "backup frame does not exist");
}
match self.vmo_type {
// if the vmo is not a child, commit a new page
VmoType::NotChild => {
self.commit_page(page_idx * PAGE_SIZE)?;
let frames = inner.committed_pages.get(&page_idx).unwrap().clone();
return Ok(frames);
}
// if the vmo is a slice child, we request the frame from the parent
VmoType::SliceChild => {
let inner = self.inner.lock();
debug_assert!(inner.inherited_pages.contains_page(page_idx));
if !inner.inherited_pages.contains_page(page_idx) {
return Err(Error::AccessDenied);
return_errno_with_message!(
Errno::EINVAL,
"page does not inherited from parent"
);
}
let parent = self.parent.upgrade().unwrap();
let parent_page_idx = inner.inherited_pages.parent_page_idx(page_idx).unwrap();
return parent.ensure_page_exists(parent_page_idx, write_page);
return parent.get_backup_frame(parent_page_idx, write_page, commit_if_none);
}
// If the vmo is a copy-on-write child
VmoType::CopyOnWriteChild => {
@ -388,6 +331,7 @@ impl Vmo_ {
// write
// commit a new page
self.commit_page(page_idx * PAGE_SIZE)?;
let inner = self.inner.lock();
let frames = inner.committed_pages.get(&page_idx).unwrap().clone();
if let Some(parent_page_idx) = inner.inherited_pages.parent_page_idx(page_idx) {
// copy contents of parent to the frame
@ -401,14 +345,26 @@ impl Vmo_ {
return Ok(frames);
} else {
// read
if let Some(parent_page_idx) = inner.inherited_pages.parent_page_idx(page_idx) {
if let Some(parent_page_idx) =
self.inner.lock().inherited_pages.parent_page_idx(page_idx)
{
// If it's inherited from parent, we request the page from parent
let parent = self.parent.upgrade().unwrap();
return parent.ensure_page_exists(parent_page_idx, write_page);
return parent.get_backup_frame(
parent_page_idx,
write_page,
commit_if_none,
);
} else {
// Otherwise, we commit a new page
self.commit_page(page_idx * PAGE_SIZE)?;
let frames = inner.committed_pages.get(&page_idx).unwrap().clone();
let frames = self
.inner
.lock()
.committed_pages
.get(&page_idx)
.unwrap()
.clone();
// FIXME: should we zero the frames here?
frames.zero();
return Ok(frames);
@ -422,7 +378,7 @@ impl Vmo_ {
let write_len = buf.len();
debug_assert!(offset + write_len <= self.size());
if offset + write_len > self.size() {
return Err(Error::InvalidArgs);
return_errno_with_message!(Errno::EINVAL, "write range exceeds the vmo size");
}
let write_range = offset..(offset + write_len);
@ -442,7 +398,22 @@ impl Vmo_ {
}
pub fn resize(&self, new_size: usize) -> Result<()> {
todo!()
assert!(self.flags.contains(VmoFlags::RESIZABLE));
let new_size = new_size.align_up(PAGE_SIZE);
let old_size = self.size();
if new_size == old_size {
return Ok(());
}
if new_size < old_size {
self.decommit(new_size..old_size)?;
self.inner.lock().size = new_size;
} else {
self.commit(old_size..new_size)?;
self.inner.lock().size = new_size;
}
Ok(())
}
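// Worked example (assuming PAGE_SIZE = 0x1000): resizing a 3-page (0x3000-byte)
// RESIZABLE vmo to 0x1800 bytes aligns the new size up to 0x2000, decommits the
// byte range 0x2000..0x3000 and records size = 0x2000; growing instead commits
// the new tail pages eagerly before updating the size.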
pub fn paddr(&self) -> Option<Paddr> {
@ -472,17 +443,32 @@ impl<R> Vmo<R> {
}
/// return whether a page is already committed
pub fn page_commited(&self, page_idx: usize) -> bool {
self.0.page_commited(page_idx)
pub fn has_backup_frame(&self, page_idx: usize) -> bool {
self.0.get_backup_frame(page_idx, false, false).is_ok()
}
/// map a committed page, returns the map address
pub fn map_page(
pub fn get_backup_frame(
&self,
page_idx: usize,
vm_space: &VmSpace,
map_options: VmMapOptions,
) -> Result<Vaddr> {
self.0.map_page(page_idx, vm_space, map_options)
write_page: bool,
commit_if_none: bool,
) -> Result<VmFrameVec> {
self.0
.get_backup_frame(page_idx, write_page, commit_if_none)
}
pub fn is_cow_child(&self) -> bool {
self.0.vmo_type == VmoType::CopyOnWriteChild
}
}
/// Get the page index range that covers the given byte offset range of the vmo.
pub fn get_page_idx_range(vmo_offset_range: &Range<usize>) -> Range<usize> {
let start = vmo_offset_range.start.align_down(PAGE_SIZE);
let end = vmo_offset_range.end.align_up(PAGE_SIZE);
(start / PAGE_SIZE)..(end / PAGE_SIZE)
}
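// Example (PAGE_SIZE = 0x1000): the byte range 0x1800..0x3200 is aligned outward
// to 0x1000..0x4000, so the returned page index range is 1..4.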

View File

@ -3,17 +3,13 @@
use core::marker::PhantomData;
use core::ops::Range;
use alloc::collections::BTreeMap;
use alloc::sync::Arc;
use alloc::sync::Weak;
use alloc::vec::Vec;
use jinux_frame::config::PAGE_SIZE;
use jinux_frame::vm::{Paddr, VmAllocOptions, VmFrameVec};
use jinux_frame::{Error, Result};
use jinux_frame::AlignExt;
use jinux_rights_proc::require;
use spin::Mutex;
use typeflags_util::{SetExtend, SetExtendOp};
use crate::prelude::*;
use crate::rights::{Dup, Rights, TRights, Write};
use crate::vm::vmo::InheritedPages;
use crate::vm::vmo::VmoType;
@ -153,19 +149,13 @@ fn alloc_vmo_(
flags: VmoFlags,
pager: Option<Arc<dyn Pager>>,
) -> Result<Vmo_> {
debug_assert!(size % PAGE_SIZE == 0);
if size % PAGE_SIZE != 0 {
return Err(Error::InvalidArgs);
}
let size = size.align_up(PAGE_SIZE);
let committed_pages = committed_pages_if_continuous(flags, size, paddr)?;
// FIXME: can the pager be None when allocating a vmo?
let vmo_inner = VmoInner {
pager,
size,
committed_pages,
inherited_pages: InheritedPages::new_empty(),
mappings: Vec::new(),
// pages_should_fill_zeros: BTreeSet::new(),
};
Ok(Vmo_ {
flags,
@ -471,34 +461,37 @@ fn alloc_child_vmo_(
debug_assert!(child_vmo_start % PAGE_SIZE == 0);
debug_assert!(child_vmo_end % PAGE_SIZE == 0);
if child_vmo_start % PAGE_SIZE != 0 || child_vmo_end % PAGE_SIZE != 0 {
return Err(Error::InvalidArgs);
return_errno_with_message!(Errno::EINVAL, "vmo range does not aligned with PAGE_SIZE");
}
let parent_vmo_size = parent_vmo_.size();
let parent_vmo_inner = parent_vmo_.inner.lock();
match child_type {
ChildType::Slice => {
// A slice child should be inside the parent vmo's range
debug_assert!(child_vmo_end <= parent_vmo_inner.size);
if child_vmo_end > parent_vmo_inner.size {
return Err(Error::InvalidArgs);
return_errno_with_message!(
Errno::EINVAL,
"slice child vmo cannot exceed parent vmo's size"
);
}
}
ChildType::Cow => {
// A copy-on-write child should intersect with the parent vmo
debug_assert!(range.start < parent_vmo_inner.size);
if range.start >= parent_vmo_inner.size {
return Err(Error::InvalidArgs);
return_errno_with_message!(Errno::EINVAL, "COW vmo should overlap with its parent");
}
}
}
let parent_page_idx_offset = range.start / PAGE_SIZE;
let inherited_end = range.end.min(parent_vmo_.size());
let inherited_end = range.end.min(parent_vmo_size);
let inherited_end_page_idx = inherited_end / PAGE_SIZE + 1;
let inherited_pages = InheritedPages::new(0..inherited_end_page_idx, parent_page_idx_offset);
let vmo_inner = VmoInner {
pager: None,
size: child_vmo_end - child_vmo_start,
committed_pages: BTreeMap::new(),
mappings: Vec::new(),
inherited_pages,
};
let child_paddr = parent_vmo_

View File

@ -1,4 +1,4 @@
use jinux_frame::prelude::Result;
use crate::prelude::*;
use jinux_frame::vm::VmFrame;
/// Pagers provide frame to a VMO.

View File

@ -1,6 +1,5 @@
use crate::prelude::*;
use core::ops::Range;
use jinux_frame::prelude::Result;
use jinux_frame::vm::VmIo;
use jinux_rights_proc::require;
@ -67,6 +66,12 @@ impl<R: TRights> Vmo<R> {
Ok(VmoChildOptions::new_cow(dup_self, range))
}
/// Commit a page at a specific offset.
pub fn commit_page(&self, offset: usize) -> Result<()> {
self.check_rights(Rights::WRITE)?;
self.0.commit_page(offset)
}
/// Commit the pages specified in the range (in bytes).
///
/// The range must be within the size of the VMO.
@ -137,14 +142,16 @@ impl<R: TRights> Vmo<R> {
}
impl<R: TRights> VmIo for Vmo<R> {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> jinux_frame::Result<()> {
self.check_rights(Rights::READ)?;
self.0.read_bytes(offset, buf)
self.0.read_bytes(offset, buf)?;
Ok(())
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
fn write_bytes(&self, offset: usize, buf: &[u8]) -> jinux_frame::Result<()> {
self.check_rights(Rights::WRITE)?;
self.0.write_bytes(offset, buf)
self.0.write_bytes(offset, buf)?;
Ok(())
}
}