Merge pull request #52 from StevenJiang1110/vm-cap

Implement the VM capability and refactor the existing code to use it
This commit is contained in:
Tate, Hongliang Tian 2022-12-16 16:39:48 +08:00 committed by GitHub
commit 1d401fd8fc
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
98 changed files with 3125 additions and 1344 deletions
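The capability-style VM API introduced by this PR centers on `Vmar` (virtual memory address regions) and `Vmo` (virtual memory objects) guarded by rights. Below is a minimal usage sketch pieced together from the changed code in this diff; the module paths and exact signatures are assumptions, not the definitive API.

```rust
use crate::prelude::*;
use crate::rights::{Full, Rights};
use crate::vm::perms::VmPerms;
use crate::vm::{vmar::Vmar, vmo::VmoOptions};

// Map a zero-filled, readable and writable region into a root VMAR.
// (Hypothetical helper; the Vmar/Vmo methods appear in this PR's diff.)
fn map_anonymous_region(root_vmar: &Vmar<Full>, addr: Vaddr, size: usize) -> Result<()> {
    // A VMO owns the pages; rights control what the holder may do with it.
    let vmo = VmoOptions::<Rights>::new(size).alloc()?;
    vmo.clear(0..vmo.size())?;
    // Mapping is expressed as a builder on the VMAR.
    let perms = VmPerms::READ | VmPerms::WRITE;
    root_vmar.new_map(vmo, perms)?.offset(addr).build()?;
    Ok(())
}
```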

View File

@ -2,5 +2,5 @@
We don't include the source code of busybox here since it is quite large. Busybox can be compiled with the following commands.
After downloading the source code of busybox 1.35.0 and unpacking it, cd into the busybox directory:
1. ```make defconfig #set config to default```
2. change the line in .config, `#CONFIG_STATIC is not set` => `CONFIG_STATIC=y`. We need a static-compiled busybox
1. `make defconfig`. This sets all config options to their defaults.
2. Change the line in .config: `#CONFIG_STATIC is not set` => `CONFIG_STATIC=y`. We need a statically linked busybox binary since we do not support dynamic linking yet.

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cea612414dc19fcd03b563607ea9a453a3d3390b9f3b229ef8e56b08e4d4c8c5
oid sha256:9bc1642390b9dc38ecc058240e529401c38aa8bb9a86bad3615e4bdad505fa8c
size 9592

View File

@ -6,6 +6,7 @@ use core::mem::MaybeUninit;
use crate::debug;
use crate::trap::{CalleeRegs, CallerRegs, SyscallFrame, TrapFrame};
use crate::x86_64_util::rdfsbase;
use pod::Pod;
/// Defines a CPU-local variable.
@ -156,7 +157,7 @@ impl From<TrapFrame> for CpuContext {
rip: trap.rip,
rflag: trap.rflags,
},
fs_base: 0,
fs_base: rdfsbase(),
fp_regs: FpRegs::default(),
trap_information: TrapInformation {
cr2: trap.cr2,

View File

@ -1,5 +1,6 @@
use super::{page_table::PageTable, *};
use crate::prelude::*;
use crate::vm::VmIo;
use crate::{
config::PAGE_SIZE,
mm::address::is_aligned,

View File

@ -7,5 +7,6 @@ pub(crate) use alloc::sync::Arc;
pub(crate) use alloc::vec::Vec;
pub(crate) use core::any::Any;
pub use crate::debug;
pub(crate) use crate::util::AlignExt;
pub use crate::vm::{Paddr, Vaddr};

View File

@ -38,7 +38,7 @@ __trap_entry:
push r12
push rbp
push rbx
mov rdi, 0
# mov rdi, 0
push rdi
save
# save cr2

View File

@ -4,6 +4,7 @@ use crate::{config::PAGE_SIZE, mm::address::PhysAddr, prelude::*, Error};
use pod::Pod;
use super::VmIo;
use alloc::vec;
use crate::mm::PhysFrame;
@ -14,6 +15,7 @@ use crate::mm::PhysFrame;
/// type to represent a series of page frames is convenient because,
/// more often than not, one needs to operate on a batch of frames rather
/// than a single frame.
#[derive(Debug, Clone)]
pub struct VmFrameVec(Vec<VmFrame>);
impl VmFrameVec {
@ -31,7 +33,7 @@ impl VmFrameVec {
let mut frame_list = Vec::new();
for i in 0..page_size {
let vm_frame = if let Some(paddr) = options.paddr {
VmFrame::alloc_with_paddr(paddr)
VmFrame::alloc_with_paddr(paddr + i * PAGE_SIZE)
} else {
VmFrame::alloc()
};
@ -43,6 +45,11 @@ impl VmFrameVec {
Ok(Self(frame_list))
}
/// returns an empty VmFrameVec
pub fn empty() -> Self {
Self(Vec::new())
}
/// Pushes a new frame to the collection.
pub fn push(&mut self, new_frame: VmFrame) {
self.0.push(new_frame);
@ -73,6 +80,11 @@ impl VmFrameVec {
Ok(())
}
/// zeros all internal vm frames
pub fn zero(&self) {
self.0.iter().for_each(|vm_frame| vm_frame.zero())
}
/// Truncate some frames.
///
/// If `new_len >= self.len()`, then this method has no effect.
@ -88,6 +100,11 @@ impl VmFrameVec {
self.0.iter()
}
/// Returns an owning iterator over the internal frames
pub fn into_iter(self) -> alloc::vec::IntoIter<VmFrame> {
self.0.into_iter()
}
/// Returns the number of frames.
pub fn len(&self) -> usize {
self.0.len()
@ -104,6 +121,10 @@ impl VmFrameVec {
pub fn nbytes(&self) -> usize {
self.0.len() * PAGE_SIZE
}
pub fn from_one_frame(frame: VmFrame) -> Self {
Self(vec![frame])
}
}
impl VmIo for VmFrameVec {
@ -224,7 +245,7 @@ impl VmAllocOptions {
}
}
#[derive(Debug, Clone)]
#[derive(Debug)]
/// A handle to a page frame.
///
/// An instance of `VmFrame` is a handle to a page frame (a physical memory
@ -239,6 +260,14 @@ pub struct VmFrame {
pub(crate) physical_frame: Arc<PhysFrame>,
}
impl Clone for VmFrame {
fn clone(&self) -> Self {
Self {
physical_frame: self.physical_frame.clone(),
}
}
}
impl VmFrame {
/// Creates a new VmFrame.
///
@ -288,6 +317,11 @@ impl VmFrame {
self.physical_frame.start_pa().0
}
/// fills the frame with zeros
pub fn zero(&self) {
unsafe { core::ptr::write_bytes(self.start_pa().kvaddr().as_ptr(), 0, PAGE_SIZE) }
}
pub fn start_pa(&self) -> PhysAddr {
self.physical_frame.start_pa()
}

View File

@ -22,15 +22,17 @@ use super::VmIo;
/// A newly-created `VmSpace` is not backed by any physical memory pages.
/// To provide memory pages for a `VmSpace`, one can allocate and map
/// physical memory (`VmFrames`) to the `VmSpace`.
#[derive(Debug, Clone)]
pub struct VmSpace {
memory_set: Mutex<MemorySet>,
memory_set: Arc<Mutex<MemorySet>>,
}
impl VmSpace {
/// Creates a new VM address space.
pub fn new() -> Self {
Self {
memory_set: Mutex::new(MemorySet::new()),
memory_set: Arc::new(Mutex::new(MemorySet::new())),
}
}
/// Activate the page table, load root physical address to cr3
@ -55,6 +57,7 @@ impl VmSpace {
if options.addr.is_none() {
return Err(Error::InvalidArgs);
}
// debug!("map to vm space: 0x{:x}", options.addr.unwrap());
self.memory_set.lock().map(MapArea::new(
VirtAddr(options.addr.unwrap()),
frames.len() * PAGE_SIZE,
@ -108,15 +111,6 @@ impl Default for VmSpace {
}
}
impl Clone for VmSpace {
fn clone(&self) -> Self {
let memory_set = self.memory_set.lock().clone();
VmSpace {
memory_set: Mutex::new(memory_set),
}
}
}
impl VmIo for VmSpace {
fn read_bytes(&self, vaddr: usize, buf: &mut [u8]) -> Result<()> {
self.memory_set.lock().read_bytes(vaddr, buf)
@ -129,11 +123,16 @@ impl VmIo for VmSpace {
/// Options for mapping physical memory pages into a VM address space.
/// See `VmSpace::map`.
#[derive(Clone)]
pub struct VmMapOptions {
/// The start virtual address of the mapping
addr: Option<Vaddr>,
/// The alignment of the mapping
align: usize,
/// The permissions of the mapping
perm: VmPerm,
/// Whether the mapping may overwrite existing mappings
can_overwrite: bool,
}
impl VmMapOptions {
@ -141,7 +140,9 @@ impl VmMapOptions {
pub fn new() -> Self {
Self {
addr: None,
align: PAGE_SIZE,
perm: VmPerm::empty(),
can_overwrite: false,
}
}
@ -152,7 +153,8 @@ impl VmMapOptions {
///
/// The default value of this option is the page size.
pub fn align(&mut self, align: usize) -> &mut Self {
todo!()
self.align = align;
self
}
/// Sets the permissions of the mapping, which affects whether
@ -181,7 +183,8 @@ impl VmMapOptions {
///
/// The default value of this option is `false`.
pub fn can_overwrite(&mut self, can_overwrite: bool) -> &mut Self {
todo!()
self.can_overwrite = can_overwrite;
self
}
}
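A short, hypothetical caller showing the completed `VmMapOptions` builder; the `addr`/`perm` calls follow existing usage elsewhere in this diff, while the exact flag combination is an assumption:

```rust
use jinux_frame::vm::{VmFrameVec, VmMapOptions, VmPerm, VmSpace};

// Map `frames` at `vaddr` in `vm_space` with read/write permissions.
fn map_frames_rw(vm_space: &VmSpace, frames: VmFrameVec, vaddr: usize) {
    let mut options = VmMapOptions::new();
    options.addr(Some(vaddr));
    options.perm(VmPerm::R | VmPerm::W);
    options.align(4096); // page-aligned, which is also the default
    options.can_overwrite(false); // do not silently replace existing mappings
    vm_space.map(frames, &options).expect("map failed");
}
```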

View File

@ -211,6 +211,20 @@ impl From<core::ffi::FromBytesWithNulError> for Error {
}
}
impl From<Error> for jinux_frame::Error {
fn from(error: Error) -> Self {
match error.errno {
Errno::EACCES => jinux_frame::Error::AccessDenied,
Errno::EIO => jinux_frame::Error::IoError,
Errno::ENOMEM => jinux_frame::Error::NoMemory,
Errno::EFAULT => jinux_frame::Error::PageFault,
Errno::EINVAL => jinux_frame::Error::InvalidArgs,
Errno::EBUSY => jinux_frame::Error::NotEnoughResources,
_ => jinux_frame::Error::InvalidArgs,
}
}
}
impl From<alloc::ffi::NulError> for Error {
fn from(_: alloc::ffi::NulError) -> Self {
Error::with_message(Errno::E2BIG, "Cannot find null in cstring")

View File

@ -18,7 +18,7 @@ macro_rules! define_fcntl_cmd {
fn try_from(value: i32) -> Result<Self> {
match value {
$($name => Ok(FcntlCmd::$name),)*
_ => return_errno!(Errno::EINVAL),
_ => return_errno_with_message!(Errno::EINVAL, "Unknown fcntl cmd"),
}
}
}

View File

@ -60,7 +60,9 @@ impl FileTable {
self.table.remove(&fd);
}
pub fn get_file(&self, fd: FileDescripter) -> Option<&Arc<dyn File>> {
self.table.get(&fd)
pub fn get_file(&self, fd: FileDescripter) -> Result<&Arc<dyn File>> {
self.table
.get(&fd)
.ok_or(Error::with_message(Errno::EBADF, "fd does not exist"))
}
}
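Since `get_file` now returns a `Result`, callers can propagate `EBADF` with `?` instead of unwrapping an `Option`. A minimal hypothetical caller:

```rust
// Look up an fd and clone the file handle, surfacing EBADF if it is absent.
fn dup_file(file_table: &FileTable, fd: FileDescripter) -> Result<Arc<dyn File>> {
    let file = file_table.get_file(fd)?;
    Ok(file.clone())
}
```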

View File

@ -10,6 +10,10 @@
#![feature(btree_drain_filter)]
#![feature(const_option)]
#![feature(extend_one)]
// FIXME: This feature is used to support the vm capability as a workaround for now.
// Since this is an incomplete feature, using it is unsound.
// We should find a proper way to replace it with min_specialization, which is a sound feature.
#![feature(specialization)]
use crate::{
prelude::*,
@ -31,7 +35,6 @@ extern crate alloc;
pub mod driver;
pub mod error;
pub mod fs;
mod memory;
pub mod prelude;
mod process;
pub mod rights;

View File

@ -1,55 +0,0 @@
use crate::prelude::*;
use jinux_frame::vm::VmIo;
use pod::Pod;
pub mod vm_page;
/// copy bytes from user space of current process. The bytes len is the len of dest.
pub fn read_bytes_from_user(src: Vaddr, dest: &mut [u8]) -> Result<()> {
let current = current!();
let vm_space = current.vm_space().ok_or(Error::with_message(
Errno::ESRCH,
"[Internal error]Current should have vm space to copy bytes from user",
))?;
vm_space.read_bytes(src, dest)?;
Ok(())
}
/// copy val (Plain of Data type) from user space of current process.
pub fn read_val_from_user<T: Pod>(src: Vaddr) -> Result<T> {
let current = current!();
let vm_space = current.vm_space().ok_or(Error::with_message(
Errno::ESRCH,
"[Internal error]Current should have vm space to copy val from user",
))?;
Ok(vm_space.read_val(src)?)
}
/// write bytes from user space of current process. The bytes len is the len of src.
pub fn write_bytes_to_user(dest: Vaddr, src: &[u8]) -> Result<()> {
let current = current!();
let vm_space = current.vm_space().ok_or(Error::with_message(
Errno::ESRCH,
"[Internal error]Current should have vm space to write bytes to user",
))?;
vm_space.write_bytes(dest, src)?;
Ok(())
}
/// write val (Plain of Data type) to user space of current process.
pub fn write_val_to_user<T: Pod>(dest: Vaddr, val: &T) -> Result<()> {
let current = current!();
let vm_space = current.vm_space().ok_or(Error::with_message(
Errno::ESRCH,
"[Internal error]Current should have vm space to write val to user",
))?;
vm_space.write_val(dest, val)?;
Ok(())
}
/// read a cstring from user, the length of cstring should not exceed max_len(include null byte)
pub fn read_cstring_from_user(addr: Vaddr, max_len: usize) -> Result<CString> {
let mut buffer = vec![0u8; max_len];
read_bytes_from_user(addr, &mut buffer)?;
Ok(CString::from(CStr::from_bytes_until_nul(&buffer)?))
}

View File

@ -1,160 +0,0 @@
//! A Page in virtual address space
use crate::prelude::*;
use core::ops::Range;
use jinux_frame::vm::{VmAllocOptions, VmFrameVec, VmIo, VmMapOptions, VmPerm, VmSpace};
/// A set of **CONTINUOUS** virtual pages in VmSpace
pub struct VmPageRange {
start_page: VmPage,
end_page: VmPage,
}
impl VmPageRange {
/// create a set of pages containing virtual address range [a, b)
pub const fn new_range(vaddr_range: Range<Vaddr>) -> Self {
let start_page = VmPage::containing_address(vaddr_range.start);
let end_page = VmPage::containing_address(vaddr_range.end - 1);
Self {
start_page,
end_page,
}
}
pub const fn new_page_range(start_page: VmPage, end_page: VmPage) -> Self {
Self {
start_page,
end_page,
}
}
/// returns the page containing the specific vaddr
pub const fn containing_address(vaddr: Vaddr) -> Self {
let page = VmPage::containing_address(vaddr);
Self {
start_page: page,
end_page: page,
}
}
pub const fn start_address(&self) -> Vaddr {
self.start_page.start_address()
}
/// the address right after the end page
pub const fn end_address(&self) -> Vaddr {
self.end_page.start_address() + PAGE_SIZE
}
/// allocate a set of physical frames and map self to frames
pub fn map(&mut self, vm_space: &VmSpace, vm_perm: VmPerm) {
let options = VmAllocOptions::new(self.len());
let frames = VmFrameVec::allocate(&options).expect("allocate frame error");
self.map_to(vm_space, frames, vm_perm);
}
/// map self to a set of zeroed frames
pub fn map_zeroed(&self, vm_space: &VmSpace, vm_perm: VmPerm) {
let options = VmAllocOptions::new(self.len());
let frames = VmFrameVec::allocate(&options).expect("allocate frame error");
let buffer = vec![0u8; self.nbytes()];
self.map_to(vm_space, frames, vm_perm);
vm_space
.write_bytes(self.start_address(), &buffer)
.expect("write zero failed");
// frames.write_bytes(0, &buffer).expect("write zero failed");
}
/// map self to a set of frames
pub fn map_to(&self, vm_space: &VmSpace, frames: VmFrameVec, vm_perm: VmPerm) {
assert_eq!(self.len(), frames.len());
let mut vm_map_options = VmMapOptions::new();
vm_map_options.addr(Some(self.start_address()));
vm_map_options.perm(vm_perm);
vm_space.map(frames, &vm_map_options).expect("map failed");
}
pub fn unmap(&mut self, vm_space: &VmSpace) {
vm_space
.unmap(&(self.start_address()..self.end_address()))
.expect("unmap failed");
}
pub fn is_mapped(&self, vm_space: &VmSpace) -> bool {
todo!()
}
pub fn iter(&self) -> VmPageIter<'_> {
VmPageIter {
current: self.start_page,
page_range: self,
}
}
/// return the number of virtual pages
pub const fn len(&self) -> usize {
self.end_page.vpn - self.start_page.vpn + 1
}
pub const fn nbytes(&self) -> usize {
self.len() * PAGE_SIZE
}
}
pub struct VmPageIter<'a> {
current: VmPage,
page_range: &'a VmPageRange,
}
impl<'a> Iterator for VmPageIter<'a> {
type Item = VmPage;
fn next(&mut self) -> Option<Self::Item> {
let next_page = if self.current <= self.page_range.end_page {
Some(self.current)
} else {
None
};
self.current = self.current.next_page();
next_page
}
}
/// A Virtual Page
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct VmPage {
/// Virtual Page Number
vpn: usize,
}
impl VmPage {
pub const fn containing_address(vaddr: Vaddr) -> Self {
Self {
vpn: vaddr / PAGE_SIZE,
}
}
pub const fn start_address(&self) -> Vaddr {
self.vpn * PAGE_SIZE
}
pub const fn next_page(&self) -> VmPage {
VmPage { vpn: self.vpn + 1 }
}
/// Check whether current page is mapped
pub fn is_mapped(&self, vm_space: &VmSpace) -> bool {
vm_space.is_mapped(self.start_address())
}
pub fn map_page(&self, vm_space: &VmSpace, vm_perm: VmPerm) -> Result<()> {
let vm_alloc_option = VmAllocOptions::new(1);
let vm_frame = VmFrameVec::allocate(&vm_alloc_option)?;
let mut vm_map_options = VmMapOptions::new();
vm_map_options.addr(Some(self.start_address()));
vm_map_options.perm(vm_perm);
vm_space.map(vm_frame, &vm_map_options)?;
Ok(())
}
}

View File

@ -2,6 +2,7 @@
pub(crate) use alloc::boxed::Box;
pub(crate) use alloc::collections::BTreeMap;
pub(crate) use alloc::collections::BTreeSet;
pub(crate) use alloc::collections::LinkedList;
pub(crate) use alloc::collections::VecDeque;
pub(crate) use alloc::ffi::CString;

View File

@ -96,18 +96,9 @@ pub fn clone_child(parent_context: CpuContext, clone_args: CloneArgs) -> Result<
let child_pid = new_pid();
let current = Process::current();
// child process vm space
// FIXME: COPY ON WRITE can be used here
let parent_vm_space = current
.vm_space()
.expect("User task should always have vm space");
let child_vm_space = parent_vm_space.clone();
debug_check_clone_vm_space(parent_vm_space, &child_vm_space);
let child_file_name = match current.filename() {
None => None,
Some(filename) => Some(filename.clone()),
};
// child process vmar
let parent_root_vmar = current.root_vmar().unwrap();
let child_root_vmar = current.root_vmar().unwrap().fork_vmar()?;
// child process user_vm
let child_user_vm = match current.user_vm() {
@ -115,18 +106,16 @@ pub fn clone_child(parent_context: CpuContext, clone_args: CloneArgs) -> Result<
Some(user_vm) => Some(user_vm.clone()),
};
// child process cpu context
// child process user space
let mut child_cpu_context = parent_context.clone();
debug!("parent context: {:x?}", parent_context);
debug!("parent gp_regs: {:x?}", child_cpu_context.gp_regs);
child_cpu_context.gp_regs.rax = 0; // Set return value of child process
let child_vm_space = child_root_vmar.vm_space().clone();
let child_user_space = Arc::new(UserSpace::new(child_vm_space, child_cpu_context));
debug!("before spawn child task");
debug!("current pid: {}", current.pid());
debug!("child process pid: {}", child_pid);
debug!("rip = 0x{:x}", child_cpu_context.gp_regs.rip);
let child_file_name = match current.filename() {
None => None,
Some(filename) => Some(filename.clone()),
};
let child_file_table = current.file_table.lock().clone();
// inherit parent's sig disposition
@ -145,6 +134,7 @@ pub fn clone_child(parent_context: CpuContext, clone_args: CloneArgs) -> Result<
child_file_name,
child_user_vm,
Some(child_user_space),
Some(child_root_vmar),
None,
child_file_table,
child_sig_dispositions,
@ -189,12 +179,11 @@ fn clone_child_clear_tid(child_process: &Arc<Process>) -> Result<()> {
}
fn clone_child_set_tid(child_process: &Arc<Process>, clone_args: CloneArgs) -> Result<()> {
debug!("clone child set tid");
let child_pid = child_process.pid();
let child_vm = child_process
.vm_space()
let child_vmar = child_process
.root_vmar()
.ok_or_else(|| Error::new(Errno::ECHILD))?;
child_vm.write_val(clone_args.child_tidptr, &child_pid)?;
child_vmar.write_val(clone_args.child_tidptr, &child_pid)?;
Ok(())
}

View File

@ -1,339 +0,0 @@
//! This module is used to parse elf file content to get elf_load_info.
//! When create a process from elf file, we will use the elf_load_info to construct the VmSpace
use crate::{
memory::vm_page::{VmPage, VmPageRange},
prelude::*,
};
use core::{cmp::Ordering, ops::Range};
use jinux_frame::vm::{VmAllocOptions, VmFrameVec, VmIo, VmPerm, VmSpace};
use xmas_elf::{
header,
program::{self, ProgramHeader, ProgramHeader64, SegmentData},
ElfFile,
};
use super::init_stack::InitStack;
pub struct ElfLoadInfo<'a> {
segments: Vec<ElfSegment<'a>>,
init_stack: InitStack,
elf_header_info: ElfHeaderInfo,
}
pub struct ElfSegment<'a> {
range: Range<Vaddr>,
data: &'a [u8],
type_: program::Type,
vm_perm: VmPerm,
}
#[derive(Debug, Clone, Copy, Default)]
/// Info parsed from elf header. The entry point is used to set rip
/// The other info is used to set auxv vectors.
pub struct ElfHeaderInfo {
/// the entry point of the elf
pub entry_point: Vaddr,
/// page header table offset
pub ph_off: u64,
/// number of program headers
pub ph_num: u16,
/// The size of a program header
pub ph_ent: usize,
}
impl<'a> ElfSegment<'a> {
fn parse_elf_segment(segment: ProgramHeader<'a>, elf_file: &ElfFile<'a>) -> Result<Self> {
let start = segment.virtual_addr() as Vaddr;
let end = start + segment.mem_size() as Vaddr;
let type_ = match segment.get_type() {
Err(error_msg) => return_errno_with_message!(Errno::ENOEXEC, error_msg),
Ok(type_) => type_,
};
let data = read_segment_data(segment, elf_file)?;
let vm_perm = Self::parse_segment_perm(segment)?;
Ok(Self {
range: start..end,
type_,
data,
vm_perm,
})
}
pub fn parse_segment_perm(segment: ProgramHeader<'a>) -> Result<VmPerm> {
let flags = segment.flags();
if !flags.is_read() {
return_errno_with_message!(Errno::ENOEXEC, "unreadable segment");
}
let mut vm_perm = VmPerm::R;
if flags.is_write() {
vm_perm |= VmPerm::W;
}
if flags.is_execute() {
vm_perm |= VmPerm::X;
}
Ok(vm_perm)
}
pub fn is_loadable(&self) -> bool {
self.type_ == program::Type::Load
}
pub fn start_address(&self) -> Vaddr {
self.range.start
}
pub fn end_address(&self) -> Vaddr {
self.range.end
}
fn copy_and_map_segment(&self, vm_space: &VmSpace) -> Result<()> {
let start_address = self.start_address();
let page_mask = PAGE_SIZE - 1;
let segment_len = self.end_address() - self.start_address();
let data_len = self.data.len();
let zeroed_bytes = if segment_len > data_len {
vec![0u8; segment_len - data_len]
} else {
Vec::new()
};
// according to linux abi, the first page may be on same page with another segment.
// So at first, we will check whether the first page is mapped.
if vm_space.is_mapped(start_address) {
// The first page is mapped. This is the rare case.
let write_len_on_first_page =
(PAGE_SIZE - (start_address & page_mask)).min(self.data.len());
vm_space
.write_bytes(start_address, &self.data[..write_len_on_first_page])
.expect("Write first page failed");
let start_page = VmPage::containing_address(start_address).next_page();
let end_page = VmPage::containing_address(self.end_address());
if end_page >= start_page {
let vm_page_range = VmPageRange::new_page_range(start_page, end_page);
let page_num = vm_page_range.len();
let vm_alloc_options = VmAllocOptions::new(page_num);
let frames = VmFrameVec::allocate(&vm_alloc_options)?;
frames.write_bytes(0, &self.data[write_len_on_first_page..])?;
if zeroed_bytes.len() > 0 {
frames.write_bytes(data_len - write_len_on_first_page, &zeroed_bytes)?;
}
vm_page_range.map_to(vm_space, frames, self.vm_perm);
} else {
if zeroed_bytes.len() > 0 {
vm_space.write_bytes(start_address + data_len, &zeroed_bytes)?;
}
}
} else {
// The first page is not mapped. This is the common case.
let vm_page_range = VmPageRange::new_range(start_address..self.end_address());
let page_num = vm_page_range.len();
let vm_alloc_options = VmAllocOptions::new(page_num);
let frames = VmFrameVec::allocate(&vm_alloc_options)?;
let offset = start_address & page_mask;
// copy segment
frames.write_bytes(offset, &self.data)?;
// write zero bytes
if zeroed_bytes.len() > 0 {
let write_addr = offset + data_len;
frames.write_bytes(write_addr, &zeroed_bytes)?;
}
vm_page_range.map_to(vm_space, frames, self.vm_perm);
}
Ok(())
}
fn is_page_aligned(&self) -> bool {
self.start_address() % PAGE_SIZE == 0
}
}
impl<'a> ElfLoadInfo<'a> {
fn with_capacity(
capacity: usize,
init_stack: InitStack,
elf_header_info: ElfHeaderInfo,
) -> Self {
Self {
segments: Vec::with_capacity(capacity),
init_stack,
elf_header_info,
}
}
fn add_segment(&mut self, elf_segment: ElfSegment<'a>) {
self.segments.push(elf_segment);
}
pub fn parse_elf_data(
elf_file_content: &'a [u8],
filename: CString,
argv: Vec<CString>,
envp: Vec<CString>,
) -> Result<Self> {
let elf_file = match ElfFile::new(elf_file_content) {
Err(error_msg) => return_errno_with_message!(Errno::ENOEXEC, error_msg),
Ok(elf_file) => elf_file,
};
check_elf_header(&elf_file)?;
// parse elf header
let elf_header_info = ElfHeaderInfo::parse_elf_header(&elf_file);
// FIXME: only contains load segment?
let segments_count = elf_file.program_iter().count();
let init_stack = InitStack::new_default_config(filename, argv, envp);
let mut elf_load_info =
ElfLoadInfo::with_capacity(segments_count, init_stack, elf_header_info);
// parse each segemnt
for segment in elf_file.program_iter() {
let elf_segment = ElfSegment::parse_elf_segment(segment, &elf_file)?;
if elf_segment.is_loadable() {
elf_load_info.add_segment(elf_segment)
}
}
Ok(elf_load_info)
}
fn vm_page_range(&self) -> Result<VmPageRange> {
let elf_start_address = self
.segments
.iter()
.filter(|segment| segment.is_loadable())
.map(|segment| segment.start_address())
.min()
.unwrap();
let elf_end_address = self
.segments
.iter()
.filter(|segment| segment.is_loadable())
.map(|segment| segment.end_address())
.max()
.unwrap();
Ok(VmPageRange::new_range(elf_start_address..elf_end_address))
}
/// copy and map all segment
pub fn copy_and_map_segments(&self, vm_space: &VmSpace) -> Result<()> {
for segment in self.segments.iter() {
segment.copy_and_map_segment(vm_space)?;
}
Ok(())
}
pub fn init_stack(&mut self, vm_space: &VmSpace) {
self.init_stack
.init(vm_space, &self.elf_header_info)
.expect("Init User Stack failed");
}
/// This function will write the program header table to the initial stack top.
/// This function must be called after init process initial stack.
/// This infomation is used to set Auxv vectors.
pub fn write_program_header_table(&self, vm_space: &VmSpace, file_content: &[u8]) {
let write_len = PAGE_SIZE.min(file_content.len());
let write_content = &file_content[..write_len];
let write_addr = self.init_stack.init_stack_top() - PAGE_SIZE;
vm_space
.write_bytes(write_addr, write_content)
.expect("Write elf content failed");
}
pub fn entry_point(&self) -> u64 {
self.elf_header_info.entry_point as u64
}
pub fn user_stack_top(&self) -> u64 {
self.init_stack.user_stack_top() as u64
}
pub fn argc(&self) -> u64 {
self.init_stack.argc()
}
pub fn argv(&self) -> u64 {
self.init_stack.argv()
}
pub fn envc(&self) -> u64 {
self.init_stack.envc()
}
pub fn envp(&self) -> u64 {
self.init_stack.envp()
}
/// read content from vmspace to ensure elf data is correctly copied to user space
pub fn debug_check_map_result(&self, vm_space: &VmSpace) {
for segment in self.segments.iter() {
let start_address = segment.start_address();
let len = segment.data.len();
let mut read_buffer = vec![0; len];
vm_space
.read_bytes(start_address, &mut read_buffer)
.expect("read bytes failed");
let res = segment.data.cmp(&read_buffer);
assert_eq!(res, Ordering::Equal);
}
}
}
impl ElfHeaderInfo {
fn parse_elf_header(elf_file: &ElfFile) -> Self {
let entry_point = elf_file.header.pt2.entry_point() as Vaddr;
let ph_off = elf_file.header.pt2.ph_offset();
let ph_num = elf_file.header.pt2.ph_count();
let ph_ent = core::mem::size_of::<ProgramHeader64>();
ElfHeaderInfo {
entry_point,
ph_off,
ph_num,
ph_ent,
}
}
}
fn check_elf_header(elf_file: &ElfFile) -> Result<()> {
let elf_header = elf_file.header;
// 64bit
debug_assert_eq!(elf_header.pt1.class(), header::Class::SixtyFour);
if elf_header.pt1.class() != header::Class::SixtyFour {
return_errno!(Errno::ENOEXEC);
}
// little endian
debug_assert_eq!(elf_header.pt1.data(), header::Data::LittleEndian);
if elf_header.pt1.data() != header::Data::LittleEndian {
return_errno!(Errno::ENOEXEC);
}
// system V ABI
// debug_assert_eq!(elf_header.pt1.os_abi(), header::OsAbi::SystemV);
// if elf_header.pt1.os_abi() != header::OsAbi::SystemV {
// return Error::new(Errno::ENOEXEC);
// }
// x86_64 architecture
debug_assert_eq!(
elf_header.pt2.machine().as_machine(),
header::Machine::X86_64
);
if elf_header.pt2.machine().as_machine() != header::Machine::X86_64 {
return_errno!(Errno::ENOEXEC);
}
// Executable file
debug_assert_eq!(elf_header.pt2.type_().as_type(), header::Type::Executable);
if elf_header.pt2.type_().as_type() != header::Type::Executable {
return_errno!(Errno::ENOEXEC);
}
Ok(())
}
fn read_segment_data<'a>(segment: ProgramHeader<'a>, elf_file: &ElfFile<'a>) -> Result<&'a [u8]> {
match segment.get_data(&elf_file) {
Err(msg) => return_errno_with_message!(Errno::ENOEXEC, msg),
Ok(data) => match data {
SegmentData::Note64(_, data) | SegmentData::Undefined(data) => Ok(data),
_ => return_errno_with_message!(Errno::ENOEXEC, "Unkonwn segment data type"),
},
}
}

View File

@ -0,0 +1,141 @@
/// A wrapper around xmas_elf's elf parsing
use xmas_elf::{
header::{self, Header, HeaderPt1, HeaderPt2, HeaderPt2_, Machine_, Type_},
program::ProgramHeader64,
};
use crate::prelude::*;
pub struct Elf {
pub elf_header: ElfHeader,
pub program_headers: Vec<ProgramHeader64>,
}
impl Elf {
pub fn parse_elf(input: &[u8]) -> Result<Self> {
// first parse the elf header
// The elf header is usually 64 bytes: pt1 is 16 bytes and pt2 is 48 bytes.
// We require 128 bytes here to keep consistency with Linux implementations.
debug_assert!(input.len() >= 128);
let header =
xmas_elf::header::parse_header(input).map_err(|_| Error::new(Errno::ENOEXEC))?;
let elf_header = ElfHeader::parse_elf_header(header)?;
check_elf_header(&elf_header)?;
// then parse the program header table
// FIXME: we should acquire enough pages before parsing
let ph_offset = elf_header.pt2.ph_offset;
let ph_count = elf_header.pt2.ph_count;
let ph_entry_size = elf_header.pt2.ph_entry_size;
debug_assert!(
input.len() >= ph_offset as usize + ph_count as usize * ph_entry_size as usize
);
let mut program_headers = Vec::with_capacity(ph_count as usize);
for index in 0..ph_count {
let program_header = xmas_elf::program::parse_program_header(input, header, index)
.map_err(|_| Error::new(Errno::ENOEXEC))?;
let ph64 = match program_header {
xmas_elf::program::ProgramHeader::Ph64(ph64) => ph64.clone(),
xmas_elf::program::ProgramHeader::Ph32(_) => {
return_errno_with_message!(Errno::ENOEXEC, "Not a 64-bit executable")
}
};
program_headers.push(ph64);
}
Ok(Self {
elf_header,
program_headers,
})
}
}
pub struct ElfHeader {
pub pt1: HeaderPt1,
pub pt2: HeaderPt2_64,
}
impl ElfHeader {
fn parse_elf_header(header: Header) -> Result<Self> {
let pt1 = header.pt1.clone();
let pt2 = match header.pt2 {
HeaderPt2::Header64(header_pt2) => {
let HeaderPt2_ {
type_,
machine,
version,
entry_point,
ph_offset,
sh_offset,
flags,
header_size,
ph_entry_size,
ph_count,
sh_entry_size,
sh_count,
sh_str_index,
} = header_pt2;
HeaderPt2_64 {
type_: *type_,
machine: *machine,
version: *version,
entry_point: *entry_point,
ph_offset: *ph_offset,
sh_offset: *sh_offset,
flags: *flags,
header_size: *header_size,
ph_entry_size: *ph_entry_size,
ph_count: *ph_count,
sh_entry_size: *sh_entry_size,
sh_count: *sh_count,
sh_str_index: *sh_str_index,
}
}
_ => return_errno_with_message!(Errno::ENOEXEC, "parse elf header failed"),
};
Ok(ElfHeader { pt1, pt2 })
}
}
pub struct HeaderPt2_64 {
pub type_: Type_,
pub machine: Machine_,
pub version: u32,
pub entry_point: u64,
pub ph_offset: u64,
pub sh_offset: u64,
pub flags: u32,
pub header_size: u16,
pub ph_entry_size: u16,
pub ph_count: u16,
pub sh_entry_size: u16,
pub sh_count: u16,
pub sh_str_index: u16,
}
fn check_elf_header(elf_header: &ElfHeader) -> Result<()> {
// 64bit
debug_assert_eq!(elf_header.pt1.class(), header::Class::SixtyFour);
if elf_header.pt1.class() != header::Class::SixtyFour {
return_errno_with_message!(Errno::ENOEXEC, "Not a 64-bit executable");
}
// little endian
debug_assert_eq!(elf_header.pt1.data(), header::Data::LittleEndian);
if elf_header.pt1.data() != header::Data::LittleEndian {
return_errno_with_message!(Errno::ENOEXEC, "Not little endian executable");
}
// system V ABI
// debug_assert_eq!(elf_header.pt1.os_abi(), header::OsAbi::SystemV);
// if elf_header.pt1.os_abi() != header::OsAbi::SystemV {
// return Error::new(Errno::ENOEXEC);
// }
// x86_64 architecture
debug_assert_eq!(elf_header.pt2.machine.as_machine(), header::Machine::X86_64);
if elf_header.pt2.machine.as_machine() != header::Machine::X86_64 {
return_errno_with_message!(Errno::ENOEXEC, "Not x86_64 executable");
}
// Executable file
debug_assert_eq!(elf_header.pt2.type_.as_type(), header::Type::Executable);
if elf_header.pt2.type_.as_type() != header::Type::Executable {
return_errno_with_message!(Errno::ENOEXEC, "Not executable file");
}
Ok(())
}
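A hypothetical caller of the new wrapper, just to illustrate the parsed fields; the field names come from the structs above:

```rust
// Parse an ELF image and log each program header.
fn dump_program_headers(elf_file_content: &[u8]) -> Result<()> {
    let elf = Elf::parse_elf(elf_file_content)?;
    debug!("entry point: 0x{:x}", elf.elf_header.pt2.entry_point);
    for ph in &elf.program_headers {
        debug!(
            "segment: vaddr = 0x{:x}, mem size = 0x{:x}, file offset = 0x{:x}, file size = 0x{:x}",
            ph.virtual_addr, ph.mem_size, ph.offset, ph.file_size
        );
    }
    Ok(())
}
```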

View File

@ -0,0 +1,69 @@
use crate::prelude::*;
use crate::vm::vmar::{get_intersected_range, is_intersected};
use jinux_frame::vm::{VmAllocOptions, VmFrameVec, VmIo};
use jinux_frame::AlignExt;
use crate::vm::vmo::Pager;
use super::load_elf::ElfSegment;
/// The pager behind an elf segment
pub struct ElfSegmentPager {
/// The pager size
pager_size: usize,
/// data for current segment
segment_data: &'static [u8],
/// The offset of the segment data.
/// The pager always starts at a page-aligned address, while the segment data may start at any address.
/// So the offset is the segment data's start address % PAGE_SIZE
offset: usize,
}
impl ElfSegmentPager {
pub fn new(elf_file_content: &'static [u8], elf_segment: &ElfSegment) -> Self {
let start = elf_segment.start_address().align_down(PAGE_SIZE);
let end = elf_segment.end_address().align_up(PAGE_SIZE);
let pager_size = end - start;
let offset = elf_segment.start_address() % PAGE_SIZE;
let elf_file_segment =
&elf_file_content[elf_segment.offset..elf_segment.offset + elf_segment.file_size];
Self {
pager_size,
segment_data: elf_file_segment,
offset,
}
}
}
impl Pager for ElfSegmentPager {
fn commit_page(&self, offset: usize) -> Result<jinux_frame::vm::VmFrame> {
if offset >= self.pager_size {
return_errno_with_message!(Errno::EINVAL, "offset exceeds pager size");
}
let vm_alloc_option = VmAllocOptions::new(1);
let mut vm_frames = VmFrameVec::allocate(&vm_alloc_option)?;
vm_frames.zero();
let page_start = offset.align_down(PAGE_SIZE);
let page_end = page_start + PAGE_SIZE;
let page_range = page_start..page_end;
let data_range = self.offset..self.offset + self.segment_data.len();
if is_intersected(&page_range, &data_range) {
let intersected_range = get_intersected_range(&page_range, &data_range);
let data_write_range =
(intersected_range.start - self.offset)..(intersected_range.end - self.offset);
let write_content = &self.segment_data[data_write_range];
let write_offset = intersected_range.start % PAGE_SIZE;
vm_frames.write_bytes(write_offset, write_content)?;
}
let vm_frame = vm_frames.pop().unwrap();
Ok(vm_frame)
}
fn update_page(&self, offset: usize) -> Result<()> {
unimplemented!()
}
fn decommit_page(&self, offset: usize) -> Result<()> {
unimplemented!()
}
}
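To make the arithmetic in `commit_page` concrete, here is a plausible sketch (not the actual implementation) of the two range helpers imported from `crate::vm::vmar`, plus a worked example: a segment whose data starts 0x10 bytes into its first page with length 0x100 has `data_range = 0x10..0x110`, so committing page 0 intersects it at `0x10..0x110` and the bytes are written at in-page offset 0x10.

```rust
use core::ops::Range;

// Two ranges intersect iff each starts before the other ends.
fn is_intersected(a: &Range<usize>, b: &Range<usize>) -> bool {
    a.start < b.end && b.start < a.end
}

// The intersection of two overlapping ranges.
fn get_intersected_range(a: &Range<usize>, b: &Range<usize>) -> Range<usize> {
    debug_assert!(is_intersected(a, b));
    a.start.max(b.start)..a.end.min(b.end)
}
```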

View File

@ -2,15 +2,21 @@
//! The process's initial stack contains arguments, environment variables and auxiliary vectors.
//! The data layout of the init stack can be seen in Figure 3.9 of https://uclibc.org/docs/psABI-x86_64.pdf
use crate::{memory::vm_page::VmPageRange, prelude::*};
use crate::rights::Rights;
use crate::vm::perms::VmPerms;
use crate::{
prelude::*,
rights::Full,
vm::{vmar::Vmar, vmo::VmoOptions},
};
use core::mem;
use jinux_frame::{
vm::{VmIo, VmPerm, VmSpace},
vm::{VmIo, VmPerm},
AlignExt,
};
use super::aux_vec::{AuxKey, AuxVec};
use super::elf::ElfHeaderInfo;
use super::load_elf::ElfHeaderInfo;
pub const INIT_STACK_BASE: Vaddr = 0x0000_0000_2000_0000;
pub const INIT_STACK_SIZE: usize = 0x1000 * 16; // 64KB
@ -90,10 +96,9 @@ impl InitStack {
}
/// This function only works for the first process
pub fn new_default_config(filename: CString, argv: Vec<CString>, envp: Vec<CString>) -> Self {
pub fn new_default_config(argv: Vec<CString>, envp: Vec<CString>) -> Self {
let init_stack_top = INIT_STACK_BASE - PAGE_SIZE;
let init_stack_size = INIT_STACK_SIZE;
// InitStack::new(filename, init_stack_top, init_stack_size, argv, envp)
InitStack::new(init_stack_top, init_stack_size, argv, envp)
}
@ -110,18 +115,28 @@ impl InitStack {
self.init_stack_top - self.init_stack_size
}
pub fn init(&mut self, vm_space: &VmSpace, elf_header_info: &ElfHeaderInfo) -> Result<()> {
self.map_and_zeroed(vm_space);
self.write_zero_page(vm_space); // This page is used to store page header table
self.write_stack_content(vm_space, elf_header_info)?;
self.debug_print_stack_content(vm_space);
pub fn init(
&mut self,
root_vmar: &Vmar<Full>,
elf_header_info: &ElfHeaderInfo,
ph_addr: Vaddr,
) -> Result<()> {
self.map_and_zeroed(root_vmar)?;
self.write_stack_content(root_vmar, elf_header_info, ph_addr)?;
self.debug_print_stack_content(root_vmar);
Ok(())
}
fn map_and_zeroed(&self, vm_space: &VmSpace) {
let vm_page_range = VmPageRange::new_range(self.user_stack_bottom()..self.user_stack_top());
let vm_perm = InitStack::perm();
vm_page_range.map_zeroed(vm_space, vm_perm);
fn map_and_zeroed(&self, root_vmar: &Vmar<Full>) -> Result<()> {
let vmo_options = VmoOptions::<Rights>::new(self.init_stack_size);
let vmo = vmo_options.alloc()?;
vmo.clear(0..vmo.size())?;
let perms = VmPerms::READ | VmPerms::WRITE;
let vmar_map_options = root_vmar
.new_map(vmo, perms)?
.offset(self.user_stack_bottom());
vmar_map_options.build().unwrap();
Ok(())
}
/// Libc ABI requires 16-byte alignment of the stack entrypoint.
@ -129,60 +144,54 @@ impl InitStack {
/// to meet the requirement if necessary.
fn adjust_stack_alignment(
&mut self,
vm_space: &VmSpace,
root_vmar: &Vmar<Full>,
envp_pointers: &Vec<u64>,
argv_pointers: &Vec<u64>,
) -> Result<()> {
// ensure 8-byte alignment
self.write_u64(0, vm_space)?;
self.write_u64(0, root_vmar)?;
let auxvec_size = (self.aux_vec.table().len() + 1) * (mem::size_of::<u64>() * 2);
let envp_pointers_size = (envp_pointers.len() + 1) * mem::size_of::<u64>();
let argv_pointers_size = (argv_pointers.len() + 1) * mem::size_of::<u64>();
let argc_size = mem::size_of::<u64>();
let to_write_size = auxvec_size + envp_pointers_size + argv_pointers_size + argc_size;
if (self.pos - to_write_size) % 16 != 0 {
self.write_u64(0, vm_space)?;
self.write_u64(0, root_vmar)?;
}
Ok(())
}
fn write_zero_page(&mut self, vm_space: &VmSpace) {
self.pos -= PAGE_SIZE;
}
fn write_stack_content(
&mut self,
vm_space: &VmSpace,
root_vmar: &Vmar<Full>,
elf_header_info: &ElfHeaderInfo,
ph_addr: Vaddr,
) -> Result<()> {
// write envp string
let envp_pointers = self.write_envp_strings(vm_space)?;
let envp_pointers = self.write_envp_strings(root_vmar)?;
// write argv string
let argv_pointers = self.write_argv_strings(vm_space)?;
let argv_pointers = self.write_argv_strings(root_vmar)?;
// write random value
let random_value = generate_random_for_aux_vec();
let random_value_pointer = self.write_bytes(&random_value, vm_space)?;
let random_value_pointer = self.write_bytes(&random_value, root_vmar)?;
self.aux_vec.set(AuxKey::AT_RANDOM, random_value_pointer)?;
self.aux_vec.set(AuxKey::AT_PAGESZ, PAGE_SIZE as _)?;
self.aux_vec.set(
AuxKey::AT_PHDR,
self.init_stack_top as u64 - PAGE_SIZE as u64 + elf_header_info.ph_off,
)?;
self.aux_vec.set(AuxKey::AT_PHDR, ph_addr as u64)?;
self.aux_vec
.set(AuxKey::AT_PHNUM, elf_header_info.ph_num as u64)?;
self.aux_vec
.set(AuxKey::AT_PHENT, elf_header_info.ph_ent as u64)?;
self.adjust_stack_alignment(vm_space, &envp_pointers, &argv_pointers)?;
self.write_aux_vec(vm_space)?;
self.write_envp_pointers(vm_space, envp_pointers)?;
self.write_argv_pointers(vm_space, argv_pointers)?;
self.adjust_stack_alignment(root_vmar, &envp_pointers, &argv_pointers)?;
self.write_aux_vec(root_vmar)?;
self.write_envp_pointers(root_vmar, envp_pointers)?;
self.write_argv_pointers(root_vmar, argv_pointers)?;
// write argc
let argc = self.argc();
self.write_u64(argc, vm_space)?;
self.write_u64(argc, root_vmar)?;
Ok(())
}
fn write_envp_strings(&mut self, vm_space: &VmSpace) -> Result<Vec<u64>> {
fn write_envp_strings(&mut self, root_vmar: &Vmar<Full>) -> Result<Vec<u64>> {
let envp = self
.envp
.iter()
@ -190,13 +199,13 @@ impl InitStack {
.collect::<Vec<_>>();
let mut envp_pointers = Vec::with_capacity(envp.len());
for envp in envp.iter() {
let pointer = self.write_cstring(envp, vm_space)?;
let pointer = self.write_cstring(envp, root_vmar)?;
envp_pointers.push(pointer);
}
Ok(envp_pointers)
}
fn write_argv_strings(&mut self, vm_space: &VmSpace) -> Result<Vec<u64>> {
fn write_argv_strings(&mut self, root_vmar: &Vmar<Full>) -> Result<Vec<u64>> {
let argv = self
.argv
.iter()
@ -204,17 +213,17 @@ impl InitStack {
.collect::<Vec<_>>();
let mut argv_pointers = Vec::with_capacity(argv.len());
for argv in argv.iter().rev() {
let pointer = self.write_cstring(argv, vm_space)?;
let pointer = self.write_cstring(argv, root_vmar)?;
argv_pointers.push(pointer);
}
argv_pointers.reverse();
Ok(argv_pointers)
}
fn write_aux_vec(&mut self, vm_space: &VmSpace) -> Result<()> {
fn write_aux_vec(&mut self, root_vmar: &Vmar<Full>) -> Result<()> {
// Write the NULL auxiliary entry
self.write_u64(0, vm_space)?;
self.write_u64(AuxKey::AT_NULL as u64, vm_space)?;
self.write_u64(0, root_vmar)?;
self.write_u64(AuxKey::AT_NULL as u64, root_vmar)?;
// Write Auxiliary vectors
let aux_vec: Vec<_> = self
.aux_vec
@ -223,38 +232,38 @@ impl InitStack {
.map(|(aux_key, aux_value)| (*aux_key, *aux_value))
.collect();
for (aux_key, aux_value) in aux_vec.iter() {
self.write_u64(*aux_value, vm_space)?;
self.write_u64(*aux_key as u64, vm_space)?;
self.write_u64(*aux_value, root_vmar)?;
self.write_u64(*aux_key as u64, root_vmar)?;
}
Ok(())
}
fn write_envp_pointers(
&mut self,
vm_space: &VmSpace,
root_vmar: &Vmar<Full>,
mut envp_pointers: Vec<u64>,
) -> Result<()> {
// write NULL pointer
self.write_u64(0, vm_space)?;
self.write_u64(0, root_vmar)?;
// write envp pointers
envp_pointers.reverse();
for envp_pointer in envp_pointers {
self.write_u64(envp_pointer, vm_space)?;
self.write_u64(envp_pointer, root_vmar)?;
}
Ok(())
}
fn write_argv_pointers(
&mut self,
vm_space: &VmSpace,
root_vmar: &Vmar<Full>,
mut argv_pointers: Vec<u64>,
) -> Result<()> {
// write 0
self.write_u64(0, vm_space)?;
self.write_u64(0, root_vmar)?;
// write argv pointers
argv_pointers.reverse();
for argv_pointer in argv_pointers {
self.write_u64(argv_pointer, vm_space)?;
self.write_u64(argv_pointer, root_vmar)?;
}
Ok(())
}
@ -286,35 +295,35 @@ impl InitStack {
}
/// returns the u64 start address
fn write_u64(&mut self, val: u64, vm_space: &VmSpace) -> Result<u64> {
fn write_u64(&mut self, val: u64, root_vmar: &Vmar<Full>) -> Result<u64> {
let start_address = (self.pos - 8).align_down(8);
self.pos = start_address;
vm_space.write_val(start_address, &val)?;
root_vmar.write_val(start_address, &val)?;
Ok(self.pos as u64)
}
fn write_bytes(&mut self, bytes: &[u8], vm_space: &VmSpace) -> Result<u64> {
fn write_bytes(&mut self, bytes: &[u8], root_vmar: &Vmar<Full>) -> Result<u64> {
let len = bytes.len();
self.pos -= len;
vm_space.write_bytes(self.pos, bytes)?;
root_vmar.write_bytes(self.pos, bytes)?;
Ok(self.pos as u64)
}
/// returns the string's start address
/// the cstring is written with a terminating null byte.
fn write_cstring(&mut self, val: &CString, vm_space: &VmSpace) -> Result<u64> {
fn write_cstring(&mut self, val: &CString, root_vmar: &Vmar<Full>) -> Result<u64> {
let bytes = val.as_bytes_with_nul();
self.write_bytes(bytes, vm_space)
self.write_bytes(bytes, root_vmar)
}
pub const fn perm() -> VmPerm {
VmPerm::RWU
}
fn debug_print_stack_content(&self, vm_space: &VmSpace) {
fn debug_print_stack_content(&self, root_vmar: &Vmar<Full>) {
debug!("print stack content:");
let stack_top = self.user_stack_top();
let argc = vm_space.read_val::<u64>(stack_top).unwrap();
let argc = root_vmar.read_val::<u64>(stack_top).unwrap();
debug!("argc = {}", argc);
}
}

View File

@ -0,0 +1,250 @@
//! This module is used to parse elf file content to get elf_load_info.
//! When creating a process from an elf file, we will use the elf_load_info to construct the VmSpace
use crate::vm::perms::VmPerms;
use crate::vm::vmo::VmoRightsOp;
use crate::{
prelude::*,
rights::Full,
vm::{
vmar::Vmar,
vmo::{Pager, Vmo, VmoOptions},
},
};
use jinux_frame::vm::VmPerm;
use jinux_frame::AlignExt;
use xmas_elf::program::{self, ProgramHeader64};
use super::elf_file::Elf;
use super::elf_segment_pager::ElfSegmentPager;
use super::init_stack::InitStack;
pub struct ElfLoadInfo {
segments: Vec<ElfSegment>,
init_stack: InitStack,
elf_header_info: ElfHeaderInfo,
}
pub struct ElfSegment {
/// The virtual address at which to place the segment.
pub virtual_addr: Vaddr,
/// The segment's size in memory, in bytes.
pub mem_size: usize,
/// The segment's offset in the original elf file
pub offset: usize,
/// The segment's size in the original elf file, in bytes
pub file_size: usize,
type_: program::Type,
vm_perm: VmPerm,
}
#[derive(Debug, Clone, Copy, Default)]
/// Info parsed from elf header. The entry point is used to set rip
/// The other info is used to set auxv vectors.
pub struct ElfHeaderInfo {
/// the entry point of the elf
pub entry_point: Vaddr,
/// program header table offset
pub ph_off: u64,
/// number of program headers
pub ph_num: u16,
/// The size of a program header
pub ph_ent: u16,
}
impl ElfSegment {
fn parse_elf_segment(program_header: ProgramHeader64) -> Result<Self> {
let start = program_header.virtual_addr as Vaddr;
let end = start + program_header.mem_size as Vaddr;
let type_ = program_header
.get_type()
.map_err(|_| Error::new(Errno::ENOEXEC))?;
let vm_perm = Self::parse_segment_perm(program_header.flags)?;
Ok(Self {
virtual_addr: program_header.virtual_addr as _,
mem_size: program_header.mem_size as usize,
offset: program_header.offset as usize,
file_size: program_header.file_size as usize,
type_,
vm_perm,
})
}
pub fn parse_segment_perm(flags: xmas_elf::program::Flags) -> Result<VmPerm> {
if !flags.is_read() {
return_errno_with_message!(Errno::ENOEXEC, "unreadable segment");
}
let mut vm_perm = VmPerm::R;
if flags.is_write() {
vm_perm |= VmPerm::W;
}
if flags.is_execute() {
vm_perm |= VmPerm::X;
}
Ok(vm_perm)
}
fn contains_program_headers_table(&self, ph_offset: usize) -> bool {
// program headers table is at ph_offset of elf file
self.offset <= ph_offset && ph_offset < self.offset + self.file_size
}
/// If this segment contains ph table, then returns the ph table addr
/// Otherwise, returns None
pub fn program_headers_table_addr(&self, ph_offset: usize) -> Option<Vaddr> {
if self.contains_program_headers_table(ph_offset) {
Some(ph_offset - self.offset + self.virtual_addr)
} else {
None
}
}
pub fn is_loadable(&self) -> bool {
self.type_ == program::Type::Load
}
pub fn start_address(&self) -> Vaddr {
self.virtual_addr
}
pub fn end_address(&self) -> Vaddr {
self.virtual_addr + self.mem_size
}
pub fn init_segment_vmo(&self, elf_file_content: &'static [u8]) -> Vmo<Full> {
let vmo_start = self.start_address().align_down(PAGE_SIZE);
let vmo_end = self.end_address().align_up(PAGE_SIZE);
let segment_len = vmo_end - vmo_start;
let pager = Arc::new(ElfSegmentPager::new(elf_file_content, self)) as Arc<dyn Pager>;
let vmo_alloc_options: VmoOptions<Full> = VmoOptions::new(segment_len).pager(pager);
vmo_alloc_options.alloc().unwrap()
}
// create vmo for each segment and map the segment to root_vmar
fn map_segment_vmo(
&self,
root_vmar: &Vmar<Full>,
elf_file_content: &'static [u8],
) -> Result<()> {
let vmo = self.init_segment_vmo(elf_file_content).to_dyn();
let perms = VmPerms::from(self.vm_perm);
// The segment may not be aligned to page
let offset = self.start_address().align_down(PAGE_SIZE);
let vm_map_options = root_vmar.new_map(vmo, perms)?.offset(offset);
let map_addr = vm_map_options.build()?;
Ok(())
}
}
impl ElfLoadInfo {
fn with_capacity(
capacity: usize,
init_stack: InitStack,
elf_header_info: ElfHeaderInfo,
) -> Self {
Self {
segments: Vec::with_capacity(capacity),
init_stack,
elf_header_info,
}
}
fn add_segment(&mut self, elf_segment: ElfSegment) {
self.segments.push(elf_segment);
}
pub fn parse_elf_data(
elf_file_content: &'static [u8],
argv: Vec<CString>,
envp: Vec<CString>,
) -> Result<Self> {
let elf_file = Elf::parse_elf(elf_file_content)?;
// parse elf header
let elf_header_info = ElfHeaderInfo::parse_elf_header(&elf_file);
// FIXME: only contains load segment?
let ph_count = elf_file.program_headers.len();
let init_stack = InitStack::new_default_config(argv, envp);
let mut elf_load_info = ElfLoadInfo::with_capacity(ph_count, init_stack, elf_header_info);
// parse each segment
for program_header in elf_file.program_headers {
let elf_segment = ElfSegment::parse_elf_segment(program_header)?;
if elf_segment.is_loadable() {
elf_load_info.add_segment(elf_segment)
}
}
Ok(elf_load_info)
}
/// init vmo for each segment and then map segment to root vmar
pub fn map_segment_vmos(
&self,
root_vmar: &Vmar<Full>,
elf_file_content: &'static [u8],
) -> Result<()> {
for segment in &self.segments {
segment.map_segment_vmo(root_vmar, elf_file_content)?;
}
Ok(())
}
pub fn init_stack(&mut self, root_vmar: &Vmar<Full>, file_content: &[u8]) -> Result<()> {
let ph_addr = self.program_headers_table_addr()?;
self.init_stack
.init(root_vmar, &self.elf_header_info, ph_addr)?;
Ok(())
}
fn program_headers_table_addr(&self) -> Result<Vaddr> {
let ph_offset = self.elf_header_info.ph_off as usize;
for segment in &self.segments {
if let Some(ph_addr) = segment.program_headers_table_addr(ph_offset) {
return Ok(ph_addr);
}
}
return_errno_with_message!(
Errno::ENOEXEC,
"can not find program header table address in elf"
);
}
pub fn entry_point(&self) -> u64 {
self.elf_header_info.entry_point as u64
}
pub fn user_stack_top(&self) -> u64 {
self.init_stack.user_stack_top() as u64
}
pub fn argc(&self) -> u64 {
self.init_stack.argc()
}
pub fn argv(&self) -> u64 {
self.init_stack.argv()
}
pub fn envc(&self) -> u64 {
self.init_stack.envc()
}
pub fn envp(&self) -> u64 {
self.init_stack.envp()
}
}
impl ElfHeaderInfo {
fn parse_elf_header(elf_file: &Elf) -> Self {
let entry_point = elf_file.elf_header.pt2.entry_point as Vaddr;
let ph_off = elf_file.elf_header.pt2.ph_offset;
let ph_num = elf_file.elf_header.pt2.ph_count;
let ph_ent = elf_file.elf_header.pt2.ph_entry_size;
ElfHeaderInfo {
entry_point,
ph_off,
ph_num,
ph_ent,
}
}
}
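A small worked example (made-up numbers) of the `AT_PHDR` address computation in `program_headers_table_addr` above: if the program header table sits at file offset `0x40` and a LOAD segment has `offset = 0`, `file_size = 0x1000` and `virtual_addr = 0x40_0000`, the table falls inside that segment and its user address is `0x40 - 0 + 0x40_0000`.

```rust
fn ph_table_addr_example() {
    let (ph_offset, seg_offset, seg_vaddr) = (0x40usize, 0x0usize, 0x40_0000usize);
    // Same formula as program_headers_table_addr: ph_offset - self.offset + self.virtual_addr
    let ph_addr = ph_offset - seg_offset + seg_vaddr;
    assert_eq!(ph_addr, 0x40_0040);
}
```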

View File

@ -1,29 +1,26 @@
pub mod aux_vec;
pub mod elf;
pub mod elf_file;
pub mod elf_segment_pager;
pub mod init_stack;
pub mod load_elf;
use jinux_frame::vm::VmSpace;
use self::load_elf::ElfLoadInfo;
use crate::{prelude::*, rights::Full, vm::vmar::Vmar};
use self::elf::ElfLoadInfo;
use crate::prelude::*;
/// load elf to a given vm_space. this function will
/// load elf to the root vmar. this function will
/// 1. read the vaddr of each segment to get all elf pages.
/// 2. allocate physical frames and copy elf data to these frames
/// 3. map frames to the correct vaddr
/// 4. (allocate frams and) map the user stack
pub fn load_elf_to_vm_space<'a>(
/// 2. create a vmo for each elf segment and a backing pager for each segment. Then map the vmo to the root vmar.
/// 3. write proper content to the init stack.
pub fn load_elf_to_root_vmar(
filename: CString,
elf_file_content: &'a [u8],
vm_space: &VmSpace,
elf_file_content: &'static [u8],
root_vmar: &Vmar<Full>,
argv: Vec<CString>,
envp: Vec<CString>,
) -> Result<ElfLoadInfo<'a>> {
let mut elf_load_info = ElfLoadInfo::parse_elf_data(elf_file_content, filename, argv, envp)?;
elf_load_info.copy_and_map_segments(vm_space)?;
elf_load_info.debug_check_map_result(vm_space);
elf_load_info.init_stack(vm_space);
elf_load_info.write_program_header_table(vm_space, elf_file_content);
) -> Result<ElfLoadInfo> {
let mut elf_load_info = ElfLoadInfo::parse_elf_data(elf_file_content, argv, envp)?;
elf_load_info.map_segment_vmos(root_vmar, elf_file_content)?;
elf_load_info.init_stack(root_vmar, elf_file_content)?;
debug!("load elf succeeds.");
Ok(elf_load_info)

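A hedged sketch of how a caller might drive the new loader; the parameter types match the signature above, but the surrounding call-site details (how the user task receives rip/rsp) are assumptions:

```rust
// Load an ELF into the root vmar and return the entry point and stack top,
// which the new user task would use as its initial rip and rsp.
fn load_init_executable(
    filename: CString,
    elf_file_content: &'static [u8],
    root_vmar: &Vmar<Full>,
) -> Result<(u64, u64)> {
    let argv = vec![filename.clone()];
    let envp = Vec::new();
    let elf_load_info = load_elf_to_root_vmar(filename, elf_file_content, root_vmar, argv, envp)?;
    Ok((elf_load_info.entry_point(), elf_load_info.user_stack_top()))
}
```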
View File

@ -1,14 +1,114 @@
use jinux_frame::cpu::CpuContext;
use jinux_frame::{
cpu::{CpuContext, TrapInformation},
trap::*,
vm::VmIo,
};
use crate::vm::page_fault_handler::PageFaultHandler;
use crate::{prelude::*, process::signal::signals::fault::FaultSignal};
/// We can't handle most exceptions; we just send the current process a fault signal before returning to user space.
pub fn handle_exception(context: &mut CpuContext) {
let trap_info = context.trap_information.clone();
log_trap_info(&trap_info);
let current = current!();
let pid = current.pid();
debug!("trap info = {:x?}", trap_info);
debug!("cpu context = {:x?}", context);
let signal = Box::new(FaultSignal::new(&trap_info));
let root_vmar = current.root_vmar().unwrap();
match trap_info.id {
PAGE_FAULT => handle_page_fault(&trap_info),
_ => {
// We currently do nothing about other exceptions
generate_fault_signal(&trap_info);
}
}
}
fn handle_page_fault(trap_info: &TrapInformation) {
const PAGE_NOT_PRESENT_ERROR_MASK: u64 = 0x1 << 0;
const WRITE_ACCESS_MASK: u64 = 0x1 << 1;
let not_present = trap_info.err & PAGE_NOT_PRESENT_ERROR_MASK == 0;
let write = trap_info.err & WRITE_ACCESS_MASK != 0;
if not_present || write {
// If the page is not present or the fault is caused by write access, we should ask the vmar to try to commit this page
let current = current!();
let root_vmar = current.root_vmar().unwrap();
let page_fault_addr = trap_info.cr2 as Vaddr;
debug!(
"Page fault address: 0x{:x}, write access: {}",
page_fault_addr, write
);
if let Err(_) = root_vmar.handle_page_fault(page_fault_addr, not_present, write) {
generate_fault_signal(trap_info);
} else {
// ensure page fault is successfully handled
// FIXME: this check can be removed
let vm_space = root_vmar.vm_space();
let _: u8 = vm_space.read_val(page_fault_addr).unwrap();
}
} else {
// Otherwise, the page fault cannot be handled
generate_fault_signal(trap_info);
}
}
/// generate a fault signal for current process.
fn generate_fault_signal(trap_info: &TrapInformation) {
let current = current!();
let signal = Box::new(FaultSignal::new(trap_info));
current.sig_queues().lock().enqueue(signal);
}
macro_rules! log_trap_common {
($exception_name: ident, $trap_info: ident) => {
debug!(
"[Trap][{}][err = {}]",
stringify!($exception_name),
$trap_info.err
)
};
}
fn log_trap_info(trap_info: &TrapInformation) {
match trap_info.id {
DIVIDE_BY_ZERO => log_trap_common!(DIVIDE_BY_ZERO, trap_info),
DEBUG => log_trap_common!(DEBUG, trap_info),
NON_MASKABLE_INTERRUPT => log_trap_common!(NON_MASKABLE_INTERRUPT, trap_info),
BREAKPOINT => log_trap_common!(BREAKPOINT, trap_info),
OVERFLOW => log_trap_common!(OVERFLOW, trap_info),
BOUND_RANGE_EXCEEDED => log_trap_common!(BOUND_RANGE_EXCEEDED, trap_info),
INVALID_OPCODE => log_trap_common!(INVALID_OPCODE, trap_info),
DEVICE_NOT_AVAILABLE => log_trap_common!(DEVICE_NOT_AVAILABLE, trap_info),
DOUBLE_FAULT => log_trap_common!(DOUBLE_FAULT, trap_info),
COPROCESSOR_SEGMENT_OVERRUN => log_trap_common!(COPROCESSOR_SEGMENT_OVERRUN, trap_info),
INVAILD_TSS => log_trap_common!(INVAILD_TSS, trap_info),
SEGMENT_NOT_PRESENT => log_trap_common!(SEGMENT_NOT_PRESENT, trap_info),
STACK_SEGMENT_FAULT => log_trap_common!(STACK_SEGMENT_FAULT, trap_info),
GENERAL_PROTECTION_FAULT => log_trap_common!(GENERAL_PROTECTION_FAULT, trap_info),
PAGE_FAULT => {
debug!(
"[Trap][{}][page fault addr = 0x{:x}, err = {}]",
stringify!(PAGE_FAULT),
trap_info.cr2,
trap_info.err
);
}
// 15 reserved
X87_FLOATING_POINT_EXCEPTION => log_trap_common!(X87_FLOATING_POINT_EXCEPTION, trap_info),
ALIGNMENT_CHECK => log_trap_common!(ALIGNMENT_CHECK, trap_info),
MACHINE_CHECK => log_trap_common!(MACHINE_CHECK, trap_info),
SIMD_FLOATING_POINT_EXCEPTION => log_trap_common!(SIMD_FLOATING_POINT_EXCEPTION, trap_info),
VIRTUALIZATION_EXCEPTION => log_trap_common!(VIRTUALIZATION_EXCEPTION, trap_info),
CONTROL_PROTECTION_EXCEPTION => log_trap_common!(CONTROL_PROTECTION_EXCEPTION, trap_info),
HYPERVISOR_INJECTION_EXCEPTION => {
log_trap_common!(HYPERVISOR_INJECTION_EXCEPTION, trap_info)
}
VMM_COMMUNICATION_EXCEPTION => log_trap_common!(VMM_COMMUNICATION_EXCEPTION, trap_info),
SECURITY_EXCEPTION => log_trap_common!(SECURITY_EXCEPTION, trap_info),
_ => {
info!(
"[Trap][Unknown trap type][id = {}, err = {}]",
trap_info.id, trap_info.err
);
}
}
}

View File

@ -2,7 +2,6 @@ use core::sync::atomic::{AtomicI32, Ordering};
use self::name::ProcessName;
use self::process_group::ProcessGroup;
use self::process_vm::mmap_area::MmapArea;
use self::process_vm::user_heap::UserHeap;
use self::process_vm::UserVm;
use self::signal::constants::SIGCHLD;
@ -14,9 +13,11 @@ use self::status::ProcessStatus;
use self::task::create_user_task_from_elf;
use crate::fs::file_table::FileTable;
use crate::prelude::*;
use crate::rights::Full;
use crate::tty::get_console;
use crate::vm::vmar::Vmar;
use jinux_frame::sync::WaitQueue;
use jinux_frame::{task::Task, user::UserSpace, vm::VmSpace};
use jinux_frame::{task::Task, user::UserSpace};
pub mod clone;
pub mod elf;
@ -47,6 +48,7 @@ pub struct Process {
filename: Option<CString>,
user_space: Option<Arc<UserSpace>>,
user_vm: Option<UserVm>,
root_vmar: Option<Vmar<Full>>,
/// wait for child status changed
waiting_children: WaitQueue,
/// wait for io events
@ -97,6 +99,7 @@ impl Process {
exec_filename: Option<CString>,
user_vm: Option<UserVm>,
user_space: Option<Arc<UserSpace>>,
root_vmar: Option<Vmar<Full>>,
process_group: Option<Weak<ProcessGroup>>,
file_table: FileTable,
sig_dispositions: SigDispositions,
@ -104,10 +107,8 @@ impl Process {
sig_mask: SigMask,
) -> Self {
let parent = if pid == 0 {
debug!("Init process does not has parent");
None
} else {
debug!("All process except init should have parent");
let current_process = current!();
Some(Arc::downgrade(&current_process))
};
@ -125,6 +126,7 @@ impl Process {
filename: exec_filename,
user_space,
user_vm,
root_vmar,
waiting_children,
poll_queue,
exit_code: AtomicI32::new(0),
@ -189,8 +191,15 @@ impl Process {
let user_process = Arc::new_cyclic(|weak_process_ref| {
let weak_process = weak_process_ref.clone();
let cloned_filename = Some(filename.clone());
let task =
create_user_task_from_elf(filename, elf_file_content, weak_process, argv, envp);
let root_vmar = Vmar::<Full>::new_root().unwrap();
let task = create_user_task_from_elf(
&root_vmar,
filename,
elf_file_content,
weak_process,
argv,
envp,
);
let user_space = task.user_space().map(|user_space| user_space.clone());
let user_vm = UserVm::new();
let file_table = FileTable::new_with_stdio();
@ -203,6 +212,7 @@ impl Process {
cloned_filename,
Some(user_vm),
user_space,
Some(root_vmar),
None,
file_table,
sig_dispositions,
@ -239,6 +249,7 @@ impl Process {
None,
None,
None,
None,
file_table,
sig_dispositions,
sig_queues,
@ -372,19 +383,16 @@ impl Process {
self.user_space.as_ref()
}
/// returns the vm space if the process does have, otherwise None
pub fn vm_space(&self) -> Option<&VmSpace> {
match self.user_space {
None => None,
Some(ref user_space) => Some(user_space.vm_space()),
}
}
/// returns the user_vm
pub fn user_vm(&self) -> Option<&UserVm> {
self.user_vm.as_ref()
}
/// returns the root vmar
pub fn root_vmar(&self) -> Option<&Vmar<Full>> {
self.root_vmar.as_ref()
}
/// returns the user heap if the process does have, otherwise None
pub fn user_heap(&self) -> Option<&UserHeap> {
match self.user_vm {
@ -393,14 +401,6 @@ impl Process {
}
}
/// returns the mmap area if the process does have, otherwise None
pub fn mmap_area(&self) -> Option<&MmapArea> {
match self.user_vm {
None => None,
Some(ref user_vm) => Some(user_vm.mmap_area()),
}
}
/// free zombie child with pid, returns the exit code of child process.
/// remove process from process group.
pub fn reap_zombie_child(&self, pid: Pid) -> i32 {

View File

@ -1,97 +0,0 @@
use core::sync::atomic::{AtomicUsize, Ordering};
use crate::{memory::vm_page::VmPageRange, prelude::*, process::elf::init_stack::INIT_STACK_BASE};
use jinux_frame::vm::{VmPerm, VmSpace};
// The definition of MMapFlags is from occlum
bitflags! {
pub struct MMapFlags : u32 {
const MAP_FILE = 0x0;
const MAP_SHARED = 0x1;
const MAP_PRIVATE = 0x2;
const MAP_SHARED_VALIDATE = 0x3;
const MAP_TYPE = 0xf;
const MAP_FIXED = 0x10;
const MAP_ANONYMOUS = 0x20;
const MAP_GROWSDOWN = 0x100;
const MAP_DENYWRITE = 0x800;
const MAP_EXECUTABLE = 0x1000;
const MAP_LOCKED = 0x2000;
const MAP_NORESERVE = 0x4000;
const MAP_POPULATE = 0x8000;
const MAP_NONBLOCK = 0x10000;
const MAP_STACK = 0x20000;
const MAP_HUGETLB = 0x40000;
const MAP_SYNC = 0x80000;
const MAP_FIXED_NOREPLACE = 0x100000;
}
}
impl TryFrom<u64> for MMapFlags {
type Error = Error;
fn try_from(value: u64) -> Result<Self> {
MMapFlags::from_bits(value as u32)
.ok_or_else(|| Error::with_message(Errno::EINVAL, "unknown mmap flags"))
}
}
#[derive(Debug)]
pub struct MmapArea {
base_addr: Vaddr,
current: AtomicUsize,
}
impl MmapArea {
pub const fn new() -> MmapArea {
MmapArea {
base_addr: INIT_STACK_BASE,
current: AtomicUsize::new(INIT_STACK_BASE),
}
}
pub fn mmap(
&self,
len: usize,
offset: usize,
vm_perm: VmPerm,
flags: MMapFlags,
vm_space: &VmSpace,
) -> Vaddr {
// TODO: how to respect flags?
if flags.complement().contains(MMapFlags::MAP_ANONYMOUS)
| flags.complement().contains(MMapFlags::MAP_PRIVATE)
{
panic!("Unsupported mmap flags {:?} now", flags);
}
if len % PAGE_SIZE != 0 {
panic!("Mmap only support page-aligned len");
}
if offset % PAGE_SIZE != 0 {
panic!("Mmap only support page-aligned offset");
}
let current = self.current.load(Ordering::Relaxed);
let vm_page_range = VmPageRange::new_range(current..(current + len));
vm_page_range.map_zeroed(vm_space, vm_perm);
self.current.store(current + len, Ordering::Relaxed);
debug!("mmap area start: 0x{:x}, size: {}", current, len);
current
}
/// Set mmap area to the default status. i.e., point current to base.
pub fn set_default(&self) {
self.current.store(self.base_addr, Ordering::Relaxed);
}
}
impl Clone for MmapArea {
fn clone(&self) -> Self {
let current = self.current.load(Ordering::Relaxed);
Self {
base_addr: self.base_addr.clone(),
current: AtomicUsize::new(current),
}
}
}

View File

@ -0,0 +1,34 @@
use crate::prelude::*;
// The definition of MMapFlags comes from Occlum
bitflags! {
pub struct MMapFlags : u32 {
const MAP_FILE = 0x0;
const MAP_SHARED = 0x1;
const MAP_PRIVATE = 0x2;
const MAP_SHARED_VALIDATE = 0x3;
const MAP_TYPE = 0xf;
const MAP_FIXED = 0x10;
const MAP_ANONYMOUS = 0x20;
const MAP_GROWSDOWN = 0x100;
const MAP_DENYWRITE = 0x800;
const MAP_EXECUTABLE = 0x1000;
const MAP_LOCKED = 0x2000;
const MAP_NORESERVE = 0x4000;
const MAP_POPULATE = 0x8000;
const MAP_NONBLOCK = 0x10000;
const MAP_STACK = 0x20000;
const MAP_HUGETLB = 0x40000;
const MAP_SYNC = 0x80000;
const MAP_FIXED_NOREPLACE = 0x100000;
}
}
impl TryFrom<u64> for MMapFlags {
type Error = Error;
fn try_from(value: u64) -> Result<Self> {
MMapFlags::from_bits(value as u32)
.ok_or_else(|| Error::with_message(Errno::EINVAL, "unknown mmap flags"))
}
}

View File

@ -4,10 +4,9 @@
//! So we define a UserVm struct to store such information.
//! Briefly, it contains the exact usage of each segment of the virtual address space.
pub mod mmap_area;
pub mod mmap_flags;
pub mod user_heap;
use mmap_area::MmapArea;
use user_heap::UserHeap;
/*
@ -41,30 +40,20 @@ use user_heap::UserHeap;
#[derive(Debug, Clone)]
pub struct UserVm {
user_heap: UserHeap,
mmap_area: MmapArea,
}
impl UserVm {
pub const fn new() -> Self {
let user_heap = UserHeap::new();
let mmap_area = MmapArea::new();
UserVm {
user_heap,
mmap_area,
}
UserVm { user_heap }
}
pub fn user_heap(&self) -> &UserHeap {
&self.user_heap
}
pub fn mmap_area(&self) -> &MmapArea {
&self.mmap_area
}
/// Set user vm to the init status
pub fn set_default(&self) {
self.user_heap.set_default();
self.mmap_area.set_default();
}
}

View File

@ -1,12 +1,15 @@
use core::sync::atomic::{AtomicUsize, Ordering};
use crate::vm::perms::VmPerms;
use crate::{
memory::vm_page::{VmPage, VmPageRange},
prelude::*,
rights::Rights,
vm::vmo::{VmoFlags, VmoOptions},
};
use jinux_frame::vm::{VmPerm, VmSpace};
use jinux_frame::AlignExt;
pub const USER_HEAP_BASE: Vaddr = 0x0000_0000_1000_0000;
pub const USER_HEAP_SIZE_LIMIT: usize = PAGE_SIZE * 1000;
#[derive(Debug)]
pub struct UserHeap {
@ -23,37 +26,38 @@ impl UserHeap {
}
}
pub fn brk(&self, new_heap_end: Option<Vaddr>, vm_space: &VmSpace) -> Vaddr {
pub fn brk(&self, new_heap_end: Option<Vaddr>) -> Result<Vaddr> {
let current = current!();
let root_vmar = current.root_vmar().unwrap();
match new_heap_end {
None => return self.current_heap_end.load(Ordering::Relaxed),
None => {
// create a heap vmo for current process
let perms = VmPerms::READ | VmPerms::WRITE;
let vmo_options = VmoOptions::<Rights>::new(0).flags(VmoFlags::RESIZABLE);
let heap_vmo = vmo_options.alloc().unwrap();
let vmar_map_options = root_vmar
.new_map(heap_vmo, perms)
.unwrap()
.offset(USER_HEAP_BASE)
.size(USER_HEAP_SIZE_LIMIT);
vmar_map_options.build().unwrap();
return Ok(self.current_heap_end.load(Ordering::Relaxed));
}
Some(new_heap_end) => {
let current_heap_end = self.current_heap_end.load(Ordering::Acquire);
if new_heap_end < current_heap_end {
return current_heap_end;
// FIXME: should we allow shrinking the current user heap?
return Ok(current_heap_end);
}
let new_size = (new_heap_end - self.heap_base).align_up(PAGE_SIZE);
let heap_vmo = root_vmar.get_mapped_vmo(USER_HEAP_BASE)?;
heap_vmo.resize(new_size)?;
self.current_heap_end.store(new_heap_end, Ordering::Release);
let start_page = VmPage::containing_address(current_heap_end - 1).next_page();
let end_page = VmPage::containing_address(new_heap_end);
if end_page >= start_page {
let vm_pages = VmPageRange::new_page_range(start_page, end_page);
let vm_perm = UserHeap::user_heap_perm();
vm_pages.map_zeroed(vm_space, vm_perm);
debug!(
"map address: 0x{:x} - 0x{:x}",
vm_pages.start_address(),
vm_pages.end_address()
);
}
return new_heap_end;
return Ok(new_heap_end);
}
}
}
#[inline(always)]
const fn user_heap_perm() -> VmPerm {
VmPerm::RWXU
}
/// Set heap to the default status. i.e., point the heap end to heap base.
pub fn set_default(&self) {
self.current_heap_end

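In short, the reworked brk path is lazy: brk(None) installs a zero-sized, resizable heap VMO at USER_HEAP_BASE (mapped with size USER_HEAP_SIZE_LIMIT), while brk(Some(addr)) only resizes that VMO and advances current_heap_end; frames are presumably committed later through the page-fault path added elsewhere in this commit. A minimal sketch of the expected call sequence, with illustrative values only:

```rust
// Sketch only: assumes the heap end starts page-aligned at USER_HEAP_BASE.
fn grow_heap_by_two_pages() -> Result<Vaddr> {
    let current = current!();
    let user_heap = current.user_heap().unwrap();
    let base = user_heap.brk(None)?; // maps the heap VMO, returns the current heap end
    let new_end = user_heap.brk(Some(base + 2 * PAGE_SIZE))?; // grows the heap by two pages
    Ok(new_end)
}
```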
View File

@ -1,7 +1,7 @@
#![allow(non_camel_case_types)]
use core::mem;
use jinux_frame::{cpu::GpRegs, offset_of};
use jinux_frame::cpu::GpRegs;
use jinux_util::{read_union_fields, union_read_ptr::UnionReadPtr};
use crate::{prelude::*, process::Pid};

View File

@ -15,9 +15,9 @@ use jinux_frame::{cpu::CpuContext, task::Task};
use self::c_types::siginfo_t;
use self::sig_mask::SigMask;
use self::sig_num::SigNum;
use crate::memory::{write_bytes_to_user, write_val_to_user};
use crate::process::signal::c_types::ucontext_t;
use crate::process::signal::sig_action::SigActionFlags;
use crate::util::{write_bytes_to_user, write_val_to_user};
use crate::{
prelude::*,
process::signal::sig_action::{SigAction, SigDefaultAction},

View File

@ -4,36 +4,37 @@ use jinux_frame::{
cpu::CpuContext,
task::Task,
user::{UserEvent, UserMode, UserSpace},
vm::VmSpace,
};
use crate::{
prelude::*,
process::{exception::handle_exception, signal::handle_pending_signal},
rights::Full,
vm::vmar::Vmar,
};
use crate::syscall::handle_syscall;
use super::{elf::load_elf_to_vm_space, Process};
use super::{elf::load_elf_to_root_vmar, Process};
static COUNTER: AtomicUsize = AtomicUsize::new(0);
pub fn create_user_task_from_elf(
root_vmar: &Vmar<Full>,
filename: CString,
elf_file_content: &[u8],
elf_file_content: &'static [u8],
parent: Weak<Process>,
argv: Vec<CString>,
envp: Vec<CString>,
) -> Arc<Task> {
let vm_space = VmSpace::new();
let elf_load_info = load_elf_to_vm_space(filename, elf_file_content, &vm_space, argv, envp)
let elf_load_info = load_elf_to_root_vmar(filename, elf_file_content, &root_vmar, argv, envp)
.expect("Load Elf failed");
let vm_space = root_vmar.vm_space().clone();
let mut cpu_ctx = CpuContext::default();
// set entry point
cpu_ctx.gp_regs.rip = elf_load_info.entry_point();
// set user stack
cpu_ctx.gp_regs.rsp = elf_load_info.user_stack_top();
let user_space = Arc::new(UserSpace::new(vm_space, cpu_ctx));
create_new_task(user_space, parent)
}
@ -44,8 +45,7 @@ pub fn create_new_task(userspace: Arc<UserSpace>, parent: Weak<Process>) -> Arc<
let cur = Task::current();
let user_space = cur.user_space().expect("user task should have user space");
let mut user_mode = UserMode::new(user_space);
debug!("In new task");
debug!("[new task] pid = {}", current!().pid());
debug!("In user task entry:");
debug!("[new task] rip = 0x{:x}", user_space.cpu_ctx.gp_regs.rip);
debug!("[new task] rsp = 0x{:x}", user_space.cpu_ctx.gp_regs.rsp);
debug!("[new task] rax = 0x{:x}", user_space.cpu_ctx.gp_regs.rax);
@ -72,7 +72,7 @@ pub fn create_new_task(userspace: Arc<UserSpace>, parent: Weak<Process>) -> Arc<
}
}
debug!("exit user loop");
// Work around: exit in kernel task entry may be not called. Why this will happen?
// FIXME: This is a workaround: exit in the kernel task entry may not be called. Why does this happen?
Task::current().exit();
}

View File

@ -1,8 +1,8 @@
use super::{constants::*, SyscallReturn};
use crate::{memory::read_cstring_from_user, prelude::*, syscall::SYS_ACCESS};
use crate::{log_syscall_entry, prelude::*, syscall::SYS_ACCESS, util::read_cstring_from_user};
pub fn sys_access(filename_ptr: Vaddr, file_mode: u64) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_ACCESS]", SYS_ACCESS);
log_syscall_entry!(SYS_ACCESS);
let filename = read_cstring_from_user(filename_ptr, MAX_FILENAME_LEN)?;
debug!("filename: {:?}, file_mode = {}", filename, file_mode);
// TODO: access currently does not check anything and just returns success

View File

@ -1,7 +1,7 @@
use jinux_frame::cpu::CpuContext;
use crate::prelude::*;
use crate::syscall::SYS_ARCH_PRCTL;
use crate::{log_syscall_entry, prelude::*};
use super::SyscallReturn;
@ -29,7 +29,7 @@ impl TryFrom<u64> for ArchPrctlCode {
}
pub fn sys_arch_prctl(code: u64, addr: u64, context: &mut CpuContext) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_ARCH_PRCTL]", SYS_ARCH_PRCTL);
log_syscall_entry!(SYS_ARCH_PRCTL);
let arch_prctl_code = ArchPrctlCode::try_from(code)?;
debug!(
"arch_prctl_code: {:?}, addr = 0x{:x}",

View File

@ -1,3 +1,4 @@
use crate::log_syscall_entry;
use crate::prelude::*;
use crate::syscall::SyscallReturn;
@ -5,7 +6,7 @@ use crate::syscall::SYS_BRK;
/// Expand the user heap to the new heap end; returns the new heap end if the expansion succeeds.
pub fn sys_brk(heap_end: u64) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_BRK]", SYS_BRK);
log_syscall_entry!(SYS_BRK);
let new_heap_end = if heap_end == 0 {
None
} else {
@ -13,13 +14,8 @@ pub fn sys_brk(heap_end: u64) -> Result<SyscallReturn> {
};
debug!("new heap end = {:x?}", heap_end);
let current = current!();
let user_heap = current
.user_heap()
.expect("brk should work on process with user heap");
let vm_space = current
.vm_space()
.expect("brk should work on process with user space");
let new_heap_end = user_heap.brk(new_heap_end, vm_space);
let user_heap = current.user_heap().unwrap();
let new_heap_end = user_heap.brk(new_heap_end)?;
Ok(SyscallReturn::Return(new_heap_end as _))
}

View File

@ -1,5 +1,6 @@
use jinux_frame::cpu::CpuContext;
use crate::log_syscall_entry;
use crate::process::clone::{clone_child, CloneArgs, CloneFlags};
use crate::{prelude::*, syscall::SYS_CLONE};
@ -15,11 +16,12 @@ pub fn sys_clone(
tls: usize,
parent_context: CpuContext,
) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_CLONE]", SYS_CLONE);
log_syscall_entry!(SYS_CLONE);
let clone_flags = CloneFlags::from(clone_flags);
debug!("flags = {:?}, child_stack_ptr = 0x{:x}, parent_tid_ptr = 0x{:x}, child tid ptr = 0x{:x}, tls = 0x{:x}", clone_flags, new_sp, parent_tidptr, child_tidptr, tls);
let clone_args = CloneArgs::new(new_sp, parent_tidptr, child_tidptr, tls, clone_flags);
let child_process = clone_child(parent_context, clone_args).unwrap();
let child_pid = child_process.pid();
let pid = current!().pid();
debug!("*********schedule child process, pid = {}**********", pid);

View File

@ -1,17 +1,14 @@
use super::SyscallReturn;
use super::SYS_CLOSE;
use crate::log_syscall_entry;
use crate::{fs::file::FileDescripter, prelude::*};
pub fn sys_close(fd: FileDescripter) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_CLOSE]", SYS_CLOSE);
log_syscall_entry!(SYS_CLOSE);
debug!("fd = {}", fd);
let current = current!();
let mut file_table = current.file_table().lock();
match file_table.get_file(fd) {
None => return_errno!(Errno::EBADF),
Some(_) => {
file_table.close_file(fd);
Ok(SyscallReturn::Return(0))
}
}
let _ = file_table.get_file(fd)?;
file_table.close_file(fd);
Ok(SyscallReturn::Return(0))
}

View File

@ -1,8 +1,9 @@
use jinux_frame::cpu::CpuContext;
use super::{constants::*, SyscallReturn};
use crate::memory::{read_cstring_from_user, read_val_from_user};
use crate::process::elf::load_elf_to_vm_space;
use crate::log_syscall_entry;
use crate::process::elf::load_elf_to_root_vmar;
use crate::util::{read_cstring_from_user, read_val_from_user};
use crate::{prelude::*, syscall::SYS_EXECVE};
pub fn sys_execve(
@ -11,7 +12,7 @@ pub fn sys_execve(
envp_ptr_ptr: Vaddr,
context: &mut CpuContext,
) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_EXECVE]", SYS_EXECVE);
log_syscall_entry!(SYS_EXECVE);
let filename = read_cstring_from_user(filename_ptr, MAX_FILENAME_LEN)?;
let argv = read_cstring_vec(argv_ptr_ptr, MAX_ARGV_NUMBER, MAX_ARG_LEN)?;
let envp = read_cstring_vec(envp_ptr_ptr, MAX_ENVP_NUMBER, MAX_ENV_LEN)?;
@ -25,17 +26,17 @@ pub fn sys_execve(
let elf_file_content = crate::user_apps::read_execve_hello_content();
let current = current!();
// Set process vm space to default
let vm_space = current
.vm_space()
// clear the contents of the root vmar
let root_vmar = current
.root_vmar()
.expect("[Internal Error] User process should have vm space");
vm_space.clear();
root_vmar.clear()?;
let user_vm = current
.user_vm()
.expect("[Internal Error] User process should have user vm");
user_vm.set_default();
// load elf content to new vm space
let elf_load_info = load_elf_to_vm_space(filename, elf_file_content, &vm_space, argv, envp)
let elf_load_info = load_elf_to_root_vmar(filename, elf_file_content, root_vmar, argv, envp)
.expect("load elf failed");
debug!("load elf in execve succeeds");
// set signal disposition to default
@ -74,7 +75,7 @@ fn read_cstring_vec(
res.push(cstring);
}
if !find_null {
return_errno!(Errno::E2BIG);
return_errno_with_message!(Errno::E2BIG, "Cannot find null pointer in vector");
}
Ok(res)
}

View File

@ -1,9 +1,9 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::syscall::{SyscallReturn, SYS_EXIT};
pub fn sys_exit(exit_code: i32) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_EXIT]", SYS_EXIT);
log_syscall_entry!(SYS_EXIT);
current!().exit(exit_code);
Ok(SyscallReturn::Return(0))
}

View File

@ -1,10 +1,10 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::syscall::{SyscallReturn, SYS_EXIT_GROUP};
/// Exit all threads in a process.
pub fn sys_exit_group(exit_code: u64) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_EXIT_GROUP]", SYS_EXIT_GROUP);
log_syscall_entry!(SYS_EXIT_GROUP);
current!().exit(exit_code as _);
Ok(SyscallReturn::Return(0))
}

View File

@ -1,9 +1,10 @@
use super::{SyscallReturn, SYS_FCNTL};
use crate::fs::fcntl::FcntlCmd;
use crate::log_syscall_entry;
use crate::{fs::file::FileDescripter, prelude::*};
pub fn sys_fcntl(fd: FileDescripter, cmd: i32, arg: u64) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_FCNTL]", SYS_FCNTL);
log_syscall_entry!(SYS_FCNTL);
let fcntl_cmd = FcntlCmd::try_from(cmd)?;
debug!("fd = {}, cmd = {:?}, arg = {}", fd, fcntl_cmd, arg);
match fcntl_cmd {

View File

@ -1,4 +1,5 @@
use crate::{
log_syscall_entry,
prelude::*,
process::clone::{clone_child, CloneArgs},
};
@ -9,7 +10,7 @@ use crate::{process::Process, syscall::SYS_FORK};
use super::SyscallReturn;
pub fn sys_fork(parent_context: CpuContext) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_FORK]", SYS_FORK);
log_syscall_entry!(SYS_FORK);
let child_process = fork(parent_context);
Ok(SyscallReturn::Return(child_process.pid() as _))
}

View File

@ -1,19 +1,19 @@
use jinux_frame::vm::VmIo;
use crate::fs::stat::Stat;
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::syscall::{SyscallReturn, SYS_FSTAT};
pub fn sys_fstat(fd: u64, stat_buf_ptr: Vaddr) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_FSTAT]", SYS_FSTAT);
log_syscall_entry!(SYS_FSTAT);
debug!("fd = {}, stat_buf_addr = 0x{:x}", fd, stat_buf_ptr);
let current = current!();
let vm_space = current.vm_space().unwrap();
let root_vmar = current.root_vmar().unwrap();
if fd == 1 {
let stat = Stat::stdout_stat();
vm_space.write_val(stat_buf_ptr, &stat)?;
root_vmar.write_val(stat_buf_ptr, &stat)?;
return Ok(SyscallReturn::Return(0));
}
// TODO: fstat only returns fake result now

View File

@ -2,9 +2,10 @@ use core::sync::atomic::{AtomicBool, Ordering};
use crate::process::{Pid, Process};
use crate::syscall::SyscallReturn;
use crate::{memory::read_val_from_user, syscall::SYS_FUTEX};
use crate::syscall::SYS_FUTEX;
use crate::util::read_val_from_user;
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use jinux_frame::cpu::num_cpus;
type FutexBitSet = u32;
@ -22,7 +23,7 @@ pub fn sys_futex(
futex_new_addr: u64,
bitset: u64,
) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_FUTEX]", SYS_FUTEX);
log_syscall_entry!(SYS_FUTEX);
// FIXME: we currently ignore futex flags
let (futex_op, futex_flags) = futex_op_and_flags_from_u32(futex_op as _).unwrap();

View File

@ -1,11 +1,12 @@
use crate::memory::write_bytes_to_user;
use crate::log_syscall_entry;
use crate::prelude::*;
use crate::util::write_bytes_to_user;
use super::SyscallReturn;
use super::SYS_GETCWD;
pub fn sys_getcwd(buf: Vaddr, len: usize) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_GETCWD]", SYS_GETCWD);
log_syscall_entry!(SYS_GETCWD);
// TODO: getcwd only returns a fake result now
let fake_cwd = CString::new("/")?;
let bytes = fake_cwd.as_bytes_with_nul();

View File

@ -1,9 +1,9 @@
use crate::{prelude::*, syscall::SYS_GETEGID};
use crate::{log_syscall_entry, prelude::*, syscall::SYS_GETEGID};
use super::SyscallReturn;
pub fn sys_getegid() -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_GETEGID]", SYS_GETEGID);
log_syscall_entry!(SYS_GETEGID);
// TODO: getegid only returns a fake egid now
Ok(SyscallReturn::Return(0))
}

View File

@ -1,9 +1,9 @@
use crate::{prelude::*, syscall::SYS_GETEUID};
use crate::{log_syscall_entry, prelude::*, syscall::SYS_GETEUID};
use super::SyscallReturn;
pub fn sys_geteuid() -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_GETEUID]", SYS_GETEUID);
log_syscall_entry!(SYS_GETEUID);
// TODO: geteuid only returns a fake euid now
Ok(SyscallReturn::Return(0))
}

View File

@ -1,9 +1,9 @@
use crate::{prelude::*, syscall::SYS_GETGID};
use crate::{log_syscall_entry, prelude::*, syscall::SYS_GETGID};
use super::SyscallReturn;
pub fn sys_getgid() -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_GETGID]", SYS_GETGID);
log_syscall_entry!(SYS_GETGID);
// TODO: getgid only returns a fake gid now
Ok(SyscallReturn::Return(0))
}

View File

@ -1,8 +1,8 @@
use super::{SyscallReturn, SYS_GETPGRP};
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
pub fn sys_getpgrp() -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_GETPGRP]", SYS_GETPGRP);
log_syscall_entry!(SYS_GETPGRP);
let current = current!();
Ok(SyscallReturn::Return(current.pgid() as _))
}

View File

@ -1,11 +1,11 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::syscall::SYS_GETPID;
use super::SyscallReturn;
pub fn sys_getpid() -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_GETPID]", SYS_GETPID);
log_syscall_entry!(SYS_GETPID);
let pid = current!().pid();
debug!("[sys_getpid]: pid = {}", pid);
Ok(SyscallReturn::Return(pid as _))

View File

@ -1,10 +1,11 @@
use crate::log_syscall_entry;
use crate::prelude::*;
use super::SyscallReturn;
use super::SYS_GETPPID;
pub fn sys_getppid() -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_GETPPID]", SYS_GETPPID);
log_syscall_entry!(SYS_GETPPID);
let current = current!();
let parent = current.parent();
match parent {

View File

@ -1,11 +1,11 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::syscall::SYS_GETTID;
use super::SyscallReturn;
pub fn sys_gettid() -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_GETTID]", SYS_GETTID);
log_syscall_entry!(SYS_GETTID);
// For a single-threaded process, tid is equal to pid
let tid = current!().pid();
Ok(SyscallReturn::Return(tid as _))

View File

@ -1,9 +1,9 @@
use crate::{prelude::*, syscall::SYS_GETUID};
use crate::{log_syscall_entry, prelude::*, syscall::SYS_GETUID};
use super::SyscallReturn;
pub fn sys_getuid() -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_GETUID]", SYS_GETUID);
log_syscall_entry!(SYS_GETUID);
// TODO: getuid only returns a fake uid now
Ok(SyscallReturn::Return(0))
}

View File

@ -1,12 +1,13 @@
use crate::fs::file::FileDescripter;
use crate::fs::ioctl::IoctlCmd;
use crate::log_syscall_entry;
use crate::prelude::*;
use super::SyscallReturn;
use super::SYS_IOCTL;
pub fn sys_ioctl(fd: FileDescripter, cmd: u32, arg: Vaddr) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_IOCTL]", SYS_IOCTL);
log_syscall_entry!(SYS_IOCTL);
let ioctl_cmd = IoctlCmd::try_from(cmd)?;
debug!(
"fd = {}, ioctl_cmd = {:?}, arg = 0x{:x}",
@ -14,11 +15,7 @@ pub fn sys_ioctl(fd: FileDescripter, cmd: u32, arg: Vaddr) -> Result<SyscallRetu
);
let current = current!();
let file_table = current.file_table().lock();
match file_table.get_file(fd) {
None => return_errno_with_message!(Errno::EBADF, "Fd does not exist"),
Some(file) => {
let res = file.ioctl(ioctl_cmd, arg)?;
return Ok(SyscallReturn::Return(res as _));
}
}
let file = file_table.get_file(fd)?;
let res = file.ioctl(ioctl_cmd, arg)?;
return Ok(SyscallReturn::Return(res as _));
}

View File

@ -1,4 +1,4 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::process::signal::signals::user::{UserSignal, UserSignalKind};
use crate::process::{table, Process};
@ -10,7 +10,7 @@ use crate::{
use super::SyscallReturn;
pub fn sys_kill(process_filter: u64, sig_num: u64) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_KILL]", SYS_KILL);
log_syscall_entry!(SYS_KILL);
let process_filter = ProcessFilter::from_id(process_filter as _);
let sig_num = SigNum::try_from(sig_num as u8).unwrap();
debug!(
@ -49,7 +49,9 @@ fn get_processes(filter: &ProcessFilter) -> Result<Vec<Arc<Process>>> {
ProcessFilter::WithPid(pid) => {
let process = table::pid_to_process(*pid);
match process {
None => return_errno!(Errno::ESRCH),
None => {
return_errno_with_message!(Errno::ESRCH, "No such process in process table")
}
Some(process) => vec![process],
}
}

View File

@ -1,10 +1,11 @@
use crate::log_syscall_entry;
use crate::{fs::file::FileDescripter, prelude::*};
use super::SyscallReturn;
use super::SYS_LSEEK;
pub fn sys_lseek(fd: FileDescripter, offset: usize, whence: u32) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_LSEEK]", SYS_LSEEK);
log_syscall_entry!(SYS_LSEEK);
debug!("fd = {}, offset = {}, whence = {}", fd, offset, whence);
// TODO: do lseek
Ok(SyscallReturn::Return(0))

View File

@ -1,14 +1,15 @@
use crate::fs::stat::Stat;
use crate::memory::read_cstring_from_user;
use crate::memory::write_val_to_user;
use crate::log_syscall_entry;
use crate::prelude::*;
use crate::syscall::constants::MAX_FILENAME_LEN;
use crate::util::read_cstring_from_user;
use crate::util::write_val_to_user;
use super::SyscallReturn;
use super::SYS_LSTAT;
pub fn sys_lstat(filename_ptr: Vaddr, stat_buf_ptr: Vaddr) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_LSTAT]", SYS_LSTAT);
log_syscall_entry!(SYS_LSTAT);
let filename = read_cstring_from_user(filename_ptr, MAX_FILENAME_LEN)?;
debug!(
"filename = {:?}, stat_buf_ptr = 0x{:x}",

View File

@ -1,7 +1,10 @@
//! This module defines mmap flags and the handler for the mmap syscall
use crate::prelude::*;
use crate::process::process_vm::mmap_area::MMapFlags;
use crate::process::process_vm::mmap_flags::MMapFlags;
use crate::rights::Rights;
use crate::vm::perms::VmPerms;
use crate::vm::vmo::VmoOptions;
use crate::{log_syscall_entry, prelude::*};
use jinux_frame::vm::VmPerm;
use crate::syscall::SYS_MMAP;
@ -16,7 +19,7 @@ pub fn sys_mmap(
fd: u64,
offset: u64,
) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_MMAP]", SYS_MMAP);
log_syscall_entry!(SYS_MMAP);
let perms = VmPerm::try_from(perms).unwrap();
let flags = MMapFlags::try_from(flags).unwrap();
let res = do_sys_mmap(
@ -26,7 +29,7 @@ pub fn sys_mmap(
flags,
fd as usize,
offset as usize,
);
)?;
Ok(SyscallReturn::Return(res as _))
}
@ -37,25 +40,47 @@ pub fn do_sys_mmap(
flags: MMapFlags,
fd: usize,
offset: usize,
) -> Vaddr {
debug!(
) -> Result<Vaddr> {
info!(
"addr = 0x{:x}, len = 0x{:x}, perms = {:?}, flags = {:?}, fd = {}, offset = 0x{:x}",
addr, len, vm_perm, flags, fd, offset
);
if flags.contains(MMapFlags::MAP_ANONYMOUS) & !flags.contains(MMapFlags::MAP_FIXED) {
// only support map anonymous areas on **NOT** fixed addr now
if flags.contains(MMapFlags::MAP_ANONYMOUS) {
// only mapping anonymous areas is supported now.
mmap_anonymous_vmo(len, offset, vm_perm, flags)
} else {
panic!("Unsupported mmap flags: {:?}", flags);
}
let current = current!();
let mmap_area = current
.mmap_area()
.expect("mmap should work on process with mmap area");
let vm_space = current
.vm_space()
.expect("mmap should work on process with user space");
// current.mmap(len, vm_perm, flags, offset)
mmap_area.mmap(len, offset, vm_perm, flags, vm_space)
}
pub fn mmap_anonymous_vmo(
len: usize,
offset: usize,
vm_perm: VmPerm,
flags: MMapFlags,
) -> Result<Vaddr> {
// TODO: how to respect flags?
if flags.complement().contains(MMapFlags::MAP_ANONYMOUS)
| flags.complement().contains(MMapFlags::MAP_PRIVATE)
{
panic!("Unsupported mmap flags {:?} now", flags);
}
if len % PAGE_SIZE != 0 {
panic!("Mmap only support page-aligned len");
}
if offset % PAGE_SIZE != 0 {
panic!("Mmap only support page-aligned offset");
}
let vmo_options: VmoOptions<Rights> = VmoOptions::new(len);
let vmo = vmo_options.alloc()?;
let current = current!();
let root_vmar = current.root_vmar().unwrap();
let perms = VmPerms::from(vm_perm);
let mut vmar_map_options = root_vmar.new_map(vmo, perms)?;
if flags.contains(MMapFlags::MAP_FIXED) {
vmar_map_options = vmar_map_options.offset(offset);
}
Ok(vmar_map_options.build()?)
}
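Taken together, an anonymous mapping now flows through a VMO and the root vmar's VmarMapOptions instead of the old MmapArea. A hedged sketch of a call into do_sys_mmap, with illustrative argument values only:

```rust
// Sketch only: not a real call site; the argument values are made up.
fn map_one_anonymous_page() -> Result<Vaddr> {
    do_sys_mmap(
        0,                                                 // addr hint (unused without MAP_FIXED)
        PAGE_SIZE,                                         // page-aligned length
        VmPerm::R | VmPerm::W,                             // readable and writable
        MMapFlags::MAP_ANONYMOUS | MMapFlags::MAP_PRIVATE, // the only combination supported here
        usize::MAX,                                        // fd is ignored for anonymous mappings
        0,                                                 // page-aligned offset
    )
}
```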

View File

@ -274,3 +274,11 @@ pub fn syscall_dispatch(
_ => panic!("Unsupported syscall number: {}", syscall_number),
}
}
#[macro_export]
macro_rules! log_syscall_entry {
($syscall_name: tt) => {
let syscall_name_str = stringify!($syscall_name);
info!("[SYSCALL][id={}][{}]", $syscall_name, syscall_name_str);
};
}
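The macro simply stringifies the syscall constant so that every handler logs one uniform entry line. A rough sketch of what a call site expands to (SYS_WRITE is just an example constant from this commit):

```rust
// `log_syscall_entry!(SYS_WRITE);` expands, roughly, to:
let syscall_name_str = stringify!(SYS_WRITE); // "SYS_WRITE"
info!("[SYSCALL][id={}][{}]", SYS_WRITE, syscall_name_str);
// which prints something like: [SYSCALL][id=1][SYS_WRITE]
```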

View File

@ -1,13 +1,13 @@
use jinux_frame::vm::VmPerm;
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::syscall::SYS_MPROTECT;
use super::SyscallReturn;
pub fn sys_mprotect(vaddr: u64, len: u64, perms: u64) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_MPROTECT]", SYS_MPROTECT);
log_syscall_entry!(SYS_MPROTECT);
let perms = VmPerm::try_from(perms).unwrap();
do_sys_mprotect(vaddr as Vaddr, len as usize, perms);
Ok(SyscallReturn::Return(0))

View File

@ -1,10 +1,11 @@
use crate::log_syscall_entry;
use crate::prelude::*;
use super::SyscallReturn;
use super::SYS_MUNMAP;
pub fn sys_munmap(addr: Vaddr, len: usize) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_MUNMAP]", SYS_MUNMAP);
log_syscall_entry!(SYS_MUNMAP);
debug!("addr = 0x{:x}, len = {}", addr, len);
//TODO: do munmap
Ok(SyscallReturn::Return(0))

View File

@ -1,9 +1,10 @@
use crate::fs::file::File;
use crate::fs::file::FileDescripter;
use crate::memory::read_cstring_from_user;
use crate::log_syscall_entry;
use crate::prelude::*;
use crate::syscall::constants::MAX_FILENAME_LEN;
use crate::tty::get_console;
use crate::util::read_cstring_from_user;
use super::SyscallReturn;
use super::SYS_OPENAT;
@ -16,7 +17,7 @@ pub fn sys_openat(
flags: i32,
mode: u16,
) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_OPENAT]", SYS_OPENAT);
log_syscall_entry!(SYS_OPENAT);
let pathname = read_cstring_from_user(pathname_addr, MAX_FILENAME_LEN)?;
debug!(
"dirfd = {}, pathname = {:?}, flags = {}, mode = {}",
@ -28,15 +29,15 @@ pub fn sys_openat(
// Below are three special files we encountered when running busybox ash.
// We currently only return ENOENT, which means the file does not exist.
if dirfd == AT_FDCWD && pathname == CString::new("/etc/passwd")? {
return_errno!(Errno::ENOENT);
return_errno_with_message!(Errno::ENOENT, "No such file");
}
if dirfd == AT_FDCWD && pathname == CString::new("/etc/profile")? {
return_errno!(Errno::ENOENT);
return_errno_with_message!(Errno::ENOENT, "No such file");
}
if dirfd == AT_FDCWD && pathname == CString::new("./trace")? {
return_errno!(Errno::ENOENT);
return_errno_with_message!(Errno::ENOENT, "No such file");
}
if dirfd == AT_FDCWD && pathname == CString::new("/dev/tty")? {
@ -44,7 +45,6 @@ pub fn sys_openat(
let current = current!();
let mut file_table = current.file_table().lock();
let fd = file_table.insert(tty_file);
debug!("openat fd = {}", fd);
return Ok(SyscallReturn::Return(fd as _));
}
todo!()

View File

@ -1,14 +1,15 @@
use core::time::Duration;
use crate::fs::poll::{c_pollfd, PollFd};
use crate::memory::{read_val_from_user, write_val_to_user};
use crate::log_syscall_entry;
use crate::util::{read_val_from_user, write_val_to_user};
use crate::{fs::poll::c_nfds, prelude::*};
use super::SyscallReturn;
use super::SYS_POLL;
pub fn sys_poll(fds: Vaddr, nfds: c_nfds, timeout: i32) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_POLL]", SYS_POLL);
log_syscall_entry!(SYS_POLL);
let mut read_addr = fds;
let mut pollfds = Vec::with_capacity(nfds as _);
@ -36,8 +37,8 @@ pub fn sys_poll(fds: Vaddr, nfds: c_nfds, timeout: i32) -> Result<SyscallReturn>
let file_table = current.file_table().lock();
let file = file_table.get_file(pollfd.fd);
match file {
None => return Some(Err(Error::new(Errno::EBADF))),
Some(file) => {
Err(_) => return Some(Err(Error::new(Errno::EBADF))),
Ok(file) => {
let file_events = file.poll();
let polled_events = pollfd.events.intersection(file_events);
if !polled_events.is_empty() {

View File

@ -1,12 +1,13 @@
use crate::memory::read_cstring_from_user;
use crate::memory::write_bytes_to_user;
use crate::log_syscall_entry;
use crate::prelude::*;
use crate::process::name::MAX_PROCESS_NAME_LEN;
use crate::util::read_cstring_from_user;
use crate::util::write_bytes_to_user;
use super::SyscallReturn;
use super::SYS_PRCTL;
pub fn sys_prctl(option: i32, arg2: u64, arg3: u64, arg4: u64, arg5: u64) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_PRCTL]", SYS_PRCTL);
log_syscall_entry!(SYS_PRCTL);
let prctl_cmd = PrctlCmd::from_args(option, arg2, arg3, arg4, arg5)?;
debug!("prctl cmd = {:?}", prctl_cmd);
let current = current!();

View File

@ -1,33 +1,21 @@
use crate::memory::write_bytes_to_user;
use crate::log_syscall_entry;
use crate::util::write_bytes_to_user;
use crate::{fs::file::FileDescripter, prelude::*};
use super::SyscallReturn;
use super::SYS_READ;
pub fn sys_read(fd: FileDescripter, user_buf_addr: Vaddr, buf_len: usize) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_READ]", SYS_READ);
log_syscall_entry!(SYS_READ);
debug!(
"fd = {}, user_buf_ptr = 0x{:x}, buf_len = 0x{:x}",
fd, user_buf_addr, buf_len
);
let current = current!();
let file_table = current.file_table().lock();
let file = file_table.get_file(fd);
match file {
None => return_errno!(Errno::EBADF),
Some(file) => {
let mut read_buf = vec![0u8; buf_len];
let read_len = file.read(&mut read_buf)?;
write_bytes_to_user(user_buf_addr, &read_buf)?;
debug!(
"read_len = {}, read_buf = {:?}",
read_len,
&read_buf[..read_len]
);
// let read_str = core::str::from_utf8(&read_buf[..read_len - 1]).unwrap();
// println!("str = {}" ,read_str);
// todo!();
return Ok(SyscallReturn::Return(read_len as _));
}
}
let file = file_table.get_file(fd)?;
let mut read_buf = vec![0u8; buf_len];
let read_len = file.read(&mut read_buf)?;
write_bytes_to_user(user_buf_addr, &read_buf)?;
return Ok(SyscallReturn::Return(read_len as _));
}

View File

@ -1,9 +1,7 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::{
memory::{read_bytes_from_user, write_bytes_to_user},
syscall::SYS_READLINK,
};
use crate::syscall::SYS_READLINK;
use crate::util::{read_bytes_from_user, write_bytes_to_user};
use super::SyscallReturn;
@ -14,7 +12,7 @@ pub fn sys_readlink(
user_buf_ptr: u64,
user_buf_len: u64,
) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_READLINK]", SYS_READLINK);
log_syscall_entry!(SYS_READLINK);
let res = do_sys_readlink(
filename_ptr as Vaddr,
user_buf_ptr as Vaddr,

View File

@ -1,8 +1,9 @@
use crate::{
memory::{read_val_from_user, write_val_to_user},
log_syscall_entry,
prelude::*,
process::signal::{c_types::sigaction_t, sig_action::SigAction, sig_num::SigNum},
syscall::SYS_RT_SIGACTION,
util::{read_val_from_user, write_val_to_user},
};
use super::SyscallReturn;
@ -13,7 +14,7 @@ pub fn sys_rt_sigaction(
old_sig_action_ptr: Vaddr,
sigset_size: u64,
) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_RT_SIGACTION]", SYS_RT_SIGACTION);
log_syscall_entry!(SYS_RT_SIGACTION);
let sig_num = SigNum::try_from(sig_num)?;
let sig_action_c = read_val_from_user::<sigaction_t>(sig_action_ptr)?;
let sig_action = SigAction::try_from(sig_action_c).unwrap();

View File

@ -1,6 +1,7 @@
use jinux_frame::vm::VmIo;
use crate::{
log_syscall_entry,
prelude::*,
syscall::{SyscallReturn, SYS_RT_SIGPROCMASK},
};
@ -11,7 +12,7 @@ pub fn sys_rt_sigprocmask(
oldset_ptr: Vaddr,
sigset_size: usize,
) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_RT_SIGPROCMASK]", SYS_RT_SIGPROCMASK);
log_syscall_entry!(SYS_RT_SIGPROCMASK);
let mask_op = MaskOp::try_from(how).unwrap();
debug!(
"mask op = {:?}, set_ptr = 0x{:x}, oldset_ptr = 0x{:x}, sigset_size = {}",
@ -31,15 +32,15 @@ fn do_rt_sigprocmask(
sigset_size: usize,
) -> Result<()> {
let current = current!();
let vm_space = current.vm_space().unwrap();
let root_vmar = current.root_vmar().unwrap();
let mut sig_mask = current.sig_mask().lock();
let old_sig_mask_value = sig_mask.as_u64();
debug!("old sig mask value: 0x{:x}", old_sig_mask_value);
if oldset_ptr != 0 {
vm_space.write_val(oldset_ptr, &old_sig_mask_value)?;
root_vmar.write_val(oldset_ptr, &old_sig_mask_value)?;
}
if set_ptr != 0 {
let new_set = vm_space.read_val::<u64>(set_ptr)?;
let new_set = root_vmar.read_val::<u64>(set_ptr)?;
debug!("new set = 0x{:x}", new_set);
match mask_op {
MaskOp::Block => sig_mask.block(new_set),

View File

@ -1,10 +1,12 @@
use crate::{memory::read_val_from_user, prelude::*, process::signal::c_types::ucontext_t};
use crate::{
log_syscall_entry, prelude::*, process::signal::c_types::ucontext_t, util::read_val_from_user,
};
use jinux_frame::cpu::CpuContext;
use super::{SyscallReturn, SYS_RT_SIGRETRUN};
pub fn sys_rt_sigreturn(context: &mut CpuContext) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_RT_SIGRETURN]", SYS_RT_SIGRETRUN);
log_syscall_entry!(SYS_RT_SIGRETRUN);
let current = current!();
let sig_context = current.sig_context().lock().pop_back().unwrap();
let ucontext = read_val_from_user::<ucontext_t>(sig_context)?;

View File

@ -1,11 +1,11 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::{process::Process, syscall::SYS_SCHED_YIELD};
use super::SyscallReturn;
pub fn sys_sched_yield() -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_SCHED_YIELD]", SYS_SCHED_YIELD);
log_syscall_entry!(SYS_SCHED_YIELD);
Process::yield_now();
Ok(SyscallReturn::Return(0))
}

View File

@ -1,4 +1,5 @@
use crate::{
log_syscall_entry,
prelude::*,
process::{process_group::ProcessGroup, table, Pgid, Pid},
};
@ -6,7 +7,7 @@ use crate::{
use super::{SyscallReturn, SYS_SETPGID};
pub fn sys_setpgid(pid: Pid, pgid: Pgid) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_SETPGID]", SYS_SETPGID);
log_syscall_entry!(SYS_SETPGID);
let current = current!();
// if pid is 0, pid should be the pid of the current process
let pid = if pid == 0 { current.pid() } else { pid };

View File

@ -1,4 +1,4 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::process::signal::sig_num::SigNum;
use crate::process::signal::signals::user::{UserSignal, UserSignalKind};
@ -11,7 +11,7 @@ use super::SyscallReturn;
/// Since jinux only supports single-threaded processes now, tgkill will send a signal to the process with pid as its process id,
/// and tgid as its process group id.
pub fn sys_tgkill(tgid: Pgid, pid: Pid, sig_num: u8) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_TGKILL]", SYS_TGKILL);
log_syscall_entry!(SYS_TGKILL);
let sig_num = SigNum::from_u8(sig_num);
debug!("tgid = {}, pid = {}, sig_num = {:?}", tgid, pid, sig_num);
let target_process =

View File

@ -1,6 +1,7 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::{memory::write_val_to_user, syscall::SYS_UNAME};
use crate::syscall::SYS_UNAME;
use crate::util::write_val_to_user;
use super::SyscallReturn;
@ -59,7 +60,7 @@ fn copy_cstring_to_u8_slice(src: &CStr, dst: &mut [u8]) {
}
pub fn sys_uname(old_uname_addr: Vaddr) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_UNAME]", SYS_UNAME);
log_syscall_entry!(SYS_UNAME);
debug!("old uname addr = 0x{:x}", old_uname_addr);
write_val_to_user(old_uname_addr, &*UTS_NAME)?;
Ok(SyscallReturn::Return(0))

View File

@ -1,7 +1,8 @@
use crate::{
memory::write_val_to_user,
log_syscall_entry,
process::{process_filter::ProcessFilter, wait::wait_child_exit},
syscall::SYS_WAIT4,
util::write_val_to_user,
};
use crate::prelude::*;
@ -10,7 +11,7 @@ use crate::process::wait::WaitOptions;
use super::SyscallReturn;
pub fn sys_wait4(wait_pid: u64, exit_status_ptr: u64, wait_options: u32) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_WAIT4]", SYS_WAIT4);
log_syscall_entry!(SYS_WAIT4);
let wait_options = WaitOptions::from_bits(wait_options).expect("Unknown wait options");
debug!(
"pid = {}, exit_status_ptr = {}, wait_options: {:?}",

View File

@ -1,5 +1,5 @@
use crate::prelude::*;
use crate::process::{process_filter::ProcessFilter, wait::wait_child_exit};
use crate::{log_syscall_entry, prelude::*};
use crate::process::wait::WaitOptions;
@ -14,7 +14,7 @@ pub fn sys_waitid(
rusage_addr: u64,
) -> Result<SyscallReturn> {
// FIXME: what are infoq and rusage used for?
debug!("[syscall][id={}][SYS_WAITID]", SYS_WAITID);
log_syscall_entry!(SYS_WAITID);
let process_filter = ProcessFilter::from_which_and_id(which, upid);
let wait_options = WaitOptions::from_bits(options as u32).expect("Unknown wait options");
let (exit_code, pid) = wait_child_exit(process_filter, wait_options)?;

View File

@ -1,7 +1,8 @@
use crate::fs::file::FileDescripter;
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::{memory::read_bytes_from_user, syscall::SYS_WRITE};
use crate::syscall::SYS_WRITE;
use crate::util::read_bytes_from_user;
use super::SyscallReturn;
@ -13,8 +14,7 @@ pub fn sys_write(
user_buf_ptr: Vaddr,
user_buf_len: u64,
) -> Result<SyscallReturn> {
// only suppprt STDOUT now.
debug!("[syscall][id={}][SYS_WRITE]", SYS_WRITE);
log_syscall_entry!(SYS_WRITE);
debug!(
"fd = {}, user_buf_ptr = 0x{:x}, user_buf_len = 0x{:x}",
fd, user_buf_ptr, user_buf_len
@ -22,14 +22,9 @@ pub fn sys_write(
let current = current!();
let file_table = current.file_table().lock();
match file_table.get_file(fd) {
None => return_errno!(Errno::EBADF),
Some(file) => {
let mut buffer = vec![0u8; user_buf_len as usize];
read_bytes_from_user(user_buf_ptr as usize, &mut buffer)?;
debug!("write buf = {:?}", buffer);
let write_len = file.write(&buffer)?;
Ok(SyscallReturn::Return(write_len as _))
}
}
let file = file_table.get_file(fd)?;
let mut buffer = vec![0u8; user_buf_len as usize];
read_bytes_from_user(user_buf_ptr as usize, &mut buffer)?;
let write_len = file.write(&buffer)?;
Ok(SyscallReturn::Return(write_len as _))
}

View File

@ -1,9 +1,7 @@
use crate::prelude::*;
use crate::{log_syscall_entry, prelude::*};
use crate::{
memory::{read_bytes_from_user, read_val_from_user},
syscall::SYS_WRITEV,
};
use crate::syscall::SYS_WRITEV;
use crate::util::{read_bytes_from_user, read_val_from_user};
use super::SyscallReturn;
@ -17,7 +15,7 @@ pub struct IoVec {
}
pub fn sys_writev(fd: u64, io_vec_ptr: u64, io_vec_count: u64) -> Result<SyscallReturn> {
debug!("[syscall][id={}][SYS_WRITEV]", SYS_WRITEV);
log_syscall_entry!(SYS_WRITEV);
let res = do_sys_writev(fd, io_vec_ptr as Vaddr, io_vec_count as usize)?;
Ok(SyscallReturn::Return(res as _))
}

View File

@ -3,8 +3,8 @@ use jinux_frame::receive_char;
use self::line_discipline::LineDiscipline;
use crate::fs::events::IoEvents;
use crate::fs::ioctl::IoctlCmd;
use crate::memory::{read_val_from_user, write_val_to_user};
use crate::process::Pgid;
use crate::util::{read_val_from_user, write_val_to_user};
use crate::{fs::file::File, prelude::*};
pub mod line_discipline;

View File

@ -1 +1,38 @@
use crate::prelude::*;
use jinux_frame::vm::VmIo;
use pod::Pod;
/// Copy bytes from the user space of the current process. The number of bytes to copy equals the length of dest.
pub fn read_bytes_from_user(src: Vaddr, dest: &mut [u8]) -> Result<()> {
let current = current!();
let root_vmar = current.root_vmar().unwrap();
Ok(root_vmar.read_bytes(src, dest)?)
}
/// Copy a value (a Plain Old Data type) from the user space of the current process.
pub fn read_val_from_user<T: Pod>(src: Vaddr) -> Result<T> {
let current = current!();
let root_vmar = current.root_vmar().unwrap();
Ok(root_vmar.read_val(src)?)
}
/// Write bytes to the user space of the current process. The number of bytes to write equals the length of src.
pub fn write_bytes_to_user(dest: Vaddr, src: &[u8]) -> Result<()> {
let current = current!();
let root_vmar = current.root_vmar().unwrap();
Ok(root_vmar.write_bytes(dest, src)?)
}
/// Write a value (a Plain Old Data type) to the user space of the current process.
pub fn write_val_to_user<T: Pod>(dest: Vaddr, val: &T) -> Result<()> {
let current = current!();
let root_vmar = current.root_vmar().unwrap();
Ok(root_vmar.write_val(dest, val)?)
}
/// Read a C string from user space; its length, including the null byte, should not exceed max_len.
pub fn read_cstring_from_user(addr: Vaddr, max_len: usize) -> Result<CString> {
let mut buffer = vec![0u8; max_len];
read_bytes_from_user(addr, &mut buffer)?;
Ok(CString::from(CStr::from_bytes_until_nul(&buffer)?))
}
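A brief usage sketch for these helpers; the function below and its pointer argument are hypothetical, while the helper names and Vaddr come from this file:

```rust
// Hypothetical example: read a u64 from user space through the current
// process's root vmar, then write an updated value back.
fn bump_user_counter(counter_ptr: Vaddr) -> Result<()> {
    let value = read_val_from_user::<u64>(counter_ptr)?;
    write_val_to_user(counter_ptr, &(value + 1))?;
    Ok(())
}
```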

View File

@ -14,5 +14,7 @@
//! In Jinux, VMARs and VMOs, as well as other capabilities, are implemented
//! as zero-cost capabilities.
mod vmar;
mod vmo;
pub mod page_fault_handler;
pub mod perms;
pub mod vmar;
pub mod vmo;

View File

@ -0,0 +1,13 @@
use crate::prelude::*;
/// This trait is implemented by structs that can handle a user-space page fault.
/// In the current implementation, these are vmars and vmos.
pub trait PageFaultHandler {
/// Handle a page fault at a specific address. If not_present is true, the page fault is caused by the page not being present;
/// otherwise, it is caused by a page protection error.
/// If write is true, the page fault is caused by a write access;
/// otherwise, it is caused by a read access.
/// If the page fault can be handled successfully, this function returns Ok(());
/// otherwise, it returns Err.
fn handle_page_fault(&self, offset: Vaddr, not_present: bool, write: bool) -> Result<()>;
}
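To illustrate the contract, here is a minimal hypothetical implementor (FixedRegion is not part of this commit; the real implementors are the vmar and vm_mapping types):

```rust
// Hypothetical example: a fixed region that can only service faults on
// pages that are not yet present.
struct FixedRegion {
    start: Vaddr,
    size: usize,
}

impl PageFaultHandler for FixedRegion {
    fn handle_page_fault(&self, addr: Vaddr, not_present: bool, write: bool) -> Result<()> {
        if addr < self.start || addr >= self.start + self.size {
            return_errno_with_message!(Errno::EACCES, "fault address is outside the region");
        }
        if !not_present {
            // A protection violation cannot be fixed by mapping a new page.
            return_errno_with_message!(Errno::EACCES, "page protection error");
        }
        // A real handler would commit a frame and map it here, choosing the
        // mapping permissions according to `write`.
        let _ = write;
        Ok(())
    }
}
```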

View File

@ -0,0 +1,79 @@
use crate::rights::Rights;
use bitflags::bitflags;
use jinux_frame::vm::VmPerm;
bitflags! {
/// The memory access permissions of memory mappings.
pub struct VmPerms: u32 {
/// Readable.
const READ = 1 << 0;
/// Writable.
const WRITE = 1 << 1;
/// Executable.
const EXEC = 1 << 2;
}
}
impl From<Rights> for VmPerms {
fn from(rights: Rights) -> VmPerms {
let mut vm_perm = VmPerms::empty();
if rights.contains(Rights::READ) {
vm_perm |= VmPerms::READ;
}
if rights.contains(Rights::WRITE) {
vm_perm |= VmPerms::WRITE;
}
if rights.contains(Rights::EXEC) {
vm_perm |= VmPerms::EXEC;
}
vm_perm
}
}
impl From<VmPerms> for Rights {
fn from(vm_perms: VmPerms) -> Rights {
let mut rights = Rights::empty();
if vm_perms.contains(VmPerms::READ) {
rights |= Rights::READ;
}
if vm_perms.contains(VmPerms::WRITE) {
rights |= Rights::WRITE;
}
if vm_perms.contains(VmPerms::EXEC) {
rights |= Rights::EXEC;
}
rights
}
}
impl From<VmPerm> for VmPerms {
fn from(perm: VmPerm) -> Self {
let mut perms = VmPerms::empty();
if perm.contains(VmPerm::R) {
perms |= VmPerms::READ;
}
if perm.contains(VmPerm::W) {
perms |= VmPerms::WRITE;
}
if perm.contains(VmPerm::X) {
perms |= VmPerms::EXEC;
}
perms
}
}
impl From<VmPerms> for VmPerm {
fn from(perms: VmPerms) -> Self {
let mut perm = VmPerm::empty();
if perms.contains(VmPerms::READ) {
perm |= VmPerm::R;
}
if perms.contains(VmPerms::WRITE) {
perm |= VmPerm::W;
}
if perms.contains(VmPerms::EXEC) {
perm |= VmPerm::X;
}
perm
}
}
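A small sketch of how these conversions compose; round-tripping R/W/X is lossless, while bits outside R/W/X (VmPerm::U, for instance) are dropped on the way into VmPerms:

```rust
// Sketch: the conversions defined above, composed.
fn perms_round_trip() {
    let rights = Rights::READ | Rights::WRITE;
    let perms = VmPerms::from(rights);   // READ | WRITE
    let hw_perm = VmPerm::from(perms);   // VmPerm::R | VmPerm::W
    // Only R/W/X survive the trip back; nothing is lost in this case.
    assert_eq!(VmPerms::from(hw_perm), perms);
}
```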

View File

@ -1,20 +1,21 @@
use core::ops::Range;
use jinux_frame::vm::{Vaddr, VmIo};
use alloc::sync::Arc;
use jinux_frame::prelude::Result;
use jinux_frame::{vm::VmIo, Error};
use crate::prelude::*;
use crate::{rights::Rights, vm::vmo::Vmo};
use crate::{
rights::Rights,
vm::{page_fault_handler::PageFaultHandler, vmo::Vmo},
};
use super::{
options::{VmarChildOptions, VmarMapOptions},
VmPerms, Vmar, Vmar_,
options::VmarChildOptions, vm_mapping::VmarMapOptions, VmPerms, Vmar, VmarRightsOp, Vmar_,
};
impl Vmar<Rights> {
/// Creates a root VMAR.
pub fn new() -> Result<Self> {
let inner = Arc::new(Vmar_::new()?);
pub fn new_root() -> Result<Self> {
let inner = Arc::new(Vmar_::new_root()?);
let rights = Rights::all();
let new_self = Self(inner, rights);
Ok(new_self)
@ -106,6 +107,12 @@ impl Vmar<Rights> {
self.0.protect(perms, range)
}
/// clear all mappings and children vmars.
/// After being cleared, this vmar will become an empty vmar
pub fn clear(&self) -> Result<()> {
self.0.clear_root_vmar()
}
/// Destroy a VMAR, including all its mappings and children VMARs.
///
/// After being destroyed, the VMAR becomes useless and returns errors
@ -135,31 +142,43 @@ impl Vmar<Rights> {
/// The method requires the Dup right.
pub fn dup(&self) -> Result<Self> {
self.check_rights(Rights::DUP)?;
todo!()
}
/// Returns the access rights.
pub fn rights(&self) -> Rights {
self.1
}
fn check_rights(&self, rights: Rights) -> Result<()> {
if self.1.contains(rights) {
Ok(())
} else {
Err(Error::AccessDenied)
}
Ok(Vmar(self.0.clone(), self.1.clone()))
}
}
impl VmIo for Vmar<Rights> {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> jinux_frame::Result<()> {
self.check_rights(Rights::READ)?;
self.0.read(offset, buf)
self.0.read(offset, buf)?;
Ok(())
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
fn write_bytes(&self, offset: usize, buf: &[u8]) -> jinux_frame::Result<()> {
self.check_rights(Rights::WRITE)?;
self.0.write(offset, buf)
self.0.write(offset, buf)?;
Ok(())
}
}
impl PageFaultHandler for Vmar<Rights> {
fn handle_page_fault(
&self,
page_fault_addr: Vaddr,
not_present: bool,
write: bool,
) -> Result<()> {
if write {
self.check_rights(Rights::WRITE)?;
} else {
self.check_rights(Rights::READ)?;
}
self.0
.handle_page_fault(page_fault_addr, not_present, write)
}
}
impl VmarRightsOp for Vmar<Rights> {
fn rights(&self) -> Rights {
self.1
}
}

View File

@ -3,16 +3,24 @@
mod dyn_cap;
mod options;
mod static_cap;
pub mod vm_mapping;
use crate::prelude::*;
use crate::rights::Full;
use crate::rights::Rights;
use crate::vm::perms::VmPerms;
use alloc::collections::BTreeMap;
use alloc::sync::Arc;
use bitflags::bitflags;
use alloc::sync::Weak;
use alloc::vec::Vec;
use core::ops::Range;
use jinux_frame::prelude::Result;
use jinux_frame::vm::Vaddr;
use jinux_frame::vm::VmSpace;
use jinux_frame::Error;
use spin::Mutex;
use jinux_frame::AlignExt;
use self::vm_mapping::VmMapping;
use super::page_fault_handler::PageFaultHandler;
use super::vmo::Vmo;
/// Virtual Memory Address Regions (VMARs) are a type of capability that manages
/// user address spaces.
@ -43,44 +51,601 @@ use spin::Mutex;
///
pub struct Vmar<R = Rights>(Arc<Vmar_>, R);
pub trait VmarRightsOp {
/// Returns the access rights.
fn rights(&self) -> Rights;
fn check_rights(&self, rights: Rights) -> Result<()>;
}
impl<R> VmarRightsOp for Vmar<R> {
default fn rights(&self) -> Rights {
unimplemented!()
}
default fn check_rights(&self, rights: Rights) -> Result<()> {
if self.rights().contains(rights) {
Ok(())
} else {
return_errno_with_message!(Errno::EACCES, "Rights check failed");
}
}
}
// TODO: figure out how page faults can be delivered to and handled by the current VMAR.
impl<R> PageFaultHandler for Vmar<R> {
default fn handle_page_fault(
&self,
page_fault_addr: Vaddr,
not_present: bool,
write: bool,
) -> Result<()> {
unimplemented!()
}
}
struct Vmar_ {
inner: Mutex<Inner>,
// The offset relative to the root VMAR
impl<R> Vmar<R> {
/// FIXME: This function should require access control
pub fn vm_space(&self) -> &VmSpace {
self.0.vm_space()
}
}
pub(super) struct Vmar_ {
/// vmar inner
inner: Mutex<VmarInner>,
/// The offset relative to the root VMAR
base: Vaddr,
parent: Option<Arc<Vmar_>>,
/// The total size of the VMAR in bytes
size: usize,
/// The attached vmspace
vm_space: VmSpace,
/// The parent vmar. If it points to none, this is the root vmar
parent: Weak<Vmar_>,
}
struct Inner {
struct VmarInner {
/// Whether the vmar is destroyed
is_destroyed: bool,
vm_space: VmSpace,
//...
/// The child vmars. The key is the offset relative to the root VMAR
child_vmar_s: BTreeMap<Vaddr, Arc<Vmar_>>,
/// The mapped vmos. The key is the offset relative to the root VMAR
vm_mappings: BTreeMap<Vaddr, Arc<VmMapping>>,
/// Free regions that can be used for creating child vmars or mapping vmos
free_regions: BTreeMap<Vaddr, FreeRegion>,
}
// FIXME: How to set the correct root vmar range?
// We should not include address 0 here (is this right?), since address 0 is the null pointer.
// We should include address 0x0040_0000, since non-PIE executables are typically loaded at 0x0040_0000.
pub const ROOT_VMAR_LOWEST_ADDR: Vaddr = 0x0010_0000;
pub const ROOT_VMAR_HIGHEST_ADDR: Vaddr = 0x1000_0000_0000;
impl Vmar_ {
pub fn new() -> Result<Self> {
todo!()
pub fn new_root() -> Result<Self> {
let mut free_regions = BTreeMap::new();
let root_region = FreeRegion::new(ROOT_VMAR_LOWEST_ADDR..ROOT_VMAR_HIGHEST_ADDR);
free_regions.insert(root_region.start(), root_region);
let vmar_inner = VmarInner {
is_destroyed: false,
child_vmar_s: BTreeMap::new(),
vm_mappings: BTreeMap::new(),
free_regions,
};
let vmar_ = Vmar_ {
inner: Mutex::new(vmar_inner),
vm_space: VmSpace::new(),
base: 0,
size: ROOT_VMAR_HIGHEST_ADDR,
parent: Weak::new(),
};
Ok(vmar_)
}
fn is_root_vmar(&self) -> bool {
if let Some(_) = self.parent.upgrade() {
false
} else {
true
}
}
pub fn protect(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
todo!()
assert!(range.start % PAGE_SIZE == 0);
assert!(range.end % PAGE_SIZE == 0);
self.check_protected_range(&range)?;
self.do_protect_inner(perms, range)?;
Ok(())
}
// Do the real protection. The protected range is guaranteed to be mapped.
fn do_protect_inner(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
for (vm_mapping_base, vm_mapping) in &self.inner.lock().vm_mappings {
let vm_mapping_range = *vm_mapping_base..(*vm_mapping_base + vm_mapping.size());
if is_intersected(&range, &vm_mapping_range) {
let intersected_range = get_intersected_range(&range, &vm_mapping_range);
vm_mapping.protect(perms, intersected_range)?;
}
}
for (_, child_vmar_) in &self.inner.lock().child_vmar_s {
let child_vmar_range = child_vmar_.range();
if is_intersected(&range, &child_vmar_range) {
let intersected_range = get_intersected_range(&range, &child_vmar_range);
child_vmar_.do_protect_inner(perms, intersected_range)?;
}
}
Ok(())
}
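// Note: `is_intersected` and `get_intersected_range` are defined elsewhere in the
// crate and are not shown in this hunk; the code above assumes the usual
// half-open-range semantics, roughly:
//
// fn is_intersected(range1: &Range<usize>, range2: &Range<usize>) -> bool {
//     range1.start < range2.end && range2.start < range1.end
// }
//
// fn get_intersected_range(range1: &Range<usize>, range2: &Range<usize>) -> Range<usize> {
//     debug_assert!(is_intersected(range1, range2));
//     range1.start.max(range2.start)..range1.end.min(range2.end)
// }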
/// Ensure the whole protected range is mapped, that is to say, backed by a VMO.
/// Internally, we check whether the range intersects any free region recursively.
/// If so, the range is not fully mapped.
fn check_protected_range(&self, protected_range: &Range<usize>) -> Result<()> {
// The protected range should be in self's range
assert!(self.base <= protected_range.start);
assert!(protected_range.end <= self.base + self.size);
// The protected range should not intersect with any free region
for (_, free_region) in &self.inner.lock().free_regions {
if is_intersected(&free_region.range, &protected_range) {
return_errno_with_message!(Errno::EACCES, "protected range is not fully mapped");
}
}
// If the protected range intersects with a child vmar_, that child vmar_ is responsible for doing the check.
for (_, child_vmar_) in &self.inner.lock().child_vmar_s {
let child_range = child_vmar_.range();
if is_intersected(&child_range, &protected_range) {
let intersected_range = get_intersected_range(&child_range, &protected_range);
child_vmar_.check_protected_range(&intersected_range)?;
}
}
Ok(())
}
/// Handle a user-space page fault. If the page fault is successfully handled, return Ok(()).
pub fn handle_page_fault(
&self,
page_fault_addr: Vaddr,
not_present: bool,
write: bool,
) -> Result<()> {
if page_fault_addr < self.base || page_fault_addr >= self.base + self.size {
return_errno_with_message!(Errno::EACCES, "page fault addr is not in current vmar");
}
let inner = self.inner.lock();
for (child_vmar_base, child_vmar) in &inner.child_vmar_s {
if *child_vmar_base <= page_fault_addr
&& page_fault_addr < *child_vmar_base + child_vmar.size
{
return child_vmar.handle_page_fault(page_fault_addr, not_present, write);
}
}
// FIXME: If multiple vmos are mapped at the addr, should we allow all of them to handle the page fault?
for (vm_mapping_base, vm_mapping) in &inner.vm_mappings {
if *vm_mapping_base <= page_fault_addr
&& page_fault_addr < *vm_mapping_base + vm_mapping.size()
{
return vm_mapping.handle_page_fault(page_fault_addr, not_present, write);
}
}
return_errno_with_message!(Errno::EACCES, "page fault addr is not in current vmar");
}
/// Clear all contents of the root vmar
pub fn clear_root_vmar(&self) -> Result<()> {
debug_assert!(self.is_root_vmar());
if !self.is_root_vmar() {
return_errno_with_message!(Errno::EACCES, "The vmar is not root vmar");
}
self.vm_space.clear();
let mut inner = self.inner.lock();
inner.child_vmar_s.clear();
inner.vm_mappings.clear();
inner.free_regions.clear();
let root_region = FreeRegion::new(ROOT_VMAR_LOWEST_ADDR..ROOT_VMAR_HIGHEST_ADDR);
inner.free_regions.insert(root_region.start(), root_region);
Ok(())
}
pub fn destroy_all(&self) -> Result<()> {
todo!()
let mut inner = self.inner.lock();
inner.is_destroyed = true;
let mut free_regions = BTreeMap::new();
for (child_vmar_base, child_vmar) in &inner.child_vmar_s {
child_vmar.destroy_all()?;
let free_region = FreeRegion::new(child_vmar.range());
free_regions.insert(free_region.start(), free_region);
}
inner.child_vmar_s.clear();
inner.free_regions.append(&mut free_regions);
for (_, vm_mapping) in &inner.vm_mappings {
vm_mapping.unmap(vm_mapping.range(), true)?;
let free_region = FreeRegion::new(vm_mapping.range());
free_regions.insert(free_region.start(), free_region);
}
inner.vm_mappings.clear();
inner.free_regions.append(&mut free_regions);
drop(inner);
self.merge_continuous_regions();
self.vm_space.clear();
Ok(())
}
pub fn destroy(&self, range: Range<usize>) -> Result<()> {
todo!()
self.check_destroy_range(&range)?;
let mut inner = self.inner.lock();
let mut free_regions = BTreeMap::new();
for (child_vmar_base, child_vmar) in &inner.child_vmar_s {
let child_vmar_range = child_vmar.range();
if is_intersected(&range, &child_vmar_range) {
child_vmar.destroy_all()?;
}
let free_region = FreeRegion::new(child_vmar_range);
free_regions.insert(free_region.start(), free_region);
}
inner
.child_vmar_s
.retain(|_, child_vmar_| !child_vmar_.is_destroyed());
for (_, vm_mapping) in &inner.vm_mappings {
let vm_mapping_range = vm_mapping.range();
if is_intersected(&vm_mapping_range, &range) {
let intersected_range = get_intersected_range(&vm_mapping_range, &range);
vm_mapping.unmap(intersected_range.clone(), true)?;
let free_region = FreeRegion::new(intersected_range);
free_regions.insert(free_region.start(), free_region);
}
}
inner
.vm_mappings
.retain(|_, vm_mapping| !vm_mapping.is_destroyed());
inner.free_regions.append(&mut free_regions);
self.merge_continuous_regions();
Ok(())
}
fn check_destroy_range(&self, range: &Range<usize>) -> Result<()> {
debug_assert!(range.start % PAGE_SIZE == 0);
debug_assert!(range.end % PAGE_SIZE == 0);
for (child_vmar_base, child_vmar) in &self.inner.lock().child_vmar_s {
let child_vmar_start = *child_vmar_base;
let child_vmar_end = child_vmar_start + child_vmar.size;
if child_vmar_end <= range.start || child_vmar_start >= range.end {
// child vmar does not intersect with range
continue;
}
if range.start <= child_vmar_start && child_vmar_end <= range.end {
// child vmar is totally inside the range
continue;
}
assert!(is_intersected(range, &(child_vmar_start..child_vmar_end)));
return_errno_with_message!(
Errno::EACCES,
"Child vmar is partly intersected with destryed range"
);
}
Ok(())
}
fn is_destroyed(&self) -> bool {
self.inner.lock().is_destroyed
}
fn merge_continuous_regions(&self) {
let mut new_free_regions = BTreeMap::new();
let mut inner = self.inner.lock();
let keys = inner.free_regions.keys().cloned().collect::<Vec<_>>();
for key in keys {
if let Some(mut free_region) = inner.free_regions.remove(&key) {
let mut region_end = free_region.end();
while let Some(another_region) = inner.free_regions.remove(&region_end) {
free_region.merge_other_region(&another_region);
region_end = another_region.end();
}
new_free_regions.insert(free_region.start(), free_region);
}
}
inner.free_regions.clear();
inner.free_regions.append(&mut new_free_regions);
}
pub fn read(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
let read_start = self.base + offset;
let read_end = buf.len() + read_start;
// if the read range is in child vmar
for (child_vmar_base, child_vmar) in &self.inner.lock().child_vmar_s {
let child_vmar_end = *child_vmar_base + child_vmar.size;
if *child_vmar_base <= read_start && read_end <= child_vmar_end {
let child_offset = read_start - *child_vmar_base;
return child_vmar.read(child_offset, buf);
}
}
// if the read range is in mapped vmo
for (vm_mapping_base, vm_mapping) in &self.inner.lock().vm_mappings {
let vm_mapping_end = *vm_mapping_base + vm_mapping.size();
if *vm_mapping_base <= read_start && read_end <= vm_mapping_end {
let vm_mapping_offset = read_start - *vm_mapping_base;
return vm_mapping.read_bytes(vm_mapping_offset, buf);
}
}
// FIXME: If the read range is across different vmos or child vmars, should we directly return error?
return_errno_with_message!(Errno::EACCES, "read range is not backed up by a vmo");
}
pub fn write(&self, offset: usize, buf: &[u8]) -> Result<()> {
let write_start = self.base + offset;
let write_end = buf.len() + write_start;
// if the write range is in child vmar
for (child_vmar_base, child_vmar) in &self.inner.lock().child_vmar_s {
let child_vmar_end = *child_vmar_base + child_vmar.size;
if *child_vmar_base <= write_start && write_end <= child_vmar_end {
let child_offset = write_start - *child_vmar_base;
return child_vmar.write(child_offset, buf);
}
}
// if the write range is in mapped vmo
for (vm_mapping_base, vm_mapping) in &self.inner.lock().vm_mappings {
let vm_mapping_end = *vm_mapping_base + vm_mapping.size();
if *vm_mapping_base <= write_start && write_end <= vm_mapping_end {
let vm_mapping_offset = write_start - *vm_mapping_base;
return vm_mapping.write_bytes(vm_mapping_offset, buf);
}
}
// FIXME: If the write range is across different vmos or child vmars, should we directly return error?
return_errno_with_message!(Errno::EACCES, "write range is not backed up by a vmo");
}
/// allocate a child vmar_.
pub fn alloc_child_vmar(
self: &Arc<Self>,
child_vmar_offset: Option<usize>,
child_vmar_size: usize,
align: usize,
) -> Result<Arc<Vmar_>> {
let (region_base, child_vmar_offset) =
self.find_free_region_for_child(child_vmar_offset, child_vmar_size, align)?;
// This unwrap should never fail
let free_region = self.inner.lock().free_regions.remove(&region_base).unwrap();
let child_range = child_vmar_offset..(child_vmar_offset + child_vmar_size);
let regions_after_allocation = free_region.allocate_range(child_range.clone());
regions_after_allocation.into_iter().for_each(|region| {
self.inner
.lock()
.free_regions
.insert(region.start(), region);
});
let child_region = FreeRegion::new(child_range);
let mut child_regions = BTreeMap::new();
child_regions.insert(child_region.start(), child_region);
let child_vmar_inner = VmarInner {
is_destroyed: false,
child_vmar_s: BTreeMap::new(),
vm_mappings: BTreeMap::new(),
free_regions: child_regions,
};
let child_vmar_ = Arc::new(Vmar_ {
inner: Mutex::new(child_vmar_inner),
base: child_vmar_offset,
size: child_vmar_size,
vm_space: self.vm_space.clone(),
parent: Arc::downgrade(self),
});
self.inner
.lock()
.child_vmar_s
.insert(child_vmar_.base, child_vmar_.clone());
Ok(child_vmar_)
}
/// find a free region for child vmar or vmo.
/// returns (region base addr, child real offset)
fn find_free_region_for_child(
&self,
child_offset: Option<Vaddr>,
child_size: usize,
align: usize,
) -> Result<(Vaddr, Vaddr)> {
for (region_base, free_region) in &self.inner.lock().free_regions {
if let Some(child_vmar_offset) = child_offset {
// if the offset is set, we should find a free region that can satisfy both the offset and size
if *region_base <= child_vmar_offset
&& (child_vmar_offset + child_size) <= (free_region.end())
{
return Ok((*region_base, child_vmar_offset));
}
} else {
// else, we find a free region that can satisfy the length and align requirement.
// Here, we use a simple brute-force algorithm to find the first free range that can satisfy the requirements.
// FIXME: A randomized algorithm may be more efficient.
let region_start = free_region.start();
let region_end = free_region.end();
let child_vmar_real_start = region_start.align_up(align);
let child_vmar_real_end = child_vmar_real_start + child_size;
if region_start <= child_vmar_real_start && child_vmar_real_end <= region_end {
return Ok((*region_base, child_vmar_real_start));
}
}
}
return_errno_with_message!(Errno::EACCES, "Cannot find free region for child")
}
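The search above aligns the candidate start address up to the requested alignment before checking whether the child still fits in the free region. A minimal standalone sketch of that alignment step (plain Rust, not part of this commit; `align_up` stands in for the frame crate's `AlignExt::align_up`):

```rust
// Stand-in for AlignExt::align_up, assuming `align` is a power of two.
fn align_up(addr: usize, align: usize) -> usize {
    (addr + align - 1) & !(align - 1)
}

fn main() {
    // A free region starting at 0x1234 with a 4 KiB alignment requirement
    // yields a child start address of 0x2000; an already-aligned start is kept.
    assert_eq!(align_up(0x1234, 0x1000), 0x2000);
    assert_eq!(align_up(0x2000, 0x1000), 0x2000);
}
```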
fn range(&self) -> Range<usize> {
self.base..(self.base + self.size)
}
fn check_vmo_overwrite(&self, vmo_range: Range<usize>, can_overwrite: bool) -> Result<()> {
let inner = self.inner.lock();
for (_, child_vmar) in &inner.child_vmar_s {
let child_vmar_range = child_vmar.range();
if is_intersected(&vmo_range, &child_vmar_range) {
return_errno_with_message!(
Errno::EACCES,
"vmo range overlapped with child vmar range"
);
}
}
if !can_overwrite {
for (child_vmo_base, child_vmo) in &inner.vm_mappings {
let child_vmo_range = *child_vmo_base..*child_vmo_base + child_vmo.size();
if is_intersected(&vmo_range, &child_vmo_range) {
return_errno_with_message!(
Errno::EACCES,
"vmo range overlapped with another vmo"
);
}
}
}
Ok(())
}
/// returns the attached vm_space
pub(super) fn vm_space(&self) -> &VmSpace {
&self.vm_space
}
/// map a vmo to this vmar
pub fn add_mapping(&self, mapping: Arc<VmMapping>) {
self.inner
.lock()
.vm_mappings
.insert(mapping.map_to_addr(), mapping);
}
fn allocate_free_region_for_vmo(
&self,
vmo_size: usize,
size: usize,
offset: Option<usize>,
align: usize,
can_overwrite: bool,
) -> Result<Vaddr> {
let allocate_size = size.max(vmo_size);
if can_overwrite {
let mut inner = self.inner.lock();
// if can_overwrite, the offset is ensured not to be None
let offset = offset.unwrap();
let vmo_range = offset..(offset + allocate_size);
// If can overwrite, the vmo can cross multiple free regions. We will split each free region that intersects with the vmo
let mut split_regions = Vec::new();
for (free_region_base, free_region) in &inner.free_regions {
let free_region_range = free_region.range();
if is_intersected(free_region_range, &vmo_range) {
split_regions.push(*free_region_base);
}
}
for region_base in split_regions {
let free_region = inner.free_regions.remove(&region_base).unwrap();
let intersected_range = get_intersected_range(free_region.range(), &vmo_range);
let regions_after_split = free_region.allocate_range(intersected_range);
regions_after_split.into_iter().for_each(|region| {
inner.free_regions.insert(region.start(), region);
});
}
return Ok(offset);
} else {
// Otherwise, the vmo must fit in a single free region
let (free_region_base, offset) =
self.find_free_region_for_child(offset, allocate_size, align)?;
let mut inner = self.inner.lock();
let free_region = inner.free_regions.remove(&free_region_base).unwrap();
let vmo_range = offset..(offset + allocate_size);
let intersected_range = get_intersected_range(free_region.range(), &vmo_range);
let regions_after_split = free_region.allocate_range(intersected_range);
regions_after_split.into_iter().for_each(|region| {
inner.free_regions.insert(region.start(), region);
});
return Ok(offset);
}
}
/// fork vmar for child process
pub fn fork_vmar_(&self, parent: Weak<Vmar_>) -> Result<Arc<Self>> {
// create an empty vmar at first
let is_destroyed = false;
let child_vmar_s = BTreeMap::new();
let mapped_vmos = BTreeMap::new();
let free_regions = BTreeMap::new();
let vmar_inner = VmarInner {
is_destroyed,
child_vmar_s,
vm_mappings: mapped_vmos,
free_regions,
};
// If this is a root vmar, we create a new vmspace
// Otherwise, we clone the vm space from parent.
let vm_space = if let Some(parent) = parent.upgrade() {
parent.vm_space().clone()
} else {
VmSpace::new()
};
let vmar_ = Vmar_ {
inner: Mutex::new(vmar_inner),
base: self.base,
size: self.size,
vm_space,
parent,
};
let new_vmar_ = Arc::new(vmar_);
let inner = self.inner.lock();
// clone free regions
for (free_region_base, free_region) in &inner.free_regions {
new_vmar_
.inner
.lock()
.free_regions
.insert(*free_region_base, free_region.clone());
}
// clone child vmars
for (child_vmar_base, child_vmar_) in &inner.child_vmar_s {
let parent_of_forked_child = Arc::downgrade(&new_vmar_);
let forked_child_vmar = child_vmar_.fork_vmar_(parent_of_forked_child)?;
new_vmar_
.inner
.lock()
.child_vmar_s
.insert(*child_vmar_base, forked_child_vmar);
}
// clone vm mappings
for (vm_mapping_base, vm_mapping) in &inner.vm_mappings {
let parent_of_forked_mapping = Arc::downgrade(&new_vmar_);
let forked_mapping = Arc::new(vm_mapping.fork_mapping(parent_of_forked_mapping)?);
new_vmar_
.inner
.lock()
.vm_mappings
.insert(*vm_mapping_base, forked_mapping);
}
Ok(new_vmar_)
}
/// get mapped vmo at given offset
pub fn get_mapped_vmo(&self, offset: Vaddr) -> Result<Vmo<Rights>> {
for (vm_mapping_base, vm_mapping) in &self.inner.lock().vm_mappings {
if *vm_mapping_base <= offset && offset < *vm_mapping_base + vm_mapping.size() {
return Ok(vm_mapping.vmo().dup()?);
}
}
return_errno_with_message!(Errno::EACCES, "No mapped vmo at this offset");
}
}
@ -91,28 +656,86 @@ impl<R> Vmar<R> {
pub fn base(&self) -> Vaddr {
self.0.base
}
/// The size of the vmar in bytes.
pub fn size(&self) -> usize {
self.0.size
}
/// Fork a vmar for child process
pub fn fork_vmar(&self) -> Result<Vmar<Full>> {
let rights = Rights::all();
self.check_rights(rights)?;
let vmar_ = self.0.fork_vmar_(Weak::new())?;
Ok(Vmar(vmar_, Full::new()))
}
/// get a mapped vmo
pub fn get_mapped_vmo(&self, offset: Vaddr) -> Result<Vmo<Rights>> {
let rights = Rights::all();
self.check_rights(rights)?;
self.0.get_mapped_vmo(offset)
}
}
#[derive(Debug, Clone)]
pub struct FreeRegion {
range: Range<Vaddr>,
}
impl FreeRegion {
pub fn new(range: Range<Vaddr>) -> Self {
Self { range }
}
pub fn start(&self) -> Vaddr {
self.range.start
}
pub fn end(&self) -> Vaddr {
self.range.end
}
pub fn size(&self) -> usize {
self.range.end - self.range.start
}
pub fn range(&self) -> &Range<usize> {
&self.range
}
/// allocate a range in this free region.
/// The range is ensured to be contained in the current region before calling this function.
/// The returned vector contains the regions that remain unallocated. Since the allocated range can be
/// in the middle of a free region, the original region may be split into at most two regions.
pub fn allocate_range(&self, allocate_range: Range<Vaddr>) -> Vec<FreeRegion> {
let mut res = Vec::new();
if self.range.start < allocate_range.start {
let free_region = FreeRegion::new(self.range.start..allocate_range.start);
res.push(free_region);
}
if allocate_range.end < self.range.end {
let free_region = FreeRegion::new(allocate_range.end..self.range.end);
res.push(free_region);
}
res
}
pub fn merge_other_region(&mut self, other_region: &FreeRegion) {
assert!(self.range.end == other_region.range.start);
assert!(self.range.start < other_region.range.end);
self.range = self.range.start..other_region.range.end
}
}
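To illustrate `allocate_range`: carving a sub-range out of the middle of a free region leaves at most two leftover regions, one on each side. A small standalone sketch mirroring that logic (plain Rust, not part of this commit):

```rust
use std::ops::Range;

// Simplified stand-in for FreeRegion, keeping only the splitting logic.
struct FreeRegion {
    range: Range<usize>,
}

impl FreeRegion {
    fn allocate_range(&self, allocate: Range<usize>) -> Vec<Range<usize>> {
        let mut res = Vec::new();
        if self.range.start < allocate.start {
            res.push(self.range.start..allocate.start); // leftover before the allocation
        }
        if allocate.end < self.range.end {
            res.push(allocate.end..self.range.end); // leftover after the allocation
        }
        res
    }
}

fn main() {
    let region = FreeRegion { range: 0x0000..0x8000 };
    // Allocating a range in the middle splits the region into two leftovers.
    assert_eq!(region.allocate_range(0x2000..0x3000), vec![0x0000..0x2000, 0x3000..0x8000]);
    // Allocating a prefix leaves a single leftover.
    assert_eq!(region.allocate_range(0x0000..0x1000), vec![0x1000..0x8000]);
}
```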
/// determine whether two ranges are intersected.
pub fn is_intersected(range1: &Range<usize>, range2: &Range<usize>) -> bool {
range1.start.max(range2.start) < range1.end.min(range2.end)
}
/// get the intersection range of two ranges.
/// The caller should ensure that the two ranges intersect.
pub fn get_intersected_range(range1: &Range<usize>, range2: &Range<usize>) -> Range<usize> {
debug_assert!(is_intersected(range1, range2));
range1.start.max(range2.start)..range1.end.min(range2.end)
}
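A quick usage sketch of the two helpers above (standalone, not part of this commit): two half-open ranges intersect exactly when the maximum of the starts is below the minimum of the ends.

```rust
use std::ops::Range;

fn is_intersected(r1: &Range<usize>, r2: &Range<usize>) -> bool {
    r1.start.max(r2.start) < r1.end.min(r2.end)
}

fn get_intersected_range(r1: &Range<usize>, r2: &Range<usize>) -> Range<usize> {
    debug_assert!(is_intersected(r1, r2));
    r1.start.max(r2.start)..r1.end.min(r2.end)
}

fn main() {
    assert!(is_intersected(&(0..10), &(5..15)));
    assert_eq!(get_intersected_range(&(0..10), &(5..15)), 5..10);
    // Touching half-open ranges do not intersect.
    assert!(!is_intersected(&(0..5), &(5..10)));
}
```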

View File

@ -1,11 +1,9 @@
//! Options for allocating child VMARs.
use jinux_frame::config::PAGE_SIZE;
use jinux_frame::{Error, Result};
use super::Vmar;
/// Options for allocating a child VMAR, which must not overlap with any
/// existing mappings or child VMARs.
@ -27,7 +25,7 @@ use super::{VmPerms, Vmar};
/// assert!(child_vmar.size() == child_size);
/// ```
///
/// A child VMO created from a parent VMO of _static_ capability is also a
/// A child VMAR created from a parent VMAR of _static_ capability is also a
/// _static_ capability.
/// ```
/// use jinux_std::prelude::*;
@ -45,8 +43,8 @@ use super::{VmPerms, Vmar};
pub struct VmarChildOptions<R> {
parent: Vmar<R>,
size: usize,
offset: usize,
align: usize,
offset: Option<usize>,
align: Option<usize>,
}
impl<R> VmarChildOptions<R> {
@ -58,8 +56,8 @@ impl<R> VmarChildOptions<R> {
Self {
parent,
size,
offset: 0,
align: PAGE_SIZE,
offset: None,
align: None,
}
}
@ -69,7 +67,8 @@ impl<R> VmarChildOptions<R> {
///
/// The alignment must be a power of two and a multiple of the page size.
pub fn align(mut self, align: usize) -> Self {
self.align = Some(align);
self
}
/// Sets the offset of the child VMAR.
@ -84,7 +83,8 @@ impl<R> VmarChildOptions<R> {
///
/// The offset must be page-aligned.
pub fn offset(mut self, offset: usize) -> Self {
self.offset = Some(offset);
self
}
/// Allocates the child VMAR according to the specified options.
@ -94,118 +94,40 @@ impl<R> VmarChildOptions<R> {
/// # Access rights
///
/// The child VMAR is initially assigned all the parent's access rights.
pub fn alloc(self) -> Result<Vmar<R>> {
// check align
let align = if let Some(align) = self.align {
debug_assert!(align % PAGE_SIZE == 0);
debug_assert!(align.is_power_of_two());
if align % PAGE_SIZE != 0 || !align.is_power_of_two() {
return Err(Error::InvalidArgs);
}
align
} else {
PAGE_SIZE
};
// check size
if self.size % align != 0 {
return Err(Error::InvalidArgs);
}
// check offset
let root_vmar_offset = if let Some(offset) = self.offset {
if offset % PAGE_SIZE != 0 {
return Err(Error::InvalidArgs);
}
let root_vmar_offset = offset + self.parent.base();
if root_vmar_offset % align != 0 {
return Err(Error::InvalidArgs);
}
Some(root_vmar_offset)
} else {
None
};
let child_vmar_ = self
.parent
.0
.alloc_child_vmar(root_vmar_offset, self.size, align)?;
let child_vmar = Vmar(child_vmar_, self.parent.1);
Ok(child_vmar)
}
}

View File

@ -1,18 +1,16 @@
use core::ops::Range;
use alloc::sync::Arc;
use crate::prelude::*;
use jinux_frame::vm::VmIo;
use jinux_rights_proc::require;
use crate::{
rights::{Dup, Rights, TRights},
vm::{page_fault_handler::PageFaultHandler, vmo::Vmo},
};
use super::{
options::VmarChildOptions, vm_mapping::VmarMapOptions, VmPerms, Vmar, VmarRightsOp, Vmar_,
};
impl<R: TRights> Vmar<R> {
@ -21,8 +19,8 @@ impl<R: TRights> Vmar<R> {
/// # Access rights
///
/// A root VMAR is initially given full access rights.
pub fn new_root() -> Result<Self> {
let inner = Arc::new(Vmar_::new_root()?);
let rights = R::new();
let new_self = Self(inner, rights);
Ok(new_self)
@ -112,6 +110,12 @@ impl<R: TRights> Vmar<R> {
self.0.protect(perms, range)
}
/// clear all mappings and children vmars.
/// After being cleared, this vmar will become an empty vmar
pub fn clear(&self) -> Result<()> {
self.0.clear_root_vmar()
}
/// Destroy a VMAR, including all its mappings and children VMARs.
///
/// After being destroyed, the VMAR becomes useless and returns errors
@ -141,37 +145,57 @@ impl<R: TRights> Vmar<R> {
/// The method requires the Dup right.
#[require(R > Dup)]
pub fn dup(&self) -> Result<Self> {
Ok(Vmar(self.0.clone(), self.1))
}
/// Strict the access rights.
#[require(R > R1)]
pub fn restrict<R1: TRights>(self) -> Vmar<R1> {
Vmar(self.0, R1::new())
}
fn check_rights(&self, rights: Rights) -> Result<()> {
if self.rights().contains(rights) {
Ok(())
} else {
Err(Error::AccessDenied)
return_errno_with_message!(Errno::EACCES, "check rights failed");
}
}
}
impl<R: TRights> VmIo for Vmar<R> {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> jinux_frame::Result<()> {
self.check_rights(Rights::READ)?;
self.0.read(offset, buf)?;
Ok(())
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> jinux_frame::Result<()> {
self.check_rights(Rights::WRITE)?;
self.0.write(offset, buf)?;
Ok(())
}
}
impl<R: TRights> PageFaultHandler for Vmar<R> {
fn handle_page_fault(
&self,
page_fault_addr: Vaddr,
not_present: bool,
write: bool,
) -> Result<()> {
if write {
self.check_rights(Rights::WRITE)?;
} else {
self.check_rights(Rights::READ)?;
}
self.0
.handle_page_fault(page_fault_addr, not_present, write)
}
}
impl<R: TRights> VmarRightsOp for Vmar<R> {
fn rights(&self) -> Rights {
Rights::from_bits(R::BITS).unwrap()
}
}

View File

@ -0,0 +1,444 @@
use crate::prelude::*;
use core::ops::Range;
use jinux_frame::vm::VmMapOptions;
use jinux_frame::vm::{VmFrameVec, VmIo, VmPerm};
use spin::Mutex;
use crate::vm::{
vmo::get_page_idx_range,
vmo::{Vmo, VmoChildOptions},
};
use super::{Vmar, Vmar_};
use crate::vm::perms::VmPerms;
use crate::vm::vmar::Rights;
use crate::vm::vmo::VmoRightsOp;
/// A VmMapping represents mapping a vmo into a vmar.
/// A vmar can have multiple VmMappings, which means multiple vmos can be mapped into one vmar.
/// A vmo can also be contained in multiple VmMappings, which means one vmo can be mapped into multiple vmars.
/// The relationship between Vmar and Vmo is M:N.
pub struct VmMapping {
inner: Mutex<VmMappingInner>,
/// The parent vmar. The parent should always point to a valid vmar.
parent: Weak<Vmar_>,
/// The mapped vmo. The mapped vmo is with dynamic capability.
vmo: Vmo<Rights>,
/// The map offset of the vmo, in bytes.
vmo_offset: usize,
/// The size of the mapping, in bytes. The map size can even be larger than the size of the backing vmo.
/// Those pages outside the vmo range cannot be read or written.
map_size: usize,
/// The base address relative to the root vmar where the vmo is mapped.
map_to_addr: Vaddr,
}
struct VmMappingInner {
/// is destroyed
is_destroyed: bool,
/// The pages already mapped. The key is the page index in vmo.
mapped_pages: BTreeSet<usize>,
/// The permission of each page. The key is the page index in vmo.
/// This map can be filled when mapping a vmo to a vmar and can be modified when calling mprotect.
/// We keep these perms in case the page is not yet committed (or when creating copy-on-write mappings) and the perms are needed later.
page_perms: BTreeMap<usize, VmPerm>,
}
impl VmMapping {
pub fn build_mapping<R1, R2>(option: VmarMapOptions<R1, R2>) -> Result<Self> {
let VmarMapOptions {
parent,
vmo,
perms,
vmo_offset,
size,
offset,
align,
can_overwrite,
} = option;
let Vmar(parent_vmar, _) = parent;
let vmo = vmo.to_dyn();
let vmo_size = vmo.size();
let map_to_addr = parent_vmar.allocate_free_region_for_vmo(
vmo_size,
size,
offset,
align,
can_overwrite,
)?;
let mut page_perms = BTreeMap::new();
let real_map_size = size.min(vmo_size);
let perm = VmPerm::from(perms);
let page_idx_range = get_page_idx_range(&(vmo_offset..vmo_offset + size));
for page_idx in page_idx_range {
page_perms.insert(page_idx, perm);
}
let vm_space = parent_vmar.vm_space();
let mut mapped_pages = BTreeSet::new();
let mapped_page_idx_range = get_page_idx_range(&(vmo_offset..vmo_offset + real_map_size));
let start_page_idx = mapped_page_idx_range.start;
for page_idx in mapped_page_idx_range {
let mut vm_map_options = VmMapOptions::new();
let page_map_addr = map_to_addr + (page_idx - start_page_idx) * PAGE_SIZE;
vm_map_options.addr(Some(page_map_addr));
vm_map_options.perm(perm.clone());
vm_map_options.can_overwrite(can_overwrite);
vm_map_options.align(align);
if let Ok(frames) = vmo.get_backup_frame(page_idx, false, false) {
vm_space.map(frames, &vm_map_options)?;
mapped_pages.insert(page_idx);
}
}
let vm_mapping_inner = VmMappingInner {
is_destroyed: false,
mapped_pages,
page_perms,
};
Ok(Self {
inner: Mutex::new(vm_mapping_inner),
parent: Arc::downgrade(&parent_vmar),
vmo,
vmo_offset,
map_size: size,
map_to_addr,
})
}
pub(super) fn vmo(&self) -> &Vmo<Rights> {
&self.vmo
}
/// Add a new committed page and map it to the vmspace. If copy-on-write is set, it's allowed to unmap the page at the same address.
/// FIXME: This implementation is based on the fact that we map one page at a time. If multiple pages are mapped together, this implementation may have problems.
pub(super) fn map_one_page(&self, page_idx: usize, frames: VmFrameVec) -> Result<()> {
let parent = self.parent.upgrade().unwrap();
let vm_space = parent.vm_space();
let map_addr = page_idx * PAGE_SIZE + self.map_to_addr;
let vm_perm = self.inner.lock().page_perms.get(&page_idx).unwrap().clone();
let mut vm_map_options = VmMapOptions::new();
vm_map_options.addr(Some(map_addr));
vm_map_options.perm(vm_perm.clone());
// copy on write allows unmap the mapped page
if self.vmo.is_cow_child() && vm_space.is_mapped(map_addr) {
vm_space.unmap(&(map_addr..(map_addr + PAGE_SIZE))).unwrap();
}
vm_space.map(frames, &vm_map_options)?;
self.inner.lock().mapped_pages.insert(page_idx);
Ok(())
}
/// unmap a page
pub(super) fn unmap_one_page(&self, page_idx: usize) -> Result<()> {
let parent = self.parent.upgrade().unwrap();
let vm_space = parent.vm_space();
let map_addr = page_idx * PAGE_SIZE + self.map_to_addr;
let range = map_addr..(map_addr + PAGE_SIZE);
if vm_space.is_mapped(map_addr) {
vm_space.unmap(&range)?;
}
self.inner.lock().mapped_pages.remove(&page_idx);
Ok(())
}
/// the mapping's start address
pub(super) fn map_to_addr(&self) -> Vaddr {
self.map_to_addr
}
pub(super) fn size(&self) -> usize {
self.map_size
}
pub fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
let vmo_read_offset = self.vmo_offset + offset;
self.vmo.read_bytes(vmo_read_offset, buf)?;
Ok(())
}
pub fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
let vmo_write_offset = self.vmo_offset + offset;
self.vmo.write_bytes(vmo_write_offset, buf)?;
Ok(())
}
/// Unmap pages in the range
pub fn unmap(&self, range: Range<usize>, destroy: bool) -> Result<()> {
let vmo_map_range = (range.start - self.map_to_addr)..(range.end - self.map_to_addr);
let page_idx_range = get_page_idx_range(&vmo_map_range);
for page_idx in page_idx_range {
self.unmap_one_page(page_idx)?;
}
if destroy && range == self.range() {
self.inner.lock().is_destroyed = false;
}
Ok(())
}
pub fn is_destroyed(&self) -> bool {
self.inner.lock().is_destroyed
}
pub fn handle_page_fault(
&self,
page_fault_addr: Vaddr,
not_present: bool,
write: bool,
) -> Result<()> {
let vmo_offset = self.vmo_offset + page_fault_addr - self.map_to_addr;
if vmo_offset >= self.vmo.size() {
return_errno_with_message!(Errno::EACCES, "page fault addr is not backed up by a vmo");
}
if write {
self.vmo.check_rights(Rights::WRITE)?;
} else {
self.vmo.check_rights(Rights::READ)?;
}
// get the backup frame for page
let page_idx = vmo_offset / PAGE_SIZE;
let frames = self.vmo.get_backup_frame(page_idx, write, true)?;
// map the page
self.map_one_page(page_idx, frames)
}
pub(super) fn protect(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
let rights = Rights::from(perms);
self.vmo().check_rights(rights)?;
// FIXME: should we commit and map these pages before protect vmspace?
let vmar = self.parent.upgrade().unwrap();
let vm_space = vmar.vm_space();
let perm = VmPerm::from(perms);
vm_space.protect(&range, perm)?;
Ok(())
}
pub(super) fn fork_mapping(&self, new_parent: Weak<Vmar_>) -> Result<VmMapping> {
let VmMapping {
inner,
parent,
vmo,
vmo_offset,
map_size,
map_to_addr,
} = self;
let parent_vmo = vmo.clone();
let vmo_size = parent_vmo.size();
let child_vmo = VmoChildOptions::new_cow(parent_vmo, 0..vmo_size).alloc()?;
let parent_vmar = new_parent.upgrade().unwrap();
let vm_space = parent_vmar.vm_space();
let real_map_size = self.size().min(child_vmo.size());
let vmo_offset = *vmo_offset;
let page_idx_range = get_page_idx_range(&(vmo_offset..vmo_offset + real_map_size));
let start_page_idx = page_idx_range.start;
let mut mapped_pages = BTreeSet::new();
for page_idx in page_idx_range {
// When mapping pages from the parent, we should forbid write access to these pages.
// So any write access to these pages will trigger a page fault. Then, we can allocate a new page for the faulting page.
let mut vm_perm = inner.lock().page_perms.get(&page_idx).unwrap().clone();
vm_perm -= VmPerm::W;
let mut vm_map_options = VmMapOptions::new();
let map_addr = (page_idx - start_page_idx) * PAGE_SIZE + self.map_to_addr;
vm_map_options.addr(Some(map_addr));
vm_map_options.perm(vm_perm);
if let Ok(frames) = child_vmo.get_backup_frame(page_idx, false, false) {
vm_space.map(frames, &vm_map_options)?;
mapped_pages.insert(page_idx);
}
}
let is_destroyed = inner.lock().is_destroyed;
let page_perms = inner.lock().page_perms.clone();
let inner = VmMappingInner {
is_destroyed,
mapped_pages,
page_perms,
};
Ok(VmMapping {
inner: Mutex::new(inner),
parent: new_parent,
vmo: child_vmo,
vmo_offset,
map_size: *map_size,
map_to_addr: *map_to_addr,
})
}
pub fn range(&self) -> Range<usize> {
self.map_to_addr..self.map_to_addr + self.map_size
}
}
/// Options for creating a new mapping. The mapping is not allowed to overlap
/// with any child VMARs. And unless specified otherwise, it is not allowed
/// to overlap with any existing mapping, either.
pub struct VmarMapOptions<R1, R2> {
parent: Vmar<R1>,
vmo: Vmo<R2>,
perms: VmPerms,
vmo_offset: usize,
size: usize,
offset: Option<usize>,
align: usize,
can_overwrite: bool,
}
impl<R1, R2> VmarMapOptions<R1, R2> {
/// Creates a default set of options with the VMO and the memory access
/// permissions.
///
/// The VMO must have access rights that correspond to the memory
/// access permissions. For example, if `perms` contains `VmPerm::Write`,
/// then `vmo.rights()` should contain `Rights::WRITE`.
pub fn new(parent: Vmar<R1>, vmo: Vmo<R2>, perms: VmPerms) -> Self {
let size = vmo.size();
Self {
parent,
vmo,
perms,
vmo_offset: 0,
size,
offset: None,
align: PAGE_SIZE,
can_overwrite: false,
}
}
/// Sets the offset of the first memory page in the VMO that is to be
/// mapped into the VMAR.
///
/// The offset must be page-aligned and within the VMO.
///
/// The default value is zero.
pub fn vmo_offset(mut self, offset: usize) -> Self {
self.vmo_offset = offset;
self
}
/// Sets the size of the mapping.
///
/// The size of a mapping may not be equal to that of the VMO.
/// For example, it is ok to create a mapping whose size is larger than
/// that of the VMO, although one cannot read from or write to the
/// part of the mapping that is not backed by the VMO.
/// So you may wonder: what is the point of supporting such _oversized_
/// mappings? The reason is two-fold.
/// 1. VMOs are resizable. So even if a mapping is backed by a VMO whose
/// size is equal to that of the mapping initially, we cannot prevent
/// the VMO from shrinking.
/// 2. Mappings are not allowed to overlap by default. As a result,
/// oversized mappings can serve as a placeholder to prevent future
/// mappings from occupying some particular address ranges accidentally.
///
/// The default value is the size of the VMO.
pub fn size(mut self, size: usize) -> Self {
self.size = size;
self
}
/// Sets the mapping's alignment.
///
/// The default value is the page size.
///
/// The provided alignment must be a power of two and a multiple of the
/// page size.
pub fn align(mut self, align: usize) -> Self {
self.align = align;
self
}
/// Sets the mapping's offset inside the VMAR.
///
/// The offset must satisfy the alignment requirement.
/// Also, the mapping's range `[offset, offset + size)` must be within
/// the VMAR.
///
/// If not set, the system will choose an offset automatically.
pub fn offset(mut self, offset: usize) -> Self {
self.offset = Some(offset);
self
}
/// Sets whether the mapping can overwrite existing mappings.
///
/// The default value is false.
///
/// If this option is set to true, then the `offset` option must be
/// set.
pub fn can_overwrite(mut self, can_overwrite: bool) -> Self {
self.can_overwrite = can_overwrite;
self
}
/// Creates the mapping.
///
/// All options will be checked at this point.
///
/// On success, the virtual address of the new mapping is returned.
pub fn build(self) -> Result<Vaddr> {
self.check_options()?;
let parent_vmar = self.parent.0.clone();
let vmo_ = self.vmo.0.clone();
let vm_mapping = Arc::new(VmMapping::build_mapping(self)?);
let map_to_addr = vm_mapping.map_to_addr();
parent_vmar.add_mapping(vm_mapping);
Ok(map_to_addr)
}
/// check whether all options are valid
fn check_options(&self) -> Result<()> {
// check align
debug_assert!(self.align % PAGE_SIZE == 0);
debug_assert!(self.align.is_power_of_two());
if self.align % PAGE_SIZE != 0 || !self.align.is_power_of_two() {
return_errno_with_message!(Errno::EINVAL, "invalid align");
}
debug_assert!(self.vmo_offset % self.align == 0);
if self.vmo_offset % self.align != 0 {
return_errno_with_message!(Errno::EINVAL, "invalid vmo offset");
}
if let Some(offset) = self.offset {
debug_assert!(offset % self.align == 0);
if offset % self.align != 0 {
return_errno_with_message!(Errno::EINVAL, "invalid offset");
}
}
self.check_perms()?;
self.check_overwrite()?;
Ok(())
}
/// check whether the vmperm is subset of vmo rights
fn check_perms(&self) -> Result<()> {
let perm_rights = Rights::from(self.perms);
self.vmo.check_rights(perm_rights)
}
/// check whether the vmo will overlap with any existing vmo or vmar
fn check_overwrite(&self) -> Result<()> {
if self.can_overwrite {
// if can_overwrite is set, the offset cannot be None
debug_assert!(self.offset != None);
if self.offset == None {
return_errno_with_message!(
Errno::EINVAL,
"offset can not be none when can overwrite is true"
);
}
}
if self.offset == None {
// if the offset is not specified, we assume the mapping can always find a suitable free region.
// FIXME: is this always true?
return Ok(());
}
let offset = self.offset.unwrap();
// we should spare enough space at least for the whole vmo
let size = self.size.max(self.vmo.size());
let vmo_range = offset..(offset + size);
self.parent
.0
.check_vmo_overwrite(vmo_range, self.can_overwrite)
}
}

View File

@ -1,10 +1,12 @@
use core::ops::Range;
use crate::prelude::*;
use jinux_frame::vm::VmIo;
use crate::rights::{Rights, TRights};
use super::VmoRightsOp;
use super::{
options::{VmoCowChild, VmoSliceChild},
Vmo, VmoChildOptions,
@ -66,6 +68,12 @@ impl Vmo<Rights> {
Ok(VmoChildOptions::new_cow(dup_self, range))
}
/// commit a page at specific offset
pub fn commit_page(&self, offset: usize) -> Result<()> {
self.check_rights(Rights::WRITE)?;
self.0.commit_page(offset)
}
/// Commits the pages specified in the range (in bytes).
///
/// The range must be within the size of the VMO.
@ -125,42 +133,50 @@ impl Vmo<Rights> {
/// The method requires the Dup right.
pub fn dup(&self) -> Result<Self> {
self.check_rights(Rights::DUP)?;
Ok(Self(self.0.clone(), self.1.clone()))
}
/// Restricts the access rights given the mask.
pub fn restrict(mut self, mask: Rights) -> Self {
self.1 |= mask;
self
}
/// Converts to a static capability.
pub fn to_static<R1: TRights>(self) -> Result<Vmo<R1>> {
self.check_rights(Rights::from_bits(R1::BITS).ok_or(Error::new(Errno::EINVAL))?)?;
Ok(Vmo(self.0, R1::new()))
}
}
impl Clone for Vmo<Rights> {
fn clone(&self) -> Self {
Self(self.0.clone(), self.1.clone())
}
}
impl VmIo for Vmo<Rights> {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> jinux_frame::Result<()> {
self.check_rights(Rights::READ)?;
self.0.read_bytes(offset, buf)?;
Ok(())
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> jinux_frame::Result<()> {
self.check_rights(Rights::WRITE)?;
self.0.write_bytes(offset, buf)?;
Ok(())
}
}
impl VmoRightsOp for Vmo<Rights> {
fn rights(&self) -> Rights {
self.1
}
/// Converts to a dynamic capability.
fn to_dyn(self) -> Vmo<Rights> {
let rights = self.rights();
Vmo(self.0, rights)
}
}

View File

@ -3,9 +3,10 @@
use core::ops::Range;
use crate::rights::Rights;
use alloc::sync::Arc;
use bitflags::bitflags;
use jinux_frame::vm::{Paddr, VmAllocOptions, VmFrameVec, VmIo};
use jinux_frame::AlignExt;
use crate::prelude::*;
mod dyn_cap;
mod options;
@ -71,7 +72,42 @@ use spin::Mutex;
/// `Vmo` is easier to use (by offering more powerful APIs) and
/// harder to misuse (thanks to its nature of being capability).
///
pub struct Vmo<R>(Arc<Vmo_>, R);
pub struct Vmo<R = Rights>(pub(super) Arc<Vmo_>, R);
/// Functions that exist for both the static capability and the dynamic capability
pub trait VmoRightsOp {
/// Returns the access rights.
fn rights(&self) -> Rights;
/// Check whether rights is included in self
fn check_rights(&self, rights: Rights) -> Result<()> {
if self.rights().contains(rights) {
Ok(())
} else {
return_errno_with_message!(Errno::EINVAL, "vmo rights check failed");
}
}
/// Converts to a dynamic capability.
fn to_dyn(self) -> Vmo<Rights>
where
Self: Sized;
}
// We implement this trait for Vmo, so we can use these functions on types like Vmo<R> without trait bounds.
// FIXME: This requires the incomplete feature `specialization`, which should be fixed in the future.
impl<R> VmoRightsOp for Vmo<R> {
default fn rights(&self) -> Rights {
unimplemented!()
}
default fn to_dyn(self) -> Vmo<Rights>
where
Self: Sized,
{
unimplemented!()
}
}
bitflags! {
/// VMO flags.
@ -90,59 +126,302 @@ bitflags! {
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum VmoType {
/// This vmo_ is created as a copy on write child
CopyOnWriteChild,
/// This vmo_ is created as a slice child
SliceChild,
/// This vmo_ is not created as a child of a parent vmo
NotChild,
}
pub(super) struct Vmo_ {
/// Flags
flags: VmoFlags,
/// VmoInner
inner: Mutex<VmoInner>,
/// Parent Vmo
parent: Weak<Vmo_>,
/// paddr
paddr: Option<Paddr>,
/// vmo type
vmo_type: VmoType,
}
struct VmoInner {
/// The backup pager
pager: Option<Arc<dyn Pager>>,
/// size, in bytes
size: usize,
/// The pages committed. The key is the page index, the value is the backup frame.
committed_pages: BTreeMap<usize, VmFrameVec>,
/// The pages from the parent that the current vmo can access. The pages can only be inherited when creating a child vmo.
/// We store the page index range
inherited_pages: InheritedPages,
}
/// Pages inherited from parent
struct InheritedPages {
/// The page index range in the child vmo. The pages inside this range are initially inherited from the parent vmo.
/// The range includes the start page, but not the end page.
page_range: Range<usize>,
/// The page index offset in the parent vmo. That is to say, the page with index `idx` in the child vmo corresponds to
/// the page with index `idx + parent_page_idx_offset` in the parent vmo.
parent_page_idx_offset: usize,
}
impl InheritedPages {
pub fn new_empty() -> Self {
Self {
page_range: 0..0,
parent_page_idx_offset: 0,
}
}
pub fn new(page_range: Range<usize>, parent_page_idx_offset: usize) -> Self {
Self {
page_range,
parent_page_idx_offset,
}
}
fn contains_page(&self, page_idx: usize) -> bool {
self.page_range.start <= page_idx && page_idx < self.page_range.end
}
fn parent_page_idx(&self, child_page_idx: usize) -> Option<usize> {
if self.contains_page(child_page_idx) {
Some(child_page_idx + self.parent_page_idx_offset)
} else {
None
}
}
}
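A standalone sketch (not part of this commit) of the index translation that `InheritedPages` performs: a child page index maps to `idx + parent_page_idx_offset`, and only indices inside `page_range` are inherited at all.

```rust
use std::ops::Range;

struct InheritedPages {
    page_range: Range<usize>,
    parent_page_idx_offset: usize,
}

impl InheritedPages {
    fn parent_page_idx(&self, child_page_idx: usize) -> Option<usize> {
        if self.page_range.contains(&child_page_idx) {
            Some(child_page_idx + self.parent_page_idx_offset)
        } else {
            None
        }
    }
}

fn main() {
    // A child carved out of parent pages 4..8 sees them as its own pages 0..4.
    let pages = InheritedPages { page_range: 0..4, parent_page_idx_offset: 4 };
    assert_eq!(pages.parent_page_idx(1), Some(5));
    assert_eq!(pages.parent_page_idx(4), None); // outside the inherited range
}
```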
impl Vmo_ {
pub fn commit_page(&self, offset: usize) -> Result<()> {
let page_idx = offset / PAGE_SIZE;
let mut inner = self.inner.lock();
if !inner.committed_pages.contains_key(&page_idx) {
let frames = match &inner.pager {
None => {
let vm_alloc_option = VmAllocOptions::new(1);
let frames = VmFrameVec::allocate(&vm_alloc_option)?;
frames.iter().for_each(|frame| frame.zero());
frames
}
Some(pager) => {
let frame = pager.commit_page(offset)?;
VmFrameVec::from_one_frame(frame)
}
};
inner.committed_pages.insert(page_idx, frames);
}
Ok(())
}
pub fn decommit_page(&self, offset: usize) -> Result<()> {
let page_idx = offset / PAGE_SIZE;
let mut inner = self.inner.lock();
if inner.committed_pages.contains_key(&page_idx) {
inner.committed_pages.remove(&page_idx);
if let Some(pager) = &inner.pager {
pager.decommit_page(offset)?;
}
}
Ok(())
}
pub fn commit(&self, range: Range<usize>) -> Result<()> {
let page_idx_range = get_page_idx_range(&range);
for page_idx in page_idx_range {
let offset = page_idx * PAGE_SIZE;
self.commit_page(offset)?;
}
Ok(())
}
pub fn decommit(&self, range: Range<usize>) -> Result<()> {
let page_idx_range = get_page_idx_range(&range);
for page_idx in page_idx_range {
let offset = page_idx * PAGE_SIZE;
self.decommit_page(offset)?;
}
Ok(())
}
/// determine whether a page is committed
pub fn page_commited(&self, page_idx: usize) -> bool {
self.inner.lock().committed_pages.contains_key(&page_idx)
}
pub fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
let read_len = buf.len();
debug_assert!(offset + read_len <= self.size());
if offset + read_len > self.size() {
return_errno_with_message!(Errno::EINVAL, "read range exceeds vmo size");
}
let read_range = offset..(offset + read_len);
let frames = self.ensure_all_pages_exist(read_range, false)?;
let read_offset = offset % PAGE_SIZE;
Ok(frames.read_bytes(read_offset, buf)?)
}
/// Ensure all pages inside the range are backed by vm frames, and return the frames.
fn ensure_all_pages_exist(&self, range: Range<usize>, write_page: bool) -> Result<VmFrameVec> {
let page_idx_range = get_page_idx_range(&range);
let mut frames = VmFrameVec::empty();
for page_idx in page_idx_range {
let mut page_frame = self.get_backup_frame(page_idx, write_page, true)?;
frames.append(&mut page_frame)?;
}
Ok(frames)
}
/// Get the backup frame for a page. If commit_if_none is set, we will commit a new page for the page
/// if the page does not have a backup frame.
fn get_backup_frame(
&self,
page_idx: usize,
write_page: bool,
commit_if_none: bool,
) -> Result<VmFrameVec> {
// if the page is already committed, return the committed page.
if let Some(frames) = self.inner.lock().committed_pages.get(&page_idx) {
return Ok(frames.clone());
}
match self.vmo_type {
// if the vmo is not a child, then commit a new page
VmoType::NotChild => {
if commit_if_none {
self.commit_page(page_idx * PAGE_SIZE)?;
let frames = self
.inner
.lock()
.committed_pages
.get(&page_idx)
.unwrap()
.clone();
return Ok(frames);
} else {
return_errno_with_message!(Errno::EINVAL, "backup frame does not exist");
}
}
// if the vmo is slice child, we will request the frame from parent
VmoType::SliceChild => {
let inner = self.inner.lock();
debug_assert!(inner.inherited_pages.contains_page(page_idx));
if !inner.inherited_pages.contains_page(page_idx) {
return_errno_with_message!(
Errno::EINVAL,
"page does not inherited from parent"
);
}
let parent = self.parent.upgrade().unwrap();
let parent_page_idx = inner.inherited_pages.parent_page_idx(page_idx).unwrap();
return parent.get_backup_frame(parent_page_idx, write_page, commit_if_none);
}
// If the vmo is copy on write
VmoType::CopyOnWriteChild => {
if write_page {
// write
// commit a new page
self.commit_page(page_idx * PAGE_SIZE)?;
let inner = self.inner.lock();
let frames = inner.committed_pages.get(&page_idx).unwrap().clone();
if let Some(parent_page_idx) = inner.inherited_pages.parent_page_idx(page_idx) {
// copy contents of parent to the frame
let mut tmp_buffer = [0u8; PAGE_SIZE];
let parent = self.parent.upgrade().unwrap();
parent.read_bytes(parent_page_idx * PAGE_SIZE, &mut tmp_buffer)?;
frames.write_bytes(0, &tmp_buffer)?;
} else {
frames.zero();
}
return Ok(frames);
} else {
// read
if let Some(parent_page_idx) =
self.inner.lock().inherited_pages.parent_page_idx(page_idx)
{
// If it's inherited from parent, we request the page from parent
let parent = self.parent.upgrade().unwrap();
return parent.get_backup_frame(
parent_page_idx,
write_page,
commit_if_none,
);
} else {
// Otherwise, we commit a new page
self.commit_page(page_idx * PAGE_SIZE)?;
let frames = self
.inner
.lock()
.committed_pages
.get(&page_idx)
.unwrap()
.clone();
// FIXME: should we zero the frames here?
frames.zero();
return Ok(frames);
}
}
}
}
}
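The branching above boils down to a simple rule: reads from a copy-on-write child are served by the parent while the page is still inherited, and the first write commits a private copy of the parent's contents. A minimal standalone simulation of that rule (plain Rust, with `Vec<u8>` standing in for `VmFrameVec`; not part of this commit):

```rust
use std::collections::BTreeMap;

const PAGE_SIZE: usize = 4096;

struct CowChild {
    committed: BTreeMap<usize, Vec<u8>>, // page_idx -> private page
    parent: BTreeMap<usize, Vec<u8>>,    // page_idx -> inherited parent page
}

impl CowChild {
    fn page(&mut self, idx: usize, write: bool) -> &Vec<u8> {
        if write && !self.committed.contains_key(&idx) {
            // First write: commit a private page, copying the parent's contents if inherited.
            let content = self
                .parent
                .get(&idx)
                .cloned()
                .unwrap_or_else(|| vec![0u8; PAGE_SIZE]);
            self.committed.insert(idx, content);
        }
        // Reads fall back to the parent's page while no private copy exists.
        self.committed.get(&idx).or_else(|| self.parent.get(&idx)).unwrap()
    }
}

fn main() {
    let mut child = CowChild {
        committed: BTreeMap::new(),
        parent: BTreeMap::from([(0, vec![1u8; PAGE_SIZE])]),
    };
    assert_eq!(child.page(0, false)[0], 1); // read is served by the parent
    child.page(0, true);                    // write commits a private copy
    assert!(child.committed.contains_key(&0));
}
```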
pub fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
let write_len = buf.len();
debug_assert!(offset + write_len <= self.size());
if offset + write_len > self.size() {
return_errno_with_message!(Errno::EINVAL, "write range exceeds the vmo size");
}
let write_range = offset..(offset + write_len);
let frames = self.ensure_all_pages_exist(write_range, true)?;
let write_offset = offset % PAGE_SIZE;
frames.write_bytes(write_offset, buf)?;
Ok(())
}
pub fn clear(&self, range: Range<usize>) -> Result<()> {
let buffer = vec![0u8; range.end - range.start];
self.write_bytes(range.start, &buffer)
}
pub fn size(&self) -> usize {
self.inner.lock().size
}
pub fn resize(&self, new_size: usize) -> Result<()> {
assert!(self.flags.contains(VmoFlags::RESIZABLE));
let new_size = new_size.align_up(PAGE_SIZE);
let old_size = self.size();
if new_size == old_size {
return Ok(());
}
if new_size < old_size {
self.decommit(new_size..old_size)?;
self.inner.lock().size = new_size;
} else {
self.commit(old_size..new_size)?;
self.inner.lock().size = new_size;
}
Ok(())
}
pub fn paddr(&self) -> Option<Paddr> {
self.paddr
}
pub fn flags(&self) -> VmoFlags {
self.flags.clone()
}
}
@ -162,4 +441,34 @@ impl<R> Vmo<R> {
pub fn flags(&self) -> VmoFlags {
self.0.flags()
}
/// return whether a page is already committed
pub fn has_backup_frame(&self, page_idx: usize) -> bool {
if let Ok(_) = self.0.get_backup_frame(page_idx, false, false) {
true
} else {
false
}
}
pub fn get_backup_frame(
&self,
page_idx: usize,
write_page: bool,
commit_if_none: bool,
) -> Result<VmFrameVec> {
self.0
.get_backup_frame(page_idx, write_page, commit_if_none)
}
pub fn is_cow_child(&self) -> bool {
self.0.vmo_type == VmoType::CopyOnWriteChild
}
}
/// get the page index range that contains the offset range of vmo
pub fn get_page_idx_range(vmo_offset_range: &Range<usize>) -> Range<usize> {
let start = vmo_offset_range.start.align_down(PAGE_SIZE);
let end = vmo_offset_range.end.align_up(PAGE_SIZE);
(start / PAGE_SIZE)..(end / PAGE_SIZE)
}
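The helper above converts a byte range into the half-open range of page indices it touches, by aligning the start down and the end up. A standalone sketch of the same arithmetic (assuming 4 KiB pages; not part of this commit):

```rust
use std::ops::Range;

const PAGE_SIZE: usize = 4096;

fn get_page_idx_range(offset_range: &Range<usize>) -> Range<usize> {
    let start = offset_range.start / PAGE_SIZE;               // align_down(start) / PAGE_SIZE
    let end = (offset_range.end + PAGE_SIZE - 1) / PAGE_SIZE; // align_up(end) / PAGE_SIZE
    start..end
}

fn main() {
    // A byte range that touches parts of pages 1 and 2 maps to page indices 1..3.
    assert_eq!(get_page_idx_range(&(0x1800..0x2100)), 1..3);
    // An exactly page-aligned range maps one-to-one.
    assert_eq!(get_page_idx_range(&(0x1000..0x3000)), 1..3);
}
```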

View File

@ -3,13 +3,19 @@
use core::marker::PhantomData;
use core::ops::Range;
use alloc::sync::Arc;
use jinux_frame::vm::{Paddr, VmAllocOptions, VmFrameVec};
use jinux_frame::AlignExt;
use jinux_rights_proc::require;
use typeflags_util::{SetExtend, SetExtendOp};
use crate::prelude::*;
use crate::rights::{Dup, Rights, TRights, Write};
use crate::vm::vmo::InheritedPages;
use crate::vm::vmo::VmoType;
use crate::vm::vmo::{VmoInner, Vmo_};
use super::VmoRightsOp;
use super::{Pager, Vmo, VmoFlags};
/// Options for allocating a root VMO.
@ -50,8 +56,8 @@ pub struct VmoOptions<R = Rights> {
size: usize,
paddr: Option<Paddr>,
flags: VmoFlags,
rights: Option<R>,
pager: Option<Arc<dyn Pager>>,
}
impl<R> VmoOptions<R> {
@ -60,7 +66,13 @@ impl<R> VmoOptions<R> {
///
/// The size of the VMO will be rounded up to align with the page size.
pub fn new(size: usize) -> Self {
Self {
size,
paddr: None,
flags: VmoFlags::empty(),
rights: None,
pager: None,
}
}
/// Sets the starting physical address of the VMO.
@ -70,7 +82,9 @@ impl<R> VmoOptions<R> {
/// If this option is set, then the underlying pages of VMO must be contiguous.
/// So `VmoFlags::IS_CONTIGUOUS` will be set automatically.
pub fn paddr(mut self, paddr: Paddr) -> Self {
self.paddr = Some(paddr);
self.flags |= VmoFlags::CONTIGUOUS;
self
}
/// Sets the VMO flags.
@ -79,12 +93,14 @@ impl<R> VmoOptions<R> {
///
/// For more information about the flags, see `VmoFlags`.
pub fn flags(mut self, flags: VmoFlags) -> Self {
self.flags = flags;
self
}
/// Sets the pager of the VMO.
pub fn pager(mut self, pager: Arc<dyn Pager>) -> Self {
self.pager = Some(pager);
self
}
}
@ -94,8 +110,16 @@ impl VmoOptions<Rights> {
/// # Access rights
///
/// The VMO is initially assigned full access rights.
pub fn alloc(self) -> Result<Vmo<Rights>> {
let VmoOptions {
size,
paddr,
flags,
pager,
..
} = self;
let vmo_ = alloc_vmo_(size, paddr, flags, pager)?;
Ok(Vmo(Arc::new(vmo_), Rights::all()))
}
}
@ -106,8 +130,62 @@ impl<R: TRights> VmoOptions<R> {
///
/// The VMO is initially assigned the access rights represented
/// by `R: TRights`.
pub fn alloc(self) -> Result<Vmo<R>> {
let VmoOptions {
size,
paddr,
flags,
rights,
pager,
} = self;
let vmo_ = alloc_vmo_(size, paddr, flags, pager)?;
Ok(Vmo(Arc::new(vmo_), R::new()))
}
}
fn alloc_vmo_(
size: usize,
paddr: Option<Paddr>,
flags: VmoFlags,
pager: Option<Arc<dyn Pager>>,
) -> Result<Vmo_> {
let size = size.align_up(PAGE_SIZE);
let committed_pages = committed_pages_if_continuous(flags, size, paddr)?;
let vmo_inner = VmoInner {
pager,
size,
committed_pages,
inherited_pages: InheritedPages::new_empty(),
};
Ok(Vmo_ {
flags,
inner: Mutex::new(vmo_inner),
parent: Weak::new(),
paddr,
vmo_type: VmoType::NotChild,
})
}
fn committed_pages_if_continuous(
flags: VmoFlags,
size: usize,
paddr: Option<Paddr>,
) -> Result<BTreeMap<usize, VmFrameVec>> {
if flags.contains(VmoFlags::CONTIGUOUS) {
// if the vmo is continuous, we need to allocate frames for the vmo
let frames_num = size / PAGE_SIZE;
let mut vm_alloc_option = VmAllocOptions::new(frames_num);
vm_alloc_option.is_contiguous(true);
vm_alloc_option.paddr(paddr);
let frames = VmFrameVec::allocate(&vm_alloc_option)?;
let mut committed_pages = BTreeMap::new();
for (idx, frame) in frames.into_iter().enumerate() {
committed_pages.insert(idx * PAGE_SIZE, VmFrameVec::from_one_frame(frame));
}
Ok(committed_pages)
} else {
// otherwise, pages are committed lazily, when they are first read or written
Ok(BTreeMap::new())
}
}
@ -181,7 +259,7 @@ impl<R: TRights> VmoOptions<R> {
/// Note that a slice VMO child and its parent cannot be resizable.
///
/// ```rust
/// use jinux_std::vm::{PAGE_SIZE, VmoOptions};
///
/// let parent_vmo = VmoOptions::new(PAGE_SIZE)
/// .alloc()
@ -284,14 +362,41 @@ impl<R, C> VmoChildOptions<R, C> {
}
}
impl VmoChildOptions<Rights, VmoSliceChild> {
/// Allocates the child VMO.
///
/// # Access rights
///
/// The child VMO is initially assigned all the parent's access rights.
pub fn alloc(self) -> Result<Vmo<Rights>> {
let VmoChildOptions {
parent,
range,
flags,
..
} = self;
let Vmo(parent_vmo_, parent_rights) = parent;
let child_vmo_ = alloc_child_vmo_(parent_vmo_, range, flags, ChildType::Slice)?;
Ok(Vmo(Arc::new(child_vmo_), parent_rights))
}
}
impl VmoChildOptions<Rights, VmoCowChild> {
/// Allocates the child VMO.
///
/// # Access rights
///
/// The child VMO is initially assigned all the parent's access rights.
pub fn alloc(self) -> Result<Vmo<Rights>> {
let VmoChildOptions {
parent,
range,
flags,
..
} = self;
let Vmo(parent_vmo_, parent_rights) = parent;
let child_vmo_ = alloc_child_vmo_(parent_vmo_, range, flags, ChildType::Cow)?;
Ok(Vmo(Arc::new(child_vmo_), parent_rights))
}
}
@ -301,8 +406,16 @@ impl<R: TRights> VmoChildOptions<R, VmoSliceChild> {
/// # Access rights
///
/// The child VMO is initially assigned all the parent's access rights.
pub fn alloc(self) -> Result<Vmo<R>> {
let VmoChildOptions {
parent,
range,
flags,
..
} = self;
let Vmo(parent_vmo_, parent_rights) = parent;
let child_vmo_ = alloc_child_vmo_(parent_vmo_, range, flags, ChildType::Slice)?;
Ok(Vmo(Arc::new(child_vmo_), parent_rights))
}
}
@ -313,28 +426,88 @@ impl<R: TRights> VmoChildOptions<R, VmoCowChild> {
///
/// The child VMO is initially assigned all the parent's access rights
/// plus the Write right.
pub fn alloc(self) -> Result<Vmo<SetExtendOp<R, Write>>>
where
R: SetExtend<Write>,
SetExtendOp<R, Write>: TRights,
{
let VmoChildOptions {
parent,
range,
flags,
..
} = self;
let Vmo(parent_vmo_, _) = parent;
let child_vmo_ = alloc_child_vmo_(parent_vmo_, range, flags, ChildType::Cow)?;
let right = SetExtendOp::<R, Write>::new();
Ok(Vmo(Arc::new(child_vmo_), right))
}
}
// original:
// pub fn alloc<R1>(mut self) -> Result<Vmo<R1>>
// where
// // TODO: R1 must contain the Write right. To do so at the type level,
// // we need to implement a type-level operator
// // (say, `TRightsExtend(L, F)`)
// // that may extend a list (`L`) of type-level flags with an extra flag `F`.
// R1: R // TRightsExtend<R, Write>
// {
// todo!()
// }
#[derive(Debug, Clone, Copy)]
enum ChildType {
Cow,
Slice,
}
fn alloc_child_vmo_(
parent_vmo_: Arc<Vmo_>,
range: Range<usize>,
child_flags: VmoFlags,
child_type: ChildType,
) -> Result<Vmo_> {
let child_vmo_start = range.start;
let child_vmo_end = range.end;
debug_assert!(child_vmo_start % PAGE_SIZE == 0);
debug_assert!(child_vmo_end % PAGE_SIZE == 0);
if child_vmo_start % PAGE_SIZE != 0 || child_vmo_end % PAGE_SIZE != 0 {
return_errno_with_message!(Errno::EINVAL, "vmo range does not aligned with PAGE_SIZE");
}
let parent_vmo_size = parent_vmo_.size();
let parent_vmo_inner = parent_vmo_.inner.lock();
match child_type {
ChildType::Slice => {
// A slice child should be inside parent vmo's range
debug_assert!(child_vmo_end <= parent_vmo_inner.size);
if child_vmo_end > parent_vmo_inner.size {
return_errno_with_message!(
Errno::EINVAL,
"slice child vmo cannot exceed parent vmo's size"
);
}
}
ChildType::Cow => {
// A copy on Write child should intersect with parent vmo
debug_assert!(range.start < parent_vmo_inner.size);
if range.start >= parent_vmo_inner.size {
return_errno_with_message!(Errno::EINVAL, "COW vmo should overlap with its parent");
}
}
}
let parent_page_idx_offset = range.start / PAGE_SIZE;
let inherited_end = range.end.min(parent_vmo_size);
let inherited_end_page_idx = inherited_end / PAGE_SIZE + 1;
let inherited_pages = InheritedPages::new(0..inherited_end_page_idx, parent_page_idx_offset);
let vmo_inner = VmoInner {
pager: None,
size: child_vmo_end - child_vmo_start,
committed_pages: BTreeMap::new(),
inherited_pages,
};
let child_paddr = parent_vmo_
.paddr()
.map(|parent_paddr| parent_paddr + child_vmo_start);
let vmo_type = match child_type {
ChildType::Cow => VmoType::CopyOnWriteChild,
ChildType::Slice => VmoType::SliceChild,
};
Ok(Vmo_ {
flags: child_flags,
inner: Mutex::new(vmo_inner),
parent: Arc::downgrade(&parent_vmo_),
paddr: child_paddr,
vmo_type,
})
}
/// A type to specify the "type" of a child, which is either a slice or a COW.

View File

@ -1,4 +1,4 @@
use crate::prelude::*;
use jinux_frame::vm::VmFrame;
/// Pagers provide frame to a VMO.
@ -10,7 +10,7 @@ use jinux_frame::vm::VmFrame;
/// notify the attached pager that the frame has been updated.
/// Finally, when a frame is no longer needed (i.e., on decommits),
/// the frame pager will also be notified.
pub trait Pager {
pub trait Pager: Send + Sync {
/// Ask the pager to provide a frame at a specified offset (in bytes).
///
/// After a page of a VMO is committed, the VMO shall not call this method
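To make the commit/update/decommit protocol described in the doc comments above concrete, here is a minimal, self-contained model of a pager. All names below (`Frame`, `Error`, and the three method names) are local stand-ins invented for illustration; the real `Pager` trait's methods are not visible in this hunk, so this is an assumption-laden sketch rather than the crate's API.

```rust
use std::collections::BTreeMap;
use std::sync::Mutex;

// Local stand-ins; not the jinux_frame types.
type Frame = Box<[u8; 4096]>;
#[derive(Debug)]
struct Error;

// A model of the pager protocol: provide a frame on commit,
// learn about writes via update, and release state on decommit.
trait Pager: Send + Sync {
    fn commit_page(&self, offset: usize) -> Result<Frame, Error>;
    fn update_page(&self, offset: usize) -> Result<(), Error>;
    fn decommit_page(&self, offset: usize) -> Result<(), Error>;
}

// A trivial anonymous-memory pager that hands out zeroed frames and
// remembers which page indices are currently committed.
struct AnonPager {
    committed: Mutex<BTreeMap<usize, ()>>,
}

impl Pager for AnonPager {
    fn commit_page(&self, offset: usize) -> Result<Frame, Error> {
        self.committed.lock().unwrap().insert(offset / 4096, ());
        Ok(Box::new([0u8; 4096]))
    }
    fn update_page(&self, _offset: usize) -> Result<(), Error> {
        // Nothing to write back for anonymous memory.
        Ok(())
    }
    fn decommit_page(&self, offset: usize) -> Result<(), Error> {
        self.committed.lock().unwrap().remove(&(offset / 4096));
        Ok(())
    }
}

fn main() {
    let pager = AnonPager { committed: Mutex::new(BTreeMap::new()) };
    let frame = pager.commit_page(0).unwrap();
    assert!(frame.iter().all(|&b| b == 0));
    pager.decommit_page(0).unwrap();
}
```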

View File

@ -1,11 +1,11 @@
use crate::prelude::*;
use core::ops::Range;
use jinux_frame::prelude::Result;
use jinux_frame::{vm::VmIo, Error};
use jinux_frame::vm::VmIo;
use jinux_rights_proc::require;
use crate::rights::*;
use super::VmoRightsOp;
use super::{
options::{VmoCowChild, VmoSliceChild},
Vmo, VmoChildOptions,
@ -66,6 +66,12 @@ impl<R: TRights> Vmo<R> {
Ok(VmoChildOptions::new_cow(dup_self, range))
}
/// Commit the page at the specified offset.
pub fn commit_page(&self, offset: usize) -> Result<()> {
self.check_rights(Rights::WRITE)?;
self.0.commit_page(offset)
}
/// Commit the pages specified in the range (in bytes).
///
/// The range must be within the size of the VMO.
@ -125,42 +131,38 @@ impl<R: TRights> Vmo<R> {
/// The method requires the Dup right.
#[require(R > Dup)]
pub fn dup(&self) -> Result<Self> {
todo!()
Ok(Vmo(self.0.clone(), self.1.clone()))
}
/// Restrict the access rights.
#[require(R > R1)]
pub fn restrict<R1>(mut self) -> Vmo<R1> {
todo!()
}
/// Converts to a dynamic capability.
pub fn to_dyn(self) -> Vmo<Rights> {
todo!()
}
/// Returns the access rights.
pub const fn rights(&self) -> Rights {
Rights::from_bits(R::BITS).unwrap()
}
fn check_rights(&self, rights: Rights) -> Result<()> {
if self.rights().contains(rights) {
Ok(())
} else {
Err(Error::AccessDenied)
}
pub fn restrict<R1: TRights>(self) -> Vmo<R1> {
Vmo(self.0, R1::new())
}
}
impl<R: TRights> VmIo for Vmo<R> {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> jinux_frame::Result<()> {
self.check_rights(Rights::READ)?;
self.0.read_bytes(offset, buf)
self.0.read_bytes(offset, buf)?;
Ok(())
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
fn write_bytes(&self, offset: usize, buf: &[u8]) -> jinux_frame::Result<()> {
self.check_rights(Rights::WRITE)?;
self.0.write_bytes(offset, buf)
self.0.write_bytes(offset, buf)?;
Ok(())
}
}
impl<R: TRights> VmoRightsOp for Vmo<R> {
fn rights(&self) -> Rights {
Rights::from_bits(R::BITS).unwrap()
}
/// Converts to a dynamic capability.
fn to_dyn(self) -> Vmo<Rights> {
let rights = self.rights();
Vmo(self.0, rights)
}
}
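The `rights()`/`to_dyn()` pair above bridges type-level rights and runtime `Rights`: each `TRights` type carries a `BITS` constant, and erasing to a dynamic capability simply materializes that constant. Below is a self-contained miniature of the idea using local stand-ins; the bit layout and type names are assumptions for illustration, not the kernel's definitions.

```rust
// Local stand-ins: a "type-level rights" trait carrying its bit pattern,
// and a runtime rights value built from it.
#[derive(Clone, Copy, PartialEq, Debug)]
struct Rights(u32);

trait TRights: Copy {
    const BITS: u32;
    fn new() -> Self;
}

#[derive(Clone, Copy)]
struct ReadWrite;
impl TRights for ReadWrite {
    const BITS: u32 = 0b011; // assumed bit layout, for illustration only
    fn new() -> Self {
        ReadWrite
    }
}

// A capability that is either statically typed (R: TRights) or dynamic (Rights).
struct Vmo<R>(R);

impl<R: TRights> Vmo<R> {
    fn rights(&self) -> Rights {
        Rights(R::BITS)
    }
    // Erase the static rights into a runtime value, as `to_dyn` does above.
    fn to_dyn(self) -> Vmo<Rights> {
        let rights = self.rights();
        Vmo(rights)
    }
}

fn main() {
    let static_vmo = Vmo(ReadWrite::new());
    let dyn_vmo = static_vmo.to_dyn();
    assert_eq!(dyn_vmo.0, Rights(0b011));
}
```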

View File

@ -0,0 +1,15 @@
use crate::{Cons, Nil};
/// This trait extends a type-level set with another item.
/// If the set already contains the item, the output is the original set.
/// Otherwise, the output is the set extended with the new item.
/// Implementations should take item order into account when extending a set.
pub trait SetExtend<T> {
type Output;
}
pub type SetExtendOp<Set, T> = <Set as SetExtend<T>>::Output;
impl<T> SetExtend<T> for Nil {
type Output = Cons<T, Nil>;
}

View File

@ -5,11 +5,13 @@
#![no_std]
pub mod assert;
pub mod bool;
pub mod extend;
pub mod if_;
pub mod same;
pub mod set;
pub use crate::bool::{And, AndOp, False, IsFalse, IsTrue, Not, NotOp, Or, OrOp, True};
pub use crate::extend::{SetExtend, SetExtendOp};
pub use crate::same::{SameAs, SameAsOp};
pub use crate::set::{Cons, Nil, Set, SetContain, SetContainOp, SetInclude, SetIncludeOp};
pub use assert::AssertTypeSame;

View File

@ -13,6 +13,7 @@ use core::ops::BitOr as Or;
pub trait Set {}
/// A non-empty type-level set.
#[derive(Debug, Clone, Copy)]
pub struct Cons<T, S: Set>(PhantomData<(T, S)>);
impl<T, S: Set> Cons<T, S> {
@ -22,6 +23,7 @@ impl<T, S: Set> Cons<T, S> {
}
/// An empty type-level set.
#[derive(Debug, Clone, Copy)]
pub struct Nil;
impl<T, S: Set> Set for Cons<T, S> {}

View File

@ -11,6 +11,7 @@ const SET_NAME: &'static str = "::typeflags_util::Cons";
/// A flag set represents a combination of different flag items,
/// e.g. [Read, Write], [Read], and [] are all flag sets.
/// The order of flag items does not matter, so flag sets with the same items should be viewed as the same set.
#[derive(Debug)]
pub struct FlagSet {
items: Vec<FlagItem>,
}
@ -107,6 +108,23 @@ impl FlagSet {
}
}
pub fn contains_type(&self, type_ident: &Ident) -> bool {
let type_name = type_ident.to_string();
self.items
.iter()
.position(|item| item.ident.to_string() == type_name)
.is_some()
}
pub fn contains_set(&self, other_set: &FlagSet) -> bool {
for item in &other_set.items {
if !self.contains_type(&item.ident) {
return false;
}
}
return true;
}
/// The token stream used inside the macro definition. We generate a token stream for each permutation of the items,
/// since the user may write the items in an arbitrary order in the macro.
pub fn macro_item_tokens(&self) -> Vec<TokenStream> {
@ -135,6 +153,14 @@ pub struct FlagItem {
val: Expr,
}
impl core::fmt::Debug for FlagItem {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("FlagItem")
.field("ident", &self.ident.to_string())
.finish()
}
}
/// generate all possible flag sets
pub fn generate_flag_sets(type_flag_def: &TypeFlagDef) -> Vec<FlagSet> {
let flag_items = type_flag_def

View File

@ -43,6 +43,10 @@
//! assert_type_same!(SetIncludeOp<R, W>, False);
//! assert_type_same!(SetIncludeOp<W, O>, True);
//! assert_type_same!(SetIncludeOp<O, R>, False);
//! assert_type_same!(SetExtendOp<O, Read>, R);
//! assert_type_same!(SetExtendOp<R, Write>, RW);
//! assert_type_same!(SetExtendOp<R, Read>, R);
//! assert_type_same!(SetExtendOp<W, Read>, RW);
//! ```
#![feature(proc_macro_diagnostic)]

View File

@ -77,7 +77,7 @@ impl TypeFlagDef {
let type_ = self.type_.clone();
quote!(
#(#attributes)*
#vis trait #ident : Sync + Send{
#vis trait #ident : Sync + Send + Copy + Clone{
const BITS: #type_;
fn new() -> Self;
@ -128,6 +128,7 @@ impl TypeFlagItem {
let ident = self.ident.clone();
quote!(
#(#attributes)*
#[derive(Copy, Clone, Debug)]
#vis struct #ident {}
)
}

View File

@ -1,4 +1,4 @@
use proc_macro2::TokenStream;
use proc_macro2::{Ident, TokenStream};
use quote::{quote, TokenStreamExt};
use crate::{
@ -23,6 +23,9 @@ pub fn expand_type_flag(type_flags_def: &TypeFlagDef) -> TokenStream {
all_tokens.append_all(impl_main_trait_tokens);
});
let impl_set_entend_tokens = impl_set_extend(type_flags_def, &flag_sets);
all_tokens.append_all(impl_set_entend_tokens);
let export_declarive_macro_tokens = export_declarive_macro(type_flags_def, &flag_sets);
all_tokens.append_all(export_declarive_macro_tokens);
@ -73,6 +76,56 @@ pub fn impl_same_as(type_flags_def: &TypeFlagDef) -> TokenStream {
all_tokens
}
pub fn impl_set_extend(type_flags_def: &TypeFlagDef, flag_sets: &[FlagSet]) -> TokenStream {
let mut all_tokens = TokenStream::new();
let type_idents: Vec<_> = type_flags_def
.items_iter()
.map(|type_flag_item| type_flag_item.ident())
.collect();
for flag_set in flag_sets {
// No SetExtend impl is needed for Nil: typeflags_util already provides a generic one for the empty set.
if flag_set.len() == 0 {
continue;
}
for type_ident in &type_idents {
let type_ident = type_ident.clone();
let flag_set_tokens = flag_set.type_name_tokens();
if flag_set.contains_type(&type_ident) {
// The flag set already contains the type, so extending it is a no-op.
let impl_extend_tokens = quote!(
impl ::typeflags_util::SetExtend<#type_ident> for #flag_set_tokens {
type Output = #flag_set_tokens;
}
);
all_tokens.append_all(impl_extend_tokens)
} else {
// The flag set does not contain the type; pick a generated set that includes both.
let output_set = extent_one_type(&type_ident, flag_set, flag_sets).unwrap();
let output_set_tokens = output_set.type_name_tokens();
let impl_extend_tokens = quote!(
impl ::typeflags_util::SetExtend<#type_ident> for #flag_set_tokens {
type Output = #output_set_tokens;
}
);
all_tokens.append_all(impl_extend_tokens);
}
}
}
all_tokens
}
fn extent_one_type<'a>(
type_ident: &Ident,
flag_set: &'a FlagSet,
sets: &'a [FlagSet],
) -> Option<&'a FlagSet> {
sets.iter().find(|bigger_set| {
bigger_set.contains_type(type_ident) && bigger_set.contains_set(flag_set)
})
}
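The search in `extent_one_type` works because `generate_flag_sets` produces a set for every combination of flags, so for any set that lacks a given flag there is always some generated set containing both. The following self-contained model replays that lookup over plain string sets; the names and data are illustrative only.

```rust
use std::collections::BTreeSet;

// Model of the lookup above: among all generated sets, find one that
// contains both the original set and the new item. Because every subset
// of the flag universe is generated, such a set always exists.
fn extend_one<'a>(
    item: &str,
    set: &BTreeSet<&'a str>,
    all_sets: &'a [BTreeSet<&'a str>],
) -> Option<&'a BTreeSet<&'a str>> {
    all_sets
        .iter()
        .find(|bigger| bigger.contains(item) && bigger.is_superset(set))
}

fn main() {
    // All subsets of {Read, Write}, mirroring what generate_flag_sets does.
    let all_sets: Vec<BTreeSet<&str>> = vec![
        BTreeSet::new(),
        BTreeSet::from(["Read"]),
        BTreeSet::from(["Write"]),
        BTreeSet::from(["Read", "Write"]),
    ];
    let read_only = BTreeSet::from(["Read"]);
    // Extending {Read} with Write selects the generated set {Read, Write}.
    let extended = extend_one("Write", &read_only, &all_sets).unwrap();
    assert!(extended.contains("Read") && extended.contains("Write"));
}
```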
/// Export the declarative macro.
pub fn export_declarive_macro(type_flags_def: &TypeFlagDef, flag_sets: &[FlagSet]) -> TokenStream {
let macro_ident = type_flags_def.trait_ident();