add fork support

Author: jiangjianfeng, 2022-09-08 15:37:34 +08:00 (committed by Jianfeng Jiang)
Parent: 893613146f
Commit: 87cdf6acd6
17 changed files with 433 additions and 58 deletions

View File

@@ -73,7 +73,7 @@ impl From<SyscallFrame> for CpuContext {
             rax: syscall.caller.rax as u64,
             rcx: syscall.caller.rcx as u64,
             rsp: syscall.callee.rsp as u64,
-            rip: 0,
+            rip: syscall.caller.rcx as u64,
             rflag: 0,
         },
         fs_base: 0,
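For context on the fix above: on x86-64 the syscall instruction saves the address of the next user instruction in RCX (and RFLAGS in R11), so a context rebuilt from the syscall frame has to take rip from the saved rcx; leaving it at 0 would send the thread to address zero on the way back to user mode. A minimal standalone sketch of that rule, using illustrative stand-in types rather than the real SyscallFrame/CpuContext:

    // Illustrative stand-ins for the real SyscallFrame/CpuContext types.
    struct SavedSyscallFrame {
        caller_rcx: u64,
    }
    struct UserContext {
        rip: u64,
    }

    // `syscall` left the user return address in RCX, so that is where the
    // instruction pointer for the eventual return to user mode must come from.
    fn context_from_frame(frame: &SavedSyscallFrame) -> UserContext {
        UserContext { rip: frame.caller_rcx }
    }

    fn main() {
        let frame = SavedSyscallFrame { caller_rcx: 0x40_1042 };
        assert_eq!(context_from_frame(&frame).rip, 0x40_1042);
    }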

View File

@@ -83,6 +83,7 @@ pub fn init(boot_info: &'static mut BootInfo) {
 }
 fn general_handler(trap_frame: TrapFrame) {
     println!("{:?}", trap_frame);
+    println!("rip = 0x{:x}", trap_frame.rip);
     panic!("couldn't handler trap right now");
 }

View File

@@ -113,7 +113,7 @@ impl MapArea {
             let offset = current_start_address - va.0;
             let copy_len = (va.0 + PAGE_SIZE - current_start_address).min(remain);
             let src = &data[processed..processed + copy_len];
-            let dst = &mut pa.start_pa().kvaddr().get_bytes_array()[offset..copy_len];
+            let dst = &mut pa.start_pa().kvaddr().get_bytes_array()[offset..(offset + copy_len)];
             dst.copy_from_slice(src);
             processed += copy_len;
             remain -= copy_len;
@@ -134,7 +134,7 @@ impl MapArea {
             let offset = start - va.0;
             let copy_len = (va.0 + PAGE_SIZE - start).min(remain);
             let src = &mut data[processed..processed + copy_len];
-            let dst = &pa.start_pa().kvaddr().get_bytes_array()[offset..copy_len];
+            let dst = &pa.start_pa().kvaddr().get_bytes_array()[offset..(offset + copy_len)];
             src.copy_from_slice(dst);
             processed += copy_len;
             remain -= copy_len;
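The bug fixed in both hunks: copy_len was used as an absolute end index, so whenever offset != 0 the destination window had the wrong size, and the slice would panic outright once offset exceeded copy_len. A standalone reproduction of the difference between the two bounds:

    fn main() {
        let page = [7u8; 64];
        let (offset, copy_len) = (16usize, 32usize);

        // Old bound: `copy_len` was the end index, so the window is only
        // 16 bytes here (and indexing panics when offset > copy_len).
        let wrong = &page[offset..copy_len];
        // Fixed bound: start at `offset` and take exactly `copy_len` bytes.
        let right = &page[offset..(offset + copy_len)];

        assert_eq!(wrong.len(), 16);
        assert_eq!(right.len(), 32);
    }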

View File

@@ -12,7 +12,6 @@ use crate::vm::{VmAllocOptions, VmFrameVec};
 use crate::{prelude::*, UPSafeCell};
 use super::processor::{current_task, schedule};
-use super::scheduler::add_task;
 core::arch::global_asm!(include_str!("switch.S"));
 #[derive(Debug, Default, Clone, Copy)]
@@ -182,6 +181,55 @@ impl Task {
         Ok(arc_self)
     }
+    /// create a new task data structure without scheduling it
+    pub fn new<F, T>(
+        task_fn: F,
+        task_data: T,
+        user_space: Option<Arc<UserSpace>>,
+    ) -> Result<Arc<Self>>
+    where
+        F: Fn() + Send + Sync + 'static,
+        T: Any + Send + Sync,
+    {
+        /// Every task enters through this function,
+        /// which executes the task_fn stored in the Task.
+        fn kernel_task_entry() {
+            let current_task = current_task()
+                .expect("no current task, it should have current task in kernel task entry");
+            current_task.func.call(());
+            current_task.exit();
+        }
+        let result = Self {
+            func: Box::new(task_fn),
+            data: Box::new(task_data),
+            user_space,
+            task_inner: unsafe {
+                UPSafeCell::new(TaskInner {
+                    task_status: TaskStatus::Runnable,
+                    ctx: TaskContext::default(),
+                    is_from_trap: false,
+                })
+            },
+            exit_code: 0,
+            kstack: KernelStack::new(),
+        };
+        result.task_inner.exclusive_access().task_status = TaskStatus::Runnable;
+        result.task_inner.exclusive_access().ctx.rip = kernel_task_entry as usize;
+        result.task_inner.exclusive_access().ctx.regs.rsp =
+            result.kstack.frame.end_pa().unwrap().kvaddr().0 as usize
+                - size_of::<usize>()
+                - size_of::<SyscallFrame>();
+        Ok(Arc::new(result))
+    }
+    /// send the task to the scheduler
+    pub fn send_to_scheduler(self: &Arc<Self>) {
+        switch_to_task(self.clone());
+    }
     pub(crate) fn syscall_frame(&self) -> &mut SyscallFrame {
         unsafe {
             &mut *(self
@@ -214,7 +262,7 @@ impl Task {
     }
     /// Returns the task data.
-    pub fn data(&self) -> &dyn Any {
+    pub fn data(&self) -> &Box<dyn Any + Send + Sync> {
         &self.data
     }
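The point of splitting the old Task::spawn into Task::new plus send_to_scheduler is that callers (fork in particular) can finish their bookkeeping before the task ever runs. A minimal usage sketch, assuming the kxos_frame and alloc crates from this repo are in scope; the closure and the unit task data are illustrative:

    use alloc::sync::Arc;
    use kxos_frame::{println, task::Task};

    fn demo() {
        // Step 1: build the task. It owns a kernel stack and a context whose rip
        // points at the kernel_task_entry trampoline, but nothing runs yet.
        let task: Arc<Task> = Task::new(|| println!("hello from a new task"), (), None)
            .expect("task creation failed");

        // ...finish any bookkeeping that must happen before the task runs...

        // Step 2: only now hand it over to the scheduler.
        task.send_to_scheduler();
    }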

View File

@@ -116,6 +116,7 @@ impl<'a> UserMode<'a> {
         if !self.executed {
             self.current.syscall_frame().caller.rcx = self.user_space.cpu_ctx.gp_regs.rip as usize;
             self.current.syscall_frame().callee.rsp = self.user_space.cpu_ctx.gp_regs.rsp as usize;
+            self.current.syscall_frame().caller.rax = self.user_space.cpu_ctx.gp_regs.rax as usize;
             self.executed = true;
         } else {
             if self.current.inner_exclusive_access().is_from_trap {
@@ -141,8 +142,8 @@ impl<'a> UserMode<'a> {
             UserEvent::Exception
         } else {
             self.context = CpuContext::from(*self.current.syscall_frame());
-            println!("[kernel] syscall id:{}", self.context.gp_regs.rax);
-            println!("[kernel] rsp: 0x{:x}", self.context.gp_regs.rsp);
+            debug!("[kernel] syscall id:{}", self.context.gp_regs.rax);
+            debug!("[kernel] rsp: 0x{:x}", self.context.gp_regs.rsp);
             UserEvent::Syscall
         }
     }

View File

@@ -98,6 +98,15 @@ impl Default for VmSpace {
     }
 }
+impl Clone for VmSpace {
+    fn clone(&self) -> Self {
+        let memory_set = self.memory_set.exclusive_access().clone();
+        VmSpace {
+            memory_set: unsafe { UPSafeCell::new(memory_set) },
+        }
+    }
+}
 impl VmIo for VmSpace {
     fn read_bytes(&self, vaddr: usize, buf: &mut [u8]) -> Result<()> {
         self.memory_set.exclusive_access().read_bytes(vaddr, buf)
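fork relies on this Clone: the inner MemorySet is copied eagerly behind the UPSafeCell, so the child ends up with a fully independent address space (the fork code later in this commit flags copy-on-write as a future optimization). The same clone-the-interior pattern as a self-contained std reduction, with RefCell and Vec standing in for UPSafeCell and MemorySet:

    use std::cell::RefCell;

    // Stand-in for VmSpace { memory_set: UPSafeCell<MemorySet> }.
    struct Space {
        memory_set: RefCell<Vec<u8>>, // RefCell plays the role of UPSafeCell here
    }

    impl Clone for Space {
        fn clone(&self) -> Self {
            // Borrow the inner state, clone it, and wrap the copy in a fresh cell,
            // mirroring what the VmSpace impl above does via exclusive_access().
            let inner = self.memory_set.borrow().clone();
            Space {
                memory_set: RefCell::new(inner),
            }
        }
    }

    fn main() {
        let parent = Space { memory_set: RefCell::new(vec![1, 2, 3]) };
        let child = parent.clone();
        child.memory_set.borrow_mut().push(4);           // mutate the copy...
        assert_eq!(parent.memory_set.borrow().len(), 3); // ...the original is untouched
    }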

View File

@@ -6,9 +6,11 @@
 #![feature(const_btree_new)]
 #![feature(cstr_from_bytes_until_nul)]
-use kxos_frame::{info, println, task::Task};
+use kxos_frame::{info, println};
 use process::Process;
+use crate::process::current_pid;
 extern crate alloc;
 mod memory;
@@ -20,28 +22,44 @@ pub fn init() {
     process::fifo_scheduler::init();
 }
-pub fn init_task() {
-    println!("[kernel] Hello world from init task!");
-    let process = Process::spawn_kernel_task(|| {
+pub fn init_process() {
+    println!("[kernel] Spawn init process!");
+    let process = Process::spawn_kernel_process(|| {
         println!("[kernel] Hello world from kernel!");
+        let pid = current_pid();
+        info!("current pid = {}", pid);
     });
-    info!("spawn kernel process, pid = {}", process.pid());
+    info!(
+        "[kxos-std/lib.rs] spawn kernel process, pid = {}",
+        process.pid()
+    );
-    let elf_file_content = read_elf_content();
-    let process = Process::spawn_from_elf(elf_file_content);
-    info!("spwan user process, pid = {}", process.pid());
+    let hello_world_content = read_hello_world_content();
+    let process = Process::spawn_user_process(hello_world_content);
+    info!(
+        "[kxos-std/lib.rs] spawn hello world process, pid = {}",
+        process.pid()
+    );
+    let fork_content = read_fork_content();
+    let process = Process::spawn_user_process(fork_content);
+    info!("spawn fork process, pid = {}", process.pid());
     loop {}
 }
 /// first process never return
 pub fn run_first_process() -> ! {
-    let elf_file_content = read_elf_content();
-    Task::spawn(init_task, None::<u8>, None).expect("Spawn first task failed");
+    let elf_file_content = read_hello_world_content();
+    Process::spawn_kernel_process(init_process);
     unreachable!()
 }
-fn read_elf_content() -> &'static [u8] {
+pub fn read_hello_world_content() -> &'static [u8] {
     include_bytes!("../../kxos-user/hello_world/hello_world")
 }
+fn read_fork_content() -> &'static [u8] {
+    include_bytes!("../../kxos-user/fork/fork")
+}

View File

@@ -4,6 +4,7 @@ use alloc::vec;
 use alloc::vec::Vec;
 use kxos_frame::{
     config::PAGE_SIZE,
+    debug,
     vm::{Vaddr, VmAllocOptions, VmFrameVec, VmIo, VmMapOptions, VmPerm, VmSpace},
     Error,
 };
@@ -161,6 +162,10 @@ impl<'a> ElfLoadInfo<'a> {
     pub fn copy_and_map(&self, vm_space: &VmSpace) -> Result<(), ElfError> {
         for segment in self.segments.iter() {
+            debug!(
+                "map segment: 0x{:x}-0x{:x}",
+                segment.range.start, segment.range.end
+            );
             segment.copy_and_map(vm_space)?;
         }
         Ok(())

View File

@@ -1,59 +1,264 @@
-use core::sync::atomic::{AtomicUsize, Ordering};
-use alloc::sync::Arc;
+use core::sync::atomic::{AtomicI32, AtomicUsize, Ordering};
+use alloc::vec;
+use alloc::{
+    sync::{Arc, Weak},
+    vec::Vec,
+};
+use kxos_frame::cpu::CpuContext;
 // use kxos_frame::{sync::SpinLock, task::Task, user::UserSpace};
-use kxos_frame::task::Task;
+use kxos_frame::{
+    debug,
+    task::Task,
+    user::UserSpace,
+    vm::{VmIo, VmSpace},
+};
+use spin::Mutex;
-use self::task::spawn_user_task_from_elf;
+use crate::process::task::create_forked_task;
+use self::status::ProcessStatus;
+use self::task::create_user_task_from_elf;
 pub mod fifo_scheduler;
+pub mod status;
 pub mod task;
 // static PROCESSES: SpinLock<BTreeMap<usize, Arc<Process>>> = SpinLock::new(BTreeMap::new());
 static PID_ALLOCATOR: AtomicUsize = AtomicUsize::new(0);
+const CHILDREN_CAPACITY: usize = 16;
 /// Process stands for a set of tasks that shares the same userspace.
 /// Currently, we only support one task inside a process.
 pub struct Process {
+    // Immutable Part
     pid: usize,
     task: Arc<Task>,
-    exit_code: i32,
-    // user_space: Option<Arc<UserSpace>>,
-    // TODO: childs, parent, files,
+    user_space: Option<Arc<UserSpace>>,
+    // Mutable Part
+    /// The exit code
+    exit_code: AtomicI32,
+    /// Process status
+    status: Mutex<ProcessStatus>,
+    /// Parent process
+    parent: Mutex<Option<Weak<Process>>>,
+    /// Children processes
+    children: Mutex<Vec<Arc<Process>>>,
 }
 impl Process {
-    pub fn spawn_from_elf(elf_file_content: &[u8]) -> Self {
-        let pid = new_pid();
-        let task = spawn_user_task_from_elf(elf_file_content);
-        let exit_code = 0;
+    fn new(pid: usize, task: Arc<Task>, user_space: Option<Arc<UserSpace>>) -> Self {
+        let parent = if pid == 0 {
+            debug!("Init process does not have a parent");
+            None
+        } else {
+            debug!("All processes except init should have a parent");
+            let current_process = current_process();
+            Some(Arc::downgrade(&current_process))
+        };
+        let children = Vec::with_capacity(CHILDREN_CAPACITY);
         Self {
             pid,
             task,
-            exit_code,
+            user_space,
+            exit_code: AtomicI32::new(0),
+            status: Mutex::new(ProcessStatus::Runnable),
+            parent: Mutex::new(parent),
+            children: Mutex::new(children),
         }
     }
-    pub fn spawn_kernel_task<F>(task_fn: F) -> Self
+    /// init a user process and send the process to the scheduler
+    pub fn spawn_user_process(elf_file_content: &'static [u8]) -> Arc<Self> {
+        let process = Process::create_user_process(elf_file_content);
+        process.send_to_scheduler();
+        process
+    }
+    /// init a kernel process and send the process to the scheduler
+    pub fn spawn_kernel_process<F>(task_fn: F) -> Arc<Self>
+    where
+        F: Fn() + Send + Sync + 'static,
+    {
+        let process = Process::create_kernel_process(task_fn);
+        process.send_to_scheduler();
+        process
+    }
+    fn create_user_process(elf_file_content: &'static [u8]) -> Arc<Self> {
+        let pid = new_pid();
+        Arc::new_cyclic(|weak_process_ref| {
+            let weak_process = weak_process_ref.clone();
+            let task = create_user_task_from_elf(elf_file_content, weak_process);
+            let user_space = task.user_space().map(|user_space| user_space.clone());
+            Process::new(pid, task, user_space)
+        })
+    }
+    fn create_kernel_process<F>(task_fn: F) -> Arc<Self>
     where
         F: Fn() + Send + Sync + 'static,
     {
         let pid = new_pid();
-        let task = Task::spawn(task_fn, pid, None).expect("spawn kernel task failed");
-        let exit_code = 0;
-        Self {
-            pid,
-            task,
-            exit_code,
-        }
+        Arc::new_cyclic(|weak_process_ref| {
+            let weak_process = weak_process_ref.clone();
+            let task = Task::new(task_fn, weak_process, None).expect("spawn kernel task failed");
+            Process::new(pid, task, None)
+        })
     }
     pub fn pid(&self) -> usize {
         self.pid
     }
+    fn add_child(&self, child: Arc<Process>) {
+        // debug!("==============Add child: {}", child.pid());
+        self.children.lock().push(child);
+    }
+    fn set_parent(&self, parent: Weak<Process>) {
+        let _ = self.parent.lock().insert(parent);
+    }
+    pub fn set_exit_code(&self, exit_code: i32) {
+        self.exit_code.store(exit_code, Ordering::Relaxed);
+    }
+    /// Exit the current process:
+    /// set its status to Zombie
+    /// and move all of its children to the init process.
+    pub fn exit(&self) {
+        self.status.lock().set_zombie();
+        // move children to the init process
+        let current_process = current_process();
+        if !current_process.is_init_process() {
+            let init_process = get_init_process();
+            for child in self.children.lock().drain(..) {
+                child.set_parent(Arc::downgrade(&init_process));
+                init_process.add_child(child);
+            }
+        }
+    }
+    fn is_init_process(&self) -> bool {
+        self.pid == 0
+    }
+    /// start to run the current process
+    fn send_to_scheduler(self: &Arc<Self>) {
+        self.task.send_to_scheduler();
+    }
+    fn user_space(&self) -> Option<&Arc<UserSpace>> {
+        self.user_space.as_ref()
+    }
+    pub fn has_child(&self) -> bool {
+        self.children.lock().len() != 0
+    }
+    pub fn get_child_process(&self) -> Arc<Process> {
+        let children_lock = self.children.lock();
+        let child_len = children_lock.len();
+        assert_eq!(1, child_len, "Process can only have one child now");
+        children_lock
+            .iter()
+            .nth(0)
+            .expect("[Internal Error]")
+            .clone()
+    }
+    /// Workaround: this function only creates a new process; it does not schedule the process to run.
+    pub fn fork(parent_context: CpuContext) -> Arc<Process> {
+        let child_pid = new_pid();
+        let current = current_process();
+        let parent_user_space = match current.user_space() {
+            None => None,
+            Some(user_space) => Some(user_space.clone()),
+        }
+        .expect("User task should always have user space");
+        // child process vm space
+        // FIXME: COPY ON WRITE can be used here
+        let parent_vm_space = parent_user_space.vm_space();
+        let child_vm_space = parent_user_space.vm_space().clone();
+        check_fork_vm_space(parent_vm_space, &child_vm_space);
+        // child process cpu context
+        let mut child_cpu_context = parent_context.clone();
+        debug!("parent cpu context: {:?}", child_cpu_context.gp_regs);
+        child_cpu_context.gp_regs.rax = 0; // Set the return value of the child process
+        let child_user_space = Arc::new(UserSpace::new(child_vm_space, child_cpu_context));
+        debug!("before spawn child task");
+        debug!("current pid: {}", current_pid());
+        debug!("child process pid: {}", child_pid);
+        debug!("rip = 0x{:x}", child_cpu_context.gp_regs.rip);
+        let child = Arc::new_cyclic(|child_process_ref| {
+            let weak_child_process = child_process_ref.clone();
+            let child_task = create_forked_task(child_user_space.clone(), weak_child_process);
+            Process::new(child_pid, child_task, Some(child_user_space))
+        });
+        current_process().add_child(child.clone());
+        // child.send_to_scheduler();
+        child
+    }
+}
+pub fn current_process() -> Arc<Process> {
+    let task = Task::current();
+    let process = task
+        .data()
+        .downcast_ref::<Weak<Process>>()
+        .expect("[Internal Error] Task data should point to a Weak<Process>");
+    process
+        .upgrade()
+        .expect("[Internal Error] current process cannot be None")
+}
+pub fn current_pid() -> usize {
+    let process = current_process();
+    let pid = process.pid();
+    pid
+}
+/// Get the init process
+pub fn get_init_process() -> Arc<Process> {
+    let mut current_process = current_process();
+    while current_process.pid() != 0 {
+        let process = current_process
+            .parent
+            .lock()
+            .as_ref()
+            .map(|current| current.upgrade())
+            .flatten()
+            .expect("[Internal Error] init process cannot be None");
+        current_process = process;
+    }
+    current_process
 }
 /// create a new pid for new process
 fn new_pid() -> usize {
     PID_ALLOCATOR.fetch_add(1, Ordering::Release)
 }
+/// debug use
+fn check_fork_vm_space(parent_vm_space: &VmSpace, child_vm_space: &VmSpace) {
+    let mut buffer1 = vec![0u8; 0x78];
+    let mut buffer2 = vec![0u8; 0x78];
+    parent_vm_space
+        .read_bytes(0x401000, &mut buffer1)
+        .expect("read buffer1 failed");
+    child_vm_space
+        .read_bytes(0x401000, &mut buffer2)
+        .expect("read buffer2 failed");
+    for len in 0..buffer1.len() {
+        assert_eq!(buffer1[len], buffer2[len]);
+    }
+    debug!("check fork vm space succeeded.");
+}
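One detail worth calling out: create_user_process, create_kernel_process, and fork all go through Arc::new_cyclic because the task needs a Weak back-reference to the process that owns it, while the process can only be wrapped in an Arc after its task exists. A runnable std-only reduction of that pattern; the Process/Task stand-ins here are illustrative, not the real types:

    use std::sync::{Arc, Weak};

    // Illustrative stand-ins; the real Task stores its Weak<Process> as task data.
    struct Task {
        owner: Weak<Process>,
    }
    struct Process {
        pid: usize,
        task: Task,
    }

    fn create_process(pid: usize) -> Arc<Process> {
        // The closure receives a Weak pointing at the Arc that is still being
        // built, so the task can reference its process without a strong cycle.
        Arc::new_cyclic(|weak_process| Process {
            pid,
            task: Task {
                owner: weak_process.clone(),
            },
        })
    }

    fn main() {
        let process = create_process(1);
        let back = process.task.owner.upgrade().expect("process is alive");
        assert_eq!(back.pid, 1);
    }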

View File

@@ -0,0 +1,11 @@
+#[derive(Debug, Clone, Copy)]
+pub enum ProcessStatus {
+    Runnable,
+    Zombie,
+}
+impl ProcessStatus {
+    pub fn set_zombie(&mut self) {
+        *self = ProcessStatus::Zombie;
+    }
+}

View File

@@ -1,4 +1,6 @@
-use alloc::sync::Arc;
+use core::sync::atomic::AtomicUsize;
+use alloc::sync::{Arc, Weak};
 use kxos_frame::{
     cpu::CpuContext,
     debug,
@@ -7,9 +9,17 @@ use kxos_frame::{
     vm::VmSpace,
 };
-use crate::{memory::load_elf_to_vm_space, syscall::syscall_handler};
-pub fn spawn_user_task_from_elf(elf_file_content: &[u8]) -> Arc<Task> {
+use crate::{
+    memory::load_elf_to_vm_space,
+    process::{current_pid, current_process},
+    syscall::syscall_handler,
+};
+use super::Process;
+static COUNTER: AtomicUsize = AtomicUsize::new(0);
+pub fn create_user_task_from_elf(elf_file_content: &[u8], process: Weak<Process>) -> Arc<Task> {
     let vm_space = VmSpace::new();
     let elf_load_info = load_elf_to_vm_space(elf_file_content, &vm_space).expect("Load Elf failed");
     let mut cpu_ctx = CpuContext::default();
@@ -20,12 +30,46 @@ pub fn spawn_user_task_from_elf(elf_file_content: &[u8]) -> Arc<Task> {
     cpu_ctx.gp_regs.rsp = elf_load_info.user_stack_top();
     let user_space = Arc::new(UserSpace::new(vm_space, cpu_ctx));
     fn user_task_entry() {
         let cur = Task::current();
         let user_space = cur.user_space().expect("user task should have user space");
         let mut user_mode = UserMode::new(user_space);
+        debug!("in user task");
+        loop {
+            let user_event = user_mode.execute();
+            debug!("return from user mode");
+            debug!("current pid = {}", current_pid());
+            let context = user_mode.context_mut();
+            if let HandlerResult::Exit = handle_user_event(user_event, context) {
+                // FIXME: How to set the task status? How to set the exit code of the process?
+                break;
+            }
+        }
+        let current_process = current_process();
+        // Workaround: we schedule all child tasks to run when the current process exits.
+        if current_process.has_child() {
+            debug!("*********schedule child process**********");
+            let child_process = current_process.get_child_process();
+            child_process.send_to_scheduler();
+            debug!("*********return to parent process*********");
+        }
+        // exit current process
+        current_process.exit();
+    }
+    Task::new(user_task_entry, process, Some(user_space)).expect("spawn user task failed.")
+}
+pub fn create_forked_task(userspace: Arc<UserSpace>, process: Weak<Process>) -> Arc<Task> {
+    fn user_task_entry() {
+        let cur = Task::current();
+        let user_space = cur.user_space().expect("user task should have user space");
+        let mut user_mode = UserMode::new(user_space);
+        debug!("In forked task");
+        debug!("[forked task] pid = {}", current_pid());
+        debug!("[forked task] rip = 0x{:x}", user_space.cpu_ctx.gp_regs.rip);
+        debug!("[forked task] rsp = 0x{:x}", user_space.cpu_ctx.gp_regs.rsp);
+        debug!("[forked task] rax = 0x{:x}", user_space.cpu_ctx.gp_regs.rax);
         loop {
             let user_event = user_mode.execute();
             debug!("return from user mode");
@@ -37,8 +81,7 @@ pub fn spawn_user_task_from_elf(elf_file_content: &[u8]) -> Arc<Task> {
         }
     }
-    // FIXME: set the correct type when task has no data
-    Task::spawn(user_task_entry, None::<u8>, Some(user_space)).expect("spawn user task failed.")
+    Task::new(user_task_entry, process, Some(userspace)).expect("spawn task failed")
 }
 fn handle_user_event(user_event: UserEvent, context: &mut CpuContext) -> HandlerResult {
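Both task entry functions above share one shape: repeatedly drop into user mode, classify the event that comes back, and leave the loop once the handler reports an exit. A self-contained reduction of that control flow, with the enums mirroring UserEvent/HandlerResult and the closures standing in for user_mode.execute() and handle_user_event:

    #[allow(dead_code)]
    enum UserEvent { Syscall, Exception }
    enum HandlerResult { Continue, Exit }

    // Keep re-entering "user mode" until the event handler asks for an exit.
    fn run_task(
        mut execute: impl FnMut() -> UserEvent,
        mut handle: impl FnMut(UserEvent) -> HandlerResult,
    ) {
        loop {
            let event = execute();
            if let HandlerResult::Exit = handle(event) {
                break;
            }
        }
    }

    fn main() {
        let mut remaining_syscalls = 3;
        run_task(
            || UserEvent::Syscall, // stand-in for user_mode.execute()
            |_event| {
                // stand-in for handle_user_event: exit after three "syscalls"
                remaining_syscalls -= 1;
                if remaining_syscalls == 0 { HandlerResult::Exit } else { HandlerResult::Continue }
            },
        );
        assert_eq!(remaining_syscalls, 0);
    }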

View File

@@ -1,14 +1,18 @@
+use alloc::borrow::ToOwned;
 use alloc::vec;
 use alloc::{sync::Arc, vec::Vec};
 use kxos_frame::cpu::CpuContext;
-use kxos_frame::Error;
+use kxos_frame::{debug, Error};
 use kxos_frame::{task::Task, user::UserSpace, vm::VmIo};
 use kxos_frame::info;
 use crate::process::task::HandlerResult;
+use crate::process::{current_pid, current_process, Process};
 const SYS_WRITE: u64 = 1;
+const SYS_GETPID: u64 = 39;
+const SYS_FORK: u64 = 57;
 const SYS_EXIT: u64 = 60;
 pub struct SyscallArgument {
@@ -40,7 +44,7 @@ impl SyscallArgument {
 pub fn syscall_handler(context: &mut CpuContext) -> HandlerResult {
     let syscall_frame = SyscallArgument::new_from_context(context);
-    let syscall_return = syscall_dispatch(syscall_frame.syscall_number, syscall_frame.args);
+    let syscall_return = syscall_dispatch(syscall_frame.syscall_number, syscall_frame.args, context.to_owned());
     match syscall_return {
         SyscallResult::Return(return_value) => {
@@ -52,9 +56,11 @@ pub fn syscall_handler(context: &mut CpuContext) -> HandlerResult {
     }
 }
-pub fn syscall_dispatch(syscall_number: u64, args: [u64; 6]) -> SyscallResult {
+pub fn syscall_dispatch(syscall_number: u64, args: [u64; 6], context: CpuContext) -> SyscallResult {
     match syscall_number {
         SYS_WRITE => sys_write(args[0], args[1], args[2]),
+        SYS_GETPID => sys_getpid(),
+        SYS_FORK => sys_fork(context),
         SYS_EXIT => sys_exit(args[0] as _),
         _ => panic!("Unsupported syscall number: {}", syscall_number),
     }
@@ -62,6 +68,7 @@ pub fn syscall_dispatch(syscall_number: u64, args: [u64; 6]) -> SyscallResult {
 pub fn sys_write(fd: u64, user_buf_ptr: u64, user_buf_len: u64) -> SyscallResult {
     // only suppprt STDOUT now.
+    debug!("[syscall][id={}][SYS_WRITE]", SYS_WRITE);
     const STDOUT: u64 = 1;
     if fd == STDOUT {
         let task = Task::current();
+pub fn sys_getpid() -> SyscallResult {
+    debug!("[syscall][id={}][SYS_GETPID]", SYS_GETPID);
+    let pid = current_pid();
+    info!("[sys_getpid]: pid = {}", pid);
+    SyscallResult::Return(pid as i32)
+}
+pub fn sys_fork(parent_context: CpuContext) -> SyscallResult {
+    debug!("[syscall][id={}][SYS_FORK]", SYS_FORK);
+    let child_process = Process::fork(parent_context);
+    SyscallResult::Return(child_process.pid() as i32)
+}
 pub fn sys_exit(exit_code: i32) -> SyscallResult {
+    debug!("[syscall][id={}][SYS_EXIT]", SYS_EXIT);
+    current_process().set_exit_code(exit_code);
     SyscallResult::Exit(exit_code)
 }
@@ -88,6 +110,8 @@ fn copy_bytes_from_user(
 ) -> Result<Vec<u8>, Error> {
     let vm_space = user_space.vm_space();
     let mut buffer = vec![0u8; user_buf_len];
+    debug!("user_buf_ptr: 0x{:x}", user_buf_ptr);
+    debug!("user_buf_len: {}", user_buf_len);
     vm_space.read_bytes(user_buf_ptr, &mut buffer)?;
     Ok(buffer)
 }
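The wiring for a syscall here is always the same three pieces: a SYS_* constant, a match arm in syscall_dispatch, and a sys_* handler that returns a SyscallResult. A self-contained reduction of that shape; the syscall numbers follow the Linux x86-64 ABI as in the file above, and the handler bodies are placeholder stubs rather than the real implementations:

    // Self-contained reduction; the real handlers live in kxos-std's syscall module.
    const SYS_WRITE: u64 = 1;
    const SYS_GETPID: u64 = 39;
    const SYS_FORK: u64 = 57;
    const SYS_EXIT: u64 = 60;

    enum SyscallResult {
        Return(i32),
        Exit(i32),
    }

    fn syscall_dispatch(syscall_number: u64, args: [u64; 6]) -> SyscallResult {
        match syscall_number {
            SYS_WRITE => SyscallResult::Return(args[2] as i32), // pretend every byte was written
            SYS_GETPID => SyscallResult::Return(42),            // placeholder pid
            SYS_FORK => SyscallResult::Return(43),              // placeholder child pid
            SYS_EXIT => SyscallResult::Exit(args[0] as i32),
            _ => panic!("Unsupported syscall number: {}", syscall_number),
        }
    }

    fn main() {
        match syscall_dispatch(SYS_EXIT, [7, 0, 0, 0, 0, 0]) {
            SyscallResult::Exit(code) => assert_eq!(code, 7),
            _ => unreachable!(),
        }
    }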

View File

@@ -1,5 +1,5 @@
 .PHONY: build clean run
-build: hello_world.s
+build: fork.s
 	@gcc -static -nostdlib fork.s -o fork
 clean:
 	@rm fork

View File

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1ad189cc084511d777ec2bcb211ca7cdbfb55f00320953da71a6a37f46404dd6
-size 9488
+oid sha256:6d1d5aa2ef1105eb716219002763836a47e95cef22997064f6f60bfd4c7a43de
+size 9528

View File

@@ -9,16 +9,22 @@ _start:
     cmp $0, %rax
     je _child           # child process
     jmp _parent         # parent process
 _parent:
+    call get_pid
     call print_parent_message
     call exit
 _child:
+    call get_pid
     call print_child_message
     call exit
 exit:
     mov $60, %rax       # syscall number of exit
     mov $0, %rdi        # exit code
     syscall
+get_pid:
+    mov $39, %rax
+    syscall
+    ret
 print_hello_world:
     mov $message, %rsi  # address of message
     mov $message_end, %rdx
@@ -40,9 +46,9 @@ _print_message:
     mov $1, %rdi        # stdout
     syscall
     ret
 .section .rodata
 message:
-    .ascii "Hello, world\n"
+    .ascii "Hello, world in fork\n"
 message_end:
 message_parent:
     .ascii "Hello world from parent\n"

View File

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0ac54a2485e87769b102225e0a2d7c25e449a01bd0b2504a164fe1ce205acb13
-size 9072
+oid sha256:f56fb5cf05f234578b13c8d73f2d29568f6f513602f1ea0b71a0dbf0cf8f60f8
+size 9104

View File

@@ -8,6 +8,10 @@ _start:
     mov $60, %rax   # syscall number of exit
     mov $0, %rdi    # exit code
     syscall
+get_pid:
+    mov $39, %rax
+    syscall
+    ret
 print_message:
     mov $1, %rax    # syscall number of write
     mov $1, %rdi    # stdout