Refactor process implementation

This commit is contained in:
Jianfeng Jiang
2022-12-15 14:53:43 +08:00
parent 6651a642d3
commit 7e6591aab2
14 changed files with 182 additions and 128 deletions

View File

@ -38,6 +38,21 @@ pub struct CpuContext {
/// trap information, this field is all zero when it is syscall
pub trap_information: TrapInformation,
}
impl CpuContext {
    /// Stores `value` into the `rax` slot of the saved general-purpose
    /// registers (used to deliver syscall return values).
    pub fn set_rax(&mut self, value: u64) {
        self.gp_regs.rax = value;
    }

    /// Stores `value` into the saved stack pointer (`rsp`).
    pub fn set_rsp(&mut self, value: u64) {
        self.gp_regs.rsp = value;
    }

    /// Stores `value` into the saved instruction pointer (`rip`).
    pub fn set_rip(&mut self, value: u64) {
        self.gp_regs.rip = value;
    }
}
#[derive(Clone, Default, Copy, Debug)]
#[repr(C)]
pub struct TrapInformation {

View File

@ -1,6 +1,5 @@
use super::{page_table::PageTable, *};
use crate::prelude::*;
use crate::vm::VmIo;
use crate::{
config::PAGE_SIZE,
mm::address::is_aligned,

View File

@ -5,7 +5,8 @@ mod scheduler;
#[allow(clippy::module_inception)]
mod task;
pub(crate) use self::processor::{get_idle_task_cx_ptr, schedule};
pub(crate) use self::processor::get_idle_task_cx_ptr;
pub use self::processor::schedule;
pub use self::scheduler::{set_scheduler, Scheduler};
pub(crate) use self::task::context_switch;
pub(crate) use self::task::TaskContext;

View File

@ -222,7 +222,7 @@ impl Task {
Ok(Arc::new(result))
}
pub fn send_to_scheduler(self: &Arc<Self>) {
pub fn run(self: &Arc<Self>) {
switch_to_task(self.clone());
}

View File

@ -21,7 +21,6 @@ use crate::{
prelude::*,
user_apps::{get_busybox_app, UserApp},
};
use jinux_frame::{info, println};
use process::Process;
use crate::{
@ -42,6 +41,7 @@ pub mod prelude;
mod process;
pub mod rights;
pub mod syscall;
pub mod thread;
pub mod tty;
mod user_apps;
mod util;
@ -63,9 +63,9 @@ pub fn init_process() {
println!("[kernel] Hello world from kernel!");
let current = current!();
let pid = current.pid();
info!("current pid = {}", pid);
debug!("current pid = {}", pid);
let ppid = current.parent().unwrap().pid();
info!("current ppid = {}", ppid);
debug!("current ppid = {}", ppid);
});
info!(
"[jinux-std/lib.rs] spawn kernel process, pid = {}",

View File

@ -6,7 +6,8 @@ use jinux_frame::{
use crate::{
prelude::*,
process::{new_pid, signal::sig_queues::SigQueues, table, task::create_new_task},
process::{new_pid, signal::sig_queues::SigQueues, table},
thread::Thread,
};
use super::Process;
@ -127,15 +128,17 @@ pub fn clone_child(parent_context: CpuContext, clone_args: CloneArgs) -> Result<
let child = Arc::new_cyclic(|child_process_ref| {
let weak_child_process = child_process_ref.clone();
let child_task = create_new_task(child_user_space.clone(), weak_child_process);
let tid = child_pid;
let child_thread =
Thread::new_user_thread(tid, child_user_space.clone(), weak_child_process);
Process::new(
child_pid,
child_task,
vec![child_thread],
child_file_name,
child_user_vm,
Some(child_user_space),
// Some(child_user_space),
Some(child_root_vmar),
None,
Weak::new(),
child_file_table,
child_sig_dispositions,
child_sig_queues,
@ -143,13 +146,7 @@ pub fn clone_child(parent_context: CpuContext, clone_args: CloneArgs) -> Result<
)
});
// Inherit parent's process group
let parent_process_group = current
.process_group()
.lock()
.as_ref()
.map(|ppgrp| ppgrp.upgrade())
.flatten()
.unwrap();
let parent_process_group = current.process_group().lock().upgrade().unwrap();
parent_process_group.add_process(child.clone());
child.set_process_group(Arc::downgrade(&parent_process_group));

View File

@ -10,14 +10,14 @@ use self::signal::sig_mask::SigMask;
use self::signal::sig_queues::SigQueues;
use self::signal::signals::kernel::KernelSignal;
use self::status::ProcessStatus;
use self::task::create_user_task_from_elf;
use crate::fs::file_table::FileTable;
use crate::prelude::*;
use crate::rights::Full;
use crate::thread::Thread;
use crate::tty::get_console;
use crate::vm::vmar::Vmar;
use jinux_frame::sync::WaitQueue;
use jinux_frame::{task::Task, user::UserSpace};
use jinux_frame::task::Task;
pub mod clone;
pub mod elf;
@ -30,7 +30,6 @@ pub mod process_vm;
pub mod signal;
pub mod status;
pub mod table;
pub mod task;
pub mod wait;
static PID_ALLOCATOR: AtomicI32 = AtomicI32::new(0);
@ -44,9 +43,9 @@ pub type ExitCode = i32;
pub struct Process {
// Immutable Part
pid: Pid,
task: Arc<Task>,
threads: Vec<Arc<Thread>>,
filename: Option<CString>,
user_space: Option<Arc<UserSpace>>,
// user_space: Option<Arc<UserSpace>>,
user_vm: Option<UserVm>,
root_vmar: Option<Vmar<Full>>,
/// wait for child status changed
@ -60,11 +59,11 @@ pub struct Process {
/// Process status
status: Mutex<ProcessStatus>,
/// Parent process
parent: Mutex<Option<Weak<Process>>>,
parent: Mutex<Weak<Process>>,
/// Children processes
children: Mutex<BTreeMap<Pid, Arc<Process>>>,
/// Process group
process_group: Mutex<Option<Weak<ProcessGroup>>>,
process_group: Mutex<Weak<ProcessGroup>>,
/// Process name
process_name: Mutex<Option<ProcessName>>,
/// File table
@ -82,35 +81,28 @@ pub struct Process {
impl Process {
/// returns the current process
pub fn current() -> Arc<Process> {
let task = Task::current();
let process = task
.data()
.downcast_ref::<Weak<Process>>()
.expect("[Internal Error] task data should points to weak<process>");
process
.upgrade()
.expect("[Internal Error] current process cannot be None")
let current_thread = Thread::current();
current_thread.process()
}
/// create a new process(not schedule it)
pub fn new(
pid: Pid,
task: Arc<Task>,
threads: Vec<Arc<Thread>>,
exec_filename: Option<CString>,
user_vm: Option<UserVm>,
user_space: Option<Arc<UserSpace>>,
root_vmar: Option<Vmar<Full>>,
process_group: Option<Weak<ProcessGroup>>,
process_group: Weak<ProcessGroup>,
file_table: FileTable,
sig_dispositions: SigDispositions,
sig_queues: SigQueues,
sig_mask: SigMask,
) -> Self {
let parent = if pid == 0 {
None
Weak::new()
} else {
let current_process = current!();
Some(Arc::downgrade(&current_process))
Arc::downgrade(&current_process)
};
let children = BTreeMap::new();
let waiting_children = WaitQueue::new();
@ -122,9 +114,8 @@ impl Process {
});
Self {
pid,
task,
threads,
filename: exec_filename,
user_space,
user_vm,
root_vmar,
waiting_children,
@ -162,7 +153,7 @@ impl Process {
// FIXME: How to determine the fg process group?
let pgid = process.pgid();
get_console().set_fg(pgid);
process.send_to_scheduler();
process.run();
process
}
@ -176,7 +167,7 @@ impl Process {
current!().exit(0);
};
let process = Process::create_kernel_process(process_fn);
process.send_to_scheduler();
process.run();
process
}
@ -192,15 +183,16 @@ impl Process {
let weak_process = weak_process_ref.clone();
let cloned_filename = Some(filename.clone());
let root_vmar = Vmar::<Full>::new_root().unwrap();
let task = create_user_task_from_elf(
let tid = pid;
let thread = Thread::new_user_thread_from_elf(
&root_vmar,
filename,
elf_file_content,
weak_process,
tid,
argv,
envp,
);
let user_space = task.user_space().map(|user_space| user_space.clone());
let user_vm = UserVm::new();
let file_table = FileTable::new_with_stdio();
let sig_dispositions = SigDispositions::new();
@ -208,12 +200,11 @@ impl Process {
let sig_mask = SigMask::new_empty();
Process::new(
pid,
task,
vec![thread],
cloned_filename,
Some(user_vm),
user_space,
Some(root_vmar),
None,
Weak::new(),
file_table,
sig_dispositions,
sig_queues,
@ -237,19 +228,19 @@ impl Process {
let pid = new_pid();
let kernel_process = Arc::new_cyclic(|weak_process_ref| {
let weak_process = weak_process_ref.clone();
let task = Task::new(task_fn, weak_process, None).expect("spawn kernel task failed");
let tid = pid;
let thread = Thread::new_kernel_thread(tid, task_fn, weak_process);
let file_table = FileTable::new();
let sig_dispositions = SigDispositions::new();
let sig_queues = SigQueues::new();
let sig_mask = SigMask::new_empty();
Process::new(
pid,
task,
None,
None,
vec![thread],
None,
None,
None,
Weak::new(),
file_table,
sig_dispositions,
sig_queues,
@ -271,13 +262,7 @@ impl Process {
/// returns the process group id of the process
pub fn pgid(&self) -> Pgid {
if let Some(process_group) = self
.process_group
.lock()
.as_ref()
.map(|process_group| process_group.upgrade())
.flatten()
{
if let Some(process_group) = self.process_group.lock().upgrade() {
process_group.pgid()
} else {
0
@ -288,7 +273,7 @@ impl Process {
&self.process_name
}
pub fn process_group(&self) -> &Mutex<Option<Weak<ProcessGroup>>> {
pub fn process_group(&self) -> &Mutex<Weak<ProcessGroup>> {
&self.process_group
}
@ -303,18 +288,16 @@ impl Process {
}
fn set_parent(&self, parent: Weak<Process>) {
let _ = self.parent.lock().insert(parent);
*self.parent.lock() = parent;
}
/// Set process group for current process. If old process group exists,
/// remove current process from old process group.
pub fn set_process_group(&self, process_group: Weak<ProcessGroup>) {
if let Some(old_process_group) = &*self.process_group().lock() {
if let Some(old_process_group) = old_process_group.upgrade() {
old_process_group.remove_process(self.pid());
}
if let Some(old_process_group) = self.process_group.lock().upgrade() {
old_process_group.remove_process(self.pid());
}
let _ = self.process_group.lock().insert(process_group);
*self.process_group.lock() = process_group;
}
pub fn file_table(&self) -> &Mutex<FileTable> {
@ -331,11 +314,7 @@ impl Process {
}
pub fn parent(&self) -> Option<Arc<Process>> {
self.parent
.lock()
.as_ref()
.map(|parent| parent.upgrade())
.flatten()
self.parent.lock().upgrade()
}
/// Exit process.
@ -369,8 +348,10 @@ impl Process {
}
/// start to run current process
pub fn send_to_scheduler(self: &Arc<Self>) {
self.task.send_to_scheduler();
pub fn run(&self) {
for thread in &self.threads {
thread.run()
}
}
/// yield the current process to allow other processes to run
@ -378,11 +359,6 @@ impl Process {
Task::yield_now();
}
/// returns the userspace
pub fn user_space(&self) -> Option<&Arc<UserSpace>> {
self.user_space.as_ref()
}
/// returns the user_vm
pub fn user_vm(&self) -> Option<&UserVm> {
self.user_vm.as_ref()
@ -407,10 +383,8 @@ impl Process {
let child_process = self.children.lock().remove(&pid).unwrap();
assert!(child_process.status().lock().is_zombie());
table::remove_process(child_process.pid());
if let Some(process_group) = child_process.process_group().lock().as_ref() {
if let Some(process_group) = process_group.upgrade() {
process_group.remove_process(child_process.pid);
}
if let Some(process_group) = child_process.process_group().lock().upgrade() {
process_group.remove_process(child_process.pid);
}
child_process.exit_code()
}
@ -456,9 +430,7 @@ pub fn get_init_process() -> Arc<Process> {
let process = current_process
.parent
.lock()
.as_ref()
.map(|current| current.upgrade())
.flatten()
.upgrade()
.expect("[Internal Error] init process cannot be None");
current_process = process;
}

View File

@ -45,7 +45,7 @@ pub fn handle_pending_signal(context: &mut CpuContext) -> Result<()> {
flags,
restorer_addr,
mask,
} => handle_user_signal_handler(
} => handle_user_signal(
sig_num,
handler_addr,
flags,
@ -95,7 +95,7 @@ pub fn handle_pending_signal(context: &mut CpuContext) -> Result<()> {
Ok(())
}
pub fn handle_user_signal_handler(
pub fn handle_user_signal(
sig_num: SigNum,
handler_addr: Vaddr,
flags: SigActionFlags,

View File

@ -25,7 +25,7 @@ pub fn sys_clone(
let child_pid = child_process.pid();
let pid = current!().pid();
debug!("*********schedule child process, pid = {}**********", pid);
child_process.send_to_scheduler();
child_process.run();
debug!("*********return to parent process, pid = {}*********", pid);
Ok(SyscallReturn::Return(child_pid as _))
}

View File

@ -23,7 +23,7 @@ fn fork(parent_context: CpuContext) -> Arc<Process> {
let child = clone_child(parent_context, clone_args).unwrap();
let pid = current.pid();
debug!("*********schedule child process, pid = {}**********", pid);
child.send_to_scheduler();
child.run();
debug!("*********return to parent process, pid = {}*********", pid);
child
}

View File

@ -210,13 +210,13 @@ pub fn handle_syscall(context: &mut CpuContext) {
Ok(return_value) => {
debug!("syscall return: {:?}", return_value);
if let SyscallReturn::Return(return_value) = return_value {
context.gp_regs.rax = return_value as u64;
context.set_rax(return_value as u64);
}
}
Err(err) => {
debug!("syscall return error: {:?}", err);
let errno = err.error() as i32;
context.gp_regs.rax = (-errno) as u64
context.set_rax((-errno) as u64)
}
}
}

View File

@ -8,8 +8,13 @@ use super::{SyscallReturn, SYS_RT_SIGRETRUN};
pub fn sys_rt_sigreturn(context: &mut CpuContext) -> Result<SyscallReturn> {
log_syscall_entry!(SYS_RT_SIGRETRUN);
let current = current!();
let sig_context = current.sig_context().lock().pop_back().unwrap();
let ucontext = read_val_from_user::<ucontext_t>(sig_context)?;
let sig_context_addr = current.sig_context().lock().pop_back().unwrap();
println!("sig context address = 0x{:x}", sig_context_addr);
let stack_value = read_val_from_user::<Vaddr>((context.gp_regs.rsp) as usize)?;
println!("stack value = 0x{:x}", stack_value);
// debug_assert!(sig_context_addr == stack_value);
// println!("stack value = 0x{:x}", sig_context);
let ucontext = read_val_from_user::<ucontext_t>(sig_context_addr)?;
context.gp_regs = ucontext.uc_mcontext.inner.gp_regs;
// unblock sig mask
let sig_mask = ucontext.uc_sigmask;

View File

@ -0,0 +1,93 @@
//! Posix thread implementation
use crate::{
prelude::*,
process::{elf::load_elf_to_root_vmar, Process},
rights::Full,
vm::vmar::Vmar,
};
use jinux_frame::{cpu::CpuContext, task::Task, user::UserSpace};
use self::task::create_new_user_task;
pub mod task;
pub type Tid = i32;
/// A thread is a wrapper on top of task.
///
/// Each thread owns exactly one underlying `Task`; the task's data field
/// carries a `Weak<Thread>` back-reference to this wrapper (see
/// `Thread::current`, which downcasts it).
pub struct Thread {
    /// Thread id
    tid: Tid,
    /// Low-level info
    task: Arc<Task>,
    /// The process. FIXME: should we store the process info here?
    process: Weak<Process>,
}
impl Thread {
    /// Creates a user thread by loading an ELF executable into `root_vmar`.
    ///
    /// The ELF entry point and initial user stack top are written into a
    /// fresh `CpuContext`, which together with the vmar's `VmSpace` forms
    /// the new thread's `UserSpace`.
    ///
    /// # Panics
    /// Panics if the ELF file cannot be loaded into the vmar.
    pub fn new_user_thread_from_elf(
        root_vmar: &Vmar<Full>,
        filename: CString,
        elf_file_content: &'static [u8],
        process: Weak<Process>,
        tid: Tid,
        argv: Vec<CString>,
        envp: Vec<CString>,
    ) -> Arc<Self> {
        let elf_load_info =
            // `root_vmar` is already a reference; the extra `&` was a needless borrow.
            load_elf_to_root_vmar(filename, elf_file_content, root_vmar, argv, envp)
                .expect("Load Elf failed");
        let vm_space = root_vmar.vm_space().clone();
        let mut cpu_ctx = CpuContext::default();
        cpu_ctx.set_rip(elf_load_info.entry_point());
        cpu_ctx.set_rsp(elf_load_info.user_stack_top());
        let user_space = Arc::new(UserSpace::new(vm_space, cpu_ctx));
        Thread::new_user_thread(tid, user_space, process)
    }

    /// Creates a user thread that runs in `user_space`.
    ///
    /// `Arc::new_cyclic` lets the backing task hold a `Weak<Thread>`
    /// back-reference to the thread being constructed.
    pub fn new_user_thread(
        tid: Tid,
        user_space: Arc<UserSpace>,
        process: Weak<Process>,
    ) -> Arc<Self> {
        Arc::new_cyclic(|thread_ref| {
            let task = create_new_user_task(user_space, thread_ref.clone());
            Thread { tid, task, process }
        })
    }

    /// Creates a kernel thread that executes `task_fn` (no user space).
    ///
    /// # Panics
    /// Panics if the underlying task cannot be created.
    pub fn new_kernel_thread<F>(tid: Tid, task_fn: F, process: Weak<Process>) -> Arc<Self>
    where
        F: Fn() + Send + Sync + 'static,
    {
        Arc::new_cyclic(|thread_ref| {
            // Fixed local-variable typo: was `weal_thread`.
            let weak_thread = thread_ref.clone();
            let task = Task::new(task_fn, weak_thread, None).unwrap();
            Thread { tid, task, process }
        })
    }

    /// Returns the thread associated with the currently running task.
    ///
    /// # Panics
    /// Panics if the current task's data is not a `Weak<Thread>`, or if the
    /// thread has already been dropped. (The previous messages wrongly said
    /// "weak<process>"/"current process" — this function deals with threads.)
    pub fn current() -> Arc<Self> {
        let task = Task::current();
        let thread = task
            .data()
            .downcast_ref::<Weak<Thread>>()
            .expect("[Internal Error] task data should point to Weak<Thread>");
        thread
            .upgrade()
            .expect("[Internal Error] current thread cannot be None")
    }

    /// Returns the process this thread belongs to.
    ///
    /// # Panics
    /// Panics if the owning process has already been dropped.
    pub fn process(&self) -> Arc<Process> {
        self.process.upgrade().unwrap()
    }

    /// Add inner task to the run queue of scheduler. Note this does not mean the thread will run at once.
    pub fn run(&self) {
        self.task.run();
    }

    /// Yields the CPU so other tasks can run.
    pub fn yield_now() {
        Task::yield_now()
    }
}

View File

@ -1,5 +1,3 @@
use core::sync::atomic::AtomicUsize;
use jinux_frame::{
cpu::CpuContext,
task::Task,
@ -9,46 +7,20 @@ use jinux_frame::{
use crate::{
prelude::*,
process::{exception::handle_exception, signal::handle_pending_signal},
rights::Full,
vm::vmar::Vmar,
syscall::handle_syscall,
};
use crate::syscall::handle_syscall;
use super::{elf::load_elf_to_root_vmar, Process};
static COUNTER: AtomicUsize = AtomicUsize::new(0);
pub fn create_user_task_from_elf(
root_vmar: &Vmar<Full>,
filename: CString,
elf_file_content: &'static [u8],
parent: Weak<Process>,
argv: Vec<CString>,
envp: Vec<CString>,
) -> Arc<Task> {
let elf_load_info = load_elf_to_root_vmar(filename, elf_file_content, &root_vmar, argv, envp)
.expect("Load Elf failed");
let vm_space = root_vmar.vm_space().clone();
let mut cpu_ctx = CpuContext::default();
// set entry point
cpu_ctx.gp_regs.rip = elf_load_info.entry_point();
// set user stack
cpu_ctx.gp_regs.rsp = elf_load_info.user_stack_top();
let user_space = Arc::new(UserSpace::new(vm_space, cpu_ctx));
create_new_task(user_space, parent)
}
use super::Thread;
/// create new task with userspace and parent process
pub fn create_new_task(userspace: Arc<UserSpace>, parent: Weak<Process>) -> Arc<Task> {
pub fn create_new_user_task(user_space: Arc<UserSpace>, thread_ref: Weak<Thread>) -> Arc<Task> {
fn user_task_entry() {
let cur = Task::current();
let user_space = cur.user_space().expect("user task should have user space");
let mut user_mode = UserMode::new(user_space);
debug!("In user task entry:");
debug!("[new task] rip = 0x{:x}", user_space.cpu_ctx.gp_regs.rip);
debug!("[new task] rsp = 0x{:x}", user_space.cpu_ctx.gp_regs.rsp);
debug!("[new task] rax = 0x{:x}", user_space.cpu_ctx.gp_regs.rax);
debug!("[Task entry] rip = 0x{:x}", user_space.cpu_ctx.gp_regs.rip);
debug!("[Task entry] rsp = 0x{:x}", user_space.cpu_ctx.gp_regs.rsp);
debug!("[Task entry] rax = 0x{:x}", user_space.cpu_ctx.gp_regs.rax);
loop {
let user_event = user_mode.execute();
let context = user_mode.context_mut();
@ -66,7 +38,7 @@ pub fn create_new_task(userspace: Arc<UserSpace>, parent: Weak<Process>) -> Arc<
}
// If current is suspended, wait for a signal to wake up self
while current.status().lock().is_suspend() {
Process::yield_now();
Thread::yield_now();
debug!("{} is suspended.", current.pid());
handle_pending_signal(context).unwrap();
}
@ -76,7 +48,7 @@ pub fn create_new_task(userspace: Arc<UserSpace>, parent: Weak<Process>) -> Arc<
Task::current().exit();
}
Task::new(user_task_entry, parent, Some(userspace)).expect("spawn task failed")
Task::new(user_task_entry, thread_ref, Some(user_space)).expect("spawn task failed")
}
fn handle_user_event(user_event: UserEvent, context: &mut CpuContext) {