From 7e6591aab22834e9bbf7055b54ff53c665114930 Mon Sep 17 00:00:00 2001
From: Jianfeng Jiang
Date: Thu, 15 Dec 2022 14:53:43 +0800
Subject: [PATCH] refactor process implementation

---
 src/framework/jinux-frame/src/cpu.rs          |  15 +++
 .../jinux-frame/src/mm/memory_set.rs          |   1 -
 src/framework/jinux-frame/src/task/mod.rs     |   3 +-
 src/framework/jinux-frame/src/task/task.rs    |   2 +-
 src/services/libs/jinux-std/src/lib.rs        |   6 +-
 .../libs/jinux-std/src/process/clone.rs       |  21 ++--
 .../libs/jinux-std/src/process/mod.rs         | 104 +++++++-----------
 .../libs/jinux-std/src/process/signal/mod.rs  |   4 +-
 .../libs/jinux-std/src/syscall/clone.rs       |   2 +-
 .../libs/jinux-std/src/syscall/fork.rs        |   2 +-
 .../libs/jinux-std/src/syscall/mod.rs         |   4 +-
 .../jinux-std/src/syscall/rt_sigreturn.rs     |   9 +-
 src/services/libs/jinux-std/src/thread/mod.rs |  93 ++++++++++++++++
 .../jinux-std/src/{process => thread}/task.rs |  44 ++------
 14 files changed, 182 insertions(+), 128 deletions(-)
 create mode 100644 src/services/libs/jinux-std/src/thread/mod.rs
 rename src/services/libs/jinux-std/src/{process => thread}/task.rs (55%)

diff --git a/src/framework/jinux-frame/src/cpu.rs b/src/framework/jinux-frame/src/cpu.rs
index a4385f143..a6ad3cfa7 100644
--- a/src/framework/jinux-frame/src/cpu.rs
+++ b/src/framework/jinux-frame/src/cpu.rs
@@ -38,6 +38,21 @@ pub struct CpuContext {
     /// trap information, this field is all zero when it is syscall
     pub trap_information: TrapInformation,
 }
+
+impl CpuContext {
+    pub fn set_rax(&mut self, rax: u64) {
+        self.gp_regs.rax = rax;
+    }
+
+    pub fn set_rsp(&mut self, rsp: u64) {
+        self.gp_regs.rsp = rsp;
+    }
+
+    pub fn set_rip(&mut self, rip: u64) {
+        self.gp_regs.rip = rip;
+    }
+}
+
 #[derive(Clone, Default, Copy, Debug)]
 #[repr(C)]
 pub struct TrapInformation {
diff --git a/src/framework/jinux-frame/src/mm/memory_set.rs b/src/framework/jinux-frame/src/mm/memory_set.rs
index b8413d450..e0afebb5b 100644
--- a/src/framework/jinux-frame/src/mm/memory_set.rs
+++ b/src/framework/jinux-frame/src/mm/memory_set.rs
@@ -1,6 +1,5 @@
 use super::{page_table::PageTable, *};
 use crate::prelude::*;
-use crate::vm::VmIo;
 use crate::{
     config::PAGE_SIZE,
     mm::address::is_aligned,
diff --git a/src/framework/jinux-frame/src/task/mod.rs b/src/framework/jinux-frame/src/task/mod.rs
index 57b64a2a5..424070291 100644
--- a/src/framework/jinux-frame/src/task/mod.rs
+++ b/src/framework/jinux-frame/src/task/mod.rs
@@ -5,7 +5,8 @@ mod scheduler;
 #[allow(clippy::module_inception)]
 mod task;

-pub(crate) use self::processor::{get_idle_task_cx_ptr, schedule};
+pub(crate) use self::processor::get_idle_task_cx_ptr;
+pub use self::processor::schedule;
 pub use self::scheduler::{set_scheduler, Scheduler};
 pub(crate) use self::task::context_switch;
 pub(crate) use self::task::TaskContext;
diff --git a/src/framework/jinux-frame/src/task/task.rs b/src/framework/jinux-frame/src/task/task.rs
index 6a80b2056..aa49a3406 100644
--- a/src/framework/jinux-frame/src/task/task.rs
+++ b/src/framework/jinux-frame/src/task/task.rs
@@ -222,7 +222,7 @@ impl Task {
         Ok(Arc::new(result))
     }

-    pub fn send_to_scheduler(self: &Arc<Self>) {
+    pub fn run(self: &Arc<Self>) {
         switch_to_task(self.clone());
     }

diff --git a/src/services/libs/jinux-std/src/lib.rs b/src/services/libs/jinux-std/src/lib.rs
index a83a11efc..4128a0c79 100644
--- a/src/services/libs/jinux-std/src/lib.rs
+++ b/src/services/libs/jinux-std/src/lib.rs
@@ -21,7 +21,6 @@ use crate::{
     prelude::*,
     user_apps::{get_busybox_app, UserApp},
 };
-use jinux_frame::{info, println};
 use process::Process;

 use crate::{
@@ -42,6 +41,7 @@ pub mod prelude;
 mod process;
 pub mod rights;
 pub mod syscall;
+pub mod thread;
 pub mod tty;
 mod user_apps;
 mod util;
@@ -63,9 +63,9 @@ pub fn init_process() {
         println!("[kernel] Hello world from kernel!");
         let current = current!();
         let pid = current.pid();
-        info!("current pid = {}", pid);
+        debug!("current pid = {}", pid);
         let ppid = current.parent().unwrap().pid();
-        info!("current ppid = {}", ppid);
+        debug!("current ppid = {}", ppid);
     });
     info!(
         "[jinux-std/lib.rs] spawn kernel process, pid = {}",
diff --git a/src/services/libs/jinux-std/src/process/clone.rs b/src/services/libs/jinux-std/src/process/clone.rs
index 5133944d2..3b049645c 100644
--- a/src/services/libs/jinux-std/src/process/clone.rs
+++ b/src/services/libs/jinux-std/src/process/clone.rs
@@ -6,7 +6,8 @@ use jinux_frame::{

 use crate::{
     prelude::*,
-    process::{new_pid, signal::sig_queues::SigQueues, table, task::create_new_task},
+    process::{new_pid, signal::sig_queues::SigQueues, table},
+    thread::Thread,
 };

 use super::Process;
@@ -127,15 +128,17 @@ pub fn clone_child(parent_context: CpuContext, clone_args: CloneArgs) -> Result<

     let child = Arc::new_cyclic(|child_process_ref| {
         let weak_child_process = child_process_ref.clone();
-        let child_task = create_new_task(child_user_space.clone(), weak_child_process);
+        let tid = child_pid;
+        let child_thread =
+            Thread::new_user_thread(tid, child_user_space.clone(), weak_child_process);
         Process::new(
             child_pid,
-            child_task,
+            vec![child_thread],
             child_file_name,
             child_user_vm,
-            Some(child_user_space),
+            // Some(child_user_space),
             Some(child_root_vmar),
-            None,
+            Weak::new(),
             child_file_table,
             child_sig_dispositions,
             child_sig_queues,
@@ -143,13 +146,7 @@ pub fn clone_child(parent_context: CpuContext, clone_args: CloneArgs) -> Result<
         )
     });
     // Inherit parent's process group
-    let parent_process_group = current
-        .process_group()
-        .lock()
-        .as_ref()
-        .map(|ppgrp| ppgrp.upgrade())
-        .flatten()
-        .unwrap();
+    let parent_process_group = current.process_group().lock().upgrade().unwrap();
     parent_process_group.add_process(child.clone());
     child.set_process_group(Arc::downgrade(&parent_process_group));

diff --git a/src/services/libs/jinux-std/src/process/mod.rs b/src/services/libs/jinux-std/src/process/mod.rs
index 7709025ad..05443d7ee 100644
--- a/src/services/libs/jinux-std/src/process/mod.rs
+++ b/src/services/libs/jinux-std/src/process/mod.rs
@@ -10,14 +10,14 @@ use self::signal::sig_mask::SigMask;
 use self::signal::sig_queues::SigQueues;
 use self::signal::signals::kernel::KernelSignal;
 use self::status::ProcessStatus;
-use self::task::create_user_task_from_elf;
 use crate::fs::file_table::FileTable;
 use crate::prelude::*;
 use crate::rights::Full;
+use crate::thread::Thread;
 use crate::tty::get_console;
 use crate::vm::vmar::Vmar;
 use jinux_frame::sync::WaitQueue;
-use jinux_frame::{task::Task, user::UserSpace};
+use jinux_frame::task::Task;

 pub mod clone;
 pub mod elf;
@@ -30,7 +30,6 @@ pub mod process_vm;
 pub mod signal;
 pub mod status;
 pub mod table;
-pub mod task;
 pub mod wait;

 static PID_ALLOCATOR: AtomicI32 = AtomicI32::new(0);
@@ -44,9 +43,9 @@ pub type ExitCode = i32;
 pub struct Process {
     // Immutable Part
     pid: Pid,
-    task: Arc<Task>,
+    threads: Vec<Arc<Thread>>,
     filename: Option<CString>,
-    user_space: Option<Arc<UserSpace>>,
+    // user_space: Option<Arc<UserSpace>>,
     user_vm: Option<UserVm>,
     root_vmar: Option<Vmar<Full>>,
     /// wait for child status changed
@@ -60,11 +59,11 @@ pub struct Process {
     /// Process status
     status: Mutex<ProcessStatus>,
     /// Parent process
-    parent: Mutex<Option<Weak<Process>>>,
+    parent: Mutex<Weak<Process>>,
     /// Children processes
     children: Mutex<BTreeMap<Pid, Arc<Process>>>,
     /// Process group
-    process_group: Mutex<Option<Weak<ProcessGroup>>>,
+    process_group: Mutex<Weak<ProcessGroup>>,
     /// Process name
     process_name: Mutex<Option<String>>,
     /// File table
@@ -82,35 +81,28 @@ impl Process {
     /// returns the current process
     pub fn current() -> Arc<Process> {
-        let task = Task::current();
-        let process = task
-            .data()
-            .downcast_ref::<Weak<Process>>()
-            .expect("[Internal Error] task data should points to weak");
-        process
-            .upgrade()
-            .expect("[Internal Error] current process cannot be None")
+        let current_thread = Thread::current();
+        current_thread.process()
     }

     /// create a new process(not schedule it)
     pub fn new(
         pid: Pid,
-        task: Arc<Task>,
+        threads: Vec<Arc<Thread>>,
         exec_filename: Option<CString>,
         user_vm: Option<UserVm>,
-        user_space: Option<Arc<UserSpace>>,
         root_vmar: Option<Vmar<Full>>,
-        process_group: Option<Weak<ProcessGroup>>,
+        process_group: Weak<ProcessGroup>,
         file_table: FileTable,
         sig_dispositions: SigDispositions,
         sig_queues: SigQueues,
         sig_mask: SigMask,
     ) -> Self {
         let parent = if pid == 0 {
-            None
+            Weak::new()
         } else {
             let current_process = current!();
-            Some(Arc::downgrade(&current_process))
+            Arc::downgrade(&current_process)
         };
         let children = BTreeMap::new();
         let waiting_children = WaitQueue::new();
@@ -122,9 +114,8 @@ impl Process {
         });
         Self {
             pid,
-            task,
+            threads,
             filename: exec_filename,
-            user_space,
             user_vm,
             root_vmar,
             waiting_children,
@@ -162,7 +153,7 @@ impl Process {
         // FIXME: How to determine the fg process group?
         let pgid = process.pgid();
         get_console().set_fg(pgid);
-        process.send_to_scheduler();
+        process.run();
         process
     }

@@ -176,7 +167,7 @@ impl Process {
             current!().exit(0);
         };
         let process = Process::create_kernel_process(process_fn);
-        process.send_to_scheduler();
+        process.run();
         process
     }

@@ -192,15 +183,16 @@ impl Process {
             let weak_process = weak_process_ref.clone();
             let cloned_filename = Some(filename.clone());
             let root_vmar = Vmar::<Full>::new_root().unwrap();
-            let task = create_user_task_from_elf(
+            let tid = pid;
+            let thread = Thread::new_user_thread_from_elf(
                 &root_vmar,
                 filename,
                 elf_file_content,
                 weak_process,
+                tid,
                 argv,
                 envp,
             );
-            let user_space = task.user_space().map(|user_space| user_space.clone());
             let user_vm = UserVm::new();
             let file_table = FileTable::new_with_stdio();
             let sig_dispositions = SigDispositions::new();
@@ -208,12 +200,11 @@ impl Process {
             let sig_mask = SigMask::new_empty();
             Process::new(
                 pid,
-                task,
+                vec![thread],
                 cloned_filename,
                 Some(user_vm),
-                user_space,
                 Some(root_vmar),
-                None,
+                Weak::new(),
                 file_table,
                 sig_dispositions,
                 sig_queues,
@@ -237,19 +228,19 @@ impl Process {
         let pid = new_pid();
         let kernel_process = Arc::new_cyclic(|weak_process_ref| {
             let weak_process = weak_process_ref.clone();
-            let task = Task::new(task_fn, weak_process, None).expect("spawn kernel task failed");
+            let tid = pid;
+            let thread = Thread::new_kernel_thread(tid, task_fn, weak_process);
             let file_table = FileTable::new();
             let sig_dispositions = SigDispositions::new();
             let sig_queues = SigQueues::new();
             let sig_mask = SigMask::new_empty();
             Process::new(
                 pid,
-                task,
-                None,
-                None,
+                vec![thread],
                 None,
                 None,
                 None,
+                Weak::new(),
                 file_table,
                 sig_dispositions,
                 sig_queues,
@@ -271,13 +262,7 @@ impl Process {
     /// returns the process group id of the process
     pub fn pgid(&self) -> Pgid {
-        if let Some(process_group) = self
-            .process_group
-            .lock()
-            .as_ref()
-            .map(|process_group| process_group.upgrade())
-            .flatten()
-        {
+        if let Some(process_group) = self.process_group.lock().upgrade() {
             process_group.pgid()
         } else {
             0
         }
@@ -288,7 +273,7 @@ impl Process {
         &self.process_name
     }

-    pub fn process_group(&self) -> &Mutex<Option<Weak<ProcessGroup>>> {
+    pub fn process_group(&self) -> &Mutex<Weak<ProcessGroup>> {
         &self.process_group
     }

@@ -303,18 +288,16 @@ impl Process {
     }

     fn set_parent(&self, parent: Weak<Process>) {
-        let _ = self.parent.lock().insert(parent);
+        *self.parent.lock() = parent;
     }

     /// Set process group for current process. If old process group exists,
     /// remove current process from old process group.
     pub fn set_process_group(&self, process_group: Weak<ProcessGroup>) {
-        if let Some(old_process_group) = &*self.process_group().lock() {
-            if let Some(old_process_group) = old_process_group.upgrade() {
-                old_process_group.remove_process(self.pid());
-            }
+        if let Some(old_process_group) = self.process_group.lock().upgrade() {
+            old_process_group.remove_process(self.pid());
         }
-        let _ = self.process_group.lock().insert(process_group);
+        *self.process_group.lock() = process_group;
     }

     pub fn file_table(&self) -> &Mutex<FileTable> {
         &self.file_table
     }
@@ -331,11 +314,7 @@ impl Process {
     }

     pub fn parent(&self) -> Option<Arc<Process>> {
-        self.parent
-            .lock()
-            .as_ref()
-            .map(|parent| parent.upgrade())
-            .flatten()
+        self.parent.lock().upgrade()
     }

     /// Exit process.
@@ -369,8 +348,10 @@ impl Process {
     }

     /// start to run current process
-    pub fn send_to_scheduler(self: &Arc<Self>) {
-        self.task.send_to_scheduler();
+    pub fn run(&self) {
+        for thread in &self.threads {
+            thread.run()
+        }
     }

     /// yield the current process to allow other processes to run
@@ -378,11 +359,6 @@ impl Process {
         Task::yield_now();
     }

-    /// returns the userspace
-    pub fn user_space(&self) -> Option<&Arc<UserSpace>> {
-        self.user_space.as_ref()
-    }
-
     /// returns the user_vm
     pub fn user_vm(&self) -> Option<&UserVm> {
         self.user_vm.as_ref()
     }
@@ -407,10 +383,8 @@ impl Process {
         let child_process = self.children.lock().remove(&pid).unwrap();
         assert!(child_process.status().lock().is_zombie());
         table::remove_process(child_process.pid());
-        if let Some(process_group) = child_process.process_group().lock().as_ref() {
-            if let Some(process_group) = process_group.upgrade() {
-                process_group.remove_process(child_process.pid);
-            }
+        if let Some(process_group) = child_process.process_group().lock().upgrade() {
+            process_group.remove_process(child_process.pid);
         }
         child_process.exit_code()
     }
@@ -456,9 +430,7 @@ pub fn get_init_process() -> Arc<Process> {
         let process = current_process
             .parent
             .lock()
-            .as_ref()
-            .map(|current| current.upgrade())
-            .flatten()
+            .upgrade()
             .expect("[Internal Error] init process cannot be None");
         current_process = process;
     }
diff --git a/src/services/libs/jinux-std/src/process/signal/mod.rs b/src/services/libs/jinux-std/src/process/signal/mod.rs
index a439b13b4..e024370e4 100644
--- a/src/services/libs/jinux-std/src/process/signal/mod.rs
+++ b/src/services/libs/jinux-std/src/process/signal/mod.rs
@@ -45,7 +45,7 @@ pub fn handle_pending_signal(context: &mut CpuContext) -> Result<()> {
                 flags,
                 restorer_addr,
                 mask,
-            } => handle_user_signal_handler(
+            } => handle_user_signal(
                 sig_num,
                 handler_addr,
                 flags,
@@ -95,7 +95,7 @@ pub fn handle_pending_signal(context: &mut CpuContext) -> Result<()> {
     Ok(())
 }

-pub fn handle_user_signal_handler(
+pub fn handle_user_signal(
     sig_num: SigNum,
     handler_addr: Vaddr,
     flags: SigActionFlags,
diff --git a/src/services/libs/jinux-std/src/syscall/clone.rs b/src/services/libs/jinux-std/src/syscall/clone.rs
index 709c4225f..7d29e2b5c 100644
--- a/src/services/libs/jinux-std/src/syscall/clone.rs
+++ b/src/services/libs/jinux-std/src/syscall/clone.rs
@@ -25,7 +25,7 @@ pub fn sys_clone(
     let child_pid = child_process.pid();
     let pid = current!().pid();
     debug!("*********schedule child process, pid = {}**********", pid);
-    child_process.send_to_scheduler();
+    child_process.run();
     debug!("*********return to parent process, pid = {}*********", pid);
     Ok(SyscallReturn::Return(child_pid as _))
 }
diff --git a/src/services/libs/jinux-std/src/syscall/fork.rs b/src/services/libs/jinux-std/src/syscall/fork.rs
index 210138b13..1d9a1a77b 100644
--- a/src/services/libs/jinux-std/src/syscall/fork.rs
+++ b/src/services/libs/jinux-std/src/syscall/fork.rs
@@ -23,7 +23,7 @@ fn fork(parent_context: CpuContext) -> Arc<Process> {
     let child = clone_child(parent_context, clone_args).unwrap();
     let pid = current.pid();
     debug!("*********schedule child process, pid = {}**********", pid);
-    child.send_to_scheduler();
+    child.run();
     debug!("*********return to parent process, pid = {}*********", pid);
     child
 }
diff --git a/src/services/libs/jinux-std/src/syscall/mod.rs b/src/services/libs/jinux-std/src/syscall/mod.rs
index 0fba75db3..1cdd5a89f 100644
--- a/src/services/libs/jinux-std/src/syscall/mod.rs
+++ b/src/services/libs/jinux-std/src/syscall/mod.rs
@@ -210,13 +210,13 @@ pub fn handle_syscall(context: &mut CpuContext) {
         Ok(return_value) => {
             debug!("syscall return: {:?}", return_value);
             if let SyscallReturn::Return(return_value) = return_value {
-                context.gp_regs.rax = return_value as u64;
+                context.set_rax(return_value as u64);
             }
         }
         Err(err) => {
             debug!("syscall return error: {:?}", err);
             let errno = err.error() as i32;
-            context.gp_regs.rax = (-errno) as u64
+            context.set_rax((-errno) as u64)
         }
     }
 }
diff --git a/src/services/libs/jinux-std/src/syscall/rt_sigreturn.rs b/src/services/libs/jinux-std/src/syscall/rt_sigreturn.rs
index 27fb48fdd..fb8d707ac 100644
--- a/src/services/libs/jinux-std/src/syscall/rt_sigreturn.rs
+++ b/src/services/libs/jinux-std/src/syscall/rt_sigreturn.rs
@@ -8,8 +8,13 @@ use super::{SyscallReturn, SYS_RT_SIGRETRUN};
 pub fn sys_rt_sigreturn(context: &mut CpuContext) -> Result<SyscallReturn> {
     log_syscall_entry!(SYS_RT_SIGRETRUN);
     let current = current!();
-    let sig_context = current.sig_context().lock().pop_back().unwrap();
-    let ucontext = read_val_from_user::<ucontext_t>(sig_context)?;
+    let sig_context_addr = current.sig_context().lock().pop_back().unwrap();
+    println!("sig context address = 0x{:x}", sig_context_addr);
+    let stack_value = read_val_from_user::<Vaddr>((context.gp_regs.rsp) as usize)?;
+    println!("stack value = 0x{:x}", stack_value);
+    // debug_assert!(sig_context_addr == stack_value);
+    // println!("stack value = 0x{:x}", sig_context);
+    let ucontext = read_val_from_user::<ucontext_t>(sig_context_addr)?;
     context.gp_regs = ucontext.uc_mcontext.inner.gp_regs;
     // unblock sig mask
     let sig_mask = ucontext.uc_sigmask;
diff --git a/src/services/libs/jinux-std/src/thread/mod.rs b/src/services/libs/jinux-std/src/thread/mod.rs
new file mode 100644
index 000000000..3d8509cff
--- /dev/null
+++ b/src/services/libs/jinux-std/src/thread/mod.rs
@@ -0,0 +1,93 @@
+//! Posix thread implementation
+
+use crate::{
+    prelude::*,
+    process::{elf::load_elf_to_root_vmar, Process},
+    rights::Full,
+    vm::vmar::Vmar,
+};
+use jinux_frame::{cpu::CpuContext, task::Task, user::UserSpace};
+
+use self::task::create_new_user_task;
+
+pub mod task;
+
+pub type Tid = i32;
+
+/// A thread is a wrapper on top of a task.
+pub struct Thread {
+    /// Thread id
+    tid: Tid,
+    /// Low-level info
+    task: Arc<Task>,
+    /// The process. FIXME: should we store the process info here?
+    process: Weak<Process>,
+}
+
+impl Thread {
+    pub fn new_user_thread_from_elf(
+        root_vmar: &Vmar<Full>,
+        filename: CString,
+        elf_file_content: &'static [u8],
+        process: Weak<Process>,
+        tid: Tid,
+        argv: Vec<CString>,
+        envp: Vec<CString>,
+    ) -> Arc<Self> {
+        let elf_load_info =
+            load_elf_to_root_vmar(filename, elf_file_content, &root_vmar, argv, envp)
+                .expect("Load Elf failed");
+        let vm_space = root_vmar.vm_space().clone();
+        let mut cpu_ctx = CpuContext::default();
+        cpu_ctx.set_rip(elf_load_info.entry_point());
+        cpu_ctx.set_rsp(elf_load_info.user_stack_top());
+        let user_space = Arc::new(UserSpace::new(vm_space, cpu_ctx));
+        Thread::new_user_thread(tid, user_space, process)
+    }
+
+    pub fn new_user_thread(
+        tid: Tid,
+        user_space: Arc<UserSpace>,
+        process: Weak<Process>,
+    ) -> Arc<Self> {
+        Arc::new_cyclic(|thread_ref| {
+            let task = create_new_user_task(user_space, thread_ref.clone());
+            Thread { tid, task, process }
+        })
+    }
+
+    pub fn new_kernel_thread<F>(tid: Tid, task_fn: F, process: Weak<Process>) -> Arc<Self>
+    where
+        F: Fn() + Send + Sync + 'static,
+    {
+        Arc::new_cyclic(|thread_ref| {
+            let weak_thread = thread_ref.clone();
+            let task = Task::new(task_fn, weak_thread, None).unwrap();
+            Thread { tid, task, process }
+        })
+    }
+
+    pub fn current() -> Arc<Self> {
+        let task = Task::current();
+        let thread = task
+            .data()
+            .downcast_ref::<Weak<Thread>>()
+            .expect("[Internal Error] task data should point to a weak thread");
+        thread
+            .upgrade()
+            .expect("[Internal Error] current thread cannot be None")
+    }
+
+    pub fn process(&self) -> Arc<Process> {
+        self.process.upgrade().unwrap()
+    }
+
+    /// Add the inner task to the scheduler's run queue. Note that this does not mean the thread will run at once.
+    pub fn run(&self) {
+        self.task.run();
+    }
+
+    pub fn yield_now() {
+        Task::yield_now()
+    }
+}
diff --git a/src/services/libs/jinux-std/src/process/task.rs b/src/services/libs/jinux-std/src/thread/task.rs
similarity index 55%
rename from src/services/libs/jinux-std/src/process/task.rs
rename to src/services/libs/jinux-std/src/thread/task.rs
index bd1219870..9abbdb05d 100644
--- a/src/services/libs/jinux-std/src/process/task.rs
+++ b/src/services/libs/jinux-std/src/thread/task.rs
@@ -1,5 +1,3 @@
-use core::sync::atomic::AtomicUsize;
-
 use jinux_frame::{
     cpu::CpuContext,
     task::Task,
@@ -9,46 +7,20 @@ use jinux_frame::{
 use crate::{
     prelude::*,
     process::{exception::handle_exception, signal::handle_pending_signal},
-    rights::Full,
-    vm::vmar::Vmar,
+    syscall::handle_syscall,
 };
-use crate::syscall::handle_syscall;
-
-use super::{elf::load_elf_to_root_vmar, Process};
-
-static COUNTER: AtomicUsize = AtomicUsize::new(0);
-
-pub fn create_user_task_from_elf(
-    root_vmar: &Vmar<Full>,
-    filename: CString,
-    elf_file_content: &'static [u8],
-    parent: Weak<Process>,
-    argv: Vec<CString>,
-    envp: Vec<CString>,
-) -> Arc<Task> {
-    let elf_load_info = load_elf_to_root_vmar(filename, elf_file_content, &root_vmar, argv, envp)
-        .expect("Load Elf failed");
-    let vm_space = root_vmar.vm_space().clone();
-    let mut cpu_ctx = CpuContext::default();
-    // set entry point
-    cpu_ctx.gp_regs.rip = elf_load_info.entry_point();
-    // set user stack
-    cpu_ctx.gp_regs.rsp = elf_load_info.user_stack_top();
-    let user_space = Arc::new(UserSpace::new(vm_space, cpu_ctx));
-    create_new_task(user_space, parent)
-}
+use super::Thread;

 /// create new task with userspace and parent process
-pub fn create_new_task(userspace: Arc<UserSpace>, parent: Weak<Process>) -> Arc<Task> {
+pub fn create_new_user_task(user_space: Arc<UserSpace>, thread_ref: Weak<Thread>) -> Arc<Task> {
     fn user_task_entry() {
         let cur = Task::current();
         let user_space = cur.user_space().expect("user task should have user space");
         let mut user_mode = UserMode::new(user_space);
-        debug!("In user task entry:");
-        debug!("[new task] rip = 0x{:x}", user_space.cpu_ctx.gp_regs.rip);
-        debug!("[new task] rsp = 0x{:x}", user_space.cpu_ctx.gp_regs.rsp);
-        debug!("[new task] rax = 0x{:x}", user_space.cpu_ctx.gp_regs.rax);
+        debug!("[Task entry] rip = 0x{:x}", user_space.cpu_ctx.gp_regs.rip);
+        debug!("[Task entry] rsp = 0x{:x}", user_space.cpu_ctx.gp_regs.rsp);
+        debug!("[Task entry] rax = 0x{:x}", user_space.cpu_ctx.gp_regs.rax);
         loop {
             let user_event = user_mode.execute();
             let context = user_mode.context_mut();
@@ -66,7 +38,7 @@ pub fn create_new_task(userspace: Arc<UserSpace>, parent: Weak<Process>) -> Arc<
             }
             // If current is suspended, wait for a signal to wake up self
             while current.status().lock().is_suspend() {
-                Process::yield_now();
+                Thread::yield_now();
                 debug!("{} is suspended.", current.pid());
                 handle_pending_signal(context).unwrap();
             }
@@ -76,7 +48,7 @@ pub fn create_new_task(userspace: Arc<UserSpace>, parent: Weak<Process>) -> Arc<
         Task::current().exit();
     }

-    Task::new(user_task_entry, parent, Some(userspace)).expect("spawn task failed")
+    Task::new(user_task_entry, thread_ref, Some(user_space)).expect("spawn task failed")
 }

 fn handle_user_event(user_event: UserEvent, context: &mut CpuContext) {
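
Usage sketch (not part of the patch): after this change a `Thread` owns the low-level frame `Task` and points back to its `Process`, and the `Task`'s data field stores a `Weak<Thread>` instead of a `Weak<Process>`. A minimal, assumed example of driving the new API from kernel code; the function name `demo_kernel_thread`, the tid value, and the closure body are illustrative only:

    use crate::thread::Thread;

    fn demo_kernel_thread(process: Weak<Process>) {
        let tid = 1;
        // A Thread wraps a frame-level Task that is created with the thread as its data.
        let thread = Thread::new_kernel_thread(tid, || debug!("hello from a kernel thread"), process);
        // run() only enqueues the inner Task on the scheduler's run queue;
        // it does not execute the thread immediately.
        thread.run();
    }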