Remove UserSpace abstraction from OSTD

Chen Chengjun
2025-03-17 13:55:53 +08:00
committed by Tate, Hongliang Tian
parent 244a34a2fc
commit 248b24fb4e
10 changed files with 139 additions and 201 deletions

View File

@ -2,12 +2,7 @@
use core::{num::NonZeroU64, sync::atomic::Ordering};
use ostd::{
cpu::UserContext,
sync::RwArc,
task::Task,
user::{UserContextApi, UserSpace},
};
use ostd::{cpu::UserContext, sync::RwArc, task::Task, user::UserContextApi};
use super::{
posix_thread::{AsPosixThread, PosixThreadBuilder, ThreadName},
@ -230,18 +225,13 @@ fn clone_child_task(
// clone fs
let child_fs = clone_fs(posix_thread.fs(), clone_flags);
let child_root_vmar = process.root_vmar();
let child_user_space = {
let child_vm_space = child_root_vmar.vm_space().clone();
let child_cpu_context = clone_cpu_context(
let child_user_ctx = Arc::new(clone_user_ctx(
parent_context,
clone_args.stack,
clone_args.stack_size,
clone_args.tls,
clone_flags,
);
Arc::new(UserSpace::new(child_vm_space, child_cpu_context))
};
));
// Inherit sigmask from current thread
let sig_mask = posix_thread.sig_mask().load(Ordering::Relaxed).into();
@ -253,7 +243,7 @@ fn clone_child_task(
Credentials::new_from(&credentials)
};
let mut thread_builder = PosixThreadBuilder::new(child_tid, child_user_space, credentials)
let mut thread_builder = PosixThreadBuilder::new(child_tid, child_user_ctx, credentials)
.process(posix_thread.weak_process())
.sig_mask(sig_mask)
.file_table(child_file_table)
@ -297,20 +287,13 @@ fn clone_child_process(
};
// clone user space
let child_user_space = {
let child_cpu_context = clone_cpu_context(
let child_user_ctx = Arc::new(clone_user_ctx(
parent_context,
clone_args.stack,
clone_args.stack_size,
clone_args.tls,
clone_flags,
);
let child_vm_space = {
let child_root_vmar = child_process_vm.root_vmar();
child_root_vmar.vm_space().clone()
};
Arc::new(UserSpace::new(child_vm_space, child_cpu_context))
};
));
// clone file table
let child_file_table = clone_files(&thread_local.file_table().borrow(), clone_flags);
@ -342,7 +325,7 @@ fn clone_child_process(
Credentials::new_from(&credentials)
};
PosixThreadBuilder::new(child_tid, child_user_space, credentials)
PosixThreadBuilder::new(child_tid, child_user_ctx, credentials)
.thread_name(Some(child_thread_name))
.sig_mask(child_sig_mask)
.file_table(child_file_table)
@ -432,7 +415,7 @@ fn clone_vm(parent_process_vm: &ProcessVm, clone_flags: CloneFlags) -> Result<Pr
}
}
fn clone_cpu_context(
fn clone_user_ctx(
parent_context: &UserContext,
new_sp: u64,
stack_size: Option<NonZeroU64>,

View File

@ -2,7 +2,11 @@
#![expect(dead_code)]
use ostd::{cpu::CpuSet, sync::RwArc, task::Task, user::UserSpace};
use ostd::{
cpu::{CpuSet, UserContext},
sync::RwArc,
task::Task,
};
use super::{thread_table, PosixThread, ThreadLocal};
use crate::{
@ -22,7 +26,7 @@ use crate::{
pub struct PosixThreadBuilder {
// The essential part
tid: Tid,
user_space: Arc<UserSpace>,
user_ctx: Arc<UserContext>,
process: Weak<Process>,
credentials: Credentials,
@ -38,10 +42,10 @@ pub struct PosixThreadBuilder {
}
impl PosixThreadBuilder {
pub fn new(tid: Tid, user_space: Arc<UserSpace>, credentials: Credentials) -> Self {
pub fn new(tid: Tid, user_ctx: Arc<UserContext>, credentials: Credentials) -> Self {
Self {
tid,
user_space,
user_ctx,
process: Weak::new(),
credentials,
thread_name: None,
@ -98,7 +102,7 @@ impl PosixThreadBuilder {
pub fn build(self) -> Arc<Task> {
let Self {
tid,
user_space,
user_ctx,
process,
credentials,
thread_name,
@ -148,7 +152,7 @@ impl PosixThreadBuilder {
let thread_local = ThreadLocal::new(set_child_tid, clear_child_tid, file_table);
thread_table::add_thread(tid, thread.clone());
task::create_new_user_task(user_space, thread, thread_local)
task::create_new_user_task(user_ctx, thread, thread_local)
})
}
}

View File

@ -1,10 +1,6 @@
// SPDX-License-Identifier: MPL-2.0
use ostd::{
cpu::UserContext,
task::Task,
user::{UserContextApi, UserSpace},
};
use ostd::{cpu::UserContext, task::Task, user::UserContextApi};
use super::{builder::PosixThreadBuilder, name::ThreadName, PosixThread};
use crate::{
@ -55,13 +51,11 @@ pub fn create_posix_task_from_executable(
load_program_to_vm(process_vm, elf_file, argv, envp, &fs_resolver, 1)?
};
let vm_space = process_vm.root_vmar().vm_space().clone();
let mut cpu_ctx = UserContext::default();
cpu_ctx.set_instruction_pointer(elf_load_info.entry_point() as _);
cpu_ctx.set_stack_pointer(elf_load_info.user_stack_top() as _);
let user_space = Arc::new(UserSpace::new(vm_space, cpu_ctx));
let mut user_ctx = UserContext::default();
user_ctx.set_instruction_pointer(elf_load_info.entry_point() as _);
user_ctx.set_stack_pointer(elf_load_info.user_stack_top() as _);
let thread_name = Some(ThreadName::new_from_executable_path(executable_path)?);
let thread_builder = PosixThreadBuilder::new(tid, user_space, credentials)
let thread_builder = PosixThreadBuilder::new(tid, Arc::new(user_ctx), credentials)
.thread_name(thread_name)
.process(process)
.fs(Arc::new(fs));

View File

@ -1,8 +1,9 @@
// SPDX-License-Identifier: MPL-2.0
use ostd::{
cpu::UserContext,
task::{Task, TaskOptions},
user::{ReturnReason, UserContextApi, UserMode, UserSpace},
user::{ReturnReason, UserContextApi, UserMode},
};
use super::{oops, Thread};
@ -21,7 +22,7 @@ use crate::{
/// Creates a new task with the given user context and parent process
pub fn create_new_user_task(
user_space: Arc<UserSpace>,
user_ctx: Arc<UserContext>,
thread_ref: Arc<Thread>,
thread_local: ThreadLocal,
) -> Task {
@ -32,10 +33,10 @@ pub fn create_new_user_task(
let current_thread_local = current_task.as_thread_local().unwrap();
let current_process = current_posix_thread.process();
let user_space = current_task
.user_space()
.expect("user task should have user space");
let mut user_mode = UserMode::new(user_space);
let user_ctx = current_task
.user_ctx()
.expect("user task should have user context");
let mut user_mode = UserMode::new(UserContext::clone(user_ctx));
debug!(
"[Task entry] rip = 0x{:x}",
user_mode.context().instruction_pointer()
@ -67,7 +68,7 @@ pub fn create_new_user_task(
thread_local: current_thread_local,
posix_thread: current_posix_thread,
thread: current_thread.as_ref(),
task: current_task.as_ref(),
task: &current_task,
};
loop {
@ -109,7 +110,7 @@ pub fn create_new_user_task(
})
.data(thread_ref)
.local_data(thread_local)
.user_space(Some(user_space))
.user_ctx(Some(user_ctx))
.build()
.expect("spawn task failed")
}

View File

@ -306,7 +306,6 @@ impl Vmar_ {
fn new_root() -> Arc<Self> {
let vmar_inner = VmarInner::new();
let mut vm_space = VmSpace::new();
vm_space.register_page_fault_handler(handle_page_fault_wrapper);
Vmar_::new(vmar_inner, Arc::new(vm_space), 0, ROOT_VMAR_CAP_ADDR)
}
@ -431,7 +430,6 @@ impl Vmar_ {
let new_vmar_ = {
let vmar_inner = VmarInner::new();
let mut new_space = VmSpace::new();
new_space.register_page_fault_handler(handle_page_fault_wrapper);
Vmar_::new(vmar_inner, Arc::new(new_space), self.base, self.size)
};

View File

@ -23,6 +23,7 @@ mod syscall;
use align_ext::AlignExt;
use cfg_if::cfg_if;
use log::debug;
use spin::Once;
use super::ex_table::ExTable;
use crate::{
@ -34,7 +35,6 @@ use crate::{
page_prop::{CachePolicy, PageProperty},
PageFlags, PrivilegedPageFlags as PrivFlags, MAX_USERSPACE_VADDR, PAGE_SIZE,
},
task::Task,
trap::call_irq_callback_functions,
};
@ -277,20 +277,31 @@ extern "sysv64" fn trap_handler(f: &mut TrapFrame) {
}
}
#[expect(clippy::type_complexity)]
static USER_PAGE_FAULT_HANDLER: Once<fn(&CpuExceptionInfo) -> core::result::Result<(), ()>> =
Once::new();
/// Injects a custom handler for page faults that occur in the kernel and
/// are caused by user-space addresses.
pub fn inject_user_page_fault_handler(
handler: fn(info: &CpuExceptionInfo) -> core::result::Result<(), ()>,
) {
USER_PAGE_FAULT_HANDLER.call_once(|| handler);
}
/// Handles page fault from user space.
fn handle_user_page_fault(f: &mut TrapFrame, page_fault_addr: u64) {
let current_task = Task::current().unwrap();
let user_space = current_task
.user_space()
.expect("the user space is missing when a page fault from the user happens.");
let info = CpuExceptionInfo {
page_fault_addr: page_fault_addr as usize,
id: f.trap_num,
error_code: f.error_code,
};
let res = user_space.vm_space().handle_page_fault(&info);
let handler = USER_PAGE_FAULT_HANDLER
.get()
.expect("a page fault handler is missing");
let res = handler(&info);
// Copying byte by byte can recover directly
// if the page fault is handled successfully.
if res.is_ok() {
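
The hunk above replaces the per-`UserSpace` page-fault dispatch with a globally injected handler. Below is a minimal sketch of how a kernel built on OSTD might register one. Only the handler signature `fn(&CpuExceptionInfo) -> core::result::Result<(), ()>` comes from the diff; the re-export path of `inject_user_page_fault_handler` and the `resolve_user_fault` helper are assumptions for illustration.

```rust
use ostd::cpu::CpuExceptionInfo;
use ostd::mm::inject_user_page_fault_handler; // assumed re-export path

/// Hypothetical kernel-side fault resolution, e.g. walking the kernel's own
/// VM mappings for the faulting address and mapping a page on demand.
fn resolve_user_fault(info: &CpuExceptionInfo) -> core::result::Result<(), ()> {
    let _ = info; // kernel-specific handling elided in this sketch
    Err(())
}

pub fn init_user_page_fault_handling() {
    // Register once during kernel initialization; the handler is stored in a
    // `Once`, so later registrations are ignored.
    inject_user_page_fault_handler(resolve_user_fault);
}
```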

View File

@ -15,7 +15,7 @@ use crate::{
arch::mm::{
current_page_table_paddr, tlb_flush_all_excluding_global, PageTableEntry, PagingConsts,
},
cpu::{AtomicCpuSet, CpuExceptionInfo, CpuSet, PinCurrentCpu},
cpu::{AtomicCpuSet, CpuSet, PinCurrentCpu},
cpu_local_cell,
mm::{
io::Fallible,
@ -30,13 +30,31 @@ use crate::{
Error,
};
/// Virtual memory space.
/// A virtual address space for user-mode tasks, enabling safe manipulation of user-space memory.
///
/// A virtual memory space (`VmSpace`) can be created and assigned to a user
/// space so that the virtual memory of the user space can be manipulated
/// safely. For example, given an arbitrary user-space pointer, one can read
/// and write the memory location referred to by the user-space pointer without
/// the risk of breaking the memory safety of the kernel space.
/// The `VmSpace` type provides memory isolation guarantees between user-space and
/// kernel-space. For example, given an arbitrary user-space pointer, one can read and
/// write the memory location referred to by the user-space pointer without the risk of
/// breaking the memory safety of the kernel space.
///
/// # Task Association Semantics
///
/// As far as OSTD is concerned, a `VmSpace` is not necessarily associated with a task. Once a
/// `VmSpace` is activated (see [`VmSpace::activate`]), it remains activated until another
/// `VmSpace` is activated, **possibly by another task running on the same CPU**.
///
/// This means that it's up to the kernel to ensure that a task's `VmSpace` is always activated
/// while the task is running. This can be done by using the injected post-schedule handler
/// (see [`inject_post_schedule_handler`]) to always activate the correct `VmSpace` after each
/// context switch.
///
/// If the kernel otherwise decides not to ensure that the running task's `VmSpace` is always
/// activated, the kernel must deal with race conditions when calling methods that require the
/// `VmSpace` to be activated, e.g., [`UserMode::execute`], [`VmSpace::reader`],
/// [`VmSpace::writer`]. Otherwise, the behavior is unspecified, though it's guaranteed _not_ to
/// compromise the kernel's memory safety.
///
/// # Memory Backing
///
/// A newly-created `VmSpace` is not backed by any physical memory pages. To
/// provide memory pages for a `VmSpace`, one can allocate and map physical
@ -44,11 +62,12 @@ use crate::{
///
/// A `VmSpace` can also attach a page fault handler, which will be invoked to
/// handle page faults generated from user space.
#[expect(clippy::type_complexity)]
///
/// [`inject_post_schedule_handler`]: crate::task::inject_post_schedule_handler
/// [`UserMode::execute`]: crate::user::UserMode::execute
#[derive(Debug)]
pub struct VmSpace {
pt: PageTable<UserMode>,
page_fault_handler: Option<fn(&VmSpace, &CpuExceptionInfo) -> core::result::Result<(), ()>>,
/// A CPU can only activate a `VmSpace` when no mutable cursors are alive.
/// Cursors hold read locks and activation requires a write lock.
activation_lock: RwLock<()>,
@ -60,7 +79,6 @@ impl VmSpace {
pub fn new() -> Self {
Self {
pt: KERNEL_PAGE_TABLE.get().unwrap().create_user_page_table(),
page_fault_handler: None,
activation_lock: RwLock::new(()),
cpus: AtomicCpuSet::new(CpuSet::new_empty()),
}
@ -130,7 +148,7 @@ impl VmSpace {
}
/// Activates the page table on the current CPU.
pub(crate) fn activate(self: &Arc<Self>) {
pub fn activate(self: &Arc<Self>) {
let preempt_guard = disable_preempt();
let cpu = preempt_guard.current_cpu();
@ -159,28 +177,13 @@ impl VmSpace {
self.pt.activate();
}
pub(crate) fn handle_page_fault(
&self,
info: &CpuExceptionInfo,
) -> core::result::Result<(), ()> {
if let Some(func) = self.page_fault_handler {
return func(self, info);
}
Err(())
}
/// Registers the page fault handler in this `VmSpace`.
pub fn register_page_fault_handler(
&mut self,
func: fn(&VmSpace, &CpuExceptionInfo) -> core::result::Result<(), ()>,
) {
self.page_fault_handler = Some(func);
}
/// Creates a reader to read data from the user space of the current task.
///
/// Returns `Err` if this `VmSpace` does not belong to the user space of the current task
/// or the `vaddr` and `len` do not represent a user space memory range.
///
/// Users must ensure that no other page table is activated in the current task during the
/// lifetime of the created `VmReader`. This guarantees that the `VmReader` can operate correctly.
pub fn reader(&self, vaddr: Vaddr, len: usize) -> Result<VmReader<'_, Fallible>> {
if current_page_table_paddr() != unsafe { self.pt.root_paddr() } {
return Err(Error::AccessDenied);
@ -190,10 +193,6 @@ impl VmSpace {
return Err(Error::AccessDenied);
}
// `VmReader` is neither `Sync` nor `Send`, so it will not live longer than the current
// task. This ensures that the correct page table is activated during the usage period of
// the `VmReader`.
//
// SAFETY: The memory range is in user space, as checked above.
Ok(unsafe { VmReader::<Fallible>::from_user_space(vaddr as *const u8, len) })
}
@ -202,6 +201,9 @@ impl VmSpace {
///
/// Returns `Err` if this `VmSpace` does not belong to the user space of the current task
/// or the `vaddr` and `len` do not represent a user space memory range.
///
/// Users must ensure that no other page table is activated in the current task during the
/// lifetime of the created `VmWriter`. This guarantees that the `VmWriter` can operate correctly.
pub fn writer(&self, vaddr: Vaddr, len: usize) -> Result<VmWriter<'_, Fallible>> {
if current_page_table_paddr() != unsafe { self.pt.root_paddr() } {
return Err(Error::AccessDenied);
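
To make the new contract concrete, here is a small sketch of preparing to read user memory under the reworked `VmSpace` API. It assumes the caller already holds the `Arc<VmSpace>` that its kernel associates with the current task (OSTD no longer tracks this association itself); import paths are taken from the diff where visible and are otherwise assumptions.

```rust
use alloc::sync::Arc;
use ostd::mm::{Vaddr, VmSpace};

/// Creates a fallible reader over `len` bytes of user memory at `vaddr`.
/// Sketch only: the actual copy and error handling are elided.
fn with_user_reader(vm_space: &Arc<VmSpace>, vaddr: Vaddr, len: usize) -> Result<(), ostd::Error> {
    // `reader` checks that this `VmSpace`'s page table is the one installed on
    // the current CPU, so activate it first (`activate` is now a public method).
    vm_space.activate();
    let _reader = vm_space.reader(vaddr, len)?;
    // ... copy out of `_reader` with the fallible `VmReader` API; no other page
    // table may be activated on this task while `_reader` is alive ...
    Ok(())
}
```

If the kernel uses the post-schedule hook to keep the running task's `VmSpace` activated after every context switch, the explicit `activate` call above becomes redundant on the hot path.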

View File

@ -19,6 +19,7 @@ use core::{
use kernel_stack::KernelStack;
use processor::current_task;
use spin::Once;
use utils::ForceSync;
pub use self::{
@ -26,7 +27,14 @@ pub use self::{
scheduler::info::{AtomicCpuId, TaskScheduleInfo},
};
pub(crate) use crate::arch::task::{context_switch, TaskContext};
use crate::{prelude::*, trap::in_interrupt_context, user::UserSpace};
use crate::{cpu::UserContext, prelude::*, trap::in_interrupt_context};
static POST_SCHEDULE_HANDLER: Once<fn()> = Once::new();
/// Injects a handler to be executed after scheduling.
pub fn inject_post_schedule_handler(handler: fn()) {
POST_SCHEDULE_HANDLER.call_once(|| handler);
}
/// A task that executes a function to the end.
///
@ -41,7 +49,7 @@ pub struct Task {
data: Box<dyn Any + Send + Sync>,
local_data: ForceSync<Box<dyn Any + Send>>,
user_space: Option<Arc<UserSpace>>,
user_ctx: Option<Arc<UserContext>>,
ctx: SyncUnsafeCell<TaskContext>,
/// Kernel stack; note that the top is a SyscallFrame/TrapFrame
#[expect(dead_code)]
@ -108,10 +116,10 @@ impl Task {
&self.schedule_info
}
/// Returns the user space of this task, if it has one.
pub fn user_space(&self) -> Option<&Arc<UserSpace>> {
if self.user_space.is_some() {
Some(self.user_space.as_ref().unwrap())
/// Returns the user context of this task, if it has one.
pub fn user_ctx(&self) -> Option<&Arc<UserContext>> {
if self.user_ctx.is_some() {
Some(self.user_ctx.as_ref().unwrap())
} else {
None
}
@ -119,20 +127,20 @@ impl Task {
/// Saves the FPU state for the user task.
pub fn save_fpu_state(&self) {
let Some(user_space) = self.user_space.as_ref() else {
let Some(user_ctx) = self.user_ctx.as_ref() else {
return;
};
user_space.fpu_state().save();
user_ctx.fpu_state().save();
}
/// Restores the FPU state for the user task.
pub fn restore_fpu_state(&self) {
let Some(user_space) = self.user_space.as_ref() else {
let Some(user_ctx) = self.user_ctx.as_ref() else {
return;
};
user_space.fpu_state().restore();
user_ctx.fpu_state().restore();
}
}
@ -141,7 +149,7 @@ pub struct TaskOptions {
func: Option<Box<dyn FnOnce() + Send>>,
data: Option<Box<dyn Any + Send + Sync>>,
local_data: Option<Box<dyn Any + Send>>,
user_space: Option<Arc<UserSpace>>,
user_ctx: Option<Arc<UserContext>>,
}
impl TaskOptions {
@ -154,7 +162,7 @@ impl TaskOptions {
func: Some(Box::new(func)),
data: None,
local_data: None,
user_space: None,
user_ctx: None,
}
}
@ -185,9 +193,9 @@ impl TaskOptions {
self
}
/// Sets the user space associated with the task.
pub fn user_space(mut self, user_space: Option<Arc<UserSpace>>) -> Self {
self.user_space = user_space;
/// Sets the user context associated with the task.
pub fn user_ctx(mut self, user_ctx: Option<Arc<UserContext>>) -> Self {
self.user_ctx = user_ctx;
self
}
@ -224,8 +232,8 @@ impl TaskOptions {
let kstack = KernelStack::new_with_guard_page()?;
let mut ctx = SyncUnsafeCell::new(TaskContext::default());
if let Some(user_space) = self.user_space.as_ref() {
ctx.get_mut().set_tls_pointer(user_space.tls_pointer());
if let Some(user_ctx) = self.user_ctx.as_ref() {
ctx.get_mut().set_tls_pointer(user_ctx.tls_pointer());
};
ctx.get_mut()
.set_instruction_pointer(kernel_task_entry as usize);
@ -243,7 +251,7 @@ impl TaskOptions {
func: ForceSync::new(Cell::new(self.func)),
data: self.data.unwrap_or_else(|| Box::new(())),
local_data: ForceSync::new(self.local_data.unwrap_or_else(|| Box::new(()))),
user_space: self.user_space,
user_ctx: self.user_ctx,
ctx,
kstack,
schedule_info: TaskScheduleInfo {
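
For reference, a sketch of how a user task is now assembled with `TaskOptions`: the initial register state travels as a plain `Arc<UserContext>` instead of a `UserSpace`. The entry-point and stack values and the empty task body are placeholders; the builder calls mirror the ones in `create_new_user_task` above.

```rust
use alloc::sync::Arc;
use ostd::{
    cpu::UserContext,
    task::{Task, TaskOptions},
    user::UserContextApi,
};

/// Builds (but does not schedule) a task that will run user code at `entry`
/// with its stack top at `user_stack_top`. Sketch only.
fn build_user_task(entry: usize, user_stack_top: usize) -> Task {
    let mut user_ctx = UserContext::default();
    user_ctx.set_instruction_pointer(entry);
    user_ctx.set_stack_pointer(user_stack_top);

    TaskOptions::new(|| {
        // Task body: fetch the initial context back via
        // `Task::current().unwrap().user_ctx()`, clone it into a `UserMode`,
        // and drive the user/kernel round trips as the kernel's task entry does.
    })
    .user_ctx(Some(Arc::new(user_ctx)))
    .build()
    .expect("failed to build the user task")
}
```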

View File

@ -3,7 +3,7 @@
use alloc::sync::Arc;
use core::ptr::NonNull;
use super::{context_switch, Task, TaskContext};
use super::{context_switch, Task, TaskContext, POST_SCHEDULE_HANDLER};
use crate::cpu_local_cell;
cpu_local_cell! {
@ -59,9 +59,6 @@ pub(super) fn switch_to_task(next_task: Arc<Task>) {
};
let next_task_ctx_ptr = next_task.ctx().get().cast_const();
if let Some(next_user_space) = next_task.user_space() {
next_user_space.vm_space().activate();
}
// Change the current task to the next task.
//
@ -71,6 +68,11 @@ pub(super) fn switch_to_task(next_task: Arc<Task>) {
let old_prev = PREVIOUS_TASK_PTR.load();
PREVIOUS_TASK_PTR.store(current_task_ptr);
CURRENT_TASK_PTR.store(Arc::into_raw(next_task));
if let Some(handler) = POST_SCHEDULE_HANDLER.get() {
handler();
}
// Drop the old-previously running task.
if !old_prev.is_null() {
// SAFETY: The pointer is set by `switch_to_task` and is guaranteed to be
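
The explicit `VmSpace` activation that used to live in `switch_to_task` is gone; the kernel is now expected to restore the address space itself through the injected hook. A possible wiring is sketched below, with `vm_space_of_current_task` standing in for however the kernel maps a task to its `VmSpace` (OSTD no longer knows about this association).

```rust
use alloc::sync::Arc;
use ostd::{
    mm::VmSpace,
    task::{inject_post_schedule_handler, Task},
};

/// Hypothetical kernel-side lookup from the current task to the `VmSpace`
/// backing its process (e.g., via the task's associated thread data).
fn vm_space_of_current_task() -> Option<Arc<VmSpace>> {
    let _ = Task::current();
    None // kernel-specific lookup elided in this sketch
}

pub fn init_post_schedule_hook() {
    // The handler is a plain `fn()`, so only a named function or a
    // non-capturing closure can be registered.
    inject_post_schedule_handler(|| {
        if let Some(vm_space) = vm_space_of_current_task() {
            // Keep the invariant described in the `VmSpace` docs: the running
            // task's address space is activated right after each context switch.
            vm_space.activate();
        }
    });
}
```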

View File

@ -1,71 +1,8 @@
// SPDX-License-Identifier: MPL-2.0
#![expect(dead_code)]
//! User mode.
//! User space.
use crate::{
cpu::{FpuState, UserContext},
mm::VmSpace,
prelude::*,
trap::TrapFrame,
};
/// A user space.
///
/// Each user space has a VM address space and allows a task to execute in
/// user mode.
#[derive(Debug)]
pub struct UserSpace {
/// vm space
vm_space: Arc<VmSpace>,
/// cpu context before entering user space
init_ctx: UserContext,
}
impl UserSpace {
/// Creates a new instance.
///
/// Each instance maintains a VM address space and the CPU state to enable
/// execution in the user space.
pub fn new(vm_space: Arc<VmSpace>, init_ctx: UserContext) -> Self {
Self { vm_space, init_ctx }
}
/// Returns the VM address space.
pub fn vm_space(&self) -> &Arc<VmSpace> {
&self.vm_space
}
/// Returns the user mode that is bound to the current task and user space.
///
/// See [`UserMode`] on how to use it to execute user code.
///
/// # Panics
///
/// This method is intended to only allow each task to have at most one
/// instance of [`UserMode`] initiated. If this method is called again before
/// the first instance for the current task is dropped, then the method
/// panics.
pub fn user_mode(&self) -> UserMode<'_> {
todo!()
}
/// Sets thread-local storage pointer.
pub fn set_tls_pointer(&mut self, tls: usize) {
self.init_ctx.set_tls_pointer(tls)
}
/// Gets thread-local storage pointer.
pub fn tls_pointer(&self) -> usize {
self.init_ctx.tls_pointer()
}
/// Gets a reference to the FPU state.
pub fn fpu_state(&self) -> &FpuState {
self.init_ctx.fpu_state()
}
}
use crate::{cpu::UserContext, trap::TrapFrame};
/// Specific architectures need to implement this trait. This should only be used in [`UserMode`]
///
@ -112,32 +49,30 @@ pub trait UserContextApi {
/// use ostd::task::Task;
///
/// let current = Task::current();
/// let user_space = current.user_space()
/// .expect("the current task is not associated with a user space");
/// let mut user_mode = user_space.user_mode();
/// let user_ctx = current.user_ctx()
/// .expect("the current task is not associated with a user context");
/// let mut user_mode = UserMode::new(UserContext::clone(user_ctx));
/// loop {
/// // Execute in the user space until some interesting events occur.
/// // Note: users should activate a suitable `VmSpace` beforehand to support
/// // user-mode execution.
/// let return_reason = user_mode.execute(|| false);
/// todo!("handle the event, e.g., syscall");
/// }
/// ```
pub struct UserMode<'a> {
user_space: &'a Arc<UserSpace>,
pub struct UserMode {
context: UserContext,
}
// An instance of `UserMode` is bound to the current task. So it must not be sent to other tasks.
impl !Send for UserMode<'_> {}
impl !Send for UserMode {}
// Note that implementing `!Sync` is unnecessary
// because entering the user space via `UserMode` requires taking a mutable reference.
impl<'a> UserMode<'a> {
impl UserMode {
/// Creates a new `UserMode`.
pub fn new(user_space: &'a Arc<UserSpace>) -> Self {
Self {
user_space,
context: user_space.init_ctx.clone(),
}
pub fn new(context: UserContext) -> Self {
Self { context }
}
/// Starts executing in user mode. Make sure the current task is the task associated with this `UserMode`.