From a215cb54d96a80f67f57209720247cc880fd25b1 Mon Sep 17 00:00:00 2001
From: Ruihan Li
Date: Thu, 9 May 2024 01:16:54 +0800
Subject: [PATCH] Use `UnsafeCell` to store `TaskContext`

---
 framework/aster-frame/src/task/processor.rs | 14 +++++-----
 framework/aster-frame/src/task/task.rs      | 30 ++++++++++++---------
 2 files changed, 25 insertions(+), 19 deletions(-)

diff --git a/framework/aster-frame/src/task/processor.rs b/framework/aster-frame/src/task/processor.rs
index 6d0391112..9781ed60e 100644
--- a/framework/aster-frame/src/task/processor.rs
+++ b/framework/aster-frame/src/task/processor.rs
@@ -97,12 +97,9 @@ fn switch_to_task(next_task: Arc<Task>) {
     let current_task_cx_ptr = match current_task() {
         None => PROCESSOR.lock().get_idle_task_cx_ptr(),
         Some(current_task) => {
-            let mut task = current_task.inner_exclusive_access();
+            let cx_ptr = current_task.ctx().get();
 
-            // FIXME: `task.ctx` should be put in a separate `UnsafeCell`, not as a part of
-            // `TaskInner`. Otherwise, it violates the sematics of `SpinLock` and Rust's memory
-            // model which requires that mutable references must be exclusive.
-            let cx_ptr = &mut task.ctx as *mut TaskContext;
+            let mut task = current_task.inner_exclusive_access();
 
             debug_assert_ne!(task.task_status, TaskStatus::Sleeping);
             if task.task_status == TaskStatus::Runnable {
@@ -116,10 +113,15 @@ fn switch_to_task(next_task: Arc<Task>) {
         }
     };
 
-    let next_task_cx_ptr = &next_task.inner_ctx() as *const TaskContext;
+    let next_task_cx_ptr = next_task.ctx().get().cast_const();
 
     // change the current task to the next task
    PROCESSOR.lock().current = Some(next_task.clone());
+
+    // SAFETY:
+    // 1. `ctx` is only used in `schedule()`. We have exclusive access to both the current task
+    //    context and the next task context.
+    // 2. The next task context is a valid task context.
     unsafe {
         context_switch(current_task_cx_ptr, next_task_cx_ptr);
     }
diff --git a/framework/aster-frame/src/task/task.rs b/framework/aster-frame/src/task/task.rs
index ed75da7f8..37f21819c 100644
--- a/framework/aster-frame/src/task/task.rs
+++ b/framework/aster-frame/src/task/task.rs
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: MPL-2.0
 
+use core::cell::UnsafeCell;
+
 use intrusive_collections::{intrusive_adapter, LinkedListAtomicLink};
 
 use super::{
@@ -116,21 +118,24 @@ pub struct Task {
     data: Box<dyn Any + Send + Sync>,
     user_space: Option<Arc<UserSpace>>,
     task_inner: SpinLock<TaskInner>,
-    exit_code: usize,
+    ctx: UnsafeCell<TaskContext>,
     /// kernel stack, note that the top is SyscallFrame/TrapFrame
     kstack: KernelStack,
     link: LinkedListAtomicLink,
     priority: Priority,
-    // TODO:: add multiprocessor support
+    // TODO: add multiprocessor support
     cpu_affinity: CpuSet,
 }
 
 // TaskAdapter struct is implemented for building relationships between doubly linked list and Task struct
 intrusive_adapter!(pub TaskAdapter = Arc<Task>: Task { link: LinkedListAtomicLink });
 
+// SAFETY: `UnsafeCell` is not `Sync`. However, we only use it in `schedule()` where
+// we have exclusive access to the field.
+unsafe impl Sync for Task {}
+
 pub(crate) struct TaskInner {
     pub task_status: TaskStatus,
-    pub ctx: TaskContext,
 }
 
 impl Task {
@@ -144,9 +149,8 @@ impl Task {
         self.task_inner.lock_irq_disabled()
     }
 
-    /// get inner
-    pub(crate) fn inner_ctx(&self) -> TaskContext {
-        self.task_inner.lock_irq_disabled().ctx
+    pub(super) fn ctx(&self) -> &UnsafeCell<TaskContext> {
+        &self.ctx
     }
 
     /// Yields execution so that another task may be scheduled.
@@ -273,32 +277,32 @@ impl TaskOptions {
             current_task.func.call(());
             current_task.exit();
         }
-        let result = Task {
+
+        let mut result = Task {
             func: self.func.unwrap(),
             data: self.data.unwrap(),
             user_space: self.user_space,
             task_inner: SpinLock::new(TaskInner {
                 task_status: TaskStatus::Runnable,
-                ctx: TaskContext::default(),
             }),
-            exit_code: 0,
+            ctx: UnsafeCell::new(TaskContext::default()),
             kstack: KernelStack::new_with_guard_page()?,
             link: LinkedListAtomicLink::new(),
             priority: self.priority,
             cpu_affinity: self.cpu_affinity,
         };
 
-        result.task_inner.lock().task_status = TaskStatus::Runnable;
-        result.task_inner.lock().ctx.rip = kernel_task_entry as usize;
+        let ctx = result.ctx.get_mut();
+        ctx.rip = kernel_task_entry as usize;
         // We should reserve space for the return address in the stack, otherwise
         // we will write across the page boundary due to the implementation of
         // the context switch.
+        //
         // According to the System V AMD64 ABI, the stack pointer should be aligned
         // to at least 16 bytes. And a larger alignment is needed if larger arguments
         // are passed to the function. The `kernel_task_entry` function does not
         // have any arguments, so we only need to align the stack pointer to 16 bytes.
-        result.task_inner.lock().ctx.regs.rsp =
-            (crate::vm::paddr_to_vaddr(result.kstack.end_paddr() - 16)) as u64;
+        ctx.regs.rsp = (crate::vm::paddr_to_vaddr(result.kstack.end_paddr() - 16)) as u64;
 
         Ok(Arc::new(result))
     }
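
As a minimal illustration of the pattern this patch adopts (plain user-space Rust, not Asterinas code; `DemoTask`, `Ctx`, and `fake_context_switch` are invented names), the sketch below shows why keeping the context in an `UnsafeCell` next to, rather than inside, the `SpinLock` lets the scheduler pass raw context pointers across the lock boundary: `UnsafeCell::get()` hands out a raw pointer without ever creating a `&mut`, so no exclusive borrow has to outlive the lock guard.

    use std::cell::UnsafeCell;
    use std::sync::Mutex;

    /// Stand-in for the saved register state (`TaskContext` in the patch).
    #[derive(Default)]
    struct Ctx {
        rip: usize,
        rsp: u64,
    }

    /// Stand-in for `Task`: scheduling state behind a lock, context in a separate cell.
    struct DemoTask {
        status: Mutex<&'static str>, // plays the role of `SpinLock<TaskInner>`
        ctx: UnsafeCell<Ctx>,        // plays the role of `ctx: UnsafeCell<TaskContext>`
    }

    // SAFETY: mirrors the patch's argument -- the cell is only touched while the
    // (single-threaded) scheduler below has exclusive access to both contexts.
    unsafe impl Sync for DemoTask {}

    impl DemoTask {
        fn ctx(&self) -> &UnsafeCell<Ctx> {
            &self.ctx
        }
    }

    /// Stand-in for the assembly `context_switch`: save into `cur`, load from `next`.
    unsafe fn fake_context_switch(cur: *mut Ctx, next: *const Ctx) {
        (*cur).rip = 0x1000;       // pretend to save the current instruction pointer
        let _resume = (*next).rip; // pretend to restore the next one
    }

    fn new_task() -> DemoTask {
        DemoTask {
            status: Mutex::new("Runnable"),
            ctx: UnsafeCell::new(Ctx::default()),
        }
    }

    fn main() {
        let current = new_task();
        let next = new_task();

        // Update the lock-protected state; the guard is dropped right away.
        *current.status.lock().unwrap() = "Runnable";

        // Raw pointers come straight from the cells: no `&mut` is ever created,
        // so nothing borrowed from a lock guard has to stay alive across the switch.
        let current_ptr = current.ctx().get();
        let next_ptr = next.ctx().get().cast_const();

        // SAFETY: single thread, so we trivially have exclusive access to both contexts.
        unsafe { fake_context_switch(current_ptr, next_ptr) };
    }

This is the same reasoning the removed FIXME in processor.rs asked for: with `ctx` inside the `SpinLock`, `&mut task.ctx` had to be converted into a pointer that was still used after the guard was gone, which is exactly the exclusivity violation the patch eliminates.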