diff --git a/ostd/src/task/mod.rs b/ostd/src/task/mod.rs
index d857697ec..a8b0293cb 100644
--- a/ostd/src/task/mod.rs
+++ b/ostd/src/task/mod.rs
@@ -210,7 +210,8 @@ impl TaskOptions {
        /// All tasks will enter this function.
        /// This function is meant to execute the `task_fn` in `Task`.
        extern "C" fn kernel_task_entry() -> ! {
-            // SAFETY: This is called only once when we are switched to a CPU.
+            // SAFETY: The new task is switched to on a CPU for the first time, so
+            // `after_switching_to` hasn't been called yet.
            unsafe { processor::after_switching_to() };

            let current_task = Task::current()
diff --git a/ostd/src/task/processor.rs b/ostd/src/task/processor.rs
index 889cade8e..c74e066d6 100644
--- a/ostd/src/task/processor.rs
+++ b/ostd/src/task/processor.rs
@@ -49,20 +49,22 @@ pub(super) fn switch_to_task(next_task: Arc<Task>) {
    let current_task_ctx_ptr = if !current_task_ptr.is_null() {
        // SAFETY: The pointer is set by `switch_to_task` and is guaranteed to be
        // built with `Arc::into_raw`. It will only be dropped as a previous task,
-        // So reference will be valid across this function.
+        // so its reference will be valid until `after_switching_to`.
        let current_task = unsafe { &*current_task_ptr };
        current_task.save_fpu_state();

-        // Throughout this method, the task's context is alive and can be exclusively used.
+        // Until `after_switching_to`, the task's context is alive and can be exclusively used.
        current_task.ctx.get()
    } else {
-        // Throughout this method, interrupts are disabled and the context can be exclusively used.
+        // Until `after_switching_to`, IRQs are disabled and the context can be exclusively used.
        BOOTSTRAP_CONTEXT.as_mut_ptr()
    };

    before_switching_to(&next_task, &irq_guard);
+    // `before_switching_to` guarantees that from now on, and while the next task is running on the
+    // CPU, its context can be used exclusively.
    let next_task_ctx_ptr = next_task.ctx().get().cast_const();

    CURRENT_TASK_PTR.store(Arc::into_raw(next_task));
@@ -73,16 +75,16 @@ pub(super) fn switch_to_task(next_task: Arc<Task>) {
    let _ = core::mem::ManuallyDrop::new(irq_guard);

    // SAFETY:
-    // 1. `ctx` is only used in `reschedule()`. We have exclusive access to both the current task
-    //    context and the next task context.
-    // 2. The next task context is a valid task context.
+    // 1. We have exclusive access to both the current context and the next context (see above).
+    // 2. The next context is valid (because it is either correctly initialized or written by a
+    //    previous `context_switch`).
    unsafe {
        // This function may not return, for example, when the current task exits. So make sure
        // that all variables on the stack can be forgotten without causing resource leakage.
        context_switch(current_task_ctx_ptr, next_task_ctx_ptr);
    }

-    // SAFETY: We would only call once after switching back to this task.
+    // SAFETY: The task was just switched back; `after_switching_to` hasn't been called yet.
    unsafe { after_switching_to() };

    if let Some(current) = Task::current() {
diff --git a/ostd/src/task/scheduler/mod.rs b/ostd/src/task/scheduler/mod.rs
index a8e226224..e2faf8698 100644
--- a/ostd/src/task/scheduler/mod.rs
+++ b/ostd/src/task/scheduler/mod.rs
@@ -259,10 +259,12 @@ where
        };
    };

-    // FIXME: At this point, we need to prevent the current task from being scheduled on another
-    // CPU core. However, we currently have no way to ensure this. This is a soundness hole and
-    // should be fixed. See for details.
-
+    // `switch_to_task` will spin if it finds that the next task is still running on some CPU core,
+    // which guarantees soundness regardless of the scheduler implementation.
+    //
+    // FIXME: The scheduler decision and context switching are not atomic, which can lead to some
+    // strange behavior even if the scheduler is implemented correctly. See "Problem 2" at
+    // for details.
    cpu_local::clear_need_preempt();
    processor::switch_to_task(next_task);
 }
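
The comments above lean on a protocol that the diff itself does not show: `before_switching_to` must not grant exclusive access to the next task's context until whichever CPU last ran that task has executed `after_switching_to`. Below is a minimal sketch of such a pair, assuming a per-task atomic flag. It is illustrative only, not the actual OSTD implementation: `TaskSketch` and its `is_running` field are hypothetical names, and the real `after_switching_to` takes no argument (the previous task is recovered from the CPU-local `CURRENT_TASK_PTR`); it is passed explicitly here for brevity.

use core::sync::atomic::{AtomicBool, Ordering};

/// Hypothetical per-task state; the real `Task` in OSTD is more elaborate.
struct TaskSketch {
    /// True while some CPU is running this task or may still touch its context.
    is_running: AtomicBool,
    // ... saved register context, FPU state, etc.
}

/// Sketch of `before_switching_to`: acquire exclusive access to the next task's
/// context. If the task is still marked as running on another CPU (i.e., that
/// CPU has not yet executed `after_switching_to`), spin until the mark is
/// cleared. This is the spin mentioned in the scheduler comment, and it is what
/// keeps the handoff sound no matter which task the scheduler picked.
fn before_switching_to(next: &TaskSketch) {
    while next
        .is_running
        .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
        .is_err()
    {
        core::hint::spin_loop();
    }
}

/// Sketch of `after_switching_to`: runs exactly once on the destination CPU
/// after `context_switch` lands there (or in `kernel_task_entry` for a
/// brand-new task). Releasing the flag ends the "until `after_switching_to`"
/// window during which the previous task's context could be used exclusively.
fn after_switching_to(previous: &TaskSketch) {
    previous.is_running.store(false, Ordering::Release);
}

In this sketch the `Release` store pairs with the `Acquire` compare-exchange, so every write to the previous task's context, including those made by `context_switch` itself, is visible to the CPU that next acquires the flag.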