diff --git a/kernel/comps/softirq/src/taskless.rs b/kernel/comps/softirq/src/taskless.rs
index 36e4b1608..03a35e80f 100644
--- a/kernel/comps/softirq/src/taskless.rs
+++ b/kernel/comps/softirq/src/taskless.rs
@@ -8,7 +8,7 @@ use core::{
 };
 
 use intrusive_collections::{intrusive_adapter, LinkedList, LinkedListAtomicLink};
-use ostd::{cpu::local::StaticCpuLocal, cpu_local, trap};
+use ostd::{cpu::local::StaticCpuLocal, cpu_local, sync::SpinLock, trap};
 
 use super::{
     softirq_id::{TASKLESS_SOFTIRQ_ID, TASKLESS_URGENT_SOFTIRQ_ID},
@@ -58,7 +58,7 @@ pub struct Taskless {
     /// Whether the taskless job is running.
     is_running: AtomicBool,
     /// The function that will be called when executing this taskless job.
-    callback: Box<RefCell<dyn FnMut() + Send + Sync>>,
+    callback: Box<SpinLock<dyn FnMut() + Send + Sync>>,
     /// Whether this `Taskless` is disabled.
     is_disabled: AtomicBool,
     link: LinkedListAtomicLink,
@@ -77,14 +77,10 @@ impl Taskless {
     where
         F: FnMut() + Send + Sync + 'static,
     {
-        // Since the same taskless will not be executed concurrently,
-        // it is safe to use a `RefCell` here though the `Taskless` will
-        // be put into an `Arc`.
-        #[expect(clippy::arc_with_non_send_sync)]
         Arc::new(Self {
             is_scheduled: AtomicBool::new(false),
             is_running: AtomicBool::new(false),
-            callback: Box::new(RefCell::new(callback)),
+            callback: Box::new(SpinLock::new(callback)),
             is_disabled: AtomicBool::new(false),
             link: LinkedListAtomicLink::new(),
         })
@@ -185,9 +181,7 @@ fn taskless_softirq_handler(
 
         taskless.is_scheduled.store(false, Ordering::Release);
 
-        // The same taskless will not be executing concurrently, so it is safe to
-        // do `borrow_mut` here.
-        (taskless.callback.borrow_mut())();
+        (taskless.callback.lock())();
 
         taskless.is_running.store(false, Ordering::Release);
     }
 }
diff --git a/ostd/src/cpu/local/mod.rs b/ostd/src/cpu/local/mod.rs
index 260848fcf..1a57007fd 100644
--- a/ostd/src/cpu/local/mod.rs
+++ b/ostd/src/cpu/local/mod.rs
@@ -162,12 +162,14 @@ impl<'a, T: 'static, S: AnyStorage<T>> Deref for CpuLocalDerefGuard<'a, T, S> {
     }
 }
 
-// SAFETY: At any given time, only one task can access the inner value `T` of a
-// CPU-local variable if `T` is not `Sync`. We guarantee it by disabling the
-// reference to the inner value, or turning off preemptions when creating
-// the reference.
-unsafe impl<T: 'static, S: AnyStorage<T>> Sync for CpuLocal<T, S> {}
-unsafe impl<T: 'static> Send for CpuLocal<T, DynamicStorage<T>> {}
+// SAFETY: Although multiple tasks may access the inner value `T` of a CPU-local
+// variable at different times, only one task can access it at any given moment.
+// We guarantee it by disabling the reference to the inner value, or turning off
+// preemptions when creating the reference. Therefore, if `T` is `Send`, marking
+// `CpuLocal` with `Sync` and `Send` only safely transfers ownership of the
+// entire `T` instance between tasks.
+unsafe impl<T: 'static + Send, S: AnyStorage<T>> Sync for CpuLocal<T, S> {}
+unsafe impl<T: 'static + Send> Send for CpuLocal<T, DynamicStorage<T>> {}
 
 // Implement `!Copy` and `!Clone` for `CpuLocal` to ensure memory safety:
 // - Prevent valid instances of `CpuLocal<DynamicStorage<T>>` from being copied
diff --git a/ostd/src/mm/heap/slot_list.rs b/ostd/src/mm/heap/slot_list.rs
index 22818af14..7aaf98972 100644
--- a/ostd/src/mm/heap/slot_list.rs
+++ b/ostd/src/mm/heap/slot_list.rs
@@ -16,6 +16,13 @@ pub struct SlabSlotList<const SLOT_SIZE: usize> {
     head: Option<NonNull<u8>>,
 }
 
+// SAFETY: Any access or modification (i.e., push and pop operations) to the
+// data pointed to by `head` requires a `&mut SlabSlotList`. Therefore, at any
+// given time, only one task can access the inner `head`. Additionally, a
+// `HeapSlot` will not be allocated again as long as it remains in the list.
+unsafe impl<const SLOT_SIZE: usize> Sync for SlabSlotList<SLOT_SIZE> {}
+unsafe impl<const SLOT_SIZE: usize> Send for SlabSlotList<SLOT_SIZE> {}
+
 impl<const SLOT_SIZE: usize> Default for SlabSlotList<SLOT_SIZE> {
     fn default() -> Self {
         Self::new()