Require T: Send for CpuLocal<T, S>

This commit is contained in:
Wang Siyuan
2025-06-14 12:12:18 +00:00
committed by Tate, Hongliang Tian
parent 627dd0386b
commit d5b12532a8
3 changed files with 19 additions and 16 deletions

View File

@@ -8,7 +8,7 @@ use core::{
};
use intrusive_collections::{intrusive_adapter, LinkedList, LinkedListAtomicLink};
use ostd::{cpu::local::StaticCpuLocal, cpu_local, trap};
use ostd::{cpu::local::StaticCpuLocal, cpu_local, sync::SpinLock, trap};
use super::{
softirq_id::{TASKLESS_SOFTIRQ_ID, TASKLESS_URGENT_SOFTIRQ_ID},
@@ -58,7 +58,7 @@ pub struct Taskless {
/// Whether the taskless job is running.
is_running: AtomicBool,
/// The function that will be called when executing this taskless job.
callback: Box<RefCell<dyn FnMut() + Send + Sync + 'static>>,
callback: Box<SpinLock<dyn FnMut() + Send + Sync + 'static>>,
/// Whether this `Taskless` is disabled.
is_disabled: AtomicBool,
link: LinkedListAtomicLink,
@@ -77,14 +77,10 @@ impl Taskless {
where
F: FnMut() + Send + Sync + 'static,
{
// Since the same taskless will not be executed concurrently,
// it is safe to use a `RefCell` here though the `Taskless` will
// be put into an `Arc`.
#[expect(clippy::arc_with_non_send_sync)]
Arc::new(Self {
is_scheduled: AtomicBool::new(false),
is_running: AtomicBool::new(false),
callback: Box::new(RefCell::new(callback)),
callback: Box::new(SpinLock::new(callback)),
is_disabled: AtomicBool::new(false),
link: LinkedListAtomicLink::new(),
})
@@ -185,9 +181,7 @@ fn taskless_softirq_handler(
taskless.is_scheduled.store(false, Ordering::Release);
// The same taskless will not be executing concurrently, so it is safe to
// do `borrow_mut` here.
(taskless.callback.borrow_mut())();
(taskless.callback.lock())();
taskless.is_running.store(false, Ordering::Release);
}
}

View File

@@ -162,12 +162,14 @@ impl<'a, T: 'static, S: AnyStorage<T>> Deref for CpuLocalDerefGuard<'a, T, S> {
}
}
// SAFETY: At any given time, only one task can access the inner value `T` of a
// CPU-local variable if `T` is not `Sync`. We guarantee it by disabling the
// reference to the inner value, or turning off preemptions when creating
// the reference.
unsafe impl<T: 'static, S: AnyStorage<T>> Sync for CpuLocal<T, S> {}
unsafe impl<T: 'static> Send for CpuLocal<T, DynamicStorage<T>> {}
// SAFETY: Although multiple tasks may access the inner value `T` of a CPU-local
// variable at different times, only one task can access it at any given moment.
// We guarantee it by disabling the reference to the inner value, or turning off
// preemptions when creating the reference. Therefore, if `T` is `Send`, marking
// `CpuLocal<T, S>` with `Sync` and `Send` only safely transfers ownership of the
// entire `T` instance between tasks.
unsafe impl<T: Send + 'static, S: AnyStorage<T>> Sync for CpuLocal<T, S> {}
unsafe impl<T: Send + 'static> Send for CpuLocal<T, DynamicStorage<T>> {}
// Implement `!Copy` and `!Clone` for `CpuLocal` to ensure memory safety:
// - Prevent valid instances of `CpuLocal<T, StaticStorage<T>>` from being copied

View File

@@ -16,6 +16,13 @@ pub struct SlabSlotList<const SLOT_SIZE: usize> {
head: Option<NonNull<u8>>,
}
// SAFETY: Any access or modification (i.e., push and pop operations) to the
// data pointed to by `head` requires a `&mut SlabSlotList`. Therefore, at any
// given time, only one task can access the inner `head`. Additionally, a
// `HeapSlot` will not be allocated again as long as it remains in the list.
unsafe impl<const SLOT_SIZE: usize> Sync for SlabSlotList<SLOT_SIZE> {}
unsafe impl<const SLOT_SIZE: usize> Send for SlabSlotList<SLOT_SIZE> {}
impl<const SLOT_SIZE: usize> Default for SlabSlotList<SLOT_SIZE> {
fn default() -> Self {
Self::new()