Update spin lock users to lock_irq_disabled()

Chuandong Li authored on 2023-07-02 18:31:53 +08:00
Committed by Tate, Hongliang Tian
parent ba4121cd6a
commit 78de1af348
15 changed files with 69 additions and 59 deletions


@@ -51,7 +51,7 @@ pub(crate) fn init() {
 }
 
 pub fn register_serial_input_callback(f: impl Fn(u8) + Send + Sync + 'static) {
-    SERIAL_INPUT_CALLBACKS.lock().push(Arc::new(f));
+    SERIAL_INPUT_CALLBACKS.lock_irq_disabled().push(Arc::new(f));
 }
 
 pub(crate) fn callback_init() {
@@ -67,7 +67,7 @@ where
     CONSOLE_IRQ_CALLBACK
         .get()
         .unwrap()
-        .lock()
+        .lock_irq_disabled()
         .on_active(callback);
 }
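Both call sites follow the same rule: a lock that may also be taken on the serial IRQ path must be acquired with IRQs disabled on the task side too. As a minimal sketch of a consumer of register_serial_input_callback (the RX_COUNT static, the helper functions, and the import path are illustrative, and SpinLock::new is assumed to be a const fn):

use jinux_frame::sync::SpinLock; // assumed import path for the SpinLock used above

// Illustrative state shared between the serial IRQ handler and task context.
static RX_COUNT: SpinLock<usize> = SpinLock::new(0);

pub fn init_rx_counter() {
    register_serial_input_callback(|_byte| {
        // Runs in IRQ context when a byte arrives.
        *RX_COUNT.lock_irq_disabled() += 1;
    });
}

pub fn poll_rx_count() -> usize {
    // Task context: a plain lock() here could deadlock if the serial IRQ
    // fired on this CPU while we held the lock, so disable IRQs instead.
    *RX_COUNT.lock_irq_disabled()
}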


@@ -35,7 +35,7 @@ impl RcuMonitor {
         // on the current CPU. If GP is complete, take the callbacks of the current
         // GP.
         let callbacks = {
-            let mut state = self.state.lock();
+            let mut state = self.state.lock_irq_disabled();
             if state.current_gp.is_complete() {
                 return;
             }
@@ -69,7 +69,7 @@ impl RcuMonitor {
     where
         F: FnOnce() -> () + Send + 'static,
     {
-        let mut state = self.state.lock();
+        let mut state = self.state.lock_irq_disabled();
         state.next_callbacks.push_back(Box::new(f));
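The monitor's state lock is taken both from task context (when registering a callback, as in the second hunk) and from the grace-period detection path, which typically runs from the timer interrupt; hence lock_irq_disabled() on both sides. A sketch of the registration side, using a hypothetical after_grace_period name for the callback-registering method shown above:

use alloc::boxed::Box;

// Hypothetical helper: defer reclamation of an object until a grace
// period has elapsed, so no in-flight reader can still observe it.
fn retire_later<T: Send + 'static>(monitor: &RcuMonitor, old: Box<T>) {
    // The closure lands in state.next_callbacks under lock_irq_disabled(),
    // so this is safe even though the same state lock is also taken from
    // the timer-interrupt path.
    monitor.after_grace_period(move || {
        drop(old);
    });
}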


@@ -52,8 +52,8 @@ impl<T> SpinLock<T> {
     /// Acquire the spin lock without disabling local IRQs.
     ///
-    /// This method is twice as fast as the `lock_irq_disable` method.
-    /// So prefer using this method over the `lock_irq_disable` method
+    /// This method is twice as fast as the `lock_irq_disabled` method.
+    /// So prefer using this method over the `lock_irq_disabled` method
     /// when IRQ handlers are allowed to get executed while
     /// holding this lock. For example, if a lock is never used
     /// in the interrupt context, then it is ok to use this method
@@ -137,7 +137,7 @@ impl<'a, T: fmt::Debug> fmt::Debug for SpinLockIrqDisabledGuard<'a, T> {
 impl<'a, T> !Send for SpinLockIrqDisabledGuard<'a, T> {}
 
 // Safety. `SpinLockIrqDisabledGuard` can be shared between tasks/threads in same CPU.
-// As `lock_irq_disable()` disables interrupts to prevent race conditions caused by interrupts.
+// As `lock_irq_disabled()` disables interrupts to prevent race conditions caused by interrupts.
 unsafe impl<T: Sync> Sync for SpinLockIrqDisabledGuard<'_, T> {}
 
 pub struct SpinLockGuard<'a, T> {
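The renamed doc comment states the decision rule between the two methods. In sketch form (both statics are illustrative, and SpinLock::new is again assumed to be a const fn):

// A lock that is never taken in interrupt context can use the faster
// plain lock(); being interrupted while holding it is harmless because
// no IRQ handler will ever contend for it.
static TASK_ONLY: SpinLock<u64> = SpinLock::new(0);

// A lock that IS taken in interrupt context must use lock_irq_disabled():
// otherwise an IRQ arriving while the lock is held on this CPU could try
// to re-acquire it from the handler and spin forever.
static IRQ_SHARED: SpinLock<u64> = SpinLock::new(0);

fn task_context_fast_path() {
    *TASK_ONLY.lock() += 1;
}

fn task_context_irq_safe_path() {
    *IRQ_SHARED.lock_irq_disabled() += 1;
}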


@@ -53,14 +53,14 @@ impl WaitQueue {
     /// Wake one waiter thread, if there is one.
     pub fn wake_one(&self) {
-        if let Some(waiter) = self.waiters.lock().front() {
+        if let Some(waiter) = self.waiters.lock_irq_disabled().front() {
             waiter.wake_up();
         }
     }
 
     /// Wake all not-exclusive waiter threads and at most one exclusive waiter.
     pub fn wake_all(&self) {
-        for waiter in self.waiters.lock().iter() {
+        for waiter in self.waiters.lock_irq_disabled().iter() {
             waiter.wake_up();
             if waiter.is_exclusive() {
                 break;
@@ -72,15 +72,15 @@ impl WaitQueue {
     // Otherwise, add to the front of waitqueue
     fn enqueue(&self, waiter: &Arc<Waiter>) {
         if waiter.is_exclusive() {
-            self.waiters.lock().push_back(waiter.clone())
+            self.waiters.lock_irq_disabled().push_back(waiter.clone())
         } else {
-            self.waiters.lock().push_front(waiter.clone());
+            self.waiters.lock_irq_disabled().push_front(waiter.clone());
         }
     }
 
     /// removes all waiters that have finished wait
     fn finish_wait(&self) {
-        self.waiters.lock().retain(|waiter| !waiter.is_finished())
+        self.waiters.lock_irq_disabled().retain(|waiter| !waiter.is_finished())
     }
 }
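With the waiter list guarded by lock_irq_disabled(), wake_one() becomes safe to call from interrupt context as well, e.g. in a completion-style pattern (the done flag and handler below are illustrative, not from this commit):

// An IRQ handler marking an I/O request complete and waking its waiter.
// If wake_one() still used plain lock(), this handler could interrupt a
// task holding the waiter-list lock on the same CPU and deadlock.
fn on_io_complete(wq: &WaitQueue, done: &SpinLock<bool>) {
    *done.lock_irq_disabled() = true;
    wq.wake_one();
}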


@@ -32,20 +32,20 @@ impl<const ORDER: usize> LockedHeap<ORDER> {
     /// Safety: The range [start, start + size) must be a valid memory region.
     pub unsafe fn init(&self, start: *const u8, size: usize) {
-        self.0.lock().init(start as usize, size);
+        self.0.lock_irq_disabled().init(start as usize, size);
     }
 }
 
 unsafe impl<const ORDER: usize> GlobalAlloc for LockedHeap<ORDER> {
     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
         self.0
-            .lock()
+            .lock_irq_disabled()
             .alloc(layout)
             .map_or(0 as *mut u8, |allocation| allocation.as_ptr())
     }
 
     unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
         debug_assert!(ptr as usize != 0);
-        self.0.lock().dealloc(NonNull::new_unchecked(ptr), layout)
+        self.0.lock_irq_disabled().dealloc(NonNull::new_unchecked(ptr), layout)
     }
 }
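The global allocator is the clearest case for this change: any IRQ handler that allocates reenters GlobalAlloc::alloc through the same lock. A sketch of the single-CPU deadlock that plain lock() permits (the handler body is illustrative):

extern crate alloc;
use alloc::boxed::Box;

// Suppose a task on this CPU is inside the heap's lock() running alloc()
// when this handler fires.
fn hypothetical_irq_handler() {
    // Box::new allocates via GlobalAlloc::alloc, which takes the heap
    // lock again. With plain lock() the handler would spin forever,
    // because the interrupted owner cannot resume until the handler
    // returns. With lock_irq_disabled(), the task-side critical section
    // masks IRQs, so the handler simply cannot fire at that point.
    let _scratch = Box::new([0u8; 64]);
}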