Add a runtime check for scheduler with spin locks

commit 9cb759efa2 (parent 08f50ac085)
Author:    Chuandong Li
Date:      2023-07-25 10:18:20 +08:00
Committer: Tate, Hongliang Tian

12 changed files with 170 additions and 177 deletions
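In short: every way of acquiring a spin lock or a read-write lock now yields a guard that marks the current CPU as non-preemptible, either by disabling local IRQs or by bumping a per-CPU lock count, and switch_to_task() panics if it is entered while that count is non-zero. Below is a minimal, self-contained model of that runtime check, simplified to a single global counter in place of the kernel's cpu_local! one; the names mirror the diff, but the code is an illustration, not the kernel implementation.

use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

// Stand-in for the per-CPU PREEMPT_COUNT in this commit (simplified to a global).
static PREEMPT_COUNT: AtomicUsize = AtomicUsize::new(0);

// RAII guard: while one is alive, the "CPU" counts as non-preemptible.
struct DisablePreemptGuard;

impl DisablePreemptGuard {
    fn new() -> Self {
        PREEMPT_COUNT.fetch_add(1, Relaxed);
        Self
    }
}

impl Drop for DisablePreemptGuard {
    fn drop(&mut self) {
        PREEMPT_COUNT.fetch_sub(1, Relaxed);
    }
}

// Stand-in for the check added to switch_to_task(): refuse to reschedule
// while any lock guard is alive.
fn check_preemptible() {
    let held = PREEMPT_COUNT.load(Relaxed);
    if held != 0 {
        panic!("Calling schedule() while holding {} locks", held);
    }
}

fn main() {
    check_preemptible(); // fine: no guard alive
    let guard = DisablePreemptGuard::new();
    assert_eq!(PREEMPT_COUNT.load(Relaxed), 1); // schedule() would panic here
    drop(guard);
    check_preemptible(); // fine again
}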

View File

@@ -87,7 +87,8 @@ impl Capability {
         // read all cap_ptr so that it is easy for us to get the length.
         while cap_ptr > 0 {
             cap_ptr_vec.push(cap_ptr);
-            cap_ptr = dev.location().read8(cap_ptr + 1) as u16 & PciDeviceLocation::BIT32_ALIGN_MASK;
+            cap_ptr =
+                dev.location().read8(cap_ptr + 1) as u16 & PciDeviceLocation::BIT32_ALIGN_MASK;
         }
         cap_ptr_vec.sort();
         // Push here so that we can calculate the length of the last capability.
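This hunk only reflows the line, but the loop it touches is worth a note: it walks the PCI capability linked list, where each capability stores the offset of the next one at byte 1, and capability pointers are dword-aligned, so the low bits get masked off. A hypothetical, self-contained sketch of the same walk over a fake config space follows; the diff does not show the value of BIT32_ALIGN_MASK, so 0xFC is assumed here (it clears the two reserved low bits within the 256-byte legacy config space).

// Hypothetical sketch of the capability-list walk above. `read8` reads a
// fake config space; BIT32_ALIGN_MASK is an assumed value, not the kernel's.
const BIT32_ALIGN_MASK: u16 = 0xFC;

fn read8(config_space: &[u8], offset: u16) -> u8 {
    config_space[offset as usize]
}

fn collect_cap_ptrs(config_space: &[u8], mut cap_ptr: u16) -> Vec<u16> {
    let mut cap_ptr_vec = Vec::new();
    while cap_ptr > 0 {
        cap_ptr_vec.push(cap_ptr);
        // Each capability stores the offset of the next one at byte 1;
        // the mask drops the reserved low bits.
        cap_ptr = read8(config_space, cap_ptr + 1) as u16 & BIT32_ALIGN_MASK;
    }
    cap_ptr_vec.sort();
    cap_ptr_vec
}

fn main() {
    // Fake config space: a capability at 0x40 links to 0x50, which ends the list.
    let mut space = vec![0u8; 256];
    space[0x41] = 0x50; // next pointer of the capability at 0x40
    space[0x51] = 0x00; // end of list
    assert_eq!(collect_cap_ptrs(&space, 0x40), vec![0x40, 0x50]);
}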

View File

@@ -23,8 +23,8 @@ pub mod bus;
 pub mod config;
 pub mod cpu;
 mod error;
-pub mod logger;
 pub mod io_mem;
+pub mod logger;
 pub mod prelude;
 pub mod sync;
 pub mod task;

View File

@@ -9,9 +9,6 @@ mod wait;
 pub use self::atomic_bits::AtomicBits;
 pub use self::mutex::{Mutex, MutexGuard};
 pub use self::rcu::{pass_quiescent_state, OwnerPtr, Rcu, RcuReadGuard, RcuReclaimer};
-pub use self::rwlock::{
-    RwLock, RwLockReadGuard, RwLockReadIrqDisabledGuard, RwLockWriteGuard,
-    RwLockWriteIrqDisabledGuard,
-};
-pub use self::spin::{SpinLock, SpinLockGuard, SpinLockIrqDisabledGuard};
+pub use self::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
+pub use self::spin::{SpinLock, SpinLockGuard};
 pub use self::wait::WaitQueue;
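The re-exports shrink because the *IrqDisabledGuard types are gone: lock() and lock_irq_disabled() now return one and the same guard type. A toy model of that design choice, with std's Mutex standing in for the kernel lock, shows why callers no longer need to name two guard types (illustrative only; the real methods differ in what CPU state they disable, not in their return type).

use std::sync::{Mutex, MutexGuard};

struct SpinLock<T>(Mutex<T>);
struct SpinLockGuard<'a, T>(MutexGuard<'a, T>);

impl<T> SpinLock<T> {
    fn lock(&self) -> SpinLockGuard<'_, T> {
        SpinLockGuard(self.0.lock().unwrap())
    }
    // The real kernel also disables IRQs here; this toy model only
    // demonstrates that both paths now share one guard type.
    fn lock_irq_disabled(&self) -> SpinLockGuard<'_, T> {
        self.lock()
    }
}

fn main() {
    let lock = SpinLock(Mutex::new(0u32));
    let g1: SpinLockGuard<'_, u32> = lock.lock();
    drop(g1);
    let g2: SpinLockGuard<'_, u32> = lock.lock_irq_disabled();
    assert_eq!(*g2.0, 0);
}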

View File

@@ -4,6 +4,7 @@ use core::ops::{Deref, DerefMut};
 use core::sync::atomic::AtomicUsize;
 use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
 
+use crate::task::{disable_preempt, DisablePreemptGuard};
 use crate::trap::disable_local;
 use crate::trap::DisabledLocalIrqGuard;

@@ -37,7 +38,7 @@ impl<T> RwLock<T> {
     /// This method runs in a busy loop until the lock can be acquired (when there are
     /// no writers).
     /// After acquiring the spin lock, all interrupts are disabled.
-    pub fn read_irq_disabled(&self) -> RwLockReadIrqDisabledGuard<T> {
+    pub fn read_irq_disabled(&self) -> RwLockReadGuard<T> {
         loop {
             if let Some(readguard) = self.try_read_irq_disabled() {
                 return readguard;

@@ -53,7 +54,7 @@ impl<T> RwLock<T> {
     /// This method runs in a busy loop until the lock can be acquired (when there are
     /// no writers and readers).
     /// After acquiring the spin lock, all interrupts are disabled.
-    pub fn write_irq_disabled(&self) -> RwLockWriteIrqDisabledGuard<T> {
+    pub fn write_irq_disabled(&self) -> RwLockWriteGuard<T> {
         loop {
             if let Some(writeguard) = self.try_write_irq_disabled() {
                 return writeguard;

@@ -64,14 +65,13 @@ impl<T> RwLock<T> {
     }
 
     /// Try acquire a read lock with disabling local IRQs.
-    pub fn try_read_irq_disabled(&self) -> Option<RwLockReadIrqDisabledGuard<T>> {
-        // FIXME: add disable_preemption
+    pub fn try_read_irq_disabled(&self) -> Option<RwLockReadGuard<T>> {
         let irq_guard = disable_local();
         let lock = self.lock.fetch_add(READER, Acquire);
         if lock & (WRITER | MAX_READER) == 0 {
-            Some(RwLockReadIrqDisabledGuard {
+            Some(RwLockReadGuard {
                 inner: &self,
-                irq_guard,
+                inner_guard: InnerGuard::IrqGuard(irq_guard),
             })
         } else {
             self.lock.fetch_sub(READER, Release);

@@ -80,17 +80,16 @@ impl<T> RwLock<T> {
     }
 
     /// Try acquire a write lock with disabling local IRQs.
-    pub fn try_write_irq_disabled(&self) -> Option<RwLockWriteIrqDisabledGuard<T>> {
-        // FIXME: add disable_preemption
+    pub fn try_write_irq_disabled(&self) -> Option<RwLockWriteGuard<T>> {
         let irq_guard = disable_local();
         if self
             .lock
             .compare_exchange(0, WRITER, Acquire, Relaxed)
             .is_ok()
         {
-            Some(RwLockWriteIrqDisabledGuard {
+            Some(RwLockWriteGuard {
                 inner: &self,
-                irq_guard,
+                inner_guard: InnerGuard::IrqGuard(irq_guard),
             })
         } else {
             None

@@ -127,10 +126,13 @@ impl<T> RwLock<T> {
     /// Try acquire a read lock without disabling the local IRQs.
     pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
-        // FIXME: add disable_preemption
+        let guard = disable_preempt();
         let lock = self.lock.fetch_add(READER, Acquire);
         if lock & (WRITER | MAX_READER) == 0 {
-            Some(RwLockReadGuard { inner: &self })
+            Some(RwLockReadGuard {
+                inner: &self,
+                inner_guard: InnerGuard::PreemptGuard(guard),
+            })
         } else {
             self.lock.fetch_sub(READER, Release);
             None

@@ -139,13 +141,16 @@ impl<T> RwLock<T> {
     /// Try acquire a write lock without disabling the local IRQs.
     pub fn try_write(&self) -> Option<RwLockWriteGuard<T>> {
-        // FIXME: add disable_preemption
+        let guard = disable_preempt();
         if self
             .lock
             .compare_exchange(0, WRITER, Acquire, Relaxed)
             .is_ok()
         {
-            Some(RwLockWriteGuard { inner: &self })
+            Some(RwLockWriteGuard {
+                inner: &self,
+                inner_guard: InnerGuard::PreemptGuard(guard),
+            })
         } else {
             None
         }

@@ -163,103 +168,21 @@ impl<T: fmt::Debug> fmt::Debug for RwLock<T> {
 unsafe impl<T: Send> Send for RwLock<T> {}
 unsafe impl<T: Send + Sync> Sync for RwLock<T> {}
 
-impl<'a, T> !Send for RwLockWriteIrqDisabledGuard<'a, T> {}
-unsafe impl<T: Sync> Sync for RwLockWriteIrqDisabledGuard<'_, T> {}
-
-impl<'a, T> !Send for RwLockReadIrqDisabledGuard<'a, T> {}
-unsafe impl<T: Sync> Sync for RwLockReadIrqDisabledGuard<'_, T> {}
-
-/// The guard of a read lock that disables the local IRQs.
-pub struct RwLockReadIrqDisabledGuard<'a, T> {
-    inner: &'a RwLock<T>,
-    irq_guard: DisabledLocalIrqGuard,
-}
-
-/// Upgrade a read lock that disables the local IRQs to a write lock.
-///
-/// This method first release the old read lock and then aquire a new write lock.
-/// So it may not return the guard immidiately
-/// due to other readers or another writer.
-impl<'a, T> RwLockReadIrqDisabledGuard<'a, T> {
-    pub fn upgrade(mut self) -> RwLockWriteIrqDisabledGuard<'a, T> {
-        let inner = self.inner;
-        let irq_guard = self.irq_guard.transfer_to();
-        drop(self);
-        while inner
-            .lock
-            .compare_exchange(0, WRITER, Acquire, Relaxed)
-            .is_err()
-        {
-            core::hint::spin_loop();
-        }
-        RwLockWriteIrqDisabledGuard { inner, irq_guard }
-    }
-}
-
-impl<'a, T> Deref for RwLockReadIrqDisabledGuard<'a, T> {
-    type Target = T;
-
-    fn deref(&self) -> &T {
-        unsafe { &*self.inner.val.get() }
-    }
-}
-
-impl<'a, T> Drop for RwLockReadIrqDisabledGuard<'a, T> {
-    fn drop(&mut self) {
-        self.inner.lock.fetch_sub(READER, Release);
-    }
-}
-
-/// The guard of a write lock that disables the local IRQs.
-pub struct RwLockWriteIrqDisabledGuard<'a, T> {
-    inner: &'a RwLock<T>,
-    irq_guard: DisabledLocalIrqGuard,
-}
-
-/// Downgrade a write lock that disables the local IRQs to a read lock.
-///
-/// This method can return the read guard immidiately
-/// due to there must be no other users.
-impl<'a, T> RwLockWriteIrqDisabledGuard<'a, T> {
-    pub fn downgrade(mut self) -> RwLockReadIrqDisabledGuard<'a, T> {
-        self.inner.lock.fetch_add(READER, Acquire);
-        let inner = self.inner;
-        let irq_guard = self.irq_guard.transfer_to();
-        drop(self);
-        let irq_guard = disable_local();
-        RwLockReadIrqDisabledGuard { inner, irq_guard }
-    }
-}
-
-impl<'a, T> Deref for RwLockWriteIrqDisabledGuard<'a, T> {
-    type Target = T;
-
-    fn deref(&self) -> &T {
-        unsafe { &*self.inner.val.get() }
-    }
-}
-
-impl<'a, T> DerefMut for RwLockWriteIrqDisabledGuard<'a, T> {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        unsafe { &mut *self.inner.val.get() }
-    }
-}
-
-impl<'a, T> Drop for RwLockWriteIrqDisabledGuard<'a, T> {
-    fn drop(&mut self) {
-        self.inner.lock.fetch_and(!(WRITER), Release);
-    }
-}
-
 impl<'a, T> !Send for RwLockWriteGuard<'a, T> {}
 unsafe impl<T: Sync> Sync for RwLockWriteGuard<'_, T> {}
 
 impl<'a, T> !Send for RwLockReadGuard<'a, T> {}
 unsafe impl<T: Sync> Sync for RwLockReadGuard<'_, T> {}
 
+enum InnerGuard {
+    IrqGuard(DisabledLocalIrqGuard),
+    PreemptGuard(DisablePreemptGuard),
+}
+
 /// The guard of the read lock.
 pub struct RwLockReadGuard<'a, T> {
     inner: &'a RwLock<T>,
+    inner_guard: InnerGuard,
 }
 
 /// Upgrade a read lock to a write lock.

@@ -268,8 +191,14 @@ pub struct RwLockReadGuard<'a, T> {
 /// So it may not return the write guard immidiately
 /// due to other readers or another writer.
 impl<'a, T> RwLockReadGuard<'a, T> {
-    pub fn upgrade(self) -> RwLockWriteGuard<'a, T> {
+    pub fn upgrade(mut self) -> RwLockWriteGuard<'a, T> {
         let inner = self.inner;
+        let inner_guard = match &mut self.inner_guard {
+            InnerGuard::IrqGuard(irq_guard) => InnerGuard::IrqGuard(irq_guard.transfer_to()),
+            InnerGuard::PreemptGuard(preempt_guard) => {
+                InnerGuard::PreemptGuard(preempt_guard.transfer_to())
+            }
+        };
         drop(self);
         while inner
             .lock

@@ -278,7 +207,7 @@ impl<'a, T> RwLockReadGuard<'a, T> {
         {
             core::hint::spin_loop();
         }
-        RwLockWriteGuard { inner }
+        RwLockWriteGuard { inner, inner_guard }
     }
 }

@@ -298,6 +227,7 @@ impl<'a, T> Drop for RwLockReadGuard<'a, T> {
 pub struct RwLockWriteGuard<'a, T> {
     inner: &'a RwLock<T>,
+    inner_guard: InnerGuard,
 }
 
 /// Downgrade a write lock to a read lock.

@@ -305,11 +235,15 @@ pub struct RwLockWriteGuard<'a, T> {
 /// This method can return the read guard immidiately
 /// due to there are no other users.
 impl<'a, T> RwLockWriteGuard<'a, T> {
-    pub fn downgrade(self) -> RwLockReadGuard<'a, T> {
+    pub fn downgrade(mut self) -> RwLockReadGuard<'a, T> {
         self.inner.lock.fetch_add(READER, Acquire);
         let inner = self.inner;
+        let inner_guard = match &mut self.inner_guard {
+            InnerGuard::IrqGuard(irq_guard) => InnerGuard::IrqGuard(irq_guard.transfer_to()),
+            InnerGuard::PreemptGuard(preempt_guard) => InnerGuard::PreemptGuard(disable_preempt()),
+        };
         drop(self);
-        RwLockReadGuard { inner }
+        RwLockReadGuard { inner, inner_guard }
     }
 }
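A detail worth spelling out in downgrade(): the READER count is bumped before the WRITER bit is cleared (the write guard's drop handler does fetch_and(!WRITER)), so at no instant is the lock free for another writer to slip in, and the returned read guard is valid immediately. A runnable model of just that handoff follows; the bit layout is assumed to mirror the kernel's, since the diff does not show the READER/WRITER constants.

// Toy model of the write-to-read downgrade handoff shown above.
use std::sync::atomic::{AtomicUsize, Ordering::{Acquire, Release}};

const READER: usize = 1;                         // assumed layout
const WRITER: usize = 1 << (usize::BITS - 1);    // assumed layout

fn downgrade(lock: &AtomicUsize) {
    lock.fetch_add(READER, Acquire);  // become a reader first...
    lock.fetch_and(!WRITER, Release); // ...then drop the writer bit
}

fn main() {
    let lock = AtomicUsize::new(WRITER); // held for writing
    downgrade(&lock);
    assert_eq!(lock.load(Acquire), READER); // exactly one reader remains
}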

View File

@@ -3,6 +3,7 @@ use core::fmt;
 use core::ops::{Deref, DerefMut};
 use core::sync::atomic::{AtomicBool, Ordering};
 
+use crate::task::{disable_preempt, DisablePreemptGuard};
 use crate::trap::disable_local;
 use crate::trap::DisabledLocalIrqGuard;

@@ -26,24 +27,22 @@ impl<T> SpinLock<T> {
     ///
     /// This method runs in a busy loop until the lock can be acquired.
     /// After acquiring the spin lock, all interrupts are disabled.
-    pub fn lock_irq_disabled(&self) -> SpinLockIrqDisabledGuard<T> {
-        // FIXME: add disable_preemption
+    pub fn lock_irq_disabled(&self) -> SpinLockGuard<T> {
         let guard = disable_local();
         self.acquire_lock();
-        SpinLockIrqDisabledGuard {
+        SpinLockGuard {
             lock: &self,
-            irq_guard: guard,
+            inner_guard: InnerGuard::IrqGuard(guard),
         }
     }
 
     /// Try acquiring the spin lock immedidately with disabling the local IRQs.
-    pub fn try_lock_irq_disabled(&self) -> Option<SpinLockIrqDisabledGuard<T>> {
-        // FIXME: add disable_preemption
+    pub fn try_lock_irq_disabled(&self) -> Option<SpinLockGuard<T>> {
         let irq_guard = disable_local();
         if self.try_acquire_lock() {
-            let lock_guard = SpinLockIrqDisabledGuard {
+            let lock_guard = SpinLockGuard {
                 lock: &self,
-                irq_guard,
+                inner_guard: InnerGuard::IrqGuard(irq_guard),
             };
             return Some(lock_guard);
         }

@@ -59,16 +58,22 @@ impl<T> SpinLock<T> {
     /// in the interrupt context, then it is ok to use this method
     /// in the process context.
     pub fn lock(&self) -> SpinLockGuard<T> {
-        // FIXME: add disable_preemption
+        let guard = disable_preempt();
         self.acquire_lock();
-        SpinLockGuard { lock: &self }
+        SpinLockGuard {
+            lock: &self,
+            inner_guard: InnerGuard::PreemptGuard(guard),
+        }
     }
 
     /// Try acquiring the spin lock immedidately without disabling the local IRQs.
     pub fn try_lock(&self) -> Option<SpinLockGuard<T>> {
-        // FIXME: add disable_preemption
+        let guard = disable_preempt();
         if self.try_acquire_lock() {
-            let lock_guard = SpinLockGuard { lock: &self };
+            let lock_guard = SpinLockGuard {
+                lock: &self,
+                inner_guard: InnerGuard::PreemptGuard(guard),
+            };
             return Some(lock_guard);
         }
         return None;

@@ -102,46 +107,15 @@ impl<T: fmt::Debug> fmt::Debug for SpinLock<T> {
 unsafe impl<T: Send> Send for SpinLock<T> {}
 unsafe impl<T: Send> Sync for SpinLock<T> {}
 
+enum InnerGuard {
+    IrqGuard(DisabledLocalIrqGuard),
+    PreemptGuard(DisablePreemptGuard),
+}
+
 /// The guard of a spin lock that disables the local IRQs.
-pub struct SpinLockIrqDisabledGuard<'a, T> {
-    lock: &'a SpinLock<T>,
-    irq_guard: DisabledLocalIrqGuard,
-}
-
-impl<'a, T> Deref for SpinLockIrqDisabledGuard<'a, T> {
-    type Target = T;
-
-    fn deref(&self) -> &T {
-        unsafe { &mut *self.lock.val.get() }
-    }
-}
-
-impl<'a, T> DerefMut for SpinLockIrqDisabledGuard<'a, T> {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        unsafe { &mut *self.lock.val.get() }
-    }
-}
-
-impl<'a, T> Drop for SpinLockIrqDisabledGuard<'a, T> {
-    fn drop(&mut self) {
-        self.lock.release_lock();
-    }
-}
-
-impl<'a, T: fmt::Debug> fmt::Debug for SpinLockIrqDisabledGuard<'a, T> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Debug::fmt(&**self, f)
-    }
-}
-
-impl<'a, T> !Send for SpinLockIrqDisabledGuard<'a, T> {}
-
-// Safety. `SpinLockIrqDisabledGuard` can be shared between tasks/threads in same CPU.
-// As `lock_irq_disabled()` disables interrupts to prevent race conditions caused by interrupts.
-unsafe impl<T: Sync> Sync for SpinLockIrqDisabledGuard<'_, T> {}
-
 pub struct SpinLockGuard<'a, T> {
     lock: &'a SpinLock<T>,
+    inner_guard: InnerGuard,
 }
 
 impl<'a, T> Deref for SpinLockGuard<'a, T> {
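The InnerGuard enum is the pivot of the whole refactor: a single SpinLockGuard owns whichever RAII guard was taken at acquisition time, and dropping the lock guard drops the inner guard, restoring the earlier CPU state. A self-contained sketch of the pattern, with print statements standing in for the real IRQ and preemption bookkeeping:

// One guard type that erases which state-guard was taken; Drop runs the right one.
struct IrqGuard;
struct PreemptGuard;

impl Drop for IrqGuard {
    fn drop(&mut self) {
        println!("IRQs restored");
    }
}
impl Drop for PreemptGuard {
    fn drop(&mut self) {
        println!("preemption re-enabled");
    }
}

enum InnerGuard {
    Irq(IrqGuard),
    Preempt(PreemptGuard),
}

struct LockGuard {
    _inner: InnerGuard, // dropped together with the lock guard
}

fn lock_irq_disabled() -> LockGuard {
    LockGuard { _inner: InnerGuard::Irq(IrqGuard) }
}
fn lock() -> LockGuard {
    LockGuard { _inner: InnerGuard::Preempt(PreemptGuard) }
}

fn main() {
    let g1 = lock_irq_disabled();
    drop(g1); // prints "IRQs restored"
    let g2 = lock();
    drop(g2); // prints "preemption re-enabled"
}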

View File

@@ -5,6 +5,6 @@ mod scheduler;
 #[allow(clippy::module_inception)]
 mod task;
 
-pub use self::processor::{current_task, schedule};
+pub use self::processor::{current_task, disable_preempt, schedule, DisablePreemptGuard};
 pub use self::scheduler::{add_task, set_scheduler, Scheduler};
 pub use self::task::{Task, TaskAdapter, TaskStatus};

View File

@@ -1,3 +1,10 @@
+use core::sync::atomic::AtomicUsize;
+
+use crate::cpu::CpuLocal;
+use crate::cpu_local;
+
+use core::sync::atomic::Ordering::Relaxed;
+
 use super::{
     scheduler::{fetch_task, GLOBAL_SCHEDULER},
     task::{context_switch, TaskContext},

@@ -5,6 +12,7 @@ use super::{
 };
 use alloc::sync::Arc;
 use lazy_static::lazy_static;
+use log::warn;
 use spin::Mutex;
 
 pub struct Processor {

@@ -64,6 +72,14 @@ pub fn schedule() {
 ///
 /// before context switch, current task will switch to the next task
 pub fn switch_to_task(next_task: Arc<Task>) {
+    if !PREEMPT_COUNT.is_preemptive() {
+        panic!(
+            "Calling schedule() while holding {} locks",
+            PREEMPT_COUNT.num_locks()
+        );
+        //GLOBAL_SCHEDULER.lock_irq_disabled().enqueue(next_task);
+        //return;
+    }
     let current_task_option = current_task();
     let next_task_cx_ptr = &next_task.inner_ctx() as *const TaskContext;
     let current_task: Arc<Task>;

@@ -85,3 +101,70 @@ pub fn switch_to_task(next_task: Arc<Task>) {
         context_switch(current_task_cx_ptr, next_task_cx_ptr);
     }
 }
+
+cpu_local! {
+    static PREEMPT_COUNT: PreemptInfo = PreemptInfo::new();
+}
+
+/// Currently, `PreemptInfo` only holds the number of spin
+/// locks held by the current CPU. When it has a non-zero value,
+/// the CPU cannot call `schedule()`.
+struct PreemptInfo {
+    num_locks: AtomicUsize,
+}
+
+impl PreemptInfo {
+    const fn new() -> Self {
+        Self {
+            num_locks: AtomicUsize::new(0),
+        }
+    }
+
+    fn increase_num_locks(&self) {
+        self.num_locks.fetch_add(1, Relaxed);
+    }
+
+    fn decrease_num_locks(&self) {
+        self.num_locks.fetch_sub(1, Relaxed);
+    }
+
+    fn is_preemptive(&self) -> bool {
+        self.num_locks.load(Relaxed) == 0
+    }
+
+    fn num_locks(&self) -> usize {
+        self.num_locks.load(Relaxed)
+    }
+}
+
+/// A guard for disabled preemption.
+pub struct DisablePreemptGuard {
+    // This private field prevents user from constructing values of this type directly.
+    private: (),
+}
+
+impl !Send for DisablePreemptGuard {}
+
+impl DisablePreemptGuard {
+    fn new() -> Self {
+        PREEMPT_COUNT.increase_num_locks();
+        Self { private: () }
+    }
+
+    /// Transfer this guard to a new guard.
+    /// This guard must be dropped after this function.
+    pub fn transfer_to(&self) -> Self {
+        disable_preempt()
+    }
+}
+
+impl Drop for DisablePreemptGuard {
+    fn drop(&mut self) {
+        PREEMPT_COUNT.decrease_num_locks();
+    }
+}
+
+#[must_use]
+pub fn disable_preempt() -> DisablePreemptGuard {
+    DisablePreemptGuard::new()
+}
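One subtlety in transfer_to(): the replacement guard is created before the caller drops the old one, so the lock count goes from 1 to 2 and back to 1, never touching zero mid-transfer, which would otherwise open a window where the scheduler check passes. A minimal model of that invariant (a global counter stands in for the cpu_local! one, as before):

use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

static PREEMPT_COUNT: AtomicUsize = AtomicUsize::new(0);

struct DisablePreemptGuard;

impl DisablePreemptGuard {
    fn new() -> Self {
        PREEMPT_COUNT.fetch_add(1, Relaxed);
        Self
    }
    // The new guard is created first (count 1 -> 2); the caller then
    // drops the old guard (count 2 -> 1). The count never hits 0.
    fn transfer_to(&self) -> Self {
        Self::new()
    }
}

impl Drop for DisablePreemptGuard {
    fn drop(&mut self) {
        PREEMPT_COUNT.fetch_sub(1, Relaxed);
    }
}

fn main() {
    let old = DisablePreemptGuard::new();
    assert_eq!(PREEMPT_COUNT.load(Relaxed), 1);
    let new = old.transfer_to();
    drop(old);
    assert_eq!(PREEMPT_COUNT.load(Relaxed), 1); // still non-preemptible
    drop(new);
    assert_eq!(PREEMPT_COUNT.load(Relaxed), 0);
}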

View File

@@ -1,12 +1,10 @@
 use crate::arch::irq;
 use crate::arch::irq::{IRQ_LIST, NOT_USING_IRQ};
-use crate::cpu::CpuLocal;
-use crate::cpu_local;
+use crate::task::{disable_preempt, DisablePreemptGuard};
 use crate::util::recycle_allocator::RecycleAllocator;
 use crate::{prelude::*, Error};
 use core::fmt::Debug;
-use core::sync::atomic::{AtomicBool, AtomicU32, Ordering::Relaxed};
 use spin::{Mutex, MutexGuard};
 use trapframe::TrapFrame;

@@ -198,6 +196,7 @@ pub fn disable_local() -> DisabledLocalIrqGuard {
 /// A guard for disabled local IRQs.
 pub struct DisabledLocalIrqGuard {
     was_enabled: bool,
+    preempt_guard: DisablePreemptGuard,
 }
 
 impl !Send for DisabledLocalIrqGuard {}

@@ -208,7 +207,11 @@ impl DisabledLocalIrqGuard {
         if was_enabled {
             irq::disable_local();
         }
-        Self { was_enabled }
+        let preempt_guard = disable_preempt();
+        Self {
+            was_enabled,
+            preempt_guard,
+        }
     }
 
     /// Transfer the saved IRQ status of this guard to a new guard.

@@ -216,7 +219,10 @@ impl DisabledLocalIrqGuard {
     pub fn transfer_to(&mut self) -> Self {
         let was_enabled = self.was_enabled;
         self.was_enabled = false;
-        Self { was_enabled }
+        Self {
+            was_enabled,
+            preempt_guard: disable_preempt(),
+        }
     }
 }
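Embedding a DisablePreemptGuard inside DisabledLocalIrqGuard means that disabling IRQs now also raises the preempt count, and Rust's drop order (the Drop::drop body runs before the struct's fields are dropped) releases the two states in reverse order of acquisition. A sketch of this guard-composition pattern follows; the print statements are stand-ins for the real state changes, and the assumption that the real guard's Drop body re-enables IRQs is not shown in this diff.

struct PreemptGuard;
impl PreemptGuard {
    fn new() -> Self {
        println!("preemption disabled");
        Self
    }
}
impl Drop for PreemptGuard {
    fn drop(&mut self) {
        println!("preemption re-enabled");
    }
}

struct IrqGuard {
    _preempt: PreemptGuard, // taken whenever IRQs are disabled
}

impl IrqGuard {
    fn new() -> Self {
        println!("IRQs disabled");
        Self { _preempt: PreemptGuard::new() }
    }
}

impl Drop for IrqGuard {
    fn drop(&mut self) {
        println!("IRQs restored");
        // `_preempt` drops after this body, re-enabling preemption last.
    }
}

fn main() {
    let g = IrqGuard::new();
    drop(g);
    // Output order: IRQs disabled, preemption disabled,
    //               IRQs restored, preemption re-enabled.
}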

View File

@@ -12,9 +12,7 @@ use core::{
     ops::{Index, IndexMut},
 };
 use font8x8::UnicodeFonts;
-use jinux_frame::{
-    config::PAGE_SIZE, io_mem::IoMem, sync::SpinLock, vm::VmIo, arch::boot,
-};
+use jinux_frame::{arch::boot, config::PAGE_SIZE, io_mem::IoMem, sync::SpinLock, vm::VmIo};
 use spin::Once;
 
 #[init_component]

@@ -55,7 +53,7 @@ pub(crate) fn init() {
             width: framebuffer.width as usize,
             height: framebuffer.height as usize,
             buffer: buffer.leak(),
-        })
+        });
         writer.unwrap()
     };

View File

@@ -38,11 +38,11 @@ impl IfaceCommon {
         }
     }
 
-    pub(super) fn interface(&self) -> SpinLockIrqDisabledGuard<smoltcp::iface::Interface> {
+    pub(super) fn interface(&self) -> SpinLockGuard<smoltcp::iface::Interface> {
         self.interface.lock_irq_disabled()
     }
 
-    pub(super) fn sockets(&self) -> SpinLockIrqDisabledGuard<smoltcp::iface::SocketSet<'static>> {
+    pub(super) fn sockets(&self) -> SpinLockGuard<smoltcp::iface::SocketSet<'static>> {
         self.sockets.lock_irq_disabled()
     }

View File

@@ -65,11 +65,11 @@ mod internal {
     pub trait IfaceInternal {
         fn common(&self) -> &IfaceCommon;
 
         /// The inner socket set
-        fn sockets(&self) -> SpinLockIrqDisabledGuard<SocketSet<'static>> {
+        fn sockets(&self) -> SpinLockGuard<SocketSet<'static>> {
            self.common().sockets()
         }
 
         /// The inner iface.
-        fn iface_inner(&self) -> SpinLockIrqDisabledGuard<smoltcp::iface::Interface> {
+        fn iface_inner(&self) -> SpinLockGuard<smoltcp::iface::Interface> {
             self.common().interface()
         }
 
         /// The time we should do another poll.

View File

@@ -18,7 +18,7 @@ pub(crate) use core::ffi::CStr;
 pub(crate) use int_to_c_enum::TryFromInt;
 pub(crate) use jinux_frame::config::PAGE_SIZE;
 // pub(crate) use jinux_frame::sync::{Mutex, MutexGuard};
-pub(crate) use jinux_frame::sync::{SpinLock, SpinLockGuard, SpinLockIrqDisabledGuard};
+pub(crate) use jinux_frame::sync::{SpinLock, SpinLockGuard};
 pub(crate) use jinux_frame::vm::Vaddr;
 pub(crate) use jinux_frame::{print, println};
 pub(crate) use log::{debug, error, info, trace, warn};