mirror of https://github.com/asterinas/asterinas.git
Add guard which disables bottom half
This commit is contained in:
commit 9804f053f2 (parent e0bda4677c)
@@ -10,12 +10,18 @@ use alloc::boxed::Box;
 use core::sync::atomic::{AtomicU8, Ordering};
 
 use component::{init_component, ComponentInitError};
-use ostd::{cpu_local_cell, trap::register_bottom_half_handler};
+use lock::is_softirq_enabled;
+use ostd::{
+    cpu_local_cell,
+    trap::{disable_local, register_bottom_half_handler, DisabledLocalIrqGuard},
+};
 use spin::Once;
 
+mod lock;
 pub mod softirq_id;
 mod taskless;
 
+pub use lock::{BottomHalfDisabled, DisableLocalBottomHalfGuard};
 pub use taskless::Taskless;
 
 /// A representation of a software interrupt (softirq) line.
@@ -122,10 +128,19 @@ cpu_local_cell! {
 }
 
 /// Processes pending softirqs.
+fn process_pending(irq_guard: DisabledLocalIrqGuard) -> DisabledLocalIrqGuard {
+    if !is_softirq_enabled() {
+        return irq_guard;
+    }
+
+    process_all_pending(irq_guard)
+}
+
+/// Processes all pending softirqs regardless of whether softirqs are disabled.
 ///
 /// The processing instructions will iterate for `SOFTIRQ_RUN_TIMES` times. If any softirq
 /// is raised during the iteration, it will be processed.
-fn process_pending() {
+fn process_all_pending(mut irq_guard: DisabledLocalIrqGuard) -> DisabledLocalIrqGuard {
     const SOFTIRQ_RUN_TIMES: u8 = 5;
 
     for _i in 0..SOFTIRQ_RUN_TIMES {
@@ -138,10 +153,19 @@ fn process_pending() {
         if action_mask == 0 {
             break;
         }
+
+        drop(irq_guard);
+
         while action_mask > 0 {
             let action_id = u8::trailing_zeros(action_mask) as u8;
             SoftIrqLine::get(action_id).callback.get().unwrap()();
             action_mask &= action_mask - 1;
         }
+
+        irq_guard = disable_local();
     }
+
+    // TODO: Wake up ksoftirqd if some softirqs are still pending.
+
+    irq_guard
 }
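Note: with this change, `process_pending` has exactly the signature that the reworked `register_bottom_half_handler` in ostd expects (see the hunks further below). A minimal, hypothetical sketch of how the softirq component would wire it up during initialization; the `init` function here is illustrative and not part of this diff:

```rust
use ostd::trap::register_bottom_half_handler;

fn init() {
    // `process_pending` takes and returns a `DisabledLocalIrqGuard`, so it
    // matches the new `fn(DisabledLocalIrqGuard) -> DisabledLocalIrqGuard`
    // handler type and can be registered directly.
    register_bottom_half_handler(process_pending);
}
```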
kernel/comps/softirq/src/lock.rs (new file, 176 lines)
@@ -0,0 +1,176 @@
// SPDX-License-Identifier: MPL-2.0

use ostd::{
    cpu_local_cell,
    sync::{GuardTransfer, SpinGuardian},
    task::{
        atomic_mode::{AsAtomicModeGuard, InAtomicMode},
        disable_preempt, DisabledPreemptGuard,
    },
    trap::{disable_local, in_interrupt_context},
};

use crate::process_all_pending;

cpu_local_cell! {
    static DISABLE_SOFTIRQ_COUNT: u8 = 0;
}

/// Returns whether softirqs are enabled on the local CPU.
pub(super) fn is_softirq_enabled() -> bool {
    DISABLE_SOFTIRQ_COUNT.load() == 0
}

/// A guardian that disables the bottom half while holding a lock.
pub enum BottomHalfDisabled {}

/// A guard for disabled local softirqs.
pub struct DisableLocalBottomHalfGuard {
    preempt: DisabledPreemptGuard,
}

impl AsAtomicModeGuard for DisableLocalBottomHalfGuard {
    fn as_atomic_mode_guard(&self) -> &dyn InAtomicMode {
        &self.preempt
    }
}

impl Drop for DisableLocalBottomHalfGuard {
    fn drop(&mut self) {
        // Once the guard is dropped, we process pending softirqs in the
        // current thread's context if softirqs are about to be re-enabled.
        // This behavior is similar to how Linux handles pending softirqs.
        if DISABLE_SOFTIRQ_COUNT.load() == 1 && !in_interrupt_context() {
            // Preemption and softirqs are still disabled at this point, so we
            // are guaranteed to process any pending softirqs on the current CPU.
            let irq_guard = disable_local();
            let irq_guard = process_all_pending(irq_guard);

            // To avoid race conditions, we should decrease the softirq count first,
            // then drop the IRQ guard.
            DISABLE_SOFTIRQ_COUNT.sub_assign(1);
            drop(irq_guard);
            return;
        }

        DISABLE_SOFTIRQ_COUNT.sub_assign(1);
    }
}

#[must_use]
fn disable_local_bottom_half() -> DisableLocalBottomHalfGuard {
    // When disabling softirqs, we must also disable preemption to prevent
    // the current task from being migrated to another CPU.
    let preempt = disable_preempt();
    DISABLE_SOFTIRQ_COUNT.add_assign(1);
    DisableLocalBottomHalfGuard { preempt }
}

impl GuardTransfer for DisableLocalBottomHalfGuard {
    fn transfer_to(&mut self) -> Self {
        disable_local_bottom_half()
    }
}

impl SpinGuardian for BottomHalfDisabled {
    type Guard = DisableLocalBottomHalfGuard;
    type ReadGuard = DisableLocalBottomHalfGuard;

    fn read_guard() -> Self::ReadGuard {
        disable_local_bottom_half()
    }

    fn guard() -> Self::Guard {
        disable_local_bottom_half()
    }
}

#[cfg(ktest)]
mod test {
    use ostd::{
        prelude::*,
        sync::{RwLock, SpinLock},
    };

    use super::*;

    #[ktest]
    fn spinlock_disable_bh() {
        let lock = SpinLock::<(), BottomHalfDisabled>::new(());

        assert!(is_softirq_enabled());

        let guard = lock.lock();
        assert!(!is_softirq_enabled());

        drop(guard);
        assert!(is_softirq_enabled());
    }

    #[ktest]
    fn nested_spin_lock_disable_bh() {
        let lock1 = SpinLock::<(), BottomHalfDisabled>::new(());
        let lock2 = SpinLock::<(), BottomHalfDisabled>::new(());

        assert!(is_softirq_enabled());

        let guard1 = lock1.lock();
        let guard2 = lock2.lock();
        assert!(!is_softirq_enabled());

        drop(guard1);
        assert!(!is_softirq_enabled());

        drop(guard2);
        assert!(is_softirq_enabled());
    }

    #[ktest]
    fn rwlock_disable_bh() {
        let rwlock: RwLock<(), BottomHalfDisabled> = RwLock::new(());

        assert!(is_softirq_enabled());

        let write_guard = rwlock.write();
        assert!(!is_softirq_enabled());

        drop(write_guard);
        assert!(is_softirq_enabled());
    }

    #[ktest]
    fn nested_rwlock_disable_bh() {
        let rwlock: RwLock<(), BottomHalfDisabled> = RwLock::new(());

        assert!(is_softirq_enabled());

        let read_guard1 = rwlock.read();
        let read_guard2 = rwlock.read();
        assert!(!is_softirq_enabled());

        drop(read_guard1);
        assert!(!is_softirq_enabled());

        drop(read_guard2);
        assert!(is_softirq_enabled());
    }

    #[ktest]
    fn upgradable_rwlock_disable_bh() {
        let rwlock: RwLock<(), BottomHalfDisabled> = RwLock::new(());

        assert!(is_softirq_enabled());

        let upgrade_guard = rwlock.upread();
        assert!(!is_softirq_enabled());

        let write_guard = upgrade_guard.upgrade();
        assert!(!is_softirq_enabled());

        let read_guard = write_guard.downgrade();
        assert!(!is_softirq_enabled());

        drop(read_guard);
        assert!(is_softirq_enabled());
    }
}
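Note: a hedged usage sketch, not part of the commit, to make the intent of `BottomHalfDisabled` concrete. Data touched both by a softirq callback and by process context can be protected with a `SpinLock` parameterized by this guardian; while the lock is held, the bottom half cannot run on the same CPU, so the callback can never preempt the critical section and deadlock on the lock. The `RxState` type and both functions are made up for illustration and assume they live inside the softirq crate.

```rust
use ostd::sync::SpinLock;

use crate::BottomHalfDisabled;

// Hypothetical state shared between process context and a softirq callback.
struct RxState {
    rx_bytes: u64,
}

// Process context: taking the lock bumps DISABLE_SOFTIRQ_COUNT, so the
// softirq callback below cannot run on this CPU until the guard is dropped.
fn account_rx(state: &SpinLock<RxState, BottomHalfDisabled>, len: u64) {
    let mut guard = state.lock();
    guard.rx_bytes += len;
    // Dropping the guard re-enables the bottom half and, if this was the
    // outermost guard, immediately processes any softirqs raised meanwhile.
}

// Hypothetical softirq callback that takes the same lock.
fn on_rx_softirq(state: &SpinLock<RxState, BottomHalfDisabled>) {
    state.lock().rx_bytes += 1;
}
```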
@@ -32,7 +32,7 @@ pub trait GuardTransfer {
 }
 
 /// A guardian that disables preemption while holding a lock.
-pub struct PreemptDisabled;
+pub enum PreemptDisabled {}
 
 impl SpinGuardian for PreemptDisabled {
     type Guard = DisabledPreemptGuard;
@@ -53,7 +53,7 @@ impl SpinGuardian for PreemptDisabled {
 /// IRQ handlers are allowed to get executed while holding the
 /// lock. For example, if a lock is never used in the interrupt
 /// context, then it is ok not to use this guardian in the process context.
-pub struct LocalIrqDisabled;
+pub enum LocalIrqDisabled {}
 
 impl SpinGuardian for LocalIrqDisabled {
     type Guard = DisabledLocalIrqGuard;
@@ -79,7 +79,7 @@ impl SpinGuardian for LocalIrqDisabled {
 ///
 /// [`RwLock`]: super::RwLock
 /// [`SpinLock`]: super::SpinLock
-pub struct WriteIrqDisabled;
+pub enum WriteIrqDisabled {}
 
 impl SpinGuardian for WriteIrqDisabled {
     type Guard = DisabledLocalIrqGuard;
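Note: the `struct` → `enum` changes above turn each guardian marker into an uninhabited type: it can still be named in type position (e.g. as the guardian parameter of a lock), but no value of it can ever be constructed. A tiny standalone sketch of the pattern; the names here are illustrative, not from the diff:

```rust
// An uninhabited enum has no variants, so a value of it can never exist;
// it is useful purely at the type level, e.g. as a marker type parameter.
enum MarkerOnly {}

// Generic code can still name the marker...
fn strategy_name<M>() -> &'static str {
    core::any::type_name::<M>()
}

fn demo() -> &'static str {
    // ...but no expression of type `MarkerOnly` can be written.
    strategy_name::<MarkerOnly>()
}
```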
@@ -11,9 +11,9 @@ mod rwmutex;
 mod spin;
 mod wait;
 
-pub(crate) use self::{guard::GuardTransfer, rcu::finish_grace_period};
+pub(crate) use self::rcu::finish_grace_period;
 pub use self::{
-    guard::{LocalIrqDisabled, PreemptDisabled, WriteIrqDisabled},
+    guard::{GuardTransfer, LocalIrqDisabled, PreemptDisabled, SpinGuardian, WriteIrqDisabled},
     mutex::{ArcMutexGuard, Mutex, MutexGuard},
     rcu::{non_null, Rcu, RcuOption, RcuOptionReadGuard, RcuReadGuard},
     rwarc::{RoArc, RwArc},
@@ -2,9 +2,10 @@
 
 use spin::Once;
 
+use super::{disable_local, DisabledLocalIrqGuard};
 use crate::{arch::irq::IRQ_LIST, cpu_local_cell, task::disable_preempt, trap::TrapFrame};
 
-static BOTTOM_HALF_HANDLER: Once<fn()> = Once::new();
+static BOTTOM_HALF_HANDLER: Once<fn(DisabledLocalIrqGuard) -> DisabledLocalIrqGuard> = Once::new();
 
 /// Registers a function to the interrupt bottom half execution.
 ///
@@ -13,10 +14,15 @@ static BOTTOM_HALF_HANDLER: Once<fn()> = Once::new();
 /// In contrast, the bottom half defers less critical tasks to reduce the time spent in
 /// hardware interrupt context, thus allowing the interrupts to be handled more quickly.
 ///
-/// The bottom half handler will be called after the top half with interrupts enabled.
+/// The bottom half handler is called following the execution of the top half.
+/// Because the handler accepts a [`DisabledLocalIrqGuard`] as a parameter,
+/// interrupts are still disabled upon entering the handler.
+/// However, the handler can enable interrupts by internally dropping the guard.
+/// When the handler returns, interrupts should remain disabled,
+/// as the handler is expected to return an IRQ guard.
 ///
 /// This function can only be registered once. Subsequent calls will do nothing.
-pub fn register_bottom_half_handler(func: fn()) {
+pub fn register_bottom_half_handler(func: fn(DisabledLocalIrqGuard) -> DisabledLocalIrqGuard) {
     BOTTOM_HALF_HANDLER.call_once(|| func);
 }
 
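Note: the new handler contract is easiest to see with a concrete example. The following is a hedged sketch, not taken from the diff: the handler receives the IRQ guard, may drop it to run deferred work with interrupts enabled, and must return a fresh guard so that interrupts are disabled again when it returns. `do_deferred_work` and `register` are made-up placeholders.

```rust
use ostd::trap::{disable_local, register_bottom_half_handler, DisabledLocalIrqGuard};

fn do_deferred_work() {
    // Placeholder for the actual deferred processing.
}

fn bottom_half(irq_guard: DisabledLocalIrqGuard) -> DisabledLocalIrqGuard {
    // Interrupts are disabled on entry; dropping the guard allows nested
    // hardware interrupts while the deferred work runs.
    drop(irq_guard);

    do_deferred_work();

    // Hand back a guard so that interrupts are disabled again on return,
    // as the registration contract requires.
    disable_local()
}

fn register() {
    register_bottom_half_handler(bottom_half);
}
```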
@@ -40,9 +46,15 @@ fn process_bottom_half() {
     let preempt_guard = disable_preempt();
     crate::arch::irq::enable_local();
 
-    handler();
+    // We need to ensure that local interrupts are disabled
+    // when the handler returns to prevent race conditions.
+    // See <https://github.com/asterinas/asterinas/pull/1623#discussion_r1964709636> for more details.
+    let irq_guard = disable_local();
+    let irq_guard = handler(irq_guard);
 
-    crate::arch::irq::disable_local();
+    // Interrupts should remain disabled when `process_bottom_half` returns,
+    // so we simply forget the guard.
+    core::mem::forget(irq_guard);
     drop(preempt_guard);
 }
 