Move SoftIRQ implementations to softirq component

This commit is contained in:
Chen Chengjun
2024-10-17 17:41:24 +08:00
committed by Tate, Hongliang Tian
parent 54a807b5f7
commit 2f511069ee
17 changed files with 106 additions and 47 deletions

View File

@@ -0,0 +1,14 @@
[package]
name = "aster-softirq"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
ostd = { path = "../../../ostd" }
component = { path = "../../libs/comp-sys/component" }
intrusive-collections = "0.9.5"
spin = "0.9.4"
[features]

View File

@@ -0,0 +1,171 @@
// SPDX-License-Identifier: MPL-2.0
//! Software interrupt.
#![no_std]
#![deny(unsafe_code)]
extern crate alloc;
use alloc::boxed::Box;
use core::sync::atomic::{AtomicU8, Ordering};
use component::{init_component, ComponentInitError};
use ostd::{cpu_local_cell, trap::register_bottom_half_handler};
use spin::Once;
pub mod softirq_id;
mod taskless;
pub use taskless::Taskless;
/// A representation of a software interrupt (softirq) line.
///
/// # Overview
///
/// Softirq is an interrupt mechanism in the kernel that enables bottom-half processing;
/// they are cheaper to execute compared to regular interrupts because softirqs are less
/// time-critical and thus can be processed in a more flexible manner.
///
/// The `SoftIrqLine` struct encapsulates the data and functionality associated with each
/// softirq line, including an identifier and an associated callback that gets triggered
/// when the softirq is raised.
///
/// The `SoftIrqLine` with the smaller ID has the higher execution priority.
///
/// # Example
///
/// ```
/// // Define an unused softirq id.
/// const MY_SOFTIRQ_ID: u8 = 4;
/// // Enable the softirq line of this id.
/// SoftIrqLine::get(MY_SOFTIRQ_ID).enable(|| {
///     // Define the action to take when the softirq with MY_SOFTIRQ_ID is raised
///     // ...
/// });
/// // Later on:
/// SoftIrqLine::get(MY_SOFTIRQ_ID).raise(); // This will trigger the registered callback
/// ```
pub struct SoftIrqLine {
    /// The ID of this line; also its bit position in the pending/enabled bitmasks.
    id: u8,
    /// The callback run when this line is processed; set at most once via `enable`.
    callback: Once<Box<dyn Fn() + 'static + Sync + Send>>,
}
impl SoftIrqLine {
    /// The total number of softirq lines (valid IDs are `0..NR_LINES`).
    const NR_LINES: u8 = 8;

    /// Returns the softirq line with the given ID.
    ///
    /// # Panics
    ///
    /// Panics if `id` is not within `0..NR_LINES`, or if the lines have not
    /// been initialized yet.
    pub fn get(id: u8) -> &'static SoftIrqLine {
        &LINES.get().unwrap()[usize::from(id)]
    }

    const fn new(id: u8) -> Self {
        Self {
            id,
            callback: Once::new(),
        }
    }

    /// Returns the ID of this softirq line.
    pub fn id(&self) -> u8 {
        self.id
    }

    /// Raises the softirq, marking it as pending on the current CPU.
    ///
    /// If this line is not enabled yet, the method has no visible effect:
    /// the pending bit is set, but `process_pending` masks it off against
    /// the enabled mask before dispatching.
    pub fn raise(&self) {
        PENDING_MASK.bitor_assign(1 << self.id);
    }

    /// Enables a softirq line by registering its callback.
    ///
    /// # Panics
    ///
    /// Each softirq line can only be enabled once; enabling it again panics.
    pub fn enable<F>(&self, callback: F)
    where
        F: Fn() + 'static + Sync + Send,
    {
        assert!(!self.is_enabled());

        // Publish the callback before flipping the enabled bit (Release),
        // so a processor that observes the bit also sees the callback.
        self.callback.call_once(|| Box::new(callback));
        ENABLED_MASK.fetch_or(1 << self.id, Ordering::Release);
    }

    /// Returns whether this softirq line is enabled.
    pub fn is_enabled(&self) -> bool {
        (ENABLED_MASK.load(Ordering::Acquire) >> self.id) & 1 != 0
    }
}
/// A slice that stores the [`SoftIrqLine`]s, whose ID is equal to its offset in the slice.
///
/// Initialized exactly once by `init`; `SoftIrqLine::get` unwraps and thus
/// panics if called before initialization.
static LINES: Once<[SoftIrqLine; SoftIrqLine::NR_LINES as usize]> = Once::new();
#[init_component]
fn init() -> Result<(), ComponentInitError> {
    // Instantiate every line, with each line's ID equal to its array index.
    LINES.call_once(|| core::array::from_fn(|i| SoftIrqLine::new(i as u8)));

    // Hook softirq processing into the interrupt bottom-half path, then
    // register the taskless softirq handlers on their dedicated lines.
    register_bottom_half_handler(process_pending);
    taskless::init();

    Ok(())
}
// Bitmask of enabled softirq lines (bit i <=> line i), shared by all CPUs.
static ENABLED_MASK: AtomicU8 = AtomicU8::new(0);

cpu_local_cell! {
    // Per-CPU bitmask of softirq lines that have been raised but not yet processed.
    static PENDING_MASK: u8 = 0;
    // Per-CPU flag gating softirq processing; cleared while `process_pending`
    // runs to prevent nested processing on the same CPU.
    static IS_ENABLED: bool = true;
}
/// Enables softirq in current processor.
fn enable_softirq_local() {
    IS_ENABLED.store(true);
}

/// Disables softirq in current processor.
///
/// Used by `process_pending` to prevent nested softirq processing on this CPU.
fn disable_softirq_local() {
    IS_ENABLED.store(false);
}

/// Checks whether the softirq is enabled in current processor.
fn is_softirq_enabled() -> bool {
    IS_ENABLED.load()
}
/// Processes pending softirqs.
///
/// Up to `SOFTIRQ_RUN_TIMES` rounds are performed. Each round snapshots and
/// clears the per-CPU pending mask, then invokes the callback of every
/// enabled line in it, lowest ID (highest priority) first. Softirqs raised
/// while a round is running are picked up by the next round.
pub(crate) fn process_pending() {
    const SOFTIRQ_RUN_TIMES: u8 = 5;

    // Bail out if softirq processing is masked on this CPU (e.g., we are
    // already inside `process_pending`).
    if !is_softirq_enabled() {
        return;
    }

    disable_softirq_local();
    for _ in 0..SOFTIRQ_RUN_TIMES {
        // Snapshot and clear the pending bits, keeping only enabled lines.
        // NOTE(review): the load + store(0) pair is assumed to run with local
        // IRQs masked; otherwise a bit raised in between would be lost — confirm.
        let pending = PENDING_MASK.load();
        PENDING_MASK.store(0);
        let mut todo = pending & ENABLED_MASK.load(Ordering::Acquire);

        if todo == 0 {
            break;
        }

        // Dispatch each set bit in ascending ID order.
        while todo != 0 {
            let id = todo.trailing_zeros() as u8;
            // Clear the lowest set bit, then run its callback.
            todo &= todo - 1;
            SoftIrqLine::get(id).callback.get().unwrap()();
        }
    }
    enable_softirq_local();
}

View File

@@ -0,0 +1,13 @@
// SPDX-License-Identifier: MPL-2.0
//! Defines the used IDs of software interrupt (softirq) lines.
// Softirq lines with smaller IDs have higher execution priority.

/// The corresponding softirq line is used to schedule urgent taskless jobs.
pub const TASKLESS_URGENT_SOFTIRQ_ID: u8 = 0;

/// The corresponding softirq line is used to manage timers and handle
/// time-related jobs.
pub const TIMER_SOFTIRQ_ID: u8 = 1;

/// The corresponding softirq line is used to schedule general taskless jobs.
pub const TASKLESS_SOFTIRQ_ID: u8 = 2;

View File

@@ -0,0 +1,249 @@
// SPDX-License-Identifier: MPL-2.0
use alloc::{boxed::Box, sync::Arc};
use core::{
cell::RefCell,
ops::DerefMut,
sync::atomic::{AtomicBool, Ordering},
};
use intrusive_collections::{intrusive_adapter, LinkedList, LinkedListAtomicLink};
use ostd::{cpu::local::CpuLocal, cpu_local, trap};
use super::{
softirq_id::{TASKLESS_SOFTIRQ_ID, TASKLESS_URGENT_SOFTIRQ_ID},
SoftIrqLine,
};
/// `Taskless` represents a _taskless_ job whose execution is deferred to a later time.
///
/// # Overview
///
/// `Taskless` provides one "bottom half" mechanism for interrupt handling.
/// With `Taskless`, one can defer the execution of certain logic
/// that would have been otherwise executed in interrupt handlers.
/// `Taskless` makes interrupt handlers finish more quickly,
/// thereby minimizing the periods of time when the interrupts are disabled.
///
/// `Taskless` executes the deferred jobs via the softirq mechanism,
/// rather than doing them with `Task`s.
/// As such, these deferred, taskless jobs can be executed within only a small delay,
/// after the execution of an interrupt handler that schedules the taskless jobs.
/// As the taskless jobs are not executed in the task context,
/// they are not allowed to sleep.
///
/// A `Taskless` instance may be scheduled to run multiple times,
/// but it is guaranteed that a single taskless job will not be run concurrently.
/// Also, a taskless job will not be preempted by another.
/// This makes the programming of a taskless job simpler.
/// Different taskless jobs are allowed to run concurrently.
/// Once a taskless job has entered the execution state, it can be scheduled again.
///
/// # Example
///
/// Users can create a `Taskless` and schedule it at any place.
/// ```rust
/// # use aster_softirq::Taskless;
/// #
/// # fn my_func() {}
/// #
/// let taskless = Taskless::new(my_func);
/// // This taskless job will be executed in softirq context soon.
/// taskless.schedule();
///
/// ```
pub struct Taskless {
    /// Whether the taskless job has been scheduled.
    is_scheduled: AtomicBool,
    /// Whether the taskless job is running.
    is_running: AtomicBool,
    /// The function that will be called when executing this taskless job.
    ///
    /// A `RefCell` suffices (despite living in an `Arc`) because the softirq
    /// handler guarantees a single job never runs concurrently.
    callback: Box<RefCell<dyn FnMut() + Send + Sync + 'static>>,
    /// Whether this `Taskless` is disabled.
    #[allow(unused)]
    is_disabled: AtomicBool,
    /// Intrusive link used to chain this job into a per-CPU scheduling list.
    link: LinkedListAtomicLink,
}
// Adapter that lets `Arc<Taskless>` nodes be chained into an intrusive
// `LinkedList` through the `link` field.
intrusive_adapter!(TasklessAdapter = Arc<Taskless>: Taskless { link: LinkedListAtomicLink });
cpu_local! {
    // Per-CPU queue of jobs scheduled via `Taskless::schedule`.
    static TASKLESS_LIST: RefCell<LinkedList<TasklessAdapter>> = RefCell::new(LinkedList::new(TasklessAdapter::NEW));
    // Per-CPU queue of jobs scheduled via `Taskless::schedule_urgent`.
    static TASKLESS_URGENT_LIST: RefCell<LinkedList<TasklessAdapter>> = RefCell::new(LinkedList::new(TasklessAdapter::NEW));
}
impl Taskless {
    /// Creates a new `Taskless` instance with its callback function.
    #[allow(unused)]
    pub fn new<F>(callback: F) -> Arc<Self>
    where
        F: FnMut() + Send + Sync + 'static,
    {
        // The same taskless job is never executed concurrently, so a
        // `RefCell` around the callback is sound even though the `Taskless`
        // ends up inside an `Arc`.
        #[allow(clippy::arc_with_non_send_sync)]
        Arc::new(Self {
            is_scheduled: AtomicBool::new(false),
            is_running: AtomicBool::new(false),
            is_disabled: AtomicBool::new(false),
            callback: Box::new(RefCell::new(callback)),
            link: LinkedListAtomicLink::new(),
        })
    }

    /// Schedules this taskless job to be executed at a later time.
    ///
    /// If the taskless job has already been scheduled, this is a no-op.
    #[allow(unused)]
    pub fn schedule(self: &Arc<Self>) {
        do_schedule(self, &TASKLESS_LIST);
        SoftIrqLine::get(TASKLESS_SOFTIRQ_ID).raise();
    }

    /// Schedules this taskless job to be executed urgently in softirq
    /// context.
    ///
    /// If the taskless job has already been scheduled, this is a no-op.
    #[allow(unused)]
    pub fn schedule_urgent(self: &Arc<Self>) {
        do_schedule(self, &TASKLESS_URGENT_LIST);
        SoftIrqLine::get(TASKLESS_URGENT_SOFTIRQ_ID).raise();
    }

    /// Enables this `Taskless` so that it can be executed once it has been scheduled.
    ///
    /// A new `Taskless` is enabled by default.
    #[allow(unused)]
    pub fn enable(&self) {
        self.is_disabled.store(false, Ordering::Release);
    }

    /// Disables this `Taskless` so that it can not be scheduled.
    ///
    /// Note that a `Taskless` that is already scheduled still runs to
    /// completion; disabling only blocks future scheduling.
    #[allow(unused)]
    pub fn disable(&self) {
        self.is_disabled.store(true, Ordering::Release);
    }
}
/// Enqueues `taskless` onto the given per-CPU list, unless it is disabled
/// or already scheduled.
#[allow(unused)]
fn do_schedule(
    taskless: &Arc<Taskless>,
    taskless_list: &'static CpuLocal<RefCell<LinkedList<TasklessAdapter>>>,
) {
    // A disabled job must not be queued.
    if taskless.is_disabled.load(Ordering::Acquire) {
        return;
    }

    // Atomically flip `is_scheduled` from false to true. Losing the race
    // means the job is already queued somewhere, so we must not enqueue it
    // a second time.
    let newly_scheduled = taskless
        .is_scheduled
        .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
        .is_ok();
    if !newly_scheduled {
        return;
    }

    // Push onto this CPU's list with local IRQs masked so an interrupt
    // handler cannot observe the `RefCell` mid-borrow.
    let irq_guard = trap::disable_local();
    let list_guard = taskless_list.get_with(&irq_guard);
    list_guard.borrow_mut().push_front(taskless.clone());
}
/// Registers the taskless softirq handlers on their dedicated softirq lines.
///
/// Must be called exactly once: `SoftIrqLine::enable` panics if a line is
/// enabled twice.
pub(super) fn init() {
    SoftIrqLine::get(TASKLESS_URGENT_SOFTIRQ_ID)
        .enable(|| taskless_softirq_handler(&TASKLESS_URGENT_LIST, TASKLESS_URGENT_SOFTIRQ_ID));

    SoftIrqLine::get(TASKLESS_SOFTIRQ_ID)
        .enable(|| taskless_softirq_handler(&TASKLESS_LIST, TASKLESS_SOFTIRQ_ID));
}
/// Executes the pending taskless jobs in the input `taskless_list`.
///
/// This function detaches the whole per-CPU `taskless_list`, leaving it
/// empty, and processes the retrieved jobs one by one. A job that is found
/// to be already running is pushed back onto `taskless_list` and the softirq
/// is re-raised so the job is retried later.
///
/// A job that does get executed is marked as not scheduled *before* its
/// callback runs, so it can be scheduled again (even from the callback).
fn taskless_softirq_handler(
    taskless_list: &'static CpuLocal<RefCell<LinkedList<TasklessAdapter>>>,
    softirq_id: u8,
) {
    // Detach the entire list under an IRQ guard so interrupt handlers cannot
    // push onto it while we hold the `RefCell` borrow.
    let mut processing_list = {
        let irq_guard = trap::disable_local();
        let guard = taskless_list.get_with(&irq_guard);
        let mut list_mut = guard.borrow_mut();
        LinkedList::take(list_mut.deref_mut())
    };

    while let Some(taskless) = processing_list.pop_back() {
        // Claim the job by flipping `is_running` from false to true.
        if taskless
            .is_running
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            // The job is still running; defer it by putting it back on the
            // per-CPU list and raising the softirq again for a later retry.
            let irq_guard = trap::disable_local();
            taskless_list
                .get_with(&irq_guard)
                .borrow_mut()
                .push_front(taskless);
            SoftIrqLine::get(softirq_id).raise();
            continue;
        }

        // Clear `is_scheduled` before invoking the callback so the job may
        // be re-scheduled while (or after) it runs.
        taskless.is_scheduled.store(false, Ordering::Release);

        // The same taskless will not be executing concurrently, so it is safe to
        // do `borrow_mut` here.
        (taskless.callback.borrow_mut())();
        taskless.is_running.store(false, Ordering::Release);
    }
}
#[cfg(ktest)]
mod test {
    use core::sync::atomic::AtomicUsize;

    use ostd::prelude::*;

    use super::*;

    // Initializes the softirq component at most once across all tests.
    //
    // NOTE(review): the load-then-store on `DONE` is not atomic as a whole;
    // two racing first calls could both run `init` — presumably ktests run
    // serially, confirm.
    fn init() {
        static DONE: AtomicBool = AtomicBool::new(false);
        if !DONE.load(Ordering::SeqCst) {
            let _ = super::super::init();
            DONE.store(true, Ordering::SeqCst);
        }
    }

    #[ktest]
    fn schedule_taskless() {
        static COUNTER: AtomicUsize = AtomicUsize::new(0);
        const SCHEDULE_TIMES: usize = 10;

        fn add_counter() {
            COUNTER.fetch_add(1, Ordering::Relaxed);
        }

        init();
        let taskless = Taskless::new(add_counter);
        let mut counter = 0;

        // Schedule this taskless for `SCHEDULE_TIMES`.
        // Each iteration first observes `is_scheduled == false`, so every
        // `schedule()` call here performs a real scheduling and should
        // produce exactly one callback execution.
        while !taskless.is_scheduled.load(Ordering::Acquire) {
            taskless.schedule();
            counter += 1;
            if counter == SCHEDULE_TIMES {
                break;
            }
        }

        // Wait for all taskless having finished.
        while taskless.is_running.load(Ordering::Acquire)
            || taskless.is_scheduled.load(Ordering::Acquire)
        {
            core::hint::spin_loop()
        }

        // Every counted schedule must have triggered exactly one callback run.
        assert_eq!(counter, COUNTER.load(Ordering::Relaxed));
    }
}