Refactor code related to CPU local memory

Co-authored-by: Chuandong Li <lichuand@pku.edu.cn>
Author: Zhang Junyang
Committed by: Tate, Hongliang Tian
Date: 2024-06-28 15:47:01 +00:00
Parent: 98619f3482
Commit: 0f8d8da372

11 changed files with 339 additions and 185 deletions

@@ -5,11 +5,12 @@
 use alloc::{boxed::Box, sync::Arc};
 use core::{
     cell::RefCell,
+    ops::DerefMut,
     sync::atomic::{AtomicBool, Ordering},
 };
 use intrusive_collections::{intrusive_adapter, LinkedList, LinkedListAtomicLink};
-use ostd::{cpu_local, sync::SpinLock, trap::SoftIrqLine, CpuLocal};
+use ostd::{cpu_local, trap::SoftIrqLine, CpuLocal};

 use crate::softirq_id::{TASKLESS_SOFTIRQ_ID, TASKLESS_URGENT_SOFTIRQ_ID};
@@ -65,8 +66,8 @@ pub struct Taskless {
 intrusive_adapter!(TasklessAdapter = Arc<Taskless>: Taskless { link: LinkedListAtomicLink });

 cpu_local! {
-    static TASKLESS_LIST: SpinLock<LinkedList<TasklessAdapter>> = SpinLock::new(LinkedList::new(TasklessAdapter::NEW));
-    static TASKLESS_URGENT_LIST: SpinLock<LinkedList<TasklessAdapter>> = SpinLock::new(LinkedList::new(TasklessAdapter::NEW));
+    static TASKLESS_LIST: RefCell<LinkedList<TasklessAdapter>> = RefCell::new(LinkedList::new(TasklessAdapter::NEW));
+    static TASKLESS_URGENT_LIST: RefCell<LinkedList<TasklessAdapter>> = RefCell::new(LinkedList::new(TasklessAdapter::NEW));
 }

 impl Taskless {
@@ -121,7 +122,7 @@ impl Taskless {

 fn do_schedule(
     taskless: &Arc<Taskless>,
-    taskless_list: &'static CpuLocal<SpinLock<LinkedList<TasklessAdapter>>>,
+    taskless_list: &'static CpuLocal<RefCell<LinkedList<TasklessAdapter>>>,
 ) {
     if taskless.is_disabled.load(Ordering::Acquire) {
         return;
@@ -133,10 +134,10 @@ fn do_schedule(
     {
         return;
     }
-    CpuLocal::borrow_with(taskless_list, |list| {
-        list.lock_irq_disabled().push_front(taskless.clone());
-    });
+    taskless_list
+        .borrow_irq_disabled()
+        .borrow_mut()
+        .push_front(taskless.clone());
 }

 pub(super) fn init() {
@@ -155,13 +156,14 @@ pub(super) fn init() {
 /// If the `Taskless` is ready to be executed, it will be set to not scheduled
 /// and can be scheduled again.
 fn taskless_softirq_handler(
-    taskless_list: &'static CpuLocal<SpinLock<LinkedList<TasklessAdapter>>>,
+    taskless_list: &'static CpuLocal<RefCell<LinkedList<TasklessAdapter>>>,
     softirq_id: u8,
 ) {
-    let mut processing_list = CpuLocal::borrow_with(taskless_list, |list| {
-        let mut list_mut = list.lock_irq_disabled();
-        LinkedList::take(&mut list_mut)
-    });
+    let mut processing_list = {
+        let guard = taskless_list.borrow_irq_disabled();
+        let mut list_mut = guard.borrow_mut();
+        LinkedList::take(list_mut.deref_mut())
+    };

     while let Some(taskless) = processing_list.pop_back() {
         if taskless
@@ -169,10 +171,11 @@ fn taskless_softirq_handler(
             .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
         {
-            CpuLocal::borrow_with(taskless_list, |list| {
-                list.lock_irq_disabled().push_front(taskless);
-                SoftIrqLine::get(softirq_id).raise();
-            });
+            taskless_list
+                .borrow_irq_disabled()
+                .borrow_mut()
+                .push_front(taskless);
+            SoftIrqLine::get(softirq_id).raise();
             continue;
         }
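
Note: the recurring change in this commit is visible above. `CpuLocal::borrow_with(&VAR, |v| ...)` becomes the guard-based `VAR.borrow_irq_disabled()`, and per-CPU `SpinLock`s are downgraded to plain `RefCell`s: with local IRQs disabled, the data can only be touched by the code currently running on this CPU, so no lock is needed. A minimal sketch of the new pattern (`COUNTER` and `bump` are made up for illustration):

```rust
use core::cell::RefCell;

use ostd::cpu_local;

cpu_local! {
    static COUNTER: RefCell<u64> = RefCell::new(0);
}

fn bump() {
    // The guard keeps local IRQs disabled for its lifetime, so no interrupt
    // handler on this CPU can re-enter the `RefCell` while we hold it.
    let guard = COUNTER.borrow_irq_disabled();
    *guard.borrow_mut() += 1;
} // IRQs are restored when `guard` drops.
```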

@@ -4,7 +4,7 @@
 use alloc::sync::Arc;
 use core::time::Duration;

 use aster_time::read_monotonic_time;
-use ostd::{arch::timer::Jiffies, cpu_local, sync::SpinLock, CpuLocal};
+use ostd::{arch::timer::Jiffies, cpu_local, sync::SpinLock};
 use paste::paste;
 use spin::Once;
@@ -215,9 +215,7 @@ macro_rules! define_timer_managers {
     let clock = paste! {[<$clock_id _INSTANCE>].get().unwrap().clone()};
     let clock_manager = TimerManager::new(clock);
     paste! {
-        CpuLocal::borrow_with(&[<$clock_id _MANAGER>], |manager| {
-            manager.call_once(|| clock_manager.clone());
-        });
+        [<$clock_id _MANAGER>].call_once(|| clock_manager.clone());
     }
     let callback = move || {
         clock_manager.process_expired_timers();
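
Note: calling `call_once` directly on the CPU-local `Once` works because `Once` is `Sync`, so the new `Deref` impl in `cpu_local.rs` (below) applies and no IRQ guard is required. A minimal sketch of that access path, with `READY` as a made-up variable:

```rust
use ostd::cpu_local;
use spin::Once;

cpu_local! {
    static READY: Once<u32> = Once::new();
}

fn mark_ready() {
    // `Once<u32>` is `Sync`, so the `CpuLocal` wrapper derefs to it directly.
    READY.call_once(|| 42);
}
```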

@@ -52,6 +52,16 @@ SECTIONS
     . = DATA_SEGMENT_RELRO_END(0, .);

     .data : AT(ADDR(.data) - KERNEL_VMA) { *(.data .data.*) }

+    /* The CPU-local data storage. It is readable and writable for the
+     * bootstrap processor, while it is copied to dynamically allocated
+     * memory areas for the application processors. */
+    .cpu_local : AT(ADDR(.cpu_local) - KERNEL_VMA) {
+        __cpu_local_start = .;
+        KEEP(*(SORT(.cpu_local)))
+        __cpu_local_end = .;
+    }
+
     .bss : AT(ADDR(.bss) - KERNEL_VMA) {
         __bss = .;
         *(.bss .bss.*) *(COMMON)
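
Note: these linker-provided symbols are consumed from Rust by declaring them as `extern "C"` items and using only their addresses, as the new `cpu_local.rs` below does. A sketch (`cpu_local_section_size` is an illustrative helper, not part of the commit):

```rust
// The symbols carry no data; only their addresses are meaningful.
extern "C" {
    fn __cpu_local_start();
    fn __cpu_local_end();
}

// The size of the `.cpu_local` image that would be copied for each AP.
fn cpu_local_section_size() -> usize {
    __cpu_local_end as usize - __cpu_local_start as usize
}
```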

@@ -17,7 +17,10 @@ use log::debug;
 #[cfg(feature = "intel_tdx")]
 use tdx_guest::tdcall;
 use trapframe::{GeneralRegs, UserContext as RawUserContext};
-use x86_64::registers::rflags::RFlags;
+use x86_64::registers::{
+    rflags::RFlags,
+    segmentation::{Segment64, FS},
+};

 #[cfg(feature = "intel_tdx")]
 use crate::arch::tdx_guest::{handle_virtual_exception, TdxTrapFrame};
@@ -669,3 +672,22 @@ impl Default for FpRegs {
 struct FxsaveArea {
     data: [u8; 512], // 512 bytes
 }
+
+/// Sets the base address of the CPU-local storage by writing the FS base
+/// model-specific register. This operation is `unsafe` because it directly
+/// interfaces with a low-level CPU register.
+///
+/// # Safety
+///
+/// - This function is safe to call provided that the FS register is dedicated
+///   entirely to CPU-local storage and is not concurrently accessed for other
+///   purposes.
+/// - The caller must ensure that `addr` is a valid, properly aligned address,
+///   as required by the CPU.
+/// - This function should only be called in contexts where the CPU can accept
+///   such changes, such as during processor initialization.
+pub(crate) unsafe fn set_cpu_local_base(addr: u64) {
+    FS::write_base(x86_64::addr::VirtAddr::new(addr));
+}
+
+/// Gets the base address of the CPU-local storage by reading the FS base
+/// model-specific register.
+pub(crate) fn get_cpu_local_base() -> u64 {
+    FS::read_base().as_u64()
+}
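
Note: this getter/setter pair underpins the address translation in `CpuLocal::get` (below): a CPU-local object linked at `bsp_va` lives, on the current CPU, at `fs_base + (bsp_va - __cpu_local_start)`. On the BSP, setting the base to `__cpu_local_start` makes this an identity mapping. A hypothetical sketch of the arithmetic (`local_addr_of` is not in the commit):

```rust
fn local_addr_of(bsp_va: usize) -> usize {
    // Offset of the object within the `.cpu_local` image, plus this
    // CPU's own base address, which the FS register holds.
    let offset = bsp_va - __cpu_local_start as usize;
    get_cpu_local_base() as usize + offset
}
```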

@@ -15,7 +15,7 @@ use spin::Once;
 use trapframe::TrapFrame;

 use self::apic::APIC_TIMER_CALLBACK;
-use crate::{arch::x86::kernel, cpu_local, trap::IrqLine, CpuLocal};
+use crate::{arch::x86::kernel, cpu_local, trap::IrqLine};

 /// The timer frequency (Hz). Here we choose 1000Hz since 1000Hz is easier for unit conversion and
 /// convenient for timer. What's more, the frequency cannot be set too high or too low, 1000Hz is
@@ -57,19 +57,20 @@ pub fn register_callback<F>(func: F)
 where
     F: Fn() + Sync + Send + 'static,
 {
-    CpuLocal::borrow_with(&INTERRUPT_CALLBACKS, |callbacks| {
-        callbacks.borrow_mut().push(Box::new(func));
-    });
+    INTERRUPT_CALLBACKS
+        .borrow_irq_disabled()
+        .borrow_mut()
+        .push(Box::new(func));
 }

 fn timer_callback(_: &TrapFrame) {
     jiffies::ELAPSED.fetch_add(1, Ordering::SeqCst);

-    CpuLocal::borrow_with(&INTERRUPT_CALLBACKS, |callbacks| {
-        for callback in callbacks.borrow().iter() {
-            (callback)();
-        }
-    });
+    let callbacks_guard = INTERRUPT_CALLBACKS.borrow_irq_disabled();
+    for callback in callbacks_guard.borrow().iter() {
+        (callback)();
+    }
+    drop(callbacks_guard);

     if APIC_TIMER_CALLBACK.is_completed() {
         APIC_TIMER_CALLBACK.get().unwrap().call(());

@@ -1,108 +0,0 @@
-// SPDX-License-Identifier: MPL-2.0
-
-//! CPU.
-
-use core::{cell::UnsafeCell, ops::Deref};
-
-use crate::trap::disable_local;
-
-cfg_if::cfg_if! {
-    if #[cfg(target_arch = "x86_64")]{
-        pub use trapframe::GeneralRegs;
-        pub use crate::arch::x86::cpu::*;
-    }
-}
-
-/// Defines a CPU-local variable.
-///
-/// # Example
-///
-/// ```rust
-/// use crate::cpu_local;
-/// use core::cell::RefCell;
-///
-/// cpu_local! {
-///     static FOO: RefCell<u32> = RefCell::new(1);
-///
-///     #[allow(unused)]
-///     pub static BAR: RefCell<f32> = RefCell::new(1.0);
-/// }
-/// CpuLocal::borrow_with(&FOO, |val| {
-///     println!("FOO VAL: {:?}", *val);
-/// })
-///
-/// ```
-#[macro_export]
-macro_rules! cpu_local {
-    // empty
-    () => {};
-
-    // multiple declarations
-    ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = $init:expr; $($rest:tt)*) => {
-        #[allow(clippy::macro_metavars_in_unsafe)]
-        $(#[$attr])* $vis static $name: $crate::CpuLocal<$t> = unsafe { $crate::CpuLocal::new($init) };
-        $crate::cpu_local!($($rest)*);
-    };
-
-    // single declaration
-    ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = $init:expr) => (
-        // TODO: reimplement cpu-local variable to support multi-core
-        $(#[$attr])* $vis static $name: $crate::CpuLocal<$t> = $crate::CpuLocal::new($init);
-    );
-}
-
-/// CPU-local objects.
-///
-/// A CPU-local object only gives you immutable references to the underlying value.
-/// To mutate the value, one can use atomic values (e.g., [`AtomicU32`]) or internally mutable
-/// objects (e.g., [`RefCell`]).
-///
-/// The `CpuLocal<T: Sync>` can be used directly.
-/// Otherwise, the `CpuLocal<T>` must be used through [`borrow_with`].
-///
-/// TODO: re-implement `CpuLocal`
-///
-/// [`AtomicU32`]: core::sync::atomic::AtomicU32
-/// [`RefCell`]: core::cell::RefCell
-/// [`borrow_with`]: CpuLocal::borrow_with
-pub struct CpuLocal<T>(UnsafeCell<T>);
-
-// SAFETY: At any given time, only one task can access the inner value T of a cpu-local variable.
-unsafe impl<T> Sync for CpuLocal<T> {}
-
-impl<T> CpuLocal<T> {
-    /// Initialize CPU-local object
-    /// Developer cannot construct a valid CpuLocal object arbitrarily
-    #[allow(clippy::missing_safety_doc)]
-    pub const unsafe fn new(val: T) -> Self {
-        Self(UnsafeCell::new(val))
-    }
-
-    /// Borrow an immutable reference to the underlying value and feed it to a closure.
-    ///
-    /// During the execution of the closure, local IRQs are disabled. This ensures that
-    /// the CPU-local object is only accessed by the current task or IRQ handler.
-    /// As local IRQs are disabled, one should keep the closure as short as possible.
-    pub fn borrow_with<'a, U, F: FnOnce(&'a T) -> U>(this: &'a Self, f: F) -> U {
-        // FIXME: implement disable preemption
-        // Disable interrupts when accessing cpu-local variable
-        let _guard = disable_local();
-        // SAFETY: Now that the local IRQs are disabled, this CPU-local object can only be
-        // accessed by the current task/thread. So it is safe to get its immutable reference
-        // regardless of whether `T` implements `Sync` or not.
-        let val_ref = unsafe { this.do_borrow() };
-        f(val_ref)
-    }
-
-    unsafe fn do_borrow(&self) -> &T {
-        &*self.0.get()
-    }
-}
-
-impl<T: Sync> Deref for CpuLocal<T> {
-    type Target = T;
-    fn deref(&self) -> &T {
-        unsafe { &*self.0.get() }
-    }
-}

ostd/src/cpu/cpu_local.rs (new file, 227 lines)

@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: MPL-2.0
+
+//! CPU local storage.
+//!
+//! This module provides a mechanism to define CPU-local objects.
+//!
+//! This is achieved by placing the CPU-local objects in a special section
+//! `.cpu_local`. The bootstrap processor (BSP) uses the objects linked in this
+//! section, and these objects are copied to dynamically allocated local
+//! storage of each application processor (AP) during the initialization
+//! process.
+//!
+//! Such a mechanism exploits the fact that constant values of non-[`Copy`]
+//! types can be bitwise copied. For example, an [`Option<T>`] object, though
+//! not [`Copy`], has a constant constructor [`Option::None`] that produces
+//! a value that can be bitwise copied to create a new instance.
+//! [`alloc::sync::Arc`], however, doesn't have such a constructor, and thus
+//! cannot be directly used as a CPU-local object. Wrapping it in a type that
+//! has a constant constructor, like [`Option<T>`], makes it CPU-local.
+
+use core::ops::Deref;
+
+use crate::{
+    cpu::{get_cpu_local_base, set_cpu_local_base},
+    trap::{disable_local, DisabledLocalIrqGuard},
+};
+
+/// Defines a CPU-local variable.
+///
+/// # Example
+///
+/// ```rust
+/// use crate::cpu_local;
+/// use core::cell::RefCell;
+///
+/// cpu_local! {
+///     static FOO: RefCell<u32> = RefCell::new(1);
+///
+///     #[allow(unused)]
+///     pub static BAR: RefCell<f32> = RefCell::new(1.0);
+/// }
+///
+/// println!("FOO VAL: {:?}", *FOO.borrow_irq_disabled().borrow());
+/// ```
+#[macro_export]
+macro_rules! cpu_local {
+    ($( $(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = $init:expr; )*) => {
+        $(
+            #[link_section = ".cpu_local"]
+            $(#[$attr])* $vis static $name: $crate::CpuLocal<$t> = {
+                let val = $init;
+                // SAFETY: The CPU-local variable instantiated is statically
+                // stored in the special `.cpu_local` section.
+                unsafe {
+                    $crate::CpuLocal::__new(val)
+                }
+            };
+        )*
+    };
+}
+
+/// CPU-local objects.
+///
+/// A CPU-local object only gives you immutable references to the underlying value.
+/// To mutate the value, one can use atomic values (e.g., [`AtomicU32`]) or internally mutable
+/// objects (e.g., [`RefCell`]).
+///
+/// [`AtomicU32`]: core::sync::atomic::AtomicU32
+/// [`RefCell`]: core::cell::RefCell
+pub struct CpuLocal<T>(T);
+
+// SAFETY: At any given time, only one task can access the inner value `T`
+// of a CPU-local variable, even if `T` is not `Sync`.
+unsafe impl<T> Sync for CpuLocal<T> {}
+
+// Prevent valid instances of `CpuLocal` from being copied to any memory
+// area outside the `.cpu_local` section.
+impl<T> !Copy for CpuLocal<T> {}
+impl<T> !Clone for CpuLocal<T> {}
+
+// In general, it does not make any sense to send instances of `CpuLocal` to
+// other tasks, as they should live on other CPUs to make sending useful.
+impl<T> !Send for CpuLocal<T> {}
+
+impl<T> CpuLocal<T> {
+    /// Initializes a CPU-local object.
+    ///
+    /// Please do not call this function directly. Instead, use the
+    /// `cpu_local!` macro.
+    ///
+    /// # Safety
+    ///
+    /// The caller should ensure that the object initialized by this
+    /// function resides in the `.cpu_local` section. Otherwise the
+    /// behavior is undefined.
+    #[doc(hidden)]
+    pub const unsafe fn __new(val: T) -> Self {
+        Self(val)
+    }
+
+    /// Gets access to the underlying value with IRQs disabled.
+    ///
+    /// With this method, you can borrow a reference to the underlying value
+    /// even if `T` is not `Sync`. Because the value is per-CPU and IRQs are
+    /// disabled, no other running task can access it.
+    pub fn borrow_irq_disabled(&self) -> CpuLocalDerefGuard<'_, T> {
+        CpuLocalDerefGuard {
+            cpu_local: self,
+            _guard: disable_local(),
+        }
+    }
+
+    /// Gets access to the underlying value through a raw pointer.
+    ///
+    /// This function calculates the virtual address of the CPU-local object
+    /// from the per-CPU base address and the object's offset in the BSP.
+    fn get(&self) -> *const T {
+        let offset = {
+            let bsp_va = self as *const _ as usize;
+            let bsp_base = __cpu_local_start as usize;
+            // The implementation should ensure that the CPU-local object
+            // resides in the `.cpu_local` section.
+            debug_assert!(bsp_va + core::mem::size_of::<T>() <= __cpu_local_end as usize);
+            bsp_va - bsp_base
+        };
+        let local_base = get_cpu_local_base() as usize;
+        let local_va = local_base + offset;
+        // A sanity check about the alignment.
+        debug_assert_eq!(local_va % core::mem::align_of::<T>(), 0);
+        local_va as *const T
+    }
+}
+
+// Considering a preemptive kernel, a CPU-local object may be dereferenced
+// when another task tries to access it. So, we need to ensure that `T` is
+// `Sync` before allowing it to be dereferenced.
+impl<T: Sync> Deref for CpuLocal<T> {
+    type Target = T;
+    fn deref(&self) -> &Self::Target {
+        // SAFETY: it should be properly initialized before accesses.
+        // And we do not create a mutable reference over it. It is
+        // `Sync` so it can be referenced from this task.
+        unsafe { &*self.get() }
+    }
+}
+
+/// A guard for accessing the CPU-local object.
+///
+/// It ensures that the CPU-local object is accessed with IRQs
+/// disabled. It is created by [`CpuLocal::borrow_irq_disabled`].
+/// Do not hold this guard for a long time.
+#[must_use]
+pub struct CpuLocalDerefGuard<'a, T> {
+    cpu_local: &'a CpuLocal<T>,
+    _guard: DisabledLocalIrqGuard,
+}
+
+impl<T> Deref for CpuLocalDerefGuard<'_, T> {
+    type Target = T;
+    fn deref(&self) -> &Self::Target {
+        // SAFETY: it should be properly initialized before accesses.
+        // And we do not create a mutable reference over it. The IRQs
+        // are disabled so it can only be referenced from this task.
+        unsafe { &*self.cpu_local.get() }
+    }
+}
+
+/// Initializes the CPU-local data for the bootstrap processor (BSP).
+///
+/// # Safety
+///
+/// This function can only be called once, and only on the BSP.
+///
+/// It must be guaranteed that the BSP does not access CPU-local data before
+/// this function is called; otherwise, copying non-constant values will
+/// result in undefined behavior.
+pub unsafe fn init_on_bsp() {
+    let start_base_va = __cpu_local_start as usize as u64;
+    set_cpu_local_base(start_base_va);
+}
+
+// These symbols are provided by the linker script.
+extern "C" {
+    fn __cpu_local_start();
+    fn __cpu_local_end();
+}
+
+#[cfg(ktest)]
+mod test {
+    use core::{
+        cell::RefCell,
+        sync::atomic::{AtomicU8, Ordering},
+    };
+
+    use ostd_macros::ktest;
+
+    use super::*;
+
+    #[ktest]
+    fn test_cpu_local() {
+        cpu_local! {
+            static FOO: RefCell<usize> = RefCell::new(1);
+            static BAR: AtomicU8 = AtomicU8::new(3);
+        }
+        for _ in 0..10 {
+            let foo_guard = FOO.borrow_irq_disabled();
+            assert_eq!(*foo_guard.borrow(), 1);
+            *foo_guard.borrow_mut() = 2;
+            drop(foo_guard);
+            for _ in 0..10 {
+                assert_eq!(BAR.load(Ordering::Relaxed), 3);
+                BAR.store(4, Ordering::Relaxed);
+                assert_eq!(BAR.load(Ordering::Relaxed), 4);
+                BAR.store(3, Ordering::Relaxed);
+            }
+            let foo_guard = FOO.borrow_irq_disabled();
+            assert_eq!(*foo_guard.borrow(), 2);
+            *foo_guard.borrow_mut() = 1;
+            drop(foo_guard);
+        }
+    }
+}
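
Note: only the BSP is wired up here; the per-AP copies promised by the module docs imply an AP-side counterpart roughly like the following. This is a hypothetical sketch, not code from the commit; `init_on_ap` and `alloc_local_storage` are made-up names:

```rust
/// Hypothetical AP-side initialization: give this AP its own copy of the
/// BSP's `.cpu_local` image and point the FS base at it.
pub unsafe fn init_on_ap() {
    let start = __cpu_local_start as usize;
    let size = __cpu_local_end as usize - start;
    // Stand-in for whatever allocator the kernel would use here.
    let local: *mut u8 = alloc_local_storage(size);
    // Bitwise-copy the initial values; this is valid because every
    // CPU-local object is built from a constant constructor.
    core::ptr::copy_nonoverlapping(start as *const u8, local, size);
    set_cpu_local_base(local as u64);
}
```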

ostd/src/cpu/mod.rs (new file, 12 lines)

@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: MPL-2.0
+
+//! CPU-related definitions.
+
+pub mod cpu_local;
+
+cfg_if::cfg_if! {
+    if #[cfg(target_arch = "x86_64")]{
+        pub use trapframe::GeneralRegs;
+        pub use crate::arch::x86::cpu::*;
+    }
+}

@@ -47,7 +47,7 @@ pub use ostd_macros::main;
 #[cfg(feature = "intel_tdx")]
 use tdx_guest::init_tdx;

-pub use self::{cpu::CpuLocal, error::Error, prelude::Result};
+pub use self::{cpu::cpu_local::CpuLocal, error::Error, prelude::Result};

 /// Initializes OSTD.
 ///
@@ -77,6 +77,9 @@ pub fn init() {
     mm::page::allocator::init();
     mm::kspace::init_boot_page_table();
     mm::kspace::init_kernel_page_table(mm::init_page_meta());
+    // SAFETY: No CPU-local objects have been accessed so far, and
+    // we are on the BSP.
+    unsafe { cpu::cpu_local::init_on_bsp() };
     mm::misc_init();
     trap::init();

@@ -1,7 +1,5 @@
 // SPDX-License-Identifier: MPL-2.0

-#![allow(dead_code)]
-
 use alloc::sync::Arc;
 use core::{
     cell::RefCell,
@@ -13,7 +11,7 @@ use super::{
     task::{context_switch, TaskContext},
     Task, TaskStatus,
 };
-use crate::{cpu_local, CpuLocal};
+use crate::cpu_local;

 pub struct Processor {
     current: Option<Arc<Task>>,
@@ -49,21 +47,16 @@ cpu_local! {
     static PROCESSOR: RefCell<Processor> = RefCell::new(Processor::new());
 }

-pub fn take_current_task() -> Option<Arc<Task>> {
-    CpuLocal::borrow_with(&PROCESSOR, |processor| {
-        processor.borrow_mut().take_current()
-    })
-}
-
 /// Retrieves the current task running on the processor.
 pub fn current_task() -> Option<Arc<Task>> {
-    CpuLocal::borrow_with(&PROCESSOR, |processor| processor.borrow().current())
+    PROCESSOR.borrow_irq_disabled().borrow().current()
 }

 pub(crate) fn get_idle_task_ctx_ptr() -> *mut TaskContext {
-    CpuLocal::borrow_with(&PROCESSOR, |processor| {
-        processor.borrow_mut().get_idle_task_ctx_ptr()
-    })
+    PROCESSOR
+        .borrow_irq_disabled()
+        .borrow_mut()
+        .get_idle_task_ctx_ptr()
 }

 /// Calls this function to switch to other task by using GLOBAL_SCHEDULER
@@ -134,15 +127,16 @@ fn switch_to_task(next_task: Arc<Task>) {
     }

     // Change the current task to the next task.
-    CpuLocal::borrow_with(&PROCESSOR, |processor| {
-        let mut processor = processor.borrow_mut();
+    {
+        let processor_guard = PROCESSOR.borrow_irq_disabled();
+        let mut processor = processor_guard.borrow_mut();
         // We cannot directly overwrite `current` at this point. Since we are running as `current`,
         // we must avoid dropping `current`. Otherwise, the kernel stack may be unmapped, leading
         // to soundness problems.
         let old_current = processor.current.replace(next_task);
         processor.prev_task = old_current;
-    });
+    }

     // SAFETY:
     // 1. `ctx` is only used in `schedule()`. We have exclusive access to both the current task
@@ -200,7 +194,7 @@ impl PreemptInfo {
 #[must_use]
 pub struct DisablePreemptGuard {
     // This private field prevents user from constructing values of this type directly.
-    private: (),
+    _private: (),
 }

 impl !Send for DisablePreemptGuard {}
@@ -208,7 +202,7 @@ impl !Send for DisablePreemptGuard {}
 impl DisablePreemptGuard {
     fn new() -> Self {
         PREEMPT_COUNT.increase_num_locks();
-        Self { private: () }
+        Self { _private: () }
     }

     /// Transfer this guard to a new guard.

@@ -9,7 +9,7 @@ use core::sync::atomic::{AtomicBool, AtomicU8, Ordering};
 use spin::Once;

-use crate::{cpu_local, task::disable_preempt, CpuLocal};
+use crate::{cpu_local, task::disable_preempt};

 /// A representation of a software interrupt (softirq) line.
 ///
@@ -70,9 +70,7 @@ impl SoftIrqLine {
     ///
     /// If this line is not enabled yet, the method has no effect.
     pub fn raise(&self) {
-        CpuLocal::borrow_with(&PENDING_MASK, |mask| {
-            mask.fetch_or(1 << self.id, Ordering::Release);
-        });
+        PENDING_MASK.fetch_or(1 << self.id, Ordering::Release);
     }

     /// Enables a softirq line by registering its callback.
@@ -114,21 +112,17 @@ cpu_local! {
 /// Enables softirq in current processor.
 fn enable_softirq_local() {
-    CpuLocal::borrow_with(&IS_ENABLED, |is_enabled| {
-        is_enabled.store(true, Ordering::Release)
-    })
+    IS_ENABLED.store(true, Ordering::Release);
 }

 /// Disables softirq in current processor.
 fn disable_softirq_local() {
-    CpuLocal::borrow_with(&IS_ENABLED, |is_enabled| {
-        is_enabled.store(false, Ordering::Release)
-    })
+    IS_ENABLED.store(false, Ordering::Release);
 }

 /// Checks whether the softirq is enabled in current processor.
 fn is_softirq_enabled() -> bool {
-    CpuLocal::borrow_with(&IS_ENABLED, |is_enabled| is_enabled.load(Ordering::Acquire))
+    IS_ENABLED.load(Ordering::Acquire)
 }

 /// Processes pending softirqs.
@@ -145,16 +139,14 @@ pub(crate) fn process_pending() {
     let preempt_guard = disable_preempt();
     disable_softirq_local();

-    CpuLocal::borrow_with(&PENDING_MASK, |mask| {
-        for i in 0..SOFTIRQ_RUN_TIMES {
-            // will not reactive in this handling.
-            let mut action_mask = {
-                let pending_mask = mask.fetch_and(0, Ordering::Acquire);
-                pending_mask & ENABLED_MASK.load(Ordering::Acquire)
-            };
-            if action_mask == 0 {
-                return;
-            }
+    for i in 0..SOFTIRQ_RUN_TIMES {
+        let mut action_mask = {
+            let pending_mask = PENDING_MASK.fetch_and(0, Ordering::Acquire);
+            pending_mask & ENABLED_MASK.load(Ordering::Acquire)
+        };
+        if action_mask == 0 {
+            break;
+        }
         while action_mask > 0 {
             let action_id = u8::trailing_zeros(action_mask) as u8;
@@ -162,6 +154,6 @@ pub(crate) fn process_pending() {
             action_mask &= action_mask - 1;
         }
     }
-    });

     enable_softirq_local();
 }
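
Note: the handler loop relies on two bit tricks: `fetch_and(0, Acquire)` atomically snapshots and clears the pending mask, and `action_mask &= action_mask - 1` clears the lowest set bit after `trailing_zeros` identifies it. A self-contained walk-through in plain Rust:

```rust
fn main() {
    // Suppose softirq lines 1 and 4 were raised: mask = 0b1_0010.
    let mut action_mask: u8 = 0b1_0010;
    while action_mask > 0 {
        let action_id = u8::trailing_zeros(action_mask) as u8;
        println!("process softirq line {action_id}"); // prints 1, then 4
        action_mask &= action_mask - 1; // clear the lowest set bit
    }
}
```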