Refactor preemption-related code in OSTD

This commit is contained in:
jellllly420
2024-08-15 19:36:46 +08:00
committed by Tate, Hongliang Tian
parent f5464e82c0
commit 2ab4ba11bc
7 changed files with 132 additions and 144 deletions

View File

@ -59,17 +59,6 @@ extern "C" {
fn __cpu_local_end();
}
cpu_local_cell! {
/// A 4-byte preemption information consisting of a should_preempt flag at
/// the highest bit and a preemption counter in the lower 31 bits.
///
/// We need to access the preemption info before we can copy the section
/// for application processors. So, the preemption info is not copied from
/// bootstrap processor's section as the initialization. Instead it is
/// initialized to zero for application processors.
pub(crate) static PREEMPT_INFO: u32 = 0;
}
/// Sets the base address of the CPU-local storage for the bootstrap processor.
///
/// It should be called early to let [`crate::task::disable_preempt`] work,
@ -134,24 +123,6 @@ pub unsafe fn init_on_bsp() {
(ap_pages_ptr as *mut u32).write(cpu_i);
}
// SAFETY: the `PREEMPT_INFO` may be dirty on the BSP, so we need
// to ensure that it is initialized to zero for APs. The safety
// requirements are met since the static is defined in the `.cpu_local`
// section and the pointer to that static is the offset in the CPU-
// local area. It is a `usize` so it is safe to be overwritten.
unsafe {
let preempt_info_ptr = &PREEMPT_INFO as *const _ as usize;
let preempt_info_offset = preempt_info_ptr - __cpu_local_start as usize;
let ap_preempt_info_ptr = ap_pages_ptr.add(preempt_info_offset) as *mut usize;
ap_preempt_info_ptr.write(0);
}
// SAFETY: bytes `8:16` are reserved for storing the pointer to the
// current task. We initialize it to null.
unsafe {
(ap_pages_ptr as *mut u64).add(1).write(0);
}
cpu_local_storages.push(ap_pages);
}

View File

@ -2,12 +2,13 @@
//! Tasks are the unit of code execution.
mod preempt;
mod processor;
pub mod scheduler;
#[allow(clippy::module_inception)]
mod task;
pub use self::{
processor::{disable_preempt, DisablePreemptGuard},
preempt::{disable_preempt, DisablePreemptGuard},
task::{AtomicCpuId, Priority, Task, TaskAdapter, TaskContextApi, TaskOptions},
};

View File

@ -0,0 +1,61 @@
// SPDX-License-Identifier: MPL-2.0
//! This module maintains preemption-related information for the current task
//! on a CPU with a single 32-bit, CPU-local integer value.
//!
//! * Bits from 0 to 30 represent an unsigned counter called `guard_count`,
//! which is the number of `DisablePreemptGuard` instances held by the
//! current CPU;
//! * Bit 31 is set to `!need_preempt`, where `need_preempt` is a boolean value
//! that will be set by the scheduler when it decides that the current task
//! _needs_ to be preempted.
//!
//! Thus, the current task on a CPU _should_ be preempted if and only if this
//! integer is equal to zero.
//!
//! The initial value of this integer is equal to `1 << 31`.
//!
//! This module provides a set of functions to access and manipulate
//! `guard_count` and `need_preempt`.
use crate::cpu_local_cell;
/// Returns whether the current task _should_ be preempted or not.
///
/// Equivalent to `need_preempt() && get_guard_count() == 0`: the whole
/// word is zero exactly when bit 31 (`!need_preempt`) is cleared and the
/// 31-bit guard counter is zero.
pub(in crate::task) fn should_preempt() -> bool {
    let info = PREEMPT_INFO.load();
    info == 0
}
/// Returns whether the scheduler has requested that the current task be
/// preempted, regardless of any held `DisablePreemptGuard`s.
///
/// Bit 31 stores `!need_preempt`, so preemption is needed when it is clear.
#[allow(dead_code)]
pub(in crate::task) fn need_preempt() -> bool {
    let flag_bit = PREEMPT_INFO.load() & NEED_PREEMPT_MASK;
    flag_bit == 0
}
/// Marks that the current task needs to be preempted.
///
/// Since bit 31 stores `!need_preempt`, this *clears* the bit while
/// leaving the guard counter in the low 31 bits untouched.
pub(in crate::task) fn set_need_preempt() {
    let keep_mask = !NEED_PREEMPT_MASK;
    PREEMPT_INFO.bitand_assign(keep_mask);
}
/// Clears the pending-preemption request for the current task.
///
/// Since bit 31 stores `!need_preempt`, this *sets* the bit while leaving
/// the guard counter in the low 31 bits untouched.
pub(in crate::task) fn clear_need_preempt() {
    let negated_flag = NEED_PREEMPT_MASK;
    PREEMPT_INFO.bitor_assign(negated_flag);
}
/// Returns the number of `DisablePreemptGuard`s held by the current CPU.
pub(in crate::task) fn get_guard_count() -> u32 {
    let info = PREEMPT_INFO.load();
    info & GUARD_COUNT_MASK
}
pub(in crate::task) fn inc_guard_count() {
PREEMPT_INFO.add_assign(1);
}
/// Decrements the number of `DisablePreemptGuard`s held by the current CPU.
///
/// In debug builds this checks that the counter does not underflow, which
/// would corrupt the `!need_preempt` bit above it.
pub(in crate::task) fn dec_guard_count() {
    let count = get_guard_count();
    debug_assert!(count > 0);
    PREEMPT_INFO.sub_assign(1);
}
cpu_local_cell! {
    /// The per-CPU preemption info word: bit 31 holds `!need_preempt` and
    /// bits 0..=30 hold `guard_count` (see the module-level docs).
    /// Starts as `NEED_PREEMPT_MASK`, i.e. no preemption needed, no guards.
    static PREEMPT_INFO: u32 = NEED_PREEMPT_MASK;
}
/// Mask selecting bit 31, which stores `!need_preempt`.
const NEED_PREEMPT_MASK: u32 = 1 << 31;
/// Mask selecting bits 0..=30, which store `guard_count`.
const GUARD_COUNT_MASK: u32 = (1 << 31) - 1;

View File

@ -0,0 +1,35 @@
// SPDX-License-Identifier: MPL-2.0
/// A guard that keeps preemption disabled while it is alive.
#[clippy::has_significant_drop]
#[must_use]
pub struct DisablePreemptGuard {
    // This private field prevents user from constructing values of this type directly.
    _private: (),
}

// The guard increments/decrements a CPU-local counter (see
// `super::cpu_local`), so it must not migrate to another thread — and
// thus another CPU — between creation and drop; `!Send` enforces that.
impl !Send for DisablePreemptGuard {}
impl DisablePreemptGuard {
fn new() -> Self {
super::cpu_local::inc_guard_count();
Self { _private: () }
}
/// Transfer this guard to a new guard.
/// This guard must be dropped after this function.
pub fn transfer_to(&self) -> Self {
disable_preempt()
}
}
impl Drop for DisablePreemptGuard {
    fn drop(&mut self) {
        // Release this guard's hold on preemption; once the CPU-local
        // guard count drops back to zero, preemption is allowed again.
        super::cpu_local::dec_guard_count();
    }
}
/// Disables preemption.
///
/// Preemption stays disabled on the current CPU until the returned
/// [`DisablePreemptGuard`] (and every guard derived from it via
/// [`DisablePreemptGuard::transfer_to`]) has been dropped.
pub fn disable_preempt() -> DisablePreemptGuard {
    DisablePreemptGuard::new()
}

View File

@ -0,0 +1,6 @@
// SPDX-License-Identifier: MPL-2.0

//! Preemption control: a CPU-local preemption-info word (`cpu_local`) and
//! a RAII guard that disables preemption while it is held (`guard`).

pub(super) mod cpu_local;
mod guard;
pub use self::guard::{disable_preempt, DisablePreemptGuard};

View File

@ -2,8 +2,11 @@
use alloc::sync::Arc;
use super::task::{context_switch, Task, TaskContext};
use crate::{cpu::local::PREEMPT_INFO, cpu_local_cell};
use super::{
preempt::cpu_local,
task::{context_switch, Task, TaskContext},
};
use crate::cpu_local_cell;
cpu_local_cell! {
/// The `Arc<Task>` (casted by [`Arc::into_raw`]) that is the current task.
@ -43,9 +46,12 @@ pub(super) fn current_task() -> Option<Arc<Task>> {
/// This function will panic if called while holding preemption locks or with
/// local IRQ disabled.
pub(super) fn switch_to_task(next_task: Arc<Task>) {
let preemt_count = PREEMPT_COUNT.get();
if preemt_count != 0 {
panic!("Switching task while holding {} locks", preemt_count);
let guard_count = cpu_local::get_guard_count();
if guard_count != 0 {
panic!(
"Switching task with preemption disabled (nesting depth: {})",
guard_count
);
}
assert!(
@ -108,65 +114,3 @@ pub(super) fn switch_to_task(next_task: Arc<Task>) {
// next task. Not dropping is just fine because the only consequence is that we delay the drop
// to the next task switching.
}
// NOTE(review): these lines are the removed ("before") side of the diff.
static PREEMPT_COUNT: PreemptCount = PreemptCount::new();

/// A zero-sized accessor for the preemption counter stored in the low
/// 31 bits of the CPU-local `PREEMPT_INFO` word.
struct PreemptCount {}

impl PreemptCount {
    // Bit layout of the counter within `PREEMPT_INFO`.
    const SHIFT: u8 = 0;
    const BITS: u8 = 31;
    const MASK: u32 = ((1 << Self::BITS) - 1) << Self::SHIFT;

    const fn new() -> Self {
        Self {}
    }

    /// Increments the counter by one.
    fn inc(&self) {
        PREEMPT_INFO.add_assign(1 << Self::SHIFT);
    }

    /// Decrements the counter by one.
    fn dec(&self) {
        PREEMPT_INFO.sub_assign(1 << Self::SHIFT);
    }

    /// Returns the current counter value.
    fn get(&self) -> u32 {
        PREEMPT_INFO.load() & Self::MASK
    }
}
// NOTE(review): these lines are the removed ("before") side of the diff.
/// A guard for disable preempt.
#[clippy::has_significant_drop]
#[must_use]
pub struct DisablePreemptGuard {
    // This private field prevents user from constructing values of this type directly.
    _private: (),
}

// The guard tracks a CPU-local count, so it must not migrate to another
// thread (and thus another CPU) between creation and drop.
impl !Send for DisablePreemptGuard {}

impl DisablePreemptGuard {
    /// Creates a guard, incrementing the CPU-local preemption count.
    fn new() -> Self {
        PREEMPT_COUNT.inc();
        Self { _private: () }
    }

    /// Transfer this guard to a new guard.
    /// This guard must be dropped after this function.
    pub fn transfer_to(&self) -> Self {
        disable_preempt()
    }
}

impl Drop for DisablePreemptGuard {
    fn drop(&mut self) {
        // Re-allow preemption once all guards on this CPU are gone.
        PREEMPT_COUNT.dec();
    }
}

/// Disables preemption.
pub fn disable_preempt() -> DisablePreemptGuard {
    DisablePreemptGuard::new()
}

View File

@ -11,12 +11,8 @@ use core::sync::atomic::{AtomicBool, Ordering};
use spin::Once;
use super::{processor, task::Task};
use crate::{
arch::timer,
cpu::{local::PREEMPT_INFO, this_cpu},
prelude::*,
};
use super::{preempt::cpu_local, processor, task::Task};
use crate::{arch::timer, cpu::this_cpu, prelude::*};
/// Injects a scheduler implementation into framework.
///
@ -27,7 +23,7 @@ pub fn inject_scheduler(scheduler: &'static dyn Scheduler<Task>) {
timer::register_callback(|| {
SCHEDULER.get().unwrap().local_mut_rq_with(&mut |local_rq| {
if local_rq.update_current(UpdateFlags::Tick) {
SHOULD_PREEMPT.set();
cpu_local::set_need_preempt();
}
})
});
@ -41,7 +37,7 @@ pub trait Scheduler<T = Task>: Sync + Send {
///
/// Scheduler developers can perform load-balancing or some accounting work here.
///
/// If the `current` of a CPU should be preempted, this method returns the id of
/// If the `current` of a CPU needs to be preempted, this method returns the id of
/// that CPU.
fn enqueue(&self, runnable: Arc<T>, flags: EnqueueFlags) -> Option<u32>;
@ -68,7 +64,7 @@ pub trait LocalRunQueue<T = Task> {
/// Updates the current runnable task's scheduling statistics and potentially its
/// position in the queue.
///
/// If the current runnable task should be preempted, the method returns `true`.
/// If the current runnable task needs to be preempted, the method returns `true`.
fn update_current(&mut self, flags: UpdateFlags) -> bool;
/// Picks the next current runnable task.
@ -106,11 +102,7 @@ pub enum UpdateFlags {
/// Preempts the current task.
pub(crate) fn might_preempt() {
fn preempt_check() -> bool {
PREEMPT_INFO.load() == 0
}
if !preempt_check() {
if !cpu_local::should_preempt() {
return;
}
yield_now();
@ -142,15 +134,15 @@ pub(crate) fn park_current(has_woken: &AtomicBool) {
/// Unblocks a target task.
pub(crate) fn unpark_target(runnable: Arc<Task>) {
let should_preempt_info = SCHEDULER
let need_preempt_info = SCHEDULER
.get()
.unwrap()
.enqueue(runnable, EnqueueFlags::Wake);
if should_preempt_info.is_some() {
let cpu_id = should_preempt_info.unwrap();
// FIXME: send IPI to set remote CPU's `SHOULD_PREEMPT` if needed.
if need_preempt_info.is_some() {
let cpu_id = need_preempt_info.unwrap();
// FIXME: send IPI to set remote CPU's need_preempt if needed.
if cpu_id == this_cpu() {
SHOULD_PREEMPT.set();
cpu_local::set_need_preempt();
}
}
}
@ -165,15 +157,15 @@ pub(super) fn run_new_task(runnable: Arc<Task>) {
fifo_scheduler::init();
}
let should_preempt_info = SCHEDULER
let need_preempt_info = SCHEDULER
.get()
.unwrap()
.enqueue(runnable, EnqueueFlags::Spawn);
if should_preempt_info.is_some() {
let cpu_id = should_preempt_info.unwrap();
// FIXME: send IPI to set remote CPU's `SHOULD_PREEMPT` if needed.
if need_preempt_info.is_some() {
let cpu_id = need_preempt_info.unwrap();
// FIXME: send IPI to set remote CPU's need_preempt if needed.
if cpu_id == this_cpu() {
SHOULD_PREEMPT.set();
cpu_local::set_need_preempt();
}
}
@ -234,7 +226,7 @@ where
};
};
SHOULD_PREEMPT.clear();
cpu_local::clear_need_preempt();
processor::switch_to_task(next_task);
}
@ -247,25 +239,3 @@ enum ReschedAction {
/// Switch to target task.
SwitchTo(Arc<Task>),
}
// NOTE(review): these lines are the removed ("before") side of the diff.
static SHOULD_PREEMPT: ShouldPreemptFlag = ShouldPreemptFlag::new();

/// A zero-sized accessor for the "should preempt" flag stored in bit 31
/// of the CPU-local `PREEMPT_INFO` word. The bit holds the *negation*
/// of the flag: `set` clears the bit and `clear` sets it.
struct ShouldPreemptFlag {}

impl ShouldPreemptFlag {
    const SHIFT: u8 = 31;
    const MASK: u32 = 1 << Self::SHIFT;

    const fn new() -> Self {
        Self {}
    }

    /// Raises the flag (clears bit 31).
    fn set(&self) {
        PREEMPT_INFO.bitand_assign(!Self::MASK);
    }

    /// Lowers the flag (sets bit 31).
    fn clear(&self) {
        PREEMPT_INFO.bitor_assign(Self::MASK);
    }
}