Refactor the this_cpu API with PinCurrentCpu

Committed by: Tate, Hongliang Tian
Parent: 9a94ba23aa
Commit: f7a9510be0
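The diffs below replace the free function `this_cpu()` with the `PinCurrentCpu` trait: callers first pin the current task to its CPU by holding a guard (preemption disabled or local IRQs disabled) and then ask that guard for the CPU ID. A minimal usage sketch of the new calling convention, assuming a crate that depends on `ostd`; the function name is illustrative:

```rust
use ostd::{cpu::PinCurrentCpu, task::disable_preempt, trap::disable_local};

fn current_cpu_examples() {
    // Pinning by disabling preemption: the task cannot migrate while the guard lives.
    let preempt_guard = disable_preempt();
    let id = preempt_guard.current_cpu();

    // Pinning by disabling local IRQs works the same way.
    let irq_guard = disable_local();
    let id_with_irqs_off = irq_guard.current_cpu();

    // The preemption guard is still alive, so the task has not migrated in between.
    assert_eq!(id, id_with_irqs_off);
    // Once both guards are dropped, the IDs may no longer describe the CPU the
    // task is running on.
}
```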
@@ -1,11 +1,12 @@
 // SPDX-License-Identifier: MPL-2.0
 
 use ostd::{
-    cpu::{num_cpus, this_cpu},
+    cpu::{num_cpus, PinCurrentCpu},
     task::{
         scheduler::{inject_scheduler, EnqueueFlags, LocalRunQueue, Scheduler, UpdateFlags},
         AtomicCpuId, Priority, Task,
     },
+    trap::disable_local,
 };
 
 use crate::prelude::*;
@@ -71,13 +72,15 @@ impl<T: Sync + Send + PreemptSchedInfo> Scheduler<T> for PreemptScheduler<T> {
     }
 
     fn local_rq_with(&self, f: &mut dyn FnMut(&dyn LocalRunQueue<T>)) {
-        let local_rq: &PreemptRunQueue<T> = &self.rq[this_cpu() as usize].disable_irq().lock();
+        let irq_guard = disable_local();
+        let local_rq: &PreemptRunQueue<T> = &self.rq[irq_guard.current_cpu() as usize].lock();
         f(local_rq);
     }
 
     fn local_mut_rq_with(&self, f: &mut dyn FnMut(&mut dyn LocalRunQueue<T>)) {
+        let irq_guard = disable_local();
         let local_rq: &mut PreemptRunQueue<T> =
-            &mut self.rq[this_cpu() as usize].disable_irq().lock();
+            &mut self.rq[irq_guard.current_cpu() as usize].lock();
         f(local_rq);
     }
 }
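The preempt scheduler above keeps a `SpinLock`-protected run queue per CPU and now indexes it through an IRQ guard. The same pattern applies to any per-CPU array of locks; a hedged, self-contained sketch with illustrative names (`PerCpuQueues`, `rqs`, and `with_local` are not part of this commit):

```rust
use alloc::vec::Vec;
use ostd::{cpu::PinCurrentCpu, sync::SpinLock, trap::disable_local};

/// A toy set of per-CPU queues; the type and field names are illustrative only.
struct PerCpuQueues {
    rqs: Vec<SpinLock<Vec<u32>>>,
}

impl PerCpuQueues {
    fn with_local(&self, f: &mut dyn FnMut(&Vec<u32>)) {
        // Disabling local IRQs pins the task to this CPU, so indexing `rqs` by
        // `current_cpu()` stays valid for as long as `irq_guard` is alive.
        let irq_guard = disable_local();
        let local = self.rqs[irq_guard.current_cpu() as usize].lock();
        f(&local);
    }
}
```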
@@ -6,9 +6,10 @@ use core::time::Duration;
 use aster_time::read_monotonic_time;
 use ostd::{
     arch::timer::Jiffies,
-    cpu::{num_cpus, this_cpu},
+    cpu::{num_cpus, PinCurrentCpu},
     cpu_local,
     sync::SpinLock,
+    task::disable_preempt,
 };
 use paste::paste;
 use spin::Once;
@@ -35,7 +36,11 @@ impl RealTimeClock {
 
     /// Get the cpu-local system-wide `TimerManager` singleton of this clock.
     pub fn timer_manager() -> &'static Arc<TimerManager> {
-        CLOCK_REALTIME_MANAGER.get_on_cpu(this_cpu()).get().unwrap()
+        let preempt_guard = disable_preempt();
+        CLOCK_REALTIME_MANAGER
+            .get_on_cpu(preempt_guard.current_cpu())
+            .get()
+            .unwrap()
     }
 }
 
@@ -53,8 +58,9 @@ impl MonotonicClock {
 
     /// Get the cpu-local system-wide `TimerManager` singleton of this clock.
     pub fn timer_manager() -> &'static Arc<TimerManager> {
+        let preempt_guard = disable_preempt();
         CLOCK_MONOTONIC_MANAGER
-            .get_on_cpu(this_cpu())
+            .get_on_cpu(preempt_guard.current_cpu())
             .get()
             .unwrap()
     }
@@ -135,7 +141,11 @@ impl BootTimeClock {
 
     /// Get the cpu-local system-wide `TimerManager` singleton of this clock.
     pub fn timer_manager() -> &'static Arc<TimerManager> {
-        CLOCK_BOOTTIME_MANAGER.get_on_cpu(this_cpu()).get().unwrap()
+        let preempt_guard = disable_preempt();
+        CLOCK_BOOTTIME_MANAGER
+            .get_on_cpu(preempt_guard.current_cpu())
+            .get()
+            .unwrap()
     }
 }
 
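The `timer_manager()` getters above follow a common shape: a `cpu_local!` value is fetched with `get_on_cpu` while a preemption guard pins the task. A small sketch of the same access pattern with a hypothetical per-CPU counter (`TICKS_SEEN` and `bump_ticks_seen` are illustrative; the `get_on_cpu` result is assumed to expose the atomic directly, as in the `cpu_local!` doc example later in this commit):

```rust
use core::sync::atomic::{AtomicU32, Ordering};
use ostd::{cpu::PinCurrentCpu, cpu_local, task::disable_preempt};

cpu_local! {
    /// A hypothetical per-CPU counter (not part of this commit).
    static TICKS_SEEN: AtomicU32 = AtomicU32::new(0);
}

fn bump_ticks_seen() -> u32 {
    // Pin the task first; otherwise the CPU ID could be stale by the time the
    // per-CPU slot is accessed.
    let preempt_guard = disable_preempt();
    TICKS_SEEN
        .get_on_cpu(preempt_guard.current_cpu())
        .fetch_add(1, Ordering::Relaxed)
}
```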
@@ -119,10 +119,6 @@ SECTIONS
     . = ALIGN(4096);
     .cpu_local : AT(ADDR(.cpu_local) - KERNEL_VMA) {
         __cpu_local_start = .;
-
-        # These 4 bytes are used to store the CPU ID.
-        . += 4;
-
         KEEP(*(SORT(.cpu_local)))
         __cpu_local_end = .;
     }
@@ -63,8 +63,11 @@ pub(crate) fn init_on_bsp() {
     irq::init();
     kernel::acpi::init();
 
-    // SAFETY: it is only called once and ACPI has been initialized.
-    unsafe { crate::cpu::init() };
+    // SAFETY: they are only called once on BSP and ACPI has been initialized.
+    unsafe {
+        crate::cpu::init_num_cpus();
+        crate::cpu::set_this_cpu_id(0);
+    }
 
     match kernel::apic::init() {
         Ok(_) => {
@@ -113,9 +113,11 @@ pub fn register_ap_entry(entry: fn() -> !) {
 fn ap_early_entry(local_apic_id: u32) -> ! {
     crate::arch::enable_cpu_features();
 
-    // SAFETY: we are on the AP.
+    // SAFETY: we are on the AP and they are only called once with the correct
+    // CPU ID.
     unsafe {
         cpu::local::init_on_ap(local_apic_id);
+        cpu::set_this_cpu_id(local_apic_id);
     }
 
     trap::init();
@@ -23,7 +23,7 @@ use crate::{
 /// # Example
 ///
 /// ```rust
-/// use ostd::{cpu_local, cpu::this_cpu};
+/// use ostd::{cpu_local, cpu::PinCurrentCpu, task::disable_preempt};
 /// use core::{sync::atomic::{AtomicU32, Ordering}, cell::Cell};
 ///
 /// cpu_local! {
@@ -32,16 +32,12 @@ use crate::{
 /// }
 ///
 /// fn not_an_atomic_function() {
-///     let ref_of_foo = FOO.get_on_cpu(this_cpu());
-///     // Note that the value of `FOO` here doesn't necessarily equal to the value
-///     // of `FOO` of exactly the __current__ CPU. Since that task may be preempted
-///     // and moved to another CPU since `ref_of_foo` is created.
+///     let preempt_guard = disable_preempt();
+///     let ref_of_foo = FOO.get_on_cpu(preempt_guard.current_cpu());
///     let val_of_foo = ref_of_foo.load(Ordering::Relaxed);
///     println!("FOO VAL: {}", val_of_foo);
///
///     let bar_guard = BAR.borrow_irq_disabled();
-///     // Here the value of `BAR` is always the one in the __current__ CPU since
-///     // interrupts are disabled and we do not explicitly yield execution here.
///     let val_of_bar = bar_guard.get();
///     println!("BAR VAL: {}", val_of_bar);
/// }
@@ -98,7 +98,7 @@ pub unsafe fn init_on_bsp() {
     let num_cpus = super::num_cpus();
 
     let mut cpu_local_storages = Vec::with_capacity(num_cpus as usize - 1);
-    for cpu_i in 1..num_cpus {
+    for _ in 1..num_cpus {
         let ap_pages = {
             let nbytes = (bsp_end_va - bsp_base_va).align_up(PAGE_SIZE);
             page::allocator::alloc_contiguous(nbytes, |_| KernelMeta::default()).unwrap()
@@ -116,23 +116,11 @@ pub unsafe fn init_on_bsp() {
             );
         }
 
-        // SAFETY: bytes `0:4` are reserved for storing CPU ID.
-        unsafe {
-            (ap_pages_ptr as *mut u32).write(cpu_i);
-        }
-
         cpu_local_storages.push(ap_pages);
     }
 
     CPU_LOCAL_STORAGES.call_once(|| cpu_local_storages);
 
-    // Write the CPU ID of BSP to the first 4 bytes of the CPU-local area.
-    let bsp_cpu_id_ptr = bsp_base_va as *mut u32;
-    // SAFETY: the first 4 bytes is reserved for storing CPU ID.
-    unsafe {
-        bsp_cpu_id_ptr.write(0);
-    }
-
     arch::cpu::local::set_base(bsp_base_va as u64);
 
     has_init::set_true();
@@ -11,43 +11,85 @@ cfg_if::cfg_if! {
 }
 
 use alloc::vec::Vec;
-use core::sync::atomic::{AtomicU32, Ordering};
 
 use bitvec::{
     prelude::{BitVec, Lsb0},
     slice::IterOnes,
 };
+use local::cpu_local_cell;
+use spin::Once;
 
-use crate::arch::{self, boot::smp::get_num_processors};
+use crate::{
+    arch::boot::smp::get_num_processors, task::DisabledPreemptGuard, trap::DisabledLocalIrqGuard,
+};
 
-/// The number of CPUs. Zero means uninitialized.
-static NUM_CPUS: AtomicU32 = AtomicU32::new(0);
+/// The number of CPUs.
+static NUM_CPUS: Once<u32> = Once::new();
 
 /// Initializes the number of CPUs.
 ///
 /// # Safety
 ///
-/// The caller must ensure that this function is called only once at the
-/// correct time when the number of CPUs is available from the platform.
-pub unsafe fn init() {
+/// The caller must ensure that this function is called only once on the BSP
+/// at the correct time when the number of CPUs is available from the platform.
+pub(crate) unsafe fn init_num_cpus() {
     let num_processors = get_num_processors().unwrap_or(1);
-    NUM_CPUS.store(num_processors, Ordering::Release)
+    NUM_CPUS.call_once(|| num_processors);
+}
+
+/// Initializes the number of the current CPU.
+///
+/// # Safety
+///
+/// The caller must ensure that this function is called only once on the
+/// correct CPU with the correct CPU ID.
+pub(crate) unsafe fn set_this_cpu_id(id: u32) {
+    CURRENT_CPU.store(id);
 }
 
 /// Returns the number of CPUs.
 pub fn num_cpus() -> u32 {
-    let num = NUM_CPUS.load(Ordering::Acquire);
-    debug_assert_ne!(num, 0, "The number of CPUs is not initialized");
-    num
+    debug_assert!(
+        NUM_CPUS.get().is_some(),
+        "The number of CPUs is not initialized"
+    );
+    // SAFETY: The number of CPUs is initialized. The unsafe version is used
+    // to avoid the overhead of the check.
+    unsafe { *NUM_CPUS.get_unchecked() }
 }
 
-/// Returns the ID of this CPU.
+/// A marker trait for guard types that can "pin" the current task to the
+/// current CPU.
 ///
-/// The CPU ID is strategically placed at the beginning of the CPU local storage area.
-pub fn this_cpu() -> u32 {
-    // SAFETY: the cpu ID is stored at the beginning of the cpu local area, provided
-    // by the linker script.
-    unsafe { (arch::cpu::local::get_base() as usize as *mut u32).read() }
+/// Such guard types include [`DisabledLocalIrqGuard`] and
+/// [`DisabledPreemptGuard`]. When such guards exist, the CPU executing the
+/// current task is pinned. So getting the current CPU ID or CPU-local
+/// variables are safe.
+///
+/// # Safety
+///
+/// The implementor must ensure that the current task is pinned to the current
+/// CPU while any one of the instances of the implemented structure exists.
+pub unsafe trait PinCurrentCpu {
+    /// Returns the number of the current CPU.
+    fn current_cpu(&self) -> u32 {
+        let id = CURRENT_CPU.load();
+        debug_assert_ne!(id, u32::MAX, "This CPU is not initialized");
+        id
+    }
+}
+
+// SAFETY: When IRQs are disabled, the task cannot be passively preempted and
+// migrates to another CPU. If the task actively calls `yield`, it will not be
+// successful either.
+unsafe impl PinCurrentCpu for DisabledLocalIrqGuard {}
+// SAFETY: When preemption is disabled, the task cannot be preempted and migrates
+// to another CPU.
+unsafe impl PinCurrentCpu for DisabledPreemptGuard {}
+
+cpu_local_cell! {
+    /// The number of the current CPU.
+    static CURRENT_CPU: u32 = u32::MAX;
 }
 
 /// A subset of all CPUs in the system.
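Since `PinCurrentCpu` is an unsafe marker trait with a provided `current_cpu()` method, any guard type that can argue it keeps the task on one CPU can opt in with a one-line `unsafe impl`. A hedged sketch of such an implementation for a hypothetical wrapper guard (`PinnedScope` is illustrative and not part of this commit):

```rust
use ostd::{
    cpu::{num_cpus, PinCurrentCpu},
    task::{disable_preempt, DisabledPreemptGuard},
};

/// A hypothetical guard that carries a preemption guard plus some extra state.
struct PinnedScope {
    _preempt: DisabledPreemptGuard,
    label: &'static str,
}

// SAFETY: `PinnedScope` owns a `DisabledPreemptGuard` for its entire lifetime,
// so the current task cannot migrate to another CPU while an instance exists.
unsafe impl PinCurrentCpu for PinnedScope {}

fn pinned_scope_demo() {
    let scope = PinnedScope {
        _preempt: disable_preempt(),
        label: "demo",
    };
    // The provided `current_cpu()` method comes with the trait.
    let id = scope.current_cpu();
    assert!(id < num_cpus());
    let _ = scope.label;
}
```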
@@ -42,7 +42,7 @@ use crate::{
         page_prop::PageProperty,
         Paddr, PagingConstsTrait, PagingLevel, PAGE_SIZE,
     },
-    task::{disable_preempt, DisablePreemptGuard},
+    task::{disable_preempt, DisabledPreemptGuard},
 };
 
 /// The raw handle to a page table node.
@@ -190,11 +190,11 @@ pub(super) struct PageTableNode<
     [(); C::NR_LEVELS as usize]:,
 {
     pub(super) page: Page<PageTablePageMeta<E, C>>,
-    preempt_guard: DisablePreemptGuard,
+    preempt_guard: DisabledPreemptGuard,
 }
 
-// FIXME: We cannot `#[derive(Debug)]` here due to `DisablePreemptGuard`. Should we skip
-// this field or implement the `Debug` trait also for `DisablePreemptGuard`?
+// FIXME: We cannot `#[derive(Debug)]` here due to `DisabledPreemptGuard`. Should we skip
+// this field or implement the `Debug` trait also for `DisabledPreemptGuard`?
 impl<E, C> fmt::Debug for PageTableNode<E, C>
 where
     E: PageTableEntryTrait,
@@ -14,7 +14,7 @@ use core::{
 };
 
 use crate::{
-    task::{disable_preempt, DisablePreemptGuard},
+    task::{disable_preempt, DisabledPreemptGuard},
     trap::{disable_local, DisabledLocalIrqGuard},
 };
 
@@ -544,7 +544,7 @@ unsafe impl<T: ?Sized + Sync, R: Deref<Target = RwLock<T>> + Clone + Sync> Sync
 
 enum InnerGuard {
     IrqGuard(DisabledLocalIrqGuard),
-    PreemptGuard(DisablePreemptGuard),
+    PreemptGuard(DisabledPreemptGuard),
 }
 
 impl InnerGuard {
@@ -12,7 +12,7 @@ use core::{
 };
 
 use crate::{
-    task::{disable_preempt, DisablePreemptGuard},
+    task::{disable_preempt, DisabledPreemptGuard},
     trap::{disable_local, DisabledLocalIrqGuard},
 };
 
@@ -54,7 +54,7 @@ pub trait Guardian {
 pub struct PreemptDisabled;
 
 impl Guardian for PreemptDisabled {
-    type Guard = DisablePreemptGuard;
+    type Guard = DisabledPreemptGuard;
 
     fn guard() -> Self::Guard {
         disable_preempt()
@@ -9,6 +9,6 @@ pub mod scheduler;
 mod task;
 
 pub use self::{
-    preempt::{disable_preempt, DisablePreemptGuard},
+    preempt::{disable_preempt, DisabledPreemptGuard},
     task::{AtomicCpuId, Priority, Task, TaskAdapter, TaskContextApi, TaskOptions},
 };
@@ -4,7 +4,7 @@
 //! on a CPU with a single 32-bit, CPU-local integer value.
 //!
 //! * Bits from 0 to 30 represents an unsigned counter called `guard_count`,
-//!   which is the number of `DisablePreemptGuard` instances held by the
+//!   which is the number of `DisabledPreemptGuard` instances held by the
 //!   current CPU;
 //! * Bit 31 is set to `!need_preempt`, where `need_preempt` is a boolean value
 //!   that will be set by the scheduler when it decides that the current task
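The packed encoding described in that module doc (bits 0 to 30 hold `guard_count`, bit 31 holds `!need_preempt`) can be illustrated with plain integer arithmetic. A stand-alone sketch, not ostd code, that only demonstrates the layout:

```rust
/// Bit 31 of the per-CPU preemption word stores `!need_preempt`;
/// bits 0..=30 store `guard_count`.
const NEED_PREEMPT_MASK: u32 = 1 << 31;

/// Decodes the packed word into `(guard_count, need_preempt)`.
fn decode(word: u32) -> (u32, bool) {
    let guard_count = word & !NEED_PREEMPT_MASK;
    let need_preempt = (word & NEED_PREEMPT_MASK) == 0;
    (guard_count, need_preempt)
}

fn main() {
    // Bit 31 set, counter zero: no guards are held and preemption is not requested.
    assert_eq!(decode(NEED_PREEMPT_MASK), (0, false));

    // Bit 31 clear, counter 2: two `DisabledPreemptGuard`s are held while the
    // scheduler has asked for preemption of the current task.
    assert_eq!(decode(2), (2, true));
}
```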
@@ -3,14 +3,14 @@
 /// A guard for disable preempt.
 #[clippy::has_significant_drop]
 #[must_use]
-pub struct DisablePreemptGuard {
+pub struct DisabledPreemptGuard {
     // This private field prevents user from constructing values of this type directly.
     _private: (),
 }
 
-impl !Send for DisablePreemptGuard {}
+impl !Send for DisabledPreemptGuard {}
 
-impl DisablePreemptGuard {
+impl DisabledPreemptGuard {
     fn new() -> Self {
         super::cpu_local::inc_guard_count();
         Self { _private: () }
@@ -23,13 +23,13 @@ impl DisablePreemptGuard {
     }
 }
 
-impl Drop for DisablePreemptGuard {
+impl Drop for DisabledPreemptGuard {
     fn drop(&mut self) {
         super::cpu_local::dec_guard_count();
     }
 }
 
 /// Disables preemption.
-pub fn disable_preempt() -> DisablePreemptGuard {
-    DisablePreemptGuard::new()
+pub fn disable_preempt() -> DisabledPreemptGuard {
+    DisabledPreemptGuard::new()
 }
@@ -3,4 +3,4 @@
 pub(super) mod cpu_local;
 mod guard;
 
-pub use self::guard::{disable_preempt, DisablePreemptGuard};
+pub use self::guard::{disable_preempt, DisabledPreemptGuard};
@@ -4,9 +4,9 @@ use alloc::{boxed::Box, collections::VecDeque, sync::Arc, vec::Vec};
 
 use super::{inject_scheduler, EnqueueFlags, LocalRunQueue, Scheduler, UpdateFlags};
 use crate::{
-    cpu::{num_cpus, this_cpu},
+    cpu::{num_cpus, PinCurrentCpu},
     sync::SpinLock,
-    task::{AtomicCpuId, Task},
+    task::{disable_preempt, AtomicCpuId, Task},
 };
 
 pub fn init() {
@@ -61,12 +61,18 @@ impl<T: FifoSchedInfo + Send + Sync> Scheduler<T> for FifoScheduler<T> {
     }
 
     fn local_rq_with(&self, f: &mut dyn FnMut(&dyn LocalRunQueue<T>)) {
-        let local_rq: &FifoRunQueue<T> = &self.rq[this_cpu() as usize].disable_irq().lock();
+        let preempt_guard = disable_preempt();
+        let local_rq: &FifoRunQueue<T> = &self.rq[preempt_guard.current_cpu() as usize]
+            .disable_irq()
+            .lock();
         f(local_rq);
     }
 
     fn local_mut_rq_with(&self, f: &mut dyn FnMut(&mut dyn LocalRunQueue<T>)) {
-        let local_rq: &mut FifoRunQueue<T> = &mut self.rq[this_cpu() as usize].disable_irq().lock();
+        let preempt_guard = disable_preempt();
+        let local_rq: &mut FifoRunQueue<T> = &mut self.rq[preempt_guard.current_cpu() as usize]
+            .disable_irq()
+            .lock();
         f(local_rq);
     }
 }
@@ -12,7 +12,7 @@ use core::sync::atomic::{AtomicBool, Ordering};
 use spin::Once;
 
 use super::{preempt::cpu_local, processor, task::Task};
-use crate::{arch::timer, cpu::this_cpu, prelude::*};
+use crate::{arch::timer, cpu::PinCurrentCpu, prelude::*, task::disable_preempt};
 
 /// Injects a scheduler implementation into framework.
 ///
@@ -140,8 +140,9 @@ pub(crate) fn unpark_target(runnable: Arc<Task>) {
         .enqueue(runnable, EnqueueFlags::Wake);
     if need_preempt_info.is_some() {
         let cpu_id = need_preempt_info.unwrap();
+        let preempt_guard = disable_preempt();
         // FIXME: send IPI to set remote CPU's need_preempt if needed.
-        if cpu_id == this_cpu() {
+        if cpu_id == preempt_guard.current_cpu() {
             cpu_local::set_need_preempt();
         }
     }
@@ -163,8 +164,9 @@ pub(super) fn run_new_task(runnable: Arc<Task>) {
         .enqueue(runnable, EnqueueFlags::Spawn);
     if need_preempt_info.is_some() {
         let cpu_id = need_preempt_info.unwrap();
+        let preempt_guard = disable_preempt();
         // FIXME: send IPI to set remote CPU's need_preempt if needed.
-        if cpu_id == this_cpu() {
+        if cpu_id == preempt_guard.current_cpu() {
             cpu_local::set_need_preempt();
         }
     }