Implement apic::get_or_init using Once

Authored by Ruihan Li on 2025-05-18 23:46:54 +08:00; committed by Junyang Zhang
parent 0a27a1f37b
commit a18e72b495
10 changed files with 166 additions and 135 deletions

View File

@@ -8,7 +8,7 @@ use id_alloc::IdAlloc;
 use spin::Once;
 
 use crate::{
-    cpu::CpuId,
+    cpu::PinCurrentCpu,
     sync::{Mutex, PreemptDisabled, SpinLock, SpinLockGuard},
     trap::TrapFrame,
 };
@@ -147,7 +147,7 @@ impl Drop for IrqCallbackHandle {
 pub(crate) struct HwCpuId(u32);
 
 impl HwCpuId {
-    pub(crate) fn read_current() -> Self {
+    pub(crate) fn read_current(guard: &dyn PinCurrentCpu) -> Self {
         // TODO: Support SMP in RISC-V.
         Self(0)
     }
@@ -160,6 +160,6 @@
 /// The caller must ensure that the interrupt number is valid and that
 /// the corresponding handler is configured correctly on the remote CPU.
 /// Furthermore, invoking the interrupt handler must also be safe.
-pub(crate) unsafe fn send_ipi(hw_cpu_id: HwCpuId, irq_num: u8) {
+pub(crate) unsafe fn send_ipi(hw_cpu_id: HwCpuId, irq_num: u8, guard: &dyn PinCurrentCpu) {
     unimplemented!()
 }

View File

@@ -35,8 +35,8 @@ use crate::{
         kernel::{
             acpi::get_acpi_tables,
             apic::{
-                self, ApicId, DeliveryMode, DeliveryStatus, DestinationMode, DestinationShorthand,
-                Icr, Level, TriggerMode,
+                self, Apic, ApicId, DeliveryMode, DeliveryStatus, DestinationMode,
+                DestinationShorthand, Icr, Level, TriggerMode,
             },
         },
     },
@@ -45,6 +45,7 @@ use crate::{
         smp::PerApRawInfo,
     },
     mm::{Paddr, PAGE_SIZE},
+    task::disable_preempt,
 };
 
 /// Counts the number of processors.
@@ -255,19 +256,22 @@ unsafe fn wake_up_aps_via_mailbox(num_cpus: u32) {
 /// processors to boot successfully (e.g., each AP's page table
 /// and stack).
 unsafe fn send_boot_ipis() {
+    let preempt_guard = disable_preempt();
+    let apic = apic::get_or_init(&preempt_guard as _);
+
     // SAFETY: We're sending IPIs to boot all application processors.
     // The safety is upheld by the caller.
     unsafe {
-        send_init_to_all_aps();
+        send_init_to_all_aps(apic);
         spin_wait_cycles(100_000_000);
 
-        send_init_deassert();
+        send_init_deassert(apic);
         spin_wait_cycles(20_000_000);
 
-        send_startup_to_all_aps();
+        send_startup_to_all_aps(apic);
         spin_wait_cycles(20_000_000);
 
-        send_startup_to_all_aps();
+        send_startup_to_all_aps(apic);
         spin_wait_cycles(20_000_000);
     }
 }
@@ -275,7 +279,7 @@ unsafe fn send_boot_ipis() {
 /// # Safety
 ///
 /// The caller should ensure it's valid to send STARTUP IPIs to all CPUs excluding self.
-unsafe fn send_startup_to_all_aps() {
+unsafe fn send_startup_to_all_aps(apic: &dyn Apic) {
     let icr = Icr::new(
         ApicId::from(0),
         DestinationShorthand::AllExcludingSelf,
@@ -287,13 +291,13 @@ unsafe fn send_startup_to_all_aps() {
         (AP_BOOT_START_PA / PAGE_SIZE) as u8,
     );
     // SAFETY: The safety is upheld by the caller.
-    apic::with_borrow(|apic| unsafe { apic.send_ipi(icr) });
+    unsafe { apic.send_ipi(icr) }
 }
 
 /// # Safety
 ///
 /// The caller should ensure it's valid to send INIT IPIs to all CPUs excluding self.
-unsafe fn send_init_to_all_aps() {
+unsafe fn send_init_to_all_aps(apic: &dyn Apic) {
     let icr = Icr::new(
         ApicId::from(0),
         DestinationShorthand::AllExcludingSelf,
@@ -305,13 +309,13 @@ unsafe fn send_init_to_all_aps() {
         0,
     );
     // SAFETY: The safety is upheld by the caller.
-    apic::with_borrow(|apic| unsafe { apic.send_ipi(icr) });
+    unsafe { apic.send_ipi(icr) };
 }
 
 /// # Safety
 ///
 /// The caller should ensure it's valid to deassert INIT IPIs for all CPUs excluding self.
-unsafe fn send_init_deassert() {
+unsafe fn send_init_deassert(apic: &dyn Apic) {
     let icr = Icr::new(
         ApicId::from(0),
         DestinationShorthand::AllIncludingSelf,
@@ -323,7 +327,7 @@ unsafe fn send_init_deassert() {
         0,
     );
     // SAFETY: The safety is upheld by the caller.
-    apic::with_borrow(|apic| unsafe { apic.send_ipi(icr) });
+    unsafe { apic.send_ipi(icr) };
 }
 
 /// Spin wait approximately `c` cycles.
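
Note: the INIT, INIT-deassert, STARTUP, STARTUP sequence above follows the classic Intel MP startup protocol, and the 8-bit STARTUP vector encodes the physical page frame of the real-mode entry code, which is why the diff computes `(AP_BOOT_START_PA / PAGE_SIZE) as u8`. A minimal sketch of that encoding, with an illustrative (not actual) value for `AP_BOOT_START_PA`, which must be page-aligned and below 1 MiB:

```rust
const PAGE_SIZE: usize = 4096;
const AP_BOOT_START_PA: usize = 0x8000; // illustrative; must be page-aligned and < 1 MiB

/// Computes the STARTUP IPI vector: the AP begins real-mode execution
/// at physical address `vector << 12`, i.e., at `AP_BOOT_START_PA`.
fn sipi_vector() -> u8 {
    (AP_BOOT_START_PA / PAGE_SIZE) as u8
}

fn main() {
    assert_eq!(sipi_vector(), 0x08); // 0x8000 / 4096 = 8
}
```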

View File

@@ -12,6 +12,7 @@ use x86_64::registers::rflags::{self, RFlags};
 use super::iommu::{alloc_irt_entry, has_interrupt_remapping, IrtEntryHandle};
 use crate::{
+    cpu::PinCurrentCpu,
     sync::{LocalIrqDisabled, Mutex, PreemptDisabled, RwLock, RwLockReadGuard, SpinLock},
     trap::TrapFrame,
 };
@@ -179,11 +180,11 @@ impl Drop for IrqCallbackHandle {
 pub(crate) struct HwCpuId(u32);
 
 impl HwCpuId {
-    pub(crate) fn read_current() -> Self {
+    pub(crate) fn read_current(guard: &dyn PinCurrentCpu) -> Self {
         use crate::arch::kernel::apic;
 
-        let id = apic::with_borrow(|apic| apic.id());
-        Self(id)
+        let apic = apic::get_or_init(guard);
+        Self(apic.id())
     }
 }
@@ -194,7 +195,7 @@ impl HwCpuId {
 /// The caller must ensure that the interrupt number is valid and that
 /// the corresponding handler is configured correctly on the remote CPU.
 /// Furthermore, invoking the interrupt handler must also be safe.
-pub(crate) unsafe fn send_ipi(hw_cpu_id: HwCpuId, irq_num: u8) {
+pub(crate) unsafe fn send_ipi(hw_cpu_id: HwCpuId, irq_num: u8, guard: &dyn PinCurrentCpu) {
     use crate::arch::kernel::apic::{self, Icr};
 
     let icr = Icr::new(
@@ -207,7 +208,7 @@ pub(crate) unsafe fn send_ipi(hw_cpu_id: HwCpuId, irq_num: u8) {
         apic::DeliveryMode::Fixed,
         irq_num,
     );
-    apic::with_borrow(|apic| {
-        apic.send_ipi(icr);
-    });
+
+    let apic = apic::get_or_init(guard);
+    apic.send_ipi(icr);
 }
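
Note: threading `guard: &dyn PinCurrentCpu` through these signatures is what ties the APIC reference to a proof that the caller stays on one CPU. A self-contained sketch of the lifetime relationship, with stand-in types (nothing here is OSTD's real API):

```rust
trait PinCurrentCpu {
    fn current_cpu(&self) -> usize;
}

struct PreemptGuard; // stand-in for the guard returned by `disable_preempt()`

impl PinCurrentCpu for PreemptGuard {
    fn current_cpu(&self) -> usize {
        0 // a real implementation would read the current CPU's index
    }
}

struct Apic; // stand-in for the per-CPU APIC handle

// Mirrors `apic::get_or_init`: lifetime elision ties the returned borrow to
// the guard, so the handle cannot outlive the proof of staying on this CPU.
fn get_or_init(guard: &dyn PinCurrentCpu) -> &Apic {
    let _cpu = guard.current_cpu();
    static INSTANCE: Apic = Apic; // placeholder storage for the sketch
    &INSTANCE
}

fn main() {
    let guard = PreemptGuard;
    let apic = get_or_init(&guard as _);
    // drop(guard); // error[E0505]: cannot move out of `guard` while it is borrowed
    let _ = apic;
}
```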

View File

@@ -1,10 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0
 
 use alloc::boxed::Box;
-use core::{
-    cell::UnsafeCell,
-    sync::atomic::{AtomicBool, Ordering},
-};
 
 use bit_field::BitField;
 use spin::Once;
@@ -18,90 +14,93 @@ pub mod xapic;
 static APIC_TYPE: Once<ApicType> = Once::new();
 
-/// Do something over the APIC instance for the current CPU.
+/// Returns a reference to the local APIC instance of the current CPU.
 ///
-/// You should provide a closure operating on the given mutable borrow of the
-/// local APIC instance. During the execution of the closure, the interrupts
-/// are guaranteed to be disabled.
+/// The reference to the APIC instance will not outlive the given
+/// [`PinCurrentCpu`] guard and the APIC instance does not implement
+/// [`Sync`], so it is safe to assume that the APIC instance belongs
+/// to the current CPU. Note that interrupts are not disabled, so the
+/// APIC instance may be accessed concurrently by interrupt handlers.
 ///
-/// This function also lazily initializes the Local APIC instance. It does
-/// enable the Local APIC if it is not enabled.
+/// The first time this function is called, the local APIC instance
+/// is initialized and enabled if it was not enabled beforehand.
 ///
-/// Example:
+/// # Examples
+///
 /// ```rust
-/// use ostd::arch::x86::kernel::apic;
+/// use ostd::{
+///     arch::x86::kernel::apic,
+///     task::disable_preempt,
+/// };
 ///
-/// let ticks = apic::with_borrow(|apic| {
-///     let ticks = apic.timer_current_count();
-///     apic.set_timer_init_count(0);
-///     ticks
-/// });
+/// let preempt_guard = disable_preempt();
+/// let apic = apic::get_or_init(&preempt_guard as _);
+///
+/// let ticks = apic.timer_current_count();
+/// apic.set_timer_init_count(0);
 /// ```
-pub fn with_borrow<R>(f: impl FnOnce(&(dyn Apic + 'static)) -> R) -> R {
-    cpu_local! {
-        static APIC_INSTANCE: UnsafeCell<Option<Box<dyn Apic + 'static>>> = UnsafeCell::new(None);
-        static IS_INITIALIZED: AtomicBool = AtomicBool::new(false);
-    }
-
-    let preempt_guard = crate::task::disable_preempt();
-
-    // SAFETY: Preemption is disabled, so we can safely access the CPU-local variable.
-    let apic_ptr = unsafe {
-        let ptr = APIC_INSTANCE.as_ptr();
-        (*ptr).get()
-    };
-
-    // If it is not initialized, lazily initialize it.
-    if IS_INITIALIZED
-        .get_on_cpu(preempt_guard.current_cpu())
-        .compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
-        .is_ok()
-    {
-        let apic: Option<Box<dyn Apic>> = Some(match APIC_TYPE.get().unwrap() {
-            ApicType::XApic => {
-                let mut xapic = xapic::XApic::new().unwrap();
-                xapic.enable();
-                let version = xapic.version();
-                log::info!(
-                    "xAPIC ID:{:x}, Version:{:x}, Max LVT:{:x}",
-                    xapic.id(),
-                    version & 0xff,
-                    (version >> 16) & 0xff
-                );
-                Box::new(xapic)
-            }
-            ApicType::X2Apic => {
-                let mut x2apic = x2apic::X2Apic::new().unwrap();
-                x2apic.enable();
-                let version = x2apic.version();
-                log::info!(
-                    "x2APIC ID:{:x}, Version:{:x}, Max LVT:{:x}",
-                    x2apic.id(),
-                    version & 0xff,
-                    (version >> 16) & 0xff
-                );
-                Box::new(x2apic)
-            }
-        });
-
-        // SAFETY: It will be only written once and no other read or write can
-        // happen concurrently for the synchronization with `IS_INITIALIZED`.
-        //
-        // If interrupts happen during the initialization, it can be written
-        // twice. But it should not happen since we must call this function
-        // when interrupts are disabled during the initialization of OSTD.
-        unsafe {
-            debug_assert!(apic_ptr.read().is_none());
-            apic_ptr.write(apic);
-        }
-    }
-
-    // SAFETY: The APIC pointer is not accessed by other CPUs. It should point
-    // to initialized data and there will not be any write-during-read problem
-    // since there would be no write after `IS_INITIALIZED` is set to true.
-    let apic_ref = unsafe { &*apic_ptr };
-    let apic_ref = apic_ref.as_ref().unwrap().as_ref();
-
-    f.call_once((apic_ref,))
+pub fn get_or_init(guard: &dyn PinCurrentCpu) -> &(dyn Apic + 'static) {
+    struct ForceSyncSend<T>(T);
+
+    // SAFETY: `ForceSyncSend` is `Sync + Send`, but accessing its contained value is unsafe.
+    unsafe impl<T> Sync for ForceSyncSend<T> {}
+    unsafe impl<T> Send for ForceSyncSend<T> {}
+
+    impl<T> ForceSyncSend<T> {
+        /// # Safety
+        ///
+        /// The caller must ensure that its context allows for safe access to `&T`.
+        unsafe fn get(&self) -> &T {
+            &self.0
+        }
+    }
+
+    cpu_local! {
+        static APIC_INSTANCE: Once<ForceSyncSend<Box<dyn Apic + 'static>>> = Once::new();
+    }
+
+    let apic_instance = APIC_INSTANCE.get_on_cpu(guard.current_cpu());
+
+    // The APIC instance has already been initialized.
+    if let Some(apic) = apic_instance.get() {
+        // SAFETY: Accessing `&dyn Apic` is safe as long as we're running on the same CPU on which
+        // the APIC instance was created. The `get_on_cpu` method above ensures this.
+        return &**unsafe { apic.get() };
+    }
+
+    // Initialize the APIC instance now.
+    apic_instance.call_once(|| match APIC_TYPE.get().unwrap() {
+        ApicType::XApic => {
+            let mut xapic = xapic::XApic::new().unwrap();
+            xapic.enable();
+            let version = xapic.version();
+            log::info!(
+                "xAPIC ID:{:x}, Version:{:x}, Max LVT:{:x}",
+                xapic.id(),
+                version & 0xff,
+                (version >> 16) & 0xff
+            );
+            ForceSyncSend(Box::new(xapic))
+        }
+        ApicType::X2Apic => {
+            let mut x2apic = x2apic::X2Apic::new().unwrap();
+            x2apic.enable();
+            let version = x2apic.version();
+            log::info!(
+                "x2APIC ID:{:x}, Version:{:x}, Max LVT:{:x}",
+                x2apic.id(),
+                version & 0xff,
+                (version >> 16) & 0xff
+            );
+            ForceSyncSend(Box::new(x2apic))
+        }
+    });
+
+    // We've initialized the APIC instance, so this `unwrap` cannot fail.
+    let apic = apic_instance.get().unwrap();
+
+    // SAFETY: Accessing `&dyn Apic` is safe as long as we're running on the same CPU on which the
+    // APIC instance was created. The initialization above ensures this.
+    &**unsafe { apic.get() }
 }
 
 pub trait Apic: ApicTimer {
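
Note: the heart of this change is the lazy-initialization pattern named in the commit title: a `spin::Once` that is initialized on first use and thereafter hands out shared references. A minimal standalone sketch of the `Once` mechanics, using a plain global instead of OSTD's `cpu_local!` storage and an invented `Timer` type (requires the `spin` crate):

```rust
use spin::Once;

struct Timer {
    base: usize, // imagine MMIO or MSR state here
}

static INSTANCE: Once<Timer> = Once::new();

fn get_or_init() -> &'static Timer {
    // `call_once` runs the closure exactly once; concurrent callers spin
    // until initialization completes, then all receive the same reference.
    INSTANCE.call_once(|| Timer { base: 0xFEE0_0000 })
}

fn main() {
    let a = get_or_init();
    let b = get_or_init();
    assert!(core::ptr::eq(a, b)); // the same instance on every call
}
```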

View File

@@ -8,14 +8,22 @@ use x86::msr::{
 use super::ApicTimer;
 
-pub struct X2Apic {}
+#[derive(Debug)]
+pub struct X2Apic {
+    _private: (),
+}
+
+// The APIC instance can be shared among threads running on the same CPU, but not among those
+// running on different CPUs. Therefore, it is not `Send`/`Sync`.
+impl !Send for X2Apic {}
+impl !Sync for X2Apic {}
 
 impl X2Apic {
     pub(crate) fn new() -> Option<Self> {
         if !Self::has_x2apic() {
             return None;
         }
-        Some(Self {})
+        Some(Self { _private: () })
     }
 
     pub(super) fn has_x2apic() -> bool {

View File

@@ -18,6 +18,11 @@ pub struct XApic {
     mmio_start: *mut u32,
 }
 
+// The APIC instance can be shared among threads running on the same CPU, but not among those
+// running on different CPUs. Therefore, it is not `Send`/`Sync`.
+impl !Send for XApic {}
+impl !Sync for XApic {}
+
 impl XApic {
     pub fn new() -> Option<Self> {
         if !Self::has_xapic() {
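
Note: the `impl !Send`/`impl !Sync` lines are negative trait impls, which need the unstable `negative_impls` feature (Asterinas builds on nightly Rust); they make the compiler reject moving or sharing the handle across threads. A sketch with an invented type, plus the stable alternative in a comment:

```rust
#![feature(negative_impls)] // nightly-only feature

pub struct PerCpuHandle {
    _private: (),
}

// Opt out of `Send`/`Sync` explicitly: values of this type must stay on
// the CPU (and thread) that created them.
impl !Send for PerCpuHandle {}
impl !Sync for PerCpuHandle {}

// On stable Rust, the same effect is commonly achieved with a marker field:
// `_not_send_sync: core::marker::PhantomData<*mut ()>`.

fn main() {
    let handle = PerCpuHandle { _private: () };
    // `std::thread::spawn(move || drop(handle))` would now fail to compile,
    // because the closure would have to be `Send`.
    drop(handle);
}
```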

View File

@@ -119,9 +119,9 @@ pub(crate) unsafe fn init_on_ap() {
 pub(crate) fn interrupts_ack(irq_number: usize) {
     if !cpu::context::CpuException::is_cpu_exception(irq_number as u16) {
-        kernel::apic::with_borrow(|apic| {
-            apic.eoi();
-        });
+        // TODO: We're in the interrupt context, so `disable_preempt()` is not
+        // really necessary here.
+        kernel::apic::get_or_init(&crate::task::disable_preempt() as _).eoi();
     }
 }

View File

@@ -10,10 +10,11 @@ use log::info;
 use super::TIMER_FREQ;
 use crate::{
     arch::{
-        kernel::apic::{self, DivideConfig},
+        kernel::apic::{self, Apic, DivideConfig},
         timer::pit::OperatingMode,
         tsc_freq,
     },
+    task::disable_preempt,
     trap::{IrqLine, TrapFrame},
 };
@@ -63,31 +64,31 @@ fn is_tsc_deadline_mode_supported() -> bool {
 }
 
 fn init_timer(timer_irq: &IrqLine) {
+    let preempt_guard = disable_preempt();
+    let apic = apic::get_or_init(&preempt_guard as _);
+
     match CONFIG.get().expect("ACPI timer config is not initialized") {
         Config::DeadlineMode { .. } => {
-            init_deadline_mode(timer_irq);
+            init_deadline_mode(apic, timer_irq);
         }
         Config::PeriodicMode { init_count } => {
-            init_periodic_mode(timer_irq, *init_count);
+            init_periodic_mode(apic, timer_irq, *init_count);
        }
     }
 }
 
-fn init_deadline_mode(timer_irq: &IrqLine) {
-    // Enable tsc deadline mode
-    apic::with_borrow(|apic| {
-        apic.set_lvt_timer(timer_irq.num() as u64 | (1 << 18));
-    });
+fn init_deadline_mode(apic: &dyn Apic, timer_irq: &IrqLine) {
+    // Enable TSC deadline mode
+    apic.set_lvt_timer(timer_irq.num() as u64 | (1 << 18));
 
     timer_callback();
 }
 
-fn init_periodic_mode(timer_irq: &IrqLine, init_count: u64) {
-    apic::with_borrow(|apic| {
-        apic.set_timer_init_count(init_count);
-        apic.set_lvt_timer(timer_irq.num() as u64 | (1 << 17));
-        apic.set_timer_div_config(DivideConfig::Divide64);
-    });
+fn init_periodic_mode(apic: &dyn Apic, timer_irq: &IrqLine, init_count: u64) {
+    // Enable periodic mode
+    apic.set_timer_init_count(init_count);
+    apic.set_lvt_timer(timer_irq.num() as u64 | (1 << 17));
+    apic.set_timer_div_config(DivideConfig::Divide64);
 }
 
 static CONFIG: spin::Once<Config> = spin::Once::new();
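
Note: the magic numbers come from the LVT timer register layout, where bits 18:17 select the timer mode (00 one-shot, 01 periodic, 10 TSC-deadline) and the low byte holds the interrupt vector, per the Intel SDM. A small sketch of that layout; the constant names are invented:

```rust
// LVT timer register (partial): bits 7:0 = vector, bits 18:17 = timer mode.
const LVT_TIMER_PERIODIC: u64 = 0b01 << 17; // == 1 << 17, as in `init_periodic_mode`
const LVT_TIMER_TSC_DEADLINE: u64 = 0b10 << 17; // == 1 << 18, as in `init_deadline_mode`

fn lvt_timer_value(vector: u8, mode: u64) -> u64 {
    u64::from(vector) | mode
}

fn main() {
    assert_eq!(LVT_TIMER_TSC_DEADLINE, 1 << 18);
    // Vector 0x30 in periodic mode, the shape `init_periodic_mode` programs.
    assert_eq!(lvt_timer_value(0x30, LVT_TIMER_PERIODIC), 0x30 | (1 << 17));
}
```
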
@@ -116,10 +117,10 @@ fn init_periodic_mode_config() {
     super::pit::enable_ioapic_line(irq.clone());
 
     // Set APIC timer count
-    apic::with_borrow(|apic| {
-        apic.set_timer_div_config(DivideConfig::Divide64);
-        apic.set_timer_init_count(0xFFFF_FFFF);
-    });
+    let preempt_guard = disable_preempt();
+    let apic = apic::get_or_init(&preempt_guard as _);
+    apic.set_timer_div_config(DivideConfig::Divide64);
+    apic.set_timer_init_count(0xFFFF_FFFF);
 
     x86_64::instructions::interrupts::enable();
     while !CONFIG.is_completed() {
@@ -136,10 +137,14 @@ fn init_periodic_mode_config() {
         // This is set to 1/10th of the TIMER_FREQ to ensure enough samples for accurate calculation.
         const CALLBACK_TIMES: u64 = TIMER_FREQ / 10;
 
+        let preempt_guard = disable_preempt();
+        let apic = apic::get_or_init(&preempt_guard as _);
+
+        let apic_current_count = 0xFFFF_FFFF - apic.timer_current_count();
+
         if IN_TIME.load(Ordering::Relaxed) < CALLBACK_TIMES || CONFIG.is_completed() {
             if IN_TIME.load(Ordering::Relaxed) == 0 {
-                let remain_ticks = apic::with_borrow(|apic| apic.timer_current_count());
-                APIC_FIRST_COUNT.store(0xFFFF_FFFF - remain_ticks, Ordering::Relaxed);
+                APIC_FIRST_COUNT.store(apic_current_count, Ordering::Relaxed);
             }
             IN_TIME.fetch_add(1, Ordering::Relaxed);
             return;
@@ -147,17 +152,16 @@
         // Stop PIT and APIC Timer
         super::pit::disable_ioapic_line();
-        let remain_ticks = apic::with_borrow(|apic| {
-            let remain_ticks = apic.timer_current_count();
-            apic.set_timer_init_count(0);
-            remain_ticks
-        });
-        let ticks = (0xFFFF_FFFF - remain_ticks - APIC_FIRST_COUNT.load(Ordering::Relaxed))
-            / CALLBACK_TIMES;
+        apic.set_timer_init_count(0);
+
+        let apic_first_count = APIC_FIRST_COUNT.load(Ordering::Relaxed);
+        let apic_init_count = (apic_current_count - apic_first_count) / CALLBACK_TIMES;
         info!(
-            "APIC Timer ticks count:{:x}, remain ticks: {:x},Timer Freq:{} Hz",
-            ticks, remain_ticks, TIMER_FREQ
+            "APIC timer: first {:#x}, current {:#x}, init {:#x}",
+            apic_first_count, apic_current_count, apic_init_count,
         );
 
-        CONFIG.call_once(|| Config::PeriodicMode { init_count: ticks });
+        CONFIG.call_once(|| Config::PeriodicMode {
+            init_count: apic_init_count,
+        });
     }
 }
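
Note: the calibration works because the APIC timer counts down from `0xFFFF_FFFF`, so `0xFFFF_FFFF - apic.timer_current_count()` is the number of elapsed ticks, and the ticks accumulated across `CALLBACK_TIMES` PIT periods, divided by `CALLBACK_TIMES`, give the init count for one period. A worked sketch with invented counter readings and an assumed `TIMER_FREQ`:

```rust
fn main() {
    const TIMER_FREQ: u64 = 1000; // assumed PIT callback frequency, Hz
    const CALLBACK_TIMES: u64 = TIMER_FREQ / 10; // 100 samples

    // Invented countdown readings (the timer counts down from 0xFFFF_FFFF).
    let first_reading: u64 = 0xFFFF_F000; // remaining ticks at sample 0
    let last_reading: u64 = 0xFFF0_0000; // remaining ticks after 100 samples

    let apic_first_count = 0xFFFF_FFFF - first_reading; // elapsed ticks at sample 0
    let apic_current_count = 0xFFFF_FFFF - last_reading; // elapsed ticks at the end

    // Average ticks per PIT period becomes the periodic-mode init count.
    let apic_init_count = (apic_current_count - apic_first_count) / CALLBACK_TIMES;
    assert_eq!(apic_init_count, 10_444); // (1_048_575 - 4_095) / 100
}
```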

View File

@@ -155,7 +155,11 @@ fn ap_early_entry(cpu_id: u32) -> ! {
 }
 
 fn report_online_and_hw_cpu_id(cpu_id: u32) {
-    let old_val = HW_CPU_ID_MAP.lock().insert(cpu_id, HwCpuId::read_current());
+    // There are no races because this method will only be called in the boot
+    // context, where preemption won't occur.
+    let hw_cpu_id = HwCpuId::read_current(&crate::task::disable_preempt());
+
+    let old_val = HW_CPU_ID_MAP.lock().insert(cpu_id, hw_cpu_id);
     assert!(old_val.is_none());
 }

View File

@@ -52,7 +52,13 @@ pub fn inter_processor_call(targets: &CpuSet, f: fn()) {
         }
         // SAFETY: The value of `irq_num` corresponds to a valid IRQ line and
         // triggering it will not cause any safety issues.
-        unsafe { send_ipi(ipi_data.hw_cpu_ids[cpu_id.as_usize()], irq_num) };
+        unsafe {
+            send_ipi(
+                ipi_data.hw_cpu_ids[cpu_id.as_usize()],
+                irq_num,
+                &irq_guard as _,
+            )
+        };
     }
     if call_on_self {
         // Execute the function synchronously.
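
Note: `&irq_guard as _` is an unsized coercion whose target, `&dyn PinCurrentCpu`, is inferred from `send_ipi`'s parameter; it works here because disabling local IRQs, like disabling preemption, keeps the task on the current CPU, which is why the diff passes both kinds of guards as `&dyn PinCurrentCpu`. A tiny sketch of the inference mechanics with stand-in names:

```rust
trait PinCurrentCpu {}

struct DisabledLocalIrqGuard; // stand-in for the real IRQ guard
impl PinCurrentCpu for DisabledLocalIrqGuard {}

fn send_ipi_with(_guard: &dyn PinCurrentCpu) {}

fn main() {
    let irq_guard = DisabledLocalIrqGuard;
    // `as _` lets the compiler infer `&dyn PinCurrentCpu` from the parameter type.
    send_ipi_with(&irq_guard as _);
}
```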