Implement apic::get_or_init using Once

This commit is contained in:
Ruihan Li 2025-05-18 23:46:54 +08:00 committed by Junyang Zhang
parent 0a27a1f37b
commit a18e72b495
10 changed files with 166 additions and 135 deletions

View File

@ -8,7 +8,7 @@ use id_alloc::IdAlloc;
use spin::Once;
use crate::{
cpu::CpuId,
cpu::PinCurrentCpu,
sync::{Mutex, PreemptDisabled, SpinLock, SpinLockGuard},
trap::TrapFrame,
};
@ -147,7 +147,7 @@ impl Drop for IrqCallbackHandle {
pub(crate) struct HwCpuId(u32);
impl HwCpuId {
pub(crate) fn read_current() -> Self {
pub(crate) fn read_current(guard: &dyn PinCurrentCpu) -> Self {
// TODO: Support SMP in RISC-V.
Self(0)
}
@ -160,6 +160,6 @@ impl HwCpuId {
/// The caller must ensure that the interrupt number is valid and that
/// the corresponding handler is configured correctly on the remote CPU.
/// Furthermore, invoking the interrupt handler must also be safe.
pub(crate) unsafe fn send_ipi(hw_cpu_id: HwCpuId, irq_num: u8) {
pub(crate) unsafe fn send_ipi(hw_cpu_id: HwCpuId, irq_num: u8, guard: &dyn PinCurrentCpu) {
unimplemented!()
}

View File

@ -35,8 +35,8 @@ use crate::{
kernel::{
acpi::get_acpi_tables,
apic::{
self, ApicId, DeliveryMode, DeliveryStatus, DestinationMode, DestinationShorthand,
Icr, Level, TriggerMode,
self, Apic, ApicId, DeliveryMode, DeliveryStatus, DestinationMode,
DestinationShorthand, Icr, Level, TriggerMode,
},
},
},
@ -45,6 +45,7 @@ use crate::{
smp::PerApRawInfo,
},
mm::{Paddr, PAGE_SIZE},
task::disable_preempt,
};
/// Counts the number of processors.
@ -255,19 +256,22 @@ unsafe fn wake_up_aps_via_mailbox(num_cpus: u32) {
/// processors to boot successfully (e.g., each AP's page table
/// and stack).
unsafe fn send_boot_ipis() {
let preempt_guard = disable_preempt();
let apic = apic::get_or_init(&preempt_guard as _);
// SAFETY: We're sending IPIs to boot all application processors.
// The safety is upheld by the caller.
unsafe {
send_init_to_all_aps();
send_init_to_all_aps(apic);
spin_wait_cycles(100_000_000);
send_init_deassert();
send_init_deassert(apic);
spin_wait_cycles(20_000_000);
send_startup_to_all_aps();
send_startup_to_all_aps(apic);
spin_wait_cycles(20_000_000);
send_startup_to_all_aps();
send_startup_to_all_aps(apic);
spin_wait_cycles(20_000_000);
}
}
@ -275,7 +279,7 @@ unsafe fn send_boot_ipis() {
/// # Safety
///
/// The caller should ensure it's valid to send STARTUP IPIs to all CPUs excluding self.
unsafe fn send_startup_to_all_aps() {
unsafe fn send_startup_to_all_aps(apic: &dyn Apic) {
let icr = Icr::new(
ApicId::from(0),
DestinationShorthand::AllExcludingSelf,
@ -287,13 +291,13 @@ unsafe fn send_startup_to_all_aps() {
(AP_BOOT_START_PA / PAGE_SIZE) as u8,
);
// SAFETY: The safety is upheld by the caller.
apic::with_borrow(|apic| unsafe { apic.send_ipi(icr) });
unsafe { apic.send_ipi(icr) }
}
/// # Safety
///
/// The caller should ensure it's valid to send INIT IPIs to all CPUs excluding self.
unsafe fn send_init_to_all_aps() {
unsafe fn send_init_to_all_aps(apic: &dyn Apic) {
let icr = Icr::new(
ApicId::from(0),
DestinationShorthand::AllExcludingSelf,
@ -305,13 +309,13 @@ unsafe fn send_init_to_all_aps() {
0,
);
// SAFETY: The safety is upheld by the caller.
apic::with_borrow(|apic| unsafe { apic.send_ipi(icr) });
unsafe { apic.send_ipi(icr) };
}
/// # Safety
///
/// The caller should ensure it's valid to deassert INIT IPIs for all CPUs excluding self.
unsafe fn send_init_deassert() {
unsafe fn send_init_deassert(apic: &dyn Apic) {
let icr = Icr::new(
ApicId::from(0),
DestinationShorthand::AllIncludingSelf,
@ -323,7 +327,7 @@ unsafe fn send_init_deassert() {
0,
);
// SAFETY: The safety is upheld by the caller.
apic::with_borrow(|apic| unsafe { apic.send_ipi(icr) });
unsafe { apic.send_ipi(icr) };
}
/// Spin wait approximately `c` cycles.

View File

@ -12,6 +12,7 @@ use x86_64::registers::rflags::{self, RFlags};
use super::iommu::{alloc_irt_entry, has_interrupt_remapping, IrtEntryHandle};
use crate::{
cpu::PinCurrentCpu,
sync::{LocalIrqDisabled, Mutex, PreemptDisabled, RwLock, RwLockReadGuard, SpinLock},
trap::TrapFrame,
};
@ -179,11 +180,11 @@ impl Drop for IrqCallbackHandle {
pub(crate) struct HwCpuId(u32);
impl HwCpuId {
pub(crate) fn read_current() -> Self {
pub(crate) fn read_current(guard: &dyn PinCurrentCpu) -> Self {
use crate::arch::kernel::apic;
let id = apic::with_borrow(|apic| apic.id());
Self(id)
let apic = apic::get_or_init(guard);
Self(apic.id())
}
}
@ -194,7 +195,7 @@ impl HwCpuId {
/// The caller must ensure that the interrupt number is valid and that
/// the corresponding handler is configured correctly on the remote CPU.
/// Furthermore, invoking the interrupt handler must also be safe.
pub(crate) unsafe fn send_ipi(hw_cpu_id: HwCpuId, irq_num: u8) {
pub(crate) unsafe fn send_ipi(hw_cpu_id: HwCpuId, irq_num: u8, guard: &dyn PinCurrentCpu) {
use crate::arch::kernel::apic::{self, Icr};
let icr = Icr::new(
@ -207,7 +208,7 @@ pub(crate) unsafe fn send_ipi(hw_cpu_id: HwCpuId, irq_num: u8) {
apic::DeliveryMode::Fixed,
irq_num,
);
apic::with_borrow(|apic| {
let apic = apic::get_or_init(guard);
apic.send_ipi(icr);
});
}

View File

@ -1,10 +1,6 @@
// SPDX-License-Identifier: MPL-2.0
use alloc::boxed::Box;
use core::{
cell::UnsafeCell,
sync::atomic::{AtomicBool, Ordering},
};
use bit_field::BitField;
use spin::Once;
@ -18,46 +14,62 @@ pub mod xapic;
static APIC_TYPE: Once<ApicType> = Once::new();
/// Do something over the APIC instance for the current CPU.
/// Returns a reference to the local APIC instance of the current CPU.
///
/// You should provide a closure operating on the given mutable borrow of the
/// local APIC instance. During the execution of the closure, the interrupts
/// are guaranteed to be disabled.
/// The reference to the APIC instance will not outlive the given
/// [`PinCurrentCpu`] guard and the APIC instance does not implement
/// [`Sync`], so it is safe to assume that the APIC instance belongs
/// to the current CPU. Note that interrupts are not disabled, so the
/// APIC instance may be accessed concurrently by interrupt handlers.
///
/// This function also lazily initializes the Local APIC instance. It does
/// enable the Local APIC if it is not enabled.
/// The first time this function is called, the local APIC instance
/// is lazily initialized, and it is enabled if it was not already enabled.
///
/// # Examples
///
/// Example:
/// ```rust
/// use ostd::arch::x86::kernel::apic;
/// use ostd::{
/// arch::x86::kernel::apic,
/// task::disable_preempt,
/// };
///
/// let preempt_guard = disable_preempt();
/// let apic = apic::get_or_init(&preempt_guard as _);
///
/// let ticks = apic::with_borrow(|apic| {
/// let ticks = apic.timer_current_count();
/// apic.set_timer_init_count(0);
/// ticks
/// });
/// ```
pub fn with_borrow<R>(f: impl FnOnce(&(dyn Apic + 'static)) -> R) -> R {
cpu_local! {
static APIC_INSTANCE: UnsafeCell<Option<Box<dyn Apic + 'static>>> = UnsafeCell::new(None);
static IS_INITIALIZED: AtomicBool = AtomicBool::new(false);
pub fn get_or_init(guard: &dyn PinCurrentCpu) -> &(dyn Apic + 'static) {
struct ForceSyncSend<T>(T);
// SAFETY: Marking `ForceSyncSend` as `Sync + Send` is sound because the only
// way to reach the contained value is the unsafe `get` method, whose callers
// must guarantee that accessing `&T` is valid in their context.
unsafe impl<T> Sync for ForceSyncSend<T> {}
unsafe impl<T> Send for ForceSyncSend<T> {}
impl<T> ForceSyncSend<T> {
/// # Safety
///
/// The caller must ensure that its context allows for safe access to `&T`.
unsafe fn get(&self) -> &T {
&self.0
}
}
let preempt_guard = crate::task::disable_preempt();
cpu_local! {
static APIC_INSTANCE: Once<ForceSyncSend<Box<dyn Apic + 'static>>> = Once::new();
}
// SAFETY: Preemption is disabled, so we can safely access the CPU-local variable.
let apic_ptr = unsafe {
let ptr = APIC_INSTANCE.as_ptr();
(*ptr).get()
};
let apic_instance = APIC_INSTANCE.get_on_cpu(guard.current_cpu());
// If it is not initialized, lazily initialize it.
if IS_INITIALIZED
.get_on_cpu(preempt_guard.current_cpu())
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
.is_ok()
{
let apic: Option<Box<dyn Apic>> = Some(match APIC_TYPE.get().unwrap() {
// The APIC instance has already been initialized.
if let Some(apic) = apic_instance.get() {
// SAFETY: Accessing `&dyn Apic` is safe as long as we're running on the same CPU on which
// the APIC instance was created. The `get_on_cpu` method above ensures this.
return &**unsafe { apic.get() };
}
// Initialize the APIC instance now.
apic_instance.call_once(|| match APIC_TYPE.get().unwrap() {
ApicType::XApic => {
let mut xapic = xapic::XApic::new().unwrap();
xapic.enable();
@ -68,7 +80,7 @@ pub fn with_borrow<R>(f: impl FnOnce(&(dyn Apic + 'static)) -> R) -> R {
version & 0xff,
(version >> 16) & 0xff
);
Box::new(xapic)
ForceSyncSend(Box::new(xapic))
}
ApicType::X2Apic => {
let mut x2apic = x2apic::X2Apic::new().unwrap();
@ -80,28 +92,15 @@ pub fn with_borrow<R>(f: impl FnOnce(&(dyn Apic + 'static)) -> R) -> R {
version & 0xff,
(version >> 16) & 0xff
);
Box::new(x2apic)
ForceSyncSend(Box::new(x2apic))
}
});
// SAFETY: It will be only written once and no other read or write can
// happen concurrently for the synchronization with `IS_INITIALIZED`.
//
// If interrupts happen during the initialization, it can be written
// twice. But it should not happen since we must call this function
// when interrupts are disabled during the initialization of OSTD.
unsafe {
debug_assert!(apic_ptr.read().is_none());
apic_ptr.write(apic);
}
}
// SAFETY: The APIC pointer is not accessed by other CPUs. It should point
// to initialized data and there will not be any write-during-read problem
// since there would be no write after `IS_INITIALIZED` is set to true.
let apic_ref = unsafe { &*apic_ptr };
let apic_ref = apic_ref.as_ref().unwrap().as_ref();
f.call_once((apic_ref,))
// We've initialized the APIC instance, so this `unwrap` cannot fail.
let apic = apic_instance.get().unwrap();
// SAFETY: Accessing `&dyn Apic` is safe as long as we're running on the same CPU on which the
// APIC instance was created. The initialization above ensures this.
&**unsafe { apic.get() }
}
pub trait Apic: ApicTimer {

View File

@ -8,14 +8,22 @@ use x86::msr::{
use super::ApicTimer;
pub struct X2Apic {}
#[derive(Debug)]
pub struct X2Apic {
_private: (),
}
// The APIC instance can be shared among threads running on the same CPU, but not among those
// running on different CPUs. Therefore, it is not `Send`/`Sync`.
impl !Send for X2Apic {}
impl !Sync for X2Apic {}
impl X2Apic {
pub(crate) fn new() -> Option<Self> {
if !Self::has_x2apic() {
return None;
}
Some(Self {})
Some(Self { _private: () })
}
pub(super) fn has_x2apic() -> bool {

View File

@ -18,6 +18,11 @@ pub struct XApic {
mmio_start: *mut u32,
}
// The APIC instance can be shared among threads running on the same CPU, but not among those
// running on different CPUs. Therefore, it is not `Send`/`Sync`.
impl !Send for XApic {}
impl !Sync for XApic {}
impl XApic {
pub fn new() -> Option<Self> {
if !Self::has_xapic() {

View File

@ -119,9 +119,9 @@ pub(crate) unsafe fn init_on_ap() {
pub(crate) fn interrupts_ack(irq_number: usize) {
if !cpu::context::CpuException::is_cpu_exception(irq_number as u16) {
kernel::apic::with_borrow(|apic| {
apic.eoi();
});
// TODO: We're in the interrupt context, so `disable_preempt()` is not
// really necessary here.
kernel::apic::get_or_init(&crate::task::disable_preempt() as _).eoi();
}
}

View File

@ -10,10 +10,11 @@ use log::info;
use super::TIMER_FREQ;
use crate::{
arch::{
kernel::apic::{self, DivideConfig},
kernel::apic::{self, Apic, DivideConfig},
timer::pit::OperatingMode,
tsc_freq,
},
task::disable_preempt,
trap::{IrqLine, TrapFrame},
};
@ -63,31 +64,31 @@ fn is_tsc_deadline_mode_supported() -> bool {
}
fn init_timer(timer_irq: &IrqLine) {
let preempt_guard = disable_preempt();
let apic = apic::get_or_init(&preempt_guard as _);
match CONFIG.get().expect("ACPI timer config is not initialized") {
Config::DeadlineMode { .. } => {
init_deadline_mode(timer_irq);
init_deadline_mode(apic, timer_irq);
}
Config::PeriodicMode { init_count } => {
init_periodic_mode(timer_irq, *init_count);
init_periodic_mode(apic, timer_irq, *init_count);
}
}
}
fn init_deadline_mode(timer_irq: &IrqLine) {
// Enable tsc deadline mode
apic::with_borrow(|apic| {
fn init_deadline_mode(apic: &dyn Apic, timer_irq: &IrqLine) {
// Enable TSC deadline mode
apic.set_lvt_timer(timer_irq.num() as u64 | (1 << 18));
});
timer_callback();
}
fn init_periodic_mode(timer_irq: &IrqLine, init_count: u64) {
apic::with_borrow(|apic| {
fn init_periodic_mode(apic: &dyn Apic, timer_irq: &IrqLine, init_count: u64) {
// Enable periodic mode
apic.set_timer_init_count(init_count);
apic.set_lvt_timer(timer_irq.num() as u64 | (1 << 17));
apic.set_timer_div_config(DivideConfig::Divide64);
});
}
static CONFIG: spin::Once<Config> = spin::Once::new();
@ -116,10 +117,10 @@ fn init_periodic_mode_config() {
super::pit::enable_ioapic_line(irq.clone());
// Set APIC timer count
apic::with_borrow(|apic| {
let preempt_guard = disable_preempt();
let apic = apic::get_or_init(&preempt_guard as _);
apic.set_timer_div_config(DivideConfig::Divide64);
apic.set_timer_init_count(0xFFFF_FFFF);
});
x86_64::instructions::interrupts::enable();
while !CONFIG.is_completed() {
@ -136,10 +137,14 @@ fn init_periodic_mode_config() {
// This is set to 1/10th of the TIMER_FREQ to ensure enough samples for accurate calculation.
const CALLBACK_TIMES: u64 = TIMER_FREQ / 10;
let preempt_guard = disable_preempt();
let apic = apic::get_or_init(&preempt_guard as _);
let apic_current_count = 0xFFFF_FFFF - apic.timer_current_count();
if IN_TIME.load(Ordering::Relaxed) < CALLBACK_TIMES || CONFIG.is_completed() {
if IN_TIME.load(Ordering::Relaxed) == 0 {
let remain_ticks = apic::with_borrow(|apic| apic.timer_current_count());
APIC_FIRST_COUNT.store(0xFFFF_FFFF - remain_ticks, Ordering::Relaxed);
APIC_FIRST_COUNT.store(apic_current_count, Ordering::Relaxed);
}
IN_TIME.fetch_add(1, Ordering::Relaxed);
return;
@ -147,17 +152,16 @@ fn init_periodic_mode_config() {
// Stop PIT and APIC Timer
super::pit::disable_ioapic_line();
let remain_ticks = apic::with_borrow(|apic| {
let remain_ticks = apic.timer_current_count();
apic.set_timer_init_count(0);
remain_ticks
});
let ticks = (0xFFFF_FFFF - remain_ticks - APIC_FIRST_COUNT.load(Ordering::Relaxed))
/ CALLBACK_TIMES;
let apic_first_count = APIC_FIRST_COUNT.load(Ordering::Relaxed);
let apic_init_count = (apic_current_count - apic_first_count) / CALLBACK_TIMES;
info!(
"APIC Timer ticks count:{:x}, remain ticks: {:x},Timer Freq:{} Hz",
ticks, remain_ticks, TIMER_FREQ
"APIC timer: first {:#x}, current {:#x}, init {:#x}",
apic_first_count, apic_current_count, apic_init_count,
);
CONFIG.call_once(|| Config::PeriodicMode { init_count: ticks });
CONFIG.call_once(|| Config::PeriodicMode {
init_count: apic_init_count,
});
}
}

View File

@ -155,7 +155,11 @@ fn ap_early_entry(cpu_id: u32) -> ! {
}
fn report_online_and_hw_cpu_id(cpu_id: u32) {
let old_val = HW_CPU_ID_MAP.lock().insert(cpu_id, HwCpuId::read_current());
// There are no races because this function is only called in the boot
// context, where preemption cannot occur.
let hw_cpu_id = HwCpuId::read_current(&crate::task::disable_preempt());
let old_val = HW_CPU_ID_MAP.lock().insert(cpu_id, hw_cpu_id);
assert!(old_val.is_none());
}

View File

@ -52,7 +52,13 @@ pub fn inter_processor_call(targets: &CpuSet, f: fn()) {
}
// SAFETY: The value of `irq_num` corresponds to a valid IRQ line and
// triggering it will not cause any safety issues.
unsafe { send_ipi(ipi_data.hw_cpu_ids[cpu_id.as_usize()], irq_num) };
unsafe {
send_ipi(
ipi_data.hw_cpu_ids[cpu_id.as_usize()],
irq_num,
&irq_guard as _,
)
};
}
if call_on_self {
// Execute the function synchronously.