From a18e72b495e888dc3f8fbb440213c84294278e21 Mon Sep 17 00:00:00 2001 From: Ruihan Li Date: Sun, 18 May 2025 23:46:54 +0800 Subject: [PATCH] Implement `apic::get_or_init` using `Once` --- ostd/src/arch/riscv/irq.rs | 6 +- ostd/src/arch/x86/boot/smp.rs | 28 +++-- ostd/src/arch/x86/irq.rs | 15 +-- ostd/src/arch/x86/kernel/apic/mod.rs | 151 ++++++++++++------------ ostd/src/arch/x86/kernel/apic/x2apic.rs | 12 +- ostd/src/arch/x86/kernel/apic/xapic.rs | 5 + ostd/src/arch/x86/mod.rs | 6 +- ostd/src/arch/x86/timer/apic.rs | 64 +++++----- ostd/src/boot/smp.rs | 6 +- ostd/src/smp.rs | 8 +- 10 files changed, 166 insertions(+), 135 deletions(-) diff --git a/ostd/src/arch/riscv/irq.rs b/ostd/src/arch/riscv/irq.rs index 42786b00..f2aa0a61 100644 --- a/ostd/src/arch/riscv/irq.rs +++ b/ostd/src/arch/riscv/irq.rs @@ -8,7 +8,7 @@ use id_alloc::IdAlloc; use spin::Once; use crate::{ - cpu::CpuId, + cpu::PinCurrentCpu, sync::{Mutex, PreemptDisabled, SpinLock, SpinLockGuard}, trap::TrapFrame, }; @@ -147,7 +147,7 @@ impl Drop for IrqCallbackHandle { pub(crate) struct HwCpuId(u32); impl HwCpuId { - pub(crate) fn read_current() -> Self { + pub(crate) fn read_current(guard: &dyn PinCurrentCpu) -> Self { // TODO: Support SMP in RISC-V. Self(0) } @@ -160,6 +160,6 @@ impl HwCpuId { /// The caller must ensure that the interrupt number is valid and that /// the corresponding handler is configured correctly on the remote CPU. /// Furthermore, invoking the interrupt handler must also be safe. 
-pub(crate) unsafe fn send_ipi(hw_cpu_id: HwCpuId, irq_num: u8) { +pub(crate) unsafe fn send_ipi(hw_cpu_id: HwCpuId, irq_num: u8, guard: &dyn PinCurrentCpu) { unimplemented!() } diff --git a/ostd/src/arch/x86/boot/smp.rs b/ostd/src/arch/x86/boot/smp.rs index 3a9a931f..8e3c3666 100644 --- a/ostd/src/arch/x86/boot/smp.rs +++ b/ostd/src/arch/x86/boot/smp.rs @@ -35,8 +35,8 @@ use crate::{ kernel::{ acpi::get_acpi_tables, apic::{ - self, ApicId, DeliveryMode, DeliveryStatus, DestinationMode, DestinationShorthand, - Icr, Level, TriggerMode, + self, Apic, ApicId, DeliveryMode, DeliveryStatus, DestinationMode, + DestinationShorthand, Icr, Level, TriggerMode, }, }, }, @@ -45,6 +45,7 @@ use crate::{ smp::PerApRawInfo, }, mm::{Paddr, PAGE_SIZE}, + task::disable_preempt, }; /// Counts the number of processors. @@ -255,19 +256,22 @@ unsafe fn wake_up_aps_via_mailbox(num_cpus: u32) { /// processors to boot successfully (e.g., each AP's page table /// and stack). unsafe fn send_boot_ipis() { + let preempt_guard = disable_preempt(); + let apic = apic::get_or_init(&preempt_guard as _); + // SAFETY: We're sending IPIs to boot all application processors. // The safety is upheld by the caller. unsafe { - send_init_to_all_aps(); + send_init_to_all_aps(apic); spin_wait_cycles(100_000_000); - send_init_deassert(); + send_init_deassert(apic); spin_wait_cycles(20_000_000); - send_startup_to_all_aps(); + send_startup_to_all_aps(apic); spin_wait_cycles(20_000_000); - send_startup_to_all_aps(); + send_startup_to_all_aps(apic); spin_wait_cycles(20_000_000); } } @@ -275,7 +279,7 @@ unsafe fn send_boot_ipis() { /// # Safety /// /// The caller should ensure it's valid to send STARTUP IPIs to all CPUs excluding self. 
-unsafe fn send_startup_to_all_aps() { +unsafe fn send_startup_to_all_aps(apic: &dyn Apic) { let icr = Icr::new( ApicId::from(0), DestinationShorthand::AllExcludingSelf, @@ -287,13 +291,13 @@ unsafe fn send_startup_to_all_aps() { (AP_BOOT_START_PA / PAGE_SIZE) as u8, ); // SAFETY: The safety is upheld by the caller. - apic::with_borrow(|apic| unsafe { apic.send_ipi(icr) }); + unsafe { apic.send_ipi(icr) } } /// # Safety /// /// The caller should ensure it's valid to send INIT IPIs to all CPUs excluding self. -unsafe fn send_init_to_all_aps() { +unsafe fn send_init_to_all_aps(apic: &dyn Apic) { let icr = Icr::new( ApicId::from(0), DestinationShorthand::AllExcludingSelf, @@ -305,13 +309,13 @@ unsafe fn send_init_to_all_aps() { 0, ); // SAFETY: The safety is upheld by the caller. - apic::with_borrow(|apic| unsafe { apic.send_ipi(icr) }); + unsafe { apic.send_ipi(icr) }; } /// # Safety /// /// The caller should ensure it's valid to deassert INIT IPIs for all CPUs excluding self. -unsafe fn send_init_deassert() { +unsafe fn send_init_deassert(apic: &dyn Apic) { let icr = Icr::new( ApicId::from(0), DestinationShorthand::AllIncludingSelf, @@ -323,7 +327,7 @@ unsafe fn send_init_deassert() { 0, ); // SAFETY: The safety is upheld by the caller. - apic::with_borrow(|apic| unsafe { apic.send_ipi(icr) }); + unsafe { apic.send_ipi(icr) }; } /// Spin wait approximately `c` cycles. 
diff --git a/ostd/src/arch/x86/irq.rs b/ostd/src/arch/x86/irq.rs index d4bdfcb4..83e8ee94 100644 --- a/ostd/src/arch/x86/irq.rs +++ b/ostd/src/arch/x86/irq.rs @@ -12,6 +12,7 @@ use x86_64::registers::rflags::{self, RFlags}; use super::iommu::{alloc_irt_entry, has_interrupt_remapping, IrtEntryHandle}; use crate::{ + cpu::PinCurrentCpu, sync::{LocalIrqDisabled, Mutex, PreemptDisabled, RwLock, RwLockReadGuard, SpinLock}, trap::TrapFrame, }; @@ -179,11 +180,11 @@ impl Drop for IrqCallbackHandle { pub(crate) struct HwCpuId(u32); impl HwCpuId { - pub(crate) fn read_current() -> Self { + pub(crate) fn read_current(guard: &dyn PinCurrentCpu) -> Self { use crate::arch::kernel::apic; - let id = apic::with_borrow(|apic| apic.id()); - Self(id) + let apic = apic::get_or_init(guard); + Self(apic.id()) } } @@ -194,7 +195,7 @@ impl HwCpuId { /// The caller must ensure that the interrupt number is valid and that /// the corresponding handler is configured correctly on the remote CPU. /// Furthermore, invoking the interrupt handler must also be safe. 
-pub(crate) unsafe fn send_ipi(hw_cpu_id: HwCpuId, irq_num: u8) { +pub(crate) unsafe fn send_ipi(hw_cpu_id: HwCpuId, irq_num: u8, guard: &dyn PinCurrentCpu) { use crate::arch::kernel::apic::{self, Icr}; let icr = Icr::new( @@ -207,7 +208,7 @@ pub(crate) unsafe fn send_ipi(hw_cpu_id: HwCpuId, irq_num: u8) { apic::DeliveryMode::Fixed, irq_num, ); - apic::with_borrow(|apic| { - apic.send_ipi(icr); - }); + + let apic = apic::get_or_init(guard); + apic.send_ipi(icr); } diff --git a/ostd/src/arch/x86/kernel/apic/mod.rs b/ostd/src/arch/x86/kernel/apic/mod.rs index 80449c49..49394a67 100644 --- a/ostd/src/arch/x86/kernel/apic/mod.rs +++ b/ostd/src/arch/x86/kernel/apic/mod.rs @@ -1,10 +1,6 @@ // SPDX-License-Identifier: MPL-2.0 use alloc::boxed::Box; -use core::{ - cell::UnsafeCell, - sync::atomic::{AtomicBool, Ordering}, -}; use bit_field::BitField; use spin::Once; @@ -18,90 +14,93 @@ pub mod xapic; static APIC_TYPE: Once = Once::new(); -/// Do something over the APIC instance for the current CPU. +/// Returns a reference to the local APIC instance of the current CPU. /// -/// You should provide a closure operating on the given mutable borrow of the -/// local APIC instance. During the execution of the closure, the interrupts -/// are guaranteed to be disabled. +/// The reference to the APIC instance will not outlive the given +/// [`PinCurrentCpu`] guard and the APIC instance does not implement +/// [`Sync`], so it is safe to assume that the APIC instance belongs +/// to the current CPU. Note that interrupts are not disabled, so the +/// APIC instance may be accessed concurrently by interrupt handlers. /// -/// This function also lazily initializes the Local APIC instance. It does -/// enable the Local APIC if it is not enabled. +/// The first time this function is called, the local APIC instance is +/// lazily initialized, and the local APIC is enabled if it was not enabled beforehand. 
+/// +/// # Examples /// -/// Example: /// ```rust -/// use ostd::arch::x86::kernel::apic; +/// use ostd::{ +/// arch::x86::kernel::apic, +/// task::disable_preempt, +/// }; /// -/// let ticks = apic::with_borrow(|apic| { -/// let ticks = apic.timer_current_count(); -/// apic.set_timer_init_count(0); -/// ticks -/// }); +/// let preempt_guard = disable_preempt(); +/// let apic = apic::get_or_init(&preempt_guard as _); +/// +/// let ticks = apic.timer_current_count(); +/// apic.set_timer_init_count(0); /// ``` -pub fn with_borrow(f: impl FnOnce(&(dyn Apic + 'static)) -> R) -> R { - cpu_local! { - static APIC_INSTANCE: UnsafeCell>> = UnsafeCell::new(None); - static IS_INITIALIZED: AtomicBool = AtomicBool::new(false); - } +pub fn get_or_init(guard: &dyn PinCurrentCpu) -> &(dyn Apic + 'static) { + struct ForceSyncSend(T); - let preempt_guard = crate::task::disable_preempt(); + // SAFETY: `ForceSyncSend` is `Sync + Send`, but accessing its contained value is unsafe. + unsafe impl Sync for ForceSyncSend {} + unsafe impl Send for ForceSyncSend {} - // SAFETY: Preemption is disabled, so we can safely access the CPU-local variable. - let apic_ptr = unsafe { - let ptr = APIC_INSTANCE.as_ptr(); - (*ptr).get() - }; - - // If it is not initialized, lazily initialize it. 
- if IS_INITIALIZED - .get_on_cpu(preempt_guard.current_cpu()) - .compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed) - .is_ok() - { - let apic: Option> = Some(match APIC_TYPE.get().unwrap() { - ApicType::XApic => { - let mut xapic = xapic::XApic::new().unwrap(); - xapic.enable(); - let version = xapic.version(); - log::info!( - "xAPIC ID:{:x}, Version:{:x}, Max LVT:{:x}", - xapic.id(), - version & 0xff, - (version >> 16) & 0xff - ); - Box::new(xapic) - } - ApicType::X2Apic => { - let mut x2apic = x2apic::X2Apic::new().unwrap(); - x2apic.enable(); - let version = x2apic.version(); - log::info!( - "x2APIC ID:{:x}, Version:{:x}, Max LVT:{:x}", - x2apic.id(), - version & 0xff, - (version >> 16) & 0xff - ); - Box::new(x2apic) - } - }); - // SAFETY: It will be only written once and no other read or write can - // happen concurrently for the synchronization with `IS_INITIALIZED`. - // - // If interrupts happen during the initialization, it can be written - // twice. But it should not happen since we must call this function - // when interrupts are disabled during the initialization of OSTD. - unsafe { - debug_assert!(apic_ptr.read().is_none()); - apic_ptr.write(apic); + impl ForceSyncSend { + /// # Safety + /// + /// The caller must ensure that its context allows for safe access to `&T`. + unsafe fn get(&self) -> &T { + &self.0 } } - // SAFETY: The APIC pointer is not accessed by other CPUs. It should point - // to initialized data and there will not be any write-during-read problem - // since there would be no write after `IS_INITIALIZED` is set to true. - let apic_ref = unsafe { &*apic_ptr }; - let apic_ref = apic_ref.as_ref().unwrap().as_ref(); + cpu_local! { + static APIC_INSTANCE: Once>> = Once::new(); + } - f.call_once((apic_ref,)) + let apic_instance = APIC_INSTANCE.get_on_cpu(guard.current_cpu()); + + // The APIC instance has already been initialized. 
+ if let Some(apic) = apic_instance.get() { + // SAFETY: Accessing `&dyn Apic` is safe as long as we're running on the same CPU on which + // the APIC instance was created. The `get_on_cpu` method above ensures this. + return &**unsafe { apic.get() }; + } + + // Initialize the APIC instance now. + apic_instance.call_once(|| match APIC_TYPE.get().unwrap() { + ApicType::XApic => { + let mut xapic = xapic::XApic::new().unwrap(); + xapic.enable(); + let version = xapic.version(); + log::info!( + "xAPIC ID:{:x}, Version:{:x}, Max LVT:{:x}", + xapic.id(), + version & 0xff, + (version >> 16) & 0xff + ); + ForceSyncSend(Box::new(xapic)) + } + ApicType::X2Apic => { + let mut x2apic = x2apic::X2Apic::new().unwrap(); + x2apic.enable(); + let version = x2apic.version(); + log::info!( + "x2APIC ID:{:x}, Version:{:x}, Max LVT:{:x}", + x2apic.id(), + version & 0xff, + (version >> 16) & 0xff + ); + ForceSyncSend(Box::new(x2apic)) + } + }); + + // We've initialized the APIC instance, so this `unwrap` cannot fail. + let apic = apic_instance.get().unwrap(); + // SAFETY: Accessing `&dyn Apic` is safe as long as we're running on the same CPU on which the + // APIC instance was created. The initialization above ensures this. + &**unsafe { apic.get() } } pub trait Apic: ApicTimer { diff --git a/ostd/src/arch/x86/kernel/apic/x2apic.rs b/ostd/src/arch/x86/kernel/apic/x2apic.rs index edaee67b..7a74ae80 100644 --- a/ostd/src/arch/x86/kernel/apic/x2apic.rs +++ b/ostd/src/arch/x86/kernel/apic/x2apic.rs @@ -8,14 +8,22 @@ use x86::msr::{ use super::ApicTimer; -pub struct X2Apic {} +#[derive(Debug)] +pub struct X2Apic { + _private: (), +} + +// The APIC instance can be shared among threads running on the same CPU, but not among those +// running on different CPUs. Therefore, it is not `Send`/`Sync`. 
+impl !Send for X2Apic {} +impl !Sync for X2Apic {} impl X2Apic { pub(crate) fn new() -> Option { if !Self::has_x2apic() { return None; } - Some(Self {}) + Some(Self { _private: () }) } pub(super) fn has_x2apic() -> bool { diff --git a/ostd/src/arch/x86/kernel/apic/xapic.rs b/ostd/src/arch/x86/kernel/apic/xapic.rs index 3ccd0593..53da387c 100644 --- a/ostd/src/arch/x86/kernel/apic/xapic.rs +++ b/ostd/src/arch/x86/kernel/apic/xapic.rs @@ -18,6 +18,11 @@ pub struct XApic { mmio_start: *mut u32, } +// The APIC instance can be shared among threads running on the same CPU, but not among those +// running on different CPUs. Therefore, it is not `Send`/`Sync`. +impl !Send for XApic {} +impl !Sync for XApic {} + impl XApic { pub fn new() -> Option { if !Self::has_xapic() { diff --git a/ostd/src/arch/x86/mod.rs b/ostd/src/arch/x86/mod.rs index aadb0214..7bdc4d85 100644 --- a/ostd/src/arch/x86/mod.rs +++ b/ostd/src/arch/x86/mod.rs @@ -119,9 +119,9 @@ pub(crate) unsafe fn init_on_ap() { pub(crate) fn interrupts_ack(irq_number: usize) { if !cpu::context::CpuException::is_cpu_exception(irq_number as u16) { - kernel::apic::with_borrow(|apic| { - apic.eoi(); - }); + // TODO: We're in the interrupt context, so `disable_preempt()` is not + // really necessary here. 
+ kernel::apic::get_or_init(&crate::task::disable_preempt() as _).eoi(); } } diff --git a/ostd/src/arch/x86/timer/apic.rs b/ostd/src/arch/x86/timer/apic.rs index 8f33d1b3..e58b7e88 100644 --- a/ostd/src/arch/x86/timer/apic.rs +++ b/ostd/src/arch/x86/timer/apic.rs @@ -10,10 +10,11 @@ use log::info; use super::TIMER_FREQ; use crate::{ arch::{ - kernel::apic::{self, DivideConfig}, + kernel::apic::{self, Apic, DivideConfig}, timer::pit::OperatingMode, tsc_freq, }, + task::disable_preempt, trap::{IrqLine, TrapFrame}, }; @@ -63,31 +64,31 @@ fn is_tsc_deadline_mode_supported() -> bool { } fn init_timer(timer_irq: &IrqLine) { + let preempt_guard = disable_preempt(); + let apic = apic::get_or_init(&preempt_guard as _); + match CONFIG.get().expect("ACPI timer config is not initialized") { Config::DeadlineMode { .. } => { - init_deadline_mode(timer_irq); + init_deadline_mode(apic, timer_irq); } Config::PeriodicMode { init_count } => { - init_periodic_mode(timer_irq, *init_count); + init_periodic_mode(apic, timer_irq, *init_count); } } } -fn init_deadline_mode(timer_irq: &IrqLine) { - // Enable tsc deadline mode - apic::with_borrow(|apic| { - apic.set_lvt_timer(timer_irq.num() as u64 | (1 << 18)); - }); +fn init_deadline_mode(apic: &dyn Apic, timer_irq: &IrqLine) { + // Enable TSC deadline mode + apic.set_lvt_timer(timer_irq.num() as u64 | (1 << 18)); timer_callback(); } -fn init_periodic_mode(timer_irq: &IrqLine, init_count: u64) { - apic::with_borrow(|apic| { - apic.set_timer_init_count(init_count); - apic.set_lvt_timer(timer_irq.num() as u64 | (1 << 17)); - apic.set_timer_div_config(DivideConfig::Divide64); - }); +fn init_periodic_mode(apic: &dyn Apic, timer_irq: &IrqLine, init_count: u64) { + // Enable periodic mode + apic.set_timer_init_count(init_count); + apic.set_lvt_timer(timer_irq.num() as u64 | (1 << 17)); + apic.set_timer_div_config(DivideConfig::Divide64); } static CONFIG: spin::Once = spin::Once::new(); @@ -116,10 +117,10 @@ fn init_periodic_mode_config() { 
super::pit::enable_ioapic_line(irq.clone()); // Set APIC timer count - apic::with_borrow(|apic| { - apic.set_timer_div_config(DivideConfig::Divide64); - apic.set_timer_init_count(0xFFFF_FFFF); - }); + let preempt_guard = disable_preempt(); + let apic = apic::get_or_init(&preempt_guard as _); + apic.set_timer_div_config(DivideConfig::Divide64); + apic.set_timer_init_count(0xFFFF_FFFF); x86_64::instructions::interrupts::enable(); while !CONFIG.is_completed() { @@ -136,10 +137,14 @@ fn init_periodic_mode_config() { // This is set to 1/10th of the TIMER_FREQ to ensure enough samples for accurate calculation. const CALLBACK_TIMES: u64 = TIMER_FREQ / 10; + let preempt_guard = disable_preempt(); + let apic = apic::get_or_init(&preempt_guard as _); + + let apic_current_count = 0xFFFF_FFFF - apic.timer_current_count(); + if IN_TIME.load(Ordering::Relaxed) < CALLBACK_TIMES || CONFIG.is_completed() { if IN_TIME.load(Ordering::Relaxed) == 0 { - let remain_ticks = apic::with_borrow(|apic| apic.timer_current_count()); - APIC_FIRST_COUNT.store(0xFFFF_FFFF - remain_ticks, Ordering::Relaxed); + APIC_FIRST_COUNT.store(apic_current_count, Ordering::Relaxed); } IN_TIME.fetch_add(1, Ordering::Relaxed); return; @@ -147,17 +152,16 @@ fn init_periodic_mode_config() { // Stop PIT and APIC Timer super::pit::disable_ioapic_line(); - let remain_ticks = apic::with_borrow(|apic| { - let remain_ticks = apic.timer_current_count(); - apic.set_timer_init_count(0); - remain_ticks - }); - let ticks = (0xFFFF_FFFF - remain_ticks - APIC_FIRST_COUNT.load(Ordering::Relaxed)) - / CALLBACK_TIMES; + apic.set_timer_init_count(0); + + let apic_first_count = APIC_FIRST_COUNT.load(Ordering::Relaxed); + let apic_init_count = (apic_current_count - apic_first_count) / CALLBACK_TIMES; info!( - "APIC Timer ticks count:{:x}, remain ticks: {:x},Timer Freq:{} Hz", - ticks, remain_ticks, TIMER_FREQ + "APIC timer: first {:#x}, current {:#x}, init {:#x}", + apic_first_count, apic_current_count, apic_init_count, ); - 
CONFIG.call_once(|| Config::PeriodicMode { init_count: ticks }); + CONFIG.call_once(|| Config::PeriodicMode { + init_count: apic_init_count, + }); } } diff --git a/ostd/src/boot/smp.rs b/ostd/src/boot/smp.rs index a998894d..e68b1ec7 100644 --- a/ostd/src/boot/smp.rs +++ b/ostd/src/boot/smp.rs @@ -155,7 +155,11 @@ fn ap_early_entry(cpu_id: u32) -> ! { } fn report_online_and_hw_cpu_id(cpu_id: u32) { - let old_val = HW_CPU_ID_MAP.lock().insert(cpu_id, HwCpuId::read_current()); + // There are no races because this method will only be called in the boot + // context, where preemption won't occur. + let hw_cpu_id = HwCpuId::read_current(&crate::task::disable_preempt()); + + let old_val = HW_CPU_ID_MAP.lock().insert(cpu_id, hw_cpu_id); assert!(old_val.is_none()); } diff --git a/ostd/src/smp.rs b/ostd/src/smp.rs index 6741ee2a..0cbd9ba8 100644 --- a/ostd/src/smp.rs +++ b/ostd/src/smp.rs @@ -52,7 +52,13 @@ pub fn inter_processor_call(targets: &CpuSet, f: fn()) { } // SAFETY: The value of `irq_num` corresponds to a valid IRQ line and // triggering it will not cause any safety issues. - unsafe { send_ipi(ipi_data.hw_cpu_ids[cpu_id.as_usize()], irq_num) }; + unsafe { + send_ipi( + ipi_data.hw_cpu_ids[cpu_id.as_usize()], + irq_num, + &irq_guard as _, + ) + }; } if call_on_self { // Execute the function synchronously.