diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs
index b3e640ad2..abf1d26ec 100644
--- a/kernel/src/lib.rs
+++ b/kernel/src/lib.rs
@@ -115,7 +115,7 @@ fn ap_init() -> ! {
     let preempt_guard = ostd::task::disable_preempt();
     let cpu_id = preempt_guard.current_cpu();
     drop(preempt_guard);
-    log::info!("Kernel idle thread for CPU #{} started.", cpu_id);
+    log::info!("Kernel idle thread for CPU #{} started.", cpu_id.as_usize());
     loop {
         Thread::yield_now();
     }
diff --git a/kernel/src/process/posix_thread/futex.rs b/kernel/src/process/posix_thread/futex.rs
index a232baea2..c68d9752e 100644
--- a/kernel/src/process/posix_thread/futex.rs
+++ b/kernel/src/process/posix_thread/futex.rs
@@ -164,7 +164,7 @@ static FUTEX_BUCKETS: Once = Once::new();
 /// This number is calculated the same way as Linux's:
 ///
 fn get_bucket_count() -> usize {
-    ((1 << 8) * num_cpus()).next_power_of_two() as usize
+    ((1 << 8) * num_cpus()).next_power_of_two()
 }
 
 fn get_futex_bucket(key: FutexKey) -> (usize, FutexBucketRef) {
diff --git a/kernel/src/sched/priority_scheduler.rs b/kernel/src/sched/priority_scheduler.rs
index 289d85945..8c2ff507f 100644
--- a/kernel/src/sched/priority_scheduler.rs
+++ b/kernel/src/sched/priority_scheduler.rs
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: MPL-2.0
 
 use ostd::{
-    cpu::{num_cpus, CpuSet, PinCurrentCpu},
+    cpu::{num_cpus, CpuId, CpuSet, PinCurrentCpu},
     sync::PreemptDisabled,
     task::{
         scheduler::{
@@ -33,8 +33,8 @@ struct PreemptScheduler, U: CommonSchedInfo> {
 }
 
 impl, U: CommonSchedInfo> PreemptScheduler {
-    fn new(nr_cpus: u32) -> Self {
-        let mut rq = Vec::with_capacity(nr_cpus as usize);
+    fn new(nr_cpus: usize) -> Self {
+        let mut rq = Vec::with_capacity(nr_cpus);
         for _ in 0..nr_cpus {
             rq.push(SpinLock::new(PreemptRunQueue::new()));
         }
@@ -42,7 +42,7 @@ impl, U: CommonSchedInfo> PreemptScheduler
-    fn select_cpu(&self, entity: &PreemptSchedEntity) -> u32 {
+    fn select_cpu(&self, entity: &PreemptSchedEntity) -> CpuId {
         // If the CPU of a runnable task has been set before, keep scheduling
         // the task to that one.
         // TODO: Consider migrating tasks between CPUs for load balancing.
@@ -55,7 +55,7 @@ impl, U: CommonSchedInfo> PreemptScheduler, U: CommonSchedInfo> PreemptScheduler, U: Sync + Send + CommonSchedInfo> Scheduler for PreemptScheduler {
-    fn enqueue(&self, task: Arc, flags: EnqueueFlags) -> Option<u32> {
+    fn enqueue(&self, task: Arc, flags: EnqueueFlags) -> Option<CpuId> {
         let entity = PreemptSchedEntity::new(task);
         let mut still_in_rq = false;
         let target_cpu = {
@@ -88,7 +88,7 @@ impl, U: Sync + Send + CommonSch
             cpu_id
         };
 
-        let mut rq = self.rq[target_cpu as usize].disable_irq().lock();
+        let mut rq = self.rq[target_cpu.as_usize()].disable_irq().lock();
         if still_in_rq && let Err(_) = entity.task.cpu().set_if_is_none(target_cpu) {
             return None;
         }
@@ -105,14 +105,14 @@ impl, U: Sync + Send + CommonSch
     fn local_rq_with(&self, f: &mut dyn FnMut(&dyn LocalRunQueue)) {
         let irq_guard = disable_local();
-        let local_rq: &PreemptRunQueue = &self.rq[irq_guard.current_cpu() as usize].lock();
+        let local_rq: &PreemptRunQueue = &self.rq[irq_guard.current_cpu().as_usize()].lock();
         f(local_rq);
     }
 
     fn local_mut_rq_with(&self, f: &mut dyn FnMut(&mut dyn LocalRunQueue)) {
         let irq_guard = disable_local();
         let local_rq: &mut PreemptRunQueue =
-            &mut self.rq[irq_guard.current_cpu() as usize].lock();
+            &mut self.rq[irq_guard.current_cpu().as_usize()].lock();
         f(local_rq);
     }
 }
diff --git a/kernel/src/thread/work_queue/mod.rs b/kernel/src/thread/work_queue/mod.rs
index 080d69ca6..284f97239 100644
--- a/kernel/src/thread/work_queue/mod.rs
+++ b/kernel/src/thread/work_queue/mod.rs
@@ -64,7 +64,7 @@
 //!
 //! ```
 
-use ostd::cpu::CpuSet;
+use ostd::cpu::{CpuId, CpuSet};
 use spin::Once;
 use work_item::WorkItem;
 use worker_pool::WorkerPool;
@@ -150,7 +150,7 @@ impl WorkQueue {
     /// Request a pending work item. The `request_cpu` indicates the CPU where
     /// the calling worker is located.
-    fn dequeue(&self, request_cpu: u32) -> Option<Arc<WorkItem>> {
+    fn dequeue(&self, request_cpu: CpuId) -> Option<Arc<WorkItem>> {
         let mut inner = self.inner.disable_irq().lock();
         let index = inner
             .pending_work_items
@@ -160,7 +160,7 @@ impl WorkQueue {
         Some(item)
     }
 
-    fn has_pending_work_items(&self, request_cpu: u32) -> bool {
+    fn has_pending_work_items(&self, request_cpu: CpuId) -> bool {
         self.inner
             .disable_irq()
             .lock()
diff --git a/kernel/src/thread/work_queue/work_item.rs b/kernel/src/thread/work_queue/work_item.rs
index ec083e4a8..c561343da 100644
--- a/kernel/src/thread/work_queue/work_item.rs
+++ b/kernel/src/thread/work_queue/work_item.rs
@@ -4,7 +4,7 @@
 
 use core::sync::atomic::{AtomicBool, Ordering};
 
-use ostd::cpu::CpuSet;
+use ostd::cpu::{CpuId, CpuSet};
 
 use crate::prelude::*;
 
@@ -33,7 +33,7 @@ impl WorkItem {
         &mut self.cpu_affinity
     }
 
-    pub(super) fn is_valid_cpu(&self, cpu_id: u32) -> bool {
+    pub(super) fn is_valid_cpu(&self, cpu_id: CpuId) -> bool {
         self.cpu_affinity.contains(cpu_id)
     }
diff --git a/kernel/src/thread/work_queue/worker.rs b/kernel/src/thread/work_queue/worker.rs
index ba1403632..685a68f94 100644
--- a/kernel/src/thread/work_queue/worker.rs
+++ b/kernel/src/thread/work_queue/worker.rs
@@ -2,7 +2,10 @@
 
 #![allow(dead_code)]
 
-use ostd::{cpu::CpuSet, task::Task};
+use ostd::{
+    cpu::{CpuId, CpuSet},
+    task::Task,
+};
 
 use super::worker_pool::WorkerPool;
 use crate::{
@@ -19,7 +22,7 @@ use crate::{
 pub(super) struct Worker {
     worker_pool: Weak,
     bound_task: Arc,
-    bound_cpu: u32,
+    bound_cpu: CpuId,
     inner: SpinLock,
 }
 
@@ -39,7 +42,7 @@ enum WorkerStatus {
 
 impl Worker {
     /// Creates a new `Worker` to the given `worker_pool`.
-    pub(super) fn new(worker_pool: Weak, bound_cpu: u32) -> Arc {
+    pub(super) fn new(worker_pool: Weak, bound_cpu: CpuId) -> Arc {
         Arc::new_cyclic(|worker_ref| {
             let weal_worker = worker_ref.clone();
             let task_fn = Box::new(move || {
diff --git a/kernel/src/thread/work_queue/worker_pool.rs b/kernel/src/thread/work_queue/worker_pool.rs
index b380c0f4a..6476e7281 100644
--- a/kernel/src/thread/work_queue/worker_pool.rs
+++ b/kernel/src/thread/work_queue/worker_pool.rs
@@ -7,7 +7,11 @@ use core::{
     time::Duration,
 };
 
-use ostd::{cpu::CpuSet, sync::WaitQueue, task::Task};
+use ostd::{
+    cpu::{CpuId, CpuSet},
+    sync::WaitQueue,
+    task::Task,
+};
 
 use super::{simple_scheduler::SimpleScheduler, worker::Worker, WorkItem, WorkPriority, WorkQueue};
 use crate::{
@@ -34,7 +38,7 @@ pub struct WorkerPool {
 
 /// A set of workers for a specific CPU.
 pub struct LocalWorkerPool {
-    cpu_id: u32,
+    cpu_id: CpuId,
     idle_wait_queue: WaitQueue,
     parent: Weak,
     /// A liveness check for LocalWorkerPool. The monitor periodically clears heartbeat,
@@ -66,7 +70,7 @@ pub struct Monitor {
 }
 
 impl LocalWorkerPool {
-    fn new(worker_pool: Weak, cpu_id: u32) -> Self {
+    fn new(worker_pool: Weak, cpu_id: CpuId) -> Self {
         LocalWorkerPool {
             cpu_id,
             idle_wait_queue: WaitQueue::new(),
@@ -151,7 +155,7 @@ impl WorkerPool {
         self.work_queues.disable_irq().lock().push(work_queue);
     }
 
-    pub fn has_pending_work_items(&self, request_cpu: u32) -> bool {
+    pub fn has_pending_work_items(&self, request_cpu: CpuId) -> bool {
         self.work_queues
             .disable_irq()
             .lock()
@@ -163,7 +167,7 @@ impl WorkerPool {
         self.scheduler.schedule();
     }
 
-    pub fn num_workers(&self, cpu_id: u32) -> u16 {
+    pub fn num_workers(&self, cpu_id: CpuId) -> u16 {
         self.local_pool(cpu_id).workers.disable_irq().lock().len() as u16
     }
 
@@ -171,7 +175,7 @@ impl WorkerPool {
         &self.cpu_set
     }
 
-    pub(super) fn fetch_pending_work_item(&self, request_cpu: u32) -> Option<Arc<WorkItem>> {
+    pub(super) fn fetch_pending_work_item(&self, request_cpu: CpuId) -> Option<Arc<WorkItem>> {
         for work_queue in self.work_queues.disable_irq().lock().iter() {
             let item = work_queue.dequeue(request_cpu);
             if item.is_some() {
@@ -181,22 +185,22 @@ impl WorkerPool {
         None
     }
 
-    fn local_pool(&self, cpu_id: u32) -> &Arc {
+    fn local_pool(&self, cpu_id: CpuId) -> &Arc {
         self.local_pools
             .iter()
             .find(|local_pool: &&Arc| local_pool.cpu_id == cpu_id)
             .unwrap()
     }
 
-    pub(super) fn wake_worker(&self, cpu_id: u32) -> bool {
+    pub(super) fn wake_worker(&self, cpu_id: CpuId) -> bool {
         self.local_pool(cpu_id).wake_worker()
     }
 
-    pub(super) fn add_worker(&self, cpu_id: u32) {
+    pub(super) fn add_worker(&self, cpu_id: CpuId) {
         self.local_pool(cpu_id).add_worker();
     }
 
-    pub(super) fn remove_worker(&self, cpu_id: u32) {
+    pub(super) fn remove_worker(&self, cpu_id: CpuId) {
         self.local_pool(cpu_id).remove_worker();
     }
 
@@ -204,15 +208,15 @@ impl WorkerPool {
         self.priority == WorkPriority::High
     }
 
-    pub(super) fn heartbeat(&self, cpu_id: u32) -> bool {
+    pub(super) fn heartbeat(&self, cpu_id: CpuId) -> bool {
         self.local_pool(cpu_id).heartbeat()
     }
 
-    pub(super) fn set_heartbeat(&self, cpu_id: u32, heartbeat: bool) {
+    pub(super) fn set_heartbeat(&self, cpu_id: CpuId, heartbeat: bool) {
         self.local_pool(cpu_id).set_heartbeat(heartbeat)
     }
 
-    pub(super) fn idle_current_worker(&self, cpu_id: u32, worker: Arc) {
+    pub(super) fn idle_current_worker(&self, cpu_id: CpuId, worker: Arc) {
         self.local_pool(cpu_id).idle_current_worker(worker);
     }
 }
diff --git a/kernel/src/time/clocks/system_wide.rs b/kernel/src/time/clocks/system_wide.rs
index 12c6ef92d..a757030bf 100644
--- a/kernel/src/time/clocks/system_wide.rs
+++ b/kernel/src/time/clocks/system_wide.rs
@@ -4,13 +4,7 @@ use alloc::sync::Arc;
 use core::time::Duration;
 
 use aster_time::read_monotonic_time;
-use ostd::{
-    cpu::{num_cpus, PinCurrentCpu},
-    cpu_local,
-    sync::SpinLock,
-    task::disable_preempt,
-    timer::Jiffies,
-};
+use ostd::{cpu::PinCurrentCpu, cpu_local, sync::SpinLock, task::disable_preempt, timer::Jiffies};
 use paste::paste;
 use spin::Once;
 
@@ -232,7 +226,7 @@ macro_rules! define_timer_managers {
         $(
             let clock = paste! {[<$clock_id _INSTANCE>].get().unwrap().clone()};
             let clock_manager = TimerManager::new(clock);
-            for cpu in 0..num_cpus() {
+            for cpu in ostd::cpu::all_cpus() {
                 paste! {
                     [<$clock_id _MANAGER>].get_on_cpu(cpu).call_once(|| clock_manager.clone());
                 }
@@ -307,7 +301,7 @@ pub(super) fn init() {
 /// to avoid functions like this one.
 pub fn init_for_ktest() {
     // If `spin::Once` has initialized, this closure will not be executed.
-    for cpu in 0..num_cpus() {
+    for cpu in ostd::cpu::all_cpus() {
         CLOCK_REALTIME_MANAGER.get_on_cpu(cpu).call_once(|| {
             let clock = RealTimeClock { _private: () };
             TimerManager::new(Arc::new(clock))
diff --git a/ostd/src/arch/riscv/irq.rs b/ostd/src/arch/riscv/irq.rs
index 117257bb0..b476c59a4 100644
--- a/ostd/src/arch/riscv/irq.rs
+++ b/ostd/src/arch/riscv/irq.rs
@@ -8,6 +8,7 @@ use id_alloc::IdAlloc;
 use spin::Once;
 
 use crate::{
+    cpu::CpuId,
     sync::{Mutex, PreemptDisabled, SpinLock, SpinLockGuard},
     trap::TrapFrame,
 };
@@ -145,6 +146,6 @@ impl Drop for IrqCallbackHandle {
 ///
 /// The caller must ensure that the CPU ID and the interrupt number corresponds
 /// to a safe function to call.
-pub(crate) unsafe fn send_ipi(cpu_id: u32, irq_num: u8) {
+pub(crate) unsafe fn send_ipi(cpu_id: CpuId, irq_num: u8) {
     unimplemented!()
 }
diff --git a/ostd/src/arch/x86/irq.rs b/ostd/src/arch/x86/irq.rs
index 2d3f9a2c4..073c5d2aa 100644
--- a/ostd/src/arch/x86/irq.rs
+++ b/ostd/src/arch/x86/irq.rs
@@ -11,6 +11,7 @@ use spin::Once;
 use x86_64::registers::rflags::{self, RFlags};
 
 use crate::{
+    cpu::CpuId,
     sync::{Mutex, RwLock, RwLockReadGuard, SpinLock},
     trap::TrapFrame,
 };
@@ -160,11 +161,11 @@ impl Drop for IrqCallbackHandle {
 ///
 /// The caller must ensure that the CPU ID and the interrupt number corresponds
 /// to a safe function to call.
-pub(crate) unsafe fn send_ipi(cpu_id: u32, irq_num: u8) {
+pub(crate) unsafe fn send_ipi(cpu_id: CpuId, irq_num: u8) {
     use crate::arch::kernel::apic::{self, Icr};
 
     let icr = Icr::new(
-        apic::ApicId::from(cpu_id),
+        apic::ApicId::from(cpu_id.as_usize() as u32),
         apic::DestinationShorthand::NoShorthand,
         apic::TriggerMode::Edge,
         apic::Level::Assert,
diff --git a/ostd/src/cpu/local/cpu_local.rs b/ostd/src/cpu/local/cpu_local.rs
index f8548b451..98f9b0a84 100644
--- a/ostd/src/cpu/local/cpu_local.rs
+++ b/ostd/src/cpu/local/cpu_local.rs
@@ -5,7 +5,7 @@ use core::{marker::Sync, ops::Deref};
 
 use super::{__cpu_local_end, __cpu_local_start};
-use crate::{arch, trap::DisabledLocalIrqGuard};
+use crate::{arch, cpu::CpuId, trap::DisabledLocalIrqGuard};
 
 /// Defines a CPU-local variable.
 ///
@@ -139,9 +139,11 @@ impl CpuLocal {
     /// # Panics
     ///
     /// Panics if the CPU ID is out of range.
-    pub fn get_on_cpu(&'static self, cpu_id: u32) -> &'static T {
+    pub fn get_on_cpu(&'static self, cpu_id: CpuId) -> &'static T {
         super::has_init::assert_true();
 
+        let cpu_id = cpu_id.as_usize();
+
         // If on the BSP, just use the statically linked storage.
         if cpu_id == 0 {
             return &self.0;
@@ -153,7 +155,7 @@ impl CpuLocal {
         let base = unsafe {
             super::CPU_LOCAL_STORAGES
                 .get_unchecked()
-                .get(cpu_id as usize - 1)
+                .get(cpu_id - 1)
                 .unwrap()
                 .start_paddr()
         };
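// Illustrative sketch (not part of the patch above): how `CpuLocal::get_on_cpu`
// is called after this change, with a typed `CpuId` instead of a raw `u32`.
// `REMOTE_COUNTERS` is a hypothetical CPU-local defined with `cpu_local!`; only
// the `ostd::cpu` items visible in this patch are otherwise assumed to exist.
use core::sync::atomic::{AtomicUsize, Ordering};

use ostd::{cpu::all_cpus, cpu_local};

cpu_local! {
    /// A hypothetical per-CPU counter.
    static REMOTE_COUNTERS: AtomicUsize = AtomicUsize::new(0);
}

fn sum_counters() -> usize {
    // `all_cpus()` yields `CpuId`s, which `get_on_cpu` now accepts directly,
    // so no manual index range or `as` cast is needed.
    all_cpus()
        .map(|cpu| REMOTE_COUNTERS.get_on_cpu(cpu).load(Ordering::Relaxed))
        .sum()
}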
diff --git a/ostd/src/cpu/local/mod.rs b/ostd/src/cpu/local/mod.rs
index 5ff528b8b..c91f8cd78 100644
--- a/ostd/src/cpu/local/mod.rs
+++ b/ostd/src/cpu/local/mod.rs
@@ -95,7 +95,7 @@ pub unsafe fn init_on_bsp() {
 
     let num_cpus = super::num_cpus();
 
-    let mut cpu_local_storages = Vec::with_capacity(num_cpus as usize - 1);
+    let mut cpu_local_storages = Vec::with_capacity(num_cpus - 1);
     for _ in 1..num_cpus {
         let ap_pages = {
             let nbytes = (bsp_end_va - bsp_base_va).align_up(PAGE_SIZE);
diff --git a/ostd/src/cpu/mod.rs b/ostd/src/cpu/mod.rs
index 1c50d15ad..d9be816ba 100644
--- a/ostd/src/cpu/mod.rs
+++ b/ostd/src/cpu/mod.rs
@@ -21,6 +21,37 @@ use crate::{
     trap::DisabledLocalIrqGuard,
 };
 
+/// The ID of a CPU in the system.
+///
+/// If converting from/to an integer, the integer must start from 0 and be less
+/// than the number of CPUs.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct CpuId(u32);
+
+impl CpuId {
+    /// Returns the CPU ID of the bootstrap processor (BSP).
+    pub const fn bsp() -> Self {
+        CpuId(0)
+    }
+
+    /// Converts the CPU ID to a `usize`.
+    pub const fn as_usize(self) -> usize {
+        self.0 as usize
+    }
+}
+
+impl TryFrom<usize> for CpuId {
+    type Error = &'static str;
+
+    fn try_from(value: usize) -> Result<Self, Self::Error> {
+        if value < num_cpus() {
+            Ok(CpuId(value as u32))
+        } else {
+            Err("The given CPU ID is out of range")
+        }
+    }
+}
+
 /// The number of CPUs.
 static NUM_CPUS: Once = Once::new();
 
@@ -46,14 +77,20 @@ pub(crate) unsafe fn set_this_cpu_id(id: u32) {
 }
 
 /// Returns the number of CPUs.
-pub fn num_cpus() -> u32 {
+pub fn num_cpus() -> usize {
     debug_assert!(
         NUM_CPUS.get().is_some(),
         "The number of CPUs is not initialized"
     );
     // SAFETY: The number of CPUs is initialized. The unsafe version is used
     // to avoid the overhead of the check.
-    unsafe { *NUM_CPUS.get_unchecked() }
+    let num = unsafe { *NUM_CPUS.get_unchecked() };
+    num as usize
+}
+
+/// Returns an iterator over all CPUs.
+pub fn all_cpus() -> impl Iterator<Item = CpuId> {
+    (0..num_cpus()).map(|id| CpuId(id as u32))
 }
 
 /// A marker trait for guard types that can "pin" the current task to the
@@ -70,10 +107,10 @@ pub fn num_cpus() -> u32 {
 /// CPU while any one of the instances of the implemented structure exists.
 pub unsafe trait PinCurrentCpu {
     /// Returns the number of the current CPU.
-    fn current_cpu(&self) -> u32 {
+    fn current_cpu(&self) -> CpuId {
         let id = CURRENT_CPU.load();
         debug_assert_ne!(id, u32::MAX, "This CPU is not initialized");
-        id
+        CpuId(id)
     }
 }
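// Illustrative sketch (not part of the patch above): the intended conversions
// for the new `CpuId` type. Only `CpuId::bsp`, `CpuId::as_usize`,
// `TryFrom<usize>`, `num_cpus` and `all_cpus` from the hunk above are assumed.
use ostd::cpu::{all_cpus, num_cpus, CpuId};

fn cpu_id_conversions() {
    // The bootstrap processor always exists and is always CPU 0.
    let bsp = CpuId::bsp();
    assert_eq!(bsp.as_usize(), 0);

    // Conversion from an integer is fallible: out-of-range IDs are rejected.
    assert!(CpuId::try_from(num_cpus()).is_err());

    // Enumerating CPUs no longer relies on a raw `0..num_cpus()` range.
    for cpu in all_cpus() {
        assert!(cpu.as_usize() < num_cpus());
    }
}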
diff --git a/ostd/src/cpu/set.rs b/ostd/src/cpu/set.rs
index 01db7bb21..1865241b9 100644
--- a/ostd/src/cpu/set.rs
+++ b/ostd/src/cpu/set.rs
@@ -7,7 +7,7 @@ use core::sync::atomic::{AtomicU64, Ordering};
 use smallvec::SmallVec;
 use static_assertions::const_assert_eq;
 
-use super::num_cpus;
+use super::{num_cpus, CpuId};
 
 /// A subset of all CPUs in the system.
 #[derive(Clone, Debug, Default)]
@@ -21,12 +21,12 @@ type InnerPart = u64;
 
 const BITS_PER_PART: usize = core::mem::size_of::<InnerPart>() * 8;
 const NR_PARTS_NO_ALLOC: usize = 2;
 
-const fn part_idx(cpu_id: u32) -> usize {
-    cpu_id as usize / BITS_PER_PART
+const fn part_idx(cpu_id: CpuId) -> usize {
+    cpu_id.as_usize() / BITS_PER_PART
 }
 
-const fn bit_idx(cpu_id: u32) -> usize {
-    cpu_id as usize % BITS_PER_PART
+const fn bit_idx(cpu_id: CpuId) -> usize {
+    cpu_id.as_usize() % BITS_PER_PART
 }
 
 const fn parts_for_cpus(num_cpus: usize) -> usize {
@@ -36,18 +36,18 @@ const fn parts_for_cpus(num_cpus: usize) -> usize {
 impl CpuSet {
     /// Creates a new `CpuSet` with all CPUs in the system.
     pub fn new_full() -> Self {
-        let mut ret = Self::with_capacity_val(num_cpus() as usize, !0);
+        let mut ret = Self::with_capacity_val(num_cpus(), !0);
         ret.clear_nonexistent_cpu_bits();
         ret
     }
 
     /// Creates a new `CpuSet` with no CPUs in the system.
     pub fn new_empty() -> Self {
-        Self::with_capacity_val(num_cpus() as usize, 0)
+        Self::with_capacity_val(num_cpus(), 0)
     }
 
     /// Adds a CPU to the set.
-    pub fn add(&mut self, cpu_id: u32) {
+    pub fn add(&mut self, cpu_id: CpuId) {
         let part_idx = part_idx(cpu_id);
         let bit_idx = bit_idx(cpu_id);
         if part_idx >= self.bits.len() {
@@ -57,7 +57,7 @@ impl CpuSet {
     }
 
     /// Removes a CPU from the set.
-    pub fn remove(&mut self, cpu_id: u32) {
+    pub fn remove(&mut self, cpu_id: CpuId) {
         let part_idx = part_idx(cpu_id);
         let bit_idx = bit_idx(cpu_id);
         if part_idx < self.bits.len() {
@@ -66,7 +66,7 @@ impl CpuSet {
     }
 
     /// Returns true if the set contains the specified CPU.
-    pub fn contains(&self, cpu_id: u32) -> bool {
+    pub fn contains(&self, cpu_id: CpuId) -> bool {
         let part_idx = part_idx(cpu_id);
         let bit_idx = bit_idx(cpu_id);
         part_idx < self.bits.len() && (self.bits[part_idx] & (1 << bit_idx)) != 0
@@ -92,11 +92,12 @@ impl CpuSet {
     }
 
     /// Iterates over the CPUs in the set.
-    pub fn iter(&self) -> impl Iterator<Item = u32> + '_ {
+    pub fn iter(&self) -> impl Iterator<Item = CpuId> + '_ {
         self.bits.iter().enumerate().flat_map(|(part_idx, &part)| {
             (0..BITS_PER_PART).filter_map(move |bit_idx| {
                 if (part & (1 << bit_idx)) != 0 {
-                    Some((part_idx * BITS_PER_PART + bit_idx) as u32)
+                    let id = part_idx * BITS_PER_PART + bit_idx;
+                    Some(CpuId(id as u32))
                 } else {
                     None
                 }
@@ -113,7 +114,7 @@ impl CpuSet {
     }
 
     fn clear_nonexistent_cpu_bits(&mut self) {
-        let num_cpus = num_cpus() as usize;
+        let num_cpus = num_cpus();
         if num_cpus % BITS_PER_PART != 0 {
             let num_parts = parts_for_cpus(num_cpus);
             self.bits[num_parts - 1] &= (1 << (num_cpus % BITS_PER_PART)) - 1;
@@ -121,8 +122,8 @@ impl CpuSet {
     }
 }
 
-impl From<u32> for CpuSet {
-    fn from(cpu_id: u32) -> Self {
+impl From<CpuId> for CpuSet {
+    fn from(cpu_id: CpuId) -> Self {
         let mut set = Self::new_empty();
         set.add(cpu_id);
         set
@@ -171,7 +172,7 @@ impl AtomicCpuSet {
     }
 
     /// Atomically adds a CPU with the given ordering.
-    pub fn add(&self, cpu_id: u32, ordering: Ordering) {
+    pub fn add(&self, cpu_id: CpuId, ordering: Ordering) {
         let part_idx = part_idx(cpu_id);
         let bit_idx = bit_idx(cpu_id);
         if part_idx < self.bits.len() {
@@ -180,7 +181,7 @@ impl AtomicCpuSet {
     }
 
     /// Atomically removes a CPU with the given ordering.
-    pub fn remove(&self, cpu_id: u32, ordering: Ordering) {
+    pub fn remove(&self, cpu_id: CpuId, ordering: Ordering) {
         let part_idx = part_idx(cpu_id);
         let bit_idx = bit_idx(cpu_id);
         if part_idx < self.bits.len() {
@@ -189,7 +190,7 @@ impl AtomicCpuSet {
     }
 
     /// Atomically checks if the set contains the specified CPU.
-    pub fn contains(&self, cpu_id: u32, ordering: Ordering) -> bool {
+    pub fn contains(&self, cpu_id: CpuId, ordering: Ordering) -> bool {
         let part_idx = part_idx(cpu_id);
         let bit_idx = bit_idx(cpu_id);
         part_idx < self.bits.len() && (self.bits[part_idx].load(ordering) & (1 << bit_idx)) != 0
@@ -199,24 +200,23 @@ impl AtomicCpuSet {
 #[cfg(ktest)]
 mod test {
     use super::*;
-    use crate::prelude::*;
+    use crate::{cpu::all_cpus, prelude::*};
 
     #[ktest]
     fn test_full_cpu_set_iter_is_all() {
         let set = CpuSet::new_full();
         let num_cpus = num_cpus();
-        let all_cpus = (0..num_cpus).collect::<Vec<_>>();
+        let all_cpus = all_cpus().collect::<Vec<_>>();
         let set_cpus = set.iter().collect::<Vec<_>>();
 
-        assert!(set_cpus.len() == num_cpus as usize);
+        assert!(set_cpus.len() == num_cpus);
         assert_eq!(set_cpus, all_cpus);
     }
 
     #[ktest]
     fn test_full_cpu_set_contains_all() {
         let set = CpuSet::new_full();
-        let num_cpus = num_cpus();
-        for cpu_id in 0..num_cpus {
+        for cpu_id in all_cpus() {
             assert!(set.contains(cpu_id));
         }
     }
@@ -231,28 +231,29 @@ mod test {
 
     #[ktest]
     fn test_empty_cpu_set_contains_none() {
         let set = CpuSet::new_empty();
-        let num_cpus = num_cpus();
-        for cpu_id in 0..num_cpus {
+        for cpu_id in all_cpus() {
             assert!(!set.contains(cpu_id));
         }
     }
 
     #[ktest]
     fn test_atomic_cpu_set_multiple_sizes() {
-        for test_num_cpus in [1, 3, 12, 64, 96, 99, 128, 256, 288, 1024] {
+        for test_num_cpus in [1usize, 3, 12, 64, 96, 99, 128, 256, 288, 1024] {
+            let test_all_iter = || (0..test_num_cpus).map(|id| CpuId(id as u32));
+
             let set = CpuSet::with_capacity_val(test_num_cpus, 0);
             let atomic_set = AtomicCpuSet::new(set);
 
-            for cpu_id in 0..test_num_cpus as u32 {
+            for cpu_id in test_all_iter() {
                 assert!(!atomic_set.contains(cpu_id, Ordering::Relaxed));
-                if cpu_id % 3 == 0 {
+                if cpu_id.as_usize() % 3 == 0 {
                     atomic_set.add(cpu_id, Ordering::Relaxed);
                 }
             }
 
             let loaded = atomic_set.load();
             for cpu_id in loaded.iter() {
-                if cpu_id % 3 == 0 {
+                if cpu_id.as_usize() % 3 == 0 {
                     assert!(loaded.contains(cpu_id));
                 } else {
                     assert!(!loaded.contains(cpu_id));
@@ -261,7 +262,7 @@ mod test {
 
             atomic_set.store(CpuSet::with_capacity_val(test_num_cpus, 0));
 
-            for cpu_id in 0..test_num_cpus as u32 {
+            for cpu_id in test_all_iter() {
                 assert!(!atomic_set.contains(cpu_id, Ordering::Relaxed));
                 atomic_set.add(cpu_id, Ordering::Relaxed);
             }
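// Illustrative sketch (not part of the patch above): basic `CpuSet` usage with
// the typed `CpuId`, mirroring the ktest cases in the hunks above. Only APIs
// visible in this file's hunks are assumed.
use ostd::cpu::{all_cpus, CpuId, CpuSet};

fn cpu_set_basics() {
    // Start from an empty set and pin it to the BSP only.
    let mut affinity = CpuSet::new_empty();
    affinity.add(CpuId::bsp());
    assert!(affinity.contains(CpuId::bsp()));

    // `From<CpuId>` builds the same single-CPU set in one step.
    let single: CpuSet = CpuId::bsp().into();
    assert_eq!(single.iter().count(), 1);

    // A full set contains every CPU reported by `all_cpus()`.
    let mut full = CpuSet::new_full();
    assert!(all_cpus().all(|cpu| full.contains(cpu)));

    // Removal also takes the typed ID.
    full.remove(CpuId::bsp());
    assert!(!full.contains(CpuId::bsp()));
}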
diff --git a/ostd/src/mm/page_table/boot_pt.rs b/ostd/src/mm/page_table/boot_pt.rs
index 6cd36dfa3..39f01001a 100644
--- a/ostd/src/mm/page_table/boot_pt.rs
+++ b/ostd/src/mm/page_table/boot_pt.rs
@@ -68,7 +68,7 @@ where
     /// another page table and before this dismissal.
     pub(crate) unsafe fn dismiss() {
         IS_DISMISSED.store(true);
-        if DISMISS_COUNT.fetch_add(1, Ordering::SeqCst) == num_cpus() - 1 {
+        if DISMISS_COUNT.fetch_add(1, Ordering::SeqCst) as usize == num_cpus() - 1 {
             BOOT_PAGE_TABLE.lock().take();
         }
     }
diff --git a/ostd/src/mm/vm_space.rs b/ostd/src/mm/vm_space.rs
index ccd211eb3..1bcf8179c 100644
--- a/ostd/src/mm/vm_space.rs
+++ b/ostd/src/mm/vm_space.rs
@@ -16,7 +16,7 @@ use core::{
 
 use crate::{
     arch::mm::{current_page_table_paddr, PageTableEntry, PagingConsts},
-    cpu::{num_cpus, CpuExceptionInfo, CpuSet, PinCurrentCpu},
+    cpu::{all_cpus, CpuExceptionInfo, CpuSet, PinCurrentCpu},
     cpu_local,
     mm::{
         io::Fallible,
@@ -93,7 +93,7 @@ impl VmSpace {
 
         let mut activated_cpus = CpuSet::new_empty();
 
-        for cpu in 0..num_cpus() {
+        for cpu in all_cpus() {
             // The activation lock is held; other CPUs cannot activate this `VmSpace`.
             let ptr = ACTIVATED_VM_SPACE.get_on_cpu(cpu).load(Ordering::Relaxed) as *const VmSpace;
diff --git a/ostd/src/smp.rs b/ostd/src/smp.rs
index 910bb564a..7e3913ec9 100644
--- a/ostd/src/smp.rs
+++ b/ostd/src/smp.rs
@@ -72,7 +72,7 @@ fn do_inter_processor_call(_trapframe: &TrapFrame) {
     let mut queue = CALL_QUEUES.get_on_cpu(cur_cpu).lock();
     while let Some(f) = queue.pop_front() {
         log::trace!(
-            "Performing inter-processor call to {:#?} on CPU {}",
+            "Performing inter-processor call to {:#?} on CPU {:#?}",
             f,
             cur_cpu
         );
diff --git a/ostd/src/task/scheduler/fifo_scheduler.rs b/ostd/src/task/scheduler/fifo_scheduler.rs
index 28121e69c..78e0c45da 100644
--- a/ostd/src/task/scheduler/fifo_scheduler.rs
+++ b/ostd/src/task/scheduler/fifo_scheduler.rs
@@ -6,7 +6,7 @@ use super::{
     info::CommonSchedInfo, inject_scheduler, EnqueueFlags, LocalRunQueue, Scheduler, UpdateFlags,
 };
 use crate::{
-    cpu::{num_cpus, PinCurrentCpu},
+    cpu::{num_cpus, CpuId, PinCurrentCpu},
     sync::SpinLock,
     task::{disable_preempt, Task},
 };
@@ -25,7 +25,7 @@ struct FifoScheduler {
 
 impl FifoScheduler {
     /// Creates a new instance of `FifoScheduler`.
-    fn new(nr_cpus: u32) -> Self {
+    fn new(nr_cpus: usize) -> Self {
         let mut rq = Vec::new();
         for _ in 0..nr_cpus {
             rq.push(SpinLock::new(FifoRunQueue::new()));
@@ -33,14 +33,14 @@ impl FifoScheduler {
         Self { rq }
     }
 
-    fn select_cpu(&self) -> u32 {
+    fn select_cpu(&self) -> CpuId {
         // FIXME: adopt more reasonable policy once we fully enable SMP.
-        0
+        CpuId::bsp()
     }
 }
 
 impl Scheduler for FifoScheduler {
-    fn enqueue(&self, runnable: Arc, flags: EnqueueFlags) -> Option<u32> {
+    fn enqueue(&self, runnable: Arc, flags: EnqueueFlags) -> Option<CpuId> {
         let mut still_in_rq = false;
         let target_cpu = {
             let mut cpu_id = self.select_cpu();
@@ -53,7 +53,7 @@ impl Scheduler for FifoScheduler {
             cpu_id
         };
 
-        let mut rq = self.rq[target_cpu as usize].disable_irq().lock();
+        let mut rq = self.rq[target_cpu.as_usize()].disable_irq().lock();
         if still_in_rq && let Err(_) = runnable.cpu().set_if_is_none(target_cpu) {
             return None;
         }
@@ -64,7 +64,7 @@ impl Scheduler for FifoScheduler {
 
     fn local_rq_with(&self, f: &mut dyn FnMut(&dyn LocalRunQueue)) {
         let preempt_guard = disable_preempt();
-        let local_rq: &FifoRunQueue = &self.rq[preempt_guard.current_cpu() as usize]
+        let local_rq: &FifoRunQueue = &self.rq[preempt_guard.current_cpu().as_usize()]
             .disable_irq()
             .lock();
         f(local_rq);
@@ -72,7 +72,7 @@ impl Scheduler for FifoScheduler {
 
     fn local_mut_rq_with(&self, f: &mut dyn FnMut(&mut dyn LocalRunQueue)) {
         let preempt_guard = disable_preempt();
-        let local_rq: &mut FifoRunQueue = &mut self.rq[preempt_guard.current_cpu() as usize]
+        let local_rq: &mut FifoRunQueue = &mut self.rq[preempt_guard.current_cpu().as_usize()]
             .disable_irq()
             .lock();
         f(local_rq);
diff --git a/ostd/src/task/scheduler/info.rs b/ostd/src/task/scheduler/info.rs
index f804613b8..dd007bf39 100644
--- a/ostd/src/task/scheduler/info.rs
+++ b/ostd/src/task/scheduler/info.rs
@@ -4,7 +4,7 @@
 
 use core::sync::atomic::{AtomicU32, Ordering};
 
-use crate::task::Task;
+use crate::{cpu::CpuId, task::Task};
 
 /// Fields of a task that OSTD will never touch.
 ///
@@ -28,17 +28,22 @@ impl AtomicCpuId {
     /// An `AtomicCpuId` with `AtomicCpuId::NONE` as its inner value is empty.
     const NONE: u32 = u32::MAX;
 
-    fn new(cpu_id: u32) -> Self {
-        Self(AtomicU32::new(cpu_id))
-    }
-
     /// Sets the inner value of an `AtomicCpuId` if it's empty.
     ///
     /// The return value is a result indicating whether the new value was written
-    /// and containing the previous value.
-    pub fn set_if_is_none(&self, cpu_id: u32) -> core::result::Result<u32, u32> {
+    /// and containing the previous value. If the previous value is empty, it returns
+    /// `Ok(())`. Otherwise, it returns `Err(previous_value)`, where the previous
+    /// value is a valid CPU ID.
+    pub fn set_if_is_none(&self, cpu_id: CpuId) -> core::result::Result<(), CpuId> {
         self.0
-            .compare_exchange(Self::NONE, cpu_id, Ordering::Relaxed, Ordering::Relaxed)
+            .compare_exchange(
+                Self::NONE,
+                cpu_id.as_usize() as u32,
+                Ordering::Relaxed,
+                Ordering::Relaxed,
+            )
+            .map(|_| ())
+            .map_err(|prev| (prev as usize).try_into().unwrap())
     }
 
     /// Sets the inner value of an `AtomicCpuId` to `AtomicCpuId::NONE`, i.e. makes
@@ -48,19 +53,19 @@ impl AtomicCpuId {
     }
 
     /// Gets the inner value of an `AtomicCpuId`.
-    pub fn get(&self) -> Option<u32> {
+    pub fn get(&self) -> Option<CpuId> {
         let val = self.0.load(Ordering::Relaxed);
         if val == Self::NONE {
             None
         } else {
-            Some(val)
+            Some((val as usize).try_into().ok()?)
         }
     }
 }
 
 impl Default for AtomicCpuId {
     fn default() -> Self {
-        Self::new(Self::NONE)
+        Self(AtomicU32::new(Self::NONE))
     }
 }
diff --git a/ostd/src/task/scheduler/mod.rs b/ostd/src/task/scheduler/mod.rs
index 016d48532..2ac389b3b 100644
--- a/ostd/src/task/scheduler/mod.rs
+++ b/ostd/src/task/scheduler/mod.rs
@@ -13,7 +13,12 @@ use core::sync::atomic::{AtomicBool, Ordering};
 use spin::Once;
 
 use super::{preempt::cpu_local, processor, Task};
-use crate::{cpu::PinCurrentCpu, prelude::*, task::disable_preempt, timer};
+use crate::{
+    cpu::{CpuId, PinCurrentCpu},
+    prelude::*,
+    task::disable_preempt,
+    timer,
+};
 
 /// Injects a scheduler implementation into framework.
 ///
@@ -40,7 +45,7 @@ pub trait Scheduler: Sync + Send {
     ///
     /// If the `current` of a CPU needs to be preempted, this method returns the id of
     /// that CPU.
-    fn enqueue(&self, runnable: Arc, flags: EnqueueFlags) -> Option<u32>;
+    fn enqueue(&self, runnable: Arc, flags: EnqueueFlags) -> Option<CpuId>;
 
     /// Gets an immutable access to the local runqueue of the current CPU core.
     fn local_rq_with(&self, f: &mut dyn FnMut(&dyn LocalRunQueue));
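// Illustrative sketch (not part of the patch above): the contract of the new
// `AtomicCpuId::set_if_is_none`/`get` signatures, written as hypothetical test
// code inside `ostd::task::scheduler::info`. `AtomicCpuId::default()` starts
// out empty, as the `Default` impl above shows.
use crate::cpu::CpuId;

fn atomic_cpu_id_contract() {
    let slot = AtomicCpuId::default();

    // An empty slot accepts the first CPU ID and reports `Ok(())`.
    assert!(slot.get().is_none());
    assert!(slot.set_if_is_none(CpuId::bsp()).is_ok());
    assert_eq!(slot.get(), Some(CpuId::bsp()));

    // A second attempt fails and hands back the CPU ID that is already set.
    let other = CpuId::try_from(1usize).unwrap_or(CpuId::bsp());
    assert_eq!(slot.set_if_is_none(other), Err(CpuId::bsp()));
}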