Make atomic mode panics have clear messages

Ruihan Li
2024-11-18 23:35:21 +08:00
committed by Tate, Hongliang Tian
parent 065a3bd1c3
commit 969ac97144
13 changed files with 31 additions and 0 deletions
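The change is mechanical: every function that can panic because the caller tries to sleep in atomic mode gains `#[track_caller]`, so the panic location points at the offending call site instead of a line deep inside OSTD. A minimal sketch of the attribute's effect (the function name and message below are illustrative, not code from this commit):

// A minimal sketch, not part of this commit.
#[track_caller]
fn check_not_atomic(in_atomic_mode: bool) {
    // With `#[track_caller]`, the location reported by this panic is the
    // call site of `check_not_atomic`, not this line inside its body.
    if in_atomic_mode {
        panic!("the current task cannot sleep in atomic mode");
    }
}

fn main() {
    check_not_atomic(true); // the panic message points at this line
}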

View File

@@ -32,6 +32,7 @@ pub trait Pause: WaitTimeout {
/// condition is met.
///
/// [`EINTR`]: crate::error::Errno::EINTR
#[track_caller]
fn pause_until<F, R>(&self, cond: F) -> Result<R>
where
F: FnMut() -> Option<R>,
@@ -49,6 +50,7 @@ pub trait Pause: WaitTimeout {
///
/// [`ETIME`]: crate::error::Errno::ETIME
/// [`EINTR`]: crate::error::Errno::EINTR
#[track_caller]
fn pause_until_or_timeout<'a, F, T, R>(&self, mut cond: F, timeout: T) -> Result<R>
where
F: FnMut() -> Option<R>,
@@ -74,6 +76,7 @@ pub trait Pause: WaitTimeout {
/// [`ETIME`]: crate::error::Errno::ETIME
/// [`EINTR`]: crate::error::Errno::EINTR
#[doc(hidden)]
#[track_caller]
fn pause_until_or_timeout_impl<F, R>(
&self,
cond: F,
@@ -94,6 +97,7 @@ pub trait Pause: WaitTimeout {
///
/// [`ETIME`]: crate::error::Errno::ETIME
/// [`EINTR`]: crate::error::Errno::EINTR
#[track_caller]
fn pause_timeout<'a>(&self, timeout: impl Into<TimeoutExt<'a>>) -> Result<()>;
}

View File

@@ -368,6 +368,7 @@ pub trait Pollable {
/// The user must ensure that a call to `try_op()` does not fail with `EAGAIN` when the
/// interesting events occur. However, it is allowed to have spurious `EAGAIN` failures due to
/// race conditions where the events are consumed by another thread.
#[track_caller]
fn wait_events<F, R>(
&self,
mask: IoEvents,

View File

@@ -73,6 +73,7 @@ impl ThreadOptions {
}
/// Builds a new kernel thread and runs it immediately.
#[track_caller]
pub fn spawn(self) -> Arc<Thread> {
let task = self.build();
let thread = task.as_thread().unwrap().clone();

View File

@@ -78,6 +78,7 @@ impl Thread {
}
/// Runs this thread at once.
#[track_caller]
pub fn run(&self) {
self.status.store(ThreadStatus::Running, Ordering::Release);
self.task.upgrade().unwrap().run();
@@ -151,6 +152,7 @@ impl Thread {
/// Yields the execution to another thread.
///
/// This method will return once the current thread is scheduled again.
#[track_caller]
pub fn yield_now() {
Task::yield_now()
}
@@ -158,6 +160,7 @@ impl Thread {
/// Joins the execution of the thread.
///
/// This method will return after the thread exits.
#[track_caller]
pub fn join(&self) {
while !self.is_exited() {
Self::yield_now();

View File

@@ -17,6 +17,7 @@ pub trait WaitTimeout {
/// will return an error with [`ETIME`].
///
/// [`ETIME`]: crate::error::Errno::ETIME
#[track_caller]
fn wait_until_or_timeout<'a, F, T, R>(&self, mut cond: F, timeout: T) -> Result<R>
where
F: FnMut() -> Option<R>,
@@ -41,6 +42,7 @@ pub trait WaitTimeout {
/// - an error with [`ETIME`] if the timeout is reached;
/// - the error returned by the cancel condition if the cancel condition returns `Err(_)`.
#[doc(hidden)]
#[track_caller]
fn wait_until_or_timeout_cancelled<F, R, FCancel>(
&self,
cond: F,

View File

@@ -32,6 +32,7 @@ impl<T: ?Sized> Mutex<T> {
/// Acquires the mutex.
///
/// This method runs in a blocking way until the mutex can be acquired.
#[track_caller]
pub fn lock(&self) -> MutexGuard<T> {
self.queue.wait_until(|| self.try_lock())
}
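The caller's location only survives if every intermediate frame is annotated as well, which is why the attribute is applied along the whole chain here: `Mutex::lock`, `WaitQueue::wait_until`, the scheduler internals, and finally `might_sleep`. A sketch with illustrative names (not OSTD's real signatures) of how consecutively annotated functions forward the location:

#[track_caller]
fn lock() {
    wait_until(); // annotated, so the caller location keeps being forwarded
}

#[track_caller]
fn wait_until() {
    might_sleep();
}

#[track_caller]
fn might_sleep() {
    // Every frame above is annotated, so the panic is attributed to the
    // original `lock()` call site in `main`.
    panic!("might sleep while in atomic mode");
}

fn main() {
    lock(); // the panic is reported at this line
}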

View File

@@ -121,6 +121,7 @@ impl<T: ?Sized> RwMutex<T> {
/// upreaders present. The implementation of [`WaitQueue`] guarantees the
/// order in which other concurrent readers or writers waiting simultaneously
/// will acquire the mutex.
#[track_caller]
pub fn read(&self) -> RwMutexReadGuard<T> {
self.queue.wait_until(|| self.try_read())
}
@@ -131,6 +132,7 @@ impl<T: ?Sized> RwMutex<T> {
/// or readers present. The implementation of [`WaitQueue`] guarantees the
/// order in which other concurrent readers or writers waiting simultaneously
/// will acquire the mutex.
#[track_caller]
pub fn write(&self) -> RwMutexWriteGuard<T> {
self.queue.wait_until(|| self.try_write())
}
@@ -145,6 +147,7 @@ impl<T: ?Sized> RwMutex<T> {
/// and reader do not differ before invoking the upgrade method. However,
/// only one upreader can exist at any time to avoid deadlock in the
/// upgrade method.
#[track_caller]
pub fn upread(&self) -> RwMutexUpgradeableGuard<T> {
self.queue.wait_until(|| self.try_upread())
}

View File

@@ -65,6 +65,7 @@ impl WaitQueue {
///
/// By taking a condition closure, this wait-wakeup mechanism becomes
/// more efficient and robust.
#[track_caller]
pub fn wait_until<F, R>(&self, mut cond: F) -> R
where
F: FnMut() -> Option<R>,
@@ -194,6 +195,7 @@ impl Waiter {
/// This method returns immediately if the waiter has been woken since the end of the last call
/// to this method (or since the waiter was created, if this method has not been called
/// before). Otherwise, it puts the current thread to sleep until the waiter is woken up.
#[track_caller]
pub fn wait(&self) {
self.waker.do_wait();
}
@@ -203,6 +205,7 @@ impl Waiter {
/// This method will return `Ok(_)` if the condition returns `Some(_)`, and will stop waiting
/// if the cancel condition returns `Err(_)`. In this situation, this method will return the `Err(_)`
/// generated by the cancel condition.
#[track_caller]
pub fn wait_until_or_cancelled<F, R, FCancel, E>(
&self,
mut cond: F,
@@ -265,6 +268,7 @@ impl Waker {
true
}
#[track_caller]
fn do_wait(&self) {
while !self.has_woken.swap(false, Ordering::Acquire) {
scheduler::park_current(|| self.has_woken.load(Ordering::Acquire));

View File

@@ -32,6 +32,7 @@ use core::sync::atomic::Ordering;
/// Marks a function as one that might sleep.
///
/// This function will panic if it is executed in atomic mode.
#[track_caller]
pub fn might_sleep() {
let preempt_count = super::preempt::cpu_local::get_guard_count();
let is_local_irq_enabled = crate::arch::irq::is_local_enabled();
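Besides relocating the panic, the forwarded location can also be embedded in the message itself via `core::panic::Location::caller()`. A hypothetical, simplified variant of such a check (the real `might_sleep` takes no arguments and reads the guard count and IRQ state itself):

use core::panic::Location;

// Hypothetical helper; the parameters stand in for the state the real function reads.
#[track_caller]
pub fn might_sleep_check(guard_count: usize, irqs_enabled: bool) {
    if guard_count != 0 || !irqs_enabled {
        // `Location::caller()` resolves to the sleeping call site, so the
        // message names the offending code directly.
        panic!(
            "{}: trying to sleep in atomic mode (guards = {}, IRQs enabled = {})",
            Location::caller(),
            guard_count,
            irqs_enabled
        );
    }
}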

View File

@@ -83,6 +83,7 @@ impl Task {
///
/// Note that this method cannot be simply named "yield" as the name is
/// a Rust keyword.
#[track_caller]
pub fn yield_now() {
scheduler::yield_now()
}
@@ -90,6 +91,7 @@ impl Task {
/// Kicks the task scheduler to run the task.
///
/// BUG: This method highly depends on the current scheduling policy.
#[track_caller]
pub fn run(self: &Arc<Self>) {
scheduler::run_new_task(self.clone());
}
@@ -239,6 +241,7 @@ impl TaskOptions {
}
/// Builds a new task and runs it immediately.
#[track_caller]
pub fn spawn(self) -> Result<Arc<Task>> {
let task = Arc::new(self.build()?);
task.run();

View File

@@ -33,6 +33,7 @@ pub(super) fn current_task() -> Option<NonNull<Task>> {
///
/// This function will panic if called while holding preemption locks or with
/// local IRQ disabled.
#[track_caller]
pub(super) fn switch_to_task(next_task: Arc<Task>) {
super::atomic_mode::might_sleep();

View File

@@ -105,6 +105,7 @@ pub enum UpdateFlags {
}
/// Preempts the current task.
#[track_caller]
pub(crate) fn might_preempt() {
if !cpu_local::should_preempt() {
return;
@@ -116,6 +117,7 @@ pub(crate) fn might_preempt() {
///
/// Note that this method may return due to spurious wake events. It's the caller's responsibility
/// to detect them (if necessary).
#[track_caller]
pub(crate) fn park_current<F>(has_woken: F)
where
F: Fn() -> bool,
@@ -168,6 +170,7 @@ pub(crate) fn unpark_target(runnable: Arc<Task>) {
/// Enqueues a newly built task.
///
/// Note that the new task is not guaranteed to run at once.
#[track_caller]
pub(super) fn run_new_task(runnable: Arc<Task>) {
// FIXME: remove this check for `SCHEDULER`.
// Currently OSTD cannot know whether its user has injected a scheduler.
@@ -199,6 +202,7 @@ fn set_need_preempt(cpu_id: CpuId) {
/// Dequeues the current task from its runqueue.
///
/// This should only be called if the current task is to exit.
#[track_caller]
pub(super) fn exit_current() -> ! {
reschedule(|local_rq: &mut dyn LocalRunQueue| {
let _ = local_rq.dequeue_current();
@@ -213,6 +217,7 @@ pub(super) fn exit_current() -> ! {
}
/// Yields execution.
#[track_caller]
pub(super) fn yield_now() {
reschedule(|local_rq| {
local_rq.update_current(UpdateFlags::Yield);
@@ -228,6 +233,7 @@ pub(super) fn yield_now() {
/// user-given closure.
///
/// The closure makes the scheduling decision by taking the local runqueue as its input.
#[track_caller]
fn reschedule<F>(mut f: F)
where
F: FnMut(&mut dyn LocalRunQueue) -> ReschedAction,

View File

@@ -151,6 +151,7 @@ impl<'a> UserMode<'a> {
/// cause the method to return
/// and updating the user-mode CPU context,
/// this method can be invoked again to go back to the user space.
#[track_caller]
pub fn execute<F>(&mut self, has_kernel_event: F) -> ReturnReason
where
F: FnMut() -> bool,