Make atomic mode panics have clear messages
committed by Tate, Hongliang Tian
parent 065a3bd1c3
commit 969ac97144
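The hunks below add `#[track_caller]` throughout the sleep/wait call chain (the `Pause` and `WaitTimeout` traits, `WaitQueue`, `Mutex`, `RwMutex`, the scheduler entry points, and `might_sleep` itself), so the panic raised when a sleeping function is entered in atomic mode points at the offending call site rather than at a line inside OSTD. A minimal standalone sketch of the attribute's effect (illustrative names, not asterinas APIs):

```rust
// When a function is marked #[track_caller], panics raised inside it (and
// core::panic::Location::caller()) report the location of the *caller*
// rather than a line inside the function body.
#[track_caller]
fn might_sleep() {
    // Stand-in for the real check of the preemption guard count / IRQ state.
    let in_atomic_mode = true;
    if in_atomic_mode {
        panic!("might_sleep() was called in atomic mode");
    }
}

fn main() {
    might_sleep(); // the panic message points at this line, not at might_sleep's body
}
```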
@@ -32,6 +32,7 @@ pub trait Pause: WaitTimeout {
/// condition is met.
///
/// [`EINTR`]: crate::error::Errno::EINTR
#[track_caller]
fn pause_until<F, R>(&self, cond: F) -> Result<R>
where
F: FnMut() -> Option<R>,

@@ -49,6 +50,7 @@ pub trait Pause: WaitTimeout {
///
/// [`ETIME`]: crate::error::Errno::ETIME
/// [`EINTR`]: crate::error::Errno::EINTR
#[track_caller]
fn pause_until_or_timeout<'a, F, T, R>(&self, mut cond: F, timeout: T) -> Result<R>
where
F: FnMut() -> Option<R>,

@@ -74,6 +76,7 @@ pub trait Pause: WaitTimeout {
/// [`ETIME`]: crate::error::Errno::ETIME
/// [`EINTR`]: crate::error::Errno::EINTR
#[doc(hidden)]
#[track_caller]
fn pause_until_or_timeout_impl<F, R>(
&self,
cond: F,

@@ -94,6 +97,7 @@ pub trait Pause: WaitTimeout {
///
/// [`ETIME`]: crate::error::Errno::ETIME
/// [`EINTR`]: crate::error::Errno::EINTR
#[track_caller]
fn pause_timeout<'a>(&self, timeout: impl Into<TimeoutExt<'a>>) -> Result<()>;
}
@@ -368,6 +368,7 @@ pub trait Pollable {
/// The user must ensure that a call to `try_op()` does not fail with `EAGAIN` when the
/// interesting events occur. However, it is allowed to have spurious `EAGAIN` failures due to
/// race conditions where the events are consumed by another thread.
#[track_caller]
fn wait_events<F, R>(
&self,
mask: IoEvents,
@@ -73,6 +73,7 @@ impl ThreadOptions {
}

/// Builds a new kernel thread and runs it immediately.
#[track_caller]
pub fn spawn(self) -> Arc<Thread> {
let task = self.build();
let thread = task.as_thread().unwrap().clone();

@@ -78,6 +78,7 @@ impl Thread {
}

/// Runs this thread at once.
#[track_caller]
pub fn run(&self) {
self.status.store(ThreadStatus::Running, Ordering::Release);
self.task.upgrade().unwrap().run();

@@ -151,6 +152,7 @@ impl Thread {
/// Yields the execution to another thread.
///
/// This method will return once the current thread is scheduled again.
#[track_caller]
pub fn yield_now() {
Task::yield_now()
}

@@ -158,6 +160,7 @@ impl Thread {
/// Joins the execution of the thread.
///
/// This method will return after the thread exits.
#[track_caller]
pub fn join(&self) {
while !self.is_exited() {
Self::yield_now();
@@ -17,6 +17,7 @@ pub trait WaitTimeout {
/// will return an error with [`ETIME`].
///
/// [`ETIME`]: crate::error::Errno::ETIME
#[track_caller]
fn wait_until_or_timeout<'a, F, T, R>(&self, mut cond: F, timeout: T) -> Result<R>
where
F: FnMut() -> Option<R>,

@@ -41,6 +42,7 @@ pub trait WaitTimeout {
/// - an error with [`ETIME`] if the timeout is reached;
/// - the error returned by the cancel condition if the cancel condition returns `Err(_)`.
#[doc(hidden)]
#[track_caller]
fn wait_until_or_timeout_cancelled<F, R, FCancel>(
&self,
cond: F,
@@ -32,6 +32,7 @@ impl<T: ?Sized> Mutex<T> {
/// Acquires the mutex.
///
/// This method blocks until the mutex can be acquired.
#[track_caller]
pub fn lock(&self) -> MutexGuard<T> {
self.queue.wait_until(|| self.try_lock())
}
@@ -121,6 +121,7 @@ impl<T: ?Sized> RwMutex<T> {
/// upreaders present. The implementation of [`WaitQueue`] guarantees the
/// order in which other concurrent readers or writers waiting simultaneously
/// will acquire the mutex.
#[track_caller]
pub fn read(&self) -> RwMutexReadGuard<T> {
self.queue.wait_until(|| self.try_read())
}

@@ -131,6 +132,7 @@ impl<T: ?Sized> RwMutex<T> {
/// or readers present. The implementation of [`WaitQueue`] guarantees the
/// order in which other concurrent readers or writers waiting simultaneously
/// will acquire the mutex.
#[track_caller]
pub fn write(&self) -> RwMutexWriteGuard<T> {
self.queue.wait_until(|| self.try_write())
}

@@ -145,6 +147,7 @@ impl<T: ?Sized> RwMutex<T> {
/// and reader do not differ before invoking the upgread method. However,
/// only one upreader can exist at any time to avoid deadlock in the
/// upgread method.
#[track_caller]
pub fn upread(&self) -> RwMutexUpgradeableGuard<T> {
self.queue.wait_until(|| self.try_upread())
}
@@ -65,6 +65,7 @@ impl WaitQueue {
///
/// By taking a condition closure, this wait-wakeup mechanism becomes
/// more efficient and robust.
#[track_caller]
pub fn wait_until<F, R>(&self, mut cond: F) -> R
where
F: FnMut() -> Option<R>,
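The `wait_until` doc above describes the condition-closure pattern: the waiter re-evaluates a closure after every wakeup and only returns once it yields `Some(_)`. A rough sketch of the same loop structure using std primitives (this is not the OSTD implementation, just an illustration):

```rust
use std::sync::{Condvar, Mutex};

// Re-check `cond` after every wakeup; spurious or racy wakeups simply loop again.
fn wait_until<T, R>(
    state: &Mutex<T>,
    condvar: &Condvar,
    mut cond: impl FnMut(&mut T) -> Option<R>,
) -> R {
    let mut guard = state.lock().unwrap();
    loop {
        if let Some(result) = cond(&mut *guard) {
            return result; // condition met: stop waiting
        }
        // Sleep until a waker signals this condvar, then re-evaluate the condition.
        guard = condvar.wait(guard).unwrap();
    }
}
```

In OSTD the sleep happens through a waiter/waker pair rather than a `Condvar`, but the re-check-after-wakeup loop is the same idea.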
@@ -194,6 +195,7 @@ impl Waiter {
/// This method returns immediately if the waiter has been woken since the end of the last call
/// to this method (or since the waiter was created, if this method has not been called
/// before). Otherwise, it puts the current thread to sleep until the waiter is woken up.
#[track_caller]
pub fn wait(&self) {
self.waker.do_wait();
}

@@ -203,6 +205,7 @@ impl Waiter {
/// This method will return `Ok(_)` if the condition returns `Some(_)`, and will stop waiting
/// if the cancel condition returns `Err(_)`. In this situation, this method will return the `Err(_)`
/// generated by the cancel condition.
#[track_caller]
pub fn wait_until_or_cancelled<F, R, FCancel, E>(
&self,
mut cond: F,

@@ -265,6 +268,7 @@ impl Waker {
true
}

#[track_caller]
fn do_wait(&self) {
while !self.has_woken.swap(false, Ordering::Acquire) {
scheduler::park_current(|| self.has_woken.load(Ordering::Acquire));
@@ -32,6 +32,7 @@ use core::sync::atomic::Ordering;
/// Marks a function as one that might sleep.
///
/// This function will panic if it is executed in atomic mode.
#[track_caller]
pub fn might_sleep() {
let preempt_count = super::preempt::cpu_local::get_guard_count();
let is_local_irq_enabled = crate::arch::irq::is_local_enabled();
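Because `#[track_caller]` propagates through every annotated frame, annotating the whole chain (for example lock acquisition, the wait queue, and `might_sleep`) means the reported panic location is the first call site outside that chain. A hedged sketch of this propagation (function names are placeholders, not the real call graph):

```rust
// Each annotated frame forwards the caller location upward, so the panic
// below is reported at the line in `main` that calls `lock()`.
#[track_caller]
fn might_sleep() {
    panic!("this function might sleep, but the current context is atomic");
}

#[track_caller]
fn wait_until() {
    might_sleep(); // location keeps propagating upward
}

#[track_caller]
fn lock() {
    wait_until();
}

fn main() {
    lock(); // panic output names this line as the culprit
}
```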
@@ -83,6 +83,7 @@ impl Task {
///
/// Note that this method cannot be simply named "yield" as the name is
/// a Rust keyword.
#[track_caller]
pub fn yield_now() {
scheduler::yield_now()
}

@@ -90,6 +91,7 @@ impl Task {
/// Kicks the task scheduler to run the task.
///
/// BUG: This method highly depends on the current scheduling policy.
#[track_caller]
pub fn run(self: &Arc<Self>) {
scheduler::run_new_task(self.clone());
}

@@ -239,6 +241,7 @@ impl TaskOptions {
}

/// Builds a new task and runs it immediately.
#[track_caller]
pub fn spawn(self) -> Result<Arc<Task>> {
let task = Arc::new(self.build()?);
task.run();
@@ -33,6 +33,7 @@ pub(super) fn current_task() -> Option<NonNull<Task>> {
///
/// This function will panic if called while holding preemption locks or with
/// local IRQ disabled.
#[track_caller]
pub(super) fn switch_to_task(next_task: Arc<Task>) {
super::atomic_mode::might_sleep();
@@ -105,6 +105,7 @@ pub enum UpdateFlags {
}

/// Preempts the current task.
#[track_caller]
pub(crate) fn might_preempt() {
if !cpu_local::should_preempt() {
return;

@@ -116,6 +117,7 @@ pub(crate) fn might_preempt() {
///
/// Note that this method may return due to spurious wake events. It's the caller's responsibility
/// to detect them (if necessary).
#[track_caller]
pub(crate) fn park_current<F>(has_woken: F)
where
F: Fn() -> bool,

@@ -168,6 +170,7 @@ pub(crate) fn unpark_target(runnable: Arc<Task>) {
/// Enqueues a newly built task.
///
/// Note that the new task is not guaranteed to run at once.
#[track_caller]
pub(super) fn run_new_task(runnable: Arc<Task>) {
// FIXME: remove this check for `SCHEDULER`.
// Currently OSTD cannot know whether its user has injected a scheduler.
@@ -199,6 +202,7 @@ fn set_need_preempt(cpu_id: CpuId) {
/// Dequeues the current task from its runqueue.
///
/// This should only be called if the current is to exit.
#[track_caller]
pub(super) fn exit_current() -> ! {
reschedule(|local_rq: &mut dyn LocalRunQueue| {
let _ = local_rq.dequeue_current();

@@ -213,6 +217,7 @@ pub(super) fn exit_current() -> ! {
}

/// Yields execution.
#[track_caller]
pub(super) fn yield_now() {
reschedule(|local_rq| {
local_rq.update_current(UpdateFlags::Yield);
@@ -228,6 +233,7 @@ pub(super) fn yield_now() {
/// user-given closure.
///
/// The closure makes the scheduling decision by taking the local runqueue as its input.
#[track_caller]
fn reschedule<F>(mut f: F)
where
F: FnMut(&mut dyn LocalRunQueue) -> ReschedAction,
@@ -151,6 +151,7 @@ impl<'a> UserMode<'a> {
/// cause the method to return
/// and updating the user-mode CPU context,
/// this method can be invoked again to go back to the user space.
#[track_caller]
pub fn execute<F>(&mut self, has_kernel_event: F) -> ReturnReason
where
F: FnMut() -> bool,