diff --git a/kernel/src/process/signal/pause.rs b/kernel/src/process/signal/pause.rs
index 85744dfba..56625ac73 100644
--- a/kernel/src/process/signal/pause.rs
+++ b/kernel/src/process/signal/pause.rs
@@ -32,6 +32,7 @@ pub trait Pause: WaitTimeout {
     /// condition is met.
     ///
     /// [`EINTR`]: crate::error::Errno::EINTR
+    #[track_caller]
     fn pause_until<F, R>(&self, cond: F) -> Result<R>
     where
         F: FnMut() -> Option<R>,
@@ -49,6 +50,7 @@ pub trait Pause: WaitTimeout {
     ///
     /// [`ETIME`]: crate::error::Errno::ETIME
     /// [`EINTR`]: crate::error::Errno::EINTR
+    #[track_caller]
     fn pause_until_or_timeout<'a, F, T, R>(&self, mut cond: F, timeout: T) -> Result<R>
     where
         F: FnMut() -> Option<R>,
@@ -74,6 +76,7 @@ pub trait Pause: WaitTimeout {
     /// [`ETIME`]: crate::error::Errno::ETIME
     /// [`EINTR`]: crate::error::Errno::EINTR
     #[doc(hidden)]
+    #[track_caller]
     fn pause_until_or_timeout_impl<F, R>(
         &self,
         cond: F,
@@ -94,6 +97,7 @@ pub trait Pause: WaitTimeout {
     ///
     /// [`ETIME`]: crate::error::Errno::ETIME
     /// [`EINTR`]: crate::error::Errno::EINTR
+    #[track_caller]
     fn pause_timeout<'a>(&self, timeout: impl Into<TimeoutExt<'a>>) -> Result<()>;
 }
diff --git a/kernel/src/process/signal/poll.rs b/kernel/src/process/signal/poll.rs
index 1f97a6f93..bf3c63665 100644
--- a/kernel/src/process/signal/poll.rs
+++ b/kernel/src/process/signal/poll.rs
@@ -368,6 +368,7 @@ pub trait Pollable {
     /// The user must ensure that a call to `try_op()` does not fail with `EAGAIN` when the
     /// interesting events occur. However, it is allowed to have spurious `EAGAIN` failures due to
     /// race conditions where the events are consumed by another thread.
+    #[track_caller]
     fn wait_events<F, R>(
         &self,
         mask: IoEvents,
diff --git a/kernel/src/thread/kernel_thread.rs b/kernel/src/thread/kernel_thread.rs
index 04ee8cf0b..9bfb67b0b 100644
--- a/kernel/src/thread/kernel_thread.rs
+++ b/kernel/src/thread/kernel_thread.rs
@@ -73,6 +73,7 @@ impl ThreadOptions {
     }
 
     /// Builds a new kernel thread and runs it immediately.
+    #[track_caller]
     pub fn spawn(self) -> Arc<Thread> {
         let task = self.build();
         let thread = task.as_thread().unwrap().clone();
diff --git a/kernel/src/thread/mod.rs b/kernel/src/thread/mod.rs
index 84b937200..a7d2f0b9c 100644
--- a/kernel/src/thread/mod.rs
+++ b/kernel/src/thread/mod.rs
@@ -78,6 +78,7 @@ impl Thread {
     }
 
     /// Runs this thread at once.
+    #[track_caller]
     pub fn run(&self) {
         self.status.store(ThreadStatus::Running, Ordering::Release);
         self.task.upgrade().unwrap().run();
@@ -151,6 +152,7 @@ impl Thread {
     /// Yields the execution to another thread.
     ///
     /// This method will return once the current thread is scheduled again.
+    #[track_caller]
     pub fn yield_now() {
         Task::yield_now()
     }
@@ -158,6 +160,7 @@ impl Thread {
     /// Joins the execution of the thread.
     ///
     /// This method will return after the thread exits.
+    #[track_caller]
     pub fn join(&self) {
         while !self.is_exited() {
             Self::yield_now();
diff --git a/kernel/src/time/wait.rs b/kernel/src/time/wait.rs
index ce308249b..a246211fa 100644
--- a/kernel/src/time/wait.rs
+++ b/kernel/src/time/wait.rs
@@ -17,6 +17,7 @@ pub trait WaitTimeout {
     /// will return an error with [`ETIME`].
     ///
     /// [`ETIME`]: crate::error::Errno::ETIME
+    #[track_caller]
     fn wait_until_or_timeout<'a, F, T, R>(&self, mut cond: F, timeout: T) -> Result<R>
     where
         F: FnMut() -> Option<R>,
@@ -41,6 +42,7 @@ pub trait WaitTimeout {
     /// - an error with [`ETIME`] if the timeout is reached;
     /// - the error returned by the cancel condition if the cancel condition returns `Err(_)`.
     #[doc(hidden)]
+    #[track_caller]
     fn wait_until_or_timeout_cancelled<F, R, FCancel>(
         &self,
         cond: F,
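The hunks above annotate the kernel-side blocking entry points. For context (this sketch is illustrative, not part of the patch; `lock` and `might_sleep` here are hypothetical stand-ins for the annotated functions): `#[track_caller]` makes `core::panic::Location::caller()`, and hence panic messages, resolve to the annotated function's caller rather than to the function itself, and the effect propagates only through frames that also carry the attribute.

```rust
// Illustrative sketch, not part of the patch.
use core::panic::Location;

#[track_caller]
fn might_sleep() {
    // Resolves to the nearest caller that is *not* itself `#[track_caller]`,
    // i.e. the code that actually invoked the blocking API.
    let caller: &'static Location<'static> = Location::caller();
    println!("blocking path entered at {caller}");
}

#[track_caller]
fn lock() {
    // Because this frame is annotated too, it stays "transparent" and the
    // caller location propagates through it.
    might_sleep();
}

fn main() {
    lock(); // `might_sleep` reports this line, not a line inside `lock`
}
```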
diff --git a/ostd/src/sync/mutex.rs b/ostd/src/sync/mutex.rs
index 75036e181..34bc4f4ed 100644
--- a/ostd/src/sync/mutex.rs
+++ b/ostd/src/sync/mutex.rs
@@ -32,6 +32,7 @@ impl<T: ?Sized> Mutex<T> {
     /// Acquires the mutex.
     ///
     /// This method runs in a blocking way until the mutex can be acquired.
+    #[track_caller]
     pub fn lock(&self) -> MutexGuard<T> {
         self.queue.wait_until(|| self.try_lock())
     }
diff --git a/ostd/src/sync/rwmutex.rs b/ostd/src/sync/rwmutex.rs
index d5fc9a843..2ba98744a 100644
--- a/ostd/src/sync/rwmutex.rs
+++ b/ostd/src/sync/rwmutex.rs
@@ -121,6 +121,7 @@ impl<T: ?Sized> RwMutex<T> {
     /// upreaders present. The implementation of [`WaitQueue`] guarantees the
     /// order in which other concurrent readers or writers waiting simultaneously
     /// will acquire the mutex.
+    #[track_caller]
     pub fn read(&self) -> RwMutexReadGuard<T> {
         self.queue.wait_until(|| self.try_read())
     }
@@ -131,6 +132,7 @@
     /// or readers present. The implementation of [`WaitQueue`] guarantees the
     /// order in which other concurrent readers or writers waiting simultaneously
     /// will acquire the mutex.
+    #[track_caller]
     pub fn write(&self) -> RwMutexWriteGuard<T> {
         self.queue.wait_until(|| self.try_write())
     }
@@ -145,6 +147,7 @@
     /// and reader do not differ before invoking the upgrade method. However,
     /// only one upreader can exist at any time to avoid deadlock in the
     /// upgrade method.
+    #[track_caller]
     pub fn upread(&self) -> RwMutexUpgradeableGuard<T> {
         self.queue.wait_until(|| self.try_upread())
     }
diff --git a/ostd/src/sync/wait.rs b/ostd/src/sync/wait.rs
index dd4d6ce8f..40301b670 100644
--- a/ostd/src/sync/wait.rs
+++ b/ostd/src/sync/wait.rs
@@ -65,6 +65,7 @@ impl WaitQueue {
     ///
     /// By taking a condition closure, this wait-wakeup mechanism becomes
     /// more efficient and robust.
+    #[track_caller]
     pub fn wait_until<F, R>(&self, mut cond: F) -> R
     where
         F: FnMut() -> Option<R>,
@@ -194,6 +195,7 @@
     /// This method returns immediately if the waiter has been woken since the end of the last call
     /// to this method (or since the waiter was created, if this method has not been called
     /// before). Otherwise, it puts the current thread to sleep until the waiter is woken up.
+    #[track_caller]
     pub fn wait(&self) {
         self.waker.do_wait();
     }
@@ -203,6 +205,7 @@
     /// This method will return `Ok(_)` if the condition returns `Some(_)`, and will stop waiting
     /// if the cancel condition returns `Err(_)`. In this situation, this method will return the `Err(_)`
     /// generated by the cancel condition.
+    #[track_caller]
    pub fn wait_until_or_cancelled<F, R, FCancel>(
        &self,
        mut cond: F,
@@ -265,6 +268,7 @@
         true
     }
 
+    #[track_caller]
     fn do_wait(&self) {
         while !self.has_woken.swap(false, Ordering::Acquire) {
             scheduler::park_current(|| self.has_woken.load(Ordering::Acquire));
diff --git a/ostd/src/task/atomic_mode.rs b/ostd/src/task/atomic_mode.rs
index 799c8b288..eb959a7ac 100644
--- a/ostd/src/task/atomic_mode.rs
+++ b/ostd/src/task/atomic_mode.rs
@@ -32,6 +32,7 @@ use core::sync::atomic::Ordering;
 /// Marks a function as one that might sleep.
 ///
 /// This function will panic if it is executed in atomic mode.
+#[track_caller]
 pub fn might_sleep() {
     let preempt_count = super::preempt::cpu_local::get_guard_count();
     let is_local_irq_enabled = crate::arch::irq::is_local_enabled();
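`might_sleep()` above is the function that actually panics when a sleeping path is entered in atomic mode, so it is where the caller location surfaces. A simplified standalone illustration (not part of the patch; the `in_atomic_mode` flag and the message text are stand-ins, the real check inspects the guard count and local-IRQ state as shown above):

```rust
// Illustrative sketch: a `panic!` inside a `#[track_caller]` function is
// attributed to the caller's file/line.
#[track_caller]
fn might_sleep(in_atomic_mode: bool) {
    // Stand-in condition to keep the sketch self-contained.
    if in_atomic_mode {
        panic!("a sleeping function was called in atomic mode");
    }
}

fn main() {
    // The panic message names this line, immediately identifying the
    // offending call site instead of a line inside `might_sleep`.
    might_sleep(true);
}
```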
diff --git a/ostd/src/task/mod.rs b/ostd/src/task/mod.rs
index fcac605ec..ae3a9984d 100644
--- a/ostd/src/task/mod.rs
+++ b/ostd/src/task/mod.rs
@@ -83,6 +83,7 @@ impl Task {
     ///
     /// Note that this method cannot be simply named "yield" as the name is
     /// a Rust keyword.
+    #[track_caller]
     pub fn yield_now() {
         scheduler::yield_now()
     }
@@ -90,6 +91,7 @@ impl Task {
     /// Kicks the task scheduler to run the task.
     ///
     /// BUG: This method highly depends on the current scheduling policy.
+    #[track_caller]
     pub fn run(self: &Arc<Self>) {
         scheduler::run_new_task(self.clone());
     }
@@ -239,6 +241,7 @@ impl TaskOptions {
     }
 
     /// Builds a new task and runs it immediately.
+    #[track_caller]
     pub fn spawn(self) -> Result<Arc<Task>> {
         let task = Arc::new(self.build()?);
         task.run();
diff --git a/ostd/src/task/processor.rs b/ostd/src/task/processor.rs
index 51527cc5d..1eadb4be3 100644
--- a/ostd/src/task/processor.rs
+++ b/ostd/src/task/processor.rs
@@ -33,6 +33,7 @@ pub(super) fn current_task() -> Option<Arc<Task>> {
 ///
 /// This function will panic if called while holding preemption locks or with
 /// local IRQ disabled.
+#[track_caller]
 pub(super) fn switch_to_task(next_task: Arc<Task>) {
     super::atomic_mode::might_sleep();
diff --git a/ostd/src/task/scheduler/mod.rs b/ostd/src/task/scheduler/mod.rs
index e714ee280..ae5baa3c3 100644
--- a/ostd/src/task/scheduler/mod.rs
+++ b/ostd/src/task/scheduler/mod.rs
@@ -105,6 +105,7 @@ pub enum UpdateFlags {
 }
 
 /// Preempts the current task.
+#[track_caller]
 pub(crate) fn might_preempt() {
     if !cpu_local::should_preempt() {
         return;
@@ -116,6 +117,7 @@
 ///
 /// Note that this method may return due to spurious wake events. It's the caller's responsibility
 /// to detect them (if necessary).
+#[track_caller]
 pub(crate) fn park_current<F>(has_woken: F)
 where
     F: Fn() -> bool,
@@ -168,6 +170,7 @@ pub(crate) fn unpark_target(runnable: Arc<Task>) {
 /// Enqueues a newly built task.
 ///
 /// Note that the new task is not guaranteed to run at once.
+#[track_caller]
 pub(super) fn run_new_task(runnable: Arc<Task>) {
     // FIXME: remove this check for `SCHEDULER`.
     // Currently OSTD cannot know whether its user has injected a scheduler.
@@ -199,6 +202,7 @@ fn set_need_preempt(cpu_id: CpuId) {
 /// Dequeues the current task from its runqueue.
 ///
 /// This should only be called if the current is to exit.
+#[track_caller]
 pub(super) fn exit_current() -> ! {
     reschedule(|local_rq: &mut dyn LocalRunQueue| {
         let _ = local_rq.dequeue_current();
@@ -213,6 +217,7 @@
 }
 
 /// Yields execution.
+#[track_caller]
 pub(super) fn yield_now() {
     reschedule(|local_rq| {
         local_rq.update_current(UpdateFlags::Yield);
@@ -228,6 +233,7 @@
 /// user-given closure.
 ///
 /// The closure makes the scheduling decision by taking the local runqueue as its input.
+#[track_caller]
 fn reschedule<F>(mut f: F)
 where
     F: FnMut(&mut dyn LocalRunQueue) -> ReschedAction,
diff --git a/ostd/src/user.rs b/ostd/src/user.rs
index 16fe7fe9d..070b3cfaa 100644
--- a/ostd/src/user.rs
+++ b/ostd/src/user.rs
@@ -151,6 +151,7 @@ impl<'a> UserMode<'a> {
     /// cause the method to return
     /// and updating the user-mode CPU context,
     /// this method can be invoked again to go back to the user space.
+    #[track_caller]
     pub fn execute<F>(&mut self, has_kernel_event: F) -> ReturnReason
     where
         F: FnMut() -> bool,
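Taken together, the annotations cover every frame from the public entry points (`Mutex::lock`, `WaitQueue::wait_until`, `TaskOptions::spawn`, `UserMode::execute`) down to `might_sleep()` and `reschedule()`, because `#[track_caller]` stops propagating at the first un-annotated frame. A standalone sketch of that pitfall (not part of the patch; function names are hypothetical):

```rust
use core::panic::Location;

#[track_caller]
fn leaf() -> &'static Location<'static> {
    // Stand-in for `might_sleep`: reports where the chain was entered.
    Location::caller()
}

fn middle_unannotated() -> &'static Location<'static> {
    leaf() // chain breaks: `leaf` sees THIS line as its caller
}

#[track_caller]
fn middle_annotated() -> &'static Location<'static> {
    leaf() // chain intact: `leaf` sees `middle_annotated`'s caller
}

fn main() {
    println!("{}", middle_unannotated()); // a line inside `middle_unannotated`
    println!("{}", middle_annotated());   // this call site in `main`
}
```

A single un-annotated helper in the middle is enough to make the reported location point inside the sync or scheduler internals rather than at the offending call site, which is presumably why even private functions such as `do_wait` and `reschedule` are annotated.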