Remove the timer module from the aster-frame and adjust the related code

Chen Chengjun 2024-04-30 11:21:56 +08:00 committed by Tate, Hongliang Tian
parent d019de29f9
commit b226928349
8 changed files with 118 additions and 184 deletions

View File

@@ -27,7 +27,6 @@ extern crate alloc;
#[cfg(ktest)]
#[macro_use]
extern crate ktest;
#[macro_use]
extern crate static_assertions;
pub mod arch;
@@ -43,7 +42,6 @@ pub mod panicking;
pub mod prelude;
pub mod sync;
pub mod task;
pub mod timer;
pub mod trap;
pub mod user;
pub mod vm;

View File

@@ -23,5 +23,5 @@ pub use self::{
RwMutexReadGuard, RwMutexUpgradeableGuard, RwMutexWriteGuard,
},
spin::{ArcSpinLockGuard, SpinLock, SpinLockGuard},
wait::WaitQueue,
wait::{WaitQueue, Waiter, Waker},
};

View File

@@ -1,16 +1,10 @@
// SPDX-License-Identifier: MPL-2.0
use alloc::{collections::VecDeque, sync::Arc};
use core::{
sync::atomic::{AtomicBool, AtomicU32, Ordering},
time::Duration,
};
use core::sync::atomic::{AtomicBool, AtomicU32, Ordering};
use super::SpinLock;
use crate::{
arch::timer::{add_timeout_list, TIMER_FREQ},
task::{add_task, current_task, schedule, Task, TaskStatus},
};
use crate::task::{add_task, current_task, schedule, Task, TaskStatus};
/// A wait queue.
///
@@ -42,72 +36,44 @@ impl WaitQueue {
///
/// By taking a condition closure, this wait-wakeup mechanism becomes
/// more efficient and robust.
pub fn wait_until<F, R>(&self, cond: F) -> R
where
F: FnMut() -> Option<R>,
{
self.do_wait(cond, None).unwrap()
}
/// Wait until some condition returns Some(_), or a given timeout is reached. If
/// the condition does not become Some(_) before the timeout is reached, the
/// function will return None.
pub fn wait_until_or_timeout<F, R>(&self, cond: F, timeout: &Duration) -> Option<R>
where
F: FnMut() -> Option<R>,
{
self.do_wait(cond, Some(timeout))
}
fn do_wait<F, R>(&self, mut cond: F, timeout: Option<&Duration>) -> Option<R>
pub fn wait_until<F, R>(&self, mut cond: F) -> R
where
F: FnMut() -> Option<R>,
{
if let Some(res) = cond() {
return Some(res);
return res;
}
let (waiter, waker) = Waiter::new_pair();
let timer_callback = timeout.map(|timeout| {
let remaining_ticks = {
// FIXME: We currently require 1000 to be a multiple of TIMER_FREQ, but
// this may not hold true in the future, because TIMER_FREQ can be greater
// than 1000. Then, the code will need to be refactored.
const_assert!(1000 % TIMER_FREQ == 0);
let ms_per_tick = 1000 / TIMER_FREQ;
// The ticks should be equal to or greater than timeout
(timeout.as_millis() as u64 + ms_per_tick - 1) / ms_per_tick
};
add_timeout_list(remaining_ticks, waker.clone(), |timer_call_back| {
let waker = timer_call_back.data().downcast_ref::<Arc<Waker>>().unwrap();
waker.wake_up();
})
});
loop {
// Enqueue the waker before checking `cond()` to avoid races
self.enqueue(waker.clone());
if let Some(res) = cond() {
if let Some(timer_callback) = timer_callback {
timer_callback.cancel();
}
return Some(res);
return res;
};
waiter.wait();
}
}
if let Some(ref timer_callback) = timer_callback
&& timer_callback.is_expired()
{
// Drop the waiter and check again to avoid missing a wake event
drop(waiter);
return cond();
}
pub fn wait_until_or_cancelled<F, R>(&self, mut cond: F) -> R
where
F: FnMut() -> Option<R>,
{
if let Some(res) = cond() {
return res;
}
let (waiter, waker) = Waiter::new_pair();
loop {
// Enqueue the waker before checking `cond()` to avoid races
self.enqueue(waker.clone());
if let Some(res) = cond() {
return res;
};
waiter.wait();
}
}
@@ -170,7 +136,7 @@ impl WaitQueue {
///
/// By definition, a waiter belongs to the current thread, so it cannot be sent to another thread
/// and its reference cannot be shared between threads.
struct Waiter {
pub struct Waiter {
waker: Arc<Waker>,
}
@@ -181,7 +147,7 @@ impl !Sync for Waiter {}
///
/// A waker can be created by calling [`Waiter::new_pair`]. This method creates an `Arc<Waker>` that can
/// be used across different threads.
struct Waker {
pub struct Waker {
has_woken: AtomicBool,
task: Arc<Task>,
}
@@ -271,3 +237,9 @@ impl Waker {
self.has_woken.store(true, Ordering::Release);
}
}
impl Default for Waiter {
fn default() -> Self {
Self::new()
}
}
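For orientation, here is a minimal sketch of how the simplified `wait_until` and the now-exported `Waiter`/`Waker` pair are meant to be used from caller code. This is a hypothetical example, not part of this commit; `wake_one` is assumed to be the queue's wake-up method, and the mailbox type is made up for illustration.
use aster_frame::sync::{SpinLock, WaitQueue};
/// A toy single-slot mailbox: consumers sleep on the wait queue until a producer fills the slot.
struct Mailbox {
    queue: WaitQueue,
    slot: SpinLock<Option<u32>>,
}
impl Mailbox {
    /// Consumer side: sleeps until the condition closure yields `Some(_)`.
    fn receive(&self) -> u32 {
        self.queue.wait_until(|| self.slot.lock_irq_disabled().take())
    }
    /// Producer side: fills the slot, then wakes a sleeping consumer.
    fn send(&self, value: u32) {
        *self.slot.lock_irq_disabled() = Some(value);
        self.queue.wake_one(); // assumed wake-up method; any wake path works here
    }
}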

View File

@@ -1,105 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! Timer.
use core::{sync::atomic::Ordering, time::Duration};
pub use crate::arch::timer::read_monotonic_milli_seconds;
use crate::{
arch::timer::{add_timeout_list, TimerCallback, TICK, TIMER_FREQ},
prelude::*,
sync::SpinLock,
};
/// A timer invokes a callback function after a specified span of time has elapsed.
///
/// A new timer is initially inactive. Only after a timeout value is set with
/// the `set` method can the timer become active and the callback function
/// be triggered.
///
/// Timers are one-shot. Once the timeout expires, one has to set the timer again
/// in order to trigger the callback again.
pub struct Timer {
function: Arc<dyn Fn(Arc<Self>) + Send + Sync>,
inner: SpinLock<TimerInner>,
}
#[derive(Default)]
struct TimerInner {
start_tick: u64,
timeout_tick: u64,
timer_callback: Option<Arc<TimerCallback>>,
}
fn timer_callback(callback: &TimerCallback) {
let data = callback.data();
if data.is::<Arc<Timer>>() {
let timer = data.downcast_ref::<Arc<Timer>>().unwrap();
timer.function.call((timer.clone(),));
} else {
panic!("the timer callback is not Timer structure");
}
}
const NANOS_DIVIDE: u64 = 1_000_000_000 / TIMER_FREQ;
impl Timer {
/// Creates a new instance, given a callback function.
pub fn new<F>(f: F) -> Result<Arc<Self>>
where
F: Fn(Arc<Timer>) + Send + Sync + 'static,
{
Ok(Arc::new(Self {
function: Arc::new(f),
inner: SpinLock::new(TimerInner::default()),
}))
}
/// Set a timeout value.
///
/// If a timeout value is already set, the timeout value will be refreshed.
///
pub fn set(self: &Arc<Self>, timeout: Duration) {
let mut lock = self.inner.lock_irq_disabled();
match &lock.timer_callback {
Some(callback) => {
callback.cancel();
}
None => {}
}
let tick_count =
timeout.as_secs() * TIMER_FREQ + timeout.subsec_nanos() as u64 / NANOS_DIVIDE;
let tick = TICK.load(Ordering::SeqCst);
lock.start_tick = tick;
lock.timeout_tick = tick + tick_count;
lock.timer_callback = Some(add_timeout_list(tick_count, self.clone(), timer_callback));
}
/// Returns the remaining timeout value.
///
/// If the timer is not set, then the remaining timeout value is zero.
pub fn remain(&self) -> Duration {
let lock = self.inner.lock_irq_disabled();
let tick_remain = {
let tick = TICK.load(Ordering::SeqCst) as i64;
lock.timeout_tick as i64 - tick
};
if tick_remain <= 0 {
Duration::new(0, 0)
} else {
let second_count = tick_remain as u64 / TIMER_FREQ;
let remain_count = tick_remain as u64 % TIMER_FREQ;
Duration::new(second_count, (remain_count * NANOS_DIVIDE) as u32)
}
}
/// Clear the timeout value.
pub fn clear(&self) {
let mut lock = self.inner.lock_irq_disabled();
if let Some(callback) = &lock.timer_callback {
callback.cancel();
}
lock.timeout_tick = 0;
lock.start_tick = 0;
lock.timer_callback = None;
}
}
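For reference, the removed `Timer` was a one-shot, tick-based timer; a caller would have used it roughly as follows. This is a sketch based only on the deleted file above (the import path is the old `aster_frame::timer` module removed by this commit, and the periodic work is a placeholder).
use core::time::Duration;
use aster_frame::timer::Timer; // module removed by this commit
fn old_timer_usage() {
    // A one-shot timer whose callback re-arms itself, yielding periodic behavior.
    let timer = Timer::new(|timer| {
        // ... periodic work would go here ...
        timer.set(Duration::from_secs(1)); // re-arm, since timers are one-shot
    })
    .unwrap();
    timer.set(Duration::from_secs(1)); // arm for the first time
    let _left: Duration = timer.remain(); // remaining time; zero if unset or expired
    timer.clear(); // cancel the pending timeout
}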

View File

@@ -2,7 +2,7 @@
use core::time::Duration;
use aster_frame::{task::Priority, timer::read_monotonic_milli_seconds};
use aster_frame::{arch::timer::Jiffies, task::Priority};
use super::Iface;
use crate::{
@@ -11,6 +11,7 @@ use crate::{
kernel_thread::{KernelThreadExt, ThreadOptions},
Thread,
},
time::wait::WaitTimeout,
};
pub enum BindPortConfig {
@@ -58,7 +59,7 @@ pub fn spawn_background_poll_thread(iface: Arc<dyn Iface>) {
wait_queue.wait_until(|| iface.next_poll_at_ms())
};
let now_as_ms = read_monotonic_milli_seconds();
let now_as_ms = Jiffies::elapsed().as_duration().as_millis() as u64;
// FIXME: Ideally, we should perform the `poll` just before `next_poll_at_ms`.
// However, this approach may result in a spinning busy loop

View File

@@ -8,7 +8,9 @@ use core::{
use aster_frame::sync::WaitQueue;
use super::{sig_mask::SigMask, SigEvents, SigEventsFilter};
use crate::{events::Observer, prelude::*, process::posix_thread::PosixThreadExt};
use crate::{
events::Observer, prelude::*, process::posix_thread::PosixThreadExt, time::wait::WaitTimeout,
};
/// A `Pauser` allows pausing the execution of the current thread until certain conditions are reached.
///

View File

@@ -0,0 +1,57 @@
// SPDX-License-Identifier: MPL-2.0
use alloc::sync::Arc;
use core::time::Duration;
use aster_frame::sync::{WaitQueue, Waiter, Waker};
use super::clock::JIFFIES_TIMER_MANAGER;
/// A trait that provides timeout-related waiting functions for `WaitQueue`.
pub trait WaitTimeout {
/// Wait until some condition returns `Some(_)`, or a given timeout is reached. If
/// the condition does not become `Some(_)` before the timeout is reached, the
/// function will return `None`.
fn wait_until_or_timeout<F, R>(&self, cond: F, timeout: &Duration) -> Option<R>
where
F: FnMut() -> Option<R>;
}
impl WaitTimeout for WaitQueue {
fn wait_until_or_timeout<F, R>(&self, mut cond: F, timeout: &Duration) -> Option<R>
where
F: FnMut() -> Option<R>,
{
if let Some(res) = cond() {
return Some(res);
}
let (waiter, waker) = Waiter::new_pair();
let wake_up = {
let waker = waker.clone();
move || {
waker.wake_up();
}
};
let jiffies_timer = JIFFIES_TIMER_MANAGER.get().unwrap().create_timer(wake_up);
jiffies_timer.set_timeout(*timeout);
loop {
// Enqueue the waker before checking `cond()` to avoid races
self.enqueue(waker.clone());
if let Some(res) = cond() {
jiffies_timer.clear();
return Some(res);
};
if jiffies_timer.remain() == Duration::ZERO {
drop(waiter);
return cond();
}
waiter.wait();
}
}
}
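A short usage sketch of the new `WaitTimeout` extension trait (hypothetical caller code; `ready_value` stands in for whatever shared state the real caller checks):
use core::time::Duration;
use aster_frame::sync::WaitQueue;
use crate::time::wait::WaitTimeout;
fn recv_with_timeout(queue: &WaitQueue) -> Option<u32> {
    // Returns `Some(value)` if the condition becomes `Some(_)` within 500 ms,
    // or `None` if the jiffies timer expires first.
    queue.wait_until_or_timeout(ready_value, &Duration::from_millis(500))
}
fn ready_value() -> Option<u32> {
    // Placeholder condition; a real caller would check shared state here.
    None
}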

View File

@@ -4,11 +4,12 @@
//!
//! Use `init` to initialize this module.
use alloc::sync::Arc;
use core::time::Duration;
use core::sync::atomic::{AtomicU64, Ordering};
use aster_frame::{
arch::{read_tsc, x86::tsc_freq},
timer::Timer,
use aster_frame::arch::{
read_tsc,
timer::{self, TIMER_FREQ},
x86::tsc_freq,
};
use spin::Once;
@@ -53,7 +54,7 @@ pub(super) fn read_instant() -> Instant {
clock.read_instant()
}
fn update_clocksource(timer: Arc<Timer>) {
fn update_clocksource() {
let clock = CLOCK.get().unwrap();
clock.update();
@@ -62,20 +63,28 @@
let (last_instant, last_cycles) = clock.last_record();
update_fn(last_instant, last_cycles);
}
// Setting the timer to `clock.max_delay_secs() - 1` avoids the actual delay
// exceeding the maximum delay seconds due to the latency of execution.
timer.set(Duration::from_secs(clock.max_delay_secs() - 1));
}
static TSC_UPDATE_COUNTER: AtomicU64 = AtomicU64::new(1);
fn init_timer() {
let timer = Timer::new(update_clocksource).unwrap();
// The initial timer should be set to `clock.max_delay_secs() >> 1` or something much smaller than `max_delay_secs`.
// The `max_delay_secs` here should be set to `clock.max_delay_secs() >> 1` or something much smaller than `clock.max_delay_secs()`.
// This is because the initialization of this timer occurs during system startup,
// and the system will also undergo other initialization processes, during which time interrupts are disabled.
// This results in the actual trigger time of the timer being delayed by about 5 seconds compared to the set time.
// Without KVM, the delay will be even larger.
// TODO: This is a temporary solution, and should be modified in the future.
timer.set(Duration::from_secs(
CLOCK.get().unwrap().max_delay_secs() >> 1,
));
let max_delay_secs = CLOCK.get().unwrap().max_delay_secs() >> 1;
let delay_counts = TIMER_FREQ * max_delay_secs;
let update = move || {
let counter = TSC_UPDATE_COUNTER.fetch_add(1, Ordering::Relaxed);
if counter % delay_counts == 0 {
update_clocksource();
}
};
// TODO: Re-organize the code structure and use the `Timer` to perform the update.
timer::register_callback(update);
}
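The registered callback fires once per timer tick, i.e. `TIMER_FREQ` times per second, so `update_clocksource` runs roughly once every `max_delay_secs` seconds. Below is a self-contained sketch of this tick-counter pattern; the constants are illustrative assumptions, whereas the real values come from `TIMER_FREQ` and `CLOCK.get().unwrap().max_delay_secs()` above.
use core::sync::atomic::{AtomicU64, Ordering};
// Illustrative values only, e.g. a 1000 Hz tick and a 32-second refresh interval.
const TIMER_FREQ: u64 = 1000;
const MAX_DELAY_SECS: u64 = 32;
static COUNTER: AtomicU64 = AtomicU64::new(1);
/// Called on every timer tick; runs `refresh` once every `TIMER_FREQ * MAX_DELAY_SECS` ticks.
fn on_tick(refresh: impl Fn()) {
    let delay_counts = TIMER_FREQ * MAX_DELAY_SECS;
    let counter = COUNTER.fetch_add(1, Ordering::Relaxed);
    if counter % delay_counts == 0 {
        refresh();
    }
}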