Tate, Hongliang Tian
2022-08-25 02:33:53 -07:00
parent 17bd704722
commit 2e28cf5fcb
5 changed files with 339 additions and 0 deletions


@ -61,6 +61,25 @@ impl AtomicBits {
}
}
/// Clear all the bits.
pub fn clear(&self) {
todo!()
}
/// Are all bits ones.
pub fn is_full(&self) -> bool {
self.match_pattern(!0)
}
/// Are all bits zeroes.
pub fn is_empty(&self) -> bool {
self.match_pattern(0)
}
fn match_pattern(&self, pattern: u64) -> bool {
todo!()
}
/// Get an iterator for the bits.
pub fn iter<'a>(&'a self) -> Iter<'a> {
Iter::new(self)
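
The clear and match_pattern bodies in this hunk are still todo!(). Below is a minimal sketch of how they might look, assuming AtomicBits keeps its bits in a vector of AtomicU64 words behind fields named u64s and num_bits; both field names and the struct layout are assumptions, not taken from this diff.

use alloc::vec::Vec;
use core::sync::atomic::{
    AtomicU64,
    Ordering::{Acquire, Release},
};

// Hypothetical layout; the real AtomicBits in this crate may differ.
struct AtomicBitsSketch {
    num_bits: usize,
    u64s: Vec<AtomicU64>,
}

impl AtomicBitsSketch {
    /// Clear all the bits.
    pub fn clear(&self) {
        for word in self.u64s.iter() {
            word.store(0, Release);
        }
    }

    /// Do all valid bits equal the corresponding bits of `pattern`?
    fn match_pattern(&self, pattern: u64) -> bool {
        let num_words = self.u64s.len();
        self.u64s.iter().enumerate().all(|(i, word)| {
            let value = word.load(Acquire);
            let rem = self.num_bits % 64;
            if i + 1 == num_words && rem != 0 {
                // Mask out the unused high bits of the last, partial word.
                let mask = (1u64 << rem) - 1;
                (value & mask) == (pattern & mask)
            } else {
                value == pattern
            }
        })
    }
}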


@ -1,3 +1,4 @@
mod rcu;
mod spin;
pub mod up;
mod wait;
@ -6,3 +7,4 @@ mod atomic_bits;
pub use self::atomic_bits::{AtomicBits};
pub use self::spin::{SpinLock, SpinLockGuard};
pub use self::wait::WaitQueue;
pub use self::rcu::{Rcu, RcuReadGuard, RcuReclaimer, OwnerPtr, pass_quiescent_state};


@ -0,0 +1,102 @@
//! Read-copy update (RCU).
use core::marker::PhantomData;
use core::ops::Deref;
use core::sync::atomic::{
AtomicPtr,
Ordering::{AcqRel, Acquire, Release},
};
use self::monitor::RcuMonitor;
use crate::prelude::*;
use crate::sync::WaitQueue;
mod monitor;
mod owner_ptr;
pub use owner_ptr::OwnerPtr;
pub struct Rcu<P: OwnerPtr> {
ptr: AtomicPtr<<P as OwnerPtr>::Target>,
marker: PhantomData<P::Target>,
}
impl<P: OwnerPtr> Rcu<P> {
pub fn new(ptr: P) -> Self {
let ptr = AtomicPtr::new(OwnerPtr::into_raw(ptr) as *mut _);
Self {
ptr,
marker: PhantomData,
}
}
pub fn get(&self) -> RcuReadGuard<'_, P> {
let obj = unsafe { &*self.ptr.load(Acquire) };
RcuReadGuard { obj, rcu: self }
}
}
impl<P: OwnerPtr + Send> Rcu<P> {
pub fn replace(&self, new_ptr: P) -> RcuReclaimer<P> {
let new_ptr = <P as OwnerPtr>::into_raw(new_ptr) as *mut _;
let old_ptr = {
let old_raw_ptr = self.ptr.swap(new_ptr, AcqRel);
unsafe { <P as OwnerPtr>::from_raw(old_raw_ptr) }
};
RcuReclaimer { ptr: old_ptr }
}
}
pub struct RcuReadGuard<'a, P: OwnerPtr> {
obj: &'a <P as OwnerPtr>::Target,
rcu: &'a Rcu<P>,
}
impl<'a, P: OwnerPtr> Deref for RcuReadGuard<'a, P> {
type Target = <P as OwnerPtr>::Target;
fn deref(&self) -> &Self::Target {
self.obj
}
}
#[repr(transparent)]
pub struct RcuReclaimer<P> {
ptr: P,
}
impl<P: Send + 'static> RcuReclaimer<P> {
pub fn delay(self) {
let ptr: P = unsafe {
// Read the pointer out of `self` bitwise; the `mem::forget` below
// prevents a double drop and skips the blocking `Drop` impl.
let ptr = core::ptr::read(&self.ptr);
core::mem::forget(self);
ptr
};
get_singleton().after_grace_period(move || {
drop(ptr);
});
}
}
impl<P> Drop for RcuReclaimer<P> {
fn drop(&mut self) {
let wq = Arc::new(WaitQueue::new());
get_singleton().after_grace_period({
let wq = wq.clone();
move || {
wq.wake_one();
}
});
wq.wait_until(|| true);
}
}
pub unsafe fn pass_quiescent_state() {
get_singleton().pass_quiescent_state()
}
fn get_singleton() -> &'static RcuMonitor {
todo!()
}
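
Putting the pieces together, typical use of this API could look roughly like the sketch below; the Config type and its field are invented for illustration and are not part of the commit.

use alloc::boxed::Box;

// A hypothetical shared object protected by RCU.
struct Config {
    max_files: usize,
}

fn example() {
    let rcu = Rcu::new(Box::new(Config { max_files: 1024 }));

    // Readers dereference through a guard; no lock is taken on the read side.
    {
        let guard = rcu.get();
        let _limit = guard.max_files;
    } // The read-side critical section ends when the guard is dropped.

    // A writer installs a new version and defers freeing the old one
    // until every CPU has passed a quiescent state.
    let reclaimer = rcu.replace(Box::new(Config { max_files: 4096 }));
    reclaimer.delay(); // Dropping it instead would block until the grace period ends.
}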


@ -0,0 +1,138 @@
use alloc::collections::VecDeque;
use core::sync::atomic::{
AtomicBool,
Ordering::{Acquire, Relaxed, Release},
};
use crate::cpu;
use crate::prelude::*;
use crate::sync::AtomicBits;
use crate::sync::SpinLock;
/// An RCU monitor ensures the completion of _grace periods_ by tracking
/// each CPU's passage through _quiescent states_.
pub struct RcuMonitor {
is_monitoring: AtomicBool,
state: SpinLock<State>,
}
impl RcuMonitor {
pub fn new(num_cpus: u32) -> Self {
Self {
is_monitoring: AtomicBool::new(false),
state: SpinLock::new(State::new(num_cpus)),
}
}
pub unsafe fn pass_quiescent_state(&self) {
// Fast path
if !self.is_monitoring.load(Relaxed) {
return;
}
// Check if the current GP is complete after passing the quiescent state
// on the current CPU. If GP is complete, take the callbacks of the current
// GP.
let callbacks = {
let mut state = self.state.lock();
if state.current_gp.is_complete() {
return;
}
state.current_gp.pass_quiescent_state();
if !state.current_gp.is_complete() {
return;
}
// Now that the current GP is complete, take its callbacks
let current_callbacks = state.current_gp.take_callbacks();
// Check if we need to watch for a next GP
if !state.next_callbacks.is_empty() {
let callbacks = core::mem::take(&mut state.next_callbacks);
state.current_gp.restart(callbacks);
} else {
self.is_monitoring.store(false, Relaxed);
}
current_callbacks
};
// Invoke the callbacks to notify the completion of GP
for f in callbacks {
(f)();
}
}
pub fn after_grace_period<F>(&self, f: F)
where
F: FnOnce() -> () + Send + 'static,
{
let mut state = self.state.lock();
state.next_callbacks.push_back(Box::new(f));
if !state.current_gp.is_complete() {
return;
}
let callbacks = core::mem::take(&mut state.next_callbacks);
state.current_gp.restart(callbacks);
self.is_monitoring.store(true, Relaxed);
}
}
struct State {
current_gp: GracePeriod,
next_callbacks: Callbacks,
}
impl State {
pub fn new(num_cpus: u32) -> Self {
Self {
current_gp: GracePeriod::new(num_cpus),
next_callbacks: VecDeque::new(),
}
}
}
type Callbacks = VecDeque<Box<dyn FnOnce() -> () + Send + 'static>>;
struct GracePeriod {
callbacks: Callbacks,
cpu_mask: AtomicBits,
is_complete: bool,
}
impl GracePeriod {
pub fn new(num_cpus: u32) -> Self {
Self {
callbacks: Default::default(),
cpu_mask: AtomicBits::new_zeroes(num_cpus as usize),
is_complete: false,
}
}
pub fn is_complete(&self) -> bool {
self.is_complete
}
pub unsafe fn pass_quiescent_state(&mut self) {
let this_cpu = cpu::this_cpu();
self.cpu_mask.set(this_cpu as usize, true);
if self.cpu_mask.is_full() {
self.is_complete = true;
}
}
pub fn take_callbacks(&mut self) -> Callbacks {
core::mem::take(&mut self.callbacks)
}
pub fn restart(&mut self, callbacks: Callbacks) {
self.is_complete = false;
self.cpu_mask.clear();
self.callbacks = callbacks;
}
}
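
For context, the monitor is driven from two sides: writers enqueue callbacks via after_grace_period, and every CPU periodically reports a quiescent state. A hedged sketch of a possible call site follows; the tick-handler and helper names are invented, and the real hook is not part of this diff.

use alloc::boxed::Box;

// Hypothetical per-CPU timer tick; the real call site is not shown in this commit.
fn on_timer_tick(monitor: &RcuMonitor) {
    // Safety (assumed): the current CPU holds no RCU read-side references here,
    // so it is allowed to report a quiescent state.
    unsafe {
        monitor.pass_quiescent_state();
    }
}

// Defer freeing `garbage` until the current grace period completes, i.e.,
// until every CPU has passed a quiescent state at least once.
fn free_later(monitor: &RcuMonitor, garbage: Box<[u8]>) {
    monitor.after_grace_period(move || drop(garbage));
}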


@ -0,0 +1,78 @@
use core::ptr::NonNull;
use crate::prelude::*;
/// A trait that abstracts pointers that own the objects they refer to.
///
/// The most typical examples are smart pointer types like `Box<T>` and `Arc<T>`,
/// which can be converted to and from the raw pointer type `*const T`.
pub trait OwnerPtr {
/// The target type that this pointer refers to.
// TODO: allow ?Sized
type Target;
/// Converts to a raw pointer.
///
/// If `Self` owns the object that it refers to (e.g., `Box<_>`), then
/// each call to `into_raw` must be paired with a call to `from_raw`
/// in order to avoid memory leakage.
fn into_raw(self) -> *const Self::Target;
/// Converts back from a raw pointer.
///
/// # Safety
///
/// The raw pointer must have been previously returned by a call to `into_raw`.
unsafe fn from_raw(ptr: *const Self::Target) -> Self;
}
impl<T> OwnerPtr for Box<T> {
type Target = T;
fn into_raw(self) -> *const Self::Target {
Box::into_raw(self) as *const _
}
unsafe fn from_raw(ptr: *const Self::Target) -> Self {
Box::from_raw(ptr as *mut _)
}
}
impl<T> OwnerPtr for Arc<T> {
type Target = T;
fn into_raw(self) -> *const Self::Target {
Arc::into_raw(self)
}
unsafe fn from_raw(ptr: *const Self::Target) -> Self {
Arc::from_raw(ptr)
}
}
impl<P> OwnerPtr for Option<P>
where
P: OwnerPtr,
// We cannot support fat pointers, e.g., when `Target` is `dyn Trait`.
// This is because Rust does not allow fat null pointers. Yet,
// we need the null pointer to represent `None`.
// See https://github.com/rust-lang/rust/issues/66316.
<P as OwnerPtr>::Target: Sized,
{
type Target = P::Target;
fn into_raw(self) -> *const Self::Target {
self.map(|p| <P as OwnerPtr>::into_raw(p))
.unwrap_or(core::ptr::null())
}
unsafe fn from_raw(ptr: *const Self::Target) -> Self {
if ptr.is_null() {
None
} else {
Some(<P as OwnerPtr>::from_raw(ptr))
}
}
}
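
As a quick illustration of the contract, the round trip below pairs into_raw with from_raw for a Box. It is a minimal usage sketch, not part of the commit.

use alloc::boxed::Box;

fn owner_ptr_round_trip() {
    let boxed: Box<u32> = Box::new(7);

    // Ownership moves into the raw pointer; nothing is freed here.
    let raw: *const u32 = OwnerPtr::into_raw(boxed);

    // Safety: `raw` was produced by `into_raw` above and is consumed exactly once.
    let boxed: Box<u32> = unsafe { OwnerPtr::from_raw(raw) };
    assert_eq!(*boxed, 7);
    // Dropping `boxed` here frees the allocation as usual.
}

Note that for the `Option<P>` impl above, a null pointer encodes `None`, which is why the `Target: Sized` bound is required: Rust has no null value for fat pointers.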