Remove guard generics from xarray::Cursor

Ruihan Li 2025-05-13 23:57:26 +08:00 committed by Tate, Hongliang Tian
parent 72fb0752ae
commit 14388da548
5 changed files with 79 additions and 72 deletions
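In short, the guard type parameter `G` is now erased at construction time: the constructors still accept any `G: AsAtomicModeGuard`, but the cursor stores only a `&dyn InAtomicMode`, so `G` disappears from the `Cursor`, `CursorMut`, and `Range` types. A minimal caller-side sketch of the effect (written as if inside the xarray crate; `disable_preempt` and `Cursor::load` are assumed from ostd and the existing crate API, not shown in this diff):

use alloc::sync::Arc;
use ostd::task::disable_preempt;
use crate::XArray;

fn has_item_at_zero(xa: &XArray<Arc<u32>>) -> bool {
    // Any `G: AsAtomicModeGuard` still works at the call site...
    let guard = disable_preempt();
    // ...but the cursor type no longer carries it:
    // before: Cursor<'_, Arc<u32>, NoneMark, DisabledPreemptGuard>
    // after:  Cursor<'_, Arc<u32>, NoneMark>
    let mut cursor = xa.cursor(&guard, 0);
    cursor.load().is_some()
}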


@@ -4,8 +4,8 @@ use alloc::sync::Arc;
use core::ops::{Deref, DerefMut};
use ostd::{
sync::{non_null::NonNullPtr, SpinGuardian},
task::{atomic_mode::AsAtomicModeGuard, DisabledPreemptGuard},
sync::{non_null::NonNullPtr, SpinGuardian, SpinLockGuard},
task::atomic_mode::{AsAtomicModeGuard, InAtomicMode},
util::Either,
};
@@ -98,7 +98,7 @@ impl<'a, P: NonNullPtr + Send + Sync> CursorState<'a, P> {
/// point by performing a [`Cursor::reset`] operation.
///
/// The typical way to obtain a `Cursor` instance is to call [`XArray::cursor`].
pub struct Cursor<'a, P, M = NoneMark, G = DisabledPreemptGuard>
pub struct Cursor<'a, P, M = NoneMark>
where
P: NonNullPtr + Send + Sync,
{
@@ -107,19 +107,22 @@ where
/// The target index of the cursor.
index: u64,
/// The atomic-mode guard that protects cursor operations.
guard: &'a G,
guard: &'a dyn InAtomicMode,
/// The state of the cursor.
state: CursorState<'a, P>,
}
impl<'a, P: NonNullPtr + Send + Sync, M, G: AsAtomicModeGuard> Cursor<'a, P, M, G> {
impl<'a, P: NonNullPtr + Send + Sync, M> Cursor<'a, P, M> {
/// Creates a `Cursor` to perform read-related operations in the `XArray`.
pub(super) fn new(xa: &'a XArray<P, M>, guard: &'a G, index: u64) -> Self {
let _ = guard.as_atomic_mode_guard();
pub(super) fn new<G: AsAtomicModeGuard>(
xa: &'a XArray<P, M>,
guard: &'a G,
index: u64,
) -> Self {
Self {
xa,
index,
guard,
guard: guard.as_atomic_mode_guard(),
state: CursorState::Inactive,
}
}
@@ -229,7 +232,7 @@ impl<'a, P: NonNullPtr + Send + Sync, M, G: AsAtomicModeGuard> Cursor<'a, P, M,
}
}
impl<P: NonNullPtr + Send + Sync, M: Into<XMark>, G: AsAtomicModeGuard> Cursor<'_, P, M, G> {
impl<P: NonNullPtr + Send + Sync, M: Into<XMark>> Cursor<'_, P, M> {
/// Checks whether the target item is marked with the input `mark`.
///
/// If the target item does not exist, this method will also return false.
@@ -253,16 +256,26 @@ impl<P: NonNullPtr + Send + Sync, M: Into<XMark>, G: AsAtomicModeGuard> Cursor<'
/// The typical way to obtain a `CursorMut` instance is to call [`LockedXArray::cursor_mut`].
///
/// [`LockedXArray::cursor_mut`]: super::LockedXArray::cursor_mut
pub struct CursorMut<'a, P, M, G>(Cursor<'a, P, M, XLockGuard<'a, G>>)
pub struct CursorMut<'a, P, M>(Cursor<'a, P, M>)
where
P: NonNullPtr + Send + Sync,
G: SpinGuardian;
P: NonNullPtr + Send + Sync;
impl<'a, P: NonNullPtr + Send + Sync, M, G: SpinGuardian> CursorMut<'a, P, M, G> {
pub(super) fn new(xa: &'a XArray<P, M>, guard: &'a XLockGuard<'a, G>, index: u64) -> Self {
impl<'a, P: NonNullPtr + Send + Sync, M> CursorMut<'a, P, M> {
/// Creates a `CursorMut` to perform read- and write-related operations in the `XArray`.
pub(super) fn new<G: SpinGuardian>(
xa: &'a XArray<P, M>,
guard: &'a SpinLockGuard<'a, (), G>,
index: u64,
) -> Self {
Self(Cursor::new(xa, guard, index))
}
/// Returns an `XLockGuard` that indicates the `XArray` is locked.
fn lock_guard(&self) -> XLockGuard {
// Having a `CursorMut` means that the `XArray` is locked.
XLockGuard(self.guard)
}
/// Increases the height of the `XArray` so that the `index`-th element can be stored.
fn reserve(&self, index: u64) {
if self.xa.head.read_with(self.guard).is_none() {
@@ -280,7 +293,7 @@ impl<'a, P: NonNullPtr + Send + Sync, M, G: SpinGuardian> CursorMut<'a, P, M, G>
}
let new_head = Arc::new(XNode::new_root(height.go_root()));
new_head.set_entry(self.guard, 0, Some(Either::Left(head.clone())));
new_head.set_entry(self.lock_guard(), 0, Some(Either::Left(head.clone())));
self.xa.head.update(Some(new_head));
}
@@ -317,7 +330,7 @@ impl<'a, P: NonNullPtr + Send + Sync, M, G: SpinGuardian> CursorMut<'a, P, M, G>
{
let new_node = XNode::new(current_node.height().go_leaf(), operation_offset);
let new_entry = Either::Left(Arc::new(new_node));
current_node.set_entry(self.guard, operation_offset, Some(new_entry));
current_node.set_entry(self.lock_guard(), operation_offset, Some(new_entry));
}
let next_node = current_node
@@ -337,7 +350,11 @@ impl<'a, P: NonNullPtr + Send + Sync, M, G: SpinGuardian> CursorMut<'a, P, M, G>
pub fn store(&mut self, item: P) {
self.expand_and_traverse_to_target();
let (node, operation_offset) = self.state.as_node().unwrap();
node.set_entry(self.guard, operation_offset, Some(Either::Right(item)));
node.set_entry(
self.lock_guard(),
operation_offset,
Some(Either::Right(item)),
);
}
/// Removes the item at the target index.
@@ -352,7 +369,7 @@ impl<'a, P: NonNullPtr + Send + Sync, M, G: SpinGuardian> CursorMut<'a, P, M, G>
.deref_target()
.entry_with(self.guard, off)
.and_then(|entry| entry.right());
node.set_entry(self.guard, off, None);
node.set_entry(self.lock_guard(), off, None);
res
})
}
@@ -362,7 +379,7 @@ impl<'a, P: NonNullPtr + Send + Sync, M, G: SpinGuardian> CursorMut<'a, P, M, G>
#[derive(Debug)]
pub struct SetMarkError;
impl<P: NonNullPtr + Send + Sync, M: Into<XMark>, G: SpinGuardian> CursorMut<'_, P, M, G> {
impl<P: NonNullPtr + Send + Sync, M: Into<XMark>> CursorMut<'_, P, M> {
/// Sets the input `mark` for the item at the target index.
///
/// # Errors
@@ -378,7 +395,7 @@ impl<P: NonNullPtr + Send + Sync, M: Into<XMark>, G: SpinGuardian> CursorMut<'_,
})
.map(|(node, off)| {
let mark_index = mark.into().index();
node.set_mark(self.guard, off, mark_index);
node.set_mark(self.lock_guard(), off, mark_index);
})
.ok_or(SetMarkError)
}
@@ -398,21 +415,21 @@ impl<P: NonNullPtr + Send + Sync, M: Into<XMark>, G: SpinGuardian> CursorMut<'_,
})
.map(|(node, off)| {
let mark_index = mark.into().index();
node.unset_mark(self.guard, off, mark_index);
node.unset_mark(self.lock_guard(), off, mark_index);
})
.ok_or(SetMarkError)
}
}
impl<'a, P: NonNullPtr + Send + Sync, M, G: SpinGuardian> Deref for CursorMut<'a, P, M, G> {
type Target = Cursor<'a, P, M, XLockGuard<'a, G>>;
impl<'a, P: NonNullPtr + Send + Sync, M> Deref for CursorMut<'a, P, M> {
type Target = Cursor<'a, P, M>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<P: NonNullPtr + Send + Sync, M, G: SpinGuardian> DerefMut for CursorMut<'_, P, M, G> {
impl<P: NonNullPtr + Send + Sync, M> DerefMut for CursorMut<'_, P, M> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
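
The `lock_guard` helper above relies on an invariant rather than on a stored lock guard: a `CursorMut` can only be created from a locked `XArray` (via `LockedXArray::cursor_mut`), so it may manufacture the `XLockGuard` proof token on demand from the erased `&dyn InAtomicMode` it already holds. A self-contained sketch of that proof-token pattern (illustrative names only, not the crate's API):

#[derive(Clone, Copy)]
struct LockProof<'a>(&'a ()); // stand-in for XLockGuard<'a>(&'a dyn InAtomicMode)

struct WriteCursor<'a> {
    ctx: &'a (), // stand-in for the erased atomic-mode guard
}

impl<'a> WriteCursor<'a> {
    // Having a WriteCursor means the structure is locked, so the proof can be re-derived.
    fn lock_proof(&self) -> LockProof<'_> {
        LockProof(self.ctx)
    }

    fn store(&mut self) {
        set_entry(self.lock_proof()); // write operations demand the token by value
    }
}

fn set_entry(_proof: LockProof<'_>) {
    // Mutation is only reachable from code that can produce a LockProof.
}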


@@ -62,7 +62,7 @@ use ostd::{
non_null::NonNullPtr, LocalIrqDisabled, PreemptDisabled, RcuOption, SpinGuardian, SpinLock,
SpinLockGuard,
},
task::atomic_mode::AsAtomicModeGuard,
task::atomic_mode::{AsAtomicModeGuard, InAtomicMode},
};
pub use range::Range;
@@ -112,8 +112,9 @@ where
_marker: PhantomData<M>,
}
/// A type that represents the spinlock guard used in [`XArray`].
pub type XLockGuard<'a, G> = SpinLockGuard<'a, (), G>;
/// A marker type that indicates the [`XArray`] is locked.
#[derive(Clone, Copy)]
struct XLockGuard<'a>(&'a dyn InAtomicMode);
impl<P: NonNullPtr + Send + Sync, M> Default for XArray<P, M> {
fn default() -> Self {
@@ -154,7 +155,7 @@ impl<P: NonNullPtr + Send + Sync, M> XArray<P, M> {
&'a self,
guard: &'a G,
index: u64,
) -> Cursor<'a, P, M, G> {
) -> Cursor<'a, P, M> {
Cursor::new(self, guard, index)
}
@@ -163,7 +164,7 @@ impl<P: NonNullPtr + Send + Sync, M> XArray<P, M> {
&'a self,
guard: &'a G,
range: core::ops::Range<u64>,
) -> Range<'a, P, M, G> {
) -> Range<'a, P, M> {
let cursor = self.cursor(guard, range.start);
Range::new(cursor, range.end)
}
@@ -206,15 +207,16 @@ impl<P: NonNullPtr + Send + Sync, M, G: SpinGuardian> LockedXArray<'_, P, M, G>
/// Clears the corresponding [`XArray`].
pub fn clear(&mut self) {
if let Some(head) = self.xa.head.read_with(&self.guard) {
head.clear_parent(&self.guard);
// Having a `LockedXArray` means that the `XArray` is locked.
head.clear_parent(XLockGuard(self.guard.as_atomic_mode_guard()));
}
self.xa.head.update(None);
}
/// Creates a [`CursorMut`] to perform read- and write-related operations.
pub fn cursor_mut(&mut self, index: u64) -> cursor::CursorMut<'_, P, M, G> {
cursor::CursorMut::new(self.xa, &self.guard, index)
pub fn cursor_mut(&mut self, index: u64) -> CursorMut<'_, P, M> {
CursorMut::new(self.xa, &self.guard, index)
}
/// Stores the provided item at the target index.
@@ -232,12 +234,12 @@ impl<P: NonNullPtr + Send + Sync, M, G: SpinGuardian> LockedXArray<'_, P, M, G>
}
/// Creates a [`Cursor`] to perform read-related operations.
pub fn cursor(&self, index: u64) -> Cursor<'_, P, M, XLockGuard<G>> {
pub fn cursor(&self, index: u64) -> Cursor<'_, P, M> {
Cursor::new(self.xa, &self.guard, index)
}
/// Creates a [`Range`] to immutably iterate over the specified `range`.
pub fn range(&self, range: core::ops::Range<u64>) -> Range<'_, P, M, XLockGuard<G>> {
pub fn range(&self, range: core::ops::Range<u64>) -> Range<'_, P, M> {
let cursor = self.cursor(range.start);
Range::new(cursor, range.end)
}
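
With `XLockGuard` now a private `Copy` marker over `&'a dyn InAtomicMode` instead of a public alias for `SpinLockGuard<'a, (), G>`, the guardian type `G` of the underlying spinlock no longer leaks into the cursor and range types returned here. A usage sketch under the assumption that `XArray::lock` returns the `LockedXArray` used above (illustrative, not part of the diff):

use alloc::sync::Arc;
use crate::XArray;

fn insert_at_three(xa: &XArray<Arc<u32>>) {
    let mut locked = xa.lock(); // assumed constructor of LockedXArray
    // before: CursorMut<'_, Arc<u32>, NoneMark, G>; after: CursorMut<'_, Arc<u32>, NoneMark>
    let mut cursor = locked.cursor_mut(3);
    cursor.store(Arc::new(42)); // CursorMut::store is shown in the cursor diff above
}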


@@ -2,8 +2,6 @@
use core::sync::atomic::{AtomicU64, Ordering};
use ostd::sync::SpinGuardian;
use crate::XLockGuard;
/// A mark used to indicate which slots in an [`XNode`] contain items that have been marked.
@@ -28,7 +26,7 @@ impl Mark {
Self::new(0)
}
pub fn update<G: SpinGuardian>(&self, offset: u8, set: bool, _guard: &XLockGuard<G>) -> bool {
pub fn update(&self, _guard: XLockGuard, offset: u8, set: bool) -> bool {
let old_val = self.inner.load(Ordering::Acquire);
let new_val = if set {
old_val | (1 << offset as u64)
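
The hunk above is cut off after the `set` branch. For reference, a sketch of the complete `update` body, reconstructed from how callers use its return value; the `else` branch and the memory orderings are assumptions, so treat this as illustrative:

pub fn update(&self, _guard: XLockGuard, offset: u8, set: bool) -> bool {
    let old_val = self.inner.load(Ordering::Acquire);
    let new_val = if set {
        old_val | (1 << offset as u64)
    } else {
        // assumed: clearing mirrors setting by masking the bit off
        old_val & !(1 << offset as u64)
    };
    self.inner.store(new_val, Ordering::Release); // ordering is an assumption
    old_val != new_val // callers propagate marks only when the bit actually changed
}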


@@ -7,8 +7,8 @@ use core::{
};
use ostd::{
sync::{non_null::NonNullPtr, RcuOption, SpinGuardian},
task::atomic_mode::AsAtomicModeGuard,
sync::{non_null::NonNullPtr, RcuOption},
task::atomic_mode::InAtomicMode,
util::Either,
};
@@ -156,7 +156,7 @@ impl<P: NonNullPtr + Send + Sync> XNode<P> {
self.height
}
pub fn parent<'a>(&'a self, guard: &'a dyn AsAtomicModeGuard) -> Option<NodeEntryRef<'a, P>> {
pub fn parent<'a>(&'a self, guard: &'a dyn InAtomicMode) -> Option<NodeEntryRef<'a, P>> {
let parent = self.parent.read_with(guard)?;
Some(parent)
}
@@ -167,7 +167,7 @@ impl<P: NonNullPtr + Send + Sync> XNode<P> {
pub fn entry_with<'a>(
&'a self,
guard: &'a dyn AsAtomicModeGuard,
guard: &'a dyn InAtomicMode,
offset: u8,
) -> Option<XEntryRef<'a, P>> {
self.slots[offset as usize].read_with(guard)
@@ -188,17 +188,17 @@ impl<P: NonNullPtr + Send + Sync> XNode<P> {
impl<P: NonNullPtr + Send + Sync> XNode<P> {
/// Sets the parent pointer of this node to the given `parent`.
fn set_parent<G: SpinGuardian>(&self, _guard: &XLockGuard<G>, parent: NodeEntry<P>) {
fn set_parent(&self, _guard: XLockGuard, parent: NodeEntry<P>) {
self.parent.update(Some(parent));
}
/// Clears the parent pointers of this node and all its descendant nodes.
///
/// This method should be invoked when the node is being removed from the tree.
pub fn clear_parent<G: SpinGuardian>(&self, guard: &XLockGuard<G>) {
pub fn clear_parent(&self, guard: XLockGuard) {
self.parent.update(None);
for child in self.slots.iter() {
if let Some(node) = child.read_with(guard).and_then(|entry| entry.left()) {
if let Some(node) = child.read_with(guard.0).and_then(|entry| entry.left()) {
node.clear_parent(guard);
}
}
@@ -211,13 +211,8 @@ impl<P: NonNullPtr + Send + Sync> XNode<P> {
/// updated according to whether the new node contains marked items.
///
/// This method will also propagate the updated marks to the ancestors.
pub fn set_entry<G: SpinGuardian>(
self: &Arc<Self>,
guard: &XLockGuard<G>,
offset: u8,
entry: Option<XEntry<P>>,
) {
let old_entry = self.slots[offset as usize].read_with(guard);
pub fn set_entry(self: &Arc<Self>, guard: XLockGuard, offset: u8, entry: Option<XEntry<P>>) {
let old_entry = self.slots[offset as usize].read_with(guard.0);
if let Some(node) = old_entry.and_then(|entry| entry.left()) {
node.clear_parent(guard);
}
@@ -245,8 +240,8 @@ impl<P: NonNullPtr + Send + Sync> XNode<P> {
///
/// This method will also update the marks on the ancestors of this node
/// if necessary to ensure that the marks on the ancestors are up to date.
pub fn set_mark<G: SpinGuardian>(&self, guard: &XLockGuard<G>, offset: u8, mark: usize) {
let changed = self.marks[mark].update(offset, true, guard);
pub fn set_mark(&self, guard: XLockGuard, offset: u8, mark: usize) {
let changed = self.marks[mark].update(guard, offset, true);
if changed {
self.propagate_mark(guard, mark);
}
@@ -256,8 +251,8 @@ impl<P: NonNullPtr + Send + Sync> XNode<P> {
///
/// This method will also update the marks on the ancestors of this node
/// if necessary to ensure that the marks on the ancestors are up to date.
pub fn unset_mark<G: SpinGuardian>(&self, guard: &XLockGuard<G>, offset: u8, mark: usize) {
let changed = self.marks[mark].update(offset, false, guard);
pub fn unset_mark(&self, guard: XLockGuard, offset: u8, mark: usize) {
let changed = self.marks[mark].update(guard, offset, false);
if changed {
self.propagate_mark(guard, mark);
}
@@ -271,14 +266,14 @@ impl<P: NonNullPtr + Send + Sync> XNode<P> {
///
/// This method will also update the marks on the ancestors of this node
/// if necessary to ensure that the marks on the ancestors are up to date.
fn update_mark<G: SpinGuardian>(&self, guard: &XLockGuard<G>, offset: u8) {
let entry = self.slots[offset as usize].read_with(guard);
fn update_mark(&self, guard: XLockGuard, offset: u8) {
let entry = self.slots[offset as usize].read_with(guard.0);
let Some(node) = entry.and_then(|entry| entry.left()) else {
return;
};
for i in 0..NUM_MARKS {
let changed = self.marks[i].update(offset, !node.is_mark_clear(i), guard);
let changed = self.marks[i].update(guard, offset, !node.is_mark_clear(i));
if changed {
self.propagate_mark(guard, i);
}
@@ -289,13 +284,13 @@ impl<P: NonNullPtr + Send + Sync> XNode<P> {
///
/// This method must be called after the marks are updated to ensure that the marks on the
/// ancestors are up to date.
fn propagate_mark<G: SpinGuardian>(&self, guard: &XLockGuard<G>, mark: usize) {
let Some(parent) = self.parent(guard) else {
fn propagate_mark(&self, guard: XLockGuard, mark: usize) {
let Some(parent) = self.parent(guard.0) else {
return;
};
let changed =
parent.marks[mark].update(self.offset_in_parent, !self.is_mark_clear(mark), guard);
parent.marks[mark].update(guard, self.offset_in_parent, !self.is_mark_clear(mark));
if changed {
parent.propagate_mark(guard, mark);
}
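
Because the new `XLockGuard` is `Copy`, `clear_parent`, `update_mark`, and `propagate_mark` can pass it by value into recursive calls and project its inner `&dyn InAtomicMode` (`guard.0`) wherever only an atomic-mode guard is needed for RCU reads. A self-contained sketch of that shape (illustrative types, not the crate's):

#[derive(Clone, Copy)]
struct Token<'a>(&'a u8); // stand-in for XLockGuard<'a>(&'a dyn InAtomicMode)

struct Node {
    children: Vec<Node>,
}

fn clear_subtree(node: &Node, token: Token<'_>) {
    let _read_guard = token.0; // read paths need only the inner reference (cf. guard.0)
    for child in &node.children {
        clear_subtree(child, token); // the token is Copy: no clone or reborrow needed
    }
}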


@@ -1,9 +1,6 @@
// SPDX-License-Identifier: MPL-2.0
use ostd::{
sync::non_null::NonNullPtr,
task::{atomic_mode::AsAtomicModeGuard, DisabledPreemptGuard},
};
use ostd::sync::non_null::NonNullPtr;
use crate::{cursor::Cursor, mark::NoneMark};
@@ -13,23 +10,21 @@ use crate::{cursor::Cursor, mark::NoneMark};
///
/// [`XArray`]: super::XArray
/// [`XArray::range`]: super::XArray::range
pub struct Range<'a, P, M = NoneMark, G = DisabledPreemptGuard>
pub struct Range<'a, P, M = NoneMark>
where
P: NonNullPtr + Send + Sync,
{
cursor: Cursor<'a, P, M, G>,
cursor: Cursor<'a, P, M>,
end: u64,
}
impl<'a, P: NonNullPtr + Send + Sync, M, G: AsAtomicModeGuard> Range<'a, P, M, G> {
pub(super) fn new(cursor: Cursor<'a, P, M, G>, end: u64) -> Self {
impl<'a, P: NonNullPtr + Send + Sync, M> Range<'a, P, M> {
pub(super) fn new(cursor: Cursor<'a, P, M>, end: u64) -> Self {
Range { cursor, end }
}
}
impl<'a, P: NonNullPtr + Send + Sync, M, G: AsAtomicModeGuard> core::iter::Iterator
for Range<'a, P, M, G>
{
impl<'a, P: NonNullPtr + Send + Sync, M> core::iter::Iterator for Range<'a, P, M> {
type Item = (u64, P::Ref<'a>);
fn next(&mut self) -> Option<Self::Item> {