Introduce RCU-based XArray

This commit is contained in:
Chen Chengjun 2025-04-18 10:41:54 +08:00 committed by Tate, Hongliang Tian
parent 6c0827b681
commit c3dd607777
12 changed files with 1172 additions and 2 deletions

10
Cargo.lock generated
View File

@ -226,6 +226,7 @@ dependencies = [
"time",
"typeflags",
"typeflags-util",
"xarray 0.1.0",
"xmas-elf 0.8.0",
]
@ -1319,7 +1320,7 @@ dependencies = [
"volatile 0.6.1",
"x86",
"x86_64",
"xarray",
"xarray 0.1.0 (git+https://github.com/asterinas/xarray)",
]
[[package]]
@ -1962,6 +1963,13 @@ dependencies = [
"volatile 0.4.6",
]
[[package]]
name = "xarray"
version = "0.1.0"
dependencies = [
"ostd",
]
[[package]]
name = "xarray"
version = "0.1.0"

View File

@ -35,6 +35,7 @@ members = [
"kernel/libs/typeflags",
"kernel/libs/typeflags-util",
"kernel/libs/atomic-integer-wrapper",
"kernel/libs/xarray",
]
exclude = [
"kernel/libs/comp-sys/cargo-component",

View File

@ -176,7 +176,8 @@ OSDK_CRATES := \
kernel/comps/time \
kernel/comps/virtio \
kernel/libs/aster-util \
kernel/libs/aster-bigtcp
kernel/libs/aster-bigtcp \
kernel/libs/xarray
# OSDK dependencies
OSDK_SRC_FILES := \

View File

@ -31,6 +31,7 @@ atomic-integer-wrapper = { path = "libs/atomic-integer-wrapper" }
id-alloc = { path = "../ostd/libs/id-alloc" }
int-to-c-enum = { path = "libs/int-to-c-enum" }
cpio-decoder = { path = "libs/cpio-decoder" }
xarray = { path = "libs/xarray" }
intrusive-collections = "0.9.5"
paste = "1.0"
time = { version = "0.3", default-features = false, features = ["alloc"] }

View File

@ -0,0 +1,12 @@
[package]
name = "xarray"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
ostd = { path = "../../../ostd" }
[lints]
workspace = true

View File

@ -0,0 +1,419 @@
// SPDX-License-Identifier: MPL-2.0
use alloc::sync::Arc;
use core::ops::{Deref, DerefMut};
use ostd::{
sync::{non_null::NonNullPtr, SpinGuardian},
task::{atomic_mode::AsAtomicModeGuard, DisabledPreemptGuard},
util::Either,
};
use crate::{
entry::NodeEntryRef,
mark::{NoneMark, XMark},
node::{Height, XNode},
XArray, XLockGuard, SLOT_SIZE,
};
/// A type representing the state of a [`Cursor`] or a [`CursorMut`].
///
/// Currently, there are two possible states:
/// - `Inactive`: The cursor is not positioned on any node.
/// - `AtNode`: The cursor is positioned on some node and holds a shared reference
/// to it.
///
/// A cursor never ends up on an interior node. In other words, when methods
/// of `Cursor` or `CursorMut` finish, the cursor will either not positioned on any node
/// or positioned on some leaf node.
enum CursorState<'a, P>
where
    P: NonNullPtr + Send + Sync,
{
    /// The cursor is not positioned on any node.
    Inactive,
    /// The cursor holds a shared reference to a node, together with the slot
    /// offset within that node that corresponds to the cursor's target index.
    AtNode {
        node: NodeEntryRef<'a, P>,
        operation_offset: u8,
    },
}

impl<P: NonNullPtr + Send + Sync> Default for CursorState<'_, P> {
    // `Inactive` is the default so that `core::mem::take` (used during
    // traversal) can move the state out while leaving a valid value behind.
    fn default() -> Self {
        Self::Inactive
    }
}
impl<'a, P: NonNullPtr + Send + Sync> CursorState<'a, P> {
    /// Positions the state at `node`, computing the slot offset for `index`.
    fn move_to(&mut self, node: NodeEntryRef<'a, P>, index: u64) {
        let offset = node.entry_offset(index);
        *self = Self::AtNode {
            node,
            operation_offset: offset,
        };
    }

    /// Returns the node reference and slot offset if positioned, by borrow.
    fn as_node(&self) -> Option<(&NodeEntryRef<'a, P>, u8)> {
        if let Self::AtNode {
            node,
            operation_offset,
        } = self
        {
            Some((node, *operation_offset))
        } else {
            None
        }
    }

    /// Consumes the state, yielding the node reference and slot offset if positioned.
    fn into_node(self) -> Option<(NodeEntryRef<'a, P>, u8)> {
        if let Self::AtNode {
            node,
            operation_offset,
        } = self
        {
            Some((node, operation_offset))
        } else {
            None
        }
    }

    /// Returns whether the cursor is currently positioned on some node.
    fn is_at_node(&self) -> bool {
        matches!(self, Self::AtNode { .. })
    }

    /// Returns whether the cursor is currently positioned on a leaf node.
    fn is_at_leaf(&self) -> bool {
        matches!(self, Self::AtNode { node, .. } if node.is_leaf())
    }
}
/// A `Cursor` can traverse in the [`XArray`] by setting or increasing the
/// target index and can perform read-only operations to the target item.
///
/// Multiple `Cursor`s of the same `XArray` can exist simultaneously, and their existence
/// does not conflict with that of a [`CursorMut`].
///
/// A newly created `Cursor` can read all modifications that occurred before its creation.
/// Additionally, a `Cursor` can ensure it reads all modifications made before a specific
/// point by performing a [`Cursor::reset`] operation.
///
/// The typical way to obtain a `Cursor` instance is to call [`XArray::cursor`].
pub struct Cursor<'a, P, M = NoneMark, G = DisabledPreemptGuard>
where
    P: NonNullPtr + Send + Sync,
{
    /// The `XArray` where the cursor locates.
    xa: &'a XArray<P, M>,
    /// The target index of the cursor.
    index: u64,
    /// The atomic-mode guard that protects cursor operations.
    ///
    /// All node reads go through `read_with(self.guard)`, so the references
    /// held in `state` cannot outlive this guard.
    guard: &'a G,
    /// The state of the cursor.
    state: CursorState<'a, P>,
}
impl<'a, P: NonNullPtr + Send + Sync, M, G: AsAtomicModeGuard> Cursor<'a, P, M, G> {
    /// Creates a `Cursor` to perform read-related operations in the `XArray`.
    pub(super) fn new(xa: &'a XArray<P, M>, guard: &'a G, index: u64) -> Self {
        // Statically require `G` to be an atomic-mode guard; the returned
        // value itself is not needed.
        let _ = guard.as_atomic_mode_guard();

        Self {
            xa,
            index,
            guard,
            state: CursorState::Inactive,
        }
    }

    /// Traverses from the root node to the leaf node according to the target index.
    ///
    /// This method will not create new nodes. If the cursor can not reach the target
    /// leaf node, the cursor will remain the inactive state.
    fn traverse_to_target(&mut self) {
        // Already positioned from an earlier operation; nothing to do.
        if self.state.is_at_node() {
            return;
        }

        let Some(head) = self.xa.head.read_with(self.guard) else {
            return;
        };

        // If the tree is too short to contain `index`, the item cannot
        // exist; stay inactive instead of descending.
        let max_index = head.height().max_index();
        if max_index < self.index {
            return;
        }
        self.state.move_to(head, self.index);

        self.continue_traverse_to_target();
    }

    /// Traverses from an interior node to the leaf node according to the target index.
    ///
    /// This method will not create new nodes. If the cursor can not reach the target
    /// leaf node, the cursor will be reset to the inactive state.
    fn continue_traverse_to_target(&mut self) {
        while !self.state.is_at_leaf() {
            // Take the state out so that the node reference can be consumed
            // and replaced by the child's reference.
            let (current_node, operation_offset) =
                core::mem::take(&mut self.state).into_node().unwrap();

            let Some(next_node) = current_node
                .deref_target()
                .entry_with(self.guard, operation_offset)
                // Non-leaf slots store child nodes (`Left`), so `left()`
                // must succeed here.
                .map(|operated_entry| operated_entry.left().unwrap())
            else {
                // The slot is empty: the target leaf does not exist.
                self.reset();
                return;
            };

            self.state.move_to(next_node, self.index);
        }
    }

    /**** Public ****/

    /// Loads the item at the target index.
    ///
    /// If the target item exists, this method will return a [`NonNullPtr::Ref`]
    /// that acts exactly like a `&'_ P` wrapped in `Some(_)`. Otherwise, it will
    /// return `None`.
    pub fn load(&mut self) -> Option<P::Ref<'a>> {
        self.traverse_to_target();
        let (node, operation_offset) = self.state.as_node()?;
        node.deref_target()
            .entry_with(self.guard, operation_offset)
            // Leaf slots store items (`Right`); `right()` filters out
            // anything else.
            .and_then(|item_entry| item_entry.right())
    }

    /// Returns the target index of the cursor.
    pub fn index(&self) -> u64 {
        self.index
    }

    /// Resets the target index to `index`.
    pub fn reset_to(&mut self, index: u64) {
        self.reset();
        self.index = index;
    }

    /// Resets the cursor to the inactive state.
    pub fn reset(&mut self) {
        self.state = CursorState::Inactive;
    }

    /// Increases the target index of the cursor by one.
    ///
    /// Once increased, the cursor will be positioned on the corresponding leaf node
    /// if the leaf node exists.
    pub fn next(&mut self) {
        // Index overflow is a logic error, so panic instead of wrapping.
        self.index = self.index.checked_add(1).unwrap();

        if !self.state.is_at_node() {
            return;
        }

        let (mut current_node, mut operation_offset) =
            core::mem::take(&mut self.state).into_node().unwrap();

        operation_offset += 1;
        // Climb towards the root while the new offset runs past the last
        // slot of the current node.
        while operation_offset == SLOT_SIZE as u8 {
            let Some(parent_node) = current_node.deref_target().parent(self.guard) else {
                // Reached the root: the new index lies beyond the tree.
                self.reset();
                return;
            };

            operation_offset = current_node.offset_in_parent() + 1;
            current_node = parent_node;
        }

        self.state.move_to(current_node, self.index);
        self.continue_traverse_to_target();
    }
}
impl<P: NonNullPtr + Send + Sync, M: Into<XMark>, G: AsAtomicModeGuard> Cursor<'_, P, M, G> {
    /// Checks whether the target item is marked with the input `mark`.
    ///
    /// If the target item does not exist, this method will also return false.
    pub fn is_marked(&mut self, mark: M) -> bool {
        self.traverse_to_target();
        let mark_index = mark.into().index();
        self.state
            .as_node()
            .is_some_and(|(node, off)| node.is_marked(off, mark_index))
    }
}
/// A `CursorMut` can traverse in the [`XArray`] by setting or increasing the
/// target index and can perform read-write operations to the target item.
///
/// An `XArray` can only have one `CursorMut` at a time, but a `CursorMut` can coexist
/// with multiple `Cursors` simultaneously.
///
/// The read-related operations of a `CursorMut` always retrieve up-to-date information.
///
/// The typical way to obtain a `CursorMut` instance is to call [`LockedXArray::cursor_mut`].
///
/// [`LockedXArray::cursor_mut`]: super::LockedXArray::cursor_mut
// A `CursorMut` is a thin wrapper around a `Cursor` whose guard is the
// `XArray`'s lock guard; all read-only operations are inherited through the
// `Deref`/`DerefMut` implementations below.
pub struct CursorMut<'a, P, M, G>(Cursor<'a, P, M, XLockGuard<'a, G>>)
where
    P: NonNullPtr + Send + Sync,
    G: SpinGuardian;
impl<'a, P: NonNullPtr + Send + Sync, M, G: SpinGuardian> CursorMut<'a, P, M, G> {
    /// Creates a `CursorMut` to perform read- and write-related operations in
    /// the `XArray`, protected by the `XArray`'s lock guard.
    pub(super) fn new(xa: &'a XArray<P, M>, guard: &'a XLockGuard<'a, G>, index: u64) -> Self {
        Self(Cursor::new(xa, guard, index))
    }

    /// Increases the height of the `XArray` so that the `index`-th element can be stored.
    fn reserve(&self, index: u64) {
        if self.xa.head.read_with(self.guard).is_none() {
            // The tree is empty: create a root that is just tall enough.
            let height = Height::from_index(index);
            let new_head = Arc::new(XNode::new_root(height));
            self.xa.head.update(Some(new_head));
            return;
        };

        // Grow one level at a time; the old head becomes the 0-th child of
        // each new root.
        loop {
            let head = self.xa.head.read_with(self.guard).unwrap();
            let height = head.height();

            if height.max_index() >= index {
                return;
            }

            let new_head = Arc::new(XNode::new_root(height.go_root()));
            new_head.set_entry(self.guard, 0, Some(Either::Left(head.clone())));

            self.xa.head.update(Some(new_head));
        }
    }

    /// Traverses from the root node to the leaf node according to the target index.
    ///
    /// This method will potentially create new nodes.
    fn expand_and_traverse_to_target(&mut self) {
        if self.state.is_at_node() {
            return;
        }

        let head = {
            // Make sure the tree is tall enough before descending.
            self.reserve(self.index);
            self.xa.head.read_with(self.guard).unwrap()
        };

        self.0.state.move_to(head, self.0.index);
        self.continue_traverse_to_target_mut();
    }

    /// Traverses from an interior node to the leaf node according to the target index.
    ///
    /// This method will potentially create new nodes.
    fn continue_traverse_to_target_mut(&mut self) {
        while !self.state.is_at_leaf() {
            let (current_node, operation_offset) =
                core::mem::take(&mut self.state).into_node().unwrap();

            // Create the missing child node on demand.
            if current_node
                .entry_with(self.guard, operation_offset)
                .is_none()
            {
                let new_node = XNode::new(current_node.height().go_leaf(), operation_offset);
                let new_entry = Either::Left(Arc::new(new_node));
                current_node.set_entry(self.guard, operation_offset, Some(new_entry));
            }

            // The slot is now guaranteed to hold a child node (`Left`).
            let next_node = current_node
                .deref_target()
                .entry_with(self.guard, operation_offset)
                .unwrap()
                .left()
                .unwrap();

            self.0.state.move_to(next_node, self.0.index);
        }
    }

    /**** Public ****/

    /// Stores a new `item` at the target index.
    pub fn store(&mut self, item: P) {
        self.expand_and_traverse_to_target();
        let (node, operation_offset) = self.state.as_node().unwrap();
        node.set_entry(self.guard, operation_offset, Some(Either::Right(item)));
    }

    /// Removes the item at the target index.
    ///
    /// Returns the removed item if it previously exists.
    //
    // TODO: Remove the interior node once it becomes empty.
    pub fn remove(&mut self) -> Option<P::Ref<'a>> {
        self.traverse_to_target();
        self.state.as_node().and_then(|(node, off)| {
            // Read the old item before clearing the slot so it can be returned.
            let res = node
                .deref_target()
                .entry_with(self.guard, off)
                .and_then(|entry| entry.right());
            node.set_entry(self.guard, off, None);
            res
        })
    }
}
/// An error indicating that the mark cannot be set because the item does not exist.
///
/// Returned by [`CursorMut::set_mark`] and [`CursorMut::unset_mark`].
#[derive(Debug)]
pub struct SetMarkError;
impl<P: NonNullPtr + Send + Sync, M: Into<XMark>, G: SpinGuardian> CursorMut<'_, P, M, G> {
    /// Sets (`set == true`) or unsets (`set == false`) `mark` on the item at
    /// the target index.
    ///
    /// Shared implementation of [`Self::set_mark`] and [`Self::unset_mark`],
    /// which were previously duplicated except for a single method call.
    fn update_target_mark(&mut self, mark: M, set: bool) -> Result<(), SetMarkError> {
        self.traverse_to_target();
        self.state
            .as_node()
            // Marks may only be applied to slots that hold an item entry.
            .filter(|(node, off)| {
                node.entry_with(self.guard, *off)
                    .is_some_and(|entry| entry.is_right())
            })
            .map(|(node, off)| {
                let mark_index = mark.into().index();
                if set {
                    node.set_mark(self.guard, off, mark_index);
                } else {
                    node.unset_mark(self.guard, off, mark_index);
                }
            })
            .ok_or(SetMarkError)
    }

    /// Sets the input `mark` for the item at the target index.
    ///
    /// # Errors
    ///
    /// This method will fail with an error if the target item does not exist.
    pub fn set_mark(&mut self, mark: M) -> Result<(), SetMarkError> {
        self.update_target_mark(mark, true)
    }

    /// Unsets the input `mark` for the item at the target index.
    ///
    /// # Errors
    ///
    /// This method will fail with an error if the target item does not exist.
    pub fn unset_mark(&mut self, mark: M) -> Result<(), SetMarkError> {
        self.update_target_mark(mark, false)
    }
}
// Expose all read-only `Cursor` operations (e.g., `load`, `index`, `reset`)
// on `CursorMut` without re-implementing them.
impl<'a, P: NonNullPtr + Send + Sync, M, G: SpinGuardian> Deref for CursorMut<'a, P, M, G> {
    type Target = Cursor<'a, P, M, XLockGuard<'a, G>>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<P: NonNullPtr + Send + Sync, M, G: SpinGuardian> DerefMut for CursorMut<'_, P, M, G> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

View File

@ -0,0 +1,31 @@
// SPDX-License-Identifier: MPL-2.0
use alloc::sync::Arc;
use ostd::{
sync::non_null::{ArcRef, NonNullPtr},
util::Either,
};
use crate::node::XNode;
/// A type serving as the basic unit of storage for `XArray`s, used in the head of the `XArray` and
/// the slots of `XNode`s.
///
/// There are the following types of `XEntry`:
/// - Internal entries: These are invisible to users. Currently these entries represent pointers to
///   `XNode`s (`Arc<XNode<P>>`).
/// - Item entries: These represent user-given items of type `P`.
///
/// An `XEntry` owns the item or node that it represents. Once an `XEntry` is generated from an
/// item or an `XNode`, the ownership of the item or the `XNode` will be transferred to the
/// `XEntry`.
pub(super) type XEntry<P> = Either<Arc<XNode<P>>, P>;

/// A type that represents the reference to `XEntry`.
pub(super) type XEntryRef<'a, P> = Either<ArcRef<'a, XNode<P>>, <P as NonNullPtr>::Ref<'a>>;

/// A type that represents the interior entries in `XArray`.
pub(super) type NodeEntry<P> = Arc<XNode<P>>;

/// A type that represents the reference to `NodeEntry`.
pub(super) type NodeEntryRef<'a, P> = ArcRef<'a, XNode<P>>;

View File

@ -0,0 +1,250 @@
// SPDX-License-Identifier: MPL-2.0
//! This crate introduces a RCU-based [`XArray`] implementation.
//!
//! `XArray` is an abstract data type functioning like an expansive array of items
//! where each item is a [`NonNullPtr`], such as `Arc<T>` or `Box<T>`. It facilitates
//! efficient sequential access to adjacent entries, supporting multiple concurrent reads
//! and exclusively allowing one write operation at a time.
//!
//! In addition to directly manipulating the `XArray`, users can typically achieve more
//! flexible operations by creating a [`Cursor`]/[`CursorMut`] within the `XArray`. Since the
//! `XArray` enforces a single write operation at any given time, performing write operations
//! requires first acquiring a [`LockedXArray`] by calling its `lock` method.
//!
//! `XArray` also provides a convenient way to mark individual items (see [`XMark`]).
//!
//! # Example
//!
//! ```
//! use alloc::sync::Arc;
//!
//! use xarray::*;
//! use ostd::task::disable_preempt;
//!
//! let xarray_arc: XArray<Arc<i32>> = XArray::new();
//! let value = Arc::new(10);
//! xarray_arc.lock().store(10, value);
//!
//! let guard = disable_preempt();
//! assert_eq!(*xarray_arc.load(&guard, 10).unwrap().as_ref(), 10);
//!
//! // Usage of the cursor
//!
//! let mut locked_xarray = xarray_arc.lock();
//! let mut cursor_mut = locked_xarray.cursor_mut(100);
//!
//! let value = Arc::new(100);
//! cursor_mut.store(value);
//! assert_eq!(*cursor_mut.load().unwrap().as_ref(), 100);
//! let mut cursor = xarray_arc.cursor(&guard, 100);
//! assert_eq!(*cursor.load().unwrap().as_ref(), 100);
//! ```
//!
//! # Background
//!
//! The XArray concept was originally introduced by Linux, which keeps the data structure of
//! [Linux Radix Trees](https://lwn.net/Articles/175432/).
#![no_std]
#![deny(unsafe_code)]
extern crate alloc;
use core::marker::PhantomData;
pub use cursor::{Cursor, CursorMut, SetMarkError};
use entry::NodeEntry;
use mark::NoneMark;
pub use mark::XMark;
use ostd::{
sync::{
non_null::NonNullPtr, LocalIrqDisabled, PreemptDisabled, RcuOption, SpinGuardian, SpinLock,
SpinLockGuard,
},
task::atomic_mode::AsAtomicModeGuard,
};
pub use range::Range;
mod cursor;
mod entry;
mod mark;
mod node;
mod range;
const BITS_PER_LAYER: usize = 6;
const SLOT_SIZE: usize = 1 << BITS_PER_LAYER;
const SLOT_MASK: usize = SLOT_SIZE - 1;
/// A RCU-based `XArray` implementation.
///
/// `XArray` is used to store [`NonNullPtr`], with the additional requirement that user-stored
/// pointers must have a minimum alignment of 2 bytes.
///
/// `XArray` is RCU-based, which means:
/// - Multiple concurrent readers are permitted.
/// - Only one writer is allowed at a time.
/// - Simultaneous read operations are allowed while writing.
/// - Readers may see stale data (see [`Cursor`] and [`CursorMut`] for more information).
///
/// Interaction with `XArray` is generally through `Cursor` and `CursorMut`. Similar to
/// XArray's read-write properties, multiple `Cursor`s may coexist (shared read access) and
/// only one `CursorMut` may exist at a time (exclusive write access).
///
/// To create a `Cursor`, users can invoke [`XArray::cursor`] with an atomic-guard.
/// To create a `CursorMut`, users need to call [`XArray::lock`] or [`XArray::lock_irq_disabled`]
/// first to obtain a [`LockedXArray`] first.
///
/// `XArray` enables marking of individual items for user convenience. Items can have up to three
/// distinct marks by default, with each mark independently maintained. Users can use self-defined
/// types as marks by implementing the `From<Type>` trait for [`XMark`]. Marking is also applicable
/// to internal nodes, indicating marked descendant nodes, though such marking is not transparent
/// to users.
pub struct XArray<P, M = NoneMark>
where
    P: NonNullPtr + Send + Sync,
{
    /// The root entry of the tree; `None` when the `XArray` is empty.
    head: RcuOption<NodeEntry<P>>,
    /// The lock that serializes all write operations (see [`LockedXArray`]).
    xlock: SpinLock<()>,
    /// Carries the user-chosen mark type without storing a value of it.
    _marker: PhantomData<M>,
}
/// A type that represents the spinlock guard used in [`XArray`].
pub type XLockGuard<'a, G> = SpinLockGuard<'a, (), G>;
impl<P: NonNullPtr + Send + Sync, M> Default for XArray<P, M> {
    /// Creates an empty `XArray`, equivalent to [`XArray::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl<P: NonNullPtr + Send + Sync, M> XArray<P, M> {
    /// Makes a new, empty `XArray`.
    pub const fn new() -> Self {
        Self {
            xlock: SpinLock::new(()),
            head: RcuOption::new_none(),
            _marker: PhantomData,
        }
    }

    /// Acquires the lock to perform mutable operations.
    pub fn lock(&self) -> LockedXArray<P, M> {
        let guard = self.xlock.lock();
        LockedXArray {
            xa: self,
            guard,
            _marker: PhantomData,
        }
    }

    /// Acquires the lock with local IRQs disabled to perform mutable operations.
    pub fn lock_irq_disabled(&self) -> LockedXArray<P, M, LocalIrqDisabled> {
        let guard = self.xlock.disable_irq().lock();
        LockedXArray {
            xa: self,
            guard,
            _marker: PhantomData,
        }
    }

    /// Creates a [`Cursor`] to perform read-related operations.
    pub fn cursor<'a, G: AsAtomicModeGuard>(
        &'a self,
        guard: &'a G,
        index: u64,
    ) -> Cursor<'a, P, M, G> {
        Cursor::new(self, guard, index)
    }

    /// Creates a [`Range`] to immutably iterate over the specified `range`.
    pub fn range<'a, G: AsAtomicModeGuard>(
        &'a self,
        guard: &'a G,
        range: core::ops::Range<u64>,
    ) -> Range<'a, P, M, G> {
        Range::new(self.cursor(guard, range.start), range.end)
    }

    /// Loads the `index`-th item.
    ///
    /// If the target item exists, it will be returned with `Some(_)`,
    /// otherwise, `None` will be returned.
    pub fn load<'a, G: AsAtomicModeGuard>(
        &'a self,
        guard: &'a G,
        index: u64,
    ) -> Option<P::Ref<'a>> {
        self.cursor(guard, index).load()
    }
}
impl<P: NonNullPtr + Sync + Send, M> Drop for XArray<P, M> {
    fn drop(&mut self) {
        // Clearing under the lock detaches all nodes from the tree (see
        // `LockedXArray::clear`) before the head itself is released.
        self.lock().clear();
    }
}
/// The locked [`XArray`] which obtains its inner spinlock.
///
/// The locked `XArray` is able to create `CursorMut` and do mutable operations.
/// There can only be one locked `XArray` at the same time.
pub struct LockedXArray<'a, P, M, G = PreemptDisabled>
where
    P: NonNullPtr + Send + Sync,
    G: SpinGuardian,
{
    /// The underlying `XArray`.
    xa: &'a XArray<P, M>,
    /// The guard of the `XArray`'s `xlock`; holding it grants exclusive
    /// write access.
    guard: SpinLockGuard<'a, (), G>,
    /// Ties the locked view to the same `P`/`M` parameters as the `XArray`.
    _marker: PhantomData<(P, M)>,
}
impl<P: NonNullPtr + Send + Sync, M, G: SpinGuardian> LockedXArray<'_, P, M, G> {
    /// Clears the corresponding [`XArray`].
    pub fn clear(&mut self) {
        // Detach all nodes from the tree before dropping the head (see
        // `XNode::clear_parent`).
        if let Some(head) = self.xa.head.read_with(&self.guard) {
            head.clear_parent(&self.guard);
        }

        self.xa.head.update(None);
    }

    /// Creates a [`CursorMut`] to perform read- and write-related operations.
    pub fn cursor_mut(&mut self, index: u64) -> cursor::CursorMut<'_, P, M, G> {
        cursor::CursorMut::new(self.xa, &self.guard, index)
    }

    /// Stores the provided item at the target index.
    pub fn store(&mut self, index: u64, item: P) {
        let mut cursor = self.cursor_mut(index);
        cursor.store(item)
    }

    /// Removes the item at the target index.
    ///
    /// Returns the removed item if some item was previously stored in the same position.
    pub fn remove(&mut self, index: u64) -> Option<P::Ref<'_>> {
        let mut cursor = self.cursor_mut(index);
        cursor.remove()
    }

    /// Creates a [`Cursor`] to perform read-related operations.
    pub fn cursor(&self, index: u64) -> Cursor<'_, P, M, XLockGuard<G>> {
        Cursor::new(self.xa, &self.guard, index)
    }

    /// Creates a [`Range`] to immutably iterate over the specified `range`.
    pub fn range(&self, range: core::ops::Range<u64>) -> Range<'_, P, M, XLockGuard<G>> {
        let cursor = self.cursor(range.start);
        Range::new(cursor, range.end)
    }

    /// Loads the `index`-th item.
    ///
    /// If the target item exists, it will be returned with `Some(_)`, otherwise, `None` will be
    /// returned.
    pub fn load(&self, index: u64) -> Option<P::Ref<'_>> {
        let mut cursor = self.cursor(index);
        cursor.load()
    }
}

View File

@ -0,0 +1,90 @@
// SPDX-License-Identifier: MPL-2.0
use core::sync::atomic::{AtomicU64, Ordering};
use ostd::sync::SpinGuardian;
use crate::XLockGuard;
/// A mark used to indicate which slots in an [`XNode`] contain items that have been marked.
///
/// [`XNode`]: super::node::XNode
///
/// It internally stores an `AtomicU64`, functioning as a bitmap, where each bit that is set to 1
/// represents a slot at the corresponding offset that has been marked.
#[derive(Debug)]
pub(super) struct Mark {
    // A bitmap: bit `i` being set means the slot at offset `i` is marked.
    inner: AtomicU64,
}
impl Mark {
    /// Creates a mark bitmap with the given initial bits.
    pub const fn new(inner: u64) -> Self {
        Self {
            inner: AtomicU64::new(inner),
        }
    }

    /// Creates a mark bitmap with no bits set.
    pub const fn new_empty() -> Self {
        Self::new(0)
    }

    /// Sets (`set == true`) or clears (`set == false`) the bit at `offset`,
    /// returning whether the bitmap actually changed.
    ///
    /// The `XArray` lock guard is required because this load/store pair is a
    /// non-atomic read-modify-write.
    pub fn update<G: SpinGuardian>(&self, offset: u8, set: bool, _guard: &XLockGuard<G>) -> bool {
        let bit = 1u64 << offset;
        let old_val = self.inner.load(Ordering::Acquire);
        let new_val = if set { old_val | bit } else { old_val & !bit };
        self.inner.store(new_val, Ordering::Release);
        new_val != old_val
    }

    /// Returns whether the bit at `offset` is set.
    pub fn is_marked(&self, offset: u8) -> bool {
        let bit = 1u64 << offset;
        self.inner.load(Ordering::Acquire) & bit != 0
    }

    /// Returns whether no bit is set at all.
    pub fn is_clear(&self) -> bool {
        self.inner.load(Ordering::Acquire) == 0
    }
}
/// The mark type used in the [`XArray`].
///
/// The `XArray` itself and an item in it can have up to three different marks.
///
/// Users can use a self-defined type to distinguish which kind of mark they want to set. Such a
/// type must implement the `Into<XMark>` trait.
///
/// [`XArray`]: crate::XArray
pub enum XMark {
    /// The mark kind 0.
    Mark0,
    /// The mark kind 1.
    Mark1,
    /// The mark kind 2.
    Mark2,
}

/// The number of distinct mark kinds, i.e., the number of [`XMark`] variants.
pub const NUM_MARKS: usize = 3;
impl XMark {
    /// Maps the `XMark` to an index in the range 0 to 2.
    pub(super) fn index(&self) -> usize {
        match self {
            Self::Mark0 => 0,
            Self::Mark1 => 1,
            Self::Mark2 => 2,
        }
    }
}
/// A mark type that disables the mark functionality in the [`XArray`].
///
/// This indicates that the mark functionality is not needed and is the default generic parameter
/// for an `XArray`.
///
/// [`XArray`]: crate::XArray
#[derive(Clone, Copy)]
// An uninhabited enum: no `NoneMark` value can ever be constructed, so the
// mark-related APIs are statically unusable when it is the mark type.
pub enum NoneMark {}

View File

@ -0,0 +1,303 @@
// SPDX-License-Identifier: MPL-2.0
use alloc::sync::Arc;
use core::{
cmp::Ordering,
ops::{Deref, DerefMut},
};
use ostd::{
sync::{non_null::NonNullPtr, RcuOption, SpinGuardian},
task::atomic_mode::AsAtomicModeGuard,
util::Either,
};
use crate::{
entry::{NodeEntry, NodeEntryRef, XEntry, XEntryRef},
mark::{Mark, NUM_MARKS},
XLockGuard, BITS_PER_LAYER, SLOT_MASK, SLOT_SIZE,
};
/// The height of an `XNode` within an `XArray`.
///
/// In an `XArray`, the head has the highest height, while the `XNode`s that
/// directly store items are at the lowest height, with a height value of 1.
/// Each level up from the bottom height increases the height number by 1.
/// The height of an `XArray` is the height of its head.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
pub(super) struct Height {
    height: u8,
}

// `Deref`/`DerefMut` expose the raw `u8` so the height can be manipulated
// arithmetically (e.g., `*height += 1` in `Height::from_index`).
impl Deref for Height {
    type Target = u8;

    fn deref(&self) -> &Self::Target {
        &self.height
    }
}

impl DerefMut for Height {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.height
    }
}

// Allow comparing a `Height` directly against a plain `u8` (e.g.,
// `self.height == 1` in `XNode::is_leaf`).
impl PartialEq<u8> for Height {
    fn eq(&self, other: &u8) -> bool {
        self.height == *other
    }
}

impl PartialOrd<u8> for Height {
    fn partial_cmp(&self, other: &u8) -> Option<Ordering> {
        self.height.partial_cmp(other)
    }
}
impl Height {
    /// Creates a `Height` directly from a height value.
    pub fn new(height: u8) -> Self {
        Self { height }
    }

    /// Creates a minimal `Height` that allows the `index`-th item to be stored.
    pub fn from_index(index: u64) -> Self {
        let mut height = Height::new(1);
        // Terminates for any `u64` index because `max_index` saturates at
        // `u64::MAX`.
        while index > height.max_index() {
            *height += 1;
        }
        height
    }

    /// Goes up, which increases the height value by one.
    pub fn go_root(&self) -> Self {
        Self::new(self.height + 1)
    }

    /// Goes down, which decreases the height value by one.
    pub fn go_leaf(&self) -> Self {
        Self::new(self.height - 1)
    }

    /// Returns the bit shift of the slot offset for this height: each slot
    /// at this height covers `1 << height_shift()` indices.
    fn height_shift(&self) -> u8 {
        (self.height - 1) * BITS_PER_LAYER as u8
    }

    /// Calculates the corresponding offset for the target index at
    /// the current height.
    pub fn height_offset(&self, index: u64) -> u8 {
        ((index >> self.height_shift()) & SLOT_MASK as u64) as u8
    }

    /// Calculates the maximum index that can be represented in an `XArray`
    /// with the current height.
    ///
    /// The result saturates at `u64::MAX`: once the index space covered by
    /// this height reaches 64 bits, the naive `(SLOT_SIZE << shift) - 1`
    /// would overflow `u64` (e.g., at height 11 with 6 bits per layer,
    /// `64 << 60` wraps to 0), which would panic in debug builds and return
    /// a wrong bound in release builds, making `from_index` misbehave for
    /// indices at or above `1 << 60`.
    pub fn max_index(&self) -> u64 {
        let total_bits = self.height as u32 * BITS_PER_LAYER as u32;
        if total_bits >= u64::BITS {
            u64::MAX
        } else {
            (1u64 << total_bits) - 1
        }
    }
}
/// The `XNode` is the intermediate node in the tree-like structure of the `XArray`.
///
/// It contains `SLOT_SIZE` number of `XEntry`s, meaning it can accommodate up to
/// `SLOT_SIZE` child nodes. The `height` and `offset_in_parent` attributes of an
/// `XNode` are determined at initialization and remain unchanged thereafter.
pub(super) struct XNode<P>
where
    P: NonNullPtr + Send + Sync,
{
    /// The pointer that refers to the parent node.
    ///
    /// If the current node is the head node, its parent pointer will be `None`.
    parent: RcuOption<NodeEntry<P>>,
    /// The height of the subtree rooted at the current node.
    ///
    /// The height of a leaf node, which stores the user-given items, is 1.
    height: Height,
    /// This node is its parent's `offset_in_parent`-th child.
    ///
    /// This field will be zero if this node is the root, as the node will be
    /// the 0-th child of its parent once the height of `XArray` is increased.
    offset_in_parent: u8,
    /// The slots in which `XEntry`s are stored.
    ///
    /// The entries point to user-given items for leaf nodes and other `XNode`s for
    /// interior nodes.
    slots: [RcuOption<XEntry<P>>; SLOT_SIZE],
    /// The marks representing whether each slot is marked or not.
    ///
    /// Users can set mark or unset mark on user-given items, and a leaf
    /// node or an interior node is marked if and only if there is at least
    /// one marked item within the node.
    ///
    /// One `Mark` bitmap is kept per mark kind (see `NUM_MARKS`).
    marks: [Mark; NUM_MARKS],
}
impl<P: NonNullPtr + Send + Sync> XNode<P> {
    /// Creates a root node with the given height (its `offset_in_parent` is 0).
    pub fn new_root(height: Height) -> Self {
        Self::new(height, 0)
    }

    /// Creates a node at `height` that is its parent's `offset`-th child.
    pub fn new(height: Height, offset: u8) -> Self {
        Self {
            parent: RcuOption::new_none(),
            height,
            offset_in_parent: offset,
            slots: [const { RcuOption::new_none() }; SLOT_SIZE],
            marks: [const { Mark::new_empty() }; NUM_MARKS],
        }
    }

    /// Gets the slot offset at the current `XNode` for the target index `target_index`.
    pub fn entry_offset(&self, target_index: u64) -> u8 {
        self.height.height_offset(target_index)
    }

    /// Returns the height of the subtree rooted at this node.
    pub fn height(&self) -> Height {
        self.height
    }

    /// Returns a reference to the parent node, or `None` for the head node.
    pub fn parent<'a>(&'a self, guard: &'a dyn AsAtomicModeGuard) -> Option<NodeEntryRef<'a, P>> {
        let parent = self.parent.read_with(guard)?;
        Some(parent)
    }

    /// Returns which child of its parent this node is.
    pub fn offset_in_parent(&self) -> u8 {
        self.offset_in_parent
    }

    /// Reads the entry stored in the slot at `offset`, if any.
    pub fn entry_with<'a>(
        &'a self,
        guard: &'a dyn AsAtomicModeGuard,
        offset: u8,
    ) -> Option<XEntryRef<'a, P>> {
        self.slots[offset as usize].read_with(guard)
    }

    /// Returns whether the slot at `offset` carries the given `mark`.
    pub fn is_marked(&self, offset: u8, mark: usize) -> bool {
        self.marks[mark].is_marked(offset)
    }

    /// Returns whether no slot in this node carries the given `mark`.
    pub fn is_mark_clear(&self, mark: usize) -> bool {
        self.marks[mark].is_clear()
    }

    /// Returns whether this node is a leaf node (i.e., its slots store user items).
    pub fn is_leaf(&self) -> bool {
        self.height == 1
    }
}
impl<P: NonNullPtr + Send + Sync> XNode<P> {
    /// Sets the parent pointer of this node to the given `parent`.
    fn set_parent<G: SpinGuardian>(&self, _guard: &XLockGuard<G>, parent: NodeEntry<P>) {
        self.parent.update(Some(parent));
    }

    /// Clears the parent pointers of this node and all its descendant nodes.
    ///
    /// This method should be invoked when the node is being removed from the tree.
    pub fn clear_parent<G: SpinGuardian>(&self, guard: &XLockGuard<G>) {
        self.parent.update(None);
        // Recurse into child nodes (`Left` entries); item entries need no cleanup.
        for child in self.slots.iter() {
            if let Some(node) = child.read_with(guard).and_then(|entry| entry.left()) {
                node.clear_parent(guard);
            }
        }
    }

    /// Sets the slot at the given `offset` to the given `entry`.
    ///
    /// If `entry` represents an item, the old marks at the same offset will be cleared.
    /// Otherwise, if `entry` represents a node, the marks at the same offset will be
    /// updated according to whether the new node contains marked items.
    ///
    /// This method will also propagate the updated marks to the ancestors.
    pub fn set_entry<G: SpinGuardian>(
        self: &Arc<Self>,
        guard: &XLockGuard<G>,
        offset: u8,
        entry: Option<XEntry<P>>,
    ) {
        // Detach the old child subtree (if any) from the tree.
        let old_entry = self.slots[offset as usize].read_with(guard);
        if let Some(node) = old_entry.and_then(|entry| entry.left()) {
            node.clear_parent(guard);
        }

        let is_new_node = match &entry {
            Some(Either::Left(new_node)) => {
                new_node.set_parent(guard, self.clone());
                true
            }
            _ => false,
        };

        self.slots[offset as usize].update(entry);

        if is_new_node {
            // Inherit the child node's marks into this slot.
            self.update_mark(guard, offset);
        } else {
            // Item (or empty) entries start with all marks cleared.
            for i in 0..NUM_MARKS {
                self.unset_mark(guard, offset, i);
            }
        }
    }

    /// Sets the input `mark` at the given `offset`.
    ///
    /// This method will also update the marks on the ancestors of this node
    /// if necessary to ensure that the marks on the ancestors are up to date.
    pub fn set_mark<G: SpinGuardian>(&self, guard: &XLockGuard<G>, offset: u8, mark: usize) {
        let changed = self.marks[mark].update(offset, true, guard);
        if changed {
            self.propagate_mark(guard, mark);
        }
    }

    /// Unsets the input `mark` at the given `offset`.
    ///
    /// This method will also update the marks on the ancestors of this node
    /// if necessary to ensure that the marks on the ancestors are up to date.
    pub fn unset_mark<G: SpinGuardian>(&self, guard: &XLockGuard<G>, offset: u8, mark: usize) {
        let changed = self.marks[mark].update(offset, false, guard);
        if changed {
            self.propagate_mark(guard, mark);
        }
    }

    /// Updates the mark at the given `offset`.
    ///
    /// This method does nothing if the slot at the given `offset` does not represent
    /// a node. It assumes the marks of the child node are up to date, and ensures
    /// the mark at the given `offset` is also up to date.
    ///
    /// This method will also update the marks on the ancestors of this node
    /// if necessary to ensure that the marks on the ancestors are up to date.
    fn update_mark<G: SpinGuardian>(&self, guard: &XLockGuard<G>, offset: u8) {
        let entry = self.slots[offset as usize].read_with(guard);
        let Some(node) = entry.and_then(|entry| entry.left()) else {
            return;
        };

        // This slot carries a mark iff the child has at least one marked slot.
        for i in 0..NUM_MARKS {
            let changed = self.marks[i].update(offset, !node.is_mark_clear(i), guard);
            if changed {
                self.propagate_mark(guard, i);
            }
        }
    }

    /// Propagates the mark updates on this node to the ancestors.
    ///
    /// This method must be called after the marks are updated to ensure that the marks on the
    /// ancestors are up to date.
    fn propagate_mark<G: SpinGuardian>(&self, guard: &XLockGuard<G>, mark: usize) {
        let Some(parent) = self.parent(guard) else {
            return;
        };

        let changed =
            parent.marks[mark].update(self.offset_in_parent, !self.is_mark_clear(mark), guard);
        if changed {
            // Keep climbing until a node's mark state is already correct
            // (or the root is reached).
            parent.propagate_mark(guard, mark);
        }
    }
}

View File

@ -0,0 +1,52 @@
// SPDX-License-Identifier: MPL-2.0
use ostd::{
sync::non_null::NonNullPtr,
task::{atomic_mode::AsAtomicModeGuard, DisabledPreemptGuard},
};
use crate::{cursor::Cursor, mark::NoneMark};
/// An iterator over a range of entries in an [`XArray`].
///
/// The typical way to obtain a `Range` instance is to call [`XArray::range`].
///
/// [`XArray`]: super::XArray
/// [`XArray::range`]: super::XArray::range
pub struct Range<'a, P, M = NoneMark, G = DisabledPreemptGuard>
where
    P: NonNullPtr + Send + Sync,
{
    /// The cursor that walks the underlying `XArray`.
    cursor: Cursor<'a, P, M, G>,
    /// The exclusive upper bound of the iterated index range.
    end: u64,
}
impl<'a, P: NonNullPtr + Send + Sync, M, G: AsAtomicModeGuard> Range<'a, P, M, G> {
    /// Creates a `Range` that starts at `cursor`'s current index and ends
    /// (exclusively) at `end`.
    pub(super) fn new(cursor: Cursor<'a, P, M, G>, end: u64) -> Self {
        Range { cursor, end }
    }
}
impl<'a, P: NonNullPtr + Send + Sync, M, G: AsAtomicModeGuard> core::iter::Iterator
    for Range<'a, P, M, G>
{
    type Item = (u64, P::Ref<'a>);

    /// Yields the next present item (with its index), skipping empty slots,
    /// until the cursor reaches the exclusive `end` bound.
    fn next(&mut self) -> Option<Self::Item> {
        while self.cursor.index() < self.end {
            let loaded = self.cursor.load();
            // Record the index before advancing so the pair stays consistent.
            let index = self.cursor.index();
            self.cursor.next();

            if let Some(item) = loaded {
                return Some((index, item));
            }
        }
        None
    }
}

View File

@ -1,4 +1,6 @@
// SPDX-License-Identifier: MPL-2.0
//! This module provides some advanced collections.
// TODO: Remove the old xarray module.
pub mod xarray;