Avoid generic_const_exprs feature

This commit is contained in:
Ruihan Li 2025-03-02 23:53:46 +08:00 committed by Tate, Hongliang Tian
parent b08e655a76
commit 0d36375dfa
14 changed files with 73 additions and 118 deletions

7
Cargo.lock generated
View File

@ -451,12 +451,6 @@ dependencies = [
"toml",
]
[[package]]
name = "const-assert"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8298db53081b3a951cadb6e0f4ebbe36def7bcb591a34676708d0d7ac87dd86"
[[package]]
name = "controlled"
version = "0.1.0"
@ -1282,7 +1276,6 @@ dependencies = [
"bitflags 1.3.2",
"buddy_system_allocator",
"cfg-if",
"const-assert",
"fdt",
"gimli 0.28.1",
"iced-x86",

View File

@ -2,7 +2,7 @@
use aster_util::safe_ptr::SafePtr;
use bitflags::bitflags;
use ostd::{io_mem::IoMem, Pod};
use ostd::{io_mem::IoMem, mm::PodOnce, Pod};
use crate::transport::VirtioTransport;
@ -32,6 +32,8 @@ pub struct VirtioVsockConfig {
pub guest_cid_high: u32,
}
impl PodOnce for VirtioVsockConfig {}
impl VirtioVsockConfig {
pub(crate) fn new(transport: &dyn VirtioTransport) -> SafePtr<Self, IoMem> {
let memory = transport.device_config_mem().unwrap();

View File

@ -13,7 +13,7 @@ use aster_util::{field_ptr, safe_ptr::SafePtr};
use bitflags::bitflags;
use log::debug;
use ostd::{
mm::{DmaCoherent, FrameAllocOptions},
mm::{DmaCoherent, FrameAllocOptions, PodOnce},
offset_of, Pod,
};
@ -445,6 +445,8 @@ bitflags! {
}
}
impl PodOnce for DescFlags {}
/// The driver uses the available ring to offer buffers to the device:
/// each ring entry refers to the head of a descriptor chain.
/// It is only written by the driver and read by the device.
@ -487,3 +489,5 @@ bitflags! {
const VIRTQ_AVAIL_F_NO_INTERRUPT = 1;
}
}
impl PodOnce for AvailFlags {}

View File

@ -20,7 +20,6 @@ bit_field = "0.10.1"
buddy_system_allocator = { version = "0.10", default-features = false, features = ["alloc"] }
bitflags = "1.3"
cfg-if = "1.0"
const-assert = "1.0"
gimli = { version = "0.28", default-features = false, features = ["read-core"] }
id-alloc = { path = "libs/id-alloc", version = "0.1.0" }
inherit-methods-macro = { git = "https://github.com/asterinas/inherit-methods-macro", rev = "98f7e3e", version = "0.1.0" }

View File

@ -8,7 +8,7 @@ use crate::{
mm::{
page_prop::{CachePolicy, PageFlags, PrivilegedPageFlags as PrivFlags},
page_table::{PageTableEntryTrait, PageTableMode},
Paddr, PageProperty, PagingConstsTrait, PagingLevel, Vaddr,
Paddr, PageProperty, PagingConstsTrait, PagingLevel, PodOnce, Vaddr,
},
util::SameSizeAs,
Pod,
@ -78,6 +78,8 @@ impl PageTableEntry {
// SAFETY: `PageTableEntry` has the same size as `usize` in our supported x86 architecture.
unsafe impl SameSizeAs<usize> for PageTableEntry {}
impl PodOnce for PageTableEntry {}
impl PageTableEntryTrait for PageTableEntry {
fn new_page(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self {
let mut pte = Self(paddr as u64 & Self::PHYS_MASK | PageTableFlags::LAST_PAGE.bits());

View File

@ -13,7 +13,7 @@ use crate::{
mm::{
page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags as PrivFlags},
page_table::PageTableEntryTrait,
Paddr, PagingConstsTrait, PagingLevel, Vaddr, PAGE_SIZE,
Paddr, PagingConstsTrait, PagingLevel, PodOnce, Vaddr, PAGE_SIZE,
},
util::SameSizeAs,
Pod,
@ -167,6 +167,8 @@ macro_rules! parse_flags {
// SAFETY: `PageTableEntry` has the same size as `usize`
unsafe impl SameSizeAs<usize> for PageTableEntry {}
impl PodOnce for PageTableEntry {}
impl PageTableEntryTrait for PageTableEntry {
fn is_present(&self) -> bool {
// For PT child, `PRESENT` should be set; for huge page, `HUGE` should

View File

@ -9,7 +9,6 @@
#![feature(core_intrinsics)]
#![feature(coroutines)]
#![feature(fn_traits)]
#![feature(generic_const_exprs)]
#![feature(iter_from_coroutine)]
#![feature(let_chains)]
#![feature(linkage)]
@ -20,9 +19,6 @@
#![feature(sync_unsafe_cell)]
#![feature(trait_upcasting)]
#![feature(iter_advance_by)]
// The `generic_const_exprs` feature is incomplete however required for the page table
// const generic implementation. We are using this feature in a conservative manner.
#![expect(incomplete_features)]
#![expect(internal_features)]
#![no_std]
#![warn(missing_docs)]

View File

@ -44,7 +44,6 @@ use alloc::vec;
use core::marker::PhantomData;
use align_ext::AlignExt;
use const_assert::{Assert, IsTrue};
use inherit_methods_macro::inherit_methods;
use crate::{
@ -525,6 +524,8 @@ impl<'a> VmReader<'a, Infallible> {
let cursor = self.cursor.cast::<T>();
assert!(cursor.is_aligned());
const { assert!(pod_once_impls::is_non_tearing::<T>()) };
// SAFETY: We have checked that the number of bytes remaining is at least the size of `T`
// and that the cursor is properly aligned with respect to the type `T`. All other safety
// requirements are the same as for `Self::read`.
@ -746,6 +747,8 @@ impl<'a> VmWriter<'a, Infallible> {
let cursor = self.cursor.cast::<T>();
assert!(cursor.is_aligned());
const { assert!(pod_once_impls::is_non_tearing::<T>()) };
// SAFETY: We have checked that the number of bytes remaining is at least the size of `T`
// and that the cursor is properly aligned with respect to the type `T`. All other safety
// requirements are the same as for `Self::writer`.
@ -926,27 +929,35 @@ impl<'a> From<&'a mut [u8]> for VmWriter<'a, Infallible> {
/// A marker trait for POD types that can be read or written with one instruction.
///
/// We currently rely on this trait to ensure that the memory operation created by
/// `ptr::read_volatile` and `ptr::write_volatile` doesn't tear. However, the Rust documentation
/// makes no such guarantee, and even the wording in the LLVM LangRef is ambiguous.
///
/// At this point, we can only _hope_ that this doesn't break in future versions of the Rust or
/// LLVM compilers. However, this is unlikely to happen in practice, since the Linux kernel also
/// uses "volatile" semantics to implement `READ_ONCE`/`WRITE_ONCE`.
/// This trait is mostly a hint, since it's safe and can be implemented for _any_ POD type. If it
/// is implemented for a type that cannot be read or written with a single instruction, calling
/// `read_once`/`write_once` will lead to a failed compile-time assertion.
pub trait PodOnce: Pod {}
impl<T: Pod> PodOnce for T where Assert<{ is_pod_once::<T>() }>: IsTrue {}
#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
mod pod_once_impls {
use super::PodOnce;
#[cfg(target_arch = "x86_64")]
const fn is_pod_once<T: Pod>() -> bool {
let size = size_of::<T>();
impl PodOnce for u8 {}
impl PodOnce for u16 {}
impl PodOnce for u32 {}
impl PodOnce for u64 {}
impl PodOnce for usize {}
impl PodOnce for i8 {}
impl PodOnce for i16 {}
impl PodOnce for i32 {}
impl PodOnce for i64 {}
impl PodOnce for isize {}
size == 1 || size == 2 || size == 4 || size == 8
}
#[cfg(target_arch = "riscv64")]
const fn is_pod_once<T: Pod>() -> bool {
let size = size_of::<T>();
size == 1 || size == 2 || size == 4 || size == 8
/// Checks whether the memory operation created by `ptr::read_volatile` and
/// `ptr::write_volatile` doesn't tear.
///
/// Note that the Rust documentation makes no such guarantee, and even the wording in the LLVM
/// LangRef is ambiguous. But this is unlikely to break in practice because the Linux kernel
/// also uses "volatile" semantics to implement `READ_ONCE`/`WRITE_ONCE`.
pub(super) const fn is_non_tearing<T>() -> bool {
let size = core::mem::size_of::<T>();
size == 1 || size == 2 || size == 4 || size == 8
}
}

View File

@ -114,10 +114,7 @@ pub enum PageTableItem {
/// simulate the recursion, and adopt a page table locking protocol to
/// provide concurrency.
#[derive(Debug)]
pub struct Cursor<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
where
[(); C::NR_LEVELS as usize]:,
{
pub struct Cursor<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> {
/// The lock guards of the cursor. The level 1 page table lock guard is at
/// index 0, and the level N page table lock guard is at index N - 1.
///
@ -125,7 +122,7 @@ where
/// from low to high, exactly the reverse order of the acquisition.
/// This behavior is ensured by the default drop implementation of Rust:
/// <https://doc.rust-lang.org/reference/destructors.html>.
guards: [Option<PageTableNode<E, C>>; C::NR_LEVELS as usize],
guards: [Option<PageTableNode<E, C>>; MAX_NR_LEVELS],
/// The level of the page table that the cursor points to.
level: PagingLevel,
/// From `guard_level` to `level`, the locks are held in `guards`.
@ -139,10 +136,10 @@ where
_phantom: PhantomData<&'a PageTable<M, E, C>>,
}
impl<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Cursor<'a, M, E, C>
where
[(); C::NR_LEVELS as usize]:,
{
/// The maximum value of `PagingConstsTrait::NR_LEVELS`.
const MAX_NR_LEVELS: usize = 4;
impl<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Cursor<'a, M, E, C> {
/// Creates a cursor claiming the read access for the given range.
///
/// The cursor created will only be able to query or jump within the given
@ -159,6 +156,8 @@ where
return Err(PageTableError::UnalignedVaddr);
}
const { assert!(C::NR_LEVELS as usize <= MAX_NR_LEVELS) };
let mut cursor = Self {
guards: core::array::from_fn(|_| None),
level: C::NR_LEVELS,
@ -340,8 +339,6 @@ where
impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Iterator
for Cursor<'_, M, E, C>
where
[(); C::NR_LEVELS as usize]:,
{
type Item = PageTableItem;
@ -361,14 +358,9 @@ where
#[derive(Debug)]
pub struct CursorMut<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>(
Cursor<'a, M, E, C>,
)
where
[(); C::NR_LEVELS as usize]:;
);
impl<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> CursorMut<'a, M, E, C>
where
[(); C::NR_LEVELS as usize]:,
{
impl<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> CursorMut<'a, M, E, C> {
/// Creates a cursor claiming the write access for the given range.
///
/// The cursor created will only be able to map, query or jump within the given

View File

@ -9,8 +9,8 @@ use core::{
};
use super::{
io::PodOnce, nr_subpage_per_huge, page_prop::PageProperty, page_size, Paddr, PagingConstsTrait,
PagingLevel, Vaddr,
nr_subpage_per_huge, page_prop::PageProperty, page_size, Paddr, PagingConstsTrait, PagingLevel,
PodOnce, Vaddr,
};
use crate::{
arch::mm::{PageTableEntry, PagingConsts},
@ -84,9 +84,7 @@ pub struct PageTable<
M: PageTableMode,
E: PageTableEntryTrait = PageTableEntry,
C: PagingConstsTrait = PagingConsts,
> where
[(); C::NR_LEVELS as usize]:,
{
> {
root: RawPageTableNode<E, C>,
_phantom: PhantomData<M>,
}
@ -201,10 +199,7 @@ impl PageTable<KernelMode> {
}
}
impl<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> PageTable<M, E, C>
where
[(); C::NR_LEVELS as usize]:,
{
impl<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> PageTable<M, E, C> {
/// Create a new empty page table. Useful for the kernel page table and IOMMU page tables only.
pub fn empty() -> Self {
PageTable {

View File

@ -23,9 +23,7 @@ use crate::{
pub(in crate::mm) enum Child<
E: PageTableEntryTrait = PageTableEntry,
C: PagingConstsTrait = PagingConsts,
> where
[(); C::NR_LEVELS as usize]:,
{
> {
PageTable(RawPageTableNode<E, C>),
Frame(Frame<dyn AnyFrameMeta>, PageProperty),
/// Pages not tracked by handles.
@ -33,10 +31,7 @@ pub(in crate::mm) enum Child<
None,
}
impl<E: PageTableEntryTrait, C: PagingConstsTrait> Child<E, C>
where
[(); C::NR_LEVELS as usize]:,
{
impl<E: PageTableEntryTrait, C: PagingConstsTrait> Child<E, C> {
/// Returns whether the child does not map to anything.
pub(in crate::mm) fn is_none(&self) -> bool {
matches!(self, Child::None)

View File

@ -12,10 +12,7 @@ use crate::mm::{nr_subpage_per_huge, page_prop::PageProperty, page_size, PagingC
/// This is a static reference to an entry in a node that does not account for
/// a dynamic reference count to the child. It can be used to create an owned
/// handle, which is a [`Child`].
pub(in crate::mm) struct Entry<'a, E: PageTableEntryTrait, C: PagingConstsTrait>
where
[(); C::NR_LEVELS as usize]:,
{
pub(in crate::mm) struct Entry<'a, E: PageTableEntryTrait, C: PagingConstsTrait> {
/// The page table entry.
///
/// We store the page table entry here to optimize the number of reads from
@ -30,10 +27,7 @@ where
node: &'a mut PageTableNode<E, C>,
}
impl<'a, E: PageTableEntryTrait, C: PagingConstsTrait> Entry<'a, E, C>
where
[(); C::NR_LEVELS as usize]:,
{
impl<'a, E: PageTableEntryTrait, C: PagingConstsTrait> Entry<'a, E, C> {
/// Returns if the entry does not map to anything.
pub(in crate::mm) fn is_none(&self) -> bool {
!self.pte.is_present()

View File

@ -56,19 +56,13 @@ use crate::{
/// Only the CPU or a PTE can access a page table node using a raw handle. To access the page
/// table node from the kernel code, use the handle [`PageTableNode`].
#[derive(Debug)]
pub(super) struct RawPageTableNode<E: PageTableEntryTrait, C: PagingConstsTrait>
where
[(); C::NR_LEVELS as usize]:,
{
pub(super) struct RawPageTableNode<E: PageTableEntryTrait, C: PagingConstsTrait> {
raw: Paddr,
level: PagingLevel,
_phantom: PhantomData<(E, C)>,
}
impl<E: PageTableEntryTrait, C: PagingConstsTrait> RawPageTableNode<E, C>
where
[(); C::NR_LEVELS as usize]:,
{
impl<E: PageTableEntryTrait, C: PagingConstsTrait> RawPageTableNode<E, C> {
pub(super) fn paddr(&self) -> Paddr {
self.raw
}
@ -189,8 +183,6 @@ where
impl<E: PageTableEntryTrait, C: PagingConstsTrait> From<RawPageTableNode<E, C>>
for Frame<PageTablePageMeta<E, C>>
where
[(); C::NR_LEVELS as usize]:,
{
fn from(raw: RawPageTableNode<E, C>) -> Self {
let raw = ManuallyDrop::new(raw);
@ -201,10 +193,7 @@ where
}
}
impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for RawPageTableNode<E, C>
where
[(); C::NR_LEVELS as usize]:,
{
impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for RawPageTableNode<E, C> {
fn drop(&mut self) {
// SAFETY: The physical address in the raw handle is valid. The restored
// handle is dropped to decrement the reference count.
@ -223,16 +212,11 @@ where
pub(super) struct PageTableNode<
E: PageTableEntryTrait = PageTableEntry,
C: PagingConstsTrait = PagingConsts,
> where
[(); C::NR_LEVELS as usize]:,
{
> {
page: Frame<PageTablePageMeta<E, C>>,
}
impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTableNode<E, C>
where
[(); C::NR_LEVELS as usize]:,
{
impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTableNode<E, C> {
/// Borrows an entry in the node at a given index.
///
/// # Panics
@ -346,10 +330,7 @@ where
}
}
impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for PageTableNode<E, C>
where
[(); C::NR_LEVELS as usize]:,
{
impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for PageTableNode<E, C> {
fn drop(&mut self) {
// Release the lock.
self.page.meta().lock.store(0, Ordering::Release);
@ -362,9 +343,7 @@ where
pub(in crate::mm) struct PageTablePageMeta<
E: PageTableEntryTrait = PageTableEntry,
C: PagingConstsTrait = PagingConsts,
> where
[(); C::NR_LEVELS as usize]:,
{
> {
/// The number of valid PTEs. It is mutable if the lock is held.
pub nr_children: SyncUnsafeCell<u16>,
/// The level of the page table page. A page table page cannot be
@ -393,10 +372,7 @@ pub(in crate::mm) enum MapTrackingStatus {
Tracked,
}
impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTablePageMeta<E, C>
where
[(); C::NR_LEVELS as usize]:,
{
impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTablePageMeta<E, C> {
pub fn new_locked(level: PagingLevel, is_tracked: MapTrackingStatus) -> Self {
Self {
nr_children: SyncUnsafeCell::new(0),
@ -410,10 +386,7 @@ where
// SAFETY: The layout of the `PageTablePageMeta` is ensured to be the same for
// all possible generic parameters. And the layout fits the requirements.
unsafe impl<E: PageTableEntryTrait, C: PagingConstsTrait> AnyFrameMeta for PageTablePageMeta<E, C>
where
[(); C::NR_LEVELS as usize]:,
{
unsafe impl<E: PageTableEntryTrait, C: PagingConstsTrait> AnyFrameMeta for PageTablePageMeta<E, C> {
fn on_drop(&mut self, reader: &mut VmReader<Infallible>) {
let nr_children = self.nr_children.get_mut();

View File

@ -146,10 +146,7 @@ fn test_user_copy_on_write() {
assert!(child_pt.query(from.start + 10).is_none());
}
impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> PageTable<M, E, C>
where
[(); C::NR_LEVELS as usize]:,
{
impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> PageTable<M, E, C> {
fn protect(&self, range: &Range<Vaddr>, mut op: impl FnMut(&mut PageProperty)) {
let mut cursor = self.cursor_mut(range).unwrap();
loop {