Fix typos in API documentation

This commit is contained in:
Jianfeng Jiang 2024-05-30 11:25:58 +00:00 committed by Tate, Hongliang Tian
parent 6e59617fc7
commit 33a7da9991
65 changed files with 415 additions and 323 deletions

View File

@ -66,7 +66,7 @@ impl CpuSet {
self.bitset.set(cpu_id as usize, true);
}
/// Adds multiple CPUs from a vector to the `CpuSet`.
/// Adds multiple CPUs from `cpu_ids` to the `CpuSet`.
pub fn add_from_vec(&mut self, cpu_ids: Vec<u32>) {
for cpu_id in cpu_ids {
self.add(cpu_id)
@ -83,7 +83,7 @@ impl CpuSet {
self.bitset.set(cpu_id as usize, false);
}
/// Removes multiple CPUs from a vector from the `CpuSet`.
/// Removes multiple CPUs from `cpu_ids` from the `CpuSet`.
pub fn remove_from_vec(&mut self, cpu_ids: Vec<u32>) {
for cpu_id in cpu_ids {
self.remove(cpu_id);
@ -584,7 +584,7 @@ pub struct FpRegs {
}
impl FpRegs {
/// Create a new instance.
/// Creates a new instance.
///
/// Note that a newly-created instance's floating point state is not
/// initialized, thus considered invalid (i.e., `self.is_valid() == false`).
@ -611,7 +611,7 @@ impl FpRegs {
self.is_valid = true;
}
/// Save the floating state given by a slice of u8.
/// Saves the floating point state given by a slice of u8.
///
/// After calling this method, the state of the instance will be considered valid.
///
@ -629,7 +629,7 @@ impl FpRegs {
self.is_valid
}
/// Clear the state of the instance.
/// Clears the state of the instance.
///
/// This method does not reset the underlying buffer that contains the floating
/// point state; it only marks the buffer __invalid__.
@ -637,9 +637,11 @@ impl FpRegs {
self.is_valid = false;
}
/// Restore CPU's CPU floating pointer states from this instance.
/// Restores the CPU's floating point states from this instance.
///
/// Panic. If the current state is invalid, the method will panic.
/// # Panics
///
/// If the current state is invalid, the method will panic.
pub fn restore(&self) {
debug!("restore fpregs");
assert!(self.is_valid);

View File

@ -85,7 +85,7 @@ pub(crate) struct IrqLine {
}
impl IrqLine {
/// Acquire an interrupt request line.
/// Acquires an interrupt request line.
///
/// # Safety
///
@ -96,7 +96,7 @@ impl IrqLine {
Arc::new(IRQ_LIST.get().unwrap().get(irq_num as usize).unwrap())
}
/// Get the IRQ number.
/// Gets the IRQ number.
pub fn num(&self) -> u8 {
self.irq_num
}
@ -105,7 +105,7 @@ impl IrqLine {
self.callback_list.lock()
}
/// Register a callback that will be invoked when the IRQ is active.
/// Registers a callback that will be invoked when the IRQ is active.
///
/// A handle to the callback is returned. Dropping the handle
/// automatically unregisters the callback.

View File

@ -64,7 +64,7 @@ impl AcpiTable for DmarHeader {
}
impl Dmar {
/// Create a instance from ACPI table.
/// Creates an instance from the ACPI table.
pub fn new() -> Option<Self> {
if !super::ACPI_TABLES.is_completed() {
return None;

View File

@ -177,7 +177,7 @@ pub struct DeviceScopeHeader {
macro_rules! impl_from_bytes {
($(($struct:tt,$header_struct:tt,$dst_name:ident)),*) => {
$(impl $struct {
/// Create instance from bytes
/// Creates instance from bytes
///
/// # Safety
///
@ -219,7 +219,7 @@ impl_from_bytes!(
);
impl DeviceScope {
/// Create instance from bytes
/// Creates instance from bytes
///
/// # Safety
///
@ -245,7 +245,7 @@ impl DeviceScope {
}
impl Rhsa {
/// Create instance from bytes
/// Creates instance from bytes
///
/// # Safety
///
@ -259,7 +259,7 @@ impl Rhsa {
}
impl Andd {
/// Create instance from bytes
/// Creates instance from bytes
///
/// # Safety
///

View File

@ -34,7 +34,7 @@ pub struct IoApic {
impl IoApic {
const TABLE_REG_BASE: u8 = 0x10;
/// Enable an entry. The index should not exceed the `max_redirection_entry`
/// Enables an entry. The index should not exceed the `max_redirection_entry`
pub fn enable(&mut self, index: u8, irq: IrqLine) -> Result<()> {
if index >= self.max_redirection_entry() {
return Err(Error::InvalidArgs);
@ -50,7 +50,7 @@ impl IoApic {
Ok(())
}
/// Disable an entry. The index should not exceed the `max_redirection_entry`
/// Disables an entry. The index should not exceed the `max_redirection_entry`
pub fn disable(&mut self, index: u8) -> Result<()> {
if index >= self.max_redirection_entry() {
return Err(Error::InvalidArgs);

View File

@ -25,21 +25,21 @@ pub trait Apic: ApicTimer + Sync + Send {
}
pub trait ApicTimer: Sync + Send {
/// Set the initial timer count, the APIC timer will count down from this value.
/// Sets the initial timer count, the APIC timer will count down from this value.
fn set_timer_init_count(&mut self, value: u64);
/// Get the current count of the timer.
/// Gets the current count of the timer.
/// The interval can be expressed by the expression: `init_count` - `current_count`.
fn timer_current_count(&self) -> u64;
/// Set the timer register in the APIC.
/// Sets the timer register in the APIC.
/// Bit 0-7: The interrupt vector of timer interrupt.
/// Bit 12: Delivery Status, 0 for Idle, 1 for Send Pending.
/// Bit 16: Mask bit.
/// Bit 17-18: Timer Mode, 0 for One-shot, 1 for Periodic, 2 for TSC-Deadline.
fn set_lvt_timer(&mut self, value: u64);
/// Set timer divide config register.
/// Sets timer divide config register.
fn set_timer_div_config(&mut self, div_config: DivideConfig);
}

View File

@ -33,14 +33,14 @@ impl XApic {
})
}
/// Read a register from the MMIO region.
/// Reads a register from the MMIO region.
fn read(&self, offset: u32) -> u32 {
assert!(offset as usize % 4 == 0);
let index = offset as usize / 4;
unsafe { core::ptr::read_volatile(&self.mmio_region[index]) }
}
/// write a register in the MMIO region.
/// Writes a register in the MMIO region.
fn write(&mut self, offset: u32, val: u32) {
assert!(offset as usize % 4 == 0);
let index = offset as usize / 4;
@ -94,7 +94,7 @@ impl ApicTimer for XApic {
}
}
/// set APIC base address and enable it
/// Sets APIC base address and enables it
fn set_apic_base_address(address: usize) {
unsafe {
x86_64::registers::model_specific::Msr::new(IA32_APIC_BASE_MSR)
@ -102,7 +102,7 @@ fn set_apic_base_address(address: usize) {
}
}
/// get APIC base address
/// Gets APIC base address
fn get_apic_base_address() -> usize {
unsafe {
(x86_64::registers::model_specific::Msr::new(IA32_APIC_BASE_MSR).read() & 0xf_ffff_f000)

View File

@ -22,7 +22,7 @@ static MASK_MASTER: AtomicU8 = AtomicU8::new(0x00);
static MASK_SLAVE: AtomicU8 = AtomicU8::new(0x00);
static CHANGE_LOCK: AtomicBool = AtomicBool::new(false);
/// init the PIC device
/// Initializes the PIC device
pub fn init() {
if CHANGE_LOCK.load(Relaxed) {
return;
@ -36,7 +36,7 @@ pub fn init() {
set_mask(master_mask, slave_mask);
}
/// allocate irq, for example, if timer need IRQ0, it will return IrqAllocateHandle with irq num: IRQ_OFFSET+0
/// Allocates an IRQ, for example, if the timer needs IRQ0, it will return IrqAllocateHandle with irq num: IRQ_OFFSET+0
pub fn allocate_irq(index: u8) -> Option<IrqLine> {
if index >= 16 {
return None;
@ -53,14 +53,14 @@ pub fn allocate_irq(index: u8) -> Option<IrqLine> {
}
}
/// enable the PIC device, this function will permanent enable all the interrupts
/// Enables the PIC device, this function will permanently enable all the interrupts
#[inline]
pub fn enable() {
CHANGE_LOCK.store(true, Relaxed);
set_mask(0, 0);
}
/// disable the PIC device, this function will permanent disable all the interrupts
/// Disables the PIC device, this function will permanently disable all the interrupts
/// the interrupts mask may not exist after calling the init function
#[inline]
pub fn disable() {
@ -68,14 +68,14 @@ pub fn disable() {
set_mask(0xFF, 0xFF);
}
/// enable the PIC device, this function will allow all the interrupts
/// Enables the PIC device, this function will allow all the interrupts
/// the interrupts mask may not exist after calling the init function
#[inline]
pub fn enable_temp() {
set_mask(0, 0);
}
/// disable the PIC device, this function will disable all the interrupts
/// Disables the PIC device, this function will disable all the interrupts
/// the interrupts mask may not exist after calling the init function
#[inline]
pub fn disable_temp() {

View File

@ -29,7 +29,7 @@ pub fn init_tsc_freq() {
info!("TSC frequency:{:?} Hz", tsc_freq);
}
/// Determine TSC frequency via CPUID. If the CPU does not support calculating TSC frequency by
/// Determines TSC frequency via CPUID. If the CPU does not support calculating TSC frequency by
/// CPUID, the function will return None. The unit of the return value is KHz.
///
/// Ref: function `native_calibrate_tsc` in linux `arch/x86/kernel/tsc.c`

View File

@ -93,10 +93,10 @@ pub(crate) fn tlb_flush_all_including_global() {
#[repr(C)]
pub struct PageTableEntry(usize);
/// Activate the given level 4 page table.
/// Activates the given level 4 page table.
/// The cache policy of the root page table node is controlled by `root_pt_cache`.
///
/// ## Safety
/// # Safety
///
/// Changing the level 4 page table is unsafe, because it's possible to violate memory safety by
/// changing the page mapping.

View File

@ -67,7 +67,7 @@ pub(crate) fn interrupts_ack() {
}
}
/// Return the frequency of TSC. The unit is Hz.
/// Returns the frequency of TSC. The unit is Hz.
pub fn tsc_freq() -> u64 {
kernel::tsc::TSC_FREQ.load(Ordering::Acquire)
}

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: MPL-2.0
//! Providing the ability to exit QEMU and return a value as debug result.
//! Provides the ability to exit QEMU and return a value as a debug result.
/// The exit code of x86 QEMU isa debug device. In `qemu-system-x86_64` the
/// exit code will be `(code << 1) | 1`. So you could never let QEMU invoke
@ -16,7 +16,7 @@ pub enum QemuExitCode {
Failed = 0x20,
}
/// Exit QEMU with the given exit code.
/// Exits QEMU with the given exit code.
///
/// This function assumes that the kernel is run in QEMU with the following
/// QEMU command line arguments that specifies the ISA debug exit device:

View File

@ -22,6 +22,7 @@ impl TaskContext {
}
}
/// Callee-saved registers.
#[derive(Debug, Default, Clone, Copy)]
#[repr(C)]
pub struct CalleeRegs {
@ -42,6 +43,7 @@ pub struct CalleeRegs {
}
impl CalleeRegs {
/// Creates new `CalleeRegs`
pub const fn new() -> Self {
CalleeRegs {
rsp: 0,

View File

@ -29,8 +29,8 @@ use crate::{
trap::IrqLine,
};
/// Init APIC with tsc deadline mode or periodic mode.
/// Return the corresponding `IrqLine` for the System Timer.
/// Initializes APIC with tsc deadline mode or periodic mode.
/// Returns the corresponding [`IrqLine`] for the System Timer.
pub(super) fn init() -> IrqLine {
init_tsc_freq();
if is_tsc_deadline_mode_supported() {
@ -44,7 +44,7 @@ pub(super) fn init() -> IrqLine {
pub(super) static APIC_TIMER_CALLBACK: Once<Arc<dyn Fn() + Sync + Send>> = Once::new();
/// Determine if the current system supports tsc_deadline mode APIC timer
/// Determines if the current system supports tsc_deadline mode APIC timer
fn is_tsc_deadline_mode_supported() -> bool {
const TSC_DEADLINE_MODE_SUPPORT: u32 = 1 << 24;
let cpuid = cpuid!(1);

View File

@ -32,7 +32,7 @@ impl Jiffies {
self.0
}
/// Gets the `Duration` calculated from the jiffies counts.
/// Gets the [`Duration`] calculated from the jiffies counts.
pub fn as_duration(self) -> Duration {
Duration::from_millis(self.0 * 1000 / TIMER_FREQ)
}

View File

@ -52,7 +52,7 @@ cpu_local! {
static INTERRUPT_CALLBACKS: RefCell<Vec<Box<dyn Fn() + Sync + Send>>> = RefCell::new(Vec::new());
}
/// Register a function that will be executed during the system timer interruption.
/// Registers a function that will be executed during the system timer interruption.
pub fn register_callback<F>(func: F)
where
F: Fn() + Sync + Send + 'static,

View File

@ -1,5 +1,7 @@
// SPDX-License-Identifier: MPL-2.0
//! Handles trap.
use core::sync::atomic::{AtomicBool, Ordering};
use align_ext::AlignExt;

View File

@ -45,25 +45,25 @@ pub struct KCmdlineArg {
// Define get APIs.
impl KCmdlineArg {
/// Get the path of the initprocess.
/// Gets the path of the initprocess.
pub fn get_initproc_path(&self) -> Option<&str> {
self.initproc.path.as_deref()
}
/// Get the argument vector(argv) of the initprocess.
/// Gets the argument vector(argv) of the initprocess.
pub fn get_initproc_argv(&self) -> &Vec<CString> {
&self.initproc.argv
}
/// Get the environment vector(envp) of the initprocess.
/// Gets the environment vector(envp) of the initprocess.
pub fn get_initproc_envp(&self) -> &Vec<CString> {
&self.initproc.envp
}
/// Get the argument vector of a kernel module.
/// Gets the argument vector of a kernel module.
pub fn get_module_args(&self, module: &str) -> Option<&Vec<ModuleArg>> {
self.module_args.get(module)
}
}
// Split the command line string by spaces but preserve
// Splits the command line string by spaces but preserves
// ones that are protected by double quotes(`"`).
fn split_arg(input: &str) -> impl Iterator<Item = &str> {
let mut inside_quotes = false;

View File

@ -39,12 +39,12 @@ pub struct MemoryRegion {
}
impl MemoryRegion {
/// Construct a valid memory region.
/// Constructs a valid memory region.
pub fn new(base: usize, len: usize, typ: MemoryRegionType) -> Self {
MemoryRegion { base, len, typ }
}
/// Construct a memory region where kernel sections are loaded.
/// Constructs a memory region where kernel sections are loaded.
///
/// Most boot protocols do not mark the place where the kernel loads as unusable. In this case,
/// we need to explicitly construct and append this memory region.
@ -81,7 +81,7 @@ impl MemoryRegion {
self.typ
}
/// Remove range t from self, resulting in 0, 1 or 2 truncated ranges.
/// Removes range `t` from self, resulting in 0, 1 or 2 truncated ranges.
/// We need to have this method since memory regions can overlap.
pub fn truncate(&self, t: &MemoryRegion) -> Vec<MemoryRegion> {
if self.base < t.base {
@ -125,7 +125,7 @@ impl MemoryRegion {
}
}
/// Truncate regions, resulting in a set of regions that does not overlap.
/// Truncates regions, resulting in a set of regions that does not overlap.
///
/// The truncation will be done according to the type of the regions, that
/// usable and reclaimable regions will be truncated by the unusable regions.

View File

@ -108,7 +108,7 @@ pub fn init() {
call_all_boot_init_callbacks();
}
/// Call the framework-user defined entrypoint of the actual kernel.
/// Calls the framework-user defined entrypoint of the actual kernel.
///
/// Any kernel that uses the aster-frame crate should define a function named
/// `aster_main` as the entrypoint.

View File

@ -53,13 +53,17 @@ macro_rules! cpu_local {
/// CPU-local objects.
///
/// A CPU-local object only gives you immutable references to the underlying value.
/// To mutate the value, one can use atomic values (e.g., `AtomicU32`) or internally mutable
/// objects (e.g., `RefCell`).
/// To mutate the value, one can use atomic values (e.g., [`AtomicU32`]) or internally mutable
/// objects (e.g., [`RefCell`]).
///
/// The `CpuLocal<T: Sync>` can be used directly.
/// Otherwise, the `CpuLocal<T>` must be used through `CpuLocal::borrow_with`.
/// Otherwise, the `CpuLocal<T>` must be used through [`borrow_with`].
///
/// TODO: re-implement `CpuLocal`
///
/// [`AtomicU32`]: core::sync::atomic::AtomicU32
/// [`RefCell`]: core::cell::RefCell
/// [`borrow_with`]: CpuLocal::borrow_with
pub struct CpuLocal<T>(UnsafeCell<T>);
// SAFETY: At any given time, only one task can access the inner value T of a cpu-local variable.

View File

@ -8,6 +8,8 @@ use crate::early_println;
const LOGGER: Logger = Logger {};
/// The log level.
///
/// FIXME: The logs should be able to be read from files in the userspace,
/// and the log level should be configurable.
pub const INIT_LOG_LEVEL: Level = Level::Error;

View File

@ -38,14 +38,14 @@ struct DmaCoherentInner {
}
impl DmaCoherent {
/// Create a coherent DMA mapping backed by `vm_segment`.
/// Creates a coherent DMA mapping backed by `vm_segment`.
///
/// The `is_cache_coherent` argument specifies whether
/// the target device that the DMA mapping is prepared for
/// can access the main memory in a CPU cache coherent way
/// or not.
///
/// The method fails if any part of the given VM segment
/// The method fails if any part of the given `vm_segment`
/// already belongs to a DMA mapping.
pub fn map(vm_segment: Segment, is_cache_coherent: bool) -> Result<Self, DmaError> {
let frame_count = vm_segment.nframes();

View File

@ -36,8 +36,8 @@ struct DmaStreamInner {
direction: DmaDirection,
}
/// `DmaDirection` limits the data flow direction of `DmaStream` and
/// prevents users from reading and writing to `DmaStream` unexpectedly.
/// `DmaDirection` limits the data flow direction of [`DmaStream`] and
/// prevents users from reading and writing to [`DmaStream`] unexpectedly.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum DmaDirection {
/// Data flows to the device
@ -101,7 +101,7 @@ impl DmaStream {
})
}
/// Get the underlying [`VmSegment`].
/// Gets the underlying [`Segment`].
///
/// Usually, the CPU side should not access the memory
/// after the DMA mapping is established because
@ -121,7 +121,7 @@ impl DmaStream {
self.inner.vm_segment.nbytes()
}
/// Synchronize the streaming DMA mapping with the device.
/// Synchronizes the streaming DMA mapping with the device.
///
/// This method should be called under one of the two conditions:
/// 1. The data of the stream DMA mapping has been updated by the device side.
@ -189,7 +189,7 @@ impl Drop for DmaStreamInner {
}
impl VmIo for DmaStream {
/// Read data into the buffer.
/// Reads data into the buffer.
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<(), Error> {
if self.inner.direction == DmaDirection::ToDevice {
return Err(Error::AccessDenied);
@ -197,7 +197,7 @@ impl VmIo for DmaStream {
self.inner.vm_segment.read_bytes(offset, buf)
}
/// Write data from the buffer.
/// Writes data from the buffer.
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<(), Error> {
if self.inner.direction == DmaDirection::FromDevice {
return Err(Error::AccessDenied);
@ -239,9 +239,9 @@ pub struct DmaStreamSlice<'a> {
}
impl<'a> DmaStreamSlice<'a> {
/// Constructs a `DmaStreamSlice` from the `DmaStream`.
/// Constructs a `DmaStreamSlice` from the [`DmaStream`].
///
/// # Panic
/// # Panics
///
/// If the `offset` is greater than or equal to the length of the stream,
/// this method will panic.

View File

@ -13,6 +13,8 @@ use spin::Once;
use super::Paddr;
use crate::{arch::iommu::has_iommu, mm::PAGE_SIZE, sync::SpinLock};
/// The device address.
///
/// If a device performs DMA to read or write system
/// memory, the addresses used by the device are device addresses.
/// Daddr can distinguish the address space used by cpu side and
@ -31,9 +33,9 @@ pub enum DmaError {
AlreadyMapped,
}
/// Has mapped address in the device address space.
/// A trait for types that have a mapped address in the device address space.
pub trait HasDaddr {
/// Get the base address of the mapping in the
/// Gets the base address of the mapping in the
/// device address space.
fn daddr(&self) -> Daddr;
}
@ -58,7 +60,7 @@ pub fn init() {
DMA_MAPPING_SET.call_once(|| SpinLock::new(BTreeSet::new()));
}
/// Check whether the physical addresses has dma mapping.
/// Checks whether the physical addresses have DMA mappings.
/// Fails if they have been mapped, otherwise inserts them.
fn check_and_insert_dma_mapping(start_paddr: Paddr, num_pages: usize) -> bool {
let mut mapping_set = DMA_MAPPING_SET.get().unwrap().lock_irq_disabled();
@ -77,7 +79,7 @@ fn check_and_insert_dma_mapping(start_paddr: Paddr, num_pages: usize) -> bool {
true
}
/// Remove a physical address from the dma mapping set.
/// Removes a physical address from the dma mapping set.
fn remove_dma_mapping(start_paddr: Paddr, num_pages: usize) {
let mut mapping_set = DMA_MAPPING_SET.get().unwrap().lock_irq_disabled();
// Ensure that the addresses used later will not overflow

View File

@ -1,5 +1,7 @@
// SPDX-License-Identifier: MPL-2.0
//! Page frames.
use alloc::{vec, vec::Vec};
use crate::{
@ -10,7 +12,7 @@ use crate::{
/// A collection of base page frames (regular physical memory pages).
///
/// For the most parts, `FrameVec` is like `Vec<Frame>`. But the
/// implementation may or may not be based on `Vec`. Having a dedicated
/// implementation may or may not be based on [`Vec`]. Having a dedicated
/// type to represent a series of page frames is convenient because,
/// more often than not, one needs to operate on a batch of frames rather
/// a single frame.
@ -18,25 +20,27 @@ use crate::{
pub struct FrameVec(pub(crate) Vec<Frame>);
impl FrameVec {
/// Retrieves a reference to a [`Frame`] at the specified index.
pub fn get(&self, index: usize) -> Option<&Frame> {
self.0.get(index)
}
/// returns an empty Frame vec
/// Creates an empty `FrameVec`.
pub fn empty() -> Self {
Self(Vec::new())
}
/// Creates a new `FrameVec` with the specified capacity.
pub fn new_with_capacity(capacity: usize) -> Self {
Self(Vec::with_capacity(capacity))
}
/// Pushs a new frame to the collection.
/// Pushes a new frame to the collection.
pub fn push(&mut self, new_frame: Frame) {
self.0.push(new_frame);
}
/// Pop a frame from the collection.
/// Pops a frame from the collection.
pub fn pop(&mut self) -> Option<Frame> {
self.0.pop()
}
@ -46,13 +50,14 @@ impl FrameVec {
self.0.remove(at)
}
/// Append some frames.
/// Appends all the [`Frame`]s from `more` to the end of this collection
/// and clears the frames in `more`.
pub fn append(&mut self, more: &mut FrameVec) -> Result<()> {
self.0.append(&mut more.0);
Ok(())
}
/// Truncate some frames.
/// Truncates the `FrameVec` to the specified length.
///
/// If `new_len >= self.len()`, then this method has no effect.
pub fn truncate(&mut self, new_len: usize) {
@ -62,7 +67,7 @@ impl FrameVec {
self.0.truncate(new_len)
}
/// Returns an iterator
/// Returns an iterator over all frames.
pub fn iter(&self) -> core::slice::Iter<'_, Frame> {
self.0.iter()
}
@ -84,6 +89,7 @@ impl FrameVec {
self.0.len() * PAGE_SIZE
}
/// Creates a new `FrameVec` from a single [`Frame`].
pub fn from_one_frame(frame: Frame) -> Self {
Self(vec![frame])
}
@ -148,6 +154,7 @@ pub struct FrameVecIter<'a> {
}
impl<'a> FrameVecIter<'a> {
/// Creates a new `FrameVecIter` from the given [`FrameVec`].
pub fn new(frames: &'a FrameVec) -> Self {
Self { frames, current: 0 }
}

View File

@ -2,8 +2,8 @@
//! Untyped physical memory management.
//!
//! A frame is a special page (defined in [`super::page`]) that is _untyped_
//! memory. It is used to store data irrelevant to the integrity of the kernel.
//! A frame is a special page that is _untyped_ memory.
//! It is used to store data irrelevant to the integrity of the kernel.
//! All pages mapped to the virtual address space of the users are backed by
//! frames. Frames, with all the properties of pages, can additionally be safely
//! read and written by the kernel or the user.
@ -56,11 +56,12 @@ impl Frame {
self.page.paddr()
}
/// Returns the end physical address of the page frame.
pub fn end_paddr(&self) -> Paddr {
self.start_paddr() + PAGE_SIZE
}
/// Get the paging level of the frame.
/// Gets the paging level of the frame.
///
/// This is the level of the page table entry that maps the frame,
/// which determines the size of the frame.
@ -71,18 +72,22 @@ impl Frame {
1
}
pub fn size(&self) -> usize {
/// Returns the size of the frame.
pub const fn size(&self) -> usize {
PAGE_SIZE
}
/// Returns a raw pointer to the starting virtual address of the frame.
pub fn as_ptr(&self) -> *const u8 {
paddr_to_vaddr(self.start_paddr()) as *const u8
}
/// Returns a mutable raw pointer to the starting virtual address of the frame.
pub fn as_mut_ptr(&self) -> *mut u8 {
paddr_to_vaddr(self.start_paddr()) as *mut u8
}
/// Copies the content of `src` to the frame.
pub fn copy_from(&self, src: &Frame) {
if self.paddr() == src.paddr() {
return;

View File

@ -1,5 +1,7 @@
// SPDX-License-Identifier: MPL-2.0
//! Options for allocating frames
use super::{Frame, FrameVec, Segment};
use crate::{mm::page::allocator, prelude::*, Error};
@ -45,7 +47,7 @@ impl FrameAllocOptions {
self
}
/// Allocate a collection of page frames according to the given options.
/// Allocates a collection of page frames according to the given options.
pub fn alloc(&self) -> Result<FrameVec> {
let frames = if self.is_contiguous {
allocator::alloc(self.nframes).ok_or(Error::NoMemory)?
@ -65,7 +67,7 @@ impl FrameAllocOptions {
Ok(frames)
}
/// Allocate a single page frame according to the given options.
/// Allocates a single page frame according to the given options.
pub fn alloc_single(&self) -> Result<Frame> {
if self.nframes != 1 {
return Err(Error::InvalidArgs);
@ -79,9 +81,9 @@ impl FrameAllocOptions {
Ok(frame)
}
/// Allocate a contiguous range of page frames according to the given options.
/// Allocates a contiguous range of page frames according to the given options.
///
/// The returned `Segment` contains at least one page frame.
/// The returned [`Segment`] contains at least one page frame.
pub fn alloc_contiguous(&self) -> Result<Segment> {
// It's no use checking `self.is_contiguous` here.
if self.nframes == 0 {

View File

@ -1,5 +1,7 @@
// SPDX-License-Identifier: MPL-2.0
//! A contiguous range of page frames.
use alloc::sync::Arc;
use core::ops::Range;
@ -14,13 +16,15 @@ use crate::{
/// A handle to a contiguous range of page frames (physical memory pages).
///
/// The biggest difference between `Segment` and `FrameVec` is that
/// The biggest difference between `Segment` and [`FrameVec`] is that
/// the page frames must be contiguous for `Segment`.
///
/// A cloned `Segment` refers to the same page frames as the original.
/// As the original and cloned instances point to the same physical address,
/// they are treated as equal to each other.
///
/// [`FrameVec`]: crate::mm::FrameVec
///
/// # Example
///
/// ```rust
@ -35,7 +39,7 @@ pub struct Segment {
range: Range<usize>,
}
/// This behaves like a `[Frame]` that owns a list of frame handles.
/// This behaves like a [`Frame`] that owns a list of frame handles.
///
/// The ownership is achieved by the reference counting mechanism of
/// frames. When constructing a `SegmentInner`, the frame handles are
@ -71,7 +75,7 @@ impl Segment {
///
/// The given range of page frames must be contiguous and valid for use.
/// The given range of page frames must not have been allocated before,
/// as part of either a `Frame` or `Segment`.
/// as part of either a [`Frame`] or `Segment`.
pub(crate) unsafe fn new(paddr: Paddr, nframes: usize) -> Self {
for i in 0..nframes {
let pa_i = paddr + i * PAGE_SIZE;
@ -89,7 +93,7 @@ impl Segment {
/// Returns a part of the `Segment`.
///
/// # Panic
/// # Panics
///
/// If `range` is not within the range of this `Segment`,
/// then the method panics.
@ -128,10 +132,12 @@ impl Segment {
self.inner.start / PAGE_SIZE + self.range.start
}
/// Returns a raw pointer to the starting virtual address of the `Segment`.
pub fn as_ptr(&self) -> *const u8 {
super::paddr_to_vaddr(self.start_paddr()) as *const u8
}
/// Returns a mutable raw pointer to the starting virtual address of the `Segment`.
pub fn as_mut_ptr(&self) -> *mut u8 {
super::paddr_to_vaddr(self.start_paddr()) as *mut u8
}

View File

@ -11,7 +11,7 @@ use pod::Pod;
use crate::prelude::*;
/// A trait that enables reading/writing data from/to a VM object,
/// e.g., `VmSpace`, `FrameVec`, and `Frame`.
/// e.g., [`VmSpace`], [`FrameVec`], and [`Frame`].
///
/// # Concurrency
///
@ -19,8 +19,12 @@ use crate::prelude::*;
/// threads. In this case, if the results of concurrent reads or writes
/// desire predictability or atomicity, the users should add extra mechanism
/// for such properties.
///
/// [`VmSpace`]: crate::mm::VmSpace
/// [`FrameVec`]: crate::mm::FrameVec
/// [`Frame`]: crate::mm::Frame
pub trait VmIo: Send + Sync {
/// Read a specified number of bytes at a specified offset into a given buffer.
/// Reads a specified number of bytes at a specified offset into a given buffer.
///
/// # No short reads
///
@ -29,24 +33,26 @@ pub trait VmIo: Send + Sync {
/// available, then the method shall return an error.
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()>;
/// Read a value of a specified type at a specified offset.
/// Reads a value of a specified type at a specified offset.
fn read_val<T: Pod>(&self, offset: usize) -> Result<T> {
let mut val = T::new_uninit();
self.read_bytes(offset, val.as_bytes_mut())?;
Ok(val)
}
/// Read a slice of a specified type at a specified offset.
/// Reads a slice of a specified type at a specified offset.
///
/// # No short reads
///
/// Similar to `read_bytes`.
/// Similar to [`read_bytes`].
///
/// [`read_bytes`]: VmIo::read_bytes
fn read_slice<T: Pod>(&self, offset: usize, slice: &mut [T]) -> Result<()> {
let buf = unsafe { core::mem::transmute(slice) };
self.read_bytes(offset, buf)
}
/// Write a specified number of bytes from a given buffer at a specified offset.
/// Writes a specified number of bytes from a given buffer at a specified offset.
///
/// # No short writes
///
@ -55,23 +61,25 @@ pub trait VmIo: Send + Sync {
/// then the method shall return an error.
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()>;
/// Write a value of a specified type at a specified offset.
/// Writes a value of a specified type at a specified offset.
fn write_val<T: Pod>(&self, offset: usize, new_val: &T) -> Result<()> {
self.write_bytes(offset, new_val.as_bytes())?;
Ok(())
}
/// Write a slice of a specified type at a specified offset.
/// Writes a slice of a specified type at a specified offset.
///
/// # No short write
///
/// Similar to `write_bytes`.
/// Similar to [`write_bytes`].
///
/// [`write_bytes`]: VmIo::write_bytes
fn write_slice<T: Pod>(&self, offset: usize, slice: &[T]) -> Result<()> {
let buf = unsafe { core::mem::transmute(slice) };
self.write_bytes(offset, buf)
}
/// Write a sequence of values given by an iterator (`iter`) from the specified offset (`offset`).
/// Writes a sequence of values given by an iterator (`iter`) from the specified offset (`offset`).
///
/// The write process stops when the VM object does not have enough remaining space
/// or the iterator returns `None`. If any value is written, the function returns `Ok(nr_written)`,
@ -83,15 +91,15 @@ pub trait VmIo: Send + Sync {
///
/// # Example
///
/// Initializing an VM object with the same value can be done easily with `write_values`.
/// Initializing a VM object with the same value can be done easily with `write_vals`.
///
/// ```
/// use core::iter::self;
///
/// let _nr_values = vm_obj.write_values(0, iter::repeat(0_u32), 0).unwrap();
/// let _nr_values = vm_obj.write_vals(0, iter::repeat(0_u32), 0).unwrap();
/// ```
///
/// # Panic
/// # Panics
///
/// This method panics if `align` is greater than two,
/// but not a power of two, in release mode.

View File

@ -98,7 +98,7 @@ pub fn paddr_to_vaddr(pa: Paddr) -> usize {
pub static KERNEL_PAGE_TABLE: Once<PageTable<KernelMode, PageTableEntry, PagingConsts>> =
Once::new();
/// Initialize the kernel page table.
/// Initializes the kernel page table.
///
/// This function should be called after:
/// - the page allocator and the heap allocator are initialized;
@ -115,7 +115,7 @@ pub fn init_kernel_page_table(
let regions = crate::boot::memory_regions();
let phys_mem_cap = regions.iter().map(|r| r.base() + r.len()).max().unwrap();
// Starting to initialize the kernel page table.
// Start to initialize the kernel page table.
let kpt = PageTable::<KernelMode>::empty();
// Make shared the page tables mapped by the root table in the kernel space.

View File

@ -71,6 +71,7 @@ pub(crate) trait PagingConstsTrait: Clone + Debug + Default + Sync + 'static {
const ADDRESS_WIDTH: usize;
}
/// The page size
pub const PAGE_SIZE: usize = page_size::<PagingConsts>(1);
/// The page size at a given level.
@ -105,7 +106,7 @@ pub const MAX_USERSPACE_VADDR: Vaddr = 0x0000_8000_0000_0000 - PAGE_SIZE;
/// architectures.
pub(crate) const KERNEL_VADDR_RANGE: Range<Vaddr> = 0xffff_8000_0000_0000..0xffff_ffff_ffff_0000;
/// Get physical address trait
/// A trait for getting the physical address.
pub trait HasPaddr {
/// Returns the physical address.
fn paddr(&self) -> Paddr;

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: MPL-2.0
/// Get the offset of a field within a type as a pointer.
/// Gets the offset of a field within a type as a pointer.
///
/// ```rust
/// #[repr(C)]
@ -36,7 +36,7 @@ macro_rules! offset_of {
});
}
/// Get the offset of a field within an object as a pointer.
/// Gets the offset of a field within an object as a pointer.
///
/// ```rust
/// #[repr(C)]

View File

@ -65,7 +65,7 @@ pub(crate) fn alloc_contiguous(nframes: usize) -> Option<Segment> {
})
}
/// Deallocate a contiguous range of page frames.
/// Deallocates a contiguous range of page frames.
///
/// # Safety
///

View File

@ -23,14 +23,14 @@ pub mod mapping {
use super::MetaSlot;
use crate::mm::{kspace::FRAME_METADATA_RANGE, Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE};
/// Convert a physical address of a base page to the virtual address of the metadata slot.
/// Converts a physical address of a base page to the virtual address of the metadata slot.
pub const fn page_to_meta<C: PagingConstsTrait>(paddr: Paddr) -> Vaddr {
let base = FRAME_METADATA_RANGE.start;
let offset = paddr / PAGE_SIZE;
base + offset * size_of::<MetaSlot>()
}
/// Convert a virtual address of the metadata slot to the physical address of the page.
/// Converts a virtual address of the metadata slot to the physical address of the page.
pub const fn meta_to_page<C: PagingConstsTrait>(vaddr: Vaddr) -> Paddr {
let base = FRAME_METADATA_RANGE.start;
let offset = (vaddr - base) / size_of::<MetaSlot>();
@ -188,7 +188,7 @@ impl PageMeta for KernelMeta {
// ======== End of all the specific metadata structures definitions ===========
/// Initialize the metadata of all physical pages.
/// Initializes the metadata of all physical pages.
///
/// The function returns a list of `Page`s containing the metadata.
pub(crate) fn init(boot_pt: &mut BootPageTable) -> Vec<Range<Paddr>> {

View File

@ -17,7 +17,7 @@ pub struct PageProperty {
}
impl PageProperty {
/// Create a new `PageProperty` with the given flags and cache policy for the user.
/// Creates a new `PageProperty` with the given flags and cache policy for the user.
pub fn new(flags: PageFlags, cache: CachePolicy) -> Self {
Self {
flags,
@ -25,7 +25,7 @@ impl PageProperty {
priv_flags: PrivilegedPageFlags::USER,
}
}
/// Create a page property that implies an invalid page without mappings.
/// Creates a page property that implies an invalid page without mappings.
pub fn new_absent() -> Self {
Self {
flags: PageFlags::empty(),

View File

@ -33,7 +33,7 @@ pub struct BootPageTable<
}
impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
/// Create a new boot page table from the current page table root physical address.
/// Creates a new boot page table from the current page table root physical address.
///
/// The caller must ensure that the current page table may be set up by the firmware,
/// loader or the setup code.
@ -46,7 +46,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
}
}
/// Map a base page to a frame.
/// Maps a base page to a frame.
/// This function will panic if the page is already mapped.
pub fn map_base_page(&mut self, from: Vaddr, to: FrameNumber, prop: PageProperty) {
let mut pt = self.root_pt;
@ -86,7 +86,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
frame
}
/// Retire this boot-stage page table.
/// Retires this boot-stage page table.
///
/// Do not drop a boot-stage page table. Instead, retire it.
///

View File

@ -108,7 +108,7 @@ impl<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Cursor<
where
[(); C::NR_LEVELS as usize]:,
{
/// Create a cursor exclusively owning the locks for the given range.
/// Creates a cursor exclusively owning the locks for the given range.
///
/// The cursor created will only be able to map, query or jump within the
/// given range.
@ -160,7 +160,7 @@ where
Ok(cursor)
}
/// Get the information of the current slot.
/// Gets the information of the current slot.
pub(crate) fn query(&mut self) -> Option<PageTableQueryResult> {
if self.va >= self.barrier_va.end {
return None;
@ -202,7 +202,7 @@ where
}
}
/// Traverse forward in the current level to the next PTE.
/// Traverses forward in the current level to the next PTE.
///
/// If reached the end of a page table node, it leads itself up to the next frame of the parent
/// frame if possible.
@ -215,7 +215,7 @@ where
self.va = next_va;
}
/// Go up a level. We release the current frame if it has no mappings since the cursor only moves
/// Goes up a level. We release the current frame if it has no mappings since the cursor only moves
/// forward. And if needed we will do the final cleanup using this method after re-walk when the
/// cursor is dropped.
///
@ -237,7 +237,7 @@ where
}
}
/// Go down a level assuming a child page table exists.
/// Goes down a level assuming a child page table exists.
fn level_down(&mut self) {
debug_assert!(self.level > 1);
let idx = pte_index::<C>(self.va, self.level);
@ -268,7 +268,7 @@ where
self.cur_node().read_pte(self.cur_idx())
}
/// Tell if the current virtual range must contain untracked mappings.
/// Tells if the current virtual range must contain untracked mappings.
///
/// In the kernel mode, this is aligned with the definition in [`crate::mm::kspace`].
/// Only linear mappings in the kernel are considered as untracked mappings.
@ -354,7 +354,7 @@ where
Cursor::new(pt, va).map(|inner| Self(inner))
}
/// Get the information of the current slot and go to the next slot.
/// Gets the information of the current slot and goes to the next slot.
///
/// We choose not to implement `Iterator` or `IterMut` for [`CursorMut`]
/// because the mutable cursor is indeed not an iterator.
@ -362,9 +362,11 @@ where
self.0.next()
}
/// Jump to the given virtual address.
/// Jumps to the given virtual address.
///
/// It panics if the address is out of the range where the cursor is required to operate,
/// # Panics
///
/// This method panics if the address is out of the range where the cursor is required to operate,
/// or has bad alignment.
pub(crate) fn jump(&mut self, va: Vaddr) {
assert!(self.0.barrier_va.contains(&va));
@ -388,9 +390,9 @@ where
}
}
/// Map the range starting from the current address to a `Frame`.
/// Maps the range starting from the current address to a [`Frame`].
///
/// # Panic
/// # Panics
///
/// This function will panic if
/// - the virtual address range to be mapped is out of the range;
@ -428,7 +430,7 @@ where
self.0.move_forward();
}
/// Map the range starting from the current address to a physical address range.
/// Maps the range starting from the current address to a physical address range.
///
/// The function will map as many huge pages as possible, and it will split
/// the huge pages into smaller pages if necessary. If the input range is
@ -444,7 +446,7 @@ where
///
/// In practice it is not suggested to use this method for safety and conciseness.
///
/// # Panic
/// # Panics
///
/// This function will panic if
/// - the virtual address range to be mapped is out of the range.
@ -491,13 +493,13 @@ where
}
}
/// Unmap the range starting from the current address with the given length of virtual address.
/// Unmaps the range starting from the current address with the given length of virtual address.
///
/// # Safety
///
/// The caller should ensure that the range being unmapped does not affect kernel's memory safety.
///
/// # Panic
/// # Panics
///
/// This function will panic if:
/// - the range to be unmapped is out of the range where the cursor is required to operate;
@ -544,7 +546,7 @@ where
}
}
/// Apply the given operation to all the mappings within the range.
/// Applies the given operation to all the mappings within the range.
///
/// The function will return an error if it is not allowed to protect an invalid range and
/// it does so, or if the range to be protected only covers a part of a page.
@ -553,7 +555,7 @@ where
///
/// The caller should ensure that the range being protected does not affect kernel's memory safety.
///
/// # Panic
/// # Panics
///
/// This function will panic if:
/// - the range to be protected is out of the range where the cursor is required to operate.
@ -599,7 +601,7 @@ where
Ok(())
}
/// Consume itself and leak the root guard for the caller if it locked the root level.
/// Consumes itself and leaks the root guard for the caller if it locked the root level.
///
/// It is useful when the caller wants to keep the root guard while the cursor should be dropped.
pub(super) fn leak_root_guard(mut self) -> Option<PageTableNode<E, C>> {
@ -614,7 +616,7 @@ where
// level is the root level when running the dropping method.
}
/// Go down a level assuming the current slot is absent.
/// Goes down a level assuming the current slot is absent.
///
/// This method will create a new child frame and go down to it.
fn level_down_create(&mut self) {
@ -628,7 +630,7 @@ where
self.0.guards[(C::NR_LEVELS - self.0.level) as usize] = Some(new_frame);
}
/// Go down a level assuming the current slot is an untracked huge page.
/// Goes down a level assuming the current slot is an untracked huge page.
///
/// This method will split the huge page and go down to the next level.
fn level_down_split(&mut self) {

View File

@ -69,7 +69,7 @@ where
self.raw
}
/// Convert a raw handle to an accessible handle by pertaining the lock.
/// Converts a raw handle to an accessible handle by acquiring the lock.
pub(super) fn lock(self) -> PageTableNode<E, C> {
// SAFETY: The physical address in the raw handle is valid and we are
// transferring the ownership to a new handle. No increment of the reference
@ -90,7 +90,7 @@ where
PageTableNode::<E, C> { page }
}
/// Create a copy of the handle.
/// Creates a copy of the handle.
pub(super) fn clone_shallow(&self) -> Self {
self.inc_ref();
Self {
@ -109,7 +109,7 @@ where
nr
}
/// Activate the page table assuming it is a root page table.
/// Activates the page table assuming it is a root page table.
///
/// Here we ensure not dropping an active page table by making a
/// processor a page table owner. When activating a page table, the
@ -148,7 +148,7 @@ where
});
}
/// Activate the (root) page table assuming it is the first activation.
/// Activates the (root) page table assuming it is the first activation.
///
/// It will not try dropping the last activate page table. It is the same
/// with [`Self::activate()`] in other senses.
@ -213,7 +213,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTableNode<E, C>
where
[(); C::NR_LEVELS as usize]:,
{
/// Allocate a new empty page table node.
/// Allocates a new empty page table node.
///
/// This function returns an owning handle. The newly created handle does not
/// set the lock bit for performance as it is exclusive and unlocking is an
@ -241,7 +241,7 @@ where
self.page.meta().level
}
/// Convert the handle into a raw handle to be stored in a PTE or CPU.
/// Converts the handle into a raw handle to be stored in a PTE or CPU.
pub(super) fn into_raw(self) -> RawPageTableNode<E, C> {
let level = self.level();
let raw = self.page.paddr();
@ -254,7 +254,7 @@ where
}
}
/// Get a raw handle while still preserving the original handle.
/// Gets a raw handle while still preserving the original handle.
pub(super) fn clone_raw(&self) -> RawPageTableNode<E, C> {
core::mem::forget(self.page.clone());
RawPageTableNode {
@ -264,7 +264,7 @@ where
}
}
/// Get an extra reference of the child at the given index.
/// Gets an extra reference of the child at the given index.
pub(super) fn child(&self, idx: usize, tracked: bool) -> Child<E, C> {
debug_assert!(idx < nr_subpage_per_huge::<C>());
let pte = self.read_pte(idx);
@ -298,7 +298,7 @@ where
}
}
/// Make a copy of the page table node.
/// Makes a copy of the page table node.
///
/// This function allows you to control the way the children are copied.
/// For indexes in `deep`, the children are deep copied and this function will be recursively called.
@ -347,13 +347,13 @@ where
new_frame
}
/// Remove a child if the child at the given index is present.
/// Removes a child if the child at the given index is present.
pub(super) fn unset_child(&mut self, idx: usize, in_untracked_range: bool) {
debug_assert!(idx < nr_subpage_per_huge::<C>());
self.overwrite_pte(idx, None, in_untracked_range);
}
/// Set a child page table at a given index.
/// Sets a child page table at a given index.
pub(super) fn set_child_pt(
&mut self,
idx: usize,
@ -380,7 +380,7 @@ where
let _ = ManuallyDrop::new(frame);
}
/// Set an untracked child frame at a given index.
/// Sets an untracked child frame at a given index.
///
/// # Safety
///
@ -398,12 +398,12 @@ where
self.page.meta().nr_children
}
/// Read the info from a page table entry at a given index.
/// Reads the info from a page table entry at a given index.
pub(super) fn read_pte_prop(&self, idx: usize) -> PageProperty {
self.read_pte(idx).prop()
}
/// Split the untracked huge page mapped at `idx` to smaller pages.
/// Splits the untracked huge page mapped at `idx` to smaller pages.
pub(super) fn split_untracked_huge(&mut self, idx: usize) {
// These should be ensured by the cursor.
debug_assert!(idx < nr_subpage_per_huge::<C>());
@ -423,7 +423,7 @@ where
self.set_child_pt(idx, new_frame.into_raw(), true);
}
/// Protect an already mapped child at a given index.
/// Protects an already mapped child at a given index.
pub(super) fn protect(&mut self, idx: usize, prop: PageProperty) {
let mut pte = self.read_pte(idx);
debug_assert!(pte.is_present()); // This should be ensured by the cursor.
@ -445,7 +445,7 @@ where
self.page.paddr()
}
/// Replace a page table entry at a given index.
/// Replaces a page table entry at a given index.
///
/// This method will ensure that the child presented by the overwritten
/// PTE is dropped, and the child count is updated.

View File

@ -31,7 +31,7 @@ use crate::{
///
/// A newly-created `VmSpace` is not backed by any physical memory pages.
/// To provide memory pages for a `VmSpace`, one can allocate and map
/// physical memory (`Frame`s) to the `VmSpace`.
/// physical memory ([`Frame`]s) to the `VmSpace`.
#[derive(Debug)]
pub struct VmSpace {
pt: PageTable<UserMode>,
@ -54,7 +54,7 @@ impl VmSpace {
}
}
/// Activate the page table.
/// Activates the page table.
pub(crate) fn activate(&self) {
self.pt.activate();
}
@ -64,7 +64,7 @@ impl VmSpace {
///
/// The ownership of the frames will be transferred to the `VmSpace`.
///
/// For more information, see `VmMapOptions`.
/// For more information, see [`VmMapOptions`].
pub fn map(&self, frames: FrameVec, options: &VmMapOptions) -> Result<Vaddr> {
if options.addr.is_none() {
return Err(Error::InvalidArgs);
@ -115,7 +115,7 @@ impl VmSpace {
Ok(addr)
}
/// Query about a range of virtual memory.
/// Queries about a range of virtual memory.
/// You will get an iterator of `VmQueryResult` which contains the information of
/// each parts of the range.
pub fn query_range(&self, range: &Range<Vaddr>) -> Result<VmQueryIter> {
@ -124,7 +124,7 @@ impl VmSpace {
})
}
/// Query about the mapping information about a byte in virtual memory.
/// Queries about the mapping information about a byte in virtual memory.
/// This is more handy than [`query_range`], but less efficient if you want
/// to query in a batch.
///
@ -157,7 +157,7 @@ impl VmSpace {
Ok(())
}
/// clear all mappings
/// Clears all mappings
pub fn clear(&self) {
// SAFETY: unmapping user space is safe, and we don't care unmapping
// invalid ranges.
@ -167,7 +167,7 @@ impl VmSpace {
tlb_flush_all_excluding_global();
}
/// Update the VM protection permissions within the VM address range.
/// Updates the VM protection permissions within the VM address range.
///
/// If any of the page in the given range is not mapped, it is skipped.
/// The method panics when virtual address is not aligned to base page
@ -196,7 +196,7 @@ impl VmSpace {
Ok(())
}
/// To fork a new VM space with copy-on-write semantics.
/// Forks a new VM space with copy-on-write semantics.
///
/// Both the parent and the newly forked VM space will be marked as
/// read-only. And both the VM space will take handles to the same
@ -217,16 +217,16 @@ impl Default for VmSpace {
}
/// Options for mapping physical memory pages into a VM address space.
/// See `VmSpace::map`.
/// See [`VmSpace::map`].
#[derive(Clone, Debug)]
pub struct VmMapOptions {
/// start virtual address
/// Starting virtual address
addr: Option<Vaddr>,
/// map align
/// Map align
align: usize,
/// page permissions and status
/// Page permissions and status
flags: PageFlags,
/// can overwrite
/// Can overwrite
can_overwrite: bool,
}

View File

@ -2,7 +2,9 @@
//! The prelude.
/// A specialized [`core::result::Result``] type for this crate.
/// A specialized [`Result`] type for this crate.
///
/// [`Result`]: core::result::Result
pub type Result<T> = core::result::Result<T, crate::error::Error>;
pub(crate) use alloc::{boxed::Box, sync::Arc, vec::Vec};

View File

@ -18,12 +18,12 @@ pub struct AtomicBits {
}
impl AtomicBits {
/// Create a given number of bit 0s.
/// Creates a given number of bit 0s.
pub fn new_zeroes(num_bits: usize) -> Self {
Self::new(0, num_bits)
}
/// Create a given number of bit 1s.
/// Creates a given number of bit 1s.
pub fn new_ones(num_bits: usize) -> Self {
Self::new(!0, num_bits)
}
@ -45,7 +45,7 @@ impl AtomicBits {
self.num_bits
}
/// Get the bit at a given position.
/// Gets the bit at a given position.
pub fn get(&self, index: usize) -> bool {
assert!(index < self.num_bits);
let i = index / 64;
@ -55,7 +55,7 @@ impl AtomicBits {
(u64_atomic.load(Relaxed) & 1 << j) != 0
}
/// Set the bit at a given position.
/// Sets the bit at a given position.
pub fn set(&self, index: usize, new_bit: bool) {
assert!(index < self.num_bits);
let i = index / 64;
@ -69,7 +69,7 @@ impl AtomicBits {
}
}
/// Clear all the bits.
/// Clears all the bits.
pub fn clear(&self) {
todo!()
}
@ -88,17 +88,17 @@ impl AtomicBits {
todo!()
}
/// Get an iterator for the bits.
/// Gets an iterator for the bits.
pub fn iter(&self) -> Iter<'_> {
Iter::new(self)
}
/// Get an iterator that gives the positions of all 1s in the bits.
/// Gets an iterator that gives the positions of all 1s in the bits.
pub fn iter_ones(&self) -> OnesIter<'_> {
OnesIter::new(self)
}
/// Get an iterator that gives the positions of all 0s in the bits.
/// Gets an iterator that gives the positions of all 0s in the bits.
pub fn iter_zeroes(&self) -> ZeroesIter<'_> {
ZeroesIter::new(self)
}
@ -130,7 +130,7 @@ impl<'a> Iterator for Iter<'a> {
}
}
/// An iterator that returns the positions of 1s in an `AtomicBits`.
/// An iterator that returns the positions of 1s in an [`AtomicBits`].
pub struct OnesIter<'a> {
bits: &'a AtomicBits,
u64_idx: usize,
@ -157,7 +157,7 @@ impl<'a> OnesIter<'a> {
new_self
}
/// Get the u64 value at the given position, removing the garbage bits if any.
/// Gets the u64 value at the given position, removing the garbage bits if any.
fn get_u64_val(&self, idx: usize) -> u64 {
let mut u64_val = self.bits.u64s[idx].load(Relaxed);
// Clear the garbage bits, if any, in the last u64 so that they
@ -195,7 +195,7 @@ impl<'a> Iterator for OnesIter<'a> {
}
}
/// An iterator that returns the positions of 0s in an `AtomicBits`.
/// An iterator that returns the positions of 0s in an [`AtomicBits`].
pub struct ZeroesIter<'a> {
bits: &'a AtomicBits,
u64_idx: usize,
@ -222,7 +222,7 @@ impl<'a> ZeroesIter<'a> {
new_self
}
/// Get the u64 value at the given position, removing the garbage bits if any.
/// Gets the u64 value at the given position, removing the garbage bits if any.
fn get_u64_val(&self, idx: usize) -> u64 {
let mut u64_val = self.bits.u64s[idx].load(Relaxed);
// Set all garbage bits, if any, in the last u64 so that they

View File

@ -18,7 +18,7 @@ pub struct Mutex<T: ?Sized> {
}
impl<T> Mutex<T> {
/// Create a new mutex.
/// Creates a new mutex.
pub const fn new(val: T) -> Self {
Self {
lock: AtomicBool::new(false),
@ -29,39 +29,43 @@ impl<T> Mutex<T> {
}
impl<T: ?Sized> Mutex<T> {
/// Acquire the mutex.
/// Acquires the mutex.
///
/// This method runs in a block way until the mutex can be acquired.
pub fn lock(&self) -> MutexGuard<T> {
self.queue.wait_until(|| self.try_lock())
}
/// Acquire the mutex through an [`Arc`].
/// Acquires the mutex through an [`Arc`].
///
/// The method is similar to [`Self::lock`], but it doesn't have the requirement
/// The method is similar to [`lock`], but it doesn't have the requirement
/// for compile-time checked lifetimes of the mutex guard.
///
/// [`lock`]: Self::lock
pub fn lock_arc(self: &Arc<Self>) -> ArcMutexGuard<T> {
self.queue.wait_until(|| self.try_lock_arc())
}
/// Try Acquire the mutex immedidately.
/// Tries acquiring the mutex immediately.
pub fn try_lock(&self) -> Option<MutexGuard<T>> {
// Cannot be reduced to `then_some`, or the possible dropping of the temporary
// guard will cause an unexpected unlock.
self.acquire_lock().then_some(MutexGuard { mutex: self })
}
/// Try acquire the mutex through an [`Arc`].
/// Tries acquiring the mutex through an [`Arc`].
///
/// The method is similar to [`Self::try_lock`], but it doesn't have the requirement
/// The method is similar to [`try_lock`], but it doesn't have the requirement
/// for compile-time checked lifetimes of the mutex guard.
///
/// [`try_lock`]: Self::try_lock
pub fn try_lock_arc(self: &Arc<Self>) -> Option<ArcMutexGuard<T>> {
self.acquire_lock().then(|| ArcMutexGuard {
mutex: self.clone(),
})
}
/// Release the mutex and wake up one thread which is blocked on this mutex.
/// Releases the mutex and wakes up one thread which is blocked on this mutex.
fn unlock(&self) {
self.release_lock();
self.queue.wake_one();
@ -95,7 +99,7 @@ pub struct MutexGuard_<T: ?Sized, R: Deref<Target = Mutex<T>>> {
/// A guard that provides exclusive access to the data protected by a [`Mutex`].
pub type MutexGuard<'a, T> = MutexGuard_<T, &'a Mutex<T>>;
/// An guard that provides exclusive access to the data protected by a [`Arc<Mutex>`].
/// A guard that provides exclusive access to the data protected by an `Arc<Mutex>`.
pub type ArcMutexGuard<T> = MutexGuard_<T, Arc<Mutex<T>>>;
impl<T: ?Sized, R: Deref<Target = Mutex<T>>> Deref for MutexGuard_<T, R> {

View File

@ -43,9 +43,9 @@ use crate::{
/// where a decision to write is made after reading.
///
/// The type parameter `T` represents the data that this lock is protecting.
/// It is necessary for `T` to satisfy `Send` to be shared across tasks and
/// `Sync` to permit concurrent access via readers. The `Deref` method (and
/// `DerefMut` for the writer) is implemented for the RAII guards returned
/// It is necessary for `T` to satisfy [`Send`] to be shared across tasks and
/// [`Sync`] to permit concurrent access via readers. The [`Deref`] method (and
/// [`DerefMut`] for the writer) is implemented for the RAII guards returned
/// by the locking methods, which allows for the access to the protected data
/// while the lock is held.
///
@ -126,7 +126,7 @@ impl<T> RwLock<T> {
}
impl<T: ?Sized> RwLock<T> {
/// Acquire a read lock while disabling the local IRQs and spin-wait
/// Acquires a read lock while disabling the local IRQs and spin-waits
/// until it can be acquired.
///
/// The calling thread will spin-wait until there are no writers or
@ -144,11 +144,11 @@ impl<T: ?Sized> RwLock<T> {
}
}
/// Acquire a write lock while disabling the local IRQs and spin-wait
/// Acquires a write lock while disabling the local IRQs and spin-waits
/// until it can be acquired.
///
/// The calling thread will spin-wait until there are no other writers,
/// , upreaders or readers present. There is no guarantee for the order
/// upreaders or readers present. There is no guarantee for the order
/// in which other readers or writers waiting simultaneously will
/// obtain the lock. Once this lock is acquired, the calling thread
/// will not be interrupted.
@ -162,7 +162,7 @@ impl<T: ?Sized> RwLock<T> {
}
}
/// Acquire an upgradeable reader (upreader) while disabling local IRQs
/// Acquires an upgradeable reader (upreader) while disabling local IRQs
/// and spin-waits until it can be acquired.
///
/// The calling thread will spin-wait until there are no other writers,
@ -184,7 +184,7 @@ impl<T: ?Sized> RwLock<T> {
}
}
/// Attempt to acquire a read lock while disabling local IRQs.
/// Attempts to acquire a read lock while disabling local IRQs.
///
/// This function will never spin-wait and will return immediately. When
/// multiple readers or writers attempt to acquire the lock, this method
@ -204,7 +204,7 @@ impl<T: ?Sized> RwLock<T> {
}
}
/// Attempt to acquire a write lock while disabling local IRQs.
/// Attempts to acquire a write lock while disabling local IRQs.
///
/// This function will never spin-wait and will return immediately. When
/// multiple readers or writers attempt to acquire the lock, this method
@ -226,7 +226,7 @@ impl<T: ?Sized> RwLock<T> {
}
}
/// Attempt to acquire a upread lock while disabling local IRQs.
/// Attempts to acquire an upread lock while disabling local IRQs.
///
/// This function will never spin-wait and will return immediately. When
/// multiple readers or writers attempt to acquire the lock, this method
@ -246,7 +246,7 @@ impl<T: ?Sized> RwLock<T> {
None
}
/// Acquire a read lock and spin-wait until it can be acquired.
/// Acquires a read lock and spin-waits until it can be acquired.
///
/// The calling thread will spin-wait until there are no writers or
/// upgrading upreaders present. There is no guarantee for the order
@ -254,10 +254,12 @@ impl<T: ?Sized> RwLock<T> {
/// obtain the lock.
///
/// This method does not disable interrupts, so any locks related to
/// interrupt context should avoid using this method, and use `read_irq_disabled`
/// interrupt context should avoid using this method, and use [`read_irq_disabled`]
/// instead. When IRQ handlers are allowed to be executed while holding
/// this lock, it is preferable to use this method over the `read_irq_disabled`
/// this lock, it is preferable to use this method over the [`read_irq_disabled`]
/// method as it has a higher efficiency.
///
/// [`read_irq_disabled`]: Self::read_irq_disabled
pub fn read(&self) -> RwLockReadGuard<T> {
loop {
if let Some(readguard) = self.try_read() {
@ -268,10 +270,12 @@ impl<T: ?Sized> RwLock<T> {
}
}
/// Acquire a read lock through an [`Arc`].
/// Acquires a read lock through an [`Arc`].
///
/// The method is similar to [`Self::read`], but it doesn't have the requirement
/// The method is similar to [`read`], but it doesn't have the requirement
/// for compile-time checked lifetimes of the read guard.
///
/// [`read`]: Self::read
pub fn read_arc(self: &Arc<Self>) -> ArcRwLockReadGuard<T> {
loop {
if let Some(readguard) = self.try_read_arc() {
@ -282,18 +286,20 @@ impl<T: ?Sized> RwLock<T> {
}
}
/// Acquire a write lock and spin-wait until it can be acquired.
/// Acquires a write lock and spin-waits until it can be acquired.
///
/// The calling thread will spin-wait until there are no other writers,
/// , upreaders or readers present. There is no guarantee for the order
/// upreaders or readers present. There is no guarantee for the order
/// in which other readers or writers waiting simultaneously will
/// obtain the lock.
///
/// This method does not disable interrupts, so any locks related to
/// interrupt context should avoid using this method, and use `write_irq_disabled`
/// interrupt context should avoid using this method, and use [`write_irq_disabled`]
/// instead. When IRQ handlers are allowed to be executed while holding
/// this lock, it is preferable to use this method over the `write_irq_disabled`
/// this lock, it is preferable to use this method over the [`write_irq_disabled`]
/// method as it has a higher efficiency.
///
/// [`write_irq_disabled`]: Self::write_irq_disabled
pub fn write(&self) -> RwLockWriteGuard<T> {
loop {
if let Some(writeguard) = self.try_write() {
@ -304,10 +310,12 @@ impl<T: ?Sized> RwLock<T> {
}
}
/// Acquire a write lock through an [`Arc`].
/// Acquires a write lock through an [`Arc`].
///
/// The method is similar to [`Self::write`], but it doesn't have the requirement
/// The method is similar to [`write`], but it doesn't have the requirement
/// for compile-time checked lifetimes of the lock guard.
///
/// [`write`]: Self::write
pub fn write_arc(self: &Arc<Self>) -> ArcRwLockWriteGuard<T> {
loop {
if let Some(writeguard) = self.try_write_arc() {
@ -318,7 +326,7 @@ impl<T: ?Sized> RwLock<T> {
}
}
/// Acquire an upreader and spin-wait until it can be acquired.
/// Acquires an upreader and spin-waits until it can be acquired.
///
/// The calling thread will spin-wait until there are no other writers,
/// or upreaders. There is no guarantee for the order in which other
@ -330,10 +338,12 @@ impl<T: ?Sized> RwLock<T> {
/// upgrade method.
///
/// This method does not disable interrupts, so any locks related to
/// interrupt context should avoid using this method, and use `upread_irq_disabled`
/// interrupt context should avoid using this method, and use [`upread_irq_disabled`]
/// instead. When IRQ handlers are allowed to be executed while holding
/// this lock, it is preferable to use this method over the `upread_irq_disabled`
/// this lock, it is preferable to use this method over the [`upread_irq_disabled`]
/// method as it has a higher efficiency.
///
/// [`upread_irq_disabled`]: Self::upread_irq_disabled
pub fn upread(&self) -> RwLockUpgradeableGuard<T> {
loop {
if let Some(guard) = self.try_upread() {
@ -344,10 +354,12 @@ impl<T: ?Sized> RwLock<T> {
}
}
/// Acquire an upgradeable read lock through an [`Arc`].
/// Acquires an upgradeable read lock through an [`Arc`].
///
/// The method is similar to [`Self::upread`], but it doesn't have the requirement
/// The method is similar to [`upread`], but it doesn't have the requirement
/// for compile-time checked lifetimes of the lock guard.
///
/// [`upread`]: Self::upread
pub fn upread_arc(self: &Arc<Self>) -> ArcRwLockUpgradeableGuard<T> {
loop {
if let Some(guard) = self.try_upread_arc() {
@ -358,16 +370,18 @@ impl<T: ?Sized> RwLock<T> {
}
}
/// Attempt to acquire a read lock.
/// Attempts to acquire a read lock.
///
/// This function will never spin-wait and will return immediately.
///
/// This method does not disable interrupts, so any locks related to
/// interrupt context should avoid using this method, and use
/// `try_read_irq_disabled` instead. When IRQ handlers are allowed to
/// [`try_read_irq_disabled`] instead. When IRQ handlers are allowed to
/// be executed while holding this lock, it is preferable to use this
/// method over the `try_read_irq_disabled` method as it has a higher
/// method over the [`try_read_irq_disabled`] method as it has a higher
/// efficiency.
///
/// [`try_read_irq_disabled`]: Self::try_read_irq_disabled
pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
let guard = disable_preempt();
let lock = self.lock.fetch_add(READER, Acquire);
@ -382,10 +396,12 @@ impl<T: ?Sized> RwLock<T> {
}
}
/// Attempt to acquire an read lock through an [`Arc`].
/// Attempts to acquire a read lock through an [`Arc`].
///
/// The method is similar to [`Self::try_read`], but it doesn't have the requirement
/// The method is similar to [`try_read`], but it doesn't have the requirement
/// for compile-time checked lifetimes of the lock guard.
///
/// [`try_read`]: Self::try_read
pub fn try_read_arc(self: &Arc<Self>) -> Option<ArcRwLockReadGuard<T>> {
let guard = disable_preempt();
let lock = self.lock.fetch_add(READER, Acquire);
@ -400,16 +416,18 @@ impl<T: ?Sized> RwLock<T> {
}
}
/// Attempt to acquire a write lock.
/// Attempts to acquire a write lock.
///
/// This function will never spin-wait and will return immediately.
///
/// This method does not disable interrupts, so any locks related to
/// interrupt context should avoid using this method, and use
/// `try_write_irq_disabled` instead. When IRQ handlers are allowed to
/// [`try_write_irq_disabled`] instead. When IRQ handlers are allowed to
/// be executed while holding this lock, it is preferable to use this
/// method over the `try_write_irq_disabled` method as it has a higher
/// method over the [`try_write_irq_disabled`] method as it has a higher
/// efficiency.
///
/// [`try_write_irq_disabled`]: Self::try_write_irq_disabled
pub fn try_write(&self) -> Option<RwLockWriteGuard<T>> {
let guard = disable_preempt();
if self
@ -426,10 +444,12 @@ impl<T: ?Sized> RwLock<T> {
}
}
/// Attempt to acquire a write lock through an [`Arc`].
/// Attempts to acquire a write lock through an [`Arc`].
///
/// The method is similar to [`Self::try_write`], but it doesn't have the requirement
/// The method is similar to [`try_write`], but it doesn't have the requirement
/// for compile-time checked lifetimes of the lock guard.
///
/// [`try_write`]: Self::try_write
fn try_write_arc(self: &Arc<Self>) -> Option<ArcRwLockWriteGuard<T>> {
let guard = disable_preempt();
if self
@ -446,16 +466,18 @@ impl<T: ?Sized> RwLock<T> {
}
}
/// Attempt to acquire an upread lock.
/// Attempts to acquire an upread lock.
///
/// This function will never spin-wait and will return immediately.
///
/// This method does not disable interrupts, so any locks related to
/// interrupt context should avoid using this method, and use
/// `try_upread_irq_disabled` instead. When IRQ handlers are allowed to
/// [`try_upread_irq_disabled`] instead. When IRQ handlers are allowed to
/// be executed while holding this lock, it is preferable to use this
/// method over the `try_upread_irq_disabled` method as it has a higher
/// method over the [`try_upread_irq_disabled`] method as it has a higher
/// efficiency.
///
/// [`try_upread_irq_disabled`]: Self::try_upread_irq_disabled
pub fn try_upread(&self) -> Option<RwLockUpgradeableGuard<T>> {
let guard = disable_preempt();
let lock = self.lock.fetch_or(UPGRADEABLE_READER, Acquire) & (WRITER | UPGRADEABLE_READER);
@ -470,10 +492,12 @@ impl<T: ?Sized> RwLock<T> {
None
}
/// Attempt to acquire an upgradeable read lock through an [`Arc`].
/// Attempts to acquire an upgradeable read lock through an [`Arc`].
///
/// The method is similar to [`Self::try_upread`], but it doesn't have the requirement
/// The method is similar to [`try_upread`], but it doesn't have the requirement
/// for compile-time checked lifetimes of the lock guard.
///
/// [`try_upread`]: Self::try_upread
pub fn try_upread_arc(self: &Arc<Self>) -> Option<ArcRwLockUpgradeableGuard<T>> {
let guard = disable_preempt();
let lock = self.lock.fetch_or(UPGRADEABLE_READER, Acquire) & (WRITER | UPGRADEABLE_READER);
@ -547,7 +571,7 @@ pub struct RwLockReadGuard_<T: ?Sized, R: Deref<Target = RwLock<T>> + Clone> {
/// A guard that provides shared read access to the data protected by a [`RwLock`].
pub type RwLockReadGuard<'a, T> = RwLockReadGuard_<T, &'a RwLock<T>>;
/// A guard that provides shared read access to the data protected by a [`Arc<RwLock>`].
/// A guard that provides shared read access to the data protected by a `Arc<RwLock>`.
pub type ArcRwLockReadGuard<T> = RwLockReadGuard_<T, Arc<RwLock<T>>>;
impl<T: ?Sized, R: Deref<Target = RwLock<T>> + Clone> Deref for RwLockReadGuard_<T, R> {
@ -580,7 +604,7 @@ pub struct RwLockWriteGuard_<T: ?Sized, R: Deref<Target = RwLock<T>> + Clone> {
/// A guard that provides exclusive write access to the data protected by a [`RwLock`].
pub type RwLockWriteGuard<'a, T> = RwLockWriteGuard_<T, &'a RwLock<T>>;
/// A guard that provides exclusive write access to the data protected by a [`Arc<RwLock>`].
/// A guard that provides exclusive write access to the data protected by a `Arc<RwLock>`.
pub type ArcRwLockWriteGuard<T> = RwLockWriteGuard_<T, Arc<RwLock<T>>>;
impl<T: ?Sized, R: Deref<Target = RwLock<T>> + Clone> Deref for RwLockWriteGuard_<T, R> {
@ -651,11 +675,11 @@ pub struct RwLockUpgradeableGuard_<T: ?Sized, R: Deref<Target = RwLock<T>> + Clo
/// A upgradable guard that provides read access to the data protected by a [`RwLock`].
pub type RwLockUpgradeableGuard<'a, T> = RwLockUpgradeableGuard_<T, &'a RwLock<T>>;
/// A upgradable guard that provides read access to the data protected by a [`Arc<RwLock>`].
/// A upgradable guard that provides read access to the data protected by a `Arc<RwLock>`.
pub type ArcRwLockUpgradeableGuard<T> = RwLockUpgradeableGuard_<T, Arc<RwLock<T>>>;
impl<T: ?Sized, R: Deref<Target = RwLock<T>> + Clone> RwLockUpgradeableGuard_<T, R> {
/// Upgrade this upread guard to a write guard atomically.
/// Upgrades this upread guard to a write guard atomically.
///
/// After calling this method, subsequent readers will be blocked
/// while previous readers remain unaffected. The calling thread

View File

@ -32,9 +32,9 @@ use super::WaitQueue;
/// scenarios where a decision to write is made after reading.
///
/// The type parameter `T` represents the data that this mutex is protecting.
/// It is necessary for `T` to satisfy `Send` to be shared across tasks and
/// `Sync` to permit concurrent access via readers. The `Deref` method (and
/// `DerefMut` for the writer) is implemented for the RAII guards returned
/// It is necessary for `T` to satisfy [`Send`] to be shared across tasks and
/// [`Sync`] to permit concurrent access via readers. The [`Deref`] method (and
/// [`DerefMut`] for the writer) is implemented for the RAII guards returned
/// by the locking methods, which allows for the access to the protected data
/// while the mutex is held.
///
@ -115,30 +115,30 @@ impl<T> RwMutex<T> {
}
impl<T: ?Sized> RwMutex<T> {
/// Acquire a read mutex and sleep until it can be acquired.
/// Acquires a read mutex and sleeps until it can be acquired.
///
/// The calling thread will sleep until there are no writers or upgrading
/// upreaders present. The implementation of `WaitQueue` guarantees the
/// upreaders present. The implementation of [`WaitQueue`] guarantees the
/// order in which other concurrent readers or writers waiting simultaneously
/// will acquire the mutex.
pub fn read(&self) -> RwMutexReadGuard<T> {
self.queue.wait_until(|| self.try_read())
}
/// Acquire a write mutex and sleep until it can be acquired.
/// Acquires a write mutex and sleeps until it can be acquired.
///
/// The calling thread will sleep until there are no writers, upreaders,
/// or readers present. The implementation of `WaitQueue` guarantees the
/// or readers present. The implementation of [`WaitQueue`] guarantees the
/// order in which other concurrent readers or writers waiting simultaneously
/// will acquire the mutex.
pub fn write(&self) -> RwMutexWriteGuard<T> {
self.queue.wait_until(|| self.try_write())
}
/// Acquire a upread mutex and sleep until it can be acquired.
/// Acquires an upread mutex and sleeps until it can be acquired.
///
/// The calling thread will sleep until there are no writers or upreaders present.
/// The implementation of `WaitQueue` guarantees the order in which other concurrent
/// The implementation of [`WaitQueue`] guarantees the order in which other concurrent
/// readers or writers waiting simultaneously will acquire the mutex.
///
/// Upreader will not block new readers until it tries to upgrade. Upreader
@ -149,7 +149,7 @@ impl<T: ?Sized> RwMutex<T> {
self.queue.wait_until(|| self.try_upread())
}
/// Attempt to acquire a read mutex.
/// Attempts to acquire a read mutex.
///
/// This function will never sleep and will return immediately.
pub fn try_read(&self) -> Option<RwMutexReadGuard<T>> {
@ -162,7 +162,7 @@ impl<T: ?Sized> RwMutex<T> {
}
}
/// Attempt to acquire a write mutex.
/// Attempts to acquire a write mutex.
///
/// This function will never sleep and will return immediately.
pub fn try_write(&self) -> Option<RwMutexWriteGuard<T>> {
@ -177,7 +177,7 @@ impl<T: ?Sized> RwMutex<T> {
}
}
/// Attempt to acquire a upread mutex.
/// Attempts to acquire a upread mutex.
///
/// This function will never sleep and will return immediately.
pub fn try_upread(&self) -> Option<RwMutexUpgradeableGuard<T>> {
@ -227,7 +227,7 @@ pub struct RwMutexReadGuard_<T: ?Sized, R: Deref<Target = RwMutex<T>>> {
/// A guard that provides shared read access to the data protected by a [`RwMutex`].
pub type RwMutexReadGuard<'a, T> = RwMutexReadGuard_<T, &'a RwMutex<T>>;
/// A guard that provides shared read access to the data protected by a [`Arc<RwMutex>`].
/// A guard that provides shared read access to the data protected by a `Arc<RwMutex>`.
pub type ArcRwMutexReadGuard<T> = RwMutexReadGuard_<T, Arc<RwMutex<T>>>;
impl<T: ?Sized, R: Deref<Target = RwMutex<T>>> Deref for RwMutexReadGuard_<T, R> {
@ -254,7 +254,7 @@ pub struct RwMutexWriteGuard_<T: ?Sized, R: Deref<Target = RwMutex<T>>> {
/// A guard that provides exclusive write access to the data protected by a [`RwMutex`].
pub type RwMutexWriteGuard<'a, T> = RwMutexWriteGuard_<T, &'a RwMutex<T>>;
/// A guard that provides exclusive write access to the data protected by a [`Arc<RwMutex>`].
/// A guard that provides exclusive write access to the data protected by a `Arc<RwMutex>`.
pub type ArcRwMutexWriteGuard<T> = RwMutexWriteGuard_<T, Arc<RwMutex<T>>>;
impl<T: ?Sized, R: Deref<Target = RwMutex<T>>> Deref for RwMutexWriteGuard_<T, R> {
@ -314,18 +314,18 @@ impl<T: ?Sized, R: Deref<Target = RwMutex<T>>> Drop for RwMutexWriteGuard_<T, R>
}
/// A guard that provides immutable data access but can be atomically
/// upgraded to `RwMutexWriteGuard`.
/// upgraded to [`RwMutexWriteGuard`].
pub struct RwMutexUpgradeableGuard_<T: ?Sized, R: Deref<Target = RwMutex<T>>> {
inner: R,
}
/// A upgradable guard that provides read access to the data protected by a [`RwMutex`].
pub type RwMutexUpgradeableGuard<'a, T> = RwMutexUpgradeableGuard_<T, &'a RwMutex<T>>;
/// A upgradable guard that provides read access to the data protected by a [`Arc<RwMutex>`].
/// A upgradable guard that provides read access to the data protected by a `Arc<RwMutex>`.
pub type ArcRwMutexUpgradeableGuard<T> = RwMutexUpgradeableGuard_<T, Arc<RwMutex<T>>>;
impl<T: ?Sized, R: Deref<Target = RwMutex<T>> + Clone> RwMutexUpgradeableGuard_<T, R> {
/// Upgrade this upread guard to a write guard atomically.
/// Upgrades this upread guard to a write guard atomically.
///
/// After calling this method, subsequent readers will be blocked
/// while previous readers remain unaffected.

View File

@ -32,7 +32,7 @@ impl<T> SpinLock<T> {
}
impl<T: ?Sized> SpinLock<T> {
/// Acquire the spin lock with disabling the local IRQs. This is the most secure
/// Acquires the spin lock with disabling the local IRQs. This is the most secure
/// locking way.
///
/// This method runs in a busy loop until the lock can be acquired.
@ -46,7 +46,7 @@ impl<T: ?Sized> SpinLock<T> {
}
}
/// Try acquiring the spin lock immedidately with disabling the local IRQs.
/// Tries acquiring the spin lock immediately with disabling the local IRQs.
pub fn try_lock_irq_disabled(&self) -> Option<SpinLockGuard<T>> {
let irq_guard = disable_local();
if self.try_acquire_lock() {
@ -59,14 +59,16 @@ impl<T: ?Sized> SpinLock<T> {
None
}
/// Acquire the spin lock without disabling local IRQs.
/// Acquires the spin lock without disabling local IRQs.
///
/// This method is twice as fast as the `lock_irq_disabled` method.
/// So prefer using this method over the `lock_irq_disabled` method
/// This method is twice as fast as the [`lock_irq_disabled`] method.
/// So prefer using this method over the [`lock_irq_disabled`] method
/// when IRQ handlers are allowed to get executed while
/// holding this lock. For example, if a lock is never used
/// in the interrupt context, then it is ok to use this method
/// in the process context.
///
/// [`lock_irq_disabled`]: Self::lock_irq_disabled
pub fn lock(&self) -> SpinLockGuard<T> {
let guard = disable_preempt();
self.acquire_lock();
@ -76,10 +78,12 @@ impl<T: ?Sized> SpinLock<T> {
}
}
/// Acquire the spin lock through an [`Arc`].
/// Acquires the spin lock through an [`Arc`].
///
/// The method is similar to [`Self::lock`], but it doesn't have the requirement
/// The method is similar to [`lock`], but it doesn't have the requirement
/// for compile-time checked lifetimes of the lock guard.
///
/// [`lock`]: Self::lock
pub fn lock_arc(self: &Arc<Self>) -> ArcSpinLockGuard<T> {
let guard = disable_preempt();
self.acquire_lock();
@ -89,7 +93,7 @@ impl<T: ?Sized> SpinLock<T> {
}
}
/// Try acquiring the spin lock immedidately without disabling the local IRQs.
/// Tries acquiring the spin lock immediately without disabling the local IRQs.
pub fn try_lock(&self) -> Option<SpinLockGuard<T>> {
let guard = disable_preempt();
if self.try_acquire_lock() {
@ -102,7 +106,7 @@ impl<T: ?Sized> SpinLock<T> {
None
}
/// Access the spin lock, otherwise busy waiting
/// Acquires the spin lock, otherwise busy waiting
fn acquire_lock(&self) {
while !self.try_acquire_lock() {
core::hint::spin_loop();
@ -137,7 +141,7 @@ enum InnerGuard {
/// A guard that provides exclusive access to the data protected by a [`SpinLock`].
pub type SpinLockGuard<'a, T> = SpinLockGuard_<T, &'a SpinLock<T>>;
/// A guard that provides exclusive access to the data protected by a [`Arc<SpinLock>`].
/// A guard that provides exclusive access to the data protected by a `Arc<SpinLock>`.
pub type ArcSpinLockGuard<T> = SpinLockGuard_<T, Arc<SpinLock<T>>>;
/// The guard of a spin lock that disables the local IRQs.

View File

@ -66,13 +66,15 @@ pub(crate) fn get_idle_task_ctx_ptr() -> *mut TaskContext {
})
}
/// call this function to switch to other task by using GLOBAL_SCHEDULER
/// Calls this function to switch to another task by using GLOBAL_SCHEDULER
pub fn schedule() {
if let Some(task) = fetch_task() {
switch_to_task(task);
}
}
/// Preempts the `task`.
///
/// TODO: This interface of this method is error prone.
/// The method takes an argument for the current task to optimize its efficiency,
/// but the argument provided by the caller may not be the current task, really.
@ -91,7 +93,7 @@ pub fn preempt(task: &Arc<Task>) {
switch_to_task(next_task);
}
/// call this function to switch to other task
/// Calls this function to switch to another task
///
/// if current task is none, then it will use the default task context and it will not return to this function again
///
@ -193,7 +195,7 @@ impl PreemptInfo {
}
}
/// a guard for disable preempt.
/// A guard for disabling preemption.
pub struct DisablePreemptGuard {
// This private field prevents user from constructing values of this type directly.
private: (),

View File

@ -27,16 +27,16 @@ pub const KERNEL_STACK_SIZE: usize = PAGE_SIZE * 64;
/// Trait for manipulating the task context.
pub trait TaskContextApi {
/// Set instruction pointer
/// Sets instruction pointer
fn set_instruction_pointer(&mut self, ip: usize);
/// Get instruction pointer
/// Gets instruction pointer
fn instruction_pointer(&self) -> usize;
/// Set stack pointer
/// Sets stack pointer
fn set_stack_pointer(&mut self, sp: usize);
/// Get stack pointer
/// Gets stack pointer
fn stack_pointer(&self) -> usize;
}
@ -53,7 +53,7 @@ impl KernelStack {
})
}
/// Generate a kernel stack with a guard page.
/// Generates a kernel stack with a guard page.
/// An additional page is allocated and regarded as a guard page, which should not be accessed.
pub fn new_with_guard_page() -> Result<Self> {
let stack_segment =
@ -140,7 +140,7 @@ impl Task {
current_task().unwrap()
}
/// get inner
/// Gets inner
pub(crate) fn inner_exclusive_access(&self) -> SpinLockGuard<TaskInner> {
self.task_inner.lock_irq_disabled()
}
@ -275,14 +275,14 @@ impl TaskOptions {
/// Sets the CPU affinity mask for the task.
///
/// The `cpu_affinity` parameter is an instance of the [`CpuSet`] struct
/// that represents the desired set of CPUs to run the task on.
/// The `cpu_affinity` parameter represents
/// the desired set of CPUs to run the task on.
pub fn cpu_affinity(mut self, cpu_affinity: CpuSet) -> Self {
self.cpu_affinity = cpu_affinity;
self
}
/// Build a new task without running it immediately.
/// Builds a new task without running it immediately.
pub fn build(self) -> Result<Arc<Task>> {
/// All tasks will enter this function.
/// This function is meant to execute the task_fn in Task.
@ -322,7 +322,7 @@ impl TaskOptions {
Ok(Arc::new(new_task))
}
/// Build a new task and run it immediately.
/// Builds a new task and runs it immediately.
pub fn spawn(self) -> Result<Arc<Task>> {
let task = self.build()?;
task.run();

View File

@ -62,12 +62,12 @@ impl IrqLine {
}
}
/// Get the IRQ number.
/// Gets the IRQ number.
pub fn num(&self) -> u8 {
self.irq_num
}
/// Register a callback that will be invoked when the IRQ is active.
/// Registers a callback that will be invoked when the IRQ is active.
///
/// For each IRQ line, multiple callbacks may be registered.
pub fn on_active<F>(&mut self, callback: F)
@ -105,15 +105,17 @@ impl Drop for IrqLine {
}
}
/// Disable all IRQs on the current CPU (i.e., locally).
/// Disables all IRQs on the current CPU (i.e., locally).
///
/// This function returns a guard object, which will automatically enable local IRQs again when
/// it is dropped. This function works correctly even when it is called in a _nested_ way.
/// The local IRQs shall only be re-enabled when the most outer guard is dropped.
///
/// This function can play nicely with `SpinLock` as the type uses this function internally.
/// This function can play nicely with [`SpinLock`] as the type uses this function internally.
/// One can invoke this function even after acquiring a spin lock. And the reversed order is also ok.
///
/// [`SpinLock`]: crate::sync::SpinLock
///
/// # Example
///
/// ```rust
@ -150,7 +152,7 @@ impl DisabledLocalIrqGuard {
}
}
/// Transfer the saved IRQ status of this guard to a new guard.
/// Transfers the saved IRQ status of this guard to a new guard.
/// The saved IRQ status of this guard is cleared.
pub fn transfer_to(&mut self) -> Self {
let was_enabled = self.was_enabled;
@ -170,6 +172,8 @@ impl Drop for DisabledLocalIrqGuard {
}
}
/// Enables all IRQs on the current CPU.
///
/// FIXME: The reason we need to add this API is that currently IRQs
/// are enabled when the CPU enters the user space for the first time,
/// which is too late. During the OS initialization phase,

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: MPL-2.0
//! Handle trap across kernel and user space.
//! Handles trap across kernel and user space.
mod handler;
mod irq;

View File

@ -1,5 +1,7 @@
// SPDX-License-Identifier: MPL-2.0
//! Software interrupt.
#![allow(unused_variables)]
use alloc::boxed::Box;
@ -31,7 +33,7 @@ use crate::{cpu_local, task::disable_preempt, CpuLocal};
/// // Enable the softirq line of this id.
/// SoftIrqLine::get(MY_SOFTIRQ_ID).enable(|| {
/// // Define the action to take when the softirq with MY_SOFTIRQ_ID is raised
/// ...
/// // ...
/// });
/// // Later on:
/// SoftIrqLine::get(MY_SOFTIRQ_ID).raise(); // This will trigger the registered callback
@ -75,7 +77,7 @@ impl SoftIrqLine {
/// Enables a softirq line by registering its callback.
///
/// # Panic
/// # Panics
///
/// Each softirq can only be enabled once.
pub fn enable<F>(&self, callback: F)
@ -94,7 +96,7 @@ impl SoftIrqLine {
}
}
/// A slice that stores the `SoftIrqLine`s, whose ID is equal to its offset in the slice.
/// A slice that stores the [`SoftIrqLine`]s, whose ID is equal to its offset in the slice.
static LINES: Once<[SoftIrqLine; SoftIrqLine::NR_LINES as usize]> = Once::new();
pub(super) fn init() {

View File

@ -35,12 +35,12 @@ impl UserSpace {
/// Returns the user mode that is bound to the current task and user space.
///
/// See `UserMode` on how to use it to execute user code.
/// See [`UserMode`] on how to use it to execute user code.
///
/// # Panic
/// # Panics
///
/// This method is intended to only allow each task to have at most one
/// instance of `UserMode` initiated. If this method is called again before
/// instance of [`UserMode`] initiated. If this method is called again before
/// the first instance for the current task is dropped, then the method
/// panics.
pub fn user_mode(&self) -> UserMode<'_> {
@ -48,7 +48,7 @@ impl UserSpace {
}
}
/// Specific architectures need to implement this trait. This should only used in `UserMode`
/// Specific architectures need to implement this trait. This should only be used in [`UserMode`]
///
/// Only visible in aster-frame
pub(crate) trait UserContextApiInternal {
@ -57,28 +57,28 @@ pub(crate) trait UserContextApiInternal {
where
F: FnMut() -> bool;
/// Use the information inside CpuContext to build a trapframe
/// Uses the information inside CpuContext to build a trapframe
fn as_trap_frame(&self) -> TrapFrame;
}
/// The common interface that every CPU architecture-specific `CpuContext` implements.
/// The common interface that every CPU architecture-specific [`UserContext`] implements.
pub trait UserContextApi {
/// Get the trap number of this interrupt.
/// Gets the trap number of this interrupt.
fn trap_number(&self) -> usize;
/// Get the trap error code of this interrupt.
/// Gets the trap error code of this interrupt.
fn trap_error_code(&self) -> usize;
/// Set instruction pointer
/// Sets the instruction pointer
fn set_instruction_pointer(&mut self, ip: usize);
/// Get instruction pointer
/// Gets the instruction pointer
fn instruction_pointer(&self) -> usize;
/// Set stack pointer
/// Sets the stack pointer
fn set_stack_pointer(&mut self, sp: usize);
/// Get stack pointer
/// Gets the stack pointer
fn stack_pointer(&self) -> usize;
}
@ -123,7 +123,7 @@ impl<'a> UserMode<'a> {
/// Starts executing in the user mode. Make sure current task is the task in `UserMode`.
///
/// The method returns for one of three possible reasons indicated by `ReturnReason`.
/// The method returns for one of three possible reasons indicated by [`ReturnReason`].
/// 1. A system call is issued by the user space;
/// 2. A CPU exception is triggered by the user space;
/// 3. A kernel event is pending, as indicated by the given closure.

View File

@ -4,6 +4,7 @@ use proc_macro::TokenStream;
use quote::quote;
use syn::{parse_macro_input, ItemFn};
/// The kernel entry point.
#[proc_macro_attribute]
pub fn aster_main(_attr: TokenStream, item: TokenStream) -> TokenStream {
let main_fn = parse_macro_input!(item as ItemFn);

View File

@ -115,7 +115,7 @@ impl IdAlloc {
/// Releases the consecutive range of allocated `id`s.
///
/// # Panic
/// # Panics
///
/// If the `range` is out of bounds, this method will panic.
pub fn free_consecutive(&mut self, range: Range<usize>) {
@ -136,7 +136,7 @@ impl IdAlloc {
/// Releases the allocated `id`.
///
/// # Panic
/// # Panics
///
/// If the `id` is out of bounds, this method will panic.
pub fn free(&mut self, id: usize) {
@ -153,7 +153,7 @@ impl IdAlloc {
/// If the ID is already allocated, it returns `None`, otherwise it
/// returns the allocated ID.
///
/// # Panic
/// # Panics
///
/// If the `id` is out of bounds, this method will panic.
pub fn alloc_specific(&mut self, id: usize) -> Option<usize> {
@ -171,7 +171,7 @@ impl IdAlloc {
/// Returns true if the `id` is allocated.
///
/// # Panic
/// # Panics
///
/// If the `id` is out of bounds, this method will panic.
pub fn is_allocated(&self, id: usize) -> bool {

View File

@ -141,7 +141,7 @@ impl BlockGroup {
/// Inserts the inode into the inode cache.
///
/// # Panic
/// # Panics
///
/// If `inode_idx` has not been allocated before, then the method panics.
pub fn insert_cache(&self, inode_idx: u32, inode: Arc<Inode>) {
@ -163,7 +163,7 @@ impl BlockGroup {
/// Frees the allocated inode idx.
///
/// # Panic
/// # Panics
///
/// If `inode_idx` has not been allocated before, then the method panics.
pub fn free_inode(&self, inode_idx: u32, is_dir: bool) {
@ -192,7 +192,7 @@ impl BlockGroup {
/// Frees the consecutive range of allocated block indices.
///
/// # Panic
/// # Panics
///
/// If the `range` is out of bounds, this method will panic.
/// If one of the `idx` in `range` has not been allocated before, then the method panics.

View File

@ -14,7 +14,7 @@ pub struct BlockPtrs {
impl BlockPtrs {
/// Returns the direct block ID.
///
/// # Panic
/// # Panics
///
/// If the `idx` is out of bounds, this method will panic.
pub fn direct(&self, idx: usize) -> Ext2Bid {
@ -24,7 +24,7 @@ impl BlockPtrs {
/// Sets the direct block ID.
///
/// # Panic
/// # Panics
///
/// If the `idx` is out of bounds, this method will panic.
pub fn set_direct(&mut self, idx: usize, bid: Ext2Bid) {

View File

@ -33,7 +33,7 @@ impl BlocksHoleDesc {
/// Returns if the block `idx` is a hole.
///
/// # Panic
/// # Panics
///
/// If the `idx` is out of bounds, this method will panic.
pub fn is_hole(&self, idx: usize) -> bool {
@ -42,7 +42,7 @@ impl BlocksHoleDesc {
/// Marks the block `idx` as a hole.
///
/// # Panic
/// # Panics
///
/// If the `idx` is out of bounds, this method will panic.
pub fn set(&mut self, idx: usize) {
@ -51,7 +51,7 @@ impl BlocksHoleDesc {
/// Unmarks the block `idx` as a hole.
///
/// # Panic
/// # Panics
///
/// If the `idx` is out of bounds, this method will panic.
pub fn unset(&mut self, idx: usize) {

View File

@ -1326,7 +1326,7 @@ struct DeviceRangeReader<'a> {
impl<'a> DeviceRangeReader<'a> {
/// Creates a new reader.
///
/// # Panic
/// # Panics
///
/// If the 'range' is empty, this method will panic.
pub fn new(inode: &'a InodeImpl_, range: Range<Ext2Bid>) -> Result<Self> {

View File

@ -294,7 +294,7 @@ impl SuperBlock {
/// Returns the starting block id of the super block
/// inside the block group pointed by `block_group_idx`.
///
/// # Panic
/// # Panics
///
/// If `block_group_idx` is neither 0 nor a backup block group index,
/// then the method panics.
@ -312,7 +312,7 @@ impl SuperBlock {
/// Returns the starting block id of the block group descriptor table
/// inside the block group pointed by `block_group_idx`.
///
/// # Panic
/// # Panics
///
/// If `block_group_idx` is neither 0 nor a backup block group index,
/// then the method panics.

View File

@ -29,6 +29,8 @@ pub trait FileLike: Send + Sync + Any {
///
/// The file must be seekable to support `read_at`.
/// Unlike [`read`], `read_at` will not change the file offset.
///
/// [`read`]: FileLike::read
fn read_at(&self, offset: usize, buf: &mut [u8]) -> Result<usize> {
return_errno_with_message!(Errno::EINVAL, "read_at is not supported");
}
@ -38,6 +40,8 @@ pub trait FileLike: Send + Sync + Any {
/// The file must be seekable to support `write_at`.
/// Unlike [`write`], `write_at` will not change the file offset.
/// If the file is append-only, the `offset` will be ignored.
///
/// [`write`]: FileLike::write
fn write_at(&self, offset: usize, buf: &[u8]) -> Result<usize> {
return_errno_with_message!(Errno::EINVAL, "write_at is not supported");
}

View File

@ -28,7 +28,7 @@ pub struct Credentials<R = FullOp>(Arc<Credentials_>, R);
/// Gets read-only credentials of current thread.
///
/// # Panic
/// # Panics
///
/// This method should only be called in process context.
pub fn credentials() -> Credentials<ReadOp> {
@ -39,7 +39,7 @@ pub fn credentials() -> Credentials<ReadOp> {
/// Gets write-only credentials of current thread.
///
/// # Panic
/// # Panics
///
/// This method should only be called in process context.
pub fn credentials_mut() -> Credentials<WriteOp> {

View File

@ -44,7 +44,7 @@ impl JobControl {
/// Sets the terminal as the controlling terminal of the `session`.
///
/// # Panic
/// # Panics
///
/// This terminal should not belong to any session.
pub fn set_session(&self, session: &Arc<Session>) {
@ -54,7 +54,7 @@ impl JobControl {
/// Sets the terminal as the controlling terminal of the session of current process.
///
/// # Panic
/// # Panics
///
/// This function should only be called in process context.
pub fn set_current_session(&self) -> Result<()> {
@ -103,7 +103,7 @@ impl JobControl {
/// Sets the foreground process group.
///
/// # Panic
/// # Panics
///
/// The process group should belong to one session.
pub fn set_foreground(&self, process_group: Option<&Arc<ProcessGroup>>) -> Result<()> {
@ -136,7 +136,7 @@ impl JobControl {
/// Waits until the current process is the foreground process group. If
/// the foreground process group is None, returns true.
///
/// # Panic
/// # Panics
///
/// This function should only be called in process context.
pub fn wait_until_in_foreground(&self) -> Result<()> {

View File

@ -23,7 +23,7 @@ pub trait Terminal: Send + Sync + FileIo {
///
/// If the terminal is not controlling terminal, this method returns `ENOTTY`.
///
/// # Panic
/// # Panics
///
/// This method should be called in process context.
fn set_foreground(&self, pgid: &Pgid) -> Result<()> {
@ -40,7 +40,7 @@ pub trait Terminal: Send + Sync + FileIo {
/// Returns whether the terminal is the controlling terminal of current process.
///
/// # Panic
/// # Panics
///
/// This method should be called in process context.
fn is_controlling_terminal(&self) -> bool {
@ -58,7 +58,7 @@ pub trait Terminal: Send + Sync + FileIo {
/// If self is not session leader, or the terminal is controlling terminal of other session,
/// or the session already has controlling terminal, this method returns `EPERM`.
///
/// # Panic
/// # Panics
///
/// This method should only be called in process context.
fn set_current_session(&self) -> Result<()> {
@ -80,7 +80,7 @@ pub trait Terminal: Send + Sync + FileIo {
///
/// If the terminal is not the controlling terminal of the session, this method will return `ENOTTY`.
///
/// # Panic
/// # Panics
///
/// This method should only be called in process context.
fn release_current_session(&self) -> Result<()> {

View File

@ -73,7 +73,7 @@ impl Bio {
///
/// Returns a `BioWaiter` to the caller to wait for its completion.
///
/// # Panic
/// # Panics
///
/// The caller must not submit a `Bio` more than once. Otherwise, a panic shall be triggered.
pub fn submit(&self, block_device: &dyn BlockDevice) -> Result<BioWaiter, BioEnqueueError> {
@ -107,7 +107,7 @@ impl Bio {
///
/// Returns the result status of the `Bio`.
///
/// # Panic
/// # Panics
///
/// The caller must not submit a `Bio` more than once. Otherwise, a panic shall be triggered.
pub fn submit_sync(
@ -169,7 +169,7 @@ impl BioWaiter {
/// Gets the `index`-th `Bio` request associated with `self`.
///
/// # Panic
/// # Panics
///
/// If the `index` is out of bounds, this method will panic.
pub fn req(&self, index: usize) -> Bio {
@ -178,7 +178,7 @@ impl BioWaiter {
/// Returns the status of the `index`-th `Bio` request associated with `self`.
///
/// # Panic
/// # Panics
///
/// If the `index` is out of bounds, this method will panic.
pub fn status(&self, index: usize) -> BioStatus {

View File

@ -182,7 +182,7 @@ impl BioRequest {
///
/// The merged `SubmittedBio` can only be placed at the front or back.
///
/// # Panic
/// # Panics
///
/// If the `SubmittedBio` can not be merged, this method will panic.
pub fn merge_bio(&mut self, rq_bio: SubmittedBio) {