Mirror of https://github.com/asterinas/asterinas.git (synced 2025-06-21 16:33:24 +00:00)
Fix all spelling mistakes in history by typos tool
Committed by Tate, Hongliang Tian
Parent b29d3b5409 · Commit 86f09eef75
@@ -326,7 +326,7 @@ impl LineDiscipline {
 }
 } else {
 // raw mode
-// FIXME: avoid addtional bound check
+// FIXME: avoid additional bound check
 *dst_i = next_char;
 read_len += 1;
 }
@@ -73,7 +73,7 @@ impl EpollFile {
 let mask = ep_event.events;
 let entry = EpollEntry::new(fd, weak_file, ep_event, ep_flags, self.weak_self.clone());

-// Add the new entry to the interest list and start monitering its events
+// Add the new entry to the interest list and start monitoring its events
 let mut interest = self.interest.lock();
 if interest.contains_key(&fd) {
 return_errno_with_message!(Errno::EEXIST, "the fd has been added");
@@ -144,7 +144,7 @@ impl ExfatBitmap {
 .fs()
 .is_cluster_range_valid(search_start_cluster..search_start_cluster + num_clusters)
 {
-return_errno_with_message!(Errno::ENOSPC, "free contigous clusters not avalable.")
+return_errno_with_message!(Errno::ENOSPC, "free contiguous clusters not available.")
 }

 let mut cur_index = search_start_cluster - EXFAT_RESERVED_CLUSTERS;
@@ -246,11 +246,11 @@ impl ExfatDentrySet {
 create_utc_offset: dos_time.utc_offset,
 create_date: dos_time.date,
 create_time: dos_time.time,
-create_time_cs: dos_time.increament_10ms,
+create_time_cs: dos_time.increment_10ms,
 modify_utc_offset: dos_time.utc_offset,
 modify_date: dos_time.date,
 modify_time: dos_time.time,
-modify_time_cs: dos_time.increament_10ms,
+modify_time_cs: dos_time.increment_10ms,
 access_utc_offset: dos_time.utc_offset,
 access_date: dos_time.date,
 access_time: dos_time.time,
@@ -403,7 +403,7 @@ impl ExfatDentrySet {
 }
 Ok(name)
 }
-/// Name dentries are not permited to modify. We should create a new dentry set for renaming.
+/// Name dentries are not permitted to modify. We should create a new dentry set for renaming.

 fn calculate_checksum(&self) -> u16 {
 const CHECKSUM_BYTES_RANGE: Range<usize> = 2..4;
@@ -505,7 +505,7 @@ impl Iterator for ExfatDentryIterator {

 #[repr(C, packed)]
 #[derive(Clone, Debug, Default, Copy, Pod)]
-// For files & directorys
+// For files & directories
 pub(super) struct ExfatFileDentry {
 pub(super) dentry_type: u8, // 0x85
 // Number of Secondary directory entries.
@@ -635,7 +635,7 @@ pub(super) struct ExfatGenericSecondaryDentry {
 #[derive(Clone, Debug, Default, Copy, Pod)]
 pub(super) struct ExfatDeletedDentry {
 pub(super) dentry_type: u8,
-pub(super) reserverd: [u8; 31],
+pub(super) reserved: [u8; 31],
 }

 #[derive(Default, Debug)]
@@ -52,7 +52,7 @@ bitflags! {
 const SYSTEM = 0x0004;
 /// This inode represents a volume. This attribute is not supported in our implementation.
 const VOLUME = 0x0008;
-/// This inode reprents a directory.
+/// This inode represents a directory.
 const DIRECTORY = 0x0010;
 /// This file has been touched since the last DOS backup was performed on it. This attribute is not supported in our implementation.
 const ARCHIVE = 0x0020;
@@ -187,7 +187,7 @@ impl ExfatInodeInner {
 self.fs().find_opened_inode(self.parent_hash)
 }

-/// Get physical sector id from logical sector id fot this Inode.
+/// Get physical sector id from logical sector id for this Inode.
 fn get_sector_id(&self, sector_id: usize) -> Result<usize> {
 let chain_offset = self
 .start_chain
@@ -315,12 +315,12 @@ impl ExfatInodeInner {
 file_dentry.create_utc_offset = self.ctime.utc_offset;
 file_dentry.create_date = self.ctime.date;
 file_dentry.create_time = self.ctime.time;
-file_dentry.create_time_cs = self.ctime.increament_10ms;
+file_dentry.create_time_cs = self.ctime.increment_10ms;

 file_dentry.modify_utc_offset = self.mtime.utc_offset;
 file_dentry.modify_date = self.mtime.date;
 file_dentry.modify_time = self.mtime.time;
-file_dentry.modify_time_cs = self.mtime.increament_10ms;
+file_dentry.modify_time_cs = self.mtime.increment_10ms;

 file_dentry.access_utc_offset = self.atime.utc_offset;
 file_dentry.access_date = self.atime.date;
@@ -692,11 +692,11 @@ impl ExfatInode {
 parent_hash: usize,
 fs_guard: &MutexGuard<()>,
 ) -> Result<Arc<ExfatInode>> {
-const EXFAT_MIMIMUM_DENTRY: usize = 3;
+const EXFAT_MINIMUM_DENTRY: usize = 3;

 let ino = fs.alloc_inode_number();

-if dentry_set.len() < EXFAT_MIMIMUM_DENTRY {
+if dentry_set.len() < EXFAT_MINIMUM_DENTRY {
 return_errno_with_message!(Errno::EINVAL, "invalid dentry length")
 }

@@ -1313,7 +1313,7 @@ impl Inode for ExfatInode {
 new_size.max(file_size)
 };

-// Locks released here, so that file write can be parallized.
+// Locks released here, so that file write can be parallelized.
 let inner = self.inner.upread();
 inner.page_cache.pages().write(offset, reader)?;

@@ -263,7 +263,7 @@ mod test {
 info!("Successfully creating and reading {} files", file_id + 1);
 }

-//Test skiped readdir.
+//Test skipped readdir.
 let mut sub_inodes: Vec<String> = Vec::new();
 let _ = root.readdir_at(file_names.len() / 3 + 2, &mut sub_inodes);

@@ -956,7 +956,7 @@ mod test {
 let resize_too_large = f.resize(initial_free_clusters as usize * cluster_size + 1);
 assert!(
 resize_too_large.is_err() && fs.num_free_clusters() == initial_free_clusters,
-"Fail to deal with a memeory overflow allocation"
+"Fail to deal with a memory overflow allocation"
 );

 // Try to allocate a file of exactly the same size as the remaining spaces. This will succeed.
@@ -47,11 +47,11 @@ const EXFAT_TIME_ZONE_VALID: u8 = 1 << 7;

 #[derive(Default, Debug, Clone, Copy)]
 pub struct DosTimestamp {
-// Timestamp at the precesion of double seconds.
+// Timestamp at the precision of double seconds.
 pub(super) time: u16,
 pub(super) date: u16,
 // Precise time in 10ms.
-pub(super) increament_10ms: u8,
+pub(super) increment_10ms: u8,
 pub(super) utc_offset: u8,
 }

@@ -73,11 +73,11 @@ impl DosTimestamp {
 }
 }

-pub fn new(time: u16, date: u16, increament_10ms: u8, utc_offset: u8) -> Result<Self> {
+pub fn new(time: u16, date: u16, increment_10ms: u8, utc_offset: u8) -> Result<Self> {
 let time = Self {
 time,
 date,
-increament_10ms,
+increment_10ms,
 utc_offset,
 };
 Ok(time)
@@ -102,13 +102,13 @@ impl DosTimestamp {
 | ((date_time.day() as u16) << DAY_RANGE.start);

 const NSEC_PER_10MSEC: u32 = 10000000;
-let increament_10ms =
+let increment_10ms =
 (date_time.second() as u32 % 2 * 100 + date_time.nanosecond() / NSEC_PER_10MSEC) as u8;

 Ok(Self {
 time,
 date,
-increament_10ms,
+increment_10ms,
 utc_offset: 0,
 })
 }
@@ -144,15 +144,15 @@ impl DosTimestamp {
 let mut sec = date_time.assume_utc().unix_timestamp() as u64;

 let mut nano_sec: u32 = 0;
-if self.increament_10ms != 0 {
+if self.increment_10ms != 0 {
 const NSEC_PER_MSEC: u32 = 1000000;
-sec += self.increament_10ms as u64 / 100;
-nano_sec = (self.increament_10ms as u32 % 100) * 10 * NSEC_PER_MSEC;
+sec += self.increment_10ms as u64 / 100;
+nano_sec = (self.increment_10ms as u32 % 100) * 10 * NSEC_PER_MSEC;
 }

 /* Adjust timezone to UTC0. */
 if (self.utc_offset & EXFAT_TIME_ZONE_VALID) != 0u8 {
-sec = Self::ajust_time_zone(sec, self.utc_offset & (!EXFAT_TIME_ZONE_VALID));
+sec = Self::adjust_time_zone(sec, self.utc_offset & (!EXFAT_TIME_ZONE_VALID));
 } else {
 // TODO: Use mount info for timezone adjustment.
 }
@@ -160,7 +160,7 @@ impl DosTimestamp {
 Ok(Duration::new(sec, nano_sec))
 }

-fn ajust_time_zone(sec: u64, time_zone: u8) -> u64 {
+fn adjust_time_zone(sec: u64, time_zone: u8) -> u64 {
 if time_zone <= 0x3F {
 sec + Self::time_zone_sec(time_zone)
 } else {
@@ -1485,7 +1485,7 @@ impl InodeImpl_ {

 /// Shrinks inode size.
 ///
-/// After the reduction, the size will be shrinked to `new_size`,
+/// After the reduction, the size will be shrunk to `new_size`,
 /// which may result in an decreased block count.
 fn shrink(&mut self, new_size: usize) {
 let new_blocks = self.desc.size_to_blocks(new_size);
@@ -133,7 +133,7 @@ impl TryFrom<RawSuperBlock> for SuperBlock {
 check_interval: Duration::from_secs(sb.check_interval as _),
 creator_os: {
 let os_id = OsId::try_from(sb.creator_os)
-.map_err(|_| Error::with_message(Errno::EINVAL, "invalid creater os"))?;
+.map_err(|_| Error::with_message(Errno::EINVAL, "invalid creator os"))?;
 if os_id != OsId::Linux {
 return_errno_with_message!(Errno::EINVAL, "not supported os id");
 }
@@ -309,7 +309,7 @@ impl SuperBlock {
 Bid::new(super_block_bid as u64)
 }

-/// Returns the starting block id of the block group descripter table
+/// Returns the starting block id of the block group descriptor table
 /// inside the block group pointed by `block_group_idx`.
 ///
 /// # Panics
@@ -465,7 +465,7 @@ pub(super) struct RawSuperBlock {
 pub prealloc_dir_blocks: u8,
 padding1: u16,
 ///
-/// This fileds are for journaling support in Ext3.
+/// This fields are for journaling support in Ext3.
 ///
 /// Uuid of journal superblock.
 pub journal_uuid: [u8; 16],
@@ -2,7 +2,7 @@

 #![allow(unused_variables)]

-//! Opend File Handle
+//! Opened File Handle

 use crate::{
 events::{IoEvents, Observer},
@@ -2,7 +2,7 @@

 #![allow(unused_variables)]

-//! Opend Inode-backed File Handle
+//! Opened Inode-backed File Handle

 mod dyn_cap;
 mod static_cap;
@@ -315,7 +315,7 @@ impl<T: Copy, R: TRights> Fifo<T, R> {

 impl<T, R: TRights> Fifo<T, R> {
 /// Pushes an item into the endpoint.
-/// If the `push` method failes, this method will return
+/// If the `push` method fails, this method will return
 /// `Err` containing the item that hasn't been pushed
 #[require(R > Write)]
 pub fn push(&self, item: T) -> core::result::Result<(), T> {
@@ -72,7 +72,7 @@ impl PageCache {
 pub fn resize(&self, new_size: usize) -> Result<()> {
 // If the new size is smaller and not page-aligned,
 // first zero the gap between the new size and the
-// next page boundry (or the old size), if such a gap exists.
+// next page boundary (or the old size), if such a gap exists.
 let old_size = self.pages.size();
 if old_size > new_size && new_size % PAGE_SIZE != 0 {
 let gap_size = old_size.min(new_size.align_up(PAGE_SIZE)) - new_size;
@@ -153,7 +153,7 @@ impl DirInMemory {
 create_result.unwrap_err()
 );
 info!(
-" create {:?}/{:?}({:?}) succeeeded",
+" create {:?}/{:?}({:?}) succeeded",
 self.name, name, type_
 );

@@ -117,7 +117,7 @@ impl RangeLockItem {
 .range
 .set_start(new_start)
 .expect("invalid new start");
-if let FileRangeChange::Shrinked = change {
+if let FileRangeChange::Shrunk = change {
 self.wake_all();
 }
 }
@@ -126,7 +126,7 @@ impl RangeLockItem {
 /// If the range shrinks, it will wake all waiting processes
 pub fn set_end(&mut self, new_end: usize) {
 let change = self.range().set_end(new_end).expect("invalid new end");
-if let FileRangeChange::Shrinked = change {
+if let FileRangeChange::Shrunk = change {
 self.wake_all();
 }
 }
@@ -50,7 +50,7 @@ impl FileRange {
 let old_start = self.start;
 self.start = new_start;
 let change = match new_start {
-new_start if new_start > old_start => FileRangeChange::Shrinked,
+new_start if new_start > old_start => FileRangeChange::Shrunk,
 new_start if new_start < old_start => FileRangeChange::Expanded,
 _ => FileRangeChange::Same,
 };
@@ -64,7 +64,7 @@ impl FileRange {
 let old_end = self.end;
 self.end = new_end;
 let change = match new_end {
-new_end if new_end < old_end => FileRangeChange::Shrinked,
+new_end if new_end < old_end => FileRangeChange::Shrunk,
 new_end if new_end > old_end => FileRangeChange::Expanded,
 _ => FileRangeChange::Same,
 };
@@ -110,7 +110,7 @@ impl FileRange {
 pub enum FileRangeChange {
 Same,
 Expanded,
-Shrinked,
+Shrunk,
 }

 /// The position of a range (say A) relative another overlapping range (say B).
@@ -31,7 +31,7 @@ pub const SEMMNS: usize = SEMMNI * SEMMSL;
 pub const SEMOPM: usize = 500;
 /// MAximum semaphore value.
 pub const SEMVMX: i32 = 32767;
-/// Maximum value that can be recored for semaphore adjustment (SEM_UNDO).
+/// Maximum value that can be recorded for semaphore adjustment (SEM_UNDO).
 pub const SEMAEM: i32 = SEMVMX;

 #[derive(Debug)]
@@ -89,7 +89,7 @@ impl AnyBoundSocket {
 /// Set the observer whose `on_events` will be called when certain iface events happen. After
 /// setting, the new observer will fire once immediately to avoid missing any events.
 ///
-/// If there is an existing observer, due to race conditions, this function does not guarentee
+/// If there is an existing observer, due to race conditions, this function does not guarantee
 /// that the old observer will never be called after the setting. Users should be aware of this
 /// and proactively handle the race conditions if necessary.
 pub fn set_observer(&self, handler: Weak<dyn Observer<()>>) {
@@ -41,7 +41,7 @@ pub trait Iface: internal::IfaceInternal + Send + Sync {
 fn poll(&self);

 /// Bind a socket to the iface. So the packet for this socket will be dealt with by the interface.
-/// If port is None, the iface will pick up an empheral port for the socket.
+/// If port is None, the iface will pick up an ephemeral port for the socket.
 /// FIXME: The reason for binding socket and interface together is because there are limitations inside smoltcp.
 /// See discussion at <https://github.com/smoltcp-rs/smoltcp/issues/779>.
 fn bind_socket(
@@ -71,7 +71,7 @@ impl BoundDatagram {
 return_errno_with_message!(Errno::EAGAIN, "the send buffer is full")
 }
 Some(Err(SendError::Unaddressable)) => {
-return_errno_with_message!(Errno::EINVAL, "the destionation address is invalid")
+return_errno_with_message!(Errno::EINVAL, "the destination address is invalid")
 }
 None => return_errno_with_message!(Errno::EMSGSIZE, "the message is too large"),
 }
@@ -101,7 +101,7 @@ impl DatagramSocket {
 }
 }

-fn try_bind_empheral(&self, remote_endpoint: &IpEndpoint) -> Result<()> {
+fn try_bind_ephemeral(&self, remote_endpoint: &IpEndpoint) -> Result<()> {
 // Fast path
 if let Inner::Bound(_) = self.inner.read().as_ref() {
 return Ok(());
@@ -269,7 +269,7 @@ impl Socket for DatagramSocket {
 fn connect(&self, socket_addr: SocketAddr) -> Result<()> {
 let endpoint = socket_addr.try_into()?;

-self.try_bind_empheral(&endpoint)?;
+self.try_bind_ephemeral(&endpoint)?;

 let mut inner = self.inner.write();
 let Inner::Bound(bound_datagram) = inner.as_mut() else {
@@ -311,7 +311,7 @@ impl Socket for DatagramSocket {
 let remote_endpoint = match addr {
 Some(remote_addr) => {
 let endpoint = remote_addr.try_into()?;
-self.try_bind_empheral(&endpoint)?;
+self.try_bind_ephemeral(&endpoint)?;
 endpoint
 }
 None => self.remote_endpoint().ok_or_else(|| {
@@ -481,7 +481,7 @@ impl Socket for StreamSocket {
 let state = self.state.read();
 match state.as_ref() {
 State::Connected(connected_stream) => connected_stream.shutdown(cmd),
-// TDOD: shutdown listening stream
+// TODO: shutdown listening stream
 _ => return_errno_with_message!(Errno::EINVAL, "cannot shutdown"),
 }
 }
@@ -20,8 +20,8 @@ impl Connected {
 addr: Option<UnixSocketAddrBound>,
 peer_addr: Option<UnixSocketAddrBound>,
 ) -> (Connected, Connected) {
-let (writer_this, reader_peer) = Channel::with_capacity(DAFAULT_BUF_SIZE).split();
-let (writer_peer, reader_this) = Channel::with_capacity(DAFAULT_BUF_SIZE).split();
+let (writer_this, reader_peer) = Channel::with_capacity(DEFAULT_BUF_SIZE).split();
+let (writer_peer, reader_this) = Channel::with_capacity(DEFAULT_BUF_SIZE).split();

 let this = Connected {
 addr: addr.clone(),
@@ -122,4 +122,4 @@ impl Connected {
 }
 }

-const DAFAULT_BUF_SIZE: usize = 65536;
+const DEFAULT_BUF_SIZE: usize = 65536;
@@ -4,7 +4,7 @@ use crate::prelude::*;

 bitflags! {
 /// Flags used for send/recv.
-/// The definiton is from https://elixir.bootlin.com/linux/v6.0.9/source/include/linux/socket.h
+/// The definition is from https://elixir.bootlin.com/linux/v6.0.9/source/include/linux/socket.h
 #[repr(C)]
 #[derive(Pod)]
 pub struct SendRecvFlags: i32 {
@@ -225,7 +225,7 @@ impl VsockSpace {
 let Some(listen) = listen_sockets.get(&event.destination.into()) else {
 return_errno_with_message!(
 Errno::EINVAL,
-"connecion request can only be handled by listening socket"
+"connection request can only be handled by listening socket"
 );
 };
 let peer = event.source;
@@ -56,8 +56,8 @@ impl Listen {
 }

 pub fn update_io_events(&self) {
-let incomming_connection = self.incoming_connection.disable_irq().lock();
-if !incomming_connection.is_empty() {
+let incoming_connection = self.incoming_connection.disable_irq().lock();
+if !incoming_connection.is_empty() {
 self.pollee.add_events(IoEvents::IN);
 } else {
 self.pollee.del_events(IoEvents::IN);
@@ -124,7 +124,7 @@ impl CloneFlags {
 /// Clone a child thread or child process.
 ///
 /// FIXME: currently, the child process or thread will be scheduled to run at once,
-/// but this may not be the expected bahavior.
+/// but this may not be the expected behavior.
 pub fn clone_child(
 ctx: &Context,
 parent_context: &UserContext,
@@ -411,7 +411,7 @@ fn clone_sighand(
 parent_sig_dispositions: &Arc<Mutex<SigDispositions>>,
 clone_flags: CloneFlags,
 ) -> Arc<Mutex<SigDispositions>> {
-// similer to CLONE_FILES
+// similar to CLONE_FILES
 if clone_flags.contains(CloneFlags::CLONE_SIGHAND) {
 parent_sig_dispositions.clone()
 } else {
@@ -49,7 +49,7 @@ impl<R: TRights> Credentials<R> {

 /// Gets real user id.
 ///
-/// This method requies the `Read` right.
+/// This method requires the `Read` right.
 #[require(R > Read)]
 pub fn ruid(&self) -> Uid {
 self.0.ruid()
@@ -57,7 +57,7 @@ impl<R: TRights> Credentials<R> {

 /// Gets effective user id.
 ///
-/// This method requies the `Read` right.
+/// This method requires the `Read` right.
 #[require(R > Read)]
 pub fn euid(&self) -> Uid {
 self.0.euid()
@@ -65,7 +65,7 @@ impl<R: TRights> Credentials<R> {

 /// Gets saved-set user id.
 ///
-/// This method requies the `Read` right.
+/// This method requires the `Read` right.
 #[require(R > Read)]
 pub fn suid(&self) -> Uid {
 self.0.suid()
@@ -73,7 +73,7 @@ impl<R: TRights> Credentials<R> {

 /// Gets file system user id.
 ///
-/// This method requies the `Read` right.
+/// This method requires the `Read` right.
 #[require(R > Read)]
 pub fn fsuid(&self) -> Uid {
 self.0.fsuid()
@@ -143,7 +143,7 @@ impl<R: TRights> Credentials<R> {

 /// Gets real group id.
 ///
-/// This method requies the `Read` right.
+/// This method requires the `Read` right.
 #[require(R > Read)]
 pub fn rgid(&self) -> Gid {
 self.0.rgid()
@@ -151,7 +151,7 @@ impl<R: TRights> Credentials<R> {

 /// Gets effective group id.
 ///
-/// This method requies the `Read` right.
+/// This method requires the `Read` right.
 #[require(R > Read)]
 pub fn egid(&self) -> Gid {
 self.0.egid()
@@ -159,7 +159,7 @@ impl<R: TRights> Credentials<R> {

 /// Gets saved-set group id.
 ///
-/// This method requies the `Read` right.
+/// This method requires the `Read` right.
 #[require(R > Read)]
 pub fn sgid(&self) -> Gid {
 self.0.sgid()
@@ -167,7 +167,7 @@ impl<R: TRights> Credentials<R> {

 /// Gets file system group id.
 ///
-/// This method requies the `Read` right.
+/// This method requires the `Read` right.
 #[require(R > Read)]
 pub fn fsgid(&self) -> Gid {
 self.0.fsgid()
@@ -237,7 +237,7 @@ impl<R: TRights> Credentials<R> {

 /// Acquires the read lock of supplementary group ids.
 ///
-/// This method requies the `Read` right.
+/// This method requires the `Read` right.
 #[require(R > Read)]
 pub fn groups(&self) -> RwLockReadGuard<BTreeSet<Gid>> {
 self.0.groups()
@@ -255,7 +255,7 @@ impl<R: TRights> Credentials<R> {

 /// Gets the capabilities that child process can inherit.
 ///
-/// This method requies the `Read` right.
+/// This method requires the `Read` right.
 #[require(R > Read)]
 pub fn inheritable_capset(&self) -> CapSet {
 self.0.inheritable_capset()
@@ -263,7 +263,7 @@ impl<R: TRights> Credentials<R> {

 /// Gets the capabilities that are permitted.
 ///
-/// This method requies the `Read` right.
+/// This method requires the `Read` right.
 #[require(R > Read)]
 pub fn permitted_capset(&self) -> CapSet {
 self.0.permitted_capset()
@@ -271,7 +271,7 @@ impl<R: TRights> Credentials<R> {

 /// Gets the capabilities that actually use.
 ///
-/// This method requies the `Read` right.
+/// This method requires the `Read` right.
 #[require(R > Read)]
 pub fn effective_capset(&self) -> CapSet {
 self.0.effective_capset()
@@ -33,7 +33,7 @@ pub struct RobustListHead {
 impl RobustListHead {
 /// Return an iterator for all futexes in the robust list.
 ///
-/// The futex refered to by `list_op_pending`, if any, will be returned as
+/// The futex referred to by `list_op_pending`, if any, will be returned as
 /// the last item.
 pub fn futexes(&self) -> FutexIter<'_> {
 FutexIter::new(self)
@@ -7,7 +7,7 @@ use crate::{
 process::{process_table, Pgid, ProcessGroup},
 };

-/// A termial is used to interact with system. A terminal can support the shell
+/// A terminal is used to interact with system. A terminal can support the shell
 /// job control.
 ///
 /// We currently support two kinds of terminal, the tty and pty.
@@ -217,7 +217,7 @@ struct InitStackWriter {

 impl InitStackWriter {
 fn write(mut self) -> Result<()> {
-// FIXME: Some OSes may put the first page of excutable file here
+// FIXME: Some OSes may put the first page of executable file here
 // for interpreting elf headers.

 let argc = self.argv.len() as u64;
@@ -268,7 +268,7 @@ impl InitStackWriter {
 }

 /// Libc ABI requires 16-byte alignment of the stack entrypoint.
-/// Current postion of the stack is 8-byte aligned already, insert 8 byte
+/// Current position of the stack is 8-byte aligned already, insert 8 byte
 /// to meet the requirement if necessary.
 fn adjust_stack_alignment(&self, envp_pointers: &[u64], argv_pointers: &[u64]) -> Result<()> {
 // Ensure 8-byte alignment
@@ -285,7 +285,7 @@ impl InitStackWriter {
 }

 fn write_aux_vec(&self) -> Result<()> {
-// Write NULL auxilary
+// Write NULL auxiliary
 self.write_u64(0)?;
 self.write_u64(AuxKey::AT_NULL as u64)?;
 // Write Auxiliary vectors
@@ -16,7 +16,7 @@ use crate::{
 prelude::*,
 };

-/// Load an executable to root vmar, including loading programe image, preparing heap and stack,
+/// Load an executable to root vmar, including loading programme image, preparing heap and stack,
 /// initializing argv, envp and aux tables.
 /// About recursion_limit: recursion limit is used to limit th recursion depth of shebang executables.
 /// If the interpreter(the program behind #!) of shebang executable is also a shebang,
@@ -26,7 +26,7 @@ pub fn parse_shebang_line(file_header_buffer: &[u8]) -> Result<Option<Vec<CStrin
 if shebang_argv.len() != 1 {
 return_errno_with_message!(
 Errno::EINVAL,
-"One and only one intpreter program should be specified"
+"One and only one interpreter program should be specified"
 );
 }
 Ok(Some(shebang_argv))
@@ -239,7 +239,7 @@ impl Observer<IoEvents> for EventCounter {
 /// according to the events.
 ///
 /// This trait is added instead of creating a new method in [`Pollee`] because sometimes we do not
-/// have access to the internal [`Pollee`], but there is a method that provides the same sematics
+/// have access to the internal [`Pollee`], but there is a method that provides the same semantics
 /// as [`Pollee::poll`] and we need to perform event-based operations using that method.
 pub trait Pollable {
 /// Returns the interesting events if there are any, or waits for them to happen if there are
@@ -26,7 +26,7 @@ impl TryFrom<u8> for SigNum {
 }

 impl SigNum {
-/// Caller must ensure the sig_num is valid. otherweise, use try_from will check sig_num and does not panic.
+/// Caller must ensure the sig_num is valid. Otherwise, use try_from will check sig_num and does not panic.
 pub const fn from_u8(sig_num: u8) -> Self {
 if sig_num > MAX_RT_SIG_NUM || sig_num < MIN_STD_SIG_NUM {
 panic!("invalid signal number")
@@ -83,7 +83,7 @@ impl SigStack {
 self.handler_counter -= 1
 }

-/// Determins whether the stack is executed on by any signal handler
+/// Determines whether the stack is executed on by any signal handler
 pub fn is_active(&self) -> bool {
 // FIXME: can DISABLE stack be used?
 self.handler_counter != 0 && !self.flags.contains(SigStackFlags::SS_AUTODISARM)
@@ -41,7 +41,7 @@ impl FaultSignal {
 let addr = Some(trap_info.page_fault_addr as u64);
 (SIGSEGV, code, addr)
 }
-_ => panic!("Exception cannnot be a signal"),
+_ => panic!("Exception cannot be a signal"),
 };
 FaultSignal { num, code, addr }
 }
@@ -206,7 +206,7 @@ impl Condvar {
 }

 /// Wait for the condition to become true,
-/// and until the condition is explicitly woken up or interupted.
+/// and until the condition is explicitly woken up or interrupted.
 ///
 /// This function blocks until either the condition becomes false
 /// or the condition variable is explicitly notified.
@@ -76,7 +76,7 @@ pub enum MadviseBehavior {
 MADV_HUGEPAGE = 14, /* Worth backing with hugepages */
 MADV_NOHUGEPAGE = 15, /* Not worth backing with hugepages */

-MADV_DONTDUMP = 16, /* Explicity exclude from the core dump,
+MADV_DONTDUMP = 16, /* Explicitly exclude from the core dump,
 overrides the coredump filter bits */
 MADV_DODUMP = 17, /* Clear the MADV_DONTDUMP flag */

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0

-//! Read the Cpu ctx content then dispatch syscall to corrsponding handler
+//! Read the Cpu ctx content then dispatch syscall to corresponding handler
 //! The each sub module contains functions that handle real syscall logic.
 pub use clock_gettime::ClockId;
 use ostd::cpu::UserContext;
@@ -141,7 +141,7 @@ mod waitid;
 mod write;

 /// This macro is used to define syscall handler.
-/// The first param is ths number of parameters,
+/// The first param is the number of parameters,
 /// The second param is the function name of syscall handler,
 /// The third is optional, means the args(if parameter number > 0),
 /// The third is optional, means if cpu ctx is required.
@@ -21,7 +21,7 @@ pub fn sys_read(

 // According to <https://man7.org/linux/man-pages/man2/read.2.html>, if
 // the user specified an empty buffer, we should detect errors by checking
-// the file discriptor. If no errors detected, return 0 successfully.
+// the file descriptor. If no errors detected, return 0 successfully.
 let read_len = if buf_len != 0 {
 let mut writer = ctx
 .process
@@ -12,7 +12,7 @@ use crate::{
 };

 pub fn sys_ftruncate(fd: FileDesc, len: isize, ctx: &Context) -> Result<SyscallReturn> {
-debug!("fd = {}, lentgh = {}", fd, len);
+debug!("fd = {}, length = {}", fd, len);

 check_length(len, ctx)?;

@@ -21,7 +21,7 @@ pub fn sys_write(

 // According to <https://man7.org/linux/man-pages/man2/write.2.html>, if
 // the user specified an empty buffer, we should detect errors by checking
-// the file discriptor. If no errors detected, return 0 successfully.
+// the file descriptor. If no errors detected, return 0 successfully.
 let write_len = if user_buf_len != 0 {
 let mut reader = ctx
 .process
@@ -97,7 +97,7 @@ fn log_trap_info(exception: &CpuException, trap_info: &CpuExceptionInfo) {
 DEVICE_NOT_AVAILABLE => log_trap_common!(DEVICE_NOT_AVAILABLE, trap_info),
 DOUBLE_FAULT => log_trap_common!(DOUBLE_FAULT, trap_info),
 COPROCESSOR_SEGMENT_OVERRUN => log_trap_common!(COPROCESSOR_SEGMENT_OVERRUN, trap_info),
-INVAILD_TSS => log_trap_common!(INVAILD_TSS, trap_info),
+INVALID_TSS => log_trap_common!(INVALID_TSS, trap_info),
 SEGMENT_NOT_PRESENT => log_trap_common!(SEGMENT_NOT_PRESENT, trap_info),
 STACK_SEGMENT_FAULT => log_trap_common!(STACK_SEGMENT_FAULT, trap_info),
 GENERAL_PROTECTION_FAULT => log_trap_common!(GENERAL_PROTECTION_FAULT, trap_info),
@@ -54,7 +54,7 @@ impl SystemTime {
 self.0.checked_add(duration).map(SystemTime)
 }

-/// Substract a duration from self. If the result does not exceed inner bounds return Some(t), else return None.
+/// Subtract a duration from self. If the result does not exceed inner bounds return Some(t), else return None.
 pub fn checked_sub(&self, duration: Duration) -> Option<Self> {
 let duration = convert_to_time_duration(duration);
 self.0.checked_sub(duration).map(SystemTime)
@@ -334,7 +334,7 @@ impl VmMapping {
 ///
 /// Generally, this function is only used in `protect()` method.
 /// This method modifies the parent `Vmar` in the end if subdividing is required.
-/// It removes current mapping and add splitted mapping to the Vmar.
+/// It removes current mapping and add split mapping to the Vmar.
 fn protect_with_subdivision(
 &self,
 intersect_range: &Range<usize>,
@@ -402,7 +402,7 @@ impl VmMapping {
 return Ok(());
 }
 if trim_range.start <= map_to_addr && trim_range.end >= map_to_addr + map_size {
-// Fast path: the whole mapping was trimed.
+// Fast path: the whole mapping was trimmed.
 self.unmap(trim_range, true)?;
 mappings_to_remove.push_back(map_to_addr);
 return Ok(());
@@ -73,7 +73,7 @@ pub use pager::Pager;
 ///
 pub struct Vmo<R = Rights>(pub(super) Arc<Vmo_>, R);

-/// Functions exist both for static capbility and dynamic capibility
+/// Functions exist both for static capbility and dynamic capability
 pub trait VmoRightsOp {
 /// Returns the access rights.
 fn rights(&self) -> Rights;
@@ -94,7 +94,7 @@ pub trait VmoRightsOp {
 }

 // We implement this trait for VMO, so we can use functions on type like Vmo<R> without trait bounds.
-// FIXME: This requires the imcomplete feature specialization, which should be fixed further.
+// FIXME: This requires the incomplete feature specialization, which should be fixed further.
 impl<R> VmoRightsOp for Vmo<R> {
 default fn rights(&self) -> Rights {
 unimplemented!()