mirror of
https://github.com/asterinas/asterinas.git
synced 2025-06-08 04:55:03 +00:00
Fix all spelling mistakes in history by typos tool
This commit is contained in:
parent
b29d3b5409
commit
86f09eef75
28
.typos.toml
Normal file
28
.typos.toml
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
# This file is to help typos avoid false-positives.
|
||||||
|
|
||||||
|
# Words listed below are marked as valid, not mistakes.
|
||||||
|
[default.extend-words]
|
||||||
|
rela = "rela"
|
||||||
|
ANDD = "ANDD"
|
||||||
|
ethe = "ethe"
|
||||||
|
mke = "mke"
|
||||||
|
WHT = "WHT"
|
||||||
|
ist = "ist"
|
||||||
|
TME = "TME"
|
||||||
|
BA = "BA"
|
||||||
|
ND = "ND"
|
||||||
|
Fo = "Fo"
|
||||||
|
|
||||||
|
# Files with svg suffix are ignored to check.
|
||||||
|
[type.svg]
|
||||||
|
extend-glob = ["*.svg"]
|
||||||
|
check-file = false
|
||||||
|
|
||||||
|
# Files listed below are ignored to check.
|
||||||
|
[files]
|
||||||
|
extend-exclude = [
|
||||||
|
"test/syscall_test/blocklists/pty_test",
|
||||||
|
"test/build/initramfs/opt/syscall_test/blocklists/pty_test",
|
||||||
|
"test/syscall_test/blocklists/sync_test",
|
||||||
|
"test/build/initramfs/opt/syscall_test/blocklists/sync_test",
|
||||||
|
]
|
16
.vscode/settings.json
vendored
16
.vscode/settings.json
vendored
@ -15,5 +15,19 @@
|
|||||||
"-Zbuild-std=core,alloc,compiler_builtins",
|
"-Zbuild-std=core,alloc,compiler_builtins",
|
||||||
"-Zbuild-std-features=compiler-builtins-mem"
|
"-Zbuild-std-features=compiler-builtins-mem"
|
||||||
],
|
],
|
||||||
"rust-analyzer.showUnlinkedFileNotification": false
|
"rust-analyzer.showUnlinkedFileNotification": false,
|
||||||
|
"search.exclude": {
|
||||||
|
"**/*.code-search": false,
|
||||||
|
"**/bower_components": false,
|
||||||
|
"**/node_modules": false
|
||||||
|
},
|
||||||
|
"search.useIgnoreFiles": false,
|
||||||
|
"files.exclude": {
|
||||||
|
"**/.DS_Store": false,
|
||||||
|
"**/.git": false,
|
||||||
|
"**/.hg": false,
|
||||||
|
"**/.svn": false,
|
||||||
|
"**/CVS": false,
|
||||||
|
"**/Thumbs.db": false
|
||||||
|
}
|
||||||
}
|
}
|
@ -91,7 +91,7 @@ After starting a debug server with OSDK from the shell with `make gdb_server`,
|
|||||||
a temporary `launch.json` is generated under `.vscode`.
|
a temporary `launch.json` is generated under `.vscode`.
|
||||||
Your previous launch configs will be restored after the server is down.
|
Your previous launch configs will be restored after the server is down.
|
||||||
Press `F5`(Run and Debug) to start a debug session via VS Code.
|
Press `F5`(Run and Debug) to start a debug session via VS Code.
|
||||||
Click `Continue`(or, press `F5`) at the fisrt break to resume the paused server instance,
|
Click `Continue`(or, press `F5`) at the first break to resume the paused server instance,
|
||||||
then it will continue until reaching your first breakpoint.
|
then it will continue until reaching your first breakpoint.
|
||||||
|
|
||||||
Note that if debugging with KVM enabled, you must use hardware assisted breakpoints. See "hbreak" in
|
Note that if debugging with KVM enabled, you must use hardware assisted breakpoints. See "hbreak" in
|
||||||
|
@ -73,7 +73,7 @@ to introduce minimal overheads.
|
|||||||
Ideally, these APIs should be realized
|
Ideally, these APIs should be realized
|
||||||
as [zero-cost abstractions](https://monomorph.is/posts/zero-cost-abstractions/).
|
as [zero-cost abstractions](https://monomorph.is/posts/zero-cost-abstractions/).
|
||||||
|
|
||||||
Fortunatelly, our efforts
|
Fortunately, our efforts
|
||||||
to design and implement an OS framework meeting these standards
|
to design and implement an OS framework meeting these standards
|
||||||
have borne fruit in the form of the [Asterinas OSTD](../ostd/).
|
have borne fruit in the form of the [Asterinas OSTD](../ostd/).
|
||||||
Using this framework as a foundation,
|
Using this framework as a foundation,
|
||||||
|
@ -55,7 +55,7 @@ and can be installed by
|
|||||||
cargo install cargo-osdk
|
cargo install cargo-osdk
|
||||||
```
|
```
|
||||||
|
|
||||||
### Upgrate
|
### Upgrade
|
||||||
If `cargo-osdk` is already installed,
|
If `cargo-osdk` is already installed,
|
||||||
the tool can be upgraded by
|
the tool can be upgraded by
|
||||||
```bash
|
```bash
|
||||||
|
@ -188,7 +188,7 @@ impl InputDevice {
|
|||||||
|
|
||||||
fn handle_irq(&self) {
|
fn handle_irq(&self) {
|
||||||
let callbacks = self.callbacks.read_irq_disabled();
|
let callbacks = self.callbacks.read_irq_disabled();
|
||||||
// Returns ture if there may be more events to handle
|
// Returns true if there may be more events to handle
|
||||||
let handle_event = |event: &EventBuf| -> bool {
|
let handle_event = |event: &EventBuf| -> bool {
|
||||||
event.sync().unwrap();
|
event.sync().unwrap();
|
||||||
let event: VirtioInputEvent = event.read().unwrap();
|
let event: VirtioInputEvent = event.read().unwrap();
|
||||||
|
@ -61,7 +61,7 @@ impl SocketDevice {
|
|||||||
<< 32;
|
<< 32;
|
||||||
|
|
||||||
let mut recv_queue = VirtQueue::new(QUEUE_RECV, QUEUE_SIZE, transport.as_mut())
|
let mut recv_queue = VirtQueue::new(QUEUE_RECV, QUEUE_SIZE, transport.as_mut())
|
||||||
.expect("createing recv queue fails");
|
.expect("creating recv queue fails");
|
||||||
let send_queue = VirtQueue::new(QUEUE_SEND, QUEUE_SIZE, transport.as_mut())
|
let send_queue = VirtQueue::new(QUEUE_SEND, QUEUE_SIZE, transport.as_mut())
|
||||||
.expect("creating send queue fails");
|
.expect("creating send queue fails");
|
||||||
let event_queue = VirtQueue::new(QUEUE_EVENT, QUEUE_SIZE, transport.as_mut())
|
let event_queue = VirtQueue::new(QUEUE_EVENT, QUEUE_SIZE, transport.as_mut())
|
||||||
|
@ -103,7 +103,7 @@ impl fmt::Display for SocketError {
|
|||||||
Self::UnexpectedDataInPacket => write!(f, "No data is expected in the packet"),
|
Self::UnexpectedDataInPacket => write!(f, "No data is expected in the packet"),
|
||||||
Self::InsufficientBufferSpaceInPeer => write!(f, "Peer has insufficient buffer space, try again later"),
|
Self::InsufficientBufferSpaceInPeer => write!(f, "Peer has insufficient buffer space, try again later"),
|
||||||
Self::RecycledWrongBuffer => write!(f, "Recycled a wrong buffer"),
|
Self::RecycledWrongBuffer => write!(f, "Recycled a wrong buffer"),
|
||||||
Self::QueueError(_) => write!(f,"Error encounted out of vsock itself!"),
|
Self::QueueError(_) => write!(f,"Error encountered out of vsock itself!"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -106,7 +106,7 @@ fn negotiate_features(transport: &mut Box<dyn VirtioTransport>) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bitflags! {
|
bitflags! {
|
||||||
/// all device features, bits 0~23 and 50~63 are sepecified by device.
|
/// all device features, bits 0~23 and 50~63 are specified by device.
|
||||||
/// if using this struct to translate u64, use from_bits_truncate function instead of from_bits
|
/// if using this struct to translate u64, use from_bits_truncate function instead of from_bits
|
||||||
///
|
///
|
||||||
struct Feature: u64 {
|
struct Feature: u64 {
|
||||||
|
@ -73,7 +73,7 @@ impl VirtQueue {
|
|||||||
|
|
||||||
let (descriptor_ptr, avail_ring_ptr, used_ring_ptr) = if transport.is_legacy_version() {
|
let (descriptor_ptr, avail_ring_ptr, used_ring_ptr) = if transport.is_legacy_version() {
|
||||||
// FIXME: How about pci legacy?
|
// FIXME: How about pci legacy?
|
||||||
// Currently, we use one Frame to place the descriptors and avaliable rings, one Frame to place used rings
|
// Currently, we use one Frame to place the descriptors and available rings, one Frame to place used rings
|
||||||
// because the virtio-mmio legacy required the address to be continuous. The max queue size is 128.
|
// because the virtio-mmio legacy required the address to be continuous. The max queue size is 128.
|
||||||
if size > 128 {
|
if size > 128 {
|
||||||
return Err(QueueError::InvalidArgs);
|
return Err(QueueError::InvalidArgs);
|
||||||
|
@ -62,7 +62,7 @@ impl VirtioPciCapabilityData {
|
|||||||
3 => VirtioPciCpabilityType::IsrCfg,
|
3 => VirtioPciCpabilityType::IsrCfg,
|
||||||
4 => VirtioPciCpabilityType::DeviceCfg,
|
4 => VirtioPciCpabilityType::DeviceCfg,
|
||||||
5 => VirtioPciCpabilityType::PciCfg,
|
5 => VirtioPciCpabilityType::PciCfg,
|
||||||
_ => panic!("Unsupport virtio capability type:{:?}", cfg_type),
|
_ => panic!("Unsupported virtio capability type:{:?}", cfg_type),
|
||||||
};
|
};
|
||||||
let bar = vendor_cap.read8(4).unwrap();
|
let bar = vendor_cap.read8(4).unwrap();
|
||||||
let capability_length = vendor_cap.read8(2).unwrap();
|
let capability_length = vendor_cap.read8(2).unwrap();
|
||||||
|
@ -10,7 +10,7 @@
|
|||||||
//! which means the `SomeRightSet` should **include** the `AnotherRightSet`. In this case, `AnotherRightSet` should be a **generic parameter**.
|
//! which means the `SomeRightSet` should **include** the `AnotherRightSet`. In this case, `AnotherRightSet` should be a **generic parameter**.
|
||||||
//! i.e., `AnotherRightSet` should occur the the generic param list of the function.
|
//! i.e., `AnotherRightSet` should occur the the generic param list of the function.
|
||||||
//!
|
//!
|
||||||
//! If there are multiple constraits, they can be seperated with `|`, which means all constraits should be satisfied.
|
//! If there are multiple constraints, they can be separated with `|`, which means all constraints should be satisfied.
|
||||||
//!
|
//!
|
||||||
//! The require can also be used multiple times, which means each macro should be satisfied.
|
//! The require can also be used multiple times, which means each macro should be satisfied.
|
||||||
//!
|
//!
|
||||||
|
@ -135,7 +135,7 @@ pub use typeflags_util::SetContain;
|
|||||||
/// ```
|
/// ```
|
||||||
///
|
///
|
||||||
/// But this coding pattern is too tedius for such a common task.
|
/// But this coding pattern is too tedius for such a common task.
|
||||||
/// To make the life of users easier, we provide a convinient macro named
|
/// To make the life of users easier, we provide a convenient macro named
|
||||||
/// `field_ptr`, which can be used to obtain the safe pointer of a field from
|
/// `field_ptr`, which can be used to obtain the safe pointer of a field from
|
||||||
/// that of its containing struct.
|
/// that of its containing struct.
|
||||||
///
|
///
|
||||||
|
@ -9,7 +9,7 @@ cargo install --path .
|
|||||||
This will install two binaries `cargo-component` and `component-driver` at `$HOME/.cargo/bin`(by default, it depends on the cargo config).
|
This will install two binaries `cargo-component` and `component-driver` at `$HOME/.cargo/bin`(by default, it depends on the cargo config).
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
Use `cargo component` or `cargo component check` or `cargo component audit`. The three commands are the same now. For Asterinas, we shoud use another alias command `cargo component-check`, which was defined in `src/.cargo/config.toml`.
|
Use `cargo component` or `cargo component check` or `cargo component audit`. The three commands are the same now. For Asterinas, we should use another alias command `cargo component-check`, which was defined in `src/.cargo/config.toml`.
|
||||||
|
|
||||||
### Two notes:
|
### Two notes:
|
||||||
- The directory **where you run the command** should contains a `Components.toml` config file, where defines all components and whitelist.
|
- The directory **where you run the command** should contains a `Components.toml` config file, where defines all components and whitelist.
|
||||||
|
@ -166,7 +166,7 @@ fn check_inline_asm_operand(
|
|||||||
|
|
||||||
/// check whether visiting the operand in local crate is valid.
|
/// check whether visiting the operand in local crate is valid.
|
||||||
/// if the operand is invalid, add the def_path to def_paths.
|
/// if the operand is invalid, add the def_path to def_paths.
|
||||||
/// The operand is invalid only when follwing four points are all satisfied.
|
/// The operand is invalid only when following four points are all satisfied.
|
||||||
/// 1. The operand represents a static variable or a func(the first argument can not be self or its variants).
|
/// 1. The operand represents a static variable or a func(the first argument can not be self or its variants).
|
||||||
/// 2. The operand is not defined in local crate.
|
/// 2. The operand is not defined in local crate.
|
||||||
/// 3. The operand is marked with #[component_access_control::controlled]
|
/// 3. The operand is marked with #[component_access_control::controlled]
|
||||||
|
@ -180,7 +180,7 @@ fn read_component_file(workspace_root: &str) -> Vec<String> {
|
|||||||
.collect();
|
.collect();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
panic!("Componets.toml file not valid")
|
panic!("Components.toml file not valid")
|
||||||
}
|
}
|
||||||
|
|
||||||
/// calculate the priority of one node
|
/// calculate the priority of one node
|
||||||
|
@ -11,7 +11,7 @@ Registering a crate as component by marking a function in the lib.rs with `#[ini
|
|||||||
|
|
||||||
### Component initialization
|
### Component initialization
|
||||||
|
|
||||||
Component system need to be initialized by calling `componet::init_all` function and it needs information about all components. Usually it is used with the `component::parse_metadata` macro.
|
Component system need to be initialized by calling `component::init_all` function and it needs information about all components. Usually it is used with the `component::parse_metadata` macro.
|
||||||
|
|
||||||
## Example
|
## Example
|
||||||
|
|
||||||
|
@ -128,7 +128,7 @@ fn parse_input(components: Vec<ComponentInfo>) -> BTreeMap<String, ComponentInfo
|
|||||||
out
|
out
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Match the ComponetInfo with ComponentRegistry. The key is the relative path of one component
|
/// Match the ComponentInfo with ComponentRegistry. The key is the relative path of one component
|
||||||
fn match_and_call(
|
fn match_and_call(
|
||||||
mut components: BTreeMap<String, ComponentInfo>,
|
mut components: BTreeMap<String, ComponentInfo>,
|
||||||
) -> Result<(), ComponentSystemInitError> {
|
) -> Result<(), ComponentSystemInitError> {
|
||||||
@ -161,7 +161,7 @@ fn match_and_call(
|
|||||||
infos.push(info);
|
infos.push(info);
|
||||||
}
|
}
|
||||||
|
|
||||||
debug!("Remain componets:{components:?}");
|
debug!("Remain components:{components:?}");
|
||||||
|
|
||||||
if !components.is_empty() {
|
if !components.is_empty() {
|
||||||
info!("Exists components that are not initialized");
|
info!("Exists components that are not initialized");
|
||||||
@ -174,11 +174,11 @@ fn match_and_call(
|
|||||||
for i in infos {
|
for i in infos {
|
||||||
info!("Component initializing:{:?}", i);
|
info!("Component initializing:{:?}", i);
|
||||||
if let Err(res) = i.function.unwrap().call(()) {
|
if let Err(res) = i.function.unwrap().call(()) {
|
||||||
error!("Component initalize error:{:?}", res);
|
error!("Component initialize error:{:?}", res);
|
||||||
} else {
|
} else {
|
||||||
info!("Component initalize complete");
|
info!("Component initialize complete");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
info!("All components initalization completed");
|
info!("All components initialization completed");
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -31,5 +31,5 @@ fn main() {
|
|||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
This crate provides a derive procedural macro named `TryFromInt`. This macro will automatically implement [TryFrom](https://doc.rust-lang.org/core/convert/trait.TryFrom.html) trait for enums that meet the following requirements:
|
This crate provides a derive procedural macro named `TryFromInt`. This macro will automatically implement [TryFrom](https://doc.rust-lang.org/core/convert/trait.TryFrom.html) trait for enums that meet the following requirements:
|
||||||
1. The enum must have a primitive repr, i.e., the enum should have attribute like #[repr(u8)], #[repr(u32)], etc. The type parameter of TryFrom will be the repr, e.g., in the `QuickStart` example, the macro will implment `TryFrom<u8>` for `Color`.
|
1. The enum must have a primitive repr, i.e., the enum should have attribute like #[repr(u8)], #[repr(u32)], etc. The type parameter of TryFrom will be the repr, e.g., in the `QuickStart` example, the macro will implement `TryFrom<u8>` for `Color`.
|
||||||
2. The enum must consist solely of unit variants, which is called [units only enum](https://doc.rust-lang.org/reference/items/enumerations.html#unit-only-enum). Each field should have an **explicit discriminant**.
|
2. The enum must consist solely of unit variants, which is called [units only enum](https://doc.rust-lang.org/reference/items/enumerations.html#unit-only-enum). Each field should have an **explicit discriminant**.
|
||||||
|
@ -69,8 +69,8 @@ fn fn_body_tokens(value_name: &str, data_enum: &DataEnum, ident: Ident) -> Token
|
|||||||
.discriminant
|
.discriminant
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.expect("Each field must be assigned a discriminant value explicitly");
|
.expect("Each field must be assigned a discriminant value explicitly");
|
||||||
let vairant_ident = &variant.ident;
|
let variant_ident = &variant.ident;
|
||||||
let statement = quote!(#value => ::core::result::Result::Ok(#ident::#vairant_ident),);
|
let statement = quote!(#value => ::core::result::Result::Ok(#ident::#variant_ident),);
|
||||||
match_bodys.append_all(statement);
|
match_bodys.append_all(statement);
|
||||||
}
|
}
|
||||||
match_bodys.append_all(
|
match_bodys.append_all(
|
||||||
|
@ -10,7 +10,7 @@ use crate::type_flag::TypeFlagDef;
|
|||||||
const EMPTY_SET_NAME: &str = "::typeflags_util::Nil";
|
const EMPTY_SET_NAME: &str = "::typeflags_util::Nil";
|
||||||
const SET_NAME: &str = "::typeflags_util::Cons";
|
const SET_NAME: &str = "::typeflags_util::Cons";
|
||||||
|
|
||||||
/// A flagSet represent the combination of differnt flag item.
|
/// A flagSet represent the combination of different flag item.
|
||||||
/// e.g. [Read, Write], [Read], [] are all flag sets.
|
/// e.g. [Read, Write], [Read], [] are all flag sets.
|
||||||
/// The order of flagItem does not matters. So flag sets with same sets of items should be viewed as the same set.
|
/// The order of flagItem does not matters. So flag sets with same sets of items should be viewed as the same set.
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
|
@ -25,8 +25,8 @@ pub fn expand_type_flag(type_flags_def: &TypeFlagDef) -> TokenStream {
|
|||||||
all_tokens.append_all(impl_main_trait_tokens);
|
all_tokens.append_all(impl_main_trait_tokens);
|
||||||
});
|
});
|
||||||
|
|
||||||
let impl_set_entend_tokens = impl_set_extend(type_flags_def, &flag_sets);
|
let impl_set_intend_tokens = impl_set_extend(type_flags_def, &flag_sets);
|
||||||
all_tokens.append_all(impl_set_entend_tokens);
|
all_tokens.append_all(impl_set_intend_tokens);
|
||||||
|
|
||||||
let export_declarive_macro_tokens = export_declarive_macro(type_flags_def, &flag_sets);
|
let export_declarive_macro_tokens = export_declarive_macro(type_flags_def, &flag_sets);
|
||||||
all_tokens.append_all(export_declarive_macro_tokens);
|
all_tokens.append_all(export_declarive_macro_tokens);
|
||||||
|
@ -326,7 +326,7 @@ impl LineDiscipline {
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// raw mode
|
// raw mode
|
||||||
// FIXME: avoid addtional bound check
|
// FIXME: avoid additional bound check
|
||||||
*dst_i = next_char;
|
*dst_i = next_char;
|
||||||
read_len += 1;
|
read_len += 1;
|
||||||
}
|
}
|
||||||
|
@ -73,7 +73,7 @@ impl EpollFile {
|
|||||||
let mask = ep_event.events;
|
let mask = ep_event.events;
|
||||||
let entry = EpollEntry::new(fd, weak_file, ep_event, ep_flags, self.weak_self.clone());
|
let entry = EpollEntry::new(fd, weak_file, ep_event, ep_flags, self.weak_self.clone());
|
||||||
|
|
||||||
// Add the new entry to the interest list and start monitering its events
|
// Add the new entry to the interest list and start monitoring its events
|
||||||
let mut interest = self.interest.lock();
|
let mut interest = self.interest.lock();
|
||||||
if interest.contains_key(&fd) {
|
if interest.contains_key(&fd) {
|
||||||
return_errno_with_message!(Errno::EEXIST, "the fd has been added");
|
return_errno_with_message!(Errno::EEXIST, "the fd has been added");
|
||||||
|
@ -144,7 +144,7 @@ impl ExfatBitmap {
|
|||||||
.fs()
|
.fs()
|
||||||
.is_cluster_range_valid(search_start_cluster..search_start_cluster + num_clusters)
|
.is_cluster_range_valid(search_start_cluster..search_start_cluster + num_clusters)
|
||||||
{
|
{
|
||||||
return_errno_with_message!(Errno::ENOSPC, "free contigous clusters not avalable.")
|
return_errno_with_message!(Errno::ENOSPC, "free contiguous clusters not available.")
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut cur_index = search_start_cluster - EXFAT_RESERVED_CLUSTERS;
|
let mut cur_index = search_start_cluster - EXFAT_RESERVED_CLUSTERS;
|
||||||
|
@ -246,11 +246,11 @@ impl ExfatDentrySet {
|
|||||||
create_utc_offset: dos_time.utc_offset,
|
create_utc_offset: dos_time.utc_offset,
|
||||||
create_date: dos_time.date,
|
create_date: dos_time.date,
|
||||||
create_time: dos_time.time,
|
create_time: dos_time.time,
|
||||||
create_time_cs: dos_time.increament_10ms,
|
create_time_cs: dos_time.increment_10ms,
|
||||||
modify_utc_offset: dos_time.utc_offset,
|
modify_utc_offset: dos_time.utc_offset,
|
||||||
modify_date: dos_time.date,
|
modify_date: dos_time.date,
|
||||||
modify_time: dos_time.time,
|
modify_time: dos_time.time,
|
||||||
modify_time_cs: dos_time.increament_10ms,
|
modify_time_cs: dos_time.increment_10ms,
|
||||||
access_utc_offset: dos_time.utc_offset,
|
access_utc_offset: dos_time.utc_offset,
|
||||||
access_date: dos_time.date,
|
access_date: dos_time.date,
|
||||||
access_time: dos_time.time,
|
access_time: dos_time.time,
|
||||||
@ -403,7 +403,7 @@ impl ExfatDentrySet {
|
|||||||
}
|
}
|
||||||
Ok(name)
|
Ok(name)
|
||||||
}
|
}
|
||||||
/// Name dentries are not permited to modify. We should create a new dentry set for renaming.
|
/// Name dentries are not permitted to modify. We should create a new dentry set for renaming.
|
||||||
|
|
||||||
fn calculate_checksum(&self) -> u16 {
|
fn calculate_checksum(&self) -> u16 {
|
||||||
const CHECKSUM_BYTES_RANGE: Range<usize> = 2..4;
|
const CHECKSUM_BYTES_RANGE: Range<usize> = 2..4;
|
||||||
@ -505,7 +505,7 @@ impl Iterator for ExfatDentryIterator {
|
|||||||
|
|
||||||
#[repr(C, packed)]
|
#[repr(C, packed)]
|
||||||
#[derive(Clone, Debug, Default, Copy, Pod)]
|
#[derive(Clone, Debug, Default, Copy, Pod)]
|
||||||
// For files & directorys
|
// For files & directories
|
||||||
pub(super) struct ExfatFileDentry {
|
pub(super) struct ExfatFileDentry {
|
||||||
pub(super) dentry_type: u8, // 0x85
|
pub(super) dentry_type: u8, // 0x85
|
||||||
// Number of Secondary directory entries.
|
// Number of Secondary directory entries.
|
||||||
@ -635,7 +635,7 @@ pub(super) struct ExfatGenericSecondaryDentry {
|
|||||||
#[derive(Clone, Debug, Default, Copy, Pod)]
|
#[derive(Clone, Debug, Default, Copy, Pod)]
|
||||||
pub(super) struct ExfatDeletedDentry {
|
pub(super) struct ExfatDeletedDentry {
|
||||||
pub(super) dentry_type: u8,
|
pub(super) dentry_type: u8,
|
||||||
pub(super) reserverd: [u8; 31],
|
pub(super) reserved: [u8; 31],
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Default, Debug)]
|
#[derive(Default, Debug)]
|
||||||
|
@ -52,7 +52,7 @@ bitflags! {
|
|||||||
const SYSTEM = 0x0004;
|
const SYSTEM = 0x0004;
|
||||||
/// This inode represents a volume. This attribute is not supported in our implementation.
|
/// This inode represents a volume. This attribute is not supported in our implementation.
|
||||||
const VOLUME = 0x0008;
|
const VOLUME = 0x0008;
|
||||||
/// This inode reprents a directory.
|
/// This inode represents a directory.
|
||||||
const DIRECTORY = 0x0010;
|
const DIRECTORY = 0x0010;
|
||||||
/// This file has been touched since the last DOS backup was performed on it. This attribute is not supported in our implementation.
|
/// This file has been touched since the last DOS backup was performed on it. This attribute is not supported in our implementation.
|
||||||
const ARCHIVE = 0x0020;
|
const ARCHIVE = 0x0020;
|
||||||
@ -187,7 +187,7 @@ impl ExfatInodeInner {
|
|||||||
self.fs().find_opened_inode(self.parent_hash)
|
self.fs().find_opened_inode(self.parent_hash)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get physical sector id from logical sector id fot this Inode.
|
/// Get physical sector id from logical sector id for this Inode.
|
||||||
fn get_sector_id(&self, sector_id: usize) -> Result<usize> {
|
fn get_sector_id(&self, sector_id: usize) -> Result<usize> {
|
||||||
let chain_offset = self
|
let chain_offset = self
|
||||||
.start_chain
|
.start_chain
|
||||||
@ -315,12 +315,12 @@ impl ExfatInodeInner {
|
|||||||
file_dentry.create_utc_offset = self.ctime.utc_offset;
|
file_dentry.create_utc_offset = self.ctime.utc_offset;
|
||||||
file_dentry.create_date = self.ctime.date;
|
file_dentry.create_date = self.ctime.date;
|
||||||
file_dentry.create_time = self.ctime.time;
|
file_dentry.create_time = self.ctime.time;
|
||||||
file_dentry.create_time_cs = self.ctime.increament_10ms;
|
file_dentry.create_time_cs = self.ctime.increment_10ms;
|
||||||
|
|
||||||
file_dentry.modify_utc_offset = self.mtime.utc_offset;
|
file_dentry.modify_utc_offset = self.mtime.utc_offset;
|
||||||
file_dentry.modify_date = self.mtime.date;
|
file_dentry.modify_date = self.mtime.date;
|
||||||
file_dentry.modify_time = self.mtime.time;
|
file_dentry.modify_time = self.mtime.time;
|
||||||
file_dentry.modify_time_cs = self.mtime.increament_10ms;
|
file_dentry.modify_time_cs = self.mtime.increment_10ms;
|
||||||
|
|
||||||
file_dentry.access_utc_offset = self.atime.utc_offset;
|
file_dentry.access_utc_offset = self.atime.utc_offset;
|
||||||
file_dentry.access_date = self.atime.date;
|
file_dentry.access_date = self.atime.date;
|
||||||
@ -692,11 +692,11 @@ impl ExfatInode {
|
|||||||
parent_hash: usize,
|
parent_hash: usize,
|
||||||
fs_guard: &MutexGuard<()>,
|
fs_guard: &MutexGuard<()>,
|
||||||
) -> Result<Arc<ExfatInode>> {
|
) -> Result<Arc<ExfatInode>> {
|
||||||
const EXFAT_MIMIMUM_DENTRY: usize = 3;
|
const EXFAT_MINIMUM_DENTRY: usize = 3;
|
||||||
|
|
||||||
let ino = fs.alloc_inode_number();
|
let ino = fs.alloc_inode_number();
|
||||||
|
|
||||||
if dentry_set.len() < EXFAT_MIMIMUM_DENTRY {
|
if dentry_set.len() < EXFAT_MINIMUM_DENTRY {
|
||||||
return_errno_with_message!(Errno::EINVAL, "invalid dentry length")
|
return_errno_with_message!(Errno::EINVAL, "invalid dentry length")
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1313,7 +1313,7 @@ impl Inode for ExfatInode {
|
|||||||
new_size.max(file_size)
|
new_size.max(file_size)
|
||||||
};
|
};
|
||||||
|
|
||||||
// Locks released here, so that file write can be parallized.
|
// Locks released here, so that file write can be parallelized.
|
||||||
let inner = self.inner.upread();
|
let inner = self.inner.upread();
|
||||||
inner.page_cache.pages().write(offset, reader)?;
|
inner.page_cache.pages().write(offset, reader)?;
|
||||||
|
|
||||||
|
@ -263,7 +263,7 @@ mod test {
|
|||||||
info!("Successfully creating and reading {} files", file_id + 1);
|
info!("Successfully creating and reading {} files", file_id + 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
//Test skiped readdir.
|
//Test skipped readdir.
|
||||||
let mut sub_inodes: Vec<String> = Vec::new();
|
let mut sub_inodes: Vec<String> = Vec::new();
|
||||||
let _ = root.readdir_at(file_names.len() / 3 + 2, &mut sub_inodes);
|
let _ = root.readdir_at(file_names.len() / 3 + 2, &mut sub_inodes);
|
||||||
|
|
||||||
@ -956,7 +956,7 @@ mod test {
|
|||||||
let resize_too_large = f.resize(initial_free_clusters as usize * cluster_size + 1);
|
let resize_too_large = f.resize(initial_free_clusters as usize * cluster_size + 1);
|
||||||
assert!(
|
assert!(
|
||||||
resize_too_large.is_err() && fs.num_free_clusters() == initial_free_clusters,
|
resize_too_large.is_err() && fs.num_free_clusters() == initial_free_clusters,
|
||||||
"Fail to deal with a memeory overflow allocation"
|
"Fail to deal with a memory overflow allocation"
|
||||||
);
|
);
|
||||||
|
|
||||||
// Try to allocate a file of exactly the same size as the remaining spaces. This will succeed.
|
// Try to allocate a file of exactly the same size as the remaining spaces. This will succeed.
|
||||||
|
@ -47,11 +47,11 @@ const EXFAT_TIME_ZONE_VALID: u8 = 1 << 7;
|
|||||||
|
|
||||||
#[derive(Default, Debug, Clone, Copy)]
|
#[derive(Default, Debug, Clone, Copy)]
|
||||||
pub struct DosTimestamp {
|
pub struct DosTimestamp {
|
||||||
// Timestamp at the precesion of double seconds.
|
// Timestamp at the precision of double seconds.
|
||||||
pub(super) time: u16,
|
pub(super) time: u16,
|
||||||
pub(super) date: u16,
|
pub(super) date: u16,
|
||||||
// Precise time in 10ms.
|
// Precise time in 10ms.
|
||||||
pub(super) increament_10ms: u8,
|
pub(super) increment_10ms: u8,
|
||||||
pub(super) utc_offset: u8,
|
pub(super) utc_offset: u8,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -73,11 +73,11 @@ impl DosTimestamp {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn new(time: u16, date: u16, increament_10ms: u8, utc_offset: u8) -> Result<Self> {
|
pub fn new(time: u16, date: u16, increment_10ms: u8, utc_offset: u8) -> Result<Self> {
|
||||||
let time = Self {
|
let time = Self {
|
||||||
time,
|
time,
|
||||||
date,
|
date,
|
||||||
increament_10ms,
|
increment_10ms,
|
||||||
utc_offset,
|
utc_offset,
|
||||||
};
|
};
|
||||||
Ok(time)
|
Ok(time)
|
||||||
@ -102,13 +102,13 @@ impl DosTimestamp {
|
|||||||
| ((date_time.day() as u16) << DAY_RANGE.start);
|
| ((date_time.day() as u16) << DAY_RANGE.start);
|
||||||
|
|
||||||
const NSEC_PER_10MSEC: u32 = 10000000;
|
const NSEC_PER_10MSEC: u32 = 10000000;
|
||||||
let increament_10ms =
|
let increment_10ms =
|
||||||
(date_time.second() as u32 % 2 * 100 + date_time.nanosecond() / NSEC_PER_10MSEC) as u8;
|
(date_time.second() as u32 % 2 * 100 + date_time.nanosecond() / NSEC_PER_10MSEC) as u8;
|
||||||
|
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
time,
|
time,
|
||||||
date,
|
date,
|
||||||
increament_10ms,
|
increment_10ms,
|
||||||
utc_offset: 0,
|
utc_offset: 0,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -144,15 +144,15 @@ impl DosTimestamp {
|
|||||||
let mut sec = date_time.assume_utc().unix_timestamp() as u64;
|
let mut sec = date_time.assume_utc().unix_timestamp() as u64;
|
||||||
|
|
||||||
let mut nano_sec: u32 = 0;
|
let mut nano_sec: u32 = 0;
|
||||||
if self.increament_10ms != 0 {
|
if self.increment_10ms != 0 {
|
||||||
const NSEC_PER_MSEC: u32 = 1000000;
|
const NSEC_PER_MSEC: u32 = 1000000;
|
||||||
sec += self.increament_10ms as u64 / 100;
|
sec += self.increment_10ms as u64 / 100;
|
||||||
nano_sec = (self.increament_10ms as u32 % 100) * 10 * NSEC_PER_MSEC;
|
nano_sec = (self.increment_10ms as u32 % 100) * 10 * NSEC_PER_MSEC;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Adjust timezone to UTC0. */
|
/* Adjust timezone to UTC0. */
|
||||||
if (self.utc_offset & EXFAT_TIME_ZONE_VALID) != 0u8 {
|
if (self.utc_offset & EXFAT_TIME_ZONE_VALID) != 0u8 {
|
||||||
sec = Self::ajust_time_zone(sec, self.utc_offset & (!EXFAT_TIME_ZONE_VALID));
|
sec = Self::adjust_time_zone(sec, self.utc_offset & (!EXFAT_TIME_ZONE_VALID));
|
||||||
} else {
|
} else {
|
||||||
// TODO: Use mount info for timezone adjustment.
|
// TODO: Use mount info for timezone adjustment.
|
||||||
}
|
}
|
||||||
@ -160,7 +160,7 @@ impl DosTimestamp {
|
|||||||
Ok(Duration::new(sec, nano_sec))
|
Ok(Duration::new(sec, nano_sec))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn ajust_time_zone(sec: u64, time_zone: u8) -> u64 {
|
fn adjust_time_zone(sec: u64, time_zone: u8) -> u64 {
|
||||||
if time_zone <= 0x3F {
|
if time_zone <= 0x3F {
|
||||||
sec + Self::time_zone_sec(time_zone)
|
sec + Self::time_zone_sec(time_zone)
|
||||||
} else {
|
} else {
|
||||||
|
@ -1485,7 +1485,7 @@ impl InodeImpl_ {
|
|||||||
|
|
||||||
/// Shrinks inode size.
|
/// Shrinks inode size.
|
||||||
///
|
///
|
||||||
/// After the reduction, the size will be shrinked to `new_size`,
|
/// After the reduction, the size will be shrunk to `new_size`,
|
||||||
/// which may result in an decreased block count.
|
/// which may result in an decreased block count.
|
||||||
fn shrink(&mut self, new_size: usize) {
|
fn shrink(&mut self, new_size: usize) {
|
||||||
let new_blocks = self.desc.size_to_blocks(new_size);
|
let new_blocks = self.desc.size_to_blocks(new_size);
|
||||||
|
@ -133,7 +133,7 @@ impl TryFrom<RawSuperBlock> for SuperBlock {
|
|||||||
check_interval: Duration::from_secs(sb.check_interval as _),
|
check_interval: Duration::from_secs(sb.check_interval as _),
|
||||||
creator_os: {
|
creator_os: {
|
||||||
let os_id = OsId::try_from(sb.creator_os)
|
let os_id = OsId::try_from(sb.creator_os)
|
||||||
.map_err(|_| Error::with_message(Errno::EINVAL, "invalid creater os"))?;
|
.map_err(|_| Error::with_message(Errno::EINVAL, "invalid creator os"))?;
|
||||||
if os_id != OsId::Linux {
|
if os_id != OsId::Linux {
|
||||||
return_errno_with_message!(Errno::EINVAL, "not supported os id");
|
return_errno_with_message!(Errno::EINVAL, "not supported os id");
|
||||||
}
|
}
|
||||||
@ -309,7 +309,7 @@ impl SuperBlock {
|
|||||||
Bid::new(super_block_bid as u64)
|
Bid::new(super_block_bid as u64)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the starting block id of the block group descripter table
|
/// Returns the starting block id of the block group descriptor table
|
||||||
/// inside the block group pointed by `block_group_idx`.
|
/// inside the block group pointed by `block_group_idx`.
|
||||||
///
|
///
|
||||||
/// # Panics
|
/// # Panics
|
||||||
@ -465,7 +465,7 @@ pub(super) struct RawSuperBlock {
|
|||||||
pub prealloc_dir_blocks: u8,
|
pub prealloc_dir_blocks: u8,
|
||||||
padding1: u16,
|
padding1: u16,
|
||||||
///
|
///
|
||||||
/// This fileds are for journaling support in Ext3.
|
/// This fields are for journaling support in Ext3.
|
||||||
///
|
///
|
||||||
/// Uuid of journal superblock.
|
/// Uuid of journal superblock.
|
||||||
pub journal_uuid: [u8; 16],
|
pub journal_uuid: [u8; 16],
|
||||||
|
@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
#![allow(unused_variables)]
|
#![allow(unused_variables)]
|
||||||
|
|
||||||
//! Opend File Handle
|
//! Opened File Handle
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
events::{IoEvents, Observer},
|
events::{IoEvents, Observer},
|
||||||
|
@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
#![allow(unused_variables)]
|
#![allow(unused_variables)]
|
||||||
|
|
||||||
//! Opend Inode-backed File Handle
|
//! Opened Inode-backed File Handle
|
||||||
|
|
||||||
mod dyn_cap;
|
mod dyn_cap;
|
||||||
mod static_cap;
|
mod static_cap;
|
||||||
|
@ -315,7 +315,7 @@ impl<T: Copy, R: TRights> Fifo<T, R> {
|
|||||||
|
|
||||||
impl<T, R: TRights> Fifo<T, R> {
|
impl<T, R: TRights> Fifo<T, R> {
|
||||||
/// Pushes an item into the endpoint.
|
/// Pushes an item into the endpoint.
|
||||||
/// If the `push` method failes, this method will return
|
/// If the `push` method fails, this method will return
|
||||||
/// `Err` containing the item that hasn't been pushed
|
/// `Err` containing the item that hasn't been pushed
|
||||||
#[require(R > Write)]
|
#[require(R > Write)]
|
||||||
pub fn push(&self, item: T) -> core::result::Result<(), T> {
|
pub fn push(&self, item: T) -> core::result::Result<(), T> {
|
||||||
|
@ -72,7 +72,7 @@ impl PageCache {
|
|||||||
pub fn resize(&self, new_size: usize) -> Result<()> {
|
pub fn resize(&self, new_size: usize) -> Result<()> {
|
||||||
// If the new size is smaller and not page-aligned,
|
// If the new size is smaller and not page-aligned,
|
||||||
// first zero the gap between the new size and the
|
// first zero the gap between the new size and the
|
||||||
// next page boundry (or the old size), if such a gap exists.
|
// next page boundary (or the old size), if such a gap exists.
|
||||||
let old_size = self.pages.size();
|
let old_size = self.pages.size();
|
||||||
if old_size > new_size && new_size % PAGE_SIZE != 0 {
|
if old_size > new_size && new_size % PAGE_SIZE != 0 {
|
||||||
let gap_size = old_size.min(new_size.align_up(PAGE_SIZE)) - new_size;
|
let gap_size = old_size.min(new_size.align_up(PAGE_SIZE)) - new_size;
|
||||||
|
@ -153,7 +153,7 @@ impl DirInMemory {
|
|||||||
create_result.unwrap_err()
|
create_result.unwrap_err()
|
||||||
);
|
);
|
||||||
info!(
|
info!(
|
||||||
" create {:?}/{:?}({:?}) succeeeded",
|
" create {:?}/{:?}({:?}) succeeded",
|
||||||
self.name, name, type_
|
self.name, name, type_
|
||||||
);
|
);
|
||||||
|
|
||||||
|
@ -117,7 +117,7 @@ impl RangeLockItem {
|
|||||||
.range
|
.range
|
||||||
.set_start(new_start)
|
.set_start(new_start)
|
||||||
.expect("invalid new start");
|
.expect("invalid new start");
|
||||||
if let FileRangeChange::Shrinked = change {
|
if let FileRangeChange::Shrunk = change {
|
||||||
self.wake_all();
|
self.wake_all();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -126,7 +126,7 @@ impl RangeLockItem {
|
|||||||
/// If the range shrinks, it will wake all waiting processes
|
/// If the range shrinks, it will wake all waiting processes
|
||||||
pub fn set_end(&mut self, new_end: usize) {
|
pub fn set_end(&mut self, new_end: usize) {
|
||||||
let change = self.range().set_end(new_end).expect("invalid new end");
|
let change = self.range().set_end(new_end).expect("invalid new end");
|
||||||
if let FileRangeChange::Shrinked = change {
|
if let FileRangeChange::Shrunk = change {
|
||||||
self.wake_all();
|
self.wake_all();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -50,7 +50,7 @@ impl FileRange {
|
|||||||
let old_start = self.start;
|
let old_start = self.start;
|
||||||
self.start = new_start;
|
self.start = new_start;
|
||||||
let change = match new_start {
|
let change = match new_start {
|
||||||
new_start if new_start > old_start => FileRangeChange::Shrinked,
|
new_start if new_start > old_start => FileRangeChange::Shrunk,
|
||||||
new_start if new_start < old_start => FileRangeChange::Expanded,
|
new_start if new_start < old_start => FileRangeChange::Expanded,
|
||||||
_ => FileRangeChange::Same,
|
_ => FileRangeChange::Same,
|
||||||
};
|
};
|
||||||
@ -64,7 +64,7 @@ impl FileRange {
|
|||||||
let old_end = self.end;
|
let old_end = self.end;
|
||||||
self.end = new_end;
|
self.end = new_end;
|
||||||
let change = match new_end {
|
let change = match new_end {
|
||||||
new_end if new_end < old_end => FileRangeChange::Shrinked,
|
new_end if new_end < old_end => FileRangeChange::Shrunk,
|
||||||
new_end if new_end > old_end => FileRangeChange::Expanded,
|
new_end if new_end > old_end => FileRangeChange::Expanded,
|
||||||
_ => FileRangeChange::Same,
|
_ => FileRangeChange::Same,
|
||||||
};
|
};
|
||||||
@ -110,7 +110,7 @@ impl FileRange {
|
|||||||
pub enum FileRangeChange {
|
pub enum FileRangeChange {
|
||||||
Same,
|
Same,
|
||||||
Expanded,
|
Expanded,
|
||||||
Shrinked,
|
Shrunk,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The position of a range (say A) relative another overlapping range (say B).
|
/// The position of a range (say A) relative another overlapping range (say B).
|
||||||
|
@ -31,7 +31,7 @@ pub const SEMMNS: usize = SEMMNI * SEMMSL;
|
|||||||
pub const SEMOPM: usize = 500;
|
pub const SEMOPM: usize = 500;
|
||||||
/// MAximum semaphore value.
|
/// MAximum semaphore value.
|
||||||
pub const SEMVMX: i32 = 32767;
|
pub const SEMVMX: i32 = 32767;
|
||||||
/// Maximum value that can be recored for semaphore adjustment (SEM_UNDO).
|
/// Maximum value that can be recorded for semaphore adjustment (SEM_UNDO).
|
||||||
pub const SEMAEM: i32 = SEMVMX;
|
pub const SEMAEM: i32 = SEMVMX;
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
|
@ -89,7 +89,7 @@ impl AnyBoundSocket {
|
|||||||
/// Set the observer whose `on_events` will be called when certain iface events happen. After
|
/// Set the observer whose `on_events` will be called when certain iface events happen. After
|
||||||
/// setting, the new observer will fire once immediately to avoid missing any events.
|
/// setting, the new observer will fire once immediately to avoid missing any events.
|
||||||
///
|
///
|
||||||
/// If there is an existing observer, due to race conditions, this function does not guarentee
|
/// If there is an existing observer, due to race conditions, this function does not guarantee
|
||||||
/// that the old observer will never be called after the setting. Users should be aware of this
|
/// that the old observer will never be called after the setting. Users should be aware of this
|
||||||
/// and proactively handle the race conditions if necessary.
|
/// and proactively handle the race conditions if necessary.
|
||||||
pub fn set_observer(&self, handler: Weak<dyn Observer<()>>) {
|
pub fn set_observer(&self, handler: Weak<dyn Observer<()>>) {
|
||||||
|
@ -41,7 +41,7 @@ pub trait Iface: internal::IfaceInternal + Send + Sync {
|
|||||||
fn poll(&self);
|
fn poll(&self);
|
||||||
|
|
||||||
/// Bind a socket to the iface. So the packet for this socket will be dealt with by the interface.
|
/// Bind a socket to the iface. So the packet for this socket will be dealt with by the interface.
|
||||||
/// If port is None, the iface will pick up an empheral port for the socket.
|
/// If port is None, the iface will pick up an ephemeral port for the socket.
|
||||||
/// FIXME: The reason for binding socket and interface together is because there are limitations inside smoltcp.
|
/// FIXME: The reason for binding socket and interface together is because there are limitations inside smoltcp.
|
||||||
/// See discussion at <https://github.com/smoltcp-rs/smoltcp/issues/779>.
|
/// See discussion at <https://github.com/smoltcp-rs/smoltcp/issues/779>.
|
||||||
fn bind_socket(
|
fn bind_socket(
|
||||||
|
@ -71,7 +71,7 @@ impl BoundDatagram {
|
|||||||
return_errno_with_message!(Errno::EAGAIN, "the send buffer is full")
|
return_errno_with_message!(Errno::EAGAIN, "the send buffer is full")
|
||||||
}
|
}
|
||||||
Some(Err(SendError::Unaddressable)) => {
|
Some(Err(SendError::Unaddressable)) => {
|
||||||
return_errno_with_message!(Errno::EINVAL, "the destionation address is invalid")
|
return_errno_with_message!(Errno::EINVAL, "the destination address is invalid")
|
||||||
}
|
}
|
||||||
None => return_errno_with_message!(Errno::EMSGSIZE, "the message is too large"),
|
None => return_errno_with_message!(Errno::EMSGSIZE, "the message is too large"),
|
||||||
}
|
}
|
||||||
|
@ -101,7 +101,7 @@ impl DatagramSocket {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn try_bind_empheral(&self, remote_endpoint: &IpEndpoint) -> Result<()> {
|
fn try_bind_ephemeral(&self, remote_endpoint: &IpEndpoint) -> Result<()> {
|
||||||
// Fast path
|
// Fast path
|
||||||
if let Inner::Bound(_) = self.inner.read().as_ref() {
|
if let Inner::Bound(_) = self.inner.read().as_ref() {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
@ -269,7 +269,7 @@ impl Socket for DatagramSocket {
|
|||||||
fn connect(&self, socket_addr: SocketAddr) -> Result<()> {
|
fn connect(&self, socket_addr: SocketAddr) -> Result<()> {
|
||||||
let endpoint = socket_addr.try_into()?;
|
let endpoint = socket_addr.try_into()?;
|
||||||
|
|
||||||
self.try_bind_empheral(&endpoint)?;
|
self.try_bind_ephemeral(&endpoint)?;
|
||||||
|
|
||||||
let mut inner = self.inner.write();
|
let mut inner = self.inner.write();
|
||||||
let Inner::Bound(bound_datagram) = inner.as_mut() else {
|
let Inner::Bound(bound_datagram) = inner.as_mut() else {
|
||||||
@ -311,7 +311,7 @@ impl Socket for DatagramSocket {
|
|||||||
let remote_endpoint = match addr {
|
let remote_endpoint = match addr {
|
||||||
Some(remote_addr) => {
|
Some(remote_addr) => {
|
||||||
let endpoint = remote_addr.try_into()?;
|
let endpoint = remote_addr.try_into()?;
|
||||||
self.try_bind_empheral(&endpoint)?;
|
self.try_bind_ephemeral(&endpoint)?;
|
||||||
endpoint
|
endpoint
|
||||||
}
|
}
|
||||||
None => self.remote_endpoint().ok_or_else(|| {
|
None => self.remote_endpoint().ok_or_else(|| {
|
||||||
|
@ -481,7 +481,7 @@ impl Socket for StreamSocket {
|
|||||||
let state = self.state.read();
|
let state = self.state.read();
|
||||||
match state.as_ref() {
|
match state.as_ref() {
|
||||||
State::Connected(connected_stream) => connected_stream.shutdown(cmd),
|
State::Connected(connected_stream) => connected_stream.shutdown(cmd),
|
||||||
// TDOD: shutdown listening stream
|
// TODO: shutdown listening stream
|
||||||
_ => return_errno_with_message!(Errno::EINVAL, "cannot shutdown"),
|
_ => return_errno_with_message!(Errno::EINVAL, "cannot shutdown"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -20,8 +20,8 @@ impl Connected {
|
|||||||
addr: Option<UnixSocketAddrBound>,
|
addr: Option<UnixSocketAddrBound>,
|
||||||
peer_addr: Option<UnixSocketAddrBound>,
|
peer_addr: Option<UnixSocketAddrBound>,
|
||||||
) -> (Connected, Connected) {
|
) -> (Connected, Connected) {
|
||||||
let (writer_this, reader_peer) = Channel::with_capacity(DAFAULT_BUF_SIZE).split();
|
let (writer_this, reader_peer) = Channel::with_capacity(DEFAULT_BUF_SIZE).split();
|
||||||
let (writer_peer, reader_this) = Channel::with_capacity(DAFAULT_BUF_SIZE).split();
|
let (writer_peer, reader_this) = Channel::with_capacity(DEFAULT_BUF_SIZE).split();
|
||||||
|
|
||||||
let this = Connected {
|
let this = Connected {
|
||||||
addr: addr.clone(),
|
addr: addr.clone(),
|
||||||
@ -122,4 +122,4 @@ impl Connected {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const DAFAULT_BUF_SIZE: usize = 65536;
|
const DEFAULT_BUF_SIZE: usize = 65536;
|
||||||
|
@ -4,7 +4,7 @@ use crate::prelude::*;
|
|||||||
|
|
||||||
bitflags! {
|
bitflags! {
|
||||||
/// Flags used for send/recv.
|
/// Flags used for send/recv.
|
||||||
/// The definiton is from https://elixir.bootlin.com/linux/v6.0.9/source/include/linux/socket.h
|
/// The definition is from https://elixir.bootlin.com/linux/v6.0.9/source/include/linux/socket.h
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
#[derive(Pod)]
|
#[derive(Pod)]
|
||||||
pub struct SendRecvFlags: i32 {
|
pub struct SendRecvFlags: i32 {
|
||||||
|
@ -225,7 +225,7 @@ impl VsockSpace {
|
|||||||
let Some(listen) = listen_sockets.get(&event.destination.into()) else {
|
let Some(listen) = listen_sockets.get(&event.destination.into()) else {
|
||||||
return_errno_with_message!(
|
return_errno_with_message!(
|
||||||
Errno::EINVAL,
|
Errno::EINVAL,
|
||||||
"connecion request can only be handled by listening socket"
|
"connection request can only be handled by listening socket"
|
||||||
);
|
);
|
||||||
};
|
};
|
||||||
let peer = event.source;
|
let peer = event.source;
|
||||||
|
@ -56,8 +56,8 @@ impl Listen {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn update_io_events(&self) {
|
pub fn update_io_events(&self) {
|
||||||
let incomming_connection = self.incoming_connection.disable_irq().lock();
|
let incoming_connection = self.incoming_connection.disable_irq().lock();
|
||||||
if !incomming_connection.is_empty() {
|
if !incoming_connection.is_empty() {
|
||||||
self.pollee.add_events(IoEvents::IN);
|
self.pollee.add_events(IoEvents::IN);
|
||||||
} else {
|
} else {
|
||||||
self.pollee.del_events(IoEvents::IN);
|
self.pollee.del_events(IoEvents::IN);
|
||||||
|
@ -124,7 +124,7 @@ impl CloneFlags {
|
|||||||
/// Clone a child thread or child process.
|
/// Clone a child thread or child process.
|
||||||
///
|
///
|
||||||
/// FIXME: currently, the child process or thread will be scheduled to run at once,
|
/// FIXME: currently, the child process or thread will be scheduled to run at once,
|
||||||
/// but this may not be the expected bahavior.
|
/// but this may not be the expected behavior.
|
||||||
pub fn clone_child(
|
pub fn clone_child(
|
||||||
ctx: &Context,
|
ctx: &Context,
|
||||||
parent_context: &UserContext,
|
parent_context: &UserContext,
|
||||||
@ -411,7 +411,7 @@ fn clone_sighand(
|
|||||||
parent_sig_dispositions: &Arc<Mutex<SigDispositions>>,
|
parent_sig_dispositions: &Arc<Mutex<SigDispositions>>,
|
||||||
clone_flags: CloneFlags,
|
clone_flags: CloneFlags,
|
||||||
) -> Arc<Mutex<SigDispositions>> {
|
) -> Arc<Mutex<SigDispositions>> {
|
||||||
// similer to CLONE_FILES
|
// similar to CLONE_FILES
|
||||||
if clone_flags.contains(CloneFlags::CLONE_SIGHAND) {
|
if clone_flags.contains(CloneFlags::CLONE_SIGHAND) {
|
||||||
parent_sig_dispositions.clone()
|
parent_sig_dispositions.clone()
|
||||||
} else {
|
} else {
|
||||||
|
@ -49,7 +49,7 @@ impl<R: TRights> Credentials<R> {
|
|||||||
|
|
||||||
/// Gets real user id.
|
/// Gets real user id.
|
||||||
///
|
///
|
||||||
/// This method requies the `Read` right.
|
/// This method requires the `Read` right.
|
||||||
#[require(R > Read)]
|
#[require(R > Read)]
|
||||||
pub fn ruid(&self) -> Uid {
|
pub fn ruid(&self) -> Uid {
|
||||||
self.0.ruid()
|
self.0.ruid()
|
||||||
@ -57,7 +57,7 @@ impl<R: TRights> Credentials<R> {
|
|||||||
|
|
||||||
/// Gets effective user id.
|
/// Gets effective user id.
|
||||||
///
|
///
|
||||||
/// This method requies the `Read` right.
|
/// This method requires the `Read` right.
|
||||||
#[require(R > Read)]
|
#[require(R > Read)]
|
||||||
pub fn euid(&self) -> Uid {
|
pub fn euid(&self) -> Uid {
|
||||||
self.0.euid()
|
self.0.euid()
|
||||||
@ -65,7 +65,7 @@ impl<R: TRights> Credentials<R> {
|
|||||||
|
|
||||||
/// Gets saved-set user id.
|
/// Gets saved-set user id.
|
||||||
///
|
///
|
||||||
/// This method requies the `Read` right.
|
/// This method requires the `Read` right.
|
||||||
#[require(R > Read)]
|
#[require(R > Read)]
|
||||||
pub fn suid(&self) -> Uid {
|
pub fn suid(&self) -> Uid {
|
||||||
self.0.suid()
|
self.0.suid()
|
||||||
@ -73,7 +73,7 @@ impl<R: TRights> Credentials<R> {
|
|||||||
|
|
||||||
/// Gets file system user id.
|
/// Gets file system user id.
|
||||||
///
|
///
|
||||||
/// This method requies the `Read` right.
|
/// This method requires the `Read` right.
|
||||||
#[require(R > Read)]
|
#[require(R > Read)]
|
||||||
pub fn fsuid(&self) -> Uid {
|
pub fn fsuid(&self) -> Uid {
|
||||||
self.0.fsuid()
|
self.0.fsuid()
|
||||||
@ -143,7 +143,7 @@ impl<R: TRights> Credentials<R> {
|
|||||||
|
|
||||||
/// Gets real group id.
|
/// Gets real group id.
|
||||||
///
|
///
|
||||||
/// This method requies the `Read` right.
|
/// This method requires the `Read` right.
|
||||||
#[require(R > Read)]
|
#[require(R > Read)]
|
||||||
pub fn rgid(&self) -> Gid {
|
pub fn rgid(&self) -> Gid {
|
||||||
self.0.rgid()
|
self.0.rgid()
|
||||||
@ -151,7 +151,7 @@ impl<R: TRights> Credentials<R> {
|
|||||||
|
|
||||||
/// Gets effective group id.
|
/// Gets effective group id.
|
||||||
///
|
///
|
||||||
/// This method requies the `Read` right.
|
/// This method requires the `Read` right.
|
||||||
#[require(R > Read)]
|
#[require(R > Read)]
|
||||||
pub fn egid(&self) -> Gid {
|
pub fn egid(&self) -> Gid {
|
||||||
self.0.egid()
|
self.0.egid()
|
||||||
@ -159,7 +159,7 @@ impl<R: TRights> Credentials<R> {
|
|||||||
|
|
||||||
/// Gets saved-set group id.
|
/// Gets saved-set group id.
|
||||||
///
|
///
|
||||||
/// This method requies the `Read` right.
|
/// This method requires the `Read` right.
|
||||||
#[require(R > Read)]
|
#[require(R > Read)]
|
||||||
pub fn sgid(&self) -> Gid {
|
pub fn sgid(&self) -> Gid {
|
||||||
self.0.sgid()
|
self.0.sgid()
|
||||||
@ -167,7 +167,7 @@ impl<R: TRights> Credentials<R> {
|
|||||||
|
|
||||||
/// Gets file system group id.
|
/// Gets file system group id.
|
||||||
///
|
///
|
||||||
/// This method requies the `Read` right.
|
/// This method requires the `Read` right.
|
||||||
#[require(R > Read)]
|
#[require(R > Read)]
|
||||||
pub fn fsgid(&self) -> Gid {
|
pub fn fsgid(&self) -> Gid {
|
||||||
self.0.fsgid()
|
self.0.fsgid()
|
||||||
@ -237,7 +237,7 @@ impl<R: TRights> Credentials<R> {
|
|||||||
|
|
||||||
/// Acquires the read lock of supplementary group ids.
|
/// Acquires the read lock of supplementary group ids.
|
||||||
///
|
///
|
||||||
/// This method requies the `Read` right.
|
/// This method requires the `Read` right.
|
||||||
#[require(R > Read)]
|
#[require(R > Read)]
|
||||||
pub fn groups(&self) -> RwLockReadGuard<BTreeSet<Gid>> {
|
pub fn groups(&self) -> RwLockReadGuard<BTreeSet<Gid>> {
|
||||||
self.0.groups()
|
self.0.groups()
|
||||||
@ -255,7 +255,7 @@ impl<R: TRights> Credentials<R> {
|
|||||||
|
|
||||||
/// Gets the capabilities that child process can inherit.
|
/// Gets the capabilities that child process can inherit.
|
||||||
///
|
///
|
||||||
/// This method requies the `Read` right.
|
/// This method requires the `Read` right.
|
||||||
#[require(R > Read)]
|
#[require(R > Read)]
|
||||||
pub fn inheritable_capset(&self) -> CapSet {
|
pub fn inheritable_capset(&self) -> CapSet {
|
||||||
self.0.inheritable_capset()
|
self.0.inheritable_capset()
|
||||||
@ -263,7 +263,7 @@ impl<R: TRights> Credentials<R> {
|
|||||||
|
|
||||||
/// Gets the capabilities that are permitted.
|
/// Gets the capabilities that are permitted.
|
||||||
///
|
///
|
||||||
/// This method requies the `Read` right.
|
/// This method requires the `Read` right.
|
||||||
#[require(R > Read)]
|
#[require(R > Read)]
|
||||||
pub fn permitted_capset(&self) -> CapSet {
|
pub fn permitted_capset(&self) -> CapSet {
|
||||||
self.0.permitted_capset()
|
self.0.permitted_capset()
|
||||||
@ -271,7 +271,7 @@ impl<R: TRights> Credentials<R> {
|
|||||||
|
|
||||||
/// Gets the capabilities that actually use.
|
/// Gets the capabilities that actually use.
|
||||||
///
|
///
|
||||||
/// This method requies the `Read` right.
|
/// This method requires the `Read` right.
|
||||||
#[require(R > Read)]
|
#[require(R > Read)]
|
||||||
pub fn effective_capset(&self) -> CapSet {
|
pub fn effective_capset(&self) -> CapSet {
|
||||||
self.0.effective_capset()
|
self.0.effective_capset()
|
||||||
|
@ -33,7 +33,7 @@ pub struct RobustListHead {
|
|||||||
impl RobustListHead {
|
impl RobustListHead {
|
||||||
/// Return an iterator for all futexes in the robust list.
|
/// Return an iterator for all futexes in the robust list.
|
||||||
///
|
///
|
||||||
/// The futex refered to by `list_op_pending`, if any, will be returned as
|
/// The futex referred to by `list_op_pending`, if any, will be returned as
|
||||||
/// the last item.
|
/// the last item.
|
||||||
pub fn futexes(&self) -> FutexIter<'_> {
|
pub fn futexes(&self) -> FutexIter<'_> {
|
||||||
FutexIter::new(self)
|
FutexIter::new(self)
|
||||||
|
@ -7,7 +7,7 @@ use crate::{
|
|||||||
process::{process_table, Pgid, ProcessGroup},
|
process::{process_table, Pgid, ProcessGroup},
|
||||||
};
|
};
|
||||||
|
|
||||||
/// A termial is used to interact with system. A terminal can support the shell
|
/// A terminal is used to interact with system. A terminal can support the shell
|
||||||
/// job control.
|
/// job control.
|
||||||
///
|
///
|
||||||
/// We currently support two kinds of terminal, the tty and pty.
|
/// We currently support two kinds of terminal, the tty and pty.
|
||||||
|
@ -217,7 +217,7 @@ struct InitStackWriter {
|
|||||||
|
|
||||||
impl InitStackWriter {
|
impl InitStackWriter {
|
||||||
fn write(mut self) -> Result<()> {
|
fn write(mut self) -> Result<()> {
|
||||||
// FIXME: Some OSes may put the first page of excutable file here
|
// FIXME: Some OSes may put the first page of executable file here
|
||||||
// for interpreting elf headers.
|
// for interpreting elf headers.
|
||||||
|
|
||||||
let argc = self.argv.len() as u64;
|
let argc = self.argv.len() as u64;
|
||||||
@ -268,7 +268,7 @@ impl InitStackWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Libc ABI requires 16-byte alignment of the stack entrypoint.
|
/// Libc ABI requires 16-byte alignment of the stack entrypoint.
|
||||||
/// Current postion of the stack is 8-byte aligned already, insert 8 byte
|
/// Current position of the stack is 8-byte aligned already, insert 8 byte
|
||||||
/// to meet the requirement if necessary.
|
/// to meet the requirement if necessary.
|
||||||
fn adjust_stack_alignment(&self, envp_pointers: &[u64], argv_pointers: &[u64]) -> Result<()> {
|
fn adjust_stack_alignment(&self, envp_pointers: &[u64], argv_pointers: &[u64]) -> Result<()> {
|
||||||
// Ensure 8-byte alignment
|
// Ensure 8-byte alignment
|
||||||
@ -285,7 +285,7 @@ impl InitStackWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn write_aux_vec(&self) -> Result<()> {
|
fn write_aux_vec(&self) -> Result<()> {
|
||||||
// Write NULL auxilary
|
// Write NULL auxiliary
|
||||||
self.write_u64(0)?;
|
self.write_u64(0)?;
|
||||||
self.write_u64(AuxKey::AT_NULL as u64)?;
|
self.write_u64(AuxKey::AT_NULL as u64)?;
|
||||||
// Write Auxiliary vectors
|
// Write Auxiliary vectors
|
||||||
|
@ -16,7 +16,7 @@ use crate::{
|
|||||||
prelude::*,
|
prelude::*,
|
||||||
};
|
};
|
||||||
|
|
||||||
/// Load an executable to root vmar, including loading programe image, preparing heap and stack,
|
/// Load an executable to root vmar, including loading programme image, preparing heap and stack,
|
||||||
/// initializing argv, envp and aux tables.
|
/// initializing argv, envp and aux tables.
|
||||||
/// About recursion_limit: recursion limit is used to limit th recursion depth of shebang executables.
|
/// About recursion_limit: recursion limit is used to limit th recursion depth of shebang executables.
|
||||||
/// If the interpreter(the program behind #!) of shebang executable is also a shebang,
|
/// If the interpreter(the program behind #!) of shebang executable is also a shebang,
|
||||||
|
@ -26,7 +26,7 @@ pub fn parse_shebang_line(file_header_buffer: &[u8]) -> Result<Option<Vec<CStrin
|
|||||||
if shebang_argv.len() != 1 {
|
if shebang_argv.len() != 1 {
|
||||||
return_errno_with_message!(
|
return_errno_with_message!(
|
||||||
Errno::EINVAL,
|
Errno::EINVAL,
|
||||||
"One and only one intpreter program should be specified"
|
"One and only one interpreter program should be specified"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
Ok(Some(shebang_argv))
|
Ok(Some(shebang_argv))
|
||||||
|
@ -239,7 +239,7 @@ impl Observer<IoEvents> for EventCounter {
|
|||||||
/// according to the events.
|
/// according to the events.
|
||||||
///
|
///
|
||||||
/// This trait is added instead of creating a new method in [`Pollee`] because sometimes we do not
|
/// This trait is added instead of creating a new method in [`Pollee`] because sometimes we do not
|
||||||
/// have access to the internal [`Pollee`], but there is a method that provides the same sematics
|
/// have access to the internal [`Pollee`], but there is a method that provides the same semantics
|
||||||
/// as [`Pollee::poll`] and we need to perform event-based operations using that method.
|
/// as [`Pollee::poll`] and we need to perform event-based operations using that method.
|
||||||
pub trait Pollable {
|
pub trait Pollable {
|
||||||
/// Returns the interesting events if there are any, or waits for them to happen if there are
|
/// Returns the interesting events if there are any, or waits for them to happen if there are
|
||||||
|
@ -26,7 +26,7 @@ impl TryFrom<u8> for SigNum {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl SigNum {
|
impl SigNum {
|
||||||
/// Caller must ensure the sig_num is valid. otherweise, use try_from will check sig_num and does not panic.
|
/// Caller must ensure the sig_num is valid. Otherwise, use try_from will check sig_num and does not panic.
|
||||||
pub const fn from_u8(sig_num: u8) -> Self {
|
pub const fn from_u8(sig_num: u8) -> Self {
|
||||||
if sig_num > MAX_RT_SIG_NUM || sig_num < MIN_STD_SIG_NUM {
|
if sig_num > MAX_RT_SIG_NUM || sig_num < MIN_STD_SIG_NUM {
|
||||||
panic!("invalid signal number")
|
panic!("invalid signal number")
|
||||||
|
@ -83,7 +83,7 @@ impl SigStack {
|
|||||||
self.handler_counter -= 1
|
self.handler_counter -= 1
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Determins whether the stack is executed on by any signal handler
|
/// Determines whether the stack is executed on by any signal handler
|
||||||
pub fn is_active(&self) -> bool {
|
pub fn is_active(&self) -> bool {
|
||||||
// FIXME: can DISABLE stack be used?
|
// FIXME: can DISABLE stack be used?
|
||||||
self.handler_counter != 0 && !self.flags.contains(SigStackFlags::SS_AUTODISARM)
|
self.handler_counter != 0 && !self.flags.contains(SigStackFlags::SS_AUTODISARM)
|
||||||
|
@ -41,7 +41,7 @@ impl FaultSignal {
|
|||||||
let addr = Some(trap_info.page_fault_addr as u64);
|
let addr = Some(trap_info.page_fault_addr as u64);
|
||||||
(SIGSEGV, code, addr)
|
(SIGSEGV, code, addr)
|
||||||
}
|
}
|
||||||
_ => panic!("Exception cannnot be a signal"),
|
_ => panic!("Exception cannot be a signal"),
|
||||||
};
|
};
|
||||||
FaultSignal { num, code, addr }
|
FaultSignal { num, code, addr }
|
||||||
}
|
}
|
||||||
|
@ -206,7 +206,7 @@ impl Condvar {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Wait for the condition to become true,
|
/// Wait for the condition to become true,
|
||||||
/// and until the condition is explicitly woken up or interupted.
|
/// and until the condition is explicitly woken up or interrupted.
|
||||||
///
|
///
|
||||||
/// This function blocks until either the condition becomes false
|
/// This function blocks until either the condition becomes false
|
||||||
/// or the condition variable is explicitly notified.
|
/// or the condition variable is explicitly notified.
|
||||||
|
@ -76,7 +76,7 @@ pub enum MadviseBehavior {
|
|||||||
MADV_HUGEPAGE = 14, /* Worth backing with hugepages */
|
MADV_HUGEPAGE = 14, /* Worth backing with hugepages */
|
||||||
MADV_NOHUGEPAGE = 15, /* Not worth backing with hugepages */
|
MADV_NOHUGEPAGE = 15, /* Not worth backing with hugepages */
|
||||||
|
|
||||||
MADV_DONTDUMP = 16, /* Explicity exclude from the core dump,
|
MADV_DONTDUMP = 16, /* Explicitly exclude from the core dump,
|
||||||
overrides the coredump filter bits */
|
overrides the coredump filter bits */
|
||||||
MADV_DODUMP = 17, /* Clear the MADV_DONTDUMP flag */
|
MADV_DODUMP = 17, /* Clear the MADV_DONTDUMP flag */
|
||||||
|
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
// SPDX-License-Identifier: MPL-2.0
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
//! Read the Cpu ctx content then dispatch syscall to corrsponding handler
|
//! Read the Cpu ctx content then dispatch syscall to corresponding handler
|
||||||
//! The each sub module contains functions that handle real syscall logic.
|
//! The each sub module contains functions that handle real syscall logic.
|
||||||
pub use clock_gettime::ClockId;
|
pub use clock_gettime::ClockId;
|
||||||
use ostd::cpu::UserContext;
|
use ostd::cpu::UserContext;
|
||||||
@ -141,7 +141,7 @@ mod waitid;
|
|||||||
mod write;
|
mod write;
|
||||||
|
|
||||||
/// This macro is used to define syscall handler.
|
/// This macro is used to define syscall handler.
|
||||||
/// The first param is ths number of parameters,
|
/// The first param is the number of parameters,
|
||||||
/// The second param is the function name of syscall handler,
|
/// The second param is the function name of syscall handler,
|
||||||
/// The third is optional, means the args(if parameter number > 0),
|
/// The third is optional, means the args(if parameter number > 0),
|
||||||
/// The third is optional, means if cpu ctx is required.
|
/// The third is optional, means if cpu ctx is required.
|
||||||
|
@ -21,7 +21,7 @@ pub fn sys_read(
|
|||||||
|
|
||||||
// According to <https://man7.org/linux/man-pages/man2/read.2.html>, if
|
// According to <https://man7.org/linux/man-pages/man2/read.2.html>, if
|
||||||
// the user specified an empty buffer, we should detect errors by checking
|
// the user specified an empty buffer, we should detect errors by checking
|
||||||
// the file discriptor. If no errors detected, return 0 successfully.
|
// the file descriptor. If no errors detected, return 0 successfully.
|
||||||
let read_len = if buf_len != 0 {
|
let read_len = if buf_len != 0 {
|
||||||
let mut writer = ctx
|
let mut writer = ctx
|
||||||
.process
|
.process
|
||||||
|
@ -12,7 +12,7 @@ use crate::{
|
|||||||
};
|
};
|
||||||
|
|
||||||
pub fn sys_ftruncate(fd: FileDesc, len: isize, ctx: &Context) -> Result<SyscallReturn> {
|
pub fn sys_ftruncate(fd: FileDesc, len: isize, ctx: &Context) -> Result<SyscallReturn> {
|
||||||
debug!("fd = {}, lentgh = {}", fd, len);
|
debug!("fd = {}, length = {}", fd, len);
|
||||||
|
|
||||||
check_length(len, ctx)?;
|
check_length(len, ctx)?;
|
||||||
|
|
||||||
|
@ -21,7 +21,7 @@ pub fn sys_write(
|
|||||||
|
|
||||||
// According to <https://man7.org/linux/man-pages/man2/write.2.html>, if
|
// According to <https://man7.org/linux/man-pages/man2/write.2.html>, if
|
||||||
// the user specified an empty buffer, we should detect errors by checking
|
// the user specified an empty buffer, we should detect errors by checking
|
||||||
// the file discriptor. If no errors detected, return 0 successfully.
|
// the file descriptor. If no errors detected, return 0 successfully.
|
||||||
let write_len = if user_buf_len != 0 {
|
let write_len = if user_buf_len != 0 {
|
||||||
let mut reader = ctx
|
let mut reader = ctx
|
||||||
.process
|
.process
|
||||||
|
@ -97,7 +97,7 @@ fn log_trap_info(exception: &CpuException, trap_info: &CpuExceptionInfo) {
|
|||||||
DEVICE_NOT_AVAILABLE => log_trap_common!(DEVICE_NOT_AVAILABLE, trap_info),
|
DEVICE_NOT_AVAILABLE => log_trap_common!(DEVICE_NOT_AVAILABLE, trap_info),
|
||||||
DOUBLE_FAULT => log_trap_common!(DOUBLE_FAULT, trap_info),
|
DOUBLE_FAULT => log_trap_common!(DOUBLE_FAULT, trap_info),
|
||||||
COPROCESSOR_SEGMENT_OVERRUN => log_trap_common!(COPROCESSOR_SEGMENT_OVERRUN, trap_info),
|
COPROCESSOR_SEGMENT_OVERRUN => log_trap_common!(COPROCESSOR_SEGMENT_OVERRUN, trap_info),
|
||||||
INVAILD_TSS => log_trap_common!(INVAILD_TSS, trap_info),
|
INVALID_TSS => log_trap_common!(INVALID_TSS, trap_info),
|
||||||
SEGMENT_NOT_PRESENT => log_trap_common!(SEGMENT_NOT_PRESENT, trap_info),
|
SEGMENT_NOT_PRESENT => log_trap_common!(SEGMENT_NOT_PRESENT, trap_info),
|
||||||
STACK_SEGMENT_FAULT => log_trap_common!(STACK_SEGMENT_FAULT, trap_info),
|
STACK_SEGMENT_FAULT => log_trap_common!(STACK_SEGMENT_FAULT, trap_info),
|
||||||
GENERAL_PROTECTION_FAULT => log_trap_common!(GENERAL_PROTECTION_FAULT, trap_info),
|
GENERAL_PROTECTION_FAULT => log_trap_common!(GENERAL_PROTECTION_FAULT, trap_info),
|
||||||
|
@ -54,7 +54,7 @@ impl SystemTime {
|
|||||||
self.0.checked_add(duration).map(SystemTime)
|
self.0.checked_add(duration).map(SystemTime)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Substract a duration from self. If the result does not exceed inner bounds return Some(t), else return None.
|
/// Subtract a duration from self. If the result does not exceed inner bounds return Some(t), else return None.
|
||||||
pub fn checked_sub(&self, duration: Duration) -> Option<Self> {
|
pub fn checked_sub(&self, duration: Duration) -> Option<Self> {
|
||||||
let duration = convert_to_time_duration(duration);
|
let duration = convert_to_time_duration(duration);
|
||||||
self.0.checked_sub(duration).map(SystemTime)
|
self.0.checked_sub(duration).map(SystemTime)
|
||||||
|
@ -334,7 +334,7 @@ impl VmMapping {
|
|||||||
///
|
///
|
||||||
/// Generally, this function is only used in `protect()` method.
|
/// Generally, this function is only used in `protect()` method.
|
||||||
/// This method modifies the parent `Vmar` in the end if subdividing is required.
|
/// This method modifies the parent `Vmar` in the end if subdividing is required.
|
||||||
/// It removes current mapping and add splitted mapping to the Vmar.
|
/// It removes current mapping and add split mapping to the Vmar.
|
||||||
fn protect_with_subdivision(
|
fn protect_with_subdivision(
|
||||||
&self,
|
&self,
|
||||||
intersect_range: &Range<usize>,
|
intersect_range: &Range<usize>,
|
||||||
@ -402,7 +402,7 @@ impl VmMapping {
|
|||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
if trim_range.start <= map_to_addr && trim_range.end >= map_to_addr + map_size {
|
if trim_range.start <= map_to_addr && trim_range.end >= map_to_addr + map_size {
|
||||||
// Fast path: the whole mapping was trimed.
|
// Fast path: the whole mapping was trimmed.
|
||||||
self.unmap(trim_range, true)?;
|
self.unmap(trim_range, true)?;
|
||||||
mappings_to_remove.push_back(map_to_addr);
|
mappings_to_remove.push_back(map_to_addr);
|
||||||
return Ok(());
|
return Ok(());
|
||||||
|
@ -73,7 +73,7 @@ pub use pager::Pager;
|
|||||||
///
|
///
|
||||||
pub struct Vmo<R = Rights>(pub(super) Arc<Vmo_>, R);
|
pub struct Vmo<R = Rights>(pub(super) Arc<Vmo_>, R);
|
||||||
|
|
||||||
/// Functions exist both for static capbility and dynamic capibility
|
/// Functions exist both for static capability and dynamic capability
|
||||||
pub trait VmoRightsOp {
|
pub trait VmoRightsOp {
|
||||||
/// Returns the access rights.
|
/// Returns the access rights.
|
||||||
fn rights(&self) -> Rights;
|
fn rights(&self) -> Rights;
|
||||||
@ -94,7 +94,7 @@ pub trait VmoRightsOp {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// We implement this trait for VMO, so we can use functions on type like Vmo<R> without trait bounds.
|
// We implement this trait for VMO, so we can use functions on type like Vmo<R> without trait bounds.
|
||||||
// FIXME: This requires the imcomplete feature specialization, which should be fixed further.
|
// FIXME: This requires the incomplete feature specialization, which should be fixed further.
|
||||||
impl<R> VmoRightsOp for Vmo<R> {
|
impl<R> VmoRightsOp for Vmo<R> {
|
||||||
default fn rights(&self) -> Rights {
|
default fn rights(&self) -> Rights {
|
||||||
unimplemented!()
|
unimplemented!()
|
||||||
|
@ -13,7 +13,7 @@ OSDK (short for Operating System Development Kit) is designed to simplify the de
|
|||||||
|
|
||||||
#### Requirements
|
#### Requirements
|
||||||
|
|
||||||
Currenly, `cargo-osdk` only supports x86_64 ubuntu system.
|
Currently, `cargo-osdk` only supports x86_64 ubuntu system.
|
||||||
|
|
||||||
To run a kernel with QEMU, `cargo-osdk` requires the following tools to be installed:
|
To run a kernel with QEMU, `cargo-osdk` requires the following tools to be installed:
|
||||||
- Rust >= 1.75.0
|
- Rust >= 1.75.0
|
||||||
|
@ -80,9 +80,9 @@ pub fn new_base_crate(
|
|||||||
// here when OSTD is ready
|
// here when OSTD is ready
|
||||||
include_linker_script!(["x86_64.ld"]);
|
include_linker_script!(["x86_64.ld"]);
|
||||||
|
|
||||||
// Overrite the main.rs file
|
// Overwrite the main.rs file
|
||||||
let main_rs = include_str!("main.rs.template");
|
let main_rs = include_str!("main.rs.template");
|
||||||
// Replace all occurence of `#TARGET_NAME#` with the `dep_crate_name`
|
// Replace all occurrence of `#TARGET_NAME#` with the `dep_crate_name`
|
||||||
let main_rs = main_rs.replace("#TARGET_NAME#", &dep_crate_name.replace('-', "_"));
|
let main_rs = main_rs.replace("#TARGET_NAME#", &dep_crate_name.replace('-', "_"));
|
||||||
fs::write("src/main.rs", main_rs).unwrap();
|
fs::write("src/main.rs", main_rs).unwrap();
|
||||||
|
|
||||||
@ -104,10 +104,10 @@ fn add_manifest_dependency(
|
|||||||
crate_path: impl AsRef<Path>,
|
crate_path: impl AsRef<Path>,
|
||||||
link_unit_test_runner: bool,
|
link_unit_test_runner: bool,
|
||||||
) {
|
) {
|
||||||
let mainfest_path = "Cargo.toml";
|
let manifest_path = "Cargo.toml";
|
||||||
|
|
||||||
let mut manifest: toml::Table = {
|
let mut manifest: toml::Table = {
|
||||||
let content = fs::read_to_string(mainfest_path).unwrap();
|
let content = fs::read_to_string(manifest_path).unwrap();
|
||||||
toml::from_str(&content).unwrap()
|
toml::from_str(&content).unwrap()
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -151,7 +151,7 @@ fn add_manifest_dependency(
|
|||||||
}
|
}
|
||||||
|
|
||||||
let content = toml::to_string(&manifest).unwrap();
|
let content = toml::to_string(&manifest).unwrap();
|
||||||
fs::write(mainfest_path, content).unwrap();
|
fs::write(manifest_path, content).unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
fn copy_profile_configurations(workspace_root: impl AsRef<Path>) {
|
fn copy_profile_configurations(workspace_root: impl AsRef<Path>) {
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# This template file is used by the runner script to generate the acutal grub.cfg
|
# This template file is used by the runner script to generate the actual grub.cfg
|
||||||
|
|
||||||
# AUTOMATICALLY GENERATED FILE, DO NOT EDIT IF YOU KNOW WHAT YOU ARE DOING
|
# AUTOMATICALLY GENERATED FILE, DO NOT EDIT IF YOU KNOW WHAT YOU ARE DOING
|
||||||
|
|
||||||
|
@ -113,7 +113,7 @@ fn generate_grub_cfg(
|
|||||||
|
|
||||||
// Delete the first two lines that notes the file a template file.
|
// Delete the first two lines that note that the file is a template file.
|
||||||
let grub_cfg = grub_cfg.lines().skip(2).collect::<Vec<&str>>().join("\n");
|
let grub_cfg = grub_cfg.lines().skip(2).collect::<Vec<&str>>().join("\n");
|
||||||
// Set the timout style and timeout.
|
// Set the timeout style and timeout.
|
||||||
let grub_cfg = grub_cfg
|
let grub_cfg = grub_cfg
|
||||||
.replace(
|
.replace(
|
||||||
"#GRUB_TIMEOUT_STYLE#",
|
"#GRUB_TIMEOUT_STYLE#",
|
||||||
|
@ -31,10 +31,10 @@ fn aster_rust_toolchain() -> String {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn add_manifest_dependencies(cargo_metadata: &serde_json::Value, crate_name: &str) {
|
fn add_manifest_dependencies(cargo_metadata: &serde_json::Value, crate_name: &str) {
|
||||||
let mainfest_path = get_manifest_path(cargo_metadata, crate_name);
|
let manifest_path = get_manifest_path(cargo_metadata, crate_name);
|
||||||
|
|
||||||
let mut manifest: toml::Table = {
|
let mut manifest: toml::Table = {
|
||||||
let content = fs::read_to_string(mainfest_path).unwrap();
|
let content = fs::read_to_string(manifest_path).unwrap();
|
||||||
toml::from_str(&content).unwrap()
|
toml::from_str(&content).unwrap()
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -44,7 +44,7 @@ fn add_manifest_dependencies(cargo_metadata: &serde_json::Value, crate_name: &st
|
|||||||
dependencies.as_table_mut().unwrap().extend(ostd_dep);
|
dependencies.as_table_mut().unwrap().extend(ostd_dep);
|
||||||
|
|
||||||
let content = toml::to_string(&manifest).unwrap();
|
let content = toml::to_string(&manifest).unwrap();
|
||||||
fs::write(mainfest_path, content).unwrap();
|
fs::write(manifest_path, content).unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add `target/osdk/base` to `exclude` array of the workspace manifest
|
// Add `target/osdk/base` to `exclude` array of the workspace manifest
|
||||||
|
@ -56,8 +56,8 @@ fn apply_args_before_finalize(action_scheme: &mut ActionScheme, args: &CommonArg
|
|||||||
if let Some(ref mut boot) = action_scheme.boot {
|
if let Some(ref mut boot) = action_scheme.boot {
|
||||||
apply_kv_array(&mut boot.kcmd_args, &args.kcmd_args, "=", &[]);
|
apply_kv_array(&mut boot.kcmd_args, &args.kcmd_args, "=", &[]);
|
||||||
for init_arg in &args.init_args {
|
for init_arg in &args.init_args {
|
||||||
for seperated_arg in init_arg.split(' ') {
|
for separated_arg in init_arg.split(' ') {
|
||||||
boot.init_args.push(seperated_arg.to_string());
|
boot.init_args.push(separated_arg.to_string());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if let Some(initramfs) = &args.initramfs {
|
if let Some(initramfs) = &args.initramfs {
|
||||||
|
@ -26,7 +26,7 @@ pub enum BootMethod {
|
|||||||
/// Boot the kernel by making a Qcow2 image with Grub as the bootloader.
|
/// Boot the kernel by making a Qcow2 image with Grub as the bootloader.
|
||||||
GrubQcow2,
|
GrubQcow2,
|
||||||
/// Use the [QEMU direct boot](https://qemu-project.gitlab.io/qemu/system/linuxboot.html)
|
/// Use the [QEMU direct boot](https://qemu-project.gitlab.io/qemu/system/linuxboot.html)
|
||||||
/// to boot the kernel with QEMU's built-in Seabios and Coreboot utilites.
|
/// to boot the kernel with QEMU's built-in Seabios and Coreboot utilities.
|
||||||
#[default]
|
#[default]
|
||||||
QemuDirect,
|
QemuDirect,
|
||||||
}
|
}
|
||||||
|
@ -18,7 +18,7 @@ pub use qemu::*;
|
|||||||
pub struct Scheme {
|
pub struct Scheme {
|
||||||
// The user is not allowed to set this field. However,
|
// The user is not allowed to set this field. However,
|
||||||
// the manifest loader set this and all actions such
|
// the manifest loader set this and all actions such
|
||||||
// as runnning, testing, and building will use this field.
|
// as running, testing, and building will use this field.
|
||||||
pub work_dir: Option<PathBuf>,
|
pub work_dir: Option<PathBuf>,
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub supported_archs: Vec<Arch>,
|
pub supported_archs: Vec<Arch>,
|
||||||
|
@ -45,11 +45,11 @@ pub fn split_to_kv_array(args: &str) -> Vec<String> {
|
|||||||
pub fn apply_kv_array(
|
pub fn apply_kv_array(
|
||||||
array: &mut Vec<String>,
|
array: &mut Vec<String>,
|
||||||
args: &Vec<String>,
|
args: &Vec<String>,
|
||||||
seperator: &str,
|
separator: &str,
|
||||||
multi_value_keys: &[&str],
|
multi_value_keys: &[&str],
|
||||||
) {
|
) {
|
||||||
let multi_value_keys = {
|
let multi_value_keys = {
|
||||||
let mut inferred_keys = infer_multi_value_keys(array, seperator);
|
let mut inferred_keys = infer_multi_value_keys(array, separator);
|
||||||
for key in multi_value_keys {
|
for key in multi_value_keys {
|
||||||
inferred_keys.insert(key.to_string());
|
inferred_keys.insert(key.to_string());
|
||||||
}
|
}
|
||||||
@ -63,8 +63,8 @@ pub fn apply_kv_array(
|
|||||||
let mut multi_value_key_strings: IndexMap<String, Vec<String>> = IndexMap::new();
|
let mut multi_value_key_strings: IndexMap<String, Vec<String>> = IndexMap::new();
|
||||||
for item in array.drain(..) {
|
for item in array.drain(..) {
|
||||||
// Each key-value string has two patterns:
|
// Each key-value string has two patterns:
|
||||||
// 1. Seperated by separator: key value / key=value
|
// 1. Separated by separator: key value / key=value
|
||||||
if let Some(key) = get_key(&item, seperator) {
|
if let Some(key) = get_key(&item, separator) {
|
||||||
if multi_value_keys.contains(&key) {
|
if multi_value_keys.contains(&key) {
|
||||||
if let Some(v) = multi_value_key_strings.get_mut(&key) {
|
if let Some(v) = multi_value_key_strings.get_mut(&key) {
|
||||||
v.push(item);
|
v.push(item);
|
||||||
@ -83,7 +83,7 @@ pub fn apply_kv_array(
|
|||||||
}
|
}
|
||||||
|
|
||||||
for arg in args {
|
for arg in args {
|
||||||
if let Some(key) = get_key(arg, seperator) {
|
if let Some(key) = get_key(arg, separator) {
|
||||||
if multi_value_keys.contains(&key) {
|
if multi_value_keys.contains(&key) {
|
||||||
if let Some(v) = multi_value_key_strings.get_mut(&key) {
|
if let Some(v) = multi_value_key_strings.get_mut(&key) {
|
||||||
v.push(arg.to_owned());
|
v.push(arg.to_owned());
|
||||||
@ -108,27 +108,27 @@ pub fn apply_kv_array(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn infer_multi_value_keys(array: &Vec<String>, seperator: &str) -> IndexSet<String> {
|
fn infer_multi_value_keys(array: &Vec<String>, separator: &str) -> IndexSet<String> {
|
||||||
let mut multi_val_keys = IndexSet::new();
|
let mut multi_val_keys = IndexSet::new();
|
||||||
|
|
||||||
let mut occured_keys = IndexSet::new();
|
let mut occurred_keys = IndexSet::new();
|
||||||
for item in array {
|
for item in array {
|
||||||
let Some(key) = get_key(item, seperator) else {
|
let Some(key) = get_key(item, separator) else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
if occured_keys.contains(&key) {
|
if occurred_keys.contains(&key) {
|
||||||
multi_val_keys.insert(key);
|
multi_val_keys.insert(key);
|
||||||
} else {
|
} else {
|
||||||
occured_keys.insert(key);
|
occurred_keys.insert(key);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
multi_val_keys
|
multi_val_keys
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_key(item: &str, seperator: &str) -> Option<String> {
|
pub fn get_key(item: &str, separator: &str) -> Option<String> {
|
||||||
let split = item.split(seperator).collect::<Vec<_>>();
|
let split = item.split(separator).collect::<Vec<_>>();
|
||||||
let len = split.len();
|
let len = split.len();
|
||||||
if len > 2 || len == 0 {
|
if len > 2 || len == 0 {
|
||||||
error_msg!("`{}` is an invalid argument.", item);
|
error_msg!("`{}` is an invalid argument.", item);
|
||||||
|
@ -133,7 +133,7 @@ impl Default for KtestTree {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The `KtestTreeIter` will iterate over all crates. Yeilding `KtestCrate`s.
|
/// The `KtestTreeIter` will iterate over all crates. Yielding `KtestCrate`s.
|
||||||
pub struct KtestTreeIter<'a> {
|
pub struct KtestTreeIter<'a> {
|
||||||
crate_iter: btree_map::Iter<'a, String, KtestCrate>,
|
crate_iter: btree_map::Iter<'a, String, KtestCrate>,
|
||||||
}
|
}
|
||||||
@ -156,7 +156,7 @@ impl<'a> Iterator for KtestTreeIter<'a> {
|
|||||||
|
|
||||||
type CrateChildrenIter<'a> = btree_map::Iter<'a, String, KtestModule>;
|
type CrateChildrenIter<'a> = btree_map::Iter<'a, String, KtestModule>;
|
||||||
|
|
||||||
/// The `KtestCrateIter` will iterate over all modules in a crate. Yeilding `KtestModule`s.
|
/// The `KtestCrateIter` will iterate over all modules in a crate. Yielding `KtestModule`s.
|
||||||
/// The iterator will return modules in the depth-first-search order of the module tree.
|
/// The iterator will return modules in the depth-first-search order of the module tree.
|
||||||
pub struct KtestCrateIter<'a> {
|
pub struct KtestCrateIter<'a> {
|
||||||
path: Vec<(&'a KtestModule, CrateChildrenIter<'a>)>,
|
path: Vec<(&'a KtestModule, CrateChildrenIter<'a>)>,
|
||||||
@ -192,7 +192,7 @@ impl<'a> Iterator for KtestCrateIter<'a> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The `KtestModuleIter` will iterate over all tests in a crate. Yeilding `KtestItem`s.
|
/// The `KtestModuleIter` will iterate over all tests in a crate. Yielding `KtestItem`s.
|
||||||
pub struct KtestModuleIter<'a> {
|
pub struct KtestModuleIter<'a> {
|
||||||
test_iter: core::slice::Iter<'a, KtestItem>,
|
test_iter: core::slice::Iter<'a, KtestItem>,
|
||||||
}
|
}
|
||||||
|
@ -38,7 +38,7 @@ fn create_user_space(program: &[u8]) -> UserSpace {
|
|||||||
let nframes = program.len().align_up(PAGE_SIZE) / PAGE_SIZE;
|
let nframes = program.len().align_up(PAGE_SIZE) / PAGE_SIZE;
|
||||||
let user_pages = {
|
let user_pages = {
|
||||||
let vm_frames = FrameAllocOptions::new(nframes).alloc().unwrap();
|
let vm_frames = FrameAllocOptions::new(nframes).alloc().unwrap();
|
||||||
// Phyiscal memory pages can be only accessed
|
// Physical memory pages can be only accessed
|
||||||
// via the Frame abstraction.
|
// via the Frame abstraction.
|
||||||
vm_frames.write_bytes(0, program).unwrap();
|
vm_frames.write_bytes(0, program).unwrap();
|
||||||
vm_frames
|
vm_frames
|
||||||
|
@ -35,7 +35,7 @@ bitflags::bitflags! {
|
|||||||
const EXECUTABLE_IMAGE = 1 << 1;
|
const EXECUTABLE_IMAGE = 1 << 1;
|
||||||
const LINE_NUMS_STRIPPED = 1 << 2;
|
const LINE_NUMS_STRIPPED = 1 << 2;
|
||||||
const LOCAL_SYMS_STRIPPED = 1 << 3;
|
const LOCAL_SYMS_STRIPPED = 1 << 3;
|
||||||
const AGGRESIVE_WS_TRIM = 1 << 4;
|
const AGGRESSIVE_WS_TRIM = 1 << 4;
|
||||||
const LARGE_ADDRESS_AWARE = 1 << 5;
|
const LARGE_ADDRESS_AWARE = 1 << 5;
|
||||||
const SIXTEEN_BIT_MACHINE = 1 << 6;
|
const SIXTEEN_BIT_MACHINE = 1 << 6;
|
||||||
const BYTES_REVERSED_LO = 1 << 7;
|
const BYTES_REVERSED_LO = 1 << 7;
|
||||||
|
@ -9,7 +9,7 @@
|
|||||||
//! immediately after the initialization of `ostd`. Thus you can use any
|
//! immediately after the initialization of `ostd`. Thus you can use any
|
||||||
//! feature provided by the frame including the heap allocator, etc.
|
//! feature provided by the frame including the heap allocator, etc.
|
||||||
//!
|
//!
|
||||||
//! By all means, ostd-test is an individule crate that only requires:
|
//! By all means, ostd-test is an individual crate that only requires:
|
||||||
//! - a custom linker script section `.ktest_array`,
|
//! - a custom linker script section `.ktest_array`,
|
||||||
//! - and an alloc implementation.
|
//! - and an alloc implementation.
|
||||||
//!
|
//!
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
/* SPDX-License-Identifier: MPL-2.0 */
|
/* SPDX-License-Identifier: MPL-2.0 */
|
||||||
|
|
||||||
// The boot routine excecuted by the application processor.
|
// The boot routine executed by the application processor.
|
||||||
|
|
||||||
.extern boot_gdtr
|
.extern boot_gdtr
|
||||||
.extern boot_page_table_start
|
.extern boot_page_table_start
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
/* SPDX-License-Identifier: MPL-2.0 */
|
/* SPDX-License-Identifier: MPL-2.0 */
|
||||||
|
|
||||||
// The boot routine excecuted by the bootstrap processor.
|
// The boot routine executed by the bootstrap processor.
|
||||||
|
|
||||||
// The boot header, initial boot setup code, temporary GDT and page tables are
|
// The boot header, initial boot setup code, temporary GDT and page tables are
|
||||||
// in the boot section. The boot section is mapped writable since kernel may
|
// in the boot section. The boot section is mapped writable since kernel may
|
||||||
|
@ -13,7 +13,7 @@
|
|||||||
//!
|
//!
|
||||||
//! Asterinas diffrentiates the boot protocol by the entry point
|
//! Asterinas differentiates the boot protocol by the entry point
|
||||||
//! chosen by the boot loader. In each entry point function,
|
//! chosen by the boot loader. In each entry point function,
|
||||||
//! the universal callback registeration method from
|
//! the universal callback registration method from
|
||||||
//! `crate::boot` will be called. Thus the initialization of
|
//! `crate::boot` will be called. Thus the initialization of
|
||||||
//! boot information is transparent for the upper level kernel.
|
//! boot information is transparent for the upper level kernel.
|
||||||
//!
|
//!
|
||||||
|
@ -345,24 +345,24 @@ struct MemoryEntry {
|
|||||||
|
|
||||||
impl MemoryEntry {
|
impl MemoryEntry {
|
||||||
fn size(&self) -> u32 {
|
fn size(&self) -> u32 {
|
||||||
// SAFETY: the entry can only be contructed from a valid address.
|
// SAFETY: the entry can only be constructed from a valid address.
|
||||||
unsafe { (self.ptr as *const u32).read_unaligned() }
|
unsafe { (self.ptr as *const u32).read_unaligned() }
|
||||||
}
|
}
|
||||||
|
|
||||||
fn base_addr(&self) -> u64 {
|
fn base_addr(&self) -> u64 {
|
||||||
// SAFETY: the entry can only be contructed from a valid address.
|
// SAFETY: the entry can only be constructed from a valid address.
|
||||||
unsafe { ((self.ptr + 4) as *const u64).read_unaligned() }
|
unsafe { ((self.ptr + 4) as *const u64).read_unaligned() }
|
||||||
}
|
}
|
||||||
|
|
||||||
fn length(&self) -> u64 {
|
fn length(&self) -> u64 {
|
||||||
// SAFETY: the entry can only be contructed from a valid address.
|
// SAFETY: the entry can only be constructed from a valid address.
|
||||||
unsafe { ((self.ptr + 12) as *const u64).read_unaligned() }
|
unsafe { ((self.ptr + 12) as *const u64).read_unaligned() }
|
||||||
}
|
}
|
||||||
|
|
||||||
fn memory_type(&self) -> MemoryRegionType {
|
fn memory_type(&self) -> MemoryRegionType {
|
||||||
// The multiboot (v1) manual doesn't specify the length of the type field.
|
// The multiboot (v1) manual doesn't specify the length of the type field.
|
||||||
// Experimental result shows that "u8" works. So be it.
|
// Experimental result shows that "u8" works. So be it.
|
||||||
// SAFETY: the entry can only be contructed from a valid address.
|
// SAFETY: the entry can only be constructed from a valid address.
|
||||||
let typ_val = unsafe { ((self.ptr + 20) as *const u8).read_unaligned() };
|
let typ_val = unsafe { ((self.ptr + 20) as *const u8).read_unaligned() };
|
||||||
// The meaning of the values are however documented clearly by the manual.
|
// The meaning of the values are however documented clearly by the manual.
|
||||||
match typ_val {
|
match typ_val {
|
||||||
|
@ -142,11 +142,11 @@ fn send_startup_to_all_aps() {
|
|||||||
let icr = Icr::new(
|
let icr = Icr::new(
|
||||||
ApicId::from(0),
|
ApicId::from(0),
|
||||||
DestinationShorthand::AllExcludingSelf,
|
DestinationShorthand::AllExcludingSelf,
|
||||||
TriggerMode::Egde,
|
TriggerMode::Edge,
|
||||||
Level::Assert,
|
Level::Assert,
|
||||||
DeliveryStatus::Idle,
|
DeliveryStatus::Idle,
|
||||||
DestinationMode::Physical,
|
DestinationMode::Physical,
|
||||||
DeliveryMode::StrartUp,
|
DeliveryMode::StartUp,
|
||||||
(AP_BOOT_START_PA / PAGE_SIZE) as u8,
|
(AP_BOOT_START_PA / PAGE_SIZE) as u8,
|
||||||
);
|
);
|
||||||
// SAFETY: we are sending startup IPI to all APs.
|
// SAFETY: we are sending startup IPI to all APs.
|
||||||
|
@ -238,7 +238,7 @@ define_cpu_exception!(
|
|||||||
[DEVICE_NOT_AVAILABLE = 7, Fault],
|
[DEVICE_NOT_AVAILABLE = 7, Fault],
|
||||||
[DOUBLE_FAULT = 8, Abort],
|
[DOUBLE_FAULT = 8, Abort],
|
||||||
[COPROCESSOR_SEGMENT_OVERRUN = 9, Fault],
|
[COPROCESSOR_SEGMENT_OVERRUN = 9, Fault],
|
||||||
[INVAILD_TSS = 10, Fault],
|
[INVALID_TSS = 10, Fault],
|
||||||
[SEGMENT_NOT_PRESENT = 11, Fault],
|
[SEGMENT_NOT_PRESENT = 11, Fault],
|
||||||
[STACK_SEGMENT_FAULT = 12, Fault],
|
[STACK_SEGMENT_FAULT = 12, Fault],
|
||||||
[GENERAL_PROTECTION_FAULT = 13, Fault],
|
[GENERAL_PROTECTION_FAULT = 13, Fault],
|
||||||
|
@ -67,7 +67,7 @@ impl SerialPort {
|
|||||||
// set interrupt watermark at 14 bytes
|
// set interrupt watermark at 14 bytes
|
||||||
self.fifo_ctrl.write(0xC7);
|
self.fifo_ctrl.write(0xC7);
|
||||||
// Mark data terminal ready, signal request to send
|
// Mark data terminal ready, signal request to send
|
||||||
// and enable auxilliary output #2 (used as interrupt line for CPU)
|
// and enable auxiliary output #2 (used as interrupt line for CPU)
|
||||||
self.modem_ctrl.write(0x0B);
|
self.modem_ctrl.write(0x0B);
|
||||||
// Enable interrupts
|
// Enable interrupts
|
||||||
self.int_en.write(0x01);
|
self.int_en.write(0x01);
|
||||||
|
@ -117,7 +117,7 @@ impl RootTable {
|
|||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
if bus_entry.is_present() {
|
if bus_entry.is_present() {
|
||||||
warn!("IOMMU: Overwritting the existing device page table");
|
warn!("IOMMU: Overwriting the existing device page table");
|
||||||
}
|
}
|
||||||
let address = unsafe { page_table.root_paddr() };
|
let address = unsafe { page_table.root_paddr() };
|
||||||
context_table.page_tables.insert(address, page_table);
|
context_table.page_tables.insert(address, page_table);
|
||||||
|
@ -156,7 +156,7 @@ impl Debug for FaultRecording {
|
|||||||
.field("Request type", &self.request_type())
|
.field("Request type", &self.request_type())
|
||||||
.field("Address type", &self.address_type())
|
.field("Address type", &self.address_type())
|
||||||
.field("Source identifier", &self.source_identifier())
|
.field("Source identifier", &self.source_identifier())
|
||||||
.field("Fault Reson", &self.fault_reason())
|
.field("Fault Reason", &self.fault_reason())
|
||||||
.field("Fault info", &self.fault_info())
|
.field("Fault info", &self.fault_info())
|
||||||
.field("Raw", &self.0)
|
.field("Raw", &self.0)
|
||||||
.finish()
|
.finish()
|
||||||
|
@ -24,7 +24,7 @@ use crate::{
|
|||||||
/// RSDP information, key is the signature, value is the virtual address of the signature
|
/// RSDP information, key is the signature, value is the virtual address of the signature
|
||||||
pub static ACPI_TABLES: Once<SpinLock<AcpiTables<AcpiMemoryHandler>>> = Once::new();
|
pub static ACPI_TABLES: Once<SpinLock<AcpiTables<AcpiMemoryHandler>>> = Once::new();
|
||||||
|
|
||||||
/// Sdt header wrapper, user can use this structure to easily derive Debug, get table information without creating a new struture.
|
/// Sdt header wrapper, user can use this structure to easily derive Debug, get table information without creating a new structure.
|
||||||
///
|
///
|
||||||
/// For example, in DMAR (DMA Remapping) structure,
|
/// For example, in DMAR (DMA Remapping) structure,
|
||||||
/// we can use the following code to get some information of DMAR, including address, length:
|
/// we can use the following code to get some information of DMAR, including address, length:
|
||||||
|
@ -22,7 +22,7 @@ static APIC_TYPE: Once<ApicType> = Once::new();
|
|||||||
///
|
///
|
||||||
/// You should provide a closure operating on the given mutable borrow of the
|
/// You should provide a closure operating on the given mutable borrow of the
|
||||||
/// local APIC instance. During the execution of the closure, the interrupts
|
/// local APIC instance. During the execution of the closure, the interrupts
|
||||||
/// are guarenteed to be disabled.
|
/// are guaranteed to be disabled.
|
||||||
///
|
///
|
||||||
/// Example:
|
/// Example:
|
||||||
/// ```rust
|
/// ```rust
|
||||||
@ -38,7 +38,7 @@ pub fn borrow<R>(f: impl FnOnce(&mut (dyn Apic + 'static)) -> R) -> R {
|
|||||||
let irq_guard = crate::trap::disable_local();
|
let irq_guard = crate::trap::disable_local();
|
||||||
let apic_guard = APIC_INSTANCE.get_with(&irq_guard);
|
let apic_guard = APIC_INSTANCE.get_with(&irq_guard);
|
||||||
|
|
||||||
// If it is not initialzed, lazily initialize it.
|
// If it is not initialized, lazily initialize it.
|
||||||
if !apic_guard.is_completed() {
|
if !apic_guard.is_completed() {
|
||||||
apic_guard.call_once(|| match APIC_TYPE.get().unwrap() {
|
apic_guard.call_once(|| match APIC_TYPE.get().unwrap() {
|
||||||
ApicType::XApic => {
|
ApicType::XApic => {
|
||||||
@ -115,7 +115,7 @@ enum ApicType {
|
|||||||
/// The inter-processor interrupt control register.
|
/// The inter-processor interrupt control register.
|
||||||
///
|
///
|
||||||
/// ICR is a 64-bit local APIC register that allows software running on the
|
/// ICR is a 64-bit local APIC register that allows software running on the
|
||||||
/// porcessor to specify and send IPIs to other porcessors in the system.
|
/// processor to specify and send IPIs to other processors in the system.
|
||||||
/// To send an IPI, software must set up the ICR to indicate the type of IPI
|
/// To send an IPI, software must set up the ICR to indicate the type of IPI
|
||||||
/// message to be sent and the destination processor or processors. (All fields
|
/// message to be sent and the destination processor or processors. (All fields
|
||||||
/// of the ICR are read-write by software with the exception of the delivery
|
/// of the ICR are read-write by software with the exception of the delivery
|
||||||
@ -248,7 +248,7 @@ pub enum DestinationShorthand {
|
|||||||
|
|
||||||
#[repr(u64)]
|
#[repr(u64)]
|
||||||
pub enum TriggerMode {
|
pub enum TriggerMode {
|
||||||
Egde = 0,
|
Edge = 0,
|
||||||
Level = 1,
|
Level = 1,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -297,7 +297,7 @@ pub enum DeliveryMode {
|
|||||||
/// perform an initialization.
|
/// perform an initialization.
|
||||||
Init = 0b101,
|
Init = 0b101,
|
||||||
/// Start-up Interrupt
|
/// Start-up Interrupt
|
||||||
StrartUp = 0b110,
|
StartUp = 0b110,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
|
@ -110,7 +110,7 @@ bitflags! {
|
|||||||
const CAPABILITIES_LIST = 1 << 4;
|
const CAPABILITIES_LIST = 1 << 4;
|
||||||
/// Sets to 1 if the device is capable of running at 66 MHz.
|
/// Sets to 1 if the device is capable of running at 66 MHz.
|
||||||
const MHZ66_CAPABLE = 1 << 5;
|
const MHZ66_CAPABLE = 1 << 5;
|
||||||
/// Sets to 1 if the device can accpet fast back-to-back transactions
|
/// Sets to 1 if the device can accept fast back-to-back transactions
|
||||||
/// that are not from the same agent.
|
/// that are not from the same agent.
|
||||||
const FAST_BACK_TO_BACK_CAPABLE = 1 << 7;
|
const FAST_BACK_TO_BACK_CAPABLE = 1 << 7;
|
||||||
/// This bit is only set when the following conditions are met:
|
/// This bit is only set when the following conditions are met:
|
||||||
@ -136,7 +136,7 @@ bitflags! {
|
|||||||
/// Sets to 1 by a master device when its transaction is terminated with
|
/// Sets to 1 by a master device when its transaction is terminated with
|
||||||
/// Target-Abort
|
/// Target-Abort
|
||||||
const RECEIVED_TARGET_ABORT = 1 << 12;
|
const RECEIVED_TARGET_ABORT = 1 << 12;
|
||||||
/// Sets to 1 by a master device when its transcation (except for Special
|
/// Sets to 1 by a master device when its transaction (except for Special
|
||||||
/// Cycle transactions) is terminated with Master-Abort.
|
/// Cycle transactions) is terminated with Master-Abort.
|
||||||
const RECEIVED_MASTER_ABORT = 1 << 13;
|
const RECEIVED_MASTER_ABORT = 1 << 13;
|
||||||
/// Sets to 1 when the device asserts SERR#
|
/// Sets to 1 when the device asserts SERR#
|
||||||
|
@ -59,7 +59,7 @@ pub struct PciDeviceLocation {
|
|||||||
pub bus: u8,
|
pub bus: u8,
|
||||||
/// Device number with max 31
|
/// Device number with max 31
|
||||||
pub device: u8,
|
pub device: u8,
|
||||||
/// Deivce number with max 7
|
/// Device number with max 7
|
||||||
pub function: u8,
|
pub function: u8,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
// SPDX-License-Identifier: MPL-2.0
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
//! The implementaion of CPU-local variables that have inner mutability.
|
//! The implementation of CPU-local variables that have inner mutability.
|
||||||
|
|
||||||
use core::cell::UnsafeCell;
|
use core::cell::UnsafeCell;
|
||||||
|
|
||||||
@ -35,7 +35,7 @@ use crate::arch;
|
|||||||
///
|
///
|
||||||
/// let _irq_guard = ostd::trap::disable_local_irq();
|
/// let _irq_guard = ostd::trap::disable_local_irq();
|
||||||
/// println!("1st FOO VAL: {:?}", FOO.load());
|
/// println!("1st FOO VAL: {:?}", FOO.load());
|
||||||
/// // No suprises here, the two accesses must result in the same value.
|
/// // No surprises here, the two accesses must result in the same value.
|
||||||
/// println!("2nd FOO VAL: {:?}", FOO.load());
|
/// println!("2nd FOO VAL: {:?}", FOO.load());
|
||||||
/// }
|
/// }
|
||||||
/// ```
|
/// ```
|
||||||
|
@ -58,7 +58,7 @@ macro_rules! cpu_local {
|
|||||||
|
|
||||||
/// CPU-local objects.
|
/// CPU-local objects.
|
||||||
///
|
///
|
||||||
/// CPU-local objects are instanciated once per CPU core. They can be shared to
|
/// CPU-local objects are instantiated once per CPU core. They can be shared to
|
||||||
/// other cores. In the context of a preemptible kernel task, when holding the
|
/// other cores. In the context of a preemptible kernel task, when holding the
|
||||||
/// reference to the inner object, the object is always the one in the original
|
/// reference to the inner object, the object is always the one in the original
|
||||||
/// core (when the reference is created), no matter which core the code is
|
/// core (when the reference is created), no matter which core the code is
|
||||||
@ -169,7 +169,7 @@ impl<T: 'static + Sync> CpuLocal<T> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// SAFETY: At any given time, only one task can access the inner value `T` of a
|
// SAFETY: At any given time, only one task can access the inner value `T` of a
|
||||||
// CPU-local variable if `T` is not `Sync`. We guarentee it by disabling the
|
// CPU-local variable if `T` is not `Sync`. We guarantee it by disabling the
|
||||||
// reference to the inner value, or turning off preemptions when creating
|
// reference to the inner value, or turning off preemptions when creating
|
||||||
// the reference.
|
// the reference.
|
||||||
unsafe impl<T: 'static> Sync for CpuLocal<T> {}
|
unsafe impl<T: 'static> Sync for CpuLocal<T> {}
|
||||||
|
@ -50,7 +50,7 @@ pub use ostd_macros::main;
|
|||||||
pub use ostd_pod::Pod;
|
pub use ostd_pod::Pod;
|
||||||
|
|
||||||
pub use self::{error::Error, prelude::Result};
|
pub use self::{error::Error, prelude::Result};
|
||||||
// [`CpuLocalCell`] is easy to be mis-used, so we don't expose it to the users.
|
// [`CpuLocalCell`] is easy to be misused, so we don't expose it to the users.
|
||||||
pub(crate) use crate::cpu::local::cpu_local_cell;
|
pub(crate) use crate::cpu::local::cpu_local_cell;
|
||||||
|
|
||||||
/// Initializes OSTD.
|
/// Initializes OSTD.
|
||||||
|
@ -266,7 +266,7 @@ mod test {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[ktest]
|
#[ktest]
|
||||||
fn reader_and_wirter() {
|
fn reader_and_writer() {
|
||||||
let vm_segment = FrameAllocOptions::new(2)
|
let vm_segment = FrameAllocOptions::new(2)
|
||||||
.is_contiguous(true)
|
.is_contiguous(true)
|
||||||
.alloc_contiguous()
|
.alloc_contiguous()
|
||||||
|
@ -358,7 +358,7 @@ mod test {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[ktest]
|
#[ktest]
|
||||||
fn reader_and_wirter() {
|
fn reader_and_writer() {
|
||||||
let vm_segment = FrameAllocOptions::new(2)
|
let vm_segment = FrameAllocOptions::new(2)
|
||||||
.is_contiguous(true)
|
.is_contiguous(true)
|
||||||
.alloc_contiguous()
|
.alloc_contiguous()
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user