Add Ext2 fs and basic bio layer

This commit is contained in:
LI Qing
2023-09-18 11:47:17 +08:00
committed by Tate, Hongliang Tian
parent 1616f2d32c
commit 9473889c6b
51 changed files with 5346 additions and 427 deletions

View File

@ -64,6 +64,9 @@ log = "0.4"
getrandom = { version = "0.2.10", default-features = false, features = [
"rdrand",
] }
bitvec = { version = "1.0", default-features = false, features = ["alloc"] }
static_assertions = "1.1.0"
inherit-methods-macro = { git = "https://github.com/asterinas/inherit-methods-macro", rev = "98f7e3e" }
[dependencies.lazy_static]
version = "1.0"

View File

@ -1,10 +1,3 @@
use core::mem::size_of;
use alloc::vec::Vec;
use aster_frame::{
println,
vm::{VmAllocOptions, VmIo},
};
use log::info;
pub fn init() {
@ -13,43 +6,3 @@ pub fn init() {
info!("Found Input device, name:{}", name);
}
}
/// Exercises every registered block device with a write-then-read round trip.
///
/// One frame is filled with 8 distinct 512-byte sectors, written to the
/// device, read back into a second frame, and the two frames are compared
/// sector by sector.
#[allow(unused)]
fn block_device_test() {
    for (_, device) in aster_block::all_devices() {
        let write_frame = VmAllocOptions::new(1).alloc_single().unwrap();
        let read_frame = VmAllocOptions::new(1).alloc_single().unwrap();
        info!("write_buffer address:{:x}", write_frame.start_paddr());
        info!("read_buffer address:{:x}", read_frame.start_paddr());
        // Init write frame with 8 distinct sectors. A single frame is one
        // page (8 * 512 bytes), so the previous `0..=8` bound wrote a ninth
        // sector past the end of the frame; `0..8` fills it exactly.
        for i in 0..8 {
            let slice: [u8; 512] = [i; 512];
            write_frame.write_slice(i as usize * 512, &slice);
        }
        // Test multiple Writer & Reader
        let mut writers = Vec::with_capacity(8);
        for i in 0..8 {
            let writer = read_frame.writer().skip(i * 512).limit(512);
            writers.push(writer);
        }
        let mut readers = Vec::with_capacity(8);
        for i in 0..8 {
            let reader = write_frame.reader().skip(i * 512).limit(512);
            readers.push(reader);
        }
        device.write_block(0, readers.as_slice());
        device.read_block(0, writers.as_slice());
        // Verify the read-back data matches what was written, sector by sector.
        let mut read_slice = [0u8; 512];
        let mut write_slice = [0u8; 512];
        for i in 0..8 {
            read_frame.read_bytes(i * size_of::<[u8; 512]>(), &mut read_slice);
            write_frame.read_bytes(i * size_of::<[u8; 512]>(), &mut write_slice);
            assert_eq!(read_slice, write_slice);
        }
        println!("block device test passed!");
    }
}

View File

@ -191,12 +191,48 @@ impl From<aster_frame::Error> for Error {
}
}
impl From<aster_block::bio::BioEnqueueError> for Error {
fn from(error: aster_block::bio::BioEnqueueError) -> Self {
match error {
aster_block::bio::BioEnqueueError::IsFull => {
Error::with_message(Errno::EBUSY, "The request queue is full")
}
aster_block::bio::BioEnqueueError::Refused => {
Error::with_message(Errno::EBUSY, "Refuse to enqueue the bio")
}
}
}
}
impl From<aster_block::bio::BioStatus> for Error {
fn from(err_status: aster_block::bio::BioStatus) -> Self {
match err_status {
aster_block::bio::BioStatus::NotSupported => {
Error::with_message(Errno::EIO, "I/O operation is not supported")
}
aster_block::bio::BioStatus::NoSpace => {
Error::with_message(Errno::ENOSPC, "Insufficient space on device")
}
aster_block::bio::BioStatus::IoError => {
Error::with_message(Errno::EIO, "I/O operation fails")
}
status => panic!("Can not convert the status: {:?} to an error", status),
}
}
}
impl From<core::str::Utf8Error> for Error {
    // A byte slice that is not valid UTF-8 maps to `EINVAL`.
    fn from(_: core::str::Utf8Error) -> Self {
        Error::with_message(Errno::EINVAL, "Invalid utf-8 string")
    }
}
impl From<alloc::string::FromUtf8Error> for Error {
    // An owned byte buffer that is not valid UTF-8 maps to `EINVAL`,
    // mirroring the `core::str::Utf8Error` conversion above.
    fn from(_: alloc::string::FromUtf8Error) -> Self {
        Error::with_message(Errno::EINVAL, "Invalid utf-8 string")
    }
}
impl From<core::ffi::FromBytesUntilNulError> for Error {
fn from(_: core::ffi::FromBytesUntilNulError) -> Self {
Error::with_message(Errno::E2BIG, "Cannot find null in cstring")

View File

@ -6,7 +6,6 @@ use crate::fs::utils::{
};
use crate::prelude::*;
use aster_frame::vm::VmFrame;
use aster_util::{id_allocator::IdAlloc, slot_vec::SlotVec};
use core::time::Duration;
@ -140,12 +139,18 @@ impl Inode for RootInode {
self.metadata.size
}
fn resize(&self, new_size: usize) {}
fn resize(&self, new_size: usize) -> Result<()> {
Err(Error::new(Errno::EISDIR))
}
fn metadata(&self) -> Metadata {
self.metadata.clone()
}
fn ino(&self) -> u64 {
self.metadata.ino as _
}
fn type_(&self) -> InodeType {
self.metadata.type_
}

View File

@ -65,12 +65,18 @@ impl Inode for Ptmx {
self.metadata.size
}
fn resize(&self, new_size: usize) {}
fn resize(&self, new_size: usize) -> Result<()> {
Ok(())
}
fn metadata(&self) -> Metadata {
self.metadata.clone()
}
fn ino(&self) -> u64 {
self.metadata.ino as _
}
fn type_(&self) -> InodeType {
self.metadata.type_
}
@ -93,14 +99,6 @@ impl Inode for Ptmx {
fn set_mtime(&self, time: Duration) {}
fn read_page(&self, idx: usize, frame: &VmFrame) -> Result<()> {
Ok(())
}
fn write_page(&self, idx: usize, frame: &VmFrame) -> Result<()> {
Ok(())
}
fn read_at(&self, offset: usize, buf: &mut [u8]) -> Result<usize> {
Ok(0)
}

View File

@ -45,12 +45,18 @@ impl Inode for PtySlaveInode {
self.metadata.size
}
fn resize(&self, new_size: usize) {}
fn resize(&self, new_size: usize) -> Result<()> {
Err(Error::new(Errno::EPERM))
}
fn metadata(&self) -> Metadata {
self.metadata.clone()
}
fn ino(&self) -> u64 {
self.metadata.ino as _
}
fn type_(&self) -> InodeType {
self.metadata.type_
}
@ -73,14 +79,6 @@ impl Inode for PtySlaveInode {
fn set_mtime(&self, time: Duration) {}
fn read_page(&self, idx: usize, frame: &VmFrame) -> Result<()> {
Ok(())
}
fn write_page(&self, idx: usize, frame: &VmFrame) -> Result<()> {
Ok(())
}
fn read_at(&self, offset: usize, buf: &mut [u8]) -> Result<usize> {
self.device.read(buf)
}

View File

@ -0,0 +1,476 @@
use super::fs::Ext2;
use super::inode::{Inode, InodeDesc, RawInode};
use super::prelude::*;
use super::super_block::SuperBlock;
use aster_util::id_allocator::IdAlloc;
/// Blocks are clustered into block groups in order to reduce fragmentation and minimise
/// the amount of head seeking when reading a large amount of consecutive data.
pub(super) struct BlockGroup {
    // Index of this group within the filesystem.
    idx: usize,
    // Shared state; also serves as the backend of `raw_inodes_cache`.
    bg_impl: Arc<BlockGroupImpl>,
    // Page cache over this group's on-disk inode table.
    raw_inodes_cache: PageCache,
}
/// The shared part of a `BlockGroup`.
///
/// It lives behind an `Arc` so that it can also act as the
/// `PageCacheBackend` of the raw-inode page cache.
struct BlockGroupImpl {
    // Starting block id of this group's inode table.
    inode_table_bid: Bid,
    // Byte size of the raw inode table (inodes_per_group * inode_size).
    raw_inodes_size: usize,
    // Mutable metadata plus the in-memory inode cache.
    inner: RwMutex<Inner>,
    // Back-reference to the owning filesystem.
    fs: Weak<Ext2>,
}
impl BlockGroup {
    /// Loads and constructs a block group.
    pub fn load(
        group_descriptors_segment: &VmSegment,
        idx: usize,
        block_device: &dyn BlockDevice,
        super_block: &SuperBlock,
        fs: Weak<Ext2>,
    ) -> Result<Self> {
        // Byte size of this group's on-disk inode table.
        let raw_inodes_size = (super_block.inodes_per_group() as usize) * super_block.inode_size();
        let bg_impl = {
            let metadata = {
                let descriptor = {
                    // Read the block group descriptor
                    // TODO: if the main is corrupted, should we load the backup?
                    let offset = idx * core::mem::size_of::<RawGroupDescriptor>();
                    let raw_descriptor = group_descriptors_segment
                        .read_val::<RawGroupDescriptor>(offset)
                        .unwrap();
                    GroupDescriptor::from(raw_descriptor)
                };
                // Reads one on-disk bitmap block and turns it into an id allocator.
                let get_bitmap = |bid: Bid, capacity: usize| -> Result<IdAlloc> {
                    // A bitmap must fit within a single block.
                    if capacity > BLOCK_SIZE * 8 {
                        return_errno_with_message!(Errno::EINVAL, "bad bitmap");
                    }
                    let mut buf = vec![0u8; BLOCK_SIZE];
                    block_device.read_bytes(bid.to_offset(), &mut buf)?;
                    Ok(IdAlloc::from_bytes_with_capacity(&buf, capacity))
                };
                let block_bitmap = get_bitmap(
                    descriptor.block_bitmap_bid,
                    super_block.blocks_per_group() as usize,
                )?;
                let inode_bitmap = get_bitmap(
                    descriptor.inode_bitmap_bid,
                    super_block.inodes_per_group() as usize,
                )?;
                GroupMetadata {
                    descriptor,
                    block_bitmap,
                    inode_bitmap,
                }
            };
            Arc::new(BlockGroupImpl {
                inode_table_bid: metadata.descriptor.inode_table_bid,
                raw_inodes_size,
                inner: RwMutex::new(Inner {
                    // Freshly loaded metadata starts clean.
                    metadata: Dirty::new(metadata),
                    inode_cache: BTreeMap::new(),
                }),
                fs,
            })
        };
        // `bg_impl` itself backs the page cache (see the `PageCacheBackend` impl).
        let raw_inodes_cache =
            PageCache::with_capacity(raw_inodes_size, Arc::downgrade(&bg_impl) as _)?;
        Ok(Self {
            idx,
            bg_impl,
            raw_inodes_cache,
        })
    }

    /// Finds and returns the inode.
    pub fn lookup_inode(&self, inode_idx: u32) -> Result<Arc<Inode>> {
        // The fast path
        let inner = self.bg_impl.inner.read();
        if !inner.metadata.is_inode_allocated(inode_idx) {
            return_errno!(Errno::ENOENT);
        }
        if let Some(inode) = inner.inode_cache.get(&inode_idx) {
            return Ok(inode.clone());
        }
        // The slow path
        drop(inner);
        let mut inner = self.bg_impl.inner.write();
        // Re-check under the write lock: the state may have changed between
        // the two lock acquisitions.
        if !inner.metadata.is_inode_allocated(inode_idx) {
            return_errno!(Errno::ENOENT);
        }
        if let Some(inode) = inner.inode_cache.get(&inode_idx) {
            return Ok(inode.clone());
        }
        // Loads the inode, then inserts it into the inode cache.
        let inode = self.load_inode(inode_idx)?;
        inner.inode_cache.insert(inode_idx, inode.clone());
        Ok(inode)
    }

    /// Loads an existing inode.
    ///
    /// This method may load the raw inode metadata from block device.
    fn load_inode(&self, inode_idx: u32) -> Result<Arc<Inode>> {
        let fs = self.fs();
        let raw_inode = {
            let offset = (inode_idx as usize) * fs.inode_size();
            self.raw_inodes_cache
                .pages()
                .read_val::<RawInode>(offset)
                .unwrap()
        };
        let inode_desc = Dirty::new(InodeDesc::try_from(raw_inode)?);
        // Inode numbers are 1-based and laid out group by group.
        let ino = inode_idx + self.idx as u32 * fs.inodes_per_group() + 1;
        Ok(Inode::new(ino, self.idx, inode_desc, Arc::downgrade(&fs)))
    }

    /// Inserts the inode into the inode cache.
    ///
    /// # Panic
    ///
    /// If `inode_idx` has not been allocated before, then the method panics.
    pub fn insert_cache(&self, inode_idx: u32, inode: Arc<Inode>) {
        let mut inner = self.bg_impl.inner.write();
        assert!(inner.metadata.is_inode_allocated(inode_idx));
        inner.inode_cache.insert(inode_idx, inode);
    }

    /// Allocates and returns an inode index.
    pub fn alloc_inode(&self, is_dir: bool) -> Option<u32> {
        // The fast path: bail out with only the read lock when the group is full.
        if self.bg_impl.inner.read().metadata.free_inodes_count() == 0 {
            return None;
        }
        // The slow path
        self.bg_impl.inner.write().metadata.alloc_inode(is_dir)
    }

    /// Frees the allocated inode idx.
    ///
    /// # Panic
    ///
    /// If `inode_idx` has not been allocated before, then the method panics.
    pub fn free_inode(&self, inode_idx: u32, is_dir: bool) {
        let mut inner = self.bg_impl.inner.write();
        assert!(inner.metadata.is_inode_allocated(inode_idx));
        inner.metadata.free_inode(inode_idx, is_dir);
        // A freed inode must no longer be served from the cache.
        inner.inode_cache.remove(&inode_idx);
    }

    /// Allocates and returns a block index.
    pub fn alloc_block(&self) -> Option<u32> {
        // The fast path: bail out with only the read lock when the group is full.
        if self.bg_impl.inner.read().metadata.free_blocks_count() == 0 {
            return None;
        }
        // The slow path
        self.bg_impl.inner.write().metadata.alloc_block()
    }

    /// Frees the allocated block idx.
    ///
    /// # Panic
    ///
    /// If `block_idx` has not been allocated before, then the method panics.
    pub fn free_block(&self, block_idx: u32) {
        let mut inner = self.bg_impl.inner.write();
        assert!(inner.metadata.is_block_allocated(block_idx));
        inner.metadata.free_block(block_idx);
    }

    /// Writes back the raw inode metadata to the raw inode metadata cache.
    pub fn sync_raw_inode(&self, inode_idx: u32, raw_inode: &RawInode) {
        let offset = (inode_idx as usize) * self.fs().inode_size();
        self.raw_inodes_cache
            .pages()
            .write_val(offset, raw_inode)
            .unwrap();
    }

    /// Writes back the metadata of this group.
    pub fn sync_metadata(&self, super_block: &SuperBlock) -> Result<()> {
        // Cheap dirtiness check under the read lock first.
        if !self.bg_impl.inner.read().metadata.is_dirty() {
            return Ok(());
        }
        let mut inner = self.bg_impl.inner.write();
        let fs = self.fs();
        // Writes back the descriptor.
        let raw_descriptor = RawGroupDescriptor::from(&inner.metadata.descriptor);
        self.fs().sync_group_descriptor(self.idx, &raw_descriptor)?;
        let mut bio_waiter = BioWaiter::new();
        // Writes back the inode bitmap.
        let inode_bitmap_bid = inner.metadata.descriptor.inode_bitmap_bid;
        bio_waiter.concat(fs.block_device().write_bytes_async(
            inode_bitmap_bid.to_offset(),
            inner.metadata.inode_bitmap.as_bytes(),
        )?);
        // Writes back the block bitmap.
        let block_bitmap_bid = inner.metadata.descriptor.block_bitmap_bid;
        bio_waiter.concat(fs.block_device().write_bytes_async(
            block_bitmap_bid.to_offset(),
            inner.metadata.block_bitmap.as_bytes(),
        )?);
        // Waits for the completion of all submitted bios.
        bio_waiter.wait().ok_or_else(|| {
            Error::with_message(Errno::EIO, "failed to sync metadata of block group")
        })?;
        inner.metadata.clear_dirty();
        Ok(())
    }

    /// Writes back all of the cached inodes.
    ///
    /// The `sync_all` method of inode may modify the data of this block group,
    /// so we should not hold the lock while syncing the inodes.
    pub fn sync_all_inodes(&self) -> Result<()> {
        // Removes the inodes that is unused from the inode cache.
        // (A strong count of 1 means the cache holds the only reference.)
        let unused_inodes: Vec<Arc<Inode>> = self
            .bg_impl
            .inner
            .write()
            .inode_cache
            .extract_if(|_, inode| Arc::strong_count(inode) == 1)
            .map(|(_, inode)| inode)
            .collect();
        // Writes back the unused inodes.
        for inode in unused_inodes.iter() {
            inode.sync_all()?;
        }
        drop(unused_inodes);
        // Writes back the remaining inodes in the inode cache.
        let remaining_inodes: Vec<Arc<Inode>> = self
            .bg_impl
            .inner
            .read()
            .inode_cache
            .values()
            .cloned()
            .collect();
        for inode in remaining_inodes.iter() {
            inode.sync_all()?;
        }
        drop(remaining_inodes);
        // Writes back the raw inode metadata.
        self.raw_inodes_cache
            .pages()
            .decommit(0..self.bg_impl.raw_inodes_size)?;
        Ok(())
    }

    // Upgrades the weak filesystem reference; panics if the fs was dropped.
    fn fs(&self) -> Arc<Ext2> {
        self.bg_impl.fs.upgrade().unwrap()
    }
}
impl Debug for BlockGroup {
    /// Formats the block group for debugging.
    ///
    /// The inner state is captured under a single read-lock acquisition so
    /// that the descriptor and both bitmaps are printed as one consistent
    /// snapshot (the previous code re-acquired the lock for every field).
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        let inner = self.bg_impl.inner.read();
        f.debug_struct("BlockGroup")
            .field("idx", &self.idx)
            .field("descriptor", &inner.metadata.descriptor)
            .field("block_bitmap", &inner.metadata.block_bitmap)
            .field("inode_bitmap", &inner.metadata.inode_bitmap)
            .finish()
    }
}
impl PageCacheBackend for BlockGroupImpl {
    /// Fills `frame` with page `idx` of the on-disk inode table.
    fn read_page(&self, idx: usize, frame: &VmFrame) -> Result<()> {
        let fs = self.fs.upgrade().unwrap();
        fs.read_block(self.inode_table_bid + idx as u64, frame)?;
        Ok(())
    }
    /// Writes `frame` back as page `idx` of the on-disk inode table.
    fn write_page(&self, idx: usize, frame: &VmFrame) -> Result<()> {
        let fs = self.fs.upgrade().unwrap();
        fs.write_block(self.inode_table_bid + idx as u64, frame)?;
        Ok(())
    }
    /// Number of pages backing the raw inode table.
    fn npages(&self) -> usize {
        self.raw_inodes_size.div_ceil(BLOCK_SIZE)
    }
}
/// The lock-protected state of a block group.
#[derive(Debug)]
struct Inner {
    // Descriptor and bitmaps, with dirtiness tracking for write-back.
    metadata: Dirty<GroupMetadata>,
    // In-memory inodes of this group, keyed by the group-local inode index.
    inode_cache: BTreeMap<u32, Arc<Inode>>,
}
/// The in-memory metadata of a block group: its descriptor plus the two
/// allocation bitmaps.
#[derive(Clone, Debug)]
struct GroupMetadata {
    descriptor: GroupDescriptor,
    block_bitmap: IdAlloc,
    inode_bitmap: IdAlloc,
}
impl GroupMetadata {
    /// Tells whether the inode at `inode_idx` is currently allocated.
    pub fn is_inode_allocated(&self, inode_idx: u32) -> bool {
        self.inode_bitmap.is_allocated(inode_idx as usize)
    }
    /// Allocates one inode, keeping the free-inode (and directory) counters in sync.
    pub fn alloc_inode(&mut self, is_dir: bool) -> Option<u32> {
        let idx = self.inode_bitmap.alloc()?;
        self.dec_free_inodes();
        if is_dir {
            self.inc_dirs();
        }
        Some(idx as u32)
    }
    /// Frees one inode, keeping the free-inode (and directory) counters in sync.
    pub fn free_inode(&mut self, inode_idx: u32, is_dir: bool) {
        self.inode_bitmap.free(inode_idx as usize);
        self.inc_free_inodes();
        if is_dir {
            self.dec_dirs();
        }
    }
    /// Tells whether the block at `block_idx` is currently allocated.
    pub fn is_block_allocated(&self, block_idx: u32) -> bool {
        self.block_bitmap.is_allocated(block_idx as usize)
    }
    /// Allocates one block, keeping the free-block counter in sync.
    pub fn alloc_block(&mut self) -> Option<u32> {
        let idx = self.block_bitmap.alloc()?;
        self.dec_free_blocks();
        Some(idx as u32)
    }
    /// Frees one block, keeping the free-block counter in sync.
    pub fn free_block(&mut self, block_idx: u32) {
        self.block_bitmap.free(block_idx as usize);
        self.inc_free_blocks();
    }
    /// Number of free inodes recorded in the descriptor.
    pub fn free_inodes_count(&self) -> u16 {
        self.descriptor.free_inodes_count
    }
    /// Number of free blocks recorded in the descriptor.
    pub fn free_blocks_count(&self) -> u16 {
        self.descriptor.free_blocks_count
    }
    /// Bumps the free-inode counter.
    pub fn inc_free_inodes(&mut self) {
        self.descriptor.free_inodes_count += 1;
    }
    /// Drops the free-inode counter; it must be positive beforehand.
    pub fn dec_free_inodes(&mut self) {
        debug_assert!(self.descriptor.free_inodes_count > 0);
        self.descriptor.free_inodes_count -= 1;
    }
    /// Bumps the free-block counter.
    pub fn inc_free_blocks(&mut self) {
        self.descriptor.free_blocks_count += 1;
    }
    /// Drops the free-block counter; it must be positive beforehand.
    pub fn dec_free_blocks(&mut self) {
        debug_assert!(self.descriptor.free_blocks_count > 0);
        self.descriptor.free_blocks_count -= 1;
    }
    /// Bumps the directory counter.
    pub fn inc_dirs(&mut self) {
        self.descriptor.dirs_count += 1;
    }
    /// Drops the directory counter; it must be positive beforehand.
    pub fn dec_dirs(&mut self) {
        debug_assert!(self.descriptor.dirs_count > 0);
        self.descriptor.dirs_count -= 1;
    }
}
/// The in-memory rust block group descriptor.
///
/// The block group descriptor contains information regarding where important data
/// structures for that group are located.
#[derive(Clone, Copy, Debug)]
struct GroupDescriptor {
    /// Blocks usage bitmap block
    block_bitmap_bid: Bid,
    /// Inodes usage bitmap block
    inode_bitmap_bid: Bid,
    /// Starting block of inode table
    inode_table_bid: Bid,
    /// Number of free blocks in group
    free_blocks_count: u16,
    /// Number of free inodes in group
    free_inodes_count: u16,
    /// Number of directories in group
    dirs_count: u16,
}
impl From<RawGroupDescriptor> for GroupDescriptor {
fn from(desc: RawGroupDescriptor) -> Self {
Self {
block_bitmap_bid: Bid::new(desc.block_bitmap as _),
inode_bitmap_bid: Bid::new(desc.inode_bitmap as _),
inode_table_bid: Bid::new(desc.inode_table as _),
free_blocks_count: desc.free_blocks_count,
free_inodes_count: desc.free_inodes_count,
dirs_count: desc.dirs_count,
}
}
}
// The on-disk descriptor layout is fixed at 32 bytes.
const_assert!(core::mem::size_of::<RawGroupDescriptor>() == 32);

/// The raw block group descriptor.
///
/// The table starts on the first block following the superblock.
#[repr(C)]
#[derive(Clone, Copy, Debug, Pod)]
pub(super) struct RawGroupDescriptor {
    pub block_bitmap: u32,
    pub inode_bitmap: u32,
    pub inode_table: u32,
    pub free_blocks_count: u16,
    pub free_inodes_count: u16,
    pub dirs_count: u16,
    // Padding and reserved space; kept zeroed when written.
    pad: u16,
    reserved: [u32; 3],
}
impl From<&GroupDescriptor> for RawGroupDescriptor {
    /// Encodes the in-memory descriptor back into its on-disk layout,
    /// zero-filling the padding and reserved fields.
    fn from(desc: &GroupDescriptor) -> Self {
        Self {
            pad: 0,
            reserved: [0; 3],
            block_bitmap: desc.block_bitmap_bid.to_raw() as _,
            inode_bitmap: desc.inode_bitmap_bid.to_raw() as _,
            inode_table: desc.inode_table_bid.to_raw() as _,
            free_blocks_count: desc.free_blocks_count,
            free_inodes_count: desc.free_inodes_count,
            dirs_count: desc.dirs_count,
        }
    }
}

View File

@ -0,0 +1,56 @@
use bitvec::prelude::BitVec;
/// A blocks hole descriptor implemented by the `BitVec`.
///
/// The true bit implies that the block is a hole, and conversely.
pub(super) struct BlocksHoleDesc(BitVec);
impl BlocksHoleDesc {
    /// Constructs a blocks hole descriptor with initial size.
    ///
    /// The `initial_size` usually is the number of blocks for a file.
    /// All blocks start out as non-holes.
    pub fn new(initial_size: usize) -> Self {
        Self(BitVec::repeat(false, initial_size))
    }
    /// Returns the size.
    pub fn size(&self) -> usize {
        self.0.len()
    }
    /// Resizes the blocks hole to a new size.
    ///
    /// If `new_size` is greater than current size, the new blocks are all marked as hole.
    pub fn resize(&mut self, new_size: usize) {
        self.0.resize(new_size, true);
    }
    /// Returns if the block `idx` is a hole.
    ///
    /// # Panic
    ///
    /// If the `idx` is out of bounds, this method will panic.
    pub fn is_hole(&self, idx: usize) -> bool {
        self.0[idx]
    }
    /// Marks the block `idx` as a hole.
    ///
    /// # Panic
    ///
    /// If the `idx` is out of bounds, this method will panic.
    pub fn set(&mut self, idx: usize) {
        self.mark(idx, true);
    }
    /// Unmarks the block `idx` as a hole.
    ///
    /// # Panic
    ///
    /// If the `idx` is out of bounds, this method will panic.
    pub fn unset(&mut self, idx: usize) {
        self.mark(idx, false);
    }
    // Sets the hole flag of block `idx` to `value`.
    fn mark(&mut self, idx: usize, value: bool) {
        self.0.set(idx, value);
    }
}

View File

@ -0,0 +1,321 @@
use super::inode::{FileType, MAX_FNAME_LEN};
use super::prelude::*;
use core::iter::Iterator;
/// The data structure in a directory's data block. It is stored in a linked list.
///
/// Each entry contains the name of the entry, the inode number, the file type,
/// and the distance within the directory file to the next entry.
#[derive(Clone, Debug)]
pub struct DirEntry {
    /// The header part.
    header: DirEntryHeader,
    /// Name of the entry, up to 255 bytes (excluding the null terminator).
    name: CStr256,
}
impl DirEntry {
    /// Constructs a new `DirEntry` object with the specified inode (`ino`),
    /// name (`name`), and file type (`file_type`).
    pub(super) fn new(ino: u32, name: &str, file_type: FileType) -> Self {
        debug_assert!(name.len() <= MAX_FNAME_LEN);
        // On-disk records are padded to a 4-byte boundary.
        let record_len = (Self::header_len() + name.len()).align_up(4) as u16;
        Self {
            header: DirEntryHeader {
                ino,
                record_len,
                name_len: name.len() as u8,
                file_type: DirEntryFileType::from(file_type) as _,
            },
            name: CStr256::from(name),
        }
    }
    /// Constructs a `DirEntry` with the name "." and `self_ino` as its inode.
    pub(super) fn self_entry(self_ino: u32) -> Self {
        Self::new(self_ino, ".", FileType::Dir)
    }
    /// Constructs a `DirEntry` with the name ".." and `parent_ino` as its inode.
    pub(super) fn parent_entry(parent_ino: u32) -> Self {
        Self::new(parent_ino, "..", FileType::Dir)
    }
    /// Returns a reference to the header.
    fn header(&self) -> &DirEntryHeader {
        &self.header
    }
    /// Returns the length of the header.
    fn header_len() -> usize {
        core::mem::size_of::<DirEntryHeader>()
    }
    /// Returns the inode number.
    pub fn ino(&self) -> u32 {
        self.header.ino
    }
    /// Modifies the inode number.
    pub fn set_ino(&mut self, ino: u32) {
        self.header.ino = ino;
    }
    /// Returns the name.
    ///
    /// # Panic
    ///
    /// Panics if the stored name is not valid UTF-8.
    pub fn name(&self) -> &str {
        self.name.as_str().unwrap()
    }
    /// Returns the type.
    ///
    /// # Panic
    ///
    /// Panics if the stored type byte is not a known `DirEntryFileType`.
    pub fn type_(&self) -> FileType {
        FileType::from(DirEntryFileType::try_from(self.header.file_type).unwrap())
    }
    /// Returns the distance to the next entry.
    pub fn record_len(&self) -> usize {
        self.header.record_len as _
    }
    /// Modifies the distance to the next entry.
    pub(super) fn set_record_len(&mut self, record_len: usize) {
        // A record must at least cover its own header and name.
        debug_assert!(record_len >= self.actual_len());
        self.header.record_len = record_len as _;
    }
    /// Returns the actual length of the current entry.
    pub(super) fn actual_len(&self) -> usize {
        (Self::header_len() + self.name.len()).align_up(4)
    }
    /// Returns the length of the gap between the current entry and the next entry.
    pub(super) fn gap_len(&self) -> usize {
        self.record_len() - self.actual_len()
    }
}
/// The header of `DirEntry`.
///
/// This is the fixed-size, on-disk prefix of a directory record; the
/// variable-length name bytes follow it immediately.
#[repr(C)]
#[derive(Clone, Copy, Debug, Pod)]
struct DirEntryHeader {
    /// Inode number
    ino: u32,
    /// Directory entry length
    record_len: u16,
    /// Name Length
    name_len: u8,
    /// Type indicator
    file_type: u8,
}
/// The type indicator in the `DirEntry`.
///
/// The discriminants are the on-disk ext2 values for the `file_type` byte.
#[repr(u8)]
#[derive(Copy, Clone, Debug, Eq, PartialEq, TryFromInt)]
enum DirEntryFileType {
    Unknown = 0,
    File = 1,
    Dir = 2,
    Char = 3,
    Block = 4,
    Fifo = 5,
    Socket = 6,
    Symlink = 7,
}
impl From<FileType> for DirEntryFileType {
    /// Maps an inode file type onto its on-disk directory-entry tag.
    fn from(file_type: FileType) -> Self {
        match file_type {
            FileType::File => Self::File,
            FileType::Dir => Self::Dir,
            FileType::Char => Self::Char,
            FileType::Block => Self::Block,
            FileType::Fifo => Self::Fifo,
            FileType::Socket => Self::Socket,
            FileType::Symlink => Self::Symlink,
        }
    }
}
impl From<DirEntryFileType> for FileType {
    /// Maps an on-disk directory-entry tag back to the inode file type.
    ///
    /// Panics on `Unknown`, which has no inode counterpart.
    fn from(file_type: DirEntryFileType) -> Self {
        match file_type {
            DirEntryFileType::Unknown => panic!("unknown file type"),
            DirEntryFileType::File => Self::File,
            DirEntryFileType::Dir => Self::Dir,
            DirEntryFileType::Char => Self::Char,
            DirEntryFileType::Block => Self::Block,
            DirEntryFileType::Fifo => Self::Fifo,
            DirEntryFileType::Socket => Self::Socket,
            DirEntryFileType::Symlink => Self::Symlink,
        }
    }
}
/// A reader for reading `DirEntry` from the page cache.
pub struct DirEntryReader<'a> {
    // The directory's data, viewed through its page cache.
    page_cache: &'a PageCache,
    // Byte offset of the next entry to read.
    offset: usize,
}
impl<'a> DirEntryReader<'a> {
    /// Constructs a reader with the given page cache and offset.
    pub(super) fn new(page_cache: &'a PageCache, from_offset: usize) -> Self {
        Self {
            page_cache,
            offset: from_offset,
        }
    }
    /// Reads one `DirEntry` from the current offset.
    ///
    /// On success, the offset advances by the entry's `record_len`,
    /// i.e. to the start of the next entry.
    pub fn read_entry(&mut self) -> Result<DirEntry> {
        let header = self
            .page_cache
            .pages()
            .read_val::<DirEntryHeader>(self.offset)?;
        // An inode number of zero marks an unused entry.
        if header.ino == 0 {
            return_errno!(Errno::ENOENT);
        }
        let mut name = vec![0u8; header.name_len as _];
        self.page_cache
            .pages()
            .read_bytes(self.offset + DirEntry::header_len(), &mut name)?;
        let entry = DirEntry {
            header,
            name: CStr256::from(name.as_slice()),
        };
        self.offset += entry.record_len();
        Ok(entry)
    }
}
impl<'a> Iterator for DirEntryReader<'a> {
    type Item = (usize, DirEntry);
    /// Yields `(offset, entry)` pairs until a read fails, which ends iteration.
    fn next(&mut self) -> Option<Self::Item> {
        let start = self.offset;
        self.read_entry().ok().map(|entry| (start, entry))
    }
}
/// A writer for modifying `DirEntry` of the page cache.
pub struct DirEntryWriter<'a> {
    // The directory's data, viewed through its page cache.
    page_cache: &'a PageCache,
    // Byte offset at which the next write takes place.
    offset: usize,
}
impl<'a> DirEntryWriter<'a> {
    /// Constructs a writer with the given page cache and offset.
    pub(super) fn new(page_cache: &'a PageCache, from_offset: usize) -> Self {
        Self {
            page_cache,
            offset: from_offset,
        }
    }
    /// Writes a `DirEntry` at the current offset.
    ///
    /// On success, the offset advances by the entry's `record_len`.
    pub fn write_entry(&mut self, entry: &DirEntry) -> Result<()> {
        self.page_cache
            .pages()
            .write_val(self.offset, entry.header())?;
        self.page_cache.pages().write_bytes(
            self.offset + DirEntry::header_len(),
            entry.name().as_bytes(),
        )?;
        self.offset += entry.record_len();
        Ok(())
    }
    /// Appends a new `DirEntry` starting from the current offset.
    ///
    /// If there is a gap between existing entries, inserts the new entry into the gap
    /// If there is no available space, expands the size and appends the new entry at the end.
    pub fn append_entry(&mut self, mut new_entry: DirEntry) -> Result<()> {
        let Some((offset, mut entry)) = DirEntryReader::new(self.page_cache, self.offset)
            .find(|(_, entry)| entry.gap_len() >= new_entry.record_len())
        else {
            // Resize and append it at the new block.
            let old_size = self.page_cache.pages().size();
            let new_size = old_size + BLOCK_SIZE;
            self.page_cache.pages().resize(new_size)?;
            // The new entry's record covers the whole freshly-added block.
            new_entry.set_record_len(BLOCK_SIZE);
            self.offset = old_size;
            self.write_entry(&new_entry)?;
            return Ok(());
        };
        // Write in the gap between existing entries.
        // Shrink the host entry to its actual length and hand its gap
        // over to the new entry.
        new_entry.set_record_len(entry.gap_len());
        entry.set_record_len(entry.actual_len());
        self.offset = offset;
        self.write_entry(&entry)?;
        self.write_entry(&new_entry)?;
        Ok(())
    }
    /// Removes and returns an existing `DirEntry` indicated by `name`.
    ///
    /// The scan pairs each entry with its successor, so the entry at offset 0
    /// (presumably the "." self entry) can never be the one removed.
    pub fn remove_entry(&mut self, name: &str) -> Result<DirEntry> {
        // Offset of the second entry, right after the "." record.
        let self_entry_record_len = DirEntry::self_entry(0).record_len();
        let reader = DirEntryReader::new(self.page_cache, 0);
        let next_reader = DirEntryReader::new(self.page_cache, self_entry_record_len);
        // Zipping yields (previous entry, current entry) pairs; `entry` is the
        // match and `pre_entry` is its predecessor.
        let Some(((pre_offset, mut pre_entry), (offset, entry))) = reader
            .zip(next_reader)
            .find(|((offset, _), (_, dir_entry))| dir_entry.name() == name)
        else {
            return_errno!(Errno::ENOENT);
        };
        // NOTE(review): reading at `offset` returns the just-matched entry
        // itself, so this `is_none()` check looks like it can never be true;
        // if the intent is "removed entry is the last one", the probe should
        // presumably start at `offset + entry.record_len()` — confirm.
        if DirEntryReader::new(self.page_cache, offset)
            .next()
            .is_none()
            && Bid::from_offset(pre_offset) != Bid::from_offset(offset)
        {
            // Shrink the size.
            let new_size = pre_offset.align_up(BLOCK_SIZE);
            self.page_cache.pages().resize(new_size)?;
            pre_entry.set_record_len(new_size - pre_offset);
            self.offset = pre_offset;
            self.write_entry(&pre_entry)?;
        } else {
            // Update the previous entry.
            // The predecessor's record absorbs the removed entry's record.
            pre_entry.set_record_len(pre_entry.record_len() + entry.record_len());
            self.offset = pre_offset;
            self.write_entry(&pre_entry)?;
        }
        Ok(entry)
    }
    /// Renames the `DirEntry` from `old_name` to the `new_name` from the current offset.
    ///
    /// It will moves the `DirEntry` to another position,
    /// if the record length is not big enough.
    pub fn rename_entry(&mut self, old_name: &str, new_name: &str) -> Result<()> {
        let (offset, entry) = DirEntryReader::new(self.page_cache, self.offset)
            .find(|(offset, entry)| entry.name() == old_name)
            .ok_or(Error::new(Errno::ENOENT))?;
        let mut new_entry = DirEntry::new(entry.ino(), new_name, entry.type_());
        if new_entry.record_len() <= entry.record_len() {
            // Just rename the entry.
            // The new name fits in place; keep the old record length.
            new_entry.set_record_len(entry.record_len());
            self.offset = offset;
            self.write_entry(&new_entry)?;
        } else {
            // Move to another position.
            self.remove_entry(old_name)?;
            self.offset = 0;
            self.append_entry(new_entry)?;
        }
        Ok(())
    }
}

View File

@ -0,0 +1,366 @@
use super::block_group::{BlockGroup, RawGroupDescriptor};
use super::inode::{FilePerm, FileType, Inode, InodeDesc, RawInode};
use super::prelude::*;
use super::super_block::{RawSuperBlock, SuperBlock, SUPER_BLOCK_OFFSET};
/// The root inode number.
const ROOT_INO: u32 = 2;

/// The Ext2 filesystem.
#[derive(Debug)]
pub struct Ext2 {
    // The underlying block device.
    block_device: Arc<dyn BlockDevice>,
    // The in-memory super block, with dirtiness tracking for write-back.
    super_block: RwMutex<Dirty<SuperBlock>>,
    // All block groups, indexed by group number.
    block_groups: Vec<BlockGroup>,
    // The following four fields are copied out of the super block at open
    // time so they can be read without taking the super block lock.
    inodes_per_group: u32,
    blocks_per_group: u32,
    inode_size: usize,
    block_size: usize,
    // Contiguous memory holding the on-disk group descriptor table.
    group_descriptors_segment: VmSegment,
    // Weak self-reference handed out to child objects.
    self_ref: Weak<Self>,
}
impl Ext2 {
/// Opens and loads an Ext2 from the `block_device`.
pub fn open(block_device: Arc<dyn BlockDevice>) -> Result<Arc<Self>> {
    // Load the superblock
    // TODO: if the main superblock is corrupted, should we load the backup?
    let super_block = {
        let raw_super_block = block_device.read_val::<RawSuperBlock>(SUPER_BLOCK_OFFSET)?;
        SuperBlock::try_from(raw_super_block)?
    };
    // Only filesystems whose block size equals `BLOCK_SIZE` are supported.
    assert!(super_block.block_size() == BLOCK_SIZE);
    // Read the whole group descriptor table into one contiguous segment.
    let group_descriptors_segment = {
        let npages = ((super_block.block_groups_count() as usize)
            * core::mem::size_of::<RawGroupDescriptor>())
        .div_ceil(BLOCK_SIZE);
        let segment = VmAllocOptions::new(npages)
            .uninit(true)
            .is_contiguous(true)
            .alloc_contiguous()?;
        match block_device.read_blocks_sync(super_block.group_descriptors_bid(0), &segment)? {
            BioStatus::Complete => (),
            err_status => {
                return Err(Error::from(err_status));
            }
        }
        segment
    };
    // Load the block groups information
    let load_block_groups = |fs: Weak<Ext2>,
                             block_device: &dyn BlockDevice,
                             group_descriptors_segment: &VmSegment|
     -> Result<Vec<BlockGroup>> {
        let block_groups_count = super_block.block_groups_count() as usize;
        let mut block_groups = Vec::with_capacity(block_groups_count);
        for idx in 0..block_groups_count {
            let block_group = BlockGroup::load(
                group_descriptors_segment,
                idx,
                block_device,
                &super_block,
                fs.clone(),
            )?;
            block_groups.push(block_group);
        }
        Ok(block_groups)
    };
    // `new_cyclic` lets the block groups hold a weak reference to the
    // filesystem while it is being constructed.
    let ext2 = Arc::new_cyclic(|weak_ref| Self {
        inodes_per_group: super_block.inodes_per_group(),
        blocks_per_group: super_block.blocks_per_group(),
        inode_size: super_block.inode_size(),
        block_size: super_block.block_size(),
        block_groups: load_block_groups(
            weak_ref.clone(),
            block_device.as_ref(),
            &group_descriptors_segment,
        )
        .unwrap(),
        block_device,
        super_block: RwMutex::new(Dirty::new(super_block)),
        group_descriptors_segment,
        self_ref: weak_ref.clone(),
    });
    Ok(ext2)
}
/// Returns a reference to the underlying block device.
pub fn block_device(&self) -> &dyn BlockDevice {
    self.block_device.as_ref()
}
/// Returns the size of a block in bytes (cached from the super block).
pub fn block_size(&self) -> usize {
    self.block_size
}
/// Returns the size of an on-disk inode in bytes (cached from the super block).
pub fn inode_size(&self) -> usize {
    self.inode_size
}
/// Returns the number of inodes in each block group (cached from the super block).
pub fn inodes_per_group(&self) -> u32 {
    self.inodes_per_group
}
/// Returns the number of blocks in each block group (cached from the super block).
pub fn blocks_per_group(&self) -> u32 {
    self.blocks_per_group
}
/// Returns a read guard over the in-memory super block.
pub fn super_block(&self) -> RwMutexReadGuard<'_, Dirty<SuperBlock>> {
    self.super_block.read()
}
/// Returns the root inode (inode number `ROOT_INO`).
pub fn root_inode(&self) -> Result<Arc<Inode>> {
    self.lookup_inode(ROOT_INO)
}
/// Finds and returns the inode by `ino`.
pub(super) fn lookup_inode(&self, ino: u32) -> Result<Arc<Inode>> {
    // Resolve the owning group, then look up by the group-local index.
    let (_, block_group) = self.block_group_of_ino(ino)?;
    block_group.lookup_inode(self.inode_idx(ino))
}
/// Creates a new inode.
///
/// Allocation prefers `dir_block_group_idx` (the parent directory's group)
/// to keep related inodes close together on disk.
pub(super) fn create_inode(
    &self,
    dir_block_group_idx: usize,
    file_type: FileType,
    file_perm: FilePerm,
) -> Result<Arc<Inode>> {
    let (block_group_idx, ino) =
        self.alloc_ino(dir_block_group_idx, file_type == FileType::Dir)?;
    let inode = {
        let inode_desc = InodeDesc::new(file_type, file_perm);
        Inode::new(ino, block_group_idx, inode_desc, self.self_ref.clone())
    };
    let block_group = &self.block_groups[block_group_idx];
    // Cache the new inode so subsequent lookups hit memory.
    block_group.insert_cache(self.inode_idx(ino), inode.clone());
    Ok(inode)
}
/// Allocates a new inode number, internally used by `new_inode`.
///
/// Attempts to allocate from the `dir_block_group_idx` group first.
/// If allocation is not possible from this group, then search the remaining groups.
fn alloc_ino(&self, dir_block_group_idx: usize, is_dir: bool) -> Result<(usize, u32)> {
    let mut block_group_idx = dir_block_group_idx;
    if block_group_idx >= self.block_groups.len() {
        return_errno_with_message!(Errno::EINVAL, "invalid block group idx");
    }
    // Round-robin over all groups, starting at the preferred one.
    for _ in 0..self.block_groups.len() {
        if block_group_idx >= self.block_groups.len() {
            // Wrap around to the first group.
            block_group_idx = 0;
        }
        let block_group = &self.block_groups[block_group_idx];
        if let Some(inode_idx) = block_group.alloc_inode(is_dir) {
            // Inode numbers are 1-based and laid out group by group.
            let ino = block_group_idx as u32 * self.inodes_per_group + inode_idx + 1;
            self.super_block.write().dec_free_inodes();
            return Ok((block_group_idx, ino));
        }
        block_group_idx += 1;
    }
    return_errno_with_message!(Errno::ENOSPC, "no space on device");
}
/// Frees an inode.
pub(super) fn free_inode(&self, ino: u32, is_dir: bool) -> Result<()> {
    let (_, block_group) = self.block_group_of_ino(ino)?;
    let inode_idx = self.inode_idx(ino);
    // In order to prevent value underflow, it is necessary to increment
    // the free inode counter prior to freeing the inode.
    self.super_block.write().inc_free_inodes();
    block_group.free_inode(inode_idx, is_dir);
    Ok(())
}
/// Writes back the metadata of inode.
pub(super) fn sync_inode(&self, ino: u32, inode: &InodeDesc) -> Result<()> {
    let (_, group) = self.block_group_of_ino(ino)?;
    let raw_inode = RawInode::from(inode);
    group.sync_raw_inode(self.inode_idx(ino), &raw_inode);
    Ok(())
}
/// Writes back the block group descriptor to the descriptors table.
pub(super) fn sync_group_descriptor(
    &self,
    block_group_idx: usize,
    raw_descriptor: &RawGroupDescriptor,
) -> Result<()> {
    // Descriptors are laid out back-to-back in the table.
    let descriptor_size = core::mem::size_of::<RawGroupDescriptor>();
    self.group_descriptors_segment
        .write_val(block_group_idx * descriptor_size, raw_descriptor)?;
    Ok(())
}
/// Allocates a new block.
///
/// Attempts to allocate from the `block_group_idx` group first.
/// If allocation is not possible from this group, then search the remaining groups.
pub(super) fn alloc_block(&self, block_group_idx: usize) -> Result<Bid> {
    let group_count = self.block_groups.len();
    if block_group_idx >= group_count {
        return_errno_with_message!(Errno::EINVAL, "invalid block group idx");
    }

    // Round-robin over every group at most once, wrapping past the end.
    let mut idx = block_group_idx;
    for _ in 0..group_count {
        if idx >= group_count {
            idx = 0;
        }
        if let Some(block_idx) = self.block_groups[idx].alloc_block() {
            let bid = idx as u32 * self.blocks_per_group + block_idx;
            self.super_block.write().dec_free_blocks();
            return Ok(Bid::new(bid as _));
        }
        idx += 1;
    }
    return_errno_with_message!(Errno::ENOSPC, "no space on device");
}
/// Frees a block.
pub(super) fn free_block(&self, bid: Bid) -> Result<()> {
    let (_, group) = self.block_group_of_bid(bid)?;
    // Bump the free-block counter before touching the bitmap so the
    // counter can never underflow.
    self.super_block.write().inc_free_blocks();
    group.free_block(self.block_idx(bid));
    Ok(())
}
/// Reads contiguous blocks starting from the `bid` synchronously.
pub(super) fn read_blocks(&self, bid: Bid, segment: &VmSegment) -> Result<()> {
let status = self.block_device.read_blocks_sync(bid, segment)?;
match status {
BioStatus::Complete => Ok(()),
err_status => Err(Error::from(err_status)),
}
}
/// Reads one block indicated by the `bid` synchronously.
pub(super) fn read_block(&self, bid: Bid, frame: &VmFrame) -> Result<()> {
let status = self.block_device.read_block_sync(bid, frame)?;
match status {
BioStatus::Complete => Ok(()),
err_status => Err(Error::from(err_status)),
}
}
/// Writes contiguous blocks starting from the `bid` synchronously.
pub(super) fn write_blocks(&self, bid: Bid, segment: &VmSegment) -> Result<()> {
let status = self.block_device.write_blocks_sync(bid, segment)?;
match status {
BioStatus::Complete => Ok(()),
err_status => Err(Error::from(err_status)),
}
}
/// Writes one block indicated by the `bid` synchronously.
pub(super) fn write_block(&self, bid: Bid, frame: &VmFrame) -> Result<()> {
let status = self.block_device.write_block_sync(bid, frame)?;
match status {
BioStatus::Complete => Ok(()),
err_status => Err(Error::from(err_status)),
}
}
/// Writes back the metadata to the block device.
pub fn sync_metadata(&self) -> Result<()> {
    // If the superblock is clean, the block groups must be clean.
    if !self.super_block.read().is_dirty() {
        return Ok(());
    }

    // Hold the write lock for the whole flush so the data written out
    // is one consistent snapshot of the metadata.
    let mut super_block = self.super_block.write();
    // Writes back the metadata of block groups
    for block_group in &self.block_groups {
        block_group.sync_metadata(&super_block)?;
    }

    let mut bio_waiter = BioWaiter::new();
    // Writes back the main superblock and group descriptor table.
    let raw_super_block = RawSuperBlock::from((*super_block).deref());
    bio_waiter.concat(
        self.block_device
            .write_bytes_async(SUPER_BLOCK_OFFSET, raw_super_block.as_bytes())?,
    );
    bio_waiter.concat(self.block_device.write_blocks(
        super_block.group_descriptors_bid(0),
        &self.group_descriptors_segment,
    )?);

    // Writes back the backups of superblock and group descriptor table.
    let mut raw_super_block_backup = raw_super_block;
    for idx in 1..super_block.block_groups_count() {
        if super_block.is_backup_group(idx as usize) {
            // Each backup copy records its own block group index.
            raw_super_block_backup.block_group_idx = idx as u16;
            bio_waiter.concat(self.block_device.write_bytes_async(
                super_block.bid(idx as usize).to_offset(),
                raw_super_block_backup.as_bytes(),
            )?);
            bio_waiter.concat(self.block_device.write_blocks(
                super_block.group_descriptors_bid(idx as usize),
                &self.group_descriptors_segment,
            )?);
        }
    }

    // Waits for the completion of all submitted bios.
    bio_waiter
        .wait()
        .ok_or_else(|| Error::with_message(Errno::EIO, "failed to sync metadata of fs"))?;

    // Reset to clean.
    super_block.clear_dirty();
    Ok(())
}
/// Writes back all the cached inodes to the block device.
pub fn sync_all_inodes(&self) -> Result<()> {
    // Stop at the first group that fails to sync.
    self.block_groups
        .iter()
        .try_for_each(|group| group.sync_all_inodes())
}
#[inline]
fn block_group_of_bid(&self, bid: Bid) -> Result<(usize, &BlockGroup)> {
    // Map the device-wide block id to the group that owns it.
    let idx = (bid.to_raw() / (self.blocks_per_group as u64)) as usize;
    match self.block_groups.get(idx) {
        Some(group) => Ok((idx, group)),
        None => return_errno!(Errno::ENOENT),
    }
}
#[inline]
fn block_group_of_ino(&self, ino: u32) -> Result<(usize, &BlockGroup)> {
    // Inode numbers are 1-based, so subtract one before dividing.
    let idx = ((ino - 1) / self.inodes_per_group) as usize;
    match self.block_groups.get(idx) {
        Some(group) => Ok((idx, group)),
        None => return_errno!(Errno::ENOENT),
    }
}
#[inline]
// Converts a 1-based inode number into its 0-based index within its group.
fn inode_idx(&self, ino: u32) -> u32 {
    (ino - 1) % self.inodes_per_group
}
#[inline]
// Converts a device-wide block id into its 0-based index within its group.
fn block_idx(&self, bid: Bid) -> u32 {
    (bid.to_raw() as u32) % self.blocks_per_group
}
}

View File

@ -0,0 +1,43 @@
use crate::fs::ext2::{utils::Dirty, Ext2, SuperBlock as Ext2SuperBlock, MAGIC_NUM as EXT2_MAGIC};
use crate::fs::utils::{FileSystem, FsFlags, Inode, SuperBlock, NAME_MAX};
use crate::prelude::*;
use aster_frame::sync::RwMutexReadGuard;
impl FileSystem for Ext2 {
    /// Flushes all cached inodes first, then the filesystem metadata.
    fn sync(&self) -> Result<()> {
        self.sync_all_inodes()?;
        self.sync_metadata()
    }

    /// Returns the root directory inode.
    fn root_inode(&self) -> Arc<dyn Inode> {
        self.root_inode().unwrap()
    }

    /// Builds the generic VFS superblock view from the Ext2 superblock.
    fn sb(&self) -> SuperBlock {
        SuperBlock::from(self.super_block())
    }

    fn flags(&self) -> FsFlags {
        FsFlags::empty()
    }
}
impl From<RwMutexReadGuard<'_, Dirty<Ext2SuperBlock>>> for SuperBlock {
    /// Converts the Ext2-specific superblock into the generic VFS view.
    fn from(ext2_sb: RwMutexReadGuard<Dirty<Ext2SuperBlock>>) -> Self {
        Self {
            magic: EXT2_MAGIC as _,
            bsize: ext2_sb.block_size(),
            blocks: ext2_sb.total_blocks() as _,
            bfree: ext2_sb.free_blocks() as _,
            // NOTE(review): `bavail` does not subtract reserved blocks —
            // confirm against statfs semantics.
            bavail: ext2_sb.free_blocks() as _,
            files: ext2_sb.total_inodes() as _,
            ffree: ext2_sb.free_inodes() as _,
            fsid: 0, // TODO
            namelen: NAME_MAX,
            frsize: ext2_sb.fragment_size(),
            flags: 0, // TODO
        }
    }
}

View File

@ -0,0 +1,175 @@
use crate::fs::device::Device;
use crate::fs::ext2::{FilePerm, FileType, Inode as Ext2Inode};
use crate::fs::utils::{
DirentVisitor, FileSystem, Inode, InodeMode, InodeType, IoctlCmd, Metadata,
};
use crate::prelude::*;
use crate::vm::vmo::Vmo;
use aster_rights::Full;
use core::time::Duration;
/// Adapts the Ext2 inode to the VFS `Inode` trait.
///
/// Nearly every method delegates to the inherent method of the same name
/// on `Ext2Inode`; this impl only handles type conversions and downcasts.
impl Inode for Ext2Inode {
    fn len(&self) -> usize {
        self.file_size() as _
    }

    fn resize(&self, new_size: usize) -> Result<()> {
        self.resize(new_size)
    }

    fn metadata(&self) -> Metadata {
        Metadata {
            dev: 0, // TODO: ID of block device
            ino: self.ino() as _,
            size: self.file_size() as _,
            blk_size: self.fs().super_block().block_size(),
            blocks: self.blocks_count() as _,
            atime: self.atime(),
            mtime: self.mtime(),
            ctime: self.ctime(),
            type_: InodeType::from(self.file_type()),
            mode: InodeMode::from(self.file_perm()),
            nlinks: self.hard_links() as _,
            uid: self.uid() as _,
            gid: self.gid() as _,
            rdev: self.device_id(),
        }
    }

    fn atime(&self) -> Duration {
        self.atime()
    }

    fn set_atime(&self, time: Duration) {
        self.set_atime(time)
    }

    fn mtime(&self) -> Duration {
        self.mtime()
    }

    fn set_mtime(&self, time: Duration) {
        self.set_mtime(time)
    }

    fn ino(&self) -> u64 {
        self.ino() as _
    }

    fn type_(&self) -> InodeType {
        InodeType::from(self.file_type())
    }

    fn mode(&self) -> InodeMode {
        InodeMode::from(self.file_perm())
    }

    fn set_mode(&self, mode: InodeMode) {
        self.set_file_perm(mode.into());
    }

    fn page_cache(&self) -> Option<Vmo<Full>> {
        Some(self.page_cache())
    }

    fn read_at(&self, offset: usize, buf: &mut [u8]) -> Result<usize> {
        self.read_at(offset, buf)
    }

    fn read_direct_at(&self, offset: usize, buf: &mut [u8]) -> Result<usize> {
        self.read_direct_at(offset, buf)
    }

    fn write_at(&self, offset: usize, buf: &[u8]) -> Result<usize> {
        self.write_at(offset, buf)
    }

    fn write_direct_at(&self, offset: usize, buf: &[u8]) -> Result<usize> {
        self.write_direct_at(offset, buf)
    }

    fn create(&self, name: &str, type_: InodeType, mode: InodeMode) -> Result<Arc<dyn Inode>> {
        // `Ok(...?)` coerces `Arc<Ext2Inode>` into `Arc<dyn Inode>`.
        Ok(self.create(name, type_.into(), mode.into())?)
    }

    fn mknod(&self, name: &str, mode: InodeMode, dev: Arc<dyn Device>) -> Result<Arc<dyn Inode>> {
        let inode = self.create(name, InodeType::from(dev.type_()).into(), mode.into())?;
        inode.set_device_id(dev.id().into()).unwrap();
        Ok(inode)
    }

    fn lookup(&self, name: &str) -> Result<Arc<dyn Inode>> {
        Ok(self.lookup(name)?)
    }

    fn readdir_at(&self, offset: usize, visitor: &mut dyn DirentVisitor) -> Result<usize> {
        self.readdir_at(offset, visitor)
    }

    fn link(&self, old: &Arc<dyn Inode>, name: &str) -> Result<()> {
        // Hard links may only target an inode of this same Ext2 instance.
        let old = old
            .downcast_ref::<Ext2Inode>()
            .ok_or_else(|| Error::with_message(Errno::EXDEV, "not same fs"))?;
        self.link(old, name)
    }

    fn unlink(&self, name: &str) -> Result<()> {
        self.unlink(name)
    }

    fn rmdir(&self, name: &str) -> Result<()> {
        self.rmdir(name)
    }

    fn rename(&self, old_name: &str, target: &Arc<dyn Inode>, new_name: &str) -> Result<()> {
        // Renames may not cross filesystem boundaries.
        let target = target
            .downcast_ref::<Ext2Inode>()
            .ok_or_else(|| Error::with_message(Errno::EXDEV, "not same fs"))?;
        self.rename(old_name, target, new_name)
    }

    fn read_link(&self) -> Result<String> {
        self.read_link()
    }

    fn write_link(&self, target: &str) -> Result<()> {
        self.write_link(target)
    }

    // Unused parameters are underscored to avoid compiler warnings.
    fn ioctl(&self, _cmd: IoctlCmd, _arg: usize) -> Result<i32> {
        // No ioctl commands are supported yet.
        Err(Error::new(Errno::EINVAL))
    }

    fn sync(&self) -> Result<()> {
        self.sync_all()
    }

    fn fs(&self) -> Arc<dyn FileSystem> {
        self.fs()
    }
}
impl From<FilePerm> for InodeMode {
    /// Converts Ext2 permission bits into VFS mode bits (same bit layout).
    fn from(perm: FilePerm) -> Self {
        Self::from_bits_truncate(perm.bits() as _)
    }
}
impl From<InodeMode> for FilePerm {
    /// Converts VFS mode bits into Ext2 permission bits (same bit layout).
    fn from(mode: InodeMode) -> Self {
        Self::from_bits_truncate(mode.bits() as _)
    }
}
impl From<FileType> for InodeType {
    /// Converts the Ext2 file type to the VFS inode type.
    fn from(type_: FileType) -> Self {
        // NOTE(review): panics if the numeric values of the two enums ever
        // diverge — assumed to stay in sync.
        Self::try_from(type_ as u32).unwrap()
    }
}
impl From<InodeType> for FileType {
    /// Converts the VFS inode type to the Ext2 file type.
    fn from(type_: InodeType) -> Self {
        // NOTE(review): panics if the numeric values of the two enums ever
        // diverge — assumed to stay in sync.
        Self::try_from(type_ as u16).unwrap()
    }
}

View File

@ -0,0 +1,2 @@
mod fs;
mod inode;

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,50 @@
//! A safe Rust Ext2 filesystem.
//!
//! The Second Extended File System(Ext2) is a major rewrite of the Ext filesystem.
//! It is the predominant filesystem in use by Linux from the early 1990s to the early 2000s.
//! The structures of Ext3 and Ext4 are based on Ext2 and add some additional options
//! such as journaling.
//!
//! The features of this version of Ext2 are as follows:
//! 1. No unsafe Rust. The filesystem is written in Rust without any unsafe code,
//! ensuring that there are no memory safety issues in the code.
//! 2. Deep integration with PageCache. The data and metadata of the filesystem are
//! stored in PageCache, which accelerates the performance of data access.
//! 3. Compatible with queue-based block devices. The filesystem can submit multiple
//! BIO requests to the block device at once, thereby enhancing I/O performance.
//!
//! # Example
//!
//! ```no_run
//! // Opens an Ext2 from the block device.
//! let ext2 = Ext2::open(block_device)?;
//! // Lookup the root inode.
//! let root = ext2.root_inode()?;
//! // Create a file inside root directory.
//! let file = root.create("file", FileType::File, FilePerm::from_bits_truncate(0o666))?;
//! // Write data into the file.
//! const WRITE_DATA: &[u8] = b"Hello, World";
//! let len = file.write_at(0, WRITE_DATA)?;
//! assert!(len == WRITE_DATA.len());
//! ```
//!
//! # Limitation
//!
//! Here we summarize the features that need to be implemented in the future.
//! 1. Supports large file.
//! 2. Supports merging small read/write operations.
//! 3. Handles the intermediate failure status correctly.
pub use fs::Ext2;
pub use inode::{FilePerm, FileType, Inode};
pub use super_block::{SuperBlock, MAGIC_NUM};
mod block_group;
mod blocks_hole;
mod dir;
mod fs;
mod impl_for_vfs;
mod inode;
mod prelude;
mod super_block;
mod utils;

View File

@ -0,0 +1,23 @@
pub(super) use super::utils::{Dirty, IsPowerOf};
pub(super) use crate::fs::utils::{
CStr256, DirentVisitor, InodeType, PageCache, PageCacheBackend, Str16, Str64,
};
pub(super) use crate::prelude::*;
pub(super) use crate::time::UnixTime;
pub(super) use crate::vm::vmo::Vmo;
pub(super) use align_ext::AlignExt;
pub(super) use aster_block::{
bio::{BioStatus, BioWaiter},
id::Bid,
BlockDevice, BLOCK_SIZE,
};
pub(super) use aster_frame::sync::{RwMutex, RwMutexReadGuard};
pub(super) use aster_frame::vm::VmAllocOptions;
pub(super) use aster_frame::vm::VmIo;
pub(super) use aster_frame::vm::{VmFrame, VmSegment};
pub(super) use aster_rights::Full;
pub(super) use core::ops::{Deref, DerefMut};
pub(super) use core::time::Duration;
pub(super) use static_assertions::const_assert;

View File

@ -0,0 +1,542 @@
use super::inode::RawInode;
use super::prelude::*;
/// The magic number of Ext2.
pub const MAGIC_NUM: u16 = 0xef53;

/// The main superblock is located at byte 1024 from the beginning of the device.
pub const SUPER_BLOCK_OFFSET: usize = 1024;

/// The superblock occupies exactly 1024 bytes on disk.
const SUPER_BLOCK_SIZE: usize = 1024;
/// The in-memory rust superblock.
///
/// It contains all information about the layout of the Ext2.
#[derive(Clone, Copy, Debug)]
pub struct SuperBlock {
    /// Total number of inodes.
    inodes_count: u32,
    /// Total number of blocks.
    blocks_count: u32,
    /// Total number of reserved blocks.
    reserved_blocks_count: u32,
    /// Total number of free blocks.
    free_blocks_count: u32,
    /// Total number of free inodes.
    free_inodes_count: u32,
    /// First data block.
    first_data_block: Bid,
    /// Block size.
    block_size: usize,
    /// Fragment size.
    frag_size: usize,
    /// Number of blocks in each block group.
    blocks_per_group: u32,
    /// Number of fragments in each block group.
    frags_per_group: u32,
    /// Number of inodes in each block group.
    inodes_per_group: u32,
    /// Mount time.
    mtime: UnixTime,
    /// Write time.
    wtime: UnixTime,
    /// Mount count.
    mnt_count: u16,
    /// Maximal mount count.
    max_mnt_count: u16,
    /// Magic signature.
    magic: u16,
    /// Filesystem state.
    state: FsState,
    /// Behaviour when detecting errors.
    errors_behaviour: ErrorsBehaviour,
    /// Time of last check.
    last_check_time: UnixTime,
    /// Interval between checks.
    check_interval: Duration,
    /// Creator OS ID.
    creator_os: OsId,
    /// Revision level.
    rev_level: RevLevel,
    /// Default uid for reserved blocks.
    def_resuid: u32,
    /// Default gid for reserved blocks.
    def_resgid: u32,
    //
    // These fields are valid for RevLevel::Dynamic only.
    //
    /// First non-reserved inode number.
    first_ino: u32,
    /// Size of inode structure.
    inode_size: usize,
    /// Block group that this superblock is part of (if backup copy).
    block_group_idx: usize,
    /// Compatible feature set.
    feature_compat: FeatureCompatSet,
    /// Incompatible feature set.
    feature_incompat: FeatureInCompatSet,
    /// Readonly-compatible feature set.
    feature_ro_compat: FeatureRoCompatSet,
    /// 128-bit uuid for volume.
    uuid: [u8; 16],
    /// Volume name.
    volume_name: Str16,
    /// Directory where last mounted.
    last_mounted_dir: Str64,
    //
    // These fields are valid only if FeatureCompatSet::DIR_PREALLOC is set.
    //
    /// Number of blocks to preallocate for files.
    prealloc_file_blocks: u8,
    /// Number of blocks to preallocate for directories.
    prealloc_dir_blocks: u8,
}
impl TryFrom<RawSuperBlock> for SuperBlock {
type Error = crate::error::Error;
fn try_from(sb: RawSuperBlock) -> Result<Self> {
Ok(Self {
inodes_count: sb.inodes_count,
blocks_count: sb.blocks_count,
reserved_blocks_count: sb.reserved_blocks_count,
free_blocks_count: sb.free_blocks_count,
free_inodes_count: sb.free_inodes_count,
first_data_block: Bid::new(sb.first_data_block as _),
block_size: 1024 << sb.log_block_size,
frag_size: 1024 << sb.log_frag_size,
blocks_per_group: sb.blocks_per_group,
frags_per_group: sb.frags_per_group,
inodes_per_group: sb.inodes_per_group,
mtime: sb.mtime,
wtime: sb.wtime,
mnt_count: sb.mnt_count,
max_mnt_count: sb.max_mnt_count,
magic: {
if sb.magic != MAGIC_NUM {
return_errno_with_message!(Errno::EINVAL, "bad ext2 magic number");
}
MAGIC_NUM
},
state: {
let state = FsState::try_from(sb.state)
.map_err(|_| Error::with_message(Errno::EINVAL, "invalid fs state"))?;
if state == FsState::Corrupted {
return_errno_with_message!(Errno::EUCLEAN, "fs is corrupted");
}
state
},
errors_behaviour: ErrorsBehaviour::try_from(sb.errors)
.map_err(|_| Error::with_message(Errno::EINVAL, "invalid errors behaviour"))?,
last_check_time: sb.last_check_time,
check_interval: Duration::from_secs(sb.check_interval as _),
creator_os: {
let os_id = OsId::try_from(sb.creator_os)
.map_err(|_| Error::with_message(Errno::EINVAL, "invalid creater os"))?;
if os_id != OsId::Linux {
return_errno_with_message!(Errno::EINVAL, "not supported os id");
}
OsId::Linux
},
rev_level: {
let rev_level = RevLevel::try_from(sb.rev_level)
.map_err(|_| Error::with_message(Errno::EINVAL, "invalid revision level"))?;
if rev_level != RevLevel::Dynamic {
return_errno_with_message!(Errno::EINVAL, "not supported rev level");
}
RevLevel::Dynamic
},
def_resuid: sb.def_resuid as _,
def_resgid: sb.def_resgid as _,
first_ino: sb.first_ino,
inode_size: {
let inode_size = sb.inode_size as _;
if inode_size < core::mem::size_of::<RawInode>() {
return_errno_with_message!(Errno::EINVAL, "inode size is too small");
}
inode_size
},
block_group_idx: sb.block_group_idx as _,
feature_compat: FeatureCompatSet::from_bits(sb.feature_compat).ok_or(
Error::with_message(Errno::EINVAL, "invalid feature compat set"),
)?,
feature_incompat: FeatureInCompatSet::from_bits(sb.feature_incompat).ok_or(
Error::with_message(Errno::EINVAL, "invalid feature incompat set"),
)?,
feature_ro_compat: FeatureRoCompatSet::from_bits(sb.feature_ro_compat).ok_or(
Error::with_message(Errno::EINVAL, "invalid feature ro compat set"),
)?,
uuid: sb.uuid,
volume_name: sb.volume_name,
last_mounted_dir: sb.last_mounted_dir,
prealloc_file_blocks: sb.prealloc_file_blocks,
prealloc_dir_blocks: sb.prealloc_dir_blocks,
})
}
}
impl SuperBlock {
    /// Returns the block size.
    pub fn block_size(&self) -> usize {
        self.block_size
    }

    /// Returns the size of inode structure.
    pub fn inode_size(&self) -> usize {
        self.inode_size
    }

    /// Returns the fragment size.
    pub fn fragment_size(&self) -> usize {
        self.frag_size
    }

    /// Returns total number of inodes.
    pub fn total_inodes(&self) -> u32 {
        self.inodes_count
    }

    /// Returns total number of blocks.
    pub fn total_blocks(&self) -> u32 {
        self.blocks_count
    }

    /// Returns the number of blocks in each block group.
    pub fn blocks_per_group(&self) -> u32 {
        self.blocks_per_group
    }

    /// Returns the number of inodes in each block group.
    pub fn inodes_per_group(&self) -> u32 {
        self.inodes_per_group
    }

    /// Returns the number of block groups.
    // NOTE(review): truncating division — assumes `blocks_count` is an
    // exact multiple of `blocks_per_group`; a trailing partial group
    // would be ignored. TODO: confirm, or round up.
    pub fn block_groups_count(&self) -> u32 {
        self.blocks_count / self.blocks_per_group
    }

    /// Returns the filesystem state.
    pub fn state(&self) -> FsState {
        self.state
    }

    /// Returns the revision level.
    pub fn rev_level(&self) -> RevLevel {
        self.rev_level
    }

    /// Returns the compatible feature set.
    pub fn feature_compat(&self) -> FeatureCompatSet {
        self.feature_compat
    }

    /// Returns the incompatible feature set.
    pub fn feature_incompat(&self) -> FeatureInCompatSet {
        self.feature_incompat
    }

    /// Returns the readonly-compatible feature set.
    pub fn feature_ro_compat(&self) -> FeatureRoCompatSet {
        self.feature_ro_compat
    }

    /// Returns the number of free blocks.
    pub fn free_blocks(&self) -> u32 {
        self.free_blocks_count
    }

    /// Increase the number of free blocks.
    pub(super) fn inc_free_blocks(&mut self) {
        self.free_blocks_count += 1;
    }

    /// Decrease the number of free blocks.
    pub(super) fn dec_free_blocks(&mut self) {
        debug_assert!(self.free_blocks_count > 0);
        self.free_blocks_count -= 1;
    }

    /// Returns the number of free inodes.
    pub fn free_inodes(&self) -> u32 {
        self.free_inodes_count
    }

    /// Increase the number of free inodes.
    pub(super) fn inc_free_inodes(&mut self) {
        self.free_inodes_count += 1;
    }

    /// Decrease the number of free inodes.
    pub(super) fn dec_free_inodes(&mut self) {
        debug_assert!(self.free_inodes_count > 0);
        self.free_inodes_count -= 1;
    }

    /// Checks if the block group will backup the super block.
    pub(super) fn is_backup_group(&self, block_group_idx: usize) -> bool {
        if block_group_idx == 0 {
            // Group 0 holds the main copy, not a backup.
            false
        } else if self
            .feature_ro_compat
            .contains(FeatureRoCompatSet::SPARSE_SUPER)
        {
            // The backup groups chosen are 1 and powers of 3, 5 and 7.
            block_group_idx == 1
                || block_group_idx.is_power_of(3)
                || block_group_idx.is_power_of(5)
                || block_group_idx.is_power_of(7)
        } else {
            // Without SPARSE_SUPER every group carries a backup.
            true
        }
    }

    /// Returns the starting block id of the super block
    /// inside the block group pointed by `block_group_idx`.
    ///
    /// # Panic
    ///
    /// If `block_group_idx` is neither 0 nor a backup block group index,
    /// then the method panics.
    pub(super) fn bid(&self, block_group_idx: usize) -> Bid {
        if block_group_idx == 0 {
            let bid = (SUPER_BLOCK_OFFSET / self.block_size) as u64;
            return Bid::new(bid);
        }
        assert!(self.is_backup_group(block_group_idx));
        let super_block_bid = block_group_idx * (self.blocks_per_group as usize);
        Bid::new(super_block_bid as u64)
    }

    /// Returns the starting block id of the block group descripter table
    /// inside the block group pointed by `block_group_idx`.
    ///
    /// # Panic
    ///
    /// If `block_group_idx` is neither 0 nor a backup block group index,
    /// then the method panics.
    pub(super) fn group_descriptors_bid(&self, block_group_idx: usize) -> Bid {
        let super_block_bid = self.bid(block_group_idx);
        // The descriptor table starts right after the superblock.
        super_block_bid + (SUPER_BLOCK_SIZE.div_ceil(self.block_size) as u64)
    }
}
bitflags! {
    /// Compatible feature set.
    // NOTE: constructed via `from_bits` in `SuperBlock::try_from`, so any
    // unknown bit on disk is rejected with EINVAL.
    pub struct FeatureCompatSet: u32 {
        /// Preallocate some number of blocks to a directory when creating a new one
        const DIR_PREALLOC = 1 << 0;
        /// AFS server inodes exist
        const IMAGIC_INODES = 1 << 1;
        /// File system has a journal
        const HAS_JOURNAL = 1 << 2;
        /// Inodes have extended attributes
        const EXT_ATTR = 1 << 3;
        /// File system can resize itself for larger partitions
        const RESIZE_INO = 1 << 4;
        /// Directories use hash index
        const DIR_INDEX = 1 << 5;
    }
}
bitflags! {
    /// Incompatible feature set.
    // NOTE: constructed via `from_bits` in `SuperBlock::try_from`, so any
    // unknown bit on disk is rejected with EINVAL.
    pub struct FeatureInCompatSet: u32 {
        /// Compression is used
        const COMPRESSION = 1 << 0;
        /// Directory entries contain a type field
        const FILETYPE = 1 << 1;
        /// File system needs to replay its journal
        const RECOVER = 1 << 2;
        /// File system uses a journal device
        const JOURNAL_DEV = 1 << 3;
        /// Metablock block group
        const META_BG = 1 << 4;
    }
}
bitflags! {
    /// Readonly-compatible feature set.
    // NOTE: constructed via `from_bits` in `SuperBlock::try_from`, so any
    // unknown bit on disk is rejected with EINVAL.
    pub struct FeatureRoCompatSet: u32 {
        /// Sparse superblocks and group descriptor tables
        const SPARSE_SUPER = 1 << 0;
        /// File system uses a 64-bit file size
        const LARGE_FILE = 1 << 1;
        /// Directory contents are stored in the form of a Binary Tree
        const BTREE_DIR = 1 << 2;
    }
}
/// Filesystem state, decoded from the raw `state` field via `TryFromInt`.
#[repr(u16)]
#[derive(Copy, Clone, Debug, Eq, PartialEq, TryFromInt)]
pub enum FsState {
    /// Unmounted cleanly
    Valid = 1,
    /// Errors detected
    Err = 2,
    /// Filesystem is corrupted (EUCLEAN)
    Corrupted = 117,
}
/// Behaviour on error, decoded from the raw `errors` field via `TryFromInt`.
#[repr(u16)]
#[derive(Copy, Clone, Debug, Eq, PartialEq, TryFromInt)]
pub enum ErrorsBehaviour {
    /// Continue execution
    Continue = 1,
    /// Remount fs read-only
    RemountReadonly = 2,
    /// Should panic
    Panic = 3,
}
impl Default for ErrorsBehaviour {
    /// Defaults to continuing execution on errors.
    fn default() -> Self {
        Self::Continue
    }
}
/// Creator OS, decoded from the raw `creator_os` field via `TryFromInt`.
/// Only `Linux` is accepted by `SuperBlock::try_from`.
#[repr(u32)]
#[derive(Copy, Clone, Debug, Eq, PartialEq, TryFromInt)]
pub enum OsId {
    Linux = 0,
    Hurd = 1,
    Masix = 2,
    FreeBSD = 3,
    Lites = 4,
}
/// Revision level, decoded from the raw `rev_level` field via `TryFromInt`.
/// Only `Dynamic` is accepted by `SuperBlock::try_from`.
#[repr(u32)]
#[derive(Copy, Clone, Debug, Eq, PartialEq, TryFromInt)]
pub enum RevLevel {
    /// The good old (original) format.
    GoodOld = 0,
    /// V2 format with dynamic inode size.
    Dynamic = 1,
}
// The on-disk layout must be exactly one superblock in size.
const_assert!(core::mem::size_of::<RawSuperBlock>() == SUPER_BLOCK_SIZE);

/// The raw superblock, it must be exactly 1024 bytes in length.
#[repr(C)]
#[derive(Clone, Copy, Debug, Pod, Default)]
pub(super) struct RawSuperBlock {
    pub inodes_count: u32,
    pub blocks_count: u32,
    pub reserved_blocks_count: u32,
    pub free_blocks_count: u32,
    pub free_inodes_count: u32,
    pub first_data_block: u32,
    /// The number to left-shift 1024 to obtain the block size.
    pub log_block_size: u32,
    /// The number to left-shift 1024 to obtain the fragment size.
    pub log_frag_size: u32,
    pub blocks_per_group: u32,
    pub frags_per_group: u32,
    pub inodes_per_group: u32,
    /// Mount time.
    pub mtime: UnixTime,
    /// Write time.
    pub wtime: UnixTime,
    pub mnt_count: u16,
    pub max_mnt_count: u16,
    pub magic: u16,
    pub state: u16,
    pub errors: u16,
    pub min_rev_level: u16,
    /// Time of last check.
    pub last_check_time: UnixTime,
    pub check_interval: u32,
    pub creator_os: u32,
    pub rev_level: u32,
    pub def_resuid: u16,
    pub def_resgid: u16,
    pub first_ino: u32,
    pub inode_size: u16,
    pub block_group_idx: u16,
    pub feature_compat: u32,
    pub feature_incompat: u32,
    pub feature_ro_compat: u32,
    pub uuid: [u8; 16],
    pub volume_name: Str16,
    pub last_mounted_dir: Str64,
    pub algorithm_usage_bitmap: u32,
    pub prealloc_file_blocks: u8,
    pub prealloc_dir_blocks: u8,
    // Alignment padding; keeps the following field naturally aligned.
    padding1: u16,
    //
    // These fields are for journaling support in Ext3.
    //
    /// Uuid of journal superblock.
    pub journal_uuid: [u8; 16],
    /// Inode number of journal file.
    pub journal_ino: u32,
    /// Device number of journal file.
    pub journal_dev: u32,
    /// Start of list of inodes to delete.
    pub last_orphan: u32,
    /// HTREE hash seed.
    pub hash_seed: [u32; 4],
    /// Default hash version to use
    pub def_hash_version: u8,
    reserved_char_pad: u8,
    reserved_word_pad: u16,
    /// Default mount options.
    pub default_mount_opts: u32,
    /// First metablock block group.
    pub first_meta_bg: u32,
    // Tail padding up to 1024 bytes.
    reserved: Reserved,
}
impl From<&SuperBlock> for RawSuperBlock {
    /// Serializes the in-memory superblock back to the on-disk layout.
    ///
    /// Fields not tracked in memory (journal fields, padding, etc.) are
    /// zero-filled via `Default`.
    fn from(sb: &SuperBlock) -> Self {
        Self {
            inodes_count: sb.inodes_count,
            blocks_count: sb.blocks_count,
            reserved_blocks_count: sb.reserved_blocks_count,
            free_blocks_count: sb.free_blocks_count,
            free_inodes_count: sb.free_inodes_count,
            first_data_block: sb.first_data_block.to_raw() as u32,
            // `block_size == 1024 << log_block_size`, so the log is the
            // number of trailing zero bits of `block_size / 1024`.
            // (The previous `block_size >> 11` only happened to be correct
            // for 1 KiB/2 KiB/4 KiB blocks and broke at 8 KiB and above.)
            log_block_size: (sb.block_size >> 10).trailing_zeros(),
            log_frag_size: (sb.frag_size >> 10).trailing_zeros(),
            blocks_per_group: sb.blocks_per_group,
            frags_per_group: sb.frags_per_group,
            inodes_per_group: sb.inodes_per_group,
            mtime: sb.mtime,
            wtime: sb.wtime,
            mnt_count: sb.mnt_count,
            max_mnt_count: sb.max_mnt_count,
            magic: sb.magic,
            state: sb.state as u16,
            errors: sb.errors_behaviour as u16,
            last_check_time: sb.last_check_time,
            check_interval: sb.check_interval.as_secs() as u32,
            creator_os: sb.creator_os as u32,
            rev_level: sb.rev_level as u32,
            def_resuid: sb.def_resuid as u16,
            def_resgid: sb.def_resgid as u16,
            first_ino: sb.first_ino,
            inode_size: sb.inode_size as u16,
            block_group_idx: sb.block_group_idx as u16,
            feature_compat: sb.feature_compat.bits(),
            feature_incompat: sb.feature_incompat.bits(),
            feature_ro_compat: sb.feature_ro_compat.bits(),
            uuid: sb.uuid,
            volume_name: sb.volume_name,
            last_mounted_dir: sb.last_mounted_dir,
            prealloc_file_blocks: sb.prealloc_file_blocks,
            prealloc_dir_blocks: sb.prealloc_dir_blocks,
            ..Default::default()
        }
    }
}
/// Tail padding that keeps `RawSuperBlock` exactly 1024 bytes long.
#[repr(C)]
#[derive(Clone, Copy, Debug, Pod)]
struct Reserved([u32; 190]);

impl Default for Reserved {
    fn default() -> Self {
        // Hand-written because large arrays have no derived `Default`.
        Self([0u32; 190])
    }
}

View File

@ -0,0 +1,93 @@
use super::prelude::*;
use core::ops::MulAssign;
/// An extension trait for testing whether a value is a positive power of
/// another value.
pub trait IsPowerOf: Copy + Sized + MulAssign + PartialOrd {
    /// Returns true if and only if `self == x^k` for some `k` where `k > 0`.
    ///
    /// The `x` must be a positive value.
    fn is_power_of(&self, x: Self) -> bool {
        // Guard the degenerate bases first: `x * x == x` holds exactly for
        // x in {0, 1}, and for those bases `x^k == x` for every `k > 0`.
        // Without this guard the loop below never terminates for `x <= 1`
        // whenever `*self > x`.
        let mut squared = x;
        squared *= x;
        if squared == x {
            return *self == x;
        }
        let mut power = x;
        while power < *self {
            power *= x;
        }
        power == *self
    }
}

macro_rules! impl_ipo_for {
    ($($ipo_ty:ty),*) => {
        $(impl IsPowerOf for $ipo_ty {})*
    };
}

impl_ipo_for!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128, isize, usize);
/// The `Dirty` wraps a value of type `T` with functions similar to that of a rw-lock,
/// but simply sets a dirty flag on `write()`.
pub struct Dirty<T: Debug> {
    // The wrapped value.
    value: T,
    // Set on every mutable access; cleared by `clear_dirty`.
    dirty: bool,
}
impl<T: Debug> Dirty<T> {
    /// Wraps `val` with the dirty flag cleared.
    pub fn new(val: T) -> Dirty<T> {
        Self {
            value: val,
            dirty: false,
        }
    }

    /// Wraps `val` with the dirty flag already set.
    pub fn new_dirty(val: T) -> Dirty<T> {
        Self {
            value: val,
            dirty: true,
        }
    }

    /// Tells whether the value has been marked dirty.
    pub fn is_dirty(&self) -> bool {
        self.dirty
    }

    /// Marks the value as clean again.
    pub fn clear_dirty(&mut self) {
        self.dirty = false;
    }
}
impl<T: Debug> Deref for Dirty<T> {
    type Target = T;

    /// Returns the immutable value; does not touch the dirty flag.
    fn deref(&self) -> &T {
        &self.value
    }
}
impl<T: Debug> DerefMut for Dirty<T> {
    /// Returns the mutable value, sets the dirty flag.
    fn deref_mut(&mut self) -> &mut T {
        // Marked dirty pessimistically: the caller may not actually write.
        self.dirty = true;
        &mut self.value
    }
}
impl<T: Debug> Drop for Dirty<T> {
    /// Warns if the value is still dirty when dropped, since any pending
    /// changes are lost at that point.
    fn drop(&mut self) {
        if self.is_dirty() {
            warn!("[{:?}] is dirty then dropping", self.value);
        }
    }
}
impl<T: Debug> Debug for Dirty<T> {
    /// Prefixes the inner value's debug output with its dirty state.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        let tag = if self.dirty { "Dirty" } else { "Clean" };
        write!(f, "[{}] {:?}", tag, self.value)
    }
}

View File

@ -1,6 +1,7 @@
pub mod device;
pub mod devpts;
pub mod epoll;
pub mod ext2;
pub mod file_handle;
pub mod file_table;
pub mod fs_resolver;
@ -10,3 +11,28 @@ pub mod procfs;
pub mod ramfs;
pub mod rootfs;
pub mod utils;
use crate::fs::{ext2::Ext2, fs_resolver::FsPath};
use crate::prelude::*;
use crate::thread::kernel_thread::KernelThreadExt;
use aster_virtio::device::block::device::BlockDevice as VirtIoBlockDevice;
use aster_virtio::device::block::DEVICE_NAME as VIRTIO_BLOCK_NAME;
/// Lazily initializes the filesystem layer: starts the virtio-blk request
/// thread, then opens and mounts the Ext2 filesystem at "/ext2".
pub fn lazy_init() {
    let block_device = aster_block::get_device(VIRTIO_BLOCK_NAME).unwrap();

    // Spawn a kernel thread that keeps draining the virtio-blk queue.
    let device_for_thread = block_device.clone();
    crate::Thread::spawn_kernel_thread(crate::ThreadOptions::new(move || {
        info!("spawn the virt-io-block thread");
        let virtio_block_device = device_for_thread
            .downcast_ref::<VirtIoBlockDevice>()
            .unwrap();
        loop {
            virtio_block_device.handle_requests();
        }
    }));

    // Open the Ext2 filesystem on the device and mount it.
    let ext2_fs = Ext2::open(block_device).unwrap();
    let target_path = FsPath::try_from("/ext2").unwrap();
    println!("[kernel] Mount Ext2 fs at {:?} ", target_path);
    self::rootfs::mount_fs_at(ext2_fs, &target_path).unwrap();
}

View File

@ -58,12 +58,18 @@ impl<D: DirOps + 'static> Inode for ProcDir<D> {
self.info.size()
}
fn resize(&self, _new_size: usize) {}
fn resize(&self, _new_size: usize) -> Result<()> {
Err(Error::new(Errno::EISDIR))
}
fn metadata(&self) -> Metadata {
self.info.metadata()
}
fn ino(&self) -> u64 {
self.info.ino()
}
fn type_(&self) -> InodeType {
InodeType::Dir
}

View File

@ -1,4 +1,3 @@
use aster_frame::vm::VmFrame;
use core::time::Duration;
use crate::fs::utils::{FileSystem, Inode, InodeMode, InodeType, IoctlCmd, Metadata};
@ -31,12 +30,18 @@ impl<F: FileOps + 'static> Inode for ProcFile<F> {
self.info.size()
}
fn resize(&self, _new_size: usize) {}
fn resize(&self, _new_size: usize) -> Result<()> {
Err(Error::new(Errno::EPERM))
}
fn metadata(&self) -> Metadata {
self.info.metadata()
}
fn ino(&self) -> u64 {
self.info.ino()
}
fn type_(&self) -> InodeType {
InodeType::File
}
@ -65,14 +70,6 @@ impl<F: FileOps + 'static> Inode for ProcFile<F> {
self.info.set_mtime(time)
}
fn read_page(&self, _idx: usize, _frame: &VmFrame) -> Result<()> {
unreachable!()
}
fn write_page(&self, _idx: usize, _frame: &VmFrame) -> Result<()> {
unreachable!()
}
fn read_at(&self, offset: usize, buf: &mut [u8]) -> Result<usize> {
let data = self.inner.data()?;
let start = data.len().min(offset);

View File

@ -38,6 +38,10 @@ impl ProcInodeInfo {
self.metadata.read().clone()
}
pub fn ino(&self) -> u64 {
self.metadata.read().ino as _
}
pub fn size(&self) -> usize {
self.metadata.read().size
}

View File

@ -1,4 +1,3 @@
use aster_frame::vm::VmFrame;
use core::time::Duration;
use crate::fs::utils::{FileSystem, Inode, InodeMode, InodeType, IoctlCmd, Metadata};
@ -31,12 +30,18 @@ impl<S: SymOps + 'static> Inode for ProcSym<S> {
self.info.size()
}
fn resize(&self, _new_size: usize) {}
fn resize(&self, _new_size: usize) -> Result<()> {
Err(Error::new(Errno::EPERM))
}
fn metadata(&self) -> Metadata {
self.info.metadata()
}
fn ino(&self) -> u64 {
self.info.ino()
}
fn type_(&self) -> InodeType {
InodeType::SymLink
}
@ -65,14 +70,6 @@ impl<S: SymOps + 'static> Inode for ProcSym<S> {
self.info.set_mtime(time)
}
fn read_page(&self, _idx: usize, _frame: &VmFrame) -> Result<()> {
Err(Error::new(Errno::EPERM))
}
fn write_page(&self, _idx: usize, _frame: &VmFrame) -> Result<()> {
Err(Error::new(Errno::EPERM))
}
fn read_at(&self, _offset: usize, _buf: &mut [u8]) -> Result<usize> {
Err(Error::new(Errno::EPERM))
}

View File

@ -1,6 +1,6 @@
use alloc::str;
use aster_frame::sync::{RwLock, RwLockWriteGuard};
use aster_frame::vm::{VmFrame, VmIo};
use aster_frame::sync::RwLockWriteGuard;
use aster_frame::vm::VmFrame;
use aster_frame::vm::VmIo;
use aster_rights::Full;
use aster_util::slot_vec::SlotVec;
use core::sync::atomic::{AtomicUsize, Ordering};
@ -10,8 +10,8 @@ use super::*;
use crate::events::IoEvents;
use crate::fs::device::Device;
use crate::fs::utils::{
DirentVisitor, FileSystem, FsFlags, Inode, InodeMode, InodeType, IoctlCmd, Metadata, PageCache,
SuperBlock,
CStr256, DirentVisitor, FileSystem, FsFlags, Inode, InodeMode, InodeType, IoctlCmd, Metadata,
PageCache, PageCacheBackend, SuperBlock,
};
use crate::prelude::*;
use crate::process::signal::Poller;
@ -219,7 +219,7 @@ impl Inner {
}
struct DirEntry {
children: SlotVec<(Str256, Arc<RamInode>)>,
children: SlotVec<(CStr256, Arc<RamInode>)>,
this: Weak<RamInode>,
parent: Weak<RamInode>,
}
@ -248,7 +248,7 @@ impl DirEntry {
} else {
self.children
.iter()
.any(|(child, _)| child.as_ref() == name)
.any(|(child, _)| child.as_str().unwrap() == name)
}
}
@ -260,16 +260,16 @@ impl DirEntry {
} else {
self.children
.idxes_and_items()
.find(|(_, (child, _))| child.as_ref() == name)
.find(|(_, (child, _))| child.as_str().unwrap() == name)
.map(|(idx, (_, inode))| (idx + 2, inode.clone()))
}
}
fn append_entry(&mut self, name: &str, inode: Arc<RamInode>) -> usize {
self.children.put((Str256::from(name), inode))
self.children.put((CStr256::from(name), inode))
}
fn remove_entry(&mut self, idx: usize) -> Option<(Str256, Arc<RamInode>)> {
fn remove_entry(&mut self, idx: usize) -> Option<(CStr256, Arc<RamInode>)> {
assert!(idx >= 2);
self.children.remove(idx - 2)
}
@ -277,8 +277,8 @@ impl DirEntry {
fn substitute_entry(
&mut self,
idx: usize,
new_entry: (Str256, Arc<RamInode>),
) -> Option<(Str256, Arc<RamInode>)> {
new_entry: (CStr256, Arc<RamInode>),
) -> Option<(CStr256, Arc<RamInode>)> {
assert!(idx >= 2);
self.children.put_at(idx - 2, new_entry)
}
@ -315,7 +315,7 @@ impl DirEntry {
.skip_while(|(offset, _)| offset < &start_idx)
{
visitor.visit(
name.as_ref(),
name.as_str().unwrap(),
child.metadata().ino as u64,
child.metadata().type_,
offset,
@ -337,36 +337,6 @@ impl DirEntry {
}
}
#[repr(C)]
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord)]
pub struct Str256([u8; 256]);
impl AsRef<str> for Str256 {
fn as_ref(&self) -> &str {
let len = self.0.iter().enumerate().find(|(_, &b)| b == 0).unwrap().0;
str::from_utf8(&self.0[0..len]).unwrap()
}
}
impl<'a> From<&'a str> for Str256 {
fn from(s: &'a str) -> Self {
let mut inner = [0u8; 256];
let len = if s.len() > NAME_MAX {
NAME_MAX
} else {
s.len()
};
inner[0..len].copy_from_slice(&s.as_bytes()[0..len]);
Self(inner)
}
}
impl core::fmt::Debug for Str256 {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
write!(f, "{}", self.as_ref())
}
}
impl RamInode {
fn new_dir(fs: &Arc<RamFS>, mode: InodeMode, parent: &Weak<Self>) -> Arc<Self> {
Arc::new_cyclic(|weak_self| {
@ -439,7 +409,7 @@ impl RamInode {
}
}
impl Inode for RamInode {
impl PageCacheBackend for RamInode {
fn read_page(&self, _idx: usize, _frame: &VmFrame) -> Result<()> {
// do nothing
Ok(())
@ -450,6 +420,12 @@ impl Inode for RamInode {
Ok(())
}
fn npages(&self) -> usize {
self.0.read().metadata.blocks
}
}
impl Inode for RamInode {
fn page_cache(&self) -> Option<Vmo<Full>> {
self.0
.read()
@ -515,8 +491,9 @@ impl Inode for RamInode {
self.0.read().metadata.size
}
fn resize(&self, new_size: usize) {
self.0.write().resize(new_size)
fn resize(&self, new_size: usize) -> Result<()> {
self.0.write().resize(new_size);
Ok(())
}
fn atime(&self) -> Duration {
@ -535,6 +512,10 @@ impl Inode for RamInode {
self.0.write().metadata.mtime = time;
}
fn ino(&self) -> u64 {
self.0.read().metadata.ino as _
}
fn type_(&self) -> InodeType {
self.0.read().metadata.type_
}
@ -780,7 +761,7 @@ impl Inode for RamInode {
let (idx, inode) = self_dir
.get_entry(old_name)
.ok_or(Error::new(Errno::ENOENT))?;
self_dir.substitute_entry(idx, (Str256::from(new_name), inode));
self_dir.substitute_entry(idx, (CStr256::from(new_name), inode));
} else {
let (mut self_inode, mut target_inode) = write_lock_two_inodes(self, target);
let self_dir = self_inode.inner.as_direntry_mut().unwrap();

View File

@ -3,7 +3,7 @@ use crate::prelude::*;
use super::fs_resolver::{FsPath, FsResolver};
use super::procfs::ProcFS;
use super::ramfs::RamFS;
use super::utils::{InodeMode, InodeType, MountNode};
use super::utils::{FileSystem, InodeMode, InodeType, MountNode};
use cpio_decoder::{CpioDecoder, FileType};
use lending_iterator::LendingIterator;
@ -77,11 +77,18 @@ pub fn init(initramfs_buf: &[u8]) -> Result<()> {
// Mount DevFS
let dev_dentry = fs.lookup(&FsPath::try_from("/dev")?)?;
dev_dentry.mount(RamFS::new())?;
println!("[kernel] rootfs is ready");
Ok(())
}
/// Mounts the given file system at the directory identified by `fs_path`.
///
/// The path is looked up with a fresh `FsResolver` (i.e., resolved from the
/// root mount), and the dentry found there becomes the mount point. Errors
/// from the path lookup or from the mount operation are propagated.
pub fn mount_fs_at(fs: Arc<dyn FileSystem>, fs_path: &FsPath) -> Result<()> {
    let target_dentry = FsResolver::new().lookup(fs_path)?;
    target_dentry.mount(fs)?;
    Ok(())
}
static ROOT_MOUNT: Once<Arc<MountNode>> = Once::new();
pub fn init_root_mount() {

View File

@ -1,4 +1,3 @@
use aster_frame::vm::VmFrame;
use aster_rights::Full;
use core::time::Duration;
use core2::io::{Error as IoError, ErrorKind as IoErrorKind, Result as IoResult, Write};
@ -232,10 +231,12 @@ pub trait Inode: Any + Sync + Send {
self.len() == 0
}
fn resize(&self, new_size: usize);
fn resize(&self, new_size: usize) -> Result<()>;
fn metadata(&self) -> Metadata;
fn ino(&self) -> u64;
fn type_(&self) -> InodeType;
fn mode(&self) -> InodeMode;
@ -250,14 +251,6 @@ pub trait Inode: Any + Sync + Send {
fn set_mtime(&self, time: Duration);
fn read_page(&self, idx: usize, frame: &VmFrame) -> Result<()> {
Err(Error::new(Errno::EISDIR))
}
fn write_page(&self, idx: usize, frame: &VmFrame) -> Result<()> {
Err(Error::new(Errno::EISDIR))
}
fn page_cache(&self) -> Option<Vmo<Full>> {
None
}

View File

@ -11,7 +11,7 @@ pub use fs::{FileSystem, FsFlags, SuperBlock};
pub use inode::{Inode, InodeMode, InodeType, Metadata};
pub use ioctl::IoctlCmd;
pub use mount::MountNode;
pub use page_cache::PageCache;
pub use page_cache::{PageCache, PageCacheBackend};
pub use status_flags::StatusFlags;
mod access_mode;
@ -28,6 +28,8 @@ mod mount;
mod page_cache;
mod status_flags;
use crate::prelude::*;
#[derive(Copy, PartialEq, Eq, Clone, Debug)]
pub enum SeekFrom {
Start(usize),
@ -43,3 +45,152 @@ pub const NAME_MAX: usize = 255;
/// The upper limit for resolving symbolic links
pub const SYMLINKS_MAX: usize = 40;
/// A null-terminated string with a 256-byte capacity (used, e.g., for
/// directory entry names in ramfs).
pub type CStr256 = FixedCStr<256>;
/// A fixed-capacity string of up to 16 bytes (not null-terminated).
pub type Str16 = FixedStr<16>;
/// A fixed-capacity string of up to 64 bytes (not null-terminated).
pub type Str64 = FixedStr<64>;
/// An owned C-compatible string with a fixed capacity of `N`.
///
/// The string is terminated with a null byte; the `From` constructors always
/// reserve the final byte for the terminator, truncating longer input.
#[repr(C)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Pod)]
pub struct FixedCStr<const N: usize>([u8; N]);

impl<const N: usize> FixedCStr<N> {
    /// Returns the length of the string, excluding the null terminator.
    pub fn len(&self) -> usize {
        // Invariant: values built via the `From` impls always contain a null
        // byte, so the search cannot fail for such values.
        self.0.iter().position(|&byte| byte == 0).unwrap()
    }

    /// Returns `true` if no bytes precede the null terminator.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Interprets the bytes before the null terminator as UTF-8.
    pub fn as_str(&self) -> Result<&str> {
        Ok(alloc::str::from_utf8(self.as_bytes())?)
    }

    /// Views the contents (including the terminator) as a `CStr`.
    pub fn as_cstr(&self) -> Result<&CStr> {
        Ok(CStr::from_bytes_with_nul(self.as_bytes_with_nul())?)
    }

    /// Returns the bytes before the null terminator.
    pub fn as_bytes(&self) -> &[u8] {
        &self.0[..self.len()]
    }

    /// Returns the bytes including the null terminator.
    pub fn as_bytes_with_nul(&self) -> &[u8] {
        &self.0[..=self.len()]
    }
}

impl<'a, const N: usize> From<&'a [u8]> for FixedCStr<N> {
    /// Copies `bytes` up to (not including) the first null byte; input longer
    /// than `N - 1` bytes is truncated so that the terminator always fits.
    fn from(bytes: &'a [u8]) -> Self {
        assert!(N > 0);
        // Stop at the first embedded null (or the end of the slice), then
        // clamp to `N - 1` to leave room for the terminator.
        let copy_len = bytes
            .iter()
            .position(|&byte| byte == 0)
            .unwrap_or(bytes.len())
            .min(N - 1);
        let mut buf = [0u8; N];
        buf[..copy_len].copy_from_slice(&bytes[..copy_len]);
        Self(buf)
    }
}

impl<'a, const N: usize> From<&'a str> for FixedCStr<N> {
    fn from(string: &'a str) -> Self {
        Self::from(string.as_bytes())
    }
}

impl<'a, const N: usize> From<&'a CStr> for FixedCStr<N> {
    fn from(cstr: &'a CStr) -> Self {
        Self::from(cstr.to_bytes_with_nul())
    }
}

impl<const N: usize> Default for FixedCStr<N> {
    /// An all-zero buffer, i.e., the empty string.
    fn default() -> Self {
        Self([0u8; N])
    }
}

impl<const N: usize> Debug for FixedCStr<N> {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        // Fall back to the raw bytes when the contents are not a valid CStr.
        if let Ok(cstr) = self.as_cstr() {
            write!(f, "{:?}", cstr)
        } else {
            write!(f, "{:?}", self.as_bytes())
        }
    }
}
/// An owned string with a fixed capacity of `N`.
///
/// Unlike `FixedCStr`, the buffer may be completely full: a missing null
/// byte simply means the string occupies all `N` bytes.
#[repr(C)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Pod)]
pub struct FixedStr<const N: usize>([u8; N]);

impl<const N: usize> FixedStr<N> {
    /// Returns the string length; a buffer with no null byte is full.
    pub fn len(&self) -> usize {
        self.0.iter().position(|&byte| byte == 0).unwrap_or(N)
    }

    /// Returns `true` if the string holds no bytes.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Interprets the stored bytes as UTF-8.
    pub fn as_str(&self) -> Result<&str> {
        Ok(alloc::str::from_utf8(self.as_bytes())?)
    }

    /// Returns the stored bytes (up to the first null byte, if any).
    pub fn as_bytes(&self) -> &[u8] {
        &self.0[..self.len()]
    }
}

impl<'a, const N: usize> From<&'a [u8]> for FixedStr<N> {
    /// Copies `bytes` up to the first null byte, truncating to at most `N`.
    fn from(bytes: &'a [u8]) -> Self {
        let copy_len = bytes
            .iter()
            .position(|&byte| byte == 0)
            .unwrap_or(bytes.len())
            .min(N);
        let mut buf = [0u8; N];
        buf[..copy_len].copy_from_slice(&bytes[..copy_len]);
        Self(buf)
    }
}

impl<'a, const N: usize> From<&'a str> for FixedStr<N> {
    fn from(string: &'a str) -> Self {
        Self::from(string.as_bytes())
    }
}

impl<const N: usize> Default for FixedStr<N> {
    /// An all-zero buffer, i.e., the empty string.
    fn default() -> Self {
        Self([0u8; N])
    }
}

impl<const N: usize> Debug for FixedStr<N> {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        // Fall back to the raw bytes when the contents are not valid UTF-8.
        if let Ok(string) = self.as_str() {
            write!(f, "{}", string)
        } else {
            write!(f, "{:?}", self.as_bytes())
        }
    }
}

View File

@ -1,4 +1,3 @@
use super::Inode;
use crate::prelude::*;
use crate::vm::vmo::{get_page_idx_range, Pager, Vmo, VmoFlags, VmoOptions};
use aster_rights::Full;
@ -13,9 +12,9 @@ pub struct PageCache {
}
impl PageCache {
/// Creates an empty size page cache associated with a new inode.
pub fn new(backed_inode: Weak<dyn Inode>) -> Result<Self> {
let manager = Arc::new(PageCacheManager::new(backed_inode));
/// Creates an empty size page cache associated with a new backend.
pub fn new(backend: Weak<dyn PageCacheBackend>) -> Result<Self> {
let manager = Arc::new(PageCacheManager::new(backend));
let pages = VmoOptions::<Full>::new(0)
.flags(VmoFlags::RESIZABLE)
.pager(manager.clone())
@ -23,12 +22,12 @@ impl PageCache {
Ok(Self { pages, manager })
}
/// Creates a page cache associated with an existing inode.
/// Creates a page cache associated with an existing backend.
///
/// The `capacity` is the initial cache size required by the inode.
/// It is usually used the same size as the inode.
pub fn with_capacity(capacity: usize, backed_inode: Weak<dyn Inode>) -> Result<Self> {
let manager = Arc::new(PageCacheManager::new(backed_inode));
/// The `capacity` is the initial cache size required by the backend.
/// This size usually corresponds to the size of the backend.
pub fn with_capacity(capacity: usize, backend: Weak<dyn PageCacheBackend>) -> Result<Self> {
let manager = Arc::new(PageCacheManager::new(backend));
let pages = VmoOptions::<Full>::new(capacity)
.flags(VmoFlags::RESIZABLE)
.pager(manager.clone())
@ -36,7 +35,7 @@ impl PageCache {
Ok(Self { pages, manager })
}
/// Returns the Vmo object backed by inode.
/// Returns the Vmo object.
// TODO: The capability is too highrestrict it to eliminate the possibility of misuse.
// For example, the `resize` api should be forbidded.
pub fn pages(&self) -> Vmo<Full> {
@ -44,10 +43,15 @@ impl PageCache {
}
/// Evict the data within a specified range from the page cache and persist
/// them to the disk.
/// them to the backend.
pub fn evict_range(&self, range: Range<usize>) -> Result<()> {
self.manager.evict_range(range)
}
/// Returns the backend.
pub fn backend(&self) -> Arc<dyn PageCacheBackend> {
self.manager.backend()
}
}
impl Debug for PageCache {
@ -61,33 +65,36 @@ impl Debug for PageCache {
struct PageCacheManager {
pages: Mutex<LruCache<usize, Page>>,
backed_inode: Weak<dyn Inode>,
backend: Weak<dyn PageCacheBackend>,
}
impl PageCacheManager {
pub fn new(backed_inode: Weak<dyn Inode>) -> Self {
pub fn new(backend: Weak<dyn PageCacheBackend>) -> Self {
Self {
pages: Mutex::new(LruCache::unbounded()),
backed_inode,
backend,
}
}
pub fn backend(&self) -> Arc<dyn PageCacheBackend> {
self.backend.upgrade().unwrap()
}
pub fn evict_range(&self, range: Range<usize>) -> Result<()> {
let page_idx_range = get_page_idx_range(&range);
let mut pages = self.pages.lock();
for page_idx in page_idx_range {
if let Some(page) = pages.get_mut(&page_idx) {
for idx in page_idx_range {
if let Some(page) = pages.get_mut(&idx) {
if let PageState::Dirty = page.state() {
self.backed_inode
.upgrade()
.unwrap()
.write_page(page_idx, page.frame())?;
page.set_state(PageState::UpToDate);
let backend = self.backend();
if idx < backend.npages() {
backend.write_page(idx, page.frame())?;
page.set_state(PageState::UpToDate);
}
}
} else {
warn!("page {} is not in page cache, do nothing", page_idx);
}
}
Ok(())
}
}
@ -101,53 +108,50 @@ impl Debug for PageCacheManager {
}
impl Pager for PageCacheManager {
fn commit_page(&self, offset: usize) -> Result<VmFrame> {
let page_idx = offset / PAGE_SIZE;
fn commit_page(&self, idx: usize) -> Result<VmFrame> {
let mut pages = self.pages.lock();
let frame = if let Some(page) = pages.get(&page_idx) {
let frame = if let Some(page) = pages.get(&idx) {
page.frame().clone()
} else {
let backed_inode = self.backed_inode.upgrade().unwrap();
let page = if offset < backed_inode.len() {
let backend = self.backend();
let page = if idx < backend.npages() {
let mut page = Page::alloc()?;
backed_inode.read_page(page_idx, page.frame())?;
backend.read_page(idx, page.frame())?;
page.set_state(PageState::UpToDate);
page
} else {
Page::alloc_zero()?
};
let frame = page.frame().clone();
pages.put(page_idx, page);
pages.put(idx, page);
frame
};
Ok(frame)
}
fn update_page(&self, offset: usize) -> Result<()> {
let page_idx = offset / PAGE_SIZE;
fn update_page(&self, idx: usize) -> Result<()> {
let mut pages = self.pages.lock();
if let Some(page) = pages.get_mut(&page_idx) {
if let Some(page) = pages.get_mut(&idx) {
page.set_state(PageState::Dirty);
} else {
error!("page {} is not in page cache", page_idx);
panic!();
warn!("The page {} is not in page cache", idx);
}
Ok(())
}
fn decommit_page(&self, offset: usize) -> Result<()> {
let page_idx = offset / PAGE_SIZE;
fn decommit_page(&self, idx: usize) -> Result<()> {
let mut pages = self.pages.lock();
if let Some(page) = pages.pop(&page_idx) {
if let Some(page) = pages.pop(&idx) {
if let PageState::Dirty = page.state() {
self.backed_inode
.upgrade()
.unwrap()
.write_page(page_idx, page.frame())?
let backend = self.backend();
if idx < backend.npages() {
backend.write_page(idx, page.frame())?;
}
}
} else {
warn!("page {} is not in page cache, do nothing", page_idx);
}
Ok(())
}
}
@ -200,3 +204,13 @@ enum PageState {
/// The page is available to read and write.
Dirty,
}
/// This trait represents the backend for the page cache.
///
/// A backend (e.g., an inode or a block device) supplies page-sized data to
/// the cache on demand and persists dirty pages that the cache writes back.
/// Page indices past `npages()` are treated as out of range by the cache
/// manager, which skips the write-back for them.
pub trait PageCacheBackend: Sync + Send {
    /// Reads a page from the backend.
    fn read_page(&self, idx: usize, frame: &VmFrame) -> Result<()>;
    /// Writes a page to the backend.
    fn write_page(&self, idx: usize, frame: &VmFrame) -> Result<()>;
    /// Returns the number of pages in the backend.
    fn npages(&self) -> usize;
}

View File

@ -19,6 +19,8 @@
#![feature(register_tool)]
#![feature(trait_upcasting)]
#![feature(format_args_nl)]
#![feature(int_roundings)]
#![feature(step_trait)]
#![register_tool(component_access_control)]
use crate::{
@ -73,6 +75,7 @@ fn init_thread() {
current_thread!().tid()
);
net::lazy_init();
fs::lazy_init();
// driver::pci::virtio::block::block_device_test();
let thread = Thread::spawn_kernel_thread(ThreadOptions::new(|| {
println!("[kernel] Hello world from kernel!");

View File

@ -13,7 +13,7 @@ pub(crate) use alloc::sync::Weak;
pub(crate) use alloc::vec;
pub(crate) use alloc::vec::Vec;
pub(crate) use aster_frame::config::PAGE_SIZE;
pub(crate) use aster_frame::sync::{Mutex, MutexGuard, RwLock, SpinLock, SpinLockGuard};
pub(crate) use aster_frame::sync::{Mutex, MutexGuard, RwLock, RwMutex, SpinLock, SpinLockGuard};
pub(crate) use aster_frame::vm::Vaddr;
pub(crate) use bitflags::bitflags;
pub(crate) use core::any::Any;

View File

@ -379,13 +379,13 @@ fn clone_cpu_context(
}
fn clone_fs(
parent_fs: &Arc<RwLock<FsResolver>>,
parent_fs: &Arc<RwMutex<FsResolver>>,
clone_flags: CloneFlags,
) -> Arc<RwLock<FsResolver>> {
) -> Arc<RwMutex<FsResolver>> {
if clone_flags.contains(CloneFlags::CLONE_FS) {
parent_fs.clone()
} else {
Arc::new(RwLock::new(parent_fs.read().clone()))
Arc::new(RwMutex::new(parent_fs.read().clone()))
}
}

View File

@ -23,7 +23,7 @@ pub struct ProcessBuilder<'a> {
envp: Option<Vec<CString>>,
process_vm: Option<ProcessVm>,
file_table: Option<Arc<Mutex<FileTable>>>,
fs: Option<Arc<RwLock<FsResolver>>>,
fs: Option<Arc<RwMutex<FsResolver>>>,
umask: Option<Arc<RwLock<FileCreationMask>>>,
resource_limits: Option<ResourceLimits>,
sig_dispositions: Option<Arc<Mutex<SigDispositions>>>,
@ -64,7 +64,7 @@ impl<'a> ProcessBuilder<'a> {
self
}
pub fn fs(&mut self, fs: Arc<RwLock<FsResolver>>) -> &mut Self {
pub fn fs(&mut self, fs: Arc<RwMutex<FsResolver>>) -> &mut Self {
self.fs = Some(fs);
self
}
@ -142,7 +142,7 @@ impl<'a> ProcessBuilder<'a> {
.unwrap();
let fs = fs
.or_else(|| Some(Arc::new(RwLock::new(FsResolver::new()))))
.or_else(|| Some(Arc::new(RwMutex::new(FsResolver::new()))))
.unwrap();
let umask = umask

View File

@ -64,7 +64,7 @@ pub struct Process {
/// File table
file_table: Arc<Mutex<FileTable>>,
/// FsResolver
fs: Arc<RwLock<FsResolver>>,
fs: Arc<RwMutex<FsResolver>>,
/// umask
umask: Arc<RwLock<FileCreationMask>>,
/// resource limits
@ -84,7 +84,7 @@ impl Process {
executable_path: String,
process_vm: ProcessVm,
file_table: Arc<Mutex<FileTable>>,
fs: Arc<RwLock<FsResolver>>,
fs: Arc<RwMutex<FsResolver>>,
umask: Arc<RwLock<FileCreationMask>>,
sig_dispositions: Arc<Mutex<SigDispositions>>,
resource_limits: ResourceLimits,
@ -496,7 +496,7 @@ impl Process {
&self.file_table
}
pub fn fs(&self) -> &Arc<RwLock<FsResolver>> {
pub fn fs(&self) -> &Arc<RwMutex<FsResolver>> {
&self.fs
}
@ -595,7 +595,7 @@ mod test {
String::new(),
ProcessVm::alloc(),
Arc::new(Mutex::new(FileTable::new())),
Arc::new(RwLock::new(FsResolver::new())),
Arc::new(RwMutex::new(FsResolver::new())),
Arc::new(RwLock::new(FileCreationMask::default())),
Arc::new(Mutex::new(SigDispositions::default())),
ResourceLimits::default(),

View File

@ -104,3 +104,25 @@ pub fn now_as_duration(clock_id: &ClockID) -> Result<Duration> {
}
}
}
/// Unix time measures time by the number of seconds that have elapsed since
/// the Unix epoch, without adjustments made due to leap seconds.
///
/// Stored as a 32-bit second count (`#[repr(C)]` + `Pod`), presumably to
/// match a 32-bit on-disk timestamp format — TODO confirm against the ext2
/// structures introduced in this commit.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Pod)]
pub struct UnixTime {
    sec: u32,
}

impl From<Duration> for UnixTime {
    fn from(duration: Duration) -> Self {
        Self {
            // NOTE(review): `as u32` silently truncates second counts above
            // u32::MAX (year 2106); acceptable only for a 32-bit format.
            sec: duration.as_secs() as u32,
        }
    }
}

impl From<UnixTime> for Duration {
    fn from(time: UnixTime) -> Self {
        Duration::from_secs(time.sec as _)
    }
}

View File

@ -154,7 +154,7 @@ impl VmoInner {
}
let frame = match &self.pager {
None => VmAllocOptions::new(1).alloc_single()?,
Some(pager) => pager.commit_page(offset)?,
Some(pager) => pager.commit_page(page_idx)?,
};
self.insert_frame(page_idx, frame);
Ok(())
@ -164,7 +164,7 @@ impl VmoInner {
let page_idx = offset / PAGE_SIZE;
if self.committed_pages.remove(&page_idx).is_some() {
if let Some(pager) = &self.pager {
pager.decommit_page(offset)?;
pager.decommit_page(page_idx)?;
}
}
Ok(())
@ -302,7 +302,7 @@ impl Vmo_ {
if let Some(pager) = &self.inner.lock().pager {
let page_idx_range = get_page_idx_range(&write_range);
for page_idx in page_idx_range {
pager.update_page(page_idx * PAGE_SIZE)?;
pager.update_page(page_idx)?;
}
}
Ok(())

View File

@ -11,7 +11,7 @@ use aster_frame::vm::VmFrame;
/// Finally, when a frame is no longer needed (i.e., on decommits),
/// the frame pager will also be notified.
pub trait Pager: Send + Sync {
/// Ask the pager to provide a frame at a specified offset (in bytes).
/// Ask the pager to provide a frame at a specified index.
///
/// After a page of a VMO is committed, the VMO shall not call this method
/// again until the page is decommitted. But a robust implementation of
@ -22,13 +22,10 @@ pub trait Pager: Send + Sync {
/// and is to be committed again, then the pager is free to return
/// whatever frame that may or may not be the same as the last time.
///
/// It is up to the pager to decide the range of valid offsets.
///
/// The offset will be rounded down to page boundary.
fn commit_page(&self, offset: usize) -> Result<VmFrame>;
/// It is up to the pager to decide the range of valid indices.
fn commit_page(&self, idx: usize) -> Result<VmFrame>;
/// Notify the pager that the frame at a specified offset (in bytes)
/// has been updated.
/// Notify the pager that the frame at a specified index has been updated.
///
/// Being aware of the updates allow the pager (e.g., an inode) to
/// know which pages are dirty and only write back the _dirty_ pages back
@ -38,12 +35,9 @@ pub trait Pager: Send + Sync {
/// But a robust implementation of `Pager` should not make
/// such an assumption for its correctness; instead, it should simply ignore the
/// call or return an error.
///
/// The offset will be rounded down to page boundary.
fn update_page(&self, offset: usize) -> Result<()>;
fn update_page(&self, idx: usize) -> Result<()>;
/// Notify the pager that the frame at the specified offset (in bytes)
/// has been decommitted.
/// Notify the pager that the frame at the specified index has been decommitted.
///
/// Knowing that a frame is no longer needed, the pager (e.g., an inode)
/// can free the frame after writing back its data to the disk.
@ -52,7 +46,5 @@ pub trait Pager: Send + Sync {
/// But a robust implementation of `Pager` should not make
/// such an assumption for its correctness; instead, it should simply ignore the
/// call or return an error.
///
/// The offset will be rounded down to page boundary.
fn decommit_page(&self, offset: usize) -> Result<()>;
fn decommit_page(&self, idx: usize) -> Result<()>;
}