Support big file for Ext2

commit a60a8ad3e1 (parent bd8be26b30)
kernel/aster-nix/src/fs/ext2/block_group.rs

@@ -3,6 +3,7 @@
 use aster_util::id_allocator::IdAlloc;

 use super::{
+    block_ptr::Ext2Bid,
     fs::Ext2,
     inode::{Inode, InodeDesc, RawInode},
     prelude::*,
@@ -18,7 +19,7 @@ pub(super) struct BlockGroup {
 }

 struct BlockGroupImpl {
-    inode_table_bid: Bid,
+    inode_table_bid: Ext2Bid,
     raw_inodes_size: usize,
     inner: RwMutex<Inner>,
     fs: Weak<Ext2>,
@@ -47,12 +48,12 @@ impl BlockGroup {
            GroupDescriptor::from(raw_descriptor)
        };

-       let get_bitmap = |bid: Bid, capacity: usize| -> Result<IdAlloc> {
+       let get_bitmap = |bid: Ext2Bid, capacity: usize| -> Result<IdAlloc> {
            if capacity > BLOCK_SIZE * 8 {
                return_errno_with_message!(Errno::EINVAL, "bad bitmap");
            }
            let mut buf = vec![0u8; BLOCK_SIZE];
-           block_device.read_bytes(bid.to_offset(), &mut buf)?;
+           block_device.read_bytes(bid as usize * BLOCK_SIZE, &mut buf)?;
            Ok(IdAlloc::from_bytes_with_capacity(&buf, capacity))
        };

@@ -173,27 +174,38 @@ impl BlockGroup {
         inner.inode_cache.remove(&inode_idx);
     }

-    /// Allocates and returns a block index.
-    pub fn alloc_block(&self) -> Option<u32> {
+    /// Allocates and returns a consecutive range of block indices.
+    ///
+    /// Returns `None` if the allocation fails.
+    ///
+    /// The actual allocated range size may be smaller than the requested `count` if
+    /// insufficient consecutive blocks are available.
+    pub fn alloc_blocks(&self, count: Ext2Bid) -> Option<Range<Ext2Bid>> {
         // The fast path
         if self.bg_impl.inner.read().metadata.free_blocks_count() == 0 {
             return None;
         }

         // The slow path
-        self.bg_impl.inner.write().metadata.alloc_block()
+        self.bg_impl.inner.write().metadata.alloc_blocks(count)
     }

-    /// Frees the allocated block idx.
+    /// Frees the consecutive range of allocated block indices.
     ///
     /// # Panic
     ///
-    /// If `block_idx` has not been allocated before, then the method panics.
-    pub fn free_block(&self, block_idx: u32) {
-        let mut inner = self.bg_impl.inner.write();
-        assert!(inner.metadata.is_block_allocated(block_idx));
-        inner.metadata.free_block(block_idx);
+    /// If the `range` is out of bounds, this method will panic.
+    /// If one of the `idx` in `range` has not been allocated before, then the method panics.
+    pub fn free_blocks(&self, range: Range<Ext2Bid>) {
+        if range.is_empty() {
+            return;
+        }
+
+        let mut inner = self.bg_impl.inner.write();
+        for idx in range.clone() {
+            assert!(inner.metadata.is_block_allocated(idx));
+        }
+        inner.metadata.free_blocks(range);
     }

     /// Writes back the raw inode metadata to the raw inode metadata cache.
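Note the fast-path/slow-path split kept by `alloc_blocks` above: a cheap read lock rejects the common full-group case before the write lock is taken for the actual allocation. Below is a minimal, self-contained sketch of the same locking pattern using `std::sync::RwLock` (the kernel code uses its own `RwMutex`, so this is an illustration of the idea, not the real API):

use std::sync::RwLock;

struct Counter {
    free: RwLock<u32>,
}

impl Counter {
    fn try_take(&self) -> Option<u32> {
        // Fast path: a read lock suffices to reject the common "no space"
        // case without contending with writers.
        if *self.free.read().unwrap() == 0 {
            return None;
        }
        // Slow path: re-check under the write lock, since another thread may
        // have taken the last unit between the two lock acquisitions.
        let mut free = self.free.write().unwrap();
        if *free == 0 {
            return None;
        }
        *free -= 1;
        Some(*free)
    }
}

fn main() {
    let c = Counter { free: RwLock::new(2) };
    assert!(c.try_take().is_some());
    assert!(c.try_take().is_some());
    assert!(c.try_take().is_none());
}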
@@ -206,7 +218,7 @@ impl BlockGroup {
     }

     /// Writes back the metadata of this group.
-    pub fn sync_metadata(&self, super_block: &SuperBlock) -> Result<()> {
+    pub fn sync_metadata(&self) -> Result<()> {
         if !self.bg_impl.inner.read().metadata.is_dirty() {
             return Ok(());
         }
@@ -219,14 +231,14 @@ impl BlockGroup {
         let mut bio_waiter = BioWaiter::new();
         // Writes back the inode bitmap.
-        let inode_bitmap_bid = inner.metadata.descriptor.inode_bitmap_bid;
+        let inode_bitmap_bid = Bid::new(inner.metadata.descriptor.inode_bitmap_bid as u64);
         bio_waiter.concat(fs.block_device().write_bytes_async(
             inode_bitmap_bid.to_offset(),
             inner.metadata.inode_bitmap.as_bytes(),
         )?);

         // Writes back the block bitmap.
-        let block_bitmap_bid = inner.metadata.descriptor.block_bitmap_bid;
+        let block_bitmap_bid = Bid::new(inner.metadata.descriptor.block_bitmap_bid as u64);
         bio_waiter.concat(fs.block_device().write_bytes_async(
             block_bitmap_bid.to_offset(),
             inner.metadata.block_bitmap.as_bytes(),
@@ -307,12 +319,12 @@ impl Debug for BlockGroup {

 impl PageCacheBackend for BlockGroupImpl {
     fn read_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
-        let bid = self.inode_table_bid + idx as u64;
+        let bid = self.inode_table_bid + idx as Ext2Bid;
         self.fs.upgrade().unwrap().read_block_async(bid, frame)
     }

     fn write_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
-        let bid = self.inode_table_bid + idx as u64;
+        let bid = self.inode_table_bid + idx as Ext2Bid;
         self.fs.upgrade().unwrap().write_block_async(bid, frame)
     }

@@ -356,19 +368,28 @@ impl GroupMetadata {
         }
     }

-    pub fn is_block_allocated(&self, block_idx: u32) -> bool {
+    pub fn is_block_allocated(&self, block_idx: Ext2Bid) -> bool {
         self.block_bitmap.is_allocated(block_idx as usize)
     }

-    pub fn alloc_block(&mut self) -> Option<u32> {
-        let block_idx = self.block_bitmap.alloc()?;
-        self.dec_free_blocks();
-        Some(block_idx as u32)
+    pub fn alloc_blocks(&mut self, count: Ext2Bid) -> Option<Range<Ext2Bid>> {
+        let mut current_count = count.min(self.free_blocks_count() as Ext2Bid) as usize;
+        while current_count > 0 {
+            let Some(range) = self.block_bitmap.alloc_consecutive(current_count) else {
+                // It is efficient to halve the value
+                current_count /= 2;
+                continue;
+            };
+            self.dec_free_blocks(current_count as u16);
+            return Some((range.start as Ext2Bid)..(range.end as Ext2Bid));
+        }
+        None
     }

-    pub fn free_block(&mut self, block_idx: u32) {
-        self.block_bitmap.free(block_idx as usize);
-        self.inc_free_blocks();
+    pub fn free_blocks(&mut self, range: Range<Ext2Bid>) {
+        self.block_bitmap
+            .free_consecutive((range.start as usize)..(range.end as usize));
+        self.inc_free_blocks(range.len() as u16);
     }

     pub fn free_inodes_count(&self) -> u16 {
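`GroupMetadata::alloc_blocks` above halves the request whenever no sufficiently long run of consecutive free bits exists, so the search terminates in O(log count) rounds instead of decrementing one by one. A standalone sketch of that halving strategy over a plain boolean bitmap; `alloc_consecutive` below is a hypothetical stand-in written for this example, not the real `IdAlloc` method:

/// Finds `count` consecutive free slots and marks them used.
fn alloc_consecutive(bitmap: &mut [bool], count: usize) -> Option<std::ops::Range<usize>> {
    if count == 0 {
        return None;
    }
    let mut run = 0;
    for i in 0..bitmap.len() {
        run = if bitmap[i] { 0 } else { run + 1 };
        if run == count {
            let range = (i + 1 - count)..(i + 1);
            bitmap[range.clone()].iter_mut().for_each(|b| *b = true);
            return Some(range);
        }
    }
    None
}

/// Halving retry: ask for `count`; on failure, halve the request.
fn alloc_blocks(bitmap: &mut [bool], count: usize) -> Option<std::ops::Range<usize>> {
    let mut current = count;
    while current > 0 {
        if let Some(range) = alloc_consecutive(bitmap, current) {
            return Some(range);
        }
        current /= 2;
    }
    None
}

fn main() {
    let mut bitmap = vec![false; 8];
    bitmap[3] = true; // the longest free run is 4 slots (indices 4..8)
    let range = alloc_blocks(&mut bitmap, 8).unwrap();
    assert_eq!(range.len(), 4); // requested 8, got the best available run
}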
@@ -388,13 +409,20 @@ impl GroupMetadata {
         self.descriptor.free_inodes_count -= 1;
     }

-    pub fn inc_free_blocks(&mut self) {
-        self.descriptor.free_blocks_count += 1;
+    pub fn inc_free_blocks(&mut self, count: u16) {
+        self.descriptor.free_blocks_count = self
+            .descriptor
+            .free_blocks_count
+            .checked_add(count)
+            .unwrap();
     }

-    pub fn dec_free_blocks(&mut self) {
-        debug_assert!(self.descriptor.free_blocks_count > 0);
-        self.descriptor.free_blocks_count -= 1;
+    pub fn dec_free_blocks(&mut self, count: u16) {
+        self.descriptor.free_blocks_count = self
+            .descriptor
+            .free_blocks_count
+            .checked_sub(count)
+            .unwrap();
     }

     pub fn inc_dirs(&mut self) {
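The counters switch from unchecked `+= 1` / `-= 1` to `checked_add`/`checked_sub` plus `unwrap`, so an accounting bug panics immediately rather than silently wrapping the `u16` field (plain arithmetic wraps without any check in release builds). A quick standalone demonstration of the semantics:

fn main() {
    let free_blocks: u16 = 3;
    // Checked arithmetic turns over/underflow into `None`...
    assert_eq!(free_blocks.checked_sub(4), None);
    assert_eq!(free_blocks.checked_add(u16::MAX), None);
    // ...so `unwrap` makes a corrupted count fail fast instead of wrapping.
    assert_eq!(free_blocks.checked_sub(3).unwrap(), 0);
}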
@@ -414,11 +442,11 @@ impl GroupMetadata {
 #[derive(Clone, Copy, Debug)]
 struct GroupDescriptor {
     /// Blocks usage bitmap block
-    block_bitmap_bid: Bid,
+    block_bitmap_bid: Ext2Bid,
     /// Inodes usage bitmap block
-    inode_bitmap_bid: Bid,
+    inode_bitmap_bid: Ext2Bid,
     /// Starting block of inode table
-    inode_table_bid: Bid,
+    inode_table_bid: Ext2Bid,
     /// Number of free blocks in group
     free_blocks_count: u16,
     /// Number of free inodes in group
@@ -430,9 +458,9 @@ struct GroupDescriptor {
 impl From<RawGroupDescriptor> for GroupDescriptor {
     fn from(desc: RawGroupDescriptor) -> Self {
         Self {
-            block_bitmap_bid: Bid::new(desc.block_bitmap as _),
-            inode_bitmap_bid: Bid::new(desc.inode_bitmap as _),
-            inode_table_bid: Bid::new(desc.inode_table as _),
+            block_bitmap_bid: desc.block_bitmap,
+            inode_bitmap_bid: desc.inode_bitmap,
+            inode_table_bid: desc.inode_table,
             free_blocks_count: desc.free_blocks_count,
             free_inodes_count: desc.free_inodes_count,
             dirs_count: desc.dirs_count,
@@ -461,9 +489,9 @@ pub(super) struct RawGroupDescriptor {
 impl From<&GroupDescriptor> for RawGroupDescriptor {
     fn from(desc: &GroupDescriptor) -> Self {
         Self {
-            block_bitmap: desc.block_bitmap_bid.to_raw() as _,
-            inode_bitmap: desc.inode_bitmap_bid.to_raw() as _,
-            inode_table: desc.inode_table_bid.to_raw() as _,
+            block_bitmap: desc.block_bitmap_bid,
+            inode_bitmap: desc.inode_bitmap_bid,
+            inode_table: desc.inode_table_bid,
             free_blocks_count: desc.free_blocks_count,
             free_inodes_count: desc.free_inodes_count,
             dirs_count: desc.dirs_count,
kernel/aster-nix/src/fs/ext2/block_ptr.rs (new file, 179 lines)

@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: MPL-2.0
+
+use super::prelude::*;
+
+pub type Ext2Bid = u32;
+
+/// The pointers to blocks for an inode.
+#[repr(C)]
+#[derive(Clone, Copy, Default, Debug, Pod)]
+pub struct BlockPtrs {
+    inner: [Ext2Bid; MAX_BLOCK_PTRS],
+}
+
+impl BlockPtrs {
+    /// Returns the direct block ID.
+    ///
+    /// # Panic
+    ///
+    /// If the `idx` is out of bounds, this method will panic.
+    pub fn direct(&self, idx: usize) -> Ext2Bid {
+        assert!(DIRECT_RANGE.contains(&idx));
+        self.inner[idx]
+    }
+
+    /// Sets the direct block ID.
+    ///
+    /// # Panic
+    ///
+    /// If the `idx` is out of bounds, this method will panic.
+    pub fn set_direct(&mut self, idx: usize, bid: Ext2Bid) {
+        assert!(DIRECT_RANGE.contains(&idx));
+        self.inner[idx] = bid;
+    }
+
+    /// Returns the block ID of the single indirect block pointer.
+    pub fn indirect(&self) -> Ext2Bid {
+        self.inner[INDIRECT]
+    }
+
+    /// Sets the block ID of the single indirect block pointer.
+    pub fn set_indirect(&mut self, bid: Ext2Bid) {
+        self.inner[INDIRECT] = bid;
+    }
+
+    /// Returns the block ID of the double indirect block pointer.
+    pub fn db_indirect(&self) -> Ext2Bid {
+        self.inner[DB_INDIRECT]
+    }
+
+    /// Sets the block ID of the double indirect block pointer.
+    pub fn set_db_indirect(&mut self, bid: Ext2Bid) {
+        self.inner[DB_INDIRECT] = bid;
+    }
+
+    /// Returns the block ID of the treble indirect block pointer.
+    pub fn tb_indirect(&self) -> Ext2Bid {
+        self.inner[TB_INDIRECT]
+    }
+
+    /// Sets the block ID of the treble indirect block pointer.
+    pub fn set_tb_indirect(&mut self, bid: Ext2Bid) {
+        self.inner[TB_INDIRECT] = bid;
+    }
+
+    /// Views it as a slice of `u8` bytes.
+    pub fn as_bytes(&self) -> &[u8] {
+        self.inner.as_bytes()
+    }
+
+    /// Views it as a mutable slice of `u8` bytes.
+    pub fn as_bytes_mut(&mut self) -> &mut [u8] {
+        self.inner.as_bytes_mut()
+    }
+}
+
+/// Represents the various ways in which a block ID can be located in Ext2.
+/// It is an enum with different variants corresponding to the level of indirection
+/// used to locate the block.
+///
+/// We choose `u16` for the indices because it is large enough to represent
+/// any index within an indirect block.
+#[derive(Debug)]
+pub enum BidPath {
+    /// Direct reference to a block. The block can be accessed directly through the given
+    /// index with no levels of indirection.
+    Direct(u16),
+    /// Single level of indirection. The block ID can be found at the specified index
+    /// within an indirect block.
+    Indirect(u16),
+    /// Double level of indirection. The first item is the index of the first-level
+    /// indirect block, and the second item is the index within the second-level
+    /// indirect block where the block ID can be found.
+    DbIndirect(u16, u16),
+    /// Treble level of indirection. The three values represent the index within the
+    /// first-level, second-level, and third-level indirect blocks, respectively.
+    /// The block ID can be found at the third-level indirect block.
+    TbIndirect(u16, u16, u16),
+}
+
+impl From<Ext2Bid> for BidPath {
+    fn from(bid: Ext2Bid) -> Self {
+        if bid < MAX_DIRECT_BLOCKS {
+            Self::Direct(bid as u16)
+        } else if bid < MAX_DIRECT_BLOCKS + MAX_INDIRECT_BLOCKS {
+            let indirect_bid = bid - MAX_DIRECT_BLOCKS;
+            Self::Indirect(indirect_bid as u16)
+        } else if bid < MAX_DIRECT_BLOCKS + MAX_INDIRECT_BLOCKS + MAX_DB_INDIRECT_BLOCKS {
+            let db_indirect_bid = bid - (MAX_DIRECT_BLOCKS + MAX_INDIRECT_BLOCKS);
+            let lvl1_idx = (db_indirect_bid / MAX_INDIRECT_BLOCKS) as u16;
+            let lvl2_idx = (db_indirect_bid % MAX_INDIRECT_BLOCKS) as u16;
+            Self::DbIndirect(lvl1_idx, lvl2_idx)
+        } else if bid
+            < MAX_DIRECT_BLOCKS
+                + MAX_INDIRECT_BLOCKS
+                + MAX_DB_INDIRECT_BLOCKS
+                + MAX_TB_INDIRECT_BLOCKS
+        {
+            let tb_indirect_bid =
+                bid - (MAX_DIRECT_BLOCKS + MAX_INDIRECT_BLOCKS + MAX_DB_INDIRECT_BLOCKS);
+            let lvl1_idx = (tb_indirect_bid / MAX_DB_INDIRECT_BLOCKS) as u16;
+            let lvl2_idx = ((tb_indirect_bid / MAX_INDIRECT_BLOCKS) % MAX_INDIRECT_BLOCKS) as u16;
+            let lvl3_idx = (tb_indirect_bid % MAX_INDIRECT_BLOCKS) as u16;
+            Self::TbIndirect(lvl1_idx, lvl2_idx, lvl3_idx)
+        } else {
+            // The bid value in Ext2 must not surpass the representation of BidPath.
+            unreachable!();
+        }
+    }
+}
+
+impl BidPath {
+    /// Returns the number of blocks remaining before the next indirect block is required.
+    pub fn cnt_to_next_indirect(&self) -> Ext2Bid {
+        match self {
+            Self::Direct(idx) => MAX_DIRECT_BLOCKS - (*idx as Ext2Bid),
+            Self::Indirect(idx) | Self::DbIndirect(_, idx) | Self::TbIndirect(_, _, idx) => {
+                MAX_INDIRECT_BLOCKS - (*idx as Ext2Bid)
+            }
+        }
+    }
+
+    /// Returns the last level index.
+    ///
+    /// This index corresponds to the position of a block within the most deeply nested
+    /// indirect block (if any), or the direct block index if no indirection is involved.
+    pub fn last_lvl_idx(&self) -> usize {
+        match self {
+            Self::Direct(idx)
+            | Self::Indirect(idx)
+            | Self::DbIndirect(_, idx)
+            | Self::TbIndirect(_, _, idx) => *idx as _,
+        }
+    }
+}
+
+/// Direct pointers to blocks.
+pub const DIRECT_RANGE: core::ops::Range<usize> = 0..12;
+/// The number of direct blocks.
+pub const MAX_DIRECT_BLOCKS: Ext2Bid = DIRECT_RANGE.end as Ext2Bid;
+
+/// Indirect pointer to blocks.
+pub const INDIRECT: usize = DIRECT_RANGE.end;
+/// The number of indirect blocks.
+pub const MAX_INDIRECT_BLOCKS: Ext2Bid = (BLOCK_SIZE / BID_SIZE) as Ext2Bid;
+
+/// Doubly indirect pointer to blocks.
+pub const DB_INDIRECT: usize = INDIRECT + 1;
+/// The number of doubly indirect blocks.
+pub const MAX_DB_INDIRECT_BLOCKS: Ext2Bid = MAX_INDIRECT_BLOCKS * MAX_INDIRECT_BLOCKS;
+
+/// Treble indirect pointer to blocks.
+pub const TB_INDIRECT: usize = DB_INDIRECT + 1;
+/// The number of trebly indirect blocks.
+pub const MAX_TB_INDIRECT_BLOCKS: Ext2Bid = MAX_INDIRECT_BLOCKS * MAX_DB_INDIRECT_BLOCKS;
+
+/// The number of block pointers.
+pub const MAX_BLOCK_PTRS: usize = TB_INDIRECT + 1;
+
+/// The size of the block ID.
+pub const BID_SIZE: usize = core::mem::size_of::<Ext2Bid>();
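For intuition about the tiers encoded by `BidPath`: assuming 4 KiB blocks (the actual `BLOCK_SIZE` comes from the fs prelude and is not shown in this diff), each indirect block holds 1024 four-byte block IDs, so the direct, indirect, doubly indirect, and trebly indirect tiers cover 12, 1024, 1024^2, and 1024^3 file blocks respectively. A standalone check of the index arithmetic used by `From<Ext2Bid> for BidPath`:

const BLOCK_SIZE: u32 = 4096; // assumed here; the real value comes from the fs prelude
const BID_SIZE: u32 = 4; // size_of::<u32>()
const DIRECT: u32 = 12;
const PER_INDIRECT: u32 = BLOCK_SIZE / BID_SIZE; // 1024 pointers per indirect block

fn main() {
    // File block 11 is the last direct pointer...
    assert!(11 < DIRECT);
    // ...and file block 12 is entry 0 of the single indirect block.
    assert_eq!(12 - DIRECT, 0);
    // The double-indirect tier starts after 12 + 1024 blocks; a bid inside it
    // splits into (first-level index, second-level index) by div/mod.
    let db_start = DIRECT + PER_INDIRECT;
    let bid = db_start + 3 * PER_INDIRECT + 7;
    let off = bid - db_start;
    assert_eq!((off / PER_INDIRECT, off % PER_INDIRECT), (3, 7));
    // With 4 KiB blocks the treble tier alone addresses 1024^3 blocks = 4 TiB,
    // which is what makes "big files" representable.
    let tb_blocks: u64 = (PER_INDIRECT as u64).pow(3);
    assert_eq!(tb_blocks * BLOCK_SIZE as u64, 4 << 40);
}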
kernel/aster-nix/src/fs/ext2/fs.rs

@@ -2,6 +2,7 @@

 use super::{
     block_group::{BlockGroup, RawGroupDescriptor},
+    block_ptr::Ext2Bid,
     inode::{FilePerm, FileType, Inode, InodeDesc, RawInode},
     prelude::*,
     super_block::{RawSuperBlock, SuperBlock, SUPER_BLOCK_OFFSET},
@@ -17,7 +18,7 @@ pub struct Ext2 {
     super_block: RwMutex<Dirty<SuperBlock>>,
     block_groups: Vec<BlockGroup>,
     inodes_per_group: u32,
-    blocks_per_group: u32,
+    blocks_per_group: Ext2Bid,
     inode_size: usize,
     block_size: usize,
     group_descriptors_segment: VmSegment,
@@ -112,7 +113,7 @@ impl Ext2 {
     }

     /// Returns the number of blocks in each block group.
-    pub fn blocks_per_group(&self) -> u32 {
+    pub fn blocks_per_group(&self) -> Ext2Bid {
         self.blocks_per_group
     }

@@ -208,46 +209,93 @@ impl Ext2 {
         Ok(())
     }

-    /// Allocates a new block.
+    /// Allocates a consecutive range of blocks.
     ///
-    /// Attempts to allocate from the `block_group_idx` group first.
+    /// The returned allocated range size may be smaller than the requested `count` if
+    /// insufficient consecutive blocks are available.
+    ///
+    /// Attempts to allocate blocks from the `block_group_idx` group first.
     /// If allocation is not possible from this group, then search the remaining groups.
-    pub(super) fn alloc_block(&self, block_group_idx: usize) -> Result<Bid> {
-        let mut block_group_idx = block_group_idx;
-        if block_group_idx >= self.block_groups.len() {
-            return_errno_with_message!(Errno::EINVAL, "invalid block group idx");
-        }
+    pub(super) fn alloc_blocks(
+        &self,
+        mut block_group_idx: usize,
+        count: Ext2Bid,
+    ) -> Option<Range<Ext2Bid>> {
+        if count > self.super_block.read().free_blocks_count() {
+            return None;
+        }

+        let mut remaining_count = count;
+        let mut allocated_range: Option<Range<Ext2Bid>> = None;
         for _ in 0..self.block_groups.len() {
+            if remaining_count == 0 {
+                break;
+            }
+
             if block_group_idx >= self.block_groups.len() {
                 block_group_idx = 0;
             }
             let block_group = &self.block_groups[block_group_idx];
-            if let Some(block_idx) = block_group.alloc_block() {
-                let bid = block_group_idx as u32 * self.blocks_per_group + block_idx;
-                self.super_block.write().dec_free_blocks();
-                return Ok(Bid::new(bid as _));
+            if let Some(range_in_group) = block_group.alloc_blocks(remaining_count) {
+                let device_range = {
+                    let start =
+                        (block_group_idx as Ext2Bid) * self.blocks_per_group + range_in_group.start;
+                    start..start + (range_in_group.len() as Ext2Bid)
+                };
+                match allocated_range {
+                    Some(ref mut range) => {
+                        if range.end == device_range.start {
+                            // Accumulate consecutive bids
+                            range.end = device_range.end;
+                            remaining_count -= range_in_group.len() as Ext2Bid;
+                        } else {
+                            block_group.free_blocks(range_in_group);
+                            break;
+                        }
+                    }
+                    None => {
+                        allocated_range = Some(device_range);
+                    }
+                }
             }
             block_group_idx += 1;
         }

-        return_errno_with_message!(Errno::ENOSPC, "no space on device");
+        if let Some(range) = allocated_range.as_ref() {
+            self.super_block
+                .write()
+                .dec_free_blocks(range.len() as Ext2Bid);
+        }
+        allocated_range
     }

-    /// Frees a block.
-    pub(super) fn free_block(&self, bid: Bid) -> Result<()> {
-        let (_, block_group) = self.block_group_of_bid(bid)?;
-        let block_idx = self.block_idx(bid);
-        // In order to prevent value underflow, it is necessary to increment
-        // the free block counter prior to freeing the block.
-        self.super_block.write().inc_free_blocks();
-        block_group.free_block(block_idx);
+    /// Frees a range of blocks.
+    pub(super) fn free_blocks(&self, range: Range<Ext2Bid>) -> Result<()> {
+        let mut current_range = range.clone();
+        while !current_range.is_empty() {
+            let (_, block_group) = self.block_group_of_bid(current_range.start)?;
+            let range_in_group = {
+                let start = self.block_idx(current_range.start);
+                let len = (current_range.len() as Ext2Bid).min(self.blocks_per_group - start);
+                start..start + len
+            };
+            // In order to prevent value underflow, it is necessary to increment
+            // the free block counter prior to freeing the block.
+            self.super_block
+                .write()
+                .inc_free_blocks(range_in_group.len() as Ext2Bid);
+            block_group.free_blocks(range_in_group.clone());
+            current_range.start += range_in_group.len() as Ext2Bid
+        }

         Ok(())
     }

     /// Reads contiguous blocks starting from the `bid` synchronously.
-    pub(super) fn read_blocks(&self, bid: Bid, segment: &VmSegment) -> Result<()> {
-        let status = self.block_device.read_blocks_sync(bid, segment)?;
+    pub(super) fn read_blocks(&self, bid: Ext2Bid, segment: &VmSegment) -> Result<()> {
+        let status = self
+            .block_device
+            .read_blocks_sync(Bid::new(bid as u64), segment)?;
         match status {
             BioStatus::Complete => Ok(()),
             err_status => Err(Error::from(err_status)),
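`Ext2::alloc_blocks` above only keeps accumulating across block groups while each group's allocation starts exactly where the previous one ended; otherwise it frees the non-adjacent piece back to its group and stops. A minimal sketch of that merge-or-stop rule on plain ranges (the roll-back of the rejected piece is reduced to a comment here):

use std::ops::Range;

/// Merges per-group allocations into one device-wide range, stopping as soon
/// as contiguity breaks -- the same rule used by `Ext2::alloc_blocks`.
fn accumulate(pieces: &[Range<u32>]) -> Option<Range<u32>> {
    let mut iter = pieces.iter();
    let mut acc = iter.next()?.clone();
    for piece in iter {
        if acc.end != piece.start {
            break; // non-contiguous: the real fs frees `piece` back to its group
        }
        acc.end = piece.end;
    }
    Some(acc)
}

fn main() {
    // Two adjacent pieces merge into one range...
    assert_eq!(accumulate(&[0..4, 4..6]), Some(0..6));
    // ...but a gap stops the accumulation early.
    assert_eq!(accumulate(&[0..4, 8..10]), Some(0..4));
}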
@@ -255,8 +303,10 @@ impl Ext2 {
     }

     /// Reads one block indicated by the `bid` synchronously.
-    pub(super) fn read_block(&self, bid: Bid, frame: &VmFrame) -> Result<()> {
-        let status = self.block_device.read_block_sync(bid, frame)?;
+    pub(super) fn read_block(&self, bid: Ext2Bid, frame: &VmFrame) -> Result<()> {
+        let status = self
+            .block_device
+            .read_block_sync(Bid::new(bid as u64), frame)?;
         match status {
             BioStatus::Complete => Ok(()),
             err_status => Err(Error::from(err_status)),
@@ -264,14 +314,16 @@ impl Ext2 {
     }

     /// Reads one block indicated by the `bid` asynchronously.
-    pub(super) fn read_block_async(&self, bid: Bid, frame: &VmFrame) -> Result<BioWaiter> {
-        let waiter = self.block_device.read_block(bid, frame)?;
+    pub(super) fn read_block_async(&self, bid: Ext2Bid, frame: &VmFrame) -> Result<BioWaiter> {
+        let waiter = self.block_device.read_block(Bid::new(bid as u64), frame)?;
         Ok(waiter)
     }

     /// Writes contiguous blocks starting from the `bid` synchronously.
-    pub(super) fn write_blocks(&self, bid: Bid, segment: &VmSegment) -> Result<()> {
-        let status = self.block_device.write_blocks_sync(bid, segment)?;
+    pub(super) fn write_blocks(&self, bid: Ext2Bid, segment: &VmSegment) -> Result<()> {
+        let status = self
+            .block_device
+            .write_blocks_sync(Bid::new(bid as u64), segment)?;
         match status {
             BioStatus::Complete => Ok(()),
             err_status => Err(Error::from(err_status)),
@@ -279,8 +331,10 @@ impl Ext2 {
     }

     /// Writes one block indicated by the `bid` synchronously.
-    pub(super) fn write_block(&self, bid: Bid, frame: &VmFrame) -> Result<()> {
-        let status = self.block_device.write_block_sync(bid, frame)?;
+    pub(super) fn write_block(&self, bid: Ext2Bid, frame: &VmFrame) -> Result<()> {
+        let status = self
+            .block_device
+            .write_block_sync(Bid::new(bid as u64), frame)?;
         match status {
             BioStatus::Complete => Ok(()),
             err_status => Err(Error::from(err_status)),
@@ -288,8 +342,8 @@ impl Ext2 {
     }

     /// Writes one block indicated by the `bid` asynchronously.
-    pub(super) fn write_block_async(&self, bid: Bid, frame: &VmFrame) -> Result<BioWaiter> {
-        let waiter = self.block_device.write_block(bid, frame)?;
+    pub(super) fn write_block_async(&self, bid: Ext2Bid, frame: &VmFrame) -> Result<BioWaiter> {
+        let waiter = self.block_device.write_block(Bid::new(bid as u64), frame)?;
         Ok(waiter)
     }

@@ -303,7 +357,7 @@ impl Ext2 {
         let mut super_block = self.super_block.write();
         // Writes back the metadata of block groups
         for block_group in &self.block_groups {
-            block_group.sync_metadata(&super_block)?;
+            block_group.sync_metadata()?;
         }

         let mut bio_waiter = BioWaiter::new();
@@ -353,10 +407,10 @@ impl Ext2 {
     }

     #[inline]
-    fn block_group_of_bid(&self, bid: Bid) -> Result<(usize, &BlockGroup)> {
-        let block_group_idx = (bid.to_raw() / (self.blocks_per_group as u64)) as usize;
+    fn block_group_of_bid(&self, bid: Ext2Bid) -> Result<(usize, &BlockGroup)> {
+        let block_group_idx = (bid / self.blocks_per_group) as usize;
         if block_group_idx >= self.block_groups.len() {
-            return_errno!(Errno::ENOENT);
+            return_errno_with_message!(Errno::EINVAL, "invalid bid");
         }
         Ok((block_group_idx, &self.block_groups[block_group_idx]))
     }
@@ -376,7 +430,7 @@ impl Ext2 {
     }

     #[inline]
-    fn block_idx(&self, bid: Bid) -> u32 {
-        (bid.to_raw() as u32) % self.blocks_per_group
+    fn block_idx(&self, bid: Ext2Bid) -> Ext2Bid {
+        bid % self.blocks_per_group
     }
 }
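`block_group_of_bid` and `block_idx` are the div/mod decomposition of a device-wide block ID, and allocation performs the inverse multiplication. A tiny standalone check, assuming a hypothetical 8192 blocks per group (the real value is read from the superblock):

fn main() {
    let blocks_per_group: u32 = 8192; // assumed for illustration
    let bid: u32 = 3 * blocks_per_group + 42;
    // Decompose a device-wide bid into (group index, index within group).
    let (group, idx) = (bid / blocks_per_group, bid % blocks_per_group);
    assert_eq!((group, idx), (3, 42));
    // Round-trip back to the device-wide bid, as done when allocating.
    assert_eq!(group * blocks_per_group + idx, bid);
}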
kernel/aster-nix/src/fs/ext2/super_block.rs

@@ -36,10 +36,10 @@ impl From<RwMutexReadGuard<'_, Dirty<Ext2SuperBlock>>> for SuperBlock {
             magic: EXT2_MAGIC as _,
             bsize: ext2_sb.block_size(),
             blocks: ext2_sb.total_blocks() as _,
-            bfree: ext2_sb.free_blocks() as _,
-            bavail: ext2_sb.free_blocks() as _,
+            bfree: ext2_sb.free_blocks_count() as _,
+            bavail: ext2_sb.free_blocks_count() as _,
             files: ext2_sb.total_inodes() as _,
-            ffree: ext2_sb.free_inodes() as _,
+            ffree: ext2_sb.free_inodes_count() as _,
             fsid: 0, // TODO
             namelen: NAME_MAX,
             frsize: ext2_sb.fragment_size(),
kernel/aster-nix/src/fs/ext2/indirect_block_cache.rs (new file, 192 lines)

@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: MPL-2.0
+
+use lru::LruCache;
+
+use super::{block_ptr::BID_SIZE, fs::Ext2, prelude::*};
+
+/// `IndirectBlockCache` is a caching structure that stores `IndirectBlock` objects for Ext2.
+///
+/// This cache uses an `LruCache` to manage the indirect blocks, ensuring that frequently accessed
+/// blocks remain in memory for quick retrieval, while less used blocks can be evicted to make room
+/// for new blocks.
+#[derive(Debug)]
+pub struct IndirectBlockCache {
+    cache: LruCache<u32, IndirectBlock>,
+    fs: Weak<Ext2>,
+}
+
+impl IndirectBlockCache {
+    /// The upper bound on the size of the cache.
+    ///
+    /// Use the same value as `BH_LRU_SIZE`.
+    const MAX_SIZE: usize = 16;
+
+    /// Creates a new cache.
+    pub fn new(fs: Weak<Ext2>) -> Self {
+        Self {
+            cache: LruCache::unbounded(),
+            fs,
+        }
+    }
+
+    /// Retrieves a reference to an `IndirectBlock` by its `bid`.
+    ///
+    /// If the block is not present in the cache, it will be loaded from the disk.
+    pub fn find(&mut self, bid: u32) -> Result<&IndirectBlock> {
+        self.try_shrink()?;
+
+        let fs = self.fs();
+        let load_block = || -> Result<IndirectBlock> {
+            let mut block = IndirectBlock::alloc_uninit()?;
+            fs.read_block(bid, &block.frame)?;
+            block.state = State::UpToDate;
+            Ok(block)
+        };
+
+        self.cache.try_get_or_insert(bid, load_block)
+    }
+
+    /// Retrieves a mutable reference to an `IndirectBlock` by its `bid`.
+    ///
+    /// If the block is not present in the cache, it will be loaded from the disk.
+    pub fn find_mut(&mut self, bid: u32) -> Result<&mut IndirectBlock> {
+        self.try_shrink()?;
+
+        let fs = self.fs();
+        let load_block = || -> Result<IndirectBlock> {
+            let mut block = IndirectBlock::alloc_uninit()?;
+            fs.read_block(bid, &block.frame)?;
+            block.state = State::UpToDate;
+            Ok(block)
+        };
+
+        self.cache.try_get_or_insert_mut(bid, load_block)
+    }
+
+    /// Inserts or updates an `IndirectBlock` in the cache with the specified `bid`.
+    pub fn insert(&mut self, bid: u32, block: IndirectBlock) -> Result<()> {
+        self.try_shrink()?;
+        self.cache.put(bid, block);
+        Ok(())
+    }
+
+    /// Removes and returns the `IndirectBlock` corresponding to the `bid`
+    /// from the cache, or `None` if it does not exist.
+    pub fn remove(&mut self, bid: u32) -> Option<IndirectBlock> {
+        self.cache.pop(&bid)
+    }
+
+    /// Evicts all blocks from the cache, persisting any with a `Dirty` state to the disk.
+    pub fn evict_all(&mut self) -> Result<()> {
+        let mut bio_waiter = BioWaiter::new();
+        loop {
+            let Some((bid, block)) = self.cache.pop_lru() else {
+                break;
+            };
+
+            if block.is_dirty() {
+                bio_waiter.concat(
+                    self.fs()
+                        .block_device()
+                        .write_block(Bid::new(bid as _), &block.frame)?,
+                );
+            }
+        }
+
+        bio_waiter.wait().ok_or_else(|| {
+            Error::with_message(Errno::EIO, "failed to evict_all the indirect blocks")
+        })?;
+
+        Ok(())
+    }
+
+    /// Attempts to shrink the cache size if it exceeds the maximum allowed cache size.
+    fn try_shrink(&mut self) -> Result<()> {
+        if self.cache.len() < Self::MAX_SIZE {
+            return Ok(());
+        }
+
+        let mut bio_waiter = BioWaiter::new();
+        for _ in 0..(Self::MAX_SIZE / 2) {
+            let (bid, block) = self.cache.pop_lru().unwrap();
+            if block.is_dirty() {
+                bio_waiter.concat(
+                    self.fs()
+                        .block_device()
+                        .write_block(Bid::new(bid as _), &block.frame)?,
+                );
+            }
+        }
+
+        bio_waiter.wait().ok_or_else(|| {
+            Error::with_message(Errno::EIO, "failed to write back the indirect block")
+        })?;
+
+        Ok(())
+    }
+
+    #[inline]
+    fn fs(&self) -> Arc<Ext2> {
+        self.fs.upgrade().unwrap()
+    }
+}
+
+/// Represents a single indirect block buffer cached by the `IndirectBlockCache`.
+#[derive(Clone, Debug)]
+pub struct IndirectBlock {
+    frame: VmFrame,
+    state: State,
+}
+
+impl IndirectBlock {
+    /// Allocates an uninitialized block whose bytes are to be populated with
+    /// data loaded from the disk.
+    fn alloc_uninit() -> Result<Self> {
+        let frame = VmAllocOptions::new(1).uninit(true).alloc_single()?;
+        Ok(Self {
+            frame,
+            state: State::Uninit,
+        })
+    }
+
+    /// Allocates a new block with its bytes initialized to zero.
+    pub fn alloc() -> Result<Self> {
+        let frame = VmAllocOptions::new(1).alloc_single()?;
+        Ok(Self {
+            frame,
+            state: State::Dirty,
+        })
+    }
+
+    /// Returns `true` if it is in the dirty state.
+    pub fn is_dirty(&self) -> bool {
+        self.state == State::Dirty
+    }
+
+    /// Reads a bid at a specified `idx`.
+    pub fn read_bid(&self, idx: usize) -> Result<u32> {
+        assert!(self.state != State::Uninit);
+        let bid: u32 = self.frame.read_val(idx * BID_SIZE)?;
+        Ok(bid)
+    }
+
+    /// Writes a value of bid at a specified `idx`.
+    ///
+    /// After a successful write operation, the block's state will be marked as dirty.
+    pub fn write_bid(&mut self, idx: usize, bid: &u32) -> Result<()> {
+        assert!(self.state != State::Uninit);
+        self.frame.write_val(idx * BID_SIZE, bid)?;
+        self.state = State::Dirty;
+        Ok(())
+    }
+}
+
+#[derive(Clone, Eq, PartialEq, Debug)]
+enum State {
+    /// Indicates a newly allocated block whose content has not been initialized.
+    Uninit,
+    /// Indicates a block whose content is consistent with the corresponding disk content.
+    UpToDate,
+    /// Indicates a block whose content has been updated but not yet written back to the underlying disk.
+    Dirty,
+}
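`IndirectBlockCache` bounds its memory by evicting the least-recently-used half of the entries once `MAX_SIZE` is reached, writing dirty victims back first. A minimal write-back LRU sketch using only stable `lru` crate calls (`LruCache::unbounded`, `put`, `pop_lru`, `len`); the kernel I/O is replaced by a hypothetical print, so this illustrates the eviction shape, not the real device path:

use lru::LruCache;

const MAX_SIZE: usize = 16; // same bound as BH_LRU_SIZE

struct Block {
    dirty: bool,
}

struct WriteBackCache {
    cache: LruCache<u32, Block>,
}

impl WriteBackCache {
    // Evicts the LRU half when full, persisting dirty victims first --
    // the same shape as `IndirectBlockCache::try_shrink`.
    fn try_shrink(&mut self) {
        if self.cache.len() < MAX_SIZE {
            return;
        }
        for _ in 0..(MAX_SIZE / 2) {
            let (bid, block) = self.cache.pop_lru().unwrap();
            if block.dirty {
                // Stand-in for the asynchronous write-back to the block device.
                println!("write back block {bid}");
            }
        }
    }

    fn insert(&mut self, bid: u32, block: Block) {
        self.try_shrink();
        self.cache.put(bid, block);
    }
}

fn main() {
    let mut c = WriteBackCache { cache: LruCache::unbounded() };
    for bid in 0..20 {
        c.insert(bid, Block { dirty: bid % 2 == 0 });
    }
    assert!(c.cache.len() <= MAX_SIZE);
}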
kernel/aster-nix/src/fs/ext2/inode.rs

@@ -1,35 +1,21 @@
 // SPDX-License-Identifier: MPL-2.0

-use core::cmp::Ordering;
-
 use inherit_methods_macro::inherit_methods;

 use super::{
+    block_ptr::{BidPath, BlockPtrs, Ext2Bid, BID_SIZE, MAX_BLOCK_PTRS},
     blocks_hole::BlocksHoleDesc,
     dir::{DirEntry, DirEntryReader, DirEntryWriter},
     fs::Ext2,
+    indirect_block_cache::{IndirectBlock, IndirectBlockCache},
     prelude::*,
 };

-mod field {
-    pub type Field = core::ops::Range<usize>;
-
-    /// Direct pointer to blocks.
-    pub const DIRECT: Field = 0..12;
-    /// Indirect pointer to blocks.
-    pub const INDIRECT: Field = 12..13;
-    /// Doubly indirect pointer to blocks.
-    pub const DB_INDIRECT: Field = 13..14;
-    /// Trebly indirect pointer to blocks.
-    pub const TB_INDIRECT: Field = 14..15;
-}
-
-/// The number of block pointers.
-pub const BLOCK_PTR_CNT: usize = field::TB_INDIRECT.end;
 /// Max length of file name.
 pub const MAX_FNAME_LEN: usize = 255;

 /// Max path length of the fast symlink.
-pub const FAST_SYMLINK_MAX_LEN: usize = BLOCK_PTR_CNT * core::mem::size_of::<u32>();
+pub const MAX_FAST_SYMLINK_LEN: usize = MAX_BLOCK_PTRS * BID_SIZE;

 /// The Ext2 inode.
 pub struct Inode {
@@ -49,7 +35,7 @@ impl Inode {
         Arc::new_cyclic(|weak_self| Self {
             ino,
             block_group_idx,
-            inner: RwMutex::new(Inner::new(desc, weak_self.clone())),
+            inner: RwMutex::new(Inner::new(desc, weak_self.clone(), fs.clone())),
             fs,
         })
     }
@@ -567,7 +553,7 @@ impl Inode {
     pub fn gid(&self) -> u32;
     pub fn file_flags(&self) -> FileFlags;
     pub fn hard_links(&self) -> u16;
-    pub fn blocks_count(&self) -> u32;
+    pub fn blocks_count(&self) -> Ext2Bid;
     pub fn acl(&self) -> Option<Bid>;
     pub fn atime(&self) -> Duration;
     pub fn mtime(&self) -> Duration;
@@ -613,7 +599,7 @@ impl Inner {
     pub fn hard_links(&self) -> u16;
     pub fn inc_hard_links(&mut self);
    pub fn dec_hard_links(&mut self);
-    pub fn blocks_count(&self) -> u32;
+    pub fn blocks_count(&self) -> Ext2Bid;
     pub fn acl(&self) -> Option<Bid>;
     pub fn atime(&self) -> Duration;
     pub fn set_atime(&mut self, time: Duration);
@@ -626,9 +612,9 @@ impl Inner {
 }

 impl Inner {
-    pub fn new(desc: Dirty<InodeDesc>, weak_self: Weak<Inode>) -> Self {
+    pub fn new(desc: Dirty<InodeDesc>, weak_self: Weak<Inode>, fs: Weak<Ext2>) -> Self {
         let num_page_bytes = desc.num_page_bytes();
-        let inode_impl = InodeImpl::new(desc, weak_self);
+        let inode_impl = InodeImpl::new(desc, weak_self, fs);
         Self {
             page_cache: PageCache::with_capacity(num_page_bytes, Arc::downgrade(&inode_impl) as _)
                 .unwrap(),
@@ -670,7 +656,8 @@ impl Inner {
         let mut buf_offset = 0;
         for bid in Bid::from_offset(offset)..Bid::from_offset(offset + read_len) {
             let frame = VmAllocOptions::new(1).uninit(true).alloc_single().unwrap();
-            self.inode_impl.read_block_sync(bid, &frame)?;
+            self.inode_impl
+                .read_block_sync(bid.to_raw() as Ext2Bid, &frame)?;
             frame.read_bytes(0, &mut buf[buf_offset..buf_offset + BLOCK_SIZE])?;
             buf_offset += BLOCK_SIZE;
         }
@@ -710,7 +697,8 @@ impl Inner {
                 frame.write_bytes(0, &buf[buf_offset..buf_offset + BLOCK_SIZE])?;
                 frame
             };
-            self.inode_impl.write_block_sync(bid, &frame)?;
+            self.inode_impl
+                .write_block_sync(bid.to_raw() as Ext2Bid, &frame)?;
             buf_offset += BLOCK_SIZE;
         }

@@ -718,7 +706,7 @@ impl Inner {
     }

     pub fn write_link(&mut self, target: &str) -> Result<()> {
-        if target.len() <= FAST_SYMLINK_MAX_LEN {
+        if target.len() <= MAX_FAST_SYMLINK_LEN {
             return self.inode_impl.write_link(target);
         }

@@ -733,7 +721,7 @@ impl Inner {

     pub fn read_link(&self) -> Result<String> {
         let file_size = self.inode_impl.file_size();
-        if file_size <= FAST_SYMLINK_MAX_LEN {
+        if file_size <= MAX_FAST_SYMLINK_LEN {
             return self.inode_impl.read_link();
         }

@@ -825,17 +813,21 @@ struct InodeImpl(RwMutex<InodeImpl_>);

 struct InodeImpl_ {
     desc: Dirty<InodeDesc>,
-    blocks_hole_desc: BlocksHoleDesc,
+    blocks_hole_desc: RwLock<BlocksHoleDesc>,
+    indirect_blocks: RwMutex<IndirectBlockCache>,
     is_freed: bool,
+    last_alloc_device_bid: Option<Ext2Bid>,
     weak_self: Weak<Inode>,
 }

 impl InodeImpl_ {
-    pub fn new(desc: Dirty<InodeDesc>, weak_self: Weak<Inode>) -> Self {
+    pub fn new(desc: Dirty<InodeDesc>, weak_self: Weak<Inode>, fs: Weak<Ext2>) -> Self {
         Self {
-            blocks_hole_desc: BlocksHoleDesc::new(desc.blocks_count() as usize),
+            blocks_hole_desc: RwLock::new(BlocksHoleDesc::new(desc.blocks_count() as usize)),
             desc,
+            indirect_blocks: RwMutex::new(IndirectBlockCache::new(fs)),
             is_freed: false,
+            last_alloc_device_bid: None,
             weak_self,
         }
     }
@@ -844,40 +836,45 @@ impl InodeImpl_ {
         self.weak_self.upgrade().unwrap()
     }

-    pub fn read_block_async(&self, bid: Bid, block: &VmFrame) -> Result<BioWaiter> {
-        let bid = bid.to_raw() as u32;
+    pub fn fs(&self) -> Arc<Ext2> {
+        self.inode().fs()
+    }
+
+    pub fn read_block_async(&self, bid: Ext2Bid, block: &VmFrame) -> Result<BioWaiter> {
         if bid >= self.desc.blocks_count() {
             return_errno!(Errno::EINVAL);
         }

-        debug_assert!(field::DIRECT.contains(&(bid as usize)));
-        if self.blocks_hole_desc.is_hole(bid as usize) {
+        if self.blocks_hole_desc.read().is_hole(bid as usize) {
             block.writer().fill(0);
             return Ok(BioWaiter::new());
         }
-        let device_bid = Bid::new(self.desc.data[bid as usize] as _);
-        self.inode().fs().read_block_async(device_bid, block)
+
+        let device_range = DeviceRangeReader::new(self, bid..bid + 1)?.read()?;
+        self.fs().read_block_async(device_range.start, block)
     }

-    pub fn read_block_sync(&self, bid: Bid, block: &VmFrame) -> Result<()> {
+    pub fn read_block_sync(&self, bid: Ext2Bid, block: &VmFrame) -> Result<()> {
         match self.read_block_async(bid, block)?.wait() {
             Some(BioStatus::Complete) => Ok(()),
             _ => return_errno!(Errno::EIO),
         }
     }

-    pub fn write_block_async(&self, bid: Bid, block: &VmFrame) -> Result<BioWaiter> {
-        let bid = bid.to_raw() as u32;
+    pub fn write_block_async(&self, bid: Ext2Bid, block: &VmFrame) -> Result<BioWaiter> {
         if bid >= self.desc.blocks_count() {
             return_errno!(Errno::EINVAL);
         }

-        debug_assert!(field::DIRECT.contains(&(bid as usize)));
-        let device_bid = Bid::new(self.desc.data[bid as usize] as _);
-        self.inode().fs().write_block_async(device_bid, block)
+        let device_range = DeviceRangeReader::new(self, bid..bid + 1)?.read()?;
+        let waiter = self.fs().write_block_async(device_range.start, block)?;
+
+        // FIXME: Unset the block hole in the callback function of bio.
+        self.blocks_hole_desc.write().unset(bid as usize);
+        Ok(waiter)
     }

-    pub fn write_block_sync(&self, bid: Bid, block: &VmFrame) -> Result<()> {
+    pub fn write_block_sync(&self, bid: Ext2Bid, block: &VmFrame) -> Result<()> {
         match self.write_block_async(bid, block)?.wait() {
             Some(BioStatus::Complete) => Ok(()),
             _ => return_errno!(Errno::EIO),
@ -885,48 +882,560 @@ impl InodeImpl_ {
|
||||
}
|
||||
|
||||
pub fn resize(&mut self, new_size: usize) -> Result<()> {
|
||||
let new_blocks = if self.desc.type_ == FileType::Symlink && new_size <= FAST_SYMLINK_MAX_LEN
|
||||
{
|
||||
0
|
||||
let old_size = self.desc.size;
|
||||
if new_size > old_size {
|
||||
self.expand(new_size)?;
|
||||
} else {
|
||||
new_size.div_ceil(BLOCK_SIZE) as u32
|
||||
};
|
||||
self.shrink(new_size);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Expands inode size.
|
||||
///
|
||||
/// After a successful expansion, the size will be enlarged to `new_size`,
|
||||
/// which may result in an increased block count.
|
||||
fn expand(&mut self, new_size: usize) -> Result<()> {
|
||||
let new_blocks = self.desc.size_to_blocks(new_size);
|
||||
let old_blocks = self.desc.blocks_count();
|
||||
|
||||
match new_blocks.cmp(&old_blocks) {
|
||||
Ordering::Greater => {
|
||||
// Allocate blocks
|
||||
for file_bid in old_blocks..new_blocks {
|
||||
debug_assert!(field::DIRECT.contains(&(file_bid as usize)));
|
||||
let device_bid = self
|
||||
.inode()
|
||||
.fs()
|
||||
.alloc_block(self.inode().block_group_idx)?;
|
||||
self.desc.data[file_bid as usize] = device_bid.to_raw() as u32;
|
||||
}
|
||||
self.desc.blocks_count = new_blocks;
|
||||
// Expands block count if necessary
|
||||
if new_blocks > old_blocks {
|
||||
if new_blocks - old_blocks > self.fs().super_block().free_blocks_count() {
|
||||
return_errno_with_message!(Errno::ENOSPC, "not enough free blocks");
|
||||
}
|
||||
Ordering::Equal => (),
|
||||
Ordering::Less => {
|
||||
// Free blocks
|
||||
for file_bid in new_blocks..old_blocks {
|
||||
debug_assert!(field::DIRECT.contains(&(file_bid as usize)));
|
||||
let device_bid = Bid::new(self.desc.data[file_bid as usize] as _);
|
||||
self.inode().fs().free_block(device_bid)?;
|
||||
self.expand_blocks(old_blocks..new_blocks)?;
|
||||
self.blocks_hole_desc.write().resize(new_blocks as usize);
|
||||
}
|
||||
|
||||
// Expands the size
|
||||
self.desc.size = new_size;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Expands inode blocks.
|
||||
///
|
||||
/// After a successful expansion, the block count will be enlarged to `range.end`.
|
||||
fn expand_blocks(&mut self, range: Range<Ext2Bid>) -> Result<()> {
|
||||
let mut current_range = range.clone();
|
||||
while !current_range.is_empty() {
|
||||
let Ok(expand_cnt) = self.try_expand_blocks(current_range.clone()) else {
|
||||
self.shrink_blocks(range.start..current_range.start);
|
||||
return_errno_with_message!(Errno::ENOSPC, "can not allocate blocks");
|
||||
};
|
||||
current_range.start += expand_cnt;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Attempts to expand a range of blocks and returns the number of consecutive
|
||||
/// blocks successfully allocated.
|
||||
///
|
||||
/// Note that the returned number may be less than the requested range if there
|
||||
/// isn't enough consecutive space available or if there is a necessity to allocate
|
||||
/// indirect blocks.
|
||||
fn try_expand_blocks(&mut self, range: Range<Ext2Bid>) -> Result<Ext2Bid> {
|
||||
// Calculates the maximum number of consecutive blocks that can be allocated in
|
||||
// this round, as well as the number of additional indirect blocks required for
|
||||
// the allocation.
|
||||
let (max_cnt, indirect_cnt) = {
|
||||
let bid_path = BidPath::from(range.start);
|
||||
let max_cnt = (range.len() as Ext2Bid).min(bid_path.cnt_to_next_indirect());
|
||||
let indirect_cnt = match bid_path {
|
||||
BidPath::Direct(_) => 0,
|
||||
BidPath::Indirect(0) => 1,
|
||||
BidPath::Indirect(_) => 0,
|
||||
BidPath::DbIndirect(0, 0) => 2,
|
||||
BidPath::DbIndirect(_, 0) => 1,
|
||||
BidPath::DbIndirect(_, _) => 0,
|
||||
BidPath::TbIndirect(0, 0, 0) => 3,
|
||||
BidPath::TbIndirect(_, 0, 0) => 2,
|
||||
BidPath::TbIndirect(_, _, 0) => 1,
|
||||
BidPath::TbIndirect(_, _, _) => 0,
|
||||
};
|
||||
(max_cnt, indirect_cnt)
|
||||
};
|
||||
|
||||
// Calculates the block_group_idx to advise the filesystem on which group
|
||||
// to prioritize for allocation.
|
||||
let block_group_idx = self
|
||||
.last_alloc_device_bid
|
||||
.map_or(self.inode().block_group_idx, |id| {
|
||||
((id + 1) / self.fs().blocks_per_group()) as usize
|
||||
});
|
||||
|
||||
// Allocates the blocks only, no indirect blocks are required.
|
||||
if indirect_cnt == 0 {
|
||||
let device_range = self
|
||||
.fs()
|
||||
.alloc_blocks(block_group_idx, max_cnt)
|
||||
.ok_or_else(|| Error::new(Errno::ENOSPC))?;
|
||||
if let Err(e) = self.set_device_range(range.start, device_range.clone()) {
|
||||
self.fs().free_blocks(device_range).unwrap();
|
||||
return Err(e);
|
||||
}
|
||||
self.desc.blocks_count = range.start + device_range.len() as Ext2Bid;
|
||||
self.last_alloc_device_bid = Some(device_range.end - 1);
|
||||
return Ok(device_range.len() as Ext2Bid);
|
||||
}
|
||||
|
||||
// Allocates the required additional indirect blocks and at least one block.
|
||||
let (indirect_bids, device_range) = {
|
||||
let mut indirect_bids: Vec<Ext2Bid> = Vec::with_capacity(indirect_cnt as usize);
|
||||
let mut total_cnt = max_cnt + indirect_cnt;
|
||||
let mut device_range: Option<Range<Ext2Bid>> = None;
|
||||
while device_range.is_none() {
|
||||
let Some(mut range) = self.fs().alloc_blocks(block_group_idx, total_cnt) else {
|
||||
for indirect_bid in indirect_bids.iter() {
|
||||
self.fs()
|
||||
.free_blocks(*indirect_bid..*indirect_bid + 1)
|
||||
.unwrap();
|
||||
}
|
||||
return_errno!(Errno::ENOSPC);
|
||||
};
|
||||
total_cnt -= range.len() as Ext2Bid;
|
||||
|
||||
// Stores the bids for indirect blocks.
|
||||
while (indirect_bids.len() as Ext2Bid) < indirect_cnt && !range.is_empty() {
|
||||
indirect_bids.push(range.start);
|
||||
range.start += 1;
|
||||
}
|
||||
self.desc.blocks_count = new_blocks;
|
||||
|
||||
if !range.is_empty() {
|
||||
device_range = Some(range);
|
||||
}
|
||||
}
|
||||
|
||||
(indirect_bids, device_range.unwrap())
|
||||
};
|
||||
|
||||
if let Err(e) = self.set_indirect_bids(range.start, &indirect_bids) {
|
||||
self.free_indirect_blocks_required_by(range.start).unwrap();
|
||||
return Err(e);
|
||||
}
|
||||
|
||||
if let Err(e) = self.set_device_range(range.start, device_range.clone()) {
|
||||
self.fs().free_blocks(device_range).unwrap();
|
||||
self.free_indirect_blocks_required_by(range.start).unwrap();
|
||||
return Err(e);
|
||||
}
|
||||
|
||||
self.desc.blocks_count = range.start + device_range.len() as Ext2Bid;
|
||||
self.last_alloc_device_bid = Some(device_range.end - 1);
|
||||
Ok(device_range.len() as Ext2Bid)
|
||||
}
|
||||
|
||||
/// Sets the device block IDs for a specified range.
|
||||
///
|
||||
/// It updates the mapping between the file's block IDs and the device's block IDs
|
||||
/// starting from `start_bid`. It maps each block ID in the file to the corresponding
|
||||
/// block ID on the device based on the provided `device_range`.
|
||||
fn set_device_range(&mut self, start_bid: Ext2Bid, device_range: Range<Ext2Bid>) -> Result<()> {
|
||||
match BidPath::from(start_bid) {
|
||||
BidPath::Direct(idx) => {
|
||||
for (i, bid) in device_range.enumerate() {
|
||||
self.desc.block_ptrs.set_direct(idx as usize + i, bid);
|
||||
}
|
||||
}
|
||||
BidPath::Indirect(idx) => {
|
||||
let indirect_bid = self.desc.block_ptrs.indirect();
|
||||
assert!(indirect_bid != 0);
|
||||
let mut indirect_blocks = self.indirect_blocks.write();
|
||||
let indirect_block = indirect_blocks.find_mut(indirect_bid)?;
|
||||
for (i, bid) in device_range.enumerate() {
|
||||
indirect_block.write_bid(idx as usize + i, &bid)?;
|
||||
}
|
||||
}
|
||||
BidPath::DbIndirect(lvl1_idx, lvl2_idx) => {
|
||||
let mut indirect_blocks = self.indirect_blocks.write();
|
||||
let lvl1_indirect_bid = {
|
||||
let db_indirect_bid = self.desc.block_ptrs.db_indirect();
|
||||
assert!(db_indirect_bid != 0);
|
||||
let db_indirect_block = indirect_blocks.find(db_indirect_bid)?;
|
||||
db_indirect_block.read_bid(lvl1_idx as usize)?
|
||||
};
|
||||
assert!(lvl1_indirect_bid != 0);
|
||||
|
||||
let lvl1_indirect_block = indirect_blocks.find_mut(lvl1_indirect_bid)?;
|
||||
for (i, bid) in device_range.enumerate() {
|
||||
lvl1_indirect_block.write_bid(lvl2_idx as usize + i, &bid)?;
|
||||
}
|
||||
}
|
||||
BidPath::TbIndirect(lvl1_idx, lvl2_idx, lvl3_idx) => {
|
||||
let mut indirect_blocks = self.indirect_blocks.write();
|
||||
let lvl2_indirect_bid = {
|
||||
let lvl1_indirect_bid = {
|
||||
let tb_indirect_bid = self.desc.block_ptrs.tb_indirect();
|
||||
assert!(tb_indirect_bid != 0);
|
||||
let tb_indirect_block = indirect_blocks.find(tb_indirect_bid)?;
|
||||
tb_indirect_block.read_bid(lvl1_idx as usize)?
|
||||
};
|
||||
assert!(lvl1_indirect_bid != 0);
|
||||
let lvl1_indirect_block = indirect_blocks.find(lvl1_indirect_bid)?;
|
||||
lvl1_indirect_block.read_bid(lvl2_idx as usize)?
|
||||
};
|
||||
assert!(lvl2_indirect_bid != 0);
|
||||
|
||||
let lvl2_indirect_block = indirect_blocks.find_mut(lvl2_indirect_bid)?;
|
||||
for (i, bid) in device_range.enumerate() {
|
||||
lvl2_indirect_block.write_bid(lvl3_idx as usize + i, &bid)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Sets the device block IDs for indirect blocks required by a specific block ID.
|
||||
///
|
||||
/// It assigns a sequence of block IDs (`indirect_bids`) on the device to be used
|
||||
/// as indirect blocks for a given file block ID (`bid`).
|
||||
fn set_indirect_bids(&mut self, bid: Ext2Bid, indirect_bids: &[Ext2Bid]) -> Result<()> {
|
||||
assert!((1..=3).contains(&indirect_bids.len()));
|
||||
|
||||
let mut indirect_blocks = self.indirect_blocks.write();
|
||||
let bid_path = BidPath::from(bid);
|
||||
for indirect_bid in indirect_bids.iter() {
|
||||
let indirect_block = IndirectBlock::alloc()?;
|
||||
indirect_blocks.insert(*indirect_bid, indirect_block)?;
|
||||
|
||||
match bid_path {
|
||||
BidPath::Indirect(idx) => {
|
||||
assert_eq!(idx, 0);
|
||||
self.desc.block_ptrs.set_indirect(*indirect_bid);
|
||||
}
|
||||
BidPath::DbIndirect(lvl1_idx, lvl2_idx) => {
|
||||
assert_eq!(lvl2_idx, 0);
|
||||
if self.desc.block_ptrs.db_indirect() == 0 {
|
||||
self.desc.block_ptrs.set_db_indirect(*indirect_bid);
|
||||
} else {
|
||||
let db_indirect_block =
|
||||
indirect_blocks.find_mut(self.desc.block_ptrs.db_indirect())?;
|
||||
db_indirect_block.write_bid(lvl1_idx as usize, indirect_bid)?;
|
||||
}
|
||||
}
|
||||
BidPath::TbIndirect(lvl1_idx, lvl2_idx, lvl3_idx) => {
|
||||
assert_eq!(lvl3_idx, 0);
|
||||
if self.desc.block_ptrs.tb_indirect() == 0 {
|
||||
self.desc.block_ptrs.set_tb_indirect(*indirect_bid);
|
||||
} else {
|
||||
let lvl1_indirect_bid = {
|
||||
let tb_indirect_block =
|
||||
indirect_blocks.find(self.desc.block_ptrs.tb_indirect())?;
|
||||
tb_indirect_block.read_bid(lvl1_idx as usize)?
|
||||
};
|
||||
|
||||
if lvl1_indirect_bid == 0 {
|
||||
let tb_indirect_block =
|
||||
indirect_blocks.find_mut(self.desc.block_ptrs.tb_indirect())?;
|
||||
tb_indirect_block.write_bid(lvl1_idx as usize, indirect_bid)?;
|
||||
} else {
|
||||
let lvl1_indirect_block =
|
||||
indirect_blocks.find_mut(lvl1_indirect_bid)?;
|
||||
lvl1_indirect_block.write_bid(lvl2_idx as usize, indirect_bid)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
BidPath::Direct(_) => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Shrinks inode size.
|
||||
///
|
||||
/// After the reduction, the size will be shrinked to `new_size`,
|
||||
/// which may result in an decreased block count.
|
||||
fn shrink(&mut self, new_size: usize) {
|
||||
let new_blocks = self.desc.size_to_blocks(new_size);
|
||||
let old_blocks = self.desc.blocks_count();
|
||||
|
||||
// Shrinks block count if necessary
|
||||
if new_blocks < old_blocks {
|
||||
self.shrink_blocks(new_blocks..old_blocks);
|
||||
self.blocks_hole_desc.write().resize(new_blocks as usize);
|
||||
}
|
||||
|
||||
// Shrinks the size
|
||||
self.desc.size = new_size;
|
||||
self.blocks_hole_desc.resize(new_blocks as usize);
|
||||
}
|
||||
|
||||
/// Shrinks inode blocks.
|
||||
///
|
||||
/// After the reduction, the block count will be decreased to `range.start`.
|
||||
fn shrink_blocks(&mut self, range: Range<Ext2Bid>) {
|
||||
let mut current_range = range.clone();
|
||||
while !current_range.is_empty() {
|
||||
let free_cnt = self.try_shrink_blocks(current_range.clone());
|
||||
current_range.end -= free_cnt;
|
||||
}
|
||||
|
||||
self.desc.blocks_count = range.start;
|
||||
self.last_alloc_device_bid = if range.start == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(
|
||||
DeviceRangeReader::new(self, (range.start - 1)..range.start)
|
||||
.unwrap()
|
||||
.read()
|
||||
.unwrap()
|
||||
.start,
|
||||
)
|
||||
};
|
||||
}
|
||||
|
||||
/// Attempts to shrink a range of blocks and returns the number of blocks
|
||||
/// successfully freed.
|
||||
///
|
||||
/// Note that the returned number may be less than the requested range if needs
|
||||
/// to free the indirect blocks that are no longer required.
|
||||
fn try_shrink_blocks(&mut self, range: Range<Ext2Bid>) -> Ext2Bid {
|
||||
// Calculates the maximum range of blocks that can be freed in this round.
|
||||
let range = {
|
||||
let max_cnt = (range.len() as Ext2Bid)
|
||||
.min(BidPath::from(range.end - 1).last_lvl_idx() as Ext2Bid + 1);
|
||||
(range.end - max_cnt)..range.end
|
||||
};
|
||||
|
||||
let fs = self.fs();
|
||||
let device_range_reader = DeviceRangeReader::new(self, range.clone()).unwrap();
|
||||
for device_range in device_range_reader {
|
||||
fs.free_blocks(device_range.clone()).unwrap();
|
||||
}
|
||||
|
||||
self.free_indirect_blocks_required_by(range.start).unwrap();
|
||||
range.len() as Ext2Bid
|
||||
}
|
||||
|
||||
/// Frees the indirect blocks required by the specified block ID.
|
||||
///
|
||||
/// It ensures that the indirect blocks that are required by the block ID
|
||||
/// are properly released.
|
||||
fn free_indirect_blocks_required_by(&mut self, bid: Ext2Bid) -> Result<()> {
|
||||
let bid_path = BidPath::from(bid);
|
||||
if bid_path.last_lvl_idx() != 0 {
|
||||
return Ok(());
|
||||
}
|
||||
if bid == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
match bid_path {
|
||||
BidPath::Indirect(_) => {
|
||||
let indirect_bid = self.desc.block_ptrs.indirect();
|
||||
if indirect_bid == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
self.desc.block_ptrs.set_indirect(0);
|
||||
self.indirect_blocks.write().remove(indirect_bid);
|
||||
self.fs()
|
||||
.free_blocks(indirect_bid..indirect_bid + 1)
|
||||
.unwrap();
|
||||
}
|
||||
BidPath::DbIndirect(lvl1_idx, _) => {
|
||||
let db_indirect_bid = self.desc.block_ptrs.db_indirect();
|
||||
if db_indirect_bid == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut indirect_blocks = self.indirect_blocks.write();
|
||||
let lvl1_indirect_bid = {
|
||||
let db_indirect_block = indirect_blocks.find(db_indirect_bid)?;
|
||||
db_indirect_block.read_bid(lvl1_idx as usize)?
|
||||
};
|
||||
if lvl1_indirect_bid != 0 {
|
||||
indirect_blocks.remove(lvl1_indirect_bid);
|
||||
self.fs()
|
||||
.free_blocks(lvl1_indirect_bid..lvl1_indirect_bid + 1)
|
||||
.unwrap();
|
||||
}
|
||||
if lvl1_idx == 0 {
|
||||
self.desc.block_ptrs.set_db_indirect(0);
|
||||
indirect_blocks.remove(db_indirect_bid);
|
||||
self.fs()
|
||||
.free_blocks(db_indirect_bid..db_indirect_bid + 1)
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
BidPath::TbIndirect(lvl1_idx, lvl2_idx, _) => {
|
||||
let tb_indirect_bid = self.desc.block_ptrs.tb_indirect();
|
||||
if tb_indirect_bid == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut indirect_blocks = self.indirect_blocks.write();
|
||||
let lvl1_indirect_bid = {
|
||||
let tb_indirect_block = indirect_blocks.find(tb_indirect_bid)?;
|
||||
tb_indirect_block.read_bid(lvl1_idx as usize)?
|
||||
};
|
||||
if lvl1_indirect_bid != 0 {
|
||||
let lvl2_indirect_bid = {
|
||||
let lvl1_indirect_block = indirect_blocks.find(lvl1_indirect_bid)?;
|
||||
lvl1_indirect_block.read_bid(lvl2_idx as usize)?
|
||||
};
|
||||
if lvl2_indirect_bid != 0 {
|
||||
indirect_blocks.remove(lvl2_indirect_bid);
|
||||
self.fs()
|
||||
.free_blocks(lvl2_indirect_bid..lvl2_indirect_bid + 1)
|
||||
.unwrap();
|
||||
}
|
||||
if lvl2_idx == 0 {
|
||||
indirect_blocks.remove(lvl1_indirect_bid);
|
||||
self.fs()
|
||||
.free_blocks(lvl1_indirect_bid..lvl1_indirect_bid + 1)
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
if lvl2_idx == 0 && lvl1_idx == 0 {
|
||||
self.desc.block_ptrs.set_tb_indirect(0);
|
||||
indirect_blocks.remove(tb_indirect_bid);
|
||||
self.fs()
|
||||
.free_blocks(tb_indirect_bid..tb_indirect_bid + 1)
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
BidPath::Direct(_) => panic!(),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}

/// A reader to get the corresponding device block IDs for a specified range.
///
/// It calculates and returns the range of block IDs on the device that would map to
/// the file's block range. This is useful for translating file-level block addresses
/// to their locations on the physical storage device.
struct DeviceRangeReader<'a> {
	inode: &'a InodeImpl_,
	indirect_blocks: RwMutexWriteGuard<'a, IndirectBlockCache>,
	range: Range<Ext2Bid>,
	indirect_block: Option<IndirectBlock>,
}

impl<'a> DeviceRangeReader<'a> {
	/// Creates a new reader.
	///
	/// # Panic
	///
	/// If the `range` is empty, this method will panic.
	pub fn new(inode: &'a InodeImpl_, range: Range<Ext2Bid>) -> Result<Self> {
		assert!(!range.is_empty());

		let mut reader = Self {
			indirect_blocks: inode.indirect_blocks.write(),
			inode,
			range,
			indirect_block: None,
		};
		reader.update_indirect_block()?;
		Ok(reader)
	}

	/// Reads the corresponding device block IDs for a specified range.
	///
	/// Note that the returned device range may be smaller than the requested range
	/// because the underlying blocks may not be allocated consecutively.
	pub fn read(&mut self) -> Result<Range<Ext2Bid>> {
		let bid_path = BidPath::from(self.range.start);
		let max_cnt = self
			.range
			.len()
			.min(bid_path.cnt_to_next_indirect() as usize);
		let start_idx = bid_path.last_lvl_idx();

		// Reads the device block ID range
		let mut device_range: Option<Range<Ext2Bid>> = None;
		for i in start_idx..start_idx + max_cnt {
			let device_bid = match &self.indirect_block {
				None => self.inode.desc.block_ptrs.direct(i),
				Some(indirect_block) => indirect_block.read_bid(i)?,
			};
			match device_range {
				Some(ref mut range) => {
					if device_bid == range.end {
						range.end += 1;
					} else {
						break;
					}
				}
				None => {
					device_range = Some(device_bid..device_bid + 1);
				}
			}
		}
		let device_range = device_range.unwrap();

		// Updates the range
		self.range.start += device_range.len() as Ext2Bid;
		if device_range.len() == max_cnt {
			// Updates the indirect block
			self.update_indirect_block()?;
		}

		Ok(device_range)
	}

	fn update_indirect_block(&mut self) -> Result<()> {
		let bid_path = BidPath::from(self.range.start);
		match bid_path {
			BidPath::Direct(_) => {
				self.indirect_block = None;
			}
			BidPath::Indirect(_) => {
				let indirect_bid = self.inode.desc.block_ptrs.indirect();
				let indirect_block = self.indirect_blocks.find(indirect_bid)?;
				self.indirect_block = Some(indirect_block.clone());
			}
			BidPath::DbIndirect(lvl1_idx, _) => {
				let lvl1_indirect_bid = {
					let db_indirect_block = self
						.indirect_blocks
						.find(self.inode.desc.block_ptrs.db_indirect())?;
					db_indirect_block.read_bid(lvl1_idx as usize)?
				};
				let lvl1_indirect_block = self.indirect_blocks.find(lvl1_indirect_bid)?;
				self.indirect_block = Some(lvl1_indirect_block.clone())
			}
			BidPath::TbIndirect(lvl1_idx, lvl2_idx, _) => {
				let lvl2_indirect_bid = {
					let lvl1_indirect_bid = {
						let tb_indirect_block = self
							.indirect_blocks
							.find(self.inode.desc.block_ptrs.tb_indirect())?;
						tb_indirect_block.read_bid(lvl1_idx as usize)?
					};
					let lvl1_indirect_block = self.indirect_blocks.find(lvl1_indirect_bid)?;
					lvl1_indirect_block.read_bid(lvl2_idx as usize)?
				};
				let lvl2_indirect_block = self.indirect_blocks.find(lvl2_indirect_bid)?;
				self.indirect_block = Some(lvl2_indirect_block.clone())
			}
		}

		Ok(())
	}
}

impl<'a> Iterator for DeviceRangeReader<'a> {
	type Item = Range<Ext2Bid>;

	fn next(&mut self) -> Option<Self::Item> {
		if self.range.is_empty() {
			return None;
		}

		let range = self.read().unwrap();
		Some(range)
	}
}
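
The iterator above yields one maximal run of consecutive device blocks per call. The following self-contained sketch distills the same merging rule (extend the current range while the next device block ID equals `range.end`, otherwise start a new one); the block IDs are made-up values for illustration.

	use std::ops::Range;

	fn consecutive_runs(bids: &[u32]) -> Vec<Range<u32>> {
		let mut runs: Vec<Range<u32>> = Vec::new();
		for &bid in bids {
			match runs.last_mut() {
				// The next block is adjacent on the device: extend the run.
				Some(run) if bid == run.end => run.end += 1,
				// Otherwise, a new run starts here.
				_ => runs.push(bid..bid + 1),
			}
		}
		runs
	}

	fn main() {
		// Device blocks 7, 8, 9 are consecutive; 42 breaks the run.
		assert_eq!(consecutive_runs(&[7, 8, 9, 42]), vec![7..10, 42..43]);
	}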

impl InodeImpl {
	pub fn new(desc: Dirty<InodeDesc>, weak_self: Weak<Inode>) -> Arc<Self> {
		let inner = InodeImpl_::new(desc, weak_self);
	pub fn new(desc: Dirty<InodeDesc>, weak_self: Weak<Inode>, fs: Weak<Ext2>) -> Arc<Self> {
		let inner = InodeImpl_::new(desc, weak_self, fs);
		Arc::new(Self(RwMutex::new(inner)))
	}

@ -988,7 +1497,7 @@ impl InodeImpl {
		inner.desc.hard_links -= 1;
	}

	pub fn blocks_count(&self) -> u32 {
	pub fn blocks_count(&self) -> Ext2Bid {
		self.0.read().desc.blocks_count()
	}

@ -1018,53 +1527,38 @@ impl InodeImpl {
		self.0.read().desc.ctime
	}

	pub fn read_block_sync(&self, bid: Bid, block: &VmFrame) -> Result<()> {
	pub fn read_block_sync(&self, bid: Ext2Bid, block: &VmFrame) -> Result<()> {
		self.0.read().read_block_sync(bid, block)
	}

	pub fn read_block_async(&self, bid: Bid, block: &VmFrame) -> Result<BioWaiter> {
	pub fn read_block_async(&self, bid: Ext2Bid, block: &VmFrame) -> Result<BioWaiter> {
		self.0.read().read_block_async(bid, block)
	}

	pub fn write_block_sync(&self, bid: Bid, block: &VmFrame) -> Result<()> {
		let waiter = self.write_block_async(bid, block)?;
		match waiter.wait() {
			Some(BioStatus::Complete) => Ok(()),
			_ => return_errno!(Errno::EIO),
		}
	pub fn write_block_sync(&self, bid: Ext2Bid, block: &VmFrame) -> Result<()> {
		self.0.read().write_block_sync(bid, block)
	}

	pub fn write_block_async(&self, bid: Bid, block: &VmFrame) -> Result<BioWaiter> {
		let inner = self.0.read();
		let waiter = inner.write_block_async(bid, block)?;

		let bid = bid.to_raw() as usize;
		if inner.blocks_hole_desc.is_hole(bid) {
			drop(inner);
			let mut inner = self.0.write();
			if bid < inner.blocks_hole_desc.size() && inner.blocks_hole_desc.is_hole(bid) {
				inner.blocks_hole_desc.unset(bid);
			}
		}
		Ok(waiter)
	pub fn write_block_async(&self, bid: Ext2Bid, block: &VmFrame) -> Result<BioWaiter> {
		self.0.read().write_block_async(bid, block)
	}

	pub fn set_device_id(&self, device_id: u64) {
		self.0.write().desc.data.as_bytes_mut()[..core::mem::size_of::<u64>()]
		self.0.write().desc.block_ptrs.as_bytes_mut()[..core::mem::size_of::<u64>()]
			.copy_from_slice(device_id.as_bytes());
	}

	pub fn device_id(&self) -> u64 {
		let mut device_id: u64 = 0;
		device_id
			.as_bytes_mut()
			.copy_from_slice(&self.0.read().desc.data.as_bytes()[..core::mem::size_of::<u64>()]);
		device_id.as_bytes_mut().copy_from_slice(
			&self.0.read().desc.block_ptrs.as_bytes()[..core::mem::size_of::<u64>()],
		);
		device_id
	}
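
The two accessors above reuse the first eight bytes of the block-pointer area to store the device ID, since device inodes keep no block addresses there. The standalone sketch below shows the same byte-level round trip; plain `to_le_bytes`/`from_le_bytes` stand in for the `Pod`-based `as_bytes` helpers used in the kernel code.

	fn main() {
		// 15 block pointers of 4 bytes each, as in a raw ext2 inode.
		let mut block_ptrs = [0u8; 60];
		let device_id: u64 = 0x0102_0304_0506_0708;

		// The `set_device_id` direction: store the ID in the first 8 bytes.
		block_ptrs[..8].copy_from_slice(&device_id.to_le_bytes());

		// The `device_id` direction: read the ID back out.
		let read_back = u64::from_le_bytes(block_ptrs[..8].try_into().unwrap());
		assert_eq!(read_back, device_id);
	}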

	pub fn write_link(&self, target: &str) -> Result<()> {
		let mut inner = self.0.write();
		inner.desc.data.as_bytes_mut()[..target.len()].copy_from_slice(target.as_bytes());
		inner.desc.block_ptrs.as_bytes_mut()[..target.len()].copy_from_slice(target.as_bytes());
		if inner.desc.size != target.len() {
			inner.resize(target.len())?;
		}
@ -1074,17 +1568,17 @@ impl InodeImpl {
	pub fn read_link(&self) -> Result<String> {
		let inner = self.0.read();
		let mut symlink = vec![0u8; inner.desc.size];
		symlink.copy_from_slice(&inner.desc.data.as_bytes()[..inner.desc.size]);
		symlink.copy_from_slice(&inner.desc.block_ptrs.as_bytes()[..inner.desc.size]);
		Ok(String::from_utf8(symlink)?)
	}

	pub fn sync_data_holes(&self) -> Result<()> {
		let mut inner = self.0.write();
		let inner = self.0.read();
		let zero_frame = VmAllocOptions::new(1).alloc_single().unwrap();
		for bid in 0..inner.desc.blocks_count() {
			if inner.blocks_hole_desc.is_hole(bid as usize) {
				inner.write_block_sync(Bid::new(bid as _), &zero_frame)?;
				inner.blocks_hole_desc.unset(bid as usize);
			let is_data_hole = inner.blocks_hole_desc.read().is_hole(bid as usize);
			if is_data_hole {
				inner.write_block_sync(bid, &zero_frame)?;
			}
		}
		Ok(())
@ -1112,6 +1606,7 @@ impl InodeImpl {
		}
	}

	inner.indirect_blocks.write().evict_all()?;
	inode.fs().sync_inode(inode.ino(), &inner.desc)?;
	inner.desc.clear_dirty();
	Ok(())
@ -1120,12 +1615,12 @@ impl InodeImpl {

impl PageCacheBackend for InodeImpl {
	fn read_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
		let bid = Bid::new(idx as _);
		let bid = idx as Ext2Bid;
		self.read_block_async(bid, frame)
	}

	fn write_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
		let bid = Bid::new(idx as _);
		let bid = idx as Ext2Bid;
		self.write_block_async(bid, frame)
	}

@ -1164,11 +1659,11 @@ pub(super) struct InodeDesc {
	/// Hard links count.
	hard_links: u16,
	/// Number of blocks.
	blocks_count: u32,
	blocks_count: Ext2Bid,
	/// File flags.
	flags: FileFlags,
	/// Pointers to blocks.
	data: [u32; BLOCK_PTR_CNT],
	block_ptrs: BlockPtrs,
	/// File or directory acl block.
	acl: Option<Bid>,
}
@ -1196,7 +1691,7 @@ impl TryFrom<RawInode> for InodeDesc {
			blocks_count: inode.blocks_count,
			flags: FileFlags::from_bits(inode.flags)
				.ok_or(Error::with_message(Errno::EINVAL, "invalid file flags"))?,
			data: inode.data,
			block_ptrs: inode.block_ptrs,
			acl: match file_type {
				FileType::File => Some(Bid::new(inode.file_acl as _)),
				FileType::Dir => Some(Bid::new(inode.size_high as _)),
@ -1221,7 +1716,7 @@ impl InodeDesc {
			hard_links: 1,
			blocks_count: 0,
			flags: FileFlags::empty(),
			data: [0; BLOCK_PTR_CNT],
			block_ptrs: BlockPtrs::default(),
			acl: match type_ {
				FileType::File | FileType::Dir => Some(Bid::new(0)),
				_ => None,
@ -1233,13 +1728,21 @@ impl InodeDesc {
		(self.blocks_count() as usize) * BLOCK_SIZE
	}

	pub fn blocks_count(&self) -> u32 {
		if self.type_ == FileType::Dir {
			let real_blocks = (self.size / BLOCK_SIZE) as u32;
			assert!(real_blocks <= self.blocks_count);
			return real_blocks;
	/// Returns the actual number of blocks utilized.
	///
	/// Ext2 allows the `blocks_count` to exceed the actual number of blocks utilized.
	pub fn blocks_count(&self) -> Ext2Bid {
		let blocks = self.size_to_blocks(self.size);
		assert!(blocks <= self.blocks_count);
		blocks
	}

	#[inline]
	fn size_to_blocks(&self, size: usize) -> Ext2Bid {
		if self.type_ == FileType::Symlink && size <= MAX_FAST_SYMLINK_LEN {
			return 0;
		}
		self.blocks_count
		size.div_ceil(BLOCK_SIZE) as Ext2Bid
	}
}
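
To make the new accounting concrete: with the usual 4 KiB block size, a 5000-byte regular file occupies ceil(5000 / 4096) = 2 blocks, while a fast symlink (one whose target fits inline in the block-pointer area, at most `MAX_FAST_SYMLINK_LEN` bytes) occupies none. A minimal sketch of the rounding rule, with the block size assumed:

	fn size_to_blocks(size: usize) -> usize {
		const BLOCK_SIZE: usize = 4096; // assumed; the common ext2 default
		size.div_ceil(BLOCK_SIZE)
	}

	fn main() {
		assert_eq!(size_to_blocks(0), 0);
		assert_eq!(size_to_blocks(4096), 1);
		assert_eq!(size_to_blocks(5000), 2);
	}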

@ -1378,7 +1881,8 @@ pub(super) struct RawInode {
	pub flags: u32,
	/// OS dependent Value 1.
	reserved1: u32,
	pub data: [u32; BLOCK_PTR_CNT],
	/// Pointers to blocks.
	pub block_ptrs: BlockPtrs,
	/// File version (for NFS).
	pub generation: u32,
	/// In revision 0, this field is reserved.
@ -1408,7 +1912,7 @@ impl From<&InodeDesc> for RawInode {
			hard_links: inode.hard_links,
			blocks_count: inode.blocks_count,
			flags: inode.flags.bits(),
			data: inode.data,
			block_ptrs: inode.block_ptrs,
			file_acl: match inode.acl {
				Some(acl) if inode.type_ == FileType::File => acl.to_raw() as u32,
				_ => Default::default(),

@ -33,19 +33,20 @@
//! # Limitation
//!
//! Here we summarize the features that need to be implemented in the future.
//! 1. Supports large files.
//! 2. Supports merging small read/write operations.
//! 3. Handles the intermediate failure status correctly.
//! 1. Supports merging small read/write operations.
//! 2. Handles the intermediate failure status correctly.

pub use fs::Ext2;
pub use inode::{FilePerm, FileType, Inode};
pub use super_block::{SuperBlock, MAGIC_NUM};

mod block_group;
mod block_ptr;
mod blocks_hole;
mod dir;
mod fs;
mod impl_for_vfs;
mod indirect_block_cache;
mod inode;
mod prelude;
mod super_block;

@ -1,7 +1,7 @@
// SPDX-License-Identifier: MPL-2.0

pub(super) use core::{
	ops::{Deref, DerefMut},
	ops::{Deref, DerefMut, Range},
	time::Duration,
};

@ -12,7 +12,7 @@ pub(super) use aster_block::{
	BlockDevice, BLOCK_SIZE,
};
pub(super) use aster_frame::{
	sync::{RwMutex, RwMutexReadGuard},
	sync::{RwMutex, RwMutexReadGuard, RwMutexWriteGuard},
	vm::{VmAllocOptions, VmFrame, VmIo, VmSegment},
};
pub(super) use aster_rights::Full;

@ -243,23 +243,22 @@ impl SuperBlock {
	}

	/// Returns the number of free blocks.
	pub fn free_blocks(&self) -> u32 {
	pub fn free_blocks_count(&self) -> u32 {
		self.free_blocks_count
	}

	/// Increases the number of free blocks by `count`.
	pub(super) fn inc_free_blocks(&mut self) {
		self.free_blocks_count += 1;
	pub(super) fn inc_free_blocks(&mut self, count: u32) {
		self.free_blocks_count = self.free_blocks_count.checked_add(count).unwrap();
	}

	/// Decreases the number of free blocks by `count`.
	pub(super) fn dec_free_blocks(&mut self) {
		debug_assert!(self.free_blocks_count > 0);
		self.free_blocks_count -= 1;
	pub(super) fn dec_free_blocks(&mut self, count: u32) {
		self.free_blocks_count = self.free_blocks_count.checked_sub(count).unwrap();
	}

	/// Returns the number of free inodes.
	pub fn free_inodes(&self) -> u32 {
	pub fn free_inodes_count(&self) -> u32 {
		self.free_inodes_count
	}

@ -1,6 +1,6 @@
// SPDX-License-Identifier: MPL-2.0

use core::fmt::Debug;
use core::{fmt::Debug, ops::Range};

use bitvec::prelude::BitVec;

@ -63,6 +63,74 @@ impl IdAlloc {
		}
	}

	/// Allocates a consecutive range of new `id`s.
	///
	/// `count` is the number of consecutive `id`s to allocate; if it is 0, `None` is returned.
	///
	/// If allocation is not possible, it returns `None`.
	///
	/// TODO: Choose a more efficient strategy.
	pub fn alloc_consecutive(&mut self, count: usize) -> Option<Range<usize>> {
		if count == 0 {
			return None;
		}

		// Scan the bitmap from the position `first_available_id`
		// for the first `count` number of consecutive 0's.
		let allocated_range = {
			// Invariant: all bits within `curr_range` are 0's
			let mut curr_range = self.first_available_id..self.first_available_id + 1;
			while curr_range.len() < count && curr_range.end < self.bitset.len() {
				if !self.is_allocated(curr_range.end) {
					curr_range.end += 1;
				} else {
					curr_range = curr_range.end + 1..curr_range.end + 1;
				}
			}

			if curr_range.len() < count {
				return None;
			}

			curr_range
		};

		// Set every bit to 1 within the allocated range
		for id in allocated_range.clone() {
			self.bitset.set(id, true);
		}

		// In case we need to update first_available_id
		if self.is_allocated(self.first_available_id) {
			self.first_available_id = (allocated_range.end..self.bitset.len())
				.find(|&i| !self.bitset[i])
				.map_or(self.bitset.len(), |i| i);
		}

		Some(allocated_range)
	}

	/// Releases the consecutive range of allocated `id`s.
	///
	/// # Panic
	///
	/// If the `range` is out of bounds, this method will panic.
	pub fn free_consecutive(&mut self, range: Range<usize>) {
		if range.is_empty() {
			return;
		}

		let range_start = range.start;
		for id in range {
			debug_assert!(self.is_allocated(id));
			self.bitset.set(id, false);
		}

		if range_start < self.first_available_id {
			self.first_available_id = range_start
		}
	}
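
A brief usage sketch of the two new methods. Note that `IdAlloc::with_capacity` is an assumed constructor for illustration; this diff itself only shows `from_bytes_with_capacity`.

	use aster_util::id_allocator::IdAlloc;

	fn demo() {
		// Assumed constructor: an allocator with 1024 free IDs.
		let mut alloc = IdAlloc::with_capacity(1024);

		// Ask for 8 consecutive IDs; `None` means no run of 8 free IDs exists.
		if let Some(range) = alloc.alloc_consecutive(8) {
			assert_eq!(range.len(), 8);
			// ... use the IDs ...
			alloc.free_consecutive(range);
		}
	}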

	/// Releases the allocated `id`.
	///
	/// # Panic
	///
38	regression/apps/scripts/ext2.sh	Executable file
@ -0,0 +1,38 @@
#!/bin/sh

# SPDX-License-Identifier: MPL-2.0

set -e

check_file_size() {
	local file_name="$1"
	local expected_size="$2"

	if [ ! -f "$file_name" ]; then
		echo "Error: File does not exist."
		return 1
	fi

	# `du -b` reports the apparent size, so the check also holds for sparsely stored files.
	actual_size=$(du -b "$file_name" | cut -f1)

	if [ "$actual_size" -eq "$expected_size" ]; then
		return 0
	else
		echo "Error: File size is incorrect: expected ${expected_size}, but got ${actual_size}."
		return 1
	fi
}

EXT2_DIR=/ext2
cd ${EXT2_DIR}

echo "Start ext2 fs test......"

# Test case for the big file feature
truncate -s 500M test_file.txt
check_file_size test_file.txt $((500 * 1024 * 1024))
truncate -s 2K test_file.txt
check_file_size test_file.txt $((2 * 1024))
sync

echo "All ext2 fs test passed."
@ -8,6 +8,7 @@ SCRIPT_DIR=/regression
cd ${SCRIPT_DIR}

./shell_cmd.sh
./ext2.sh
./process.sh
./network.sh