Add get_mut methods for locks

This commit is contained in:
Ruihan Li
2024-12-12 17:31:45 +08:00
committed by Tate, Hongliang Tian
parent 87e953eac3
commit 53b873f647
7 changed files with 49 additions and 20 deletions

View File

@ -1356,14 +1356,14 @@ impl InodeImpl_ {
BidPath::Indirect(idx) => { BidPath::Indirect(idx) => {
let indirect_bid = self.desc.block_ptrs.indirect(); let indirect_bid = self.desc.block_ptrs.indirect();
assert!(indirect_bid != 0); assert!(indirect_bid != 0);
let mut indirect_blocks = self.indirect_blocks.write(); let indirect_blocks = self.indirect_blocks.get_mut();
let indirect_block = indirect_blocks.find_mut(indirect_bid)?; let indirect_block = indirect_blocks.find_mut(indirect_bid)?;
for (i, bid) in device_range.enumerate() { for (i, bid) in device_range.enumerate() {
indirect_block.write_bid(idx as usize + i, &bid)?; indirect_block.write_bid(idx as usize + i, &bid)?;
} }
} }
BidPath::DbIndirect(lvl1_idx, lvl2_idx) => { BidPath::DbIndirect(lvl1_idx, lvl2_idx) => {
let mut indirect_blocks = self.indirect_blocks.write(); let indirect_blocks = self.indirect_blocks.get_mut();
let lvl1_indirect_bid = { let lvl1_indirect_bid = {
let db_indirect_bid = self.desc.block_ptrs.db_indirect(); let db_indirect_bid = self.desc.block_ptrs.db_indirect();
assert!(db_indirect_bid != 0); assert!(db_indirect_bid != 0);
@ -1378,7 +1378,7 @@ impl InodeImpl_ {
} }
} }
BidPath::TbIndirect(lvl1_idx, lvl2_idx, lvl3_idx) => { BidPath::TbIndirect(lvl1_idx, lvl2_idx, lvl3_idx) => {
let mut indirect_blocks = self.indirect_blocks.write(); let indirect_blocks = self.indirect_blocks.get_mut();
let lvl2_indirect_bid = { let lvl2_indirect_bid = {
let lvl1_indirect_bid = { let lvl1_indirect_bid = {
let tb_indirect_bid = self.desc.block_ptrs.tb_indirect(); let tb_indirect_bid = self.desc.block_ptrs.tb_indirect();
@ -1408,7 +1408,7 @@ impl InodeImpl_ {
fn set_indirect_bids(&mut self, bid: Ext2Bid, indirect_bids: &[Ext2Bid]) -> Result<()> { fn set_indirect_bids(&mut self, bid: Ext2Bid, indirect_bids: &[Ext2Bid]) -> Result<()> {
assert!((1..=3).contains(&indirect_bids.len())); assert!((1..=3).contains(&indirect_bids.len()));
let mut indirect_blocks = self.indirect_blocks.write(); let indirect_blocks = self.indirect_blocks.get_mut();
let bid_path = BidPath::from(bid); let bid_path = BidPath::from(bid);
for indirect_bid in indirect_bids.iter() { for indirect_bid in indirect_bids.iter() {
let indirect_block = IndirectBlock::alloc()?; let indirect_block = IndirectBlock::alloc()?;
@ -1543,7 +1543,7 @@ impl InodeImpl_ {
} }
self.desc.block_ptrs.set_indirect(0); self.desc.block_ptrs.set_indirect(0);
self.indirect_blocks.write().remove(indirect_bid); self.indirect_blocks.get_mut().remove(indirect_bid);
self.fs() self.fs()
.free_blocks(indirect_bid..indirect_bid + 1) .free_blocks(indirect_bid..indirect_bid + 1)
.unwrap(); .unwrap();
@ -1554,22 +1554,21 @@ impl InodeImpl_ {
return Ok(()); return Ok(());
} }
let mut indirect_blocks = self.indirect_blocks.write(); let fs = self.fs();
let indirect_blocks = self.indirect_blocks.get_mut();
let lvl1_indirect_bid = { let lvl1_indirect_bid = {
let db_indirect_block = indirect_blocks.find(db_indirect_bid)?; let db_indirect_block = indirect_blocks.find(db_indirect_bid)?;
db_indirect_block.read_bid(lvl1_idx as usize)? db_indirect_block.read_bid(lvl1_idx as usize)?
}; };
if lvl1_indirect_bid != 0 { if lvl1_indirect_bid != 0 {
indirect_blocks.remove(lvl1_indirect_bid); indirect_blocks.remove(lvl1_indirect_bid);
self.fs() fs.free_blocks(lvl1_indirect_bid..lvl1_indirect_bid + 1)
.free_blocks(lvl1_indirect_bid..lvl1_indirect_bid + 1)
.unwrap(); .unwrap();
} }
if lvl1_idx == 0 { if lvl1_idx == 0 {
self.desc.block_ptrs.set_db_indirect(0); self.desc.block_ptrs.set_db_indirect(0);
indirect_blocks.remove(db_indirect_bid); indirect_blocks.remove(db_indirect_bid);
self.fs() fs.free_blocks(db_indirect_bid..db_indirect_bid + 1)
.free_blocks(db_indirect_bid..db_indirect_bid + 1)
.unwrap(); .unwrap();
} }
} }
@ -1579,7 +1578,8 @@ impl InodeImpl_ {
return Ok(()); return Ok(());
} }
let mut indirect_blocks = self.indirect_blocks.write(); let fs = self.fs();
let indirect_blocks = self.indirect_blocks.get_mut();
let lvl1_indirect_bid = { let lvl1_indirect_bid = {
let tb_indirect_block = indirect_blocks.find(tb_indirect_bid)?; let tb_indirect_block = indirect_blocks.find(tb_indirect_bid)?;
tb_indirect_block.read_bid(lvl1_idx as usize)? tb_indirect_block.read_bid(lvl1_idx as usize)?
@ -1591,14 +1591,12 @@ impl InodeImpl_ {
}; };
if lvl2_indirect_bid != 0 { if lvl2_indirect_bid != 0 {
indirect_blocks.remove(lvl2_indirect_bid); indirect_blocks.remove(lvl2_indirect_bid);
self.fs() fs.free_blocks(lvl2_indirect_bid..lvl2_indirect_bid + 1)
.free_blocks(lvl2_indirect_bid..lvl2_indirect_bid + 1)
.unwrap(); .unwrap();
} }
if lvl2_idx == 0 { if lvl2_idx == 0 {
indirect_blocks.remove(lvl1_indirect_bid); indirect_blocks.remove(lvl1_indirect_bid);
self.fs() fs.free_blocks(lvl1_indirect_bid..lvl1_indirect_bid + 1)
.free_blocks(lvl1_indirect_bid..lvl1_indirect_bid + 1)
.unwrap(); .unwrap();
} }
} }
@ -1606,8 +1604,7 @@ impl InodeImpl_ {
if lvl2_idx == 0 && lvl1_idx == 0 { if lvl2_idx == 0 && lvl1_idx == 0 {
self.desc.block_ptrs.set_tb_indirect(0); self.desc.block_ptrs.set_tb_indirect(0);
indirect_blocks.remove(tb_indirect_bid); indirect_blocks.remove(tb_indirect_bid);
self.fs() fs.free_blocks(tb_indirect_bid..tb_indirect_bid + 1)
.free_blocks(tb_indirect_bid..tb_indirect_bid + 1)
.unwrap(); .unwrap();
} }
} }

View File

@ -696,7 +696,7 @@ impl Socket for StreamSocket {
impl Drop for StreamSocket { impl Drop for StreamSocket {
fn drop(&mut self) { fn drop(&mut self) {
let state = self.state.write().take(); let state = self.state.get_mut().take();
let iface_to_poll = match state { let iface_to_poll = match state {
State::Init(_) => None, State::Init(_) => None,

View File

@ -365,8 +365,8 @@ impl Socket for VsockStreamSocket {
impl Drop for VsockStreamSocket { impl Drop for VsockStreamSocket {
fn drop(&mut self) { fn drop(&mut self) {
let vsockspace = VSOCK_GLOBAL.get().unwrap(); let vsockspace = VSOCK_GLOBAL.get().unwrap();
let inner = self.status.read(); let inner = self.status.get_mut();
match &*inner { match inner {
Status::Init(init) => { Status::Init(init) => {
if let Some(addr) = init.bound_addr() { if let Some(addr) = init.bound_addr() {
vsockspace.recycle_port(&addr.port); vsockspace.recycle_port(&addr.port);

View File

@ -69,6 +69,14 @@ impl<T: ?Sized> Mutex<T> {
}) })
} }
/// Returns a mutable reference to the underlying data.
///
/// This method is zero-cost: no lock is acquired. By holding a mutable reference
/// to the lock, the compiler has already statically guaranteed that access to
/// the data is exclusive, so runtime synchronization is unnecessary.
pub fn get_mut(&mut self) -> &mut T {
// Safe direct access: `&mut self` proves no other reference to the data exists.
self.val.get_mut()
}
/// Releases the mutex and wake up one thread which is blocked on this mutex. /// Releases the mutex and wake up one thread which is blocked on this mutex.
fn unlock(&self) { fn unlock(&self) {
self.release_lock(); self.release_lock();

View File

@ -331,6 +331,14 @@ impl<T: ?Sized, G: Guardian> RwLock<T, G> {
} }
None None
} }
/// Returns a mutable reference to the underlying data.
///
/// This method is zero-cost: no reader or writer lock is acquired. By holding a
/// mutable reference to the lock, the compiler has already statically guaranteed
/// that access to the data is exclusive, so runtime synchronization is unnecessary.
pub fn get_mut(&mut self) -> &mut T {
// Safe direct access: `&mut self` proves no guards (read or write) are outstanding.
self.val.get_mut()
}
} }
impl<T: ?Sized + fmt::Debug, G> fmt::Debug for RwLock<T, G> { impl<T: ?Sized + fmt::Debug, G> fmt::Debug for RwLock<T, G> {

View File

@ -192,6 +192,14 @@ impl<T: ?Sized> RwMutex<T> {
} }
None None
} }
/// Returns a mutable reference to the underlying data.
///
/// This method is zero-cost: no reader or writer mutex is acquired. By holding a
/// mutable reference to the lock, the compiler has already statically guaranteed
/// that access to the data is exclusive, so runtime synchronization is unnecessary.
pub fn get_mut(&mut self) -> &mut T {
// Safe direct access: `&mut self` proves no guards (read or write) are outstanding.
self.val.get_mut()
}
} }
impl<T: ?Sized + fmt::Debug> fmt::Debug for RwMutex<T> { impl<T: ?Sized + fmt::Debug> fmt::Debug for RwMutex<T> {

View File

@ -109,6 +109,14 @@ impl<T: ?Sized, G: Guardian> SpinLock<T, G> {
None None
} }
/// Returns a mutable reference to the underlying data.
///
/// This method is zero-cost: the spin lock is never spun on or acquired. By holding
/// a mutable reference to the lock, the compiler has already statically guaranteed
/// that access to the data is exclusive, so runtime synchronization is unnecessary.
pub fn get_mut(&mut self) -> &mut T {
// Safe direct access through the inner storage: `&mut self` proves exclusivity.
self.inner.val.get_mut()
}
/// Acquires the spin lock, otherwise busy waiting /// Acquires the spin lock, otherwise busy waiting
fn acquire_lock(&self) { fn acquire_lock(&self) {
while !self.try_acquire_lock() { while !self.try_acquire_lock() {