Mirror of https://github.com/asterinas/asterinas.git, synced 2025-06-22 08:53:29 +00:00

Add get_mut methods for locks

commit 53b873f647 (parent 87e953eac3), committed by Tate, Hongliang Tian
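
The change is twofold: each of the kernel's lock types (Mutex, RwLock, RwMutex, SpinLock) gains a get_mut(&mut self) -> &mut T accessor, and call sites that already hold &mut self (the &mut self methods of the ext2 InodeImpl_ and the Drop implementations of the sockets) stop acquiring the lock at runtime and borrow the data directly. Below is a minimal sketch of the before/after pattern, written against std::sync::Mutex so it runs standalone (std's lock and get_mut return Results because of lock poisoning, whereas the kernel variants in this diff return the guard or &mut T directly); Counter is an illustrative type, not from the commit:

    use std::sync::Mutex;

    struct Counter {
        hits: Mutex<u64>,
    }

    impl Counter {
        // Runtime locking: correct, but it acquires the lock even though
        // `&mut self` already guarantees exclusive access.
        fn reset_locked(&mut self) {
            *self.hits.lock().unwrap() = 0;
        }

        // Compile-time exclusivity: `get_mut` borrows the data directly.
        fn reset(&mut self) {
            *self.hits.get_mut().unwrap() = 0;
        }
    }

    fn main() {
        let mut counter = Counter { hits: Mutex::new(3) };
        counter.reset();
        assert_eq!(*counter.hits.lock().unwrap(), 0);
    }

Because the exclusive borrow is checked at compile time, get_mut performs no atomic operations and can never block or deadlock.
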
@@ -1356,14 +1356,14 @@ impl InodeImpl_ {
             BidPath::Indirect(idx) => {
                 let indirect_bid = self.desc.block_ptrs.indirect();
                 assert!(indirect_bid != 0);
-                let mut indirect_blocks = self.indirect_blocks.write();
+                let indirect_blocks = self.indirect_blocks.get_mut();
                 let indirect_block = indirect_blocks.find_mut(indirect_bid)?;
                 for (i, bid) in device_range.enumerate() {
                     indirect_block.write_bid(idx as usize + i, &bid)?;
                 }
             }
             BidPath::DbIndirect(lvl1_idx, lvl2_idx) => {
-                let mut indirect_blocks = self.indirect_blocks.write();
+                let indirect_blocks = self.indirect_blocks.get_mut();
                 let lvl1_indirect_bid = {
                     let db_indirect_bid = self.desc.block_ptrs.db_indirect();
                     assert!(db_indirect_bid != 0);
@@ -1378,7 +1378,7 @@ impl InodeImpl_ {
                 }
             }
             BidPath::TbIndirect(lvl1_idx, lvl2_idx, lvl3_idx) => {
-                let mut indirect_blocks = self.indirect_blocks.write();
+                let indirect_blocks = self.indirect_blocks.get_mut();
                 let lvl2_indirect_bid = {
                     let lvl1_indirect_bid = {
                         let tb_indirect_bid = self.desc.block_ptrs.tb_indirect();
@@ -1408,7 +1408,7 @@ impl InodeImpl_ {
     fn set_indirect_bids(&mut self, bid: Ext2Bid, indirect_bids: &[Ext2Bid]) -> Result<()> {
         assert!((1..=3).contains(&indirect_bids.len()));
 
-        let mut indirect_blocks = self.indirect_blocks.write();
+        let indirect_blocks = self.indirect_blocks.get_mut();
         let bid_path = BidPath::from(bid);
         for indirect_bid in indirect_bids.iter() {
             let indirect_block = IndirectBlock::alloc()?;
@@ -1543,7 +1543,7 @@ impl InodeImpl_ {
         }
 
         self.desc.block_ptrs.set_indirect(0);
-        self.indirect_blocks.write().remove(indirect_bid);
+        self.indirect_blocks.get_mut().remove(indirect_bid);
         self.fs()
             .free_blocks(indirect_bid..indirect_bid + 1)
             .unwrap();
@@ -1554,22 +1554,21 @@ impl InodeImpl_ {
             return Ok(());
         }
 
-        let mut indirect_blocks = self.indirect_blocks.write();
+        let fs = self.fs();
+        let indirect_blocks = self.indirect_blocks.get_mut();
         let lvl1_indirect_bid = {
             let db_indirect_block = indirect_blocks.find(db_indirect_bid)?;
             db_indirect_block.read_bid(lvl1_idx as usize)?
         };
         if lvl1_indirect_bid != 0 {
             indirect_blocks.remove(lvl1_indirect_bid);
-            self.fs()
-                .free_blocks(lvl1_indirect_bid..lvl1_indirect_bid + 1)
+            fs.free_blocks(lvl1_indirect_bid..lvl1_indirect_bid + 1)
                 .unwrap();
         }
         if lvl1_idx == 0 {
             self.desc.block_ptrs.set_db_indirect(0);
             indirect_blocks.remove(db_indirect_bid);
-            self.fs()
-                .free_blocks(db_indirect_bid..db_indirect_bid + 1)
+            fs.free_blocks(db_indirect_bid..db_indirect_bid + 1)
                 .unwrap();
         }
     }
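
Besides swapping write() for get_mut(), this hunk hoists let fs = self.fs(); above the mutable borrow. Presumably this is for the borrow checker: get_mut() keeps self.indirect_blocks (and with it *self) mutably borrowed while indirect_blocks is in use, so a later self.fs() call would no longer compile, whereas the old guard from write() only borrowed self immutably. A standalone sketch of the conflict using std::sync::RwLock; the Fs, Inode, and shrink names are illustrative, not from the commit:

    use std::sync::{Arc, RwLock};

    struct Fs;

    impl Fs {
        fn free_blocks(&self, _range: std::ops::Range<u32>) {}
    }

    struct Inode {
        fs: Arc<Fs>,
        cache: RwLock<Vec<u32>>,
    }

    impl Inode {
        fn fs(&self) -> Arc<Fs> {
            self.fs.clone()
        }

        fn shrink(&mut self) {
            // Hoist the filesystem handle *before* mutably borrowing the field.
            let fs = self.fs();
            let cache = self.cache.get_mut().unwrap();
            // Calling `self.fs()` here instead would not compile: `cache` keeps
            // `self.cache` (and hence `*self`) mutably borrowed.
            fs.free_blocks(0..1);
            cache.clear();
        }
    }

    fn main() {
        let mut inode = Inode { fs: Arc::new(Fs), cache: RwLock::new(vec![1, 2, 3]) };
        inode.shrink();
        assert!(inode.cache.read().unwrap().is_empty());
    }
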
@@ -1579,7 +1578,8 @@ impl InodeImpl_ {
             return Ok(());
         }
 
-        let mut indirect_blocks = self.indirect_blocks.write();
+        let fs = self.fs();
+        let indirect_blocks = self.indirect_blocks.get_mut();
         let lvl1_indirect_bid = {
             let tb_indirect_block = indirect_blocks.find(tb_indirect_bid)?;
             tb_indirect_block.read_bid(lvl1_idx as usize)?
@@ -1591,14 +1591,12 @@ impl InodeImpl_ {
         };
         if lvl2_indirect_bid != 0 {
             indirect_blocks.remove(lvl2_indirect_bid);
-            self.fs()
-                .free_blocks(lvl2_indirect_bid..lvl2_indirect_bid + 1)
+            fs.free_blocks(lvl2_indirect_bid..lvl2_indirect_bid + 1)
                 .unwrap();
         }
         if lvl2_idx == 0 {
             indirect_blocks.remove(lvl1_indirect_bid);
-            self.fs()
-                .free_blocks(lvl1_indirect_bid..lvl1_indirect_bid + 1)
+            fs.free_blocks(lvl1_indirect_bid..lvl1_indirect_bid + 1)
                 .unwrap();
         }
     }
@@ -1606,8 +1604,7 @@ impl InodeImpl_ {
         if lvl2_idx == 0 && lvl1_idx == 0 {
             self.desc.block_ptrs.set_tb_indirect(0);
             indirect_blocks.remove(tb_indirect_bid);
-            self.fs()
-                .free_blocks(tb_indirect_bid..tb_indirect_bid + 1)
+            fs.free_blocks(tb_indirect_bid..tb_indirect_bid + 1)
                 .unwrap();
         }
     }
@@ -696,7 +696,7 @@ impl Socket for StreamSocket {
 
 impl Drop for StreamSocket {
     fn drop(&mut self) {
-        let state = self.state.write().take();
+        let state = self.state.get_mut().take();
 
         let iface_to_poll = match state {
             State::Init(_) => None,
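
Drop::drop takes &mut self, so the dying socket can move its state out without taking the write lock at all, which also removes any possibility of blocking inside drop. A minimal sketch of the pattern with std::sync::Mutex and illustrative state types (the real socket stores a richer state machine and then polls the interface it finds):

    use std::sync::Mutex;

    enum State {
        Init,
        Connected,
    }

    struct StreamSocket {
        state: Mutex<Option<State>>,
    }

    impl Drop for StreamSocket {
        fn drop(&mut self) {
            // `&mut self` already proves exclusive access, so the state can be
            // taken without acquiring the lock (and without any risk of blocking
            // inside drop).
            let state = self.state.get_mut().unwrap().take();
            if let Some(State::Connected) = state {
                // tear down connection-related resources here
            }
        }
    }

    fn main() {
        let _socket = StreamSocket { state: Mutex::new(Some(State::Init)) };
        // dropped at end of scope; `drop` runs without locking
    }
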
@@ -365,8 +365,8 @@ impl Socket for VsockStreamSocket {
 impl Drop for VsockStreamSocket {
     fn drop(&mut self) {
         let vsockspace = VSOCK_GLOBAL.get().unwrap();
-        let inner = self.status.read();
-        match &*inner {
+        let inner = self.status.get_mut();
+        match inner {
             Status::Init(init) => {
                 if let Some(addr) = init.bound_addr() {
                     vsockspace.recycle_port(&addr.port);
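
A small side effect of the vsock change: read() produced a guard, so the old code matched on &*inner to reach the Status value, while get_mut() already returns &mut Status and can be matched on directly (match ergonomics bind the inner fields by reference either way). A standalone sketch with std::sync::RwLock and illustrative types:

    use std::sync::RwLock;

    enum Status {
        Init(u32),
        Connected,
    }

    struct VsockSocket {
        status: RwLock<Status>,
    }

    impl Drop for VsockSocket {
        fn drop(&mut self) {
            // `get_mut` yields `&mut Status` directly, so no `&*guard` reborrow
            // is needed before matching; `port` binds by reference automatically.
            match self.status.get_mut().unwrap() {
                Status::Init(port) => {
                    let _unused_port = port; // e.g. recycle the bound port here
                }
                Status::Connected => {}
            }
        }
    }

    fn main() {
        let _socket = VsockSocket { status: RwLock::new(Status::Init(1024)) };
    }
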
@@ -69,6 +69,14 @@ impl<T: ?Sized> Mutex<T> {
         })
     }
 
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// This method is zero-cost: By holding a mutable reference to the lock, the compiler has
+    /// already statically guaranteed that access to the data is exclusive.
+    pub fn get_mut(&mut self) -> &mut T {
+        self.val.get_mut()
+    }
+
     /// Releases the mutex and wake up one thread which is blocked on this mutex.
     fn unlock(&self) {
         self.release_lock();
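
For comparison, std::sync::Mutex::get_mut offers the same operation but wraps the result in a LockResult because std mutexes track poisoning; the kernel Mutex has no poisoning, so it can hand back &mut T directly. The usage pattern is otherwise identical:

    use std::sync::Mutex;

    fn main() {
        let mut stats = Mutex::new(Vec::<u32>::new());

        // Exclusively owned here, so no locking is needed to mutate.
        stats.get_mut().unwrap().push(42);

        assert_eq!(stats.lock().unwrap().len(), 1);
    }
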
@@ -331,6 +331,14 @@ impl<T: ?Sized, G: Guardian> RwLock<T, G> {
         }
         None
     }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// This method is zero-cost: By holding a mutable reference to the lock, the compiler has
+    /// already statically guaranteed that access to the data is exclusive.
+    pub fn get_mut(&mut self) -> &mut T {
+        self.val.get_mut()
+    }
 }
 
 impl<T: ?Sized + fmt::Debug, G> fmt::Debug for RwLock<T, G> {
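
One common use of get_mut on a reader-writer lock is initialization code that still owns the lock exclusively: the data can be filled in without ever taking a write guard, and only afterwards is the lock shared. A standalone sketch with std::sync::RwLock (same Result-returning caveat as above); build_table is illustrative:

    use std::collections::BTreeMap;
    use std::sync::{Arc, RwLock};

    fn build_table() -> Arc<RwLock<BTreeMap<u32, &'static str>>> {
        let mut table = RwLock::new(BTreeMap::new());

        // Still exclusively owned: populate it without any write() guard.
        let map = table.get_mut().unwrap();
        map.insert(1, "one");
        map.insert(2, "two");

        // From here on, readers and writers must go through the lock.
        Arc::new(table)
    }

    fn main() {
        let table = build_table();
        assert_eq!(table.read().unwrap().get(&1), Some(&"one"));
    }
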
@@ -192,6 +192,14 @@ impl<T: ?Sized> RwMutex<T> {
         }
         None
     }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// This method is zero-cost: By holding a mutable reference to the lock, the compiler has
+    /// already statically guaranteed that access to the data is exclusive.
+    pub fn get_mut(&mut self) -> &mut T {
+        self.val.get_mut()
+    }
 }
 
 impl<T: ?Sized + fmt::Debug> fmt::Debug for RwMutex<T> {
@@ -109,6 +109,14 @@ impl<T: ?Sized, G: Guardian> SpinLock<T, G> {
         None
     }
 
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// This method is zero-cost: By holding a mutable reference to the lock, the compiler has
+    /// already statically guaranteed that access to the data is exclusive.
+    pub fn get_mut(&mut self) -> &mut T {
+        self.inner.val.get_mut()
+    }
+
     /// Acquires the spin lock, otherwise busy waiting
     fn acquire_lock(&self) {
         while !self.try_acquire_lock() {
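
The "zero-cost" wording in the new doc comments is backed by the borrow checker rather than by any runtime check: a guard returned by lock() borrows the lock, so it can never be alive at the same time as a get_mut() call on that lock. Illustrated with std::sync::Mutex:

    use std::sync::Mutex;

    fn main() {
        let mut value = Mutex::new(0u32);

        let guard = value.lock().unwrap();
        // value.get_mut();  // error[E0502]: cannot borrow `value` as mutable
        //                   // because the guard still borrows it immutably
        drop(guard);

        *value.get_mut().unwrap() = 7; // fine once the guard is gone
        assert_eq!(*value.lock().unwrap(), 7);
    }
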