diff --git a/kernel/aster-nix/src/device/pty/pty.rs b/kernel/aster-nix/src/device/pty/pty.rs index 44dbe3121..18fb0896d 100644 --- a/kernel/aster-nix/src/device/pty/pty.rs +++ b/kernel/aster-nix/src/device/pty/pty.rs @@ -62,7 +62,7 @@ impl PtyMaster { } pub(super) fn slave_push_char(&self, ch: u8) { - let mut input = self.input.lock_irq_disabled(); + let mut input = self.input.disable_irq().lock(); input.push_overwrite(ch); self.update_state(&input); } @@ -107,7 +107,7 @@ impl FileIo for PtyMaster { let mut poller = Poller::new(); loop { - let mut input = self.input.lock_irq_disabled(); + let mut input = self.input.disable_irq().lock(); if input.is_empty() { let events = self.pollee.poll(IoEvents::IN, Some(&mut poller)); diff --git a/kernel/aster-nix/src/device/tty/driver.rs b/kernel/aster-nix/src/device/tty/driver.rs index 245ef65f3..b53c6404b 100644 --- a/kernel/aster-nix/src/device/tty/driver.rs +++ b/kernel/aster-nix/src/device/tty/driver.rs @@ -36,7 +36,7 @@ impl TtyDriver { /// Return the tty device in driver's internal table. pub fn lookup(&self, index: usize) -> Result> { - let ttys = self.ttys.lock_irq_disabled(); + let ttys = self.ttys.disable_irq().lock(); // Return the tty device corresponding to idx if index >= ttys.len() { return_errno_with_message!(Errno::ENODEV, "lookup failed. No tty device"); @@ -49,12 +49,12 @@ impl TtyDriver { /// Install a new tty into the driver's internal tables. pub fn install(self: &Arc, tty: Arc) { tty.set_driver(Arc::downgrade(self)); - self.ttys.lock_irq_disabled().push(tty); + self.ttys.disable_irq().lock().push(tty); } /// remove a new tty into the driver's internal tables. pub fn remove(&self, index: usize) -> Result<()> { - let mut ttys = self.ttys.lock_irq_disabled(); + let mut ttys = self.ttys.disable_irq().lock(); if index >= ttys.len() { return_errno_with_message!(Errno::ENODEV, "lookup failed. 
No tty device"); } @@ -66,7 +66,7 @@ impl TtyDriver { pub fn push_char(&self, ch: u8) { // FIXME: should the char send to all ttys? - for tty in &*self.ttys.lock_irq_disabled() { + for tty in &*self.ttys.disable_irq().lock() { tty.push_char(ch); } } diff --git a/kernel/aster-nix/src/device/tty/line_discipline.rs b/kernel/aster-nix/src/device/tty/line_discipline.rs index 1854744e8..60956b86e 100644 --- a/kernel/aster-nix/src/device/tty/line_discipline.rs +++ b/kernel/aster-nix/src/device/tty/line_discipline.rs @@ -106,7 +106,7 @@ impl LineDiscipline { /// Push char to line discipline. pub fn push_char(&self, ch: u8, echo_callback: F2) { - let termios = self.termios.lock_irq_disabled(); + let termios = self.termios.disable_irq().lock(); let ch = if termios.contains_icrnl() && ch == b'\r' { b'\n' @@ -127,7 +127,7 @@ impl LineDiscipline { // Raw mode if !termios.is_canonical_mode() { - self.read_buffer.lock_irq_disabled().push_overwrite(ch); + self.read_buffer.disable_irq().lock().push_overwrite(ch); self.update_readable_state(); return; } @@ -136,12 +136,12 @@ impl LineDiscipline { if ch == *termios.get_special_char(CC_C_CHAR::VKILL) { // Erase current line - self.current_line.lock_irq_disabled().drain(); + self.current_line.disable_irq().lock().drain(); } if ch == *termios.get_special_char(CC_C_CHAR::VERASE) { // Type backspace - let mut current_line = self.current_line.lock_irq_disabled(); + let mut current_line = self.current_line.disable_irq().lock(); if !current_line.is_empty() { current_line.backspace(); } @@ -149,17 +149,17 @@ impl LineDiscipline { if is_line_terminator(ch, &termios) { // If a new line is met, all bytes in current_line will be moved to read_buffer - let mut current_line = self.current_line.lock_irq_disabled(); + let mut current_line = self.current_line.disable_irq().lock(); current_line.push_char(ch); let current_line_chars = current_line.drain(); for char in current_line_chars { - self.read_buffer.lock_irq_disabled().push_overwrite(char); + 
self.read_buffer.disable_irq().lock().push_overwrite(char); } } if is_printable_char(ch) { // Printable character - self.current_line.lock_irq_disabled().push_char(ch); + self.current_line.disable_irq().lock().push_char(ch); } self.update_readable_state(); @@ -178,7 +178,7 @@ impl LineDiscipline { if in_interrupt_context() { // `kernel_signal()` may cause sleep, so only construct parameters here. - self.work_item_para.lock_irq_disabled().kernel_signal = Some(signal); + self.work_item_para.disable_irq().lock().kernel_signal = Some(signal); } else { (self.send_signal)(signal); } @@ -187,14 +187,14 @@ impl LineDiscipline { } pub fn update_readable_state(&self) { - let buffer = self.read_buffer.lock_irq_disabled(); + let buffer = self.read_buffer.disable_irq().lock(); if in_interrupt_context() { // Add/Del events may sleep, so only construct parameters here. if !buffer.is_empty() { - self.work_item_para.lock_irq_disabled().pollee_type = Some(PolleeType::Add); + self.work_item_para.disable_irq().lock().pollee_type = Some(PolleeType::Add); } else { - self.work_item_para.lock_irq_disabled().pollee_type = Some(PolleeType::Del); + self.work_item_para.disable_irq().lock().pollee_type = Some(PolleeType::Del); } submit_work_item(self.work_item.clone(), WorkPriority::High); return; @@ -209,10 +209,16 @@ impl LineDiscipline { /// include all operations that may cause sleep, and processes by a work queue. 
fn update_readable_state_after(&self) { - if let Some(signal) = self.work_item_para.lock_irq_disabled().kernel_signal.take() { + if let Some(signal) = self + .work_item_para + .disable_irq() + .lock() + .kernel_signal + .take() + { (self.send_signal)(signal); }; - if let Some(pollee_type) = self.work_item_para.lock_irq_disabled().pollee_type.take() { + if let Some(pollee_type) = self.work_item_para.disable_irq().lock().pollee_type.take() { match pollee_type { PolleeType::Add => { self.pollee.add_events(IoEvents::IN); @@ -262,13 +268,13 @@ impl LineDiscipline { /// read all bytes buffered to dst, return the actual read length. fn try_read(&self, dst: &mut [u8]) -> Result { let (vmin, vtime) = { - let termios = self.termios.lock_irq_disabled(); + let termios = self.termios.disable_irq().lock(); let vmin = *termios.get_special_char(CC_C_CHAR::VMIN); let vtime = *termios.get_special_char(CC_C_CHAR::VTIME); (vmin, vtime) }; let read_len = { - let len = self.read_buffer.lock_irq_disabled().len(); + let len = self.read_buffer.disable_irq().lock().len(); let max_read_len = len.min(dst.len()); if vmin == 0 && vtime == 0 { // poll read @@ -295,7 +301,7 @@ impl LineDiscipline { /// returns immediately with the lesser of the number of bytes available or the number of bytes requested. /// If no bytes are available, completes immediately, returning 0. 
fn poll_read(&self, dst: &mut [u8]) -> usize { - let mut buffer = self.read_buffer.lock_irq_disabled(); + let mut buffer = self.read_buffer.disable_irq().lock(); let len = buffer.len(); let max_read_len = len.min(dst.len()); if max_read_len == 0 { @@ -304,7 +310,7 @@ impl LineDiscipline { let mut read_len = 0; for dst_i in dst.iter_mut().take(max_read_len) { if let Some(next_char) = buffer.pop() { - let termios = self.termios.lock_irq_disabled(); + let termios = self.termios.disable_irq().lock(); if termios.is_canonical_mode() { // canonical mode, read until meet new line if is_line_terminator(next_char, &termios) { @@ -353,15 +359,15 @@ impl LineDiscipline { /// whether there is buffered data pub fn is_empty(&self) -> bool { - self.read_buffer.lock_irq_disabled().len() == 0 + self.read_buffer.disable_irq().lock().len() == 0 } pub fn termios(&self) -> KernelTermios { - *self.termios.lock_irq_disabled() + *self.termios.disable_irq().lock() } pub fn set_termios(&self, termios: KernelTermios) { - *self.termios.lock_irq_disabled() = termios; + *self.termios.disable_irq().lock() = termios; } pub fn drain_input(&self) { diff --git a/kernel/aster-nix/src/device/tty/mod.rs b/kernel/aster-nix/src/device/tty/mod.rs index 22b772f38..89da51133 100644 --- a/kernel/aster-nix/src/device/tty/mod.rs +++ b/kernel/aster-nix/src/device/tty/mod.rs @@ -60,7 +60,7 @@ impl Tty { } pub fn set_driver(&self, driver: Weak) { - *self.driver.lock_irq_disabled() = driver; + *self.driver.disable_irq().lock() = driver; } pub fn push_char(&self, ch: u8) { diff --git a/kernel/aster-nix/src/fs/pipe.rs b/kernel/aster-nix/src/fs/pipe.rs index adcaefeed..432abaebc 100644 --- a/kernel/aster-nix/src/fs/pipe.rs +++ b/kernel/aster-nix/src/fs/pipe.rs @@ -233,8 +233,8 @@ mod test { // FIXME: `ThreadOptions::new` currently accepts `Fn`, forcing us to use `SpinLock` to gain // internal mutability. We should avoid this `SpinLock` by making `ThreadOptions::new` // accept `FnOnce`. 
- let writer_with_lock = SpinLock::new(Some(writer)); - let reader_with_lock = SpinLock::new(Some(reader)); + let writer_with_lock: SpinLock<_> = SpinLock::new(Some(writer)); + let reader_with_lock: SpinLock<_> = SpinLock::new(Some(reader)); let signal_writer = Arc::new(AtomicBool::new(false)); let signal_reader = signal_writer.clone(); diff --git a/kernel/aster-nix/src/net/iface/common.rs b/kernel/aster-nix/src/net/iface/common.rs index 00cd56bec..620d4fb13 100644 --- a/kernel/aster-nix/src/net/iface/common.rs +++ b/kernel/aster-nix/src/net/iface/common.rs @@ -4,7 +4,7 @@ use alloc::collections::btree_map::Entry; use core::sync::atomic::{AtomicU64, Ordering}; use keyable_arc::KeyableArc; -use ostd::sync::WaitQueue; +use ostd::sync::{LocalIrqDisabled, WaitQueue}; use smoltcp::{ iface::{SocketHandle, SocketSet}, phy::Device, @@ -49,23 +49,25 @@ impl IfaceCommon { /// Acquires the lock to the interface. /// /// *Lock ordering:* [`Self::sockets`] first, [`Self::interface`] second. - pub(super) fn interface(&self) -> SpinLockGuard { - self.interface.lock_irq_disabled() + pub(super) fn interface(&self) -> SpinLockGuard { + self.interface.disable_irq().lock() } /// Acuqires the lock to the sockets. /// /// *Lock ordering:* [`Self::sockets`] first, [`Self::interface`] second. 
- pub(super) fn sockets(&self) -> SpinLockGuard> { - self.sockets.lock_irq_disabled() + pub(super) fn sockets( + &self, + ) -> SpinLockGuard, LocalIrqDisabled> { + self.sockets.disable_irq().lock() } pub(super) fn ipv4_addr(&self) -> Option { - self.interface.lock_irq_disabled().ipv4_addr() + self.interface.disable_irq().lock().ipv4_addr() } pub(super) fn netmask(&self) -> Option { - let interface = self.interface.lock_irq_disabled(); + let interface = self.interface.disable_irq().lock(); let ip_addrs = interface.ip_addrs(); ip_addrs.first().map(|cidr| match cidr { IpCidr::Ipv4(ipv4_cidr) => ipv4_cidr.netmask(), @@ -132,12 +134,12 @@ impl IfaceCommon { let (handle, socket_family, observer) = match socket.into_raw() { (AnyRawSocket::Tcp(tcp_socket), observer) => ( - self.sockets.lock_irq_disabled().add(tcp_socket), + self.sockets.disable_irq().lock().add(tcp_socket), SocketFamily::Tcp, observer, ), (AnyRawSocket::Udp(udp_socket), observer) => ( - self.sockets.lock_irq_disabled().add(udp_socket), + self.sockets.disable_irq().lock().add(udp_socket), SocketFamily::Udp, observer, ), @@ -150,12 +152,12 @@ impl IfaceCommon { /// Remove a socket from the interface pub(super) fn remove_socket(&self, handle: SocketHandle) { - self.sockets.lock_irq_disabled().remove(handle); + self.sockets.disable_irq().lock().remove(handle); } pub(super) fn poll(&self, device: &mut D) { - let mut sockets = self.sockets.lock_irq_disabled(); - let mut interface = self.interface.lock_irq_disabled(); + let mut sockets = self.sockets.disable_irq().lock(); + let mut interface = self.interface.disable_irq().lock(); let timestamp = get_network_timestamp(); let (has_events, poll_at) = { @@ -199,7 +201,8 @@ impl IfaceCommon { let closed_sockets = self .closing_sockets - .lock_irq_disabled() + .disable_irq() + .lock() .extract_if(|closing_socket| closing_socket.is_closed()) .collect::>(); drop(closed_sockets); @@ -244,7 +247,7 @@ impl IfaceCommon { .remove(&keyable_socket); assert!(removed); - let mut 
closing_sockets = self.closing_sockets.lock_irq_disabled(); + let mut closing_sockets = self.closing_sockets.disable_irq().lock(); // Check `is_closed` after holding the lock to avoid race conditions. if keyable_socket.is_closed() { diff --git a/kernel/aster-nix/src/net/iface/mod.rs b/kernel/aster-nix/src/net/iface/mod.rs index d2d9abc5c..22bd2a932 100644 --- a/kernel/aster-nix/src/net/iface/mod.rs +++ b/kernel/aster-nix/src/net/iface/mod.rs @@ -17,6 +17,7 @@ pub use any_socket::{ AnyBoundSocket, AnyUnboundSocket, RawTcpSocket, RawUdpSocket, RECV_BUF_LEN, SEND_BUF_LEN, }; pub use loopback::IfaceLoopback; +use ostd::sync::LocalIrqDisabled; pub use smoltcp::wire::EthernetAddress; pub use util::{spawn_background_poll_thread, BindPortConfig}; pub use virtio::IfaceVirtio; @@ -77,11 +78,11 @@ mod internal { pub trait IfaceInternal { fn common(&self) -> &IfaceCommon; /// The inner socket set - fn sockets(&self) -> SpinLockGuard> { + fn sockets(&self) -> SpinLockGuard, LocalIrqDisabled> { self.common().sockets() } /// The inner iface. - fn iface_inner(&self) -> SpinLockGuard { + fn iface_inner(&self) -> SpinLockGuard { self.common().interface() } /// The time we should do another poll. 
diff --git a/kernel/aster-nix/src/net/iface/virtio.rs b/kernel/aster-nix/src/net/iface/virtio.rs index 1c4e1bd9f..e88879c4a 100644 --- a/kernel/aster-nix/src/net/iface/virtio.rs +++ b/kernel/aster-nix/src/net/iface/virtio.rs @@ -2,6 +2,7 @@ use aster_network::AnyNetworkDevice; use aster_virtio::device::network::DEVICE_NAME; +use ostd::sync::PreemptDisabled; use smoltcp::{ iface::{Config, SocketHandle, SocketSet}, socket::dhcpv4, @@ -12,7 +13,7 @@ use super::{common::IfaceCommon, internal::IfaceInternal, Iface}; use crate::prelude::*; pub struct IfaceVirtio { - driver: Arc>, + driver: Arc>, common: IfaceCommon, dhcp_handle: SocketHandle, weak_self: Weak, @@ -113,7 +114,7 @@ impl Iface for IfaceVirtio { } fn poll(&self) { - let mut driver = self.driver.lock_irq_disabled(); + let mut driver = self.driver.disable_irq().lock(); self.common.poll(&mut *driver); self.process_dhcp(); } diff --git a/kernel/aster-nix/src/net/socket/vsock/common.rs b/kernel/aster-nix/src/net/socket/vsock/common.rs index f55bf2368..ec09e4554 100644 --- a/kernel/aster-nix/src/net/socket/vsock/common.rs +++ b/kernel/aster-nix/src/net/socket/vsock/common.rs @@ -46,11 +46,13 @@ impl VsockSpace { /// Check whether the event is for this socket space fn is_event_for_socket(&self, event: &VsockEvent) -> bool { self.connecting_sockets - .lock_irq_disabled() + .disable_irq() + .lock() .contains_key(&event.destination.into()) || self .listen_sockets - .lock_irq_disabled() + .disable_irq() + .lock() .contains_key(&event.destination.into()) || self .connected_sockets @@ -60,7 +62,7 @@ impl VsockSpace { /// Alloc an unused port range pub fn alloc_ephemeral_port(&self) -> Result { - let mut used_ports = self.used_ports.lock_irq_disabled(); + let mut used_ports = self.used_ports.disable_irq().lock(); // FIXME: the maximal port number is not defined by spec for port in 1024..u32::MAX { if !used_ports.contains(&port) { @@ -73,13 +75,13 @@ impl VsockSpace { /// Bind a port pub fn bind_port(&self, port: u32) -> 
bool { - let mut used_ports = self.used_ports.lock_irq_disabled(); + let mut used_ports = self.used_ports.disable_irq().lock(); used_ports.insert(port) } /// Recycle a port pub fn recycle_port(&self, port: &u32) -> bool { - let mut used_ports = self.used_ports.lock_irq_disabled(); + let mut used_ports = self.used_ports.disable_irq().lock(); used_ports.remove(port) } @@ -105,13 +107,13 @@ impl VsockSpace { addr: VsockSocketAddr, connecting: Arc, ) -> Option> { - let mut connecting_sockets = self.connecting_sockets.lock_irq_disabled(); + let mut connecting_sockets = self.connecting_sockets.disable_irq().lock(); connecting_sockets.insert(addr, connecting) } /// Remove a connecting socket pub fn remove_connecting_socket(&self, addr: &VsockSocketAddr) -> Option> { - let mut connecting_sockets = self.connecting_sockets.lock_irq_disabled(); + let mut connecting_sockets = self.connecting_sockets.disable_irq().lock(); connecting_sockets.remove(addr) } @@ -121,13 +123,13 @@ impl VsockSpace { addr: VsockSocketAddr, listen: Arc, ) -> Option> { - let mut listen_sockets = self.listen_sockets.lock_irq_disabled(); + let mut listen_sockets = self.listen_sockets.disable_irq().lock(); listen_sockets.insert(addr, listen) } /// Remove a listening socket pub fn remove_listen_socket(&self, addr: &VsockSocketAddr) -> Option> { - let mut listen_sockets = self.listen_sockets.lock_irq_disabled(); + let mut listen_sockets = self.listen_sockets.disable_irq().lock(); listen_sockets.remove(addr) } } @@ -135,13 +137,13 @@ impl VsockSpace { impl VsockSpace { /// Get the CID of the guest pub fn guest_cid(&self) -> u32 { - let driver = self.driver.lock_irq_disabled(); + let driver = self.driver.disable_irq().lock(); driver.guest_cid() as u32 } /// Send a request packet for initializing a new connection. 
pub fn request(&self, info: &ConnectionInfo) -> Result<()> { - let mut driver = self.driver.lock_irq_disabled(); + let mut driver = self.driver.disable_irq().lock(); driver .request(info) .map_err(|_| Error::with_message(Errno::EIO, "cannot send connect packet")) @@ -149,7 +151,7 @@ impl VsockSpace { /// Send a response packet for accepting a new connection. pub fn response(&self, info: &ConnectionInfo) -> Result<()> { - let mut driver = self.driver.lock_irq_disabled(); + let mut driver = self.driver.disable_irq().lock(); driver .response(info) .map_err(|_| Error::with_message(Errno::EIO, "cannot send response packet")) @@ -157,7 +159,7 @@ impl VsockSpace { /// Send a shutdown packet to close a connection pub fn shutdown(&self, info: &ConnectionInfo) -> Result<()> { - let mut driver = self.driver.lock_irq_disabled(); + let mut driver = self.driver.disable_irq().lock(); driver .shutdown(info) .map_err(|_| Error::with_message(Errno::EIO, "cannot send shutdown packet")) @@ -165,7 +167,7 @@ impl VsockSpace { /// Send a reset packet to reset a connection pub fn reset(&self, info: &ConnectionInfo) -> Result<()> { - let mut driver = self.driver.lock_irq_disabled(); + let mut driver = self.driver.disable_irq().lock(); driver .reset(info) .map_err(|_| Error::with_message(Errno::EIO, "cannot send reset packet")) @@ -173,7 +175,7 @@ impl VsockSpace { /// Send a credit request packet pub fn request_credit(&self, info: &ConnectionInfo) -> Result<()> { - let mut driver = self.driver.lock_irq_disabled(); + let mut driver = self.driver.disable_irq().lock(); driver .credit_request(info) .map_err(|_| Error::with_message(Errno::EIO, "cannot send credit request packet")) @@ -181,7 +183,7 @@ impl VsockSpace { /// Send a credit update packet pub fn update_credit(&self, info: &ConnectionInfo) -> Result<()> { - let mut driver = self.driver.lock_irq_disabled(); + let mut driver = self.driver.disable_irq().lock(); driver .credit_update(info) .map_err(|_| Error::with_message(Errno::EIO, 
"cannot send credit update packet")) @@ -189,7 +191,7 @@ impl VsockSpace { /// Send a data packet pub fn send(&self, buffer: &[u8], info: &mut ConnectionInfo) -> Result<()> { - let mut driver = self.driver.lock_irq_disabled(); + let mut driver = self.driver.disable_irq().lock(); driver .send(buffer, info) .map_err(|_| Error::with_message(Errno::EIO, "cannot send data packet")) @@ -197,7 +199,7 @@ impl VsockSpace { /// Poll for each event from the driver pub fn poll(&self) -> Result<()> { - let mut driver = self.driver.lock_irq_disabled(); + let mut driver = self.driver.disable_irq().lock(); while let Some(event) = self.poll_single(&mut driver)? { if !self.is_event_for_socket(&event) { @@ -219,7 +221,7 @@ impl VsockSpace { match event.event_type { VsockEventType::ConnectionRequest => { // Preparation for listen socket `accept` - let listen_sockets = self.listen_sockets.lock_irq_disabled(); + let listen_sockets = self.listen_sockets.disable_irq().lock(); let Some(listen) = listen_sockets.get(&event.destination.into()) else { return_errno_with_message!( Errno::EINVAL, @@ -233,7 +235,7 @@ impl VsockSpace { listen.update_io_events(); } VsockEventType::ConnectionResponse => { - let connecting_sockets = self.connecting_sockets.lock_irq_disabled(); + let connecting_sockets = self.connecting_sockets.disable_irq().lock(); let Some(connecting) = connecting_sockets.get(&event.destination.into()) else { return_errno_with_message!( Errno::EINVAL, diff --git a/kernel/aster-nix/src/net/socket/vsock/stream/connected.rs b/kernel/aster-nix/src/net/socket/vsock/stream/connected.rs index 851b9b92d..b2c0dbccf 100644 --- a/kernel/aster-nix/src/net/socket/vsock/stream/connected.rs +++ b/kernel/aster-nix/src/net/socket/vsock/stream/connected.rs @@ -51,7 +51,7 @@ impl Connected { } pub fn try_recv(&self, buf: &mut [u8]) -> Result { - let mut connection = self.connection.lock_irq_disabled(); + let mut connection = self.connection.disable_irq().lock(); let bytes_read = 
connection.buffer.len().min(buf.len()); connection.buffer.pop_slice(&mut buf[..bytes_read]); connection.info.done_forwarding(bytes_read); @@ -69,7 +69,7 @@ impl Connected { } pub fn send(&self, packet: &[u8], flags: SendRecvFlags) -> Result { - let mut connection = self.connection.lock_irq_disabled(); + let mut connection = self.connection.disable_irq().lock(); debug_assert!(flags.is_all_supported()); let buf_len = packet.len(); VSOCK_GLOBAL @@ -81,21 +81,21 @@ impl Connected { } pub fn should_close(&self) -> bool { - let connection = self.connection.lock_irq_disabled(); + let connection = self.connection.disable_irq().lock(); // If buffer is now empty and the peer requested shutdown, finish shutting down the // connection. connection.is_peer_requested_shutdown() && connection.buffer.is_empty() } pub fn is_closed(&self) -> bool { - let connection = self.connection.lock_irq_disabled(); + let connection = self.connection.disable_irq().lock(); connection.is_local_shutdown() } pub fn shutdown(&self, _cmd: SockShutdownCmd) -> Result<()> { // TODO: deal with cmd if self.should_close() { - let mut connection = self.connection.lock_irq_disabled(); + let mut connection = self.connection.disable_irq().lock(); if connection.is_local_shutdown() { return Ok(()); } @@ -106,23 +106,24 @@ impl Connected { Ok(()) } pub fn update_info(&self, event: &VsockEvent) { - let mut connection = self.connection.lock_irq_disabled(); + let mut connection = self.connection.disable_irq().lock(); connection.update_for_event(event) } pub fn get_info(&self) -> ConnectionInfo { - let connection = self.connection.lock_irq_disabled(); + let connection = self.connection.disable_irq().lock(); connection.info.clone() } pub fn add_connection_buffer(&self, bytes: &[u8]) -> bool { - let mut connection = self.connection.lock_irq_disabled(); + let mut connection = self.connection.disable_irq().lock(); connection.add(bytes) } pub fn set_peer_requested_shutdown(&self) { self.connection - .lock_irq_disabled() + 
.disable_irq() + .lock() .set_peer_requested_shutdown() } @@ -131,7 +132,7 @@ impl Connected { } pub fn update_io_events(&self) { - let connection = self.connection.lock_irq_disabled(); + let connection = self.connection.disable_irq().lock(); // receive if !connection.buffer.is_empty() { self.pollee.add_events(IoEvents::IN); diff --git a/kernel/aster-nix/src/net/socket/vsock/stream/connecting.rs b/kernel/aster-nix/src/net/socket/vsock/stream/connecting.rs index 287ec6b7a..2c3a65006 100644 --- a/kernel/aster-nix/src/net/socket/vsock/stream/connecting.rs +++ b/kernel/aster-nix/src/net/socket/vsock/stream/connecting.rs @@ -38,11 +38,11 @@ impl Connecting { } pub fn info(&self) -> ConnectionInfo { - self.info.lock_irq_disabled().clone() + self.info.disable_irq().lock().clone() } pub fn update_info(&self, event: &VsockEvent) { - self.info.lock_irq_disabled().update_for_event(event) + self.info.disable_irq().lock().update_for_event(event) } pub fn poll(&self, mask: IoEvents, poller: Option<&mut Poller>) -> IoEvents { diff --git a/kernel/aster-nix/src/net/socket/vsock/stream/listen.rs b/kernel/aster-nix/src/net/socket/vsock/stream/listen.rs index 9317a6476..6034005cb 100644 --- a/kernel/aster-nix/src/net/socket/vsock/stream/listen.rs +++ b/kernel/aster-nix/src/net/socket/vsock/stream/listen.rs @@ -29,7 +29,7 @@ impl Listen { } pub fn push_incoming(&self, connect: Arc) -> Result<()> { - let mut incoming_connections = self.incoming_connection.lock_irq_disabled(); + let mut incoming_connections = self.incoming_connection.disable_irq().lock(); if incoming_connections.len() >= self.backlog { return_errno_with_message!(Errno::ECONNREFUSED, "queue in listenging socket is full") } @@ -41,7 +41,8 @@ impl Listen { pub fn try_accept(&self) -> Result> { let connection = self .incoming_connection - .lock_irq_disabled() + .disable_irq() + .lock() .pop_front() .ok_or_else(|| { Error::with_message(Errno::EAGAIN, "no pending connection is available") @@ -55,7 +56,7 @@ impl Listen { } pub 
fn update_io_events(&self) { - let incomming_connection = self.incoming_connection.lock_irq_disabled(); + let incomming_connection = self.incoming_connection.disable_irq().lock(); if !incomming_connection.is_empty() { self.pollee.add_events(IoEvents::IN); } else { diff --git a/kernel/aster-nix/src/sched/priority_scheduler.rs b/kernel/aster-nix/src/sched/priority_scheduler.rs index c769876dc..8185124df 100644 --- a/kernel/aster-nix/src/sched/priority_scheduler.rs +++ b/kernel/aster-nix/src/sched/priority_scheduler.rs @@ -56,7 +56,7 @@ impl Scheduler for PreemptScheduler { cpu_id }; - let mut rq = self.rq[target_cpu as usize].lock_irq_disabled(); + let mut rq = self.rq[target_cpu as usize].disable_irq().lock(); if still_in_rq && let Err(_) = runnable.cpu().set_if_is_none(target_cpu) { return None; } @@ -71,13 +71,13 @@ impl Scheduler for PreemptScheduler { } fn local_rq_with(&self, f: &mut dyn FnMut(&dyn LocalRunQueue)) { - let local_rq: &PreemptRunQueue = &self.rq[this_cpu() as usize].lock_irq_disabled(); + let local_rq: &PreemptRunQueue = &self.rq[this_cpu() as usize].disable_irq().lock(); f(local_rq); } fn local_mut_rq_with(&self, f: &mut dyn FnMut(&mut dyn LocalRunQueue)) { let local_rq: &mut PreemptRunQueue = - &mut self.rq[this_cpu() as usize].lock_irq_disabled(); + &mut self.rq[this_cpu() as usize].disable_irq().lock(); f(local_rq); } } diff --git a/kernel/aster-nix/src/thread/work_queue/mod.rs b/kernel/aster-nix/src/thread/work_queue/mod.rs index dccba18dd..a91e74a36 100644 --- a/kernel/aster-nix/src/thread/work_queue/mod.rs +++ b/kernel/aster-nix/src/thread/work_queue/mod.rs @@ -138,7 +138,8 @@ impl WorkQueue { return false; } self.inner - .lock_irq_disabled() + .disable_irq() + .lock() .pending_work_items .push(work_item); if let Some(worker_pool) = self.worker_pool.upgrade() { @@ -150,7 +151,7 @@ impl WorkQueue { /// Request a pending work item. The `request_cpu` indicates the CPU where /// the calling worker is located. 
fn dequeue(&self, request_cpu: u32) -> Option> { - let mut inner = self.inner.lock_irq_disabled(); + let mut inner = self.inner.disable_irq().lock(); let index = inner .pending_work_items .iter() @@ -161,7 +162,8 @@ impl WorkQueue { fn has_pending_work_items(&self, request_cpu: u32) -> bool { self.inner - .lock_irq_disabled() + .disable_irq() + .lock() .pending_work_items .iter() .any(|item| item.is_valid_cpu(request_cpu)) diff --git a/kernel/aster-nix/src/thread/work_queue/worker.rs b/kernel/aster-nix/src/thread/work_queue/worker.rs index be3b30600..173af08f9 100644 --- a/kernel/aster-nix/src/thread/work_queue/worker.rs +++ b/kernel/aster-nix/src/thread/work_queue/worker.rs @@ -87,10 +87,10 @@ impl Worker { if self.is_destroying() { break; } - self.inner.lock_irq_disabled().worker_status = WorkerStatus::Idle; + self.inner.disable_irq().lock().worker_status = WorkerStatus::Idle; worker_pool.idle_current_worker(self.bound_cpu, self.clone()); if !self.is_destroying() { - self.inner.lock_irq_disabled().worker_status = WorkerStatus::Running; + self.inner.disable_irq().lock().worker_status = WorkerStatus::Running; } } } @@ -102,22 +102,22 @@ impl Worker { } pub(super) fn is_idle(&self) -> bool { - self.inner.lock_irq_disabled().worker_status == WorkerStatus::Idle + self.inner.disable_irq().lock().worker_status == WorkerStatus::Idle } pub(super) fn is_destroying(&self) -> bool { - self.inner.lock_irq_disabled().worker_status == WorkerStatus::Destroying + self.inner.disable_irq().lock().worker_status == WorkerStatus::Destroying } pub(super) fn destroy(&self) { - self.inner.lock_irq_disabled().worker_status = WorkerStatus::Destroying; + self.inner.disable_irq().lock().worker_status = WorkerStatus::Destroying; } fn exit(&self) { - self.inner.lock_irq_disabled().worker_status = WorkerStatus::Exited; + self.inner.disable_irq().lock().worker_status = WorkerStatus::Exited; } pub(super) fn is_exit(&self) -> bool { - self.inner.lock_irq_disabled().worker_status == 
WorkerStatus::Exited + self.inner.disable_irq().lock().worker_status == WorkerStatus::Exited } } diff --git a/kernel/aster-nix/src/thread/work_queue/worker_pool.rs b/kernel/aster-nix/src/thread/work_queue/worker_pool.rs index 4dfe9aa2e..c3fd6dd0b 100644 --- a/kernel/aster-nix/src/thread/work_queue/worker_pool.rs +++ b/kernel/aster-nix/src/thread/work_queue/worker_pool.rs @@ -76,12 +76,12 @@ impl LocalWorkerPool { fn add_worker(&self) { let worker = Worker::new(self.parent.clone(), self.cpu_id); - self.workers.lock_irq_disabled().push_back(worker.clone()); + self.workers.disable_irq().lock().push_back(worker.clone()); worker.bound_thread().run(); } fn remove_worker(&self) { - let mut workers = self.workers.lock_irq_disabled(); + let mut workers = self.workers.disable_irq().lock(); for (index, worker) in workers.iter().enumerate() { if worker.is_idle() { worker.destroy(); @@ -116,7 +116,7 @@ impl LocalWorkerPool { } fn destroy_all_workers(&self) { - for worker in self.workers.lock_irq_disabled().iter() { + for worker in self.workers.disable_irq().lock().iter() { worker.destroy(); } self.idle_wait_queue.wake_all(); @@ -149,12 +149,13 @@ impl WorkerPool { } pub fn assign_work_queue(&self, work_queue: Arc) { - self.work_queues.lock_irq_disabled().push(work_queue); + self.work_queues.disable_irq().lock().push(work_queue); } pub fn has_pending_work_items(&self, request_cpu: u32) -> bool { self.work_queues - .lock_irq_disabled() + .disable_irq() + .lock() .iter() .any(|work_queue| work_queue.has_pending_work_items(request_cpu)) } @@ -164,7 +165,7 @@ impl WorkerPool { } pub fn num_workers(&self, cpu_id: u32) -> u16 { - self.local_pool(cpu_id).workers.lock_irq_disabled().len() as u16 + self.local_pool(cpu_id).workers.disable_irq().lock().len() as u16 } pub fn cpu_set(&self) -> &CpuSet { @@ -172,7 +173,7 @@ impl WorkerPool { } pub(super) fn fetch_pending_work_item(&self, request_cpu: u32) -> Option> { - for work_queue in self.work_queues.lock_irq_disabled().iter() { + for 
work_queue in self.work_queues.disable_irq().lock().iter() { let item = work_queue.dequeue(request_cpu); if item.is_some() { return item; diff --git a/kernel/aster-nix/src/time/clocks/cpu_clock.rs b/kernel/aster-nix/src/time/clocks/cpu_clock.rs index 9ce69c482..18a11f488 100644 --- a/kernel/aster-nix/src/time/clocks/cpu_clock.rs +++ b/kernel/aster-nix/src/time/clocks/cpu_clock.rs @@ -30,13 +30,13 @@ impl CpuClock { /// Adds `interval` to the original recorded time to update the `CpuClock`. pub fn add_time(&self, interval: Duration) { - *self.time.lock_irq_disabled() += interval; + *self.time.disable_irq().lock() += interval; } } impl Clock for CpuClock { fn read_time(&self) -> Duration { - *self.time.lock_irq_disabled() + *self.time.disable_irq().lock() } } diff --git a/kernel/aster-nix/src/time/clocks/system_wide.rs b/kernel/aster-nix/src/time/clocks/system_wide.rs index 3be868ee9..ed6c83323 100644 --- a/kernel/aster-nix/src/time/clocks/system_wide.rs +++ b/kernel/aster-nix/src/time/clocks/system_wide.rs @@ -153,7 +153,7 @@ impl Clock for MonotonicClock { impl Clock for RealTimeCoarseClock { fn read_time(&self) -> Duration { - *Self::current_ref().get().unwrap().lock_irq_disabled() + *Self::current_ref().get().unwrap().disable_irq().lock() } } @@ -264,7 +264,7 @@ fn init_jiffies_clock_manager() { fn update_coarse_clock() { let real_time = RealTimeClock::get().read_time(); let current = RealTimeCoarseClock::current_ref().get().unwrap(); - *current.lock_irq_disabled() = real_time; + *current.disable_irq().lock() = real_time; } fn init_coarse_clock() { diff --git a/kernel/aster-nix/src/time/core/timer.rs b/kernel/aster-nix/src/time/core/timer.rs index c086ebe15..0d8530f2d 100644 --- a/kernel/aster-nix/src/time/core/timer.rs +++ b/kernel/aster-nix/src/time/core/timer.rs @@ -57,12 +57,12 @@ impl Timer { /// Set the interval time for this timer. /// The timer will be reset with the interval time upon expiration. 
pub fn set_interval(&self, interval: Duration) { - *self.interval.lock_irq_disabled() = interval; + *self.interval.disable_irq().lock() = interval; } /// Cancel the current timer's set timeout callback. pub fn cancel(&self) { - let timer_callback = self.timer_callback.lock_irq_disabled(); + let timer_callback = self.timer_callback.disable_irq().lock(); if let Some(timer_callback) = timer_callback.upgrade() { timer_callback.cancel(); } @@ -88,7 +88,7 @@ impl Timer { Box::new(move || interval_timer_callback(&timer_weak)), )); - let mut timer_callback = self.timer_callback.lock_irq_disabled(); + let mut timer_callback = self.timer_callback.disable_irq().lock(); if let Some(timer_callback) = timer_callback.upgrade() { timer_callback.cancel(); } @@ -98,7 +98,7 @@ impl Timer { /// Return the current expired time of this timer. pub fn expired_time(&self) -> Duration { - let timer_callback = self.timer_callback.lock_irq_disabled().upgrade(); + let timer_callback = self.timer_callback.disable_irq().lock().upgrade(); timer_callback.map_or(Duration::ZERO, |timer_callback| timer_callback.expired_time) } @@ -124,7 +124,7 @@ impl Timer { /// Returns the interval time of the current timer. pub fn interval(&self) -> Duration { - *self.interval.lock_irq_disabled() + *self.interval.disable_irq().lock() } } @@ -134,7 +134,7 @@ fn interval_timer_callback(timer: &Weak) { }; (timer.registered_callback)(); - let interval = timer.interval.lock_irq_disabled(); + let interval = timer.interval.disable_irq().lock(); if *interval != Duration::ZERO { timer.set_timeout(Timeout::After(*interval)); } @@ -161,7 +161,8 @@ impl TimerManager { fn insert(&self, timer_callback: Arc) { self.timer_callbacks - .lock_irq_disabled() + .disable_irq() + .lock() .push(timer_callback); } @@ -169,7 +170,7 @@ impl TimerManager { /// call the corresponding callback functions. 
pub fn process_expired_timers(&self) { let callbacks = { - let mut timeout_list = self.timer_callbacks.lock_irq_disabled(); + let mut timeout_list = self.timer_callbacks.disable_irq().lock(); if timeout_list.len() == 0 { return; } diff --git a/kernel/comps/console/src/lib.rs b/kernel/comps/console/src/lib.rs index 01c639a4f..f748dd869 100644 --- a/kernel/comps/console/src/lib.rs +++ b/kernel/comps/console/src/lib.rs @@ -31,7 +31,8 @@ pub fn register_device(name: String, device: Arc) { .get() .unwrap() .console_device_table - .lock_irq_disabled() + .disable_irq() + .lock() .insert(name, device); } @@ -40,7 +41,8 @@ pub fn all_devices() -> Vec<(String, Arc)> { .get() .unwrap() .console_device_table - .lock_irq_disabled(); + .disable_irq() + .lock(); console_devs .iter() .map(|(name, device)| (name.clone(), device.clone())) diff --git a/kernel/comps/framebuffer/src/lib.rs b/kernel/comps/framebuffer/src/lib.rs index 3637ca188..d890b4c62 100644 --- a/kernel/comps/framebuffer/src/lib.rs +++ b/kernel/comps/framebuffer/src/lib.rs @@ -204,7 +204,8 @@ pub fn _print(args: fmt::Arguments) { WRITER .get() .unwrap() - .lock_irq_disabled() + .disable_irq() + .lock() .write_fmt(args) .unwrap(); } diff --git a/kernel/comps/network/src/buffer.rs b/kernel/comps/network/src/buffer.rs index 845fc26b2..62d8ba8a8 100644 --- a/kernel/comps/network/src/buffer.rs +++ b/kernel/comps/network/src/buffer.rs @@ -74,7 +74,8 @@ impl HasDaddr for TxBuffer { impl Drop for TxBuffer { fn drop(&mut self) { self.pool - .lock_irq_disabled() + .disable_irq() + .lock() .push_back(self.dma_stream.clone()); } } @@ -145,7 +146,7 @@ fn get_tx_stream_from_pool( nbytes: usize, tx_buffer_pool: &'static SpinLock>, ) -> Option { - let mut pool = tx_buffer_pool.lock_irq_disabled(); + let mut pool = tx_buffer_pool.disable_irq().lock(); let mut cursor = pool.cursor_front_mut(); while let Some(current) = cursor.current() { if current.nbytes() >= nbytes { diff --git a/kernel/comps/network/src/dma_pool.rs 
b/kernel/comps/network/src/dma_pool.rs index ad3762a55..ee8917286 100644 --- a/kernel/comps/network/src/dma_pool.rs +++ b/kernel/comps/network/src/dma_pool.rs @@ -97,7 +97,7 @@ impl DmaPool { pub fn alloc_segment(self: &Arc) -> Result { // Lock order: pool.avail_pages -> pool.all_pages // pool.avail_pages -> page.allocated_segments - let mut avail_pages = self.avail_pages.lock_irq_disabled(); + let mut avail_pages = self.avail_pages.disable_irq().lock(); if avail_pages.is_empty() { /// Allocate a new page let new_page = { @@ -109,7 +109,7 @@ impl DmaPool { pool, )?) }; - let mut all_pages = self.all_pages.lock_irq_disabled(); + let mut all_pages = self.all_pages.disable_irq().lock(); avail_pages.push_back(new_page.clone()); all_pages.push_back(new_page); } @@ -124,7 +124,7 @@ impl DmaPool { /// Returns the number of pages in pool fn num_pages(&self) -> usize { - self.all_pages.lock_irq_disabled().len() + self.all_pages.disable_irq().lock().len() } /// Return segment size in pool @@ -166,7 +166,7 @@ impl DmaPage { } fn alloc_segment(self: &Arc) -> Option { - let mut segments = self.allocated_segments.lock_irq_disabled(); + let mut segments = self.allocated_segments.disable_irq().lock(); let free_segment_index = get_next_free_index(&segments, self.nr_blocks_per_page())?; segments.set(free_segment_index, true); @@ -189,7 +189,7 @@ impl DmaPage { } fn is_full(&self) -> bool { - let segments = self.allocated_segments.lock_irq_disabled(); + let segments = self.allocated_segments.disable_irq().lock(); get_next_free_index(&segments, self.nr_blocks_per_page()).is_none() } } @@ -257,10 +257,10 @@ impl Drop for DmaSegment { // Keep the same lock order as `pool.alloc_segment` // Lock order: pool.avail_pages -> pool.all_pages -> page.allocated_segments - let mut avail_pages = pool.avail_pages.lock_irq_disabled(); - let mut all_pages = pool.all_pages.lock_irq_disabled(); + let mut avail_pages = pool.avail_pages.disable_irq().lock(); + let mut all_pages = 
pool.all_pages.disable_irq().lock(); - let mut allocated_segments = page.allocated_segments.lock_irq_disabled(); + let mut allocated_segments = page.allocated_segments.disable_irq().lock(); let nr_blocks_per_page = PAGE_SIZE / self.size; let became_avail = get_next_free_index(&allocated_segments, nr_blocks_per_page).is_none(); diff --git a/kernel/comps/network/src/lib.rs b/kernel/comps/network/src/lib.rs index e5378f124..570503089 100644 --- a/kernel/comps/network/src/lib.rs +++ b/kernel/comps/network/src/lib.rs @@ -18,7 +18,10 @@ use core::{any::Any, fmt::Debug}; pub use buffer::{RxBuffer, TxBuffer, RX_BUFFER_POOL, TX_BUFFER_POOL}; use component::{init_component, ComponentInitError}; pub use dma_pool::DmaSegment; -use ostd::{sync::SpinLock, Pod}; +use ostd::{ + sync::{PreemptDisabled, SpinLock}, + Pod, +}; use smoltcp::phy; use spin::Once; @@ -52,21 +55,23 @@ pub trait AnyNetworkDevice: Send + Sync + Any + Debug { pub trait NetDeviceIrqHandler = Fn() + Send + Sync + 'static; -pub fn register_device(name: String, device: Arc>) { +pub fn register_device(name: String, device: Arc>) { COMPONENT .get() .unwrap() .network_device_table - .lock_irq_disabled() + .disable_irq() + .lock() .insert(name, (Arc::new(SpinLock::new(Vec::new())), device)); } -pub fn get_device(str: &str) -> Option>> { +pub fn get_device(str: &str) -> Option>> { let table = COMPONENT .get() .unwrap() .network_device_table - .lock_irq_disabled(); + .disable_irq() + .lock(); let (_, device) = table.get(str)?; Some(device.clone()) } @@ -80,11 +85,12 @@ pub fn register_recv_callback(name: &str, callback: impl NetDeviceIrqHandler) { .get() .unwrap() .network_device_table - .lock_irq_disabled(); + .disable_irq() + .lock(); let Some((callbacks, _)) = device_table.get(name) else { return; }; - callbacks.lock_irq_disabled().push(Arc::new(callback)); + callbacks.disable_irq().lock().push(Arc::new(callback)); } pub fn handle_recv_irq(name: &str) { @@ -92,11 +98,12 @@ pub fn handle_recv_irq(name: &str) { .get() 
.unwrap() .network_device_table - .lock_irq_disabled(); + .disable_irq() + .lock(); let Some((callbacks, _)) = device_table.get(name) else { return; }; - let callbacks = callbacks.lock_irq_disabled(); + let callbacks = callbacks.disable_irq().lock(); for callback in callbacks.iter() { callback(); } @@ -107,7 +114,8 @@ pub fn all_devices() -> Vec<(String, NetworkDeviceRef)> { .get() .unwrap() .network_device_table - .lock_irq_disabled(); + .disable_irq() + .lock(); network_devs .iter() .map(|(name, (_, device))| (name.clone(), device.clone())) @@ -115,8 +123,9 @@ pub fn all_devices() -> Vec<(String, NetworkDeviceRef)> { } static COMPONENT: Once = Once::new(); -pub(crate) static NETWORK_IRQ_HANDLERS: Once>>> = - Once::new(); +pub(crate) static NETWORK_IRQ_HANDLERS: Once< + SpinLock>, PreemptDisabled>, +> = Once::new(); #[init_component] fn init() -> Result<(), ComponentInitError> { @@ -127,13 +136,13 @@ fn init() -> Result<(), ComponentInitError> { Ok(()) } -type NetDeviceIrqHandlerListRef = Arc>>>; -type NetworkDeviceRef = Arc>; +type NetDeviceIrqHandlerListRef = Arc>, PreemptDisabled>>; +type NetworkDeviceRef = Arc>; struct Component { /// Device list, the key is device name, value is (callbacks, device); network_device_table: - SpinLock>, + SpinLock, PreemptDisabled>, } impl Component { diff --git a/kernel/comps/virtio/src/device/block/device.rs b/kernel/comps/virtio/src/device/block/device.rs index ab6334849..51c06143b 100644 --- a/kernel/comps/virtio/src/device/block/device.rs +++ b/kernel/comps/virtio/src/device/block/device.rs @@ -174,7 +174,7 @@ impl DeviceInner { info!("Virtio block device handle irq"); // When we enter the IRQs handling function, // IRQs have already been disabled, - // so there is no need to call `lock_irq_disabled`. + // so there is no need to call `disable_irq`. loop { // Pops the complete request let complete_request = { @@ -221,7 +221,7 @@ impl DeviceInner { // TODO: Most logic is the same as read and write, there should be a refactor. 
// TODO: Should return an Err instead of panic if the device fails. fn request_device_id(&self) -> String { - let id = self.id_allocator.lock_irq_disabled().alloc().unwrap(); + let id = self.id_allocator.disable_irq().lock().alloc().unwrap(); let req_slice = { let req_slice = DmaStreamSlice::new(&self.block_requests, id * REQ_SIZE, REQ_SIZE); let req = BlockReq { @@ -250,7 +250,7 @@ impl DeviceInner { let device_id_slice = DmaStreamSlice::new(&device_id_stream, 0, MAX_ID_LENGTH); let outputs = vec![&device_id_slice, &resp_slice]; - let mut queue = self.queue.lock_irq_disabled(); + let mut queue = self.queue.disable_irq().lock(); let token = queue .add_dma_buf(&[&req_slice], outputs.as_slice()) .expect("add queue failed"); @@ -263,7 +263,7 @@ impl DeviceInner { queue.pop_used_with_token(token).expect("pop used failed"); resp_slice.sync().unwrap(); - self.id_allocator.lock_irq_disabled().free(id); + self.id_allocator.disable_irq().lock().free(id); let resp: BlockResp = resp_slice.read_val(0).unwrap(); match RespStatus::try_from(resp.status).unwrap() { RespStatus::Ok => {} @@ -288,7 +288,7 @@ impl DeviceInner { fn read(&self, bio_request: BioRequest) { let dma_streams = Self::dma_stream_map(&bio_request); - let id = self.id_allocator.lock_irq_disabled().alloc().unwrap(); + let id = self.id_allocator.disable_irq().lock().alloc().unwrap(); let req_slice = { let req_slice = DmaStreamSlice::new(&self.block_requests, id * REQ_SIZE, REQ_SIZE); let req = BlockReq { @@ -325,7 +325,7 @@ impl DeviceInner { } loop { - let mut queue = self.queue.lock_irq_disabled(); + let mut queue = self.queue.disable_irq().lock(); if num_used_descs > queue.available_desc() { continue; } @@ -339,7 +339,8 @@ impl DeviceInner { // Records the submitted request let submitted_request = SubmittedRequest::new(id as u16, bio_request, dma_streams); self.submitted_requests - .lock_irq_disabled() + .disable_irq() + .lock() .insert(token, submitted_request); return; } @@ -349,7 +350,7 @@ impl DeviceInner { 
fn write(&self, bio_request: BioRequest) { let dma_streams = Self::dma_stream_map(&bio_request); - let id = self.id_allocator.lock_irq_disabled().alloc().unwrap(); + let id = self.id_allocator.disable_irq().lock().alloc().unwrap(); let req_slice = { let req_slice = DmaStreamSlice::new(&self.block_requests, id * REQ_SIZE, REQ_SIZE); let req = BlockReq { @@ -385,7 +386,7 @@ impl DeviceInner { panic!("The request size surpasses the queue size"); } loop { - let mut queue = self.queue.lock_irq_disabled(); + let mut queue = self.queue.disable_irq().lock(); if num_used_descs > queue.available_desc() { continue; } @@ -399,7 +400,8 @@ impl DeviceInner { // Records the submitted request let submitted_request = SubmittedRequest::new(id as u16, bio_request, dma_streams); self.submitted_requests - .lock_irq_disabled() + .disable_irq() + .lock() .insert(token, submitted_request); return; } diff --git a/kernel/comps/virtio/src/device/console/device.rs b/kernel/comps/virtio/src/device/console/device.rs index 4c6d11270..bb4d1f89c 100644 --- a/kernel/comps/virtio/src/device/console/device.rs +++ b/kernel/comps/virtio/src/device/console/device.rs @@ -32,7 +32,7 @@ pub struct ConsoleDevice { impl AnyConsoleDevice for ConsoleDevice { fn send(&self, value: &[u8]) { - let mut transmit_queue = self.transmit_queue.lock_irq_disabled(); + let mut transmit_queue = self.transmit_queue.disable_irq().lock(); let mut reader = VmReader::from(value); while reader.remain() > 0 { @@ -106,10 +106,10 @@ impl ConsoleDevice { callbacks: RwLock::new(Vec::new()), }); - device.activate_receive_buffer(&mut device.receive_queue.lock_irq_disabled()); + device.activate_receive_buffer(&mut device.receive_queue.disable_irq().lock()); // Register irq callbacks - let mut transport = device.transport.lock_irq_disabled(); + let mut transport = device.transport.disable_irq().lock(); let handle_console_input = { let device = device.clone(); move |_: &TrapFrame| device.handle_recv_irq() @@ -129,7 +129,7 @@ impl 
ConsoleDevice { } fn handle_recv_irq(&self) { - let mut receive_queue = self.receive_queue.lock_irq_disabled(); + let mut receive_queue = self.receive_queue.disable_irq().lock(); let Ok((_, len)) = receive_queue.pop_used() else { return; diff --git a/kernel/comps/virtio/src/device/input/device.rs b/kernel/comps/virtio/src/device/input/device.rs index a035947a2..0550da538 100644 --- a/kernel/comps/virtio/src/device/input/device.rs +++ b/kernel/comps/virtio/src/device/input/device.rs @@ -122,7 +122,7 @@ impl InputDevice { let input_prop = InputProp::from_bits(prop[0]).unwrap(); debug!("input device prop:{:?}", input_prop); - let mut transport = device.transport.lock_irq_disabled(); + let mut transport = device.transport.disable_irq().lock(); fn config_space_change(_: &TrapFrame) { debug!("input device config space change"); } @@ -148,7 +148,7 @@ impl InputDevice { /// Pop the pending event. fn pop_pending_events(&self, handle_event: &impl Fn(&EventBuf) -> bool) { - let mut event_queue = self.event_queue.lock_irq_disabled(); + let mut event_queue = self.event_queue.disable_irq().lock(); // one interrupt may contain several input events, so it should loop while let Ok((token, _)) = event_queue.pop_used() { diff --git a/kernel/comps/virtio/src/device/socket/mod.rs b/kernel/comps/virtio/src/device/socket/mod.rs index df7e2ed5d..652a3cfa2 100644 --- a/kernel/comps/virtio/src/device/socket/mod.rs +++ b/kernel/comps/virtio/src/device/socket/mod.rs @@ -21,18 +21,19 @@ pub fn register_device(name: String, device: Arc>) { VSOCK_DEVICE_TABLE .get() .unwrap() - .lock_irq_disabled() + .disable_irq() + .lock() .insert(name, (Arc::new(SpinLock::new(Vec::new())), device)); } pub fn get_device(str: &str) -> Option>> { - let lock = VSOCK_DEVICE_TABLE.get().unwrap().lock_irq_disabled(); + let lock = VSOCK_DEVICE_TABLE.get().unwrap().disable_irq().lock(); let (_, device) = lock.get(str)?; Some(device.clone()) } pub fn all_devices() -> Vec<(String, Arc>)> { - let vsock_devs = 
VSOCK_DEVICE_TABLE.get().unwrap().lock_irq_disabled(); + let vsock_devs = VSOCK_DEVICE_TABLE.get().unwrap().disable_irq().lock(); vsock_devs .iter() .map(|(name, (_, device))| (name.clone(), device.clone())) @@ -40,19 +41,19 @@ pub fn all_devices() -> Vec<(String, Arc>)> { } pub fn register_recv_callback(name: &str, callback: impl VsockDeviceIrqHandler) { - let lock = VSOCK_DEVICE_TABLE.get().unwrap().lock_irq_disabled(); + let lock = VSOCK_DEVICE_TABLE.get().unwrap().disable_irq().lock(); let Some((callbacks, _)) = lock.get(name) else { return; }; - callbacks.lock_irq_disabled().push(Arc::new(callback)); + callbacks.disable_irq().lock().push(Arc::new(callback)); } pub fn handle_recv_irq(name: &str) { - let lock = VSOCK_DEVICE_TABLE.get().unwrap().lock_irq_disabled(); + let lock = VSOCK_DEVICE_TABLE.get().unwrap().disable_irq().lock(); let Some((callbacks, _)) = lock.get(name) else { return; }; - let lock = callbacks.lock_irq_disabled(); + let lock = callbacks.disable_irq().lock(); for callback in lock.iter() { callback.call(()) } diff --git a/ostd/src/arch/x86/irq.rs b/ostd/src/arch/x86/irq.rs index 7dacd0fff..9b46fb3e8 100644 --- a/ostd/src/arch/x86/irq.rs +++ b/ostd/src/arch/x86/irq.rs @@ -10,7 +10,7 @@ use id_alloc::IdAlloc; use spin::Once; use trapframe::TrapFrame; -use crate::sync::{Mutex, SpinLock, SpinLockGuard}; +use crate::sync::{Mutex, PreemptDisabled, SpinLock, SpinLockGuard}; /// The global allocator for software defined IRQ lines. 
pub(crate) static IRQ_ALLOCATOR: Once> = Once::new(); @@ -101,7 +101,9 @@ impl IrqLine { self.irq_num } - pub fn callback_list(&self) -> SpinLockGuard> { + pub fn callback_list( + &self, + ) -> SpinLockGuard, PreemptDisabled> { self.callback_list.lock() } diff --git a/ostd/src/arch/x86/serial.rs b/ostd/src/arch/x86/serial.rs index 41b8aae0f..9d4226ab0 100644 --- a/ostd/src/arch/x86/serial.rs +++ b/ostd/src/arch/x86/serial.rs @@ -26,7 +26,10 @@ pub type InputCallback = dyn Fn(u8) + Send + Sync + 'static; /// Registers a callback function to be called when there is console input. pub fn register_console_input_callback(f: &'static InputCallback) { - SERIAL_INPUT_CALLBACKS.lock_irq_disabled().push(Arc::new(f)); + SERIAL_INPUT_CALLBACKS + .disable_irq() + .lock() + .push(Arc::new(f)); } struct Stdout; @@ -77,7 +80,8 @@ where CONSOLE_IRQ_CALLBACK .get() .unwrap() - .lock_irq_disabled() + .disable_irq() + .lock() .on_active(callback); } diff --git a/ostd/src/logger.rs b/ostd/src/logger.rs index 28415fafa..0e79d0de9 100644 --- a/ostd/src/logger.rs +++ b/ostd/src/logger.rs @@ -59,7 +59,7 @@ impl log::Log for Logger { // Use a global lock to prevent interleaving of log messages. use crate::sync::SpinLock; static RECORD_LOCK: SpinLock<()> = SpinLock::new(()); - let _lock = RECORD_LOCK.lock_irq_disabled(); + let _lock = RECORD_LOCK.disable_irq().lock(); early_println!("{} {}: {}", timestamp, level, record_str); } diff --git a/ostd/src/mm/dma/mod.rs b/ostd/src/mm/dma/mod.rs index be2346fd3..58a2d109c 100644 --- a/ostd/src/mm/dma/mod.rs +++ b/ostd/src/mm/dma/mod.rs @@ -63,7 +63,7 @@ pub fn init() { /// Checks whether the physical addresses has dma mapping. /// Fail if they have been mapped, otherwise insert them. 
fn check_and_insert_dma_mapping(start_paddr: Paddr, num_pages: usize) -> bool { - let mut mapping_set = DMA_MAPPING_SET.get().unwrap().lock_irq_disabled(); + let mut mapping_set = DMA_MAPPING_SET.get().unwrap().disable_irq().lock(); // Ensure that the addresses used later will not overflow start_paddr.checked_add(num_pages * PAGE_SIZE).unwrap(); for i in 0..num_pages { @@ -81,7 +81,7 @@ fn check_and_insert_dma_mapping(start_paddr: Paddr, num_pages: usize) -> bool { /// Removes a physical address from the dma mapping set. fn remove_dma_mapping(start_paddr: Paddr, num_pages: usize) { - let mut mapping_set = DMA_MAPPING_SET.get().unwrap().lock_irq_disabled(); + let mut mapping_set = DMA_MAPPING_SET.get().unwrap().disable_irq().lock(); // Ensure that the addresses used later will not overflow start_paddr.checked_add(num_pages * PAGE_SIZE).unwrap(); for i in 0..num_pages { diff --git a/ostd/src/mm/heap_allocator.rs b/ostd/src/mm/heap_allocator.rs index b357e54bd..2fcec4378 100644 --- a/ostd/src/mm/heap_allocator.rs +++ b/ostd/src/mm/heap_allocator.rs @@ -53,13 +53,14 @@ impl LockedHeapWithRescue { /// SAFETY: The range [start, start + size) must be a valid memory region. pub unsafe fn init(&self, start: *const u8, size: usize) { - self.heap.lock_irq_disabled().init(start as usize, size); + self.heap.disable_irq().lock().init(start as usize, size); } /// SAFETY: The range [start, start + size) must be a valid memory region. 
unsafe fn add_to_heap(&self, start: usize, size: usize) { self.heap - .lock_irq_disabled() + .disable_irq() + .lock() .add_to_heap(start, start + size) } } @@ -88,7 +89,8 @@ unsafe impl GlobalAlloc for LockedHeapWithRescue { unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { debug_assert!(ptr as usize != 0); self.heap - .lock_irq_disabled() + .disable_irq() + .lock() .dealloc(NonNull::new_unchecked(ptr), layout) } } diff --git a/ostd/src/sync/mod.rs b/ostd/src/sync/mod.rs index b72a1aa44..643afcf06 100644 --- a/ostd/src/sync/mod.rs +++ b/ostd/src/sync/mod.rs @@ -24,6 +24,6 @@ pub use self::{ ArcRwMutexReadGuard, ArcRwMutexUpgradeableGuard, ArcRwMutexWriteGuard, RwMutex, RwMutexReadGuard, RwMutexUpgradeableGuard, RwMutexWriteGuard, }, - spin::{ArcSpinLockGuard, SpinLock, SpinLockGuard}, + spin::{ArcSpinLockGuard, LocalIrqDisabled, PreemptDisabled, SpinLock, SpinLockGuard}, wait::{WaitQueue, Waiter, Waker}, }; diff --git a/ostd/src/sync/rcu/monitor.rs b/ostd/src/sync/rcu/monitor.rs index d694bfa7a..6d8fff2f8 100644 --- a/ostd/src/sync/rcu/monitor.rs +++ b/ostd/src/sync/rcu/monitor.rs @@ -37,7 +37,7 @@ impl RcuMonitor { // on the current CPU. If GP is complete, take the callbacks of the current // GP. let callbacks = { - let mut state = self.state.lock_irq_disabled(); + let mut state = self.state.disable_irq().lock(); if state.current_gp.is_complete() { return; } @@ -71,7 +71,7 @@ impl RcuMonitor { where F: FnOnce() -> () + Send + 'static, { - let mut state = self.state.lock_irq_disabled(); + let mut state = self.state.disable_irq().lock(); state.next_callbacks.push_back(Box::new(f)); diff --git a/ostd/src/sync/spin.rs b/ostd/src/sync/spin.rs index 0dcfab348..dfa2bb8ee 100644 --- a/ostd/src/sync/spin.rs +++ b/ostd/src/sync/spin.rs @@ -6,6 +6,7 @@ use alloc::sync::Arc; use core::{ cell::UnsafeCell, fmt, + marker::PhantomData, ops::{Deref, DerefMut}, sync::atomic::{AtomicBool, Ordering}, }; @@ -16,65 +17,104 @@ use crate::{ }; /// A spin lock. 
-pub struct SpinLock { +/// +/// # Guard behavior +/// +/// The type `G` specifies the guard behavior of the spin lock. While holding the lock, +/// - if `G` is [`PreemptDisabled`], preemption is disabled; +/// - if `G` is [`LocalIrqDisabled`], local IRQs are disabled. +/// +/// The guard behavior can be temporarily upgraded from [`PreemptDisabled`] to +/// [`LocalIrqDisabled`] using the [`disable_irq`] method. +/// +/// [`disable_irq`]: Self::disable_irq +#[repr(transparent)] +pub struct SpinLock { + phantom: PhantomData, + /// Only the last field of a struct may have a dynamically sized type. + /// That's why SpinLockInner is put in the last field. + inner: SpinLockInner, +} + +struct SpinLockInner { lock: AtomicBool, val: UnsafeCell, } -impl SpinLock { +/// A guardian that denotes the guard behavior for holding the spin lock. +pub trait Guardian { + /// The guard type. + type Guard; + + /// Creates a new guard. + fn guard() -> Self::Guard; +} + +/// A guardian that disables preemption while holding the spin lock. +pub struct PreemptDisabled; + +impl Guardian for PreemptDisabled { + type Guard = DisablePreemptGuard; + + fn guard() -> Self::Guard { + disable_preempt() + } +} + +/// A guardian that disables IRQs while holding the spin lock. +/// +/// This guardian would incur a certain time overhead over +/// [`PreemptDisabled`]. So prefer avoiding using this guardian when +/// IRQ handlers are allowed to get executed while holding the +/// lock. For example, if a lock is never used in the interrupt +/// context, then it is ok not to use this guardian in the process context. +pub struct LocalIrqDisabled; + +impl Guardian for LocalIrqDisabled { + type Guard = DisabledLocalIrqGuard; + + fn guard() -> Self::Guard { + disable_local() + } +} + +impl SpinLock { /// Creates a new spin lock. 
pub const fn new(val: T) -> Self { - Self { + let lock_inner = SpinLockInner { lock: AtomicBool::new(false), val: UnsafeCell::new(val), + }; + Self { + phantom: PhantomData, + inner: lock_inner, } } } -impl SpinLock { - /// Acquires the spin lock with disabling the local IRQs. This is the most secure - /// locking way. - /// - /// This method runs in a busy loop until the lock can be acquired. - /// After acquiring the spin lock, all interrupts are disabled. - pub fn lock_irq_disabled(&self) -> SpinLockGuard { - let guard = disable_local(); +impl SpinLock { + /// Converts the guard behavior from disabling preemption to disabling IRQs. + pub fn disable_irq(&self) -> &SpinLock { + let ptr = self as *const SpinLock; + let ptr = ptr as *const SpinLock; + // SAFETY: + // 1. The types `SpinLock`, `SpinLockInner` and `SpinLock` have the same memory layout guaranteed by `#[repr(transparent)]`. + // 2. The specified memory location can be borrowed as an immutable reference for the + // specified lifetime. + unsafe { &*ptr } + } +} + +impl SpinLock { + /// Acquires the spin lock. + pub fn lock(&self) -> SpinLockGuard { + // Notice the guard must be created before acquiring the lock. + let inner_guard = G::guard(); self.acquire_lock(); SpinLockGuard_ { lock: self, - inner_guard: InnerGuard::IrqGuard(guard), - } - } - - /// Tries acquiring the spin lock immedidately with disabling the local IRQs. - pub fn try_lock_irq_disabled(&self) -> Option> { - let irq_guard = disable_local(); - if self.try_acquire_lock() { - let lock_guard = SpinLockGuard_ { - lock: self, - inner_guard: InnerGuard::IrqGuard(irq_guard), - }; - return Some(lock_guard); - } - None - } - - /// Acquires the spin lock without disabling local IRQs. - /// - /// This method is twice as fast as the [`lock_irq_disabled`] method. - /// So prefer using this method over the [`lock_irq_disabled`] method - /// when IRQ handlers are allowed to get executed while - /// holding this lock. 
For example, if a lock is never used - /// in the interrupt context, then it is ok to use this method - /// in the process context. - /// - /// [`lock_irq_disabled`]: Self::lock_irq_disabled - pub fn lock(&self) -> SpinLockGuard { - let guard = disable_preempt(); - self.acquire_lock(); - SpinLockGuard_ { - lock: self, - inner_guard: InnerGuard::PreemptGuard(guard), + guard: inner_guard, } } @@ -84,22 +124,22 @@ impl SpinLock { /// for compile-time checked lifetimes of the lock guard. /// /// [`lock`]: Self::lock - pub fn lock_arc(self: &Arc) -> ArcSpinLockGuard { - let guard = disable_preempt(); + pub fn lock_arc(self: &Arc) -> ArcSpinLockGuard { + let inner_guard = G::guard(); self.acquire_lock(); SpinLockGuard_ { lock: self.clone(), - inner_guard: InnerGuard::PreemptGuard(guard), + guard: inner_guard, } } - /// Tries acquiring the spin lock immedidately without disabling the local IRQs. - pub fn try_lock(&self) -> Option> { - let guard = disable_preempt(); + /// Tries acquiring the spin lock immedidately. + pub fn try_lock(&self) -> Option> { + let inner_guard = G::guard(); if self.try_acquire_lock() { let lock_guard = SpinLockGuard_ { lock: self, - inner_guard: InnerGuard::PreemptGuard(guard), + guard: inner_guard, }; return Some(lock_guard); } @@ -114,72 +154,75 @@ impl SpinLock { } fn try_acquire_lock(&self) -> bool { - self.lock + self.inner + .lock .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) .is_ok() } fn release_lock(&self) { - self.lock.store(false, Ordering::Release); + self.inner.lock.store(false, Ordering::Release); } } -impl fmt::Debug for SpinLock { +impl fmt::Debug for SpinLock { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(&self.val, f) + fmt::Debug::fmt(&self.inner.val, f) } } // SAFETY: Only a single lock holder is permitted to access the inner data of Spinlock. 
-unsafe impl Send for SpinLock {} -unsafe impl Sync for SpinLock {} - -enum InnerGuard { - IrqGuard(DisabledLocalIrqGuard), - PreemptGuard(DisablePreemptGuard), -} +unsafe impl Send for SpinLock {} +unsafe impl Sync for SpinLock {} /// A guard that provides exclusive access to the data protected by a [`SpinLock`]. -pub type SpinLockGuard<'a, T> = SpinLockGuard_>; +pub type SpinLockGuard<'a, T, G> = SpinLockGuard_, G>; /// A guard that provides exclusive access to the data protected by a `Arc`. -pub type ArcSpinLockGuard = SpinLockGuard_>>; +pub type ArcSpinLockGuard = SpinLockGuard_>, G>; -/// The guard of a spin lock that disables the local IRQs. +/// The guard of a spin lock. #[clippy::has_significant_drop] #[must_use] -pub struct SpinLockGuard_>> { - inner_guard: InnerGuard, +pub struct SpinLockGuard_>, G: Guardian> { + guard: G::Guard, lock: R, } -impl>> Deref for SpinLockGuard_ { +impl>, G: Guardian> Deref for SpinLockGuard_ { type Target = T; fn deref(&self) -> &T { - unsafe { &*self.lock.val.get() } + unsafe { &*self.lock.inner.val.get() } } } -impl>> DerefMut for SpinLockGuard_ { +impl>, G: Guardian> DerefMut + for SpinLockGuard_ +{ fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *self.lock.val.get() } + unsafe { &mut *self.lock.inner.val.get() } } } -impl>> Drop for SpinLockGuard_ { +impl>, G: Guardian> Drop for SpinLockGuard_ { fn drop(&mut self) { self.lock.release_lock(); } } -impl>> fmt::Debug for SpinLockGuard_ { +impl>, G: Guardian> fmt::Debug + for SpinLockGuard_ +{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } -impl>> !Send for SpinLockGuard_ {} +impl>, G: Guardian> !Send for SpinLockGuard_ {} // SAFETY: `SpinLockGuard_` can be shared between tasks/threads in same CPU. // As `lock()` is only called when there are no race conditions caused by interrupts. 
-unsafe impl> + Sync> Sync for SpinLockGuard_ {} +unsafe impl> + Sync, G: Guardian> Sync + for SpinLockGuard_ +{ +} diff --git a/ostd/src/sync/wait.rs b/ostd/src/sync/wait.rs index 34b4a1c27..2aa601170 100644 --- a/ostd/src/sync/wait.rs +++ b/ostd/src/sync/wait.rs @@ -123,7 +123,7 @@ impl WaitQueue { } loop { - let mut wakers = self.wakers.lock_irq_disabled(); + let mut wakers = self.wakers.disable_irq().lock(); let Some(waker) = wakers.pop_front() else { return false; }; @@ -147,7 +147,7 @@ impl WaitQueue { let mut num_woken = 0; loop { - let mut wakers = self.wakers.lock_irq_disabled(); + let mut wakers = self.wakers.disable_irq().lock(); let Some(waker) = wakers.pop_front() else { break; }; @@ -171,7 +171,7 @@ impl WaitQueue { } fn enqueue(&self, waker: Arc) { - let mut wakers = self.wakers.lock_irq_disabled(); + let mut wakers = self.wakers.disable_irq().lock(); wakers.push_back(waker); self.num_wakers.fetch_add(1, Ordering::Acquire); } diff --git a/ostd/src/task/scheduler/fifo_scheduler.rs b/ostd/src/task/scheduler/fifo_scheduler.rs index a153e9b53..9e1243885 100644 --- a/ostd/src/task/scheduler/fifo_scheduler.rs +++ b/ostd/src/task/scheduler/fifo_scheduler.rs @@ -51,7 +51,7 @@ impl Scheduler for FifoScheduler { cpu_id }; - let mut rq = self.rq[target_cpu as usize].lock_irq_disabled(); + let mut rq = self.rq[target_cpu as usize].disable_irq().lock(); if still_in_rq && let Err(_) = runnable.cpu().set_if_is_none(target_cpu) { return None; } @@ -61,12 +61,12 @@ impl Scheduler for FifoScheduler { } fn local_rq_with(&self, f: &mut dyn FnMut(&dyn LocalRunQueue)) { - let local_rq: &FifoRunQueue = &self.rq[this_cpu() as usize].lock_irq_disabled(); + let local_rq: &FifoRunQueue = &self.rq[this_cpu() as usize].disable_irq().lock(); f(local_rq); } fn local_mut_rq_with(&self, f: &mut dyn FnMut(&mut dyn LocalRunQueue)) { - let local_rq: &mut FifoRunQueue = &mut self.rq[this_cpu() as usize].lock_irq_disabled(); + let local_rq: &mut FifoRunQueue = &mut self.rq[this_cpu() as 
usize].disable_irq().lock(); f(local_rq); } }