Refactor the API of spinlocks

This commit is contained in:
Cautreoxit
2024-08-09 03:32:27 +00:00
committed by Tate, Hongliang Tian
parent 0160a85ccd
commit c44447d54b
38 changed files with 345 additions and 259 deletions

View File

@ -62,7 +62,7 @@ impl PtyMaster {
}
pub(super) fn slave_push_char(&self, ch: u8) {
let mut input = self.input.lock_irq_disabled();
let mut input = self.input.disable_irq().lock();
input.push_overwrite(ch);
self.update_state(&input);
}
@ -107,7 +107,7 @@ impl FileIo for PtyMaster {
let mut poller = Poller::new();
loop {
let mut input = self.input.lock_irq_disabled();
let mut input = self.input.disable_irq().lock();
if input.is_empty() {
let events = self.pollee.poll(IoEvents::IN, Some(&mut poller));

View File

@ -36,7 +36,7 @@ impl TtyDriver {
/// Return the tty device in the driver's internal table.
pub fn lookup(&self, index: usize) -> Result<Arc<Tty>> {
let ttys = self.ttys.lock_irq_disabled();
let ttys = self.ttys.disable_irq().lock();
// Return the tty device corresponding to idx
if index >= ttys.len() {
return_errno_with_message!(Errno::ENODEV, "lookup failed. No tty device");
@ -49,12 +49,12 @@ impl TtyDriver {
/// Install a new tty into the driver's internal tables.
pub fn install(self: &Arc<Self>, tty: Arc<Tty>) {
tty.set_driver(Arc::downgrade(self));
self.ttys.lock_irq_disabled().push(tty);
self.ttys.disable_irq().lock().push(tty);
}
/// Remove a tty from the driver's internal tables.
pub fn remove(&self, index: usize) -> Result<()> {
let mut ttys = self.ttys.lock_irq_disabled();
let mut ttys = self.ttys.disable_irq().lock();
if index >= ttys.len() {
return_errno_with_message!(Errno::ENODEV, "lookup failed. No tty device");
}
@ -66,7 +66,7 @@ impl TtyDriver {
pub fn push_char(&self, ch: u8) {
// FIXME: should the char be sent to all ttys?
for tty in &*self.ttys.lock_irq_disabled() {
for tty in &*self.ttys.disable_irq().lock() {
tty.push_char(ch);
}
}

View File

@ -106,7 +106,7 @@ impl LineDiscipline {
/// Push char to line discipline.
pub fn push_char<F2: FnMut(&str)>(&self, ch: u8, echo_callback: F2) {
let termios = self.termios.lock_irq_disabled();
let termios = self.termios.disable_irq().lock();
let ch = if termios.contains_icrnl() && ch == b'\r' {
b'\n'
@ -127,7 +127,7 @@ impl LineDiscipline {
// Raw mode
if !termios.is_canonical_mode() {
self.read_buffer.lock_irq_disabled().push_overwrite(ch);
self.read_buffer.disable_irq().lock().push_overwrite(ch);
self.update_readable_state();
return;
}
@ -136,12 +136,12 @@ impl LineDiscipline {
if ch == *termios.get_special_char(CC_C_CHAR::VKILL) {
// Erase current line
self.current_line.lock_irq_disabled().drain();
self.current_line.disable_irq().lock().drain();
}
if ch == *termios.get_special_char(CC_C_CHAR::VERASE) {
// Type backspace
let mut current_line = self.current_line.lock_irq_disabled();
let mut current_line = self.current_line.disable_irq().lock();
if !current_line.is_empty() {
current_line.backspace();
}
@ -149,17 +149,17 @@ impl LineDiscipline {
if is_line_terminator(ch, &termios) {
// If a new line is met, all bytes in current_line will be moved to read_buffer
let mut current_line = self.current_line.lock_irq_disabled();
let mut current_line = self.current_line.disable_irq().lock();
current_line.push_char(ch);
let current_line_chars = current_line.drain();
for char in current_line_chars {
self.read_buffer.lock_irq_disabled().push_overwrite(char);
self.read_buffer.disable_irq().lock().push_overwrite(char);
}
}
if is_printable_char(ch) {
// Printable character
self.current_line.lock_irq_disabled().push_char(ch);
self.current_line.disable_irq().lock().push_char(ch);
}
self.update_readable_state();
@ -178,7 +178,7 @@ impl LineDiscipline {
if in_interrupt_context() {
// `kernel_signal()` may cause sleep, so only construct parameters here.
self.work_item_para.lock_irq_disabled().kernel_signal = Some(signal);
self.work_item_para.disable_irq().lock().kernel_signal = Some(signal);
} else {
(self.send_signal)(signal);
}
@ -187,14 +187,14 @@ impl LineDiscipline {
}
pub fn update_readable_state(&self) {
let buffer = self.read_buffer.lock_irq_disabled();
let buffer = self.read_buffer.disable_irq().lock();
if in_interrupt_context() {
// Add/Del events may sleep, so only construct parameters here.
if !buffer.is_empty() {
self.work_item_para.lock_irq_disabled().pollee_type = Some(PolleeType::Add);
self.work_item_para.disable_irq().lock().pollee_type = Some(PolleeType::Add);
} else {
self.work_item_para.lock_irq_disabled().pollee_type = Some(PolleeType::Del);
self.work_item_para.disable_irq().lock().pollee_type = Some(PolleeType::Del);
}
submit_work_item(self.work_item.clone(), WorkPriority::High);
return;
@ -209,10 +209,16 @@ impl LineDiscipline {
/// include all operations that may cause sleep, and processes by a work queue.
fn update_readable_state_after(&self) {
if let Some(signal) = self.work_item_para.lock_irq_disabled().kernel_signal.take() {
if let Some(signal) = self
.work_item_para
.disable_irq()
.lock()
.kernel_signal
.take()
{
(self.send_signal)(signal);
};
if let Some(pollee_type) = self.work_item_para.lock_irq_disabled().pollee_type.take() {
if let Some(pollee_type) = self.work_item_para.disable_irq().lock().pollee_type.take() {
match pollee_type {
PolleeType::Add => {
self.pollee.add_events(IoEvents::IN);
@ -262,13 +268,13 @@ impl LineDiscipline {
/// read all bytes buffered to dst, return the actual read length.
fn try_read(&self, dst: &mut [u8]) -> Result<usize> {
let (vmin, vtime) = {
let termios = self.termios.lock_irq_disabled();
let termios = self.termios.disable_irq().lock();
let vmin = *termios.get_special_char(CC_C_CHAR::VMIN);
let vtime = *termios.get_special_char(CC_C_CHAR::VTIME);
(vmin, vtime)
};
let read_len = {
let len = self.read_buffer.lock_irq_disabled().len();
let len = self.read_buffer.disable_irq().lock().len();
let max_read_len = len.min(dst.len());
if vmin == 0 && vtime == 0 {
// poll read
@ -295,7 +301,7 @@ impl LineDiscipline {
/// returns immediately with the lesser of the number of bytes available or the number of bytes requested.
/// If no bytes are available, completes immediately, returning 0.
fn poll_read(&self, dst: &mut [u8]) -> usize {
let mut buffer = self.read_buffer.lock_irq_disabled();
let mut buffer = self.read_buffer.disable_irq().lock();
let len = buffer.len();
let max_read_len = len.min(dst.len());
if max_read_len == 0 {
@ -304,7 +310,7 @@ impl LineDiscipline {
let mut read_len = 0;
for dst_i in dst.iter_mut().take(max_read_len) {
if let Some(next_char) = buffer.pop() {
let termios = self.termios.lock_irq_disabled();
let termios = self.termios.disable_irq().lock();
if termios.is_canonical_mode() {
// canonical mode, read until meet new line
if is_line_terminator(next_char, &termios) {
@ -353,15 +359,15 @@ impl LineDiscipline {
/// whether there is buffered data
pub fn is_empty(&self) -> bool {
self.read_buffer.lock_irq_disabled().len() == 0
self.read_buffer.disable_irq().lock().len() == 0
}
pub fn termios(&self) -> KernelTermios {
*self.termios.lock_irq_disabled()
*self.termios.disable_irq().lock()
}
pub fn set_termios(&self, termios: KernelTermios) {
*self.termios.lock_irq_disabled() = termios;
*self.termios.disable_irq().lock() = termios;
}
pub fn drain_input(&self) {

View File

@ -60,7 +60,7 @@ impl Tty {
}
pub fn set_driver(&self, driver: Weak<TtyDriver>) {
*self.driver.lock_irq_disabled() = driver;
*self.driver.disable_irq().lock() = driver;
}
pub fn push_char(&self, ch: u8) {

View File

@ -233,8 +233,8 @@ mod test {
// FIXME: `ThreadOptions::new` currently accepts `Fn`, forcing us to use `SpinLock` to gain
// internal mutability. We should avoid this `SpinLock` by making `ThreadOptions::new`
// accept `FnOnce`.
let writer_with_lock = SpinLock::new(Some(writer));
let reader_with_lock = SpinLock::new(Some(reader));
let writer_with_lock: SpinLock<_> = SpinLock::new(Some(writer));
let reader_with_lock: SpinLock<_> = SpinLock::new(Some(reader));
let signal_writer = Arc::new(AtomicBool::new(false));
let signal_reader = signal_writer.clone();

View File

@ -4,7 +4,7 @@ use alloc::collections::btree_map::Entry;
use core::sync::atomic::{AtomicU64, Ordering};
use keyable_arc::KeyableArc;
use ostd::sync::WaitQueue;
use ostd::sync::{LocalIrqDisabled, WaitQueue};
use smoltcp::{
iface::{SocketHandle, SocketSet},
phy::Device,
@ -49,23 +49,25 @@ impl IfaceCommon {
/// Acquires the lock to the interface.
///
/// *Lock ordering:* [`Self::sockets`] first, [`Self::interface`] second.
pub(super) fn interface(&self) -> SpinLockGuard<smoltcp::iface::Interface> {
self.interface.lock_irq_disabled()
pub(super) fn interface(&self) -> SpinLockGuard<smoltcp::iface::Interface, LocalIrqDisabled> {
self.interface.disable_irq().lock()
}
/// Acquires the lock to the sockets.
///
/// *Lock ordering:* [`Self::sockets`] first, [`Self::interface`] second.
pub(super) fn sockets(&self) -> SpinLockGuard<smoltcp::iface::SocketSet<'static>> {
self.sockets.lock_irq_disabled()
pub(super) fn sockets(
&self,
) -> SpinLockGuard<smoltcp::iface::SocketSet<'static>, LocalIrqDisabled> {
self.sockets.disable_irq().lock()
}
pub(super) fn ipv4_addr(&self) -> Option<Ipv4Address> {
self.interface.lock_irq_disabled().ipv4_addr()
self.interface.disable_irq().lock().ipv4_addr()
}
pub(super) fn netmask(&self) -> Option<Ipv4Address> {
let interface = self.interface.lock_irq_disabled();
let interface = self.interface.disable_irq().lock();
let ip_addrs = interface.ip_addrs();
ip_addrs.first().map(|cidr| match cidr {
IpCidr::Ipv4(ipv4_cidr) => ipv4_cidr.netmask(),
@ -132,12 +134,12 @@ impl IfaceCommon {
let (handle, socket_family, observer) = match socket.into_raw() {
(AnyRawSocket::Tcp(tcp_socket), observer) => (
self.sockets.lock_irq_disabled().add(tcp_socket),
self.sockets.disable_irq().lock().add(tcp_socket),
SocketFamily::Tcp,
observer,
),
(AnyRawSocket::Udp(udp_socket), observer) => (
self.sockets.lock_irq_disabled().add(udp_socket),
self.sockets.disable_irq().lock().add(udp_socket),
SocketFamily::Udp,
observer,
),
@ -150,12 +152,12 @@ impl IfaceCommon {
/// Remove a socket from the interface
pub(super) fn remove_socket(&self, handle: SocketHandle) {
self.sockets.lock_irq_disabled().remove(handle);
self.sockets.disable_irq().lock().remove(handle);
}
pub(super) fn poll<D: Device + ?Sized>(&self, device: &mut D) {
let mut sockets = self.sockets.lock_irq_disabled();
let mut interface = self.interface.lock_irq_disabled();
let mut sockets = self.sockets.disable_irq().lock();
let mut interface = self.interface.disable_irq().lock();
let timestamp = get_network_timestamp();
let (has_events, poll_at) = {
@ -199,7 +201,8 @@ impl IfaceCommon {
let closed_sockets = self
.closing_sockets
.lock_irq_disabled()
.disable_irq()
.lock()
.extract_if(|closing_socket| closing_socket.is_closed())
.collect::<Vec<_>>();
drop(closed_sockets);
@ -244,7 +247,7 @@ impl IfaceCommon {
.remove(&keyable_socket);
assert!(removed);
let mut closing_sockets = self.closing_sockets.lock_irq_disabled();
let mut closing_sockets = self.closing_sockets.disable_irq().lock();
// Check `is_closed` after holding the lock to avoid race conditions.
if keyable_socket.is_closed() {

View File

@ -17,6 +17,7 @@ pub use any_socket::{
AnyBoundSocket, AnyUnboundSocket, RawTcpSocket, RawUdpSocket, RECV_BUF_LEN, SEND_BUF_LEN,
};
pub use loopback::IfaceLoopback;
use ostd::sync::LocalIrqDisabled;
pub use smoltcp::wire::EthernetAddress;
pub use util::{spawn_background_poll_thread, BindPortConfig};
pub use virtio::IfaceVirtio;
@ -77,11 +78,11 @@ mod internal {
pub trait IfaceInternal {
fn common(&self) -> &IfaceCommon;
/// The inner socket set
fn sockets(&self) -> SpinLockGuard<SocketSet<'static>> {
fn sockets(&self) -> SpinLockGuard<SocketSet<'static>, LocalIrqDisabled> {
self.common().sockets()
}
/// The inner iface.
fn iface_inner(&self) -> SpinLockGuard<smoltcp::iface::Interface> {
fn iface_inner(&self) -> SpinLockGuard<smoltcp::iface::Interface, LocalIrqDisabled> {
self.common().interface()
}
/// The time we should do another poll.

View File

@ -2,6 +2,7 @@
use aster_network::AnyNetworkDevice;
use aster_virtio::device::network::DEVICE_NAME;
use ostd::sync::PreemptDisabled;
use smoltcp::{
iface::{Config, SocketHandle, SocketSet},
socket::dhcpv4,
@ -12,7 +13,7 @@ use super::{common::IfaceCommon, internal::IfaceInternal, Iface};
use crate::prelude::*;
pub struct IfaceVirtio {
driver: Arc<SpinLock<dyn AnyNetworkDevice>>,
driver: Arc<SpinLock<dyn AnyNetworkDevice, PreemptDisabled>>,
common: IfaceCommon,
dhcp_handle: SocketHandle,
weak_self: Weak<Self>,
@ -113,7 +114,7 @@ impl Iface for IfaceVirtio {
}
fn poll(&self) {
let mut driver = self.driver.lock_irq_disabled();
let mut driver = self.driver.disable_irq().lock();
self.common.poll(&mut *driver);
self.process_dhcp();
}

View File

@ -46,11 +46,13 @@ impl VsockSpace {
/// Check whether the event is for this socket space
fn is_event_for_socket(&self, event: &VsockEvent) -> bool {
self.connecting_sockets
.lock_irq_disabled()
.disable_irq()
.lock()
.contains_key(&event.destination.into())
|| self
.listen_sockets
.lock_irq_disabled()
.disable_irq()
.lock()
.contains_key(&event.destination.into())
|| self
.connected_sockets
@ -60,7 +62,7 @@ impl VsockSpace {
/// Alloc an unused port range
pub fn alloc_ephemeral_port(&self) -> Result<u32> {
let mut used_ports = self.used_ports.lock_irq_disabled();
let mut used_ports = self.used_ports.disable_irq().lock();
// FIXME: the maximal port number is not defined by spec
for port in 1024..u32::MAX {
if !used_ports.contains(&port) {
@ -73,13 +75,13 @@ impl VsockSpace {
/// Bind a port
pub fn bind_port(&self, port: u32) -> bool {
let mut used_ports = self.used_ports.lock_irq_disabled();
let mut used_ports = self.used_ports.disable_irq().lock();
used_ports.insert(port)
}
/// Recycle a port
pub fn recycle_port(&self, port: &u32) -> bool {
let mut used_ports = self.used_ports.lock_irq_disabled();
let mut used_ports = self.used_ports.disable_irq().lock();
used_ports.remove(port)
}
@ -105,13 +107,13 @@ impl VsockSpace {
addr: VsockSocketAddr,
connecting: Arc<Connecting>,
) -> Option<Arc<Connecting>> {
let mut connecting_sockets = self.connecting_sockets.lock_irq_disabled();
let mut connecting_sockets = self.connecting_sockets.disable_irq().lock();
connecting_sockets.insert(addr, connecting)
}
/// Remove a connecting socket
pub fn remove_connecting_socket(&self, addr: &VsockSocketAddr) -> Option<Arc<Connecting>> {
let mut connecting_sockets = self.connecting_sockets.lock_irq_disabled();
let mut connecting_sockets = self.connecting_sockets.disable_irq().lock();
connecting_sockets.remove(addr)
}
@ -121,13 +123,13 @@ impl VsockSpace {
addr: VsockSocketAddr,
listen: Arc<Listen>,
) -> Option<Arc<Listen>> {
let mut listen_sockets = self.listen_sockets.lock_irq_disabled();
let mut listen_sockets = self.listen_sockets.disable_irq().lock();
listen_sockets.insert(addr, listen)
}
/// Remove a listening socket
pub fn remove_listen_socket(&self, addr: &VsockSocketAddr) -> Option<Arc<Listen>> {
let mut listen_sockets = self.listen_sockets.lock_irq_disabled();
let mut listen_sockets = self.listen_sockets.disable_irq().lock();
listen_sockets.remove(addr)
}
}
@ -135,13 +137,13 @@ impl VsockSpace {
impl VsockSpace {
/// Get the CID of the guest
pub fn guest_cid(&self) -> u32 {
let driver = self.driver.lock_irq_disabled();
let driver = self.driver.disable_irq().lock();
driver.guest_cid() as u32
}
/// Send a request packet for initializing a new connection.
pub fn request(&self, info: &ConnectionInfo) -> Result<()> {
let mut driver = self.driver.lock_irq_disabled();
let mut driver = self.driver.disable_irq().lock();
driver
.request(info)
.map_err(|_| Error::with_message(Errno::EIO, "cannot send connect packet"))
@ -149,7 +151,7 @@ impl VsockSpace {
/// Send a response packet for accepting a new connection.
pub fn response(&self, info: &ConnectionInfo) -> Result<()> {
let mut driver = self.driver.lock_irq_disabled();
let mut driver = self.driver.disable_irq().lock();
driver
.response(info)
.map_err(|_| Error::with_message(Errno::EIO, "cannot send response packet"))
@ -157,7 +159,7 @@ impl VsockSpace {
/// Send a shutdown packet to close a connection
pub fn shutdown(&self, info: &ConnectionInfo) -> Result<()> {
let mut driver = self.driver.lock_irq_disabled();
let mut driver = self.driver.disable_irq().lock();
driver
.shutdown(info)
.map_err(|_| Error::with_message(Errno::EIO, "cannot send shutdown packet"))
@ -165,7 +167,7 @@ impl VsockSpace {
/// Send a reset packet to reset a connection
pub fn reset(&self, info: &ConnectionInfo) -> Result<()> {
let mut driver = self.driver.lock_irq_disabled();
let mut driver = self.driver.disable_irq().lock();
driver
.reset(info)
.map_err(|_| Error::with_message(Errno::EIO, "cannot send reset packet"))
@ -173,7 +175,7 @@ impl VsockSpace {
/// Send a credit request packet
pub fn request_credit(&self, info: &ConnectionInfo) -> Result<()> {
let mut driver = self.driver.lock_irq_disabled();
let mut driver = self.driver.disable_irq().lock();
driver
.credit_request(info)
.map_err(|_| Error::with_message(Errno::EIO, "cannot send credit request packet"))
@ -181,7 +183,7 @@ impl VsockSpace {
/// Send a credit update packet
pub fn update_credit(&self, info: &ConnectionInfo) -> Result<()> {
let mut driver = self.driver.lock_irq_disabled();
let mut driver = self.driver.disable_irq().lock();
driver
.credit_update(info)
.map_err(|_| Error::with_message(Errno::EIO, "cannot send credit update packet"))
@ -189,7 +191,7 @@ impl VsockSpace {
/// Send a data packet
pub fn send(&self, buffer: &[u8], info: &mut ConnectionInfo) -> Result<()> {
let mut driver = self.driver.lock_irq_disabled();
let mut driver = self.driver.disable_irq().lock();
driver
.send(buffer, info)
.map_err(|_| Error::with_message(Errno::EIO, "cannot send data packet"))
@ -197,7 +199,7 @@ impl VsockSpace {
/// Poll for each event from the driver
pub fn poll(&self) -> Result<()> {
let mut driver = self.driver.lock_irq_disabled();
let mut driver = self.driver.disable_irq().lock();
while let Some(event) = self.poll_single(&mut driver)? {
if !self.is_event_for_socket(&event) {
@ -219,7 +221,7 @@ impl VsockSpace {
match event.event_type {
VsockEventType::ConnectionRequest => {
// Preparation for listen socket `accept`
let listen_sockets = self.listen_sockets.lock_irq_disabled();
let listen_sockets = self.listen_sockets.disable_irq().lock();
let Some(listen) = listen_sockets.get(&event.destination.into()) else {
return_errno_with_message!(
Errno::EINVAL,
@ -233,7 +235,7 @@ impl VsockSpace {
listen.update_io_events();
}
VsockEventType::ConnectionResponse => {
let connecting_sockets = self.connecting_sockets.lock_irq_disabled();
let connecting_sockets = self.connecting_sockets.disable_irq().lock();
let Some(connecting) = connecting_sockets.get(&event.destination.into()) else {
return_errno_with_message!(
Errno::EINVAL,

View File

@ -51,7 +51,7 @@ impl Connected {
}
pub fn try_recv(&self, buf: &mut [u8]) -> Result<usize> {
let mut connection = self.connection.lock_irq_disabled();
let mut connection = self.connection.disable_irq().lock();
let bytes_read = connection.buffer.len().min(buf.len());
connection.buffer.pop_slice(&mut buf[..bytes_read]);
connection.info.done_forwarding(bytes_read);
@ -69,7 +69,7 @@ impl Connected {
}
pub fn send(&self, packet: &[u8], flags: SendRecvFlags) -> Result<usize> {
let mut connection = self.connection.lock_irq_disabled();
let mut connection = self.connection.disable_irq().lock();
debug_assert!(flags.is_all_supported());
let buf_len = packet.len();
VSOCK_GLOBAL
@ -81,21 +81,21 @@ impl Connected {
}
pub fn should_close(&self) -> bool {
let connection = self.connection.lock_irq_disabled();
let connection = self.connection.disable_irq().lock();
// If buffer is now empty and the peer requested shutdown, finish shutting down the
// connection.
connection.is_peer_requested_shutdown() && connection.buffer.is_empty()
}
pub fn is_closed(&self) -> bool {
let connection = self.connection.lock_irq_disabled();
let connection = self.connection.disable_irq().lock();
connection.is_local_shutdown()
}
pub fn shutdown(&self, _cmd: SockShutdownCmd) -> Result<()> {
// TODO: deal with cmd
if self.should_close() {
let mut connection = self.connection.lock_irq_disabled();
let mut connection = self.connection.disable_irq().lock();
if connection.is_local_shutdown() {
return Ok(());
}
@ -106,23 +106,24 @@ impl Connected {
Ok(())
}
pub fn update_info(&self, event: &VsockEvent) {
let mut connection = self.connection.lock_irq_disabled();
let mut connection = self.connection.disable_irq().lock();
connection.update_for_event(event)
}
pub fn get_info(&self) -> ConnectionInfo {
let connection = self.connection.lock_irq_disabled();
let connection = self.connection.disable_irq().lock();
connection.info.clone()
}
pub fn add_connection_buffer(&self, bytes: &[u8]) -> bool {
let mut connection = self.connection.lock_irq_disabled();
let mut connection = self.connection.disable_irq().lock();
connection.add(bytes)
}
pub fn set_peer_requested_shutdown(&self) {
self.connection
.lock_irq_disabled()
.disable_irq()
.lock()
.set_peer_requested_shutdown()
}
@ -131,7 +132,7 @@ impl Connected {
}
pub fn update_io_events(&self) {
let connection = self.connection.lock_irq_disabled();
let connection = self.connection.disable_irq().lock();
// receive
if !connection.buffer.is_empty() {
self.pollee.add_events(IoEvents::IN);

View File

@ -38,11 +38,11 @@ impl Connecting {
}
pub fn info(&self) -> ConnectionInfo {
self.info.lock_irq_disabled().clone()
self.info.disable_irq().lock().clone()
}
pub fn update_info(&self, event: &VsockEvent) {
self.info.lock_irq_disabled().update_for_event(event)
self.info.disable_irq().lock().update_for_event(event)
}
pub fn poll(&self, mask: IoEvents, poller: Option<&mut Poller>) -> IoEvents {

View File

@ -29,7 +29,7 @@ impl Listen {
}
pub fn push_incoming(&self, connect: Arc<Connected>) -> Result<()> {
let mut incoming_connections = self.incoming_connection.lock_irq_disabled();
let mut incoming_connections = self.incoming_connection.disable_irq().lock();
if incoming_connections.len() >= self.backlog {
return_errno_with_message!(Errno::ECONNREFUSED, "queue in listenging socket is full")
}
@ -41,7 +41,8 @@ impl Listen {
pub fn try_accept(&self) -> Result<Arc<Connected>> {
let connection = self
.incoming_connection
.lock_irq_disabled()
.disable_irq()
.lock()
.pop_front()
.ok_or_else(|| {
Error::with_message(Errno::EAGAIN, "no pending connection is available")
@ -55,7 +56,7 @@ impl Listen {
}
pub fn update_io_events(&self) {
let incomming_connection = self.incoming_connection.lock_irq_disabled();
let incomming_connection = self.incoming_connection.disable_irq().lock();
if !incomming_connection.is_empty() {
self.pollee.add_events(IoEvents::IN);
} else {

View File

@ -56,7 +56,7 @@ impl<T: Sync + Send + PreemptSchedInfo> Scheduler<T> for PreemptScheduler<T> {
cpu_id
};
let mut rq = self.rq[target_cpu as usize].lock_irq_disabled();
let mut rq = self.rq[target_cpu as usize].disable_irq().lock();
if still_in_rq && let Err(_) = runnable.cpu().set_if_is_none(target_cpu) {
return None;
}
@ -71,13 +71,13 @@ impl<T: Sync + Send + PreemptSchedInfo> Scheduler<T> for PreemptScheduler<T> {
}
fn local_rq_with(&self, f: &mut dyn FnMut(&dyn LocalRunQueue<T>)) {
let local_rq: &PreemptRunQueue<T> = &self.rq[this_cpu() as usize].lock_irq_disabled();
let local_rq: &PreemptRunQueue<T> = &self.rq[this_cpu() as usize].disable_irq().lock();
f(local_rq);
}
fn local_mut_rq_with(&self, f: &mut dyn FnMut(&mut dyn LocalRunQueue<T>)) {
let local_rq: &mut PreemptRunQueue<T> =
&mut self.rq[this_cpu() as usize].lock_irq_disabled();
&mut self.rq[this_cpu() as usize].disable_irq().lock();
f(local_rq);
}
}

View File

@ -138,7 +138,8 @@ impl WorkQueue {
return false;
}
self.inner
.lock_irq_disabled()
.disable_irq()
.lock()
.pending_work_items
.push(work_item);
if let Some(worker_pool) = self.worker_pool.upgrade() {
@ -150,7 +151,7 @@ impl WorkQueue {
/// Request a pending work item. The `request_cpu` indicates the CPU where
/// the calling worker is located.
fn dequeue(&self, request_cpu: u32) -> Option<Arc<WorkItem>> {
let mut inner = self.inner.lock_irq_disabled();
let mut inner = self.inner.disable_irq().lock();
let index = inner
.pending_work_items
.iter()
@ -161,7 +162,8 @@ impl WorkQueue {
fn has_pending_work_items(&self, request_cpu: u32) -> bool {
self.inner
.lock_irq_disabled()
.disable_irq()
.lock()
.pending_work_items
.iter()
.any(|item| item.is_valid_cpu(request_cpu))

View File

@ -87,10 +87,10 @@ impl Worker {
if self.is_destroying() {
break;
}
self.inner.lock_irq_disabled().worker_status = WorkerStatus::Idle;
self.inner.disable_irq().lock().worker_status = WorkerStatus::Idle;
worker_pool.idle_current_worker(self.bound_cpu, self.clone());
if !self.is_destroying() {
self.inner.lock_irq_disabled().worker_status = WorkerStatus::Running;
self.inner.disable_irq().lock().worker_status = WorkerStatus::Running;
}
}
}
@ -102,22 +102,22 @@ impl Worker {
}
pub(super) fn is_idle(&self) -> bool {
self.inner.lock_irq_disabled().worker_status == WorkerStatus::Idle
self.inner.disable_irq().lock().worker_status == WorkerStatus::Idle
}
pub(super) fn is_destroying(&self) -> bool {
self.inner.lock_irq_disabled().worker_status == WorkerStatus::Destroying
self.inner.disable_irq().lock().worker_status == WorkerStatus::Destroying
}
pub(super) fn destroy(&self) {
self.inner.lock_irq_disabled().worker_status = WorkerStatus::Destroying;
self.inner.disable_irq().lock().worker_status = WorkerStatus::Destroying;
}
fn exit(&self) {
self.inner.lock_irq_disabled().worker_status = WorkerStatus::Exited;
self.inner.disable_irq().lock().worker_status = WorkerStatus::Exited;
}
pub(super) fn is_exit(&self) -> bool {
self.inner.lock_irq_disabled().worker_status == WorkerStatus::Exited
self.inner.disable_irq().lock().worker_status == WorkerStatus::Exited
}
}

View File

@ -76,12 +76,12 @@ impl LocalWorkerPool {
fn add_worker(&self) {
let worker = Worker::new(self.parent.clone(), self.cpu_id);
self.workers.lock_irq_disabled().push_back(worker.clone());
self.workers.disable_irq().lock().push_back(worker.clone());
worker.bound_thread().run();
}
fn remove_worker(&self) {
let mut workers = self.workers.lock_irq_disabled();
let mut workers = self.workers.disable_irq().lock();
for (index, worker) in workers.iter().enumerate() {
if worker.is_idle() {
worker.destroy();
@ -116,7 +116,7 @@ impl LocalWorkerPool {
}
fn destroy_all_workers(&self) {
for worker in self.workers.lock_irq_disabled().iter() {
for worker in self.workers.disable_irq().lock().iter() {
worker.destroy();
}
self.idle_wait_queue.wake_all();
@ -149,12 +149,13 @@ impl WorkerPool {
}
pub fn assign_work_queue(&self, work_queue: Arc<WorkQueue>) {
self.work_queues.lock_irq_disabled().push(work_queue);
self.work_queues.disable_irq().lock().push(work_queue);
}
pub fn has_pending_work_items(&self, request_cpu: u32) -> bool {
self.work_queues
.lock_irq_disabled()
.disable_irq()
.lock()
.iter()
.any(|work_queue| work_queue.has_pending_work_items(request_cpu))
}
@ -164,7 +165,7 @@ impl WorkerPool {
}
pub fn num_workers(&self, cpu_id: u32) -> u16 {
self.local_pool(cpu_id).workers.lock_irq_disabled().len() as u16
self.local_pool(cpu_id).workers.disable_irq().lock().len() as u16
}
pub fn cpu_set(&self) -> &CpuSet {
@ -172,7 +173,7 @@ impl WorkerPool {
}
pub(super) fn fetch_pending_work_item(&self, request_cpu: u32) -> Option<Arc<WorkItem>> {
for work_queue in self.work_queues.lock_irq_disabled().iter() {
for work_queue in self.work_queues.disable_irq().lock().iter() {
let item = work_queue.dequeue(request_cpu);
if item.is_some() {
return item;

View File

@ -30,13 +30,13 @@ impl CpuClock {
/// Adds `interval` to the original recorded time to update the `CpuClock`.
pub fn add_time(&self, interval: Duration) {
*self.time.lock_irq_disabled() += interval;
*self.time.disable_irq().lock() += interval;
}
}
impl Clock for CpuClock {
fn read_time(&self) -> Duration {
*self.time.lock_irq_disabled()
*self.time.disable_irq().lock()
}
}

View File

@ -153,7 +153,7 @@ impl Clock for MonotonicClock {
impl Clock for RealTimeCoarseClock {
fn read_time(&self) -> Duration {
*Self::current_ref().get().unwrap().lock_irq_disabled()
*Self::current_ref().get().unwrap().disable_irq().lock()
}
}
@ -264,7 +264,7 @@ fn init_jiffies_clock_manager() {
fn update_coarse_clock() {
let real_time = RealTimeClock::get().read_time();
let current = RealTimeCoarseClock::current_ref().get().unwrap();
*current.lock_irq_disabled() = real_time;
*current.disable_irq().lock() = real_time;
}
fn init_coarse_clock() {

View File

@ -57,12 +57,12 @@ impl Timer {
/// Set the interval time for this timer.
/// The timer will be reset with the interval time upon expiration.
pub fn set_interval(&self, interval: Duration) {
*self.interval.lock_irq_disabled() = interval;
*self.interval.disable_irq().lock() = interval;
}
/// Cancel the current timer's set timeout callback.
pub fn cancel(&self) {
let timer_callback = self.timer_callback.lock_irq_disabled();
let timer_callback = self.timer_callback.disable_irq().lock();
if let Some(timer_callback) = timer_callback.upgrade() {
timer_callback.cancel();
}
@ -88,7 +88,7 @@ impl Timer {
Box::new(move || interval_timer_callback(&timer_weak)),
));
let mut timer_callback = self.timer_callback.lock_irq_disabled();
let mut timer_callback = self.timer_callback.disable_irq().lock();
if let Some(timer_callback) = timer_callback.upgrade() {
timer_callback.cancel();
}
@ -98,7 +98,7 @@ impl Timer {
/// Return the current expired time of this timer.
pub fn expired_time(&self) -> Duration {
let timer_callback = self.timer_callback.lock_irq_disabled().upgrade();
let timer_callback = self.timer_callback.disable_irq().lock().upgrade();
timer_callback.map_or(Duration::ZERO, |timer_callback| timer_callback.expired_time)
}
@ -124,7 +124,7 @@ impl Timer {
/// Returns the interval time of the current timer.
pub fn interval(&self) -> Duration {
*self.interval.lock_irq_disabled()
*self.interval.disable_irq().lock()
}
}
@ -134,7 +134,7 @@ fn interval_timer_callback(timer: &Weak<Timer>) {
};
(timer.registered_callback)();
let interval = timer.interval.lock_irq_disabled();
let interval = timer.interval.disable_irq().lock();
if *interval != Duration::ZERO {
timer.set_timeout(Timeout::After(*interval));
}
@ -161,7 +161,8 @@ impl TimerManager {
fn insert(&self, timer_callback: Arc<TimerCallback>) {
self.timer_callbacks
.lock_irq_disabled()
.disable_irq()
.lock()
.push(timer_callback);
}
@ -169,7 +170,7 @@ impl TimerManager {
/// call the corresponding callback functions.
pub fn process_expired_timers(&self) {
let callbacks = {
let mut timeout_list = self.timer_callbacks.lock_irq_disabled();
let mut timeout_list = self.timer_callbacks.disable_irq().lock();
if timeout_list.len() == 0 {
return;
}