Update spin lock users to lock_irq_disabled()

Chuandong Li
2023-07-02 18:31:53 +08:00
committed by Tate, Hongliang Tian
parent ba4121cd6a
commit 78de1af348
15 changed files with 69 additions and 59 deletions
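Every call site below follows the same pattern: a spin lock that can be contended from interrupt context is now acquired with local IRQs disabled. A minimal sketch of the before/after, with an invented type standing in for the real statics touched by the diff:

use alloc::vec::Vec;
use jinux_frame::sync::SpinLock;

// Illustrative only: some state that an IRQ handler may also lock.
struct SerialInput {
    callbacks: SpinLock<Vec<fn(u8)>>,
}

impl SerialInput {
    fn register(&self, callback: fn(u8)) {
        // Before: self.callbacks.lock().push(callback);
        // After: mask local IRQs while holding the lock, so the serial
        // interrupt handler cannot preempt the holder on this CPU and
        // spin forever on a lock that will never be released.
        self.callbacks.lock_irq_disabled().push(callback);
    }
}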

View File

@@ -51,7 +51,7 @@ pub(crate) fn init() {
 }
 pub fn register_serial_input_callback(f: impl Fn(u8) + Send + Sync + 'static) {
-    SERIAL_INPUT_CALLBACKS.lock().push(Arc::new(f));
+    SERIAL_INPUT_CALLBACKS.lock_irq_disabled().push(Arc::new(f));
 }
 pub(crate) fn callback_init() {
@@ -67,7 +67,7 @@ where
     CONSOLE_IRQ_CALLBACK
         .get()
         .unwrap()
-        .lock()
+        .lock_irq_disabled()
         .on_active(callback);
 }

View File

@@ -35,7 +35,7 @@ impl RcuMonitor {
         // on the current CPU. If GP is complete, take the callbacks of the current
         // GP.
         let callbacks = {
-            let mut state = self.state.lock();
+            let mut state = self.state.lock_irq_disabled();
             if state.current_gp.is_complete() {
                 return;
             }
@@ -69,7 +69,7 @@ impl RcuMonitor {
     where
         F: FnOnce() -> () + Send + 'static,
     {
-        let mut state = self.state.lock();
+        let mut state = self.state.lock_irq_disabled();
         state.next_callbacks.push_back(Box::new(f));

View File

@@ -52,8 +52,8 @@ impl<T> SpinLock<T> {
     /// Acquire the spin lock without disabling local IRQs.
     ///
-    /// This method is twice as fast as the `lock_irq_disable` method.
-    /// So prefer using this method over the `lock_irq_disable` method
+    /// This method is twice as fast as the `lock_irq_disabled` method.
+    /// So prefer using this method over the `lock_irq_disabled` method
     /// when IRQ handlers are allowed to get executed while
     /// holding this lock. For example, if a lock is never used
     /// in the interrupt context, then it is ok to use this method
@@ -137,7 +137,7 @@ impl<'a, T: fmt::Debug> fmt::Debug for SpinLockIrqDisabledGuard<'a, T> {
 impl<'a, T> !Send for SpinLockIrqDisabledGuard<'a, T> {}
 // Safety. `SpinLockIrqDisabledGuard` can be shared between tasks/threads in same CPU.
-// As `lock_irq_disable()` disables interrupts to prevent race conditions caused by interrupts.
+// As `lock_irq_disabled()` disables interrupts to prevent race conditions caused by interrupts.
 unsafe impl<T: Sync> Sync for SpinLockIrqDisabledGuard<'_, T> {}
 pub struct SpinLockGuard<'a, T> {
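The doc comment above states the rule this commit enforces elsewhere: `lock()` stays available and is faster, but it is only safe for data that no interrupt handler touches; once an IRQ handler can contend for the lock, callers must switch to `lock_irq_disabled()`. A short sketch of the two cases, with invented field names:

use alloc::vec::Vec;
use jinux_frame::sync::SpinLock;

struct Device {
    // Never touched in interrupt context: the plain, cheaper lock() is fine.
    open_count: SpinLock<usize>,
    // Also filled by the device's IRQ handler: hold it only with IRQs masked.
    rx_bytes: SpinLock<Vec<u8>>,
}

impl Device {
    fn open(&self) {
        *self.open_count.lock() += 1;
    }

    fn on_receive(&self, byte: u8) {
        self.rx_bytes.lock_irq_disabled().push(byte);
    }
}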

View File

@@ -53,14 +53,14 @@ impl WaitQueue {
     /// Wake one waiter thread, if there is one.
     pub fn wake_one(&self) {
-        if let Some(waiter) = self.waiters.lock().front() {
+        if let Some(waiter) = self.waiters.lock_irq_disabled().front() {
             waiter.wake_up();
         }
     }
     /// Wake all not-exclusive waiter threads and at most one exclusive waiter.
     pub fn wake_all(&self) {
-        for waiter in self.waiters.lock().iter() {
+        for waiter in self.waiters.lock_irq_disabled().iter() {
             waiter.wake_up();
             if waiter.is_exclusive() {
                 break;
@@ -72,15 +72,15 @@ impl WaitQueue {
     // Otherwise, add to the front of waitqueue
     fn enqueue(&self, waiter: &Arc<Waiter>) {
         if waiter.is_exclusive() {
-            self.waiters.lock().push_back(waiter.clone())
+            self.waiters.lock_irq_disabled().push_back(waiter.clone())
         } else {
-            self.waiters.lock().push_front(waiter.clone());
+            self.waiters.lock_irq_disabled().push_front(waiter.clone());
         }
     }
     /// removes all waiters that have finished wait
     fn finish_wait(&self) {
-        self.waiters.lock().retain(|waiter| !waiter.is_finished())
+        self.waiters.lock_irq_disabled().retain(|waiter| !waiter.is_finished())
     }
 }

View File

@@ -32,20 +32,20 @@ impl<const ORDER: usize> LockedHeap<ORDER> {
     /// Safety: The range [start, start + size) must be a valid memory region.
     pub unsafe fn init(&self, start: *const u8, size: usize) {
-        self.0.lock().init(start as usize, size);
+        self.0.lock_irq_disabled().init(start as usize, size);
     }
 }
 unsafe impl<const ORDER: usize> GlobalAlloc for LockedHeap<ORDER> {
     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
         self.0
-            .lock()
+            .lock_irq_disabled()
             .alloc(layout)
             .map_or(0 as *mut u8, |allocation| allocation.as_ptr())
     }
     unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
         debug_assert!(ptr as usize != 0);
-        self.0.lock().dealloc(NonNull::new_unchecked(ptr), layout)
+        self.0.lock_irq_disabled().dealloc(NonNull::new_unchecked(ptr), layout)
     }
 }

View File

@@ -204,5 +204,10 @@ macro_rules! println {
 pub fn _print(args: fmt::Arguments) {
     use core::fmt::Write;
-    WRITER.get().unwrap().lock().write_fmt(args).unwrap();
+    WRITER
+        .get()
+        .unwrap()
+        .lock_irq_disabled()
+        .write_fmt(args)
+        .unwrap();
 }

View File

@@ -54,6 +54,6 @@ pub fn register_net_device_irq_handler(callback: impl NetDeviceIrqHandler) {
     NETWORK_IRQ_HANDLERS
         .get()
         .unwrap()
-        .lock()
+        .lock_irq_disabled()
         .push(Arc::new(callback))
 }

View File

@@ -97,7 +97,12 @@ fn config_space_change(_: &TrapFrame) {
 /// Interrupt handler if network device receives some packet
 fn handle_network_event(trap_frame: &TrapFrame) {
     let irq_num = trap_frame.trap_num as u8;
-    for callback in NETWORK_IRQ_HANDLERS.get().unwrap().lock().iter() {
+    for callback in NETWORK_IRQ_HANDLERS
+        .get()
+        .unwrap()
+        .lock_irq_disabled()
+        .iter()
+    {
         callback(irq_num);
     }
 }

View File

@@ -30,7 +30,7 @@ impl TtyDriver {
     /// Return the tty device in driver's internal table.
     pub fn lookup(&self, index: usize) -> Result<Arc<Tty>> {
-        let ttys = self.ttys.lock();
+        let ttys = self.ttys.lock_irq_disabled();
         // Return the tty device corresponding to idx
         if index >= ttys.len() {
             return_errno_with_message!(Errno::ENODEV, "lookup failed. No tty device");
@@ -43,12 +43,12 @@ impl TtyDriver {
     /// Install a new tty into the driver's internal tables.
     pub fn install(self: &Arc<Self>, tty: Arc<Tty>) {
         tty.set_driver(Arc::downgrade(self));
-        self.ttys.lock().push(tty);
+        self.ttys.lock_irq_disabled().push(tty);
     }
     /// remove a new tty into the driver's internal tables.
     pub fn remove(&self, index: usize) -> Result<()> {
-        let mut ttys = self.ttys.lock();
+        let mut ttys = self.ttys.lock_irq_disabled();
         if index >= ttys.len() {
             return_errno_with_message!(Errno::ENODEV, "lookup failed. No tty device");
         }
@@ -60,7 +60,7 @@ impl TtyDriver {
     pub fn receive_char(&self, item: u8) {
         // FIXME: should the char send to all ttys?
-        for tty in &*self.ttys.lock() {
+        for tty in &*self.ttys.lock_irq_disabled() {
             tty.receive_char(item);
         }
     }

View File

@@ -75,7 +75,7 @@ impl LineDiscipline {
     /// push char to line discipline. This function should be called in input interrupt handler.
     pub fn push_char(&self, mut item: u8) {
-        let termios = self.termios.lock();
+        let termios = self.termios.lock_irq_disabled();
         if termios.contains_icrnl() {
             if item == b'\r' {
                 item = b'\n'
@@ -85,7 +85,7 @@ impl LineDiscipline {
             if item == *termios.get_special_char(CC_C_CHAR::VINTR) {
                 // type Ctrl + C, signal SIGINT
                 if termios.contains_isig() {
-                    if let Some(fg) = *self.foreground.lock() {
+                    if let Some(fg) = *self.foreground.lock_irq_disabled() {
                         let kernel_signal = KernelSignal::new(SIGINT);
                         let fg_group = process_table::pgid_to_process_group(fg).unwrap();
                         fg_group.kernel_signal(kernel_signal);
@@ -94,7 +94,7 @@ impl LineDiscipline {
             } else if item == *termios.get_special_char(CC_C_CHAR::VQUIT) {
                 // type Ctrl + \, signal SIGQUIT
                 if termios.contains_isig() {
-                    if let Some(fg) = *self.foreground.lock() {
+                    if let Some(fg) = *self.foreground.lock_irq_disabled() {
                         let kernel_signal = KernelSignal::new(SIGQUIT);
                         let fg_group = process_table::pgid_to_process_group(fg).unwrap();
                         fg_group.kernel_signal(kernel_signal);
@@ -102,29 +102,29 @@ impl LineDiscipline {
                 }
             } else if item == *termios.get_special_char(CC_C_CHAR::VKILL) {
                 // erase current line
-                self.current_line.lock().drain();
+                self.current_line.lock_irq_disabled().drain();
             } else if item == *termios.get_special_char(CC_C_CHAR::VERASE) {
                 // type backspace
-                let mut current_line = self.current_line.lock();
+                let mut current_line = self.current_line.lock_irq_disabled();
                 if !current_line.is_empty() {
                     current_line.backspace();
                 }
             } else if meet_new_line(item, &termios) {
                 // a new line was met. We currently add the item to buffer.
                 // when we read content, the item should be skipped if it's EOF.
-                let mut current_line = self.current_line.lock();
+                let mut current_line = self.current_line.lock_irq_disabled();
                 current_line.push_char(item);
                 let current_line_chars = current_line.drain();
                 for char in current_line_chars {
-                    self.read_buffer.lock().push_overwrite(char);
+                    self.read_buffer.lock_irq_disabled().push_overwrite(char);
                 }
             } else if item >= 0x20 && item < 0x7f {
                 // printable character
-                self.current_line.lock().push_char(item);
+                self.current_line.lock_irq_disabled().push_char(item);
             }
         } else {
             // raw mode
-            self.read_buffer.lock().push_overwrite(item);
+            self.read_buffer.lock_irq_disabled().push_overwrite(item);
             // debug!("push char: {}", char::from(item))
         }
@@ -139,7 +139,7 @@ impl LineDiscipline {
     /// whether self is readable
     fn is_readable(&self) -> bool {
-        !self.read_buffer.lock().is_empty()
+        !self.read_buffer.lock_irq_disabled().is_empty()
     }
     // TODO: respect output flags
@@ -201,13 +201,13 @@ impl LineDiscipline {
         }
         let (vmin, vtime) = {
-            let termios = self.termios.lock();
+            let termios = self.termios.lock_irq_disabled();
             let vmin = *termios.get_special_char(CC_C_CHAR::VMIN);
             let vtime = *termios.get_special_char(CC_C_CHAR::VTIME);
             (vmin, vtime)
         };
         let read_len = {
-            let len = self.read_buffer.lock().len();
+            let len = self.read_buffer.lock_irq_disabled().len();
             let max_read_len = len.min(dst.len());
             if vmin == 0 && vtime == 0 {
                 // poll read
@@ -236,7 +236,7 @@ impl LineDiscipline {
     /// returns immediately with the lesser of the number of bytes available or the number of bytes requested.
     /// If no bytes are available, completes immediately, returning 0.
     fn poll_read(&self, dst: &mut [u8]) -> usize {
-        let mut buffer = self.read_buffer.lock();
+        let mut buffer = self.read_buffer.lock_irq_disabled();
         let len = buffer.len();
         let max_read_len = len.min(dst.len());
         if max_read_len == 0 {
@@ -245,7 +245,7 @@ impl LineDiscipline {
         let mut read_len = 0;
         for i in 0..max_read_len {
             if let Some(next_char) = buffer.pop() {
-                let termios = self.termios.lock();
+                let termios = self.termios.lock_irq_disabled();
                 if termios.is_canonical_mode() {
                     // canonical mode, read until meet new line
                     if meet_new_line(next_char, &termios) {
@@ -276,7 +276,7 @@ impl LineDiscipline {
     // MIN bytes are available, and returns the lesser of the two values.
     pub fn block_read(&self, dst: &mut [u8], vmin: u8) -> Result<usize> {
         let min_read_len = (vmin as usize).min(dst.len());
-        let buffer_len = self.read_buffer.lock().len();
+        let buffer_len = self.read_buffer.lock_irq_disabled().len();
         if buffer_len < min_read_len {
             return_errno!(Errno::EAGAIN);
         }
@@ -291,7 +291,7 @@ impl LineDiscipline {
     /// whether the current process belongs to foreground process group
     fn current_belongs_to_foreground(&self) -> bool {
         let current = current!();
-        if let Some(fg_pgid) = *self.foreground.lock() {
+        if let Some(fg_pgid) = *self.foreground.lock_irq_disabled() {
             if let Some(process_group) = process_table::pgid_to_process_group(fg_pgid) {
                 if process_group.contains_process(current.pid()) {
                     return true;
@@ -304,7 +304,7 @@ impl LineDiscipline {
     /// set foreground process group
     pub fn set_fg(&self, fg_pgid: Pgid) {
-        *self.foreground.lock() = Some(fg_pgid);
+        *self.foreground.lock_irq_disabled() = Some(fg_pgid);
         // Some background processes may be waiting on the wait queue, when set_fg, the background processes may be able to read.
         if self.is_readable() {
             self.pollee.add_events(IoEvents::IN);
@@ -313,20 +313,20 @@ impl LineDiscipline {
     /// get foreground process group id
     pub fn fg_pgid(&self) -> Option<Pgid> {
-        *self.foreground.lock()
+        *self.foreground.lock_irq_disabled()
     }
     /// whether there is buffered data
    pub fn is_empty(&self) -> bool {
-        self.read_buffer.lock().len() == 0
+        self.read_buffer.lock_irq_disabled().len() == 0
     }
     pub fn termios(&self) -> KernelTermios {
-        *self.termios.lock()
+        *self.termios.lock_irq_disabled()
     }
     pub fn set_termios(&self, termios: KernelTermios) {
-        *self.termios.lock() = termios;
+        *self.termios.lock_irq_disabled() = termios;
     }
 }
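`push_char` is documented as running in the input interrupt handler, which is why the task-context paths that share `termios`, `current_line`, `read_buffer`, and `foreground` all move to `lock_irq_disabled()` as well: if a syscall path held one of these locks with IRQs enabled, the input interrupt could fire on the same CPU, re-enter `push_char`, and spin on a lock its own CPU already owns. A condensed sketch of that hazard and the fix (the type and fields are illustrative, not the exact ones in this file):

use alloc::collections::VecDeque;
use jinux_frame::sync::SpinLock;

struct LineBuffer {
    read_buffer: SpinLock<VecDeque<u8>>,
}

impl LineBuffer {
    // Task context, e.g. servicing a read() syscall.
    fn pop(&self) -> Option<u8> {
        // With a plain lock(), an interrupt arriving while this guard is
        // held would call push() below on the same CPU and deadlock.
        self.read_buffer.lock_irq_disabled().pop_front()
    }

    // Interrupt context: called from the input IRQ handler.
    fn push(&self, byte: u8) {
        self.read_buffer.lock_irq_disabled().push_back(byte);
    }
}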

View File

@@ -45,7 +45,7 @@ impl Tty {
     }
     pub fn set_driver(&self, driver: Weak<TtyDriver>) {
-        *self.driver.lock() = driver;
+        *self.driver.lock_irq_disabled() = driver;
     }
     pub fn receive_char(&self, item: u8) {

View File

@@ -38,20 +38,20 @@ impl IfaceCommon {
         }
     }
-    pub(super) fn interface(&self) -> SpinLockGuard<smoltcp::iface::Interface> {
-        self.interface.lock()
+    pub(super) fn interface(&self) -> SpinLockIrqDisabledGuard<smoltcp::iface::Interface> {
+        self.interface.lock_irq_disabled()
     }
-    pub(super) fn sockets(&self) -> SpinLockGuard<smoltcp::iface::SocketSet<'static>> {
-        self.sockets.lock()
+    pub(super) fn sockets(&self) -> SpinLockIrqDisabledGuard<smoltcp::iface::SocketSet<'static>> {
+        self.sockets.lock_irq_disabled()
     }
     pub(super) fn ipv4_addr(&self) -> Option<Ipv4Address> {
-        self.interface.lock().ipv4_addr()
+        self.interface.lock_irq_disabled().ipv4_addr()
     }
     pub(super) fn netmask(&self) -> Option<Ipv4Address> {
-        let interface = self.interface.lock();
+        let interface = self.interface.lock_irq_disabled();
         let ip_addrs = interface.ip_addrs();
         ip_addrs.first().map(|cidr| match cidr {
             IpCidr::Ipv4(ipv4_cidr) => ipv4_cidr.netmask(),
@@ -113,7 +113,7 @@ impl IfaceCommon {
         }
         let socket_family = socket.socket_family();
         let pollee = socket.pollee();
-        let mut sockets = self.sockets.lock();
+        let mut sockets = self.sockets.lock_irq_disabled();
         let handle = match socket.raw_socket_family() {
             AnyRawSocket::Tcp(tcp_socket) => sockets.add(tcp_socket),
             AnyRawSocket::Udp(udp_socket) => sockets.add(udp_socket),
@@ -125,14 +125,14 @@ impl IfaceCommon {
     /// Remove a socket from the interface
     pub(super) fn remove_socket(&self, handle: SocketHandle) {
-        self.sockets.lock().remove(handle);
+        self.sockets.lock_irq_disabled().remove(handle);
     }
     pub(super) fn poll<D: Device + ?Sized>(&self, device: &mut D) {
-        let mut interface = self.interface.lock();
+        let mut interface = self.interface.lock_irq_disabled();
         let timestamp = get_network_timestamp();
         let has_events = {
-            let mut sockets = self.sockets.lock();
+            let mut sockets = self.sockets.lock_irq_disabled();
             interface.poll(timestamp, device, &mut sockets)
             // drop sockets here to avoid deadlock
         };
@@ -143,7 +143,7 @@ impl IfaceCommon {
             });
         }
-        let sockets = self.sockets.lock();
+        let sockets = self.sockets.lock_irq_disabled();
         if let Some(instant) = interface.poll_at(timestamp, &sockets) {
             self.next_poll_at_ms
                 .store(instant.total_millis() as u64, Ordering::SeqCst);
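The inner block around `interface.poll(...)` exists so the `sockets` guard is dropped before the same lock is taken again a few lines later; the inline comment calls this out. The scoping idiom in isolation, with placeholder state instead of smoltcp's real types:

use jinux_frame::sync::SpinLock;

fn poll(interface: &SpinLock<u32>, sockets: &SpinLock<u32>) -> u32 {
    let mut iface = interface.lock_irq_disabled();
    let has_events = {
        let mut socket_set = sockets.lock_irq_disabled();
        *iface += 1;
        *socket_set += 1;
        *socket_set
        // socket_set is dropped when this block ends...
    };
    // ...so re-locking here cannot deadlock on a guard that this same
    // call path is still holding.
    let socket_set = sockets.lock_irq_disabled();
    has_events + *socket_set
}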

View File

@@ -65,11 +65,11 @@ mod internal {
     pub trait IfaceInternal {
         fn common(&self) -> &IfaceCommon;
         /// The inner socket set
-        fn sockets(&self) -> SpinLockGuard<SocketSet<'static>> {
+        fn sockets(&self) -> SpinLockIrqDisabledGuard<SocketSet<'static>> {
            self.common().sockets()
         }
         /// The inner iface.
-        fn iface_inner(&self) -> SpinLockGuard<smoltcp::iface::Interface> {
+        fn iface_inner(&self) -> SpinLockIrqDisabledGuard<smoltcp::iface::Interface> {
            self.common().interface()
         }
         /// The time we should do another poll.

View File

@@ -112,7 +112,7 @@ impl Iface for IfaceVirtio {
     }
     fn poll(&self) {
-        let mut driver = self.driver.lock();
+        let mut driver = self.driver.lock_irq_disabled();
         self.common.poll(&mut *driver);
         self.process_dhcp();
     }

View File

@@ -18,7 +18,7 @@ pub(crate) use core::ffi::CStr;
 pub(crate) use int_to_c_enum::TryFromInt;
 pub(crate) use jinux_frame::config::PAGE_SIZE;
 // pub(crate) use jinux_frame::sync::{Mutex, MutexGuard};
-pub(crate) use jinux_frame::sync::{SpinLock, SpinLockGuard};
+pub(crate) use jinux_frame::sync::{SpinLock, SpinLockGuard, SpinLockIrqDisabledGuard};
 pub(crate) use jinux_frame::vm::Vaddr;
 pub(crate) use jinux_frame::{print, println};
 pub(crate) use log::{debug, error, info, trace, warn};