Disable send callback if send queue is not full

Author: jiangjianfeng
Date: 2024-11-07 11:27:57 +00:00
Committed by: Tate, Hongliang Tian
Parent: 56727aa5ee
Commit: 9965802f65

2 changed files with 66 additions and 5 deletions


@@ -61,8 +61,9 @@ impl NetworkDevice {
         debug!("mac addr = {:x?}, status = {:?}", mac_addr, config.status);
         let caps = init_caps(&features, &config);
-        let send_queue = VirtQueue::new(QUEUE_SEND, QUEUE_SIZE, transport.as_mut())
+        let mut send_queue = VirtQueue::new(QUEUE_SEND, QUEUE_SIZE, transport.as_mut())
             .expect("create send queue fails");
+        send_queue.disable_callback();
         let mut recv_queue = VirtQueue::new(QUEUE_RECV, QUEUE_SIZE, transport.as_mut())
             .expect("creating recv queue fails");
@@ -115,11 +116,11 @@ impl NetworkDevice {
             .unwrap();
         device
             .transport
-            .register_queue_callback(QUEUE_SEND, Box::new(handle_send_event), false)
+            .register_queue_callback(QUEUE_SEND, Box::new(handle_send_event), true)
             .unwrap();
         device
             .transport
-            .register_queue_callback(QUEUE_RECV, Box::new(handle_recv_event), false)
+            .register_queue_callback(QUEUE_RECV, Box::new(handle_recv_event), true)
             .unwrap();
         device.transport.finish_init();
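
The third argument to register_queue_callback flips from false to true for both queues. As the comment in the next hunk explains, the send-queue IRQ handler is where used send buffers get reclaimed when the send path cannot do it itself. The handler body is not part of this diff; a hypothetical sketch of what it plausibly does, where `NETWORK_DEVICE` and the callback signature are assumptions for illustration:

```rust
// Hypothetical sketch of the send-queue IRQ handler referenced by this commit;
// the real body is not shown in the diff. `NETWORK_DEVICE` (a global, locked
// device instance) and the `&TrapFrame` callback signature are assumptions.
fn handle_send_event(_trap_frame: &TrapFrame) {
    let mut device = NETWORK_DEVICE.lock();
    // Reclaim tx buffers whose descriptors the device has marked as used, so
    // the send queue regains capacity even when no new send() call arrives.
    device.free_processed_tx_buffers();
}
```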
@@ -185,6 +186,19 @@ impl NetworkDevice {
         debug_assert!(self.tx_buffers[token as usize].is_none());
         self.tx_buffers[token as usize] = Some(tx_buffer);
+        self.free_processed_tx_buffers();
+
+        // If the send queue is not full, we can free the send buffers during the next sending process.
+        // Therefore, there is no need to free the used buffers in the IRQ handlers.
+        // This allows us to temporarily disable the send queue interrupt.
+        // Conversely, if the send queue is full, the send queue interrupt should remain enabled
+        // to free the send buffers as quickly as possible.
+        if !self.can_send() {
+            self.send_queue.enable_callback();
+        } else {
+            self.send_queue.disable_callback();
+        }
+
         Ok(())
     }
 }
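
The comment spells out the policy: a tx-completion interrupt is only needed to reclaim buffers when the ring is full, because otherwise the next send() reclaims them for free. A self-contained model of that policy, with mock types standing in for the driver (nothing here is the actual driver API):

```rust
// Standalone model of the tx interrupt-mitigation policy; the queue is a mock
// and only the decision logic mirrors the driver change above.
struct MockTxQueue {
    used: Vec<usize>, // completions the "device" has posted
    in_flight: usize, // descriptors currently owned by the device
    capacity: usize,
    irq_enabled: bool,
}

impl MockTxQueue {
    fn can_send(&self) -> bool {
        self.in_flight < self.capacity
    }

    fn free_processed_tx_buffers(&mut self) {
        // Reclaim every completion the device has posted so far.
        self.in_flight -= self.used.drain(..).count();
    }

    fn send(&mut self, _token: usize) {
        self.free_processed_tx_buffers(); // lazy reclamation on the send path
        self.in_flight += 1;
        if !self.can_send() {
            self.irq_enabled = true; // full: need the IRQ to reclaim promptly
        } else {
            self.irq_enabled = false; // room left: next send() reclaims instead
        }
    }
}

fn main() {
    let mut q = MockTxQueue { used: Vec::new(), in_flight: 0, capacity: 2, irq_enabled: false };
    q.send(0);
    assert!(!q.irq_enabled); // 1 of 2 slots used: interrupts stay off
    q.send(1);
    assert!(q.irq_enabled);  // ring is now full: interrupts back on
    q.used.push(0);          // device completes the first buffer
    q.send(2);               // reclaims it on the send path, then enqueues
    assert!(q.irq_enabled);  // full again (2 in flight, capacity 2)
}
```

The trade-off is classic interrupt mitigation: under steady transmit load the IRQ stays masked and completions are batched onto the send path; only when the ring backs up does the driver pay for interrupts to drain it.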


@@ -58,6 +58,8 @@ pub struct VirtQueue {
     avail_idx: u16,
     /// last service used index
     last_used_idx: u16,
+    /// Whether the callback of this queue is enabled
+    is_callback_enabled: bool,
 }
 impl VirtQueue {
@@ -141,7 +143,7 @@ impl VirtQueue {
         let notify = transport.get_notify_ptr(idx).unwrap();
         field_ptr!(&avail_ring_ptr, AvailRing, flags)
-            .write_once(&(0u16))
+            .write_once(&AvailFlags::empty())
             .unwrap();
         Ok(VirtQueue {
             descs,
@@ -154,6 +156,7 @@ impl VirtQueue {
             free_head: 0,
             avail_idx: 0,
             last_used_idx: 0,
+            is_callback_enabled: true,
         })
     }
@@ -342,6 +345,40 @@ impl VirtQueue {
     pub fn notify(&mut self) {
         self.notify.write_once(&self.queue_idx).unwrap();
     }
+
+    /// Disables registered callbacks.
+    ///
+    /// That is to say, the queue won't generate interrupts after calling this method.
+    pub fn disable_callback(&mut self) {
+        if !self.is_callback_enabled {
+            return;
+        }
+
+        let flags_ptr = field_ptr!(&self.avail, AvailRing, flags);
+        let mut flags: AvailFlags = flags_ptr.read_once().unwrap();
+        debug_assert!(!flags.contains(AvailFlags::VIRTQ_AVAIL_F_NO_INTERRUPT));
+        flags.insert(AvailFlags::VIRTQ_AVAIL_F_NO_INTERRUPT);
+        flags_ptr.write_once(&flags).unwrap();
+
+        self.is_callback_enabled = false;
+    }
+
+    /// Enables registered callbacks.
+    ///
+    /// The queue will generate interrupts if any event comes after calling this method.
+    pub fn enable_callback(&mut self) {
+        if self.is_callback_enabled {
+            return;
+        }
+
+        let flags_ptr = field_ptr!(&self.avail, AvailRing, flags);
+        let mut flags: AvailFlags = flags_ptr.read_once().unwrap();
+        debug_assert!(flags.contains(AvailFlags::VIRTQ_AVAIL_F_NO_INTERRUPT));
+        flags.remove(AvailFlags::VIRTQ_AVAIL_F_NO_INTERRUPT);
+        flags_ptr.write_once(&flags).unwrap();
+
+        self.is_callback_enabled = true;
+    }
 }
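
Two details are worth noting. The is_callback_enabled shadow field makes both methods idempotent no-ops when the flag is already in the requested state, so callers can toggle unconditionally on every send without redundant writes to the DMA-visible ring. Also, per the VirtIO specification, VIRTQ_AVAIL_F_NO_INTERRUPT is only a hint: a device may still deliver notifications, so handlers must remain correct if an interrupt arrives while callbacks are "disabled". A hedged usage sketch against this API, where `can_send` stands for the caller's own capacity check (as in the NetworkDevice change above) and is not part of the VirtQueue API:

```rust
// Hedged usage sketch: toggling a queue's callback from a driver's send path.
fn update_tx_irq_policy(send_queue: &mut VirtQueue, can_send: bool) {
    if !can_send {
        send_queue.enable_callback(); // full: only the IRQ can reclaim buffers
    } else {
        send_queue.disable_callback(); // room left: reclaim lazily on next send
    }
}
```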
 #[repr(C, align(16))]
@@ -385,7 +422,7 @@ bitflags! {
 #[repr(C, align(2))]
 #[derive(Debug, Copy, Clone, Pod)]
 pub struct AvailRing {
-    flags: u16,
+    flags: AvailFlags,
     /// A driver MUST NOT decrement the idx.
     idx: u16,
     ring: [u16; 64], // actual size: queue_size
@@ -411,3 +448,13 @@ pub struct UsedElem {
     id: u32,
     len: u32,
 }
+
+bitflags! {
+    /// The flags used in [`AvailRing`].
+    #[repr(C)]
+    #[derive(Pod)]
+    pub struct AvailFlags: u16 {
+        /// The flag used to disable the virt queue interrupt.
+        const VIRTQ_AVAIL_F_NO_INTERRUPT = 1;
+    }
+}
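
Typing the field as a bitflags struct rather than a raw u16 is what gives enable_callback/disable_callback the contains/insert/remove vocabulary above. A minimal standalone demonstration of those semantics, assuming only the bitflags crate (the codebase-specific Pod derive is omitted here):

```rust
use bitflags::bitflags;

bitflags! {
    pub struct AvailFlags: u16 {
        const VIRTQ_AVAIL_F_NO_INTERRUPT = 1;
    }
}

fn main() {
    // `empty()` is the all-zeroes state written at queue initialization.
    let mut flags = AvailFlags::empty();
    assert!(!flags.contains(AvailFlags::VIRTQ_AVAIL_F_NO_INTERRUPT));

    // What disable_callback() stores into the avail ring's flags word.
    flags.insert(AvailFlags::VIRTQ_AVAIL_F_NO_INTERRUPT);
    assert!(flags.bits() == 1);

    // What enable_callback() stores to request interrupts again.
    flags.remove(AvailFlags::VIRTQ_AVAIL_F_NO_INTERRUPT);
    assert!(flags.is_empty());
}
```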