Avoid busy loop when sending packets and optimize device caps

jiangjianfeng
2024-09-25 11:15:58 +00:00
committed by Tate, Hongliang Tian
parent e0106f1f18
commit f793259512
9 changed files with 198 additions and 97 deletions
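
The core idea of the change: instead of busy-looping until the TX queue has room, the driver exposes a send-completion path (free_processed_tx_buffers plus a send IRQ handler and registrable send callbacks, see the hunks below), so senders can be woken when slots free up. Below is a minimal, self-contained sketch of that sleep-and-wake pattern, using std's Mutex and Condvar as stand-ins for the kernel spinlock and wait queue; the names and structure are illustrative, not the Asterinas code.

use std::sync::{Condvar, Mutex};

// Stand-in for a NIC TX queue with a limited number of descriptors.
struct TxQueue {
    free_slots: Mutex<usize>,
    // Signalled from the TX-complete interrupt once descriptors are reclaimed.
    tx_done: Condvar,
}

impl TxQueue {
    fn new(slots: usize) -> Self {
        Self { free_slots: Mutex::new(slots), tx_done: Condvar::new() }
    }

    // The old pattern would spin on can_send(); here the sender sleeps until
    // the completion handler signals that a descriptor is available again.
    fn send(&self, _packet: &[u8]) {
        let mut free = self.free_slots.lock().unwrap();
        while *free == 0 {
            free = self.tx_done.wait(free).unwrap(); // block, do not busy-loop
        }
        *free -= 1;
        // ... hand the packet to the device here ...
    }

    // Called from the send-IRQ path after completed buffers have been freed.
    fn on_send_irq(&self, reclaimed: usize) {
        *self.free_slots.lock().unwrap() += reclaimed;
        self.tx_done.notify_all();
    }
}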

View File

@@ -1,14 +1,13 @@
// SPDX-License-Identifier: MPL-2.0
use alloc::{collections::LinkedList, sync::Arc};
use alloc::{collections::linked_list::LinkedList, sync::Arc};
use align_ext::AlignExt;
use ostd::{
mm::{
Daddr, DmaDirection, DmaStream, FrameAllocOptions, HasDaddr, Infallible, VmReader,
VmWriter, PAGE_SIZE,
},
sync::SpinLock,
sync::{LocalIrqDisabled, SpinLock},
Pod,
};
use spin::Once;
@@ -18,37 +17,40 @@ use crate::dma_pool::{DmaPool, DmaSegment};
pub struct TxBuffer {
dma_stream: DmaStream,
nbytes: usize,
pool: &'static SpinLock<LinkedList<DmaStream>>,
pool: &'static SpinLock<LinkedList<DmaStream>, LocalIrqDisabled>,
}
impl TxBuffer {
pub fn new<H: Pod>(
header: &H,
packet: &[u8],
pool: &'static SpinLock<LinkedList<DmaStream>>,
pool: &'static SpinLock<LinkedList<DmaStream>, LocalIrqDisabled>,
) -> Self {
let header = header.as_bytes();
let nbytes = header.len() + packet.len();
let dma_stream = if let Some(stream) = get_tx_stream_from_pool(nbytes, pool) {
assert!(nbytes <= TX_BUFFER_LEN);
let dma_stream = if let Some(stream) = pool.lock().pop_front() {
stream
} else {
let segment = {
let nframes = (nbytes.align_up(PAGE_SIZE)) / PAGE_SIZE;
FrameAllocOptions::new(nframes).alloc_contiguous().unwrap()
};
let segment = FrameAllocOptions::new(TX_BUFFER_LEN / PAGE_SIZE)
.alloc_contiguous()
.unwrap();
DmaStream::map(segment, DmaDirection::ToDevice, false).unwrap()
};
let mut writer = dma_stream.writer().unwrap();
writer.write(&mut VmReader::from(header));
writer.write(&mut VmReader::from(packet));
let tx_buffer = Self {
dma_stream,
nbytes,
pool,
let tx_buffer = {
let mut writer = dma_stream.writer().unwrap();
writer.write(&mut VmReader::from(header));
writer.write(&mut VmReader::from(packet));
Self {
dma_stream,
nbytes,
pool,
}
};
tx_buffer.sync();
tx_buffer
}
@@ -74,10 +76,7 @@ impl HasDaddr for TxBuffer {
impl Drop for TxBuffer {
fn drop(&mut self) {
self.pool
.disable_irq()
.lock()
.push_back(self.dma_stream.clone());
self.pool.lock().push_back(self.dma_stream.clone());
}
}
@@ -139,29 +138,13 @@ impl HasDaddr for RxBuffer {
}
}
const RX_BUFFER_LEN: usize = 4096;
pub const RX_BUFFER_LEN: usize = 4096;
pub const TX_BUFFER_LEN: usize = 4096;
pub static RX_BUFFER_POOL: Once<Arc<DmaPool>> = Once::new();
pub static TX_BUFFER_POOL: Once<SpinLock<LinkedList<DmaStream>>> = Once::new();
fn get_tx_stream_from_pool(
nbytes: usize,
tx_buffer_pool: &'static SpinLock<LinkedList<DmaStream>>,
) -> Option<DmaStream> {
let mut pool = tx_buffer_pool.disable_irq().lock();
let mut cursor = pool.cursor_front_mut();
while let Some(current) = cursor.current() {
if current.nbytes() >= nbytes {
return cursor.remove_current();
}
cursor.move_next();
}
None
}
pub fn init() {
const POOL_INIT_SIZE: usize = 32;
const POOL_HIGH_WATERMARK: usize = 64;
const POOL_INIT_SIZE: usize = 64;
const POOL_HIGH_WATERMARK: usize = 128;
RX_BUFFER_POOL.call_once(|| {
DmaPool::new(
RX_BUFFER_LEN,
@@ -171,5 +154,4 @@ pub fn init() {
false,
)
});
TX_BUFFER_POOL.call_once(|| SpinLock::new(LinkedList::new()));
}
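
Since every stream in the TX pool is now allocated at the fixed TX_BUFFER_LEN, the old size-matching scan (get_tx_stream_from_pool) is unnecessary: allocation is a pop_front from the free list, and Drop pushes the stream back for reuse. A minimal sketch of this fixed-size pool pattern follows, with a plain Vec<u8> standing in for DmaStream and a std Mutex for the IRQ-disabled spinlock (illustrative only).

use std::collections::LinkedList;
use std::sync::Mutex;

const TX_BUFFER_LEN: usize = 4096;

// Free list of fixed-size buffers, guarded by a lock (a spinlock in the driver).
static TX_POOL: Mutex<LinkedList<Vec<u8>>> = Mutex::new(LinkedList::new());

struct TxBuffer {
    stream: Vec<u8>, // stand-in for the DmaStream
    nbytes: usize,
}

impl TxBuffer {
    fn new(header: &[u8], packet: &[u8]) -> Self {
        let nbytes = header.len() + packet.len();
        assert!(nbytes <= TX_BUFFER_LEN);
        // Every pooled buffer has the same size, so any free one will do.
        let mut stream = TX_POOL
            .lock()
            .unwrap()
            .pop_front()
            .unwrap_or_else(|| vec![0u8; TX_BUFFER_LEN]);
        stream[..header.len()].copy_from_slice(header);
        stream[header.len()..nbytes].copy_from_slice(packet);
        Self { stream, nbytes }
    }

    fn as_bytes(&self) -> &[u8] {
        &self.stream[..self.nbytes]
    }
}

impl Drop for TxBuffer {
    fn drop(&mut self) {
        // Return the buffer to the pool for reuse instead of freeing it.
        TX_POOL.lock().unwrap().push_back(std::mem::take(&mut self.stream));
    }
}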

View File

@@ -12,7 +12,7 @@ impl device::Device for dyn AnyNetworkDevice {
type TxToken<'a> = TxToken<'a>;
fn receive(&mut self, _timestamp: Instant) -> Option<(Self::RxToken<'_>, Self::TxToken<'_>)> {
if self.can_receive() {
if self.can_receive() && self.can_send() {
let rx_buffer = self.receive().unwrap();
Some((RxToken(rx_buffer), TxToken(self)))
} else {
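
The added can_send() check follows from the token contract of the smoltcp-style Device trait: receive() yields an RxToken together with a TxToken so the stack can transmit a reply (an ACK, for example) while processing the received frame, so a pair should only be handed out when transmission is actually possible. A self-contained toy model of that pairing; the types below are illustrative, not the aster-bigtcp ones.

use std::collections::VecDeque;

struct RxToken(Vec<u8>);
struct TxToken<'a>(&'a mut Nic);

struct Nic {
    rx_queue: VecDeque<Vec<u8>>,
    tx_free_slots: usize,
}

impl Nic {
    fn can_receive(&self) -> bool { !self.rx_queue.is_empty() }
    fn can_send(&self) -> bool { self.tx_free_slots > 0 }

    // Mirrors the patched check: only yield a token pair when the device can
    // also transmit, since the stack may consume the TxToken for a reply.
    fn receive(&mut self) -> Option<(RxToken, TxToken<'_>)> {
        if self.can_receive() && self.can_send() {
            let frame = self.rx_queue.pop_front().unwrap();
            Some((RxToken(frame), TxToken(self)))
        } else {
            None
        }
    }
}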

View File

@@ -16,7 +16,7 @@ use alloc::{collections::BTreeMap, string::String, sync::Arc, vec::Vec};
use core::{any::Any, fmt::Debug};
use aster_bigtcp::device::DeviceCapabilities;
pub use buffer::{RxBuffer, TxBuffer, RX_BUFFER_POOL, TX_BUFFER_POOL};
pub use buffer::{RxBuffer, TxBuffer, RX_BUFFER_POOL, TX_BUFFER_LEN};
use component::{init_component, ComponentInitError};
pub use dma_pool::DmaSegment;
use ostd::{
@@ -33,6 +33,7 @@ pub struct EthernetAddr(pub [u8; 6]);
pub enum VirtioNetError {
NotReady,
WrongToken,
Busy,
Unknown,
}
@@ -51,6 +52,7 @@ pub trait AnyNetworkDevice: Send + Sync + Any + Debug {
fn receive(&mut self) -> Result<RxBuffer, VirtioNetError>;
/// Sends a packet to the network. Does not return until the request completes.
fn send(&mut self, packet: &[u8]) -> Result<(), VirtioNetError>;
fn free_processed_tx_buffers(&mut self);
}
pub trait NetDeviceIrqHandler = Fn() + Send + Sync + 'static;
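
Together with the send-callback machinery, the new Busy variant and the free_processed_tx_buffers hook let a driver fail fast when the TX ring is full and reclaim completed buffers from the interrupt path, rather than spinning inside send(). A hedged sketch of what such a driver-side send might look like, using a plain Vec as a stand-in for the TX virtqueue; this is an assumption about the implementation shape, not the actual virtio-net code.

#[derive(Debug)]
enum VirtioNetError {
    Busy,
}

struct Driver {
    in_flight: Vec<Vec<u8>>, // stand-in for buffers queued in the TX virtqueue
    capacity: usize,
}

impl Driver {
    fn can_send(&self) -> bool {
        self.in_flight.len() < self.capacity
    }

    // Non-blocking send: report Busy instead of spinning until a slot frees up;
    // the caller retries after the send callback signals available capacity.
    fn send(&mut self, packet: &[u8]) -> Result<(), VirtioNetError> {
        if !self.can_send() {
            return Err(VirtioNetError::Busy);
        }
        self.in_flight.push(packet.to_vec());
        // ... notify the device here ...
        Ok(())
    }

    // Invoked from the interrupt path: drop buffers the device has completed.
    fn free_processed_tx_buffers(&mut self, completed: usize) {
        let n = completed.min(self.in_flight.len());
        self.in_flight.drain(..n);
    }
}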
@@ -64,13 +66,13 @@ pub fn register_device(
.unwrap()
.network_device_table
.lock()
.insert(name, (Arc::new(SpinLock::new(Vec::new())), device));
.insert(name, NetworkDeviceIrqCallbackSet::new(device));
}
pub fn get_device(str: &str) -> Option<Arc<SpinLock<dyn AnyNetworkDevice, LocalIrqDisabled>>> {
let table = COMPONENT.get().unwrap().network_device_table.lock();
let (_, device) = table.get(str)?;
Some(device.clone())
let callbacks = table.get(str)?;
Some(callbacks.device.clone())
}
/// Registers callback which will be called when receiving message.
@@ -79,18 +81,48 @@ pub fn get_device(str: &str) -> Option<Arc<SpinLock<dyn AnyNetworkDevice, LocalI
/// the callback function should NOT sleep.
pub fn register_recv_callback(name: &str, callback: impl NetDeviceIrqHandler) {
let device_table = COMPONENT.get().unwrap().network_device_table.lock();
let Some((callbacks, _)) = device_table.get(name) else {
let Some(callbacks) = device_table.get(name) else {
return;
};
callbacks.lock().push(Arc::new(callback));
callbacks.recv_callbacks.lock().push(Arc::new(callback));
}
pub fn register_send_callback(name: &str, callback: impl NetDeviceIrqHandler) {
let device_table = COMPONENT.get().unwrap().network_device_table.lock();
let Some(callbacks) = device_table.get(name) else {
return;
};
callbacks.send_callbacks.lock().push(Arc::new(callback));
}
pub fn handle_recv_irq(name: &str) {
let device_table = COMPONENT.get().unwrap().network_device_table.lock();
let Some((callbacks, _)) = device_table.get(name) else {
let Some(callbacks) = device_table.get(name) else {
return;
};
let callbacks = callbacks.lock();
let callbacks = callbacks.recv_callbacks.lock();
for callback in callbacks.iter() {
callback();
}
}
pub fn handle_send_irq(name: &str) {
let device_table = COMPONENT.get().unwrap().network_device_table.lock();
let Some(callbacks) = device_table.get(name) else {
return;
};
let can_send = {
let mut device = callbacks.device.lock();
device.free_processed_tx_buffers();
device.can_send()
};
if !can_send {
return;
}
let callbacks = callbacks.send_callbacks.lock();
for callback in callbacks.iter() {
callback();
}
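
handle_send_irq reclaims processed TX buffers and re-checks can_send() before fanning out to the registered send callbacks, so subscribers can treat the callback purely as a wake-up signal. Callbacks registered through register_recv_callback or register_send_callback run from the interrupt path and must not sleep; below is a small sketch of a conforming callback (the atomic counter and the registration comment are illustrative assumptions, not code from this commit).

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

// Builds a callback satisfying the NetDeviceIrqHandler bound
// (Fn() + Send + Sync + 'static) that only does non-sleeping work.
fn make_wakeup_callback(pending: Arc<AtomicUsize>) -> impl Fn() + Send + Sync + 'static {
    move || {
        // Record that TX capacity (or a received packet) is available; the
        // heavy lifting happens later, outside the interrupt handler.
        pending.fetch_add(1, Ordering::Relaxed);
    }
}

fn main() {
    let pending = Arc::new(AtomicUsize::new(0));
    let callback = make_wakeup_callback(Arc::clone(&pending));
    // In the component this closure would be handed to something like
    // register_send_callback(device_name, callback), with device_name chosen
    // by the caller.
    callback(); // simulate one interrupt invoking the registered callback
    assert_eq!(pending.load(Ordering::Relaxed), 1);
}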
@@ -100,7 +132,7 @@ pub fn all_devices() -> Vec<(String, NetworkDeviceRef)> {
let network_devs = COMPONENT.get().unwrap().network_device_table.lock();
network_devs
.iter()
.map(|(name, (_, device))| (name.clone(), device.clone()))
.map(|(name, callbacks)| (name.clone(), callbacks.device.clone()))
.collect()
}
@@ -124,10 +156,24 @@ type NetworkDeviceRef = Arc<SpinLock<dyn AnyNetworkDevice, LocalIrqDisabled>>;
struct Component {
/// Device list, the key is device name, value is (callbacks, device);
network_device_table: SpinLock<
BTreeMap<String, (NetDeviceIrqHandlerListRef, NetworkDeviceRef)>,
LocalIrqDisabled,
>,
network_device_table: SpinLock<BTreeMap<String, NetworkDeviceIrqCallbackSet>, LocalIrqDisabled>,
}
/// The send callbacks and recv callbacks for a network device
struct NetworkDeviceIrqCallbackSet {
device: NetworkDeviceRef,
recv_callbacks: NetDeviceIrqHandlerListRef,
send_callbacks: NetDeviceIrqHandlerListRef,
}
impl NetworkDeviceIrqCallbackSet {
fn new(device: NetworkDeviceRef) -> Self {
Self {
device,
recv_callbacks: Arc::new(SpinLock::new(Vec::new())),
send_callbacks: Arc::new(SpinLock::new(Vec::new())),
}
}
}
impl Component {