Move network polling code to bottom half

commit 7d24e63216
parent 9804f053f2
committed by Tate, Hongliang Tian
@@ -2,12 +2,13 @@
 use alloc::{collections::linked_list::LinkedList, sync::Arc};
 
+use aster_softirq::BottomHalfDisabled;
 use ostd::{
     mm::{
         Daddr, DmaDirection, DmaStream, FrameAllocOptions, HasDaddr, Infallible, VmReader,
         VmWriter, PAGE_SIZE,
     },
-    sync::{LocalIrqDisabled, SpinLock},
+    sync::SpinLock,
     Pod,
 };
 use spin::Once;
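The import swap above captures the theme of the whole commit: locks that previously used the `LocalIrqDisabled` guard (taken via `disable_irq().lock()`) are retyped with `aster_softirq::BottomHalfDisabled`, so a plain `lock()` keeps softirqs (bottom halves), rather than all interrupts, disabled for the critical section. A minimal sketch of the resulting call pattern, using only types that appear in this diff; the helper function itself is hypothetical and not part of the commit:

```rust
use alloc::collections::linked_list::LinkedList;

use aster_softirq::BottomHalfDisabled;
use ostd::{mm::DmaStream, sync::SpinLock};

// Hypothetical helper, not from the commit: pop a cached DMA stream from a
// pool whose spinlock disables bottom halves while held.
fn pop_cached_stream(
    pool: &'static SpinLock<LinkedList<DmaStream>, BottomHalfDisabled>,
) -> Option<DmaStream> {
    // `lock()` is enough; the guard type encodes that softirqs stay disabled,
    // so no explicit `disable_irq()` call is needed at the call site.
    let mut streams = pool.lock();
    streams.pop_front()
}
```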
@@ -17,14 +18,14 @@ use crate::dma_pool::{DmaPool, DmaSegment};
 pub struct TxBuffer {
     dma_stream: DmaStream,
     nbytes: usize,
-    pool: &'static SpinLock<LinkedList<DmaStream>, LocalIrqDisabled>,
+    pool: &'static SpinLock<LinkedList<DmaStream>, BottomHalfDisabled>,
 }
 
 impl TxBuffer {
     pub fn new<H: Pod>(
         header: &H,
         packet: &[u8],
-        pool: &'static SpinLock<LinkedList<DmaStream>, LocalIrqDisabled>,
+        pool: &'static SpinLock<LinkedList<DmaStream>, BottomHalfDisabled>,
     ) -> Self {
         let header = header.as_bytes();
         let nbytes = header.len() + packet.len();
@@ -8,6 +8,7 @@ use alloc::{
 };
 use core::ops::Range;
 
+use aster_softirq::BottomHalfDisabled;
 use bitvec::{array::BitArray, prelude::Lsb0};
 use ostd::{
     mm::{
@@ -34,8 +35,8 @@ pub struct DmaPool {
     direction: DmaDirection,
     is_cache_coherent: bool,
     high_watermark: usize,
-    avail_pages: SpinLock<VecDeque<Arc<DmaPage>>>,
-    all_pages: SpinLock<VecDeque<Arc<DmaPage>>>,
+    avail_pages: SpinLock<VecDeque<Arc<DmaPage>>, BottomHalfDisabled>,
+    all_pages: SpinLock<VecDeque<Arc<DmaPage>>, BottomHalfDisabled>,
 }
 
 impl DmaPool {
@@ -98,7 +99,7 @@ impl DmaPool {
     pub fn alloc_segment(self: &Arc<Self>) -> Result<DmaSegment, ostd::Error> {
         // Lock order: pool.avail_pages -> pool.all_pages
         //             pool.avail_pages -> page.allocated_segments
-        let mut avail_pages = self.avail_pages.disable_irq().lock();
+        let mut avail_pages = self.avail_pages.lock();
         if avail_pages.is_empty() {
             /// Allocate a new page
             let new_page = {
@@ -110,7 +111,7 @@ impl DmaPool {
                     pool,
                 )?)
             };
-            let mut all_pages = self.all_pages.disable_irq().lock();
+            let mut all_pages = self.all_pages.lock();
             avail_pages.push_back(new_page.clone());
             all_pages.push_back(new_page);
         }
@@ -125,7 +126,7 @@ impl DmaPool {
 
     /// Returns the number of pages in pool
     fn num_pages(&self) -> usize {
-        self.all_pages.disable_irq().lock().len()
+        self.all_pages.lock().len()
     }
 
     /// Return segment size in pool
@@ -140,7 +141,7 @@ struct DmaPage {
     segment_size: usize,
     // `BitArray` is 64 bits, since each `DmaSegment` is bigger than 64 bytes,
     // there's no more than `PAGE_SIZE` / 64 = 64 `DmaSegment`s in a `DmaPage`.
-    allocated_segments: SpinLock<BitArray>,
+    allocated_segments: SpinLock<BitArray, BottomHalfDisabled>,
     pool: Weak<DmaPool>,
 }
 
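The comment above carries a small piece of arithmetic worth spelling out: with 4 KiB pages (which is what `PAGE_SIZE / 64 = 64` implies) and segments of at least 64 bytes, a page can never hold more than 64 segments, so the default 64-bit `BitArray` is wide enough to track per-segment allocation state. An illustrative compile-time check, not part of the commit and assuming exactly those two numbers:

```rust
// Illustrative only: assumes 4 KiB pages and a 64-byte minimum segment size,
// as the comment's arithmetic implies.
const PAGE_SIZE: usize = 4096;
const MIN_SEGMENT_SIZE: usize = 64;
const MAX_SEGMENTS_PER_PAGE: usize = PAGE_SIZE / MIN_SEGMENT_SIZE;

// A default `BitArray` carries 64 bits, which is exactly enough.
const _: () = assert!(MAX_SEGMENTS_PER_PAGE <= 64);
```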
@@ -167,7 +168,7 @@ impl DmaPage {
     }
 
     fn alloc_segment(self: &Arc<Self>) -> Option<DmaSegment> {
-        let mut segments = self.allocated_segments.disable_irq().lock();
+        let mut segments = self.allocated_segments.lock();
         let free_segment_index = get_next_free_index(&segments, self.nr_blocks_per_page())?;
         segments.set(free_segment_index, true);
 
@@ -190,7 +191,7 @@ impl DmaPage {
     }
 
     fn is_full(&self) -> bool {
-        let segments = self.allocated_segments.disable_irq().lock();
+        let segments = self.allocated_segments.lock();
         get_next_free_index(&segments, self.nr_blocks_per_page()).is_none()
     }
 }
@@ -262,10 +263,10 @@ impl Drop for DmaSegment {
 
         // Keep the same lock order as `pool.alloc_segment`
         // Lock order: pool.avail_pages -> pool.all_pages -> page.allocated_segments
-        let mut avail_pages = pool.avail_pages.disable_irq().lock();
-        let mut all_pages = pool.all_pages.disable_irq().lock();
+        let mut avail_pages = pool.avail_pages.lock();
+        let mut all_pages = pool.all_pages.lock();
 
-        let mut allocated_segments = page.allocated_segments.disable_irq().lock();
+        let mut allocated_segments = page.allocated_segments.lock();
 
         let nr_blocks_per_page = PAGE_SIZE / self.size;
         let became_avail = get_next_free_index(&allocated_segments, nr_blocks_per_page).is_none();
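Both the allocation path and this drop path touch the same locks, so they must agree on one global order; otherwise two CPUs could each hold one lock while waiting on the other. A sketch of the discipline, illustrative only: it reuses the field names from this diff and would have to live inside the `dma_pool` module (the fields are private), and it is not code from the commit:

```rust
// Illustrative only: take the three locks in the single agreed order,
// avail_pages -> all_pages -> allocated_segments, on every path.
fn with_pool_locks(pool: &DmaPool, page: &DmaPage) {
    let _avail_pages = pool.avail_pages.lock(); // 1st
    let _all_pages = pool.all_pages.lock(); // 2nd
    let _segments = page.allocated_segments.lock(); // 3rd
    // ... critical section touching all three structures ...
}
```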
@@ -16,13 +16,14 @@ use alloc::{collections::BTreeMap, string::String, sync::Arc, vec::Vec};
 use core::{any::Any, fmt::Debug};
 
 use aster_bigtcp::device::DeviceCapabilities;
+use aster_softirq::{
+    softirq_id::{NETWORK_RX_SOFTIRQ_ID, NETWORK_TX_SOFTIRQ_ID},
+    BottomHalfDisabled, SoftIrqLine,
+};
 pub use buffer::{RxBuffer, TxBuffer, RX_BUFFER_POOL, TX_BUFFER_LEN};
 use component::{init_component, ComponentInitError};
 pub use dma_pool::DmaSegment;
-use ostd::{
-    sync::{LocalIrqDisabled, SpinLock},
-    Pod,
-};
+use ostd::{sync::SpinLock, Pod};
 use spin::Once;
 
 #[derive(Debug, Clone, Copy, Pod)]
@@ -66,11 +67,11 @@ pub trait AnyNetworkDevice: Send + Sync + Any + Debug {
     fn notify_poll_end(&mut self);
 }
 
-pub trait NetDeviceIrqHandler = Fn() + Send + Sync + 'static;
+pub trait NetDeviceCallback = Fn() + Send + Sync + 'static;
 
 pub fn register_device(
     name: String,
-    device: Arc<SpinLock<dyn AnyNetworkDevice, LocalIrqDisabled>>,
+    device: Arc<SpinLock<dyn AnyNetworkDevice, BottomHalfDisabled>>,
 ) {
     COMPONENT
         .get()
@@ -80,7 +81,7 @@ pub fn register_device(
         .insert(name, NetworkDeviceIrqCallbackSet::new(device));
 }
 
-pub fn get_device(str: &str) -> Option<Arc<SpinLock<dyn AnyNetworkDevice, LocalIrqDisabled>>> {
+pub fn get_device(str: &str) -> Option<Arc<SpinLock<dyn AnyNetworkDevice, BottomHalfDisabled>>> {
     let table = COMPONENT.get().unwrap().network_device_table.lock();
     let callbacks = table.get(str)?;
     Some(callbacks.device.clone())
@@ -88,9 +89,9 @@ pub fn get_device(str: &str) -> Option<Arc<SpinLock<dyn AnyNetworkDevice, LocalI
 
 /// Registers callback which will be called when receiving message.
 ///
-/// Since the callback will be called in interrupt context,
-/// the callback function should NOT sleep.
-pub fn register_recv_callback(name: &str, callback: impl NetDeviceIrqHandler) {
+/// Since the callback will be called in softirq context,
+/// the callback function should _not_ sleep.
+pub fn register_recv_callback(name: &str, callback: impl NetDeviceCallback) {
     let device_table = COMPONENT.get().unwrap().network_device_table.lock();
     let Some(callbacks) = device_table.get(name) else {
         return;
@@ -98,7 +99,15 @@ pub fn register_recv_callback(name: &str, callback: impl NetDeviceIrqHandler) {
     callbacks.recv_callbacks.lock().push(Arc::new(callback));
 }
 
-pub fn register_send_callback(name: &str, callback: impl NetDeviceIrqHandler) {
+/// Registers a callback that will be invoked
+/// when the device has completed sending a packet.
+///
+/// Since this callback is executed in a softirq context,
+/// the callback function should _not_ block or sleep.
+///
+/// Please note that the callback may not be called every time a packet is sent.
+/// The driver may skip certain callbacks for performance optimization.
+pub fn register_send_callback(name: &str, callback: impl NetDeviceCallback) {
     let device_table = COMPONENT.get().unwrap().network_device_table.lock();
     let Some(callbacks) = device_table.get(name) else {
         return;
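Both registration functions accept any `Fn() + Send + Sync + 'static` closure and run it in softirq context, so it must not sleep or block. A hypothetical usage sketch, not part of the commit; the device name "virtio-net" is an assumption for illustration only:

```rust
// Hypothetical usage, not from the commit.
register_recv_callback("virtio-net", || {
    // Runs in softirq context: no blocking or sleeping here.
    // Typically this just kicks the protocol stack to poll the device.
});

register_send_callback("virtio-net", || {
    // Also softirq context; the driver may coalesce or skip some of these
    // notifications, so don't rely on one call per transmitted packet.
});
```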
@@ -106,39 +115,52 @@ pub fn register_send_callback(name: &str, callback: impl NetDeviceIrqHandler) {
     callbacks.send_callbacks.lock().push(Arc::new(callback));
 }
 
-pub fn handle_recv_irq(name: &str) {
+fn handle_rx_softirq() {
     let device_table = COMPONENT.get().unwrap().network_device_table.lock();
-    let Some(callbacks) = device_table.get(name) else {
-        return;
-    };
-
-    let callbacks = callbacks.recv_callbacks.lock();
-    for callback in callbacks.iter() {
-        callback();
+    // TODO: We should handle network events for just one device per softirq,
+    // rather than processing events for all devices.
+    // This issue should be addressed once new network devices are added.
+    for callback_set in device_table.values() {
+        let recv_callbacks = callback_set.recv_callbacks.lock();
+        for callback in recv_callbacks.iter() {
+            callback();
+        }
     }
 }
 
-pub fn handle_send_irq(name: &str) {
+fn handle_tx_softirq() {
     let device_table = COMPONENT.get().unwrap().network_device_table.lock();
-    let Some(callbacks) = device_table.get(name) else {
-        return;
-    };
+    // TODO: We should handle network events for just one device per softirq,
+    // rather than processing events for all devices.
+    // This issue should be addressed once new network devices are added.
+    for callback_set in device_table.values() {
+        let can_send = {
+            let mut device = callback_set.device.lock();
+            device.free_processed_tx_buffers();
+            device.can_send()
+        };
 
-    let can_send = {
-        let mut device = callbacks.device.lock();
-        device.free_processed_tx_buffers();
-        device.can_send()
-    };
-    if !can_send {
-        return;
-    }
+        if !can_send {
+            continue;
+        }
 
-    let callbacks = callbacks.send_callbacks.lock();
-    for callback in callbacks.iter() {
-        callback();
+        let send_callbacks = callback_set.send_callbacks.lock();
+        for callback in send_callbacks.iter() {
+            callback();
+        }
     }
 }
 
+/// Raises softirq for handling transmission events
+pub fn raise_send_softirq() {
+    SoftIrqLine::get(NETWORK_TX_SOFTIRQ_ID).raise();
+}
+
+/// Raises softirq for handling reception events
+pub fn raise_receive_softirq() {
+    SoftIrqLine::get(NETWORK_RX_SOFTIRQ_ID).raise();
+}
+
 pub fn all_devices() -> Vec<(String, NetworkDeviceRef)> {
     let network_devs = COMPONENT.get().unwrap().network_device_table.lock();
     network_devs
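With this change, a device's hardware interrupt handler no longer walks the callback lists itself; it only raises the matching softirq, and the polling above runs later in the bottom half via the handlers registered in `init()` below. A hypothetical driver-side sketch; the handler names are illustrative and not from this commit:

```rust
// Hypothetical driver code, not from the commit: the top half only
// acknowledges the interrupt and defers the real work to the softirqs.
fn on_nic_rx_interrupt() {
    // Hardware IRQ context: keep it short, just schedule the bottom half.
    raise_receive_softirq();
}

fn on_nic_tx_interrupt() {
    raise_send_softirq();
}
```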
@@ -148,33 +170,31 @@ pub fn all_devices() -> Vec<(String, NetworkDeviceRef)> {
 }
 
 static COMPONENT: Once<Component> = Once::new();
-pub(crate) static NETWORK_IRQ_HANDLERS: Once<
-    SpinLock<Vec<Arc<dyn NetDeviceIrqHandler>>, LocalIrqDisabled>,
-> = Once::new();
 
 #[init_component]
 fn init() -> Result<(), ComponentInitError> {
     let a = Component::init()?;
     COMPONENT.call_once(|| a);
-    NETWORK_IRQ_HANDLERS.call_once(|| SpinLock::new(Vec::new()));
+    SoftIrqLine::get(NETWORK_TX_SOFTIRQ_ID).enable(handle_tx_softirq);
+    SoftIrqLine::get(NETWORK_RX_SOFTIRQ_ID).enable(handle_rx_softirq);
     buffer::init();
     Ok(())
 }
 
-type NetDeviceIrqHandlerListRef =
-    Arc<SpinLock<Vec<Arc<dyn NetDeviceIrqHandler>>, LocalIrqDisabled>>;
-type NetworkDeviceRef = Arc<SpinLock<dyn AnyNetworkDevice, LocalIrqDisabled>>;
+type NetDeviceCallbackListRef = Arc<SpinLock<Vec<Arc<dyn NetDeviceCallback>>, BottomHalfDisabled>>;
+type NetworkDeviceRef = Arc<SpinLock<dyn AnyNetworkDevice, BottomHalfDisabled>>;
 
 struct Component {
     /// Device list, the key is device name, value is (callbacks, device);
-    network_device_table: SpinLock<BTreeMap<String, NetworkDeviceIrqCallbackSet>, LocalIrqDisabled>,
+    network_device_table:
+        SpinLock<BTreeMap<String, NetworkDeviceIrqCallbackSet>, BottomHalfDisabled>,
 }
 
 /// The send callbacks and recv callbacks for a network device
 struct NetworkDeviceIrqCallbackSet {
     device: NetworkDeviceRef,
-    recv_callbacks: NetDeviceIrqHandlerListRef,
-    send_callbacks: NetDeviceIrqHandlerListRef,
+    recv_callbacks: NetDeviceCallbackListRef,
+    send_callbacks: NetDeviceCallbackListRef,
 }
 
 impl NetworkDeviceIrqCallbackSet {