Refactor virtio drivers with DMA APIs
commit cd1575bc6d
parent 5e127b2da0
committed by Tate, Hongliang Tian
kernel/comps/network/Cargo.toml

@@ -6,16 +6,17 @@ edition = "2021"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-component = { path = "../../libs/comp-sys/component" }
-align_ext = { path = "../../../framework/libs/align_ext" }
 aster-frame = { path = "../../../framework/aster-frame" }
 aster-util = { path = "../../libs/aster-util" }
-aster-rights = { path = "../../libs/aster-rights" }
+align_ext = { path = "../../../framework/libs/align_ext" }
-int-to-c-enum = { path = "../../libs/int-to-c-enum" }
 bytes = { version = "1.4.0", default-features = false }
-pod = { git = "https://github.com/asterinas/pod", rev = "d7dba56" }
 bitflags = "1.3"
-spin = "0.9.4"
-ringbuf = { version = "0.3.2", default-features = false, features = ["alloc"] }
+bitvec = { version = "1.0.1", default-features = false, features = ["alloc"] }
+component = { path = "../../libs/comp-sys/component" }
+int-to-c-enum = { path = "../../libs/int-to-c-enum" }
+ktest = { path = "../../../framework/libs/ktest" }
 log = "0.4"
-smoltcp = { version = "0.9.1", default-features = false, features = ["alloc", "log", "medium-ethernet", "medium-ip", "proto-dhcpv4", "proto-ipv4", "proto-igmp", "socket-icmp", "socket-udp", "socket-tcp", "socket-raw", "socket-dhcpv4"] }
+pod = { git = "https://github.com/asterinas/pod", rev = "d7dba56" }
+ringbuf = { version = "0.3.2", default-features = false, features = ["alloc"] }
+smoltcp = { version = "0.9.1", default-features = false, features = ["alloc", "log", "medium-ethernet", "medium-ip", "proto-dhcpv4", "proto-ipv4", "proto-igmp", "socket-icmp", "socket-udp", "socket-tcp", "socket-raw", "socket-dhcpv4"] }
+spin = "0.9.4"
kernel/comps/network/src/buffer.rs

@@ -1,30 +1,89 @@
 // SPDX-License-Identifier: MPL-2.0
 
-use core::mem::size_of;
+use alloc::{collections::LinkedList, sync::Arc};
 
 use align_ext::AlignExt;
-use bytes::BytesMut;
+use aster_frame::{
+    sync::SpinLock,
+    vm::{Daddr, DmaDirection, DmaStream, HasDaddr, VmAllocOptions, VmReader, VmWriter, PAGE_SIZE},
+};
 use pod::Pod;
+use spin::Once;
+
+use crate::dma_pool::{DmaPool, DmaSegment};
+
+pub struct TxBuffer {
+    dma_stream: DmaStream,
+    nbytes: usize,
+}
+
+impl TxBuffer {
+    pub fn new<H: Pod>(header: &H, packet: &[u8]) -> Self {
+        let header = header.as_bytes();
+        let nbytes = header.len() + packet.len();
+
+        let dma_stream = if let Some(stream) = get_tx_stream_from_pool(nbytes) {
+            stream
+        } else {
+            let segment = {
+                let nframes = (nbytes.align_up(PAGE_SIZE)) / PAGE_SIZE;
+                VmAllocOptions::new(nframes).alloc_contiguous().unwrap()
+            };
+            DmaStream::map(segment, DmaDirection::ToDevice, false).unwrap()
+        };
+
+        let mut writer = dma_stream.writer().unwrap();
+        writer.write(&mut VmReader::from(header));
+        writer.write(&mut VmReader::from(packet));
+
+        let tx_buffer = Self { dma_stream, nbytes };
+        tx_buffer.sync();
+        tx_buffer
+    }
+
+    pub fn writer(&self) -> VmWriter<'_> {
+        self.dma_stream.writer().unwrap().limit(self.nbytes)
+    }
+
+    fn sync(&self) {
+        self.dma_stream.sync(0..self.nbytes).unwrap();
+    }
+
+    pub fn nbytes(&self) -> usize {
+        self.nbytes
+    }
+}
+
+impl HasDaddr for TxBuffer {
+    fn daddr(&self) -> Daddr {
+        self.dma_stream.daddr()
+    }
+}
+
+impl Drop for TxBuffer {
+    fn drop(&mut self) {
+        TX_BUFFER_POOL
+            .get()
+            .unwrap()
+            .lock_irq_disabled()
+            .push_back(self.dma_stream.clone());
+    }
+}
 
 /// Buffer for receive packet
 #[derive(Debug)]
 pub struct RxBuffer {
-    /// Packet Buffer, length align 8.
-    buf: BytesMut,
-    /// Header len
+    segment: DmaSegment,
     header_len: usize,
-    /// Packet len
     packet_len: usize,
 }
 
 impl RxBuffer {
-    pub fn new(len: usize, header_len: usize) -> Self {
-        let len = len.align_up(8);
-        let buf = BytesMut::zeroed(len);
+    pub fn new(header_len: usize) -> Self {
+        assert!(header_len <= RX_BUFFER_LEN);
+        let segment = RX_BUFFER_POOL.get().unwrap().alloc_segment().unwrap();
         Self {
-            buf,
-            packet_len: 0,
+            segment,
             header_len,
+            packet_len: 0,
         }
     }
 
@@ -33,59 +92,60 @@ impl RxBuffer {
     }
 
     pub fn set_packet_len(&mut self, packet_len: usize) {
+        assert!(self.header_len + packet_len <= RX_BUFFER_LEN);
         self.packet_len = packet_len;
     }
 
-    pub fn buf(&self) -> &[u8] {
-        &self.buf
+    pub fn packet(&self) -> VmReader<'_> {
+        self.segment
+            .sync(self.header_len..self.header_len + self.packet_len)
+            .unwrap();
+        self.segment
+            .reader()
+            .unwrap()
+            .skip(self.header_len)
+            .limit(self.packet_len)
     }
 
-    pub fn buf_mut(&mut self) -> &mut [u8] {
-        &mut self.buf
-    }
-
-    /// Packet payload slice, which is inner buffer excluding VirtioNetHdr.
-    pub fn packet(&self) -> &[u8] {
-        debug_assert!(self.header_len + self.packet_len <= self.buf.len());
-        &self.buf[self.header_len..self.header_len + self.packet_len]
-    }
-
-    /// Mutable packet payload slice.
-    pub fn packet_mut(&mut self) -> &mut [u8] {
-        debug_assert!(self.header_len + self.packet_len <= self.buf.len());
-        &mut self.buf[self.header_len..self.header_len + self.packet_len]
-    }
-
-    pub fn header<H: Pod>(&self) -> H {
-        debug_assert_eq!(size_of::<H>(), self.header_len);
-        H::from_bytes(&self.buf[..size_of::<H>()])
+    pub const fn buf_len(&self) -> usize {
+        self.segment.size()
    }
 }
 
-/// Buffer for transmit packet
-#[derive(Debug)]
-pub struct TxBuffer {
-    buf: BytesMut,
+impl HasDaddr for RxBuffer {
+    fn daddr(&self) -> Daddr {
+        self.segment.daddr()
+    }
 }
 
-impl TxBuffer {
-    pub fn with_len(buf_len: usize) -> Self {
-        Self {
-            buf: BytesMut::zeroed(buf_len),
-        }
-    }
+const RX_BUFFER_LEN: usize = 4096;
+static RX_BUFFER_POOL: Once<Arc<DmaPool>> = Once::new();
+static TX_BUFFER_POOL: Once<SpinLock<LinkedList<DmaStream>>> = Once::new();
 
-    pub fn new(buf: &[u8]) -> Self {
-        Self {
-            buf: BytesMut::from(buf),
-        }
-    }
-
-    pub fn buf(&self) -> &[u8] {
-        &self.buf
-    }
-
-    pub fn buf_mut(&mut self) -> &mut [u8] {
-        &mut self.buf
-    }
+fn get_tx_stream_from_pool(nbytes: usize) -> Option<DmaStream> {
+    let mut pool = TX_BUFFER_POOL.get().unwrap().lock_irq_disabled();
+    let mut cursor = pool.cursor_front_mut();
+    while let Some(current) = cursor.current() {
+        if current.nbytes() >= nbytes {
+            return cursor.remove_current();
+        }
+        cursor.move_next();
+    }
+
+    None
 }
 
+pub fn init() {
+    const POOL_INIT_SIZE: usize = 32;
+    const POOL_HIGH_WATERMARK: usize = 64;
+    RX_BUFFER_POOL.call_once(|| {
+        DmaPool::new(
+            RX_BUFFER_LEN,
+            POOL_INIT_SIZE,
+            POOL_HIGH_WATERMARK,
+            DmaDirection::FromDevice,
+            false,
+        )
+    });
+    TX_BUFFER_POOL.call_once(|| SpinLock::new(LinkedList::new()));
+}
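Taken together, these hunks replace the old `BytesMut`-backed buffers: `TxBuffer` now writes a `Pod` header plus payload into a (possibly pooled) `DmaStream` and syncs it, while `RxBuffer` borrows one fixed 4 KiB `DmaSegment` from `RX_BUFFER_POOL`. A minimal usage sketch, assuming `buffer::init()` has already run and using a hypothetical `VirtioNetHdr: Pod` header type (the real virtio-net driver defines its own):

    // Sketch only: `VirtioNetHdr` stands in for the driver's actual header type.
    fn tx_sketch(header: &VirtioNetHdr, payload: &[u8]) -> TxBuffer {
        // Reuses a pooled DmaStream when one is big enough, else maps a new one.
        TxBuffer::new(header, payload)
    }

    fn rx_sketch(received_len: usize) -> alloc::vec::Vec<u8> {
        // Takes one RX_BUFFER_LEN (4 KiB) DmaSegment from the global pool.
        let mut rx = RxBuffer::new(core::mem::size_of::<VirtioNetHdr>());
        // The device fills the segment; the driver records the payload length.
        rx.set_packet_len(received_len);
        // packet() syncs the payload range and yields a VmReader over it.
        let mut reader = rx.packet();
        let mut out = alloc::vec![0u8; reader.remain()];
        reader.read(&mut VmWriter::from(&mut out as &mut [u8]));
        out
    }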
kernel/comps/network/src/dma_pool.rs (new file, 367 lines)

@@ -0,0 +1,367 @@
// SPDX-License-Identifier: MPL-2.0

#![allow(unused)]

use alloc::{
    collections::VecDeque,
    sync::{Arc, Weak},
};
use core::ops::Range;

use aster_frame::{
    sync::{RwLock, SpinLock},
    vm::{Daddr, DmaDirection, DmaStream, HasDaddr, VmAllocOptions, VmReader, VmWriter, PAGE_SIZE},
};
use bitvec::{array::BitArray, prelude::Lsb0};
use ktest::ktest;

/// `DmaPool` is responsible for allocating small streaming DMA segments
/// (equal to or smaller than PAGE_SIZE),
/// referred to as `DmaSegment`s.
///
/// A `DmaPool` can only allocate `DmaSegment`s of a fixed size.
/// Once a `DmaSegment` is dropped, it is returned to the pool.
/// If the `DmaPool` is dropped before an associated `DmaSegment`,
/// the `drop` method of that `DmaSegment` will panic.
///
/// Therefore, as a best practice,
/// it is recommended that the `DmaPool` have a static lifetime.
#[derive(Debug)]
pub struct DmaPool {
    segment_size: usize,
    direction: DmaDirection,
    is_cache_coherent: bool,
    high_watermark: usize,
    avail_pages: SpinLock<VecDeque<Arc<DmaPage>>>,
    all_pages: SpinLock<VecDeque<Arc<DmaPage>>>,
}

impl DmaPool {
    /// Constructs a new `DmaPool` with a specified initial capacity and a high watermark.
    ///
    /// The `DmaPool` starts with `init_size` DMAable pages.
    /// As additional DMA blocks are requested beyond the initial capacity,
    /// the pool dynamically allocates more DMAable pages.
    /// To optimize performance, the pool employs a lazy deallocation strategy:
    /// a DMAable page is freed only if it meets the following conditions:
    /// 1. The page is currently not in use;
    /// 2. The total number of allocated DMAable pages exceeds the specified `high_watermark`.
    ///
    /// The returned pool can be used to allocate small segments for DMA usage.
    /// All allocated segments will have the same DMA direction
    /// and will either all be cache coherent or all not cache coherent,
    /// as specified in the parameters.
    pub fn new(
        segment_size: usize,
        init_size: usize,
        high_watermark: usize,
        direction: DmaDirection,
        is_cache_coherent: bool,
    ) -> Arc<Self> {
        assert!(segment_size.is_power_of_two());
        assert!(segment_size >= 64);
        assert!(segment_size <= PAGE_SIZE);
        assert!(high_watermark >= init_size);

        Arc::new_cyclic(|pool| {
            let mut avail_pages = VecDeque::new();
            let mut all_pages = VecDeque::new();

            for _ in 0..init_size {
                let page = Arc::new(
                    DmaPage::new(
                        segment_size,
                        direction,
                        is_cache_coherent,
                        Weak::clone(pool),
                    )
                    .unwrap(),
                );
                avail_pages.push_back(page.clone());
                all_pages.push_back(page);
            }

            Self {
                segment_size,
                direction,
                is_cache_coherent,
                high_watermark,
                avail_pages: SpinLock::new(avail_pages),
                all_pages: SpinLock::new(all_pages),
            }
        })
    }

    /// Allocates a `DmaSegment` from the pool
    pub fn alloc_segment(self: &Arc<Self>) -> Result<DmaSegment, aster_frame::Error> {
        // Lock order: pool.avail_pages -> pool.all_pages
        //             pool.avail_pages -> page.allocated_segments
        let mut avail_pages = self.avail_pages.lock_irq_disabled();
        if avail_pages.is_empty() {
            // Allocate a new page
            let new_page = {
                let pool = Arc::downgrade(self);
                Arc::new(DmaPage::new(
                    self.segment_size,
                    self.direction,
                    self.is_cache_coherent,
                    pool,
                )?)
            };
            let mut all_pages = self.all_pages.lock_irq_disabled();
            avail_pages.push_back(new_page.clone());
            all_pages.push_back(new_page);
        }

        let first_avail_page = avail_pages.front().unwrap();
        let free_segment = first_avail_page.alloc_segment().unwrap();
        if first_avail_page.is_full() {
            avail_pages.pop_front();
        }
        Ok(free_segment)
    }

    /// Returns the number of pages in the pool
    fn num_pages(&self) -> usize {
        self.all_pages.lock_irq_disabled().len()
    }
}

#[derive(Debug)]
struct DmaPage {
    storage: DmaStream,
    segment_size: usize,
    // `BitArray` is 64 bits. Since each `DmaSegment` is at least 64 bytes,
    // there are no more than `PAGE_SIZE` / 64 = 64 `DmaSegment`s in a `DmaPage`.
    allocated_segments: SpinLock<BitArray>,
    pool: Weak<DmaPool>,
}

impl DmaPage {
    fn new(
        segment_size: usize,
        direction: DmaDirection,
        is_cache_coherent: bool,
        pool: Weak<DmaPool>,
    ) -> Result<Self, aster_frame::Error> {
        let dma_stream = {
            let vm_segment = VmAllocOptions::new(1).alloc_contiguous()?;

            DmaStream::map(vm_segment, direction, is_cache_coherent)
                .map_err(|_| aster_frame::Error::AccessDenied)?
        };

        Ok(Self {
            storage: dma_stream,
            segment_size,
            allocated_segments: SpinLock::new(BitArray::ZERO),
            pool,
        })
    }

    fn alloc_segment(self: &Arc<Self>) -> Option<DmaSegment> {
        let mut segments = self.allocated_segments.lock_irq_disabled();
        let free_segment_index = get_next_free_index(&segments, self.nr_blocks_per_page())?;
        segments.set(free_segment_index, true);

        let segment = DmaSegment {
            size: self.segment_size,
            dma_stream: self.storage.clone(),
            start_addr: self.storage.daddr() + free_segment_index * self.segment_size,
            page: Arc::downgrade(self),
        };

        Some(segment)
    }

    fn is_free(&self) -> bool {
        *self.allocated_segments.lock() == BitArray::<[usize; 1], Lsb0>::ZERO
    }

    const fn nr_blocks_per_page(&self) -> usize {
        PAGE_SIZE / self.segment_size
    }

    fn is_full(&self) -> bool {
        let segments = self.allocated_segments.lock_irq_disabled();
        get_next_free_index(&segments, self.nr_blocks_per_page()).is_none()
    }
}

fn get_next_free_index(segments: &BitArray, nr_blocks_per_page: usize) -> Option<usize> {
    let free_segment_index = segments.iter_zeros().next()?;

    if free_segment_index >= nr_blocks_per_page {
        None
    } else {
        Some(free_segment_index)
    }
}

impl HasDaddr for DmaPage {
    fn daddr(&self) -> Daddr {
        self.storage.daddr()
    }
}

/// A small and fixed-size segment of DMA memory.
///
/// The size of a `DmaSegment` ranges from 64 bytes to `PAGE_SIZE` and must be a power of two.
/// Each `DmaSegment`'s daddr must be aligned to its size.
#[derive(Debug)]
pub struct DmaSegment {
    dma_stream: DmaStream,
    start_addr: Daddr,
    size: usize,
    page: Weak<DmaPage>,
}

impl HasDaddr for DmaSegment {
    fn daddr(&self) -> Daddr {
        self.start_addr
    }
}

impl DmaSegment {
    pub const fn size(&self) -> usize {
        self.size
    }

    pub fn reader(&self) -> Result<VmReader<'_>, aster_frame::Error> {
        let offset = self.start_addr - self.dma_stream.daddr();
        Ok(self.dma_stream.reader()?.skip(offset).limit(self.size))
    }

    pub fn writer(&self) -> Result<VmWriter<'_>, aster_frame::Error> {
        let offset = self.start_addr - self.dma_stream.daddr();
        Ok(self.dma_stream.writer()?.skip(offset).limit(self.size))
    }

    pub fn sync(&self, byte_range: Range<usize>) -> Result<(), aster_frame::Error> {
        let offset = self.daddr() - self.dma_stream.daddr();
        let range = byte_range.start + offset..byte_range.end + offset;
        self.dma_stream.sync(range)
    }
}

impl Drop for DmaSegment {
    fn drop(&mut self) {
        let page = self.page.upgrade().unwrap();
        let pool = page.pool.upgrade().unwrap();

        // Keep the same lock order as `pool.alloc_segment`
        // Lock order: pool.avail_pages -> pool.all_pages -> page.allocated_segments
        let mut avail_pages = pool.avail_pages.lock_irq_disabled();
        let mut all_pages = pool.all_pages.lock_irq_disabled();

        let mut allocated_segments = page.allocated_segments.lock_irq_disabled();

        let nr_blocks_per_page = PAGE_SIZE / self.size;
        let became_avail = get_next_free_index(&allocated_segments, nr_blocks_per_page).is_none();

        debug_assert!((page.daddr()..page.daddr() + PAGE_SIZE).contains(&self.daddr()));
        let segment_idx = (self.daddr() - page.daddr()) / self.size;
        allocated_segments.set(segment_idx, false);

        let became_free = allocated_segments.not_any();

        if became_free && all_pages.len() > pool.high_watermark {
            avail_pages.retain(|page_| !Arc::ptr_eq(page_, &page));
            all_pages.retain(|page_| !Arc::ptr_eq(page_, &page));
            return;
        }

        if became_avail {
            avail_pages.push_back(page.clone());
        }
    }
}

#[cfg(ktest)]
mod test {
    use alloc::vec::Vec;

    use super::*;

    #[ktest]
    fn alloc_page_size_segment() {
        let pool = DmaPool::new(PAGE_SIZE, 0, 100, DmaDirection::ToDevice, false);
        let segments1: Vec<_> = (0..100)
            .map(|_| {
                let segment = pool.alloc_segment().unwrap();
                assert_eq!(segment.size(), PAGE_SIZE);
                assert!(segment.reader().is_err());
                assert!(segment.writer().is_ok());
                segment
            })
            .collect();

        assert_eq!(pool.num_pages(), 100);
        drop(segments1);
    }

    #[ktest]
    fn write_to_dma_segment() {
        let pool: Arc<DmaPool> = DmaPool::new(PAGE_SIZE, 1, 2, DmaDirection::ToDevice, false);
        let segment = pool.alloc_segment().unwrap();
        let mut writer = segment.writer().unwrap();
        let data = &[0u8, 1, 2, 3, 4] as &[u8];
        let size = writer.write(&mut VmReader::from(data));
        assert_eq!(size, data.len());
    }

    #[ktest]
    fn free_pool_pages() {
        let pool: Arc<DmaPool> = DmaPool::new(PAGE_SIZE, 10, 50, DmaDirection::ToDevice, false);
        let segments1: Vec<_> = (0..100)
            .map(|_| {
                let segment = pool.alloc_segment().unwrap();
                assert_eq!(segment.size(), PAGE_SIZE);
                assert!(segment.reader().is_err());
                assert!(segment.writer().is_ok());
                segment
            })
            .collect();
        assert_eq!(pool.num_pages(), 100);
        drop(segments1);
        assert_eq!(pool.num_pages(), 50);
    }

    #[ktest]
    fn alloc_small_size_segment() {
        const SEGMENT_SIZE: usize = PAGE_SIZE / 4;
        let pool: Arc<DmaPool> =
            DmaPool::new(SEGMENT_SIZE, 0, 10, DmaDirection::Bidirectional, false);
        let segments1: Vec<_> = (0..100)
            .map(|_| {
                let segment = pool.alloc_segment().unwrap();
                assert_eq!(segment.size(), PAGE_SIZE / 4);
                assert!(segment.reader().is_ok());
                assert!(segment.writer().is_ok());
                segment
            })
            .collect();

        assert_eq!(pool.num_pages(), 100 / 4);
        drop(segments1);
        assert_eq!(pool.num_pages(), 10);
    }

    #[ktest]
    fn read_dma_segments() {
        const SEGMENT_SIZE: usize = PAGE_SIZE / 4;
        let pool: Arc<DmaPool> =
            DmaPool::new(SEGMENT_SIZE, 1, 2, DmaDirection::Bidirectional, false);
        let segment = pool.alloc_segment().unwrap();
        assert_eq!(pool.num_pages(), 1);
        let mut writer = segment.writer().unwrap();
        let data = &[0u8, 1, 2, 3, 4] as &[u8];
        let size = writer.write(&mut VmReader::from(data));
        assert_eq!(size, data.len());

        let mut read_buf = [0u8; 5];
        let mut reader = segment.reader().unwrap();
        reader.read(&mut VmWriter::from(&mut read_buf as &mut [u8]));
        assert_eq!(&read_buf, data);
    }
}
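The pool's policy is easiest to see in miniature. A usage sketch built from the `ktest` cases above (same API, illustrative values):

    // Carve 4 KiB pages into four 1 KiB segments; keep at most 2 idle pages.
    let pool = DmaPool::new(
        PAGE_SIZE / 4,               // segment_size: a power of two in 64..=PAGE_SIZE
        1,                           // init_size: pages allocated up front
        2,                           // high_watermark: free unused pages beyond this
        DmaDirection::Bidirectional, // both reader() and writer() are allowed
        false,                       // not cache coherent
    );

    let segment = pool.alloc_segment().unwrap();
    let data: &[u8] = &[0, 1, 2, 3];
    segment.writer().unwrap().write(&mut VmReader::from(data));
    segment.sync(0..data.len()).unwrap(); // make the bytes visible to the device
    drop(segment); // clears its bit; the page may rejoin avail_pages or be freed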
kernel/comps/network/src/driver.rs

@@ -2,12 +2,10 @@
 
 use alloc::vec;
 
+use aster_frame::vm::VmWriter;
 use smoltcp::{phy, time::Instant};
 
-use crate::{
-    buffer::{RxBuffer, TxBuffer},
-    AnyNetworkDevice,
-};
+use crate::{buffer::RxBuffer, AnyNetworkDevice};
 
 impl phy::Device for dyn AnyNetworkDevice {
     type RxToken<'a> = RxToken;
@@ -37,12 +35,14 @@ impl phy::Device for dyn AnyNetworkDevice {
 pub struct RxToken(RxBuffer);
 
 impl phy::RxToken for RxToken {
-    fn consume<R, F>(mut self, f: F) -> R
+    fn consume<R, F>(self, f: F) -> R
     where
         F: FnOnce(&mut [u8]) -> R,
     {
-        let packet_but = self.0.packet_mut();
-        f(packet_but)
+        let mut packet = self.0.packet();
+        let mut buffer = vec![0u8; packet.remain()];
+        packet.read(&mut VmWriter::from(&mut buffer as &mut [u8]));
+        f(&mut buffer)
     }
 }
 
@@ -55,8 +55,7 @@ impl<'a> phy::TxToken for TxToken<'a> {
     {
         let mut buffer = vec![0u8; len];
         let res = f(&mut buffer);
-        let tx_buffer = TxBuffer::new(&buffer);
-        self.0.send(tx_buffer).expect("Send packet failed");
+        self.0.send(&buffer).expect("Send packet failed");
         res
     }
 }
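Because `RxBuffer::packet()` now returns a `VmReader` over DMA memory rather than a byte slice, `consume` copies the payload into a scratch `Vec` before invoking smoltcp's closure. A sketch of how smoltcp drives these tokens, assuming the smoltcp 0.9 `phy` API (the function and its names are illustrative):

    use smoltcp::{
        phy::{Device, RxToken},
        time::Instant,
    };

    // Poll the device once and log any received frame.
    fn poll_once(dev: &mut dyn AnyNetworkDevice) {
        if let Some((rx, _tx)) = dev.receive(Instant::from_millis(0)) {
            // consume() syncs the DmaSegment, copies the payload out of DMA
            // memory, and hands the temporary buffer to the closure.
            rx.consume(|frame| log::debug!("received {} bytes", frame.len()));
        }
    }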
kernel/comps/network/src/lib.rs

@@ -4,9 +4,11 @@
 #![forbid(unsafe_code)]
 #![feature(trait_alias)]
 #![feature(fn_traits)]
+#![feature(linked_list_cursors)]
 
-pub mod buffer;
-pub mod driver;
+mod buffer;
+mod dma_pool;
+mod driver;
 
 extern crate alloc;
 
@@ -15,8 +17,9 @@ use core::{any::Any, fmt::Debug};
 
 use aster_frame::sync::SpinLock;
 use aster_util::safe_ptr::Pod;
-use buffer::{RxBuffer, TxBuffer};
+pub use buffer::{RxBuffer, TxBuffer};
 use component::{init_component, ComponentInitError};
+pub use dma_pool::DmaSegment;
 use smoltcp::phy;
 use spin::Once;
 
@@ -45,7 +48,7 @@ pub trait AnyNetworkDevice: Send + Sync + Any + Debug {
     /// Otherwise, return NotReady error.
     fn receive(&mut self) -> Result<RxBuffer, VirtioNetError>;
     /// Send a packet to network. Return until the request completes.
-    fn send(&mut self, tx_buffer: TxBuffer) -> Result<(), VirtioNetError>;
+    fn send(&mut self, packet: &[u8]) -> Result<(), VirtioNetError>;
 }
 
 pub trait NetDeviceIrqHandler = Fn() + Send + Sync + 'static;
@@ -55,38 +58,57 @@ pub fn register_device(name: String, device: Arc<SpinLock<dyn AnyNetworkDevice>>
         .get()
         .unwrap()
         .network_device_table
-        .lock()
+        .lock_irq_disabled()
         .insert(name, (Arc::new(SpinLock::new(Vec::new())), device));
 }
 
 pub fn get_device(str: &str) -> Option<Arc<SpinLock<dyn AnyNetworkDevice>>> {
-    let lock = COMPONENT.get().unwrap().network_device_table.lock();
-    let (_, device) = lock.get(str)?;
+    let table = COMPONENT
+        .get()
+        .unwrap()
+        .network_device_table
+        .lock_irq_disabled();
+    let (_, device) = table.get(str)?;
     Some(device.clone())
 }
 
+/// Registers a callback which will be called when receiving a message.
+///
+/// Since the callback will be called in interrupt context,
+/// the callback function should NOT sleep.
 pub fn register_recv_callback(name: &str, callback: impl NetDeviceIrqHandler) {
-    let lock = COMPONENT.get().unwrap().network_device_table.lock();
-    let Some((callbacks, _)) = lock.get(name) else {
+    let device_table = COMPONENT
+        .get()
+        .unwrap()
+        .network_device_table
+        .lock_irq_disabled();
+    let Some((callbacks, _)) = device_table.get(name) else {
         return;
    };
-    callbacks.lock().push(Arc::new(callback));
+    callbacks.lock_irq_disabled().push(Arc::new(callback));
 }
 
 pub fn handle_recv_irq(name: &str) {
-    let lock = COMPONENT.get().unwrap().network_device_table.lock();
-    let Some((callbacks, _)) = lock.get(name) else {
+    let device_table = COMPONENT
+        .get()
+        .unwrap()
+        .network_device_table
+        .lock_irq_disabled();
+    let Some((callbacks, _)) = device_table.get(name) else {
         return;
     };
     let callbacks = callbacks.clone();
-    let lock = callbacks.lock();
-    for callback in lock.iter() {
-        callback.call(())
+    let callbacks = callbacks.lock_irq_disabled();
+    for callback in callbacks.iter() {
+        callback();
    }
 }
 
 pub fn all_devices() -> Vec<(String, NetworkDeviceRef)> {
-    let network_devs = COMPONENT.get().unwrap().network_device_table.lock();
+    let network_devs = COMPONENT
+        .get()
+        .unwrap()
+        .network_device_table
+        .lock_irq_disabled();
     network_devs
         .iter()
         .map(|(name, (_, device))| (name.clone(), device.clone()))
@@ -102,6 +124,7 @@ fn init() -> Result<(), ComponentInitError> {
     let a = Component::init()?;
     COMPONENT.call_once(|| a);
     NETWORK_IRQ_HANDLERS.call_once(|| SpinLock::new(Vec::new()));
+    buffer::init();
     Ok(())
 }
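A consumer-side sketch of the refactored component API (the device name and payload are illustrative; note that `send` now takes a raw byte slice, and the driver builds the `TxBuffer` internally):

    fn demo_sketch(name: &str) {
        // Runs in interrupt context, so it must not sleep (see doc comment above).
        register_recv_callback(name, || {
            // e.g. wake a polling task; no blocking work here.
        });

        if let Some(device) = get_device(name) {
            let _ = device.lock_irq_disabled().send(b"payload");
        }
    }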