Merge pull request #26 from sdww0/main

finish pci virtio block device driver
This commit is contained in:
Tate, Hongliang Tian 2022-09-22 19:08:12 -07:00 committed by GitHub
commit f5f03d6bca
14 changed files with 898 additions and 20 deletions

View File

@ -89,8 +89,8 @@ fn create_fs_image(path: &Path) -> anyhow::Result<String> {
.write(true)
.create(true)
.open(fs_img_path.as_str())?;
// 64MiB
f.set_len(1 * 1024 * 1024).unwrap();
// 16MiB
f.set_len(16 * 1024 * 1024).unwrap();
Ok(format!(
"file={},if=none,format=raw,id=x0",
fs_img_path.as_str()

View File

@ -12,10 +12,6 @@ pub const ENTRY_COUNT: usize = 512;
pub const PAGE_SIZE: usize = 0x1000;
pub const PAGE_SIZE_BITS: usize = 0xc;
pub const MEM_START: usize = 0x8000_0000;
pub const TRAMPOLINE: usize = usize::MAX - PAGE_SIZE + 1;
pub const TRAP_CONTEXT_BASE: usize = TRAMPOLINE - PAGE_SIZE;
pub const KVA_START: usize = (usize::MAX) << PAGE_SIZE_BITS;

View File

@ -1,4 +1,6 @@
pub(crate) mod msix;
mod pci;
pub mod virtio;
pub fn init() {
pci::init();

View File

@ -0,0 +1,71 @@
use alloc::vec::Vec;
use pci::*;
use crate::{mm, trap::NOT_USING_IRQ_NUMBER};
use super::pci::*;
/// Parsed MSI-X capability state for one PCI device, built by
/// [`CapabilityMSIXData::handle`] from the capability structure in the
/// device's configuration space.
#[derive(Debug)]
#[repr(C)]
pub struct CapabilityMSIXData {
    /// Config-space offset of the MSI-X capability.
    pub cap_ptr: u16,
    /// Number of MSI-X table entries (as decoded in `handle`).
    pub table_size: u16,
    /// One entry per MSI-X table slot, paired with its allocated IRQ.
    pub table: Vec<MSIXEntry>,
    /// Virtual address of the MSI-X Pending Bit Array (PBA).
    pub pba_addr: u64,
}

/// An MSI-X table slot together with the IRQ vector assigned to it.
#[derive(Debug)]
pub struct MSIXEntry {
    /// Mapped reference to the in-memory (MMIO) MSI-X table entry.
    pub table_entry: &'static mut MSIXTableEntry,
    /// IRQ vector number taken from `NOT_USING_IRQ_NUMBER`.
    pub allocate_irq: u8,
}

/// In-memory layout of a single MSI-X table entry (four 32-bit fields).
#[derive(Debug)]
#[repr(C)]
pub struct MSIXTableEntry {
    pub msg_addr: u32,
    pub msg_upper_addr: u32,
    pub msg_data: u32,
    pub vector_control: u32,
}
impl CapabilityMSIXData {
    /// Parse the MSI-X capability at `cap_ptr`, map its table and PBA,
    /// assign a free IRQ number to every table entry, and enable MSI-X.
    ///
    /// # Safety
    /// Dereferences raw MMIO addresses derived from the device's BARs and
    /// writes device config space; `loc`/`cap_ptr` must identify a real
    /// MSI-X capability of a present PCI device.
    pub unsafe fn handle(loc: Location, cap_ptr: u16) -> Self {
        let ops = &PortOpsImpl;
        let am = CSpaceAccessMethod::IO;
        // NOTE(review): read but never used. Per the PCI spec the table size
        // is (message_control & 0x3FF) + 1, while below it is decoded from
        // the Table Offset/BIR register instead -- verify.
        let message_control = am.read16(ops, loc, cap_ptr + 2);
        // Table Offset/BIR register (bits 2:0 = BAR index, rest = offset).
        let table_info = am.read32(ops, loc, cap_ptr + 4);
        // PBA Offset/BIR register.
        let pba_info = am.read32(ops, loc, cap_ptr + 8);
        let table_size = table_info & (0b11_1111_1111);
        let mut cap = Self {
            cap_ptr: cap_ptr,
            table_size: table_size as u16,
            table: Vec::new(),
            // NOTE(review): the spec defines the offset as pba_info & !0b111;
            // `pba_info / 8` divides instead of masking, and the raw BAR value
            // is added without clearing its low flag bits -- confirm.
            pba_addr: mm::phys_to_virt(
                (pba_info / 8 + am.read32(ops, loc, PCI_BAR + ((pba_info & 0b111) as u16) * 4))
                    as usize,
            ) as u64,
        };
        // Table address decoded the same way (see the note above).
        let mut table_addr = mm::phys_to_virt(
            (table_info / 8 + am.read32(ops, loc, PCI_BAR + ((table_info & 0b111) as u16) * 4))
                as usize,
        );
        for i in 0..table_size {
            let entry = &mut *(table_addr as *const usize as *mut MSIXTableEntry);
            // 0xFEE0_0000 is the x86 LAPIC MSI address window.
            entry.msg_addr = 0xFEE0_0000;
            // allocate irq number
            let irq_number = NOT_USING_IRQ_NUMBER.exclusive_access().pop().unwrap();
            entry.msg_data = irq_number as u32;
            // 0 = entry not masked.
            entry.vector_control = 0;
            cap.table.push(MSIXEntry {
                table_entry: entry,
                allocate_irq: irq_number,
            });
            // NOTE(review): an MSI-X table entry is 16 bytes (4 x u32) but the
            // stride used here is 32 -- confirm against the spec.
            table_addr += 32;
        }
        // enable MSI-X
        // NOTE(review): MSI-X Enable is bit 15 of Message Control at
        // cap_ptr + 2 (i.e. byte cap_ptr + 3); this writes byte cap_ptr + 0,
        // which holds the capability ID -- verify the offset.
        am.write8(ops, loc, cap_ptr, 0b1000_0000);
        cap
    }
}

View File

@ -1,20 +1,19 @@
use crate::*;
use crate::{drivers::virtio, trap::NOT_USING_IRQ_NUMBER, *};
use pci::*;
const PCI_COMMAND: u16 = 0x04;
const PCI_CAP_PTR: u16 = 0x34;
const PCI_INTERRUPT_LINE: u16 = 0x3c;
const PCI_INTERRUPT_PIN: u16 = 0x3d;
pub(crate) const PCI_COMMAND: u16 = 0x04;
pub(crate) const PCI_BAR: u16 = 0x10;
pub(crate) const PCI_CAP_PTR: u16 = 0x34;
pub(crate) const PCI_INTERRUPT_LINE: u16 = 0x3c;
pub(crate) const PCI_INTERRUPT_PIN: u16 = 0x3d;
const PCI_MSI_CTRL_CAP: u16 = 0x00;
const PCI_MSI_ADDR: u16 = 0x04;
const PCI_MSI_UPPER_ADDR: u16 = 0x08;
const PCI_MSI_DATA_32: u16 = 0x08;
const PCI_MSI_DATA_64: u16 = 0x0C;
pub(crate) const PCI_MSIX_CTRL_CAP: u16 = 0x00;
pub(crate) const PCI_MSIX_TABLE: u16 = 0x04;
pub(crate) const PCI_MSIX_PBA: u16 = 0x08;
const PCI_CAP_ID_MSI: u8 = 0x05;
pub(crate) const PCI_CAP_ID_MSI: u8 = 0x05;
struct PortOpsImpl;
pub(crate) struct PortOpsImpl;
impl PortOps for PortOpsImpl {
unsafe fn read8(&self, port: u16) -> u8 {
@ -57,6 +56,7 @@ pub fn init() {
{
// virtio block device mass storage
info!("found virtio pci block device");
virtio::block::init(dev);
}
}
}

View File

@ -0,0 +1,157 @@
mod virtio_blk;
use core::{any::Any, mem::transmute};
use crate::prelude::*;
use crate::{
cell::Cell,
drivers::pci::{PortOpsImpl, PCI_BAR},
info, mm,
trap::{IrqCallbackHandle, IrqLine, TrapFrame},
};
use pci::{CSpaceAccessMethod, Location, PCIDevice};
use super::AsBuf;
/// Size in bytes of one block-device sector.
pub const BLK_SIZE: usize = 512;

/// Interface every block device driver must implement.
pub trait BlockDevice: Send + Sync + Any {
    /// Read the `block_id`-th block into `buf`.
    fn read_block(&self, block_id: usize, buf: &mut [u8]) -> Result<()>;
    /// Write `buf` to the `block_id`-th block.
    fn write_block(&self, block_id: usize, buf: &[u8]) -> Result<()>;
    /// Called when the device's interrupt fires (see `handle_block_device`).
    fn handle_irq(&self);
}

/// The global block device, overwritten by [`init`].
///
/// NOTE(review): the initializer transmutes a dangling placeholder pointer
/// into an `Arc<dyn BlockDevice>`; any use before `init` runs is undefined
/// behavior -- confirm init ordering.
pub static BLOCK_DEVICE: Cell<Arc<dyn BlockDevice>> = unsafe {
    transmute(&0 as *const _ as *const virtio_blk::VirtIOBlock as *const dyn BlockDevice)
};

// Keeps IRQ callback registrations alive for the lifetime of the kernel.
static mut BLOCK_DEVICE_IRQ_CALLBACK_LIST: Vec<IrqCallbackHandle> = Vec::new();

/// Probe the virtio block device at `loc` and install it as the global
/// [`BLOCK_DEVICE`].
pub fn init(loc: PCIDevice) {
    let dev = virtio_blk::VirtIOBlock::new(loc);
    // NOTE(review): assumes this runs exactly once, before any reader of
    // BLOCK_DEVICE -- confirm.
    unsafe {
        (BLOCK_DEVICE.get() as *mut Arc<dyn BlockDevice>).write(Arc::new(dev));
    }
}
/// Status byte the device writes into a [`BlkResp`] when a request completes.
#[repr(u8)]
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum RespStatus {
    /// Ok.
    Ok = 0,
    /// IoErr.
    IoErr = 1,
    /// Unsupported yet.
    Unsupported = 2,
    /// Not ready.
    _NotReady = 3,
}

/// Device-specific configuration space of a virtio block device.
///
/// NOTE(review): layout presumably mirrors the spec's `virtio_blk_config`;
/// confirm field widths (e.g. `size_max`) against the virtio spec before
/// relying on anything past `capacity`.
#[derive(Debug)]
#[repr(C)]
pub struct VirtioBLKConfig {
    pub capacity: u64,
    pub size_max: u64,
    pub geometry: VirtioBLKGeometry,
    pub blk_size: u32,
    pub topology: VirtioBLKTopology,
    pub writeback: u8,
    pub unused0: [u8; 3],
    pub max_discard_sectors: u32,
    pub max_discard_seg: u32,
    pub discard_sector_alignment: u32,
    pub max_write_zeroes_sectors: u32,
    pub max_write_zeroes_seg: u32,
    pub write_zeros_may_unmap: u8,
    pub unused1: [u8; 3],
}

/// Request header placed at the start of every virtio-blk request chain.
#[repr(C)]
#[derive(Debug)]
struct BlkReq {
    type_: ReqType,
    reserved: u32,
    sector: u64,
}

/// Response of a VirtIOBlk request.
#[repr(C)]
#[derive(Debug)]
pub struct BlkResp {
    pub status: RespStatus,
}

/// Request type codes for [`BlkReq`].
#[repr(u32)]
#[derive(Debug)]
enum ReqType {
    In = 0,
    Out = 1,
    Flush = 4,
    Discard = 11,
    WriteZeroes = 13,
}

/// Legacy geometry fields of the device configuration.
#[derive(Debug)]
#[repr(C)]
pub struct VirtioBLKGeometry {
    pub cylinders: u16,
    pub heads: u8,
    pub sectors: u8,
}

/// I/O topology hints of the device configuration.
#[derive(Debug)]
#[repr(C)]
pub struct VirtioBLKTopology {
    pub physical_block_exp: u8,
    pub alignment_offset: u8,
    pub min_io_size: u16,
    pub opt_io_size: u32,
}
impl VirtioBLKConfig {
    /// Map the device-specific config region described by the virtio PCI
    /// capability at `cap_ptr` and return it as a mutable reference.
    ///
    /// # Safety
    /// The returned `&'static mut` aliases device MMIO; `loc`/`cap_ptr`
    /// must name a valid virtio device-config capability.
    pub unsafe fn new(loc: Location, cap_ptr: u16) -> &'static mut Self {
        let ops = &PortOpsImpl;
        let am = CSpaceAccessMethod::IO;
        // cap_ptr + 4: BAR index; cap_ptr + 8: byte offset within that BAR.
        let bar = am.read8(ops, loc, cap_ptr + 4);
        let offset = am.read32(ops, loc, cap_ptr + 8);
        // Mask off the low BAR flag bits to get the base address.
        let bar_address = am.read32(ops, loc, PCI_BAR + bar as u16 * 4) & (!(0b1111));
        &mut *(mm::phys_to_virt(bar_address as usize + offset as usize) as *const usize
            as *mut Self)
    }
}
impl Default for BlkResp {
    /// A fresh response starts as `_NotReady`; the device overwrites the
    /// status when it completes the request.
    fn default() -> Self {
        Self {
            status: RespStatus::_NotReady,
        }
    }
}
// SAFETY: BlkReq and BlkResp are #[repr(C)] plain data, so viewing them as
// raw bytes for the virtqueue is sound.
unsafe impl AsBuf for BlkReq {}
unsafe impl AsBuf for BlkResp {}

/// Read one block via the global [`BLOCK_DEVICE`].
pub fn read_block(block_id: usize, buf: &mut [u8]) -> Result<()> {
    BLOCK_DEVICE.get().read_block(block_id, buf)
}

/// Write one block via the global [`BLOCK_DEVICE`].
pub fn write_block(block_id: usize, buf: &[u8]) -> Result<()> {
    BLOCK_DEVICE.get().write_block(block_id, buf)
}
/// Write-then-read-back smoke test for the global block device.
///
/// Fills each of the first 512 blocks with a distinct byte pattern and
/// asserts the same bytes come back.
#[allow(unused)]
fn block_device_test() {
    let block_device = BLOCK_DEVICE.clone();
    let mut write_buffer = [0u8; 512];
    let mut read_buffer = [0u8; 512];
    info!("test:{:x}", write_buffer.as_ptr() as usize);
    for i in 0..512 {
        write_buffer.fill(i as u8);
        // Fail loudly instead of silently ignoring an I/O error: a dropped
        // Result here would otherwise surface as a confusing buffer-mismatch
        // assertion below.
        block_device
            .write_block(i as usize, &write_buffer)
            .expect("write_block failed");
        block_device
            .read_block(i as usize, &mut read_buffer)
            .expect("read_block failed");
        assert_eq!(write_buffer, read_buffer);
    }
    info!("block device test passed!");
}

View File

@ -0,0 +1,186 @@
use core::hint::spin_loop;
use alloc::collections::BTreeMap;
use pci::{CSpaceAccessMethod, PCIDevice};
use crate::{
drivers::{
msix::CapabilityMSIXData,
pci::*,
virtio::{block::*, queue::VirtQueue, *},
},
info,
task::Task,
zero, Error,
};
use super::BlockDevice;
/// Driver state for one virtio-over-PCI block device.
pub struct VirtIOBlock {
    // virtio_blk: Cell<VirtIOBlk<'static, VirtioHal>>,
    /// Mapped virtio common configuration (MMIO).
    common_cfg: &'static mut VitrioPciCommonCfg,
    /// Mapped block-device-specific configuration (MMIO).
    dev_cfg: &'static mut VirtioBLKConfig,
    /// Request/response virtqueue (queue index 0).
    queue: Cell<VirtQueue>,
    /// One optional task slot per queue channel; all `None` at creation.
    tasks: BTreeMap<u16, Option<Task>>,
    /// Keeps the IRQ callback registered while the device lives.
    irq_callback: IrqCallbackHandle,
}
impl BlockDevice for VirtIOBlock {
    /// Read one `BLK_SIZE`-byte block into `buf`, busy-waiting until the
    /// device reports completion.
    fn read_block(&self, block_id: usize, buf: &mut [u8]) -> Result<()> {
        assert_eq!(buf.len(), BLK_SIZE);
        let req = BlkReq {
            type_: ReqType::In,
            reserved: 0,
            sector: block_id as u64,
        };
        let mut resp = BlkResp::default();
        let queue = self.queue.get();
        // Device reads the request header; writes into `buf` and `resp`.
        queue
            .add(&[req.as_buf()], &[buf, resp.as_buf_mut()])
            .expect("add queue failed");
        queue.notify();
        // Completion is detected by polling; the IRQ path only logs
        // (see handle_irq below).
        while !queue.can_pop() {
            spin_loop();
        }
        queue.pop_used().expect("pop used failed");
        match resp.status {
            RespStatus::Ok => Ok(()),
            _ => Err(Error::IoError),
        }
    }

    /// Write one `BLK_SIZE`-byte block from `buf`; blocking (polls for
    /// completion).
    fn write_block(&self, block_id: usize, buf: &[u8]) -> Result<()> {
        assert_eq!(buf.len(), BLK_SIZE);
        let req = BlkReq {
            type_: ReqType::Out,
            reserved: 0,
            sector: block_id as u64,
        };
        let mut resp = BlkResp::default();
        let queue = self.queue.get();
        // Device reads the header and `buf`; writes only `resp`.
        queue
            .add(&[req.as_buf(), buf], &[resp.as_buf_mut()])
            .expect("add queue failed");
        queue.notify();
        while !queue.can_pop() {
            spin_loop();
        }
        queue.pop_used().expect("pop used failed");
        match resp.status {
            RespStatus::Ok => Ok(()),
            _ => Err(Error::IoError),
        }
    }

    fn handle_irq(&self) {
        // Currently informational only; requests complete via polling.
        info!("handle irq in block device!");
    }
}
impl VirtIOBlock {
    /// Probe `dev`, walk its capability list, bring the device up, build
    /// virtqueue 0 with 16 descriptors, and hook the queue's MSI-X vector
    /// to an IRQ handler.
    pub fn new(dev: PCIDevice) -> Self {
        // Trap callback: forwards to the global device's handle_irq.
        fn handle_block_device(frame: TrapFrame) {
            BLOCK_DEVICE.get().handle_irq()
        }
        let (msix, common_cfg, dev_cfg, cap_offset, notify_off_multiplier);
        unsafe {
            (msix, common_cfg, dev_cfg, cap_offset, notify_off_multiplier) = Self::enable(dev.loc)
        };
        // NOTE(review): each write *replaces* device_status rather than
        // OR-ing in the previously set bits, and no feature negotiation
        // happens before FEATURES_OK -- confirm against the virtio spec's
        // initialization sequence.
        common_cfg.device_status = DeviceStatus::ACKNOWLEDGE.bits();
        common_cfg.device_status = DeviceStatus::DRIVER.bits();
        common_cfg.device_status = DeviceStatus::FEATURES_OK.bits();
        let queue = VirtQueue::new(common_cfg, 0, 16, cap_offset, notify_off_multiplier)
            .expect("error creating virtqueue");
        common_cfg.queue_enable = 1;
        common_cfg.device_status = DeviceStatus::DRIVER_OK.bits();
        // One (currently empty) task slot per queue entry.
        let mut tasks = BTreeMap::new();
        let channels = queue.size();
        for i in 0..channels {
            tasks.insert(i, None);
        }
        // queue_msix_vector was set to 0 in `enable`; look up that entry.
        let msix_entry = msix
            .table
            .get(common_cfg.queue_msix_vector as usize)
            .unwrap();
        // register interrupt
        let irq_number = msix_entry.allocate_irq;
        let irq;
        unsafe {
            irq = IrqLine::acquire(irq_number);
        }
        let blk = Self {
            common_cfg,
            dev_cfg,
            queue: Cell::new(queue),
            tasks: tasks,
            irq_callback: irq.on_active(handle_block_device),
        };
        blk
    }

    /// Enable the pci device and virtio MSIX
    /// need to activate the specific device
    /// return the msix, virtio pci common cfg, virtio block device config,
    /// the virtual address of cap.offset and notify_off_multiplier
    ///
    /// # Safety
    /// Reads/writes device config space and maps BAR-derived MMIO; `loc`
    /// must identify a present virtio PCI block device.
    unsafe fn enable(
        loc: Location,
    ) -> (
        CapabilityMSIXData,
        &'static mut VitrioPciCommonCfg,
        &'static mut VirtioBLKConfig,
        usize,
        u32,
    ) {
        let ops = &PortOpsImpl;
        let am = CSpaceAccessMethod::IO;
        // 23 and lower are used, use 22-27
        // NOTE(review): MSI_IRQ is never read or modified -- dead code?
        static mut MSI_IRQ: u32 = 23;
        let mut cap_ptr = am.read8(ops, loc, PCI_CAP_PTR) as u16;
        // NOTE(review): `zero()` produces zeroed placeholder values for
        // these reference-typed bindings; if a capability is missing they
        // stay zero/null when returned -- only the MSI-X case is guarded
        // via `init` below.
        let mut msix = zero();
        let mut init = false;
        let mut common_cfg = zero();
        let mut dev_cfg = zero();
        let mut notify_off_multiplier: u32 = 0;
        let mut cap_offset: usize = 0;
        // Walk the capability list: id 9 = vendor-specific (virtio),
        // id 17 = MSI-X.
        while cap_ptr > 0 {
            let cap_vndr = am.read8(ops, loc, cap_ptr);
            match cap_vndr {
                9 => {
                    let cap = PciVirtioCapability::handle(loc, cap_ptr);
                    match cap.cfg {
                        CFGType::COMMON(x) => {
                            common_cfg = x;
                        }
                        CFGType::NOTIFY(x) => {
                            // Map the notify region: BAR base (flag bits
                            // masked off) plus the capability's offset.
                            let bar = cap.bar;
                            let bar_address =
                                am.read32(ops, loc, PCI_BAR + bar as u16 * 4) & (!(0b1111));
                            cap_offset = mm::phys_to_virt((bar_address + cap.offset) as usize);
                            notify_off_multiplier = x;
                        }
                        CFGType::DEVICE(dev) => {
                            match dev {
                                VirtioDeviceCFG::Block(x) => dev_cfg = x,
                                _ => {
                                    panic!("wrong device while initalize virtio block device")
                                }
                            };
                        }
                        _ => {}
                    };
                }
                17 => {
                    msix = CapabilityMSIXData::handle(loc, cap_ptr);
                    init = true;
                }
                _ => panic!("unsupport capability, id:{}", cap_vndr),
            };
            // Next-capability pointer lives at offset 1 of each capability.
            cap_ptr = am.read8(ops, loc, cap_ptr + 1) as u16;
        }
        if !init {
            panic!("PCI Virtio Block Device initalize incomplete, not found msix");
        }
        // Route queue interrupts through MSI-X vector 0.
        common_cfg.queue_msix_vector = 0;
        (msix, common_cfg, dev_cfg, cap_offset, notify_off_multiplier)
    }
}

View File

@ -0,0 +1,158 @@
use core::mem::size_of;
use crate::mm;
use bitflags::bitflags;
use pci::{CSpaceAccessMethod, Location};
use self::block::VirtioBLKConfig;
use super::pci::*;
pub(crate) mod block;
pub(crate) mod queue;
// `cfg_type` values of virtio PCI capabilities, one per structure kind
// (common config, notify, ISR, device-specific config, PCI access).
pub(crate) const PCI_VIRTIO_CAP_COMMON_CFG: u8 = 1;
pub(crate) const PCI_VIRTIO_CAP_NOTIFY_CFG: u8 = 2;
pub(crate) const PCI_VIRTIO_CAP_ISR_CFG: u8 = 3;
pub(crate) const PCI_VIRTIO_CAP_DEVICE_CFG: u8 = 4;
pub(crate) const PCI_VIRTIO_CAP_PCI_CFG: u8 = 5;
bitflags! {
    /// The device status field.
    pub(crate) struct DeviceStatus: u8 {
        /// Indicates that the guest OS has found the device and recognized it
        /// as a valid virtio device.
        const ACKNOWLEDGE = 1;
        /// Indicates that the guest OS knows how to drive the device.
        const DRIVER = 2;
        /// Indicates that something went wrong in the guest, and it has given
        /// up on the device. This could be an internal error, or the driver
        /// didn't like the device for some reason, or even a fatal error
        /// during device operation.
        const FAILED = 128;
        /// Indicates that the driver has acknowledged all the features it
        /// understands, and feature negotiation is complete.
        const FEATURES_OK = 8;
        /// Indicates that the driver is set up and ready to drive the device.
        const DRIVER_OK = 4;
        /// Indicates that the device has experienced an error from which it
        /// can't recover.
        const DEVICE_NEEDS_RESET = 64;
    }
}
/// Virtio-over-PCI common configuration layout, mapped from the region the
/// COMMON capability points at.
///
/// NOTE(review): the type name misspells "Virtio" as "Vitrio"; renaming
/// would touch every caller.
#[derive(Debug)]
#[repr(C)]
pub(crate) struct VitrioPciCommonCfg {
    device_feature_select: u32,
    device_feature: u32,
    driver_feature_select: u32,
    driver_feature: u32,
    config_msix_vector: u16,
    num_queues: u16,
    device_status: u8,
    config_generation: u8,
    // Fields below apply to the queue currently picked via `queue_select`.
    queue_select: u16,
    queue_size: u16,
    queue_msix_vector: u16,
    queue_enable: u16,
    queue_notify_off: u16,
    queue_desc: u64,
    queue_driver: u64,
    queue_device: u64,
}

/// Decoded payload of a virtio PCI capability, keyed by its `cfg_type`.
#[derive(Debug)]
enum CFGType {
    COMMON(&'static mut VitrioPciCommonCfg),
    /// Carries `notify_off_multiplier` read from the NOTIFY capability.
    NOTIFY(u32),
    ISR,
    DEVICE(VirtioDeviceCFG),
    PCI,
}

/// Device-specific configuration, by virtio device class.
///
/// Only `Block` currently carries a payload; the other variants are
/// placeholders.
#[derive(Debug)]
enum VirtioDeviceCFG {
    Network,
    Block(&'static mut VirtioBLKConfig),
    Console,
    Entropy,
    TraditionalMemoryBalloon,
    ScsiHost,
    GPU,
    Input,
    Crypto,
    Socket,
}

/// A virtio PCI capability header plus its decoded payload.
#[derive(Debug)]
struct PciVirtioCapability {
    pub cap_vndr: u8,
    pub cap_ptr: u16,
    pub cap_len: u8,
    pub cfg_type: u8,
    pub cfg: CFGType,
    /// Index of the BAR the capability's region lives in.
    pub bar: u8,
    /// Byte offset of the region within that BAR.
    pub offset: u32,
    pub length: u32,
}
impl VitrioPciCommonCfg {
    /// Map the common configuration region described by the COMMON
    /// capability at `cap_ptr`.
    ///
    /// # Safety
    /// The returned `&'static mut` aliases device MMIO; `loc`/`cap_ptr`
    /// must name a valid virtio COMMON capability.
    pub unsafe fn new(loc: Location, cap_ptr: u16) -> &'static mut Self {
        let ops = &PortOpsImpl;
        let am = CSpaceAccessMethod::IO;
        // cap_ptr + 4: BAR index; cap_ptr + 8: byte offset within that BAR.
        let bar = am.read8(ops, loc, cap_ptr + 4);
        let offset = am.read32(ops, loc, cap_ptr + 8);
        // Mask off the low BAR flag bits to get the base address.
        let bar_address = am.read32(ops, loc, PCI_BAR + bar as u16 * 4) & (!(0b1111));
        &mut *(mm::phys_to_virt(bar_address as usize + offset as usize) as *const usize
            as *mut Self)
    }
}
impl PciVirtioCapability {
    /// Parse the virtio (vendor-specific) capability at `cap_ptr` and map
    /// the structure it describes.
    ///
    /// NOTE(review): a DEVICE cfg is unconditionally decoded as a *block*
    /// device config; other virtio device classes would be misread here.
    ///
    /// # Safety
    /// Maps and dereferences device MMIO derived from `loc`'s BARs.
    pub unsafe fn handle(loc: Location, cap_ptr: u16) -> Self {
        let ops = &PortOpsImpl;
        let am = CSpaceAccessMethod::IO;
        // Generic header: vendor id, next pointer, length, cfg type.
        let cap_vndr = am.read8(ops, loc, cap_ptr);
        // NOTE(review): read but unused; the caller advances via cap_ptr + 1
        // itself.
        let cap_next = am.read8(ops, loc, cap_ptr + 1);
        let cap_len = am.read8(ops, loc, cap_ptr + 2);
        let cfg_type = am.read8(ops, loc, cap_ptr + 3);
        let cfg = match cfg_type {
            PCI_VIRTIO_CAP_COMMON_CFG => CFGType::COMMON(VitrioPciCommonCfg::new(loc, cap_ptr)),
            // For NOTIFY, the dword at +16 is notify_off_multiplier.
            PCI_VIRTIO_CAP_NOTIFY_CFG => CFGType::NOTIFY(am.read32(ops, loc, cap_ptr + 16)),
            PCI_VIRTIO_CAP_ISR_CFG => CFGType::ISR,
            PCI_VIRTIO_CAP_DEVICE_CFG => {
                CFGType::DEVICE(VirtioDeviceCFG::Block(VirtioBLKConfig::new(loc, cap_ptr)))
            }
            PCI_VIRTIO_CAP_PCI_CFG => CFGType::PCI,
            _ => panic!("unsupport cfg, cfg_type:{}", cfg_type),
        };
        let cap = PciVirtioCapability {
            cap_vndr: cap_vndr,
            cap_ptr: cap_ptr,
            cap_len: cap_len,
            cfg_type: cfg_type,
            cfg: cfg,
            bar: am.read8(ops, loc, cap_ptr + 4),
            offset: am.read32(ops, loc, cap_ptr + 8),
            length: am.read32(ops, loc, cap_ptr + 12),
        };
        cap
    }
}
/// Convert a struct into a byte buffer.
///
/// # Safety
/// Implementors declare that viewing their in-memory representation as a
/// plain byte slice is sound (the visible implementors are `#[repr(C)]`
/// plain-data types shared with the device).
unsafe trait AsBuf: Sized {
    /// Borrow `self` as its raw bytes.
    fn as_buf(&self) -> &[u8] {
        unsafe { core::slice::from_raw_parts(self as *const _ as _, size_of::<Self>()) }
    }
    /// Borrow `self` as mutable raw bytes (device writes land here).
    fn as_buf_mut(&mut self) -> &mut [u8] {
        unsafe { core::slice::from_raw_parts_mut(self as *mut _ as _, size_of::<Self>()) }
    }
}

View File

@ -0,0 +1,289 @@
//! FIXME: use Volatile
use crate::mm::address::align_up;
use core::mem::size_of;
use core::slice;
use core::sync::atomic::{fence, Ordering};
use super::*;
/// Errors a virtqueue operation can return.
#[derive(Debug)]
pub enum QueueError {
    InvalidArgs,
    BufferTooSmall,
    NotReady,
    AlreadyUsed,
}

/// MMIO cell the driver writes in order to kick the device.
#[derive(Debug)]
#[repr(C)]
pub struct QueueNotify {
    notify: u32,
}

/// The mechanism for bulk data transport on virtio devices.
///
/// Each device can have zero or more virtqueues.
#[derive(Debug)]
pub(crate) struct VirtQueue {
    /// Descriptor table
    desc: &'static mut [Descriptor],
    /// Available ring
    avail: &'static mut AvailRing,
    /// Used ring
    used: &'static mut UsedRing,
    /// point to notify address
    notify: &'static mut QueueNotify,
    /// The index of queue
    queue_idx: u32,
    /// The size of the queue.
    ///
    /// This is both the number of descriptors, and the number of slots in the available and used
    /// rings.
    queue_size: u16,
    /// The number of used queues.
    num_used: u16,
    /// The head desc index of the free list.
    free_head: u16,
    /// Next index the driver will publish in the available ring.
    avail_idx: u16,
    /// Last index consumed from the used ring.
    last_used_idx: u16,
}
impl VirtQueue {
    /// Create a new VirtQueue.
    ///
    /// Selects queue `idx` on the device, sizes it to `size` (a power of
    /// two, no larger than the device maximum), and maps the descriptor
    /// table, available ring and used ring from the addresses read out of
    /// the common config.
    pub fn new(
        cfg: &mut VitrioPciCommonCfg,
        idx: usize,
        size: u16,
        cap_offset: usize,
        notify_off_multiplier: u32,
    ) -> Result<Self, QueueError> {
        if !size.is_power_of_two() || cfg.queue_size < size {
            return Err(QueueError::InvalidArgs);
        }
        // NOTE(review): `layout` is computed but never used -- the ring
        // memory is taken from cfg.queue_desc/driver/device below rather
        // than allocated from this layout. Confirm the device actually
        // provides those addresses (normally the driver allocates them).
        let layout = VirtQueueLayout::new(size);
        // Allocate contiguous pages.
        cfg.queue_select = idx as u16;
        cfg.queue_size = size;
        let desc = unsafe {
            slice::from_raw_parts_mut(
                mm::phys_to_virt(cfg.queue_desc as usize) as *mut Descriptor,
                size as usize,
            )
        };
        let avail =
            unsafe { &mut *(mm::phys_to_virt(cfg.queue_driver as usize) as *mut AvailRing) };
        let used = unsafe { &mut *(mm::phys_to_virt(cfg.queue_device as usize) as *mut UsedRing) };
        // Notify address = notify-capability base + multiplier * index.
        // NOTE(review): the spec multiplies by queue_notify_off, not the
        // queue index -- equivalent only when the two coincide.
        let notify = unsafe {
            &mut *((cap_offset + notify_off_multiplier as usize * idx) as *mut QueueNotify)
        };
        // Link descriptors together.
        for i in 0..(size - 1) {
            desc[i as usize].next = i + 1;
        }
        Ok(VirtQueue {
            desc,
            avail,
            used,
            notify,
            queue_size: size,
            queue_idx: idx as u32,
            num_used: 0,
            free_head: 0,
            avail_idx: 0,
            last_used_idx: 0,
        })
    }

    /// Add buffers to the virtqueue, return a token.
    ///
    /// `inputs` are device-readable buffers, `outputs` device-writable; the
    /// token is the head descriptor index of the submitted chain.
    ///
    /// Ref: linux virtio_ring.c virtqueue_add
    pub fn add(&mut self, inputs: &[&[u8]], outputs: &[&mut [u8]]) -> Result<u16, QueueError> {
        if inputs.is_empty() && outputs.is_empty() {
            return Err(QueueError::InvalidArgs);
        }
        if inputs.len() + outputs.len() + self.num_used as usize > self.queue_size as usize {
            return Err(QueueError::BufferTooSmall);
        }
        // allocate descriptors from free list
        let head = self.free_head;
        let mut last = self.free_head;
        for input in inputs.iter() {
            let desc = &mut self.desc[self.free_head as usize];
            desc.set_buf(input);
            desc.flags = DescFlags::NEXT;
            last = self.free_head;
            self.free_head = desc.next;
        }
        for output in outputs.iter() {
            let desc = &mut self.desc[self.free_head as usize];
            desc.set_buf(output);
            // WRITE marks this buffer as device-writable.
            desc.flags = DescFlags::NEXT | DescFlags::WRITE;
            last = self.free_head;
            self.free_head = desc.next;
        }
        // set last_elem.next = NULL
        {
            let desc = &mut self.desc[last as usize];
            let mut flags = desc.flags;
            flags.remove(DescFlags::NEXT);
            desc.flags = flags;
        }
        self.num_used += (inputs.len() + outputs.len()) as u16;
        // Publish the chain head in the next available-ring slot
        // (queue_size is a power of two, so `&` masks the index).
        let avail_slot = self.avail_idx & (self.queue_size - 1);
        self.avail.ring[avail_slot as usize] = head;
        // write barrier
        fence(Ordering::SeqCst);
        // increase head of avail ring
        self.avail_idx = self.avail_idx.wrapping_add(1);
        self.avail.idx = self.avail_idx;
        Ok(head)
    }

    /// Whether there is a used element that can pop.
    pub fn can_pop(&self) -> bool {
        self.last_used_idx != self.used.idx
    }

    /// The number of free descriptors.
    pub fn available_desc(&self) -> usize {
        (self.queue_size - self.num_used) as usize
    }

    /// Recycle descriptors in the list specified by head.
    ///
    /// This will push all linked descriptors at the front of the free list.
    fn recycle_descriptors(&mut self, mut head: u16) {
        let origin_free_head = self.free_head;
        self.free_head = head;
        loop {
            let desc = &mut self.desc[head as usize];
            let flags = desc.flags;
            self.num_used -= 1;
            if flags.contains(DescFlags::NEXT) {
                head = desc.next;
            } else {
                // Last element of the chain: splice the old free list back on.
                desc.next = origin_free_head;
                return;
            }
        }
    }

    /// Get a token from device used buffers, return (token, len).
    ///
    /// Ref: linux virtio_ring.c virtqueue_get_buf_ctx
    pub fn pop_used(&mut self) -> Result<(u16, u32), QueueError> {
        if !self.can_pop() {
            return Err(QueueError::NotReady);
        }
        // read barrier
        fence(Ordering::SeqCst);
        let last_used_slot = self.last_used_idx & (self.queue_size - 1);
        let index = self.used.ring[last_used_slot as usize].id as u16;
        let len = self.used.ring[last_used_slot as usize].len;
        self.recycle_descriptors(index);
        self.last_used_idx = self.last_used_idx.wrapping_add(1);
        Ok((index, len))
    }

    /// Return size of the queue.
    pub fn size(&self) -> u16 {
        self.queue_size
    }

    /// Kick the device.
    ///
    /// NOTE(review): virtio expects the queue's index to be written to the
    /// notify address; writing 0 is only correct for queue 0 -- confirm.
    pub fn notify(&mut self) {
        self.notify.notify = 0
    }
}
/// The inner layout of a VirtQueue.
///
/// Ref: 2.6.2 Legacy Interfaces: A Note on Virtqueue Layout
struct VirtQueueLayout {
    /// Byte offset of the available ring (== size of the descriptor table).
    avail_offset: usize,
    /// Byte offset of the used ring (rounded with `align_up`).
    used_offset: usize,
    /// Total bytes needed for the whole queue.
    size: usize,
}
impl VirtQueueLayout {
    /// Compute the legacy-layout offsets and total footprint for a queue
    /// of `queue_size` descriptors (must be a power of two).
    fn new(queue_size: u16) -> Self {
        assert!(
            queue_size.is_power_of_two(),
            "queue size should be a power of 2"
        );
        let qs = queue_size as usize;
        // Region sizes per the legacy virtqueue layout.
        let desc_bytes = size_of::<Descriptor>() * qs;
        let avail_bytes = size_of::<u16>() * (3 + qs);
        let used_bytes = size_of::<u16>() * 3 + size_of::<UsedElem>() * qs;
        // The used ring starts at the next aligned boundary after
        // descriptors + available ring.
        let used_offset = align_up(desc_bytes + avail_bytes);
        Self {
            avail_offset: desc_bytes,
            used_offset,
            size: used_offset + align_up(used_bytes),
        }
    }
}
/// One entry of the descriptor table.
#[repr(C, align(16))]
#[derive(Debug)]
struct Descriptor {
    /// Physical address of the buffer.
    addr: u64,
    /// Buffer length in bytes.
    len: u32,
    flags: DescFlags,
    /// Index of the next descriptor when `flags` contains `NEXT`.
    next: u16,
}
impl Descriptor {
    /// Point this descriptor at `buf`: record the buffer's physical
    /// address and its length in bytes.
    fn set_buf(&mut self, buf: &[u8]) {
        let pa = mm::virt_to_phys(buf.as_ptr() as usize);
        self.len = buf.len() as u32;
        self.addr = pa as u64;
    }
}
bitflags! {
    /// Descriptor flags
    struct DescFlags: u16 {
        /// Chain continues via the descriptor's `next` field.
        const NEXT = 1;
        /// Buffer is device-writable (set for output buffers in `add`).
        const WRITE = 2;
        /// Buffer contains a list of descriptors (unused here).
        const INDIRECT = 4;
    }
}
/// The driver uses the available ring to offer buffers to the device:
/// each ring entry refers to the head of a descriptor chain.
/// It is only written by the driver and read by the device.
#[repr(C)]
#[derive(Debug)]
struct AvailRing {
    flags: u16,
    /// A driver MUST NOT decrement the idx.
    idx: u16,
    // NOTE(review): ring storage is hard-coded to 32 slots; only sound
    // while queues are created with size <= 32 (currently 16).
    ring: [u16; 32], // actual size: queue_size
    used_event: u16, // unused
}

/// The used ring is where the device returns buffers once it is done with them:
/// it is only written to by the device, and read by the driver.
#[repr(C)]
#[derive(Debug)]
struct UsedRing {
    flags: u16,
    idx: u16,
    // Same hard-coded 32-slot caveat as AvailRing above.
    ring: [UsedElem; 32], // actual size: queue_size
    avail_event: u16, // unused
}

/// One completed element: head descriptor id and the number of bytes the
/// device wrote.
#[repr(C)]
#[derive(Debug)]
struct UsedElem {
    id: u32,
    len: u32,
}

View File

@ -5,4 +5,5 @@ pub enum Error {
NoMemory,
PageFault,
AccessDenied,
IoError,
}

View File

@ -41,6 +41,8 @@ use bootloader::{
};
use trap::{IrqCallbackHandle, IrqLine, TrapFrame};
pub use self::drivers::virtio::block::{read_block, write_block};
static mut IRQ_CALLBACK_LIST: Vec<IrqCallbackHandle> = Vec::new();
#[cfg(not(feature = "serial_print"))]

View File

@ -10,6 +10,7 @@ use address::PhysAddr;
use address::VirtAddr;
pub use self::{frame_allocator::*, memory_set::*, page_table::*};
pub(crate) use address::{phys_to_virt, virt_to_phys};
bitflags::bitflags! {
/// Possible flags for a page table entry.

View File

@ -1,9 +1,24 @@
use crate::prelude::*;
use crate::{prelude::*, sync::up::UPSafeCell};
use super::TrapFrame;
use lazy_static::lazy_static;
use spin::{Mutex, MutexGuard};
lazy_static! {
    /// Pool of IRQ vector numbers that have not been handed out yet;
    /// drivers `pop` to allocate (and may `push` to release).
    /// FIXME: using alloc, dealloc instead of letting user use push and pop method.
    pub static ref NOT_USING_IRQ_NUMBER: UPSafeCell<Vec<u8>> = unsafe { UPSafeCell::new({
        let mut vector = Vec::new();
        // Vectors 31..=255 are treated as free.
        for i in 31..256 {
            vector.push(i as u8);
        }
        // Also 22..=27. NOTE(review): a comment in the virtio code says
        // "23 and lower are used, use 22-27" -- confirm the 22/23 overlap.
        for i in 22..28 {
            vector.push(i as u8);
        }
        vector
    })};
}
lazy_static! {
pub static ref IRQ_LIST: Vec<IrqLine> = {
let mut list: Vec<IrqLine> = Vec::new();

View File

@ -1,7 +1,7 @@
mod handler;
mod irq;
pub use self::irq::{IrqCallbackHandle, IrqLine};
pub use self::irq::{IrqCallbackHandle, IrqLine, NOT_USING_IRQ_NUMBER};
use core::mem::size_of_val;
use crate::{x86_64_util::*, *};