ahci内存越界问题修复+ mm的bug修复+在rust中解析acpi table (#384)

* bugfix: 修复了Flusher Drop的时候没有自动刷新TLB的bug

* 解决进程管理未初始化时,trap.c尝试打印pid导致错误的问题

* 设置kmalloc默认强制清0

* 修复ahci驱动的内存越界问题
* 修复mmio buddy忘记归还buddy block的问题
* 新增acpi模块,暂时能解析acpi tables
This commit is contained in:
LoGin
2023-09-17 15:41:01 +08:00
committed by GitHub
parent 1111099746
commit 7ae679ddd6
19 changed files with 234 additions and 92 deletions

View File

@ -27,6 +27,8 @@ hashbrown = "0.13.2"
elf = { version = "0.7.2", default-features = false }
memoffset = "0.9.0"
atomic_enum = "0.2.0"
raw-cpuid = "11.0.1"
acpi = "5.0.0"
# 构建时依赖项
[build-dependencies]

View File

@ -1,5 +1,5 @@
use crate::arch::TraitPciArch;
use crate::driver::acpi::acpi::mcfg_find_segment;
use crate::driver::acpi::old::mcfg_find_segment;
use crate::driver::pci::pci::{
BusDeviceFunction, PciAddr, PciError, PciRoot, SegmentGroupNumber, PORT_PCI_CONFIG_ADDRESS,
PORT_PCI_CONFIG_DATA,

View File

@ -5,6 +5,8 @@
#include <mm/mm.h>
#include <mm/mmio.h>
extern void rs_acpi_init(uint64_t rsdp_paddr);
#define acpi_get_RSDT_entry_vaddr(phys_addr) (acpi_description_header_base + (phys_addr)-acpi_RSDT_entry_phys_base) // 获取RSDT entry的虚拟地址
// #define acpi_get_XSDT_entry_vaddr(phys_addr) (ACPI_DESCRIPTION_HEDERS_BASE + (phys_addr)-acpi_XSDT_entry_phys_base) // 获取XSDT entry的虚拟地址
@ -138,6 +140,8 @@ void acpi_init()
multiboot2_iter(multiboot2_get_acpi_new_RSDP, &new_acpi, &reserved);
rsdpv2 = &(new_acpi.rsdp);
rs_acpi_init((uint64_t)rsdpv1);
uint64_t paddr = 0;
// An ACPI-compatible OS must use the XSDT if present
if (rsdpv2->XsdtAddress != 0x00UL)

View File

@ -0,0 +1,24 @@
use crate::{
arch::MMArch,
libs::align::AlignedBox,
mm::{MemoryManagementArch, VirtAddr},
};
use super::AcpiManager;
/// Page-aligned backing store for a private copy of the firmware RSDP.
/// Kept alive for the kernel's whole lifetime so the physical address
/// handed to `AcpiManager::init` remains valid.
static mut RSDP_TMP_BOX: Option<AlignedBox<[u8; 4096], 4096>> = None;

/// C entry point: copy the RSDP pointed to by `rsdp_vaddr` into a
/// page-aligned kernel buffer, then start ACPI table parsing using the
/// buffer's physical address.
///
/// # Safety
/// `rsdp_vaddr` must be a valid, readable virtual address of an RSDP
/// structure, and this function mutates a global, so it must not be
/// called concurrently with itself.
/// NOTE(review): `size_of::<Rsdp>()` covers the extended (v2) layout —
/// assumes the source mapping is readable for that many bytes even for
/// a v1 RSDP; TODO confirm.
#[no_mangle]
unsafe extern "C" fn rs_acpi_init(rsdp_vaddr: u64) {
    // Allocate the zeroed, page-aligned scratch buffer.
    let scratch = AlignedBox::new_zeroed().expect("rs_acpi_init(): failed to alloc");
    RSDP_TMP_BOX = Some(scratch);

    // Copy the RSDP bytes out of the caller-provided mapping.
    let rsdp_len = core::mem::size_of::<acpi::rsdp::Rsdp>();
    let src = core::slice::from_raw_parts(rsdp_vaddr as usize as *const u8, rsdp_len);
    let dst = RSDP_TMP_BOX.as_mut().unwrap();
    dst[0..rsdp_len].copy_from_slice(src);

    // Resolve the copy's physical address and hand it to the ACPI manager.
    let copy_vaddr = VirtAddr::new(RSDP_TMP_BOX.as_ref().unwrap().as_ptr() as usize);
    let rsdp_paddr = MMArch::virt_2_phys(copy_vaddr).unwrap();
    AcpiManager::init(rsdp_paddr);
}

View File

@ -1 +1,99 @@
pub mod acpi;
use core::{fmt::Debug, ptr::NonNull};
use acpi::AcpiHandler;
use crate::{
kinfo,
libs::{
align::{page_align_down, page_align_up},
once::Once,
},
mm::{
mmio_buddy::{mmio_pool, MMIOSpaceGuard},
PhysAddr, VirtAddr,
},
};
mod c_adapter;
pub mod old;
extern crate acpi;
/// Parsed ACPI tables; written exactly once by `AcpiManager::init` and read
/// via `AcpiManager::tables`.
/// NOTE(review): a `static mut` read by `tables()` while `init()` runs on
/// another core would race — assumes init completes before readers exist;
/// TODO confirm.
static mut __ACPI_TABLE: Option<acpi::AcpiTables<AcpiHandlerImpl>> = None;

/// Facade over the `acpi` crate: owns the parsed ACPI tables for the kernel.
#[derive(Debug)]
pub struct AcpiManager;

impl AcpiManager {
    /// Parse the ACPI tables starting from the RSDP at `rsdp_paddr` and
    /// store the result globally. Safe to call multiple times: the `Once`
    /// guard ensures parsing happens only on the first call.
    ///
    /// # Panics
    /// Panics if the tables cannot be parsed (boot cannot proceed sensibly
    /// without ACPI here).
    pub fn init(rsdp_paddr: PhysAddr) {
        static INIT: Once = Once::new();
        INIT.call_once(|| {
            kinfo!("Initializing Acpi Manager...");

            // Walk RSDP -> (R/X)SDT -> tables; physical ranges are mapped
            // on demand through AcpiHandlerImpl.
            let acpi_table: acpi::AcpiTables<AcpiHandlerImpl> =
                unsafe { acpi::AcpiTables::from_rsdp(AcpiHandlerImpl, rsdp_paddr.data()) }
                    .unwrap_or_else(|e| {
                        // Message names the actual function (was "acpi_init()").
                        panic!(
                            "AcpiManager::init(): failed to parse acpi tables, error: {:?}",
                            e
                        )
                    });

            unsafe {
                __ACPI_TABLE = Some(acpi_table);
            }
            kinfo!("Acpi Manager initialized.");
        });
    }

    /// Returns the parsed ACPI tables, or `None` if `init` has not run yet.
    #[allow(dead_code)]
    pub fn tables() -> Option<&'static acpi::AcpiTables<AcpiHandlerImpl>> {
        unsafe { __ACPI_TABLE.as_ref() }
    }
}
/// Physical-memory access handler for the `acpi` crate: maps/unmaps table
/// memory through the kernel's MMIO buddy allocator.
#[derive(Debug, Clone, Copy)]
pub struct AcpiHandlerImpl;

impl AcpiHandler for AcpiHandlerImpl {
    /// Map `size` bytes at `physical_address` and return the mapping the
    /// `acpi` crate will read tables through.
    unsafe fn map_physical_region<T>(
        &self,
        physical_address: usize,
        size: usize,
    ) -> acpi::PhysicalMapping<Self, T> {
        // The MMIO pool works in whole pages: round the start down and the
        // length up, remembering the in-page offset of the requested address.
        let offset = physical_address - page_align_down(physical_address);
        let size_fix = page_align_up(size + offset);

        let mmio_guard = mmio_pool()
            .create_mmio(size_fix)
            .expect("AcpiHandlerImpl::map_physical_region(): failed to create mmio");

        mmio_guard
            .map_phys(PhysAddr::new(page_align_down(physical_address)), size_fix)
            .expect("AcpiHandlerImpl::map_physical_region(): failed to map phys");
        // Re-apply the in-page offset so the returned pointer addresses the
        // caller's exact physical_address, not the page start.
        let virtual_start = mmio_guard.vaddr().data() + offset;
        let virtual_start = NonNull::new(virtual_start as *mut T).unwrap();
        // mapped_length is the guard's (page-rounded) size; region_length is
        // the caller-requested size.
        let result: acpi::PhysicalMapping<AcpiHandlerImpl, T> = acpi::PhysicalMapping::new(
            physical_address,
            virtual_start,
            size,
            mmio_guard.size(),
            AcpiHandlerImpl,
        );

        // Leak the guard so its Drop does not unmap the region while the
        // acpi crate still uses it; unmap_physical_region reconstructs it.
        MMIOSpaceGuard::leak(mmio_guard);

        return result;
    }

    /// Rebuild the leaked guard from the mapping's address/length and drop
    /// it, releasing the MMIO space created in map_physical_region.
    fn unmap_physical_region<T>(region: &acpi::PhysicalMapping<Self, T>) {
        let mmio_guard = unsafe {
            // Align down to recover the guard's original page-aligned base;
            // mapped_length matches the size the guard was created with.
            MMIOSpaceGuard::from_raw(
                VirtAddr::new(page_align_down(
                    region.virtual_start().as_ref() as *const T as usize
                )),
                region.mapped_length(),
                true,
            )
        };
        drop(mmio_guard);
    }
}

View File

@ -3,7 +3,7 @@ use crate::include::bindings::bindings::acpi_system_description_table_header_t;
use core::ptr::{slice_from_raw_parts_mut, NonNull};
// MCFG表中的Segement配置部分开始位置为44+16*n
#[repr(C, packed)]
pub struct Segement_Configuration_Space {
pub struct SegementConfigurationSpace {
pub base_address: u64,
pub segement_group_number: SegmentGroupNumber,
pub bus_begin: u8,
@ -16,7 +16,7 @@ pub struct Segement_Configuration_Space {
/// @return NonNull<[Segement_Configuration_Space]>
pub fn mcfg_find_segment(
head: NonNull<acpi_system_description_table_header_t>,
) -> NonNull<[Segement_Configuration_Space]> {
) -> NonNull<[SegementConfigurationSpace]> {
let table_length = unsafe { (*head.as_ptr()).Length };
let number_of_segments = ((table_length - 44) / 16) as u16;
NonNull::new(slice_from_raw_parts_mut(

View File

@ -23,7 +23,7 @@ use alloc::sync::Weak;
use alloc::{string::String, sync::Arc, vec::Vec};
use core::fmt::Debug;
use core::sync::atomic::compiler_fence;
use core::sync::atomic::{compiler_fence, Ordering};
use core::{mem::size_of, ptr::write_bytes};
/// @brief: 只支持MBR分区格式的磁盘结构体
@ -60,9 +60,10 @@ impl AhciDisk {
count: usize, // 读取lba的数量
buf: &mut [u8],
) -> Result<usize, SystemError> {
assert!((buf.len() & 511) == 0);
compiler_fence(core::sync::atomic::Ordering::SeqCst);
let check_length = ((count - 1) >> 4) + 1; // prdt length
if count * 512 > buf.len() || check_length > u16::MAX as usize {
if count * 512 > buf.len() || check_length > 8 as usize {
kerror!("ahci read: e2big");
// 不可能的操作
return Err(SystemError::E2BIG);
@ -89,11 +90,7 @@ impl AhciDisk {
.unwrap()
};
volatile_write_bit!(
cmdheader.cfl,
(1 << 5) - 1 as u8,
(size_of::<FisRegH2D>() / size_of::<u32>()) as u8
); // Command FIS size
cmdheader.cfl = (size_of::<FisRegH2D>() / size_of::<u32>()) as u8;
volatile_set_bit!(cmdheader.cfl, 1 << 6, false); // Read/Write bit : Read from device
volatile_write!(cmdheader.prdtl, check_length as u16); // PRDT entries count
@ -109,10 +106,8 @@ impl AhciDisk {
false
};
let mut kbuf = if user_buf {
let mut x: Vec<u8> = Vec::with_capacity(buf.len());
unsafe {
x.set_len(buf.len());
}
let mut x: Vec<u8> = Vec::new();
x.resize(buf.len(), 0);
Some(x)
} else {
None
@ -134,11 +129,12 @@ impl AhciDisk {
// 清空整个table的旧数据
write_bytes(cmdtbl, 0, 1);
}
// kdebug!("cmdheader.prdtl={}", volatile_read!(cmdheader.prdtl));
// 8K bytes (16 sectors) per PRDT
for i in 0..((volatile_read!(cmdheader.prdtl) - 1) as usize) {
volatile_write!(cmdtbl.prdt_entry[i].dba, virt_2_phys(buf_ptr) as u64);
volatile_write_bit!(cmdtbl.prdt_entry[i].dbc, (1 << 22) - 1, 8 * 1024 - 1); // 数据长度 prdt_entry.dbc
cmdtbl.prdt_entry[i].dbc = 8 * 1024 - 1;
volatile_set_bit!(cmdtbl.prdt_entry[i].dbc, 1 << 31, true); // 允许中断 prdt_entry.i
buf_ptr += 8 * 1024;
tmp_count -= 16;
@ -147,11 +143,8 @@ impl AhciDisk {
// Last entry
let las = (volatile_read!(cmdheader.prdtl) - 1) as usize;
volatile_write!(cmdtbl.prdt_entry[las].dba, virt_2_phys(buf_ptr) as u64);
volatile_write_bit!(
cmdtbl.prdt_entry[las].dbc,
(1 << 22) - 1,
((tmp_count << 9) - 1) as u32
); // 数据长度
cmdtbl.prdt_entry[las].dbc = ((tmp_count << 9) - 1) as u32; // 数据长度
volatile_set_bit!(cmdtbl.prdt_entry[las].dbc, 1 << 31, true); // 允许中断
// 设置命令
@ -219,9 +212,10 @@ impl AhciDisk {
count: usize,
buf: &[u8],
) -> Result<usize, SystemError> {
assert!((buf.len() & 511) == 0);
compiler_fence(core::sync::atomic::Ordering::SeqCst);
let check_length = ((count - 1) >> 4) + 1; // prdt length
if count * 512 > buf.len() || check_length > u16::MAX as usize {
if count * 512 > buf.len() || check_length > 8 as usize {
// 不可能的操作
return Err(SystemError::E2BIG);
} else if count == 0 {
@ -369,8 +363,6 @@ impl LockedAhciDisk {
ctrl_num: u8,
port_num: u8,
) -> Result<Arc<LockedAhciDisk>, SystemError> {
let mut part_s: Vec<Arc<Partition>> = Vec::new();
// 构建磁盘结构体
let result: Arc<LockedAhciDisk> = Arc::new(LockedAhciDisk(SpinLock::new(AhciDisk {
name,
@ -382,22 +374,24 @@ impl LockedAhciDisk {
})));
let table: MbrDiskPartionTable = result.read_mbr_table()?;
let weak_this: Weak<LockedAhciDisk> = Arc::downgrade(&result); // 获取this的弱指针
// 求出有多少可用分区
for i in 0..4 {
compiler_fence(Ordering::SeqCst);
if table.dpte[i].part_type != 0 {
part_s.push(Partition::new(
let w = Arc::downgrade(&result);
result.0.lock().partitions.push(Partition::new(
table.dpte[i].starting_sector() as u64,
table.dpte[i].starting_lba as u64,
table.dpte[i].total_sectors as u64,
weak_this.clone(),
w,
i as u16,
));
}
}
result.0.lock().partitions = part_s;
result.0.lock().self_ref = weak_this;
result.0.lock().self_ref = Arc::downgrade(&result);
return Ok(result);
}
@ -409,7 +403,7 @@ impl LockedAhciDisk {
let mut buf: Vec<u8> = Vec::new();
buf.resize(size_of::<MbrDiskPartionTable>(), 0);
BlockDevice::read_at(self, 0, 1, &mut buf)?;
self.read_at(0, 1, &mut buf)?;
// 创建 Cursor 用于按字节读取
let mut cursor = VecCursor::new(buf);
cursor.seek(SeekFrom::SeekCurrent(446))?;

View File

@ -103,7 +103,7 @@ pub struct HbaCmdTable {
// 0x50
_rsv: [u8; 48], // Reserved
// 0x80
pub prdt_entry: [HbaPrdtEntry; 65535], // Physical region descriptor table entries, 0 ~ 65535, 需要注意不要越界
pub prdt_entry: [HbaPrdtEntry; 8], // Physical region descriptor table entries, 0 ~ 65535, 需要注意不要越界 这里设置8的原因是目前CmdTable只预留了8个PRDT项的空间
}
/// HBA Command Header
@ -218,7 +218,7 @@ impl HbaPort {
// Command table size = 256*32 = 8K per port
let mut cmdheaders = phys_2_virt(clb as usize) as *mut u64 as *mut HbaCmdHeader;
for i in 0..32 as usize {
volatile_write!((*cmdheaders).prdtl, 0); // 一开始没有询问prdtl = 0
volatile_write!((*cmdheaders).prdtl, 0); // 一开始没有询问prdtl = 0预留了8个PRDT项的空间
volatile_write!((*cmdheaders).ctba, ctbas[i]);
// 这里限制了 prdtl <= 8, 所以一共用了256bytes如果需要修改可以修改这里
compiler_fence(core::sync::atomic::Ordering::SeqCst);

View File

@ -70,7 +70,7 @@ pub fn ahci_init() -> Result<(), SystemError> {
for device in ahci_device {
let standard_device = device.as_standard_device_mut().unwrap();
standard_device.bar_ioremap();
// 对于每一个ahci控制器分配一块空间 (目前slab algorithm最大支持1MB)
// 对于每一个ahci控制器分配一块空间
let ahci_port_base_vaddr =
Box::leak(Box::new([0u8; (1 << 20) as usize])) as *mut u8 as usize;
let virtaddr = standard_device

View File

@ -2,7 +2,7 @@
// 目前仅支持单主桥单Segment
use super::pci_irq::{IrqType, PciIrqError};
use crate::arch::{MMArch, PciArch, TraitPciArch};
use crate::arch::{PciArch, TraitPciArch};
use crate::include::bindings::bindings::PAGE_2M_SIZE;
use crate::libs::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
@ -638,7 +638,9 @@ impl PciRoot {
let space_guard = Arc::new(space_guard);
self.mmio_guard = Some(space_guard.clone());
assert!(space_guard.map_phys::<MMArch>(self.physical_address_base, size));
assert!(space_guard
.map_phys(self.physical_address_base, size)
.is_ok());
}
return Ok(0);
}
@ -1411,7 +1413,7 @@ pub fn pci_bar_init(
space_guard = Arc::new(tmp);
kdebug!("Pci bar init: mmio space: {space_guard:?}, paddr={paddr:?}, size_want={size_want}");
assert!(
space_guard.map_phys::<MMArch>(paddr, size_want),
space_guard.map_phys(paddr, size_want).is_ok(),
"pci_bar_init: map_phys failed"
);
}

View File

@ -1,10 +1,9 @@
use core::{
hint::spin_loop,
ptr::null_mut,
sync::atomic::{AtomicUsize, Ordering},
};
use alloc::{boxed::Box, format, string::ToString, sync::Arc};
use alloc::{format, string::ToString, sync::Arc};
use crate::{
driver::{
@ -36,7 +35,7 @@ pub fn generate_inode_id() -> InodeId {
return INO.fetch_add(1, Ordering::SeqCst);
}
static mut __ROOT_INODE: *mut Arc<dyn IndexNode> = null_mut();
static mut __ROOT_INODE: Option<Arc<dyn IndexNode>> = None;
/// @brief 获取全局的根节点
#[inline(always)]
@ -52,10 +51,10 @@ pub extern "C" fn vfs_init() -> i32 {
// 使用Ramfs作为默认的根文件系统
let ramfs = RamFS::new();
let mount_fs = MountFS::new(ramfs, None);
let root_inode = Box::leak(Box::new(mount_fs.root_inode()));
let root_inode = mount_fs.root_inode();
unsafe {
__ROOT_INODE = root_inode;
__ROOT_INODE = Some(root_inode.clone());
}
// 创建文件夹
@ -76,8 +75,8 @@ pub extern "C" fn vfs_init() -> i32 {
sysfs_init().expect("Failed to initialize sysfs");
let root_inode = ROOT_INODE().list().expect("VFS init failed");
if root_inode.len() > 0 {
let root_entries = ROOT_INODE().list().expect("VFS init failed");
if root_entries.len() > 0 {
kinfo!("Successfully initialized VFS!");
}
return 0;
@ -122,21 +121,19 @@ fn migrate_virtual_filesystem(new_fs: Arc<dyn FileSystem>) -> Result<(), SystemE
let new_fs = MountFS::new(new_fs, None);
// 获取新的根文件系统的根节点的引用
let new_root_inode = Box::leak(Box::new(new_fs.root_inode()));
let new_root_inode = new_fs.root_inode();
// 把上述文件系统,迁移到新的文件系统下
do_migrate(new_root_inode.clone(), "proc", proc)?;
do_migrate(new_root_inode.clone(), "dev", dev)?;
do_migrate(new_root_inode.clone(), "sys", sys)?;
unsafe {
// drop旧的Root inode
let old_root_inode: Box<Arc<dyn IndexNode>> = Box::from_raw(__ROOT_INODE);
__ROOT_INODE = null_mut();
let old_root_inode = __ROOT_INODE.take().unwrap();
drop(old_root_inode);
// 设置全局的新的ROOT Inode
__ROOT_INODE = new_root_inode;
__ROOT_INODE = Some(new_root_inode);
}
kinfo!("VFS: Migrate filesystems done!");

View File

@ -784,9 +784,7 @@ impl IoVecs {
let mut buf: Vec<u8> = Vec::with_capacity(total_len);
if set_len {
unsafe {
buf.set_len(total_len);
}
buf.resize(total_len, 0);
}
return buf;
}

View File

@ -44,3 +44,4 @@
#include <driver/pci/pci_irq.h>
#include <common/errno.h>
#include <common/cpu.h>
#include <driver/interrupt/apic/apic2rust.h>

View File

@ -129,6 +129,11 @@ pub fn page_align_up(addr: usize) -> usize {
return (addr + page_size - 1) & (!(page_size - 1));
}
/// Round `addr` down to the start of the page that contains it.
pub fn page_align_down(addr: usize) -> usize {
    let mask = MMArch::PAGE_SIZE - 1;
    addr & !mask
}
/// ## 检查是否对齐
///
/// 检查给定的值是否对齐到给定的对齐要求。

View File

@ -26,6 +26,7 @@ pub struct SpinLock<T> {
#[derive(Debug)]
pub struct SpinLockGuard<'a, T: 'a> {
lock: &'a SpinLock<T>,
data: *mut T,
irq_flag: Option<IrqFlagsGuard>,
flags: SpinLockGuardFlags,
}
@ -104,6 +105,7 @@ impl<T> SpinLock<T> {
if self.inner_try_lock() {
return Ok(SpinLockGuard {
lock: self,
data: unsafe { &mut *self.data.get() },
irq_flag: None,
flags: SpinLockGuardFlags::empty(),
});
@ -118,7 +120,7 @@ impl<T> SpinLock<T> {
fn inner_try_lock(&self) -> bool {
let res = self
.lock
.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
.is_ok();
return res;
}
@ -129,6 +131,7 @@ impl<T> SpinLock<T> {
if self.inner_try_lock() {
return Ok(SpinLockGuard {
lock: self,
data: unsafe { &mut *self.data.get() },
irq_flag: Some(irq_guard),
flags: SpinLockGuardFlags::empty(),
});
@ -142,6 +145,7 @@ impl<T> SpinLock<T> {
if self.inner_try_lock() {
return Ok(SpinLockGuard {
lock: self,
data: unsafe { &mut *self.data.get() },
irq_flag: None,
flags: SpinLockGuardFlags::NO_PREEMPT,
});
@ -156,11 +160,11 @@ impl<T> SpinLock<T> {
/// 由于这样做可能导致preempt count不正确因此必须小心的手动维护好preempt count。
/// 如非必要,请不要使用这个函数。
pub unsafe fn force_unlock(&self) {
self.lock.store(false, Ordering::Release);
self.lock.store(false, Ordering::SeqCst);
}
fn unlock(&self) {
self.lock.store(false, Ordering::Release);
self.lock.store(false, Ordering::SeqCst);
ProcessManager::preempt_enable();
}
}
@ -170,14 +174,14 @@ impl<T> Deref for SpinLockGuard<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
return unsafe { &*self.lock.data.get() };
return unsafe { &*self.data };
}
}
/// 实现DerefMut trait支持通过获取SpinLockGuard来获取临界区数据的可变引用
impl<T> DerefMut for SpinLockGuard<'_, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
return unsafe { &mut *self.lock.data.get() };
return unsafe { &mut *self.data };
}
}

View File

@ -76,16 +76,8 @@ pub unsafe extern "C" fn kmalloc(size: usize, _gfp: gfp_t) -> usize {
return do_kmalloc(size, true);
}
fn do_kmalloc(size: usize, zero: bool) -> usize {
let space: Vec<u8> = if zero {
vec![0u8; size]
} else {
let mut v = Vec::with_capacity(size);
unsafe {
v.set_len(size);
}
v
};
fn do_kmalloc(size: usize, _zero: bool) -> usize {
let space: Vec<u8> = vec![0u8; size];
assert!(space.len() == size);
let (ptr, len, cap) = space.into_raw_parts();
@ -100,7 +92,7 @@ fn do_kmalloc(size: usize, zero: bool) -> usize {
drop(Vec::from_raw_parts(vaddr.data() as *mut u8, len, cap));
}
panic!(
"do_kmalloc: vaddr {:?} already exists in C Allocation Map, query size: {size}, zero: {zero}",
"do_kmalloc: vaddr {:?} already exists in C Allocation Map, query size: {size}, zero: {_zero}",
vaddr
);
}

View File

@ -30,8 +30,8 @@ const PAGE_1G_SIZE: usize = 1 << 30;
static mut __MMIO_POOL: Option<MmioBuddyMemPool> = None;
pub fn mmio_pool() -> &'static mut MmioBuddyMemPool {
unsafe { __MMIO_POOL.as_mut().unwrap() }
pub fn mmio_pool() -> &'static MmioBuddyMemPool {
unsafe { __MMIO_POOL.as_ref().unwrap() }
}
pub enum MmioResult {
@ -482,7 +482,7 @@ impl MmioBuddyMemPool {
// 计算前导0
#[cfg(target_arch = "x86_64")]
let mut size_exp: u32 = 63 - size.leading_zeros();
// kdebug!("create_mmio: size_exp: {}", size_exp);
// 记录最终申请的空间大小
let mut new_size = size;
// 对齐要申请的空间大小
@ -542,14 +542,26 @@ impl MmioBuddyMemPool {
for i in 0..page_count {
unsafe {
kernel_mapper
let x: Option<(
PhysAddr,
PageFlags<MMArch>,
crate::mm::page::PageFlush<MMArch>,
)> = kernel_mapper
.as_mut()
.unwrap()
.unmap_phys(vaddr + i * MMArch::PAGE_SIZE, true)
.unmap_phys(vaddr + i * MMArch::PAGE_SIZE, false);
if let Some((_, _, flush)) = x {
flush.flush();
}
};
}
// todo: 归还到buddy
// 归还到buddy
mmio_pool()
.give_back_block(vaddr, length.trailing_zeros() as u32)
.unwrap_or_else(|err| {
panic!("MMIO release failed: self: {self:?}, err msg: {:?}", err);
});
return Ok(0);
}
@ -652,29 +664,27 @@ impl MMIOSpaceGuard {
///
/// 传入的物理地址【一定要是设备的物理地址】。
/// 如果物理地址是从内存分配器中分配的那么会造成内存泄露。因为mmio_release的时候只取消映射不会释放内存。
pub unsafe fn map_phys<Arch: MemoryManagementArch>(
&self,
paddr: PhysAddr,
length: usize,
) -> bool {
pub unsafe fn map_phys(&self, paddr: PhysAddr, length: usize) -> Result<(), SystemError> {
if length > self.size {
return false;
return Err(SystemError::EINVAL);
}
let check = self
.mapped
.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst);
if check.is_err() {
return false;
return Err(SystemError::EINVAL);
}
let flags = PageFlags::mmio_flags();
let mut kernel_mapper = KernelMapper::lock();
let r = kernel_mapper.map_phys_with_size(self.vaddr, paddr, length, flags, true);
if r.is_err() {
return false;
}
return true;
return r;
}
/// 泄露一个MMIO space guard不会释放映射的空间
pub unsafe fn leak(self) {
core::mem::forget(self);
}
}

View File

@ -803,7 +803,7 @@ impl<Arch, F: Debug> Debug for PageMapper<Arch, F> {
}
/// 页表刷新器的trait
pub trait Flusher<Arch> {
pub trait Flusher<Arch: MemoryManagementArch> {
/// 取消对指定的page flusher的刷新
fn consume(&mut self, flush: PageFlush<Arch>);
}
@ -811,7 +811,7 @@ pub trait Flusher<Arch> {
/// 用于刷新某个虚拟地址的刷新器。这个刷新器一经产生就必须调用flush()方法,
/// 否则会造成对页表的更改被忽略,这是不安全的
#[must_use = "The flusher must call the 'flush()', or the changes to page table will be unsafely ignored."]
pub struct PageFlush<Arch> {
pub struct PageFlush<Arch: MemoryManagementArch> {
virt: VirtAddr,
phantom: PhantomData<Arch>,
}
@ -834,6 +834,14 @@ impl<Arch: MemoryManagementArch> PageFlush<Arch> {
}
}
impl<Arch: MemoryManagementArch> Drop for PageFlush<Arch> {
    /// Flush the TLB entry for this flusher's address on drop, so a
    /// forgotten `flush()` cannot leave a stale translation behind.
    fn drop(&mut self) {
        unsafe {
            // Use the impl's own `Arch` parameter (was the global `MMArch`
            // alias), so the flush targets the architecture this flusher
            // was instantiated for.
            Arch::invalidate_page(self.virt);
        }
    }
}
/// 用于刷新整个页表的刷新器。这个刷新器一经产生就必须调用flush()方法,
/// 否则会造成对页表的更改被忽略,这是不安全的
#[must_use = "The flusher must call the 'flush()', or the changes to page table will be unsafely ignored."]

View File

@ -1,4 +1,6 @@
use super::{kthread::kthread_init, process_init, ProcessManager};
use crate::smp::core::smp_get_processor_id;
use super::{kthread::kthread_init, process_init, ProcessManager, __PROCESS_MANAGEMENT_INIT_DONE};
#[no_mangle]
pub extern "C" fn rs_process_init() {
@ -21,14 +23,15 @@ pub extern "C" fn rs_get_idle_stack_top(cpu_id: u32) -> usize {
#[no_mangle]
pub extern "C" fn rs_current_pcb_cpuid() -> u32 {
return ProcessManager::current_pcb()
.sched_info()
.on_cpu()
.unwrap_or(u32::MAX);
return smp_get_processor_id();
}
#[no_mangle]
pub extern "C" fn rs_current_pcb_pid() -> u32 {
return ProcessManager::current_pcb().pid().0 as u32;
if unsafe { __PROCESS_MANAGEMENT_INIT_DONE } {
return ProcessManager::current_pcb().pid().0 as u32;
}
return 0;
}
#[no_mangle]