Fix clippy and compiler warnings

This commit is contained in:
Jianfeng Jiang
2023-09-04 11:04:42 +08:00
committed by Tate, Hongliang Tian
parent 20a90426a0
commit 9ca64c281e
156 changed files with 539 additions and 603 deletions

View File

@ -2,7 +2,6 @@
use core::arch::x86_64::{_fxrstor, _fxsave};
use core::fmt::Debug;
use core::mem::MaybeUninit;
use trapframe::{GeneralRegs, UserContext as RawUserContext};
@ -87,7 +86,7 @@ impl UserContextApiInternal for UserContext {
}
}
};
call_irq_callback_functions(&self.into_trap_frame());
call_irq_callback_functions(&self.as_trap_frame());
}
crate::arch::irq::enable_local();
@ -103,7 +102,7 @@ impl UserContextApiInternal for UserContext {
}
}
fn into_trap_frame(&self) -> trapframe::TrapFrame {
fn as_trap_frame(&self) -> trapframe::TrapFrame {
trapframe::TrapFrame {
rax: self.user_context.general.rax,
rbx: self.user_context.general.rbx,
@ -350,7 +349,7 @@ impl FpRegs {
let ptr = unsafe { alloc::alloc::alloc(layout) } as usize;
debug!("ptr = 0x{:x}", ptr);
unsafe {
_fxsave((&mut self.buf.data).as_mut_ptr() as *mut u8);
_fxsave(self.buf.data.as_mut_ptr());
}
debug!("save fpregs success");
self.is_valid = true;
@ -365,7 +364,7 @@ impl FpRegs {
/// It is the caller's responsibility to ensure that the source slice contains
/// data that is in xsave/xrstor format. The slice must have a length of 512 bytes.
pub unsafe fn save_from_slice(&mut self, src: &[u8]) {
(&mut self.buf.data).copy_from_slice(src);
self.buf.data.copy_from_slice(src);
self.is_valid = true;
}
@ -388,7 +387,7 @@ impl FpRegs {
pub fn restore(&self) {
debug!("restore fpregs");
assert!(self.is_valid);
unsafe { _fxrstor((&self.buf.data).as_ptr()) };
unsafe { _fxrstor(self.buf.data.as_ptr()) };
debug!("restore fpregs success");
}

View File

@ -33,7 +33,7 @@ impl<T, A> IoPort<T, A> {
/// a privileged operation.
pub const unsafe fn new(port: u16) -> Self {
Self {
port: port,
port,
value_marker: PhantomData,
access_marker: PhantomData,
}

View File

@ -26,6 +26,7 @@ static SERIAL_MODEM_CTRL: IoPort<u8, WriteOnlyAccess> =
static SERIAL_LINE_STS: IoPort<u8, ReadWriteAccess> = unsafe { IoPort::new(SERIAL_DATA_PORT + 5) };
static CONSOLE_IRQ_CALLBACK: Once<SpinLock<IrqLine>> = Once::new();
#[allow(clippy::type_complexity)]
static SERIAL_INPUT_CALLBACKS: SpinLock<Vec<Arc<dyn Fn(u8) + Send + Sync + 'static>>> =
SpinLock::new(Vec::new());

View File

@ -49,7 +49,7 @@ pub enum ContextTableError {
impl RootTable {
pub fn new() -> Self {
Self {
root_frame: VmFrameVec::allocate(&VmAllocOptions::new(1).uninit(false))
root_frame: VmFrameVec::allocate(VmAllocOptions::new(1).uninit(false))
.unwrap()
.pop()
.unwrap(),
@ -128,7 +128,7 @@ impl RootTable {
let bus_entry = context_table
.entries_frame
.read_val::<ContextEntry>(
(device_id.device as usize * 8 + device_id.function as usize) as usize
(device_id.device as usize * 8 + device_id.function as usize)
* size_of::<ContextEntry>(),
)
.unwrap();
@ -141,7 +141,7 @@ impl RootTable {
context_table
.entries_frame
.write_val::<ContextEntry>(
(device_id.device as usize * 8 + device_id.function as usize) as usize
(device_id.device as usize * 8 + device_id.function as usize)
* size_of::<ContextEntry>(),
&entry,
)
@ -237,7 +237,7 @@ pub struct ContextTable {
impl ContextTable {
fn new() -> Self {
Self {
entries_frame: VmFrameVec::allocate(&VmAllocOptions::new(1).uninit(false))
entries_frame: VmFrameVec::allocate(VmAllocOptions::new(1).uninit(false))
.unwrap()
.pop()
.unwrap(),
@ -256,8 +256,7 @@ impl ContextTable {
let bus_entry = self
.entries_frame
.read_val::<ContextEntry>(
(device.device as usize * 8 + device.function as usize) as usize
* size_of::<ContextEntry>(),
(device.device as usize * 8 + device.function as usize) * size_of::<ContextEntry>(),
)
.unwrap();
@ -270,7 +269,7 @@ impl ContextTable {
let entry = ContextEntry(address as u128 | 3 | 0x1_0000_0000_0000_0000);
self.entries_frame
.write_val::<ContextEntry>(
(device.device as usize * 8 + device.function as usize) as usize
(device.device as usize * 8 + device.function as usize)
* size_of::<ContextEntry>(),
&entry,
)
@ -298,7 +297,7 @@ impl ContextTable {
paddr,
PageTableFlags::WRITABLE | PageTableFlags::READABLE | PageTableFlags::LAST_PAGE,
)
.map_err(|err| ContextTableError::ModificationError(err))
.map_err(ContextTableError::ModificationError)
}
fn unmap(&mut self, device: PciDeviceLocation, vaddr: Vaddr) -> Result<(), ContextTableError> {
@ -308,6 +307,6 @@ impl ContextTable {
self.get_or_create_page_table(device)
.unmap(vaddr)
.map_err(|err| ContextTableError::ModificationError(err))
.map_err(ContextTableError::ModificationError)
}
}

View File

@ -116,7 +116,7 @@ impl FaultRecording {
pub fn pasid_value(&self) -> u32 {
// bit 123:104
((self.0 & 0xF_FFFF0_0000_0000_0000_0000_0000_0000) >> 104) as u32
((self.0 & 0x00FF_FFF0_0000_0000_0000_0000_0000_0000) >> 104) as u32
}
pub fn fault_reason(&self) -> u8 {
@ -165,6 +165,7 @@ pub enum FaultRequestType {
#[derive(Debug)]
#[repr(u8)]
#[allow(clippy::enum_variant_names)]
pub enum FaultAddressType {
UntranslatedRequest = 0,
TranslationRequest = 1,
@ -201,6 +202,6 @@ pub(super) unsafe fn init(base_register_vaddr: Vaddr) {
fn iommu_page_fault_handler(frame: &TrapFrame) {
let fault_event = FAULT_EVENT_REGS.get().unwrap();
let index = (fault_event.status().bits & FaultStatus::FRI.bits) >> 8;
let recording = FaultRecording(*(&fault_event.recordings[index as usize].read()));
let recording = FaultRecording(fault_event.recordings[index as usize].read());
info!("Catch iommu page fault, recording:{:x?}", recording)
}

View File

@ -44,9 +44,8 @@ impl RemappingRegisters {
let base_address = {
let mut addr = 0;
for remapping in dmar.remapping_iter() {
match remapping {
Remapping::Drhd(drhd) => addr = drhd.register_base_addr(),
_ => {}
if let Remapping::Drhd(drhd) = remapping {
addr = drhd.register_base_addr()
}
}
if addr == 0 {
@ -110,7 +109,7 @@ bitflags! {
/// 6 => 16-bit domain-ids with support for up to 64K domains.
/// 7 => Reserved.
/// ```
const ND = 0x7 << 0;
const ND = 0x7;
/// Required Write-Buffer Flushing.
const RWBF = 1 << 4;
/// Protected Low-Memory Region

View File

@ -126,7 +126,7 @@ impl PageTableEntryTrait for PageTableEntry {
// bit 47~12
type F = PageTableFlags;
fn new(paddr: crate::vm::Paddr, flags: PageTableFlags) -> Self {
Self(((paddr & Self::PHYS_MASK) as u64 | flags.bits) as u64)
Self((paddr & Self::PHYS_MASK) as u64 | flags.bits)
}
fn paddr(&self) -> crate::vm::Paddr {
@ -150,7 +150,7 @@ impl PageTableEntryTrait for PageTableEntry {
}
fn page_index(va: crate::vm::Vaddr, level: usize) -> usize {
debug_assert!(level >= 1 && level <= 5);
debug_assert!((1..=5).contains(&level));
va >> (12 + 9 * (level - 1)) & (ENTRY_COUNT - 1)
}
}

View File

@ -72,6 +72,7 @@ impl IrqLine {
///
/// This function is marked unsafe as manipulating interrupt lines is
/// considered a dangerous operation.
#[allow(clippy::redundant_allocation)]
pub unsafe fn acquire(irq_num: u8) -> Arc<&'static Self> {
Arc::new(IRQ_LIST.get().unwrap().get(irq_num as usize).unwrap())
}
@ -126,7 +127,7 @@ impl Drop for IrqCallbackHandle {
.unwrap()
.callback_list
.lock();
a.retain(|item| if (*item).id == self.id { false } else { true });
a.retain(|item| item.id != self.id);
ID_ALLOCATOR.lock().dealloc(self.id);
}
}

View File

@ -34,6 +34,7 @@ pub enum Remapping {
#[derive(Debug, Clone, Copy)]
#[repr(u16)]
#[allow(clippy::upper_case_acronyms)]
pub enum RemappingType {
DRHD = 0,
RMRR = 1,
@ -65,14 +66,9 @@ impl Dmar {
let acpi_table_lock = super::ACPI_TABLES.get().unwrap().lock();
// Safety: The DmarHeader is the header for the DMAR structure, it fits all the field described in Intel manual.
let dmar_mapping = unsafe {
if let Some(temp) = acpi_table_lock
acpi_table_lock
.get_sdt::<DmarHeader>(Signature::DMAR)
.unwrap()
{
temp
} else {
return None;
}
.unwrap()?
};
let physical_address = dmar_mapping.physical_start();
@ -93,9 +89,9 @@ impl Dmar {
unsafe {
while remain_length > 0 {
// Common header: type: u16, length: u16
let length = *dmar_slice[index as usize + 2..index as usize + 4].as_ptr() as usize;
let typ = *dmar_slice[index as usize..index as usize + 2].as_ptr() as usize;
let bytes = &&dmar_slice[index as usize..index as usize + length];
let length = *dmar_slice[index + 2..index + 4].as_ptr() as usize;
let typ = *dmar_slice[index..index + 2].as_ptr() as usize;
let bytes = &&dmar_slice[index..index + length];
let remapping = match typ {
0 => Remapping::Drhd(Drhd::from_bytes(bytes)),
1 => Remapping::Rmrr(Rmrr::from_bytes(bytes)),
@ -119,7 +115,7 @@ impl Dmar {
Some(Dmar {
header: *dmar_mapping,
remapping_structures: remapping_structures,
remapping_structures,
})
}

View File

@ -94,13 +94,13 @@ impl AcpiHandler for AcpiMemoryHandler {
pub fn init() {
let acpi_tables = match boot::acpi_arg().to_owned() {
BootloaderAcpiArg::Rsdp(addr) => unsafe {
AcpiTables::from_rsdp(AcpiMemoryHandler {}, addr as usize).unwrap()
AcpiTables::from_rsdp(AcpiMemoryHandler {}, addr).unwrap()
},
BootloaderAcpiArg::Rsdt(addr) => unsafe {
AcpiTables::from_rsdt(AcpiMemoryHandler {}, 0, addr as usize).unwrap()
AcpiTables::from_rsdt(AcpiMemoryHandler {}, 0, addr).unwrap()
},
BootloaderAcpiArg::Xsdt(addr) => unsafe {
AcpiTables::from_rsdt(AcpiMemoryHandler {}, 1, addr as usize).unwrap()
AcpiTables::from_rsdt(AcpiMemoryHandler {}, 1, addr).unwrap()
},
};

View File

@ -250,8 +250,7 @@ impl Rhsa {
pub unsafe fn from_bytes(bytes: &[u8]) -> Self {
let length = u16_from_slice(&bytes[2..4]) as u32;
debug_assert_eq!(length, bytes.len() as u32);
let result = *(bytes.as_ptr() as *const Self);
result
*(bytes.as_ptr() as *const Self)
}
}

View File

@ -52,8 +52,7 @@ pub fn init() {
acpi::InterruptModel::Unknown => panic!("not found APIC in ACPI Table"),
acpi::InterruptModel::Apic(apic) => {
apic.io_apics
.iter()
.next()
.first()
.expect("There must be at least one IO APIC")
.address
}

View File

@ -27,7 +27,7 @@ impl X2Apic {
unsafe {
// Enable x2APIC mode globally
let mut base = rdmsr(IA32_APIC_BASE);
base = base | 0b1100_0000_0000; // Enable x2APIC and xAPIC
base |= 0b1100_0000_0000; // Enable x2APIC and xAPIC
wrmsr(IA32_APIC_BASE, base);
// Set SVR, Enable APIC and set Spurious Vector to 15 (Reserved irq number)

View File

@ -49,6 +49,10 @@ pub fn tlb_flush(vaddr: Vaddr) {
#[repr(C)]
pub struct PageTableEntry(usize);
/// ## Safety
///
/// Changing the level 4 page table is unsafe, because it's possible to violate memory safety by
/// changing the page mapping.
pub unsafe fn activate_page_table(root_paddr: Paddr, flags: x86_64::registers::control::Cr3Flags) {
x86_64::registers::control::Cr3::write(
PhysFrame::from_start_address(x86_64::PhysAddr::new(root_paddr as u64)).unwrap(),
@ -67,9 +71,9 @@ pub fn init() {
// Cancel mapping in lowest addresses.
p4[0].clear();
let mut map_pte = ALL_MAPPED_PTE.lock();
for i in 0..512 {
if p4[i].flags().contains(PageTableFlags::PRESENT) {
map_pte.insert(i, p4[i]);
for (i, p4_i) in p4.iter().enumerate().take(512) {
if p4_i.flags().contains(PageTableFlags::PRESENT) {
map_pte.insert(i, *p4_i);
}
}
}
@ -166,7 +170,7 @@ impl PageTableEntryTrait for PageTableEntry {
Self((paddr & Self::PHYS_ADDR_MASK) | flags.bits)
}
fn paddr(&self) -> Paddr {
self.0 as usize & Self::PHYS_ADDR_MASK
self.0 & Self::PHYS_ADDR_MASK
}
fn flags(&self) -> PageTableFlags {
PageTableFlags::from_bits_truncate(self.0)
@ -184,7 +188,7 @@ impl PageTableEntryTrait for PageTableEntry {
}
fn page_index(va: crate::vm::Vaddr, level: usize) -> usize {
debug_assert!(level >= 1 && level <= 5);
debug_assert!((1..=5).contains(&level));
va >> (12 + 9 * (level - 1)) & (ENTRY_COUNT - 1)
}
}

View File

@ -1,7 +1,6 @@
use core::arch::x86_64::_rdtsc;
use core::sync::atomic::{AtomicBool, Ordering};
use alloc::boxed::Box;
use alloc::sync::Arc;
use log::info;
use spin::Once;
@ -83,7 +82,6 @@ fn periodic_mode_init() {
}
x86_64::instructions::interrupts::disable();
drop(a);
drop(handle);
fn init_function(trap_frame: &TrapFrame) {
static mut IN_TIME: u8 = 0;

View File

@ -32,7 +32,7 @@ pub struct KCmdlineArg {
impl KCmdlineArg {
/// Get the path of the initprocess.
pub fn get_initproc_path(&self) -> Option<&str> {
self.initproc.path.as_ref().map(|s| s.as_str())
self.initproc.path.as_deref()
}
/// Get the argument vector(argv) of the initprocess.
pub fn get_initproc_argv(&self) -> &Vec<CString> {
@ -85,7 +85,7 @@ impl From<&str> for KCmdlineArg {
// KernelArg => Arg "\s+" KernelArg | %empty
// InitArg => Arg "\s+" InitArg | %empty
if kcmdline_end {
if result.initproc.path == None {
if result.initproc.path.is_none() {
panic!("Initproc arguments provided but no initproc path specified!");
}
result.initproc.argv.push(CString::new(arg).unwrap());
@ -96,7 +96,7 @@ impl From<&str> for KCmdlineArg {
continue;
}
// Arg => Entry | Entry "=" Value
let arg_pattern: Vec<_> = arg.split("=").collect();
let arg_pattern: Vec<_> = arg.split('=').collect();
let (entry, value) = match arg_pattern.len() {
1 => (arg_pattern[0], None),
2 => (arg_pattern[0], Some(arg_pattern[1])),
@ -105,7 +105,7 @@ impl From<&str> for KCmdlineArg {
}
};
// Entry => Module "." ModuleOptionName | KernelOptionName
let entry_pattern: Vec<_> = entry.split(".").collect();
let entry_pattern: Vec<_> = entry.split('.').collect();
let (node, option) = match entry_pattern.len() {
1 => (None, entry_pattern[0]),
2 => (Some(entry_pattern[0]), entry_pattern[1]),
@ -145,14 +145,11 @@ impl From<&str> for KCmdlineArg {
}
} else {
// There is no value, the entry is only a option.
match option {
_ => {
// If the option is not recognized, it is passed to the initproc.
// Pattern 'option' without value is treated as the init argument.
let argv_entry = CString::new(option.to_string()).unwrap();
result.initproc.argv.push(argv_entry);
}
}
// If the option is not recognized, it is passed to the initproc.
// Pattern 'option' without value is treated as the init argument.
let argv_entry = CString::new(option.to_string()).unwrap();
result.initproc.argv.push(argv_entry);
}
}

View File

@ -36,11 +36,7 @@ pub struct MemoryRegion {
impl MemoryRegion {
/// Construct a page aligned memory region.
pub fn new(base: usize, len: usize, typ: MemoryRegionType) -> Self {
MemoryRegion {
base: base,
len: len,
typ: typ,
}
MemoryRegion { base, len, typ }
}
/// The physical address of the base of the region.
@ -53,6 +49,10 @@ impl MemoryRegion {
self.len
}
pub fn is_empty(&self) -> bool {
self.len == 0
}
/// The type of the region.
pub fn typ(&self) -> MemoryRegionType {
self.typ
@ -86,20 +86,18 @@ impl MemoryRegion {
} else {
vec![*self]
}
} else {
if self.base < t.base + t.len {
if self.base + self.len > t.base + t.len {
vec![MemoryRegion {
base: t.base + t.len,
len: self.base + self.len - (t.base + t.len),
typ: self.typ,
}]
} else {
vec![]
}
} else if self.base < t.base + t.len {
if self.base + self.len > t.base + t.len {
vec![MemoryRegion {
base: t.base + t.len,
len: self.base + self.len - (t.base + t.len),
typ: self.typ,
}]
} else {
vec![*self]
vec![]
}
} else {
vec![*self]
}
}
}

View File

@ -25,6 +25,7 @@ pub trait PciDriver: Sync + Send + Debug {
///
/// Once a device is matched and claimed by a driver,
/// it won't be fed to another driver for probing.
#[allow(clippy::result_large_err)]
fn probe(
&self,
device: PciCommonDevice,
@ -69,7 +70,7 @@ impl PciBus {
pub(super) fn register_common_device(&mut self, mut common_device: PciCommonDevice) {
debug!("Find pci common devices:{:x?}", common_device);
let device_id = common_device.device_id().clone();
let device_id = *common_device.device_id();
for driver in self.drivers.iter() {
common_device = match driver.probe(common_device) {
Ok(device) => {

View File

@ -31,9 +31,9 @@ impl Clone for CapabilityMsixData {
fn clone(&self) -> Self {
let new_vec = self.irqs.clone().to_vec();
Self {
loc: self.loc.clone(),
ptr: self.ptr.clone(),
table_size: self.table_size.clone(),
loc: self.loc,
ptr: self.ptr,
table_size: self.table_size,
table_bar: self.table_bar.clone(),
pending_table_bar: self.pending_table_bar.clone(),
irqs: new_vec,
@ -99,7 +99,7 @@ impl CapabilityMsixData {
.unwrap();
table_bar
.io_mem()
.write_val((16 * i + 12) as usize + table_offset, &(1 as u32))
.write_val((16 * i + 12) as usize + table_offset, &1_u32)
.unwrap();
}
@ -115,13 +115,13 @@ impl CapabilityMsixData {
}
Self {
loc: dev.location().clone(),
loc: *dev.location(),
ptr: cap_ptr,
table_size: (dev.location().read16(cap_ptr + 2) & 0b11_1111_1111) + 1,
table_bar,
pending_table_bar: pba_bar,
irqs,
table_offset: table_offset,
table_offset,
pending_table_offset: pba_offset,
}
}
@ -146,7 +146,7 @@ impl CapabilityMsixData {
// Enable this msix vector
self.table_bar
.io_mem()
.write_val((16 * index + 12) as usize + self.table_offset, &(0 as u32))
.write_val((16 * index + 12) as usize + self.table_offset, &0_u32)
.unwrap();
}

View File

@ -13,7 +13,7 @@ pub struct CapabilityVndrData {
impl CapabilityVndrData {
pub(super) fn new(dev: &PciCommonDevice, cap_ptr: u16, length: u16) -> Self {
Self {
location: dev.location().clone(),
location: *dev.location(),
cap_ptr,
length,
}
@ -23,6 +23,10 @@ impl CapabilityVndrData {
self.length
}
pub fn is_empty(&self) -> bool {
self.length == 0
}
pub fn read8(&self, offset: u16) -> Result<u8> {
self.check_range(offset)?;
Ok(self.location.read8(self.cap_ptr + offset))
@ -30,7 +34,8 @@ impl CapabilityVndrData {
pub fn write8(&self, offset: u16, value: u8) -> Result<()> {
self.check_range(offset)?;
Ok(self.location.write8(self.cap_ptr + offset, value))
self.location.write8(self.cap_ptr + offset, value);
Ok(())
}
pub fn read16(&self, offset: u16) -> Result<u16> {
@ -40,7 +45,8 @@ impl CapabilityVndrData {
pub fn write16(&self, offset: u16, value: u16) -> Result<()> {
self.check_range(offset)?;
Ok(self.location.write16(self.cap_ptr + offset, value))
self.location.write16(self.cap_ptr + offset, value);
Ok(())
}
pub fn read32(&self, offset: u16) -> Result<u32> {
@ -50,7 +56,8 @@ impl CapabilityVndrData {
pub fn write32(&self, offset: u16, value: u32) -> Result<()> {
self.check_range(offset)?;
Ok(self.location.write32(self.cap_ptr + offset, value))
self.location.write32(self.cap_ptr + offset, value);
Ok(())
}
#[inline]

View File

@ -153,7 +153,7 @@ impl MemoryBar {
};
// length
let size = !(len_encoded & !0xF).wrapping_add(1);
let prefetchable = if raw & 0b1000 == 0 { false } else { true };
let prefetchable = raw & 0b1000 != 0;
// The BAR is located in I/O memory region
Ok(MemoryBar {
base,

View File

@ -110,22 +110,18 @@ impl BarManager {
let mut idx = 0;
let mut bars = [None, None, None, None, None, None];
while idx < max {
match Bar::new(location, idx) {
Ok(bar) => {
let mut idx_step = 0;
match &bar {
Bar::Memory(memory_bar) => {
if memory_bar.address_length() == AddrLen::Bits64 {
idx_step = 1;
}
if let Ok(bar) = Bar::new(location, idx) {
let mut idx_step = 0;
match &bar {
Bar::Memory(memory_bar) => {
if memory_bar.address_length() == AddrLen::Bits64 {
idx_step = 1;
}
Bar::Io(_) => {}
}
bars[idx as usize] = Some((bar, true));
idx += idx_step;
Bar::Io(_) => {}
}
// ignore for now
Err(_) => {}
bars[idx as usize] = Some((bar, true));
idx += idx_step;
}
idx += 1;
}

View File

@ -38,7 +38,7 @@ macro_rules! cpu_local {
// multiple declarations
($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = $init:expr; $($rest:tt)*) => {
$(#[$attr])* $vis static $name: CpuLocal<$t> = unsafe { CpuLocal::new($init) };
crate::cpu_local!($($rest)*);
$crate::cpu_local!($($rest)*);
};
// single declaration
@ -66,6 +66,7 @@ unsafe impl<T> Sync for CpuLocal<T> {}
impl<T> CpuLocal<T> {
/// Initialize CPU-local object
/// Developer cannot construct a valid CpuLocal object arbitrarily
#[allow(clippy::missing_safety_doc)]
pub const unsafe fn new(val: T) -> Self {
Self(UnsafeCell::new(val))
}

View File

@ -70,11 +70,10 @@ fn invoke_c_init_funcs() {
fn __sinit_array();
fn __einit_array();
}
let call_len = (__einit_array as u64 - __sinit_array as u64) / 8;
let call_len = (__einit_array as usize - __sinit_array as usize) / 8;
for i in 0..call_len {
unsafe {
let address = (__sinit_array as u64 + 8 * i) as *const u64;
let function = address as *const fn();
let function = (__sinit_array as usize + 8 * i) as *const fn();
(*function)();
}
}
@ -96,7 +95,7 @@ pub(crate) const fn zero<T>() -> T {
}
pub trait Testable {
fn run(&self) -> ();
fn run(&self);
}
impl<T> Testable for T

View File

@ -83,17 +83,17 @@ impl AtomicBits {
}
/// Get an iterator for the bits.
pub fn iter<'a>(&'a self) -> Iter<'a> {
pub fn iter(&self) -> Iter<'_> {
Iter::new(self)
}
/// Get an iterator that gives the positions of all 1s in the bits.
pub fn iter_ones<'a>(&'a self) -> OnesIter<'a> {
pub fn iter_ones(&self) -> OnesIter<'_> {
OnesIter::new(self)
}
/// Get an iterator that gives the positions of all 0s in the bits.
pub fn iter_zeroes<'a>(&'a self) -> ZeroesIter<'a> {
pub fn iter_zeroes(&self) -> ZeroesIter<'_> {
ZeroesIter::new(self)
}
}

View File

@ -31,7 +31,7 @@ impl<T> Mutex<T> {
/// Try Acquire the mutex immedidately.
pub fn try_lock(&self) -> Option<MutexGuard<T>> {
self.acquire_lock().then(|| MutexGuard { mutex: &self })
self.acquire_lock().then_some(MutexGuard { mutex: self })
}
/// Release the mutex and wake up one thread which is blocked on this mutex.

View File

@ -70,7 +70,7 @@ impl<T> RwLock<T> {
let lock = self.lock.fetch_add(READER, Acquire);
if lock & (WRITER | MAX_READER) == 0 {
Some(RwLockReadGuard {
inner: &self,
inner: self,
inner_guard: InnerGuard::IrqGuard(irq_guard),
})
} else {
@ -88,7 +88,7 @@ impl<T> RwLock<T> {
.is_ok()
{
Some(RwLockWriteGuard {
inner: &self,
inner: self,
inner_guard: InnerGuard::IrqGuard(irq_guard),
})
} else {
@ -130,7 +130,7 @@ impl<T> RwLock<T> {
let lock = self.lock.fetch_add(READER, Acquire);
if lock & (WRITER | MAX_READER) == 0 {
Some(RwLockReadGuard {
inner: &self,
inner: self,
inner_guard: InnerGuard::PreemptGuard(guard),
})
} else {
@ -148,7 +148,7 @@ impl<T> RwLock<T> {
.is_ok()
{
Some(RwLockWriteGuard {
inner: &self,
inner: self,
inner_guard: InnerGuard::PreemptGuard(guard),
})
} else {

View File

@ -25,7 +25,7 @@ const MAX_READER: usize = WRITER >> 1;
impl<T> RwMutex<T> {
/// Creates a new `RwMutex`.
pub fn new(val: T) -> Self {
pub const fn new(val: T) -> Self {
Self {
val: UnsafeCell::new(val),
lock: AtomicUsize::new(0),
@ -47,7 +47,7 @@ impl<T> RwMutex<T> {
pub fn try_read(&self) -> Option<RwMutexReadGuard<T>> {
let lock = self.lock.fetch_add(READER, Acquire);
if lock & (WRITER | MAX_READER) == 0 {
Some(RwMutexReadGuard { inner: &self })
Some(RwMutexReadGuard { inner: self })
} else {
self.lock.fetch_sub(READER, Release);
None
@ -61,7 +61,7 @@ impl<T> RwMutex<T> {
.compare_exchange(0, WRITER, Acquire, Relaxed)
.is_ok()
{
Some(RwMutexWriteGuard { inner: &self })
Some(RwMutexWriteGuard { inner: self })
} else {
None
}

View File

@ -31,7 +31,7 @@ impl<T> SpinLock<T> {
let guard = disable_local();
self.acquire_lock();
SpinLockGuard {
lock: &self,
lock: self,
inner_guard: InnerGuard::IrqGuard(guard),
}
}
@ -41,12 +41,12 @@ impl<T> SpinLock<T> {
let irq_guard = disable_local();
if self.try_acquire_lock() {
let lock_guard = SpinLockGuard {
lock: &self,
lock: self,
inner_guard: InnerGuard::IrqGuard(irq_guard),
};
return Some(lock_guard);
}
return None;
None
}
/// Acquire the spin lock without disabling local IRQs.
@ -61,7 +61,7 @@ impl<T> SpinLock<T> {
let guard = disable_preempt();
self.acquire_lock();
SpinLockGuard {
lock: &self,
lock: self,
inner_guard: InnerGuard::PreemptGuard(guard),
}
}
@ -71,12 +71,12 @@ impl<T> SpinLock<T> {
let guard = disable_preempt();
if self.try_acquire_lock() {
let lock_guard = SpinLockGuard {
lock: &self,
lock: self,
inner_guard: InnerGuard::PreemptGuard(guard),
};
return Some(lock_guard);
}
return None;
None
}
/// Access the spin lock, otherwise busy waiting

View File

@ -17,7 +17,6 @@ pub struct WaitQueue {
}
impl WaitQueue {
/// Creates a new instance.
pub const fn new() -> Self {
WaitQueue {
waiters: SpinLock::new(VecDeque::new()),

View File

@ -13,7 +13,6 @@ use super::{
};
use alloc::sync::Arc;
use lazy_static::lazy_static;
use log::warn;
pub struct Processor {
current: Option<Arc<Task>>,
@ -83,17 +82,18 @@ pub fn switch_to_task(next_task: Arc<Task>) {
let current_task_option = current_task();
let next_task_cx_ptr = &next_task.inner_ctx() as *const TaskContext;
let current_task: Arc<Task>;
let current_task_cx_ptr = if current_task_option.is_none() {
PROCESSOR.lock().get_idle_task_cx_ptr()
} else {
current_task = current_task_option.unwrap();
if current_task.status() == TaskStatus::Runnable {
GLOBAL_SCHEDULER
.lock_irq_disabled()
.enqueue(current_task.clone());
let current_task_cx_ptr = match current_task_option {
None => PROCESSOR.lock().get_idle_task_cx_ptr(),
Some(current_task) => {
if current_task.status() == TaskStatus::Runnable {
GLOBAL_SCHEDULER
.lock_irq_disabled()
.enqueue(current_task.clone());
}
&mut current_task.inner_exclusive_access().ctx as *mut TaskContext
}
&mut current_task.inner_exclusive_access().ctx as *mut TaskContext
};
// change the current task to the next task
PROCESSOR.lock().current = Some(next_task.clone());

View File

@ -44,7 +44,7 @@ impl KernelStack {
pub fn new() -> Result<Self> {
Ok(Self {
frame: VmFrameVec::allocate(
&VmAllocOptions::new(KERNEL_STACK_SIZE / PAGE_SIZE).is_contiguous(true),
VmAllocOptions::new(KERNEL_STACK_SIZE / PAGE_SIZE).is_contiguous(true),
)?,
})
}

View File

@ -12,11 +12,7 @@ extern "sysv64" fn trap_handler(f: &mut TrapFrame) {
}
pub(crate) fn call_irq_callback_functions(trap_frame: &TrapFrame) {
let irq_line = IRQ_LIST
.get()
.unwrap()
.get(trap_frame.trap_num as usize)
.unwrap();
let irq_line = IRQ_LIST.get().unwrap().get(trap_frame.trap_num).unwrap();
let callback_functions = irq_line.callback_list();
for callback_function in callback_functions.iter() {
callback_function.call(trap_frame);

View File

@ -13,6 +13,7 @@ use trapframe::TrapFrame;
#[must_use]
pub struct IrqLine {
irq_num: u8,
#[allow(clippy::redundant_allocation)]
irq: Arc<&'static irq::IrqLine>,
callbacks: Vec<IrqCallbackHandle>,
}
@ -68,7 +69,7 @@ impl IrqLine {
impl Clone for IrqLine {
fn clone(&self) -> Self {
Self {
irq_num: self.irq_num.clone(),
irq_num: self.irq_num,
irq: self.irq.clone(),
callbacks: Vec::new(),
}

View File

@ -54,7 +54,7 @@ pub(crate) trait UserContextApiInternal {
fn execute(&mut self) -> UserEvent;
/// Use the information inside CpuContext to build a trapframe
fn into_trap_frame(&self) -> TrapFrame;
fn as_trap_frame(&self) -> TrapFrame;
}
/// The common interface that every CPU architecture-specific `CpuContext` implements.

View File

@ -23,7 +23,7 @@ impl RecycleAllocator {
current: start,
recycled: Vec::new(),
skip: Vec::new(),
max: max,
max,
}
}
@ -82,13 +82,11 @@ impl RecycleAllocator {
self.skip.push(target);
true
}
} else if self.recycled.contains(&target) {
self.recycled.retain(|value| *value != target);
true
} else {
if self.recycled.contains(&target) {
self.recycled.retain(|value| *value != target);
true
} else {
false
}
false
}
}
}

View File

@ -123,11 +123,6 @@ impl VmFrameVec {
self.0.iter()
}
/// Return IntoIterator for internal frames
pub fn into_iter(self) -> alloc::vec::IntoIter<VmFrame> {
self.0.into_iter()
}
/// Returns the number of frames.
pub fn len(&self) -> usize {
self.0.len()
@ -150,6 +145,16 @@ impl VmFrameVec {
}
}
impl IntoIterator for VmFrameVec {
type Item = VmFrame;
type IntoIter = alloc::vec::IntoIter<Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
impl VmIo for VmFrameVec {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
let mut start = offset;
@ -362,6 +367,7 @@ impl VmFrame {
// FIXME: need a sound reason for creating a mutable reference
// for getting the content of the frame.
#[allow(clippy::mut_from_ref)]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn as_slice(&self) -> &mut [u8] {
core::slice::from_raw_parts_mut(
super::paddr_to_vaddr(self.start_paddr()) as *mut u8,

View File

@ -57,7 +57,7 @@ pub(crate) unsafe fn dealloc(index: usize) {
FRAME_ALLOCATOR.get().unwrap().lock().dealloc(index, 1);
}
pub(crate) fn init(regions: &Vec<MemoryRegion>) {
pub(crate) fn init(regions: &[MemoryRegion]) {
let mut allocator = FrameAllocator::<32>::new();
for region in regions.iter() {
if region.typ() == MemoryRegionType::Usable {

View File

@ -67,14 +67,16 @@ unsafe impl<const ORDER: usize> GlobalAlloc for LockedHeapWithRescue<ORDER> {
}
// Avoid locking self.heap when calling rescue.
if (self.rescue)(&self, &layout).is_err() {
return 0 as *mut u8;
if (self.rescue)(self, &layout).is_err() {
return core::ptr::null_mut::<u8>();
}
self.heap
.lock()
.alloc(layout)
.map_or(0 as *mut u8, |allocation| allocation.as_ptr())
.map_or(core::ptr::null_mut::<u8>(), |allocation| {
allocation.as_ptr()
})
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {

View File

@ -28,12 +28,8 @@ pub struct MemorySet {
areas: BTreeMap<Vaddr, MapArea>,
}
impl MapArea {
pub fn mapped_size(&self) -> usize {
self.size
}
pub fn clone(&self) -> Self {
impl Clone for MapArea {
fn clone(&self) -> Self {
let mut mapper = BTreeMap::new();
for (&va, old) in &self.mapper {
let new = frame_allocator::alloc(VmFrameFlags::empty()).unwrap();
@ -49,6 +45,12 @@ impl MapArea {
mapper,
}
}
}
impl MapArea {
pub fn mapped_size(&self) -> usize {
self.size
}
/// This function will map the vitural address to the given physical frames
pub fn new(
@ -69,7 +71,7 @@ impl MapArea {
size,
mapper: BTreeMap::new(),
};
let mut current_va = start_va.clone();
let mut current_va = start_va;
let page_size = size / PAGE_SIZE;
let mut phy_frame_iter = physical_frames.iter();
@ -149,6 +151,12 @@ impl MapArea {
}
}
impl Default for MemorySet {
fn default() -> Self {
Self::new()
}
}
impl MemorySet {
pub fn map(&mut self, area: MapArea) {
if area.size > 0 {

View File

@ -15,7 +15,7 @@ mod offset;
pub(crate) mod page_table;
mod space;
use crate::config::{PAGE_SIZE, PHYS_OFFSET};
use crate::config::{KERNEL_OFFSET, PAGE_SIZE, PHYS_OFFSET};
pub use self::frame::{VmAllocOptions, VmFrame, VmFrameVec, VmFrameVecIter};
pub use self::io::VmIo;
@ -38,7 +38,7 @@ pub trait HasPaddr {
}
pub fn vaddr_to_paddr(va: Vaddr) -> Option<Paddr> {
if va >= crate::config::PHYS_OFFSET && va <= crate::config::KERNEL_OFFSET {
if (PHYS_OFFSET..=KERNEL_OFFSET).contains(&va) {
// can use offset to get the physical address
Some(va - PHYS_OFFSET)
} else {
@ -67,7 +67,7 @@ pub(crate) fn init() {
let mut framebuffer_regions = Vec::new();
for i in memory_regions.iter() {
if i.typ() == MemoryRegionType::Framebuffer {
framebuffer_regions.push(i.clone());
framebuffer_regions.push(*i);
}
}
FRAMEBUFFER_REGIONS.call_once(|| framebuffer_regions);

View File

@ -89,6 +89,7 @@ pub struct PageTableConfig {
#[derive(Debug, Clone, Copy)]
#[repr(usize)]
#[allow(clippy::enum_variant_names)]
pub enum AddressWidth {
Level3PageTable = 3,
Level4PageTable = 4,
@ -190,7 +191,7 @@ impl<T: PageTableEntryTrait> PageTable<T> {
return None;
}
// Create next table
let frame = VmFrameVec::allocate(&VmAllocOptions::new(1).uninit(false))
let frame = VmFrameVec::allocate(VmAllocOptions::new(1).uninit(false))
.unwrap()
.pop()
.unwrap();

View File

@ -36,6 +36,7 @@ impl VmSpace {
}
}
/// Activate the page table, load root physical address to cr3
#[allow(clippy::missing_safety_doc)]
pub unsafe fn activate(&self) {
#[cfg(target_arch = "x86_64")]
crate::arch::x86::mm::activate_page_table(
@ -188,7 +189,7 @@ impl VmMapOptions {
///
/// The default value of this option is `None`.
pub fn addr(&mut self, addr: Option<Vaddr>) -> &mut Self {
if addr == None {
if addr.is_none() {
return self;
}
self.addr = Some(addr.unwrap());