Fix clippy check errors in the kernel (#637)

Fix clippy check errors in the kernel
---------

Co-authored-by: Samuel Dai <947309196@qq.com>
Co-authored-by: Donkey Kane <109840258+xiaolin2004@users.noreply.github.com>
Co-authored-by: themildwind <107623059+themildwind@users.noreply.github.com>
Co-authored-by: GnoCiYeH <heyicong@dragonos.org>
Co-authored-by: MemoryShore <105195940+MemoryShore@users.noreply.github.com>
Co-authored-by: 曾俊 <110876916+ZZJJWarth@users.noreply.github.com>
Co-authored-by: sun5etop <146408999+sun5etop@users.noreply.github.com>
Co-authored-by: hmt <114841534+1037827920@users.noreply.github.com>
Co-authored-by: laokengwt <143977175+laokengwt@users.noreply.github.com>
Co-authored-by: TTaq <103996388+TTaq@users.noreply.github.com>
Co-authored-by: Jomo <2512364506@qq.com>
Co-authored-by: Samuel Dai <samuka007@qq.com>
Co-authored-by: sspphh <112558065+sspphh@users.noreply.github.com>
Authored by LoGin on 2024-03-22 23:26:39 +08:00, committed by GitHub
parent 4695947e1b
commit b5b571e026
175 changed files with 1820 additions and 2155 deletions

View File

@@ -64,7 +64,7 @@ impl<A> PageList<A> {
#[derive(Debug)]
pub struct BuddyAllocator<A> {
// Head address of the free "list" for each order
-free_area: [PhysAddr; (MAX_ORDER - MIN_ORDER) as usize],
+free_area: [PhysAddr; MAX_ORDER - MIN_ORDER],
/// Total number of pages
total: PageFrameCount,
phantom: PhantomData<A>,
@@ -81,8 +81,8 @@ impl<A: MemoryManagementArch> BuddyAllocator<A> {
kdebug!("Free pages before init buddy: {:?}", initial_free_pages);
kdebug!("Buddy entries: {}", Self::BUDDY_ENTRIES);
-let mut free_area: [PhysAddr; (MAX_ORDER - MIN_ORDER) as usize] =
-[PhysAddr::new(0); (MAX_ORDER - MIN_ORDER) as usize];
+let mut free_area: [PhysAddr; MAX_ORDER - MIN_ORDER] =
+[PhysAddr::new(0); MAX_ORDER - MIN_ORDER];
// The buddy allocator's initial bookkeeping space is allocated from the bump allocator
for f in free_area.iter_mut() {
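Both hunks above fix clippy::unnecessary_cast: MAX_ORDER and MIN_ORDER are already usize (the fixed code uses their difference directly as an array length), so the `as usize` casts were no-ops. A minimal sketch of the lint; the constant values here are illustrative, not the kernel's real ones:

    const MAX_ORDER: usize = 31;
    const MIN_ORDER: usize = 12;

    fn main() {
        // `(MAX_ORDER - MIN_ORDER) as usize` would trip clippy::unnecessary_cast,
        // because the subtraction already yields a usize.
        let free_area = [0u8; MAX_ORDER - MIN_ORDER];
        println!("{} free lists", free_area.len());
    }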
@@ -157,11 +157,10 @@ impl<A: MemoryManagementArch> BuddyAllocator<A> {
}
}
}
// Then, from high orders to low, add the remaining pages to the lists
let mut remain_bytes = remain_pages.data() * A::PAGE_SIZE;
-assert!(remain_bytes < (1 << MAX_ORDER - 1));
+assert!(remain_bytes < (1 << MAX_ORDER) - 1);
for i in (MIN_ORDER..MAX_ORDER).rev() {
if remain_bytes >= (1 << i) {
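This pair is clippy::precedence, and here the lint exposed a real bug: `<<` binds tighter than the subtraction reads, so `1 << MAX_ORDER - 1` parses as `1 << (MAX_ORDER - 1)`, half a power of two away from the intended `(1 << MAX_ORDER) - 1`. A sketch with an illustrative order:

    fn main() {
        let max_order: u32 = 31;
        // clippy::precedence: `1 << max_order - 1` parses as `1 << (max_order - 1)`.
        let what_it_parsed_as = 1u64 << (max_order - 1);
        let what_was_intended = (1u64 << max_order) - 1;
        assert_ne!(what_it_parsed_as, what_was_intended);
        println!("{what_it_parsed_as} vs {what_was_intended}"); // 1073741824 vs 2147483647
    }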
@@ -214,7 +213,7 @@ impl<A: MemoryManagementArch> BuddyAllocator<A> {
/// Index into free_area
#[inline]
fn order2index(order: u8) -> usize {
-(order as usize - MIN_ORDER) as usize
+order as usize - MIN_ORDER
}
/// Take one buddy block of the given order from the head of the free list; returns None if there is none
@@ -239,7 +238,6 @@ impl<A: MemoryManagementArch> BuddyAllocator<A> {
if !next_page_list_addr.is_null() {
// page_list has no free buddy blocks left, and since it is not the only page, delete this page_list
self.free_area[Self::order2index(spec_order)] = next_page_list_addr;
-drop(page_list);
// kdebug!("FREE: page_list_addr={:b}", page_list_addr.data());
unsafe {
self.buddy_free(page_list_addr, MMArch::PAGE_SHIFT as u8);
@@ -284,7 +282,7 @@ impl<A: MemoryManagementArch> BuddyAllocator<A> {
if !page_list.next_page.is_null() {
// page_list has no free buddy blocks left, and since it is not the only page, delete this page_list
self.free_area[Self::order2index(spec_order)] = page_list.next_page;
-drop(page_list);
+let _ = page_list;
unsafe { self.buddy_free(page_list_addr, MMArch::PAGE_SHIFT as u8) };
} else {
Self::write_page(page_list_addr, page_list);
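In this hunk and the previous one, `drop(page_list)` tripped clippy::drop_non_drop (drop_copy on some toolchains): the type carries no Drop impl, so the call does nothing. One site deletes the call outright; the other keeps a `let _ =` binding to document that the value is done with. A hedged sketch with a stand-in PageList:

    #[derive(Clone, Copy)]
    struct PageList {
        entry_num: usize,
    }

    fn main() {
        let page_list = PageList { entry_num: 0 };
        // `drop(page_list)` would trip clippy::drop_non_drop: dropping a value
        // with no Drop impl (here also a Copy value) has no effect at all.
        let _ = page_list; // states "intentionally unused" without the no-op call
        println!("entries: {}", page_list.entry_num);
    }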
@@ -303,7 +301,7 @@ impl<A: MemoryManagementArch> BuddyAllocator<A> {
}
return None;
};
-let result: Option<PhysAddr> = alloc_in_specific_order(order as u8);
+let result: Option<PhysAddr> = alloc_in_specific_order(order);
// kdebug!("result={:?}", result);
if result.is_some() {
return result;
@@ -352,7 +350,7 @@ impl<A: MemoryManagementArch> BuddyAllocator<A> {
fn buddy_alloc(&mut self, count: PageFrameCount) -> Option<(PhysAddr, PageFrameCount)> {
assert!(count.data().is_power_of_two());
// Compute the order that needs to be allocated
-let mut order = log2(count.data() as usize);
+let mut order = log2(count.data());
if count.data() & ((1 << order) - 1) != 0 {
order += 1;
}
@@ -426,79 +424,9 @@ impl<A: MemoryManagementArch> BuddyAllocator<A> {
}
// If no buddy block was found
-if buddy_entry_virt_vaddr.is_none() {
-assert!(
-page_list.entry_num <= Self::BUDDY_ENTRIES,
-"buddy_free: page_list.entry_num > Self::BUDDY_ENTRIES"
-);
-// The current first page_list has no space left
-if first_page_list.entry_num == Self::BUDDY_ENTRIES {
-// If the current order is the smallest, reuse this block as the new page_list
-let new_page_list_addr = if order == MIN_ORDER {
-base
-} else {
-// Otherwise allocate a new page_list
-// Note: after this allocation the current entry_num may drop by 1 (a buddy block split), momentarily leaving a list of Self::BUDDY_ENTRIES+1 entries that are all null.
-// That is harmless: when inserting the list entry later we handle this case by checking whether the 2nd page of the list has a free slot.
-self.buddy_alloc(PageFrameCount::new(1))
-.expect("buddy_alloc failed: no enough memory")
-.0
-};
-// Zero this page
-core::ptr::write_bytes(
-A::phys_2_virt(new_page_list_addr)
-.expect(
-"Buddy free: failed to get virt address of [new_page_list_addr]",
-)
-.as_ptr::<u8>(),
-0,
-1 << order,
-);
-assert!(
-first_page_list_paddr == self.free_area[Self::order2index(order as u8)]
-);
-// Initialize the new page_list
-let new_page_list = PageList::new(0, first_page_list_paddr);
-Self::write_page(new_page_list_addr, new_page_list);
-self.free_area[Self::order2index(order as u8)] = new_page_list_addr;
-}
-// The first list page may have been updated above, so re-read this value
-let first_page_list_paddr = self.free_area[Self::order2index(order as u8)];
-let first_page_list: PageList<A> = Self::read_page(first_page_list_paddr);
-// Check whether the second page_list has a free slot
-let second_page_list = if first_page_list.next_page.is_null() {
-None
-} else {
-Some(Self::read_page::<PageList<A>>(first_page_list.next_page))
-};
-let (paddr, mut page_list) = if let Some(second) = second_page_list {
-// The second page_list has a free slot
-// This should match the earlier assumption: exactly 1 slot left
-assert!(second.entry_num == Self::BUDDY_ENTRIES - 1);
-(first_page_list.next_page, second)
-} else {
-// Allocate in the first page list
-(first_page_list_paddr, first_page_list)
-};
-// kdebug!("to write entry, page_list_base={paddr:?}, page_list.entry_num={}, value={base:?}", page_list.entry_num);
-assert!(page_list.entry_num < Self::BUDDY_ENTRIES);
-// Write the block being returned into the list entry
-unsafe { A::write(Self::entry_virt_addr(paddr, page_list.entry_num), base) }
-page_list.entry_num += 1;
-Self::write_page(paddr, page_list);
-return;
-} else {
-if let Some(buddy_entry_virt_addr) = buddy_entry_virt_vaddr {
+if let Some(buddy_entry_virt_addr) = buddy_entry_virt_vaddr {
// If a buddy block was found, merge and recurse upward
// Virtual address of the table entry holding the buddy block
-let buddy_entry_virt_addr = buddy_entry_virt_vaddr.unwrap();
// Physical address of the page_list holding the buddy block
let buddy_entry_page_list_paddr = buddy_entry_page_list_paddr.unwrap();
@@ -568,6 +496,74 @@ impl<A: MemoryManagementArch> BuddyAllocator<A> {
page_list.entry_num -= 1;
Self::write_page(page_list_paddr, page_list);
}
+} else {
+assert!(
+page_list.entry_num <= Self::BUDDY_ENTRIES,
+"buddy_free: page_list.entry_num > Self::BUDDY_ENTRIES"
+);
+// The current first page_list has no space left
+if first_page_list.entry_num == Self::BUDDY_ENTRIES {
+// If the current order is the smallest, reuse this block as the new page_list
+let new_page_list_addr = if order == MIN_ORDER {
+base
+} else {
+// Otherwise allocate a new page_list
+// Note: after this allocation the current entry_num may drop by 1 (a buddy block split), momentarily leaving a list of Self::BUDDY_ENTRIES+1 entries that are all null.
+// That is harmless: when inserting the list entry later we handle this case by checking whether the 2nd page of the list has a free slot.
+self.buddy_alloc(PageFrameCount::new(1))
+.expect("buddy_alloc failed: no enough memory")
+.0
+};
+// Zero this page
+core::ptr::write_bytes(
+A::phys_2_virt(new_page_list_addr)
+.expect(
+"Buddy free: failed to get virt address of [new_page_list_addr]",
+)
+.as_ptr::<u8>(),
+0,
+1 << order,
+);
+assert!(
+first_page_list_paddr == self.free_area[Self::order2index(order as u8)]
+);
+// Initialize the new page_list
+let new_page_list = PageList::new(0, first_page_list_paddr);
+Self::write_page(new_page_list_addr, new_page_list);
+self.free_area[Self::order2index(order as u8)] = new_page_list_addr;
+}
+// The first list page may have been updated above, so re-read this value
+let first_page_list_paddr = self.free_area[Self::order2index(order as u8)];
+let first_page_list: PageList<A> = Self::read_page(first_page_list_paddr);
+// Check whether the second page_list has a free slot
+let second_page_list = if first_page_list.next_page.is_null() {
+None
+} else {
+Some(Self::read_page::<PageList<A>>(first_page_list.next_page))
+};
+let (paddr, mut page_list) = if let Some(second) = second_page_list {
+// The second page_list has a free slot
+// This should match the earlier assumption: exactly 1 slot left
+assert!(second.entry_num == Self::BUDDY_ENTRIES - 1);
+(first_page_list.next_page, second)
+} else {
+// Allocate in the first page list
+(first_page_list_paddr, first_page_list)
+};
+// kdebug!("to write entry, page_list_base={paddr:?}, page_list.entry_num={}, value={base:?}", page_list.entry_num);
+assert!(page_list.entry_num < Self::BUDDY_ENTRIES);
+// Write the block being returned into the list entry
+unsafe { A::write(Self::entry_virt_addr(paddr, page_list.entry_num), base) }
+page_list.entry_num += 1;
+Self::write_page(paddr, page_list);
+return;
+}
base = min(base, buddy_addr);
order += 1;
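The two large hunks above are a single refactor: the old `if buddy_entry_virt_vaddr.is_none() { … return; } else { … unwrap() … }` shape is inverted into one `if let Some(…) { … } else { … }`, which is what clippy::unnecessary_unwrap suggests — bind the value once instead of checking and then unwrapping. Condensed to its skeleton:

    fn bump_if_found(buddy: Option<usize>) -> usize {
        // Before (flagged by clippy::unnecessary_unwrap):
        //   if buddy.is_none() { return 0; } else { let addr = buddy.unwrap(); ... }
        // After: a single `if let` both tests and binds, no unwrap needed.
        if let Some(addr) = buddy {
            addr + 1
        } else {
            0
        }
    }

    fn main() {
        assert_eq!(bump_if_found(Some(41)), 42);
        assert_eq!(bump_if_found(None), 0);
    }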
@@ -597,7 +593,7 @@ impl<A: MemoryManagementArch> FrameAllocator for BuddyAllocator<A> {
if unlikely(!count.data().is_power_of_two()) {
kwarn!("buddy free: count is not power of two");
}
-let mut order = log2(count.data() as usize);
+let mut order = log2(count.data());
if count.data() & ((1 << order) - 1) != 0 {
order += 1;
}

View File

@@ -57,7 +57,7 @@ impl<MMA: MemoryManagementArch> BumpAllocator<MMA> {
let mut found_start = false;
// Iterate over all physical memory areas
for area in iter {
-if found_start == false {
+if !found_start {
// Align area's base address to PAGE_SIZE, rounding up
// let area_base = (area.base.data() + MMA::PAGE_SHIFT) & !(MMA::PAGE_SHIFT);
let area_base = area.area_base_aligned().data();
@@ -78,17 +78,15 @@ impl<MMA: MemoryManagementArch> BumpAllocator<MMA> {
offset = (offset + (MMA::PAGE_SIZE - 1)) & !(MMA::PAGE_SIZE - 1);
}
// found
-if offset + 1 * MMA::PAGE_SIZE <= area_end {
+if offset + MMA::PAGE_SIZE <= area_end {
ret_offset_aligned = offset - area.area_base_aligned().data();
found_start = true;
}
}
-if found_start {
-if area.area_base_aligned() < area.area_end_aligned() {
-result_area[res_cnt] = area;
-res_cnt += 1;
-}
+if found_start && area.area_base_aligned() < area.area_end_aligned() {
+result_area[res_cnt] = area;
+res_cnt += 1;
}
}
@@ -114,7 +112,10 @@ impl<MMA: MemoryManagementArch> BumpAllocator<MMA> {
PageMapper::<MMA, _>::current(PageTableKind::Kernel, BumpAllocator::<MMA>::new(0));
for p in iter {
-if let None = mapper.translate(MMA::phys_2_virt(p.phys_address()).unwrap()) {
+if mapper
+.translate(MMA::phys_2_virt(p.phys_address()).unwrap())
+.is_none()
+{
let vaddr = MMA::phys_2_virt(p.phys_address()).unwrap();
pseudo_map_phys(vaddr, p.phys_address(), PageFrameCount::new(1));
}
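The bump-allocator hunks collect four small lints: clippy::bool_comparison (`found_start == false`), clippy::identity_op (`1 * MMA::PAGE_SIZE`), clippy::collapsible_if (the nested `if found_start { if … }`), and clippy::redundant_pattern_matching (`if let None = …`). All four in one runnable sketch:

    fn main() {
        let found_start = false;
        let page_size: usize = 4096;
        let offset: usize = 0;
        let area_end: usize = 8192;

        // bool_comparison: `found_start == false` → `!found_start`.
        // identity_op: `1 * page_size` → `page_size`.
        // collapsible_if: nested ifs with nothing else in the outer body merge via `&&`.
        if !found_start && offset + page_size <= area_end {
            println!("area fits");
        }

        // redundant_pattern_matching: `if let None = x` → `x.is_none()`.
        let translated: Option<usize> = None;
        if translated.is_none() {
            println!("not yet mapped");
        }
    }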

View File

@@ -61,8 +61,8 @@ impl LocalAlloc for KernelAllocator {
unsafe fn local_alloc(&self, layout: Layout) -> *mut u8 {
return self
.alloc_in_buddy(layout)
-.map(|x| x.as_mut_ptr() as *mut u8)
-.unwrap_or(core::ptr::null_mut() as *mut u8);
+.map(|x| x.as_mut_ptr())
+.unwrap_or(core::ptr::null_mut());
}
unsafe fn local_alloc_zeroed(&self, layout: Layout) -> *mut u8 {
@@ -73,7 +73,7 @@ impl LocalAlloc for KernelAllocator {
core::ptr::write_bytes(ptr, 0, x.len());
ptr
})
-.unwrap_or(core::ptr::null_mut() as *mut u8);
+.unwrap_or(core::ptr::null_mut());
}
unsafe fn local_dealloc(&self, ptr: *mut u8, layout: Layout) {
@@ -86,11 +86,7 @@ unsafe impl GlobalAlloc for KernelAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let r = self.local_alloc_zeroed(layout);
mm_debug_log(
-klog_types::AllocatorLogType::Alloc(AllocLogItem::new(
-layout.clone(),
-Some(r as usize),
-None,
-)),
+klog_types::AllocatorLogType::Alloc(AllocLogItem::new(layout, Some(r as usize), None)),
klog_types::LogSource::Buddy,
);
@@ -104,7 +100,7 @@ unsafe impl GlobalAlloc for KernelAllocator {
mm_debug_log(
klog_types::AllocatorLogType::AllocZeroed(AllocLogItem::new(
-layout.clone(),
+layout,
Some(r as usize),
None,
)),
@@ -116,11 +112,7 @@ unsafe impl GlobalAlloc for KernelAllocator {
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
mm_debug_log(
-klog_types::AllocatorLogType::Free(AllocLogItem::new(
-layout.clone(),
-Some(ptr as usize),
-None,
-)),
+klog_types::AllocatorLogType::Free(AllocLogItem::new(layout, Some(ptr as usize), None)),
klog_types::LogSource::Buddy,
);
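The allocator-log hunks are clippy::clone_on_copy — core::alloc::Layout is Copy, so `layout.clone()` is just a noisy copy — plus clippy::unnecessary_cast on pointers that are already `*mut u8`. A sketch:

    use core::alloc::Layout;

    fn log_alloc(layout: Layout, ptr: usize) {
        println!("alloc {} bytes (align {}) at {ptr:#x}", layout.size(), layout.align());
    }

    fn main() {
        let layout = Layout::from_size_align(64, 8).unwrap();
        // clone_on_copy: Layout is Copy, so pass it directly instead of `.clone()`...
        log_alloc(layout, 0x1000);
        log_alloc(layout, 0x2000); // ...and use it again, since Copy never moves it.

        // unnecessary_cast: the context already fixes the type to *mut u8,
        // so `core::ptr::null_mut() as *mut u8` adds nothing.
        let p: *mut u8 = core::ptr::null_mut();
        assert!(p.is_null());
    }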

View File

@@ -17,7 +17,7 @@ impl Slab {
pub unsafe fn new(start_addr: usize, slab_size: usize, block_size: usize) -> Slab {
let blocks_num = slab_size / block_size;
return Slab {
-block_size: block_size,
+block_size,
free_block_list: FreeBlockList::new(start_addr, block_size, blocks_num),
};
}
@@ -106,7 +106,7 @@ impl FreeBlockList {
impl Drop for FreeBlockList {
fn drop(&mut self) {
-while let Some(_) = self.pop() {}
+while self.pop().is_some() {}
}
}
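Two lints in the slab file: clippy::redundant_field_names (`block_size: block_size`) and clippy::redundant_pattern_matching (`while let Some(_) = …` used only to drain). Sketch:

    struct Slab {
        block_size: usize,
    }

    fn main() {
        let block_size = 64usize;
        // redundant_field_names: `block_size: block_size` → `block_size`.
        let slab = Slab { block_size };
        println!("block size: {}", slab.block_size);

        // redundant_pattern_matching:
        // `while let Some(_) = list.pop() {}` → `while list.pop().is_some() {}`.
        let mut list = vec![1, 2, 3];
        while list.pop().is_some() {}
        assert!(list.is_empty());
    }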

View File

@@ -82,8 +82,6 @@ fn do_kmalloc(size: usize, _zero: bool) -> usize {
let (ptr, len, cap) = space.into_raw_parts();
if !ptr.is_null() {
let vaddr = VirtAddr::new(ptr as usize);
-let len = len as usize;
-let cap = cap as usize;
let mut guard = C_ALLOCATION_MAP.lock();
if unlikely(guard.contains_key(&vaddr)) {
drop(guard);
@@ -139,8 +137,8 @@ unsafe extern "C" fn rs_mmio_create(
) -> i32 {
// kdebug!("mmio_create");
let r = mmio_pool().create_mmio(size as usize);
-if r.is_err() {
-return r.unwrap_err().to_posix_errno();
+if let Err(e) = r {
+return e.to_posix_errno();
}
let space_guard = r.unwrap();
*res_vaddr = space_guard.vaddr().data() as u64;
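The rs_mmio_create change is the same unnecessary_unwrap cleanup as in the buddy allocator: `if let Err(e) = r` binds the error directly instead of `r.is_err()` followed by `r.unwrap_err()`. Sketch with a hypothetical create_mmio stand-in:

    fn create_mmio(size: usize) -> Result<usize, i32> {
        if size == 0 { Err(-22) } else { Ok(size) }
    }

    fn rs_mmio_create(size: usize) -> i32 {
        let r = create_mmio(size);
        // unnecessary_unwrap: `if r.is_err() { return r.unwrap_err(); }`
        // becomes an `if let` that binds the error exactly once.
        if let Err(e) = r {
            return e;
        }
        r.unwrap() as i32
    }

    fn main() {
        assert_eq!(rs_mmio_create(0), -22);
        assert_eq!(rs_mmio_create(4096), 4096);
    }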

View File

@@ -78,7 +78,7 @@ impl EarlyIoRemap {
size: usize,
read_only: bool,
) -> Result<(VirtAddr, usize), SystemError> {
-if phys.check_aligned(MMArch::PAGE_SIZE) == false {
+if !phys.check_aligned(MMArch::PAGE_SIZE) {
return Err(SystemError::EINVAL);
}
@@ -175,7 +175,7 @@ impl EarlyIoRemap {
let idx = idx.ok_or(SystemError::EINVAL)?;
-let vaddr = Self::idx_to_virt(idx as usize);
+let vaddr = Self::idx_to_virt(idx);
let count = PageFrameCount::from_bytes(slot_guard[idx].size as usize).unwrap();
// Unmap

View File

@@ -360,7 +360,7 @@ impl MemBlockManager {
flags: MemoryAreaAttr,
) -> Result<(), SystemError> {
let rsvd_base = PhysAddr::new(page_align_down(base.data()));
-size = page_align_up((size as usize) + base.data() - rsvd_base.data());
+size = page_align_up(size + base.data() - rsvd_base.data());
base = rsvd_base;
let mut inner = self.inner.lock();
@@ -489,18 +489,16 @@ impl<'a> Iterator for MemBlockIter<'a> {
fn next(&mut self) -> Option<Self::Item> {
while self.index < self.inner.initial_memory_regions_num {
-if self.usable_only {
-if self.inner.initial_memory_regions[self.index]
+if self.usable_only
+&& !self.inner.initial_memory_regions[self.index]
.flags
.is_empty()
-== false
-{
-self.index += 1;
-if self.index >= self.inner.initial_memory_regions_num {
-return None;
-}
-continue;
+{
+self.index += 1;
+if self.index >= self.inner.initial_memory_regions_num {
+return None;
+}
+continue;
}
break;
}
@@ -515,6 +513,7 @@ impl<'a> Iterator for MemBlockIter<'a> {
bitflags! {
/// Memory region attributes
+#[allow(clippy::bad_bit_mask)]
pub struct MemoryAreaAttr: u32 {
/// No special request
const NONE = 0x0;
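The added `#[allow(clippy::bad_bit_mask)]` is for bitflags: a flag defined as 0x0 (NONE here) makes the macro-generated mask checks look like `x & 0 == 0` to clippy. Allowing the lint on the generated struct is the usual workaround; the same attribute appears on VmFlags further down. A sketch, assuming the bitflags 1.x crate as a dependency:

    use bitflags::bitflags;

    bitflags! {
        // NONE == 0 makes the generated mask checks trip clippy::bad_bit_mask.
        #[allow(clippy::bad_bit_mask)]
        pub struct MemoryAreaAttr: u32 {
            /// No special request
            const NONE = 0x0;
            const MIRROR = 0x1;
        }
    }

    fn main() {
        let attr = MemoryAreaAttr::MIRROR;
        assert!(attr.contains(MemoryAreaAttr::MIRROR));
        assert!(!attr.is_empty());
    }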

View File

@@ -164,7 +164,7 @@ impl MmioBuddyMemPool {
list_guard: &mut SpinLockGuard<MmioFreeRegionList>,
) -> Result<MmioBuddyAddrRegion, MmioResult> {
// Requested exponent is out of range
-if exp < MMIO_BUDDY_MIN_EXP || exp > MMIO_BUDDY_MAX_EXP {
+if !(MMIO_BUDDY_MIN_EXP..=MMIO_BUDDY_MAX_EXP).contains(&exp) {
kdebug!("query_addr_region: exp wrong");
return Err(MmioResult::WRONGEXP);
}
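clippy::manual_range_contains: a pair of comparisons against both bounds reads backwards compared to asking the range itself, and compiles to the same checks. Sketch; the bounds are illustrative, not the kernel's actual MMIO_BUDDY values:

    const MMIO_BUDDY_MIN_EXP: u32 = 12;
    const MMIO_BUDDY_MAX_EXP: u32 = 27;

    fn exp_ok(exp: u32) -> bool {
        // Instead of `exp < MMIO_BUDDY_MIN_EXP || exp > MMIO_BUDDY_MAX_EXP`:
        (MMIO_BUDDY_MIN_EXP..=MMIO_BUDDY_MAX_EXP).contains(&exp)
    }

    fn main() {
        assert!(exp_ok(20));
        assert!(!exp_ok(5));
    }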
@@ -175,7 +175,7 @@ impl MmioBuddyMemPool {
// Split larger blocks into smaller ones until exp can be satisfied, i.e. split one exp+1 block into two exp blocks
for e in exp + 1..MMIO_BUDDY_MAX_EXP + 1 {
let pop_list: &mut SpinLockGuard<MmioFreeRegionList> =
-&mut self.free_regions[exp2index(e) as usize].lock();
+&mut self.free_regions[exp2index(e)].lock();
if pop_list.num_free == 0 {
continue;
}
@@ -187,7 +187,7 @@ impl MmioBuddyMemPool {
if e2 != exp + 1 {
// Insert the split-off block into the smaller list
let low_list_guard: &mut SpinLockGuard<MmioFreeRegionList> =
-&mut self.free_regions[exp2index(e2 - 1) as usize].lock();
+&mut self.free_regions[exp2index(e2 - 1)].lock();
self.split_block(region, e2, low_list_guard);
} else {
// The list for exp (list_guard) is already locked and must not be locked again
@@ -201,13 +201,12 @@
}
}
} else {
-match self.pop_block(&mut self.free_regions[exp2index(e2) as usize].lock())
-{
+match self.pop_block(&mut self.free_regions[exp2index(e2)].lock()) {
Ok(region) => {
if e2 != exp + 1 {
// Insert the split-off block into the smaller list
let low_list_guard: &mut SpinLockGuard<MmioFreeRegionList> =
-&mut self.free_regions[exp2index(e2 - 1) as usize].lock();
+&mut self.free_regions[exp2index(e2 - 1)].lock();
self.split_block(region, e2, low_list_guard);
} else {
// The list for exp (list_guard) is already locked and must not be locked again
@@ -251,7 +250,7 @@ impl MmioBuddyMemPool {
if e != exp - 1 {
match self.merge_all_exp(
exp,
-&mut self.free_regions[exp2index(exp) as usize].lock(),
+&mut self.free_regions[exp2index(exp)].lock(),
&mut self.free_regions[exp2index(exp + 1)].lock(),
) {
Ok(_) => continue,
@@ -263,7 +262,7 @@
} else {
match self.merge_all_exp(
exp,
-&mut self.free_regions[exp2index(exp) as usize].lock(),
+&mut self.free_regions[exp2index(exp)].lock(),
list_guard,
) {
Ok(_) => continue,
@@ -346,7 +345,7 @@ impl MmioBuddyMemPool {
exp: u32,
list_guard: &mut SpinLockGuard<MmioFreeRegionList>,
) -> Result<MmioBuddyAddrRegion, MmioResult> {
-if list_guard.list.len() == 0 {
+if list_guard.list.is_empty() {
return Err(MmioResult::ISEMPTY);
} else {
// Compute the buddy block's address
@@ -559,7 +558,7 @@ impl MmioBuddyMemPool {
// Return it to the buddy allocator
mmio_pool()
-.give_back_block(vaddr, length.trailing_zeros() as u32)
+.give_back_block(vaddr, length.trailing_zeros())
.unwrap_or_else(|err| {
panic!("MMIO release failed: self: {self:?}, err msg: {:?}", err);
});
@@ -585,7 +584,7 @@ impl MmioBuddyAddrRegion {
}
/// @brief Free-page array structure
-#[derive(Debug)]
+#[derive(Debug, Default)]
pub struct MmioFreeRegionList {
/// Linked list of mmio_buddy addresses
list: LinkedList<MmioBuddyAddrRegion>,
@@ -600,14 +599,6 @@ impl MmioFreeRegionList {
};
}
}
-impl Default for MmioFreeRegionList {
-fn default() -> Self {
-MmioFreeRegionList {
-list: Default::default(),
-num_free: 0,
-}
-}
-}
/// @brief Convert the power-of-two size of a memory object into an index into the pool's array
///
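Deriving Default and deleting the hand-written impl is clippy::derivable_impls: the manual impl only filled each field with its own default. Sketch:

    use std::collections::LinkedList;

    // derivable_impls: a manual Default that just defaults every field
    // collapses into `#[derive(Default)]`.
    #[derive(Debug, Default)]
    struct MmioFreeRegionList {
        list: LinkedList<u64>,
        num_free: i64,
    }

    fn main() {
        let l = MmioFreeRegionList::default();
        assert!(l.list.is_empty());
        assert_eq!(l.num_free, 0);
    }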

View File

@@ -37,6 +37,7 @@ static mut __IDLE_PROCESS_ADDRESS_SPACE: Option<Arc<AddressSpace>> = None;
bitflags! {
/// Virtual memory flags
+#[allow(clippy::bad_bit_mask)]
pub struct VmFlags:u32{
const VM_NONE = 0x00000000;
@@ -280,11 +281,7 @@ impl VirtAddr {
/// @brief Check whether the virtual address lies in user space
#[inline(always)]
pub fn check_user(&self) -> bool {
-if self < &MMArch::USER_END_VADDR {
-return true;
-} else {
-return false;
-}
+return self < &MMArch::USER_END_VADDR;
}
#[inline(always)]
@@ -720,7 +717,7 @@ impl VirtRegion {
impl PartialOrd for VirtRegion {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
-return self.start.partial_cmp(&other.start);
+Some(self.cmp(other))
}
}
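Two lints in this file: clippy::needless_bool (an if/else that only returns true/false should return the condition itself) and the canonical PartialOrd pattern — when a type is Ord, partial_cmp should be `Some(self.cmp(other))` so the two orderings can never disagree (non_canonical_partial_ord_impl in current clippy; older releases used a different name). Sketch:

    use std::cmp::Ordering;

    #[derive(PartialEq, Eq)]
    struct VirtRegion {
        start: usize,
    }

    impl Ord for VirtRegion {
        fn cmp(&self, other: &Self) -> Ordering {
            self.start.cmp(&other.start)
        }
    }

    impl PartialOrd for VirtRegion {
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
            // Delegate to cmp; comparing a field directly here is what the lint flags.
            Some(self.cmp(other))
        }
    }

    fn check_user(addr: usize, user_end: usize) -> bool {
        // needless_bool: `if cond { true } else { false }` is just `cond`.
        addr < user_end
    }

    fn main() {
        let a = VirtRegion { start: 0x1000 };
        let b = VirtRegion { start: 0x2000 };
        assert!(a < b);
        assert!(check_user(0x1000, 0x8000_0000));
    }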

View File

@@ -86,7 +86,7 @@ impl EarlyIoRemapPages {
let offset = addr.data() - start_vaddr;
let index = offset / MMArch::PAGE_SIZE;
if index < Self::EARLY_REMAP_PAGES_NUM {
-assert_eq!(self.bmp.get(index).unwrap(), true);
+assert!(self.bmp.get(index).unwrap());
self.bmp.set(index, false);
}
}
@@ -203,9 +203,9 @@ pub unsafe fn pseudo_unmap_phys(vaddr: VirtAddr, count: PageFrameCount) {
for i in 0..count.data() {
let vaddr = vaddr + i * MMArch::PAGE_SIZE;
-mapper.unmap_phys(vaddr, true).map(|(_, _, flusher)| {
+if let Some((_, _, flusher)) = mapper.unmap_phys(vaddr, true) {
flusher.ignore();
-});
+};
}
mapper.make_current();
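Two more lint classes here: clippy::bool_assert_comparison (`assert_eq!(…, true)` → `assert!`) and clippy::option_map_unit_fn (`.map()` used purely for a side effect becomes an `if let`). Sketch:

    fn main() {
        // bool_assert_comparison: `assert_eq!(flag, true)` → `assert!(flag)`.
        let page_in_use = true;
        assert!(page_in_use);

        // option_map_unit_fn: `.map()` run only for its side effect hides
        // control flow; `if let` says the same thing plainly.
        let unmapped: Option<u32> = Some(7);
        if let Some(frame) = unmapped {
            println!("flush frame {frame}");
        }
    }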

View File

@@ -293,7 +293,7 @@ impl<Arch: MemoryManagementArch> PageFlags<Arch> {
#[inline(always)]
pub const unsafe fn from_data(data: usize) -> Self {
return Self {
-data: data,
+data,
phantom: PhantomData,
};
}
@@ -624,7 +624,7 @@ impl<Arch: MemoryManagementArch, F: FrameAllocator> PageMapper<Arch, F> {
if table.level() == 0 {
// todo: check whether it is already mapped
// We skip the check for now because the kernel maps some pages right after boot.
-if table.entry_mapped(i)? == true {
+if table.entry_mapped(i)? {
kwarn!("Page {:?} already mapped", virt);
}
@@ -752,8 +752,8 @@ impl<Arch: MemoryManagementArch, F: FrameAllocator> PageMapper<Arch, F> {
return None;
}
-let mut table = self.table();
-return unmap_phys_inner(virt, &mut table, unmap_parents, self.allocator_mut())
+let table = self.table();
+return unmap_phys_inner(virt, &table, unmap_parents, self.allocator_mut())
.map(|(paddr, flags)| (paddr, flags, PageFlush::<Arch>::new(virt)));
}
@@ -805,9 +805,9 @@ unsafe fn unmap_phys_inner<Arch: MemoryManagementArch>(
return Some((entry.address().ok()?, entry.flags()));
}
-let mut subtable = table.next_level_table(i)?;
+let subtable = table.next_level_table(i)?;
// Recursively unmap
-let result = unmap_phys_inner(vaddr, &mut subtable, unmap_parents, allocator)?;
+let result = unmap_phys_inner(vaddr, &subtable, unmap_parents, allocator)?;
// TODO: This is a bad idea for architectures where the kernel mappings are done in the process tables,
// as these mappings may become out of sync

View File

@@ -14,7 +14,7 @@ use crate::{
///
/// todo: Once the smp module is refactored, get the CPU count from it.
/// The smp module currently initializes late, so most kernel modules cannot initialize PerCpu variables early on.
-const CPU_NUM: AtomicU32 = AtomicU32::new(PerCpu::MAX_CPU_NUM);
+static CPU_NUM_ATOMIC: AtomicU32 = AtomicU32::new(PerCpu::MAX_CPU_NUM);
#[derive(Debug)]
pub struct PerCpu;
@@ -32,13 +32,14 @@ impl PerCpu {
/// This function calls `smp_get_total_cpu()` to get the CPU count, then stores it in `CPU_NUM`.
#[allow(dead_code)]
pub fn init() {
-if CPU_NUM.load(core::sync::atomic::Ordering::SeqCst) != 0 {
+let cpu_num: &AtomicU32 = &CPU_NUM_ATOMIC;
+if cpu_num.load(core::sync::atomic::Ordering::SeqCst) != 0 {
panic!("PerCpu::init() called twice");
}
let cpus = smp_cpu_manager().present_cpus_count();
assert!(cpus > 0, "PerCpu::init(): present_cpus_count() returned 0");
-CPU_NUM.store(cpus, core::sync::atomic::Ordering::SeqCst);
+CPU_NUM_ATOMIC.store(cpus, core::sync::atomic::Ordering::SeqCst);
}
}
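The `const CPU_NUM` → `static CPU_NUM_ATOMIC` change fixes clippy::declare_interior_mutable_const / borrow_interior_mutable_const: a const with interior mutability is re-instantiated at every use site, so a store lands in a temporary and vanishes, while a static is one shared object. A sketch demonstrating the difference:

    use std::sync::atomic::{AtomicU32, Ordering};

    // A `const` with interior mutability is inlined at every use site: each
    // mention materializes a fresh AtomicU32, so stores silently vanish.
    #[allow(clippy::declare_interior_mutable_const)]
    const CPU_NUM_CONST: AtomicU32 = AtomicU32::new(0);

    // A `static` is one object at one address; stores persist.
    static CPU_NUM_STATIC: AtomicU32 = AtomicU32::new(0);

    #[allow(clippy::borrow_interior_mutable_const)]
    fn main() {
        CPU_NUM_CONST.store(8, Ordering::SeqCst); // writes into a temporary
        assert_eq!(CPU_NUM_CONST.load(Ordering::SeqCst), 0); // the store is gone

        CPU_NUM_STATIC.store(8, Ordering::SeqCst);
        assert_eq!(CPU_NUM_STATIC.load(Ordering::SeqCst), 8); // the store stuck
    }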
@@ -65,7 +66,7 @@ impl<T> PerCpuVar<T> {
///
/// - `data` - Initial value of each CPU's data. The length of the Vec passed in must equal the number of CPUs, otherwise None is returned.
pub fn new(data: Vec<T>) -> Option<Self> {
-let cpu_num = CPU_NUM.load(core::sync::atomic::Ordering::SeqCst);
+let cpu_num = CPU_NUM_ATOMIC.load(core::sync::atomic::Ordering::SeqCst);
if cpu_num == 0 {
panic!("PerCpu::init() not called");
}
@@ -87,6 +88,7 @@ impl<T> PerCpuVar<T> {
&self.inner[cpu_id.data() as usize]
}
+#[allow(clippy::mut_from_ref)]
pub fn get_mut(&self) -> &mut T {
let cpu_id = smp_get_processor_id();
unsafe {
@@ -98,6 +100,7 @@ impl<T> PerCpuVar<T> {
&self.inner[cpu_id.data() as usize]
}
+#[allow(clippy::mut_from_ref)]
pub unsafe fn force_get_mut(&self, cpu_id: ProcessorId) -> &mut T {
&mut (self as *const Self as *mut Self).as_mut().unwrap().inner[cpu_id.data() as usize]
}

View File

@@ -113,23 +113,23 @@ impl From<ProtFlags> for VmFlags {
}
}
-impl Into<MapFlags> for VmFlags {
-fn into(self) -> MapFlags {
+impl From<VmFlags> for MapFlags {
+fn from(value: VmFlags) -> Self {
let mut map_flags = MapFlags::MAP_NONE;
-if self.contains(VmFlags::VM_GROWSDOWN) {
+if value.contains(VmFlags::VM_GROWSDOWN) {
map_flags |= MapFlags::MAP_GROWSDOWN;
}
-if self.contains(VmFlags::VM_LOCKED) {
+if value.contains(VmFlags::VM_LOCKED) {
map_flags |= MapFlags::MAP_LOCKED;
}
-if self.contains(VmFlags::VM_SYNC) {
+if value.contains(VmFlags::VM_SYNC) {
map_flags |= MapFlags::MAP_SYNC;
}
-if self.contains(VmFlags::VM_MAYSHARE) {
+if value.contains(VmFlags::VM_MAYSHARE) {
map_flags |= MapFlags::MAP_SHARED;
map_flags |= MapFlags::MAP_SHARED;
}
@@ -137,19 +137,19 @@ }
}
}
-impl Into<ProtFlags> for VmFlags {
-fn into(self) -> ProtFlags {
+impl From<VmFlags> for ProtFlags {
+fn from(value: VmFlags) -> Self {
let mut prot_flags = ProtFlags::PROT_NONE;
-if self.contains(VmFlags::VM_READ) {
+if value.contains(VmFlags::VM_READ) {
prot_flags |= ProtFlags::PROT_READ;
}
-if self.contains(VmFlags::VM_WRITE) {
+if value.contains(VmFlags::VM_WRITE) {
prot_flags |= ProtFlags::PROT_WRITE;
}
-if self.contains(VmFlags::VM_EXEC) {
+if value.contains(VmFlags::VM_EXEC) {
prot_flags |= ProtFlags::PROT_EXEC;
}
@@ -302,7 +302,7 @@ impl Syscall {
return Err(SystemError::EINVAL);
}
let vma = vma.unwrap();
-let vm_flags = vma.lock().vm_flags().clone();
+let vm_flags = *vma.lock().vm_flags();
// Huge-page mappings are not supported yet
if vm_flags.contains(VmFlags::VM_HUGETLB) {
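These hunks apply clippy::from_over_into — implement From and receive Into through the standard library's blanket impl — plus another clone_on_copy fix (`*vma.lock().vm_flags()` copies the Copy bitflags instead of cloning them). Sketch of the From pattern with illustrative flag types:

    struct VmFlags(u32);
    struct MapFlags(u32);

    // from_over_into: implementing From gives callers both MapFlags::from(x)
    // and x.into(); implementing Into directly only gives the latter.
    impl From<VmFlags> for MapFlags {
        fn from(value: VmFlags) -> Self {
            MapFlags(value.0 & 0xff) // illustrative conversion
        }
    }

    fn main() {
        let vm = VmFlags(0x104);
        let map: MapFlags = vm.into(); // provided by the blanket Into impl
        assert_eq!(map.0, 0x04);
    }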

View File

@@ -178,7 +178,7 @@ impl InnerAddressSpace {
let new_vma = VMA::zeroed(
VirtPageFrame::new(vma_guard.region.start()),
PageFrameCount::new(vma_guard.region.size() / MMArch::PAGE_SIZE),
-vma_guard.vm_flags().clone(),
+*vma_guard.vm_flags(),
tmp_flags,
&mut new_guard.user_mapper.utable,
(),
@@ -287,7 +287,7 @@ impl InnerAddressSpace {
prot_flags,
map_flags,
move |page, count, flags, mapper, flusher| {
-Ok(VMA::zeroed(page, count, vm_flags, flags, mapper, flusher)?)
+VMA::zeroed(page, count, vm_flags, flags, mapper, flusher)
},
)?;
@@ -434,9 +434,7 @@ impl InnerAddressSpace {
UserBufferWriter::new(new_page_vaddr.data() as *mut u8, new_len, true)?;
let new_buf: &mut [u8] = new_buffer_writer.buffer(0)?;
let len = old_buf.len().min(new_buf.len());
-for i in 0..len {
-new_buf[i] = old_buf[i];
-}
+new_buf[..len].copy_from_slice(&old_buf[..len]);
return Ok(new_page_vaddr);
}
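The element-by-element copy loop is clippy::manual_memcpy; `copy_from_slice` expresses the same thing in one call and lowers to a memcpy. Sketch:

    fn main() {
        let old_buf = [1u8, 2, 3, 4];
        let mut new_buf = [0u8; 8];
        let len = old_buf.len().min(new_buf.len());

        // manual_memcpy: replaces `for i in 0..len { new_buf[i] = old_buf[i]; }`.
        new_buf[..len].copy_from_slice(&old_buf[..len]);
        assert_eq!(&new_buf[..len], &old_buf[..]);
    }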
@@ -466,16 +464,16 @@ impl InnerAddressSpace {
let r = r.lock().region;
let r = self.mappings.remove_vma(&r).unwrap();
let intersection = r.lock().region().intersect(&to_unmap).unwrap();
-let (before, r, after) = r.extract(intersection).unwrap();
+let split_result = r.extract(intersection).unwrap();
// TODO: Once backing-page mappings are introduced, add logic here to notify the file
-if let Some(before) = before {
+if let Some(before) = split_result.prev {
// If there is a preceding VMA, re-insert it into the address space's VMA list
self.mappings.insert_vma(before);
}
-if let Some(after) = after {
+if let Some(after) = split_result.after {
// If there is a following VMA, re-insert it into the address space's VMA list
self.mappings.insert_vma(after);
}
@@ -517,16 +515,16 @@ impl InnerAddressSpace {
for r in regions {
// kdebug!("mprotect: r: {:?}", r);
-let r = r.lock().region().clone();
+let r = *r.lock().region();
let r = self.mappings.remove_vma(&r).unwrap();
let intersection = r.lock().region().intersect(&region).unwrap();
-let (before, r, after) = r.extract(intersection).expect("Failed to extract VMA");
+let split_result = r.extract(intersection).expect("Failed to extract VMA");
-if let Some(before) = before {
+if let Some(before) = split_result.prev {
self.mappings.insert_vma(before);
}
-if let Some(after) = after {
+if let Some(after) = split_result.after {
self.mappings.insert_vma(after);
}
@@ -625,7 +623,7 @@ impl InnerAddressSpace {
let new_brk = if incr > 0 {
self.brk + incr as usize
} else {
-self.brk - (incr.abs() as usize)
+self.brk - incr.unsigned_abs()
};
let new_brk = VirtAddr::new(page_align_up(new_brk.data()));
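clippy::cast_abs_to_unsigned: `incr.abs() as usize` misbehaves at the minimum value (`abs()` overflows there and panics in debug builds), while `unsigned_abs()` is total and lossless. Sketch with i64 to stay platform-independent:

    fn main() {
        let incr: i64 = -4096;
        // cast_abs_to_unsigned: prefer unsigned_abs() over `abs() as u64`.
        let decrement = incr.unsigned_abs();
        assert_eq!(decrement, 4096);
        // The case abs() cannot handle:
        assert_eq!(i64::MIN.unsigned_abs(), 9_223_372_036_854_775_808);
    }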
@@ -707,7 +705,7 @@ impl UserMappings {
let r = self
.vmas
.iter()
-.filter(move |v| !v.lock().region.intersect(&request).is_none())
+.filter(move |v| v.lock().region.intersect(&request).is_some())
.cloned();
return r;
}
@@ -829,7 +827,7 @@ impl UserMappings {
/// Insert a new VMA into the current process's mappings.
pub fn insert_vma(&mut self, vma: Arc<LockedVMA>) {
-let region = vma.lock().region.clone();
+let region = vma.lock().region;
// The inserted address range must be free, i.e. no VMA in the current process's address space may overlap it.
assert!(self.conflicts(region).next().is_none());
self.reserve_hole(&region);
@@ -963,14 +961,7 @@ impl LockedVMA {
/// 1. The preceding VMA (None if absent)
/// 2. The middle VMA, i.e. the Region passed in
/// 3. The following VMA (None if absent)
-pub fn extract(
-&self,
-region: VirtRegion,
-) -> Option<(
-Option<Arc<LockedVMA>>,
-Arc<LockedVMA>,
-Option<Arc<LockedVMA>>,
-)> {
+pub fn extract(&self, region: VirtRegion) -> Option<VMASplitResult> {
assert!(region.start().check_aligned(MMArch::PAGE_SIZE));
assert!(region.end().check_aligned(MMArch::PAGE_SIZE));
@@ -990,7 +981,11 @@ impl LockedVMA {
let intersect: VirtRegion = intersect.unwrap();
if unlikely(intersect == guard.region) {
// If the current VMA exactly covers the region, return the current VMA directly
-return Some((None, guard.self_ref.upgrade().unwrap(), None));
+return Some(VMASplitResult::new(
+None,
+guard.self_ref.upgrade().unwrap(),
+None,
+));
}
}
@@ -1014,7 +1009,32 @@ impl LockedVMA {
// TODO: reset the anon_vma of the physical pages inside the before and after VMAs
-return Some((before, guard.self_ref.upgrade().unwrap(), after));
+return Some(VMASplitResult::new(
+before,
+guard.self_ref.upgrade().unwrap(),
+after,
+));
}
}
+/// Result of splitting a VMA
+pub struct VMASplitResult {
+pub prev: Option<Arc<LockedVMA>>,
+pub middle: Arc<LockedVMA>,
+pub after: Option<Arc<LockedVMA>>,
+}
+impl VMASplitResult {
+pub fn new(
+prev: Option<Arc<LockedVMA>>,
+middle: Arc<LockedVMA>,
+post: Option<Arc<LockedVMA>>,
+) -> Self {
+Self {
+prev,
+middle,
+after: post,
+}
+}
+}
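Introducing VMASplitResult resolves clippy::type_complexity on extract()'s old tuple return type and gives the three parts names. The shape of the refactor, with LockedVMA as a stand-in unit struct:

    use std::sync::Arc;

    struct LockedVMA; // stand-in for the kernel's type

    // type_complexity flagged the old return type
    // Option<(Option<Arc<LockedVMA>>, Arc<LockedVMA>, Option<Arc<LockedVMA>>)>;
    // a named struct carries the same data with self-describing fields.
    pub struct VMASplitResult {
        pub prev: Option<Arc<LockedVMA>>,
        pub middle: Arc<LockedVMA>,
        pub after: Option<Arc<LockedVMA>>,
    }

    fn extract() -> Option<VMASplitResult> {
        Some(VMASplitResult {
            prev: None,
            middle: Arc::new(LockedVMA),
            after: None,
        })
    }

    fn main() {
        let split = extract().unwrap();
        assert!(split.prev.is_none() && split.after.is_none());
        let _middle = split.middle;
    }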
@@ -1293,7 +1313,7 @@ impl Eq for VMA {}
impl PartialOrd for VMA {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
-return self.region.partial_cmp(&other.region);
+Some(self.cmp(other))
}
}