Refactor MemoryRegionArray::into_non_overlapping

This commit is contained in:
Zhang Junyang
2025-03-03 21:43:59 +08:00
committed by Tate, Hongliang Tian
parent 46217ab021
commit 8a6c8c44e9

View File

@ -1,31 +1,35 @@
// SPDX-License-Identifier: MPL-2.0 // SPDX-License-Identifier: MPL-2.0
//! Information of memory regions in the boot phase. //! Information of memory regions in the boot phase.
//!
use core::ops::Deref; use core::ops::Deref;
use crate::mm::kspace::kernel_loaded_offset; use align_ext::AlignExt;
use crate::mm::{kspace::kernel_loaded_offset, Paddr, PAGE_SIZE};
/// The type of initial memory regions that are needed for the kernel. /// The type of initial memory regions that are needed for the kernel.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub enum MemoryRegionType { pub enum MemoryRegionType {
/// Maybe points to an unplugged DIMM module. It's bad anyway. /// Maybe points to an unplugged DIMM module. It's bad anyway.
BadMemory = 0, BadMemory = 0,
/// Some holes not specified by the bootloader/firmware. It may be used for
/// I/O memory but we don't know for sure.
Unknown = 1,
/// In ACPI spec, this area needs to be preserved when sleeping. /// In ACPI spec, this area needs to be preserved when sleeping.
NonVolatileSleep = 1, NonVolatileSleep = 2,
/// Reserved by BIOS or bootloader, do not use. /// Reserved by BIOS or bootloader, do not use.
Reserved = 2, Reserved = 3,
/// The place where kernel sections are loaded. /// The place where kernel sections are loaded.
Kernel = 3, Kernel = 4,
/// The place where kernel modules (e.g. initrd) are loaded, could be reused. /// The place where kernel modules (e.g. initrd) are loaded, could be reused.
Module = 4, Module = 5,
/// The memory region provided as the framebuffer. /// The memory region provided as the framebuffer.
Framebuffer = 5, Framebuffer = 6,
/// Once used in the boot phase. Kernel can reclaim it after initialization. /// Once used in the boot phase. Kernel can reclaim it after initialization.
Reclaimable = 6, Reclaimable = 7,
/// Directly usable by the frame allocator. /// Directly usable by the frame allocator.
Usable = 7, Usable = 8,
} }
/// The information of initial memory regions that are needed by the kernel. /// The information of initial memory regions that are needed by the kernel.
@ -39,7 +43,7 @@ pub struct MemoryRegion {
impl MemoryRegion { impl MemoryRegion {
/// Constructs a valid memory region. /// Constructs a valid memory region.
pub const fn new(base: usize, len: usize, typ: MemoryRegionType) -> Self { pub const fn new(base: Paddr, len: usize, typ: MemoryRegionType) -> Self {
MemoryRegion { base, len, typ } MemoryRegion { base, len, typ }
} }
@ -70,7 +74,7 @@ impl MemoryRegion {
} }
/// The physical address of the base of the region. /// The physical address of the base of the region.
pub fn base(&self) -> usize { pub fn base(&self) -> Paddr {
self.base self.base
} }
@ -79,6 +83,11 @@ impl MemoryRegion {
self.len self.len
} }
/// The physical address of the end of the region.
pub fn end(&self) -> Paddr {
self.base + self.len
}
/// Checks whether the region is empty /// Checks whether the region is empty
pub fn is_empty(&self) -> bool { pub fn is_empty(&self) -> bool {
self.len == 0 self.len == 0
@ -89,46 +98,21 @@ impl MemoryRegion {
self.typ self.typ
} }
/// Removes range `t` from self, resulting in 0, 1 or 2 truncated ranges. fn as_aligned(&self) -> Self {
/// We need to have this method since memory regions can overlap. let (base, end) = match self.typ() {
pub fn truncate(&self, t: &MemoryRegion) -> MemoryRegionArray<2> { MemoryRegionType::Usable => (
if self.base < t.base { self.base().align_up(PAGE_SIZE),
if self.base + self.len > t.base { self.end().align_down(PAGE_SIZE),
if self.base + self.len > t.base + t.len { ),
MemoryRegionArray::from(&[ _ => (
MemoryRegion { self.base().align_down(PAGE_SIZE),
base: self.base, self.end().align_up(PAGE_SIZE),
len: t.base - self.base, ),
typ: self.typ, };
}, MemoryRegion {
MemoryRegion { base,
base: t.base + t.len, len: end - base,
len: self.base + self.len - (t.base + t.len), typ: self.typ,
typ: self.typ,
},
])
} else {
MemoryRegionArray::from(&[MemoryRegion {
base: self.base,
len: t.base - self.base,
typ: self.typ,
}])
}
} else {
MemoryRegionArray::from(&[*self])
}
} else if self.base < t.base + t.len {
if self.base + self.len > t.base + t.len {
MemoryRegionArray::from(&[MemoryRegion {
base: t.base + t.len,
len: self.base + self.len - (t.base + t.len),
typ: self.typ,
}])
} else {
MemoryRegionArray::new()
}
} else {
MemoryRegionArray::from(&[*self])
} }
} }
} }
@ -172,20 +156,6 @@ impl<const LEN: usize> MemoryRegionArray<LEN> {
} }
} }
/// Constructs from an array of regions.
pub fn from(array: &[MemoryRegion]) -> Self {
Self {
regions: core::array::from_fn(|i| {
if i < array.len() {
array[i]
} else {
MemoryRegion::bad()
}
}),
count: array.len(),
}
}
/// Appends a region to the set. /// Appends a region to the set.
/// ///
/// If the set is full, an error is returned. /// If the set is full, an error is returned.
@ -199,60 +169,153 @@ impl<const LEN: usize> MemoryRegionArray<LEN> {
} }
} }
/// Clears the set. /// Sorts the regions and returns a full set of non-overlapping regions.
pub fn clear(&mut self) {
self.count = 0;
}
/// Truncates regions, resulting in a set of regions that does not overlap.
/// ///
/// The truncation will be done according to the type of the regions, that /// If an address is in multiple regions, the region with the lowest
/// usable and reclaimable regions will be truncated by the unusable regions. /// usability will be its type.
/// ///
/// If the output regions are more than `LEN`, the extra regions will be ignored. /// All the addresses between 0 and the end of the last region will be in
pub fn into_non_overlapping(self) -> Self { /// the resulting set. If an address is not in any region, it will be marked
// We should later use regions in `regions_unusable` to truncate all /// as [`MemoryRegionType::Unknown`].
// regions in `regions_usable`. ///
// The difference is that regions in `regions_usable` could be used by /// If any of the region boundaries are not page-aligned, they will be aligned
// the frame allocator. /// according to the type of the region.
let mut regions_usable = MemoryRegionArray::<LEN>::new(); ///
let mut regions_unusable = MemoryRegionArray::<LEN>::new(); /// # Panics
///
/// This method will panic if the number of output regions is greater than `LEN`.
pub fn into_non_overlapping(mut self) -> Self {
let max_addr = self
.iter()
.map(|r| r.end())
.max()
.unwrap_or(0)
.align_down(PAGE_SIZE);
self.regions.iter_mut().for_each(|r| *r = r.as_aligned());
for r in self.iter() { let mut result = MemoryRegionArray::<LEN>::new();
match r.typ {
MemoryRegionType::Usable | MemoryRegionType::Reclaimable => { let mut cur_right = 0;
// If the usable memory regions exceed the capacity, it's fine to ignore the rest.
let _ = regions_usable.push(*r); while cur_right < max_addr {
} // Find the most restrictive type.
_ => { let typ = self
regions_unusable .iter()
.push(*r) .filter(|region| (region.base()..region.end()).contains(&cur_right))
.expect("Too many unusable memory regions"); .map(|region| region.typ())
} .min()
.unwrap_or(MemoryRegionType::Unknown);
// Find the right boundary.
let right = self
.iter()
.filter_map(|region| {
if region.base() > cur_right {
Some(region.base())
} else if region.end() > cur_right {
Some(region.end())
} else {
None
}
})
.min()
.unwrap();
result
.push(MemoryRegion::new(cur_right, right - cur_right, typ))
.unwrap();
cur_right = right;
}
// Merge the adjacent regions with the same type.
let mut merged_count = 1;
for i in 1..result.count {
if result[i].typ() == result.regions[merged_count - 1].typ() {
result.regions[merged_count - 1] = MemoryRegion::new(
result.regions[merged_count - 1].base(),
result.regions[merged_count - 1].len() + result[i].len(),
result.regions[merged_count - 1].typ(),
);
} else {
result.regions[merged_count] = result[i];
merged_count += 1;
} }
} }
result.count = merged_count;
// `regions_*` are 2 rolling vectors since we are going to truncate result
// the regions in a iterative manner. }
let mut regions = MemoryRegionArray::<LEN>::new(); }
let regions_src = &mut regions_usable;
let regions_dst = &mut regions; #[cfg(ktest)]
// Truncate the usable regions. mod test {
for r_unusable in regions_unusable.iter() { use super::*;
regions_dst.clear(); use crate::prelude::ktest;
for r_usable in regions_src.iter() {
for truncated in r_usable.truncate(r_unusable).iter() { #[ktest]
let _ = regions_dst.push(*truncated); fn test_sort_full_non_overlapping() {
} let mut regions = MemoryRegionArray::<64>::new();
} // Regions that can be combined.
core::mem::swap(regions_src, regions_dst); regions
} .push(MemoryRegion::new(
0,
// Combine all the regions processed. PAGE_SIZE + 1,
let mut all_regions = regions_unusable; MemoryRegionType::Usable,
for r in regions_usable.iter() { ))
let _ = all_regions.push(*r); .unwrap();
} regions
all_regions .push(MemoryRegion::new(
PAGE_SIZE - 1,
PAGE_SIZE + 2,
MemoryRegionType::Usable,
))
.unwrap();
regions
.push(MemoryRegion::new(
PAGE_SIZE * 2,
PAGE_SIZE * 5,
MemoryRegionType::Usable,
))
.unwrap();
// A punctured region.
regions
.push(MemoryRegion::new(
PAGE_SIZE * 3 + 1,
PAGE_SIZE - 2,
MemoryRegionType::BadMemory,
))
.unwrap();
// A far region that left a hole in the middle.
regions
.push(MemoryRegion::new(
PAGE_SIZE * 9,
PAGE_SIZE * 2,
MemoryRegionType::Usable,
))
.unwrap();
let regions = regions.into_non_overlapping();
assert_eq!(regions.count, 5);
assert_eq!(regions[0].base(), 0);
assert_eq!(regions[0].len(), PAGE_SIZE * 3);
assert_eq!(regions[0].typ(), MemoryRegionType::Usable);
assert_eq!(regions[1].base(), PAGE_SIZE * 3);
assert_eq!(regions[1].len(), PAGE_SIZE);
assert_eq!(regions[1].typ(), MemoryRegionType::BadMemory);
assert_eq!(regions[2].base(), PAGE_SIZE * 4);
assert_eq!(regions[2].len(), PAGE_SIZE * 3);
assert_eq!(regions[2].typ(), MemoryRegionType::Usable);
assert_eq!(regions[3].base(), PAGE_SIZE * 7);
assert_eq!(regions[3].len(), PAGE_SIZE * 2);
assert_eq!(regions[3].typ(), MemoryRegionType::Unknown);
assert_eq!(regions[4].base(), PAGE_SIZE * 9);
assert_eq!(regions[4].len(), PAGE_SIZE * 2);
assert_eq!(regions[4].typ(), MemoryRegionType::Usable);
} }
} }