Mirror of https://github.com/DragonOS-Community/DragonOS.git, synced 2025-06-08 14:16:47 +00:00
fix: Resolve the oversized stack frame in the function that creates the buddy allocator (#1189)
* fix: Resolve the oversized stack frame in the function that creates the buddy allocator
Signed-off-by: longjin <longjin@DragonOS.org>
* chore(kernel): Remove smoltcp's log dependency
Signed-off-by: longjin <longjin@DragonOS.org>
---------
Signed-off-by: longjin <longjin@DragonOS.org>
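The central trick in the buddy-allocator part of this commit is moving a large temporary array out of the constructor's stack frame into a static buffer, and marking the constructor `#[inline(never)]` so its frame is not folded into the caller. Below is a minimal sketch of that pattern for illustration only; the `Area` type and the function names are invented stand-ins, not the DragonOS code (the real change is in the BuddyAllocator hunks further down).

// Illustrative only: not DragonOS code. `Area`, `SCRATCH`, and the function
// names are made-up stand-ins for the pattern used in the commit.
#[derive(Clone, Copy, Default)]
struct Area {
    base: usize,
    size: usize,
}

// Before: a 128-element scratch buffer lives in the function's stack frame,
// which can be too much for a small early-boot stack.
fn init_on_stack() -> usize {
    let scratch = [Area::default(); 128];
    scratch.len()
}

// After: the buffer is a `static mut`, so it lives in .bss instead of the
// stack; `#[inline(never)]` keeps whatever frame remains out of the caller.
#[inline(never)]
fn init_with_static() -> usize {
    static mut SCRATCH: [Area; 128] = [Area { base: 0, size: 0 }; 128];
    // SAFETY: mirrors the kernel's situation: called once, single-threaded,
    // during early memory-manager initialization.
    let scratch = unsafe { &mut *core::ptr::addr_of_mut!(SCRATCH) };
    scratch.len()
}

fn main() {
    assert_eq!(init_on_stack(), init_with_static());
}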
This commit is contained in:
parent 996150bbc4
commit e696ba4440
kernel/Cargo.lock (generated)
@@ -1543,7 +1543,6 @@ dependencies = [
  "cfg-if",
  "defmt",
  "heapless",
- "log",
  "managed",
 ]
@@ -49,7 +49,6 @@ num = { version = "=0.4.0", default-features = false }
 num-derive = "=0.3"
 num-traits = { git = "https://git.mirrors.dragonos.org.cn/DragonOS-Community/num-traits.git", rev = "1597c1c", default-features = false }
 smoltcp = { version = "=0.11.0", default-features = false, features = [
-    "log",
     "alloc",
     "socket-raw",
     "socket-udp",
@@ -151,11 +151,16 @@ pub struct X86_64SmpManager {
 }

 impl X86_64SmpManager {
-    pub const fn new() -> Self {
+    /// Create a new X86_64SmpManager instance
+    ///
+    /// Note: since this function is only called at compile time, `#[allow(clippy::large_stack_frames)]` is safe here.
+    #[allow(clippy::large_stack_frames)]
+    const fn new() -> Self {
         return Self {
             ia64_cpu_to_sapicid: RwLock::new([None; PerCpu::MAX_CPU_NUM as usize]),
         };
     }

     /// initialize the logical cpu number to APIC ID mapping
     pub fn build_cpu_map(&self) -> Result<(), SystemError> {
         // Reference: https://code.dragonos.org.cn/xref/linux-6.1.9/arch/ia64/kernel/smpboot.c?fi=smp_build_cpu_map#496
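The `#[allow(clippy::large_stack_frames)]` added here (and again in the EarlyIoRemapPages hunk below) leans on the reasoning stated in the doc comment: the constructor is only evaluated in a const context, so the large array is computed at compile time and emitted into the binary's data section rather than being built on a runtime stack. A self-contained sketch of that pattern, for illustration only (the `Table` type and the `MAX_ENTRIES` value are invented, not DragonOS identifiers):

// Illustrative only: a large const-evaluated initializer that clippy may
// flag as a large stack frame, silenced because it never runs on a runtime stack.
const MAX_ENTRIES: usize = 4096;

#[derive(Clone, Copy)]
struct Entry(Option<u32>);

struct Table {
    entries: [Entry; MAX_ENTRIES],
}

impl Table {
    #[allow(clippy::large_stack_frames)]
    const fn new() -> Self {
        Self {
            entries: [Entry(None); MAX_ENTRIES],
        }
    }
}

// Evaluated at compile time: the 4096-entry array is baked into the binary,
// and `Table::new()` never executes at runtime.
static TABLE: Table = Table::new();

fn main() {
    assert_eq!(TABLE.entries.len(), MAX_ENTRIES);
}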
@@ -77,11 +77,12 @@ impl<A: MemoryManagementArch> BuddyAllocator<A> {
     // Define a variable recording the size of the buddy table
         (A::PAGE_SIZE - mem::size_of::<PageList<A>>()) / mem::size_of::<PhysAddr>();

+    #[inline(never)]
     pub unsafe fn new(mut bump_allocator: BumpAllocator<A>) -> Option<Self> {
         let initial_free_pages = bump_allocator.usage().free();
         let total_memory = bump_allocator.usage().total();
         debug!("Free pages before init buddy: {:?}", initial_free_pages);
-        debug!("Buddy entries: {}", Self::BUDDY_ENTRIES);
+        // debug!("Buddy entries: {}", Self::BUDDY_ENTRIES);

         let mut free_area: [PhysAddr; MAX_ORDER - MIN_ORDER] =
             [PhysAddr::new(0); MAX_ORDER - MIN_ORDER];
@@ -105,12 +106,12 @@ impl<A: MemoryManagementArch> BuddyAllocator<A> {
         };

         let mut total_pages_to_buddy = PageFrameCount::new(0);
-        let mut res_areas = [PhysMemoryArea::default(); 128];
+        static mut RES_AREAS: [PhysMemoryArea; 128] = [PhysMemoryArea::DEFAULT; 128];
         let mut offset_in_remain_area = bump_allocator
-            .remain_areas(&mut res_areas)
+            .remain_areas(&mut RES_AREAS)
             .expect("BuddyAllocator: failed to get remain areas from bump allocator");

-        let remain_areas = &res_areas[0..];
+        let remain_areas = &RES_AREAS[0..];

         for area in remain_areas {
             let mut paddr = (area.area_base_aligned() + offset_in_remain_area).data();
@@ -120,7 +121,7 @@ impl<A: MemoryManagementArch> BuddyAllocator<A> {
             if remain_pages.data() == 0 {
                 continue;
             }
-            debug!("area: {area:?}, paddr: {paddr:#x}, remain_pages: {remain_pages:?}");
+            // debug!("area: {area:?}, paddr: {paddr:#x}, remain_pages: {remain_pages:?}");

             total_pages_to_buddy += remain_pages;

@@ -51,7 +51,13 @@ pub struct EarlyIoRemapPages {
 impl EarlyIoRemapPages {
     /// Number of page tables reserved for mapping memory before memory management is initialized
     pub const EARLY_REMAP_PAGES_NUM: usize = 256;
-    pub const fn new() -> Self {
+
+    /// Create a new EarlyIoRemapPages instance
+    ///
+    /// # Safety
+    /// Since this function is only called at compile time, `#[allow(clippy::large_stack_frames)]` is safe here.
+    #[allow(clippy::large_stack_frames)]
+    const fn new() -> Self {
         Self {
             pages: [EarlyRemapPage {
                 data: [0; MMArch::PAGE_SIZE],