diff --git a/ostd/libs/linux-bzimage/setup/src/main.rs b/ostd/libs/linux-bzimage/setup/src/main.rs
index 403f6da46..2e411b987 100644
--- a/ostd/libs/linux-bzimage/setup/src/main.rs
+++ b/ostd/libs/linux-bzimage/setup/src/main.rs
@@ -27,15 +27,14 @@ use linux_boot_params::BootParams;
 mod console;
 mod loader;
 
-// Unfortunately, the entrypoint is not defined here in the main.rs file.
-// See the exported functions in the x86 module for details.
+// The entry points are defined in `x86/*/setup.S`.
 mod x86;
 
 fn get_payload(boot_params: &BootParams) -> &'static [u8] {
     let hdr = &boot_params.hdr;
     // The payload_offset field is not recorded in the relocation table, so we need to
     // calculate the loaded offset manually.
-    let loaded_offset = x86::get_image_loaded_offset();
+    let loaded_offset = x86::image_load_offset();
     let payload_offset = (loaded_offset + hdr.payload_offset as isize) as usize;
     let payload_length = hdr.payload_length as usize;
     // SAFETY: the payload_offset and payload_length is valid if we assume that the
diff --git a/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/efi.rs b/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/efi.rs
index 7589b9bf5..071f5a226 100644
--- a/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/efi.rs
+++ b/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/efi.rs
@@ -13,24 +13,8 @@ use super::{decoder::decode_payload, relocation::apply_rela_relocations};
 
 const PAGE_SIZE: u64 = 4096;
 
-// Suppress warnings since using todo!.
-#[expect(unreachable_code)]
-#[expect(unused_variables)]
-#[expect(clippy::diverging_sub_expression)]
-#[export_name = "efi_stub_entry"]
-extern "sysv64" fn efi_stub_entry(handle: Handle, system_table: *const SystemTable) -> ! {
-    // SAFETY: handle and system_table are valid pointers. It is only called once.
-    unsafe { system_init(handle, system_table) };
-
-    uefi::helpers::init().unwrap();
-
-    let boot_params = todo!("Use EFI boot services to fill boot params");
-
-    efi_phase_boot(boot_params);
-}
-
-#[export_name = "efi_handover_entry"]
-extern "sysv64" fn efi_handover_entry(
+#[export_name = "main_efi_handover64"]
+extern "sysv64" fn main_efi_handover64(
     handle: Handle,
     system_table: *const SystemTable,
     boot_params_ptr: *mut BootParams,
@@ -74,7 +58,7 @@ fn efi_phase_boot(boot_params: &mut BootParams) -> ! {
     uefi::println!("[EFI stub] Relocations applied.");
     uefi::println!(
         "[EFI stub] Stub loaded at {:#x?}",
-        crate::x86::get_image_loaded_offset()
+        crate::x86::image_load_offset()
     );
 
     // Fill the boot params with the RSDP address if it is not provided.
diff --git a/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/header.S b/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/header.S
deleted file mode 100644
index 863716132..000000000
--- a/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/header.S
+++ /dev/null
@@ -1,61 +0,0 @@
-/* SPDX-License-Identifier: MPL-2.0 */
-
-// The compatibility file for the Linux x86 Boot Protocol.
-// See https://www.kernel.org/doc/html/v5.6/x86/boot.html for
-// more information on the Linux x86 Boot Protocol.
-
-// Some of the fields filled with a 0xab* values should be filled
-// by the torjan builder.
-// Asterinas will use only a few of these fields, and some of them
-// are filled by the loader and will be read by Asterinas.
-
-.section ".header", "a"
-CODE32_START = 0x100000
-SETUP_SECTS = 7 # so that the legacy setup could occupy a page
-SETUP_SECTS_SIZE = 0x200 * (SETUP_SECTS + 1)
-.code16
-.org 0x01f1
-hdr_start:
-setup_sects: .byte SETUP_SECTS
-root_flags: .word 1
-syssize: .long 0
-ram_size: .word 0
-vid_mode: .word 0xfffd
-root_dev: .word 0
-boot_flag: .word 0xAA55
-jump: .byte 0xeb
-jump_addr: .byte hdr_end-jump_addr
-magic: .ascii "HdrS"
-    .word 0x020f
-realmode_swtch: .word 0, 0
-start_sys_seg: .word 0
-    .word 0
-type_of_loader: .byte 0
-loadflags: .byte (1 << 0)
-setup_move_size: .word 0
-code32_start: .long CODE32_START
-ramdisk_image: .long 0
-ramdisk_size: .long 0
-bootsect_kludge: .long 0
-heap_end_ptr: .word 65535
-ext_loader_ver: .byte 0
-ext_loader_type: .byte 0
-cmd_line_ptr: .long 0
-initrd_addr_max: .long 0x7fffffff
-kernel_alignment: .long 0x1000000
-relocatable_kernel: .byte 0
-min_alignment: .byte 0x10
-xloadflags: .word 0b01111 # all handover protocols except kexec
-cmdline_size: .long 4096-1
-hardware_subarch: .long 0
-hardware_subarch_data: .quad 0
-payload_offset: .long 0xabababab # at 0x248/4, to be filled by the builder
-payload_length: .long 0xabababab # at 0x24c/4, to be filled by the builder
-setup_data: .quad 0
-pref_address: .quad CODE32_START - SETUP_SECTS_SIZE
-init_size: .long 0xabababab # at 0x260/4, to be filled by the builder
-# The handover_offset should be efi_handover_setup_entry - CODE32_START - 0x200
-# But we use ABI workaround to avoid the relocation of efi_handover_setup_entry
-handover_offset: .long 0x10
-kernel_info_offset: .long 0
-hdr_end:
diff --git a/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/linker.ld b/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/linker.ld
index c1f0bf1b3..2a297b6fa 100644
--- a/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/linker.ld
+++ b/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/linker.ld
@@ -1,4 +1,4 @@
-ENTRY(efi_handover_setup_entry)
+ENTRY(entry_efi_pe64)
 OUTPUT_ARCH(i386:x86-64)
 OUTPUT_FORMAT(elf64-x86-64)
 
diff --git a/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/mod.rs b/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/mod.rs
index 2b15a197f..5f2e0d8bf 100644
--- a/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/mod.rs
+++ b/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/mod.rs
@@ -6,8 +6,6 @@ mod relocation;
 
 use core::arch::{asm, global_asm};
 
-global_asm!(include_str!("header.S"));
-
 global_asm!(include_str!("setup.S"));
 
 pub const ASTER_ENTRY_POINT: u32 = 0x8001200;
diff --git a/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/relocation.rs b/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/relocation.rs
index 7fc931f26..9b8acde82 100644
--- a/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/relocation.rs
+++ b/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/relocation.rs
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0
 
-use crate::x86::get_image_loaded_offset;
+use crate::x86::image_load_offset;
 
 /// Apply the relocations in the `.rela.*` sections.
 ///
@@ -18,7 +18,7 @@ use crate::x86::get_image_loaded_offset;
 /// Failure to do relocations will cause `dyn Trait` objects to break.
 pub unsafe fn apply_rela_relocations() {
     use core::arch::asm;
-    let image_loaded_offset = get_image_loaded_offset();
+    let image_loaded_offset = image_load_offset();
 
     let mut start: usize;
     let end: usize;
diff --git a/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/setup.S b/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/setup.S
index 89db78840..b6ade119a 100644
--- a/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/setup.S
+++ b/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/setup.S
@@ -1,43 +1,74 @@
 /* SPDX-License-Identifier: MPL-2.0 */
 
+// The load address of the setup section is CODE32_START (0x100000).
+// See the linker script.
 .section ".setup", "ax"
+
+.code32
+.global entry_legacy32
+entry_legacy32:
+    // This is the 32-bit Linux legacy entry point.
+
+    // Not supported. However, there doesn't seem to be a way to disable this
+    // entry point in the header, so provide a dummy implementation here.
+    hlt
+    jmp entry_legacy32
+
+.global entry_efi_handover32
+entry_efi_handover32:
+    // This is the 32-bit EFI handover entry point.
+
+    // Not supported. This entry point is not enabled in the header, so it
+    // should not be reachable. We declare the entry point anyway, because
+    // its offset is needed in the header. We provide a dummy implementation
+    // just in case.
+    jmp entry_legacy32
+
+// The 64-bit Linux legacy entry point must be 0x200 bytes after the 32-bit
+// one. This is required by the x86 Linux boot protocol.
+.skip 0x200 - (. - entry_legacy32)
+
 .code64
-// start_of_setup32 should be loaded at CODE32_START, which is our base.
-.global start_of_setup32
-start_of_setup32:
+entry_legacy64:
+    // This is the 64-bit Linux legacy entry point.
 
-// `efi_handover_setup_entry64` should be at efi_handover_setup_entry32 + 0x200, but
-// we could provide the 32 bit dummy entry point as the 64 bit entry point - 0x200
-// since we do not provide 32-bit entry point in the x86_64 specific implementation.
-.org 0x210
-.global efi_handover_setup_entry
-efi_handover_setup_entry:
-    // The 3 parameters of is stored in rdi, rsi and rdx (sysv64).
-    // Do not use them.
+    // Not supported. We need to enable this entry point in the header,
+    // otherwise the boot loader will think the kernel does not support
+    // 64-bit.
+    jmp halt
 
-    // Setup the stack.
-    lea rsp, [rip + setup_stack_top]
-    lea rax, [rip + halt]
-    push rax # the return address
-    mov rbp, rsp
-    add rbp, -4
-    push rbp
-    mov rbp, rsp
+// The 64-bit EFI handover entry point must be 0x200 bytes after the 32-bit
+// one. This is required by the x86 Linux boot protocol.
+.skip 0x200 - (. - entry_efi_handover32)
 
-.extern efi_handover_entry
-    lea rax, [rip + efi_handover_entry]
-    call rax
+entry_efi_handover64:
+    // This is the 64-bit EFI handover entry point.
+    //
+    // Arguments:
+    //    RDI: void *handle
+    //    RSI: efi_system_table_t *table
+    //    RDX: struct boot_params *bp
+
+    // We can reuse the stack provided by the UEFI firmware until a short time
+    // after exiting the UEFI boot services. So we don't build our own stack.
+    //
+    // But the stack must be 16-byte aligned! So we drop the return address.
+    add rsp, 8
+
+    // Call the Rust main routine.
+    call main_efi_handover64
+
+    // The main routine should not return. If it does, there is nothing we can
+    // do but stop the machine.
+    jmp halt
+
+.global entry_efi_pe64
+entry_efi_pe64:
+    // This is the 64-bit EFI PE/COFF entry point.
+
+    // Not supported yet. Just stop the machine.
+    jmp halt
 
-    // Unreachable here.
 halt:
     hlt
     jmp halt
-
-// A small stack for the setup code.
-.section .data
-.align 0x1000 / 8
-.global setup_stack
-setup_stack:
-    .skip 0x1000
-.global setup_stack_top
-setup_stack_top:
\ No newline at end of file
diff --git a/ostd/libs/linux-bzimage/setup/src/x86/legacy_i386/header.S b/ostd/libs/linux-bzimage/setup/src/x86/header.S
similarity index 68%
rename from ostd/libs/linux-bzimage/setup/src/x86/legacy_i386/header.S
rename to ostd/libs/linux-bzimage/setup/src/x86/header.S
index 199f9b399..c7d262410 100644
--- a/ostd/libs/linux-bzimage/setup/src/x86/legacy_i386/header.S
+++ b/ostd/libs/linux-bzimage/setup/src/x86/header.S
@@ -10,9 +10,13 @@
 // are filled by the loader and will be read by Asterinas.
 
 .section ".header", "a"
+
 CODE32_START = 0x100000
-SETUP_SECTS = 7 # so that the legacy setup could occupy a page
-.code16
+
+# Real-mode setup sectors. We don't use them. Their size is set to one page.
+SETUP_SECTS = 7
+SETUP_SECTS_SIZE = 0x200 * (SETUP_SECTS + 1)
+
 .org 0x01f1
 hdr_start:
 setup_sects: .byte SETUP_SECTS
@@ -23,14 +27,14 @@ vid_mode: .word 0xfffd
 root_dev: .word 0
 boot_flag: .word 0xAA55
 jump: .byte 0xeb
-jump_addr: .byte hdr_end-jump_addr
+jump_addr: .byte hdr_end - jump_addr
 magic: .ascii "HdrS"
     .word 0x020f
 realmode_swtch: .word 0, 0
 start_sys_seg: .word 0
     .word 0
 type_of_loader: .byte 0
-loadflags: .byte (1 << 0)
+loadflags: .byte (1 << 0) # LOADED_HIGH
 setup_move_size: .word 0
 code32_start: .long CODE32_START
 ramdisk_image: .long 0
@@ -44,15 +48,32 @@ initrd_addr_max: .long 0x7fffffff
 kernel_alignment: .long 0x1000000
 relocatable_kernel: .byte 0
 min_alignment: .byte 0x10
+
+.if {CFG_TARGET_ARCH_X86_64}
+# Note that we don't actually support the legacy 64-bit entry point
+# (XLF_KERNEL_64). But we have to specify it, otherwise the boot loader
+# will think this kernel does not have 64-bit support.
+xloadflags: .word 0b01011 # Bit 0: XLF_KERNEL_64
+                          # Bit 1: XLF_CAN_BE_LOADED_ABOVE_4G
+                          # Bit 3: XLF_EFI_HANDOVER_64
+.else
 xloadflags: .word 0
-cmdline_size: .long 4096-1
+.endif
+
+cmdline_size: .long 4096 - 1
 hardware_subarch: .long 0
 hardware_subarch_data: .quad 0
 payload_offset: .long 0xabababab # at 0x248/4, to be filled by the builder
 payload_length: .long 0xabababab # at 0x24c/4, to be filled by the builder
 setup_data: .quad 0
-pref_address: .quad CODE32_START - 0x200 * (SETUP_SECTS + 1);
+pref_address: .quad CODE32_START
 init_size: .long 0xabababab # at 0x260/4, to be filled by the builder
+
+.if {CFG_TARGET_ARCH_X86_64}
+handover_offset: .long (entry_efi_handover32 - entry_legacy32)
+.else
 handover_offset: .long 0
+.endif
+
 kernel_info_offset: .long 0
 hdr_end:
diff --git a/ostd/libs/linux-bzimage/setup/src/x86/legacy_i386/mod.rs b/ostd/libs/linux-bzimage/setup/src/x86/legacy_i386/mod.rs
index 9795c252c..8f276c009 100644
--- a/ostd/libs/linux-bzimage/setup/src/x86/legacy_i386/mod.rs
+++ b/ostd/libs/linux-bzimage/setup/src/x86/legacy_i386/mod.rs
@@ -4,23 +4,21 @@ use core::arch::{asm, global_asm};
 
 use linux_boot_params::BootParams;
 
-global_asm!(include_str!("header.S"));
-
 global_asm!(include_str!("setup.S"));
 
 use crate::console::{print_hex, print_str};
 
 pub const ASTER_ENTRY_POINT: u32 = 0x8001000;
 
-#[export_name = "_bzimage_entry_32"]
-extern "cdecl" fn bzimage_entry(boot_params_ptr: u32) -> ! {
+#[export_name = "main_legacy32"]
+extern "cdecl" fn main_legacy32(boot_params_ptr: u32) -> ! {
     // SAFETY: this init function is only called once.
     unsafe { crate::console::init() };
 
-    // println!("[setup] bzImage loaded at {:#x}", x86::relocation::get_image_loaded_offset());
+    // println!("[setup] bzImage loaded at {:#x}", x86::relocation::image_load_offset());
     unsafe {
         print_str("[setup] bzImage loaded offset: ");
-        print_hex(crate::x86::get_image_loaded_offset() as u64);
+        print_hex(crate::x86::image_load_offset() as u64);
         print_str("\n");
     }
 
diff --git a/ostd/libs/linux-bzimage/setup/src/x86/legacy_i386/setup.S b/ostd/libs/linux-bzimage/setup/src/x86/legacy_i386/setup.S
index 9d75959d5..c0bd10c52 100644
--- a/ostd/libs/linux-bzimage/setup/src/x86/legacy_i386/setup.S
+++ b/ostd/libs/linux-bzimage/setup/src/x86/legacy_i386/setup.S
@@ -1,25 +1,31 @@
 /* SPDX-License-Identifier: MPL-2.0 */
 
-// 32-bit setup code starts here, and will be loaded at CODE32_START.
+// The load address of the setup section is CODE32_START (0x100000).
+// See the linker script.
 .section ".setup", "ax"
+
 .code32
-.global start_of_setup32
-start_of_setup32:
-    mov eax, offset __stack_top
-    mov esp, eax
-    mov eax, offset halt
-    push eax # the return address
-    mov ebp, esp
-    add ebp, -4
-    push ebp
-    mov ebp, esp
+.global entry_legacy32
+entry_legacy32:
+    // This is the 32-bit Linux legacy entry point.
+    //
+    // Arguments:
+    //    ESI: struct boot_params *bp
 
-// The rust entrypoint of the bzImage
-.extern _bzimage_entry_32
-    push esi # the boot_params pointer
-    call _bzimage_entry_32
+    // Set up the stack.
+    mov esp, offset __stack_top
+
+    // Call the Rust main routine.
+    push esi
+    call main_legacy32
+
+    // The main routine should not return. If it does, there is nothing we can
+    // do but stop the machine.
+    jmp halt
+
+// All other types of entry points are not enabled in the header.
+// So we don't care about them.
 
-    // Unreachable here.
 halt:
     hlt
     jmp halt
diff --git a/ostd/libs/linux-bzimage/setup/src/x86/mod.rs b/ostd/libs/linux-bzimage/setup/src/x86/mod.rs
index 8a04c7c50..6b085a398 100644
--- a/ostd/libs/linux-bzimage/setup/src/x86/mod.rs
+++ b/ostd/libs/linux-bzimage/setup/src/x86/mod.rs
@@ -1,38 +1,34 @@
 // SPDX-License-Identifier: MPL-2.0
 
+use core::arch::global_asm;
+
 cfg_if::cfg_if! {
     if #[cfg(target_arch = "x86_64")] {
         mod amd64_efi;
+
+        const CFG_TARGET_ARCH_X86_64: usize = 1;
     } else if #[cfg(target_arch = "x86")] {
         mod legacy_i386;
+
+        const CFG_TARGET_ARCH_X86_64: usize = 0;
     } else {
-        compile_error!("Unsupported target_arch");
+        compile_error!("unsupported target architecture");
     }
 }
 
-// This is enforced in the linker script of the setup.
-const START_OF_SETUP32_VA: usize = 0x100000;
+global_asm!(
+    include_str!("header.S"),
+    CFG_TARGET_ARCH_X86_64 = const CFG_TARGET_ARCH_X86_64,
+);
 
-/// The setup is a position-independent executable. We can get the loaded base
-/// address from the symbol.
-#[inline]
-pub fn get_image_loaded_offset() -> isize {
-    let address_of_start: usize;
-    #[cfg(target_arch = "x86_64")]
-    unsafe {
-        core::arch::asm!(
-            "lea {}, [rip + start_of_setup32]",
-            out(reg) address_of_start,
-            options(pure, nomem, nostack)
-        );
+/// Returns the difference between the real load address and the one in the linker script.
+pub fn image_load_offset() -> isize {
+    /// The load address of the `entry_legacy32` symbol specified in the linker script.
+    const CODE32_START: isize = 0x100000;
+
+    extern "C" {
+        fn entry_legacy32();
     }
-    #[cfg(target_arch = "x86")]
-    unsafe {
-        core::arch::asm!(
-            "lea {}, [start_of_setup32]",
-            out(reg) address_of_start,
-            options(pure, nomem, nostack)
-        );
-    }
-    address_of_start as isize - START_OF_SETUP32_VA as isize
+
+    (entry_legacy32 as usize as isize) - CODE32_START
 }
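Note on the shared header: the new `header.S` is assembled through `global_asm!` with a `const` operand, so the `.if {CFG_TARGET_ARCH_X86_64}` directives are resolved at build time from the Rust constant defined in `x86/mod.rs`. Below is a minimal sketch of that mechanism; `CFG_EXAMPLE_FLAG`, the label, and the values are made up for illustration and are not taken from the real header.

    use core::arch::global_asm;

    // Stand-in for CFG_TARGET_ARCH_X86_64; any integer usable by `.if` works.
    const CFG_EXAMPLE_FLAG: usize = 1;

    global_asm!(
        // `{CFG_EXAMPLE_FLAG}` is substituted with the constant bound below, so
        // the assembler sees `.if 1` (or `.if 0`) and keeps exactly one branch.
        ".if {CFG_EXAMPLE_FLAG}",
        "example_flags: .word 1",
        ".else",
        "example_flags: .word 0",
        ".endif",
        CFG_EXAMPLE_FLAG = const CFG_EXAMPLE_FLAG,
    );

This substitution is what lets `x86/mod.rs` feed an architecture flag into one header file instead of keeping two near-identical copies of `header.S`.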
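Note on the entry-point layout: the new `setup.S` files pin each 64-bit entry point exactly 0x200 bytes after its 32-bit counterpart, and `header.S` now computes `handover_offset` as `entry_efi_handover32 - entry_legacy32`. The sketch below shows the loader-side arithmetic this layout is built for; the function is hypothetical and written only to illustrate the offsets, with the 0x10 value taken from the old hard-coded header.

    /// Hypothetical loader-side computation of the EFI handover entry point.
    const CODE32_START: u64 = 0x100000; // load address of the protected-mode code

    fn efi_handover_entry(handover_offset: u64, is_64_bit: bool) -> u64 {
        // 32-bit kernels are entered at CODE32_START + handover_offset; 64-bit
        // kernels 0x200 bytes later, which is why the new setup.S uses `.skip`
        // to place the 64-bit entries 0x200 bytes after the 32-bit ones.
        CODE32_START + handover_offset + if is_64_bit { 0x200 } else { 0 }
    }

    fn main() {
        // With the old header's hard-coded `handover_offset: .long 0x10`, this
        // yields 0x100210, matching the old `.org 0x210` entry in the deleted
        // amd64_efi/setup.S.
        println!("{:#x}", efi_handover_entry(0x10, true));
    }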