Make user program relocation code readable

Author:       Zhang Junyang
Date:         2025-04-08 00:09:52 +08:00
Committed by: Jianfeng Jiang
Parent:       35e0918bce
Commit:       93015a6090

3 changed files with 155 additions and 96 deletions

[File 1 of 3]

@@ -118,8 +118,8 @@ fn create_init_task(
     };
     let mut user_ctx = UserContext::default();
-    user_ctx.set_instruction_pointer(elf_load_info.entry_point() as _);
-    user_ctx.set_stack_pointer(elf_load_info.user_stack_top() as _);
+    user_ctx.set_instruction_pointer(elf_load_info.entry_point as _);
+    user_ctx.set_stack_pointer(elf_load_info.user_stack_top as _);
     let thread_name = Some(ThreadName::new_from_executable_path(executable_path)?);
     let thread_builder = PosixThreadBuilder::new(tid, Arc::new(user_ctx), credentials)
         .thread_name(thread_name)

[File 2 of 3]

@@ -1,10 +1,10 @@
 // SPDX-License-Identifier: MPL-2.0

-#![expect(dead_code)]
-
 //! This module is used to parse elf file content to get elf_load_info.
 //! When create a process from elf file, we will use the elf_load_info to construct the VmSpace

+use core::ops::Range;
+
 use align_ext::AlignExt;
 use aster_rights::Full;
 use ostd::{
@@ -51,7 +51,7 @@ pub fn load_elf_to_vm(
     let ldso = lookup_and_parse_ldso(&parsed_elf, file_header, fs_resolver)?;

     match init_and_map_vmos(process_vm, ldso, &parsed_elf, &elf_file) {
-        Ok((entry_point, mut aux_vec)) => {
+        Ok((_range, entry_point, mut aux_vec)) => {
             // Map and set vdso entry.
             // Since vdso does not require being mapped to any specific address,
             // vdso is mapped after the elf file, heap and stack are mapped.
@@ -67,6 +67,7 @@ pub fn load_elf_to_vm(
             Ok(ElfLoadInfo {
                 entry_point,
                 user_stack_top,
+                _private: (),
             })
         }
         Err(err) => {
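
The `_private: ()` field added to `ElfLoadInfo` is a common Rust idiom rather than anything specific to this codebase: keeping one private, zero-sized field in an otherwise all-public struct prevents downstream code from building the struct with a literal, so construction stays behind the defining module's own functions. A minimal sketch of the pattern, with hypothetical names and values:

mod loader {
    pub struct ElfLoadInfo {
        pub entry_point: usize,
        pub user_stack_top: usize,
        // Zero-sized and private: other modules can read the public fields
        // but cannot construct an `ElfLoadInfo` literal themselves.
        _private: (),
    }

    pub fn load() -> ElfLoadInfo {
        ElfLoadInfo {
            entry_point: 0x1040,
            user_stack_top: 0x7fff_0000,
            _private: (),
        }
    }
}

fn main() {
    let info = loader::load();
    println!("entry = {:#x}", info.entry_point); // reading public fields is fine
    // loader::ElfLoadInfo { entry_point: 0, user_stack_top: 0, _private: () }
    // ^ would not compile outside `loader`: field `_private` is private
}

This is why the commit can expose `entry_point` and `user_stack_top` as plain `pub` fields, dropping the old getters, without giving up control over construction.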
@@ -110,19 +111,28 @@ fn lookup_and_parse_ldso(
 }

 fn load_ldso(root_vmar: &Vmar<Full>, ldso_file: &Dentry, ldso_elf: &Elf) -> Result<LdsoLoadInfo> {
-    let map_addr = map_segment_vmos(ldso_elf, root_vmar, ldso_file)?;
-    Ok(LdsoLoadInfo::new(
-        ldso_elf.entry_point() + map_addr,
-        map_addr,
-    ))
+    let range = map_segment_vmos(ldso_elf, root_vmar, ldso_file)?;
+    Ok(LdsoLoadInfo {
+        entry_point: range
+            .relocated_addr_of(ldso_elf.entry_point())
+            .ok_or(Error::with_message(
+                Errno::ENOEXEC,
+                "The entry point is not in the mapped range",
+            ))?,
+        range,
+        _private: (),
+    })
 }

+/// Initializes the VM space and maps the VMO to the corresponding virtual memory address.
+///
+/// Returns the mapped range, the entry point and the auxiliary vector.
 fn init_and_map_vmos(
     process_vm: &ProcessVm,
     ldso: Option<(Dentry, Elf)>,
     parsed_elf: &Elf,
     elf_file: &Dentry,
-) -> Result<(Vaddr, AuxVec)> {
+) -> Result<(RelocatedRange, Vaddr, AuxVec)> {
     let process_vmar = process_vm.lock_root_vmar();
     let root_vmar = process_vmar.unwrap();
@@ -133,118 +143,167 @@ fn init_and_map_vmos(
         None
     };

-    let elf_map_addr = map_segment_vmos(parsed_elf, root_vmar, elf_file)?;
+    let elf_map_range = map_segment_vmos(parsed_elf, root_vmar, elf_file)?;

     let aux_vec = {
         let ldso_base = ldso_load_info
             .as_ref()
-            .map(|load_info| load_info.base_addr());
-        init_aux_vec(parsed_elf, elf_map_addr, ldso_base)?
+            .map(|load_info| load_info.range.relocated_start);
+        init_aux_vec(parsed_elf, elf_map_range.relocated_start, ldso_base)?
     };

     let entry_point = if let Some(ldso_load_info) = ldso_load_info {
         // Normal shared object
-        ldso_load_info.entry_point()
-    } else if parsed_elf.is_shared_object() {
-        // ldso itself
-        parsed_elf.entry_point() + elf_map_addr
+        ldso_load_info.entry_point
     } else {
-        // statically linked executable
-        parsed_elf.entry_point()
+        elf_map_range
+            .relocated_addr_of(parsed_elf.entry_point())
+            .ok_or(Error::with_message(
+                Errno::ENOEXEC,
+                "The entry point is not in the mapped range",
+            ))?
     };
-    Ok((entry_point, aux_vec))
+
+    Ok((elf_map_range, entry_point, aux_vec))
 }

 pub struct LdsoLoadInfo {
-    entry_point: Vaddr,
-    base_addr: Vaddr,
-}
-
-impl LdsoLoadInfo {
-    pub fn new(entry_point: Vaddr, base_addr: Vaddr) -> Self {
-        Self {
-            entry_point,
-            base_addr,
-        }
-    }
-
-    pub fn entry_point(&self) -> Vaddr {
-        self.entry_point
-    }
-
-    pub fn base_addr(&self) -> Vaddr {
-        self.base_addr
-    }
+    /// Relocated entry point.
+    pub entry_point: Vaddr,
+    /// The range covering all the mapped segments.
+    ///
+    /// May not be page-aligned.
+    pub range: RelocatedRange,
+    _private: (),
 }

 pub struct ElfLoadInfo {
-    entry_point: Vaddr,
-    user_stack_top: Vaddr,
-}
-
-impl ElfLoadInfo {
-    pub fn new(entry_point: Vaddr, user_stack_top: Vaddr) -> Self {
-        Self {
-            entry_point,
-            user_stack_top,
-        }
-    }
-
-    pub fn entry_point(&self) -> Vaddr {
-        self.entry_point
-    }
-
-    pub fn user_stack_top(&self) -> Vaddr {
-        self.user_stack_top
-    }
+    /// Relocated entry point.
+    pub entry_point: Vaddr,
+    /// Address of the user stack top.
+    pub user_stack_top: Vaddr,
+    _private: (),
 }

-/// Inits VMO for each segment and then map segment to root vmar
-pub fn map_segment_vmos(elf: &Elf, root_vmar: &Vmar<Full>, elf_file: &Dentry) -> Result<Vaddr> {
-    // all segments of the shared object must be mapped to a continuous vm range
-    // to ensure the relative offset of each segment not changed.
-    let base_addr = if elf.is_shared_object() {
-        base_map_addr(elf, root_vmar)?
+/// Initializes a [`Vmo`] for each segment and then map to the root [`Vmar`].
+///
+/// This function will return the mapped range that covers all segments. The
+/// range will be tight, i.e., will not include any padding bytes. So the
+/// boundaries may not be page-aligned.
+///
+/// [`Vmo`]: crate::vm::vmo::Vmo
+pub fn map_segment_vmos(
+    elf: &Elf,
+    root_vmar: &Vmar<Full>,
+    elf_file: &Dentry,
+) -> Result<RelocatedRange> {
+    let elf_va_range = get_range_for_all_segments(elf)?;
+
+    let map_range = if elf.is_shared_object() {
+        // Relocatable object.
+
+        // Allocate a continuous range of virtual memory for all segments in advance.
+        //
+        // All segments in the ELF program must be mapped to a continuous VM range to
+        // ensure the relative offset of each segment not changed.
+        let elf_va_range_aligned =
+            elf_va_range.start.align_down(PAGE_SIZE)..elf_va_range.end.align_up(PAGE_SIZE);
+        let map_size = elf_va_range_aligned.len();
+
+        let vmar_map_options = root_vmar
+            .new_map(map_size, VmPerms::empty())?
+            .handle_page_faults_around();
+        let aligned_range = vmar_map_options.build().map(|addr| addr..addr + map_size)?;
+
+        let start_in_page_offset = elf_va_range.start - elf_va_range_aligned.start;
+        let end_in_page_offset = elf_va_range_aligned.end - elf_va_range.end;
+
+        aligned_range.start + start_in_page_offset..aligned_range.end - end_in_page_offset
     } else {
-        0
+        // Not relocatable object. Map as-is.
+        elf_va_range.clone()
     };

+    let relocated_range =
+        RelocatedRange::new(elf_va_range, map_range.start).expect("Mapped range overflows");
+
     for program_header in &elf.program_headers {
-        let type_ = program_header
-            .get_type()
-            .map_err(|_| Error::with_message(Errno::ENOEXEC, "parse program header type fails"))?;
+        let type_ = program_header.get_type().map_err(|_| {
+            Error::with_message(Errno::ENOEXEC, "Failed to parse the program header")
+        })?;
         if type_ == program::Type::Load {
             check_segment_align(program_header)?;
-            map_segment_vmo(program_header, elf_file, root_vmar, base_addr)?;
+
+            let map_at = relocated_range
+                .relocated_addr_of(program_header.virtual_addr as Vaddr)
+                .expect("Address not covered by `get_range_for_all_segments`");
+
+            map_segment_vmo(program_header, elf_file, root_vmar, map_at)?;
         }
     }
-    Ok(base_addr)
+
+    Ok(relocated_range)
 }

-fn base_map_addr(elf: &Elf, root_vmar: &Vmar<Full>) -> Result<Vaddr> {
-    let elf_size = elf
-        .program_headers
-        .iter()
-        .filter_map(|program_header| {
-            if let Ok(type_) = program_header.get_type()
-                && type_ == program::Type::Load
-            {
-                let ph_max_addr = program_header.virtual_addr + program_header.mem_size;
-                Some(ph_max_addr as usize)
-            } else {
-                None
-            }
-        })
-        .max()
-        .ok_or(Error::with_message(
-            Errno::ENOEXEC,
-            "executable file does not has loadable sections",
-        ))?;
-    let map_size = elf_size.align_up(PAGE_SIZE);
-    let vmar_map_options = root_vmar
-        .new_map(map_size, VmPerms::empty())?
-        .handle_page_faults_around();
-    vmar_map_options.build()
+/// A virtual range and its relocated address.
+pub struct RelocatedRange {
+    original_range: Range<Vaddr>,
+    relocated_start: Vaddr,
+}
+
+impl RelocatedRange {
+    /// Creates a new `RelocatedRange`.
+    ///
+    /// If the relocated address overflows, it will return `None`.
+    pub fn new(original_range: Range<Vaddr>, relocated_start: Vaddr) -> Option<Self> {
+        relocated_start.checked_add(original_range.len())?;
+        Some(Self {
+            original_range,
+            relocated_start,
+        })
+    }
+
+    /// Gets the relocated address of an address in the original range.
+    ///
+    /// If the provided address is not in the original range, it will return `None`.
+    pub fn relocated_addr_of(&self, addr: Vaddr) -> Option<Vaddr> {
+        if self.original_range.contains(&addr) {
+            Some(addr - self.original_range.start + self.relocated_start)
+        } else {
+            None
+        }
+    }
+}
+
+/// Returns the range that covers all segments in the ELF file.
+///
+/// The range must be tight, i.e., will not include any padding bytes. So the
+/// boundaries may not be page-aligned.
+fn get_range_for_all_segments(elf: &Elf) -> Result<Range<Vaddr>> {
+    let loadable_ranges_iter = elf.program_headers.iter().filter_map(|ph| {
+        if let Ok(program::Type::Load) = ph.get_type() {
+            Some((ph.virtual_addr as Vaddr)..((ph.virtual_addr + ph.mem_size) as Vaddr))
+        } else {
+            None
+        }
+    });

+    let min_addr =
+        loadable_ranges_iter
+            .clone()
+            .map(|r| r.start)
+            .min()
+            .ok_or(Error::with_message(
+                Errno::ENOEXEC,
+                "Executable file does not has loadable sections",
+            ))?;
+
+    let max_addr = loadable_ranges_iter
+        .map(|r| r.end)
+        .max()
+        .expect("The range set contains minimum but no maximum");
+
+    Ok(min_addr..max_addr)
 }

 /// Creates and map the corresponding segment VMO to `root_vmar`.
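
The `RelocatedRange` arithmetic introduced above is easy to verify in isolation. Below is a minimal, self-contained sketch that mirrors its two methods using plain `usize` addresses and made-up values; it is an illustration, not the kernel code:

use core::ops::Range;

type Vaddr = usize;

struct RelocatedRange {
    original_range: Range<Vaddr>,
    relocated_start: Vaddr,
}

impl RelocatedRange {
    fn new(original_range: Range<Vaddr>, relocated_start: Vaddr) -> Option<Self> {
        // Reject relocations whose end would overflow the address space.
        relocated_start.checked_add(original_range.len())?;
        Some(Self { original_range, relocated_start })
    }

    fn relocated_addr_of(&self, addr: Vaddr) -> Option<Vaddr> {
        // Translate by the relocation delta, but only for covered addresses.
        self.original_range
            .contains(&addr)
            .then(|| addr - self.original_range.start + self.relocated_start)
    }
}

fn main() {
    // A relocatable ELF whose segments span 0x0..0x5000, mapped at 0x7f00_0000.
    let range = RelocatedRange::new(0x0..0x5000, 0x7f00_0000).unwrap();

    // An entry point inside the original range moves by the same delta.
    assert_eq!(range.relocated_addr_of(0x1040), Some(0x7f00_1040));

    // An out-of-range address yields `None`, which the callers above turn
    // into ENOEXEC instead of jumping to a bogus entry point, as the old
    // unchecked `entry_point() + map_addr` arithmetic could.
    assert_eq!(range.relocated_addr_of(0x6000), None);
}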
@@ -253,7 +312,7 @@ fn map_segment_vmo(
     program_header: &ProgramHeader64,
     elf_file: &Dentry,
     root_vmar: &Vmar<Full>,
-    base_addr: Vaddr,
+    map_at: Vaddr,
 ) -> Result<()> {
     trace!(
         "mem range = 0x{:x} - 0x{:x}, mem_size = 0x{:x}",
@@ -297,7 +356,7 @@ fn map_segment_vmo(
     };
     let perms = parse_segment_perm(program_header.flags);
-    let offset = base_addr + (program_header.virtual_addr as Vaddr).align_down(PAGE_SIZE);
+    let offset = map_at.align_down(PAGE_SIZE);
     if segment_size != 0 {
         let mut vm_map_options = root_vmar
             .new_map(segment_size, perms)?
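
Why `map_at.align_down(PAGE_SIZE)` alone now suffices where the old code computed `base_addr + virtual_addr.align_down(PAGE_SIZE)`: the relocation delta chosen by `map_segment_vmos` is a whole number of pages (both the VMAR reservation and the aligned ELF range start on page boundaries), so relocation preserves in-page offsets. A standalone sketch of the arithmetic, with made-up addresses and simple stand-ins for the `align_ext` helpers:

const PAGE_SIZE: usize = 4096;

// Stand-ins for `AlignExt::align_down`/`align_up` (power-of-two alignment).
fn align_down(addr: usize, align: usize) -> usize {
    addr & !(align - 1)
}
fn align_up(addr: usize, align: usize) -> usize {
    (addr + align - 1) & !(align - 1)
}

fn main() {
    // A tight (unaligned) segment range, like `get_range_for_all_segments` returns.
    let elf_va_range = 0x10f40usize..0x15e08usize;

    // The reservation mapped into the VMAR must be page-aligned.
    let aligned = align_down(elf_va_range.start, PAGE_SIZE)..align_up(elf_va_range.end, PAGE_SIZE);
    assert_eq!(aligned, 0x10000..0x16000);

    // Suppose the allocator placed the reservation at 0x7f00_0000. The delta
    // is then page-aligned, so every address keeps its in-page offset.
    let delta = 0x7f00_0000 - aligned.start;
    let map_at = elf_va_range.start + delta;
    assert_eq!(map_at % PAGE_SIZE, 0xf40);

    // Hence aligning the relocated address down recovers the page base that
    // the old code reconstructed from `base_addr` plus the aligned virtual
    // address.
    assert_eq!(align_down(map_at, PAGE_SIZE), 0x7f00_0000);
}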

[File 3 of 3]

@@ -158,11 +158,11 @@ fn do_execve(
     // to the user-registered signal handlers.
     user_context.fpu_state().restore();

     // set new entry point
-    user_context.set_instruction_pointer(elf_load_info.entry_point() as _);
-    debug!("entry_point: 0x{:x}", elf_load_info.entry_point());
+    user_context.set_instruction_pointer(elf_load_info.entry_point as _);
+    debug!("entry_point: 0x{:x}", elf_load_info.entry_point);

     // set new user stack top
-    user_context.set_stack_pointer(elf_load_info.user_stack_top() as _);
-    debug!("user stack top: 0x{:x}", elf_load_info.user_stack_top());
+    user_context.set_stack_pointer(elf_load_info.user_stack_top as _);
+    debug!("user stack top: 0x{:x}", elf_load_info.user_stack_top);

     Ok(())
 }