Refactor the code to load ELF

Jianfeng Jiang 2022-10-27 15:18:24 +08:00
parent 7ee9c9e74c
commit db880f274e
13 changed files with 126 additions and 106 deletions

View File

@@ -15,4 +15,4 @@ pub const PAGE_SIZE_BITS: usize = 0xc;
pub const KVA_START: usize = (usize::MAX) << PAGE_SIZE_BITS;
pub const DEFAULT_LOG_LEVEL: LogLevel = LogLevel::Info;
pub const DEFAULT_LOG_LEVEL: LogLevel = LogLevel::Trace;

View File

@@ -1,36 +1,10 @@
pub mod aux_vec;
pub mod elf;
pub mod init_stack;
pub mod mmap_area;
pub mod user_heap;
pub mod vm_page;
use crate::prelude::*;
use kxos_frame::vm::{Pod, VmIo, VmSpace};
use kxos_frame::vm::{Pod, VmIo};
pub mod vm_page;
use crate::process::Process;
use self::elf::{ElfError, ElfLoadInfo};
/// Load an elf into a given vm_space. This function will
/// 1. read the vaddr of each segment to get all elf pages;
/// 2. allocate physical frames and copy elf data to these frames;
/// 3. map the frames to the correct vaddr;
/// 4. (allocate frames and) map the user stack.
pub fn load_elf_to_vm_space<'a>(
filename: CString,
elf_file_content: &'a [u8],
vm_space: &VmSpace,
) -> Result<ElfLoadInfo<'a>, ElfError> {
let mut elf_load_info = ElfLoadInfo::parse_elf_data(elf_file_content, filename)?;
elf_load_info.copy_data(vm_space)?;
elf_load_info.debug_check_map_result(vm_space);
debug!("map elf success");
elf_load_info.init_stack(vm_space);
elf_load_info.write_elf_first_page(vm_space, elf_file_content);
Ok(elf_load_info)
}
/// Copy bytes from the user space of the current process. The number of bytes copied equals the length of dest.
pub fn read_bytes_from_user(src: Vaddr, dest: &mut [u8]) {
let current = Process::current();

View File

@@ -1,9 +1,10 @@
//! A Page in virtual address space
use crate::prelude::*;
use core::ops::Range;
use kxos_frame::vm::{VmAllocOptions, VmFrameVec, VmIo, VmMapOptions, VmPerm, VmSpace};
use super::elf::ElfError;
use kxos_frame::{
vm::{VmAllocOptions, VmFrameVec, VmIo, VmMapOptions, VmPerm, VmSpace},
Error,
};
/// A set of **CONTIGUOUS** virtual pages in VmSpace
pub struct VmPageRange {
@@ -135,7 +136,7 @@ impl VmPage {
}
}
const fn start_address(&self) -> Vaddr {
pub const fn start_address(&self) -> Vaddr {
self.vpn * PAGE_SIZE
}
@@ -148,7 +149,7 @@ impl VmPage {
vm_space.is_mapped(self.start_address())
}
pub fn map_page(&self, vm_space: &VmSpace, vm_perm: VmPerm) -> Result<(), ElfError> {
pub fn map_page(&self, vm_space: &VmSpace, vm_perm: VmPerm) -> Result<(), Error> {
let vm_alloc_option = VmAllocOptions::new(1);
let vm_frame = VmFrameVec::allocate(&vm_alloc_option)?;

View File

@@ -1,10 +1,13 @@
//! This module parses elf file content to get the elf_load_info.
//! When creating a process from an elf file, we use the elf_load_info to construct the VmSpace.
use crate::prelude::*;
use crate::{
memory::vm_page::{VmPage, VmPageRange},
prelude::*,
};
use core::{cmp::Ordering, ops::Range};
use kxos_frame::{
vm::{VmIo, VmPerm, VmSpace},
vm::{VmAllocOptions, VmFrameVec, VmIo, VmPerm, VmSpace},
Error,
};
use xmas_elf::{
@@ -13,10 +16,9 @@ use xmas_elf::{
ElfFile,
};
use super::{init_stack::InitStack, vm_page::VmPageRange};
use super::init_stack::InitStack;
pub struct ElfLoadInfo<'a> {
entry_point: Vaddr,
segments: Vec<ElfSegment<'a>>,
init_stack: InitStack,
elf_header_info: ElfHeaderInfo,
@ -30,8 +32,11 @@ pub struct ElfSegment<'a> {
}
#[derive(Debug, Clone, Copy, Default)]
/// Info parsed from elf header. Used to set aux vector.
/// Info parsed from the elf header. The entry point is used to set rip;
/// the other fields are used to set the auxv vector.
pub struct ElfHeaderInfo {
/// the entry point of the elf
pub entry_point: Vaddr,
/// program header table offset
pub ph_off: u64,
/// number of program headers
@@ -91,28 +96,57 @@ impl<'a> ElfSegment<'a> {
self.range.end
}
fn copy_segment(&self, vm_space: &VmSpace) -> Result<(), ElfError> {
let vm_page_range = VmPageRange::new_range(self.start_address()..self.end_address());
for page in vm_page_range.iter() {
// map page if the page is not mapped
if !page.is_mapped(vm_space) {
let vm_perm = self.vm_perm | VmPerm::W;
page.map_page(vm_space, vm_perm)?;
}
}
// copy segment
vm_space.write_bytes(self.start_address(), self.data)?;
// The length of a segment may be greater than the length of its data.
// In this case, the additional bytes should be zeroed.
fn copy_and_map_segment(&self, vm_space: &VmSpace) -> Result<(), ElfError> {
let start_address = self.start_address();
let page_mask = PAGE_SIZE - 1;
let segment_len = self.end_address() - self.start_address();
let data_len = self.data.len();
if segment_len > data_len {
let zeroed_bytes = vec![0u8; segment_len - data_len];
let write_addr = self.start_address() + data_len;
let zeroed_bytes = if segment_len > data_len {
vec![0u8; segment_len - data_len]
} else {
Vec::new()
};
// According to the Linux ABI, a segment may share its first page with another segment.
// So we first check whether the first page is already mapped.
if vm_space.is_mapped(start_address) {
// The first page is mapped. This is the rare case.
let write_len_on_first_page =
(PAGE_SIZE - (start_address & page_mask)).min(self.data.len());
vm_space
.write_bytes(write_addr, &zeroed_bytes)
.expect("Write zeroed bytes failed");
.write_bytes(start_address, &self.data[..write_len_on_first_page])
.expect("Write first page failed");
let start_page = VmPage::containing_address(start_address).next_page();
let end_page = VmPage::containing_address(self.end_address());
if end_page >= start_page {
let vm_page_range = VmPageRange::new_page_range(start_page, end_page);
let page_num = vm_page_range.len();
let vm_alloc_options = VmAllocOptions::new(page_num);
let frames = VmFrameVec::allocate(&vm_alloc_options)?;
frames.write_bytes(0, &self.data[write_len_on_first_page..])?;
if zeroed_bytes.len() > 0 {
frames.write_bytes(data_len - write_len_on_first_page, &zeroed_bytes)?;
}
vm_page_range.map_to(vm_space, frames, self.vm_perm);
} else {
if zeroed_bytes.len() > 0 {
vm_space.write_bytes(start_address + data_len, &zeroed_bytes)?;
}
}
} else {
// The first page is not mapped. This is the common case.
let vm_page_range = VmPageRange::new_range(start_address..self.end_address());
let page_num = vm_page_range.len();
let vm_alloc_options = VmAllocOptions::new(page_num);
let frames = VmFrameVec::allocate(&vm_alloc_options)?;
let offset = start_address & page_mask;
// copy segment
frames.write_bytes(offset, &self.data)?;
// write zero bytes
if zeroed_bytes.len() > 0 {
let write_addr = offset + data_len;
frames.write_bytes(write_addr, &zeroed_bytes)?;
}
vm_page_range.map_to(vm_space, frames, self.vm_perm);
}
Ok(())
}
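The boundary arithmetic above is easy to get wrong, so here is a standalone sketch of the two key quantities: how many bytes of segment data land on the (possibly shared) first page, and how many trailing bytes must be zero-filled when the segment's memory size exceeds its file size. This is an illustrative snippet assuming 4 KiB pages, not the kernel's actual code:

const PAGE_SIZE: usize = 4096;

/// Bytes of `data` that fall within the segment's first page.
fn first_page_write_len(start_address: usize, data_len: usize) -> usize {
    let page_mask = PAGE_SIZE - 1;
    (PAGE_SIZE - (start_address & page_mask)).min(data_len)
}

/// Trailing .bss bytes that must be zero-filled (p_memsz may exceed p_filesz).
fn bss_len(segment_len: usize, data_len: usize) -> usize {
    segment_len.saturating_sub(data_len)
}

fn main() {
    // A segment starting 0x10 bytes into a page, with 0x20 bytes of .bss.
    assert_eq!(first_page_write_len(0x40_0010, 0x2000), 0xff0);
    assert_eq!(bss_len(0x2020, 0x2000), 0x20);
}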
@@ -124,13 +158,11 @@ impl<'a> ElfSegment<'a> {
impl<'a> ElfLoadInfo<'a> {
fn with_capacity(
entry_point: Vaddr,
capacity: usize,
init_stack: InitStack,
elf_header_info: ElfHeaderInfo,
) -> Self {
Self {
entry_point,
segments: Vec::with_capacity(capacity),
init_stack,
elf_header_info,
@@ -147,14 +179,13 @@ impl<'a> ElfLoadInfo<'a> {
Ok(elf_file) => elf_file,
};
check_elf_header(&elf_file)?;
// init elf load info
let entry_point = elf_file.header.pt2.entry_point() as Vaddr;
// parse elf header
let elf_header_info = ElfHeaderInfo::parse_elf_header(&elf_file);
// FIXME: should this only count loadable segments?
let segments_count = elf_file.program_iter().count();
let init_stack = InitStack::new_default_config(filename);
let mut elf_load_info =
ElfLoadInfo::with_capacity(entry_point, segments_count, init_stack, elf_header_info);
ElfLoadInfo::with_capacity(segments_count, init_stack, elf_header_info);
// parse each segment
for segment in elf_file.program_iter() {
@@ -186,9 +217,10 @@ impl<'a> ElfLoadInfo<'a> {
Ok(VmPageRange::new_range(elf_start_address..elf_end_address))
}
pub fn copy_data(&self, vm_space: &VmSpace) -> Result<(), ElfError> {
/// copy and map all segments
pub fn copy_and_map_segments(&self, vm_space: &VmSpace) -> Result<(), ElfError> {
for segment in self.segments.iter() {
segment.copy_segment(vm_space)?;
segment.copy_and_map_segment(vm_space)?;
}
Ok(())
}
@@ -199,10 +231,10 @@ impl<'a> ElfLoadInfo<'a> {
.expect("Init User Stack failed");
}
/// This function will write the first page of elf file to the initial stack top.
/// This function will write the program header table to the initial stack top.
/// This function must be called after the process's initial stack has been initialized.
/// This information is used to set the auxv vector.
pub fn write_elf_first_page(&self, vm_space: &VmSpace, file_content: &[u8]) {
pub fn write_program_header_table(&self, vm_space: &VmSpace, file_content: &[u8]) {
let write_len = PAGE_SIZE.min(file_content.len());
let write_content = &file_content[..write_len];
let write_addr = self.init_stack.init_stack_top() - PAGE_SIZE;
@@ -211,14 +243,8 @@ impl<'a> ElfLoadInfo<'a> {
.expect("Write elf content failed");
}
/// return the perm of elf pages
/// FIXME: Set the correct permission bit of user pages.
fn perm() -> VmPerm {
VmPerm::RXU
}
pub fn entry_point(&self) -> u64 {
self.entry_point as u64
self.elf_header_info.entry_point as u64
}
pub fn user_stack_top(&self) -> u64 {
@@ -251,16 +277,6 @@ impl<'a> ElfLoadInfo<'a> {
.read_bytes(start_address, &mut read_buffer)
.expect("read bytes failed");
let res = segment.data.cmp(&read_buffer);
// if res != Ordering::Equal {
// debug!("segment: 0x{:x} - 0x{:x}", segment.start_address(), segment.end_address());
// debug!("read buffer len: 0x{:x}", read_buffer.len());
// for i in 0..segment.data.len() {
// if segment.data[i] != read_buffer[i] {
// debug!("i = 0x{:x}", i);
// break;
// }
// }
// }
assert_eq!(res, Ordering::Equal);
}
@@ -269,10 +285,12 @@ impl<'a> ElfLoadInfo<'a> {
impl ElfHeaderInfo {
fn parse_elf_header(elf_file: &ElfFile) -> Self {
let entry_point = elf_file.header.pt2.entry_point() as Vaddr;
let ph_off = elf_file.header.pt2.ph_offset();
let ph_num = elf_file.header.pt2.ph_count();
let ph_ent = core::mem::size_of::<ProgramHeader64>();
ElfHeaderInfo {
entry_point,
ph_off,
ph_num,
ph_ent,
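These fields typically end up in the auxiliary vector pushed onto the initial stack, so that a dynamic loader can locate the program header table. A hypothetical sketch of that mapping, assuming AuxKey exposes the standard AT_PHDR/AT_PHENT/AT_PHNUM/AT_ENTRY keys (the exact AuxVec API in this repo may differ):

// Hypothetical helper; not part of this commit.
fn set_elf_auxv(aux_vec: &mut AuxVec, info: &ElfHeaderInfo, elf_base: Vaddr) -> Result<(), ElfError> {
    // AT_PHDR points at the program header table in the process image.
    aux_vec.set(AuxKey::AT_PHDR, (elf_base + info.ph_off as usize) as u64)?;
    aux_vec.set(AuxKey::AT_PHENT, info.ph_ent as u64)?;
    aux_vec.set(AuxKey::AT_PHNUM, info.ph_num as u64)?;
    aux_vec.set(AuxKey::AT_ENTRY, info.entry_point as u64)?;
    Ok(())
}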

View File

@@ -2,7 +2,7 @@
//! The process's initial stack contains arguments, environment variables and auxiliary vectors.
//! The data layout of the init stack can be seen in Figure 3.9 of https://uclibc.org/docs/psABI-x86_64.pdf
use crate::prelude::*;
use crate::{memory::vm_page::VmPageRange, prelude::*};
use core::mem;
use kxos_frame::{
vm::{VmIo, VmPerm, VmSpace},
@@ -13,7 +13,6 @@ use super::elf::ElfHeaderInfo;
use super::{
aux_vec::{AuxKey, AuxVec},
elf::ElfError,
vm_page::VmPageRange,
};
pub const INIT_STACK_BASE: Vaddr = 0x0000_0000_2000_0000;
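For reference, the Figure 3.9 layout that InitStack reproduces looks roughly like this, sketched from the psABI with field widths omitted (higher addresses on top):

/*
 * ---------------------------------  <- init stack top
 * argument and environment strings
 * padding
 * auxv[]   (terminated by an AT_NULL entry)
 * envp[]   (NULL-terminated pointer array)
 * argv[]   (NULL-terminated pointer array)
 * argc     <- initial rsp
 * ---------------------------------
 */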

View File

@@ -0,0 +1,28 @@
pub mod aux_vec;
pub mod elf;
pub mod init_stack;
use kxos_frame::vm::VmSpace;
use self::elf::{ElfError, ElfLoadInfo};
use crate::prelude::*;
/// Load an elf into a given vm_space. This function will
/// 1. read the vaddr of each segment to get all elf pages;
/// 2. allocate physical frames and copy elf data to these frames;
/// 3. map the frames to the correct vaddr;
/// 4. (allocate frames and) map the user stack.
pub fn load_elf_to_vm_space<'a>(
filename: CString,
elf_file_content: &'a [u8],
vm_space: &VmSpace,
) -> Result<ElfLoadInfo<'a>, ElfError> {
let mut elf_load_info = ElfLoadInfo::parse_elf_data(elf_file_content, filename)?;
elf_load_info.copy_and_map_segments(vm_space)?;
elf_load_info.debug_check_map_result(vm_space);
elf_load_info.init_stack(vm_space);
elf_load_info.write_program_header_table(vm_space, elf_file_content);
debug!("load elf succeeds.");
Ok(elf_load_info)
}
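A hedged usage sketch of the relocated entry point; the real caller is create_user_task_from_elf in process/task.rs, and the exact setup code may differ:

// Illustrative only: load an elf and pull out the initial user context.
let vm_space = VmSpace::new();
let filename = CString::new("hello_world").expect("CString::new failed");
let elf_load_info = load_elf_to_vm_space(filename, elf_file_content, &vm_space)
    .expect("load elf failed");
// rip comes from the elf header, rsp from the mapped init stack.
let rip = elf_load_info.entry_point();
let rsp = elf_load_info.user_stack_top();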

View File

@@ -4,20 +4,20 @@ use crate::prelude::*;
use kxos_frame::sync::WaitQueue;
use kxos_frame::{task::Task, user::UserSpace, vm::VmSpace};
use crate::memory::mmap_area::MmapArea;
use crate::memory::user_heap::UserHeap;
use self::process_filter::ProcessFilter;
use self::process_vm::mmap_area::MmapArea;
use self::process_vm::user_heap::UserHeap;
use self::process_vm::UserVm;
use self::status::ProcessStatus;
use self::task::create_user_task_from_elf;
use self::user_vm_data::UserVm;
pub mod elf;
pub mod fifo_scheduler;
pub mod process_filter;
pub mod process_vm;
pub mod status;
pub mod table;
pub mod task;
pub mod user_vm_data;
pub mod wait;
static PID_ALLOCATOR: AtomicUsize = AtomicUsize::new(0);

View File

@@ -1,9 +1,8 @@
use core::sync::atomic::{AtomicUsize, Ordering};
use crate::prelude::*;
use crate::{memory::vm_page::VmPageRange, prelude::*, process::elf::init_stack::INIT_STACK_BASE};
use kxos_frame::vm::{VmPerm, VmSpace};
use super::{init_stack::INIT_STACK_BASE, vm_page::VmPageRange};
use crate::syscall::mmap::MMapFlags;
#[derive(Debug)]

View File

@@ -4,7 +4,11 @@
//! So we define a UserVm struct to store such information.
//! Briefly, it contains the exact usage of each segment of the virtual address space.
use crate::memory::{mmap_area::MmapArea, user_heap::UserHeap};
pub mod mmap_area;
pub mod user_heap;
use mmap_area::MmapArea;
use user_heap::UserHeap;
/*
* The user vm space layout looks like below.

View File

@@ -1,10 +1,11 @@
use core::sync::atomic::{AtomicUsize, Ordering};
use crate::prelude::*;
use crate::{
memory::vm_page::{VmPage, VmPageRange},
prelude::*,
};
use kxos_frame::vm::{VmPerm, VmSpace};
use super::vm_page::{VmPage, VmPageRange};
pub const USER_HEAP_BASE: Vaddr = 0x0000_0000_1000_0000;
#[derive(Debug)]

View File

@@ -9,9 +9,9 @@ use kxos_frame::{
use crate::prelude::*;
use crate::{memory::load_elf_to_vm_space, syscall::syscall_handler};
use crate::syscall::syscall_handler;
use super::Process;
use super::{elf::load_elf_to_vm_space, Process};
static COUNTER: AtomicUsize = AtomicUsize::new(0);

View File

@@ -2,12 +2,8 @@ use kxos_frame::cpu::CpuContext;
use super::constants::*;
use super::SyscallResult;
use crate::{
memory::{load_elf_to_vm_space, read_bytes_from_user},
prelude::*,
process::Process,
syscall::SYS_EXECVE,
};
use crate::process::elf::load_elf_to_vm_space;
use crate::{memory::read_bytes_from_user, prelude::*, process::Process, syscall::SYS_EXECVE};
pub fn sys_execve(
filename_ptr: Vaddr,