Clear vmar later to allow execve return error

Jianfeng Jiang
2023-09-01 10:25:37 +08:00
committed by Tate, Hongliang Tian
parent 4fef4d9b79
commit dbc1e79e56
9 changed files with 149 additions and 102 deletions
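In the old flow, do_execve first cleared the root vmar and reset the heap, then loaded the new program; any load failure then had to be reported to a process whose address space was already destroyed. This commit moves the clearing into the ELF loader: the fallible steps that may still return an error to user space (parsing the ELF header, looking up and parsing the ld.so interpreter) run first, the process VM is cleared only after they succeed, and any failure past that point terminates the process via exit_group rather than returning. A condensed sketch of the new load_elf_to_vm flow, paraphrasing the diff below rather than quoting it verbatim:

    // sketch: error handling before vs. after the point of no return
    let elf = Elf::parse_elf(file_header)?;                  // may still return Err to the caller
    let ldso = if elf.is_shared_object() {
        Some(lookup_and_parse_ldso(&elf, file_header, fs_resolver)?)  // may still return Err
    } else {
        None
    };
    process_vm.clear();                                      // old address space is destroyed here
    match init_and_map_vmos(process_vm, ldso, &elf, &elf_file, argv, envp) {
        Ok(info) => Ok(info),
        Err(_) => {
            // cannot resume the old program, so exit the whole process instead of returning
            current!().exit_group(TermStatus::Exited(1));
            Task::current().exit();
        }
    }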


@@ -22,7 +22,10 @@ use crate::{
     vm::vmar::Vmar,
 };
 
-use super::{posix_thread::PosixThread, signal::sig_disposition::SigDispositions, Process};
+use super::{
+    posix_thread::PosixThread, process_vm::ProcessVm, signal::sig_disposition::SigDispositions,
+    Process,
+};
 
 bitflags! {
     pub struct CloneFlags: u32 {
@@ -200,9 +203,15 @@ fn clone_child_process(parent_context: UserContext, clone_args: CloneArgs) -> Re
     let clone_flags = clone_args.clone_flags;
 
     // clone vm
-    let parent_root_vmar = current.root_vmar();
-    let child_root_vmar = clone_vm(parent_root_vmar, clone_flags)?;
-    let child_user_vm = current.user_vm().clone();
+    let child_root_vmar = {
+        let parent_root_vmar = current.root_vmar();
+        clone_vm(parent_root_vmar, clone_flags)?
+    };
+    let child_process_vm = {
+        let child_user_heap = current.user_heap().clone();
+        ProcessVm::new(child_user_heap, child_root_vmar.dup()?)
+    };
 
     // clone user space
     let child_cpu_context = clone_cpu_context(
@@ -249,8 +258,7 @@ fn clone_child_process(parent_context: UserContext, clone_args: CloneArgs) -> Re
         parent,
         vec![child_thread],
         child_elf_path,
-        child_user_vm,
-        child_root_vmar.clone(),
+        child_process_vm,
         Weak::new(),
         child_file_table,
         child_fs,
@@ -316,14 +324,11 @@ fn clone_parent_settid(
 
 /// clone child vmar. If CLONE_VM is set, both threads share the same root vmar.
 /// Otherwise, fork a new copy-on-write vmar.
-fn clone_vm(
-    parent_root_vmar: &Arc<Vmar<Full>>,
-    clone_flags: CloneFlags,
-) -> Result<Arc<Vmar<Full>>> {
+fn clone_vm(parent_root_vmar: &Vmar<Full>, clone_flags: CloneFlags) -> Result<Vmar<Full>> {
     if clone_flags.contains(CloneFlags::CLONE_VM) {
-        Ok(parent_root_vmar.clone())
+        Ok(parent_root_vmar.dup()?)
     } else {
-        Ok(Arc::new(parent_root_vmar.fork_vmar()?))
+        Ok(parent_root_vmar.fork_vmar()?)
    }
 }
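For reference, the two paths clone_vm can now take, shown as a small sketch based on the hunk above (shared_vmar and forked_vmar are illustrative names; CloneFlags other than CLONE_VM are omitted):

    // thread-like clone (CLONE_VM): share the parent's address space via a duplicated handle
    let shared_vmar = clone_vm(parent_root_vmar, CloneFlags::CLONE_VM)?;   // uses Vmar::dup()

    // fork-like clone (no CLONE_VM): build a new copy-on-write root vmar
    let forked_vmar = clone_vm(parent_root_vmar, CloneFlags::empty())?;    // uses Vmar::fork_vmar()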


@@ -3,7 +3,7 @@ use core::sync::atomic::{AtomicI32, Ordering};
 use self::posix_thread::posix_thread_ext::PosixThreadExt;
 use self::process_group::ProcessGroup;
 use self::process_vm::user_heap::UserHeap;
-use self::process_vm::UserVm;
+use self::process_vm::ProcessVm;
 use self::rlimit::ResourceLimits;
 use self::signal::constants::SIGCHLD;
 use self::signal::sig_disposition::SigDispositions;
@@ -45,8 +45,7 @@ pub struct Process {
     // Immutable Part
     pid: Pid,
 
-    user_vm: UserVm,
-    root_vmar: Arc<Vmar<Full>>,
+    process_vm: ProcessVm,
 
     /// wait for child status changed
     waiting_children: WaitQueue,
@@ -98,8 +97,7 @@ impl Process {
         parent: Weak<Process>,
         threads: Vec<Arc<Thread>>,
         executable_path: String,
-        user_vm: UserVm,
-        root_vmar: Arc<Vmar<Full>>,
+        process_vm: ProcessVm,
         process_group: Weak<ProcessGroup>,
         file_table: Arc<Mutex<FileTable>>,
         fs: Arc<RwLock<FsResolver>>,
@@ -113,8 +111,7 @@ impl Process {
             pid,
             threads: Mutex::new(threads),
             executable_path: RwLock::new(executable_path),
-            user_vm,
-            root_vmar,
+            process_vm,
             waiting_children,
             exit_code: AtomicI32::new(0),
             status: Mutex::new(ProcessStatus::Runnable),
@@ -157,13 +154,12 @@ impl Process {
         argv: Vec<CString>,
         envp: Vec<CString>,
     ) -> Result<Arc<Self>> {
-        let root_vmar = Vmar::<Full>::new_root()?;
         let fs = FsResolver::new();
         let umask = FileCreationMask::default();
         let pid = allocate_tid();
         let parent = Weak::new();
         let process_group = Weak::new();
-        let user_vm = UserVm::new(&root_vmar)?;
+        let process_vm = ProcessVm::alloc()?;
         let file_table = FileTable::new_with_stdio();
         let sig_dispositions = SigDispositions::new();
         let user_process = Arc::new(Process::new(
@@ -171,8 +167,7 @@ impl Process {
             parent,
             vec![],
             executable_path.to_string(),
-            user_vm,
-            Arc::new(root_vmar),
+            process_vm,
             process_group,
             Arc::new(Mutex::new(file_table)),
             Arc::new(RwLock::new(fs)),
@@ -182,7 +177,7 @@ impl Process {
         let thread = Thread::new_posix_thread_from_executable(
             pid,
-            &user_process.root_vmar(),
+            &user_process.process_vm,
             &user_process.fs().read(),
             executable_path,
             Arc::downgrade(&user_process),
@@ -315,18 +310,18 @@ impl Process {
     }
 
     /// returns the user_vm
-    pub fn user_vm(&self) -> &UserVm {
-        &self.user_vm
+    pub fn process_vm(&self) -> &ProcessVm {
+        &self.process_vm
     }
 
     /// returns the root vmar
-    pub fn root_vmar(&self) -> &Arc<Vmar<Full>> {
-        &self.root_vmar
+    pub fn root_vmar(&self) -> &Vmar<Full> {
+        &self.process_vm.root_vmar()
     }
 
     /// returns the user heap if the process does have, otherwise None
     pub fn user_heap(&self) -> &UserHeap {
-        self.user_vm.user_heap()
+        self.process_vm.user_heap()
     }
 
     /// free zombie child with pid, returns the exit code of child process.


@@ -1,12 +1,10 @@
 use jinux_frame::{cpu::UserContext, user::UserSpace};
-use jinux_rights::Full;
 
 use crate::{
     fs::fs_resolver::{FsPath, FsResolver, AT_FDCWD},
     prelude::*,
-    process::{program_loader::load_program_to_root_vmar, Process},
+    process::{process_vm::ProcessVm, program_loader::load_program_to_vm, Process},
     thread::{Thread, Tid},
-    vm::vmar::Vmar,
 };
 
 use super::{builder::PosixThreadBuilder, name::ThreadName, PosixThread};
@@ -14,7 +12,7 @@ pub trait PosixThreadExt {
     fn as_posix_thread(&self) -> Option<&PosixThread>;
     fn new_posix_thread_from_executable(
         tid: Tid,
-        root_vmar: &Vmar<Full>,
+        process_vm: &ProcessVm,
         fs_resolver: &FsResolver,
         executable_path: &str,
         process: Weak<Process>,
@@ -27,7 +25,7 @@ impl PosixThreadExt for Thread {
     /// This function should only be called when launch shell()
     fn new_posix_thread_from_executable(
         tid: Tid,
-        root_vmar: &Vmar<Full>,
+        process_vm: &ProcessVm,
         fs_resolver: &FsResolver,
         executable_path: &str,
         process: Weak<Process>,
@@ -39,9 +37,9 @@ impl PosixThreadExt for Thread {
             fs_resolver.lookup(&fs_path)?
         };
         let (_, elf_load_info) =
-            load_program_to_root_vmar(root_vmar, elf_file, argv, envp, fs_resolver, 1)?;
+            load_program_to_vm(process_vm, elf_file, argv, envp, fs_resolver, 1)?;
 
-        let vm_space = root_vmar.vm_space().clone();
+        let vm_space = process_vm.root_vmar().vm_space().clone();
         let mut cpu_ctx = UserContext::default();
         cpu_ctx.set_rip(elf_load_info.entry_point() as _);
         cpu_ctx.set_rsp(elf_load_info.user_stack_top() as _);


@@ -41,24 +41,40 @@ use crate::vm::vmar::Vmar;
 
 /// The virtual space usage.
 /// This struct is used to control brk and mmap now.
-#[derive(Debug, Clone)]
-pub struct UserVm {
+pub struct ProcessVm {
     user_heap: UserHeap,
+    root_vmar: Vmar<Full>,
 }
 
-impl UserVm {
-    pub fn new(root_vmar: &Vmar<Full>) -> Result<Self> {
+impl ProcessVm {
+    pub fn alloc() -> Result<Self> {
+        let root_vmar = Vmar::<Full>::new_root()?;
         let user_heap = UserHeap::new();
-        user_heap.init(root_vmar).unwrap();
-        Ok(UserVm { user_heap })
+        user_heap.init(&root_vmar);
+        Ok(ProcessVm {
+            user_heap,
+            root_vmar,
+        })
+    }
+
+    pub fn new(user_heap: UserHeap, root_vmar: Vmar<Full>) -> Self {
+        Self {
+            user_heap,
+            root_vmar,
+        }
     }
 
     pub fn user_heap(&self) -> &UserHeap {
         &self.user_heap
     }
 
+    pub fn root_vmar(&self) -> &Vmar<Full> {
+        &self.root_vmar
+    }
+
     /// Set user vm to the init status
-    pub fn set_default(&self) -> Result<()> {
-        self.user_heap.set_default()
+    pub fn clear(&self) {
+        self.root_vmar.clear().unwrap();
+        self.user_heap.set_default(&self.root_vmar);
     }
 }
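Taken together, the ProcessVm lifecycle in this commit looks roughly like this (a sketch of how the other hunks use the API; child_heap and child_vmar stand in for the values produced on the clone path):

    let process_vm = ProcessVm::alloc()?;                   // new process: fresh root vmar, heap initialized
    let child_vm = ProcessVm::new(child_heap, child_vmar);  // clone/fork: assembled from existing parts
    process_vm.root_vmar();                                 // accessor used by the loader and thread setup
    process_vm.clear();                                     // execve: empty the vmar and reset the heap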


@@ -30,7 +30,7 @@ impl UserHeap {
         }
     }
 
-    pub fn init(&self, root_vmar: &Vmar<Full>) -> Result<Vaddr> {
+    pub fn init(&self, root_vmar: &Vmar<Full>) -> Vaddr {
         let perms = VmPerms::READ | VmPerms::WRITE;
         let vmo_options = VmoOptions::<Rights>::new(0).flags(VmoFlags::RESIZABLE);
         let heap_vmo = vmo_options.alloc().unwrap();
@@ -40,7 +40,7 @@ impl UserHeap {
             .offset(self.heap_base)
             .size(self.heap_size_limit);
         vmar_map_options.build().unwrap();
-        return Ok(self.current_heap_end.load(Ordering::Relaxed));
+        return self.current_heap_end.load(Ordering::Relaxed);
     }
 
     pub fn brk(&self, new_heap_end: Option<Vaddr>) -> Result<Vaddr> {
@@ -71,12 +71,10 @@ impl UserHeap {
     /// Set heap to the default status. i.e., point the heap end to heap base.
     /// This function will we called in execve.
-    pub fn set_default(&self) -> Result<()> {
+    pub fn set_default(&self, root_vmar: &Vmar<Full>) {
         self.current_heap_end
             .store(self.heap_base, Ordering::Relaxed);
-        let current = current!();
-        self.init(current.root_vmar())?;
-        Ok(())
+        self.init(root_vmar);
     }
 }


@@ -3,7 +3,9 @@
 use crate::fs::fs_resolver::{FsPath, FsResolver, AT_FDCWD};
 use crate::fs::utils::Dentry;
+use crate::process::process_vm::ProcessVm;
 use crate::process::program_loader::elf::init_stack::{init_aux_vec, InitStack};
+use crate::process::TermStatus;
 use crate::vm::perms::VmPerms;
 use crate::vm::vmo::{VmoOptions, VmoRightsOp};
 use crate::{
@@ -11,6 +13,7 @@ use crate::{
     vm::{vmar::Vmar, vmo::Vmo},
 };
 use align_ext::AlignExt;
+use jinux_frame::task::Task;
 use jinux_frame::vm::{VmIo, VmPerm};
 use jinux_rights::{Full, Rights};
 use xmas_elf::program::{self, ProgramHeader64};
@@ -21,8 +24,8 @@ use super::elf_file::Elf;
 /// 1. read the vaddr of each segment to get all elf pages.
 /// 2. create a vmo for each elf segment, create a pager for each segment. Then map the vmo to the root vmar.
 /// 3. write proper content to the init stack.
-pub fn load_elf_to_root_vmar(
-    root_vmar: &Vmar<Full>,
+pub fn load_elf_to_vm(
+    process_vm: &ProcessVm,
     file_header: &[u8],
     elf_file: Arc<Dentry>,
     fs_resolver: &FsResolver,
@@ -30,13 +33,75 @@ pub fn load_elf_to_root_vmar(
     envp: Vec<CString>,
 ) -> Result<ElfLoadInfo> {
     let elf = Elf::parse_elf(file_header)?;
-    let ldso_load_info = if let Ok(ldso_load_info) =
-        load_ldso_for_shared_object(root_vmar, &elf, file_header, fs_resolver)
-    {
-        Some(ldso_load_info)
+
+    let ldso = if elf.is_shared_object() {
+        Some(lookup_and_parse_ldso(&elf, file_header, fs_resolver)?)
     } else {
         None
     };
+
+    process_vm.clear();
+
+    match init_and_map_vmos(process_vm, ldso, &elf, &elf_file, argv, envp) {
+        Ok(elf_load_info) => return Ok(elf_load_info),
+        Err(e) => {
+            // Since the process_vm is cleared, the process cannot return to user space again,
+            // so exit_group is called here.
+            // FIXME: if `current` macro is used when creating the init process,
+            // the macro will panic. This corner case should be handled later.
+            let current = current!();
+            // FIXME: how to set the correct exit status?
+            current.exit_group(TermStatus::Exited(1));
+            Task::current().exit();
+        }
+    }
+}
+
+fn lookup_and_parse_ldso(
+    elf: &Elf,
+    file_header: &[u8],
+    fs_resolver: &FsResolver,
+) -> Result<(Arc<Dentry>, Elf)> {
+    let ldso_file = {
+        let ldso_path = elf.ldso_path(file_header)?;
+        let fs_path = FsPath::new(AT_FDCWD, &ldso_path)?;
+        fs_resolver.lookup(&fs_path)?
+    };
+    let ldso_elf = {
+        let mut buf = Box::new([0u8; PAGE_SIZE]);
+        let vnode = ldso_file.vnode();
+        vnode.read_at(0, &mut *buf)?;
+        Elf::parse_elf(&*buf)?
+    };
+    Ok((ldso_file, ldso_elf))
+}
+
+fn load_ldso(root_vmar: &Vmar<Full>, ldso_file: &Dentry, ldso_elf: &Elf) -> Result<LdsoLoadInfo> {
+    let map_addr = map_segment_vmos(&ldso_elf, root_vmar, &ldso_file)?;
+    Ok(LdsoLoadInfo::new(
+        ldso_elf.entry_point() + map_addr,
+        map_addr,
+    ))
+}
+
+fn init_and_map_vmos(
+    process_vm: &ProcessVm,
+    ldso: Option<(Arc<Dentry>, Elf)>,
+    elf: &Elf,
+    elf_file: &Dentry,
+    argv: Vec<CString>,
+    envp: Vec<CString>,
+) -> Result<ElfLoadInfo> {
+    let root_vmar = process_vm.root_vmar();
+
+    // After we clear process vm, if any error happens, we must call exit_group instead of return to user space.
+    let ldso_load_info = if let Some((ldso_file, ldso_elf)) = ldso {
+        Some(load_ldso(root_vmar, &ldso_file, &ldso_elf)?)
+    } else {
+        None
+    };
+
     let map_addr = map_segment_vmos(&elf, root_vmar, &elf_file)?;
     let mut aux_vec = init_aux_vec(&elf, map_addr)?;
     let mut init_stack = InitStack::new_default_config(argv, envp);
@@ -55,37 +120,9 @@ pub fn load_elf_to_root_vmar(
     };
 
     let elf_load_info = ElfLoadInfo::new(entry_point, init_stack.user_stack_top());
-    debug!("load elf succeeds.");
     Ok(elf_load_info)
 }
 
-fn load_ldso_for_shared_object(
-    root_vmar: &Vmar<Full>,
-    elf: &Elf,
-    file_header: &[u8],
-    fs_resolver: &FsResolver,
-) -> Result<LdsoLoadInfo> {
-    if !elf.is_shared_object() {
-        return_errno_with_message!(Errno::EINVAL, "not shared object");
-    }
-    let ldso_file = {
-        let ldso_path = elf.ldso_path(file_header)?;
-        let fs_path = FsPath::new(AT_FDCWD, &ldso_path)?;
-        fs_resolver.lookup(&fs_path)?
-    };
-    let ldso_elf = {
-        let mut buf = Box::new([0u8; PAGE_SIZE]);
-        let vnode = ldso_file.vnode();
-        vnode.read_at(0, &mut *buf)?;
-        Elf::parse_elf(&*buf)?
-    };
-    let map_addr = map_segment_vmos(&ldso_elf, root_vmar, &ldso_file)?;
-    Ok(LdsoLoadInfo::new(
-        ldso_elf.entry_point() + map_addr,
-        map_addr,
-    ))
-}
-
 pub struct LdsoLoadInfo {
     entry_point: Vaddr,
     base_addr: Vaddr,

@@ -4,4 +4,4 @@ mod init_stack;
 mod load_elf;
 
 pub use init_stack::INIT_STACK_SIZE;
-pub use load_elf::{load_elf_to_root_vmar, ElfLoadInfo};
+pub use load_elf::{load_elf_to_vm, ElfLoadInfo};


@@ -4,12 +4,12 @@ mod shebang;
 use crate::fs::fs_resolver::{FsPath, FsResolver, AT_FDCWD};
 use crate::fs::utils::Dentry;
 use crate::prelude::*;
-use crate::vm::vmar::Vmar;
-use jinux_rights::Full;
 
-use self::elf::{load_elf_to_root_vmar, ElfLoadInfo};
+use self::elf::{load_elf_to_vm, ElfLoadInfo};
 use self::shebang::parse_shebang_line;
+use super::process_vm::ProcessVm;
 
 /// Load an executable to root vmar, including loading programe image, preparing heap and stack,
 /// initializing argv, envp and aux tables.
 /// About recursion_limit: recursion limit is used to limit th recursion depth of shebang executables.
@@ -17,8 +17,8 @@ use self::shebang::parse_shebang_line;
 /// then it will trigger recursion. We will try to setup root vmar for the interpreter.
 /// I guess for most cases, setting the recursion_limit as 1 should be enough.
 /// because the interpreter is usually an elf binary(e.g., /bin/bash)
-pub fn load_program_to_root_vmar(
-    root_vmar: &Vmar<Full>,
+pub fn load_program_to_vm(
+    process_vm: &ProcessVm,
     elf_file: Arc<Dentry>,
     argv: Vec<CString>,
     envp: Vec<CString>,
@@ -44,8 +44,8 @@ pub fn load_program_to_root_vmar(
             fs_resolver.lookup(&fs_path)?
         };
         check_executable_file(&interpreter)?;
-        return load_program_to_root_vmar(
-            root_vmar,
+        return load_program_to_vm(
+            process_vm,
             interpreter,
             new_argv,
             envp,
@@ -54,7 +54,7 @@ pub fn load_program_to_root_vmar(
         );
     }
     let elf_load_info =
-        load_elf_to_root_vmar(root_vmar, &*file_header, elf_file, fs_resolver, argv, envp)?;
+        load_elf_to_vm(process_vm, &*file_header, elf_file, fs_resolver, argv, envp)?;
     Ok((abs_path, elf_load_info))
 }


@@ -8,7 +8,7 @@ use crate::log_syscall_entry;
 use crate::prelude::*;
 use crate::process::posix_thread::name::ThreadName;
 use crate::process::posix_thread::posix_thread_ext::PosixThreadExt;
-use crate::process::program_loader::{check_executable_file, load_program_to_root_vmar};
+use crate::process::program_loader::{check_executable_file, load_program_to_vm};
 use crate::syscall::{SYS_EXECVE, SYS_EXECVEAT};
 use crate::util::{read_cstring_from_user, read_val_from_user};
@@ -96,15 +96,13 @@ fn do_execve(
     *posix_thread.clear_child_tid().lock() = 0;
 
     let current = current!();
-    // destroy root vmars
-    let root_vmar = current.root_vmar();
-    root_vmar.clear()?;
-    current.user_vm().set_default()?;
-
-    // load elf content to new vm space
-    let fs_resolver = &*current.fs().read();
     debug!("load program to root vmar");
-    let (new_executable_path, elf_load_info) =
-        load_program_to_root_vmar(root_vmar, elf_file, argv, envp, fs_resolver, 1)?;
+    let (new_executable_path, elf_load_info) = {
+        let fs_resolver = &*current.fs().read();
+        let process_vm = current.process_vm();
+        load_program_to_vm(process_vm, elf_file, argv, envp, fs_resolver, 1)?
+    };
     debug!("load elf in execve succeeds");
     // set executable path
     *current.executable_path().write() = new_executable_path;