Utilize ThreadLocal

Ruihan Li
2024-12-01 09:19:03 +08:00
committed by Tate, Hongliang Tian
parent 38fcaaf749
commit 0bfe7aceb8
13 changed files with 186 additions and 122 deletions

View File

@@ -11,7 +11,10 @@ use ostd::{
 use crate::{
     prelude::*,
-    process::{posix_thread::PosixThread, Process},
+    process::{
+        posix_thread::{PosixThread, ThreadLocal},
+        Process,
+    },
     thread::Thread,
 };
@@ -19,6 +22,7 @@ use crate::{
 #[derive(Clone)]
 pub struct Context<'a> {
     pub process: &'a Process,
+    pub thread_local: &'a ThreadLocal,
     pub posix_thread: &'a PosixThread,
     pub thread: &'a Thread,
     pub task: &'a Task,
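The new `thread_local` field gives handlers lock-free access to the current task's private state alongside the existing `posix_thread` reference. A minimal, illustrative sketch of how a handler would pick it out of `Context` (the empty stand-in types below are not the kernel's real definitions):

// Illustrative stand-ins; not the kernel's real definitions.
struct ThreadLocal;
struct PosixThread;

struct Context<'a> {
    thread_local: &'a ThreadLocal,
    posix_thread: &'a PosixThread,
}

// Handlers destructure only the references they need and discard the rest
// with `..`, the same pattern the call sites later in this commit use.
fn handler(ctx: &Context) {
    let Context { thread_local, .. } = ctx;
    let _ = thread_local; // per-thread state is reached without taking any lock
}

fn main() {
    let (tl, pt) = (ThreadLocal, PosixThread);
    handler(&Context { thread_local: &tl, posix_thread: &pt });
}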

View File

@@ -9,7 +9,7 @@ use ostd::{
 };
 use super::{
-    posix_thread::{thread_table, AsPosixThread, PosixThread, PosixThreadBuilder, ThreadName},
+    posix_thread::{AsPosixThread, PosixThreadBuilder, ThreadName},
     process_table,
     process_vm::ProcessVm,
     signal::{constants::SIGCHLD, sig_disposition::SigDispositions, sig_num::SigNum},
@@ -216,8 +216,7 @@ fn clone_child_task(
     let Context {
         process,
         posix_thread,
-        thread: _,
-        task: _,
+        ..
     } = ctx;
     // clone system V semaphore
@@ -252,11 +251,17 @@ fn clone_child_task(
            Credentials::new_from(&credentials)
        };
 
-        let thread_builder = PosixThreadBuilder::new(child_tid, child_user_space, credentials)
+        let mut thread_builder = PosixThreadBuilder::new(child_tid, child_user_space, credentials)
            .process(posix_thread.weak_process())
            .sig_mask(sig_mask)
            .file_table(child_file_table)
            .fs(child_fs);
+
+        // Deal with SETTID/CLEARTID flags
+        clone_parent_settid(child_tid, clone_args.parent_tid, clone_flags)?;
+        thread_builder = clone_child_cleartid(thread_builder, clone_args.child_tid, clone_flags);
+        thread_builder = clone_child_settid(thread_builder, clone_args.child_tid, clone_flags);
+
        thread_builder.build()
    };
@@ -266,10 +271,6 @@ fn clone_child_task(
        .insert(child_task.clone())
        .map_err(|_| Error::with_message(Errno::EINTR, "the process has exited"))?;
-    let child_posix_thread = child_task.as_posix_thread().unwrap();
-    clone_parent_settid(child_tid, clone_args.parent_tid, clone_flags)?;
-    clone_child_cleartid(child_posix_thread, clone_args.child_tid, clone_flags)?;
-    clone_child_settid(child_posix_thread, clone_args.child_tid, clone_flags)?;
     Ok(child_task)
 }
@@ -281,8 +282,7 @@ fn clone_child_process(
     let Context {
         process,
         posix_thread,
-        thread: _,
-        task: _,
+        ..
     } = ctx;
     let clone_flags = clone_args.flags;
@@ -331,7 +331,7 @@ fn clone_child_process(
     let child = {
        let child_elf_path = process.executable_path();
-        let child_thread_builder = {
+        let mut child_thread_builder = {
            let child_thread_name = ThreadName::new_from_executable_path(&child_elf_path)?;
            let credentials = {
@@ -346,6 +346,13 @@ fn clone_child_process(
                .fs(child_fs)
        };
+
+        // Deal with SETTID/CLEARTID flags
+        clone_parent_settid(child_tid, clone_args.parent_tid, clone_flags)?;
+        child_thread_builder =
+            clone_child_cleartid(child_thread_builder, clone_args.child_tid, clone_flags);
+        child_thread_builder =
+            clone_child_settid(child_thread_builder, clone_args.child_tid, clone_flags);
 
        let mut process_builder =
            ProcessBuilder::new(child_tid, &child_elf_path, posix_thread.weak_process());
@@ -362,13 +369,6 @@ fn clone_child_process(
        child.set_exit_signal(sig);
    };
-    // Deals with clone flags
-    let child_thread = thread_table::get_thread(child_tid).unwrap();
-    let child_posix_thread = child_thread.as_posix_thread().unwrap();
-    clone_parent_settid(child_tid, clone_args.parent_tid, clone_flags)?;
-    clone_child_cleartid(child_posix_thread, clone_args.child_tid, clone_flags)?;
-    clone_child_settid(child_posix_thread, clone_args.child_tid, clone_flags)?;
     // Sets parent process and group for child process.
     set_parent_and_group(process, &child);
@@ -376,25 +376,27 @@ fn clone_child_process(
 }
 
 fn clone_child_cleartid(
-    child_posix_thread: &PosixThread,
+    child_builder: PosixThreadBuilder,
     child_tidptr: Vaddr,
     clone_flags: CloneFlags,
-) -> Result<()> {
+) -> PosixThreadBuilder {
     if clone_flags.contains(CloneFlags::CLONE_CHILD_CLEARTID) {
-        *child_posix_thread.clear_child_tid().lock() = child_tidptr;
+        child_builder.clear_child_tid(child_tidptr)
+    } else {
+        child_builder
     }
-    Ok(())
 }
 
 fn clone_child_settid(
-    child_posix_thread: &PosixThread,
+    child_builder: PosixThreadBuilder,
    child_tidptr: Vaddr,
    clone_flags: CloneFlags,
-) -> Result<()> {
+) -> PosixThreadBuilder {
    if clone_flags.contains(CloneFlags::CLONE_CHILD_SETTID) {
-        *child_posix_thread.set_child_tid().lock() = child_tidptr;
+        child_builder.set_child_tid(child_tidptr)
+    } else {
+        child_builder
    }
-    Ok(())
 }
 
 fn clone_parent_settid(
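With the TID-pointer handling moved before `build()`, the two helpers now follow a take-and-return builder style: each consumes the `PosixThreadBuilder`, conditionally records the address, and hands the builder back. A minimal sketch of the same idiom, with a hypothetical `Builder` type standing in for `PosixThreadBuilder`:

// Hypothetical stand-in for PosixThreadBuilder, for illustration only.
#[derive(Debug, Default)]
struct Builder {
    clear_child_tid: usize,
}

impl Builder {
    // Record the address to clear on thread exit; return self for chaining.
    fn clear_child_tid(mut self, addr: usize) -> Self {
        self.clear_child_tid = addr;
        self
    }
}

// Mirrors the shape of clone_child_cleartid: apply the setter only when the
// clone flag is present, otherwise pass the builder through unchanged.
fn with_cleartid(builder: Builder, child_tidptr: usize, flag_set: bool) -> Builder {
    if flag_set {
        builder.clear_child_tid(child_tidptr)
    } else {
        builder
    }
}

fn main() {
    let builder = with_cleartid(Builder::default(), 0x7000_0000, true);
    assert_eq!(builder.clear_child_tid, 0x7000_0000);
}

Returning the builder from both branches keeps the call sites chainable and avoids looking the child thread up again after it has been built, which is what the removed post-`build()` code had to do.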

View File

@@ -4,7 +4,7 @@
 use ostd::{cpu::CpuSet, task::Task, user::UserSpace};
 
-use super::{thread_table, PosixThread};
+use super::{thread_table, PosixThread, ThreadLocal};
 use crate::{
     fs::{file_table::FileTable, thread_info::ThreadFsInfo},
     prelude::*,
@@ -126,17 +126,12 @@ impl PosixThreadBuilder {
                process,
                tid,
                name: Mutex::new(thread_name),
-                set_child_tid: Mutex::new(set_child_tid),
-                clear_child_tid: Mutex::new(clear_child_tid),
                credentials,
                file_table,
                fs,
                sig_mask,
                sig_queues,
-                sig_context: Mutex::new(None),
-                sig_stack: Mutex::new(None),
                signalled_waker: SpinLock::new(None),
-                robust_list: Mutex::new(None),
                prof_clock,
                virtual_timer_manager,
                prof_timer_manager,
@@ -151,8 +146,10 @@ impl PosixThreadBuilder {
                cpu_affinity,
            ));
 
+            let thread_local = ThreadLocal::new(set_child_tid, clear_child_tid);
+
            thread_table::add_thread(tid, thread.clone());
-            task::create_new_user_task(user_space, thread)
+            task::create_new_user_task(user_space, thread, thread_local)
        })
    }
 }

View File

@@ -3,7 +3,8 @@
 use ostd::task::{CurrentTask, Task};
 
 use super::{
-    futex::futex_wake, robust_list::wake_robust_futex, thread_table, AsPosixThread, PosixThread,
+    futex::futex_wake, robust_list::wake_robust_futex, thread_table, AsPosixThread, AsThreadLocal,
+    ThreadLocal,
 };
 use crate::{
     current_userspace,
@@ -14,7 +15,7 @@ use crate::{
        task_set::TaskSet,
        TermStatus,
    },
-    thread::AsThread,
+    thread::{AsThread, Tid},
 };
 
 /// Exits the current POSIX thread.
@@ -40,6 +41,7 @@ fn exit_internal(term_status: TermStatus, is_exiting_group: bool) {
     let current_task = Task::current().unwrap();
     let current_thread = current_task.as_thread().unwrap();
     let posix_thread = current_thread.as_posix_thread().unwrap();
+    let thread_local = current_task.as_thread_local().unwrap();
     let posix_process = posix_thread.process();
 
     let is_last_thread = {
@@ -67,9 +69,9 @@ fn exit_internal(term_status: TermStatus, is_exiting_group: bool) {
        tasks.remove_exited(&current_task)
    };
 
-    wake_clear_ctid(posix_thread);
-    wake_robust_list(posix_thread);
+    wake_clear_ctid(thread_local);
+    wake_robust_list(thread_local, posix_thread.tid());
 
     // According to Linux behavior, the main thread shouldn't be removed from the table until the
     // process is reaped by its parent.
@@ -97,27 +99,27 @@ fn sigkill_other_threads(current_task: &CurrentTask, task_set: &TaskSet) {
 }
 
 /// Writes zero to `clear_child_tid` and performs a futex wake.
-fn wake_clear_ctid(current_thread: &PosixThread) {
-    let mut clear_ctid = current_thread.clear_child_tid().lock();
-    if *clear_ctid == 0 {
+fn wake_clear_ctid(thread_local: &ThreadLocal) {
+    let clear_ctid = thread_local.clear_child_tid().get();
+    if clear_ctid == 0 {
        return;
    }
 
    let _ = current_userspace!()
-        .write_val(*clear_ctid, &0u32)
+        .write_val(clear_ctid, &0u32)
        .inspect_err(|err| debug!("exit: cannot clear the child TID: {:?}", err));
-    let _ = futex_wake(*clear_ctid, 1, None)
+    let _ = futex_wake(clear_ctid, 1, None)
        .inspect_err(|err| debug!("exit: cannot wake the futex on the child TID: {:?}", err));
 
-    *clear_ctid = 0;
+    thread_local.clear_child_tid().set(0);
 }
 
 /// Walks the robust futex list, marking futex dead and waking waiters.
 ///
 /// This corresponds to Linux's `exit_robust_list`. Errors are silently ignored.
-fn wake_robust_list(current_thread: &PosixThread) {
-    let mut robust_list = current_thread.robust_list.lock();
+fn wake_robust_list(thread_local: &ThreadLocal, tid: Tid) {
+    let mut robust_list = thread_local.robust_list().borrow_mut();
 
    let list_head = match *robust_list {
        Some(robust_list_head) => robust_list_head,
@@ -126,7 +128,7 @@ fn wake_robust_list(current_thread: &PosixThread) {
    trace!("exit: wake up the rubust list: {:?}", list_head);
 
    for futex_addr in list_head.futexes() {
-        let _ = wake_robust_futex(futex_addr, current_thread.tid)
+        let _ = wake_robust_futex(futex_addr, tid)
            .inspect_err(|err| debug!("exit: cannot wake up the robust futex: {:?}", err));
    }

View File

@@ -15,7 +15,7 @@ use super::{
        sig_num::SigNum,
        sig_queues::SigQueues,
        signals::Signal,
-        SigEvents, SigEventsFilter, SigStack,
+        SigEvents, SigEventsFilter,
    },
    Credentials, Process,
 };
@@ -34,6 +34,7 @@ pub mod futex;
 mod name;
 mod posix_thread_ext;
 mod robust_list;
+mod thread_local;
 pub mod thread_table;
 
 pub use builder::PosixThreadBuilder;
@@ -41,6 +42,7 @@ pub use exit::{do_exit, do_exit_group};
 pub use name::{ThreadName, MAX_THREAD_NAME_LEN};
 pub use posix_thread_ext::{create_posix_task_from_executable, AsPosixThread};
 pub use robust_list::RobustListHead;
+pub use thread_local::{AsThreadLocal, ThreadLocal};
 
 pub struct PosixThread {
     // Immutable part
@@ -50,13 +52,6 @@ pub struct PosixThread {
     // Mutable part
     name: Mutex<Option<ThreadName>>,
 
-    // Linux specific attributes.
-    // https://man7.org/linux/man-pages/man2/set_tid_address.2.html
-    set_child_tid: Mutex<Vaddr>,
-    clear_child_tid: Mutex<Vaddr>,
-    robust_list: Mutex<Option<RobustListHead>>,
-
     /// Process credentials. At the kernel level, credentials are a per-thread attribute.
     credentials: Credentials,
@@ -71,10 +66,6 @@ pub struct PosixThread {
     sig_mask: AtomicSigMask,
     /// Thread-directed sigqueue
     sig_queues: SigQueues,
-    /// Signal handler ucontext address
-    /// FIXME: This field may be removed. For glibc applications with RESTORER flag set, the sig_context is always equals with rsp.
-    sig_context: Mutex<Option<Vaddr>>,
-    sig_stack: Mutex<Option<SigStack>>,
     /// The per-thread signal [`Waker`], which will be used to wake up the thread
     /// when enqueuing a signal.
     signalled_waker: SpinLock<Option<Arc<Waker>>>,
@@ -107,14 +98,6 @@ impl PosixThread {
        &self.name
    }
 
-    pub fn set_child_tid(&self) -> &Mutex<Vaddr> {
-        &self.set_child_tid
-    }
-
-    pub fn clear_child_tid(&self) -> &Mutex<Vaddr> {
-        &self.clear_child_tid
-    }
-
    pub fn file_table(&self) -> &Arc<SpinLock<FileTable>> {
        &self.file_table
    }
@@ -273,18 +256,6 @@ impl PosixThread {
        self.sig_queues.unregister_observer(observer);
    }
 
-    pub fn sig_context(&self) -> &Mutex<Option<Vaddr>> {
-        &self.sig_context
-    }
-
-    pub fn sig_stack(&self) -> &Mutex<Option<SigStack>> {
-        &self.sig_stack
-    }
-
-    pub fn robust_list(&self) -> &Mutex<Option<RobustListHead>> {
-        &self.robust_list
-    }
-
    /// Gets the read-only credentials of the thread.
    pub fn credentials(&self) -> Credentials<ReadOp> {
        self.credentials.dup().restrict()
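All of the removed fields and getters were `Mutex`-protected even though only the owning thread ever reads or writes them; they now live in `ThreadLocal` behind `Cell`/`RefCell`. A rough sketch of the difference, using `std` types as stand-ins (the kernel is `no_std` and has its own `Mutex`, so this is illustrative only):

use std::cell::{Cell, RefCell};
use std::sync::Mutex;

// How the state used to be stored: inside the shared PosixThread, so every
// access pays for a lock even on the owning thread's fast path.
struct SharedStyle {
    clear_child_tid: Mutex<usize>,
    robust_list: Mutex<Option<Vec<usize>>>, // stand-in for RobustListHead
}

// How it is stored now: in the task-local ThreadLocal, where Cell/RefCell
// suffice because only the owning task touches the data, and their !Sync
// nature enforces that confinement at compile time.
struct LocalStyle {
    clear_child_tid: Cell<usize>,
    robust_list: RefCell<Option<Vec<usize>>>,
}

fn main() {
    let shared = SharedStyle {
        clear_child_tid: Mutex::new(0),
        robust_list: Mutex::new(None),
    };
    *shared.clear_child_tid.lock().unwrap() = 0x1000;
    assert!(shared.robust_list.lock().unwrap().is_none());

    let local = LocalStyle {
        clear_child_tid: Cell::new(0),
        robust_list: RefCell::new(None),
    };
    local.clear_child_tid.set(0x1000); // no lock acquisition
    *local.robust_list.borrow_mut() = Some(vec![0x2000]);

    assert_eq!(local.clear_child_tid.get(), *shared.clear_child_tid.lock().unwrap());
}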

View File

@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: MPL-2.0
+
+use core::cell::{Cell, RefCell};
+
+use ostd::{mm::Vaddr, task::CurrentTask};
+
+use super::RobustListHead;
+use crate::process::signal::SigStack;
+
+/// Local data for a POSIX thread.
+pub struct ThreadLocal {
+    // TID pointers.
+    // https://man7.org/linux/man-pages/man2/set_tid_address.2.html
+    set_child_tid: Cell<Vaddr>,
+    clear_child_tid: Cell<Vaddr>,
+
+    // Robust futexes.
+    // https://man7.org/linux/man-pages/man2/get_robust_list.2.html
+    robust_list: RefCell<Option<RobustListHead>>,
+
+    // Signal.
+    /// `ucontext` address for the signal handler.
+    // FIXME: This field may be removed. For glibc applications with RESTORER flag set, the
+    // `sig_context` is always equals with RSP.
+    sig_context: Cell<Option<Vaddr>>,
+    /// Stack address, size, and flags for the signal handler.
+    sig_stack: RefCell<Option<SigStack>>,
+}
+
+impl ThreadLocal {
+    pub(super) fn new(set_child_tid: Vaddr, clear_child_tid: Vaddr) -> Self {
+        Self {
+            set_child_tid: Cell::new(set_child_tid),
+            clear_child_tid: Cell::new(clear_child_tid),
+            robust_list: RefCell::new(None),
+            sig_context: Cell::new(None),
+            sig_stack: RefCell::new(None),
+        }
+    }
+
+    pub fn set_child_tid(&self) -> &Cell<Vaddr> {
+        &self.set_child_tid
+    }
+
+    pub fn clear_child_tid(&self) -> &Cell<Vaddr> {
+        &self.clear_child_tid
+    }
+
+    pub fn robust_list(&self) -> &RefCell<Option<RobustListHead>> {
+        &self.robust_list
+    }
+
+    pub fn sig_context(&self) -> &Cell<Option<Vaddr>> {
+        &self.sig_context
+    }
+
+    pub fn sig_stack(&self) -> &RefCell<Option<SigStack>> {
+        &self.sig_stack
+    }
+}
+
+/// A trait to provide the `as_thread_local` method for tasks.
+pub trait AsThreadLocal {
+    /// Returns the associated [`ThreadLocal`].
+    fn as_thread_local(&self) -> Option<&ThreadLocal>;
+}
+
+impl AsThreadLocal for CurrentTask {
+    fn as_thread_local(&self) -> Option<&ThreadLocal> {
+        self.local_data().downcast_ref()
+    }
+}

View File

@@ -26,7 +26,7 @@ use sig_mask::SigMask;
 use sig_num::SigNum;
 pub use sig_stack::{SigStack, SigStackFlags};
 
-use super::posix_thread::PosixThread;
+use super::posix_thread::ThreadLocal;
 use crate::{
    cpu::LinuxAbi,
    current_userspace,
@@ -165,7 +165,7 @@ pub fn handle_user_signal(
        .store(old_mask + mask, Ordering::Relaxed);
 
    // Set up signal stack.
-    let mut stack_pointer = if let Some(sp) = use_alternate_signal_stack(ctx.posix_thread) {
+    let mut stack_pointer = if let Some(sp) = use_alternate_signal_stack(ctx.thread_local) {
        sp as u64
    } else {
        // just use user stack
@@ -193,8 +193,8 @@ pub fn handle_user_signal(
            .inner
            .gp_regs
            .copy_from_raw(user_ctx.general_regs());
-        let mut sig_context = ctx.posix_thread.sig_context().lock();
-        if let Some(sig_context_addr) = *sig_context {
+        let sig_context = ctx.thread_local.sig_context().get();
+        if let Some(sig_context_addr) = sig_context {
            ucontext.uc_link = sig_context_addr;
        } else {
            ucontext.uc_link = 0;
@@ -203,7 +203,9 @@ pub fn handle_user_signal(
        user_space.write_val(stack_pointer as _, &ucontext)?;
        let ucontext_addr = stack_pointer;
        // Store the ucontext addr in sig context of current thread.
-        *sig_context = Some(ucontext_addr as Vaddr);
+        ctx.thread_local
+            .sig_context()
+            .set(Some(ucontext_addr as Vaddr));
 
    // 3. Set the address of the trampoline code.
    if flags.contains(SigActionFlags::SA_RESTORER) {
@@ -251,8 +253,8 @@ pub fn handle_user_signal(
 /// It the stack is already active, we just increase the handler counter and return None, since
 /// the stack pointer can be read from context.
 /// It the stack is not used by any handler, we will return the new sp in alternate signal stack.
-fn use_alternate_signal_stack(posix_thread: &PosixThread) -> Option<usize> {
-    let mut sig_stack = posix_thread.sig_stack().lock();
+fn use_alternate_signal_stack(thread_local: &ThreadLocal) -> Option<usize> {
+    let mut sig_stack = thread_local.sig_stack().borrow_mut();
    let sig_stack = (*sig_stack).as_mut()?;
 
    if sig_stack.is_disabled() {

View File

@@ -90,9 +90,9 @@ fn do_execve(
 ) -> Result<()> {
     let Context {
        process,
+        thread_local,
        posix_thread,
-        thread: _,
-        task: _,
+        ..
    } = ctx;
 
    let executable_path = elf_file.abs_path();
@@ -107,7 +107,7 @@ fn do_execve(
        Some(ThreadName::new_from_executable_path(&executable_path)?);
    // clear ctid
    // FIXME: should we clear ctid when execve?
-    *posix_thread.clear_child_tid().lock() = 0;
+    thread_local.clear_child_tid().set(0);
 
    // Ensure that the file descriptors with the close-on-exec flag are closed.
    // FIXME: This is just wrong if the file table is shared with other processes.
@@ -123,10 +123,10 @@ fn do_execve(
    // After the program has been successfully loaded, the virtual memory of the current process
    // is initialized. Hence, it is necessary to clear the previously recorded robust list.
-    *posix_thread.robust_list().lock() = None;
+    *thread_local.robust_list().borrow_mut() = None;
    debug!("load elf in execve succeeds");
 
-    let credentials = ctx.posix_thread.credentials_mut();
+    let credentials = posix_thread.credentials_mut();
    set_uid_from_elf(process, &credentials, &elf_file)?;
    set_gid_from_elf(process, &credentials, &elf_file)?;
    credentials.set_keep_capabilities(false);

View File

@@ -9,16 +9,17 @@ use crate::{prelude::*, process::signal::c_types::ucontext_t};
 pub fn sys_rt_sigreturn(ctx: &Context, user_ctx: &mut UserContext) -> Result<SyscallReturn> {
     let Context {
-        process: _,
+        thread_local,
        posix_thread,
-        thread: _,
-        task: _,
+        ..
    } = ctx;
 
-    let mut sig_context = posix_thread.sig_context().lock();
-    if (*sig_context).is_none() {
-        return_errno_with_message!(Errno::EINVAL, "sigreturn should not been called");
-    }
-    let sig_context_addr = sig_context.unwrap();
+    let Some(sig_context_addr) = thread_local.sig_context().get() else {
+        return_errno_with_message!(
+            Errno::EINVAL,
+            "`sigreturn` cannot be called outside the signal context"
+        );
+    };
 
    // FIXME: This assertion is not always true, if RESTORER flag is not presented.
    // In this case, we will put restorer code on user stack, then the assertion will fail.
    // However, for most glibc applications, the restorer codes is provided by glibc and RESTORER flag is set.
@@ -27,7 +28,7 @@ pub fn sys_rt_sigreturn(ctx: &Context, user_ctx: &mut UserContext) -> Result<Sys
    let ucontext = ctx.user_space().read_val::<ucontext_t>(sig_context_addr)?;
 
    // If the sig stack is active and used by current handler, decrease handler counter.
-    if let Some(sig_stack) = posix_thread.sig_stack().lock().as_mut() {
+    if let Some(sig_stack) = &mut *thread_local.sig_stack().borrow_mut() {
        let rsp = user_ctx.stack_pointer();
        if rsp >= sig_stack.base() && rsp <= sig_stack.base() + sig_stack.size() {
            sig_stack.decrease_handler_counter();
@@ -36,15 +37,16 @@ pub fn sys_rt_sigreturn(ctx: &Context, user_ctx: &mut UserContext) -> Result<Sys
    // Set previous ucontext address
    if ucontext.uc_link == 0 {
-        *sig_context = None;
+        thread_local.sig_context().set(None);
    } else {
-        *sig_context = Some(ucontext.uc_link);
+        thread_local.sig_context().set(Some(ucontext.uc_link));
    };
 
    ucontext
        .uc_mcontext
        .inner
        .gp_regs
        .copy_to_raw(user_ctx.general_regs_mut());
+
    // unblock sig mask
    let sig_mask = ucontext.uc_sigmask;
    let old_mask = posix_thread.sig_mask().load(Ordering::Relaxed);

View File

@@ -12,15 +12,18 @@ pub fn sys_set_robust_list(
        "robust list head ptr: 0x{:x}, len = {}",
        robust_list_head_ptr, len
    );
+
    if len != core::mem::size_of::<RobustListHead>() {
        return_errno_with_message!(
            Errno::EINVAL,
-            "The len is not equal to the size of robust list head"
+            "the length is not equal to the size of the robust list head"
        );
    }
+
    let robust_list_head: RobustListHead = ctx.user_space().read_val(robust_list_head_ptr)?;
-    debug!("{:x?}", robust_list_head);
-    let mut robust_list = ctx.posix_thread.robust_list().lock();
-    *robust_list = Some(robust_list_head);
+    debug!("robust list head: {:x?}", robust_list_head);
+
+    *ctx.thread_local.robust_list().borrow_mut() = Some(robust_list_head);
+
    Ok(SyscallReturn::Return(0))
 }

View File

@@ -5,14 +5,16 @@ use crate::prelude::*;
 pub fn sys_set_tid_address(tidptr: Vaddr, ctx: &Context) -> Result<SyscallReturn> {
     debug!("tidptr = 0x{:x}", tidptr);
 
-    let mut clear_child_tid = ctx.posix_thread.clear_child_tid().lock();
-    if *clear_child_tid != 0 {
+    let clear_child_tid = ctx.thread_local.clear_child_tid().get();
+    if clear_child_tid != 0 {
        // According to manuals at https://man7.org/linux/man-pages/man2/set_tid_address.2.html
        // We need to write 0 to clear_child_tid and do futex wake
        todo!()
-    } else {
-        *clear_child_tid = tidptr;
    }
+
+    ctx.thread_local.set_child_tid().set(clear_child_tid);
 
    let tid = ctx.posix_thread.tid();
    Ok(SyscallReturn::Return(tid as _))
 }

View File

@@ -16,10 +16,7 @@ pub fn sys_sigaltstack(
        sig_stack_addr, old_sig_stack_addr
    );
 
-    let old_stack = {
-        let sig_stack = ctx.posix_thread.sig_stack().lock();
-        sig_stack.clone()
-    };
+    let old_stack = ctx.thread_local.sig_stack().borrow().clone();
 
    get_old_stack(old_sig_stack_addr, old_stack.as_ref(), ctx)?;
    set_new_stack(sig_stack_addr, old_stack.as_ref(), ctx)?;
@@ -73,7 +70,7 @@ fn set_new_stack(sig_stack_addr: Vaddr, old_stack: Option<&SigStack>, ctx: &Cont
    debug!("new_stack = {:?}", new_stack);
 
-    *ctx.posix_thread.sig_stack().lock() = Some(new_stack);
+    *ctx.thread_local.sig_stack().borrow_mut() = Some(new_stack);
 
    Ok(())
 }

View File

@@ -10,19 +10,27 @@ use crate::{
    cpu::LinuxAbi,
    current_userspace,
    prelude::*,
-    process::{posix_thread::AsPosixThread, signal::handle_pending_signal},
+    process::{
+        posix_thread::{AsPosixThread, AsThreadLocal, ThreadLocal},
+        signal::handle_pending_signal,
+    },
    syscall::handle_syscall,
-    thread::exception::handle_exception,
+    thread::{exception::handle_exception, AsThread},
    vm::vmar::is_userspace_vaddr,
 };
 
 /// create new task with userspace and parent process
-pub fn create_new_user_task(user_space: Arc<UserSpace>, thread_ref: Arc<Thread>) -> Task {
+pub fn create_new_user_task(
+    user_space: Arc<UserSpace>,
+    thread_ref: Arc<Thread>,
+    thread_local: ThreadLocal,
+) -> Task {
    fn user_task_entry() {
-        let current_thread = current_thread!();
-        let current_posix_thread = current_thread.as_posix_thread().unwrap();
-        let current_process = current_posix_thread.process();
        let current_task = Task::current().unwrap();
+        let current_thread = current_task.as_thread().unwrap();
+        let current_posix_thread = current_thread.as_posix_thread().unwrap();
+        let current_thread_local = current_task.as_thread_local().unwrap();
+        let current_process = current_posix_thread.process();
 
        let user_space = current_task
            .user_space()
@@ -41,7 +49,7 @@ pub fn create_new_user_task(user_space: Arc<UserSpace>, thread_ref: Arc<Thread>)
            user_mode.context().syscall_ret()
        );
 
-        let child_tid_ptr = *current_posix_thread.set_child_tid().lock();
+        let child_tid_ptr = current_thread_local.set_child_tid().get();
 
        // The `clone` syscall may require child process to write the thread pid to the specified address.
        // Make sure the store operation completes before the clone call returns control to user space
@@ -56,6 +64,7 @@ pub fn create_new_user_task(user_space: Arc<UserSpace>, thread_ref: Arc<Thread>)
        let ctx = Context {
            process: current_process.as_ref(),
+            thread_local: current_thread_local,
            posix_thread: current_posix_thread,
            thread: current_thread.as_ref(),
            task: current_task.as_ref(),
@@ -99,6 +108,7 @@ pub fn create_new_user_task(user_space: Arc<UserSpace>, thread_ref: Arc<Thread>)
            let _ = oops::catch_panics_as_oops(user_task_entry);
        })
        .data(thread_ref)
+        .local_data(thread_local)
        .user_space(Some(user_space))
        .build()
        .expect("spawn task failed")
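The round trip above, where `PosixThreadBuilder::build` passes a `ThreadLocal` into `create_new_user_task`, the task builder stores it via `.local_data(...)`, and `user_task_entry` gets it back through `as_thread_local()`, rests on type-erased task-local storage plus a checked downcast. A self-contained sketch of that mechanism (the `TaskDemo` type and its `local_data` method are illustrative; OSTD's real `Task` API is only assumed to behave similarly):

use std::any::Any;

// Stand-in for the per-thread data attached to a task.
struct ThreadLocalDemo {
    clear_child_tid: usize,
}

// Stand-in for a task that owns one type-erased slot of local data.
struct TaskDemo {
    local_data: Box<dyn Any>,
}

impl TaskDemo {
    fn local_data(&self) -> &dyn Any {
        &*self.local_data
    }

    // Mirrors AsThreadLocal::as_thread_local: a checked downcast that yields
    // None when the task carries no ThreadLocalDemo (e.g., a kernel thread).
    fn as_thread_local(&self) -> Option<&ThreadLocalDemo> {
        self.local_data().downcast_ref()
    }
}

fn main() {
    // Attach at build time ...
    let task = TaskDemo {
        local_data: Box::new(ThreadLocalDemo { clear_child_tid: 0x1000 }),
    };
    // ... and recover a typed reference at run time.
    assert_eq!(task.as_thread_local().unwrap().clear_child_tid, 0x1000);
}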