Implement cpu_local with GS and ensure GS points to TSS

Author: Qingsong Chen
Date: 2024-09-19 02:11:32 +00:00
Committed by: Tate, Hongliang Tian
parent 52bde1721e
commit c2f7a10b84
30 changed files with 901 additions and 112 deletions
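
The cpu_local machinery named in the title lives in files that are not part of the excerpt below. As a rough sketch of the technique, assuming GS_BASE has been loaded with the address of the current CPU's local storage block, a cpu_local read boils down to a single GS-relative load; the function name and the `offset` parameter here are made up for illustration and are not code from this commit:

    // Hypothetical illustration, not taken from this diff: read a per-CPU
    // u64 located `offset` bytes into the block that GS_BASE points to.
    use core::arch::asm;

    unsafe fn read_cpu_local_u64(offset: usize) -> u64 {
        let value: u64;
        // A single GS-relative load; the segment override makes the access
        // per-CPU as long as each CPU has its own GS base.
        asm!(
            "mov {value}, qword ptr gs:[{offset}]",
            offset = in(reg) offset,
            value = out(reg) value,
            options(nostack, preserves_flags, readonly),
        );
        value
    }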


@@ -32,6 +32,7 @@ pub struct Task {
     user_space: Option<Arc<UserSpace>>,
     ctx: UnsafeCell<TaskContext>,
     /// kernel stack, note that the top is SyscallFrame/TrapFrame
+    #[allow(dead_code)]
     kstack: KernelStack,
     schedule_info: TaskScheduleInfo,
@@ -53,6 +54,22 @@ impl Task {
         &self.ctx
     }
+    /// Sets thread-local storage pointer.
+    pub fn set_tls_pointer(&self, tls: usize) {
+        let ctx_ptr = self.ctx.get();
+        // SAFETY: it's safe to set user tls pointer in kernel context.
+        unsafe { (*ctx_ptr).set_tls_pointer(tls) }
+    }
+    /// Gets thread-local storage pointer.
+    pub fn tls_pointer(&self) -> usize {
+        let ctx_ptr = self.ctx.get();
+        // SAFETY: it's safe to get user tls pointer in kernel context.
+        unsafe { (*ctx_ptr).tls_pointer() }
+    }
     /// Yields execution so that another task may be scheduled.
     ///
     /// Note that this method cannot be simply named "yield" as the name is
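
A minimal usage sketch for the two accessors added above; the `task` reference and the TLS value are hypothetical:

    // Hypothetical usage, assuming `task` is some existing Task (for example
    // the current one) and 0x7f00_0000_0000 is an arbitrary user TLS base.
    fn tls_roundtrip(task: &Task) {
        task.set_tls_pointer(0x7f00_0000_0000);
        assert_eq!(task.tls_pointer(), 0x7f00_0000_0000);
    }
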
@@ -176,21 +193,14 @@ impl TaskOptions {
             current_task.exit();
         }
-        let mut new_task = Task {
-            func: self.func.unwrap(),
-            data: self.data.unwrap(),
-            user_space: self.user_space,
-            ctx: UnsafeCell::new(TaskContext::default()),
-            kstack: KernelStack::new_with_guard_page()?,
-            schedule_info: TaskScheduleInfo {
-                cpu: AtomicCpuId::default(),
-                priority: self.priority,
-                cpu_affinity: self.cpu_affinity,
-            },
-        };
+        let kstack = KernelStack::new_with_guard_page()?;
-        let ctx = new_task.ctx.get_mut();
-        ctx.set_instruction_pointer(kernel_task_entry as usize);
+        let mut ctx = UnsafeCell::new(TaskContext::default());
+        if let Some(user_space) = self.user_space.as_ref() {
+            ctx.get_mut().set_tls_pointer(user_space.tls_pointer());
+        };
+        ctx.get_mut()
+            .set_instruction_pointer(kernel_task_entry as usize);
         // We should reserve space for the return address in the stack, otherwise
         // we will write across the page boundary due to the implementation of
         // the context switch.
@@ -199,7 +209,21 @@
         // to at least 16 bytes. And a larger alignment is needed if larger arguments
         // are passed to the function. The `kernel_task_entry` function does not
         // have any arguments, so we only need to align the stack pointer to 16 bytes.
-        ctx.set_stack_pointer(crate::mm::paddr_to_vaddr(new_task.kstack.end_paddr() - 16));
+        ctx.get_mut()
+            .set_stack_pointer(crate::mm::paddr_to_vaddr(kstack.end_paddr() - 16));
+        let new_task = Task {
+            func: self.func.unwrap(),
+            data: self.data.unwrap(),
+            user_space: self.user_space,
+            ctx,
+            kstack,
+            schedule_info: TaskScheduleInfo {
+                cpu: AtomicCpuId::default(),
+                priority: self.priority,
+                cpu_affinity: self.cpu_affinity,
+            },
+        };
         Ok(new_task)
     }
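
The "ensure GS points to TSS" half of the commit title is also handled outside this excerpt. As a hedged sketch of the usual x86-64 approach, the address of the per-CPU block (assumed here to embed the TSS) would be written into the architectural IA32_GS_BASE MSR during per-CPU initialization; the function and variable names below are made up:

    // Hypothetical sketch, not taken from this diff: point GS_BASE at a
    // per-CPU block by writing the IA32_GS_BASE MSR (0xC000_0101).
    use core::arch::asm;

    const IA32_GS_BASE: u32 = 0xC000_0101;

    unsafe fn load_gs_base(cpu_local_area: u64) {
        // WRMSR expects the MSR index in ECX and the 64-bit value in EDX:EAX.
        asm!(
            "wrmsr",
            in("ecx") IA32_GS_BASE,
            in("eax") cpu_local_area as u32,
            in("edx") (cpu_local_area >> 32) as u32,
            options(nostack, preserves_flags),
        );
    }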