signal的发送(暂时父子进程之间共享信号及相应的结构体) (#89)

* 解决由于spinlock.h中包含preempt_enable()带来的循环include问题

* new: 初步实现signal的数据结构

* new:signal相关数据结构

* fix: 解决bindings.rs报一堆警告的问题

* new: rust下的kdebug kinfo kwarn kBUG kerror宏

* 移动asm.h和cmpxchg.h

* new: signal的发送(暂时只支持父子进程共享信号及处理函数)
This commit is contained in:
login
2022-11-23 11:38:20 +08:00
committed by GitHub
parent 3d729e2069
commit 66f67c6a95
44 changed files with 1677 additions and 472 deletions

391
kernel/src/process/fork.c Normal file
View File

@ -0,0 +1,391 @@
#include "process.h"
#include <common/err.h>
#include <common/kthread.h>
#include <common/spinlock.h>
extern spinlock_t process_global_pid_write_lock;
extern long process_global_pid;
extern void kernel_thread_func(void);
int process_copy_files(uint64_t clone_flags, struct process_control_block *pcb);
int process_copy_flags(uint64_t clone_flags, struct process_control_block *pcb);
int process_copy_mm(uint64_t clone_flags, struct process_control_block *pcb);
int process_copy_thread(uint64_t clone_flags, struct process_control_block *pcb, uint64_t stack_start,
uint64_t stack_size, struct pt_regs *current_regs);
extern int process_copy_sighand(uint64_t clone_flags, struct process_control_block * pcb);
extern int process_copy_signal(uint64_t clone_flags, struct process_control_block * pcb);
/**
* @brief fork当前进程
*
* @param regs 新的寄存器值
* @param clone_flags 克隆标志
* @param stack_start 堆栈开始地址
* @param stack_size 堆栈大小
* @return unsigned long
*/
/**
 * @brief Fork the current process.
 *
 * @param regs        register frame of the parent at the fork call site
 * @param clone_flags clone flags (CLONE_VM / CLONE_FS / CLONE_SIGHAND / ...)
 * @param stack_start start address of the new process' stack
 * @param stack_size  size of the new stack
 * @return unsigned long  the new pid on success, a negative errno on failure
 */
unsigned long do_fork(struct pt_regs *regs, unsigned long clone_flags, unsigned long stack_start,
                      unsigned long stack_size)
{
    int retval = 0;
    struct process_control_block *tsk = NULL;

    // Allocate the kernel stack of the new process; the pcb lives at its bottom.
    tsk = (struct process_control_block *)kzalloc(STACK_SIZE, 0);
    barrier();
    if (tsk == NULL)
    {
        retval = -ENOMEM;
        return retval;
    }
    barrier();
    memset(tsk, 0, sizeof(struct process_control_block));
    io_mfence();
    // Seed the child's pcb as a copy of the parent's.
    memcpy(tsk, current_pcb, sizeof(struct process_control_block));
    tsk->worker_private = NULL;
    io_mfence();
    // Initialize the pcb's list node.
    list_init(&tsk->list);
    io_mfence();
    // Detect a fork issued from kernel mode.
    if ((current_pcb->flags & PF_KTHREAD) && stack_start != 0)
        tsk->flags |= PF_KFORK;
    if (tsk->flags & PF_KTHREAD)
    {
        // Kernel threads need their own worker-private data.
        retval = kthread_set_worker_private(tsk);
        if (IS_ERR_VALUE(retval))
            goto copy_flags_failed;
        tsk->virtual_runtime = 0;
    }
    tsk->priority = 2;
    tsk->preempt_count = 0;

    // Bump the global pid counter and assign the new pid.
    spin_lock(&process_global_pid_write_lock);
    tsk->pid = process_global_pid++;
    barrier();
    // Insert the pcb into the global process list.
    // todo: take pcb_list_lock here
    tsk->prev_pcb = &initial_proc_union.pcb;
    barrier();
    tsk->next_pcb = initial_proc_union.pcb.next_pcb;
    barrier();
    initial_proc_union.pcb.next_pcb = tsk;
    barrier();
    tsk->parent_pcb = current_pcb;
    barrier();
    spin_unlock(&process_global_pid_write_lock);

    tsk->cpu_id = proc_current_cpu_id;
    tsk->state = PROC_UNINTERRUPTIBLE;
    tsk->parent_pcb = current_pcb;
    wait_queue_init(&tsk->wait_child_proc_exit, NULL);
    barrier();
    list_init(&tsk->list);

    // Copy the parent's flag bits.
    retval = process_copy_flags(clone_flags, tsk);
    if (retval)
        goto copy_flags_failed;
    // Copy the memory-layout descriptor (mm_struct).
    retval = process_copy_mm(clone_flags, tsk);
    if (retval)
        goto copy_mm_failed;
    // Copy the open file descriptors.
    retval = process_copy_files(clone_flags, tsk);
    if (retval)
        goto copy_files_failed;
    // Copy (or share) the signal-handler structure.
    retval = process_copy_sighand(clone_flags, tsk);
    if (retval)
        goto copy_sighand_failed;
    // Copy (or share) the signal structure.
    retval = process_copy_signal(clone_flags, tsk);
    if (retval)
        goto copy_signal_failed;
    // Copy the thread structure (registers / kernel stack frame).
    retval = process_copy_thread(clone_flags, tsk, stack_start, stack_size, regs);
    if (retval)
        goto copy_thread_failed;

    // Success: return the child's pid.
    retval = tsk->pid;
    tsk->flags &= ~PF_KFORK;

    // Make the child runnable.
    process_wakeup(tsk);
    return retval;

    // Error unwinding. The labels run in the reverse order of acquisition
    // (thread, signal, sighand, files, mm, flags) so that every resource
    // obtained before — and including — the failing step is released.
copy_thread_failed:;
    process_exit_thread(tsk);
copy_signal_failed:;
    process_exit_signal(tsk);
copy_sighand_failed:;
    process_exit_sighand(tsk);
copy_files_failed:;
    process_exit_files(tsk);
copy_mm_failed:;
    process_exit_mm(tsk);
copy_flags_failed:;
    kfree(tsk);
    return retval;
}
/**
* @brief 拷贝当前进程的标志位
*
* @param clone_flags 克隆标志位
* @param pcb 新的进程的pcb
* @return uint64_t
*/
/**
 * @brief Copy the parent's flag bits into the new pcb.
 *
 * @param clone_flags clone flags
 * @param pcb pcb of the new process
 * @return int always 0
 */
int process_copy_flags(uint64_t clone_flags, struct process_control_block *pcb)
{
    // Sharing the address space (CLONE_VM) marks the child as vfork-style.
    pcb->flags |= (clone_flags & CLONE_VM) ? PF_VFORK : 0;
    return 0;
}
/**
* @brief 拷贝当前进程的文件描述符等信息
*
* @param clone_flags 克隆标志位
* @param pcb 新的进程的pcb
* @return uint64_t
*/
/**
 * @brief Copy the parent's file-descriptor table into the new pcb.
 *
 * @param clone_flags clone flags; with CLONE_FS the table is shared instead
 * @param pcb pcb of the new process
 * @return int 0 on success, -ENOMEM if a descriptor copy cannot be allocated
 */
int process_copy_files(uint64_t clone_flags, struct process_control_block *pcb)
{
    int retval = 0;
    // With CLONE_FS the child shares the parent's descriptors; the pointers
    // were already duplicated when the whole pcb was memcpy'd in do_fork().
    if (clone_flags & CLONE_FS)
        return retval;

    // Otherwise give the child its own copy of every open descriptor.
    for (int i = 0; i < PROC_MAX_FD_NUM; ++i)
    {
        if (current_pcb->fds[i] == NULL)
            continue;

        pcb->fds[i] = (struct vfs_file_t *)kmalloc(sizeof(struct vfs_file_t), 0);
        if (pcb->fds[i] == NULL)
        {
            // Allocation failed. Clear the not-yet-copied slots — after the
            // pcb memcpy they still alias the PARENT's file objects, and the
            // cleanup path must not free those through the child.
            for (; i < PROC_MAX_FD_NUM; ++i)
                pcb->fds[i] = NULL;
            return -ENOMEM;
        }
        memcpy(pcb->fds[i], current_pcb->fds[i], sizeof(struct vfs_file_t));
    }
    return retval;
}
/**
* @brief 拷贝当前进程的内存空间分布结构体信息
*
* @param clone_flags 克隆标志位
* @param pcb 新的进程的pcb
* @return uint64_t
*/
/**
 * @brief Copy the parent's memory-layout descriptor (mm_struct) into the new pcb.
 *
 * With CLONE_VM the address space is shared. Otherwise a new top-level page
 * table is allocated, the kernel half is inherited from the per-CPU initial
 * process, and every user-space VMA is duplicated by copying its backing pages.
 *
 * @param clone_flags 克隆标志位 (clone flags)
 * @param pcb 新的进程的pcb (pcb of the new process)
 * @return uint64_t 0 on success
 */
int process_copy_mm(uint64_t clone_flags, struct process_control_block *pcb)
{
    int retval = 0;
    // Share the address space with the parent.
    if (clone_flags & CLONE_VM)
    {
        pcb->mm = current_pcb->mm;
        return retval;
    }

    // Allocate a fresh mm_struct, seeded from the parent's.
    // NOTE(review): the kmalloc results in this function are not checked for
    // NULL — confirm the allocator cannot fail here, or add error handling.
    struct mm_struct *new_mms = (struct mm_struct *)kmalloc(sizeof(struct mm_struct), 0);
    memset(new_mms, 0, sizeof(struct mm_struct));
    memcpy(new_mms, current_pcb->mm, sizeof(struct mm_struct));
    new_mms->vmas = NULL;
    pcb->mm = new_mms;

    // Allocate the top-level page table and record its physical address.
    new_mms->pgd = (pml4t_t *)virt_2_phys(kmalloc(PAGE_4K_SIZE, 0));
    // Only the low 2K (user half) needs zeroing; the high 2K (kernel half) is
    // overwritten by the memcpy just below.
    memset(phys_2_virt(new_mms->pgd), 0, PAGE_4K_SIZE / 2);
    // Inherit the kernel-space page-table entries (upper 256 PML4 slots).
    memcpy(phys_2_virt(new_mms->pgd) + 256, phys_2_virt(initial_proc[proc_current_cpu_id]->mm->pgd) + 256,
           PAGE_4K_SIZE / 2);
    // NOTE(review): current_pgd and new_pml4t are never used below.
    uint64_t *current_pgd = (uint64_t *)phys_2_virt(current_pcb->mm->pgd);
    uint64_t *new_pml4t = (uint64_t *)phys_2_virt(new_mms->pgd);

    // Duplicate each user-space VMA.
    struct vm_area_struct *vma = current_pcb->mm->vmas;
    while (vma != NULL)
    {
        // Skip kernel-range VMAs and VMAs explicitly marked "don't copy".
        if (vma->vm_end > USER_MAX_LINEAR_ADDR || vma->vm_flags & VM_DONTCOPY)
        {
            vma = vma->vm_next;
            continue;
        }

        int64_t vma_size = vma->vm_end - vma->vm_start;
        // kdebug("vma_size=%ld, vm_start=%#018lx", vma_size, vma->vm_start);
        if (vma_size > PAGE_2M_SIZE / 2)
        {
            // Large VMA: back it with 2M physical pages, one per iteration.
            int page_to_alloc = (PAGE_2M_ALIGN(vma_size)) >> PAGE_2M_SHIFT;
            for (int i = 0; i < page_to_alloc; ++i)
            {
                uint64_t pa = alloc_pages(ZONE_NORMAL, 1, PAGE_PGT_MAPPED)->addr_phys;

                struct vm_area_struct *new_vma = NULL;
                int ret = mm_create_vma(new_mms, vma->vm_start + i * PAGE_2M_SIZE, PAGE_2M_SIZE, vma->vm_flags,
                                        vma->vm_ops, &new_vma);
                // Free the page rather than leak it if the VMA already exists.
                // NOTE(review): in the -EEXIST branch the page is freed, yet
                // the memcpy below still writes through phys_2_virt(pa) —
                // confirm this path is unreachable or fix the use-after-free.
                if (unlikely(ret == -EEXIST))
                    free_pages(Phy_to_2M_Page(pa), 1);
                else
                    mm_map_vma(new_vma, pa, 0, PAGE_2M_SIZE);

                // Copy the parent's data into the new page (last page may be
                // partially filled).
                memcpy((void *)phys_2_virt(pa), (void *)(vma->vm_start + i * PAGE_2M_SIZE),
                       (vma_size >= PAGE_2M_SIZE) ? PAGE_2M_SIZE : vma_size);
                vma_size -= PAGE_2M_SIZE;
            }
        }
        else
        {
            // Small VMA: back it with a kmalloc'd buffer mapped into the child.
            uint64_t map_size = PAGE_4K_ALIGN(vma_size);
            uint64_t va = (uint64_t)kmalloc(map_size, 0);

            struct vm_area_struct *new_vma = NULL;
            int ret = mm_create_vma(new_mms, vma->vm_start, map_size, vma->vm_flags, vma->vm_ops, &new_vma);
            // Free the buffer rather than leak it if the VMA already exists.
            if (unlikely(ret == -EEXIST))
                kfree((void *)va);
            else
                mm_map_vma(new_vma, virt_2_phys(va), 0, map_size);

            memcpy((void *)va, (void *)vma->vm_start, vma_size);
        }
        vma = vma->vm_next;
    }
    return retval;
}
/**
* @brief 重写内核栈中的rbp地址
*
* @param new_regs 子进程的reg
* @param new_pcb 子进程的pcb
* @return int
*/
static int process_rewrite_rbp(struct pt_regs *new_regs, struct process_control_block *new_pcb)
{
uint64_t new_top = ((uint64_t)new_pcb) + STACK_SIZE;
uint64_t old_top = (uint64_t)(current_pcb) + STACK_SIZE;
uint64_t *rbp = &new_regs->rbp;
uint64_t *tmp = rbp;
// 超出内核栈范围
if ((uint64_t)*rbp >= old_top || (uint64_t)*rbp < (old_top - STACK_SIZE))
return 0;
while (1)
{
// 计算delta
uint64_t delta = old_top - *rbp;
// 计算新的rbp值
uint64_t newVal = new_top - delta;
// 新的值不合法
if (unlikely((uint64_t)newVal >= new_top || (uint64_t)newVal < (new_top - STACK_SIZE)))
break;
// 将新的值写入对应位置
*rbp = newVal;
// 跳转栈帧
rbp = (uint64_t *)*rbp;
}
// 设置内核态fork返回到enter_syscall_int()函数内的时候rsp寄存器的值
new_regs->rsp = new_top - (old_top - new_regs->rsp);
return 0;
}
/**
* @brief 拷贝当前进程的线程结构体
*
* @param clone_flags 克隆标志位
* @param pcb 新的进程的pcb
* @return uint64_t
*/
/**
 * @brief Copy the parent's thread structure and build the child's initial
 *        kernel stack frame.
 *
 * @param clone_flags  克隆标志位 (clone flags)
 * @param pcb          新的进程的pcb (pcb of the new process)
 * @param stack_start  user stack pointer for a user-mode fork
 * @param stack_size   size of the new stack
 * @param current_regs parent's register frame at the fork call site
 * @return uint64_t always 0
 */
int process_copy_thread(uint64_t clone_flags, struct process_control_block *pcb, uint64_t stack_start,
                        uint64_t stack_size, struct pt_regs *current_regs)
{
    // The thread structure lives immediately after the pcb in the same page.
    struct thread_struct *thd = (struct thread_struct *)(pcb + 1);
    memset(thd, 0, sizeof(struct thread_struct));
    pcb->thread = thd;

    struct pt_regs *child_regs = NULL;
    // Copy the stack contents.
    if (pcb->flags & PF_KFORK) // fork issued from kernel mode
    {
        // Kernel-mode fork: copy the parent's entire in-use kernel stack,
        // from current_regs up to the stack top.
        uint32_t size = ((uint64_t)current_pcb) + STACK_SIZE - (uint64_t)(current_regs);
        child_regs = (struct pt_regs *)(((uint64_t)pcb) + STACK_SIZE - size);
        memcpy(child_regs, (void *)current_regs, size);
        barrier();
        // Then rewrite the saved rbp of every frame in the copied stack.
        process_rewrite_rbp(child_regs, pcb);
    }
    else
    {
        // User-mode fork: only the register frame is copied; the child gets
        // the caller-supplied stack pointer.
        child_regs = (struct pt_regs *)((uint64_t)pcb + STACK_SIZE - sizeof(struct pt_regs));
        memcpy(child_regs, current_regs, sizeof(struct pt_regs));
        barrier();
        child_regs->rsp = stack_start;
    }

    // fork() returns 0 in the child.
    child_regs->rax = 0;
    if (pcb->flags & PF_KFORK)
        thd->rbp =
            (uint64_t)(child_regs + 1); // rbp when the new thread starts running, i.e. on entry to ret_from_system_call
    else
        thd->rbp = (uint64_t)pcb + STACK_SIZE;

    // rsp when the new thread starts running.
    thd->rsp = (uint64_t)child_regs;
    thd->fs = current_pcb->thread->fs;
    thd->gs = current_pcb->thread->gs;

    // Pick the entry point according to kernel-thread / kernel-fork status.
    if (pcb->flags & PF_KFORK)
        thd->rip = (uint64_t)ret_from_system_call;
    else if (pcb->flags & PF_KTHREAD && (!(pcb->flags & PF_KFORK)))
        thd->rip = (uint64_t)kernel_thread_func;
    else
        thd->rip = (uint64_t)ret_from_system_call;

    return 0;
}

View File

@ -0,0 +1,27 @@
use crate::{include::bindings::bindings::{process_control_block, CLONE_SIGHAND}, kdebug, libs::{refcount::{refcount_inc, RefCount}, ffi_convert::FFIBind2Rust}, arch::x86_64::asm::current::current_pcb};
/// Copy (or share) the parent's signal-handler structure into the new pcb.
///
/// When CLONE_SIGHAND is set the child shares the parent's `sighand_struct`,
/// so only its reference count is bumped here; the pointer itself was already
/// duplicated when the pcb was memcpy'd on the C side of `do_fork`.
///
/// NOTE(review): `pcb` is currently unused — a non-shared copy path is not
/// implemented yet. Always returns 0 (success).
#[no_mangle]
pub extern "C" fn process_copy_sighand(clone_flags: u64, pcb: *mut process_control_block) -> i32 {
    // Debug trace left over from bring-up.
    kdebug!("process_copy_sighand");

    if(clone_flags & (CLONE_SIGHAND as u64)) != 0{
        // Shared sighand: increment the refcount on the parent's structure.
        let r = RefCount::convert_mut(unsafe{&mut (*((current_pcb().sighand))).count}).unwrap();
        refcount_inc(r);
    }
    0
}
/// Copy the parent's `signal_struct` into the new pcb.
///
/// NOTE(review): not implemented yet — `pcb` is unused and the function
/// always reports success (0).
#[no_mangle]
pub extern "C" fn process_copy_signal(clone_flags: u64, pcb: *mut process_control_block) -> i32 {
    // Debug trace left over from bring-up.
    kdebug!("process_copy_signal");
    0
}

/// Release the `signal_struct` owned by `pcb` (called on fork failure / exit).
#[no_mangle]
pub extern "C" fn process_exit_signal(pcb: *mut process_control_block){
    // todo: reclaim the process' signal_struct
}

/// Release the `sighand_struct` owned by `pcb` (called on fork failure / exit).
#[no_mangle]
pub extern "C" fn process_exit_sighand(pcb: *mut process_control_block){
    // todo: reclaim the process' sighand_struct
}

View File

@ -0,0 +1,26 @@
use crate::{
include::{
bindings::bindings::{atomic_t, spinlock_t, wait_queue_head_t, List},
DragonOS::signal::{sighand_struct, signal_struct, MAX_SIG_NUM},
},
ipc::signal::DEFAULT_SIGACTION,
};
/// Signal bookkeeping for the initial (idle) process: no pending signals.
#[no_mangle]
pub static INITIAL_SIGNALS: signal_struct = signal_struct {
    sig_cnt: atomic_t { value: 0 },
};

/// Signal-handler table for the initial process.
///
/// The embedded wait-queue list is made circular by pointing both links at
/// itself, and the reference count starts at 1 (held by the initial process).
/// NOTE(review): both spinlocks are initialized with `lock: 1` — confirm 1
/// means "unlocked" in this spinlock implementation.
#[no_mangle]
pub static mut INITIAL_SIGHAND: sighand_struct = sighand_struct {
    count: REFCOUNT_INIT!(1),
    siglock: spinlock_t { lock: 1 },
    signal_fd_wqh: wait_queue_head_t {
        lock: spinlock_t { lock: 1 },
        wait_list: List {
            prev: unsafe { &INITIAL_SIGHAND.signal_fd_wqh.wait_list as *const List } as *mut List,
            next: unsafe { &INITIAL_SIGHAND.signal_fd_wqh.wait_list as *const List } as *mut List,
        },
    },
    // Every signal starts with the default action.
    action: [DEFAULT_SIGACTION; MAX_SIG_NUM as usize],
};

View File

@ -0,0 +1,5 @@
pub mod pid;
pub mod process;
pub mod preempt;
pub mod initial_proc;
pub mod fork;

18
kernel/src/process/pid.rs Normal file
View File

@ -0,0 +1,18 @@
/// Kinds of process identifiers.
///
/// Equality is derived: the hand-written `PartialEq` impl this replaces did
/// exactly what `#[derive(PartialEq)]` generates for a fieldless enum.
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum PidType {
    /// Process id
    PID = 1,
    /// Thread-group id
    TGID = 2,
    /// Process-group id
    PGID = 3,
    /// Session id
    SID = 4,
    /// Sentinel: number of valid pid types + 1
    MAX = 5,
}

View File

@ -0,0 +1,13 @@
use crate::arch::x86_64::asm::current::current_pcb;
/// @brief Increment the current process' lock-hold (preemption) counter,
/// disabling preemption while the counter is nonzero.
#[inline]
pub fn preempt_disable() {
    current_pcb().preempt_count += 1;
}
/// @brief Decrement the current process' lock-hold (preemption) counter.
///
/// NOTE(review): no underflow check — an unpaired call drives the counter
/// negative; confirm every call site is matched with `preempt_disable`.
#[inline]
pub fn preempt_enable() {
    current_pcb().preempt_count -= 1;
}

View File

@ -1,7 +1,8 @@
#pragma once
#include <common/wait_queue.h>
#include <stdint.h>
#include <DragonOS/signal.h>
#include <common/wait_queue.h>
// 进程最大可拥有的文件描述符数量
#define PROC_MAX_FD_NUM 16
@ -12,9 +13,9 @@
// 进程的运行状态
// 正在运行
#define PROC_RUNNING (1 << 0)
// 可被
// 可被信号打
#define PROC_INTERRUPTIBLE (1 << 1)
// 不可被
// 不可被信号打
#define PROC_UNINTERRUPTIBLE (1 << 2)
// 挂起
#define PROC_ZOMBIE (1 << 3)
@ -32,8 +33,9 @@
// 进程初始化时的数据拷贝标志位
#define CLONE_FS (1UL << 0) // 在进程间共享打开的文件
#define CLONE_SIGNAL (1UL << 1)
#define CLONE_SIGNAL (1UL << 1) // 克隆时,与父进程共享信号结构体
#define CLONE_VM (1UL << 2) // 在进程间共享虚拟内存空间
#define CLONE_SIGHAND (1UL << 3) // 克隆时,与父进程共享信号处理结构体
#define PCB_NAME_LEN 16
@ -60,9 +62,10 @@ struct thread_struct
#define PF_KTHREAD (1UL << 0) // 内核线程
#define PF_NEED_SCHED (1UL << 1) // 进程需要被调度
#define PF_VFORK (1UL << 2) // 标志进程是否由于vfork而存在资源共享
#define PF_KFORK (1UL << 3) // 标志在内核态下调用fork临时标记do_fork()结束后会将其复位)
#define PF_NOFREEZE (1UL << 4) // 当前进程不能被冻结
#define PF_KFORK (1UL << 3) // 标志在内核态下调用fork临时标记do_fork()结束后会将其复位)
#define PF_NOFREEZE (1UL << 4) // 当前进程不能被冻结
#define PF_EXITING (1UL << 5) // 进程正在退出
#define PF_WAKEKILL (1UL << 6) // 进程由于接收到终止信号唤醒
/**
* @brief 进程控制块
*
@ -70,12 +73,12 @@ struct thread_struct
struct process_control_block
{
// 进程的状态
volatile long state;
volatile uint64_t state;
// 进程标志:进程、线程、内核线程
unsigned long flags;
int64_t preempt_count; // 持有的自旋锁的数量
long cpu_id; // 当前进程在哪个CPU核心上运行
uint64_t flags;
int32_t preempt_count; // 持有的自旋锁的数量
uint32_t cpu_id; // 当前进程在哪个CPU核心上运行
char name[PCB_NAME_LEN];
// 内存空间分布结构体, 记录内存页表和程序段信息
@ -87,9 +90,9 @@ struct process_control_block
// pcb加入调度队列时所使用的链表节点
struct List list;
//todo:给pcb中加一个spinlock_t成员
// todo:给pcb中加一个spinlock_t成员
//进程自旋锁
// spinlock_t alloc_lock;
// spinlock_t alloc_lock;
// 地址空间范围
// 用户空间: 0x0000 0000 0000 0000 ~ 0x0000 7fff ffff ffff
@ -119,16 +122,14 @@ struct process_control_block
// ==== 信号处理相关 =====
struct signal_struct *signal;
struct sighand_struct *sighand;
// 一个bitmap表示被阻塞的信号
sigset_t blocked;
// 一个bitmap表示当前进程被禁用的信号
sigset_t sig_blocked;
// 正在等待的信号的标志位,表示某个信号正在等待处理
struct sigpending sig_pending;
};
// 将进程的pcb和内核栈融合到一起,8字节对齐
union proc_union
{
union proc_union {
struct process_control_block pcb;
ul stack[STACK_SIZE / sizeof(ul)];
} __attribute__((aligned(8)));

View File

@ -1,5 +1,6 @@
#include "process.h"
#include <DragonOS/signal.h>
#include <common/compiler.h>
#include <common/completion.h>
#include <common/elf.h>
@ -21,13 +22,12 @@
#include <filesystem/devfs/devfs.h>
#include <filesystem/fat32/fat32.h>
#include <filesystem/rootfs/rootfs.h>
#include <ktest/ktest.h>
#include <mm/slab.h>
#include <sched/sched.h>
#include <syscall/syscall.h>
#include <syscall/syscall_num.h>
#include <ktest/ktest.h>
#include <mm/mmio.h>
#include <common/lz4.h>
@ -43,6 +43,19 @@ extern void kernel_thread_func(void);
ul _stack_start; // initial proc的栈基地址虚拟地址
extern struct mm_struct initial_mm;
extern struct signal_struct INITIAL_SIGNALS;
extern struct sighand_struct INITIAL_SIGHAND;
// 设置初始进程的PCB
#define INITIAL_PROC(proc) \
{ \
.state = PROC_UNINTERRUPTIBLE, .flags = PF_KTHREAD, .preempt_count = 0, .signal = 0, .cpu_id = 0, \
.mm = &initial_mm, .thread = &initial_thread, .addr_limit = 0xffffffffffffffff, .pid = 0, .priority = 2, \
.virtual_runtime = 0, .fds = {0}, .next_pcb = &proc, .prev_pcb = &proc, .parent_pcb = &proc, .exit_code = 0, \
.wait_child_proc_exit = 0, .worker_private = NULL, .policy = SCHED_NORMAL, .sig_blocked = 0, \
.signal = &INITIAL_SIGNALS, .sighand = &INITIAL_SIGHAND, \
}
struct thread_struct initial_thread = {
.rbp = (ul)(initial_proc_union.stack + STACK_SIZE / sizeof(ul)),
.rsp = (ul)(initial_proc_union.stack + STACK_SIZE / sizeof(ul)),
@ -62,24 +75,6 @@ struct process_control_block *initial_proc[MAX_CPU_NUM] = {&initial_proc_union.p
// 为每个核心初始化初始进程的tss
struct tss_struct initial_tss[MAX_CPU_NUM] = {[0 ... MAX_CPU_NUM - 1] = INITIAL_TSS};
/**
* @brief 拷贝当前进程的标志位
*
* @param clone_flags 克隆标志位
* @param pcb 新的进程的pcb
* @return uint64_t
*/
uint64_t process_copy_flags(uint64_t clone_flags, struct process_control_block *pcb);
/**
* @brief 拷贝当前进程的文件描述符等信息
*
* @param clone_flags 克隆标志位
* @param pcb 新的进程的pcb
* @return uint64_t
*/
uint64_t process_copy_files(uint64_t clone_flags, struct process_control_block *pcb);
/**
* @brief 回收进程的所有文件描述符
*
@ -88,15 +83,6 @@ uint64_t process_copy_files(uint64_t clone_flags, struct process_control_block *
*/
uint64_t process_exit_files(struct process_control_block *pcb);
/**
* @brief 拷贝当前进程的内存空间分布结构体信息
*
* @param clone_flags 克隆标志位
* @param pcb 新的进程的pcb
* @return uint64_t
*/
uint64_t process_copy_mm(uint64_t clone_flags, struct process_control_block *pcb);
/**
* @brief 释放进程的页表
*
@ -105,17 +91,7 @@ uint64_t process_copy_mm(uint64_t clone_flags, struct process_control_block *pcb
*/
uint64_t process_exit_mm(struct process_control_block *pcb);
/**
* @brief 拷贝当前进程的线程结构体
*
* @param clone_flags 克隆标志位
* @param pcb 新的进程的pcb
* @return uint64_t
*/
uint64_t process_copy_thread(uint64_t clone_flags, struct process_control_block *pcb, uint64_t stack_start,
uint64_t stack_size, struct pt_regs *current_regs);
void process_exit_thread(struct process_control_block *pcb);
/**
* @brief 切换进程
@ -644,6 +620,7 @@ void process_init()
// 初始化进程的循环链表
list_init(&initial_proc_union.pcb.list);
wait_queue_init(&initial_proc_union.pcb.wait_child_proc_exit, NULL);
// 临时设置IDLE进程的的虚拟运行时间为0防止下面的这些内核线程的虚拟运行时间出错
current_pcb->virtual_runtime = 0;
@ -660,130 +637,9 @@ void process_init()
current_pcb->virtual_runtime = (1UL << 60);
}
/**
* @brief fork当前进程
*
* @param regs 新的寄存器值
* @param clone_flags 克隆标志
* @param stack_start 堆栈开始地址
* @param stack_size 堆栈大小
* @return unsigned long
*/
unsigned long do_fork(struct pt_regs *regs, unsigned long clone_flags, unsigned long stack_start,
unsigned long stack_size)
{
int retval = 0;
struct process_control_block *tsk = NULL;
// 为新的进程分配栈空间并将pcb放置在底部
tsk = (struct process_control_block *)kzalloc(STACK_SIZE, 0);
barrier();
if (tsk == NULL)
{
retval = -ENOMEM;
return retval;
}
barrier();
memset(tsk, 0, sizeof(struct process_control_block));
io_mfence();
// 将当前进程的pcb复制到新的pcb内
memcpy(tsk, current_pcb, sizeof(struct process_control_block));
tsk->worker_private = NULL;
io_mfence();
// 初始化进程的循环链表结点
list_init(&tsk->list);
io_mfence();
// 判断是否为内核态调用fork
if ((current_pcb->flags & PF_KTHREAD) && stack_start != 0)
tsk->flags |= PF_KFORK;
if (tsk->flags & PF_KTHREAD)
{
// 对于内核线程设置其worker私有信息
retval = kthread_set_worker_private(tsk);
if (IS_ERR_VALUE(retval))
goto copy_flags_failed;
tsk->virtual_runtime = 0;
}
tsk->priority = 2;
tsk->preempt_count = 0;
// 增加全局的pid并赋值给新进程的pid
spin_lock(&process_global_pid_write_lock);
tsk->pid = process_global_pid++;
barrier();
// 加入到进程链表中
// todo: 对pcb_list_lock加锁
tsk->prev_pcb = &initial_proc_union.pcb;
barrier();
tsk->next_pcb = initial_proc_union.pcb.next_pcb;
barrier();
initial_proc_union.pcb.next_pcb = tsk;
barrier();
tsk->parent_pcb = current_pcb;
barrier();
spin_unlock(&process_global_pid_write_lock);
tsk->cpu_id = proc_current_cpu_id;
tsk->state = PROC_UNINTERRUPTIBLE;
tsk->parent_pcb = current_pcb;
wait_queue_init(&tsk->wait_child_proc_exit, NULL);
barrier();
list_init(&tsk->list);
retval = -ENOMEM;
// 拷贝标志位
if (process_copy_flags(clone_flags, tsk))
goto copy_flags_failed;
// 拷贝内存空间分布结构体
if (process_copy_mm(clone_flags, tsk))
goto copy_mm_failed;
// 拷贝文件
if (process_copy_files(clone_flags, tsk))
goto copy_files_failed;
// 拷贝线程结构体
if (process_copy_thread(clone_flags, tsk, stack_start, stack_size, regs))
goto copy_thread_failed;
// 拷贝成功
retval = tsk->pid;
tsk->flags &= ~PF_KFORK;
// 唤醒进程
process_wakeup(tsk);
return retval;
copy_thread_failed:;
// 回收线程
process_exit_thread(tsk);
copy_files_failed:;
// 回收文件
process_exit_files(tsk);
copy_mm_failed:;
// 回收内存空间分布结构体
process_exit_mm(tsk);
copy_flags_failed:;
kfree(tsk);
return retval;
return 0;
}
/**
* @brief 根据pid获取进程的pcb。存在对应的pcb时返回对应的pcb的指针否则返回NULL
*
* 当进程管理模块拥有pcblist_lock之后调用本函数之前应当对其加锁
* @param pid
* @return struct process_control_block*
*/
@ -803,16 +659,19 @@ struct process_control_block *process_find_pcb_by_pid(pid_t pid)
}
/**
* @brief 将进程加入到调度器的就绪队列中
* @brief 将进程加入到调度器的就绪队列中.
*
* @param pcb 进程的pcb
*
* @return true 成功加入调度队列
* @return false 进程已经在运行
*/
int process_wakeup(struct process_control_block *pcb)
{
// kdebug("pcb pid = %#018lx", pcb->pid);
BUG_ON(pcb == NULL);
if (pcb == current_pcb || pcb == NULL)
if (pcb == NULL)
return -EINVAL;
// 如果pcb正在调度队列中则不重复加入调度队列
if (pcb->state & PROC_RUNNING)
@ -820,7 +679,7 @@ int process_wakeup(struct process_control_block *pcb)
pcb->state |= PROC_RUNNING;
sched_enqueue(pcb);
return 0;
return 1;
}
/**
@ -838,47 +697,6 @@ int process_wakeup_immediately(struct process_control_block *pcb)
// 将当前进程标志为需要调度缩短新进程被wakeup的时间
current_pcb->flags |= PF_NEED_SCHED;
}
/**
* @brief 拷贝当前进程的标志位
*
* @param clone_flags 克隆标志位
* @param pcb 新的进程的pcb
* @return uint64_t
*/
uint64_t process_copy_flags(uint64_t clone_flags, struct process_control_block *pcb)
{
if (clone_flags & CLONE_VM)
pcb->flags |= PF_VFORK;
return 0;
}
/**
* @brief 拷贝当前进程的文件描述符等信息
*
* @param clone_flags 克隆标志位
* @param pcb 新的进程的pcb
* @return uint64_t
*/
uint64_t process_copy_files(uint64_t clone_flags, struct process_control_block *pcb)
{
int retval = 0;
// 如果CLONE_FS被置位那么子进程与父进程共享文件描述符
// 文件描述符已经在复制pcb时被拷贝
if (clone_flags & CLONE_FS)
return retval;
// 为新进程拷贝新的文件描述符
for (int i = 0; i < PROC_MAX_FD_NUM; ++i)
{
if (current_pcb->fds[i] == NULL)
continue;
pcb->fds[i] = (struct vfs_file_t *)kmalloc(sizeof(struct vfs_file_t), 0);
memcpy(pcb->fds[i], current_pcb->fds[i], sizeof(struct vfs_file_t));
}
return retval;
}
/**
* @brief 回收进程的所有文件描述符
@ -903,99 +721,6 @@ uint64_t process_exit_files(struct process_control_block *pcb)
memset(pcb->fds, 0, sizeof(struct vfs_file_t *) * PROC_MAX_FD_NUM);
}
/**
* @brief 拷贝当前进程的内存空间分布结构体信息
*
* @param clone_flags 克隆标志位
* @param pcb 新的进程的pcb
* @return uint64_t
*/
uint64_t process_copy_mm(uint64_t clone_flags, struct process_control_block *pcb)
{
int retval = 0;
// 与父进程共享内存空间
if (clone_flags & CLONE_VM)
{
pcb->mm = current_pcb->mm;
return retval;
}
// 分配新的内存空间分布结构体
struct mm_struct *new_mms = (struct mm_struct *)kmalloc(sizeof(struct mm_struct), 0);
memset(new_mms, 0, sizeof(struct mm_struct));
memcpy(new_mms, current_pcb->mm, sizeof(struct mm_struct));
new_mms->vmas = NULL;
pcb->mm = new_mms;
// 分配顶层页表, 并设置顶层页表的物理地址
new_mms->pgd = (pml4t_t *)virt_2_phys(kmalloc(PAGE_4K_SIZE, 0));
// 由于高2K部分为内核空间在接下来需要覆盖其数据因此不用清零
memset(phys_2_virt(new_mms->pgd), 0, PAGE_4K_SIZE / 2);
// 拷贝内核空间的页表指针
memcpy(phys_2_virt(new_mms->pgd) + 256, phys_2_virt(initial_proc[proc_current_cpu_id]->mm->pgd) + 256,
PAGE_4K_SIZE / 2);
uint64_t *current_pgd = (uint64_t *)phys_2_virt(current_pcb->mm->pgd);
uint64_t *new_pml4t = (uint64_t *)phys_2_virt(new_mms->pgd);
// 拷贝用户空间的vma
struct vm_area_struct *vma = current_pcb->mm->vmas;
while (vma != NULL)
{
if (vma->vm_end > USER_MAX_LINEAR_ADDR || vma->vm_flags & VM_DONTCOPY)
{
vma = vma->vm_next;
continue;
}
int64_t vma_size = vma->vm_end - vma->vm_start;
// kdebug("vma_size=%ld, vm_start=%#018lx", vma_size, vma->vm_start);
if (vma_size > PAGE_2M_SIZE / 2)
{
int page_to_alloc = (PAGE_2M_ALIGN(vma_size)) >> PAGE_2M_SHIFT;
for (int i = 0; i < page_to_alloc; ++i)
{
uint64_t pa = alloc_pages(ZONE_NORMAL, 1, PAGE_PGT_MAPPED)->addr_phys;
struct vm_area_struct *new_vma = NULL;
int ret = mm_create_vma(new_mms, vma->vm_start + i * PAGE_2M_SIZE, PAGE_2M_SIZE, vma->vm_flags,
vma->vm_ops, &new_vma);
// 防止内存泄露
if (unlikely(ret == -EEXIST))
free_pages(Phy_to_2M_Page(pa), 1);
else
mm_map_vma(new_vma, pa, 0, PAGE_2M_SIZE);
memcpy((void *)phys_2_virt(pa), (void *)(vma->vm_start + i * PAGE_2M_SIZE),
(vma_size >= PAGE_2M_SIZE) ? PAGE_2M_SIZE : vma_size);
vma_size -= PAGE_2M_SIZE;
}
}
else
{
uint64_t map_size = PAGE_4K_ALIGN(vma_size);
uint64_t va = (uint64_t)kmalloc(map_size, 0);
struct vm_area_struct *new_vma = NULL;
int ret = mm_create_vma(new_mms, vma->vm_start, map_size, vma->vm_flags, vma->vm_ops, &new_vma);
// 防止内存泄露
if (unlikely(ret == -EEXIST))
kfree((void *)va);
else
mm_map_vma(new_vma, virt_2_phys(va), 0, map_size);
memcpy((void *)va, (void *)vma->vm_start, vma_size);
}
vma = vma->vm_next;
}
return retval;
}
/**
* @brief 释放进程的页表
*
@ -1059,106 +784,6 @@ uint64_t process_exit_mm(struct process_control_block *pcb)
return 0;
}
/**
* @brief 重写内核栈中的rbp地址
*
* @param new_regs 子进程的reg
* @param new_pcb 子进程的pcb
* @return int
*/
static int process_rewrite_rbp(struct pt_regs *new_regs, struct process_control_block *new_pcb)
{
uint64_t new_top = ((uint64_t)new_pcb) + STACK_SIZE;
uint64_t old_top = (uint64_t)(current_pcb) + STACK_SIZE;
uint64_t *rbp = &new_regs->rbp;
uint64_t *tmp = rbp;
// 超出内核栈范围
if ((uint64_t)*rbp >= old_top || (uint64_t)*rbp < (old_top - STACK_SIZE))
return 0;
while (1)
{
// 计算delta
uint64_t delta = old_top - *rbp;
// 计算新的rbp值
uint64_t newVal = new_top - delta;
// 新的值不合法
if (unlikely((uint64_t)newVal >= new_top || (uint64_t)newVal < (new_top - STACK_SIZE)))
break;
// 将新的值写入对应位置
*rbp = newVal;
// 跳转栈帧
rbp = (uint64_t *)*rbp;
}
// 设置内核态fork返回到enter_syscall_int()函数内的时候rsp寄存器的值
new_regs->rsp = new_top - (old_top - new_regs->rsp);
return 0;
}
/**
* @brief 拷贝当前进程的线程结构体
*
* @param clone_flags 克隆标志位
* @param pcb 新的进程的pcb
* @return uint64_t
*/
uint64_t process_copy_thread(uint64_t clone_flags, struct process_control_block *pcb, uint64_t stack_start,
uint64_t stack_size, struct pt_regs *current_regs)
{
// 将线程结构体放置在pcb后方
struct thread_struct *thd = (struct thread_struct *)(pcb + 1);
memset(thd, 0, sizeof(struct thread_struct));
pcb->thread = thd;
struct pt_regs *child_regs = NULL;
// 拷贝栈空间
if (pcb->flags & PF_KFORK) // 内核态下的fork
{
// 内核态下则拷贝整个内核栈
uint32_t size = ((uint64_t)current_pcb) + STACK_SIZE - (uint64_t)(current_regs);
child_regs = (struct pt_regs *)(((uint64_t)pcb) + STACK_SIZE - size);
memcpy(child_regs, (void *)current_regs, size);
barrier();
// 然后重写新的栈中每个栈帧的rbp值
process_rewrite_rbp(child_regs, pcb);
}
else
{
child_regs = (struct pt_regs *)((uint64_t)pcb + STACK_SIZE - sizeof(struct pt_regs));
memcpy(child_regs, current_regs, sizeof(struct pt_regs));
barrier();
child_regs->rsp = stack_start;
}
// 设置子进程的返回值为0
child_regs->rax = 0;
if (pcb->flags & PF_KFORK)
thd->rbp =
(uint64_t)(child_regs + 1); // 设置新的内核线程开始执行时的rbp也就是进入ret_from_system_call时的rbp
else
thd->rbp = (uint64_t)pcb + STACK_SIZE;
// 设置新的内核线程开始执行的时候的rsp
thd->rsp = (uint64_t)child_regs;
thd->fs = current_pcb->thread->fs;
thd->gs = current_pcb->thread->gs;
// 根据是否为内核线程、是否在内核态fork设置进程的开始执行的地址
if (pcb->flags & PF_KFORK)
thd->rip = (uint64_t)ret_from_system_call;
else if (pcb->flags & PF_KTHREAD && (!(pcb->flags & PF_KFORK)))
thd->rip = (uint64_t)kernel_thread_func;
else
thd->rip = (uint64_t)ret_from_system_call;
return 0;
}
/**
* @brief todo: 回收线程结构体
@ -1181,12 +806,12 @@ int process_release_pcb(struct process_control_block *pcb)
process_exit_mm(pcb);
if ((pcb->flags & PF_KTHREAD)) // 释放内核线程的worker private结构体
free_kthread_struct(pcb);
// 将pcb从pcb链表中移除
// todo: 对相关的pcb加锁
pcb->prev_pcb->next_pcb = pcb->next_pcb;
pcb->next_pcb->prev_pcb = pcb->prev_pcb;
// 释放当前pcb
kfree(pcb);
return 0;

View File

@ -22,14 +22,8 @@
#include "proc-types.h"
// 设置初始进程的PCB
#define INITIAL_PROC(proc) \
{ \
.state = PROC_UNINTERRUPTIBLE, .flags = PF_KTHREAD, .preempt_count = 0, .signal = 0, .cpu_id = 0, \
.mm = &initial_mm, .thread = &initial_thread, .addr_limit = 0xffffffffffffffff, .pid = 0, .priority = 2, \
.virtual_runtime = 0, .fds = {0}, .next_pcb = &proc, .prev_pcb = &proc, .parent_pcb = &proc, .exit_code = 0, \
.wait_child_proc_exit = 0, .worker_private = NULL, .policy = SCHED_NORMAL \
}
extern void process_exit_thread(struct process_control_block *pcb);
extern uint64_t process_exit_files(struct process_control_block *pcb);
/**
* @brief 任务状态段结构体
@ -96,7 +90,7 @@ unsigned long do_fork(struct pt_regs *regs, unsigned long clone_flags, unsigned
/**
* @brief 根据pid获取进程的pcb。存在对应的pcb时返回对应的pcb的指针否则返回NULL
*
* 当进程管理模块拥有pcblist_lock之后调用本函数之前应当对其加锁
* @param pid
* @return struct process_control_block*
*/
@ -106,6 +100,8 @@ struct process_control_block *process_find_pcb_by_pid(pid_t pid);
* @brief 将进程加入到调度器的就绪队列中
*
* @param pcb 进程的pcb
*
* @return 如果进程被成功唤醒则返回1,如果进程正在运行则返回0.如果pcb为NULL则返回-EINVAL
*/
int process_wakeup(struct process_control_block *pcb);
@ -197,3 +193,30 @@ extern struct process_control_block *initial_proc[MAX_CPU_NUM];
* @param pcb_name 保存名字的char数组
*/
void process_set_pcb_name(struct process_control_block *pcb, const char *pcb_name);
/**
* @brief 判断进程是否已经停止
*
* hint: 本函数在rust中实现请参考rust版本的注释
*
* @param pcb 目标pcb
* @return true
* @return false
*/
extern bool process_is_stopped(struct process_control_block *pcb);
/**
* @brief 尝试唤醒指定的进程。
* 本函数的行为If (@_state & @pcb->state) @pcb->state = TASK_RUNNING.
*
* hint: 本函数在rust中实现请参考rust版本的注释
*/
extern int process_try_to_wake_up(struct process_control_block *_pcb, uint64_t _state, int32_t _wake_flags);
/** @brief 当进程,满足 (@state & @pcb->state)时,唤醒进程,并设置: @pcb->state = TASK_RUNNING.
*
* hint: 本函数在rust中实现请参考rust版本的注释
* @return true 唤醒成功
* @return false 唤醒失败
*/
extern int process_wake_up_state(struct process_control_block *pcb, uint64_t state);

View File

@ -0,0 +1,103 @@
use core::ptr::{read_volatile, write_volatile};
use crate::{
arch::x86_64::asm::current::current_pcb,
include::bindings::bindings::{
process_control_block, sched_enqueue, PROC_RUNNING, PROC_STOPPED,
},
sched::core::cpu_executing,
smp::core::{smp_get_processor_id, smp_send_reschedule},
};
use super::preempt::{preempt_disable, preempt_enable};
/// Returns `true` when the process described by `pcb` is in the STOPPED state.
#[no_mangle]
pub extern "C" fn process_is_stopped(pcb: *const process_control_block) -> bool {
    // Volatile read: the scheduler may update `state` concurrently.
    let state: u64 = unsafe { read_volatile(&(*pcb).state) } as u64;
    // Return the predicate directly instead of an if/else over bool literals.
    (state & (PROC_STOPPED as u64)) != 0
}
/// @brief Try to wake the given process.
/// Behavior: If (@_state & @pcb->state) { @pcb->state = TASK_RUNNING }.
///
/// @param _pcb pcb of the process to wake
/// @param _state wake the process only if its state matches `_state`
/// @param _wake_flags reserved, currently unused — pass 0
/// @return true: successfully woken
///         false: wake conditions not met, nothing done
#[no_mangle]
pub extern "C" fn process_try_to_wake_up(
    _pcb: *mut process_control_block,
    _state: u64,
    _wake_flags: i32,
) -> bool {
    // Keep the check-and-enqueue sequence safe from preemption.
    preempt_disable();
    let mut retval = false;

    // Obtain a mutable reference to the pcb.
    let pcb = unsafe { _pcb.as_mut() }.unwrap();

    // Waking the currently running process: just mark it RUNNING again —
    // it is already on a CPU, so no enqueue is needed.
    if current_pcb() as *mut process_control_block as usize == _pcb as usize {
        unsafe {
            write_volatile(&mut pcb.state, PROC_RUNNING as u64);
        }
        preempt_enable();
        retval = true;
        return retval;
    }

    // todo: revisit this condition once the scheduler gains a ttwu queue
    // todo: add a pi_lock to the pcb and take it here
    if unsafe { read_volatile(&pcb.state) } & _state != 0 {
        // State matches: mark RUNNING and hand the pcb to the scheduler.
        unsafe {
            write_volatile(&mut pcb.state, PROC_RUNNING as u64);
            sched_enqueue(pcb);
        }
        retval = true;
    }

    // todo: release the pcb's pi_lock here
    preempt_enable();
    return retval;
}
/// @brief Wake the process when `(state & pcb.state)` is nonzero, setting
/// `pcb.state = TASK_RUNNING`.
///
/// @return true on a successful wakeup
/// @return false otherwise
#[no_mangle]
pub extern "C" fn process_wake_up_state(pcb: *mut process_control_block, state: u64) -> bool {
    // Thin wrapper: delegate with no wake flags.
    process_try_to_wake_up(pcb, state, 0)
}
/// @brief Force a process that is currently running on another CPU core to
/// trap into the kernel, so it can notice pending work (e.g. a signal)
/// promptly.
pub fn process_kick(pcb: *mut process_control_block) {
    // Disable preemption so our own CPU id stays stable during the check.
    preempt_disable();
    let cpu = process_cpu(pcb);
    // Only send a reschedule IPI to a remote core that is actually executing
    // this process right now.
    if cpu != smp_get_processor_id() && process_is_executing(pcb) {
        smp_send_reschedule(cpu);
    }

    preempt_enable();
}
/// @brief Get the id of the CPU core the given process runs on
/// (volatile read so the compiler cannot cache a stale value).
#[inline]
pub fn process_cpu(pcb: *const process_control_block) -> u32 {
    unsafe { read_volatile(&(*pcb).cpu_id) }
}
/// @brief Check whether the given process is currently executing on a CPU.
///
/// @param pcb the process' pcb
#[inline]
pub fn process_is_executing(pcb: *const process_control_block) -> bool {
    // The process is executing iff its CPU's "currently executing" slot
    // points at this very pcb.
    cpu_executing(process_cpu(pcb)) == pcb
}