Rewrite the CFS scheduler in Rust (#131)

* Create the new scheduler files

* Move the softirq vector into the .c file (it previously lived in the .h)

* Change process switching to "switch on return from interrupt" (see the sketch after this list)

* new: rewrite CFS in Rust

* Remove the HPET interrupt-forwarding function already made obsolete by smp

* Format the code

* Remove the redundant dunce dependency
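The third bullet is the behavioural change worth spelling out: the timer interrupt no longer performs the context switch itself; it only accounts time and marks the current task, and the actual switch is deferred to the interrupt's return path, which is exactly the interrupt context that switch_proc() (below) requires. A minimal sketch of that flow, assuming hypothetical handler names (timer_tick, on_interrupt_return) that are not part of this commit:

// Illustrative sketch only; the bit value of PF_NEED_SCHED is assumed here.
const PF_NEED_SCHED: u64 = 1 << 1;

struct Pcb {
    flags: u64,
}

/// Timer interrupt: only account time and mark the task; no switch happens here.
fn timer_tick(current: &mut Pcb, remaining_jiffies: &mut i64) {
    *remaining_jiffies -= 1;
    if *remaining_jiffies <= 0 {
        current.flags |= PF_NEED_SCHED;
    }
}

/// Interrupt epilogue: the switch is deferred to this point, so it always
/// runs in interrupt context, where calling the scheduler is safe.
fn on_interrupt_return(current: &mut Pcb, sched: impl FnOnce()) {
    if current.flags & PF_NEED_SCHED != 0 {
        sched(); // ends up in SchedulerCFS::sched(), which calls switch_process()
    }
}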
Author: login
Committed: 2022-12-31 17:26:12 +08:00 (committed by GitHub)
parent 156949680c · commit d4f3de93a2
37 changed files with 464 additions and 1027 deletions


@@ -1,163 +0,0 @@
#include "cfs.h"
#include <common/kprint.h>
#include <common/spinlock.h>
#include <driver/video/video.h>
struct sched_queue_t sched_cfs_ready_queue[MAX_CPU_NUM]; // per-CPU ready queues
/**
* @brief Take a PCB from the ready queue
*
* @return struct process_control_block*
*/
struct process_control_block *sched_cfs_dequeue()
{
if (list_empty(&sched_cfs_ready_queue[proc_current_cpu_id].proc_queue.list))
{
// kdebug("list empty, count=%d", sched_cfs_ready_queue[proc_current_cpu_id].count);
return &initial_proc_union.pcb;
}
struct process_control_block *proc = container_of(
list_next(&sched_cfs_ready_queue[proc_current_cpu_id].proc_queue.list), struct process_control_block, list);
list_del(&proc->list);
--sched_cfs_ready_queue[proc_current_cpu_id].count;
return proc;
}
/**
* @brief Add a PCB to the ready queue
*
* @param pcb
*/
void sched_cfs_enqueue(struct process_control_block *pcb)
{
if (pcb == initial_proc[proc_current_cpu_id])
return;
struct process_control_block *proc = container_of(
list_next(&sched_cfs_ready_queue[proc_current_cpu_id].proc_queue.list), struct process_control_block, list);
if ((list_empty(&sched_cfs_ready_queue[proc_current_cpu_id].proc_queue.list)) == 0)
{
while (proc->virtual_runtime < pcb->virtual_runtime)
{
proc = container_of(list_next(&proc->list), struct process_control_block, list);
}
}
list_append(&proc->list, &pcb->list);
++sched_cfs_ready_queue[proc_current_cpu_id].count;
}
/**
* @brief The scheduling function
*
*/
void sched_cfs()
{
cli();
current_pcb->flags &= ~PF_NEED_SCHED;
// kdebug("current_pcb pid= %d", current_pcb->pid);
struct process_control_block *proc = sched_cfs_dequeue();
// kdebug("sched_cfs_ready_queue[proc_current_cpu_id].count = %d",
// sched_cfs_ready_queue[proc_current_cpu_id].count);
if (current_pcb->virtual_runtime >= proc->virtual_runtime ||
!(current_pcb->state & PROC_RUNNING)) // switch when the current process's vruntime has caught up with the next one's, or it is no longer runnable
{
// kdebug("current_pcb->virtual_runtime = %d,proc->vt= %d", current_pcb->virtual_runtime,
// proc->virtual_runtime);
if (current_pcb->state &
PROC_RUNNING) // if this switch was caused by time-slice expiry, put the process back on the ready queue; otherwise another subsystem takes over its management
sched_cfs_enqueue(current_pcb);
// kdebug("proc->pid=%d, count=%d", proc->pid, sched_cfs_ready_queue[proc_current_cpu_id].count);
if (sched_cfs_ready_queue[proc_current_cpu_id].cpu_exec_proc_jiffies <= 0)
{
switch (proc->priority)
{
case 0:
case 1:
sched_cfs_ready_queue[proc_current_cpu_id].cpu_exec_proc_jiffies =
4 / sched_cfs_ready_queue[proc_current_cpu_id].count;
break;
case 2:
default:
sched_cfs_ready_queue[proc_current_cpu_id].cpu_exec_proc_jiffies =
(4 / sched_cfs_ready_queue[proc_current_cpu_id].count) << 2;
break;
}
}
barrier();
switch_proc(current_pcb, proc);
barrier();
}
else // no switch
{
// kdebug("not switch.");
sched_cfs_enqueue(proc);
if (sched_cfs_ready_queue[proc_current_cpu_id].cpu_exec_proc_jiffies <= 0)
{
switch (proc->priority)
{
case 0:
case 1:
sched_cfs_ready_queue[proc_current_cpu_id].cpu_exec_proc_jiffies =
4 / sched_cfs_ready_queue[proc_current_cpu_id].count;
break;
case 2:
default:
sched_cfs_ready_queue[proc_current_cpu_id].cpu_exec_proc_jiffies =
(4 / sched_cfs_ready_queue[proc_current_cpu_id].count) << 2;
break;
}
}
}
sti();
}
/**
* @brief Update the time slice when the timer interrupt arrives
*
*/
void sched_update_jiffies()
{
switch (current_pcb->priority)
{
case 0:
case 1:
--sched_cfs_ready_queue[proc_current_cpu_id].cpu_exec_proc_jiffies;
++current_pcb->virtual_runtime;
break;
case 2:
default:
sched_cfs_ready_queue[proc_current_cpu_id].cpu_exec_proc_jiffies -= 2;
current_pcb->virtual_runtime += 2;
break;
}
// time slice exhausted: mark the process as schedulable
if (sched_cfs_ready_queue[proc_current_cpu_id].cpu_exec_proc_jiffies <= 0)
current_pcb->flags |= PF_NEED_SCHED;
}
/**
* @brief Initialize the CFS scheduler
*
*/
void sched_cfs_init()
{
memset(&sched_cfs_ready_queue, 0, sizeof(struct sched_queue_t) * MAX_CPU_NUM);
for (int i = 0; i < MAX_CPU_NUM; ++i)
{
list_init(&sched_cfs_ready_queue[i].proc_queue.list);
sched_cfs_ready_queue[i].count = 1; // 1 because the IDLE process always exists
sched_cfs_ready_queue[i].cpu_exec_proc_jiffies = 5;
sched_cfs_ready_queue[i].proc_queue.virtual_runtime = 0x7fffffffffffffff;
}
}
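For reference against the Rust rewrite below, which currently hard-codes a 10-jiffy slice in update_cpu_exec_proc_jiffies, the deleted C scheduler derived the slice from the run-queue length and the priority: priorities 0 and 1 receive 4 / count jiffies, priority 2 and above receive four times that. A small Rust transcription of that formula, purely illustrative:

/// Illustrative transcription of the legacy C time-slice formula.
/// count is the length of the per-CPU ready queue; the old code kept it
/// at least 1 because the IDLE process is always counted.
fn legacy_cpu_exec_proc_jiffies(priority: i64, count: i64) -> i64 {
    match priority {
        0 | 1 => 4 / count,
        _ => (4 / count) << 2,
    }
}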

kernel/src/sched/cfs.rs (new file, 201 lines)

@@ -0,0 +1,201 @@
use core::{
ptr::null_mut,
sync::atomic::compiler_fence,
};
use alloc::{boxed::Box, vec::Vec};
use crate::{
arch::{
asm::current::current_pcb,
context::switch_process,
},
include::bindings::bindings::{
initial_proc_union, process_control_block, MAX_CPU_NUM, PF_NEED_SCHED,
PROC_RUNNING,
},
kBUG,
libs::spinlock::RawSpinlock,
};
use super::core::Scheduler;
/// Global CFS scheduler instance
pub static mut CFS_SCHEDULER_PTR: *mut SchedulerCFS = null_mut();
/// @brief Get a mutable reference to the CFS scheduler instance
#[inline]
pub fn __get_cfs_scheduler() -> &'static mut SchedulerCFS {
return unsafe { CFS_SCHEDULER_PTR.as_mut().unwrap() };
}
/// @brief Initialize the CFS scheduler
pub unsafe fn sched_cfs_init() {
if CFS_SCHEDULER_PTR.is_null() {
CFS_SCHEDULER_PTR = Box::leak(Box::new(SchedulerCFS::new()));
} else {
kBUG!("Try to init CFS Scheduler twice.");
panic!("Try to init CFS Scheduler twice.");
}
}
/// @brief Per-CPU CFS run queue
#[derive(Debug)]
struct CFSQueue {
/// Remaining time slice of the process currently running on this cpu
cpu_exec_proc_jiffies: i64,
/// Lock protecting the queue
lock: RawSpinlock,
/// Queue of processes
queue: Vec<&'static mut process_control_block>,
}
impl CFSQueue {
pub fn new() -> CFSQueue {
CFSQueue {
cpu_exec_proc_jiffies: 0,
lock: RawSpinlock::INIT,
queue: Vec::new(),
}
}
/// @brief Keep the processes ordered by virtual runtime, ascending
/// todo: replace this sort: it is a merge sort underneath, which costs extra memory and runs in O(n log n), while a scan-and-insert approach is O(n) in the worst case (a sketch follows after this listing)
pub fn sort(&mut self) {
self.queue
.sort_by(|a, b| (*a).virtual_runtime.cmp(&(*b).virtual_runtime));
}
/// @brief Push a pcb onto the queue
pub fn enqueue(&mut self, pcb: &'static mut process_control_block) {
self.lock.lock();
// The IDLE process is never enqueued
if pcb.pid == 0 {
self.lock.unlock();
return;
}
self.queue.push(pcb);
self.sort();
self.lock.unlock();
}
/// @brief Pop a pcb off the run queue; if the queue is empty, return the IDLE process's pcb
pub fn dequeue(&mut self) -> &'static mut process_control_block {
let res: &'static mut process_control_block;
self.lock.lock();
if self.queue.len() > 0 {
// queue not empty: return the next pcb to run
res = self.queue.pop().unwrap();
} else {
// queue empty: return the IDLE process's pcb
res = unsafe { &mut initial_proc_union.pcb };
}
self.lock.unlock();
return res;
}
}
/// @brief The CFS scheduler
pub struct SchedulerCFS {
cpu_queue: Vec<&'static mut CFSQueue>,
}
impl SchedulerCFS {
pub fn new() -> SchedulerCFS {
// number of cores is specified manually for now
// todo: obtain the core count from the cpu module
let mut result = SchedulerCFS {
cpu_queue: Default::default(),
};
// create one queue per cpu core
for _ in 0..MAX_CPU_NUM {
result.cpu_queue.push(Box::leak(Box::new(CFSQueue::new())));
}
return result;
}
/// @brief Update how long the process on this cpu may keep running.
#[inline]
fn update_cpu_exec_proc_jiffies(_priority: i64, cfs_queue: &mut CFSQueue) -> &mut CFSQueue {
// todo: derive the slice from a scheduling period and the priorities of all processes, then assign it to the process
cfs_queue.cpu_exec_proc_jiffies = 10;
return cfs_queue;
}
/// @brief Called by sched's core module when the timer interrupt arrives, to update the CFS process's runnable time
pub fn timer_update_jiffies(&mut self) {
let current_cpu_queue: &mut CFSQueue = self.cpu_queue[current_pcb().cpu_id as usize];
// todo: derive this from a scheduling period and the priorities of all processes, then set the process's runnable time
// update the process's remaining runnable time
current_cpu_queue.lock.lock();
current_cpu_queue.cpu_exec_proc_jiffies -= 1;
// time slice exhausted: mark the process as needing to be rescheduled
if current_cpu_queue.cpu_exec_proc_jiffies <= 0 {
current_pcb().flags |= PF_NEED_SCHED as u64;
}
current_cpu_queue.lock.unlock();
// update the current process's virtual runtime
current_pcb().virtual_runtime += 1;
}
}
impl Scheduler for SchedulerCFS {
/// @brief Schedule on the current cpu.
/// Note: interrupts must be disabled before entering this function
fn sched(&mut self) {
// kdebug!("cfs:sched");
current_pcb().flags &= !(PF_NEED_SCHED as u64);
let current_cpu_id = current_pcb().cpu_id as usize;
let current_cpu_queue: &mut CFSQueue = self.cpu_queue[current_cpu_id];
let proc: &'static mut process_control_block = current_cpu_queue.dequeue();
compiler_fence(core::sync::atomic::Ordering::SeqCst);
// a switch is needed if the current process is not in the running state, or its virtual runtime is >= the next process's
if (current_pcb().state & (PROC_RUNNING as u64)) == 0
|| current_pcb().virtual_runtime >= proc.virtual_runtime
{
compiler_fence(core::sync::atomic::Ordering::SeqCst);
// if this switch was caused by time-slice expiry, put the process back on the run queue; otherwise another subsystem takes over its management
if current_pcb().state & (PROC_RUNNING as u64) != 0 {
// kdebug!("cfs:sched->enqueue");
current_cpu_queue.enqueue(current_pcb());
compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
compiler_fence(core::sync::atomic::Ordering::SeqCst);
// set how long the process may run
if current_cpu_queue.cpu_exec_proc_jiffies <= 0 {
SchedulerCFS::update_cpu_exec_proc_jiffies(proc.priority, current_cpu_queue);
}
compiler_fence(core::sync::atomic::Ordering::SeqCst);
switch_process(current_pcb(), proc);
compiler_fence(core::sync::atomic::Ordering::SeqCst);
} else {
// no switch
// set how long the process may run
compiler_fence(core::sync::atomic::Ordering::SeqCst);
if current_cpu_queue.cpu_exec_proc_jiffies <= 0 {
SchedulerCFS::update_cpu_exec_proc_jiffies(proc.priority, current_cpu_queue);
}
compiler_fence(core::sync::atomic::Ordering::SeqCst);
current_cpu_queue.enqueue(proc);
compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
fn enqueue(&mut self, pcb: &'static mut process_control_block) {
let cpu_queue = &mut self.cpu_queue[pcb.cpu_id as usize];
cpu_queue.enqueue(pcb);
}
}
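The todo in CFSQueue::sort above asks for an ordered insertion instead of re-sorting the whole Vec on every enqueue. A minimal sketch of that idea, written against a simplified stand-in type (the real queue stores &'static mut process_control_block and is guarded by RawSpinlock, both omitted here):

/// Stand-in for process_control_block, for illustration only.
struct FakePcb {
    virtual_runtime: i64,
}

struct SortedQueue {
    /// Kept ordered by virtual_runtime, ascending (smallest vruntime at index 0).
    queue: Vec<FakePcb>,
}

impl SortedQueue {
    /// Binary-search for the insertion point, then insert: no full sort needed.
    fn enqueue(&mut self, pcb: FakePcb) {
        let pos = self
            .queue
            .partition_point(|p| p.virtual_runtime <= pcb.virtual_runtime);
        self.queue.insert(pos, pcb);
    }

    /// The process with the smallest virtual runtime is the natural CFS pick.
    fn dequeue(&mut self) -> Option<FakePcb> {
        if self.queue.is_empty() {
            None
        } else {
            Some(self.queue.remove(0))
        }
    }
}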

kernel/src/sched/core.c (new file, 14 lines)

@@ -0,0 +1,14 @@
#include "sched.h"
/**
* @brief Switch the process context. Note: this function may only be called from interrupt context
* TODO: rewrite this in Rust
* @param prev pcb of the previous process
* @param proc pcb of the next process
*/
void switch_proc(struct process_control_block *prev, struct process_control_block *proc)
{
process_switch_mm(proc);
io_mfence();
switch_to(prev, proc);
}


@@ -1,13 +1,85 @@
use crate::{include::bindings::bindings::process_control_block, process::process::process_cpu, arch::asm::current::current_pcb};
use core::sync::atomic::compiler_fence;
use crate::{
arch::asm::{current::current_pcb, ptrace::user_mode},
include::bindings::bindings::{process_control_block, pt_regs, EPERM, SCHED_NORMAL},
process::process::process_cpu,
};
use super::cfs::{sched_cfs_init, SchedulerCFS, __get_cfs_scheduler};
/// @brief Get the pcb of the process currently executing on the specified cpu
#[inline]
pub fn cpu_executing(cpu_id:u32) -> *const process_control_block{
pub fn cpu_executing(cpu_id: u32) -> &'static mut process_control_block {
// todo: once per_cpu support is in place, make this actually return the pcb running on the specified cpu
if cpu_id == process_cpu(current_pcb()){
if cpu_id == process_cpu(current_pcb()) {
return current_pcb();
}else {
} else {
todo!()
}
}
}
/// @brief Trait that a concrete scheduler must implement
pub trait Scheduler {
/// @brief Function to call when a schedule is requested through this scheduler
fn sched(&mut self);
/// @brief Add a pcb to this scheduler's run queue
fn enqueue(&mut self, pcb: &'static mut process_control_block);
}
fn __sched() {
compiler_fence(core::sync::atomic::Ordering::SeqCst);
let cfs_scheduler: &mut SchedulerCFS = __get_cfs_scheduler();
compiler_fence(core::sync::atomic::Ordering::SeqCst);
cfs_scheduler.sched();
compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
/// @brief Add a process to the run queue
#[allow(dead_code)]
#[no_mangle]
pub extern "C" fn sched_enqueue(pcb: &'static mut process_control_block) {
let cfs_scheduler = __get_cfs_scheduler();
cfs_scheduler.enqueue(pcb);
}
/// @brief Initialize the process scheduler module
#[allow(dead_code)]
#[no_mangle]
pub extern "C" fn sched_init() {
unsafe {
sched_cfs_init();
}
}
/// @brief Update the time slice when the timer interrupt arrives
/// Note: this function may only be called by the timer interrupt handler
#[allow(dead_code)]
#[no_mangle]
pub extern "C" fn sched_update_jiffies() {
match current_pcb().policy {
SCHED_NORMAL => {
__get_cfs_scheduler().timer_update_jiffies();
}
_ => {
todo!()
}
}
}
/// @brief System call that makes the system run the scheduler immediately
/// Note: this system call must not be issued by a ring 3 program
#[allow(dead_code)]
#[no_mangle]
pub extern "C" fn sys_sched(regs: &'static mut pt_regs) -> u64 {
// permission check: refuse schedule requests from user mode
if user_mode(regs) {
return (-(EPERM as i64)) as u64;
}
__sched();
0
}
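The Scheduler trait defined above is the extension point this refactor introduces: a scheduling policy only has to supply sched() and enqueue(), and core.rs decides which policy to dispatch to (currently SCHED_NORMAL goes to CFS). As a hedged illustration that is not part of this commit, a toy first-in-first-out policy could take the same shape:

// Toy policy sketching the shape of the Scheduler trait; Task stands in for
// process_control_block, so this is illustrative rather than kernel code.
struct Task {
    id: usize,
}

trait Scheduler {
    fn sched(&mut self);
    fn enqueue(&mut self, task: Task);
}

/// First-in-first-out: always run the task that has waited the longest.
struct SchedulerFifo {
    queue: Vec<Task>,
}

impl Scheduler for SchedulerFifo {
    fn sched(&mut self) {
        if !self.queue.is_empty() {
            let next = self.queue.remove(0);
            // A real policy would context-switch to `next` here.
            let _ = next.id;
        }
    }

    fn enqueue(&mut self, task: Task) {
        self.queue.push(task);
    }
}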


@@ -1 +1,2 @@
pub mod core;
pub mod core;
pub mod cfs;


@@ -1,95 +0,0 @@
#include "sched.h"
#include <common/kprint.h>
#include <common/spinlock.h>
#include <common/string.h>
#include <driver/video/video.h>
#include <sched/cfs.h>
/**
* @brief
*
* @param p pcb
* @param attr scheduling attributes
* @param user whether the request comes from user space
* @param pi
* @return int
*/
static int __sched_setscheduler(struct process_control_block *p, const struct sched_attr *attr, bool user, bool pi)
{
int policy = attr->sched_policy;
recheck:;
// policy may be set below 0 here because, after the value is updated inside the critical section, control returns to this point (recheck) for re-validation
if (!IS_VALID_SCHED_POLICY(policy))
{
return -EINVAL;
}
// modification succeeded
p->policy = policy;
return 0;
}
static int _sched_setscheduler(struct process_control_block *p, int policy, const struct sched_param *param, bool check)
{
struct sched_attr attr = {.sched_policy = policy};
return __sched_setscheduler(p, &attr, check, true);
}
/**
* sched_setscheduler - set a process's scheduling policy
* @param p the pcb to modify
* @param policy the policy to set
* @param param structure containing the new RT priority; currently unused
*
* @return 0 on success, otherwise the corresponding error code
*
*/
int sched_setscheduler(struct process_control_block *p, int policy, const struct sched_param *param)
{
return _sched_setscheduler(p, policy, param, true);
}
/**
* @brief Wrapper around sched_cfs_enqueue(); adds the PCB to the ready queue
*
* @param pcb
*/
void sched_enqueue(struct process_control_block *pcb)
{
sched_cfs_enqueue(pcb);
}
/**
* @brief This function may only be called by sys_sched
*
*/
static void __sched()
{
sched_cfs();
}
void sched_init()
{
sched_cfs_init();
}
uint64_t sys_sched(struct pt_regs *regs)
{
if(user_mode(regs)){
return -EPERM;
}
__sched();
}
void sched()
{
enter_syscall_int(SYS_SCHED, 0, 0, 0, 0, 0, 0, 0, 0);
}
void switch_proc(struct process_control_block *prev, struct process_control_block *proc)
{
process_switch_mm(proc);
io_mfence();
switch_to(prev, proc);
}


@@ -17,64 +17,61 @@
#define IS_VALID_SCHED_POLICY(_policy) ((_policy) > 0 && (_policy) <= SCHED_MAX_POLICY_NUM)
struct sched_param
{
int sched_priority;
};
struct sched_attr
{
uint32_t size;
uint32_t sched_policy;
uint64_t sched_flags;
/* SCHED_NORMAL, SCHED_BATCH */
int32_t sched_nice;
// struct sched_param
// {
// int sched_priority;
// };
// struct sched_attr
// {
// uint32_t size;
/* SCHED_FIFO, SCHED_RR */
uint32_t sched_priority;
// uint32_t sched_policy;
// uint64_t sched_flags;
/* SCHED_DEADLINE */
uint64_t sched_runtime;
uint64_t sched_deadline;
uint64_t sched_period;
// /* SCHED_NORMAL, SCHED_BATCH */
// int32_t sched_nice;
/* Utilization hints */
uint32_t sched_util_min;
uint32_t sched_util_max;
};
// /* SCHED_FIFO, SCHED_RR */
// uint32_t sched_priority;
// /* SCHED_DEADLINE */
// uint64_t sched_runtime;
// uint64_t sched_deadline;
// uint64_t sched_period;
// /* Utilization hints */
// uint32_t sched_util_min;
// uint32_t sched_util_max;
// };
// static int __sched_setscheduler(struct process_control_block *p, const struct sched_attr *attr, bool user, bool pi);
// static int _sched_setscheduler(struct process_control_block *p, int policy, const struct sched_param *param,
// bool check);
// /**
// * sched_setscheduler - set a process's scheduling policy
// * @param p the pcb to modify
// * @param policy the policy to set
// * @param param structure containing the new RT priority; currently unused
// *
// * @return 0 on success, otherwise the corresponding error code
// *
// */
// int sched_setscheduler(struct process_control_block *p, int policy, const struct sched_param *param);
static int __sched_setscheduler(struct process_control_block *p, const struct sched_attr *attr, bool user, bool pi);
static int _sched_setscheduler(struct process_control_block *p, int policy, const struct sched_param *param,
bool check);
/**
* sched_setscheduler - set a process's scheduling policy
* @param p the pcb to modify
* @param policy the policy to set
* @param param structure containing the new RT priority; currently unused
*
* @return 0 on success, otherwise the corresponding error code
*
*/
int sched_setscheduler(struct process_control_block *p, int policy, const struct sched_param *param);
/**
* @brief Wraps sched_enqueue(); adds the PCB to the ready queue
*
* @param pcb
*/
void sched_enqueue(struct process_control_block *pcb);
/**
* @brief Wraps sched_cfs(); the scheduling function
*
*/
void sched();
void sched_init();
// ================= Implemented in Rust =============
extern void sched_update_jiffies();
extern void sched_init();
extern void sched();
extern void sched_enqueue(struct process_control_block *pcb);
extern void sched();
/**
* @brief Update the time slice when the timer interrupt arrives
*
*/
void sched_update_jiffies();
void switch_proc(struct process_control_block *prev, struct process_control_block *proc);
void switch_proc(struct process_control_block *prev, struct process_control_block *proc);
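The extern declarations under the "Implemented in Rust" banner are the C-visible surface of the new scheduler. They line up with the #[no_mangle] extern "C" functions in core.rs; a compressed sketch of the pattern, assuming nothing beyond what the diff shows:

// C side (sched.h):    extern void sched_update_jiffies();
// Rust side (core.rs): export the symbol with C linkage and an unmangled
// name, so the C timer interrupt handler can call straight into Rust.
#[no_mangle]
pub extern "C" fn sched_update_jiffies() {
    // The real body (see core.rs above) dispatches on current_pcb().policy and
    // forwards SCHED_NORMAL tasks to the CFS scheduler's timer_update_jiffies().
}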