Patch sched rust (#139)

* update

* Add an initial Rust implementation of the RT scheduler

* Refine the RT scheduling logic

* Debug the RT scheduler

* Change the return value of sched

* Format with cargo fmt

* Remove dead code and fix an RT bug

* Remove unused code and duplicated logic

* Fix a softirq bug

* Remove some code

* Add documentation for kthread_run_rt

* Fix the Sphinx warning about the missing _static directory

Co-authored-by: longjin <longjin@RinGoTek.cn>
Author: kong
Date: 2023-01-14 22:38:05 +08:00
Committed by: GitHub
Parent: ec53d23ed0
Commit: 06b09f34ed

44 changed files with 426 additions and 173 deletions
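The heart of this patch is a change to the Scheduler trait: sched() no longer performs the context switch itself; it returns the picked task (or None), and the single caller in sys_sched does the switch. Below is a minimal, self-contained sketch of the new contract; Pcb, Fifo, and this switch_process are simplified stand-ins for illustration, not the kernel's real types:

struct Pcb {
    pid: usize,
}

trait Scheduler {
    // Return the task to switch to, or None to keep the current task running.
    fn sched(&mut self) -> Option<&'static mut Pcb>;
}

struct Fifo {
    ready: Vec<&'static mut Pcb>,
}

impl Scheduler for Fifo {
    fn sched(&mut self) -> Option<&'static mut Pcb> {
        self.ready.pop()
    }
}

fn switch_process(_prev: &mut Pcb, next: &mut Pcb) {
    println!("switch to pid {}", next.pid);
}

fn main() {
    let mut current = Pcb { pid: 0 };
    let mut scheduler = Fifo {
        ready: vec![Box::leak(Box::new(Pcb { pid: 1 }))],
    };
    // Mirrors the new sys_sched: the caller, not the scheduler, switches.
    if let Some(next) = scheduler.sched() {
        switch_process(&mut current, next);
    }
}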

kernel/src/sched/cfs.rs

@@ -1,24 +1,17 @@
use core::{
ptr::null_mut,
sync::atomic::compiler_fence,
};
use core::{ptr::null_mut, sync::atomic::compiler_fence};
use alloc::{boxed::Box, vec::Vec};
use crate::{
arch::{
asm::current::current_pcb,
context::switch_process,
},
arch::asm::current::current_pcb,
include::bindings::bindings::{
initial_proc_union, process_control_block, MAX_CPU_NUM, PF_NEED_SCHED,
PROC_RUNNING,
initial_proc_union, process_control_block, MAX_CPU_NUM, PF_NEED_SCHED, PROC_RUNNING,
},
kBUG,
libs::spinlock::RawSpinlock,
};
use super::core::Scheduler;
use super::core::{sched_enqueue, Scheduler};
/// Declare the global CFS scheduler instance
@@ -149,8 +142,7 @@ impl SchedulerCFS {
impl Scheduler for SchedulerCFS {
/// @brief Perform scheduling on the current CPU.
/// Note: interrupts must be disabled before entering this function
fn sched(&mut self) {
// kdebug!("cfs:sched");
fn sched(&mut self) -> Option<&'static mut process_control_block> {
current_pcb().flags &= !(PF_NEED_SCHED as u64);
let current_cpu_id = current_pcb().cpu_id as usize;
let current_cpu_queue: &mut CFSQueue = self.cpu_queue[current_cpu_id];
@@ -163,8 +155,7 @@ impl Scheduler for SchedulerCFS {
compiler_fence(core::sync::atomic::Ordering::SeqCst);
// If this switch was triggered by time-slice expiry, re-enqueue the process; otherwise leave it to other modules to manage
if current_pcb().state & (PROC_RUNNING as u64) != 0 {
// kdebug!("cfs:sched->enqueue");
current_cpu_queue.enqueue(current_pcb());
sched_enqueue(current_pcb());
compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
@@ -175,9 +166,7 @@ impl Scheduler for SchedulerCFS {
}
compiler_fence(core::sync::atomic::Ordering::SeqCst);
switch_process(current_pcb(), proc);
compiler_fence(core::sync::atomic::Ordering::SeqCst);
return Some(proc);
} else {
// Do not switch
@@ -188,10 +177,11 @@ impl Scheduler for SchedulerCFS {
}
compiler_fence(core::sync::atomic::Ordering::SeqCst);
current_cpu_queue.enqueue(proc);
sched_enqueue(proc);
compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
compiler_fence(core::sync::atomic::Ordering::SeqCst);
return None;
}
fn enqueue(&mut self, pcb: &'static mut process_control_block) {
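In cfs.rs above, the re-enqueue on time-slice expiry now goes through the central sched_enqueue rather than the CFS queue directly, and the picked process is returned instead of being switched to. A rough user-space model of the reworked flow; Task, ready, and cfs_sched are illustrative names, not the kernel's:

struct Task {
    running: bool,
}

// Stand-in for the central sched_enqueue, which dispatches on the task's
// policy (see the core.rs diff below).
fn sched_enqueue(_t: &Task) {}

fn cfs_sched<'a>(current: &Task, ready: &mut Vec<&'a mut Task>) -> Option<&'a mut Task> {
    // The current task is still runnable, so its slice simply expired:
    // put it back on a run queue via the central enqueue path.
    if current.running {
        sched_enqueue(current);
    }
    // Hand the next task back to the caller, which performs the switch.
    ready.pop()
}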

kernel/src/sched/core.rs

@@ -2,11 +2,16 @@ use core::sync::atomic::compiler_fence;
use crate::{
arch::asm::{current::current_pcb, ptrace::user_mode},
include::bindings::bindings::{process_control_block, pt_regs, EPERM, SCHED_NORMAL},
arch::context::switch_process,
include::bindings::bindings::{
process_control_block, pt_regs, EPERM, PROC_RUNNING, SCHED_FIFO, SCHED_NORMAL, SCHED_RR,
},
kdebug,
process::process::process_cpu,
};
use super::cfs::{sched_cfs_init, SchedulerCFS, __get_cfs_scheduler};
use super::rt::{sched_rt_init, SchedulerRT, __get_rt_scheduler};
/// @brief Get the PCB of the process currently running on the specified CPU
#[inline]
@@ -23,28 +28,50 @@ pub fn cpu_executing(cpu_id: u32) -> &'static mut process_control_block {
/// @brief Trait that every concrete scheduler must implement
pub trait Scheduler {
/// @brief The function to call when initiating scheduling with this scheduler
fn sched(&mut self);
fn sched(&mut self) -> Option<&'static mut process_control_block>;
/// @brief Add a PCB to this scheduler's run queue
fn enqueue(&mut self, pcb: &'static mut process_control_block);
}
fn __sched() {
fn __sched() -> Option<&'static mut process_control_block> {
compiler_fence(core::sync::atomic::Ordering::SeqCst);
let cfs_scheduler: &mut SchedulerCFS = __get_cfs_scheduler();
let rt_scheduler: &mut SchedulerRT = __get_rt_scheduler();
compiler_fence(core::sync::atomic::Ordering::SeqCst);
cfs_scheduler.sched();
compiler_fence(core::sync::atomic::Ordering::SeqCst);
let next: &'static mut process_control_block;
match rt_scheduler.pick_next_task_rt() {
Some(p) => {
next = p;
// kdebug!("next pcb is {}",next.pid);
// rt_scheduler.enqueue_task_rt(next.priority as usize, next);
sched_enqueue(next);
return rt_scheduler.sched();
}
None => {
return cfs_scheduler.sched();
}
}
}
/// @brief Add a process to the scheduler's run queue
#[allow(dead_code)]
#[no_mangle]
pub extern "C" fn sched_enqueue(pcb: &'static mut process_control_block) {
// The scheduler does not handle processes whose running bit is 0
if pcb.state & (PROC_RUNNING as u64) == 0 {
return;
}
let cfs_scheduler = __get_cfs_scheduler();
cfs_scheduler.enqueue(pcb);
let rt_scheduler = __get_rt_scheduler();
if pcb.policy == SCHED_NORMAL {
cfs_scheduler.enqueue(pcb);
} else if pcb.policy == SCHED_FIFO || pcb.policy == SCHED_RR {
rt_scheduler.enqueue(pcb);
} else {
panic!("This policy is not supported at this time");
}
}
/// @brief Initialize the process scheduler module
@@ -53,6 +80,7 @@ pub extern "C" fn sched_enqueue(pcb: &'static mut process_control_block) {
pub extern "C" fn sched_init() {
unsafe {
sched_cfs_init();
sched_rt_init();
}
}
@@ -65,6 +93,9 @@ pub extern "C" fn sched_update_jiffies() {
SCHED_NORMAL => {
__get_cfs_scheduler().timer_update_jiffies();
}
SCHED_FIFO | SCHED_RR => {
current_pcb().rt_time_slice -= 1;
}
_ => {
todo!()
}
@@ -80,6 +111,10 @@ pub extern "C" fn sys_sched(regs: &'static mut pt_regs) -> u64 {
if user_mode(regs) {
return (-(EPERM as i64)) as u64;
}
__sched();
// Perform the switch in one place, based on the scheduling result
let pcb = __sched();
if pcb.is_some() {
switch_process(current_pcb(), pcb.unwrap());
}
0
}
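The new sched_enqueue routes a task by its scheduling policy: SCHED_NORMAL goes to the CFS scheduler, SCHED_FIFO and SCHED_RR go to the RT scheduler, and any other policy panics. A minimal sketch of the dispatch, with assumed constant values; the kernel takes the real ones from the generated bindings:

const SCHED_NORMAL: u32 = 0; // assumed value; from bindings in the kernel
const SCHED_FIFO: u32 = 1; // assumed value
const SCHED_RR: u32 = 2; // assumed value
const PROC_RUNNING: u64 = 1; // assumed value

struct Task {
    state: u64,
    policy: u32,
}

fn sched_enqueue(task: &Task) {
    // The scheduler ignores tasks whose running bit is 0, as in the patch.
    if task.state & PROC_RUNNING == 0 {
        return;
    }
    match task.policy {
        SCHED_NORMAL => println!("-> CFS run queue"),
        SCHED_FIFO | SCHED_RR => println!("-> RT run queue"),
        _ => panic!("This policy is not supported at this time"),
    }
}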

kernel/src/sched/mod.rs

@@ -1,2 +1,3 @@
pub mod cfs;
pub mod core;
pub mod cfs;
pub mod rt;

kernel/src/sched/rt.rs (new file, 173 lines)

@@ -0,0 +1,173 @@
use core::{ptr::null_mut, sync::atomic::compiler_fence};
use alloc::{boxed::Box, vec::Vec};
use crate::{
arch::asm::current::current_pcb,
include::bindings::bindings::{
initial_proc_union, process_control_block, PF_NEED_SCHED, SCHED_FIFO, SCHED_NORMAL,
SCHED_RR,
},
kBUG, kdebug,
libs::spinlock::RawSpinlock,
};
use super::core::{sched_enqueue, Scheduler};
/// Declare the global RT scheduler instance
pub static mut RT_SCHEDULER_PTR: *mut SchedulerRT = null_mut();
/// @brief Get a mutable reference to the RT scheduler instance
#[inline]
pub fn __get_rt_scheduler() -> &'static mut SchedulerRT {
return unsafe { RT_SCHEDULER_PTR.as_mut().unwrap() };
}
/// @brief Initialize the RT scheduler
pub unsafe fn sched_rt_init() {
kdebug!("test rt init");
if RT_SCHEDULER_PTR.is_null() {
RT_SCHEDULER_PTR = Box::leak(Box::new(SchedulerRT::new()));
} else {
kBUG!("Try to init RT Scheduler twice.");
panic!("Try to init RT Scheduler twice.");
}
}
/// @brief Per-CPU RT queue
#[derive(Debug)]
struct RTQueue {
/// Lock protecting the queue
lock: RawSpinlock,
/// Queue of processes
queue: Vec<&'static mut process_control_block>,
}
impl RTQueue {
pub fn new() -> RTQueue {
RTQueue {
queue: Vec::new(),
lock: RawSpinlock::INIT,
}
}
/// @brief Add a PCB to the queue
pub fn enqueue(&mut self, pcb: &'static mut process_control_block) {
self.lock.lock();
// Do not enqueue the IDLE process
if pcb.pid == 0 {
self.lock.unlock();
return;
}
self.queue.push(pcb);
self.lock.unlock();
}
/// @brief Pop a PCB from the run queue; returns None if the queue is empty
pub fn dequeue(&mut self) -> Option<&'static mut process_control_block> {
let res: Option<&'static mut process_control_block>;
self.lock.lock();
if self.queue.len() > 0 {
// The queue is not empty: return the next PCB to run
res = Some(self.queue.pop().unwrap());
} else {
// The queue is empty: return None
res = None;
}
self.lock.unlock();
return res;
}
}
/// @brief The RT scheduler
pub struct SchedulerRT {
cpu_queue: Vec<&'static mut RTQueue>,
}
impl SchedulerRT {
const RR_TIMESLICE: i64 = 100;
const MAX_RT_PRIO: i64 = 100;
pub fn new() -> SchedulerRT {
// The number of cores is hard-coded for now
// TODO: get the number of cores from the cpu module
let mut result = SchedulerRT {
cpu_queue: Default::default(),
};
// Create a queue for each CPU core
for _ in 0..SchedulerRT::MAX_RT_PRIO {
result.cpu_queue.push(Box::leak(Box::new(RTQueue::new())));
}
return result;
}
/// @brief Pick the next runnable RT process
pub fn pick_next_task_rt(&mut self) -> Option<&'static mut process_control_block> {
// Search the queues in order until a process is found
// This should iterate over the number of priorities, not the number of CPUs; needs fixing
for i in 0..SchedulerRT::MAX_RT_PRIO {
let cpu_queue_i: &mut RTQueue = self.cpu_queue[i as usize];
let proc: Option<&'static mut process_control_block> = cpu_queue_i.dequeue();
if proc.is_some() {
return proc;
}
}
// Return None
None
}
}
impl Scheduler for SchedulerRT {
/// @brief Perform scheduling on the current CPU.
/// Note: interrupts must be disabled before entering this function
fn sched(&mut self) -> Option<&'static mut process_control_block> {
current_pcb().flags &= !(PF_NEED_SCHED as u64);
// On the normal path a next PCB is always picked; if it is None, raise an error
let proc: &'static mut process_control_block =
self.pick_next_task_rt().expect("No RT process found");
// Under the FIFO policy, a process keeps the CPU until a higher-priority task becomes ready (equal priority is not enough) or it gives up the CPU voluntarily (e.g. to wait for a resource)
if proc.policy == SCHED_FIFO {
// If the picked process has a lower priority than the current one, do not switch
if proc.priority <= current_pcb().priority {
sched_enqueue(proc);
} else {
// Put the current process back on the run queue
sched_enqueue(current_pcb());
compiler_fence(core::sync::atomic::Ordering::SeqCst);
return Some(proc);
}
}
// The RR policy must account for the time slice
else if proc.policy == SCHED_RR {
// Same or higher priority: consider switching
if proc.priority >= current_pcb().priority {
// Check whether this process's time slice is exhausted; if so, reset it to the initial value and re-enqueue
if proc.rt_time_slice <= 0 {
proc.rt_time_slice = SchedulerRT::RR_TIMESLICE;
proc.flags |= PF_NEED_SCHED as u64;
sched_enqueue(proc);
}
// The target process still has time slice left: switch to it
else {
// Put the current process back on the run queue
sched_enqueue(current_pcb());
compiler_fence(core::sync::atomic::Ordering::SeqCst);
return Some(proc);
}
}
// The current process has a higher priority, so it must be a real-time process; re-enqueue the picked process
else {
sched_enqueue(proc);
}
}
return None;
}
fn enqueue(&mut self, pcb: &'static mut process_control_block) {
let cpu_queue = &mut self.cpu_queue[pcb.cpu_id as usize];
cpu_queue.enqueue(pcb);
}
}
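Two behaviours of the new RT scheduler are worth noting. RTQueue stores tasks in a Vec and uses push/pop, so dequeue returns the most recently enqueued task (LIFO); a VecDeque with push_back/pop_front would yield FIFO order instead. And the FIFO/RR branch in sched() condenses to a small decision function; here is a user-space model of it, following the source's convention that the larger priority value wins the comparison (rt_decide and Decision are illustrative names, not part of the kernel):

const RR_TIMESLICE: i64 = 100;

enum Decision {
    Switch,      // sched() returns Some(next): the caller switches to it
    KeepCurrent, // sched() returns None: the current task keeps the CPU
}

fn rt_decide(fifo: bool, next_prio: i64, cur_prio: i64, next_slice: &mut i64) -> Decision {
    if fifo {
        // FIFO: only a strictly higher-priority task preempts the current one.
        if next_prio > cur_prio {
            Decision::Switch
        } else {
            Decision::KeepCurrent
        }
    } else if next_prio >= cur_prio {
        // RR: an exhausted slice is refilled and the task is re-enqueued;
        // otherwise switch to the picked task.
        if *next_slice <= 0 {
            *next_slice = RR_TIMESLICE;
            Decision::KeepCurrent
        } else {
            Decision::Switch
        }
    } else {
        // The current task has higher priority: the picked task goes back on the queue.
        Decision::KeepCurrent
    }
}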