Block IO Scheduler (#158)

* Block IO scheduler
* On process_wakeup, reset the virtual runtime of woken CFS processes. This fixes starvation of other processes caused by a sleeping process waking with a far-too-small virtual runtime (a sketch of the idea follows the commit metadata below).

* 1. Start the apic_timer on the AP cores so that they can run the scheduler
2. Add a kick_cpu facility to make a specific core run the scheduler immediately
3. Change wait_queue wakeup to wake the waiter immediately
4. Add support for migrating processes between cores
5. The CFS scheduler sets up a dedicated IDLE process pcb for each core (all with pid 0)
6. Add a migrate_to field to the pcb
7. On multi-core machines, the IO scheduler runs on core 1

* Move the IO scheduler files to a new location

* Update the IO makefile

* Rename variables in the makefile

* Rename the IO scheduler functions

---------

Co-authored-by: login <longjin@ringotek.cn>
Author: houmkh
Date: 2023-02-04 12:31:15 +08:00
Committed by: GitHub
Parent: 151251b50b
Commit: f6ba114bb0
38 changed files with 831 additions and 264 deletions
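The starvation fix described in the commit message can be illustrated standalone. The sketch below uses made-up numbers and no kernel types; only the clamping step mirrors what enqueue_reset_vruntime in the diff does.

// Standalone illustration (made-up numbers, not kernel code): CFS always
// runs the task with the smallest virtual runtime, so a task that slept for
// a long time would otherwise win every pick until it caught up, starving
// the rest. Clamping its vruntime to the queue minimum removes that lead.
fn main() {
    let queue_vruntimes = vec![10_000i64, 10_050, 10_100];
    let woken_vruntime = 50i64; // slept for a long time

    // Without the reset, the woken task is ~10,000 ticks "in credit":
    let lead = queue_vruntimes.iter().min().unwrap() - woken_vruntime;
    println!("unfair lead without reset: {} ticks", lead);

    // With the reset, it competes fairly from the queue's current minimum:
    let reset = *queue_vruntimes.iter().min().unwrap();
    println!("vruntime after reset: {}", reset);
}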

---------

@@ -9,6 +9,7 @@ use crate::{
},
kBUG,
libs::spinlock::RawSpinlock,
smp::core::smp_get_processor_id,
};
use super::core::{sched_enqueue, Scheduler};
@@ -42,14 +43,17 @@ struct CFSQueue {
lock: RawSpinlock,
/// The queue of processes
queue: Vec<&'static mut process_control_block>,
/// pcb of the IDLE process dedicated to this core's queue
idle_pcb: *mut process_control_block,
}
impl CFSQueue {
pub fn new() -> CFSQueue {
pub fn new(idle_pcb: *mut process_control_block) -> CFSQueue {
CFSQueue {
cpu_exec_proc_jiffies: 0,
lock: RawSpinlock::INIT,
queue: Vec::new(),
idle_pcb: idle_pcb,
}
}
@@ -83,11 +87,22 @@ impl CFSQueue {
res = self.queue.pop().unwrap();
} else {
// If the queue is empty, return the pcb of the IDLE process
res = unsafe { &mut initial_proc_union.pcb };
res = unsafe { self.idle_pcb.as_mut().unwrap() };
}
self.lock.unlock();
return res;
}
/// @brief Get the minimum virtual runtime in the cfs queue
///
/// @return Option<i64> If the queue is not empty, return the smallest virtual runtime in it; otherwise return None
pub fn min_vruntime(&self) -> Option<i64> {
if !self.queue.is_empty() {
return Some(self.queue.first().unwrap().virtual_runtime);
} else {
return None;
}
}
}
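The O(1) read in min_vruntime relies on the enqueue path keeping the queue ordered by virtual runtime. A self-contained sketch of that invariant follows; the Task type is made up, and the ascending order (so that first() is the minimum) is an assumption of this sketch, not a claim about the kernel's exact sort direction.

// Hypothetical Task type, not the kernel's process_control_block.
#[derive(Debug)]
struct Task { virtual_runtime: i64 }

fn min_vruntime(queue: &[Task]) -> Option<i64> {
    queue.first().map(|t| t.virtual_runtime)
}

fn main() {
    let mut queue = vec![Task { virtual_runtime: 7 }, Task { virtual_runtime: 3 }];
    // Keep ascending order so that first() is the minimum.
    queue.sort_by_key(|t| t.virtual_runtime);
    assert_eq!(min_vruntime(&queue), Some(3));
    println!("min vruntime: {:?}", min_vruntime(&queue));
}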
/// @brief The CFS scheduler class
@@ -105,8 +120,12 @@ impl SchedulerCFS {
// Create a queue for each cpu core
for _ in 0..MAX_CPU_NUM {
result.cpu_queue.push(Box::leak(Box::new(CFSQueue::new())));
result
.cpu_queue
.push(Box::leak(Box::new(CFSQueue::new(null_mut()))));
}
// Set the idle pcb for cpu0's queue
result.cpu_queue[0].idle_pcb = unsafe { &mut initial_proc_union.pcb };
return result;
}
@@ -137,6 +156,22 @@ impl SchedulerCFS {
// Update the current process's virtual runtime
current_pcb().virtual_runtime += 1;
}
/// @brief Enqueue a process on its cpu's cfs run queue and reset its virtual runtime to the queue's current minimum
pub fn enqueue_reset_vruntime(&mut self, pcb: &'static mut process_control_block) {
let cpu_queue = &mut self.cpu_queue[pcb.cpu_id as usize];
if cpu_queue.queue.len() > 0 {
pcb.virtual_runtime = cpu_queue.min_vruntime().unwrap();
}
cpu_queue.enqueue(pcb);
}
/// @brief Set the pcb of the IDLE process for the given cpu's queue
pub fn set_cpu_idle(&mut self, cpu_id: usize, pcb: *mut process_control_block) {
// kdebug!("set cpu idle: id={}", cpu_id);
self.cpu_queue[cpu_id].idle_pcb = pcb;
}
}
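The per-core IDLE pcb that set_cpu_idle wires up is what CFSQueue::dequeue falls back to when the run queue is empty. A minimal sketch of that fallback pattern, with a placeholder Task type standing in for the kernel's pcb and its raw-pointer plumbing:

// Placeholder types, not kernel code: each queue owns a raw pointer to its
// core's IDLE task and hands it out when no runnable task exists.
struct Task { pid: u32 }

struct Queue {
    run_queue: Vec<*mut Task>,
    idle: *mut Task,
}

impl Queue {
    fn dequeue(&mut self) -> *mut Task {
        self.run_queue.pop().unwrap_or(self.idle)
    }
}

fn main() {
    // One IDLE task per core, all with pid 0, as the commit message describes.
    let idle = Box::into_raw(Box::new(Task { pid: 0 }));
    let mut q = Queue { run_queue: Vec::new(), idle };
    let picked = q.dequeue();
    unsafe { assert_eq!((*picked).pid, 0) };
    println!("fell back to the idle task, pid {}", unsafe { (*picked).pid });
    unsafe { drop(Box::from_raw(idle)) }; // reclaim the leaked box
}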
impl Scheduler for SchedulerCFS {
@@ -144,7 +179,8 @@ impl Scheduler for SchedulerCFS {
/// Note that interrupts must be disabled before entering this function
fn sched(&mut self) -> Option<&'static mut process_control_block> {
current_pcb().flags &= !(PF_NEED_SCHED as u64);
let current_cpu_id = current_pcb().cpu_id as usize;
let current_cpu_id = smp_get_processor_id() as usize;
let current_cpu_queue: &mut CFSQueue = self.cpu_queue[current_cpu_id];
let proc: &'static mut process_control_block = current_cpu_queue.dequeue();
compiler_fence(core::sync::atomic::Ordering::SeqCst);
@@ -155,7 +191,7 @@ impl Scheduler for SchedulerCFS {
compiler_fence(core::sync::atomic::Ordering::SeqCst);
// If this switch was triggered by time-slice expiry, put the process back on the run queue; otherwise other modules are responsible for managing it
if current_pcb().state & (PROC_RUNNING as u64) != 0 {
sched_enqueue(current_pcb());
sched_enqueue(current_pcb(), false);
compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
@@ -166,6 +202,7 @@ impl Scheduler for SchedulerCFS {
}
compiler_fence(core::sync::atomic::Ordering::SeqCst);
return Some(proc);
} else {
// Do not switch
@@ -177,7 +214,7 @@ impl Scheduler for SchedulerCFS {
}
compiler_fence(core::sync::atomic::Ordering::SeqCst);
sched_enqueue(proc);
sched_enqueue(proc, false);
compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
compiler_fence(core::sync::atomic::Ordering::SeqCst);

---------

@@ -204,7 +204,7 @@ void wait_for_multicompletion(struct completion x[], int n)
{
wait_for_completion(&x[i]);
}
else if (!try_wait_for_completion(&x[i])) //done > 0 was checked above, so try to take one done here; if that fails, keep waiting
else if (!try_wait_for_completion(&x[i])) // done > 0 was checked above, so try to take one done here; if that fails, keep waiting
{
wait_for_completion(&x[i]);
}
@@ -325,4 +325,14 @@ void __test_completion()
kfree(waiter_data);
kfree(worker_data);
// kdebug("completion test done.");
}
/**
* @brief Allocate and initialize a completion (called from the Rust side)
*/
struct completion *completion_alloc()
{
struct completion *cmpl = kzalloc(sizeof(struct completion), 0);
completion_init(cmpl);
return cmpl;
}

---------

@@ -2,11 +2,14 @@ use core::sync::atomic::compiler_fence;
use crate::{
arch::asm::{current::current_pcb, ptrace::user_mode},
arch::context::switch_process,
include::bindings::bindings::{
process_control_block, pt_regs, EPERM, PROC_RUNNING, SCHED_FIFO, SCHED_NORMAL, SCHED_RR,
arch::{
context::switch_process,
interrupt::{cli, sti},
},
include::bindings::bindings::{
process_control_block, pt_regs, EINVAL, EPERM, MAX_CPU_NUM, PF_NEED_MIGRATE, PROC_RUNNING,
SCHED_FIFO, SCHED_NORMAL, SCHED_RR,
},
kdebug,
process::process::process_cpu,
};
@@ -46,7 +49,7 @@ fn __sched() -> Option<&'static mut process_control_block> {
next = p;
// kdebug!("next pcb is {}",next.pid);
// rt_scheduler.enqueue_task_rt(next.priority as usize, next);
sched_enqueue(next);
sched_enqueue(next, false);
return rt_scheduler.sched();
}
None => {
@@ -56,17 +59,34 @@ fn __sched() -> Option<&'static mut process_control_block> {
}
/// @brief Add a process to the scheduling queue
///
/// @param pcb The pcb to be enqueued
/// @param reset_time Whether to reset its virtual runtime
#[allow(dead_code)]
#[no_mangle]
pub extern "C" fn sched_enqueue(pcb: &'static mut process_control_block) {
pub extern "C" fn sched_enqueue(pcb: &'static mut process_control_block, mut reset_time: bool) {
// The scheduler does not handle processes whose running bit is 0
if pcb.state & (PROC_RUNNING as u64) == 0 {
return;
}
let cfs_scheduler = __get_cfs_scheduler();
let rt_scheduler = __get_rt_scheduler();
compiler_fence(core::sync::atomic::Ordering::SeqCst);
if (pcb.flags & (PF_NEED_MIGRATE as u64)) != 0 {
// kdebug!("migrating pcb:{:?}", pcb);
pcb.flags &= !(PF_NEED_MIGRATE as u64);
pcb.cpu_id = pcb.migrate_to;
reset_time = true;
}
compiler_fence(core::sync::atomic::Ordering::SeqCst);
if pcb.policy == SCHED_NORMAL {
cfs_scheduler.enqueue(pcb);
if reset_time {
cfs_scheduler.enqueue_reset_vruntime(pcb);
} else {
cfs_scheduler.enqueue(pcb);
}
} else if pcb.policy == SCHED_FIFO || pcb.policy == SCHED_RR {
rt_scheduler.enqueue(pcb);
} else {
@@ -107,6 +127,7 @@ pub extern "C" fn sched_update_jiffies() {
#[allow(dead_code)]
#[no_mangle]
pub extern "C" fn sys_sched(regs: &'static mut pt_regs) -> u64 {
cli();
// Permission check: refuse scheduling initiated from user mode
if user_mode(regs) {
return (-(EPERM as i64)) as u64;
@@ -116,5 +137,33 @@ pub extern "C" fn sys_sched(regs: &'static mut pt_regs) -> u64 {
if pcb.is_some() {
switch_process(current_pcb(), pcb.unwrap());
}
0
sti();
return 0;
}
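The cli()/sti() pair added here brackets the pick-and-switch sequence so a timer interrupt cannot re-enter the scheduler midway. A toy model of that bracket follows; the cli/sti below are stand-ins toggling a flag, not the kernel's arch functions.

// Toy interrupt flag; the real kernel toggles the CPU's IF flag instead.
static mut INTERRUPTS_ON: bool = true;

fn cli() { unsafe { INTERRUPTS_ON = false } }
fn sti() { unsafe { INTERRUPTS_ON = true } }

fn sys_sched_model() -> u64 {
    cli();
    // ... pick the next task and switch to it ...
    sti();
    return 0;
}

fn main() {
    assert_eq!(sys_sched_model(), 0);
    println!("interrupts enabled again: {}", unsafe { INTERRUPTS_ON });
}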
#[allow(dead_code)]
#[no_mangle]
pub extern "C" fn sched_set_cpu_idle(cpu_id: usize, pcb: *mut process_control_block) {
__get_cfs_scheduler().set_cpu_idle(cpu_id, pcb);
}
/// @brief Mark a process as waiting to be migrated to another cpu core.
/// When the process is re-enqueued, its cpu_id is updated and it is placed on the correct queue
///
/// @return i32 0 on success, otherwise a posix error code
#[allow(dead_code)]
#[no_mangle]
pub extern "C" fn sched_migrate_process(
pcb: &'static mut process_control_block,
target: usize,
) -> i32 {
if target > MAX_CPU_NUM.try_into().unwrap() {
// panic!("sched_migrate_process: target > MAX_CPU_NUM");
return -(EINVAL as i32);
}
pcb.flags |= PF_NEED_MIGRATE as u64;
pcb.migrate_to = target as u32;
// kdebug!("pid:{} migrate to cpu:{}", pcb.pid, target);
return 0;
}
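Note that sched_migrate_process only records the request; the actual move happens later in sched_enqueue, as the PF_NEED_MIGRATE branch above shows. A condensed model of that two-step handshake (the flag bit and types below are placeholders, not the kernel's values):

// Placeholder bit, not the kernel's PF_NEED_MIGRATE value.
const PF_NEED_MIGRATE: u64 = 1 << 4;

struct Pcb { flags: u64, cpu_id: u32, migrate_to: u32 }

// Step 1: the requester only records a flag plus a target core.
fn migrate(pcb: &mut Pcb, target: u32) {
    pcb.flags |= PF_NEED_MIGRATE;
    pcb.migrate_to = target;
}

// Step 2: the enqueue path performs the move and forces a vruntime reset.
fn enqueue(pcb: &mut Pcb, mut reset_time: bool) {
    if pcb.flags & PF_NEED_MIGRATE != 0 {
        pcb.flags &= !PF_NEED_MIGRATE;
        pcb.cpu_id = pcb.migrate_to; // land on the target core's queue
        reset_time = true;           // start fair on the new queue
    }
    println!("enqueue on cpu {}, reset_time={}", pcb.cpu_id, reset_time);
}

fn main() {
    let mut pcb = Pcb { flags: 0, cpu_id: 0, migrate_to: 0 };
    migrate(&mut pcb, 1);
    enqueue(&mut pcb, false);
}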

---------

@@ -4,10 +4,7 @@ use alloc::{boxed::Box, vec::Vec};
use crate::{
arch::asm::current::current_pcb,
include::bindings::bindings::{
initial_proc_union, process_control_block, PF_NEED_SCHED, SCHED_FIFO, SCHED_NORMAL,
SCHED_RR,
},
include::bindings::bindings::{process_control_block, PF_NEED_SCHED, SCHED_FIFO, SCHED_RR},
kBUG, kdebug,
libs::spinlock::RawSpinlock,
};
@@ -73,12 +70,11 @@ impl RTQueue {
res = Some(self.queue.pop().unwrap());
} else {
// If the queue is empty, return None
res=None;
res = None;
}
self.lock.unlock();
return res;
}
}
/// @brief The RT scheduler class
@@ -110,7 +106,7 @@ impl SchedulerRT {
for i in 0..SchedulerRT::MAX_RT_PRIO {
let cpu_queue_i: &mut RTQueue = self.cpu_queue[i as usize];
let proc: Option<&'static mut process_control_block> = cpu_queue_i.dequeue();
if proc.is_some(){
if proc.is_some() {
return proc;
}
}
@@ -132,10 +128,10 @@ impl Scheduler for SchedulerRT {
if proc.policy == SCHED_FIFO {
// If the picked process's priority is lower than the current process's, do not switch
if proc.priority <= current_pcb().priority {
sched_enqueue(proc);
sched_enqueue(proc, false);
} else {
// Put the current process back on the queue
sched_enqueue(current_pcb());
sched_enqueue(current_pcb(), false);
compiler_fence(core::sync::atomic::Ordering::SeqCst);
return Some(proc);
}
@@ -148,19 +144,19 @@ impl Scheduler for SchedulerRT {
if proc.rt_time_slice <= 0 {
proc.rt_time_slice = SchedulerRT::RR_TIMESLICE;
proc.flags |= !(PF_NEED_SCHED as u64);
sched_enqueue(proc);
sched_enqueue(proc, false);
}
// The target process's time slice is not exhausted; switch to the target process
else {
// Put the current process back on the queue
sched_enqueue(current_pcb());
sched_enqueue(current_pcb(), false);
compiler_fence(core::sync::atomic::Ordering::SeqCst);
return Some(proc);
}
}
// curr has the higher priority, which means it must be a real-time process; enqueue the picked process
else {
sched_enqueue(proc);
sched_enqueue(proc, false);
}
}
return None;
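The RR branch above reduces to a small decision: an exhausted time slice means refill and re-enqueue, anything else means switch to the picked process. A sketch of that decision with a placeholder Task type and time-slice constant:

const RR_TIMESLICE: i64 = 100; // placeholder value

struct Task { rt_time_slice: i64 }

// Returns true when the caller should switch to the picked task, false when
// it was given a fresh slice and should be re-enqueued instead.
fn rr_should_switch(proc: &mut Task) -> bool {
    if proc.rt_time_slice <= 0 {
        proc.rt_time_slice = RR_TIMESLICE; // refill, then re-enqueue
        false
    } else {
        true // slice remaining: switch to the picked task
    }
}

fn main() {
    let mut expired = Task { rt_time_slice: 0 };
    let mut fresh = Task { rt_time_slice: 42 };
    assert!(!rr_should_switch(&mut expired));
    assert_eq!(expired.rt_time_slice, RR_TIMESLICE);
    assert!(rr_should_switch(&mut fresh));
    println!("RR decision behaves as described");
}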

---------

@@ -62,6 +62,8 @@
extern void sched_update_jiffies();
extern void sched_init();
extern void sched();
extern void sched_enqueue(struct process_control_block *pcb);
extern void sched_enqueue(struct process_control_block *pcb, bool reset_time);
extern void sched_set_cpu_idle(uint64_t cpu_id, struct process_control_block *pcb);
extern void sched_migrate_process(struct process_control_block *pcb, uint64_t target);
void switch_proc(struct process_control_block *prev, struct process_control_block *proc);