Change CFSqueue from a Vec to a red-black tree (#229)

Uses the rbtree written by tickbh: https://github.com/tickbh/rbtree-rs/blob/master/src/lib.rs

Co-authored-by: tickbh <tickdream125@hotmail.com>
hanjiezhou 2023-04-06 00:50:14 +08:00 committed by GitHub
parent 2a7d773d3d
commit e0dfd4d5d7
11 changed files with 1819 additions and 21 deletions
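
The heart of the change: the old CFSQueue pushed onto a Vec and re-sorted the whole vector by virtual_runtime on every enqueue, while the red-black tree keeps entries ordered by that key, so enqueue becomes a single O(log n) insert and the process with the smallest virtual runtime can be removed directly. A self-contained sketch of the two strategies; std's BTreeMap and the Task struct are illustrative stand-ins (not from this commit) for the kernel's RBTree and process_control_block:

```rust
use std::collections::BTreeMap;

#[derive(Debug)]
struct Task {
    name: &'static str,
    virtual_runtime: i64,
}

fn main() {
    // Old strategy: every enqueue is a push followed by a full sort of the Vec.
    let mut vec_queue: Vec<Task> = Vec::new();
    for (name, vruntime) in [("a", 30), ("b", 10), ("c", 20)] {
        vec_queue.push(Task { name, virtual_runtime: vruntime });
        vec_queue.sort_by(|x, y| x.virtual_runtime.cmp(&y.virtual_runtime));
    }

    // New strategy: every enqueue is one ordered-map insert keyed by virtual_runtime,
    // and the entry with the smallest virtual runtime can be removed directly.
    let mut tree_queue: BTreeMap<i64, Task> = BTreeMap::new();
    for (name, vruntime) in [("a", 30), ("b", 10), ("c", 20)] {
        tree_queue.insert(vruntime, Task { name, virtual_runtime: vruntime });
    }
    let (min_vruntime, next) = tree_queue.pop_first().unwrap();
    println!("next to run: {:?} (vruntime {})", next, min_vruntime);
}
```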


@@ -27,6 +27,7 @@ num-derive = "0.3"
[build-dependencies]
bindgen = "0.61.0"
[dependencies.lazy_static]
version = "1.4.0"
# Since we are in a no_std environment and lazy_static depends on the spin crate, we need to specify that it uses no_std


@@ -6,6 +6,7 @@ use core::{arch::asm, sync::atomic::compiler_fence};
#[inline]
pub fn current_pcb() -> &'static mut process_control_block {
let ret: Option<&mut process_control_block>;
unsafe {
let mut tmp: u64 = !(32767u64);
compiler_fence(core::sync::atomic::Ordering::SeqCst);
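
For context, this function locates the current PCB by masking the stack pointer. A hedged sketch of the idea, assuming (as the !(32767) mask implies) that each kernel stack is 32 KB aligned and the process_control_block sits at its lowest address; the function name here is illustrative, not from the commit:

```rust
use core::arch::asm;

/// Sketch only: round RSP down to the base of the 32 KB kernel stack,
/// which is where the process_control_block is assumed to live.
#[inline]
fn current_pcb_addr() -> u64 {
    let rsp: u64;
    unsafe {
        // Read the current stack pointer.
        asm!("mov {}, rsp", out(reg) rsp);
    }
    // 32767 == 2^15 - 1, so clearing the low 15 bits yields the
    // 32 KB-aligned stack base.
    rsp & !(32767u64)
}
```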


@@ -14,6 +14,7 @@ struct process_control_block *get_current_pcb()
: "=r"(current)
: "0"(~32767UL));
barrier();
return current;
};
#define current_pcb get_current_pcb()


@@ -116,11 +116,10 @@ int video_reinitialize(bool level) // This function is called in main.c to ensure that vid
// Create the video daemon process
video_daemon_pcb = kthread_run(&video_refresh_daemon, NULL, "Video refresh daemon");
video_daemon_pcb->virtual_runtime = 0; // special case: highest priority, to be changed later
// Enable the screen refresh softirq
rs_register_softirq_video();
rs_raise_softirq(VIDEO_REFRESH_SIRQ);
}
return 0;
}


@@ -271,7 +271,6 @@ int scm_enable_double_buffer()
video_set_refresh_target(__current_framework->buf);
// Notify the display driver to enable double buffering
video_reinitialize(true);
c_uart_send_str(COM1, "##initialized double buffer##\n");
return 0;
}


@@ -4,6 +4,7 @@ pub mod list;
pub mod lockref;
pub mod mutex;
pub mod printk;
pub mod rbtree;
#[macro_use]
pub mod refcount;
pub mod rwlock;

kernel/src/libs/rbtree.rs (new file, 1791 lines; diff suppressed because it is too large)
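
The vendored rbtree.rs provides the ordered-map interface that the scheduler changes below depend on. A small usage sketch limited to the calls that actually appear in this commit (new, insert, get_first, pop_first, is_empty); the key/value types are simplified stand-ins, and the exact return shapes of get_first/pop_first are inferred from how the scheduler hunks use them:

```rust
use crate::libs::rbtree::RBTree;

fn rbtree_usage_sketch() {
    // Keyed by virtual runtime, exactly as CFSQueue does further down.
    let mut queue: RBTree<i64, &'static str> = RBTree::new();
    queue.insert(30, "process C");
    queue.insert(10, "process A");
    queue.insert(20, "process B");

    // get_first() peeks at the entry with the smallest key without removing it.
    let smallest = queue.get_first().unwrap();
    assert_eq!(*smallest.1, "process A");

    // pop_first() removes and returns the (key, value) pair with the smallest key.
    let (vruntime, name) = queue.pop_first().unwrap();
    assert_eq!((vruntime, name), (10, "process A"));
    assert!(!queue.is_empty());
}
```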


@@ -70,7 +70,8 @@ static struct process_control_block *__kthread_create_on_node(int (*thread_fn)(v
// todo: optimize this with a completion
while (kthreadd_pcb == NULL) // if kthreadd has not been initialized yet, wait for it to start
;
barrier();
// Wake up the kthreadd daemon
process_wakeup_immediately(kthreadd_pcb);
@@ -213,17 +214,21 @@ int kthreadd(void *unused)
barrier();
kinfo("kthread daemon started!");
struct process_control_block *pcb = current_pcb;
barrier();
kthreadd_pcb = current_pcb;
barrier();
current_pcb->flags |= PF_NOFREEZE;
for (;;)
{
current_pcb->state = PROC_INTERRUPTIBLE;
// All pending creation requests have been handled
if (list_empty(&kthread_create_list))
sched();
spin_lock(&__kthread_create_lock);
// Take the creation requests off the list one by one
while (!list_empty(&kthread_create_list))
{
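
The barrier() calls added in this file keep the compiler from caching kthreadd_pcb while another thread spins on it. The Rust-side counterpart of that pattern, shown with an illustrative flag name that is not from the commit, is an atomic load inside the loop, which the compiler likewise cannot hoist out:

```rust
use core::sync::atomic::{AtomicBool, Ordering};

// Illustrative stand-in for "kthreadd has finished initializing".
static KTHREADD_READY: AtomicBool = AtomicBool::new(false);

fn wait_for_kthreadd() {
    // Like the C busy-wait guarded by barrier(), the atomic load is performed
    // again on every iteration instead of being folded into one stale read.
    while !KTHREADD_READY.load(Ordering::Acquire) {
        core::hint::spin_loop();
    }
}
```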


@@ -603,7 +603,7 @@ ul initial_kernel_thread(ul arg)
"m"(current_pcb->thread->rsp), "m"(current_pcb->thread->rip), "S"("/bin/shell.elf"), "c"(NULL),
"d"(NULL)
: "memory");
return 1;
}
#pragma GCC pop_options


@@ -8,7 +8,7 @@ use crate::{
initial_proc_union, process_control_block, MAX_CPU_NUM, PF_NEED_SCHED, PROC_RUNNING,
},
kBUG,
libs::spinlock::RawSpinlock,
libs::{rbtree::RBTree, spinlock::RawSpinlock},
smp::core::smp_get_processor_id,
};
@@ -42,7 +42,7 @@ struct CFSQueue {
/// Lock protecting the queue
lock: RawSpinlock,
/// The queue of processes
queue: Vec<&'static mut process_control_block>,
queue: RBTree<i64, &'static mut process_control_block>,
/// The pcb of the IDLE process that belongs to this core's queue
idle_pcb: *mut process_control_block,
}
@@ -52,18 +52,11 @@ impl CFSQueue {
CFSQueue {
cpu_exec_proc_jiffies: 0,
lock: RawSpinlock::INIT,
queue: Vec::new(),
queue: RBTree::new(),
idle_pcb: idle_pcb,
}
}
/// @brief Sort the processes in ascending order of virtual runtime
/// todo: replace this sort method; underneath it is a merge sort, which uses a lot of memory and runs in O(n log n), whereas traversing and then inserting is at worst O(n)
pub fn sort(&mut self) {
self.queue
.sort_by(|a, b| (*a).virtual_runtime.cmp(&(*b).virtual_runtime));
}
/// @brief Add a pcb to the queue
pub fn enqueue(&mut self, pcb: &'static mut process_control_block) {
let mut rflags = 0u64;
@@ -74,8 +67,9 @@ impl CFSQueue {
self.lock.unlock_irqrestore(&rflags);
return;
}
self.queue.push(pcb);
self.sort();
self.queue.insert(pcb.virtual_runtime, pcb);
self.lock.unlock_irqrestore(&rflags);
}
@@ -84,9 +78,9 @@ impl CFSQueue {
let res: &'static mut process_control_block;
let mut rflags = 0u64;
self.lock.lock_irqsave(&mut rflags);
if self.queue.len() > 0 {
if !self.queue.is_empty() {
// The queue is not empty; return the next pcb to execute
res = self.queue.pop().unwrap();
res = self.queue.pop_first().unwrap().1;
} else {
// If the queue is empty, return the IDLE process's pcb
res = unsafe { self.idle_pcb.as_mut().unwrap() };
@@ -100,7 +94,7 @@ impl CFSQueue {
/// @return Option<i64> If the queue is not empty, return the smallest virtual runtime in it; otherwise return None
pub fn min_vruntime(&self) -> Option<i64> {
if !self.queue.is_empty() {
return Some(self.queue.first().unwrap().virtual_runtime);
return Some(self.queue.get_first().unwrap().1.virtual_runtime);
} else {
return None;
}
@@ -189,10 +183,13 @@ impl Scheduler for SchedulerCFS {
/// Note: interrupts must be disabled before entering this function
fn sched(&mut self) -> Option<&'static mut process_control_block> {
current_pcb().flags &= !(PF_NEED_SCHED as u64);
let current_cpu_id = smp_get_processor_id() as usize;
let current_cpu_queue: &mut CFSQueue = self.cpu_queue[current_cpu_id];
let proc: &'static mut process_control_block = current_cpu_queue.dequeue();
compiler_fence(core::sync::atomic::Ordering::SeqCst);
// If the current process is not in the running state, or its virtual runtime is greater than or equal to the next process's, a switch is needed.
if (current_pcb().state & (PROC_RUNNING as u64)) == 0
@@ -221,6 +218,7 @@ impl Scheduler for SchedulerCFS {
compiler_fence(core::sync::atomic::Ordering::SeqCst);
if current_cpu_queue.cpu_exec_proc_jiffies <= 0 {
SchedulerCFS::update_cpu_exec_proc_jiffies(proc.priority, current_cpu_queue);
// kdebug!("cpu:{:?}",current_cpu_id);
}
compiler_fence(core::sync::atomic::Ordering::SeqCst);
@@ -228,6 +226,7 @@ impl Scheduler for SchedulerCFS {
compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
compiler_fence(core::sync::atomic::Ordering::SeqCst);
return None;
}
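
Putting the fragments of this file together, the queue operations reduce to a keyed insert plus ordered removal. A sketch of the resulting methods assembled from the hunks above; pieces the diff does not show (the early-return guard in enqueue, the unlock at the end of dequeue) are filled in so the sketch reads as whole functions, and the surrounding types (process_control_block, RawSpinlock, RBTree) are the ones imported earlier in this file:

```rust
impl CFSQueue {
    /// Add a pcb to the queue, keyed by its virtual runtime.
    pub fn enqueue(&mut self, pcb: &'static mut process_control_block) {
        let mut rflags = 0u64;
        self.lock.lock_irqsave(&mut rflags);
        // (early-return guard from the hunk above elided)
        self.queue.insert(pcb.virtual_runtime, pcb);
        self.lock.unlock_irqrestore(&rflags);
    }

    /// Take the process with the smallest virtual runtime, or fall back to IDLE.
    pub fn dequeue(&mut self) -> &'static mut process_control_block {
        let res: &'static mut process_control_block;
        let mut rflags = 0u64;
        self.lock.lock_irqsave(&mut rflags);
        if !self.queue.is_empty() {
            // The queue is not empty: take the pcb with the smallest key.
            res = self.queue.pop_first().unwrap().1;
        } else {
            // The queue is empty: return this core's IDLE pcb.
            res = unsafe { self.idle_pcb.as_mut().unwrap() };
        }
        self.lock.unlock_irqrestore(&rflags);
        res
    }

    /// Smallest virtual runtime currently queued, if any.
    pub fn min_vruntime(&self) -> Option<i64> {
        if !self.queue.is_empty() {
            Some(self.queue.get_first().unwrap().1.virtual_runtime)
        } else {
            None
        }
    }
}
```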


@@ -180,6 +180,7 @@ pub extern "C" fn sys_sched(regs: &'static mut pt_regs) -> u64 {
}
// Perform the switch uniformly according to the scheduling result
let pcb = __sched();
if pcb.is_some() {
switch_process(current_pcb(), pcb.unwrap());
}
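
The is_some()/unwrap pair above is safe; an equivalent, slightly more idiomatic formulation (illustrative, not part of the commit) would be:

```rust
// Perform the switch only when the scheduler actually picked a next process.
if let Some(next_pcb) = __sched() {
    switch_process(current_pcb(), next_pcb);
}
```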