Fix cpu_queue double locking caused by sched_enqueue not disabling interrupts when called outside interrupt context (#201)

Author: login, 2023-03-13 22:22:23 +08:00 (committed by GitHub)
Parent: 33270d005c
Commit: c2e757d8cb
3 changed files with 26 additions and 21 deletions

View File

@@ -66,22 +66,24 @@ impl CFSQueue {
/// @brief Enqueue the pcb into the run queue
pub fn enqueue(&mut self, pcb: &'static mut process_control_block) {
-self.lock.lock();
+let mut rflags = 0u64;
+self.lock.lock_irqsave(&mut rflags);
// If the process is the IDLE process, do not enqueue it
if pcb.pid == 0 {
-self.lock.unlock();
+self.lock.unlock_irqrestore(&rflags);
return;
}
self.queue.push(pcb);
self.sort();
-self.lock.unlock();
+self.lock.unlock_irqrestore(&rflags);
}
/// @brief Pop a pcb from the run queue; if the queue is empty, return the IDLE process's pcb
pub fn dequeue(&mut self) -> &'static mut process_control_block {
let res: &'static mut process_control_block;
-self.lock.lock();
+let mut rflags = 0u64;
+self.lock.lock_irqsave(&mut rflags);
if self.queue.len() > 0 {
// Queue is not empty: return the next pcb to run
res = self.queue.pop().unwrap();
@@ -89,7 +91,7 @@ impl CFSQueue {
// Queue is empty: return the IDLE process's pcb
res = unsafe { self.idle_pcb.as_mut().unwrap() };
}
-self.lock.unlock();
+self.lock.unlock_irqrestore(&rflags);
return res;
}

View File

@@ -11,7 +11,7 @@ use crate::{
process_control_block, pt_regs, EINVAL, EPERM, MAX_CPU_NUM, PF_NEED_MIGRATE, PROC_RUNNING,
SCHED_FIFO, SCHED_NORMAL, SCHED_RR,
},
-process::process::process_cpu,
+process::process::process_cpu
};
use super::cfs::{sched_cfs_init, SchedulerCFS, __get_cfs_scheduler};
@@ -34,7 +34,7 @@ pub fn get_cpu_loads(cpu_id: u32) -> u32 {
let cfs_scheduler = __get_cfs_scheduler();
let rt_scheduler = __get_rt_scheduler();
let len_cfs = cfs_scheduler.get_cfs_queue_len(cpu_id);
-let len_rt = rt_scheduler.get_rt_queue_len(cpu_id);
+let len_rt = rt_scheduler.rt_queue_len(cpu_id);
// let load_rt = rt_scheduler.get_load_list_len(cpu_id);
// kdebug!("this cpu_id {} is load rt {}", cpu_id, load_rt);
@@ -111,14 +111,13 @@ pub extern "C" fn sched_enqueue(pcb: &'static mut process_control_block, mut res
}
let cfs_scheduler = __get_cfs_scheduler();
let rt_scheduler = __get_rt_scheduler();
-// TODO: The first few processes are not migrated; this check needs revisiting. The current intent is to debug RT processes that have already finished initializing
-// if pcb.pid > 4 && pcb.policy!=0{
-if pcb.pid > 4 {
+// Every process except IDLE takes part in load balancing
+if pcb.pid > 0 {
loads_balance(pcb);
}
compiler_fence(core::sync::atomic::Ordering::SeqCst);
compiler_fence(core::sync::atomic::Ordering::SeqCst);
if (pcb.flags & (PF_NEED_MIGRATE as u64)) != 0 {
// kdebug!("migrating pcb:{:?}", pcb);
pcb.flags &= !(PF_NEED_MIGRATE as u64);

View File

@@ -52,21 +52,23 @@ impl RTQueue {
}
/// @brief Enqueue the pcb into the run queue
pub fn enqueue(&mut self, pcb: &'static mut process_control_block) {
-self.lock.lock();
+let mut rflags = 0u64;
+self.lock.lock_irqsave(&mut rflags);
// If the process is the IDLE process, do not enqueue it
if pcb.pid == 0 {
-self.lock.unlock();
+self.lock.unlock_irqrestore(&rflags);
return;
}
self.queue.push_back(pcb);
-self.lock.unlock();
+self.lock.unlock_irqrestore(&rflags);
}
/// @brief Pop a pcb from the head of the run queue; return None if the queue is empty
pub fn dequeue(&mut self) -> Option<&'static mut process_control_block> {
let res: Option<&'static mut process_control_block>;
-self.lock.lock();
+let mut rflags = 0u64;
+self.lock.lock_irqsave(&mut rflags);
if self.queue.len() > 0 {
// Queue is not empty: return the next pcb to run
res = Some(self.queue.pop_front().unwrap());
@@ -74,19 +76,20 @@ impl RTQueue {
// Queue is empty: return None
res = None;
}
-self.lock.unlock();
+self.lock.unlock_irqrestore(&rflags);
return res;
}
pub fn enqueue_front(&mut self, pcb: &'static mut process_control_block) {
-self.lock.lock();
+let mut rflags = 0u64;
+self.lock.lock_irqsave(&mut rflags);
// If the process is the IDLE process, do not enqueue it
if pcb.pid == 0 {
-self.lock.unlock();
+self.lock.unlock_irqrestore(&rflags);
return;
}
self.queue.push_front(pcb);
-self.lock.unlock();
+self.lock.unlock_irqrestore(&rflags);
}
pub fn get_rt_queue_size(&mut self) -> usize {
return self.queue.len();
@@ -143,7 +146,7 @@ impl SchedulerRT {
None
}
-pub fn get_rt_queue_len(&mut self, cpu_id: u32) -> usize {
+pub fn rt_queue_len(&mut self, cpu_id: u32) -> usize {
let mut sum = 0;
for prio in 0..SchedulerRT::MAX_RT_PRIO {
sum += self.cpu_queue[cpu_id as usize][prio as usize].get_rt_queue_size();
@@ -151,7 +154,8 @@ impl SchedulerRT {
return sum as usize;
}
-pub fn get_load_list_len(&mut self, cpu_id: u32) -> usize {
+#[inline]
+pub fn load_list_len(&mut self, cpu_id: u32) -> usize {
return self.load_list[cpu_id as usize].len();
}