Add Rust FFI (#77)

* Introduce cargo

* Stop tracking Cargo.lock

* Fix the vscode error reporting issue

* new: Rust code can now call the C function printk_color

* 1. Split the work of the original run.sh into several separate make targets
  2. Build the Rust code inside the docker image

* Modify workflow

* update workflow

* new: Fix the workflow build failure
Author: login
Date: 2022-11-11 15:35:37 +08:00
Committed by: GitHub
Parent: 5e023cf791
Commit: 2813126e31
271 changed files with 609 additions and 307 deletions

kernel/src/sched/Makefile (new file)

@@ -0,0 +1,18 @@
CFLAGS += -I .

# Collect every .c source file in this directory.
kernel_sched_objs := $(shell find ./*.c)

ECHO:
	@echo "$@"

# Compile each source file into a matching <name>.c.o object.
$(kernel_sched_objs): ECHO
	$(CC) $(CFLAGS) -c $@ -o $@.o

all: $(kernel_sched_objs)

clean:
	echo "Done."

kernel/src/sched/cfs.c (new file)

@@ -0,0 +1,153 @@
#include "cfs.h"
#include <common/kprint.h>
#include <driver/video/video.h>
#include <common/spinlock.h>
struct sched_queue_t sched_cfs_ready_queue[MAX_CPU_NUM]; // per-CPU ready queues
/**
* @brief Take a PCB off the ready queue
*
* @return struct process_control_block*
*/
struct process_control_block *sched_cfs_dequeue()
{
if (list_empty(&sched_cfs_ready_queue[proc_current_cpu_id].proc_queue.list))
{
// kdebug("list empty, count=%d", sched_cfs_ready_queue[proc_current_cpu_id].count);
return &initial_proc_union.pcb;
}
struct process_control_block *proc = container_of(list_next(&sched_cfs_ready_queue[proc_current_cpu_id].proc_queue.list), struct process_control_block, list);
list_del(&proc->list);
--sched_cfs_ready_queue[proc_current_cpu_id].count;
return proc;
}
/**
* @brief Insert a PCB into the ready queue
*
* @param pcb
*/
void sched_cfs_enqueue(struct process_control_block *pcb)
{
if (pcb == initial_proc[proc_current_cpu_id])
return;
struct process_control_block *proc = container_of(list_next(&sched_cfs_ready_queue[proc_current_cpu_id].proc_queue.list), struct process_control_block, list);
if ((list_empty(&sched_cfs_ready_queue[proc_current_cpu_id].proc_queue.list)) == 0)
{
while (proc->virtual_runtime < pcb->virtual_runtime)
{
proc = container_of(list_next(&proc->list), struct process_control_block, list);
}
}
list_append(&proc->list, &pcb->list);
++sched_cfs_ready_queue[proc_current_cpu_id].count;
}
/**
* @brief Scheduling function
*
*/
void sched_cfs()
{
cli();
current_pcb->flags &= ~PF_NEED_SCHED;
// kdebug("current_pcb pid= %d", current_pcb->pid);
struct process_control_block *proc = sched_cfs_dequeue();
// kdebug("sched_cfs_ready_queue[proc_current_cpu_id].count = %d", sched_cfs_ready_queue[proc_current_cpu_id].count);
if (current_pcb->virtual_runtime >= proc->virtual_runtime || !(current_pcb->state & PROC_RUNNING)) // the current process has run at least as long as the next one, so switch
{
// kdebug("current_pcb->virtual_runtime = %d,proc->vt= %d", current_pcb->virtual_runtime, proc->virtual_runtime);
if (current_pcb->state & PROC_RUNNING) // this switch was triggered by timeslice expiry, so re-enqueue the current process; otherwise another subsystem now manages it
sched_cfs_enqueue(current_pcb);
// kdebug("proc->pid=%d, count=%d", proc->pid, sched_cfs_ready_queue[proc_current_cpu_id].count);
if (sched_cfs_ready_queue[proc_current_cpu_id].cpu_exec_proc_jiffies <= 0)
{
switch (proc->priority)
{
case 0:
case 1:
sched_cfs_ready_queue[proc_current_cpu_id].cpu_exec_proc_jiffies = 4 / sched_cfs_ready_queue[proc_current_cpu_id].count;
break;
case 2:
default:
sched_cfs_ready_queue[proc_current_cpu_id].cpu_exec_proc_jiffies = (4 / sched_cfs_ready_queue[proc_current_cpu_id].count) << 2;
break;
}
}
process_switch_mm(proc);
switch_proc(current_pcb, proc);
}
else // do not switch
{
// kdebug("not switch.");
sched_cfs_enqueue(proc);
if (sched_cfs_ready_queue[proc_current_cpu_id].cpu_exec_proc_jiffies <= 0)
{
switch (proc->priority)
{
case 0:
case 1:
sched_cfs_ready_queue[proc_current_cpu_id].cpu_exec_proc_jiffies = 4 / sched_cfs_ready_queue[proc_current_cpu_id].count;
break;
case 2:
default:
sched_cfs_ready_queue[proc_current_cpu_id].cpu_exec_proc_jiffies = (4 / sched_cfs_ready_queue[proc_current_cpu_id].count) << 2;
break;
}
}
}
sti();
}
/**
* @brief Update the timeslice when the timer interrupt arrives
*
*/
void sched_update_jiffies()
{
switch (current_pcb->priority)
{
case 0:
case 1:
--sched_cfs_ready_queue[proc_current_cpu_id].cpu_exec_proc_jiffies;
++current_pcb->virtual_runtime;
break;
case 2:
default:
sched_cfs_ready_queue[proc_current_cpu_id].cpu_exec_proc_jiffies -= 2;
current_pcb->virtual_runtime += 2;
break;
}
// timeslice exhausted: mark the process as needing a reschedule
if (sched_cfs_ready_queue[proc_current_cpu_id].cpu_exec_proc_jiffies <= 0)
current_pcb->flags |= PF_NEED_SCHED;
}
/**
* @brief Initialize the CFS scheduler
*
*/
void sched_cfs_init()
{
memset(&sched_cfs_ready_queue, 0, sizeof(struct sched_queue_t) * MAX_CPU_NUM);
for (int i = 0; i < MAX_CPU_NUM; ++i)
{
list_init(&sched_cfs_ready_queue[i].proc_queue.list);
sched_cfs_ready_queue[i].count = 1; // 1 because the IDLE process is always present
sched_cfs_ready_queue[i].cpu_exec_proc_jiffies = 5;
sched_cfs_ready_queue[i].proc_queue.virtual_runtime = 0x7fffffffffffffff;
}
}
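
As an aside, the timeslice assignment that appears twice in sched_cfs() above can be read as a single formula. The sketch below is illustrative only (the helper name cfs_timeslice is hypothetical and not part of this commit), assuming count >= 1 as guaranteed by the IDLE process:

// Hypothetical helper equivalent to the two switch blocks in sched_cfs():
// priority 0 and 1 get 4/count jiffies; priority 2 and above get (4/count) << 2.
// Note that integer division drives the base slice to 0 once count exceeds 4.
static inline long cfs_timeslice(long priority, long count)
{
    long base = 4 / count; // count >= 1, so no division by zero
    return (priority <= 1) ? base : (base << 2);
}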

kernel/src/sched/cfs.h (new file)

@@ -0,0 +1,39 @@
#pragma once
#include <common/glib.h>
#include <process/process.h>
// @todo: rewrite the CFS run queue with a red-black tree
struct sched_queue_t
{
long count; // number of processes currently in the queue
long cpu_exec_proc_jiffies; // number of timeslice jiffies the process may execute
struct process_control_block proc_queue;
};
extern struct sched_queue_t sched_cfs_ready_queue[MAX_CPU_NUM]; // per-CPU ready queues
/**
* @brief Scheduling function
*
*/
void sched_cfs();
/**
* @brief Insert a PCB into the ready queue
*
* @param pcb
*/
void sched_cfs_enqueue(struct process_control_block *pcb);
/**
* @brief Take a PCB off the ready queue
*
* @return struct process_control_block*
*/
struct process_control_block *sched_cfs_dequeue();
/**
* @brief Initialize the CFS process scheduler
*
*/
void sched_cfs_init();
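
A minimal sketch of how these entry points are meant to be driven (hypothetical boot and timer-tick code, not part of this commit; sched_update_jiffies() is declared in sched.h, and current_pcb/PF_NEED_SCHED come from process.h):

// Illustrative call pattern for the CFS interface above.
void example_boot(void)
{
    sched_cfs_init(); // once, before scheduling starts
}

void example_timer_tick(void)
{
    sched_update_jiffies(); // charge the current process on every tick
    if (current_pcb->flags & PF_NEED_SCHED)
        sched_cfs(); // slice expired: pick the next process
}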

kernel/src/sched/completion.c (new file)

@@ -0,0 +1,328 @@
#include "common/completion.h"
#include "common/kthread.h"
/**
* @brief Initialize a completion variable
*
* @param x completion
*/
void completion_init(struct completion *x)
{
x->done = 0;
wait_queue_head_init(&x->wait_queue);
}
/**
* @brief Wake up one node on the wait_queue
*
* @param x completion
*/
void complete(struct completion *x)
{
spin_lock(&x->wait_queue.lock);
if (x->done != COMPLETE_ALL)
++(x->done);
wait_queue_wakeup_on_stack(&x->wait_queue, -1UL); // -1UL means every node satisfies the condition; provisional
spin_unlock(&x->wait_queue.lock);
}
/**
* @brief Permanently mark done as COMPLETE_ALL and remove every node from the wait_queue
*
* @param x completion
*/
void complete_all(struct completion *x)
{
spin_lock(&x->wait_queue.lock);
x->done = COMPLETE_ALL; // permanent assignment
while (!list_empty(&x->wait_queue.wait_list))
wait_queue_wakeup_on_stack(&x->wait_queue, -1UL); // -1UL means every node satisfies the condition; provisional
spin_unlock(&x->wait_queue.lock);
}
/**
* @brief Helper holding the common wait logic (the core of every wait_for_completion variant lives here)
*
* @param x completion
* @param action function pointer that performs the actual wait
* @param timeout a non-negative integer
* @param state the process state to set while waiting
* @return long - the remaining timeout
*/
static long __wait_for_common(struct completion *x, long (*action)(long), long timeout, int state)
{
if (!x->done)
{
DECLARE_WAIT_ON_STACK_SELF(wait);
while (!x->done && timeout > 0)
{
// join the wait queue, but do not schedule away yet
if (list_empty(&wait.wait_list))
list_append(&x->wait_queue.wait_list, &wait.wait_list);
wait.pcb->state = state; // clear the running bit and set interruptible/uninterruptible
spin_unlock(&x->wait_queue.lock);
timeout = action(timeout);
spin_lock(&x->wait_queue.lock);
}
if (!x->done)
return timeout; // not yet completed, but woken by another process
wait.pcb->state = PROC_RUNNING; // set to running and clear the rest of state, hence the plain assignment
if (!list_empty(&wait.wait_list))
list_del_init(&wait.wait_list); // must use del_init
}
if (x->done != COMPLETE_ALL)
--(x->done);
return timeout ? timeout : 1; // Linux returns 1 here; the reason is unclear
}
/**
* @brief Wait until the completion wakes the process; sets pcb->state to uninterruptible.
*
* @param x completion
*/
void wait_for_completion(struct completion *x)
{
spin_lock(&x->wait_queue.lock);
__wait_for_common(x, &schedule_timeout_ms, MAX_TIMEOUT, PROC_UNINTERRUPTIBLE);
spin_unlock(&x->wait_queue.lock);
}
/**
* @brief Wait for at most the given time, returning on timeout; sets pcb->state to uninterruptible.
*
* @param x completion
* @param timeout non-negative; wait at most this long, or return the remaining time if completed early
* @return long - the remaining timeout
*/
long wait_for_completion_timeout(struct completion *x, long timeout)
{
BUG_ON(timeout < 0);
spin_lock(&x->wait_queue.lock);
timeout = __wait_for_common(x, &schedule_timeout_ms, timeout, PROC_UNINTERRUPTIBLE);
spin_unlock(&x->wait_queue.lock);
return timeout;
}
/**
* @brief Wait for the completion, but interruptibly (exact semantics unclear; concretely, pcb->state = interruptible)
*
* @param x completion
*/
void wait_for_completion_interruptible(struct completion *x)
{
spin_lock(&x->wait_queue.lock);
__wait_for_common(x, &schedule_timeout_ms, MAX_TIMEOUT, PROC_INTERRUPTIBLE);
spin_unlock(&x->wait_queue.lock);
}
/**
* @brief Wait for at most the given time, returning on timeout; the wait can be interrupted.
*
* @param x completion
* @param timeout non-negative; wait at most this long, or return the remaining time if completed early
* @return long - the remaining timeout
*/
long wait_for_completion_interruptible_timeout(struct completion *x, long timeout)
{
BUG_ON(timeout < 0);
spin_lock(&x->wait_queue.lock);
timeout = __wait_for_common(x, &schedule_timeout_ms, timeout, PROC_INTERRUPTIBLE);
spin_unlock(&x->wait_queue.lock);
return timeout;
}
/**
* @brief Try to acquire one "done" from the completion; using this as a check before waiting may speed things up.
*
* @param x completion
* @return true - wait_for_completion is unnecessary; one "done" was acquired (i.e. done has been decremented)
* @return false - done is currently 0; you need to wait via wait_for_completion
*/
bool try_wait_for_completion(struct completion *x)
{
if (!READ_ONCE(x->done))
return false;
bool ret = true;
spin_lock(&x->wait_queue.lock);
if (!x->done)
ret = false;
else if (x->done != COMPLETE_ALL)
--(x->done);
spin_unlock(&x->wait_queue.lock);
return ret;
}
/**
* @brief Test whether a completion is done (i.e. whether done is non-zero)
*
* @param x completion
* @return true
* @return false
*/
bool completion_done(struct completion *x)
{
if (!READ_ONCE(x->done))
return false;
// Rationale: under concurrency you may need to wait for another process's complete() before this truly counts as completed!
spin_lock(&x->wait_queue.lock);
if (!READ_ONCE(x->done))
{
spin_unlock(&x->wait_queue.lock);
return false;
}
spin_unlock(&x->wait_queue.lock);
return true;
}
/**
* @brief Wait on an array of completions
*
* @param x completion array
* @param n length of the array
*/
void wait_for_multicompletion(struct completion x[], int n)
{
for (int i = 0; i < n; i++) // wait on every completion in turn
{
if (!completion_done(&x[i])) // not done yet: wait directly
{
wait_for_completion(&x[i]);
}
else if (!try_wait_for_completion(&x[i])) // done > 0 was observed above, so try to acquire one; if that fails, wait
{
wait_for_completion(&x[i]);
}
}
}
/**
* @brief Waiter: blocks waiting for completions
*
* @param input_data a struct __test_data holding the one_to_one, one_to_many and many_to_one completions
*/
int __test_completion_waiter(void *input_data)
{
struct __test_data *data = (struct __test_data *)input_data;
// kdebug("THE %d WAITER BEGIN", -data->id);
// test one-to-many waiting - all threads are released at once from outside
if (!try_wait_for_completion(data->one_to_many))
{
wait_for_completion(data->one_to_many);
}
// test one-to-one waiting (the worker completes one_to_one after a delay)
if (!try_wait_for_completion(data->one_to_one))
{
wait_for_completion(data->one_to_one);
}
// both waits above have finished; call complete() to announce that this thread is done
complete(data->many_to_one);
// kdebug("THE %d WAITER SOLVED", -data->id);
return true;
}
/**
* @brief Worker: performs the complete() calls
*
* @param input_data a struct __test_data holding the one_to_one, one_to_many and many_to_one completions
*/
int __test_completion_worker(void *input_data)
{
struct __test_data *data = (struct __test_data *)input_data;
// kdebug("THE %d WORKER BEGIN", data->id);
// test one-to-many waiting - all threads are released at once from outside
if (!try_wait_for_completion(data->one_to_many))
{
wait_for_completion(data->one_to_many);
}
schedule_timeout_ms(50);
// for(uint64_t i=0;i<1e7;++i)
// pause();
complete(data->one_to_one);
// the work above has finished; call complete() to announce that this thread is done
complete(data->many_to_one);
// kdebug("THE %d WORKER SOLVED", data->id);
return true;
}
/**
* @brief Test routine for the completion mechanism
*
*/
void __test_completion()
{
// kdebug("BEGIN COMPLETION TEST");
const int N = 100;
struct completion *one_to_one = kzalloc(sizeof(struct completion) * N, 0);
struct completion *one_to_many = kzalloc(sizeof(struct completion), 0);
struct completion *waiter_many_to_one = kzalloc(sizeof(struct completion) * N, 0);
struct completion *worker_many_to_one = kzalloc(sizeof(struct completion) * N, 0);
struct __test_data *waiter_data = kzalloc(sizeof(struct __test_data) * N, 0);
struct __test_data *worker_data = kzalloc(sizeof(struct __test_data) * N, 0);
completion_init(one_to_many);
for (int i = 0; i < N; i++)
{
completion_init(&one_to_one[i]);
completion_init(&waiter_many_to_one[i]);
completion_init(&worker_many_to_one[i]);
}
for (int i = 0; i < N; i++)
{
waiter_data[i].id = -i; // waiter
waiter_data[i].many_to_one = &waiter_many_to_one[i];
waiter_data[i].one_to_one = &one_to_one[i];
waiter_data[i].one_to_many = one_to_many;
kthread_run(__test_completion_waiter, &waiter_data[i], "the %dth waiter", i);
}
for (int i = 0; i < N; i++)
{
worker_data[i].id = i; // worker
worker_data[i].many_to_one = &worker_many_to_one[i];
worker_data[i].one_to_one = &one_to_one[i];
worker_data[i].one_to_many = one_to_many;
kthread_run(__test_completion_worker, &worker_data[i], "the %dth worker", i);
}
complete_all(one_to_many);
// kdebug("all of the waiters and workers begin running");
// kdebug("BEGIN COUNTING");
wait_for_multicompletion(waiter_many_to_one, N);
wait_for_multicompletion(worker_many_to_one, N);
// kdebug("all of the waiters and workers complete");
kfree(one_to_one);
kfree(one_to_many);
kfree(waiter_many_to_one);
kfree(worker_many_to_one);
kfree(waiter_data);
kfree(worker_data);
// kdebug("completion test done.");
}
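
Distilled from the stress test above, a minimal one-to-one usage sketch of the API (the names demo_done, demo_worker and demo are hypothetical, not part of this commit):

// One thread waits; another signals once its work is finished.
static struct completion demo_done;

static int demo_worker(void *unused)
{
    // ... perform some work ...
    complete(&demo_done); // wake the waiter exactly once
    return 0;
}

static void demo(void)
{
    completion_init(&demo_done);
    kthread_run(demo_worker, NULL, "demo_worker");
    wait_for_completion(&demo_done); // blocks until demo_worker calls complete()
}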

kernel/src/sched/sched.c (new file)

@@ -0,0 +1,76 @@
#include "sched.h"
#include <common/kprint.h>
#include <common/spinlock.h>
#include <driver/video/video.h>
#include <sched/cfs.h>
#include <common/string.h>
/**
* @brief Set a process's scheduling policy (core implementation)
*
* @param p pcb
* @param attr scheduling attributes
* @param user whether the request comes from user space
* @param pi
* @return int
*/
static int __sched_setscheduler(struct process_control_block *p, const struct sched_attr *attr, bool user, bool pi)
{
int policy = attr->sched_policy;
recheck:;
// the recheck label exists so that, after policy is updated inside a critical section, it can be re-validated here
if (!IS_VALID_SCHED_POLICY(policy))
{
return -EINVAL;
}
// update succeeded
p->policy = policy;
return 0;
}
static int _sched_setscheduler(struct process_control_block *p, int policy, const struct sched_param *param, bool check)
{
struct sched_attr attr = {.sched_policy = policy};
return __sched_setscheduler(p, &attr, check, true);
}
/**
* sched_setscheduler - set the scheduling policy of a process
* @param p the pcb to modify
* @param policy the policy to set
* @param param structure containing the new RT priority; currently unused
*
* @return 0 on success, otherwise the corresponding error code
*
*/
int sched_setscheduler(struct process_control_block *p, int policy, const struct sched_param *param)
{
return _sched_setscheduler(p, policy, param, true);
}
/**
* @brief Wraps sched_cfs_enqueue(); inserts a PCB into the ready queue
*
* @param pcb
*/
void sched_enqueue(struct process_control_block *pcb)
{
sched_cfs_enqueue(pcb);
}
/**
* @brief Wraps sched_cfs(); the scheduling entry point
*
*/
void sched()
{
sched_cfs();
}
void sched_init()
{
sched_cfs_init();
}
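
For reference, a hedged sketch of calling the new policy API (hypothetical caller; note that IS_VALID_SCHED_POLICY in sched.h uses a strict "> 0", so as written it rejects policy 0, i.e. SCHED_NORMAL):

// Illustrative caller: switch a process to round-robin scheduling.
int example_set_rr(struct process_control_block *p)
{
    struct sched_param param = {.sched_priority = 0}; // currently unused
    return sched_setscheduler(p, SCHED_RR, &param); // 0 on success, -EINVAL on bad policy
}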

kernel/src/sched/sched.h (new file)

@@ -0,0 +1,79 @@
#pragma once
#include <common/glib.h>
#include <process/process.h>
/*
* Scheduling policies
*/
#define SCHED_NORMAL 0
#define SCHED_FIFO 1
#define SCHED_RR 2
#define SCHED_BATCH 3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE 5
#define SCHED_DEADLINE 6
#define SCHED_MAX_POLICY_NUM SCHED_DEADLINE
#define IS_VALID_SCHED_POLICY(_policy) ((_policy) > 0 && (_policy) <= SCHED_MAX_POLICY_NUM)
struct sched_param
{
int sched_priority;
};
struct sched_attr
{
uint32_t size;
uint32_t sched_policy;
uint64_t sched_flags;
/* SCHED_NORMAL, SCHED_BATCH */
int32_t sched_nice;
/* SCHED_FIFO, SCHED_RR */
uint32_t sched_priority;
/* SCHED_DEADLINE */
uint64_t sched_runtime;
uint64_t sched_deadline;
uint64_t sched_period;
/* Utilization hints */
uint32_t sched_util_min;
uint32_t sched_util_max;
};
static int __sched_setscheduler(struct process_control_block *p, const struct sched_attr *attr, bool user, bool pi);
static int _sched_setscheduler(struct process_control_block *p, int policy, const struct sched_param *param,
bool check);
/**
* sched_setscheduler - set the scheduling policy of a process
* @param p the pcb to modify
* @param policy the policy to set
* @param param structure containing the new RT priority; currently unused
*
* @return 0 on success, otherwise the corresponding error code
*
*/
int sched_setscheduler(struct process_control_block *p, int policy, const struct sched_param *param);
/**
* @brief Wraps sched_cfs_enqueue(); inserts a PCB into the ready queue
*
* @param pcb
*/
void sched_enqueue(struct process_control_block *pcb);
/**
* @brief Wraps sched_cfs(); the scheduling entry point
*
*/
void sched();
void sched_init();
/**
* @brief Update the timeslice when the timer interrupt arrives
*
*/
void sched_update_jiffies();