Merge pull request #59 from StevenJiang1110/thread-ref

Implement POSIX thread support; add tty canonical-mode support

commit c172373b54
3  .gitattributes (vendored)
@ -5,4 +5,5 @@ src/apps/execve/execve filter=lfs diff=lfs merge=lfs -text
src/apps/execve/hello filter=lfs diff=lfs merge=lfs -text
src/apps/fork_c/fork filter=lfs diff=lfs merge=lfs -text
src/apps/signal_c/signal_test filter=lfs diff=lfs merge=lfs -text
src/apps/busybox/busybox filter=lfs diff=lfs merge=lfs -text
src/apps/pthread/pthread_test filter=lfs diff=lfs merge=lfs -text
@ -3,4 +3,5 @@ We don't include the source code of busybox here since the source code is really large
After downloading the source code of busybox 1.35.0 and unzipping it, cd into the busybox directory.
1. `make defconfig`. We set all configs to their defaults.
2. Change the line in .config: `#CONFIG_STATIC is not set` => `CONFIG_STATIC=y`. We need a statically linked busybox binary since we do not support dynamic linking yet.
2. Set the static link option in .config: `CONFIG_STATIC=y`. We need a statically linked busybox binary since we do not support dynamic linking yet.
3. Set the standalone shell option in .config: `CONFIG_FEATURE_SH_STANDALONE=y`. The standalone ash will call busybox applets instead of searching for binaries in the host system; e.g., when running ls, the standalone ash will invoke `busybox ls`.
@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:be0c152c1e47d3109f808cda876c3a90a1ef959007741e126086552e1063eede
oid sha256:6db28e1ed8bdac06ac595b2126cefc10ecf16156bf4c1005950f561aedc0531a
size 2701792
7  src/apps/pthread/Makefile (new file)
@ -0,0 +1,7 @@
.PHONY: build clean run
build: pthread_test.c
	@gcc -static pthread_test.c -lpthread -o pthread_test
clean:
	@rm pthread_test
run: build
	@./pthread_test
3  src/apps/pthread/pthread_test (new executable file)
@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d40bfc16dbf95aba3afa1934fa4efc0147f0a0a12533362a8d95e105d4d2af21
size 1693840

280  src/apps/pthread/pthread_test.c (new file)
@ -0,0 +1,280 @@
// This test file is adapted from the Occlum pthread test.
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <pthread.h>
|
||||
#include <stdio.h>
|
||||
#include <errno.h>
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
#include <unistd.h>
|
||||
#include <sys/syscall.h>
|
||||
#ifndef SYS_gettid
|
||||
#error "SYS_gettid unavailable on this system"
|
||||
#endif
|
||||
|
||||
#define gettid() ((pid_t)syscall(SYS_gettid))
|
||||
|
||||
// ============================================================================
|
||||
// Helper functions
|
||||
// ============================================================================
|
||||
|
||||
#define THROW_ERROR(fmt, ...) do { \
|
||||
printf("\t\tERROR:" fmt " in func %s at line %d of file %s with errno %d: %s\n", \
|
||||
##__VA_ARGS__, __func__, __LINE__, __FILE__, errno, strerror(errno)); \
|
||||
return -1; \
|
||||
} while (0)
|
||||
|
||||
// ============================================================================
|
||||
// Helper macros
|
||||
// ============================================================================
|
||||
|
||||
|
||||
#define NTHREADS (3)
|
||||
#define STACK_SIZE (8 * 1024)
|
||||
|
||||
// ============================================================================
|
||||
// The test case of concurrent counter
|
||||
// ============================================================================
|
||||
|
||||
#define LOCAL_COUNT (1000UL)
|
||||
#define EXPECTED_GLOBAL_COUNT (LOCAL_COUNT * NTHREADS)
|
||||
|
||||
struct thread_arg {
|
||||
int ti;
|
||||
long local_count;
|
||||
volatile unsigned long *global_count;
|
||||
pthread_mutex_t *mutex;
|
||||
};
|
||||
|
||||
static void *thread_func(void *_arg) {
|
||||
struct thread_arg *arg = _arg;
|
||||
for (long i = 0; i < arg->local_count; i++) {
|
||||
pthread_mutex_lock(arg->mutex);
|
||||
(*arg->global_count)++;
|
||||
pthread_mutex_unlock(arg->mutex);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int test_mutex_with_concurrent_counter(void) {
|
||||
/*
|
||||
* Multiple threads are to increase a global counter concurrently
|
||||
*/
|
||||
volatile unsigned long global_count = 0;
|
||||
pthread_t threads[NTHREADS];
|
||||
struct thread_arg thread_args[NTHREADS];
|
||||
/*
|
||||
* Protect the counter with a mutex
|
||||
*/
|
||||
pthread_mutex_t mutex;
|
||||
pthread_mutex_init(&mutex, NULL);
|
||||
/*
|
||||
* Start the threads
|
||||
*/
|
||||
for (int ti = 0; ti < NTHREADS; ti++) {
|
||||
struct thread_arg *thread_arg = &thread_args[ti];
|
||||
thread_arg->ti = ti;
|
||||
thread_arg->local_count = LOCAL_COUNT;
|
||||
thread_arg->global_count = &global_count;
|
||||
thread_arg->mutex = &mutex;
|
||||
|
||||
if (pthread_create(&threads[ti], NULL, thread_func, thread_arg) < 0) {
|
||||
printf("ERROR: pthread_create failed (ti = %d)\n", ti);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
/*
|
||||
* Wait for the threads to finish
|
||||
*/
|
||||
for (int ti = 0; ti < NTHREADS; ti++) {
|
||||
if (pthread_join(threads[ti], NULL) < 0) {
|
||||
printf("ERROR: pthread_join failed (ti = %d)\n", ti);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
/*
|
||||
* Check the correctness of the concurrent counter
|
||||
*/
|
||||
if (global_count != EXPECTED_GLOBAL_COUNT) {
|
||||
printf("ERROR: incorrect global_count (actual = %ld, expected = %ld)\n",
|
||||
global_count, EXPECTED_GLOBAL_COUNT);
|
||||
return -1;
|
||||
}
|
||||
|
||||
pthread_mutex_destroy(&mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// The test case of robust mutex
|
||||
// ============================================================================
|
||||
|
||||
struct thread_robust_arg {
|
||||
int ti;
|
||||
volatile int *global_count;
|
||||
pthread_mutex_t *mutex;
|
||||
};
|
||||
|
||||
int ret_err = -1;
|
||||
|
||||
static void *thread_worker(void *_arg) {
|
||||
struct thread_robust_arg *arg = _arg;
|
||||
int err = pthread_mutex_lock(arg->mutex);
|
||||
if (err == EOWNERDEAD) {
|
||||
// The mutex is locked by this thread here, but its state is marked as
// inconsistent; the thread should call 'pthread_mutex_consistent' to
// make the mutex consistent again.
|
||||
if (pthread_mutex_consistent(arg->mutex) != 0) {
|
||||
printf("ERROR: failed to recover the mutex\n");
|
||||
return &ret_err;
|
||||
}
|
||||
} else if (err != 0) {
|
||||
printf("ERROR: failed to lock the mutex with error: %d\n", err);
|
||||
return &ret_err;
|
||||
}
|
||||
// Mutex is locked
|
||||
(*arg->global_count)++;
|
||||
// Wait for other threads to acquire the lock
|
||||
sleep(1);
|
||||
// Exit without unlocking the mutex; this leaves the mutex in an
// inconsistent state.
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int test_robust_mutex_with_concurrent_counter(void) {
|
||||
volatile int global_count = 0;
|
||||
pthread_t threads[NTHREADS];
|
||||
struct thread_robust_arg thread_args[NTHREADS];
|
||||
// Init robust mutex
|
||||
pthread_mutex_t mutex;
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
|
||||
pthread_mutex_init(&mutex, &attr);
|
||||
// Start the threads
|
||||
for (int ti = 0; ti < NTHREADS; ti++) {
|
||||
struct thread_robust_arg *thread_arg = &thread_args[ti];
|
||||
thread_arg->ti = ti;
|
||||
thread_arg->global_count = &global_count;
|
||||
thread_arg->mutex = &mutex;
|
||||
|
||||
if (pthread_create(&threads[ti], NULL, thread_worker, thread_arg) < 0) {
|
||||
THROW_ERROR("pthread_create failed (ti = %d)", ti);
|
||||
}
|
||||
}
|
||||
// Wait for the threads to finish
|
||||
for (int ti = 0; ti < NTHREADS; ti++) {
|
||||
int *ret_val;
|
||||
if (pthread_join(threads[ti], (void **)&ret_val) < 0) {
|
||||
THROW_ERROR("pthread_join failed (ti = %d)", ti);
|
||||
}
|
||||
// printf("Thread %d joined\n", ti);
|
||||
// fflush(stdout);
|
||||
if (ret_val && *ret_val != 0) {
|
||||
THROW_ERROR("run thread failed (ti = %d) with return val: %d", ti, *ret_val);
|
||||
}
|
||||
}
|
||||
// printf("Thread all exited.\n");
|
||||
// fflush(stdout);
|
||||
// Check the result
|
||||
if (global_count != NTHREADS) {
|
||||
THROW_ERROR("incorrect global_count (actual = %d, expected = %d)", global_count,
|
||||
NTHREADS);
|
||||
}
|
||||
|
||||
pthread_mutex_destroy(&mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// The test case of waiting condition variable
|
||||
// ============================================================================
|
||||
|
||||
#define WAIT_ROUND (10)
|
||||
|
||||
struct thread_cond_arg {
|
||||
int ti;
|
||||
volatile unsigned int *val;
|
||||
volatile int *exit_thread_count;
|
||||
pthread_cond_t *cond_val;
|
||||
pthread_mutex_t *mutex;
|
||||
};
|
||||
|
||||
static void *thread_cond_wait(void *_arg) {
|
||||
struct thread_cond_arg *arg = _arg;
|
||||
printf("Thread #%d: start to wait on condition variable.\n", arg->ti);
|
||||
fflush(stdout);
|
||||
for (unsigned int i = 0; i < WAIT_ROUND; ++i) {
|
||||
int tid = gettid();
|
||||
printf("WAIT ROUND: %d, tid = %d\n", i, tid);
|
||||
fflush(stdout);
|
||||
pthread_mutex_lock(arg->mutex);
|
||||
printf("pthread mutex lock: tid = %d\n", tid);
|
||||
fflush(stdout);
|
||||
while (*(arg->val) == 0) {
|
||||
pthread_cond_wait(arg->cond_val, arg->mutex);
|
||||
printf("pthread cond wait: tid = %d\n", tid);
|
||||
fflush(stdout);
|
||||
}
|
||||
pthread_mutex_unlock(arg->mutex);
|
||||
}
|
||||
(*arg->exit_thread_count)++;
|
||||
printf("Thread #%d: exited.\n", arg->ti);
|
||||
fflush(stdout);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int test_mutex_with_cond_wait(void) {
|
||||
volatile unsigned int val = 0;
|
||||
volatile int exit_thread_count = 0;
|
||||
pthread_t threads[NTHREADS];
|
||||
struct thread_cond_arg thread_args[NTHREADS];
|
||||
pthread_cond_t cond_val = PTHREAD_COND_INITIALIZER;
|
||||
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
|
||||
/*
|
||||
* Start the threads waiting on the condition variable
|
||||
*/
|
||||
for (int ti = 0; ti < NTHREADS; ti++) {
|
||||
struct thread_cond_arg *thread_arg = &thread_args[ti];
|
||||
thread_arg->ti = ti;
|
||||
thread_arg->val = &val;
|
||||
thread_arg->exit_thread_count = &exit_thread_count;
|
||||
thread_arg->cond_val = &cond_val;
|
||||
thread_arg->mutex = &mutex;
|
||||
|
||||
if (pthread_create(&threads[ti], NULL, thread_cond_wait, thread_arg) < 0) {
|
||||
printf("ERROR: pthread_create failed (ti = %d)\n", ti);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
/*
|
||||
* Unblock all threads currently waiting on the condition variable
|
||||
*/
|
||||
while (exit_thread_count < NTHREADS) {
|
||||
pthread_mutex_lock(&mutex);
|
||||
val = 1;
|
||||
pthread_cond_broadcast(&cond_val);
|
||||
pthread_mutex_unlock(&mutex);
|
||||
|
||||
pthread_mutex_lock(&mutex);
|
||||
val = 0;
|
||||
pthread_mutex_unlock(&mutex);
|
||||
}
|
||||
/*
|
||||
* Wait for the threads to finish
|
||||
*/
|
||||
for (int ti = 0; ti < NTHREADS; ti++) {
|
||||
if (pthread_join(threads[ti], NULL) < 0) {
|
||||
printf("ERROR: pthread_join failed (ti = %d)\n", ti);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main() {
|
||||
test_mutex_with_concurrent_counter();
|
||||
test_robust_mutex_with_concurrent_counter();
|
||||
// test_mutex_with_cond_wait();
|
||||
return 0;
|
||||
}
|
@ -12,7 +12,6 @@
|
||||
#include <string.h>
|
||||
#include <spawn.h>
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include <fcntl.h>
|
||||
#include <signal.h>
|
||||
#include <pthread.h>
|
||||
|
@ -4,7 +4,7 @@ use crate::log::LogLevel;
|
||||
|
||||
pub const USER_STACK_SIZE: usize = PAGE_SIZE * 4;
|
||||
pub const KERNEL_STACK_SIZE: usize = PAGE_SIZE * 64;
|
||||
pub const KERNEL_HEAP_SIZE: usize = 0x1_000_000;
|
||||
pub const KERNEL_HEAP_SIZE: usize = 0x2_000_000;
|
||||
|
||||
pub const KERNEL_OFFSET: usize = 0xffffff00_00000000;
|
||||
pub const PHYS_OFFSET: usize = 0xFFFF800000000000;
|
||||
@ -15,6 +15,6 @@ pub const PAGE_SIZE_BITS: usize = 0xc;
|
||||
|
||||
pub const KVA_START: usize = (usize::MAX) << PAGE_SIZE_BITS;
|
||||
|
||||
pub const DEFAULT_LOG_LEVEL: LogLevel = LogLevel::Close;
|
||||
pub const DEFAULT_LOG_LEVEL: LogLevel = LogLevel::Error;
|
||||
/// This value represent the base timer frequency in Hz
|
||||
pub const TIMER_FREQ: u64 = 100;
|
||||
|
@ -38,6 +38,25 @@ pub struct CpuContext {
|
||||
/// Trap information; this field is all zeros when the trap is a syscall
|
||||
pub trap_information: TrapInformation,
|
||||
}
|
||||
|
||||
impl CpuContext {
|
||||
pub fn set_rax(&mut self, rax: u64) {
|
||||
self.gp_regs.rax = rax;
|
||||
}
|
||||
|
||||
pub fn set_rsp(&mut self, rsp: u64) {
|
||||
self.gp_regs.rsp = rsp;
|
||||
}
|
||||
|
||||
pub fn set_rip(&mut self, rip: u64) {
|
||||
self.gp_regs.rip = rip;
|
||||
}
|
||||
|
||||
pub fn set_fsbase(&mut self, fs_base: u64) {
|
||||
self.fs_base = fs_base;
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Default, Copy, Debug)]
|
||||
#[repr(C)]
|
||||
pub struct TrapInformation {
|
||||
|
@ -1,19 +1,19 @@
|
||||
//! Device-related APIs.
|
||||
pub mod framebuffer;
|
||||
|
||||
pub mod console;
|
||||
mod io_port;
|
||||
pub mod pci;
|
||||
pub mod serial;
|
||||
|
||||
pub use self::io_port::IoPort;
|
||||
|
||||
/// First step of device initialization; call before the memory allocator is initialized
|
||||
pub(crate) fn first_init(framebuffer: &'static mut bootloader::boot_info::FrameBuffer) {
|
||||
framebuffer::init(framebuffer);
|
||||
console::init();
|
||||
serial::init();
|
||||
}
|
||||
|
||||
/// Second step of device initialization; call after the memory allocator is initialized
|
||||
pub(crate) fn second_init() {
|
||||
console::register_console_input_callback(|trap| {});
|
||||
serial::register_serial_input_irq_handler(|trap| {});
|
||||
}
|
||||
|
@ -1,6 +1,8 @@
|
||||
use alloc::{sync::Arc, vec::Vec};
|
||||
use lazy_static::lazy_static;
|
||||
use spin::Mutex;
|
||||
|
||||
use crate::{cell::Cell, driver::pic, x86_64_util::*, IrqAllocateHandle, TrapFrame};
|
||||
use crate::{cell::Cell, debug, driver::pic, x86_64_util::*, IrqAllocateHandle, TrapFrame};
|
||||
use core::fmt::{self, Write};
|
||||
|
||||
bitflags::bitflags! {
|
||||
@ -18,8 +20,13 @@ const SERIAL_LINE_CTRL: u16 = SERIAL_DATA + 3;
|
||||
const SERIAL_MODEM_CTRL: u16 = SERIAL_DATA + 4;
|
||||
const SERIAL_LINE_STS: u16 = SERIAL_DATA + 5;
|
||||
lazy_static! {
|
||||
static ref CONSOLE_IRQ_CALLBACK: Cell<IrqAllocateHandle> =
|
||||
Cell::new(pic::allocate_irq(4).unwrap());
|
||||
static ref CONSOLE_IRQ_CALLBACK: Cell<IrqAllocateHandle> = {
|
||||
let irq = Cell::new(pic::allocate_irq(4).unwrap());
|
||||
irq.get().on_active(handle_serial_input);
|
||||
irq
|
||||
};
|
||||
pub static ref SERIAL_INPUT_CALLBACKS: Mutex<Vec<Arc<dyn Fn(u8) + Send + Sync + 'static>>> =
|
||||
Mutex::new(Vec::new());
|
||||
}
|
||||
|
||||
/// Initializes the serial port.
|
||||
@ -43,13 +50,26 @@ pub(crate) fn init() {
|
||||
out8(SERIAL_INT_EN, 0x01);
|
||||
}
|
||||
|
||||
pub fn register_console_input_callback<F>(callback: F)
|
||||
pub(crate) fn register_serial_input_irq_handler<F>(callback: F)
|
||||
where
|
||||
F: Fn(&TrapFrame) + Sync + Send + 'static,
|
||||
{
|
||||
CONSOLE_IRQ_CALLBACK.get().on_active(callback);
|
||||
}
|
||||
|
||||
fn handle_serial_input(trap_frame: &TrapFrame) {
|
||||
// debug!("keyboard interrupt was met");
|
||||
if SERIAL_INPUT_CALLBACKS.is_locked() {
|
||||
return;
|
||||
}
|
||||
let lock = SERIAL_INPUT_CALLBACKS.lock();
|
||||
let received_char = receive_char().unwrap();
|
||||
debug!("receive char = {:?}", received_char);
|
||||
for callback in lock.iter() {
|
||||
callback(received_char);
|
||||
}
|
||||
}
|
||||
|
||||
fn line_sts() -> LineSts {
|
||||
LineSts::from_bits_truncate(in8(SERIAL_LINE_STS))
|
||||
}
|
||||
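The hunk above adds a SERIAL_INPUT_CALLBACKS table and an IRQ handler that fans every received byte out to the registered callbacks. A minimal hedged sketch of a consumer follows; the registration call mirrors what the new tty driver does later in this diff, while `handle_byte` is a hypothetical placeholder and not part of this commit.

```rust
// Hedged sketch of a SERIAL_INPUT_CALLBACKS consumer (not part of this commit).
// Assumes the `alloc` crate and the SERIAL_INPUT_CALLBACKS table declared above.
use alloc::sync::Arc;

fn subscribe_to_serial_input() {
    SERIAL_INPUT_CALLBACKS
        .lock()
        .push(Arc::new(|byte: u8| handle_byte(byte)));
}

fn handle_byte(byte: u8) {
    // Hypothetical placeholder: a real consumer (e.g., the tty driver added in
    // this commit) would buffer the byte and perform line-discipline processing.
    let _ = byte;
}
```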
@ -99,13 +119,13 @@ pub fn print(args: fmt::Arguments) {
|
||||
#[macro_export]
|
||||
macro_rules! console_print {
|
||||
($fmt: literal $(, $($arg: tt)+)?) => {
|
||||
$crate::device::console::print(format_args!($fmt $(, $($arg)+)?))
|
||||
$crate::device::serial::print(format_args!($fmt $(, $($arg)+)?))
|
||||
}
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! console_println {
|
||||
($fmt: literal $(, $($arg: tt)+)?) => {
|
||||
$crate::device::console::print(format_args!(concat!($fmt, "\n") $(, $($arg)+)?))
|
||||
$crate::device::serial::print(format_args!(concat!($fmt, "\n") $(, $($arg)+)?))
|
||||
}
|
||||
}
|
@ -6,8 +6,8 @@ pub mod acpi;
|
||||
pub mod apic;
|
||||
pub mod ioapic;
|
||||
pub mod pic;
|
||||
pub mod timer;
|
||||
pub mod rtc;
|
||||
pub mod timer;
|
||||
|
||||
pub use apic::ack;
|
||||
pub use timer::TimerCallback;
|
||||
|
@ -1,52 +1,58 @@
|
||||
use core::sync::atomic::AtomicU8;
|
||||
use core::sync::atomic::Ordering::Relaxed;
|
||||
|
||||
use acpi::{sdt::Signature, fadt::Fadt};
|
||||
use acpi::{fadt::Fadt, sdt::Signature};
|
||||
use lazy_static::lazy_static;
|
||||
use spin::Mutex;
|
||||
|
||||
use crate::{x86_64_util::{out8, in8}, time::Time};
|
||||
use crate::{
|
||||
time::Time,
|
||||
x86_64_util::{in8, out8},
|
||||
};
|
||||
|
||||
use super::acpi::ACPI_TABLES;
|
||||
|
||||
const CMOS_ADDRESS : u16 = 0x70;
|
||||
const CMOS_DATA : u16 = 0x71;
|
||||
pub(crate) static CENTURY_REGISTER : AtomicU8 = AtomicU8::new(0);
|
||||
const CMOS_ADDRESS: u16 = 0x70;
|
||||
const CMOS_DATA: u16 = 0x71;
|
||||
pub(crate) static CENTURY_REGISTER: AtomicU8 = AtomicU8::new(0);
|
||||
|
||||
lazy_static!{
|
||||
static ref READ_TIME : Mutex<Time> = Mutex::new(Time::default());
|
||||
lazy_static! {
|
||||
static ref READ_TIME: Mutex<Time> = Mutex::new(Time::default());
|
||||
}
|
||||
|
||||
pub fn init(){
|
||||
pub fn init() {
|
||||
let c = ACPI_TABLES.lock();
|
||||
let r_century = unsafe{
|
||||
let a = c.get_sdt::<Fadt>(Signature::FADT).unwrap().expect("not found FACP in ACPI table");
|
||||
let r_century = unsafe {
|
||||
let a = c
|
||||
.get_sdt::<Fadt>(Signature::FADT)
|
||||
.unwrap()
|
||||
.expect("not found FACP in ACPI table");
|
||||
a.century
|
||||
};
|
||||
CENTURY_REGISTER.store(r_century, Relaxed);
|
||||
}
|
||||
|
||||
pub fn get_cmos(reg: u8) -> u8{
|
||||
pub fn get_cmos(reg: u8) -> u8 {
|
||||
out8(CMOS_ADDRESS, reg as u8);
|
||||
in8(CMOS_DATA)
|
||||
}
|
||||
|
||||
pub fn is_updating() -> bool{
|
||||
out8(CMOS_ADDRESS,0x0A);
|
||||
pub fn is_updating() -> bool {
|
||||
out8(CMOS_ADDRESS, 0x0A);
|
||||
in8(CMOS_DATA) & 0x80 != 0
|
||||
}
|
||||
|
||||
pub fn read()-> Time{
|
||||
pub fn read() -> Time {
|
||||
update_time();
|
||||
READ_TIME.lock().clone()
|
||||
}
|
||||
|
||||
/// read year,month,day and other data
|
||||
/// ref: https://wiki.osdev.org/CMOS#Reading_All_RTC_Time_and_Date_Registers
|
||||
fn update_time(){
|
||||
let mut last_time :Time;
|
||||
fn update_time() {
|
||||
let mut last_time: Time;
|
||||
|
||||
let register_b : u8;
|
||||
let register_b: u8;
|
||||
let mut lock = READ_TIME.lock();
|
||||
|
||||
lock.update_from_rtc();
|
||||
@ -55,18 +61,15 @@ fn update_time(){
|
||||
|
||||
lock.update_from_rtc();
|
||||
|
||||
|
||||
while *lock!=last_time{
|
||||
while *lock != last_time {
|
||||
last_time = lock.clone();
|
||||
|
||||
lock.update_from_rtc();
|
||||
}
|
||||
|
||||
|
||||
register_b = get_cmos(0x0B);
|
||||
|
||||
lock.convert_bcd_to_binary(register_b);
|
||||
lock.convert_12_hour_to_24_hour(register_b);
|
||||
lock.modify_year();
|
||||
}
|
||||
|
||||
|
@ -23,13 +23,13 @@ pub(crate) mod mm;
|
||||
pub mod prelude;
|
||||
pub mod sync;
|
||||
pub mod task;
|
||||
pub mod time;
|
||||
pub mod timer;
|
||||
pub mod trap;
|
||||
pub mod user;
|
||||
mod util;
|
||||
pub mod vm;
|
||||
pub(crate) mod x86_64_util;
|
||||
pub mod time;
|
||||
|
||||
use core::{mem, panic::PanicInfo};
|
||||
pub use driver::ack as apic_ack;
|
||||
@ -45,7 +45,7 @@ use bootloader::{
|
||||
boot_info::{FrameBuffer, MemoryRegionKind},
|
||||
BootInfo,
|
||||
};
|
||||
pub use device::console::receive_char;
|
||||
pub use device::serial::receive_char;
|
||||
pub use mm::address::{align_down, align_up, is_aligned, virt_to_phys};
|
||||
pub use mm::page_table::translate_not_offset_virtual_address;
|
||||
pub use trap::{allocate_irq, IrqAllocateHandle, TrapFrame};
|
||||
@ -146,7 +146,7 @@ pub fn test_panic_handler(info: &PanicInfo) -> ! {
|
||||
}
|
||||
|
||||
pub fn panic_handler() {
|
||||
println!("[panic]: cr3:{:x}", x86_64_util::get_cr3());
|
||||
// println!("[panic]: cr3:{:x}", x86_64_util::get_cr3());
|
||||
// let mut fp: usize;
|
||||
// let stop = unsafe{
|
||||
// Task::current().kstack.get_top()
|
||||
|
@ -23,7 +23,7 @@ pub fn log_print(args: Arguments) {
|
||||
#[cfg(feature = "serial_print")]
|
||||
#[doc(hidden)]
|
||||
pub fn log_print(args: Arguments) {
|
||||
crate::device::console::print(args);
|
||||
crate::device::serial::print(args);
|
||||
}
|
||||
|
||||
/// This macro should not be directly called.
|
||||
|
@ -1,10 +1,9 @@
|
||||
use super::{page_table::PageTable, *};
|
||||
use crate::prelude::*;
|
||||
use crate::vm::VmIo;
|
||||
use crate::{
|
||||
config::PAGE_SIZE,
|
||||
mm::address::is_aligned,
|
||||
vm::{VmFrame, VmFrameVec},
|
||||
vm::{VmFrame, VmFrameVec, VmPerm},
|
||||
*,
|
||||
};
|
||||
use alloc::collections::{btree_map::Entry, BTreeMap};
|
||||
@ -265,6 +264,11 @@ impl MemorySet {
|
||||
}
|
||||
Err(Error::PageFault)
|
||||
}
|
||||
|
||||
pub fn protect(&mut self, addr: Vaddr, flags: PTFlags) {
|
||||
let va = VirtAddr(addr);
|
||||
self.pt.protect(va, flags)
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for MemorySet {
|
||||
|
@ -34,6 +34,8 @@ bitflags::bitflags! {
|
||||
/// Indicates that the mapping is present in all address spaces, so it isn't flushed from
|
||||
/// the TLB on an address space switch.
|
||||
const GLOBAL = 1 << 8;
|
||||
/// Forbid execute codes on the page. The NXE bits in EFER msr must be set.
|
||||
const NO_EXECUTE = 1 << 63;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -5,7 +5,7 @@ use crate::{
|
||||
*,
|
||||
};
|
||||
use alloc::{collections::BTreeMap, vec, vec::Vec};
|
||||
use core::fmt;
|
||||
use core::{fmt, panic};
|
||||
use lazy_static::lazy_static;
|
||||
|
||||
lazy_static! {
|
||||
@ -94,6 +94,18 @@ impl PageTable {
|
||||
entry.0 = 0;
|
||||
}
|
||||
|
||||
pub fn protect(&mut self, va: VirtAddr, flags: PTFlags) {
|
||||
let entry = self.get_entry_or_create(va).unwrap();
|
||||
if entry.is_unused() || !entry.is_present() {
|
||||
panic!("{:#x?} is invalid before protect", va);
|
||||
}
|
||||
// clear old mask
|
||||
let clear_flags_mask = !PTFlags::all().bits;
|
||||
entry.0 &= clear_flags_mask;
|
||||
// set new mask
|
||||
entry.0 |= flags.bits;
|
||||
}
|
||||
|
||||
pub fn map_area(&mut self, area: &MapArea) {
|
||||
for (va, pa) in area.mapper.iter() {
|
||||
assert!(pa.start_pa().0 < PHYS_OFFSET);
|
||||
|
@ -5,7 +5,8 @@ mod scheduler;
|
||||
#[allow(clippy::module_inception)]
|
||||
mod task;
|
||||
|
||||
pub(crate) use self::processor::{get_idle_task_cx_ptr, schedule};
|
||||
pub(crate) use self::processor::get_idle_task_cx_ptr;
|
||||
pub use self::processor::schedule;
|
||||
pub use self::scheduler::{set_scheduler, Scheduler};
|
||||
pub(crate) use self::task::context_switch;
|
||||
pub(crate) use self::task::TaskContext;
|
||||
|
@ -222,7 +222,7 @@ impl Task {
|
||||
Ok(Arc::new(result))
|
||||
}
|
||||
|
||||
pub fn send_to_scheduler(self: &Arc<Self>) {
|
||||
pub fn run(self: &Arc<Self>) {
|
||||
switch_to_task(self.clone());
|
||||
}
|
||||
|
||||
|
@ -1,10 +1,8 @@
|
||||
use crate::driver::rtc::{get_cmos, is_updating, CENTURY_REGISTER, read};
|
||||
use crate::driver::rtc::{get_cmos, is_updating, read, CENTURY_REGISTER};
|
||||
use core::sync::atomic::Ordering::Relaxed;
|
||||
|
||||
|
||||
|
||||
#[derive(Debug,Clone, Copy,Default,PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub struct Time{
|
||||
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub struct Time {
|
||||
century: u8,
|
||||
pub year: u16,
|
||||
pub month: u8,
|
||||
@ -14,31 +12,32 @@ pub struct Time{
|
||||
pub second: u8,
|
||||
}
|
||||
|
||||
impl Time{
|
||||
pub(crate) fn update_from_rtc(&mut self){
|
||||
while is_updating(){}
|
||||
impl Time {
|
||||
pub(crate) fn update_from_rtc(&mut self) {
|
||||
while is_updating() {}
|
||||
self.second = get_cmos(0x00);
|
||||
self.minute = get_cmos(0x02);
|
||||
self.hour = get_cmos(0x04);
|
||||
self.day = get_cmos(0x07);
|
||||
self.month = get_cmos(0x08);
|
||||
self.year = get_cmos(0x09) as u16;
|
||||
|
||||
|
||||
let century_register = CENTURY_REGISTER.load(Relaxed);
|
||||
|
||||
if century_register !=0{
|
||||
|
||||
if century_register != 0 {
|
||||
self.century = get_cmos(century_register);
|
||||
}
|
||||
}
|
||||
|
||||
/// convert BCD to binary values
|
||||
/// ref:https://wiki.osdev.org/CMOS#Reading_All_RTC_Time_and_Date_Registers
|
||||
pub(crate) fn convert_bcd_to_binary(&mut self,register_b: u8){
|
||||
if register_b & 0x04 == 0{
|
||||
pub(crate) fn convert_bcd_to_binary(&mut self, register_b: u8) {
|
||||
if register_b & 0x04 == 0 {
|
||||
let century_register = CENTURY_REGISTER.load(Relaxed);
|
||||
self.second = (self.second & 0x0F) + ((self.second / 16) * 10);
|
||||
self.minute = (self.minute & 0x0F) + ((self.minute / 16) * 10);
|
||||
self.hour = ( (self.hour & 0x0F) + (((self.hour & 0x70) / 16) * 10) ) | (self.hour & 0x80);
|
||||
self.hour =
|
||||
((self.hour & 0x0F) + (((self.hour & 0x70) / 16) * 10)) | (self.hour & 0x80);
|
||||
self.day = (self.day & 0x0F) + ((self.day / 16) * 10);
|
||||
self.month = (self.month & 0x0F) + ((self.month / 16) * 10);
|
||||
self.year = (self.year & 0x0F) + ((self.year / 16) * 10);
|
||||
@ -48,28 +47,26 @@ impl Time{
|
||||
}
|
||||
}
|
||||
/// convert 12 hour clock to 24 hour clock
|
||||
pub(crate) fn convert_12_hour_to_24_hour(&mut self,register_b:u8){
|
||||
pub(crate) fn convert_12_hour_to_24_hour(&mut self, register_b: u8) {
|
||||
// bit 1 in register_b is not set if the 12-hour format is enabled
// if the highest bit of hour is set, then the time is PM
|
||||
if ((register_b & 0x02)==0) && ((self.hour & 0x80) !=0){
|
||||
if ((register_b & 0x02) == 0) && ((self.hour & 0x80) != 0) {
|
||||
self.hour = ((self.hour & 0x7F) + 12) % 24;
|
||||
}
|
||||
}
|
||||
|
||||
/// convert raw year (10, 20 etc.) to real year (2010, 2020 etc.)
|
||||
pub(crate) fn modify_year(&mut self){
|
||||
pub(crate) fn modify_year(&mut self) {
|
||||
let century_register = CENTURY_REGISTER.load(Relaxed);
|
||||
if century_register !=0{
|
||||
if century_register != 0 {
|
||||
self.year += self.century as u16 * 100;
|
||||
}else{
|
||||
} else {
|
||||
panic!("century register not exists");
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/// get real time
|
||||
pub fn get_real_time() -> Time{
|
||||
pub fn get_real_time() -> Time {
|
||||
read()
|
||||
}
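As a quick sanity check on the BCD formula used in `convert_bcd_to_binary` above: each CMOS register packs two decimal digits into one byte, so `(v & 0x0F) + (v / 16) * 10` recovers the decimal value. A small standalone illustration, not part of the commit:

```rust
// Standalone illustration of the BCD-to-binary arithmetic used above.
fn bcd_to_binary(value: u8) -> u8 {
    (value & 0x0F) + (value / 16) * 10
}

#[cfg(test)]
mod tests {
    use super::bcd_to_binary;

    #[test]
    fn bcd_examples() {
        // The CMOS reports 0x23 for decimal 23 and 0x59 for decimal 59.
        assert_eq!(bcd_to_binary(0x23), 23);
        assert_eq!(bcd_to_binary(0x59), 59);
    }
}
```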
|
||||
|
||||
|
@ -172,7 +172,7 @@ pub(crate) fn init() {
|
||||
model_specific::SFMask::write(
|
||||
RFlags::TRAP_FLAG
|
||||
| RFlags::DIRECTION_FLAG
|
||||
| RFlags::INTERRUPT_FLAG
|
||||
// | RFlags::INTERRUPT_FLAG
|
||||
| RFlags::IOPL_LOW
|
||||
| RFlags::IOPL_HIGH
|
||||
| RFlags::NESTED_TASK
|
||||
|
@ -47,13 +47,7 @@ impl VmSpace {
|
||||
///
|
||||
/// For more information, see `VmMapOptions`.
|
||||
pub fn map(&self, frames: VmFrameVec, options: &VmMapOptions) -> Result<Vaddr> {
|
||||
let mut flags = PTFlags::PRESENT;
|
||||
if options.perm.contains(VmPerm::W) {
|
||||
flags.insert(PTFlags::WRITABLE);
|
||||
}
|
||||
// if options.perm.contains(VmPerm::U) {
|
||||
flags.insert(PTFlags::USER);
|
||||
// }
|
||||
let flags = PTFlags::from(options.perm);
|
||||
if options.addr.is_none() {
|
||||
return Err(Error::InvalidArgs);
|
||||
}
|
||||
@ -101,7 +95,16 @@ impl VmSpace {
|
||||
/// The entire specified VM range must have been mapped with physical
|
||||
/// memory pages.
|
||||
pub fn protect(&self, range: &Range<Vaddr>, perm: VmPerm) -> Result<()> {
|
||||
todo!()
|
||||
debug_assert!(range.start % PAGE_SIZE == 0);
|
||||
debug_assert!(range.end % PAGE_SIZE == 0);
|
||||
let start_page = range.start / PAGE_SIZE;
|
||||
let end_page = range.end / PAGE_SIZE;
|
||||
let flags = PTFlags::from(perm);
|
||||
for page_idx in start_page..end_page {
|
||||
let addr = page_idx * PAGE_SIZE;
|
||||
self.memory_set.lock().protect(addr, flags)
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@ -227,3 +230,17 @@ impl TryFrom<u64> for VmPerm {
|
||||
VmPerm::from_bits(value as u8).ok_or(Error::InvalidVmpermBits)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<VmPerm> for PTFlags {
|
||||
fn from(vm_perm: VmPerm) -> Self {
|
||||
let mut flags = PTFlags::PRESENT | PTFlags::USER;
|
||||
if vm_perm.contains(VmPerm::W) {
|
||||
flags |= PTFlags::WRITABLE;
|
||||
}
|
||||
// FIXME: how to respect executable flags?
|
||||
if !vm_perm.contains(VmPerm::X) {
|
||||
flags |= PTFlags::NO_EXECUTE;
|
||||
}
|
||||
flags
|
||||
}
|
||||
}
|
||||
|
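With `VmSpace::protect` now implemented and the `VmPerm`-to-`PTFlags` conversion above, changing the permissions of a mapped range looks roughly like the sketch below. The caller, the page-aligned range, and the `VmPerm::R` constant are assumptions for illustration; only `protect` and the flag conversion come from this diff.

```rust
// Hedged usage sketch of VmSpace::protect (illustrative; not part of this commit).
// Assumes `vm_space: &VmSpace`, PAGE_SIZE, Vaddr, and the crate's Result type.
// VmPerm::R is assumed to exist; the diff only shows W, U, and X being referenced.
fn make_pages_read_only(vm_space: &VmSpace, start: Vaddr, num_pages: usize) -> Result<()> {
    // `start` is assumed page-aligned, as protect() debug-asserts above.
    let range = start..(start + num_pages * PAGE_SIZE);
    // PTFlags::from(VmPerm) keeps PRESENT | USER, omits WRITABLE because
    // VmPerm::W is absent, and sets NO_EXECUTE because VmPerm::X is absent.
    vm_space.protect(&range, VmPerm::R)
}
```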
@ -1,7 +1,9 @@
|
||||
//! util for x86_64, it will rename to x86_64 when depend x86_64 isn't necessary
|
||||
use core::arch::{asm, x86_64::CpuidResult};
|
||||
|
||||
use x86_64::registers::{control::Cr4Flags, segmentation::Segment64, xcontrol::XCr0Flags};
|
||||
use x86_64::registers::{
|
||||
control::Cr4Flags, model_specific::EferFlags, segmentation::Segment64, xcontrol::XCr0Flags,
|
||||
};
|
||||
|
||||
#[inline(always)]
|
||||
pub fn read_rsp() -> usize {
|
||||
@ -237,6 +239,13 @@ pub fn enable_common_cpu_features() {
|
||||
unsafe {
|
||||
x86_64::registers::xcontrol::XCr0::write(xcr0);
|
||||
}
|
||||
|
||||
unsafe {
|
||||
// enable non-executable page protection
|
||||
x86_64::registers::model_specific::Efer::update(|efer| {
|
||||
*efer |= EferFlags::NO_EXECUTE_ENABLE;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
pub fn flush_tlb() {
|
||||
|
@ -93,8 +93,8 @@ fn create_fs_image(path: &Path) -> anyhow::Result<String> {
|
||||
.write(true)
|
||||
.create(true)
|
||||
.open(fs_img_path.as_str())?;
|
||||
// 16MiB
|
||||
f.set_len(16 * 1024 * 1024).unwrap();
|
||||
// 64MiB
|
||||
f.set_len(64 * 1024 * 1024).unwrap();
|
||||
Ok(format!(
|
||||
"file={},if=none,format=raw,id=x0",
|
||||
fs_img_path.as_str()
|
||||
@ -120,6 +120,9 @@ pub fn create_disk_images(kernel_binary_path: &Path) -> PathBuf {
|
||||
.arg(kernel_binary_path.parent().unwrap());
|
||||
build_cmd.arg("--quiet");
|
||||
|
||||
// println!("current dir = {:?}", build_cmd.get_current_dir());
|
||||
// println!("args = {:?}", build_cmd.get_args());
|
||||
|
||||
if !build_cmd.status().unwrap().success() {
|
||||
panic!("build failed");
|
||||
}
|
||||
|
@ -2,9 +2,9 @@
|
||||
//! When using this crate, typeflags and typeflags-util should also be added as dependencies.
|
||||
//!
|
||||
//! The require macro is used to ensure that an object has enough capability to call the function.
|
||||
//! The **require** macro can accept constrait [SomeRightSet] > [SomeRight],
|
||||
//! The **require** macro can accept constraint [SomeRightSet] > [SomeRight],
|
||||
//! which means the SomeRightSet should **contain** the SomeRight.
|
||||
//! The **require** macro can also accept constrait [SomeRightSet] > [AnotherRightSet],
|
||||
//! The **require** macro can also accept constraint [SomeRightSet] > [AnotherRightSet],
|
||||
//! which means the SomeRightSet should **include** the AnotherRightSet. In this case, AnotherRightSet should be a **generic parameter**.
|
||||
//! i.e., AnotherRightSet should occur in the generic param list of the function.
|
||||
//!
|
||||
|
@ -140,6 +140,7 @@ fn set_contain_where_clause(
|
||||
}
|
||||
}
|
||||
|
||||
/// generate a where clause to constraint the type set with another type set
|
||||
fn set_include_where_clause(
|
||||
require_attr: &RequireAttr,
|
||||
required_type_set: Ident,
|
||||
|
@ -1,54 +0,0 @@
|
||||
use jinux_frame::println;
|
||||
|
||||
use alloc::{sync::Arc, vec::Vec};
|
||||
use jinux_frame::{TrapFrame, receive_char, info};
|
||||
use lazy_static::lazy_static;
|
||||
use spin::Mutex;
|
||||
|
||||
use crate::{process::Process, current};
|
||||
|
||||
lazy_static! {
|
||||
static ref KEYBOARD_CALLBACKS: Mutex<Vec<Arc<dyn Fn(u8) + Send + Sync + 'static>>> =
|
||||
Mutex::new(Vec::new());
|
||||
static ref WAIT_INPUT_PROCESS : Mutex<Option<Arc<Process>>> = Mutex::new(None);
|
||||
}
|
||||
|
||||
pub fn init() {
|
||||
jinux_frame::device::console::register_console_input_callback(handle_irq);
|
||||
register_console_callback(Arc::new(console_receive_callback));
|
||||
}
|
||||
|
||||
fn handle_irq(trap_frame: &TrapFrame) {
|
||||
if KEYBOARD_CALLBACKS.is_locked() {
|
||||
return;
|
||||
}
|
||||
let lock = KEYBOARD_CALLBACKS.lock();
|
||||
for callback in lock.iter() {
|
||||
callback.call(((jinux_frame::device::console::receive_char().unwrap()),));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn register_console_callback(callback: Arc<dyn Fn(u8) + 'static + Send + Sync>) {
|
||||
KEYBOARD_CALLBACKS.lock().push(callback);
|
||||
}
|
||||
|
||||
fn console_receive_callback(data: u8){
|
||||
let process = WAIT_INPUT_PROCESS.lock().take();
|
||||
process.unwrap().send_to_scheduler();
|
||||
}
|
||||
|
||||
/// receive char from console, if there is no data in buffer, then it will switch to other task
|
||||
/// until it is notified.
|
||||
pub fn receive_console_char() -> u8{
|
||||
loop{
|
||||
if let Some(byte) = receive_char() {
|
||||
return byte;
|
||||
}else if WAIT_INPUT_PROCESS.lock().is_none(){
|
||||
WAIT_INPUT_PROCESS.lock().replace(current!());
|
||||
Process::yield_now();
|
||||
WAIT_INPUT_PROCESS.lock().take();
|
||||
}else{
|
||||
panic!("there is process waiting in the console receive list!");
|
||||
}
|
||||
}
|
||||
}
|
@ -1,7 +1,7 @@
|
||||
pub mod console;
|
||||
pub mod pci;
|
||||
pub mod tty;
|
||||
|
||||
pub fn init() {
|
||||
pci::init();
|
||||
console::init();
|
||||
tty::init();
|
||||
}
|
||||
|
@ -100,7 +100,7 @@ fn inner_block_device_test() {
|
||||
}
|
||||
#[allow(unused)]
|
||||
pub fn block_device_test() {
|
||||
let test_process = Process::spawn_kernel_process(|| {
|
||||
// inner_block_device_test();
|
||||
let _ = Process::spawn_kernel_process(|| {
|
||||
inner_block_device_test();
|
||||
});
|
||||
}
|
||||
|
80  src/services/libs/jinux-std/src/driver/tty.rs (new file)
@ -0,0 +1,80 @@
|
||||
use jinux_frame::device::serial::SERIAL_INPUT_CALLBACKS;
|
||||
|
||||
use crate::{
|
||||
prelude::*,
|
||||
tty::{get_n_tty, Tty},
|
||||
};
|
||||
|
||||
lazy_static! {
|
||||
pub static ref TTY_DRIVER: Arc<TtyDriver> = {
|
||||
let tty_driver = Arc::new(TtyDriver::new());
|
||||
// FIXME: install n_tty into tty_driver?
|
||||
let n_tty = get_n_tty();
|
||||
tty_driver.install(n_tty.clone());
|
||||
tty_driver
|
||||
};
|
||||
}
|
||||
|
||||
pub struct TtyDriver {
|
||||
ttys: Mutex<Vec<Arc<Tty>>>,
|
||||
}
|
||||
|
||||
impl TtyDriver {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
ttys: Mutex::new(Vec::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the tty device in driver's internal table.
|
||||
pub fn lookup(&self, index: usize) -> Result<Arc<Tty>> {
|
||||
let ttys = self.ttys.lock();
|
||||
// Return the tty device corresponding to idx
|
||||
if index >= ttys.len() {
|
||||
return_errno_with_message!(Errno::ENODEV, "lookup failed. No tty device");
|
||||
}
|
||||
let tty = ttys[index].clone();
|
||||
drop(ttys);
|
||||
Ok(tty)
|
||||
}
|
||||
|
||||
/// Install a new tty into the driver's internal tables.
|
||||
pub fn install(self: &Arc<Self>, tty: Arc<Tty>) {
|
||||
tty.set_driver(Arc::downgrade(self));
|
||||
self.ttys.lock().push(tty);
|
||||
}
|
||||
|
||||
/// Remove a tty from the driver's internal tables.
|
||||
pub fn remove(&self, index: usize) -> Result<()> {
|
||||
let mut ttys = self.ttys.lock();
|
||||
if index >= ttys.len() {
|
||||
return_errno_with_message!(Errno::ENODEV, "lookup failed. No tty device");
|
||||
}
|
||||
let removed_tty = ttys.remove(index);
|
||||
removed_tty.set_driver(Weak::new());
|
||||
drop(ttys);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn receive_char(&self, item: u8) {
|
||||
// FIXME: should the char be sent to all ttys?
|
||||
for tty in &*self.ttys.lock() {
|
||||
tty.receive_char(item);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn serial_input_callback(item: u8) {
|
||||
let tty_driver = get_tty_driver();
|
||||
tty_driver.receive_char(item);
|
||||
}
|
||||
|
||||
fn get_tty_driver() -> &'static TtyDriver {
|
||||
&TTY_DRIVER
|
||||
}
|
||||
|
||||
pub fn init() {
|
||||
SERIAL_INPUT_CALLBACKS
|
||||
.lock()
|
||||
.push(Arc::new(serial_input_callback));
|
||||
}
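The new driver wires itself into the serial layer in `init()` above and installs the n_tty in the `TTY_DRIVER` initializer. A hedged sketch of how another component might use the driver's lookup table follows; the function name is hypothetical.

```rust
// Hypothetical caller of TtyDriver::lookup (not part of this commit).
// Index 0 is the n_tty installed by TTY_DRIVER's initializer above.
fn echo_byte_to_first_tty(byte: u8) -> Result<()> {
    let tty = TTY_DRIVER.lookup(0)?;
    // Feed the byte through the same path the serial input callback uses.
    tty.receive_char(byte);
    Ok(())
}
```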
|
@ -1,5 +1,5 @@
|
||||
use crate::prelude::*;
|
||||
use crate::tty::get_console;
|
||||
use crate::tty::get_n_tty;
|
||||
use core::any::Any;
|
||||
|
||||
use super::events::IoEvents;
|
||||
@ -21,7 +21,7 @@ pub trait File: Send + Sync + Any {
|
||||
match cmd {
|
||||
IoctlCmd::TCGETS => {
|
||||
// FIXME: only a work around
|
||||
let tty = get_console();
|
||||
let tty = get_n_tty();
|
||||
tty.ioctl(cmd, arg)
|
||||
}
|
||||
_ => panic!("Ioctl unsupported"),
|
||||
|
@ -67,4 +67,14 @@ impl Stat {
|
||||
stat.st_blksize = 4096;
|
||||
stat
|
||||
}
|
||||
|
||||
// Fake stat for /bin/stty
|
||||
pub fn fake_stty_stat() -> Self {
|
||||
let mut stat = Stat::default();
|
||||
stat.st_mode = S_IFREG | 0o755;
|
||||
stat.st_nlink = 1;
|
||||
stat.st_blksize = 4096;
|
||||
stat.st_size = 84344;
|
||||
stat
|
||||
}
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
use super::events::IoEvents;
|
||||
use crate::prelude::*;
|
||||
use crate::tty::{get_console, Tty};
|
||||
use crate::tty::{get_n_tty, Tty};
|
||||
|
||||
use super::file::{File, FileDescripter};
|
||||
|
||||
@ -10,23 +10,19 @@ pub const FD_STDERR: FileDescripter = 2;
|
||||
|
||||
pub struct Stdin {
|
||||
console: Option<Arc<Tty>>,
|
||||
bind_to_console: bool,
|
||||
}
|
||||
|
||||
pub struct Stdout {
|
||||
console: Option<Arc<Tty>>,
|
||||
bind_to_console: bool,
|
||||
}
|
||||
|
||||
pub struct Stderr {
|
||||
console: Option<Arc<Tty>>,
|
||||
bind_to_console: bool,
|
||||
}
|
||||
|
||||
impl File for Stdin {
|
||||
fn poll(&self) -> IoEvents {
|
||||
if self.bind_to_console {
|
||||
let console = self.console.as_ref().unwrap();
|
||||
if let Some(console) = self.console.as_ref() {
|
||||
console.poll()
|
||||
} else {
|
||||
todo!()
|
||||
@ -34,8 +30,7 @@ impl File for Stdin {
|
||||
}
|
||||
|
||||
fn read(&self, buf: &mut [u8]) -> Result<usize> {
|
||||
if self.bind_to_console {
|
||||
let console = self.console.as_ref().unwrap();
|
||||
if let Some(console) = self.console.as_ref() {
|
||||
console.read(buf)
|
||||
} else {
|
||||
todo!()
|
||||
@ -43,8 +38,7 @@ impl File for Stdin {
|
||||
}
|
||||
|
||||
fn ioctl(&self, cmd: super::ioctl::IoctlCmd, arg: usize) -> Result<i32> {
|
||||
if self.bind_to_console {
|
||||
let console = self.console.as_ref().unwrap();
|
||||
if let Some(console) = self.console.as_ref() {
|
||||
console.ioctl(cmd, arg)
|
||||
} else {
|
||||
todo!()
|
||||
@ -53,8 +47,7 @@ impl File for Stdin {
|
||||
}
|
||||
impl File for Stdout {
|
||||
fn ioctl(&self, cmd: super::ioctl::IoctlCmd, arg: usize) -> Result<i32> {
|
||||
if self.bind_to_console {
|
||||
let console = self.console.as_ref().unwrap();
|
||||
if let Some(console) = self.console.as_ref() {
|
||||
console.ioctl(cmd, arg)
|
||||
} else {
|
||||
todo!()
|
||||
@ -62,8 +55,7 @@ impl File for Stdout {
|
||||
}
|
||||
|
||||
fn write(&self, buf: &[u8]) -> Result<usize> {
|
||||
if self.bind_to_console {
|
||||
let console = self.console.as_ref().unwrap();
|
||||
if let Some(console) = self.console.as_ref() {
|
||||
console.write(buf)
|
||||
} else {
|
||||
todo!()
|
||||
@ -73,8 +65,7 @@ impl File for Stdout {
|
||||
|
||||
impl File for Stderr {
|
||||
fn ioctl(&self, cmd: super::ioctl::IoctlCmd, arg: usize) -> Result<i32> {
|
||||
if self.bind_to_console {
|
||||
let console = self.console.as_ref().unwrap();
|
||||
if let Some(console) = self.console.as_ref() {
|
||||
console.ioctl(cmd, arg)
|
||||
} else {
|
||||
todo!()
|
||||
@ -82,8 +73,7 @@ impl File for Stderr {
|
||||
}
|
||||
|
||||
fn write(&self, buf: &[u8]) -> Result<usize> {
|
||||
if self.bind_to_console {
|
||||
let console = self.console.as_ref().unwrap();
|
||||
if let Some(console) = self.console.as_ref() {
|
||||
console.write(buf)
|
||||
} else {
|
||||
todo!()
|
||||
@ -95,10 +85,9 @@ impl Stdin {
|
||||
/// FIXME: console should be file under devfs.
|
||||
/// reimplement the function when devfs is enabled.
|
||||
pub fn new_with_default_console() -> Self {
|
||||
let console = get_console();
|
||||
let console = get_n_tty();
|
||||
Self {
|
||||
console: Some(console.clone()),
|
||||
bind_to_console: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -107,10 +96,9 @@ impl Stdout {
|
||||
/// FIXME: console should be file under devfs.
|
||||
/// reimplement the function when devfs is enabled.
|
||||
pub fn new_with_default_console() -> Self {
|
||||
let console = get_console();
|
||||
let console = get_n_tty();
|
||||
Self {
|
||||
console: Some(console.clone()),
|
||||
bind_to_console: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -119,10 +107,9 @@ impl Stderr {
|
||||
/// FIXME: console should be file under devfs.
|
||||
/// reimplement the function when devfs is enabled.
|
||||
pub fn new_with_default_console() -> Self {
|
||||
let console = get_console();
|
||||
let console = get_n_tty();
|
||||
Self {
|
||||
console: Some(console.clone()),
|
||||
bind_to_console: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -19,17 +19,13 @@
|
||||
|
||||
use crate::{
|
||||
prelude::*,
|
||||
user_apps::{get_busybox_app, UserApp},
|
||||
user_apps::{get_all_apps, get_busybox_app, UserApp},
|
||||
};
|
||||
use jinux_frame::{info, println};
|
||||
use process::Process;
|
||||
|
||||
use crate::{
|
||||
process::{
|
||||
process_filter::ProcessFilter,
|
||||
wait::{wait_child_exit, WaitOptions},
|
||||
},
|
||||
user_apps::get_all_apps,
|
||||
use crate::process::{
|
||||
process_filter::ProcessFilter,
|
||||
wait::{wait_child_exit, WaitOptions},
|
||||
};
|
||||
|
||||
extern crate alloc;
|
||||
@ -42,6 +38,8 @@ pub mod prelude;
|
||||
mod process;
|
||||
pub mod rights;
|
||||
pub mod syscall;
|
||||
pub mod thread;
|
||||
pub mod time;
|
||||
pub mod tty;
|
||||
mod user_apps;
|
||||
mod util;
|
||||
@ -56,19 +54,20 @@ pub fn init() {
|
||||
jinux_frame::enable_interrupts();
|
||||
}
|
||||
|
||||
pub fn init_process() {
|
||||
println!("[kernel] Spawn init process!, pid = {}", current!().pid());
|
||||
driver::pci::virtio::block::block_device_test();
|
||||
pub fn init_thread() {
|
||||
println!(
|
||||
"[kernel] Spawn init thread, tid = {}",
|
||||
current_thread!().tid()
|
||||
);
|
||||
// driver::pci::virtio::block::block_device_test();
|
||||
let process = Process::spawn_kernel_process(|| {
|
||||
println!("[kernel] Hello world from kernel!");
|
||||
let current = current!();
|
||||
let pid = current.pid();
|
||||
info!("current pid = {}", pid);
|
||||
let ppid = current.parent().unwrap().pid();
|
||||
info!("current ppid = {}", ppid);
|
||||
let current = current_thread!();
|
||||
let pid = current.tid();
|
||||
debug!("current pid = {}", pid);
|
||||
});
|
||||
info!(
|
||||
"[jinux-std/lib.rs] spawn kernel process, pid = {}",
|
||||
"[jinux-std/lib.rs] spawn kernel thread, tid = {}",
|
||||
process.pid()
|
||||
);
|
||||
|
||||
@ -79,18 +78,18 @@ pub fn init_process() {
|
||||
// Run test apps
|
||||
for app in get_all_apps().into_iter() {
|
||||
let UserApp {
|
||||
app_name,
|
||||
elf_path: app_name,
|
||||
app_content,
|
||||
argv,
|
||||
envp,
|
||||
} = app;
|
||||
info!("[jinux-std/lib.rs] spwan {:?} process", app_name);
|
||||
println!("[jinux-std/lib.rs] spwan {:?} process", app_name);
|
||||
Process::spawn_user_process(app_name.clone(), app_content, argv, Vec::new());
|
||||
}
|
||||
|
||||
// Run busybox ash
|
||||
let UserApp {
|
||||
app_name,
|
||||
elf_path: app_name,
|
||||
app_content,
|
||||
argv,
|
||||
envp,
|
||||
@ -102,13 +101,12 @@ pub fn init_process() {
|
||||
loop {
|
||||
// We don't have preemptive scheduler now.
|
||||
// The long running init process should yield its own execution to allow other tasks to go on.
|
||||
// The init process should wait and reap all children.
|
||||
let _ = wait_child_exit(ProcessFilter::Any, WaitOptions::empty());
|
||||
}
|
||||
}
|
||||
|
||||
/// The first process never returns.
|
||||
pub fn run_first_process() -> ! {
|
||||
Process::spawn_kernel_process(init_process);
|
||||
Process::spawn_kernel_process(init_thread);
|
||||
unreachable!()
|
||||
}
|
||||
|
@ -17,6 +17,7 @@ pub(crate) use jinux_frame::vm::Vaddr;
|
||||
pub(crate) use jinux_frame::{debug, error, info, print, println, trace, warn};
|
||||
pub(crate) use spin::{Mutex, RwLock};
|
||||
|
||||
/// return current process
|
||||
#[macro_export]
|
||||
macro_rules! current {
|
||||
() => {
|
||||
@ -24,7 +25,16 @@ macro_rules! current {
|
||||
};
|
||||
}
|
||||
|
||||
/// return current thread
|
||||
#[macro_export]
|
||||
macro_rules! current_thread {
|
||||
() => {
|
||||
crate::thread::Thread::current()
|
||||
};
|
||||
}
|
||||
|
||||
pub(crate) use crate::current;
|
||||
pub(crate) use crate::current_thread;
|
||||
pub(crate) use crate::error::{Errno, Error};
|
||||
pub(crate) use lazy_static::lazy_static;
|
||||
pub(crate) type Result<T> = core::result::Result<T, Error>;
|
||||
|
@ -5,11 +5,22 @@ use jinux_frame::{
|
||||
};
|
||||
|
||||
use crate::{
|
||||
current_thread,
|
||||
fs::file_table::FileTable,
|
||||
prelude::*,
|
||||
process::{new_pid, signal::sig_queues::SigQueues, table, task::create_new_task},
|
||||
process::{
|
||||
posix_thread::{
|
||||
builder::PosixThreadBuilder, name::ThreadName, posix_thread_ext::PosixThreadExt,
|
||||
},
|
||||
process_table,
|
||||
},
|
||||
rights::Full,
|
||||
thread::{allocate_tid, thread_table, Thread, Tid},
|
||||
util::write_val_to_user,
|
||||
vm::vmar::Vmar,
|
||||
};
|
||||
|
||||
use super::Process;
|
||||
use super::{posix_thread::PosixThread, signal::sig_disposition::SigDispositions, Process};
|
||||
|
||||
bitflags! {
|
||||
pub struct CloneFlags: u32 {
|
||||
@ -42,10 +53,10 @@ bitflags! {
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct CloneArgs {
|
||||
new_sp: Vaddr,
|
||||
new_sp: u64,
|
||||
parent_tidptr: Vaddr,
|
||||
child_tidptr: Vaddr,
|
||||
tls: usize,
|
||||
tls: u64,
|
||||
clone_flags: CloneFlags,
|
||||
}
|
||||
|
||||
@ -61,10 +72,10 @@ impl CloneArgs {
|
||||
}
|
||||
|
||||
pub const fn new(
|
||||
new_sp: Vaddr,
|
||||
new_sp: u64,
|
||||
parent_tidptr: Vaddr,
|
||||
child_tidptr: Vaddr,
|
||||
tls: usize,
|
||||
tls: u64,
|
||||
clone_flags: CloneFlags,
|
||||
) -> Self {
|
||||
CloneArgs {
|
||||
@ -86,104 +97,283 @@ impl From<u64> for CloneFlags {
|
||||
}
|
||||
|
||||
impl CloneFlags {
|
||||
fn contains_unsupported_flags(&self) -> bool {
|
||||
self.intersects(!(CloneFlags::CLONE_CHILD_SETTID | CloneFlags::CLONE_CHILD_CLEARTID))
|
||||
fn check_unsupported_flags(&self) -> Result<()> {
|
||||
let supported_flags = CloneFlags::CLONE_VM
|
||||
| CloneFlags::CLONE_FS
|
||||
| CloneFlags::CLONE_FILES
|
||||
| CloneFlags::CLONE_SIGHAND
|
||||
| CloneFlags::CLONE_THREAD
|
||||
| CloneFlags::CLONE_SYSVSEM
|
||||
| CloneFlags::CLONE_SETTLS
|
||||
| CloneFlags::CLONE_PARENT_SETTID
|
||||
| CloneFlags::CLONE_CHILD_SETTID
|
||||
| CloneFlags::CLONE_CHILD_CLEARTID;
|
||||
let unsupported_flags = *self - supported_flags;
|
||||
if !unsupported_flags.is_empty() {
|
||||
panic!("contains unsupported clone flags: {:?}", unsupported_flags);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Clone a child process, without scheduling it to run.
|
||||
pub fn clone_child(parent_context: CpuContext, clone_args: CloneArgs) -> Result<Arc<Process>> {
|
||||
let child_pid = new_pid();
|
||||
let current = Process::current();
|
||||
/// Clone a child thread, without scheduling it to run.
|
||||
pub fn clone_child(parent_context: CpuContext, clone_args: CloneArgs) -> Result<Tid> {
|
||||
clone_args.clone_flags.check_unsupported_flags()?;
|
||||
if clone_args.clone_flags.contains(CloneFlags::CLONE_THREAD) {
|
||||
let child_thread = clone_child_thread(parent_context, clone_args)?;
|
||||
let child_tid = child_thread.tid();
|
||||
debug!(
|
||||
"*********schedule child thread, child pid = {}**********",
|
||||
child_tid
|
||||
);
|
||||
child_thread.run();
|
||||
debug!(
|
||||
"*********return to parent thread, child pid = {}*********",
|
||||
child_tid
|
||||
);
|
||||
Ok(child_tid)
|
||||
} else {
|
||||
let child_process = clone_child_process(parent_context, clone_args)?;
|
||||
let child_pid = child_process.pid();
|
||||
debug!(
|
||||
"*********schedule child process, child pid = {}**********",
|
||||
child_pid
|
||||
);
|
||||
child_process.run();
|
||||
debug!(
|
||||
"*********return to parent process, child pid = {}*********",
|
||||
child_pid
|
||||
);
|
||||
Ok(child_pid)
|
||||
}
|
||||
}
|
||||
|
||||
// child process vmar
|
||||
let parent_root_vmar = current.root_vmar().unwrap();
|
||||
let child_root_vmar = current.root_vmar().unwrap().fork_vmar()?;
|
||||
fn clone_child_thread(parent_context: CpuContext, clone_args: CloneArgs) -> Result<Arc<Thread>> {
|
||||
let clone_flags = clone_args.clone_flags;
|
||||
let current = current!();
|
||||
debug_assert!(clone_flags.contains(CloneFlags::CLONE_VM));
|
||||
debug_assert!(clone_flags.contains(CloneFlags::CLONE_FILES));
|
||||
debug_assert!(clone_flags.contains(CloneFlags::CLONE_SIGHAND));
|
||||
let child_root_vmar = current.root_vmar();
|
||||
let child_vm_space = child_root_vmar.vm_space().clone();
|
||||
let child_cpu_context = clone_cpu_context(
|
||||
parent_context,
|
||||
clone_args.new_sp,
|
||||
clone_args.tls,
|
||||
clone_flags,
|
||||
);
|
||||
let child_user_space = Arc::new(UserSpace::new(child_vm_space, child_cpu_context));
|
||||
clone_sysvsem(clone_flags)?;
|
||||
|
||||
// child process user_vm
|
||||
let child_user_vm = match current.user_vm() {
|
||||
None => None,
|
||||
Some(user_vm) => Some(user_vm.clone()),
|
||||
};
|
||||
let child_tid = allocate_tid();
|
||||
// inherit sigmask from current thread
|
||||
let current_thread = current_thread!();
|
||||
let current_posix_thread = current_thread.posix_thread();
|
||||
let sig_mask = current_posix_thread.sig_mask().lock().clone();
|
||||
let is_main_thread = child_tid == current.pid();
|
||||
let thread_builder = PosixThreadBuilder::new(child_tid, child_user_space)
|
||||
.process(Arc::downgrade(¤t))
|
||||
.is_main_thread(is_main_thread);
|
||||
let child_thread = thread_builder.build();
|
||||
current.threads.lock().push(child_thread.clone());
|
||||
let child_posix_thread = child_thread.posix_thread();
|
||||
clone_parent_settid(child_tid, clone_args.parent_tidptr, clone_flags)?;
|
||||
clone_child_cleartid(child_posix_thread, clone_args.child_tidptr, clone_flags)?;
|
||||
clone_child_settid(
|
||||
child_root_vmar,
|
||||
child_tid,
|
||||
clone_args.child_tidptr,
|
||||
clone_flags,
|
||||
)?;
|
||||
Ok(child_thread)
|
||||
}
|
||||
|
||||
// child process user space
|
||||
let mut child_cpu_context = parent_context.clone();
|
||||
child_cpu_context.gp_regs.rax = 0; // Set return value of child process
|
||||
fn clone_child_process(parent_context: CpuContext, clone_args: CloneArgs) -> Result<Arc<Process>> {
|
||||
let current = current!();
|
||||
let clone_flags = clone_args.clone_flags;
|
||||
|
||||
// clone vm
|
||||
let parent_root_vmar = current.root_vmar();
|
||||
let child_root_vmar = clone_vm(parent_root_vmar, clone_flags)?;
|
||||
let child_user_vm = Some(current.user_vm().unwrap().clone());
|
||||
|
||||
// clone user space
|
||||
let child_cpu_context = clone_cpu_context(
|
||||
parent_context,
|
||||
clone_args.new_sp,
|
||||
clone_args.tls,
|
||||
clone_flags,
|
||||
);
|
||||
let child_vm_space = child_root_vmar.vm_space().clone();
|
||||
let child_user_space = Arc::new(UserSpace::new(child_vm_space, child_cpu_context));
|
||||
|
||||
let child_file_name = match current.filename() {
|
||||
None => None,
|
||||
Some(filename) => Some(filename.clone()),
|
||||
};
|
||||
let child_file_table = current.file_table.lock().clone();
|
||||
// clone file table
|
||||
let child_file_table = clone_files(current.file_table(), clone_flags);
|
||||
// clone sig dispositions
|
||||
let child_sig_dispositions = clone_sighand(current.sig_dispositions(), clone_flags);
|
||||
// clone system V semaphore
|
||||
clone_sysvsem(clone_flags)?;
|
||||
|
||||
let child_elf_path = current.filename().unwrap().clone();
|
||||
let child_thread_name = ThreadName::new_from_elf_path(&child_elf_path)?;
|
||||
|
||||
// inherit parent's sig disposition
|
||||
let child_sig_dispositions = current.sig_dispositions().lock().clone();
|
||||
// sig queue is set empty
|
||||
let child_sig_queues = SigQueues::new();
|
||||
// inherit parent's sig mask
|
||||
let child_sig_mask = current.sig_mask().lock().clone();
|
||||
let current_thread = current_thread!();
|
||||
let posix_thread = current_thread.posix_thread();
|
||||
let child_sig_mask = posix_thread.sig_mask().lock().clone();
|
||||
|
||||
let child_tid = allocate_tid();
|
||||
let mut child_thread_builder = PosixThreadBuilder::new(child_tid, child_user_space)
|
||||
.thread_name(Some(child_thread_name))
|
||||
.sig_mask(child_sig_mask);
|
||||
|
||||
let child = Arc::new_cyclic(|child_process_ref| {
|
||||
let weak_child_process = child_process_ref.clone();
|
||||
let child_task = create_new_task(child_user_space.clone(), weak_child_process);
|
||||
let child_pid = child_tid;
|
||||
child_thread_builder = child_thread_builder.process(weak_child_process);
|
||||
let child_thread = child_thread_builder.build();
|
||||
Process::new(
|
||||
child_pid,
|
||||
child_task,
|
||||
child_file_name,
|
||||
vec![child_thread],
|
||||
Some(child_elf_path),
|
||||
child_user_vm,
|
||||
Some(child_user_space),
|
||||
Some(child_root_vmar),
|
||||
None,
|
||||
child_root_vmar.clone(),
|
||||
Weak::new(),
|
||||
child_file_table,
|
||||
child_sig_dispositions,
|
||||
child_sig_queues,
|
||||
child_sig_mask,
|
||||
)
|
||||
});
|
||||
// Inherit parent's process group
|
||||
let parent_process_group = current
|
||||
.process_group()
|
||||
.lock()
|
||||
.as_ref()
|
||||
.map(|ppgrp| ppgrp.upgrade())
|
||||
.flatten()
|
||||
.unwrap();
|
||||
let parent_process_group = current.process_group().lock().upgrade().unwrap();
|
||||
parent_process_group.add_process(child.clone());
|
||||
child.set_process_group(Arc::downgrade(&parent_process_group));
|
||||
|
||||
current!().add_child(child.clone());
|
||||
table::add_process(child.clone());
|
||||
deal_with_clone_args(clone_args, &child)?;
|
||||
process_table::add_process(child.clone());
|
||||
|
||||
let child_thread = thread_table::tid_to_thread(child_tid).unwrap();
|
||||
let child_posix_thread = child_thread.posix_thread();
|
||||
clone_parent_settid(child_tid, clone_args.parent_tidptr, clone_flags)?;
|
||||
clone_child_cleartid(child_posix_thread, clone_args.child_tidptr, clone_flags)?;
|
||||
clone_child_settid(
|
||||
&child_root_vmar,
|
||||
child_tid,
|
||||
clone_args.child_tidptr,
|
||||
clone_flags,
|
||||
)?;
|
||||
Ok(child)
|
||||
}
|
||||
|
||||
fn deal_with_clone_args(clone_args: CloneArgs, child_process: &Arc<Process>) -> Result<()> {
|
||||
let clone_flags = clone_args.clone_flags;
|
||||
if clone_flags.contains_unsupported_flags() {
|
||||
panic!("Found unsupported clone flags: {:?}", clone_flags);
|
||||
}
|
||||
fn clone_child_cleartid(
|
||||
child_posix_thread: &PosixThread,
|
||||
child_tidptr: Vaddr,
|
||||
clone_flags: CloneFlags,
|
||||
) -> Result<()> {
|
||||
if clone_flags.contains(CloneFlags::CLONE_CHILD_CLEARTID) {
|
||||
clone_child_clear_tid(child_process)?;
|
||||
let mut clear_tid = child_posix_thread.clear_child_tid().lock();
|
||||
*clear_tid = child_tidptr;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn clone_child_settid(
|
||||
child_root_vmar: &Vmar<Full>,
|
||||
child_tid: Tid,
|
||||
child_tidptr: Vaddr,
|
||||
clone_flags: CloneFlags,
|
||||
) -> Result<()> {
|
||||
if clone_flags.contains(CloneFlags::CLONE_CHILD_SETTID) {
|
||||
clone_child_set_tid(child_process, clone_args)?;
|
||||
child_root_vmar.write_val(child_tidptr, &child_tid)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn clone_child_clear_tid(child_process: &Arc<Process>) -> Result<()> {
|
||||
// TODO: clone_child_clear_tid does nothing now
|
||||
fn clone_parent_settid(
|
||||
child_tid: Tid,
|
||||
parent_tidptr: Vaddr,
|
||||
clone_flags: CloneFlags,
|
||||
) -> Result<()> {
|
||||
if clone_flags.contains(CloneFlags::CLONE_PARENT_SETTID) {
|
||||
write_val_to_user(parent_tidptr, &child_tid)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn clone_child_set_tid(child_process: &Arc<Process>, clone_args: CloneArgs) -> Result<()> {
|
||||
let child_pid = child_process.pid();
|
||||
let child_vmar = child_process
|
||||
.root_vmar()
|
||||
.ok_or_else(|| Error::new(Errno::ECHILD))?;
|
||||
child_vmar.write_val(clone_args.child_tidptr, &child_pid)?;
|
||||
/// clone child vmar. If CLONE_VM is set, both threads share the same root vmar.
|
||||
/// Otherwise, fork a new copy-on-write vmar.
|
||||
fn clone_vm(
|
||||
parent_root_vmar: &Arc<Vmar<Full>>,
|
||||
clone_flags: CloneFlags,
|
||||
) -> Result<Arc<Vmar<Full>>> {
|
||||
if clone_flags.contains(CloneFlags::CLONE_VM) {
|
||||
Ok(parent_root_vmar.clone())
|
||||
} else {
|
||||
Ok(Arc::new(parent_root_vmar.fork_vmar()?))
|
||||
}
|
||||
}
|
||||
|
||||
fn clone_cpu_context(
|
||||
parent_context: CpuContext,
|
||||
new_sp: u64,
|
||||
tls: u64,
|
||||
clone_flags: CloneFlags,
|
||||
) -> CpuContext {
|
||||
let mut child_context = parent_context.clone();
|
||||
// The return value of the child thread is zero
|
||||
child_context.set_rax(0);
|
||||
|
||||
if clone_flags.contains(CloneFlags::CLONE_VM) {
|
||||
// if the parent and child share the same address space, a new stack must be specified.
|
||||
debug_assert!(new_sp != 0);
|
||||
}
|
||||
if new_sp != 0 {
|
||||
child_context.set_rsp(new_sp as u64);
|
||||
}
|
||||
if clone_flags.contains(CloneFlags::CLONE_SETTLS) {
|
||||
// x86_64 specific: TLS is the fsbase register
|
||||
child_context.set_fsbase(tls as u64);
|
||||
}
|
||||
|
||||
child_context
|
||||
}
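// Editor's sketch (not part of this commit): how a caller could derive the child's
// CPU context for a pthread-style clone. The stack-top and TLS addresses below are
// hypothetical placeholders; in practice they come from the user's clone() arguments.
fn example_child_context(parent_ctx: CpuContext, clone_flags: CloneFlags) -> CpuContext {
    let child_stack_top: u64 = 0x7f00_0000_0000; // hypothetical user-allocated stack top
    let child_tls: u64 = 0x7f00_0001_0000;       // hypothetical TLS block address
    // rax is zeroed, rsp points to the new stack, and fsbase is set when CLONE_SETTLS is given.
    clone_cpu_context(parent_ctx, child_stack_top, child_tls, clone_flags)
}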
|
||||
|
||||
fn clone_fs(clone_flags: CloneFlags) -> Result<()> {
|
||||
if clone_flags.contains(CloneFlags::CLONE_FS) {
|
||||
warn!("CLONE_FS is not supported now")
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn clone_files(
|
||||
parent_file_table: &Arc<Mutex<FileTable>>,
|
||||
clone_flags: CloneFlags,
|
||||
) -> Arc<Mutex<FileTable>> {
|
||||
// If CLONE_FILES is set, the child and parent share the same file table.
// Otherwise, the child gets a deep copy of the file table.
// FIXME: the clone may not be a deep copy.
|
||||
if clone_flags.contains(CloneFlags::CLONE_FILES) {
|
||||
parent_file_table.clone()
|
||||
} else {
|
||||
Arc::new(Mutex::new(parent_file_table.lock().clone()))
|
||||
}
|
||||
}
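// Editor's sketch (not part of this commit): the sharing semantics of clone_files.
// `parent_file_table` stands for the parent's file table.
fn example_clone_files(parent_file_table: &Arc<Mutex<FileTable>>) {
    let shared = clone_files(parent_file_table, CloneFlags::CLONE_FILES);
    debug_assert!(Arc::ptr_eq(parent_file_table, &shared)); // same table is shared
    let copied = clone_files(parent_file_table, CloneFlags::empty());
    debug_assert!(!Arc::ptr_eq(parent_file_table, &copied)); // independent copy
}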
|
||||
|
||||
fn clone_sighand(
|
||||
parent_sig_dispositions: &Arc<Mutex<SigDispositions>>,
|
||||
clone_flags: CloneFlags,
|
||||
) -> Arc<Mutex<SigDispositions>> {
|
||||
// similar to CLONE_FILES
|
||||
if clone_flags.contains(CloneFlags::CLONE_SIGHAND) {
|
||||
parent_sig_dispositions.clone()
|
||||
} else {
|
||||
Arc::new(Mutex::new(parent_sig_dispositions.lock().clone()))
|
||||
}
|
||||
}
|
||||
|
||||
fn clone_sysvsem(clone_flags: CloneFlags) -> Result<()> {
|
||||
if clone_flags.contains(CloneFlags::CLONE_SYSVSEM) {
|
||||
warn!("CLONE_SYSVSEM is not supported now");
|
||||
}
|
||||
Ok(())
|
||||
}
@ -12,7 +12,6 @@ use crate::{prelude::*, rights::Full, vm::vmar::Vmar};
|
||||
/// 2. create a vmo for each elf segment, create a backup pager for each segment. Then map the vmo to the root vmar.
|
||||
/// 3. write proper content to the init stack.
|
||||
pub fn load_elf_to_root_vmar(
|
||||
filename: CString,
|
||||
elf_file_content: &'static [u8],
|
||||
root_vmar: &Vmar<Full>,
|
||||
argv: Vec<CString>,
|
||||
|
@ -1,130 +1,120 @@
|
||||
use core::sync::atomic::{AtomicI32, Ordering};
|
||||
|
||||
use self::name::ProcessName;
|
||||
use self::posix_thread::posix_thread_ext::PosixThreadExt;
|
||||
use self::process_group::ProcessGroup;
|
||||
use self::process_vm::user_heap::UserHeap;
|
||||
use self::process_vm::UserVm;
|
||||
use self::rlimit::ResourceLimits;
|
||||
use self::signal::constants::SIGCHLD;
|
||||
use self::signal::sig_disposition::SigDispositions;
|
||||
use self::signal::sig_mask::SigMask;
|
||||
use self::signal::sig_queues::SigQueues;
|
||||
use self::signal::signals::kernel::KernelSignal;
|
||||
use self::status::ProcessStatus;
|
||||
use self::task::create_user_task_from_elf;
|
||||
use crate::fs::file_table::FileTable;
|
||||
use crate::prelude::*;
|
||||
use crate::rights::Full;
|
||||
use crate::tty::get_console;
|
||||
use crate::thread::kernel_thread::KernelThreadExt;
|
||||
use crate::thread::{thread_table, Thread};
|
||||
use crate::tty::get_n_tty;
|
||||
use crate::vm::vmar::Vmar;
|
||||
use jinux_frame::sync::WaitQueue;
|
||||
use jinux_frame::{task::Task, user::UserSpace};
|
||||
use jinux_frame::task::Task;
|
||||
|
||||
pub mod clone;
|
||||
pub mod elf;
|
||||
pub mod exception;
|
||||
pub mod fifo_scheduler;
|
||||
pub mod name;
|
||||
pub mod posix_thread;
|
||||
pub mod process_filter;
|
||||
pub mod process_group;
|
||||
pub mod process_table;
|
||||
pub mod process_vm;
|
||||
pub mod rlimit;
|
||||
pub mod signal;
|
||||
pub mod status;
|
||||
pub mod table;
|
||||
pub mod task;
|
||||
pub mod wait;
|
||||
|
||||
static PID_ALLOCATOR: AtomicI32 = AtomicI32::new(0);
|
||||
|
||||
pub type Pid = i32;
|
||||
pub type Pgid = i32;
|
||||
pub type ExitCode = i32;
|
||||
|
||||
/// Process stands for a set of tasks that shares the same userspace.
|
||||
/// Currently, we only support one task inside a process.
|
||||
/// Process stands for a set of threads that shares the same userspace.
|
||||
/// Currently, we only support one thread inside a process.
|
||||
pub struct Process {
|
||||
// Immutable Part
|
||||
pid: Pid,
|
||||
task: Arc<Task>,
|
||||
filename: Option<CString>,
|
||||
user_space: Option<Arc<UserSpace>>,
|
||||
elf_path: Option<CString>,
|
||||
user_vm: Option<UserVm>,
|
||||
root_vmar: Option<Vmar<Full>>,
|
||||
root_vmar: Arc<Vmar<Full>>,
|
||||
/// wait for child status changed
|
||||
waiting_children: WaitQueue,
|
||||
/// wait for io events
|
||||
poll_queue: WaitQueue,
|
||||
|
||||
// Mutable Part
|
||||
/// The threads
|
||||
threads: Mutex<Vec<Arc<Thread>>>,
|
||||
/// The exit code
|
||||
exit_code: AtomicI32,
|
||||
/// Process status
|
||||
status: Mutex<ProcessStatus>,
|
||||
/// Parent process
|
||||
parent: Mutex<Option<Weak<Process>>>,
|
||||
parent: Mutex<Weak<Process>>,
|
||||
/// Children processes
|
||||
children: Mutex<BTreeMap<Pid, Arc<Process>>>,
|
||||
/// Process group
|
||||
process_group: Mutex<Option<Weak<ProcessGroup>>>,
|
||||
/// Process name
|
||||
process_name: Mutex<Option<ProcessName>>,
|
||||
process_group: Mutex<Weak<ProcessGroup>>,
|
||||
/// File table
|
||||
file_table: Mutex<FileTable>,
|
||||
file_table: Arc<Mutex<FileTable>>,
|
||||
/// resource limits
|
||||
resource_limits: Mutex<ResourceLimits>,
|
||||
|
||||
// Signal
|
||||
sig_dispositions: Mutex<SigDispositions>,
|
||||
/// sig dispositions
|
||||
sig_dispositions: Arc<Mutex<SigDispositions>>,
|
||||
/// Process-level signal queues
|
||||
sig_queues: Mutex<SigQueues>,
|
||||
/// Process-level sigmask
|
||||
sig_mask: Mutex<SigMask>,
|
||||
/// Signal handler ucontext address
|
||||
sig_context: Mutex<VecDeque<Vaddr>>,
|
||||
}
|
||||
|
||||
impl Process {
|
||||
/// returns the current process
|
||||
pub fn current() -> Arc<Process> {
|
||||
let task = Task::current();
|
||||
let process = task
|
||||
.data()
|
||||
.downcast_ref::<Weak<Process>>()
|
||||
.expect("[Internal Error] task data should points to weak<process>");
|
||||
process
|
||||
.upgrade()
|
||||
.expect("[Internal Error] current process cannot be None")
|
||||
let current_thread = Thread::current();
|
||||
if current_thread.is_posix_thread() {
|
||||
let posix_thread = current_thread.posix_thread();
|
||||
posix_thread.process()
|
||||
} else if current_thread.is_kernel_thread() {
|
||||
let kernel_thread = current_thread.kernel_thread();
|
||||
kernel_thread.process()
|
||||
} else {
|
||||
panic!("[Internal error] The process is neither a kernel process nor a user process");
|
||||
}
|
||||
}
|
||||
|
||||
/// create a new process(not schedule it)
|
||||
pub fn new(
|
||||
pid: Pid,
|
||||
task: Arc<Task>,
|
||||
exec_filename: Option<CString>,
|
||||
threads: Vec<Arc<Thread>>,
|
||||
elf_path: Option<CString>,
|
||||
user_vm: Option<UserVm>,
|
||||
user_space: Option<Arc<UserSpace>>,
|
||||
root_vmar: Option<Vmar<Full>>,
|
||||
process_group: Option<Weak<ProcessGroup>>,
|
||||
file_table: FileTable,
|
||||
sig_dispositions: SigDispositions,
|
||||
sig_queues: SigQueues,
|
||||
sig_mask: SigMask,
|
||||
root_vmar: Arc<Vmar<Full>>,
|
||||
process_group: Weak<ProcessGroup>,
|
||||
file_table: Arc<Mutex<FileTable>>,
|
||||
sig_dispositions: Arc<Mutex<SigDispositions>>,
|
||||
) -> Self {
|
||||
let parent = if pid == 0 {
|
||||
None
|
||||
Weak::new()
|
||||
} else {
|
||||
let current_process = current!();
|
||||
Some(Arc::downgrade(¤t_process))
|
||||
Arc::downgrade(¤t_process)
|
||||
};
|
||||
let children = BTreeMap::new();
|
||||
let waiting_children = WaitQueue::new();
|
||||
let poll_queue = WaitQueue::new();
|
||||
let process_name = exec_filename.as_ref().map(|filename| {
|
||||
let mut process_name = ProcessName::new();
|
||||
process_name.set_name(filename).unwrap();
|
||||
process_name
|
||||
});
|
||||
let resource_limits = ResourceLimits::default();
|
||||
Self {
|
||||
pid,
|
||||
task,
|
||||
filename: exec_filename,
|
||||
user_space,
|
||||
threads: Mutex::new(threads),
|
||||
elf_path,
|
||||
user_vm,
|
||||
root_vmar,
|
||||
waiting_children,
|
||||
@ -134,12 +124,10 @@ impl Process {
|
||||
parent: Mutex::new(parent),
|
||||
children: Mutex::new(children),
|
||||
process_group: Mutex::new(process_group),
|
||||
process_name: Mutex::new(process_name),
|
||||
file_table: Mutex::new(file_table),
|
||||
sig_dispositions: Mutex::new(sig_dispositions),
|
||||
sig_queues: Mutex::new(sig_queues),
|
||||
sig_mask: Mutex::new(sig_mask),
|
||||
sig_context: Mutex::new(VecDeque::new()),
|
||||
file_table,
|
||||
sig_dispositions,
|
||||
sig_queues: Mutex::new(SigQueues::new()),
|
||||
resource_limits: Mutex::new(resource_limits),
|
||||
}
|
||||
}
|
||||
|
||||
@ -151,7 +139,7 @@ impl Process {
|
||||
&self.poll_queue
|
||||
}
|
||||
|
||||
/// init a user process and send the process to scheduler
|
||||
/// init a user process and run the process
|
||||
pub fn spawn_user_process(
|
||||
filename: CString,
|
||||
elf_file_content: &'static [u8],
|
||||
@ -161,68 +149,65 @@ impl Process {
|
||||
let process = Process::create_user_process(filename, elf_file_content, argv, envp);
|
||||
// FIXME: How to determine the fg process group?
|
||||
let pgid = process.pgid();
|
||||
get_console().set_fg(pgid);
|
||||
process.send_to_scheduler();
|
||||
// FIXME: tty should be a parameter?
|
||||
let tty = get_n_tty();
|
||||
tty.set_fg(pgid);
|
||||
process.run();
|
||||
process
|
||||
}
|
||||
|
||||
/// init a kernel process and send the process to scheduler
|
||||
/// init a kernel process and run the process
|
||||
pub fn spawn_kernel_process<F>(task_fn: F) -> Arc<Self>
|
||||
where
|
||||
F: Fn() + Send + Sync + 'static,
|
||||
{
|
||||
let process_fn = move || {
|
||||
task_fn();
|
||||
current!().exit(0);
|
||||
current!().exit_group(0);
|
||||
};
|
||||
let process = Process::create_kernel_process(process_fn);
|
||||
process.send_to_scheduler();
|
||||
process.run();
|
||||
process
|
||||
}
|
||||
|
||||
fn create_user_process(
|
||||
filename: CString,
|
||||
elf_path: CString,
|
||||
elf_file_content: &'static [u8],
|
||||
argv: Vec<CString>,
|
||||
envp: Vec<CString>,
|
||||
) -> Arc<Self> {
|
||||
let pid = new_pid();
|
||||
|
||||
let user_process = Arc::new_cyclic(|weak_process_ref| {
|
||||
let weak_process = weak_process_ref.clone();
|
||||
let cloned_filename = Some(filename.clone());
|
||||
let cloned_filename = Some(elf_path.clone());
|
||||
let root_vmar = Vmar::<Full>::new_root().unwrap();
|
||||
let task = create_user_task_from_elf(
|
||||
let thread = Thread::new_posix_thread_from_elf(
|
||||
&root_vmar,
|
||||
filename,
|
||||
elf_path,
|
||||
elf_file_content,
|
||||
weak_process,
|
||||
argv,
|
||||
envp,
|
||||
);
|
||||
let user_space = task.user_space().map(|user_space| user_space.clone());
|
||||
let pid = thread.tid();
|
||||
let user_vm = UserVm::new();
|
||||
let file_table = FileTable::new_with_stdio();
|
||||
let sig_dispositions = SigDispositions::new();
|
||||
let sig_queues = SigQueues::new();
|
||||
let sig_mask = SigMask::new_empty();
|
||||
Process::new(
|
||||
|
||||
let process = Process::new(
|
||||
pid,
|
||||
task,
|
||||
vec![thread],
|
||||
cloned_filename,
|
||||
Some(user_vm),
|
||||
user_space,
|
||||
Some(root_vmar),
|
||||
None,
|
||||
file_table,
|
||||
sig_dispositions,
|
||||
sig_queues,
|
||||
sig_mask,
|
||||
)
|
||||
Arc::new(root_vmar),
|
||||
Weak::new(),
|
||||
Arc::new(Mutex::new(file_table)),
|
||||
Arc::new(Mutex::new(sig_dispositions)),
|
||||
);
|
||||
process
|
||||
});
|
||||
// Set process group
|
||||
user_process.create_and_set_process_group();
|
||||
table::add_process(user_process.clone());
|
||||
process_table::add_process(user_process.clone());
|
||||
let parent = user_process
|
||||
.parent()
|
||||
.expect("[Internal error] User process should always have a parent");
|
||||
@ -234,30 +219,27 @@ impl Process {
|
||||
where
|
||||
F: Fn() + Send + Sync + 'static,
|
||||
{
|
||||
let pid = new_pid();
|
||||
let kernel_process = Arc::new_cyclic(|weak_process_ref| {
|
||||
let weak_process = weak_process_ref.clone();
|
||||
let task = Task::new(task_fn, weak_process, None).expect("spawn kernel task failed");
|
||||
let thread = Thread::new_kernel_thread(task_fn, weak_process_ref.clone());
|
||||
let pid = thread.tid();
|
||||
let file_table = FileTable::new();
|
||||
let sig_dispositions = SigDispositions::new();
|
||||
let sig_queues = SigQueues::new();
|
||||
let sig_mask = SigMask::new_empty();
|
||||
// FIXME: kernel process does not need root vmar
|
||||
let root_vmar = Vmar::<Full>::new_root().unwrap();
|
||||
Process::new(
|
||||
pid,
|
||||
task,
|
||||
vec![thread],
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
file_table,
|
||||
sig_dispositions,
|
||||
sig_queues,
|
||||
sig_mask,
|
||||
Arc::new(root_vmar),
|
||||
Weak::new(),
|
||||
Arc::new(Mutex::new(file_table)),
|
||||
Arc::new(Mutex::new(sig_dispositions)),
|
||||
)
|
||||
});
|
||||
kernel_process.create_and_set_process_group();
|
||||
table::add_process(kernel_process.clone());
|
||||
process_table::add_process(kernel_process.clone());
|
||||
if let Some(parent) = kernel_process.parent() {
|
||||
parent.add_child(kernel_process.clone());
|
||||
}
|
||||
@ -271,53 +253,37 @@ impl Process {
|
||||
|
||||
/// returns the process group id of the process
|
||||
pub fn pgid(&self) -> Pgid {
|
||||
if let Some(process_group) = self
|
||||
.process_group
|
||||
.lock()
|
||||
.as_ref()
|
||||
.map(|process_group| process_group.upgrade())
|
||||
.flatten()
|
||||
{
|
||||
if let Some(process_group) = self.process_group.lock().upgrade() {
|
||||
process_group.pgid()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
}
|
||||
|
||||
pub fn process_name(&self) -> &Mutex<Option<ProcessName>> {
|
||||
&self.process_name
|
||||
}
|
||||
|
||||
pub fn process_group(&self) -> &Mutex<Option<Weak<ProcessGroup>>> {
|
||||
pub fn process_group(&self) -> &Mutex<Weak<ProcessGroup>> {
|
||||
&self.process_group
|
||||
}
|
||||
|
||||
pub fn sig_context(&self) -> &Mutex<VecDeque<Vaddr>> {
|
||||
&self.sig_context
|
||||
}
|
||||
|
||||
/// add a child process
|
||||
pub fn add_child(&self, child: Arc<Process>) {
|
||||
let child_pid = child.pid();
|
||||
self.children.lock().insert(child_pid, child);
|
||||
}
|
||||
|
||||
fn set_parent(&self, parent: Weak<Process>) {
|
||||
let _ = self.parent.lock().insert(parent);
|
||||
pub fn set_parent(&self, parent: Weak<Process>) {
|
||||
*self.parent.lock() = parent;
|
||||
}
|
||||
|
||||
/// Set process group for current process. If old process group exists,
|
||||
/// remove current process from old process group.
|
||||
pub fn set_process_group(&self, process_group: Weak<ProcessGroup>) {
|
||||
if let Some(old_process_group) = &*self.process_group().lock() {
|
||||
if let Some(old_process_group) = old_process_group.upgrade() {
|
||||
old_process_group.remove_process(self.pid());
|
||||
}
|
||||
if let Some(old_process_group) = self.process_group.lock().upgrade() {
|
||||
old_process_group.remove_process(self.pid());
|
||||
}
|
||||
let _ = self.process_group.lock().insert(process_group);
|
||||
*self.process_group.lock() = process_group;
|
||||
}
|
||||
|
||||
pub fn file_table(&self) -> &Mutex<FileTable> {
|
||||
pub fn file_table(&self) -> &Arc<Mutex<FileTable>> {
|
||||
&self.file_table
|
||||
}
|
||||
|
||||
@ -327,24 +293,24 @@ impl Process {
|
||||
let process_group = Arc::new(ProcessGroup::new(self.clone()));
|
||||
let pgid = process_group.pgid();
|
||||
self.set_process_group(Arc::downgrade(&process_group));
|
||||
table::add_process_group(process_group);
|
||||
process_table::add_process_group(process_group);
|
||||
}
|
||||
|
||||
pub fn parent(&self) -> Option<Arc<Process>> {
|
||||
self.parent
|
||||
.lock()
|
||||
.as_ref()
|
||||
.map(|parent| parent.upgrade())
|
||||
.flatten()
|
||||
self.parent.lock().upgrade()
|
||||
}
|
||||
|
||||
/// Exit process.
|
||||
/// Exit thread group(the process).
|
||||
/// Set the status of the process as Zombie and set exit code.
|
||||
/// Move all children to init process.
|
||||
/// Wake up the parent wait queue if parent is waiting for self.
|
||||
pub fn exit(&self, exit_code: i32) {
|
||||
pub fn exit_group(&self, exit_code: i32) {
|
||||
debug!("exit group was called");
|
||||
self.status.lock().set_zombie();
|
||||
self.exit_code.store(exit_code, Ordering::Relaxed);
|
||||
for thread in &*self.threads.lock() {
|
||||
thread.exit();
|
||||
}
|
||||
// move children to the init process
|
||||
if !self.is_init_process() {
|
||||
let init_process = get_init_process();
|
||||
@ -369,8 +335,18 @@ impl Process {
|
||||
}
|
||||
|
||||
/// start to run current process
|
||||
pub fn send_to_scheduler(self: &Arc<Self>) {
|
||||
self.task.send_to_scheduler();
|
||||
pub fn run(&self) {
|
||||
let threads = self.threads.lock();
|
||||
// when running the process, it should have only one thread
|
||||
debug_assert!(threads.len() == 1);
|
||||
let thread = threads[0].clone();
|
||||
// should not hold the lock when running the thread
|
||||
drop(threads);
|
||||
thread.run();
|
||||
}
|
||||
|
||||
pub fn threads(&self) -> &Mutex<Vec<Arc<Thread>>> {
|
||||
&self.threads
|
||||
}
|
||||
|
||||
/// yield the current process to allow other processes to run
|
||||
@ -378,19 +354,14 @@ impl Process {
|
||||
Task::yield_now();
|
||||
}
|
||||
|
||||
/// returns the userspace
|
||||
pub fn user_space(&self) -> Option<&Arc<UserSpace>> {
|
||||
self.user_space.as_ref()
|
||||
}
|
||||
|
||||
/// returns the user_vm
|
||||
pub fn user_vm(&self) -> Option<&UserVm> {
|
||||
self.user_vm.as_ref()
|
||||
}
|
||||
|
||||
/// returns the root vmar
|
||||
pub fn root_vmar(&self) -> Option<&Vmar<Full>> {
|
||||
self.root_vmar.as_ref()
|
||||
pub fn root_vmar(&self) -> &Arc<Vmar<Full>> {
|
||||
&self.root_vmar
|
||||
}
|
||||
|
||||
/// returns the user heap if the process does have, otherwise None
|
||||
@ -406,21 +377,22 @@ impl Process {
|
||||
pub fn reap_zombie_child(&self, pid: Pid) -> i32 {
|
||||
let child_process = self.children.lock().remove(&pid).unwrap();
|
||||
assert!(child_process.status().lock().is_zombie());
|
||||
table::remove_process(child_process.pid());
|
||||
if let Some(process_group) = child_process.process_group().lock().as_ref() {
|
||||
if let Some(process_group) = process_group.upgrade() {
|
||||
process_group.remove_process(child_process.pid);
|
||||
}
|
||||
for thread in &*child_process.threads.lock() {
|
||||
thread_table::remove_thread(thread.tid());
|
||||
}
|
||||
child_process.exit_code()
|
||||
process_table::remove_process(child_process.pid());
|
||||
if let Some(process_group) = child_process.process_group().lock().upgrade() {
|
||||
process_group.remove_process(child_process.pid);
|
||||
}
|
||||
child_process.exit_code().load(Ordering::SeqCst)
|
||||
}
|
||||
|
||||
pub fn children(&self) -> &Mutex<BTreeMap<Pid, Arc<Process>>> {
|
||||
&self.children
|
||||
}
|
||||
|
||||
pub fn exit_code(&self) -> i32 {
|
||||
self.exit_code.load(Ordering::Relaxed)
|
||||
pub fn exit_code(&self) -> &AtomicI32 {
|
||||
&self.exit_code
|
||||
}
|
||||
|
||||
/// whether the process has child process
|
||||
@ -429,24 +401,24 @@ impl Process {
|
||||
}
|
||||
|
||||
pub fn filename(&self) -> Option<&CString> {
|
||||
self.filename.as_ref()
|
||||
self.elf_path.as_ref()
|
||||
}
|
||||
|
||||
pub fn status(&self) -> &Mutex<ProcessStatus> {
|
||||
&self.status
|
||||
}
|
||||
|
||||
pub fn sig_dispositions(&self) -> &Mutex<SigDispositions> {
|
||||
pub fn resource_limits(&self) -> &Mutex<ResourceLimits> {
|
||||
&self.resource_limits
|
||||
}
|
||||
|
||||
pub fn sig_dispositions(&self) -> &Arc<Mutex<SigDispositions>> {
|
||||
&self.sig_dispositions
|
||||
}
|
||||
|
||||
pub fn sig_queues(&self) -> &Mutex<SigQueues> {
|
||||
&self.sig_queues
|
||||
}
|
||||
|
||||
pub fn sig_mask(&self) -> &Mutex<SigMask> {
|
||||
&self.sig_mask
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the init process
|
||||
@ -456,16 +428,9 @@ pub fn get_init_process() -> Arc<Process> {
|
||||
let process = current_process
|
||||
.parent
|
||||
.lock()
|
||||
.as_ref()
|
||||
.map(|current| current.upgrade())
|
||||
.flatten()
|
||||
.upgrade()
|
||||
.expect("[Internal Error] init process cannot be None");
|
||||
current_process = process;
|
||||
}
|
||||
current_process
|
||||
}
|
||||
|
||||
/// allocate a new pid for new process
|
||||
pub fn new_pid() -> Pid {
|
||||
PID_ALLOCATOR.fetch_add(1, Ordering::Release)
|
||||
}
|
||||
|
@ -1,31 +0,0 @@
|
||||
use crate::prelude::*;
|
||||
|
||||
pub const MAX_PROCESS_NAME_LEN: usize = 128;
|
||||
pub struct ProcessName {
|
||||
inner: [u8; MAX_PROCESS_NAME_LEN],
|
||||
count: usize,
|
||||
}
|
||||
|
||||
impl ProcessName {
|
||||
pub fn new() -> Self {
|
||||
ProcessName {
|
||||
inner: [0; MAX_PROCESS_NAME_LEN],
|
||||
count: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_name(&mut self, name: &CStr) -> Result<()> {
|
||||
let bytes = name.to_bytes_with_nul();
|
||||
let bytes_len = bytes.len();
|
||||
if bytes_len > MAX_PROCESS_NAME_LEN {
|
||||
return_errno_with_message!(Errno::E2BIG, "process name is too long");
|
||||
}
|
||||
self.count = bytes_len;
|
||||
self.inner[..bytes_len].clone_from_slice(bytes);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_name(&self) -> Result<Option<&CStr>> {
|
||||
Ok(Some(&(CStr::from_bytes_with_nul(&self.inner)?)))
|
||||
}
|
||||
}
|
109
src/services/libs/jinux-std/src/process/posix_thread/builder.rs
Normal file
@ -0,0 +1,109 @@
|
||||
use jinux_frame::user::UserSpace;
|
||||
|
||||
use crate::{
|
||||
prelude::*,
|
||||
process::{
|
||||
posix_thread::name::ThreadName,
|
||||
signal::{sig_mask::SigMask, sig_queues::SigQueues},
|
||||
Process,
|
||||
},
|
||||
thread::{status::ThreadStatus, task::create_new_user_task, thread_table, Thread, Tid},
|
||||
};
|
||||
|
||||
use super::PosixThread;
|
||||
|
||||
/// The builder to build a posix thread
|
||||
pub struct PosixThreadBuilder {
|
||||
// The essential part
|
||||
tid: Tid,
|
||||
user_space: Arc<UserSpace>,
|
||||
process: Weak<Process>,
|
||||
|
||||
// Optional part
|
||||
thread_name: Option<ThreadName>,
|
||||
set_child_tid: Vaddr,
|
||||
clear_child_tid: Vaddr,
|
||||
sig_mask: SigMask,
|
||||
sig_queues: SigQueues,
|
||||
is_main_thread: bool,
|
||||
}
|
||||
|
||||
impl PosixThreadBuilder {
|
||||
pub fn new(tid: Tid, user_space: Arc<UserSpace>) -> Self {
|
||||
Self {
|
||||
tid,
|
||||
user_space,
|
||||
process: Weak::new(),
|
||||
thread_name: None,
|
||||
set_child_tid: 0,
|
||||
clear_child_tid: 0,
|
||||
sig_mask: SigMask::new_empty(),
|
||||
sig_queues: SigQueues::new(),
|
||||
is_main_thread: true,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn process(mut self, process: Weak<Process>) -> Self {
|
||||
self.process = process;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn thread_name(mut self, thread_name: Option<ThreadName>) -> Self {
|
||||
self.thread_name = thread_name;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn set_child_tid(mut self, set_child_tid: Vaddr) -> Self {
|
||||
self.set_child_tid = set_child_tid;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn clear_child_tid(mut self, clear_child_tid: Vaddr) -> Self {
|
||||
self.clear_child_tid = clear_child_tid;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn is_main_thread(mut self, is_main_thread: bool) -> Self {
|
||||
self.is_main_thread = is_main_thread;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn sig_mask(mut self, sig_mask: SigMask) -> Self {
|
||||
self.sig_mask = sig_mask;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn build(self) -> Arc<Thread> {
|
||||
let Self {
|
||||
tid,
|
||||
user_space,
|
||||
process,
|
||||
thread_name,
|
||||
set_child_tid,
|
||||
clear_child_tid,
|
||||
sig_mask,
|
||||
sig_queues,
|
||||
is_main_thread,
|
||||
} = self;
|
||||
let thread = Arc::new_cyclic(|thread_ref| {
|
||||
let task = create_new_user_task(user_space, thread_ref.clone());
|
||||
let status = ThreadStatus::Init;
|
||||
let sig_context = Mutex::new(None);
|
||||
let posix_thread = PosixThread {
|
||||
process,
|
||||
is_main_thread,
|
||||
name: Mutex::new(thread_name),
|
||||
set_child_tid: Mutex::new(set_child_tid),
|
||||
clear_child_tid: Mutex::new(clear_child_tid),
|
||||
sig_mask: Mutex::new(sig_mask),
|
||||
sig_queues: Mutex::new(sig_queues),
|
||||
sig_context,
|
||||
robust_list: Mutex::new(None),
|
||||
};
|
||||
|
||||
Thread::new(tid, task, posix_thread, status)
|
||||
});
|
||||
thread_table::add_thread(thread.clone());
|
||||
thread
|
||||
}
|
||||
}
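// Editor's sketch (not part of this commit): typical use of the builder, mirroring
// how clone_child constructs the child's thread above. All arguments are placeholders
// for values the caller already has.
fn example_build_thread(
    tid: Tid,
    user_space: Arc<UserSpace>,
    weak_process: Weak<Process>,
    thread_name: Option<ThreadName>,
    sig_mask: SigMask,
) -> Arc<Thread> {
    PosixThreadBuilder::new(tid, user_space)
        .process(weak_process)
        .thread_name(thread_name)
        .sig_mask(sig_mask)
        .build()
}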
|
438
src/services/libs/jinux-std/src/process/posix_thread/futex.rs
Normal file
@ -0,0 +1,438 @@
|
||||
use core::sync::atomic::{AtomicBool, Ordering};
|
||||
|
||||
use jinux_frame::cpu::num_cpus;
|
||||
|
||||
use crate::{
|
||||
prelude::*,
|
||||
thread::{Thread, Tid},
|
||||
util::read_val_from_user,
|
||||
};
|
||||
|
||||
type FutexBitSet = u32;
|
||||
type FutexBucketRef = Arc<Mutex<FutexBucket>>;
|
||||
|
||||
const FUTEX_OP_MASK: u32 = 0x0000_000F;
|
||||
const FUTEX_FLAGS_MASK: u32 = 0xFFFF_FFF0;
|
||||
const FUTEX_BITSET_MATCH_ANY: FutexBitSet = 0xFFFF_FFFF;
|
||||
|
||||
/// do futex wait
|
||||
pub fn futex_wait(futex_addr: u64, futex_val: i32, timeout: &Option<FutexTimeout>) -> Result<()> {
|
||||
futex_wait_bitset(futex_addr as _, futex_val, timeout, FUTEX_BITSET_MATCH_ANY)
|
||||
}
|
||||
|
||||
/// do futex wait bitset
|
||||
pub fn futex_wait_bitset(
|
||||
futex_addr: Vaddr,
|
||||
futex_val: i32,
|
||||
timeout: &Option<FutexTimeout>,
|
||||
bitset: FutexBitSet,
|
||||
) -> Result<()> {
|
||||
debug!(
|
||||
"futex_wait_bitset addr: {:#x}, val: {}, timeout: {:?}, bitset: {:#x}",
|
||||
futex_addr, futex_val, timeout, bitset
|
||||
);
|
||||
let futex_key = FutexKey::new(futex_addr);
|
||||
let (_, futex_bucket_ref) = FUTEX_BUCKETS.get_bucket(futex_key);
|
||||
|
||||
// lock futex bucket ref here to avoid data race
|
||||
let mut futex_bucket = futex_bucket_ref.lock();
|
||||
|
||||
if futex_key.load_val() != futex_val {
|
||||
return_errno_with_message!(Errno::EINVAL, "futex value does not match");
|
||||
}
|
||||
let futex_item = FutexItem::new(futex_key, bitset);
|
||||
futex_bucket.enqueue_item(futex_item.clone());
|
||||
|
||||
// drop lock
|
||||
drop(futex_bucket);
|
||||
// Wait on the futex item
|
||||
futex_item.wait();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// do futex wake
|
||||
pub fn futex_wake(futex_addr: Vaddr, max_count: usize) -> Result<usize> {
|
||||
futex_wake_bitset(futex_addr, max_count, FUTEX_BITSET_MATCH_ANY)
|
||||
}
|
||||
|
||||
/// Do futex wake with bitset
|
||||
pub fn futex_wake_bitset(
|
||||
futex_addr: Vaddr,
|
||||
max_count: usize,
|
||||
bitset: FutexBitSet,
|
||||
) -> Result<usize> {
|
||||
debug!(
|
||||
"futex_wake_bitset addr: {:#x}, max_count: {}, bitset: {:#x}",
|
||||
futex_addr as usize, max_count, bitset
|
||||
);
|
||||
|
||||
let futex_key = FutexKey::new(futex_addr);
|
||||
let (_, futex_bucket_ref) = FUTEX_BUCKETS.get_bucket(futex_key);
|
||||
let mut futex_bucket = futex_bucket_ref.lock();
|
||||
let res = futex_bucket.dequeue_and_wake_items(futex_key, max_count, bitset);
|
||||
// debug!("futex wake bitset succeeds, res = {}", res);
|
||||
drop(futex_bucket);
|
||||
// for _ in 0..res {
|
||||
// Thread::yield_now();
|
||||
// }
|
||||
Ok(res)
|
||||
}
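// Editor's sketch (not part of this commit): the wait/wake protocol built on the two
// entry points above. `futex_addr` is a hypothetical user-space address and the two
// halves run in different threads.
//
// Waiter thread: blocks only if the futex word currently equals 0, otherwise EINVAL.
//     futex_wait(futex_addr as u64, 0, &None)?;
// Waker thread: after storing a new value to the word, wakes at most one waiter.
//     let woken = futex_wake(futex_addr, 1)?;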
|
||||
|
||||
/// Do futex requeue
|
||||
pub fn futex_requeue(
|
||||
futex_addr: Vaddr,
|
||||
max_nwakes: usize,
|
||||
max_nrequeues: usize,
|
||||
futex_new_addr: Vaddr,
|
||||
) -> Result<usize> {
|
||||
if futex_new_addr == futex_addr {
|
||||
return futex_wake(futex_addr, max_nwakes);
|
||||
}
|
||||
|
||||
let futex_key = FutexKey::new(futex_addr);
|
||||
let futex_new_key = FutexKey::new(futex_new_addr);
|
||||
let (bucket_idx, futex_bucket_ref) = FUTEX_BUCKETS.get_bucket(futex_key);
|
||||
let (new_bucket_idx, futex_new_bucket_ref) = FUTEX_BUCKETS.get_bucket(futex_new_key);
|
||||
|
||||
let nwakes = {
|
||||
if bucket_idx == new_bucket_idx {
|
||||
let mut futex_bucket = futex_bucket_ref.lock();
|
||||
let nwakes =
|
||||
futex_bucket.dequeue_and_wake_items(futex_key, max_nwakes, FUTEX_BITSET_MATCH_ANY);
|
||||
futex_bucket.update_item_keys(futex_key, futex_new_key, max_nrequeues);
|
||||
drop(futex_bucket);
|
||||
nwakes
|
||||
} else {
|
||||
let (mut futex_bucket, mut futex_new_bucket) = {
|
||||
if bucket_idx < new_bucket_idx {
|
||||
let futex_bucket = futex_bucket_ref.lock();
|
||||
let futext_new_bucket = futex_new_bucket_ref.lock();
|
||||
(futex_bucket, futext_new_bucket)
|
||||
} else {
|
||||
// bucket_idx > new_bucket_idx
|
||||
let futex_new_bucket = futex_new_bucket_ref.lock();
|
||||
let futex_bucket = futex_bucket_ref.lock();
|
||||
(futex_bucket, futex_new_bucket)
|
||||
}
|
||||
};
|
||||
|
||||
let nwakes =
|
||||
futex_bucket.dequeue_and_wake_items(futex_key, max_nwakes, FUTEX_BITSET_MATCH_ANY);
|
||||
futex_bucket.requeue_items_to_another_bucket(
|
||||
futex_key,
|
||||
&mut futex_new_bucket,
|
||||
futex_new_key,
|
||||
max_nrequeues,
|
||||
);
|
||||
nwakes
|
||||
}
|
||||
};
|
||||
Ok(nwakes)
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
// Use the same count as the Linux kernel to keep the same performance
|
||||
static ref BUCKET_COUNT: usize = ((1<<8)* num_cpus()).next_power_of_two() as _;
|
||||
static ref BUCKET_MASK: usize = *BUCKET_COUNT - 1;
|
||||
static ref FUTEX_BUCKETS: FutexBucketVec = FutexBucketVec::new(*BUCKET_COUNT);
|
||||
}
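// Worked example (editor's note, not in the commit): on a 4-CPU machine,
// BUCKET_COUNT = ((1 << 8) * 4).next_power_of_two() = 1024, so BUCKET_MASK = 1023
// and get_bucket() maps every futex key to one of 1024 buckets.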
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct FutexTimeout {}
|
||||
|
||||
impl FutexTimeout {
|
||||
pub fn new() -> Self {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
struct FutexBucketVec {
|
||||
vec: Vec<FutexBucketRef>,
|
||||
}
|
||||
|
||||
impl FutexBucketVec {
|
||||
pub fn new(size: usize) -> FutexBucketVec {
|
||||
let mut buckets = FutexBucketVec {
|
||||
vec: Vec::with_capacity(size),
|
||||
};
|
||||
for _ in 0..size {
|
||||
let bucket = Arc::new(Mutex::new(FutexBucket::new()));
|
||||
buckets.vec.push(bucket);
|
||||
}
|
||||
buckets
|
||||
}
|
||||
|
||||
pub fn get_bucket(&self, key: FutexKey) -> (usize, FutexBucketRef) {
|
||||
let index = *BUCKET_MASK & {
|
||||
// The addr is a multiple of 4, so we ignore the last 2 bits
|
||||
let addr = key.addr() >> 2;
|
||||
// simple hash
|
||||
addr / self.size()
|
||||
};
|
||||
(index, self.vec[index].clone())
|
||||
}
|
||||
|
||||
fn size(&self) -> usize {
|
||||
self.vec.len()
|
||||
}
|
||||
}
|
||||
|
||||
struct FutexBucket {
|
||||
queue: VecDeque<FutexItem>,
|
||||
}
|
||||
|
||||
impl FutexBucket {
|
||||
pub fn new() -> FutexBucket {
|
||||
FutexBucket {
|
||||
queue: VecDeque::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn enqueue_item(&mut self, item: FutexItem) {
|
||||
self.queue.push_back(item);
|
||||
}
|
||||
|
||||
pub fn dequeue_item(&mut self, item: &FutexItem) {
|
||||
let item_i = self
|
||||
.queue
|
||||
.iter()
|
||||
.position(|futex_item| *futex_item == *item);
|
||||
if let Some(item_i) = item_i {
|
||||
self.queue.remove(item_i).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn dequeue_and_wake_items(
|
||||
&mut self,
|
||||
key: FutexKey,
|
||||
max_count: usize,
|
||||
bitset: FutexBitSet,
|
||||
) -> usize {
|
||||
let mut count = 0;
|
||||
let mut items_to_wake = Vec::new();
|
||||
|
||||
self.queue.retain(|item| {
|
||||
if count >= max_count || key != item.key || (bitset & item.bitset) == 0 {
|
||||
true
|
||||
} else {
|
||||
items_to_wake.push(item.clone());
|
||||
count += 1;
|
||||
false
|
||||
}
|
||||
});
|
||||
|
||||
// debug!("items to wake len: {}", items_to_wake.len());
|
||||
|
||||
FutexItem::batch_wake(&items_to_wake);
|
||||
count
|
||||
}
|
||||
|
||||
pub fn update_item_keys(&mut self, key: FutexKey, new_key: FutexKey, max_count: usize) {
|
||||
let mut count = 0;
|
||||
for item in self.queue.iter_mut() {
|
||||
if count == max_count {
|
||||
break;
|
||||
}
|
||||
if (*item).key == key {
|
||||
(*item).key = new_key;
|
||||
count += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn requeue_items_to_another_bucket(
|
||||
&mut self,
|
||||
key: FutexKey,
|
||||
another: &mut Self,
|
||||
new_key: FutexKey,
|
||||
max_nrequeues: usize,
|
||||
) {
|
||||
let mut count = 0;
|
||||
|
||||
self.queue.retain(|item| {
|
||||
if count >= max_nrequeues || key != item.key {
|
||||
true
|
||||
} else {
|
||||
let mut new_item = item.clone();
|
||||
new_item.key = new_key;
|
||||
another.enqueue_item(new_item);
|
||||
count += 1;
|
||||
false
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
struct FutexItem {
|
||||
key: FutexKey,
|
||||
bitset: FutexBitSet,
|
||||
waiter: FutexWaiterRef,
|
||||
}
|
||||
|
||||
impl FutexItem {
|
||||
pub fn new(key: FutexKey, bitset: FutexBitSet) -> Self {
|
||||
FutexItem {
|
||||
key,
|
||||
bitset,
|
||||
waiter: Arc::new(FutexWaiter::new()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn wake(&self) {
|
||||
// debug!("wake futex item, key = {:?}", self.key);
|
||||
self.waiter.wake();
|
||||
}
|
||||
|
||||
pub fn wait(&self) {
|
||||
// debug!("wait on futex item, key = {:?}", self.key);
|
||||
self.waiter.wait();
|
||||
// debug!("wait finished, key = {:?}", self.key);
|
||||
}
|
||||
|
||||
pub fn waiter(&self) -> &FutexWaiterRef {
|
||||
&self.waiter
|
||||
}
|
||||
|
||||
pub fn batch_wake(items: &[FutexItem]) {
|
||||
let waiters = items.iter().map(|item| item.waiter()).collect::<Vec<_>>();
|
||||
FutexWaiter::batch_wake(&waiters);
|
||||
}
|
||||
}
|
||||
|
||||
// The addr of a futex; it is used to distinguish different futex words
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
struct FutexKey(Vaddr);
|
||||
|
||||
impl FutexKey {
|
||||
pub fn new(futex_addr: Vaddr) -> Self {
|
||||
FutexKey(futex_addr as _)
|
||||
}
|
||||
|
||||
pub fn load_val(&self) -> i32 {
|
||||
// FIXME: how to implement an atomic load?
|
||||
warn!("implement an atomic load");
|
||||
read_val_from_user(self.0).unwrap()
|
||||
}
|
||||
|
||||
pub fn addr(&self) -> Vaddr {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
// The implementation is from occlum
|
||||
|
||||
#[derive(PartialEq, Debug, Clone, Copy)]
|
||||
#[allow(non_camel_case_types)]
|
||||
pub enum FutexOp {
|
||||
FUTEX_WAIT = 0,
|
||||
FUTEX_WAKE = 1,
|
||||
FUTEX_FD = 2,
|
||||
FUTEX_REQUEUE = 3,
|
||||
FUTEX_CMP_REQUEUE = 4,
|
||||
FUTEX_WAKE_OP = 5,
|
||||
FUTEX_LOCK_PI = 6,
|
||||
FUTEX_UNLOCK_PI = 7,
|
||||
FUTEX_TRYLOCK_PI = 8,
|
||||
FUTEX_WAIT_BITSET = 9,
|
||||
FUTEX_WAKE_BITSET = 10,
|
||||
}
|
||||
|
||||
impl FutexOp {
|
||||
pub fn from_u32(bits: u32) -> Result<FutexOp> {
|
||||
match bits {
|
||||
0 => Ok(FutexOp::FUTEX_WAIT),
|
||||
1 => Ok(FutexOp::FUTEX_WAKE),
|
||||
2 => Ok(FutexOp::FUTEX_FD),
|
||||
3 => Ok(FutexOp::FUTEX_REQUEUE),
|
||||
4 => Ok(FutexOp::FUTEX_CMP_REQUEUE),
|
||||
5 => Ok(FutexOp::FUTEX_WAKE_OP),
|
||||
6 => Ok(FutexOp::FUTEX_LOCK_PI),
|
||||
7 => Ok(FutexOp::FUTEX_UNLOCK_PI),
|
||||
8 => Ok(FutexOp::FUTEX_TRYLOCK_PI),
|
||||
9 => Ok(FutexOp::FUTEX_WAIT_BITSET),
|
||||
10 => Ok(FutexOp::FUTEX_WAKE_BITSET),
|
||||
_ => return_errno_with_message!(Errno::EINVAL, "Unknown futex op"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bitflags! {
|
||||
pub struct FutexFlags : u32 {
|
||||
const FUTEX_PRIVATE = 128;
|
||||
const FUTEX_CLOCK_REALTIME = 256;
|
||||
}
|
||||
}
|
||||
|
||||
impl FutexFlags {
|
||||
pub fn from_u32(bits: u32) -> Result<FutexFlags> {
|
||||
FutexFlags::from_bits(bits)
|
||||
.ok_or_else(|| Error::with_message(Errno::EINVAL, "unknown futex flags"))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn futex_op_and_flags_from_u32(bits: u32) -> Result<(FutexOp, FutexFlags)> {
|
||||
let op = {
|
||||
let op_bits = bits & FUTEX_OP_MASK;
|
||||
FutexOp::from_u32(op_bits)?
|
||||
};
|
||||
let flags = {
|
||||
let flags_bits = bits & FUTEX_FLAGS_MASK;
|
||||
FutexFlags::from_u32(flags_bits)?
|
||||
};
|
||||
Ok((op, flags))
|
||||
}
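// Editor's sketch (not part of this commit): decoding the raw `op` argument of the
// futex syscall. Linux encodes FUTEX_WAIT_PRIVATE as FUTEX_WAIT | FUTEX_PRIVATE = 0 | 128.
fn example_decode_op() -> Result<()> {
    let (op, flags) = futex_op_and_flags_from_u32(128)?;
    assert_eq!(op, FutexOp::FUTEX_WAIT);
    assert!(flags.contains(FutexFlags::FUTEX_PRIVATE));
    Ok(())
}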
|
||||
|
||||
type FutexWaiterRef = Arc<FutexWaiter>;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct FutexWaiter {
|
||||
is_woken: AtomicBool,
|
||||
tid: Tid,
|
||||
}
|
||||
|
||||
impl PartialEq for FutexWaiter {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.tid == other.tid
|
||||
}
|
||||
}
|
||||
|
||||
impl FutexWaiter {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
is_woken: AtomicBool::new(false),
|
||||
tid: current_thread!().tid(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn wait(&self) {
|
||||
let current_thread = current_thread!();
|
||||
if current_thread.tid() != self.tid {
|
||||
return;
|
||||
}
|
||||
self.is_woken.store(false, Ordering::SeqCst);
|
||||
while !self.is_woken() {
|
||||
// debug!("futex is wait for waken, tid = {}", self.tid);
|
||||
Thread::yield_now();
|
||||
}
|
||||
// debug!("futex is waken, tid = {}", self.tid);
|
||||
}
|
||||
|
||||
pub fn wake(&self) {
|
||||
if !self.is_woken() {
|
||||
// debug!("wake up futex, tid = {}", self.tid);
|
||||
self.is_woken.store(true, Ordering::SeqCst);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_woken(&self) -> bool {
|
||||
self.is_woken.load(Ordering::SeqCst)
|
||||
}
|
||||
|
||||
pub fn batch_wake(waiters: &[&FutexWaiterRef]) {
|
||||
waiters.iter().for_each(|waiter| {
|
||||
waiter.wake();
|
||||
});
|
||||
}
|
||||
}
|
145
src/services/libs/jinux-std/src/process/posix_thread/mod.rs
Normal file
@ -0,0 +1,145 @@
|
||||
use crate::{
|
||||
prelude::*,
|
||||
process::posix_thread::{futex::futex_wake, robust_list::wake_robust_futex},
|
||||
thread::{thread_table, Tid},
|
||||
util::write_val_to_user,
|
||||
};
|
||||
|
||||
use self::{name::ThreadName, robust_list::RobustListHead};
|
||||
|
||||
use super::{
|
||||
signal::{sig_mask::SigMask, sig_queues::SigQueues},
|
||||
Process,
|
||||
};
|
||||
|
||||
pub mod builder;
|
||||
pub mod futex;
|
||||
pub mod name;
|
||||
pub mod posix_thread_ext;
|
||||
pub mod robust_list;
|
||||
|
||||
pub struct PosixThread {
|
||||
// Immutable part
|
||||
process: Weak<Process>,
|
||||
is_main_thread: bool,
|
||||
|
||||
// Mutable part
|
||||
name: Mutex<Option<ThreadName>>,
|
||||
|
||||
// Linux specific attributes.
|
||||
// https://man7.org/linux/man-pages/man2/set_tid_address.2.html
|
||||
set_child_tid: Mutex<Vaddr>,
|
||||
clear_child_tid: Mutex<Vaddr>,
|
||||
|
||||
robust_list: Mutex<Option<RobustListHead>>,
|
||||
|
||||
// signal
|
||||
/// blocked signals
|
||||
sig_mask: Mutex<SigMask>,
|
||||
/// thread-directed sigqueue
|
||||
sig_queues: Mutex<SigQueues>,
|
||||
/// Signal handler ucontext address
|
||||
/// FIXME: This field may be removed. For glibc applications with the RESTORER flag set, the sig_context always equals rsp.
|
||||
sig_context: Mutex<Option<Vaddr>>,
|
||||
}
|
||||
|
||||
impl PosixThread {
|
||||
pub fn process(&self) -> Arc<Process> {
|
||||
self.process.upgrade().unwrap()
|
||||
}
|
||||
|
||||
pub fn thread_name(&self) -> &Mutex<Option<ThreadName>> {
|
||||
&self.name
|
||||
}
|
||||
|
||||
pub fn set_child_tid(&self) -> &Mutex<Vaddr> {
|
||||
&self.set_child_tid
|
||||
}
|
||||
|
||||
pub fn clear_child_tid(&self) -> &Mutex<Vaddr> {
|
||||
&self.clear_child_tid
|
||||
}
|
||||
|
||||
pub fn sig_mask(&self) -> &Mutex<SigMask> {
|
||||
&self.sig_mask
|
||||
}
|
||||
|
||||
pub fn sig_queues(&self) -> &Mutex<SigQueues> {
|
||||
&self.sig_queues
|
||||
}
|
||||
|
||||
pub fn sig_context(&self) -> &Mutex<Option<Vaddr>> {
|
||||
&self.sig_context
|
||||
}
|
||||
|
||||
pub fn robust_list(&self) -> &Mutex<Option<RobustListHead>> {
|
||||
&self.robust_list
|
||||
}
|
||||
|
||||
/// Whether the thread is the main thread. For a POSIX thread, if its tid equals the pid, it is the main thread.
|
||||
pub fn is_main_thread(&self) -> bool {
|
||||
self.is_main_thread
|
||||
}
|
||||
|
||||
/// Whether the thread is the last running thread in the process
|
||||
pub fn is_last_thread(&self) -> bool {
|
||||
let process = self.process.upgrade().unwrap();
|
||||
let threads = process.threads().lock();
|
||||
threads
|
||||
.iter()
|
||||
.filter(|thread| !thread.status().lock().is_exited())
|
||||
.count()
|
||||
== 0
|
||||
}
|
||||
|
||||
/// Walks the robust futex list, marking each futex as dead and waking waiters.
/// It corresponds to Linux's exit_robust_list(); errors are silently ignored.
|
||||
pub fn wake_robust_list(&self, tid: Tid) {
|
||||
let mut robust_list = self.robust_list.lock();
|
||||
let list_head = match *robust_list {
|
||||
None => {
|
||||
return;
|
||||
}
|
||||
Some(robust_list_head) => robust_list_head,
|
||||
};
|
||||
debug!("wake the robust_list: {:?}", list_head);
|
||||
for futex_addr in list_head.futexes() {
|
||||
// debug!("futex addr = 0x{:x}", futex_addr);
|
||||
wake_robust_futex(futex_addr, tid).unwrap();
|
||||
}
|
||||
debug!("wake robust futex success");
|
||||
*robust_list = None;
|
||||
}
|
||||
|
||||
/// A POSIX thread does not contain its tid, so we require the tid as a parameter.
|
||||
pub fn exit(&self, tid: Tid, exit_code: i32) -> Result<()> {
|
||||
let mut clear_ctid = self.clear_child_tid().lock();
|
||||
// If clear_ctid != 0, do a futex wake and write zero to the clear_ctid addr.
|
||||
debug!("wake up ctid");
|
||||
if *clear_ctid != 0 {
|
||||
futex_wake(*clear_ctid, 1)?;
|
||||
// FIXME: the correct write length?
|
||||
write_val_to_user(*clear_ctid, &0i32)?;
|
||||
*clear_ctid = 0;
|
||||
}
|
||||
// exit the robust list: walk the robust list; mark futex words as dead and do futex wake
|
||||
self.wake_robust_list(tid);
|
||||
|
||||
if tid != self.process().pid {
|
||||
// If the thread is not the main thread, remove it from the thread table here.
// The main thread is only removed when the whole process is reaped.
|
||||
thread_table::remove_thread(tid);
|
||||
}
|
||||
|
||||
if self.is_main_thread() || self.is_last_thread() {
|
||||
// exit current process.
|
||||
debug!("self is main thread or last thread");
|
||||
debug!("main thread: {}", self.is_main_thread());
|
||||
debug!("last thread: {}", self.is_last_thread());
|
||||
current!().exit_group(exit_code);
|
||||
}
|
||||
debug!("perform futex wake");
|
||||
futex_wake(Arc::as_ptr(&self.process()) as Vaddr, 1)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
47
src/services/libs/jinux-std/src/process/posix_thread/name.rs
Normal file
@ -0,0 +1,47 @@
|
||||
use crate::prelude::*;
|
||||
|
||||
pub const MAX_THREAD_NAME_LEN: usize = 16;
|
||||
pub struct ThreadName {
|
||||
inner: [u8; MAX_THREAD_NAME_LEN],
|
||||
count: usize,
|
||||
}
|
||||
|
||||
impl ThreadName {
|
||||
pub fn new() -> Self {
|
||||
ThreadName {
|
||||
inner: [0; MAX_THREAD_NAME_LEN],
|
||||
count: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_from_elf_path(elf_path: &CStr) -> Result<Self> {
|
||||
let mut thread_name = ThreadName::new();
|
||||
let elf_file_name = elf_path
|
||||
.to_str()?
|
||||
.split('/')
|
||||
.last()
|
||||
.ok_or(Error::with_message(Errno::EINVAL, "invalid elf path"))?;
|
||||
let name = CString::new(elf_file_name)?;
|
||||
thread_name.set_name(&name)?;
|
||||
Ok(thread_name)
|
||||
}
|
||||
|
||||
pub fn set_name(&mut self, name: &CStr) -> Result<()> {
|
||||
let bytes = name.to_bytes_with_nul();
|
||||
let bytes_len = bytes.len();
|
||||
if bytes_len > MAX_THREAD_NAME_LEN {
|
||||
// if len > MAX_THREAD_NAME_LEN, truncate it.
|
||||
self.count = MAX_THREAD_NAME_LEN;
|
||||
self.inner[..MAX_THREAD_NAME_LEN].clone_from_slice(&bytes[..MAX_THREAD_NAME_LEN]);
|
||||
// Keep the truncated name NUL-terminated (inner holds exactly MAX_THREAD_NAME_LEN bytes).
self.inner[MAX_THREAD_NAME_LEN - 1] = 0;
|
||||
return Ok(());
|
||||
}
|
||||
self.count = bytes_len;
|
||||
self.inner[..bytes_len].clone_from_slice(bytes);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_name(&self) -> Result<Option<&CStr>> {
|
||||
Ok(Some(&(CStr::from_bytes_with_nul(&self.inner)?)))
|
||||
}
|
||||
}
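// Editor's sketch (not part of this commit): deriving a thread name from an ELF path.
// Only the file name component is kept, and names longer than MAX_THREAD_NAME_LEN
// bytes (including the trailing NUL) are truncated. The path is a placeholder.
fn example_thread_name() -> Result<ThreadName> {
    let elf_path = CString::new("/busybox").unwrap(); // hypothetical path
    ThreadName::new_from_elf_path(&elf_path)
}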
|
@ -0,0 +1,57 @@
|
||||
use jinux_frame::{cpu::CpuContext, user::UserSpace};
|
||||
|
||||
use crate::{
|
||||
prelude::*,
|
||||
process::{elf::load_elf_to_root_vmar, Process},
|
||||
rights::Full,
|
||||
thread::{allocate_tid, Thread},
|
||||
vm::vmar::Vmar,
|
||||
};
|
||||
|
||||
use super::{builder::PosixThreadBuilder, name::ThreadName, PosixThread};
|
||||
pub trait PosixThreadExt {
|
||||
fn is_posix_thread(&self) -> bool;
|
||||
fn posix_thread(&self) -> &PosixThread;
|
||||
fn new_posix_thread_from_elf(
|
||||
root_vmar: &Vmar<Full>,
|
||||
elf_path: CString,
|
||||
elf_file_content: &'static [u8],
|
||||
process: Weak<Process>,
|
||||
argv: Vec<CString>,
|
||||
envp: Vec<CString>,
|
||||
) -> Arc<Self>;
|
||||
}
|
||||
|
||||
impl PosixThreadExt for Thread {
|
||||
/// This function should only be called when launching the shell
|
||||
fn new_posix_thread_from_elf(
|
||||
root_vmar: &Vmar<Full>,
|
||||
elf_path: CString,
|
||||
elf_file_content: &'static [u8],
|
||||
process: Weak<Process>,
|
||||
argv: Vec<CString>,
|
||||
envp: Vec<CString>,
|
||||
) -> Arc<Self> {
|
||||
let elf_load_info = load_elf_to_root_vmar(elf_file_content, &root_vmar, argv, envp)
|
||||
.expect("Load Elf failed");
|
||||
let vm_space = root_vmar.vm_space().clone();
|
||||
let mut cpu_ctx = CpuContext::default();
|
||||
cpu_ctx.set_rip(elf_load_info.entry_point());
|
||||
cpu_ctx.set_rsp(elf_load_info.user_stack_top());
|
||||
let user_space = Arc::new(UserSpace::new(vm_space, cpu_ctx));
|
||||
let thread_name = Some(ThreadName::new_from_elf_path(&elf_path).unwrap());
|
||||
let tid = allocate_tid();
|
||||
let thread_builder = PosixThreadBuilder::new(tid, user_space)
|
||||
.thread_name(thread_name)
|
||||
.process(process);
|
||||
thread_builder.build()
|
||||
}
|
||||
|
||||
fn is_posix_thread(&self) -> bool {
|
||||
self.data().downcast_ref::<PosixThread>().is_some()
|
||||
}
|
||||
|
||||
fn posix_thread(&self) -> &PosixThread {
|
||||
self.data().downcast_ref::<PosixThread>().unwrap()
|
||||
}
|
||||
}
|
@ -0,0 +1,152 @@
|
||||
//! The implementation of robust list is from occlum.
|
||||
|
||||
use crate::{
|
||||
prelude::*,
|
||||
process::{posix_thread::futex::futex_wake, Pid},
|
||||
util::{read_val_from_user, write_val_to_user},
|
||||
};
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Clone, Copy, Debug, Pod)]
|
||||
struct RobustList {
|
||||
next: Vaddr, // *const Robust list
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Clone, Copy, Debug, Pod)]
|
||||
pub struct RobustListHead {
|
||||
/// Linked list of lock entries
|
||||
///
|
||||
/// If it points to the head of the list, then it is the end of the list.
|
||||
/// If it is an invalid user space pointer or a null pointer, stop iterating
|
||||
/// the list.
|
||||
list: RobustList,
|
||||
/// Specifies the offset from the address of the lock entry to the address
|
||||
/// of the futex.
|
||||
futex_offset: isize,
|
||||
/// Contains transient copy of the address of the lock entry, during list
|
||||
/// insertion and removal.
|
||||
list_op_pending: Vaddr, // *const RobustList
|
||||
}
|
||||
|
||||
impl RobustListHead {
|
||||
/// Return an iterator for all futexes in the robust list.
|
||||
///
|
||||
/// The futex referred to by `list_op_pending`, if any, will be returned as
|
||||
/// the last item.
|
||||
pub fn futexes<'a>(&'a self) -> FutexIter<'a> {
|
||||
FutexIter::new(self)
|
||||
}
|
||||
|
||||
/// Return the pending futex address if exist
|
||||
fn pending_futex_addr(&self) -> Option<Vaddr> {
|
||||
if self.list_op_pending == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(self.futex_addr(self.list_op_pending))
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the futex address
|
||||
fn futex_addr(&self, entry_ptr: Vaddr) -> Vaddr {
|
||||
(entry_ptr as isize + self.futex_offset) as _
|
||||
}
|
||||
}
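// Worked example (editor's note, not in the commit): with futex_offset = -32 and a
// lock entry at user address 0x7fff_0000, the guarded futex word lives at
// 0x7fff_0000 - 32 = 0x7ffe_ffe0; futexes() yields exactly these computed addresses.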
|
||||
|
||||
pub struct FutexIter<'a> {
|
||||
robust_list: &'a RobustListHead,
|
||||
entry_ptr: Vaddr,
|
||||
count: isize,
|
||||
}
|
||||
|
||||
impl<'a> FutexIter<'a> {
|
||||
pub fn new(robust_list: &'a RobustListHead) -> Self {
|
||||
Self {
|
||||
robust_list,
|
||||
entry_ptr: robust_list.list.next,
|
||||
count: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// `self.count` is normally a non-negative value used to iterate the list.
// To avoid excessively long or circular lists, we use the special value -1
// to represent the end of the iterator.
|
||||
fn set_end(&mut self) {
|
||||
self.count = -1;
|
||||
}
|
||||
|
||||
fn is_end(&self) -> bool {
|
||||
self.count < 0
|
||||
}
|
||||
}
|
||||
|
||||
const ROBUST_LIST_LIMIT: isize = 2048;
|
||||
|
||||
impl<'a> Iterator for FutexIter<'a> {
|
||||
type Item = Vaddr;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
if self.is_end() {
|
||||
return None;
|
||||
}
|
||||
|
||||
while self.entry_ptr != &self.robust_list.list as *const _ as _ {
|
||||
if self.count == ROBUST_LIST_LIMIT {
|
||||
break;
|
||||
}
|
||||
if self.entry_ptr == 0 {
|
||||
return None;
|
||||
}
|
||||
let futex_addr = if self.entry_ptr != self.robust_list.list_op_pending {
|
||||
Some(self.robust_list.futex_addr(self.entry_ptr))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let robust_list = read_val_from_user::<RobustList>(self.entry_ptr).unwrap();
|
||||
self.entry_ptr = robust_list.next;
|
||||
self.count += 1;
|
||||
if futex_addr.is_some() {
|
||||
return futex_addr;
|
||||
}
|
||||
}
|
||||
self.set_end();
|
||||
self.robust_list.pending_futex_addr()
|
||||
}
|
||||
}
|
||||
|
||||
const FUTEX_WAITERS: u32 = 0x8000_0000;
|
||||
const FUTEX_OWNER_DIED: u32 = 0x4000_0000;
|
||||
const FUTEX_TID_MASK: u32 = 0x3FFF_FFFF;
|
||||
|
||||
/// Wakeup one robust futex owned by the thread
|
||||
/// FIXME: requires atomic operations here
|
||||
pub fn wake_robust_futex(futex_addr: Vaddr, tid: Pid) -> Result<()> {
|
||||
let futex_val = {
|
||||
if futex_addr == 0 {
|
||||
return_errno_with_message!(Errno::EINVAL, "invalid futex addr");
|
||||
}
|
||||
read_val_from_user::<u32>(futex_addr)?
|
||||
};
|
||||
let mut old_val = futex_val;
|
||||
loop {
|
||||
// This futex may be held by another thread; do nothing
|
||||
if old_val & FUTEX_TID_MASK != tid as u32 {
|
||||
break;
|
||||
}
|
||||
let new_val = (old_val & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
|
||||
let cur_val = read_val_from_user(futex_addr)?;
|
||||
if cur_val != new_val {
|
||||
// The futex value has changed, let's retry with current value
|
||||
old_val = cur_val;
|
||||
write_val_to_user(futex_addr, &new_val)?;
|
||||
continue;
|
||||
}
|
||||
// Wakeup one waiter
|
||||
if cur_val & FUTEX_WAITERS != 0 {
|
||||
debug!("wake robust futex addr: {:?}", futex_addr);
|
||||
futex_wake(futex_addr, 1)?;
|
||||
}
|
||||
break;
|
||||
}
|
||||
Ok(())
|
||||
}
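// Worked example (editor's note, not in the commit): suppose a thread with tid 42
// dies while owning a robust futex whose word is 0x8000_002A (FUTEX_WAITERS | 42).
// The owner tid matches, so the word is rewritten to 0xC000_0000
// (FUTEX_WAITERS | FUTEX_OWNER_DIED) and, because FUTEX_WAITERS is set, one waiter
// is woken to observe the dead owner.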
|
@ -1,4 +1,4 @@
|
||||
use super::{table, Pgid, Pid, Process};
|
||||
use super::{process_table, signal::signals::kernel::KernelSignal, Pgid, Pid, Process};
|
||||
use crate::prelude::*;
|
||||
|
||||
pub struct ProcessGroup {
|
||||
@ -43,6 +43,10 @@ impl ProcessGroup {
|
||||
self.inner.lock().processes.insert(process.pid(), process);
|
||||
}
|
||||
|
||||
pub fn contains_process(&self, pid: Pid) -> bool {
|
||||
self.inner.lock().processes.contains_key(&pid)
|
||||
}
|
||||
|
||||
/// remove a process from this process group.
|
||||
/// If this group contains no processes now, the group itself will be deleted from global table.
|
||||
pub fn remove_process(&self, pid: Pid) {
|
||||
@ -53,11 +57,29 @@ impl ProcessGroup {
|
||||
// if self contains no process, remove self from table
|
||||
if len == 0 {
|
||||
// this must be the last statement
|
||||
table::remove_process_group(pgid);
|
||||
process_table::remove_process_group(pgid);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn pgid(&self) -> Pgid {
|
||||
self.inner.lock().pgid
|
||||
}
|
||||
|
||||
/// Wake up all processes waiting on polling queue
|
||||
pub fn wake_all_polling_procs(&self) {
|
||||
let inner = self.inner.lock();
|
||||
for (_, process) in &inner.processes {
|
||||
process.poll_queue().wake_all();
|
||||
}
|
||||
}
|
||||
|
||||
/// send kernel signal to all processes in the group
|
||||
pub fn kernel_signal(&self, signal: KernelSignal) {
|
||||
for (_, process) in &self.inner.lock().processes {
|
||||
process
|
||||
.sig_queues()
|
||||
.lock()
|
||||
.enqueue(Box::new(signal.clone()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -28,7 +28,7 @@ impl UserHeap {
|
||||
|
||||
pub fn brk(&self, new_heap_end: Option<Vaddr>) -> Result<Vaddr> {
|
||||
let current = current!();
|
||||
let root_vmar = current.root_vmar().unwrap();
|
||||
let root_vmar = current.root_vmar();
|
||||
match new_heap_end {
|
||||
None => {
|
||||
// create a heap vmo for current process
|
||||
@ -50,7 +50,8 @@ impl UserHeap {
|
||||
return Ok(current_heap_end);
|
||||
}
|
||||
let new_size = (new_heap_end - self.heap_base).align_up(PAGE_SIZE);
|
||||
let heap_vmo = root_vmar.get_mapped_vmo(USER_HEAP_BASE)?;
|
||||
let heap_mapping = root_vmar.get_vm_mapping(USER_HEAP_BASE)?;
|
||||
let heap_vmo = heap_mapping.vmo();
|
||||
heap_vmo.resize(new_size)?;
|
||||
self.current_heap_end.store(new_heap_end, Ordering::Release);
|
||||
return Ok(new_heap_end);
|
||||
|
119
src/services/libs/jinux-std/src/process/rlimit.rs
Normal file
@ -0,0 +1,119 @@
|
||||
//! This implementation is from occlum
|
||||
|
||||
#![allow(non_camel_case_types)]
|
||||
|
||||
use crate::prelude::*;
|
||||
|
||||
use super::{elf::init_stack::INIT_STACK_SIZE, process_vm::user_heap::USER_HEAP_SIZE_LIMIT};
|
||||
|
||||
pub struct ResourceLimits {
|
||||
rlimits: [RLimit64; RLIMIT_COUNT],
|
||||
}
|
||||
|
||||
impl ResourceLimits {
|
||||
pub fn get_rlimit(&self, resource: ResourceType) -> &RLimit64 {
|
||||
&self.rlimits[resource as usize]
|
||||
}
|
||||
|
||||
pub fn get_rlimit_mut(&mut self, resource: ResourceType) -> &mut RLimit64 {
|
||||
&mut self.rlimits[resource as usize]
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ResourceLimits {
|
||||
fn default() -> Self {
|
||||
let stack_size = RLimit64::new(INIT_STACK_SIZE as u64);
|
||||
let heap_size = RLimit64::new(USER_HEAP_SIZE_LIMIT as u64);
|
||||
let open_files = RLimit64::new(1024);
|
||||
|
||||
let mut rlimits = Self {
|
||||
rlimits: [RLimit64::default(); RLIMIT_COUNT],
|
||||
};
|
||||
*rlimits.get_rlimit_mut(ResourceType::RLIMIT_STACK) = stack_size;
|
||||
*rlimits.get_rlimit_mut(ResourceType::RLIMIT_DATA) = heap_size;
|
||||
*rlimits.get_rlimit_mut(ResourceType::RLIMIT_NOFILE) = open_files;
|
||||
rlimits
|
||||
}
|
||||
}
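// Editor's sketch (not part of this commit): querying a default limit.
fn example_nofile_limit() -> u64 {
    let limits = ResourceLimits::default();
    // The default soft limit for open files is 1024, per the Default impl above.
    limits.get_rlimit(ResourceType::RLIMIT_NOFILE).get_cur()
}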
|
||||
|
||||
#[repr(u32)]
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub enum ResourceType {
|
||||
RLIMIT_CPU = 0,
|
||||
RLIMIT_FSIZE = 1,
|
||||
RLIMIT_DATA = 2,
|
||||
RLIMIT_STACK = 3,
|
||||
RLIMIT_CORE = 4,
|
||||
RLIMIT_RSS = 5,
|
||||
RLIMIT_NPROC = 6,
|
||||
RLIMIT_NOFILE = 7,
|
||||
RLIMIT_MEMLOCK = 8,
|
||||
RLIMIT_AS = 9,
|
||||
RLIMIT_LOCKS = 10,
|
||||
RLIMIT_SIGPENDING = 11,
|
||||
RLIMIT_MSGQUEUE = 12,
|
||||
RLIMIT_NICE = 13,
|
||||
RLIMIT_RTPRIO = 14,
|
||||
RLIMIT_RTTIME = 15,
|
||||
}
|
||||
|
||||
impl TryFrom<u32> for ResourceType {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(value: u32) -> Result<Self> {
|
||||
match value {
|
||||
0 => Ok(ResourceType::RLIMIT_CPU),
|
||||
1 => Ok(ResourceType::RLIMIT_FSIZE),
|
||||
2 => Ok(ResourceType::RLIMIT_DATA),
|
||||
3 => Ok(ResourceType::RLIMIT_STACK),
|
||||
4 => Ok(ResourceType::RLIMIT_CORE),
|
||||
5 => Ok(ResourceType::RLIMIT_RSS),
|
||||
6 => Ok(ResourceType::RLIMIT_NPROC),
|
||||
7 => Ok(ResourceType::RLIMIT_NOFILE),
|
||||
8 => Ok(ResourceType::RLIMIT_MEMLOCK),
|
||||
9 => Ok(ResourceType::RLIMIT_AS),
|
||||
10 => Ok(ResourceType::RLIMIT_LOCKS),
|
||||
11 => Ok(ResourceType::RLIMIT_SIGPENDING),
|
||||
12 => Ok(ResourceType::RLIMIT_MSGQUEUE),
|
||||
13 => Ok(ResourceType::RLIMIT_NICE),
|
||||
14 => Ok(ResourceType::RLIMIT_RTPRIO),
|
||||
15 => Ok(ResourceType::RLIMIT_RTTIME),
|
||||
_ => return_errno_with_message!(Errno::EINVAL, "invalid resource type"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub const RLIMIT_COUNT: usize = 16;
|
||||
|
||||
#[derive(Debug, Clone, Copy, Pod)]
|
||||
#[repr(C)]
|
||||
pub struct RLimit64 {
|
||||
cur: u64,
|
||||
max: u64,
|
||||
}
|
||||
|
||||
impl RLimit64 {
|
||||
pub fn new(cur: u64) -> Self {
|
||||
Self {
|
||||
cur,
|
||||
max: u64::max_value(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_cur(&self) -> u64 {
|
||||
self.cur
|
||||
}
|
||||
|
||||
pub fn get_max(&self) -> u64 {
|
||||
self.max
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for RLimit64 {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
cur: u64::max_value(),
|
||||
max: u64::max_value(),
|
||||
}
|
||||
}
|
||||
}
|
@ -15,6 +15,8 @@ use jinux_frame::{cpu::CpuContext, task::Task};
|
||||
use self::c_types::siginfo_t;
|
||||
use self::sig_mask::SigMask;
|
||||
use self::sig_num::SigNum;
|
||||
use crate::current_thread;
|
||||
use crate::process::posix_thread::posix_thread_ext::PosixThreadExt;
|
||||
use crate::process::signal::c_types::ucontext_t;
|
||||
use crate::process::signal::sig_action::SigActionFlags;
|
||||
use crate::util::{write_bytes_to_user, write_val_to_user};
|
||||
@ -26,12 +28,22 @@ use crate::{
|
||||
/// Handle a pending signal for the current thread and process
|
||||
pub fn handle_pending_signal(context: &mut CpuContext) -> Result<()> {
|
||||
let current = current!();
|
||||
let current_thread = current_thread!();
|
||||
let posix_thread = current_thread.posix_thread();
|
||||
let pid = current.pid();
|
||||
let process_name = current.filename().unwrap();
|
||||
let sig_queues = current.sig_queues();
|
||||
let mut sig_queues_guard = sig_queues.lock();
|
||||
let sig_mask = current.sig_mask().lock().clone();
|
||||
if let Some(signal) = sig_queues_guard.dequeue(&sig_mask) {
|
||||
let sig_mask = posix_thread.sig_mask().lock().clone();
|
||||
let mut thread_sig_queues = posix_thread.sig_queues().lock();
|
||||
let mut proc_sig_queues = current.sig_queues().lock();
|
||||
// We first deal with signals in the current thread, then signals in the current process.
|
||||
let signal = if let Some(signal) = thread_sig_queues.dequeue(&sig_mask) {
|
||||
Some(signal)
|
||||
} else if let Some(signal) = proc_sig_queues.dequeue(&sig_mask) {
|
||||
Some(signal)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
if let Some(signal) = signal {
|
||||
let sig_num = signal.num();
|
||||
debug!("sig_num = {:?}, sig_name = {}", sig_num, sig_num.sig_name());
|
||||
let sig_action = current.sig_dispositions().lock().get(sig_num);
|
||||
@ -45,7 +57,7 @@ pub fn handle_pending_signal(context: &mut CpuContext) -> Result<()> {
|
||||
flags,
|
||||
restorer_addr,
|
||||
mask,
|
||||
} => handle_user_signal_handler(
|
||||
} => handle_user_signal(
|
||||
sig_num,
|
||||
handler_addr,
|
||||
flags,
|
||||
@ -65,28 +77,28 @@ pub fn handle_pending_signal(context: &mut CpuContext) -> Result<()> {
|
||||
sig_num.sig_name()
|
||||
);
|
||||
// FIXME: How should we set the correct status if the process is terminated?
|
||||
current.exit(1);
|
||||
current.exit_group(1);
|
||||
// We should exit current here, since we cannot restore a valid status from trap now.
|
||||
Task::current().exit();
|
||||
}
|
||||
SigDefaultAction::Ign => {}
|
||||
SigDefaultAction::Stop => {
|
||||
let mut status_guard = current.status().lock();
|
||||
if status_guard.is_runnable() {
|
||||
status_guard.set_suspend();
|
||||
let mut status = current_thread.status().lock();
|
||||
if status.is_running() {
|
||||
status.set_stopped();
|
||||
} else {
|
||||
panic!("Try to suspend a not running process.")
|
||||
}
|
||||
drop(status_guard);
|
||||
drop(status);
|
||||
}
|
||||
SigDefaultAction::Cont => {
|
||||
let mut status_guard = current.status().lock();
|
||||
if status_guard.is_suspend() {
|
||||
status_guard.set_runnable();
|
||||
let mut status = current_thread.status().lock();
|
||||
if status.is_stopped() {
|
||||
status.set_running();
|
||||
} else {
|
||||
panic!("Try to continue a not suspended process.")
|
||||
}
|
||||
drop(status_guard);
|
||||
drop(status);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -95,7 +107,7 @@ pub fn handle_pending_signal(context: &mut CpuContext) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn handle_user_signal_handler(
|
||||
pub fn handle_user_signal(
|
||||
sig_num: SigNum,
|
||||
handler_addr: Vaddr,
|
||||
flags: SigActionFlags,
|
||||
@ -117,9 +129,10 @@ pub fn handle_user_signal_handler(
|
||||
let current_mask = SigMask::from(sig_num);
|
||||
mask.block(current_mask.as_u64());
|
||||
}
|
||||
let current = current!();
|
||||
let current_thread = current_thread!();
|
||||
let posix_thread = current_thread.posix_thread();
|
||||
// block signals in sigmask when running signal handler
|
||||
current.sig_mask().lock().block(mask.as_u64());
|
||||
posix_thread.sig_mask().lock().block(mask.as_u64());
|
||||
|
||||
// Set up the signal frame on the user stack. To avoid corrupting the user stack, we first subtract 128 bytes (the x86-64 red zone).
|
||||
let mut user_rsp = context.gp_regs.rsp;
|
||||
@ -129,27 +142,35 @@ pub fn handle_user_signal_handler(
|
||||
user_rsp = user_rsp - mem::size_of::<siginfo_t>() as u64;
|
||||
write_val_to_user(user_rsp as _, &sig_info)?;
|
||||
let siginfo_addr = user_rsp;
|
||||
debug!("siginfo_addr = 0x{:x}", siginfo_addr);
|
||||
// debug!("siginfo_addr = 0x{:x}", siginfo_addr);
|
||||
|
||||
// 2. write ucontext_t.
|
||||
user_rsp = alloc_aligned_in_user_stack(user_rsp, mem::size_of::<ucontext_t>(), 16)?;
|
||||
let mut ucontext = ucontext_t::default();
|
||||
ucontext.uc_sigmask = mask.as_u64();
|
||||
ucontext.uc_mcontext.inner.gp_regs = context.gp_regs;
|
||||
let mut sig_context = posix_thread.sig_context().lock();
|
||||
if let Some(sig_context_addr) = *sig_context {
|
||||
ucontext.uc_link = sig_context_addr;
|
||||
} else {
|
||||
ucontext.uc_link = 0;
|
||||
}
|
||||
// TODO: store fp regs in ucontext
|
||||
write_val_to_user(user_rsp as _, &ucontext)?;
|
||||
let ucontext_addr = user_rsp;
|
||||
debug!("ucontext addr = 0x{:x}", ucontext_addr);
|
||||
// Store the ucontext addr in the sig context of the current thread.
|
||||
current.sig_context().lock().push_back(ucontext_addr as _);
|
||||
*sig_context = Some(ucontext_addr as Vaddr);
|
||||
// current.sig_context().lock().push_back(ucontext_addr as _);
|
||||
|
||||
// 3. Set the address of the trampoline code.
|
||||
if flags.contains(SigActionFlags::SA_RESTORER) {
|
||||
// If contains SA_RESTORER flag, trampoline code is provided by libc in restorer_addr.
|
||||
// We just store restorer_addr on the user stack so that returning from the handler jumps to the trampoline code.
|
||||
user_rsp = write_u64_to_user_stack(user_rsp, restorer_addr as u64)?;
|
||||
debug!("After set restorer addr: user_rsp = 0x{:x}", user_rsp);
|
||||
} else {
|
||||
// Otherwise we create
|
||||
// Otherwise we create a trampoline.
|
||||
// FIXME: This may cause problems if we read old_context from rsp.
|
||||
const TRAMPOLINE: &[u8] = &[
|
||||
0xb8, 0x0f, 0x00, 0x00, 0x00, // mov eax, 15(syscall number of rt_sigreturn)
|
||||
0x0f, 0x05, // syscall (call rt_sigreturn)
|
||||
|
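handle_pending_signal above drains the per-thread signal queue before the shared process queue, skipping anything in the blocked mask. The following is a minimal standalone sketch of that dequeue order; the queue type, signal numbers, and mask layout are illustrative and not the kernel's sig_queues API.

// Standalone sketch of the two-level dequeue order described above:
// try the per-thread queue first, then fall back to the shared process queue.
use std::collections::VecDeque;

fn dequeue_unblocked(queue: &mut VecDeque<u8>, blocked: u64) -> Option<u8> {
    // Pop the first signal whose bit is not set in the blocked mask.
    let pos = queue.iter().position(|&sig| blocked & (1u64 << (sig - 1)) == 0)?;
    queue.remove(pos)
}

fn next_signal(
    thread_queue: &mut VecDeque<u8>,
    process_queue: &mut VecDeque<u8>,
    blocked: u64,
) -> Option<u8> {
    dequeue_unblocked(thread_queue, blocked)
        .or_else(|| dequeue_unblocked(process_queue, blocked))
}

fn main() {
    let mut thread_q = VecDeque::from(vec![10u8]); // SIGUSR1 queued on the thread
    let mut proc_q = VecDeque::from(vec![2u8]); // SIGINT queued on the process
    // SIGUSR1 is blocked, so the process-wide SIGINT is delivered first.
    let blocked = 1u64 << 9;
    assert_eq!(next_signal(&mut thread_q, &mut proc_q, blocked), Some(2));
}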
@ -64,4 +64,14 @@ impl SigMask {
|
||||
fn num_to_idx(num: SigNum) -> usize {
|
||||
(num.as_u8() - MIN_STD_SIG_NUM) as usize
|
||||
}
|
||||
|
||||
pub fn remove_signal(&mut self, signum: SigNum) {
|
||||
let idx = Self::num_to_idx(signum);
|
||||
self.bits &= !(1_u64 << idx);
|
||||
}
|
||||
|
||||
pub fn add_signal(&mut self, signum: SigNum) {
|
||||
let idx = Self::num_to_idx(signum);
|
||||
self.bits |= 1_u64 << idx;
|
||||
}
|
||||
}
|
||||
|
@ -4,8 +4,6 @@
|
||||
pub enum ProcessStatus {
|
||||
/// Can be scheduled to run
|
||||
Runnable,
|
||||
/// Suspend until be woken by SIGCONT signal
|
||||
SuspendSignalable,
|
||||
/// Exit while not reaped by parent
|
||||
Zombie,
|
||||
}
|
||||
@ -18,20 +16,4 @@ impl ProcessStatus {
|
||||
pub fn is_zombie(&self) -> bool {
|
||||
*self == ProcessStatus::Zombie
|
||||
}
|
||||
|
||||
pub fn set_suspend(&mut self) {
|
||||
*self = ProcessStatus::SuspendSignalable;
|
||||
}
|
||||
|
||||
pub fn is_suspend(&self) -> bool {
|
||||
*self == ProcessStatus::SuspendSignalable
|
||||
}
|
||||
|
||||
pub fn set_runnable(&mut self) {
|
||||
*self = ProcessStatus::Runnable;
|
||||
}
|
||||
|
||||
pub fn is_runnable(&self) -> bool {
|
||||
*self == ProcessStatus::Runnable
|
||||
}
|
||||
}
|
||||
|
@ -1,88 +0,0 @@
|
||||
use core::sync::atomic::AtomicUsize;
|
||||
|
||||
use jinux_frame::{
|
||||
cpu::CpuContext,
|
||||
task::Task,
|
||||
user::{UserEvent, UserMode, UserSpace},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
prelude::*,
|
||||
process::{exception::handle_exception, signal::handle_pending_signal},
|
||||
rights::Full,
|
||||
vm::vmar::Vmar,
|
||||
};
|
||||
|
||||
use crate::syscall::handle_syscall;
|
||||
|
||||
use super::{elf::load_elf_to_root_vmar, Process};
|
||||
|
||||
static COUNTER: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
pub fn create_user_task_from_elf(
|
||||
root_vmar: &Vmar<Full>,
|
||||
filename: CString,
|
||||
elf_file_content: &'static [u8],
|
||||
parent: Weak<Process>,
|
||||
argv: Vec<CString>,
|
||||
envp: Vec<CString>,
|
||||
) -> Arc<Task> {
|
||||
let elf_load_info = load_elf_to_root_vmar(filename, elf_file_content, &root_vmar, argv, envp)
|
||||
.expect("Load Elf failed");
|
||||
let vm_space = root_vmar.vm_space().clone();
|
||||
let mut cpu_ctx = CpuContext::default();
|
||||
// set entry point
|
||||
cpu_ctx.gp_regs.rip = elf_load_info.entry_point();
|
||||
// set user stack
|
||||
cpu_ctx.gp_regs.rsp = elf_load_info.user_stack_top();
|
||||
let user_space = Arc::new(UserSpace::new(vm_space, cpu_ctx));
|
||||
create_new_task(user_space, parent)
|
||||
}
|
||||
|
||||
/// create new task with userspace and parent process
|
||||
pub fn create_new_task(userspace: Arc<UserSpace>, parent: Weak<Process>) -> Arc<Task> {
|
||||
fn user_task_entry() {
|
||||
let cur = Task::current();
|
||||
let user_space = cur.user_space().expect("user task should have user space");
|
||||
let mut user_mode = UserMode::new(user_space);
|
||||
debug!("In user task entry:");
|
||||
debug!("[new task] rip = 0x{:x}", user_space.cpu_ctx.gp_regs.rip);
|
||||
debug!("[new task] rsp = 0x{:x}", user_space.cpu_ctx.gp_regs.rsp);
|
||||
debug!("[new task] rax = 0x{:x}", user_space.cpu_ctx.gp_regs.rax);
|
||||
loop {
|
||||
let user_event = user_mode.execute();
|
||||
let context = user_mode.context_mut();
|
||||
// handle user event:
|
||||
handle_user_event(user_event, context);
|
||||
let current = current!();
|
||||
// Should we do this comparison before handling signals?
|
||||
if current.status().lock().is_zombie() {
|
||||
break;
|
||||
}
|
||||
handle_pending_signal(context).unwrap();
|
||||
if current.status().lock().is_zombie() {
|
||||
debug!("exit due to signal");
|
||||
break;
|
||||
}
|
||||
// If current is suspended, wait for a signal to wake up self
|
||||
while current.status().lock().is_suspend() {
|
||||
Process::yield_now();
|
||||
debug!("{} is suspended.", current.pid());
|
||||
handle_pending_signal(context).unwrap();
|
||||
}
|
||||
}
|
||||
debug!("exit user loop");
|
||||
// FIXME: This is a workaround: exit in the kernel task entry may not be called. Why does this happen?
|
||||
Task::current().exit();
|
||||
}
|
||||
|
||||
Task::new(user_task_entry, parent, Some(userspace)).expect("spawn task failed")
|
||||
}
|
||||
|
||||
fn handle_user_event(user_event: UserEvent, context: &mut CpuContext) {
|
||||
match user_event {
|
||||
UserEvent::Syscall => handle_syscall(context),
|
||||
UserEvent::Fault => todo!(),
|
||||
UserEvent::Exception => handle_exception(context),
|
||||
}
|
||||
}
|
@ -1,3 +1,5 @@
|
||||
use core::sync::atomic::Ordering;
|
||||
|
||||
use crate::prelude::*;
|
||||
|
||||
use super::{process_filter::ProcessFilter, ExitCode, Pid};
|
||||
@ -51,7 +53,7 @@ pub fn wait_child_exit(
|
||||
|
||||
if let Some(zombie_child) = zombie_child {
|
||||
let zombie_pid = zombie_child.pid();
|
||||
let exit_code = zombie_child.exit_code();
|
||||
let exit_code = zombie_child.exit_code().load(Ordering::SeqCst);
|
||||
if wait_options.contains(WaitOptions::WNOWAIT) {
|
||||
// do not reap the child, return directly
|
||||
return Some(Ok((zombie_pid, exit_code)));
|
||||
|
44
src/services/libs/jinux-std/src/syscall/clock_nanosleep.rs
Normal file
@ -0,0 +1,44 @@
|
||||
use core::time::Duration;
|
||||
|
||||
use super::SyscallReturn;
|
||||
use super::SYS_CLOCK_NANOSLEEP;
|
||||
use crate::{
|
||||
log_syscall_entry,
|
||||
prelude::*,
|
||||
thread::Thread,
|
||||
time::{clockid_t, timespec_t, ClockID, TIMER_ABSTIME},
|
||||
util::{read_val_from_user, write_val_to_user},
|
||||
};
|
||||
|
||||
pub fn sys_clock_nanosleep(
|
||||
clockid: clockid_t,
|
||||
flags: i32,
|
||||
request_timespec_addr: Vaddr,
|
||||
remain_timespec_addr: Vaddr,
|
||||
) -> Result<SyscallReturn> {
|
||||
log_syscall_entry!(SYS_CLOCK_NANOSLEEP);
|
||||
let clock_id = ClockID::try_from(clockid)?;
|
||||
let abs_time = if flags == 0 {
|
||||
false
|
||||
} else if flags == TIMER_ABSTIME {
|
||||
true
|
||||
} else {
|
||||
unreachable!()
|
||||
};
|
||||
let request_timespec = read_val_from_user::<timespec_t>(request_timespec_addr)?;
|
||||
|
||||
debug!(
|
||||
"clockid = {:?}, abs_time = {}, request_timespec = {:?}, remain timespec addr = 0x{:x}",
|
||||
clock_id, abs_time, request_timespec, remain_timespec_addr
|
||||
);
|
||||
// FIXME: do a real sleep. Here we simply yield the execution of the current thread since we do not have timeout support yet.
|
||||
// If the sleep is interrupted by a signal, this syscall should return an error.
|
||||
Thread::yield_now();
|
||||
if remain_timespec_addr != 0 {
|
||||
let remain_duration = Duration::new(0, 0);
|
||||
let remain_timespec = timespec_t::from(remain_duration);
|
||||
write_val_to_user(remain_timespec_addr, &remain_timespec)?;
|
||||
}
|
||||
|
||||
Ok(SyscallReturn::Return(0))
|
||||
}
|
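sys_clock_nanosleep reads a timespec from user memory and, when remain_timespec_addr is non-zero, writes a remaining timespec back. A minimal standalone sketch of the timespec/Duration round trip is below; this Timespec struct is illustrative and not the kernel's timespec_t.

// Standalone sketch of the timespec <-> Duration round trip that
// sys_clock_nanosleep relies on; this Timespec struct is illustrative.
use std::time::Duration;

#[derive(Debug, PartialEq)]
struct Timespec {
    sec: i64,
    nsec: i64,
}

impl From<Duration> for Timespec {
    fn from(d: Duration) -> Self {
        Timespec { sec: d.as_secs() as i64, nsec: d.subsec_nanos() as i64 }
    }
}

impl From<Timespec> for Duration {
    fn from(t: Timespec) -> Self {
        Duration::new(t.sec as u64, t.nsec as u32)
    }
}

fn main() {
    let requested = Timespec { sec: 1, nsec: 500_000_000 };
    let as_duration: Duration = requested.into();
    assert_eq!(as_duration, Duration::new(1, 500_000_000));
    // Writing back "no time remaining" mirrors the remain_timespec_addr path.
    let remaining = Timespec::from(Duration::new(0, 0));
    assert_eq!(remaining, Timespec { sec: 0, nsec: 0 });
}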
@ -10,22 +10,16 @@ use super::SyscallReturn;
|
||||
// This order we use here is the order for x86_64. See https://man7.org/linux/man-pages/man2/clone.2.html.
|
||||
pub fn sys_clone(
|
||||
clone_flags: u64,
|
||||
new_sp: Vaddr,
|
||||
new_sp: u64,
|
||||
parent_tidptr: Vaddr,
|
||||
child_tidptr: Vaddr,
|
||||
tls: usize,
|
||||
tls: u64,
|
||||
parent_context: CpuContext,
|
||||
) -> Result<SyscallReturn> {
|
||||
log_syscall_entry!(SYS_CLONE);
|
||||
let clone_flags = CloneFlags::from(clone_flags);
|
||||
debug!("flags = {:?}, child_stack_ptr = 0x{:x}, parent_tid_ptr = 0x{:x}, child tid ptr = 0x{:x}, tls = 0x{:x}", clone_flags, new_sp, parent_tidptr, child_tidptr, tls);
|
||||
let clone_args = CloneArgs::new(new_sp, parent_tidptr, child_tidptr, tls, clone_flags);
|
||||
let child_process = clone_child(parent_context, clone_args).unwrap();
|
||||
|
||||
let child_pid = child_process.pid();
|
||||
let pid = current!().pid();
|
||||
debug!("*********schedule child process, pid = {}**********", pid);
|
||||
child_process.send_to_scheduler();
|
||||
debug!("*********return to parent process, pid = {}*********", pid);
|
||||
let child_pid = clone_child(parent_context, clone_args).unwrap();
|
||||
Ok(SyscallReturn::Return(child_pid as _))
|
||||
}
|
||||
|
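sys_clone decodes the clone_flags word to decide whether the child is a new thread in the caller's thread group or a separate process. The sketch below shows that check in isolation; the bit values follow the Linux uapi headers, while the helper itself is illustrative and not the kernel's CloneFlags type.

// Standalone sketch of how the clone_flags word distinguishes a new thread
// from a new process; the flag values follow the Linux uapi headers.
const CLONE_VM: u64 = 0x0000_0100;
const CLONE_SIGHAND: u64 = 0x0000_0800;
const CLONE_THREAD: u64 = 0x0001_0000;

fn creates_thread(clone_flags: u64) -> bool {
    // A pthread-style clone shares the address space, signal handlers and
    // thread group with its creator.
    clone_flags & (CLONE_VM | CLONE_SIGHAND | CLONE_THREAD)
        == (CLONE_VM | CLONE_SIGHAND | CLONE_THREAD)
}

fn main() {
    // Flags close to what glibc's pthread_create passes.
    let pthread_flags = CLONE_VM | CLONE_SIGHAND | CLONE_THREAD;
    assert!(creates_thread(pthread_flags));
    // A bare fork-like clone shares nothing and stays a separate process.
    assert!(!creates_thread(0));
    println!("ok");
}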
@ -3,6 +3,8 @@ use jinux_frame::cpu::CpuContext;
|
||||
use super::{constants::*, SyscallReturn};
|
||||
use crate::log_syscall_entry;
|
||||
use crate::process::elf::load_elf_to_root_vmar;
|
||||
use crate::process::posix_thread::name::ThreadName;
|
||||
use crate::process::posix_thread::posix_thread_ext::PosixThreadExt;
|
||||
use crate::util::{read_cstring_from_user, read_val_from_user};
|
||||
use crate::{prelude::*, syscall::SYS_EXECVE};
|
||||
|
||||
@ -13,31 +15,35 @@ pub fn sys_execve(
|
||||
context: &mut CpuContext,
|
||||
) -> Result<SyscallReturn> {
|
||||
log_syscall_entry!(SYS_EXECVE);
|
||||
let filename = read_cstring_from_user(filename_ptr, MAX_FILENAME_LEN)?;
|
||||
let elf_path = read_cstring_from_user(filename_ptr, MAX_FILENAME_LEN)?;
|
||||
let argv = read_cstring_vec(argv_ptr_ptr, MAX_ARGV_NUMBER, MAX_ARG_LEN)?;
|
||||
let envp = read_cstring_vec(envp_ptr_ptr, MAX_ENVP_NUMBER, MAX_ENV_LEN)?;
|
||||
debug!(
|
||||
"filename: {:?}, argv = {:?}, envp = {:?}",
|
||||
filename, argv, envp
|
||||
elf_path, argv, envp
|
||||
);
|
||||
if filename != CString::new("./hello").unwrap() {
|
||||
if elf_path != CString::new("./hello").unwrap() {
|
||||
panic!("Unknown filename.");
|
||||
}
|
||||
// FIXME: should we set thread name in execve?
|
||||
let current_thread = current_thread!();
|
||||
let posix_thread = current_thread.posix_thread();
|
||||
let mut thread_name = posix_thread.thread_name().lock();
|
||||
let new_thread_name = ThreadName::new_from_elf_path(&elf_path)?;
|
||||
*thread_name = Some(new_thread_name);
|
||||
|
||||
let elf_file_content = crate::user_apps::read_execve_hello_content();
|
||||
let current = current!();
|
||||
// destroy root vmars
|
||||
let root_vmar = current
|
||||
.root_vmar()
|
||||
.expect("[Internal Error] User process should have vm space");
|
||||
let root_vmar = current.root_vmar();
|
||||
root_vmar.clear()?;
|
||||
let user_vm = current
|
||||
.user_vm()
|
||||
.expect("[Internal Error] User process should have user vm");
|
||||
user_vm.set_default();
|
||||
// load elf content to new vm space
|
||||
let elf_load_info = load_elf_to_root_vmar(filename, elf_file_content, root_vmar, argv, envp)
|
||||
.expect("load elf failed");
|
||||
let elf_load_info =
|
||||
load_elf_to_root_vmar(elf_file_content, root_vmar, argv, envp).expect("load elf failed");
|
||||
debug!("load elf in execve succeeds");
|
||||
// set signal disposition to default
|
||||
current.sig_dispositions().lock().inherit();
|
||||
|
@ -1,9 +1,19 @@
|
||||
use crate::process::posix_thread::posix_thread_ext::PosixThreadExt;
|
||||
use crate::{log_syscall_entry, prelude::*};
|
||||
|
||||
use crate::syscall::{SyscallReturn, SYS_EXIT};
|
||||
|
||||
pub fn sys_exit(exit_code: i32) -> Result<SyscallReturn> {
|
||||
log_syscall_entry!(SYS_EXIT);
|
||||
current!().exit(exit_code);
|
||||
debug!("exid code = {}", exit_code);
|
||||
let current_thread = current_thread!();
|
||||
let tid = current_thread.tid();
|
||||
let current = current!();
|
||||
let pid = current.pid();
|
||||
debug!("tid = {}, pid = {}", tid, pid);
|
||||
let posix_thread = current_thread.posix_thread();
|
||||
current_thread.exit();
|
||||
posix_thread.exit(tid, exit_code)?;
|
||||
|
||||
Ok(SyscallReturn::Return(0))
|
||||
}
|
||||
|
@ -5,6 +5,7 @@ use crate::syscall::{SyscallReturn, SYS_EXIT_GROUP};
|
||||
/// Exit all threads in a process.
|
||||
pub fn sys_exit_group(exit_code: u64) -> Result<SyscallReturn> {
|
||||
log_syscall_entry!(SYS_EXIT_GROUP);
|
||||
current!().exit(exit_code as _);
|
||||
// Exit all threads in the current process
|
||||
current!().exit_group(exit_code as _);
|
||||
Ok(SyscallReturn::Return(0))
|
||||
}
|
||||
|
@ -5,25 +5,15 @@ use crate::{
|
||||
};
|
||||
use jinux_frame::cpu::CpuContext;
|
||||
|
||||
use crate::{process::Process, syscall::SYS_FORK};
|
||||
use crate::syscall::SYS_FORK;
|
||||
|
||||
use super::SyscallReturn;
|
||||
|
||||
pub fn sys_fork(parent_context: CpuContext) -> Result<SyscallReturn> {
|
||||
log_syscall_entry!(SYS_FORK);
|
||||
let child_process = fork(parent_context);
|
||||
Ok(SyscallReturn::Return(child_process.pid() as _))
|
||||
}
|
||||
|
||||
/// Fork a child process
|
||||
fn fork(parent_context: CpuContext) -> Arc<Process> {
|
||||
let current = current!();
|
||||
// FIXME: set correct args for fork
|
||||
let clone_args = CloneArgs::default();
|
||||
let child = clone_child(parent_context, clone_args).unwrap();
|
||||
let pid = current.pid();
|
||||
debug!("*********schedule child process, pid = {}**********", pid);
|
||||
child.send_to_scheduler();
|
||||
debug!("*********return to parent process, pid = {}*********", pid);
|
||||
child
|
||||
let child_pid = clone_child(parent_context, clone_args).unwrap();
|
||||
Ok(SyscallReturn::Return(child_pid as _))
|
||||
}
|
||||
|
@ -10,7 +10,7 @@ pub fn sys_fstat(fd: u64, stat_buf_ptr: Vaddr) -> Result<SyscallReturn> {
|
||||
debug!("fd = {}, stat_buf_addr = 0x{:x}", fd, stat_buf_ptr);
|
||||
|
||||
let current = current!();
|
||||
let root_vmar = current.root_vmar().unwrap();
|
||||
let root_vmar = current.root_vmar();
|
||||
if fd == 1 {
|
||||
let stat = Stat::stdout_stat();
|
||||
root_vmar.write_val(stat_buf_ptr, &stat)?;
|
||||
|
@ -1,24 +1,16 @@
|
||||
use core::sync::atomic::{AtomicBool, Ordering};
|
||||
|
||||
use crate::process::{Pid, Process};
|
||||
use crate::process::posix_thread::futex::{
|
||||
futex_op_and_flags_from_u32, futex_requeue, futex_wait, futex_wait_bitset, futex_wake,
|
||||
futex_wake_bitset, FutexOp, FutexTimeout,
|
||||
};
|
||||
use crate::syscall::SyscallReturn;
|
||||
use crate::syscall::SYS_FUTEX;
|
||||
use crate::util::read_val_from_user;
|
||||
|
||||
use crate::{log_syscall_entry, prelude::*};
|
||||
use jinux_frame::cpu::num_cpus;
|
||||
|
||||
type FutexBitSet = u32;
|
||||
type FutexBucketRef = Arc<Mutex<FutexBucket>>;
|
||||
|
||||
const FUTEX_OP_MASK: u32 = 0x0000_000F;
|
||||
const FUTEX_FLAGS_MASK: u32 = 0xFFFF_FFF0;
|
||||
const FUTEX_BITSET_MATCH_ANY: FutexBitSet = 0xFFFF_FFFF;
|
||||
|
||||
pub fn sys_futex(
|
||||
futex_addr: u64,
|
||||
futex_op: u64,
|
||||
futex_val: u64,
|
||||
futex_addr: Vaddr,
|
||||
futex_op: i32,
|
||||
futex_val: u32,
|
||||
utime_addr: u64,
|
||||
futex_new_addr: u64,
|
||||
bitset: u64,
|
||||
@ -26,6 +18,10 @@ pub fn sys_futex(
|
||||
log_syscall_entry!(SYS_FUTEX);
|
||||
// FIXME: we currently ignore futex flags
|
||||
let (futex_op, futex_flags) = futex_op_and_flags_from_u32(futex_op as _).unwrap();
|
||||
debug!(
|
||||
"futex_op = {:?}, futex_flags = {:?}, futex_addr = 0x{:x}",
|
||||
futex_op, futex_flags, futex_addr
|
||||
);
|
||||
|
||||
let get_futex_val = |val: i32| -> Result<usize> {
|
||||
if val < 0 {
|
||||
@ -74,406 +70,6 @@ pub fn sys_futex(
|
||||
}
|
||||
.unwrap();
|
||||
|
||||
debug!("futex returns, tid= {} ", current_thread!().tid());
|
||||
Ok(SyscallReturn::Return(res as _))
|
||||
}
|
||||
|
||||
/// do futex wait
|
||||
pub fn futex_wait(futex_addr: u64, futex_val: i32, timeout: &Option<FutexTimeout>) -> Result<()> {
|
||||
futex_wait_bitset(futex_addr as _, futex_val, timeout, FUTEX_BITSET_MATCH_ANY)
|
||||
}
|
||||
|
||||
/// do futex wait bitset
|
||||
pub fn futex_wait_bitset(
|
||||
futex_addr: Vaddr,
|
||||
futex_val: i32,
|
||||
timeout: &Option<FutexTimeout>,
|
||||
bitset: FutexBitSet,
|
||||
) -> Result<()> {
|
||||
debug!(
|
||||
"futex_wait_bitset addr: {:#x}, val: {}, timeout: {:?}, bitset: {:#x}",
|
||||
futex_addr, futex_val, timeout, bitset
|
||||
);
|
||||
let futex_key = FutexKey::new(futex_addr);
|
||||
let (_, futex_bucket_ref) = FUTEX_BUCKETS.get_bucket(futex_key);
|
||||
|
||||
// lock futex bucket ref here to avoid data race
|
||||
let mut futex_bucket = futex_bucket_ref.lock();
|
||||
|
||||
if futex_key.load_val() != futex_val {
|
||||
return_errno_with_message!(Errno::EINVAL, "futex value does not match");
|
||||
}
|
||||
let futex_item = FutexItem::new(futex_key, bitset);
|
||||
futex_bucket.enqueue_item(futex_item);
|
||||
|
||||
// drop lock
|
||||
drop(futex_bucket);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// do futex wake
|
||||
pub fn futex_wake(futex_addr: Vaddr, max_count: usize) -> Result<usize> {
|
||||
futex_wake_bitset(futex_addr, max_count, FUTEX_BITSET_MATCH_ANY)
|
||||
}
|
||||
|
||||
/// Do futex wake with bitset
|
||||
pub fn futex_wake_bitset(
|
||||
futex_addr: Vaddr,
|
||||
max_count: usize,
|
||||
bitset: FutexBitSet,
|
||||
) -> Result<usize> {
|
||||
debug!(
|
||||
"futex_wake_bitset addr: {:#x}, max_count: {}, bitset: {:#x}",
|
||||
futex_addr as usize, max_count, bitset
|
||||
);
|
||||
|
||||
let futex_key = FutexKey::new(futex_addr);
|
||||
let (_, futex_bucket_ref) = FUTEX_BUCKETS.get_bucket(futex_key);
|
||||
let mut futex_bucket = futex_bucket_ref.lock();
|
||||
let res = futex_bucket.dequeue_and_wake_items(futex_key, max_count, bitset);
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
/// Do futex requeue
|
||||
pub fn futex_requeue(
|
||||
futex_addr: Vaddr,
|
||||
max_nwakes: usize,
|
||||
max_nrequeues: usize,
|
||||
futex_new_addr: Vaddr,
|
||||
) -> Result<usize> {
|
||||
if futex_new_addr == futex_addr {
|
||||
return futex_wake(futex_addr, max_nwakes);
|
||||
}
|
||||
|
||||
let futex_key = FutexKey::new(futex_addr);
|
||||
let futex_new_key = FutexKey::new(futex_new_addr);
|
||||
let (bucket_idx, futex_bucket_ref) = FUTEX_BUCKETS.get_bucket(futex_key);
|
||||
let (new_bucket_idx, futex_new_bucket_ref) = FUTEX_BUCKETS.get_bucket(futex_new_key);
|
||||
|
||||
let nwakes = {
|
||||
if bucket_idx == new_bucket_idx {
|
||||
let mut futex_bucket = futex_bucket_ref.lock();
|
||||
let nwakes =
|
||||
futex_bucket.dequeue_and_wake_items(futex_key, max_nwakes, FUTEX_BITSET_MATCH_ANY);
|
||||
futex_bucket.update_item_keys(futex_key, futex_new_key, max_nrequeues);
|
||||
drop(futex_bucket);
|
||||
nwakes
|
||||
} else {
|
||||
let (mut futex_bucket, mut futex_new_bucket) = {
|
||||
if bucket_idx < new_bucket_idx {
|
||||
let futex_bucket = futex_bucket_ref.lock();
|
||||
let futext_new_bucket = futex_new_bucket_ref.lock();
|
||||
(futex_bucket, futext_new_bucket)
|
||||
} else {
|
||||
// bucket_idx > new_bucket_idx
|
||||
let futex_new_bucket = futex_new_bucket_ref.lock();
|
||||
let futex_bucket = futex_bucket_ref.lock();
|
||||
(futex_bucket, futex_new_bucket)
|
||||
}
|
||||
};
|
||||
|
||||
let nwakes =
|
||||
futex_bucket.dequeue_and_wake_items(futex_key, max_nwakes, FUTEX_BITSET_MATCH_ANY);
|
||||
futex_bucket.requeue_items_to_another_bucket(
|
||||
futex_key,
|
||||
&mut futex_new_bucket,
|
||||
futex_new_key,
|
||||
max_nrequeues,
|
||||
);
|
||||
nwakes
|
||||
}
|
||||
};
|
||||
Ok(nwakes)
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
// Use the same bucket count as the Linux kernel to keep comparable performance
|
||||
static ref BUCKET_COUNT: usize = ((1<<8)* num_cpus()).next_power_of_two() as _;
|
||||
static ref BUCKET_MASK: usize = *BUCKET_COUNT - 1;
|
||||
static ref FUTEX_BUCKETS: FutexBucketVec = FutexBucketVec::new(*BUCKET_COUNT);
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct FutexTimeout {}
|
||||
|
||||
impl FutexTimeout {
|
||||
pub fn new() -> Self {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
struct FutexBucketVec {
|
||||
vec: Vec<FutexBucketRef>,
|
||||
}
|
||||
|
||||
impl FutexBucketVec {
|
||||
pub fn new(size: usize) -> FutexBucketVec {
|
||||
let mut buckets = FutexBucketVec {
|
||||
vec: Vec::with_capacity(size),
|
||||
};
|
||||
for _ in 0..size {
|
||||
let bucket = Arc::new(Mutex::new(FutexBucket::new()));
|
||||
buckets.vec.push(bucket);
|
||||
}
|
||||
buckets
|
||||
}
|
||||
|
||||
pub fn get_bucket(&self, key: FutexKey) -> (usize, FutexBucketRef) {
|
||||
let index = *BUCKET_MASK & {
|
||||
// The addr is a multiple of 4, so we ignore the last 2 bits
|
||||
let addr = key.addr() >> 2;
|
||||
// simple hash
|
||||
addr / self.size()
|
||||
};
|
||||
(index, self.vec[index].clone())
|
||||
}
|
||||
|
||||
fn size(&self) -> usize {
|
||||
self.vec.len()
|
||||
}
|
||||
}
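get_bucket above hashes the 4-byte-aligned futex address into a power-of-two bucket table. A standalone sketch of that index computation, with illustrative values:

// Standalone sketch of the bucket selection above: mask the hashed address
// into a power-of-two table so the same futex word always maps to one bucket.
fn bucket_index(addr: usize, bucket_count: usize) -> usize {
    // addr is 4-byte aligned, so the low 2 bits carry no information.
    let hashed = (addr >> 2) / bucket_count;
    hashed & (bucket_count - 1) // bucket_count is a power of two
}

fn main() {
    let count = 512; // e.g. 256 * num_cpus, rounded up to a power of two
    let a = 0x7f00_0000_1000usize;
    // The same address always lands in the same bucket.
    assert_eq!(bucket_index(a, count), bucket_index(a, count));
    println!("bucket = {}", bucket_index(a, count));
}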
|
||||
|
||||
struct FutexBucket {
|
||||
queue: VecDeque<FutexItem>,
|
||||
}
|
||||
|
||||
impl FutexBucket {
|
||||
pub fn new() -> FutexBucket {
|
||||
FutexBucket {
|
||||
queue: VecDeque::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn enqueue_item(&mut self, item: FutexItem) {
|
||||
self.queue.push_back(item);
|
||||
}
|
||||
|
||||
pub fn dequeue_item(&mut self, item: &FutexItem) {
|
||||
let item_i = self
|
||||
.queue
|
||||
.iter()
|
||||
.position(|futex_item| *futex_item == *item);
|
||||
if let Some(item_i) = item_i {
|
||||
self.queue.remove(item_i).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn dequeue_and_wake_items(
|
||||
&mut self,
|
||||
key: FutexKey,
|
||||
max_count: usize,
|
||||
bitset: FutexBitSet,
|
||||
) -> usize {
|
||||
let mut count = 0;
|
||||
let mut items_to_wake = Vec::new();
|
||||
|
||||
self.queue.retain(|item| {
|
||||
if count >= max_count || key != item.key || (bitset & item.bitset) == 0 {
|
||||
true
|
||||
} else {
|
||||
items_to_wake.push(item.clone());
|
||||
count += 1;
|
||||
false
|
||||
}
|
||||
});
|
||||
|
||||
FutexItem::batch_wake(&items_to_wake);
|
||||
count
|
||||
}
|
||||
|
||||
pub fn update_item_keys(&mut self, key: FutexKey, new_key: FutexKey, max_count: usize) {
|
||||
let mut count = 0;
|
||||
for item in self.queue.iter_mut() {
|
||||
if count == max_count {
|
||||
break;
|
||||
}
|
||||
if (*item).key == key {
|
||||
(*item).key = new_key;
|
||||
count += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn requeue_items_to_another_bucket(
|
||||
&mut self,
|
||||
key: FutexKey,
|
||||
another: &mut Self,
|
||||
new_key: FutexKey,
|
||||
max_nrequeues: usize,
|
||||
) {
|
||||
let mut count = 0;
|
||||
|
||||
self.queue.retain(|item| {
|
||||
if count >= max_nrequeues || key != item.key {
|
||||
true
|
||||
} else {
|
||||
let mut new_item = item.clone();
|
||||
new_item.key = new_key;
|
||||
another.enqueue_item(new_item);
|
||||
count += 1;
|
||||
false
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
struct FutexItem {
|
||||
key: FutexKey,
|
||||
bitset: FutexBitSet,
|
||||
waiter: FutexWaiterRef,
|
||||
}
|
||||
|
||||
impl FutexItem {
|
||||
pub fn new(key: FutexKey, bitset: FutexBitSet) -> Self {
|
||||
FutexItem {
|
||||
key,
|
||||
bitset,
|
||||
waiter: Arc::new(FutexWaiter::new()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn wake(&self) {
|
||||
self.waiter.wake();
|
||||
}
|
||||
|
||||
pub fn wait(&self) {
|
||||
self.waiter.wait();
|
||||
}
|
||||
|
||||
pub fn waiter(&self) -> &FutexWaiterRef {
|
||||
&self.waiter
|
||||
}
|
||||
|
||||
pub fn batch_wake(items: &[FutexItem]) {
|
||||
let waiters = items.iter().map(|item| item.waiter()).collect::<Vec<_>>();
|
||||
FutexWaiter::batch_wake(&waiters);
|
||||
}
|
||||
}
|
||||
|
||||
// The addr of a futex; it is used to distinguish different futex words
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
struct FutexKey(Vaddr);
|
||||
|
||||
impl FutexKey {
|
||||
pub fn new(futex_addr: Vaddr) -> Self {
|
||||
FutexKey(futex_addr as _)
|
||||
}
|
||||
|
||||
pub fn load_val(&self) -> i32 {
|
||||
// FIXME: how to implement an atomic load?
|
||||
warn!("implement an atomic load");
|
||||
read_val_from_user(self.0).unwrap()
|
||||
}
|
||||
|
||||
pub fn addr(&self) -> Vaddr {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
// The implementation is from occlum
|
||||
|
||||
#[derive(PartialEq)]
|
||||
#[allow(non_camel_case_types)]
|
||||
pub enum FutexOp {
|
||||
FUTEX_WAIT = 0,
|
||||
FUTEX_WAKE = 1,
|
||||
FUTEX_FD = 2,
|
||||
FUTEX_REQUEUE = 3,
|
||||
FUTEX_CMP_REQUEUE = 4,
|
||||
FUTEX_WAKE_OP = 5,
|
||||
FUTEX_LOCK_PI = 6,
|
||||
FUTEX_UNLOCK_PI = 7,
|
||||
FUTEX_TRYLOCK_PI = 8,
|
||||
FUTEX_WAIT_BITSET = 9,
|
||||
FUTEX_WAKE_BITSET = 10,
|
||||
}
|
||||
|
||||
impl FutexOp {
|
||||
pub fn from_u32(bits: u32) -> Result<FutexOp> {
|
||||
match bits {
|
||||
0 => Ok(FutexOp::FUTEX_WAIT),
|
||||
1 => Ok(FutexOp::FUTEX_WAKE),
|
||||
2 => Ok(FutexOp::FUTEX_FD),
|
||||
3 => Ok(FutexOp::FUTEX_REQUEUE),
|
||||
4 => Ok(FutexOp::FUTEX_CMP_REQUEUE),
|
||||
5 => Ok(FutexOp::FUTEX_WAKE_OP),
|
||||
6 => Ok(FutexOp::FUTEX_LOCK_PI),
|
||||
7 => Ok(FutexOp::FUTEX_UNLOCK_PI),
|
||||
8 => Ok(FutexOp::FUTEX_TRYLOCK_PI),
|
||||
9 => Ok(FutexOp::FUTEX_WAIT_BITSET),
|
||||
10 => Ok(FutexOp::FUTEX_WAKE_BITSET),
|
||||
_ => return_errno_with_message!(Errno::EINVAL, "Unknown futex op"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bitflags! {
|
||||
pub struct FutexFlags : u32 {
|
||||
const FUTEX_PRIVATE = 128;
|
||||
const FUTEX_CLOCK_REALTIME = 256;
|
||||
}
|
||||
}
|
||||
|
||||
impl FutexFlags {
|
||||
pub fn from_u32(bits: u32) -> Result<FutexFlags> {
|
||||
FutexFlags::from_bits(bits)
|
||||
.ok_or_else(|| Error::with_message(Errno::EINVAL, "unknown futex flags"))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn futex_op_and_flags_from_u32(bits: u32) -> Result<(FutexOp, FutexFlags)> {
|
||||
let op = {
|
||||
let op_bits = bits & FUTEX_OP_MASK;
|
||||
FutexOp::from_u32(op_bits)?
|
||||
};
|
||||
let flags = {
|
||||
let flags_bits = bits & FUTEX_FLAGS_MASK;
|
||||
FutexFlags::from_u32(flags_bits)?
|
||||
};
|
||||
Ok((op, flags))
|
||||
}
|
||||
|
||||
type FutexWaiterRef = Arc<FutexWaiter>;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct FutexWaiter {
|
||||
is_woken: AtomicBool,
|
||||
pid: Pid,
|
||||
}
|
||||
|
||||
impl PartialEq for FutexWaiter {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.pid == other.pid
|
||||
}
|
||||
}
|
||||
|
||||
impl FutexWaiter {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
is_woken: AtomicBool::new(false),
|
||||
pid: current!().pid(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn wait(&self) {
|
||||
self.is_woken.store(false, Ordering::SeqCst);
|
||||
while !self.is_woken() {
|
||||
Process::yield_now();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn wake(&self) {
|
||||
self.is_woken.store(true, Ordering::SeqCst);
|
||||
}
|
||||
|
||||
pub fn is_woken(&self) -> bool {
|
||||
self.is_woken.load(Ordering::SeqCst)
|
||||
}
|
||||
|
||||
pub fn batch_wake(waiters: &[&FutexWaiterRef]) {
|
||||
waiters.iter().for_each(|waiter| {
|
||||
waiter.wake();
|
||||
});
|
||||
}
|
||||
}
|
||||
|
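The futex_wait path above refuses to sleep if the futex word no longer holds the expected value, so a wake that races with the wait is never lost. A standalone user-space sketch of that compare-then-wait rule follows; it spins with yield_now, mirroring the spinning FutexWaiter in this patch rather than a real kernel wait queue.

// Standalone sketch of the compare-then-wait rule futex_wait implements above.
use std::sync::atomic::{AtomicI32, Ordering};
use std::sync::Arc;
use std::thread;

fn futex_wait_sketch(word: &AtomicI32, expected: i32) {
    // Keep waiting only while the futex word still holds the expected value.
    while word.load(Ordering::SeqCst) == expected {
        thread::yield_now();
    }
}

fn main() {
    let word = Arc::new(AtomicI32::new(0));
    let waiter = {
        let word = Arc::clone(&word);
        thread::spawn(move || futex_wait_sketch(&word, 0))
    };
    // "futex_wake": change the word so the waiter stops waiting.
    word.store(1, Ordering::SeqCst);
    waiter.join().unwrap();
    println!("woken");
}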
@ -6,7 +6,7 @@ use super::SyscallReturn;
|
||||
|
||||
pub fn sys_gettid() -> Result<SyscallReturn> {
|
||||
log_syscall_entry!(SYS_GETTID);
|
||||
// For single-thread process, tid is equal to pid
|
||||
let tid = current!().pid();
|
||||
let current_thread = current_thread!();
|
||||
let tid = current_thread.tid();
|
||||
Ok(SyscallReturn::Return(tid as _))
|
||||
}
|
||||
|
@ -1,7 +1,7 @@
|
||||
use crate::{log_syscall_entry, prelude::*};
|
||||
|
||||
use crate::process::signal::signals::user::{UserSignal, UserSignalKind};
|
||||
use crate::process::{table, Process};
|
||||
use crate::process::{process_table, Process};
|
||||
use crate::{
|
||||
process::{process_filter::ProcessFilter, signal::sig_num::SigNum},
|
||||
syscall::SYS_KILL,
|
||||
@ -42,12 +42,12 @@ pub fn do_sys_kill(process_filter: ProcessFilter, sig_num: SigNum) -> Result<()>
|
||||
fn get_processes(filter: &ProcessFilter) -> Result<Vec<Arc<Process>>> {
|
||||
let processes = match filter {
|
||||
ProcessFilter::Any => {
|
||||
let mut processes = table::get_all_processes();
|
||||
let mut processes = process_table::get_all_processes();
|
||||
processes.retain(|process| process.pid() != 0);
|
||||
processes
|
||||
}
|
||||
ProcessFilter::WithPid(pid) => {
|
||||
let process = table::pid_to_process(*pid);
|
||||
let process = process_table::pid_to_process(*pid);
|
||||
match process {
|
||||
None => {
|
||||
return_errno_with_message!(Errno::ESRCH, "No such process in process table")
|
||||
|
95
src/services/libs/jinux-std/src/syscall/madvise.rs
Normal file
@ -0,0 +1,95 @@
|
||||
use crate::util::read_bytes_from_user;
|
||||
use crate::{log_syscall_entry, prelude::*};
|
||||
|
||||
use super::SyscallReturn;
|
||||
use super::SYS_MADVISE;
|
||||
|
||||
pub fn sys_madvise(start: Vaddr, len: usize, behavior: i32) -> Result<SyscallReturn> {
|
||||
log_syscall_entry!(SYS_MADVISE);
|
||||
let behavior = MadviseBehavior::try_from(behavior)?;
|
||||
debug!(
|
||||
"start = 0x{:x}, len = 0x{:x}, behavior = {:?}",
|
||||
start, len, behavior
|
||||
);
|
||||
match behavior {
|
||||
MadviseBehavior::MADV_NORMAL
|
||||
| MadviseBehavior::MADV_SEQUENTIAL
|
||||
| MadviseBehavior::MADV_WILLNEED => {
|
||||
// perform a read at first
|
||||
let mut buffer = vec![0u8; len];
|
||||
read_bytes_from_user(start, &mut buffer)?;
|
||||
}
|
||||
MadviseBehavior::MADV_DONTNEED => madv_dontneed(start, len)?,
|
||||
_ => todo!(),
|
||||
}
|
||||
Ok(SyscallReturn::Return(0))
|
||||
}
|
||||
|
||||
fn madv_dontneed(start: Vaddr, len: usize) -> Result<()> {
|
||||
debug_assert!(start % PAGE_SIZE == 0);
|
||||
debug_assert!(len % PAGE_SIZE == 0);
|
||||
let current = current!();
|
||||
let root_vmar = current.root_vmar();
|
||||
let vm_mapping = root_vmar.get_vm_mapping(start)?;
|
||||
// Ensure the range lies entirely within the mapping
|
||||
debug_assert!(vm_mapping.map_to_addr() <= start);
|
||||
debug_assert!(start + len <= vm_mapping.map_to_addr() + vm_mapping.size());
|
||||
vm_mapping.unmap_and_decommit(start..(start + len))
|
||||
}
|
||||
|
||||
#[repr(i32)]
|
||||
#[derive(Debug, Clone, Copy, Pod)]
|
||||
#[allow(non_camel_case_types)]
|
||||
/// This definition is the same as in Linux
|
||||
pub enum MadviseBehavior {
|
||||
MADV_NORMAL = 0, /* no further special treatment */
|
||||
MADV_RANDOM = 1, /* expect random page references */
|
||||
MADV_SEQUENTIAL = 2, /* expect sequential page references */
|
||||
MADV_WILLNEED = 3, /* will need these pages */
|
||||
MADV_DONTNEED = 4, /* don't need these pages */
|
||||
|
||||
/* common parameters: try to keep these consistent across architectures */
|
||||
MADV_FREE = 8, /* free pages only if memory pressure */
|
||||
MADV_REMOVE = 9, /* remove these pages & resources */
|
||||
MADV_DONTFORK = 10, /* don't inherit across fork */
|
||||
MADV_DOFORK = 11, /* do inherit across fork */
|
||||
MADV_HWPOISON = 100, /* poison a page for testing */
|
||||
MADV_SOFT_OFFLINE = 101, /* soft offline page for testing */
|
||||
|
||||
MADV_MERGEABLE = 12, /* KSM may merge identical pages */
|
||||
MADV_UNMERGEABLE = 13, /* KSM may not merge identical pages */
|
||||
|
||||
MADV_HUGEPAGE = 14, /* Worth backing with hugepages */
|
||||
MADV_NOHUGEPAGE = 15, /* Not worth backing with hugepages */
|
||||
|
||||
MADV_DONTDUMP = 16, /* Explicitly exclude from the core dump,
|
||||
overrides the coredump filter bits */
|
||||
MADV_DODUMP = 17, /* Clear the MADV_DONTDUMP flag */
|
||||
|
||||
MADV_WIPEONFORK = 18, /* Zero memory on fork, child only */
|
||||
MADV_KEEPONFORK = 19, /* Undo MADV_WIPEONFORK */
|
||||
|
||||
MADV_COLD = 20, /* deactivate these pages */
|
||||
MADV_PAGEOUT = 21, /* reclaim these pages */
|
||||
|
||||
MADV_POPULATE_READ = 22, /* populate (prefault) page tables readable */
|
||||
MADV_POPULATE_WRITE = 23, /* populate (prefault) page tables writable */
|
||||
|
||||
MADV_DONTNEED_LOCKED = 24, /* like DONTNEED, but drop locked pages too */
|
||||
}
|
||||
|
||||
impl TryFrom<i32> for MadviseBehavior {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(value: i32) -> Result<Self> {
|
||||
let behavior = match value {
|
||||
0 => MadviseBehavior::MADV_NORMAL,
|
||||
1 => MadviseBehavior::MADV_RANDOM,
|
||||
2 => MadviseBehavior::MADV_SEQUENTIAL,
|
||||
3 => MadviseBehavior::MADV_WILLNEED,
|
||||
4 => MadviseBehavior::MADV_DONTNEED,
|
||||
_ => return_errno_with_message!(Errno::EINVAL, "invalid madvise behavior"),
|
||||
};
|
||||
Ok(behavior)
|
||||
}
|
||||
}
|
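madv_dontneed above unmaps and decommits the advised range, so a later read sees fresh zero pages. A user-space demo of the same observable behavior is sketched below; it assumes a Linux host and the libc crate, and is not part of this patch.

// Hedged, standalone user-space demo (assumes Linux and the `libc` crate):
// after MADV_DONTNEED, anonymous pages are dropped and read back as zeros,
// which matches the unmap-and-decommit behavior of madv_dontneed() above.
fn main() {
    const LEN: usize = 4096;
    unsafe {
        let addr = libc::mmap(
            std::ptr::null_mut(),
            LEN,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
            -1,
            0,
        );
        assert_ne!(addr, libc::MAP_FAILED);
        let page = addr as *mut u8;
        *page = 42;
        assert_eq!(*page, 42);
        // Tell the kernel we no longer need the page.
        assert_eq!(libc::madvise(addr, LEN, libc::MADV_DONTNEED), 0);
        // The anonymous page is now backed by zeros again.
        assert_eq!(*page, 0);
        libc::munmap(addr, LEN);
    }
}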
@ -1,5 +1,6 @@
|
||||
//! This mod defines mmap flags and the handler to syscall mmap
|
||||
|
||||
use crate::fs::file::FileDescripter;
|
||||
use crate::process::process_vm::mmap_flags::MMapFlags;
|
||||
use crate::rights::Rights;
|
||||
use crate::vm::perms::VmPerms;
|
||||
@ -27,7 +28,7 @@ pub fn sys_mmap(
|
||||
len as usize,
|
||||
perms,
|
||||
flags,
|
||||
fd as usize,
|
||||
fd as _,
|
||||
offset as usize,
|
||||
)?;
|
||||
Ok(SyscallReturn::Return(res as _))
|
||||
@ -38,10 +39,10 @@ pub fn do_sys_mmap(
|
||||
len: usize,
|
||||
vm_perm: VmPerm,
|
||||
flags: MMapFlags,
|
||||
fd: usize,
|
||||
fd: FileDescripter,
|
||||
offset: usize,
|
||||
) -> Result<Vaddr> {
|
||||
info!(
|
||||
debug!(
|
||||
"addr = 0x{:x}, len = 0x{:x}, perms = {:?}, flags = {:?}, fd = {}, offset = 0x{:x}",
|
||||
addr, len, vm_perm, flags, fd, offset
|
||||
);
|
||||
@ -76,7 +77,7 @@ pub fn mmap_anonymous_vmo(
|
||||
let vmo_options: VmoOptions<Rights> = VmoOptions::new(len);
|
||||
let vmo = vmo_options.alloc()?;
|
||||
let current = current!();
|
||||
let root_vmar = current.root_vmar().unwrap();
|
||||
let root_vmar = current.root_vmar();
|
||||
let perms = VmPerms::from(vm_perm);
|
||||
let mut vmar_map_options = root_vmar.new_map(vmo, perms)?;
|
||||
if flags.contains(MMapFlags::MAP_FIXED) {
|
||||
|
@ -4,6 +4,7 @@ use crate::prelude::*;
|
||||
use crate::syscall::access::sys_access;
|
||||
use crate::syscall::arch_prctl::sys_arch_prctl;
|
||||
use crate::syscall::brk::sys_brk;
|
||||
use crate::syscall::clock_nanosleep::sys_clock_nanosleep;
|
||||
use crate::syscall::clone::sys_clone;
|
||||
use crate::syscall::close::sys_close;
|
||||
use crate::syscall::execve::sys_execve;
|
||||
@ -26,19 +27,24 @@ use crate::syscall::ioctl::sys_ioctl;
|
||||
use crate::syscall::kill::sys_kill;
|
||||
use crate::syscall::lseek::sys_lseek;
|
||||
use crate::syscall::lstat::sys_lstat;
|
||||
use crate::syscall::madvise::sys_madvise;
|
||||
use crate::syscall::mmap::sys_mmap;
|
||||
use crate::syscall::mprotect::sys_mprotect;
|
||||
use crate::syscall::munmap::sys_munmap;
|
||||
use crate::syscall::openat::sys_openat;
|
||||
use crate::syscall::poll::sys_poll;
|
||||
use crate::syscall::prctl::sys_prctl;
|
||||
use crate::syscall::prlimit64::sys_prlimit64;
|
||||
use crate::syscall::read::sys_read;
|
||||
use crate::syscall::readlink::sys_readlink;
|
||||
use crate::syscall::rt_sigaction::sys_rt_sigaction;
|
||||
use crate::syscall::rt_sigprocmask::sys_rt_sigprocmask;
|
||||
use crate::syscall::rt_sigreturn::sys_rt_sigreturn;
|
||||
use crate::syscall::sched_yield::sys_sched_yield;
|
||||
use crate::syscall::set_robust_list::sys_set_robust_list;
|
||||
use crate::syscall::set_tid_address::sys_set_tid_address;
|
||||
use crate::syscall::setpgid::sys_setpgid;
|
||||
use crate::syscall::stat::sys_stat;
|
||||
use crate::syscall::tgkill::sys_tgkill;
|
||||
use crate::syscall::uname::sys_uname;
|
||||
use crate::syscall::wait4::sys_wait4;
|
||||
@ -50,6 +56,7 @@ use jinux_frame::cpu::CpuContext;
|
||||
mod access;
|
||||
mod arch_prctl;
|
||||
mod brk;
|
||||
mod clock_nanosleep;
|
||||
mod clone;
|
||||
mod close;
|
||||
mod constants;
|
||||
@ -73,19 +80,24 @@ mod ioctl;
|
||||
mod kill;
|
||||
mod lseek;
|
||||
mod lstat;
|
||||
mod madvise;
|
||||
mod mmap;
|
||||
mod mprotect;
|
||||
mod munmap;
|
||||
mod openat;
|
||||
mod poll;
|
||||
mod prctl;
|
||||
mod prlimit64;
|
||||
mod read;
|
||||
mod readlink;
|
||||
mod rt_sigaction;
|
||||
mod rt_sigprocmask;
|
||||
mod rt_sigreturn;
|
||||
mod sched_yield;
|
||||
mod set_robust_list;
|
||||
mod set_tid_address;
|
||||
mod setpgid;
|
||||
mod stat;
|
||||
mod tgkill;
|
||||
mod uname;
|
||||
mod wait4;
|
||||
@ -127,6 +139,7 @@ define_syscall_nums!(
|
||||
SYS_READ = 0,
|
||||
SYS_WRITE = 1,
|
||||
SYS_CLOSE = 3,
|
||||
SYS_STAT = 4,
|
||||
SYS_FSTAT = 5,
|
||||
SYS_LSTAT = 6,
|
||||
SYS_POLL = 7,
|
||||
@ -142,6 +155,7 @@ define_syscall_nums!(
|
||||
SYS_WRITEV = 20,
|
||||
SYS_ACCESS = 21,
|
||||
SYS_SCHED_YIELD = 24,
|
||||
SYS_MADVISE = 28,
|
||||
SYS_GETPID = 39,
|
||||
SYS_CLONE = 56,
|
||||
SYS_FORK = 57,
|
||||
@ -164,10 +178,14 @@ define_syscall_nums!(
|
||||
SYS_ARCH_PRCTL = 158,
|
||||
SYS_GETTID = 186,
|
||||
SYS_FUTEX = 202,
|
||||
SYS_SET_TID_ADDRESS = 218,
|
||||
SYS_CLOCK_NANOSLEEP = 230,
|
||||
SYS_EXIT_GROUP = 231,
|
||||
SYS_TGKILL = 234,
|
||||
SYS_WAITID = 247,
|
||||
SYS_OPENAT = 257
|
||||
SYS_OPENAT = 257,
|
||||
SYS_SET_ROBUST_LIST = 273,
|
||||
SYS_PRLIMIT64 = 302
|
||||
);
|
||||
|
||||
pub struct SyscallArgument {
|
||||
@ -208,15 +226,14 @@ pub fn handle_syscall(context: &mut CpuContext) {
|
||||
|
||||
match syscall_return {
|
||||
Ok(return_value) => {
|
||||
debug!("syscall return: {:?}", return_value);
|
||||
if let SyscallReturn::Return(return_value) = return_value {
|
||||
context.gp_regs.rax = return_value as u64;
|
||||
context.set_rax(return_value as u64);
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
debug!("syscall return error: {:?}", err);
|
||||
let errno = err.error() as i32;
|
||||
context.gp_regs.rax = (-errno) as u64
|
||||
context.set_rax((-errno) as u64)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -230,6 +247,7 @@ pub fn syscall_dispatch(
|
||||
SYS_READ => syscall_handler!(3, sys_read, args),
|
||||
SYS_WRITE => syscall_handler!(3, sys_write, args),
|
||||
SYS_CLOSE => syscall_handler!(1, sys_close, args),
|
||||
SYS_STAT => syscall_handler!(2, sys_stat, args),
|
||||
SYS_FSTAT => syscall_handler!(2, sys_fstat, args),
|
||||
SYS_LSTAT => syscall_handler!(2, sys_lstat, args),
|
||||
SYS_POLL => syscall_handler!(3, sys_poll, args),
|
||||
@ -245,6 +263,7 @@ pub fn syscall_dispatch(
|
||||
SYS_WRITEV => syscall_handler!(3, sys_writev, args),
|
||||
SYS_ACCESS => syscall_handler!(2, sys_access, args),
|
||||
SYS_SCHED_YIELD => syscall_handler!(0, sys_sched_yield),
|
||||
SYS_MADVISE => syscall_handler!(3, sys_madvise, args),
|
||||
SYS_GETPID => syscall_handler!(0, sys_getpid),
|
||||
SYS_CLONE => syscall_handler!(5, sys_clone, args, context.clone()),
|
||||
SYS_FORK => syscall_handler!(0, sys_fork, context.clone()),
|
||||
@ -267,11 +286,18 @@ pub fn syscall_dispatch(
|
||||
SYS_ARCH_PRCTL => syscall_handler!(2, sys_arch_prctl, args, context),
|
||||
SYS_GETTID => syscall_handler!(0, sys_gettid),
|
||||
SYS_FUTEX => syscall_handler!(6, sys_futex, args),
|
||||
SYS_SET_TID_ADDRESS => syscall_handler!(1, sys_set_tid_address, args),
|
||||
SYS_CLOCK_NANOSLEEP => syscall_handler!(4, sys_clock_nanosleep, args),
|
||||
SYS_EXIT_GROUP => syscall_handler!(1, sys_exit_group, args),
|
||||
SYS_TGKILL => syscall_handler!(3, sys_tgkill, args),
|
||||
SYS_WAITID => syscall_handler!(5, sys_waitid, args),
|
||||
SYS_OPENAT => syscall_handler!(4, sys_openat, args),
|
||||
_ => panic!("Unsupported syscall number: {}, args:{:x?}", syscall_number,args),
|
||||
SYS_SET_ROBUST_LIST => syscall_handler!(2, sys_set_robust_list, args),
|
||||
SYS_PRLIMIT64 => syscall_handler!(4, sys_prlimit64, args),
|
||||
_ => {
|
||||
error!("Unimplemented syscall number: {}", syscall_number);
|
||||
return_errno_with_message!(Errno::ENOSYS, "Syscall was unimplemented");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
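With this change, syscall_dispatch returns ENOSYS for unimplemented syscall numbers instead of panicking the kernel. A standalone sketch of that pattern follows; the syscall numbers and errno value mirror Linux x86-64, while the dispatcher itself is illustrative.

// Standalone sketch: unknown syscall numbers fail gracefully with -ENOSYS.
const SYS_READ: u64 = 0;
const SYS_WRITE: u64 = 1;
const ENOSYS: i64 = 38;

fn syscall_dispatch(num: u64) -> i64 {
    match num {
        SYS_READ => 0,  // would call sys_read(args)
        SYS_WRITE => 0, // would call sys_write(args)
        _ => {
            eprintln!("Unimplemented syscall number: {}", num);
            -ENOSYS
        }
    }
}

fn main() {
    assert_eq!(syscall_dispatch(SYS_READ), 0);
    // e.g. an unimplemented io_uring_setup (425 on x86-64) returns -ENOSYS.
    assert_eq!(syscall_dispatch(425), -ENOSYS);
}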
@ -1,23 +1,21 @@
|
||||
use jinux_frame::vm::VmPerm;
|
||||
|
||||
use crate::{log_syscall_entry, prelude::*};
|
||||
|
||||
use crate::syscall::SYS_MPROTECT;
|
||||
use crate::vm::perms::VmPerms;
|
||||
|
||||
use super::SyscallReturn;
|
||||
|
||||
pub fn sys_mprotect(vaddr: u64, len: u64, perms: u64) -> Result<SyscallReturn> {
|
||||
pub fn sys_mprotect(addr: Vaddr, len: usize, perms: u64) -> Result<SyscallReturn> {
|
||||
log_syscall_entry!(SYS_MPROTECT);
|
||||
let perms = VmPerm::try_from(perms).unwrap();
|
||||
do_sys_mprotect(vaddr as Vaddr, len as usize, perms);
|
||||
Ok(SyscallReturn::Return(0))
|
||||
}
|
||||
|
||||
pub fn do_sys_mprotect(addr: Vaddr, len: usize, perms: VmPerm) -> isize {
|
||||
// let perms = VmPerm::try_from(perms).unwrap();
|
||||
let vm_perms = VmPerms::from_bits_truncate(perms as u32);
|
||||
debug!(
|
||||
"addr = 0x{:x}, len = 0x{:x}, perms = {:?}",
|
||||
addr, len, perms
|
||||
addr, len, vm_perms
|
||||
);
|
||||
// TODO: mprotect do nothing now
|
||||
0
|
||||
let current = current!();
|
||||
let root_vmar = current.root_vmar();
|
||||
let range = addr..(addr + len);
|
||||
root_vmar.protect(vm_perms, range)?;
|
||||
Ok(SyscallReturn::Return(0))
|
||||
}
|
||||
|
@ -4,7 +4,7 @@ use crate::fs::file_handle::FileHandle;
|
||||
use crate::log_syscall_entry;
|
||||
use crate::prelude::*;
|
||||
use crate::syscall::constants::MAX_FILENAME_LEN;
|
||||
use crate::tty::get_console;
|
||||
use crate::tty::get_n_tty;
|
||||
use crate::util::read_cstring_from_user;
|
||||
|
||||
use super::SyscallReturn;
|
||||
@ -38,11 +38,16 @@ pub fn sys_openat(
|
||||
}
|
||||
|
||||
if dirfd == AT_FDCWD && pathname == CString::new("./trace")? {
|
||||
return_errno_with_message!(Errno::ENOENT, "No such file");
|
||||
// Debug use: this file is used to output the busybox log
|
||||
let trace_file = FileHandle::new_file(Arc::new(BusyBoxTraceFile) as Arc<dyn File>);
|
||||
let current = current!();
|
||||
let mut file_table = current.file_table().lock();
|
||||
let fd = file_table.insert(trace_file);
|
||||
return Ok(SyscallReturn::Return(fd as _));
|
||||
}
|
||||
|
||||
if dirfd == AT_FDCWD && pathname == CString::new("/dev/tty")? {
|
||||
let tty_file = FileHandle::new_file(get_console().clone() as Arc<dyn File>);
|
||||
let tty_file = FileHandle::new_file(get_n_tty().clone() as Arc<dyn File>);
|
||||
let current = current!();
|
||||
let mut file_table = current.file_table().lock();
|
||||
let fd = file_table.insert(tty_file);
|
||||
@ -50,3 +55,13 @@ pub fn sys_openat(
|
||||
}
|
||||
todo!()
|
||||
}
|
||||
|
||||
/// File for outputting the busybox ash log.
|
||||
struct BusyBoxTraceFile;
|
||||
|
||||
impl File for BusyBoxTraceFile {
|
||||
fn write(&self, buf: &[u8]) -> Result<usize> {
|
||||
debug!("ASH TRACE: {}", core::str::from_utf8(buf)?);
|
||||
Ok(buf.len())
|
||||
}
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
use crate::log_syscall_entry;
|
||||
use crate::prelude::*;
|
||||
use crate::process::name::MAX_PROCESS_NAME_LEN;
|
||||
use crate::process::posix_thread::name::MAX_THREAD_NAME_LEN;
|
||||
use crate::process::posix_thread::posix_thread_ext::PosixThreadExt;
|
||||
use crate::util::read_cstring_from_user;
|
||||
use crate::util::write_bytes_to_user;
|
||||
|
||||
@ -10,21 +11,22 @@ pub fn sys_prctl(option: i32, arg2: u64, arg3: u64, arg4: u64, arg5: u64) -> Res
|
||||
log_syscall_entry!(SYS_PRCTL);
|
||||
let prctl_cmd = PrctlCmd::from_args(option, arg2, arg3, arg4, arg5)?;
|
||||
debug!("prctl cmd = {:?}", prctl_cmd);
|
||||
let current = current!();
|
||||
let current_thread = current_thread!();
|
||||
let posix_thread = current_thread.posix_thread();
|
||||
match prctl_cmd {
|
||||
PrctlCmd::PR_GET_NAME(write_to_addr) => {
|
||||
let process_name = current.process_name().lock();
|
||||
if let Some(process_name) = &*process_name {
|
||||
if let Some(process_name) = process_name.get_name()? {
|
||||
write_bytes_to_user(write_to_addr, process_name.to_bytes_with_nul())?;
|
||||
let thread_name = posix_thread.thread_name().lock();
|
||||
if let Some(thread_name) = &*thread_name {
|
||||
if let Some(thread_name) = thread_name.get_name()? {
|
||||
write_bytes_to_user(write_to_addr, thread_name.to_bytes_with_nul())?;
|
||||
}
|
||||
}
|
||||
}
|
||||
PrctlCmd::PR_SET_NAME(read_addr) => {
|
||||
let mut process_name = current.process_name().lock();
|
||||
if let Some(process_name) = &mut *process_name {
|
||||
let new_process_name = read_cstring_from_user(read_addr, MAX_PROCESS_NAME_LEN)?;
|
||||
process_name.set_name(&new_process_name)?;
|
||||
let mut thread_name = posix_thread.thread_name().lock();
|
||||
if let Some(thread_name) = &mut *thread_name {
|
||||
let new_thread_name = read_cstring_from_user(read_addr, MAX_THREAD_NAME_LEN)?;
|
||||
thread_name.set_name(&new_thread_name)?;
|
||||
}
|
||||
}
|
||||
_ => todo!(),
|
||||
|
31
src/services/libs/jinux-std/src/syscall/prlimit64.rs
Normal file
@ -0,0 +1,31 @@
|
||||
use crate::process::rlimit::ResourceType;
|
||||
use crate::util::{read_val_from_user, write_val_to_user};
|
||||
use crate::{log_syscall_entry, prelude::*, process::Pid};
|
||||
|
||||
use super::SyscallReturn;
|
||||
use super::SYS_PRLIMIT64;
|
||||
|
||||
pub fn sys_prlimit64(
|
||||
pid: Pid,
|
||||
resource: u32,
|
||||
new_rlim_addr: Vaddr,
|
||||
old_rlim_addr: Vaddr,
|
||||
) -> Result<SyscallReturn> {
|
||||
log_syscall_entry!(SYS_PRLIMIT64);
|
||||
let resource = ResourceType::try_from(resource)?;
|
||||
debug!(
|
||||
"pid = {}, resource = {:?}, new_rlim_addr = 0x{:x}, old_rlim_addr = 0x{:x}",
|
||||
pid, resource, new_rlim_addr, old_rlim_addr
|
||||
);
|
||||
let current = current!();
|
||||
let mut resource_limits = current.resource_limits().lock();
|
||||
if old_rlim_addr != 0 {
|
||||
let rlimit = resource_limits.get_rlimit(resource);
|
||||
write_val_to_user(old_rlim_addr, rlimit)?;
|
||||
}
|
||||
if new_rlim_addr != 0 {
|
||||
let new_rlimit = read_val_from_user(new_rlim_addr)?;
|
||||
*resource_limits.get_rlimit_mut(resource) = new_rlimit;
|
||||
}
|
||||
Ok(SyscallReturn::Return(0))
|
||||
}
|
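sys_prlimit64 above serves reads and writes of the per-process ResourceLimits table. For reference, a user-space counterpart that queries the open-file limit is sketched below; it assumes a Linux host and the libc crate, and is not part of this patch.

// User-space counterpart (assumes Linux and the `libc` crate): reading the
// soft/hard NOFILE limits that sys_prlimit64 serves from ResourceLimits.
fn main() {
    let mut rl = libc::rlimit { rlim_cur: 0, rlim_max: 0 };
    let ret = unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut rl) };
    assert_eq!(ret, 0);
    println!("soft = {}, hard = {}", rl.rlim_cur, rl.rlim_max);
}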
@ -10,29 +10,32 @@ use super::SyscallReturn;
|
||||
|
||||
pub fn sys_rt_sigaction(
|
||||
sig_num: u8,
|
||||
sig_action_ptr: Vaddr,
|
||||
old_sig_action_ptr: Vaddr,
|
||||
sig_action_addr: Vaddr,
|
||||
old_sig_action_addr: Vaddr,
|
||||
sigset_size: u64,
|
||||
) -> Result<SyscallReturn> {
|
||||
log_syscall_entry!(SYS_RT_SIGACTION);
|
||||
let sig_num = SigNum::try_from(sig_num)?;
|
||||
let sig_action_c = read_val_from_user::<sigaction_t>(sig_action_ptr)?;
|
||||
let sig_action = SigAction::try_from(sig_action_c).unwrap();
|
||||
debug!(
|
||||
"sig_num = {}, sig_action = {:x?}, old_sig_action_ptr = 0x{:x}, sigset_size = {}",
|
||||
"signal = {}, sig_action_addr = 0x{:x}, old_sig_action_addr = 0x{:x}, sigset_size = {}",
|
||||
sig_num.sig_name(),
|
||||
sig_action,
|
||||
old_sig_action_ptr,
|
||||
sig_action_addr,
|
||||
old_sig_action_addr,
|
||||
sigset_size
|
||||
);
|
||||
|
||||
let current = current!();
|
||||
let mut sig_dispositions = current.sig_dispositions().lock();
|
||||
let old_action = sig_dispositions.get(sig_num);
|
||||
let old_action_c = old_action.to_c();
|
||||
sig_dispositions.set(sig_num, sig_action);
|
||||
if old_sig_action_ptr != 0 {
|
||||
write_val_to_user(old_sig_action_ptr, &old_action_c)?;
|
||||
if old_sig_action_addr != 0 {
|
||||
write_val_to_user(old_sig_action_addr, &old_action_c)?;
|
||||
}
|
||||
if sig_action_addr != 0 {
|
||||
let sig_action_c = read_val_from_user::<sigaction_t>(sig_action_addr)?;
|
||||
let sig_action = SigAction::try_from(sig_action_c).unwrap();
|
||||
debug!("sig action = {:?}", sig_action);
|
||||
sig_dispositions.set(sig_num, sig_action);
|
||||
}
|
||||
|
||||
Ok(SyscallReturn::Return(0))
|
||||
}
|
||||
|
@ -1,8 +1,11 @@
|
||||
use jinux_frame::vm::VmIo;
|
||||
|
||||
use crate::process::posix_thread::posix_thread_ext::PosixThreadExt;
|
||||
use crate::process::signal::constants::{SIGKILL, SIGSTOP};
|
||||
use crate::{
|
||||
log_syscall_entry,
|
||||
prelude::*,
|
||||
process::signal::sig_mask::SigMask,
|
||||
syscall::{SyscallReturn, SYS_RT_SIGPROCMASK},
|
||||
};
|
||||
|
||||
@ -32,8 +35,10 @@ fn do_rt_sigprocmask(
|
||||
sigset_size: usize,
|
||||
) -> Result<()> {
|
||||
let current = current!();
|
||||
let root_vmar = current.root_vmar().unwrap();
|
||||
let mut sig_mask = current.sig_mask().lock();
|
||||
let current_thread = current_thread!();
|
||||
let posix_thread = current_thread.posix_thread();
|
||||
let root_vmar = current.root_vmar();
|
||||
let mut sig_mask = posix_thread.sig_mask().lock();
|
||||
let old_sig_mask_value = sig_mask.as_u64();
|
||||
debug!("old sig mask value: 0x{:x}", old_sig_mask_value);
|
||||
if oldset_ptr != 0 {
|
||||
@ -41,9 +46,15 @@ fn do_rt_sigprocmask(
|
||||
}
|
||||
if set_ptr != 0 {
|
||||
let new_set = root_vmar.read_val::<u64>(set_ptr)?;
|
||||
debug!("new set = 0x{:x}", new_set);
|
||||
match mask_op {
|
||||
MaskOp::Block => sig_mask.block(new_set),
|
||||
MaskOp::Block => {
|
||||
let mut new_sig_mask = SigMask::from(new_set);
|
||||
// According to man pages, "it is not possible to block SIGKILL or SIGSTOP.
|
||||
// Attempts to do so are silently ignored."
|
||||
new_sig_mask.remove_signal(SIGKILL);
|
||||
new_sig_mask.remove_signal(SIGSTOP);
|
||||
sig_mask.block(new_sig_mask.as_u64());
|
||||
}
|
||||
MaskOp::Unblock => sig_mask.unblock(new_set),
|
||||
MaskOp::SetMask => sig_mask.set(new_set),
|
||||
}
|
||||
|
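The SIG_BLOCK branch above strips SIGKILL and SIGSTOP from the requested mask before blocking, as the man page requires. A standalone sketch of that rule with illustrative signal numbering:

// Standalone sketch of the SIG_BLOCK rule noted above: SIGKILL and SIGSTOP
// are silently dropped from the requested mask before it is applied.
const SIGKILL: u8 = 9;
const SIGSTOP: u8 = 19;

fn bit(sig: u8) -> u64 {
    1u64 << (sig - 1)
}

fn block(current_mask: u64, requested: u64) -> u64 {
    let requested = requested & !(bit(SIGKILL) | bit(SIGSTOP));
    current_mask | requested
}

fn main() {
    let mask = block(0, bit(SIGKILL) | bit(2 /* SIGINT */));
    assert_eq!(mask, bit(2)); // SIGKILL was ignored, SIGINT is now blocked.
}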
@ -1,5 +1,8 @@
|
||||
use crate::{
|
||||
log_syscall_entry, prelude::*, process::signal::c_types::ucontext_t, util::read_val_from_user,
|
||||
log_syscall_entry,
|
||||
prelude::*,
|
||||
process::{posix_thread::posix_thread_ext::PosixThreadExt, signal::c_types::ucontext_t},
|
||||
util::read_val_from_user,
|
||||
};
|
||||
use jinux_frame::cpu::CpuContext;
|
||||
|
||||
@ -7,12 +10,28 @@ use super::{SyscallReturn, SYS_RT_SIGRETRUN};
|
||||
|
||||
pub fn sys_rt_sigreturn(context: &mut CpuContext) -> Result<SyscallReturn> {
|
||||
log_syscall_entry!(SYS_RT_SIGRETRUN);
|
||||
let current = current!();
|
||||
let sig_context = current.sig_context().lock().pop_back().unwrap();
|
||||
let ucontext = read_val_from_user::<ucontext_t>(sig_context)?;
|
||||
let current_thread = current_thread!();
|
||||
let posix_thread = current_thread.posix_thread();
|
||||
let mut sig_context = posix_thread.sig_context().lock();
|
||||
if None == *sig_context {
|
||||
return_errno_with_message!(Errno::EINVAL, "sigretrun should not been called");
|
||||
}
|
||||
let sig_context_addr = sig_context.unwrap();
|
||||
// FIXME: This assertion is not always true, e.g. if the RESTORER flag is not present.
// In that case, we put the restorer code on the user stack, and the assertion will fail.
// However, for most glibc applications, the restorer code is provided by glibc and the RESTORER flag is set.
|
||||
debug_assert!(sig_context_addr == context.gp_regs.rsp as Vaddr);
|
||||
|
||||
let ucontext = read_val_from_user::<ucontext_t>(sig_context_addr)?;
|
||||
// Set previous ucontext address
|
||||
if ucontext.uc_link == 0 {
|
||||
*sig_context = None;
|
||||
} else {
|
||||
*sig_context = Some(ucontext.uc_link);
|
||||
};
|
||||
context.gp_regs = ucontext.uc_mcontext.inner.gp_regs;
|
||||
// unblock sig mask
|
||||
let sig_mask = ucontext.uc_sigmask;
|
||||
current.sig_mask().lock().unblock(sig_mask);
|
||||
posix_thread.sig_mask().lock().unblock(sig_mask);
|
||||
Ok(SyscallReturn::NoReturn)
|
||||
}
|
||||
|
28
src/services/libs/jinux-std/src/syscall/set_robust_list.rs
Normal file
@ -0,0 +1,28 @@
|
||||
use super::{SyscallReturn, SYS_SET_ROBUST_LIST};
|
||||
use crate::{
|
||||
log_syscall_entry,
|
||||
prelude::*,
|
||||
process::posix_thread::{posix_thread_ext::PosixThreadExt, robust_list::RobustListHead},
|
||||
util::read_val_from_user,
|
||||
};
|
||||
|
||||
pub fn sys_set_robust_list(robust_list_head_ptr: Vaddr, len: usize) -> Result<SyscallReturn> {
|
||||
log_syscall_entry!(SYS_SET_ROBUST_LIST);
|
||||
debug!(
|
||||
"robust list head ptr: 0x{:x}, len = {}",
|
||||
robust_list_head_ptr, len
|
||||
);
|
||||
if len != core::mem::size_of::<RobustListHead>() {
|
||||
return_errno_with_message!(
|
||||
Errno::EINVAL,
|
||||
"The len is not equal to the size of robust list head"
|
||||
);
|
||||
}
|
||||
let robust_list_head: RobustListHead = read_val_from_user(robust_list_head_ptr)?;
|
||||
debug!("{:x?}", robust_list_head);
|
||||
let current_thread = current_thread!();
|
||||
let posix_thread = current_thread.posix_thread();
|
||||
let mut robust_list = posix_thread.robust_list().lock();
|
||||
*robust_list = Some(robust_list_head);
|
||||
Ok(SyscallReturn::Return(0))
|
||||
}
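The EINVAL check above only accepts a len that exactly matches the size of the robust list head. A stand-alone sketch of that check, with a stand-in layout for RobustListHead (the field set mirrors the Linux robust_list_head but is an assumption here, not the kernel's definition):

use core::mem::size_of;

// Stand-in layout; on 64-bit Linux the real robust_list_head is also 24 bytes.
#[repr(C)]
struct RobustListHead {
    list: u64,            // pointer to the first entry of the robust futex list
    futex_offset: i64,    // offset of the futex word within each list entry
    list_op_pending: u64, // entry being operated on when the thread dies
}

fn check_len(len: usize) -> Result<(), &'static str> {
    if len != size_of::<RobustListHead>() {
        return Err("EINVAL: len does not equal the size of the robust list head");
    }
    Ok(())
}

fn main() {
    assert!(check_len(24).is_ok());
    assert!(check_len(16).is_err());
}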
|
22
src/services/libs/jinux-std/src/syscall/set_tid_address.rs
Normal file
@ -0,0 +1,22 @@
|
||||
use crate::process::posix_thread::posix_thread_ext::PosixThreadExt;
|
||||
use crate::{log_syscall_entry, prelude::*};
|
||||
|
||||
use super::SyscallReturn;
|
||||
use super::SYS_SET_TID_ADDRESS;
|
||||
|
||||
pub fn sys_set_tid_address(tidptr: Vaddr) -> Result<SyscallReturn> {
|
||||
log_syscall_entry!(SYS_SET_TID_ADDRESS);
|
||||
debug!("tidptr = 0x{:x}", tidptr);
|
||||
let current_thread = current_thread!();
|
||||
let posix_thread = current_thread.posix_thread();
|
||||
let mut clear_child_tid = posix_thread.clear_child_tid().lock();
|
||||
if *clear_child_tid != 0 {
|
||||
// According to the manual at https://man7.org/linux/man-pages/man2/set_tid_address.2.html,
// we need to write 0 to clear_child_tid and do a futex wake.
|
||||
todo!()
|
||||
} else {
|
||||
*clear_child_tid = tidptr;
|
||||
}
|
||||
let tid = current_thread.tid();
|
||||
Ok(SyscallReturn::Return(tid as _))
|
||||
}
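For context, the clear-and-wake behaviour that the comment above refers to happens when the thread exits: the kernel writes 0 to the registered address and wakes one futex waiter. A stand-alone sketch under that assumption; write_val_to_user and futex_wake here are illustrative stubs, not the real kernel functions.

type Vaddr = usize;

fn write_val_to_user(_addr: Vaddr, _val: &i32) { /* copy the value into user memory */ }
fn futex_wake(_addr: Vaddr, _max_waiters: usize) { /* wake threads waiting on the futex word */ }

// What the exit path eventually does with a registered clear_child_tid address.
fn on_thread_exit(clear_child_tid: Vaddr) {
    if clear_child_tid != 0 {
        write_val_to_user(clear_child_tid, &0);
        futex_wake(clear_child_tid, 1);
    }
}

fn main() {
    on_thread_exit(0);      // nothing registered, nothing to do
    on_thread_exit(0x1000); // would clear the tid word and wake a joining thread
}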
|
@ -1,7 +1,7 @@
|
||||
use crate::{
|
||||
log_syscall_entry,
|
||||
prelude::*,
|
||||
process::{process_group::ProcessGroup, table, Pgid, Pid},
|
||||
process::{process_group::ProcessGroup, process_table, Pgid, Pid},
|
||||
};
|
||||
|
||||
use super::{SyscallReturn, SYS_SETPGID};
|
||||
@ -23,18 +23,23 @@ pub fn sys_setpgid(pid: Pid, pgid: Pgid) -> Result<SyscallReturn> {
|
||||
}
|
||||
|
||||
// The process can only be moved to an existing group, or to a new group whose pgid equals its own pid.
|
||||
if pgid != pid && table::pgid_to_process_group(pgid).is_none() {
|
||||
if pgid != pid && process_table::pgid_to_process_group(pgid).is_none() {
|
||||
return_errno_with_message!(Errno::EPERM, "process group must exist");
|
||||
}
|
||||
|
||||
if let Some(new_process_group) = table::pgid_to_process_group(pgid) {
|
||||
// if the process already belongs to the process group
|
||||
if current.pgid() == pgid {
|
||||
return Ok(SyscallReturn::Return(0));
|
||||
}
|
||||
|
||||
if let Some(new_process_group) = process_table::pgid_to_process_group(pgid) {
|
||||
new_process_group.add_process(current.clone());
|
||||
current.set_process_group(Arc::downgrade(&new_process_group));
|
||||
} else {
|
||||
let new_process_group = Arc::new(ProcessGroup::new(current.clone()));
|
||||
new_process_group.add_process(current.clone());
|
||||
current.set_process_group(Arc::downgrade(&new_process_group));
|
||||
table::add_process_group(new_process_group);
|
||||
process_table::add_process_group(new_process_group);
|
||||
}
|
||||
|
||||
Ok(SyscallReturn::Return(0))
|
||||
|
16
src/services/libs/jinux-std/src/syscall/stat.rs
Normal file
@ -0,0 +1,16 @@
|
||||
use super::SYS_STAT;
|
||||
use crate::syscall::constants::MAX_FILENAME_LEN;
|
||||
use crate::util::read_cstring_from_user;
|
||||
use crate::{log_syscall_entry, prelude::*};
|
||||
|
||||
use super::SyscallReturn;
|
||||
|
||||
pub fn sys_stat(filename_ptr: Vaddr, stat_buf_ptr: Vaddr) -> Result<SyscallReturn> {
|
||||
log_syscall_entry!(SYS_STAT);
|
||||
let filename = read_cstring_from_user(filename_ptr, MAX_FILENAME_LEN)?;
|
||||
debug!(
|
||||
"filename = {:?}, stat_buf_ptr = 0x{:x}",
|
||||
filename, stat_buf_ptr
|
||||
);
|
||||
return_errno_with_message!(Errno::ENOSYS, "Stat is unimplemented");
|
||||
}
|
@ -1,8 +1,10 @@
|
||||
use crate::process::posix_thread::posix_thread_ext::PosixThreadExt;
|
||||
use crate::thread::{thread_table, Tid};
|
||||
use crate::{log_syscall_entry, prelude::*};
|
||||
|
||||
use crate::process::signal::sig_num::SigNum;
|
||||
use crate::process::signal::signals::user::{UserSignal, UserSignalKind};
|
||||
use crate::process::{table, Pgid, Pid};
|
||||
use crate::process::Pid;
|
||||
use crate::syscall::SYS_TGKILL;
|
||||
|
||||
use super::SyscallReturn;
|
||||
@ -10,20 +12,21 @@ use super::SyscallReturn;
|
||||
/// tgkill sends a signal to a thread with pid as its thread id, and tgid as its thread group id.
/// Since jinux only supports single-threaded processes now, tgkill will send the signal to the process with pid as its process id,
/// and tgid as its process group id.
|
||||
pub fn sys_tgkill(tgid: Pgid, pid: Pid, sig_num: u8) -> Result<SyscallReturn> {
|
||||
pub fn sys_tgkill(tgid: Pid, tid: Tid, sig_num: u8) -> Result<SyscallReturn> {
|
||||
log_syscall_entry!(SYS_TGKILL);
|
||||
let sig_num = SigNum::from_u8(sig_num);
|
||||
debug!("tgid = {}, pid = {}, sig_num = {:?}", tgid, pid, sig_num);
|
||||
let target_process =
|
||||
table::pid_to_process(pid).ok_or(Error::with_message(Errno::EINVAL, "Invalid pid"))?;
|
||||
let pgid = target_process.pgid();
|
||||
if pgid != tgid {
|
||||
info!("tgid = {}, pid = {}, sig_num = {:?}", tgid, tid, sig_num);
|
||||
let target_thread = thread_table::tid_to_thread(tid)
|
||||
.ok_or(Error::with_message(Errno::EINVAL, "Invalid pid"))?;
|
||||
let posix_thread = target_thread.posix_thread();
|
||||
let pid = posix_thread.process().pid();
|
||||
if pid != tgid {
|
||||
return_errno_with_message!(
|
||||
Errno::EINVAL,
|
||||
"the combination of tgid and pid is not valid"
|
||||
);
|
||||
}
|
||||
if target_process.status().lock().is_zombie() {
|
||||
if target_thread.status().lock().is_exited() {
|
||||
return Ok(SyscallReturn::Return(0));
|
||||
}
|
||||
let signal = {
|
||||
@ -36,7 +39,7 @@ pub fn sys_tgkill(tgid: Pgid, pid: Pid, sig_num: u8) -> Result<SyscallReturn> {
|
||||
src_uid,
|
||||
))
|
||||
};
|
||||
let mut sig_queue = target_process.sig_queues().lock();
|
||||
let mut sig_queue = posix_thread.sig_queues().lock();
|
||||
sig_queue.enqueue(signal);
|
||||
Ok(SyscallReturn::Return(0))
|
||||
}
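As a small illustration of the tgid/tid consistency rule checked above, a stand-alone sketch (FakeThread is a placeholder type, not the kernel's Thread):

struct FakeThread {
    tid: i32,
    pid: i32, // id of the owning process, i.e. the thread group id
}

// tgkill only delivers the signal if the target thread's owning process matches tgid.
fn tgkill_combination_valid(tgid: i32, thread: &FakeThread) -> bool {
    thread.pid == tgid
}

fn main() {
    let t = FakeThread { tid: 42, pid: 7 };
    assert_eq!(t.tid, 42);
    assert!(tgkill_combination_valid(7, &t));
    assert!(!tgkill_combination_valid(8, &t)); // would return EINVAL
}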
|
||||
|
@ -12,7 +12,7 @@ const STDERR: u64 = 2;
|
||||
pub fn sys_write(
|
||||
fd: FileDescripter,
|
||||
user_buf_ptr: Vaddr,
|
||||
user_buf_len: u64,
|
||||
user_buf_len: usize,
|
||||
) -> Result<SyscallReturn> {
|
||||
log_syscall_entry!(SYS_WRITE);
|
||||
debug!(
|
||||
@ -23,8 +23,9 @@ pub fn sys_write(
|
||||
let current = current!();
|
||||
let file_table = current.file_table().lock();
|
||||
let file = file_table.get_file(fd)?;
|
||||
let mut buffer = vec![0u8; user_buf_len as usize];
|
||||
let mut buffer = vec![0u8; user_buf_len];
|
||||
read_bytes_from_user(user_buf_ptr as usize, &mut buffer)?;
|
||||
debug!("write content = {:?}", buffer);
|
||||
let write_len = file.write(&buffer)?;
|
||||
Ok(SyscallReturn::Return(write_len as _))
|
||||
}
|
||||
|
@ -12,7 +12,7 @@ pub fn handle_exception(context: &mut CpuContext) {
|
||||
let trap_info = context.trap_information.clone();
|
||||
log_trap_info(&trap_info);
|
||||
let current = current!();
|
||||
let root_vmar = current.root_vmar().unwrap();
|
||||
let root_vmar = current.root_vmar();
|
||||
|
||||
match trap_info.id {
|
||||
PAGE_FAULT => handle_page_fault(&trap_info),
|
||||
@ -31,13 +31,18 @@ fn handle_page_fault(trap_info: &TrapInformation) {
|
||||
if not_present || write {
|
||||
// If page is not present or due to write access, we should ask the vmar try to commit this page
|
||||
let current = current!();
|
||||
let root_vmar = current.root_vmar().unwrap();
|
||||
let root_vmar = current.root_vmar();
|
||||
let page_fault_addr = trap_info.cr2 as Vaddr;
|
||||
debug!(
|
||||
trace!(
|
||||
"Page fault address: 0x{:x}, write access: {}",
|
||||
page_fault_addr, write
|
||||
page_fault_addr,
|
||||
write
|
||||
);
|
||||
if let Err(_) = root_vmar.handle_page_fault(page_fault_addr, not_present, write) {
|
||||
if let Err(e) = root_vmar.handle_page_fault(page_fault_addr, not_present, write) {
|
||||
error!(
|
||||
"page fault handler failed: addr: 0x{:x}, err: {:?}",
|
||||
page_fault_addr, e
|
||||
);
|
||||
generate_fault_signal(trap_info);
|
||||
} else {
|
||||
// ensure page fault is successfully handled
|
||||
@ -60,7 +65,7 @@ fn generate_fault_signal(trap_info: &TrapInformation) {
|
||||
|
||||
macro_rules! log_trap_common {
|
||||
($exception_name: ident, $trap_info: ident) => {
|
||||
debug!(
|
||||
trace!(
|
||||
"[Trap][{}][err = {}]",
|
||||
stringify!($exception_name),
|
||||
$trap_info.err
|
||||
@ -85,7 +90,7 @@ fn log_trap_info(trap_info: &TrapInformation) {
|
||||
STACK_SEGMENT_FAULT => log_trap_common!(STACK_SEGMENT_FAULT, trap_info),
|
||||
GENERAL_PROTECTION_FAULT => log_trap_common!(GENERAL_PROTECTION_FAULT, trap_info),
|
||||
PAGE_FAULT => {
|
||||
debug!(
|
||||
trace!(
|
||||
"[Trap][{}][page fault addr = 0x{:x}, err = {}]",
|
||||
stringify!(PAGE_FAULT),
|
||||
trap_info.cr2,
|
52
src/services/libs/jinux-std/src/thread/kernel_thread.rs
Normal file
@ -0,0 +1,52 @@
|
||||
use jinux_frame::task::Task;
|
||||
|
||||
use crate::{prelude::*, process::Process};
|
||||
|
||||
use super::{allocate_tid, status::ThreadStatus, thread_table, Thread};
|
||||
pub struct KernelThread {
|
||||
process: Weak<Process>,
|
||||
}
|
||||
|
||||
impl KernelThread {
|
||||
pub fn new(process: Weak<Process>) -> Self {
|
||||
Self { process }
|
||||
}
|
||||
|
||||
pub fn process(&self) -> Arc<Process> {
|
||||
self.process.upgrade().unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
pub trait KernelThreadExt {
|
||||
fn is_kernel_thread(&self) -> bool;
|
||||
fn kernel_thread(&self) -> &KernelThread;
|
||||
fn new_kernel_thread<F>(task_fn: F, process: Weak<Process>) -> Arc<Self>
|
||||
where
|
||||
F: Fn() + Send + Sync + 'static;
|
||||
}
|
||||
|
||||
impl KernelThreadExt for Thread {
|
||||
fn is_kernel_thread(&self) -> bool {
|
||||
self.data().downcast_ref::<KernelThread>().is_some()
|
||||
}
|
||||
|
||||
fn kernel_thread(&self) -> &KernelThread {
|
||||
self.data().downcast_ref::<KernelThread>().unwrap()
|
||||
}
|
||||
|
||||
fn new_kernel_thread<F>(task_fn: F, process: Weak<Process>) -> Arc<Self>
|
||||
where
|
||||
F: Fn() + Send + Sync + 'static,
|
||||
{
|
||||
let tid = allocate_tid();
|
||||
let thread = Arc::new_cyclic(|thread_ref| {
|
||||
let weak_thread = thread_ref.clone();
let task = Task::new(task_fn, weak_thread, None).unwrap();
|
||||
let status = ThreadStatus::Init;
|
||||
let kernel_thread = KernelThread::new(process);
|
||||
Thread::new(tid, task, kernel_thread, status)
|
||||
});
|
||||
thread_table::add_thread(thread.clone());
|
||||
thread
|
||||
}
|
||||
}
|
98
src/services/libs/jinux-std/src/thread/mod.rs
Normal file
@ -0,0 +1,98 @@
|
||||
//! Posix thread implementation
|
||||
|
||||
use core::{
|
||||
any::Any,
|
||||
sync::atomic::{AtomicI32, Ordering},
|
||||
};
|
||||
|
||||
use jinux_frame::task::Task;
|
||||
|
||||
use crate::prelude::*;
|
||||
|
||||
use self::status::ThreadStatus;
|
||||
|
||||
pub mod exception;
|
||||
pub mod kernel_thread;
|
||||
pub mod status;
|
||||
pub mod task;
|
||||
pub mod thread_table;
|
||||
|
||||
pub type Tid = i32;
|
||||
|
||||
static TID_ALLOCATOR: AtomicI32 = AtomicI32::new(0);
|
||||
|
||||
/// A thread is a wrapper on top of task.
|
||||
pub struct Thread {
|
||||
// immutable part
|
||||
/// Thread id
|
||||
tid: Tid,
|
||||
/// Low-level info
|
||||
task: Arc<Task>,
|
||||
/// Data: Posix thread info/Kernel thread Info
|
||||
data: Box<dyn Send + Sync + Any>,
|
||||
|
||||
// mutable part
|
||||
status: Mutex<ThreadStatus>,
|
||||
}
|
||||
|
||||
impl Thread {
|
||||
/// Never call this function directly
|
||||
pub fn new(
|
||||
tid: Tid,
|
||||
task: Arc<Task>,
|
||||
data: impl Send + Sync + Any,
|
||||
status: ThreadStatus,
|
||||
) -> Self {
|
||||
Thread {
|
||||
tid,
|
||||
task,
|
||||
data: Box::new(data),
|
||||
status: Mutex::new(status),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn current() -> Arc<Self> {
|
||||
let task = Task::current();
|
||||
let thread = task
|
||||
.data()
|
||||
.downcast_ref::<Weak<Thread>>()
|
||||
.expect("[Internal Error] task data should points to weak<thread>");
|
||||
thread
|
||||
.upgrade()
|
||||
.expect("[Internal Error] current thread cannot be None")
|
||||
}
|
||||
|
||||
/// Add the inner task to the scheduler's run queue. Note that this does not mean the thread will run at once.
|
||||
pub fn run(&self) {
|
||||
self.status.lock().set_running();
|
||||
self.task.run();
|
||||
}
|
||||
|
||||
pub fn exit(&self) {
|
||||
let mut status = self.status.lock();
|
||||
if !status.is_exited() {
|
||||
status.set_exited();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn status(&self) -> &Mutex<ThreadStatus> {
|
||||
&self.status
|
||||
}
|
||||
|
||||
pub fn yield_now() {
|
||||
Task::yield_now()
|
||||
}
|
||||
|
||||
pub fn tid(&self) -> Tid {
|
||||
self.tid
|
||||
}
|
||||
|
||||
pub fn data(&self) -> &Box<dyn Send + Sync + Any> {
|
||||
&self.data
|
||||
}
|
||||
}
|
||||
|
||||
/// Allocate a new tid for a new thread
|
||||
pub fn allocate_tid() -> Tid {
|
||||
TID_ALLOCATOR.fetch_add(1, Ordering::SeqCst)
|
||||
}
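The Thread::current lookup above relies on storing a Weak<Thread> in the task's type-erased data slot and downcasting it back. A stand-alone sketch of that pattern using std types only; FakeTask stands in for jinux_frame's Task.

use std::any::Any;
use std::sync::{Arc, Weak};

struct FakeTask {
    data: Box<dyn Any + Send + Sync>,
}

struct Thread {
    tid: i32,
    task: FakeTask,
}

fn current_thread(task: &FakeTask) -> Arc<Thread> {
    task.data
        .downcast_ref::<Weak<Thread>>()
        .expect("task data should point to Weak<Thread>")
        .upgrade()
        .expect("current thread cannot be None")
}

fn main() {
    // Arc::new_cyclic lets the thread hand its own weak reference to the task it owns.
    let thread = Arc::new_cyclic(|weak: &Weak<Thread>| Thread {
        tid: 1,
        task: FakeTask { data: Box::new(weak.clone()) },
    });
    assert_eq!(current_thread(&thread.task).tid, 1);
}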
|
35
src/services/libs/jinux-std/src/thread/status.rs
Normal file
@ -0,0 +1,35 @@
|
||||
#[derive(Debug, PartialEq, Clone, Copy)]
|
||||
pub enum ThreadStatus {
|
||||
Init,
|
||||
Running,
|
||||
Exited,
|
||||
Stopped,
|
||||
}
|
||||
|
||||
impl ThreadStatus {
|
||||
pub fn is_running(&self) -> bool {
|
||||
*self == ThreadStatus::Running
|
||||
}
|
||||
|
||||
pub fn is_exited(&self) -> bool {
|
||||
*self == ThreadStatus::Exited
|
||||
}
|
||||
|
||||
pub fn is_stopped(&self) -> bool {
|
||||
*self == ThreadStatus::Stopped
|
||||
}
|
||||
|
||||
pub fn set_running(&mut self) {
|
||||
debug_assert!(!self.is_exited());
|
||||
*self = ThreadStatus::Running;
|
||||
}
|
||||
|
||||
pub fn set_stopped(&mut self) {
|
||||
debug_assert!(!self.is_exited());
|
||||
*self = ThreadStatus::Stopped;
|
||||
}
|
||||
|
||||
pub fn set_exited(&mut self) {
|
||||
*self = ThreadStatus::Exited;
|
||||
}
|
||||
}
|
59
src/services/libs/jinux-std/src/thread/task.rs
Normal file
@ -0,0 +1,59 @@
|
||||
use jinux_frame::{
|
||||
cpu::CpuContext,
|
||||
task::Task,
|
||||
user::{UserEvent, UserMode, UserSpace},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
prelude::*, process::signal::handle_pending_signal, syscall::handle_syscall,
|
||||
thread::exception::handle_exception,
|
||||
};
|
||||
|
||||
use super::Thread;
|
||||
|
||||
/// Create a new user task with the given user space and a weak reference to its thread.
|
||||
pub fn create_new_user_task(user_space: Arc<UserSpace>, thread_ref: Weak<Thread>) -> Arc<Task> {
|
||||
fn user_task_entry() {
|
||||
let cur = Task::current();
|
||||
let user_space = cur.user_space().expect("user task should have user space");
|
||||
let mut user_mode = UserMode::new(user_space);
|
||||
debug!("[Task entry] rip = 0x{:x}", user_space.cpu_ctx.gp_regs.rip);
|
||||
debug!("[Task entry] rsp = 0x{:x}", user_space.cpu_ctx.gp_regs.rsp);
|
||||
debug!("[Task entry] rax = 0x{:x}", user_space.cpu_ctx.gp_regs.rax);
|
||||
loop {
|
||||
let user_event = user_mode.execute();
|
||||
let context = user_mode.context_mut();
|
||||
// handle user event:
|
||||
handle_user_event(user_event, context);
|
||||
let current_thread = current_thread!();
|
||||
// Should we do this comparison before handling signals?
|
||||
if current_thread.status().lock().is_exited() {
|
||||
break;
|
||||
}
|
||||
handle_pending_signal(context).unwrap();
|
||||
if current_thread.status().lock().is_exited() {
|
||||
debug!("exit due to signal");
|
||||
break;
|
||||
}
|
||||
// If current is suspended, wait for a signal to wake up self
|
||||
while current_thread.status().lock().is_stopped() {
|
||||
Thread::yield_now();
|
||||
debug!("{} is suspended.", current_thread.tid());
|
||||
handle_pending_signal(context).unwrap();
|
||||
}
|
||||
}
|
||||
debug!("exit user loop");
|
||||
// FIXME: This is a workaround: exit in the kernel task entry may not be called. Why does this happen?
|
||||
Task::current().exit();
|
||||
}
|
||||
|
||||
Task::new(user_task_entry, thread_ref, Some(user_space)).expect("spawn task failed")
|
||||
}
|
||||
|
||||
fn handle_user_event(user_event: UserEvent, context: &mut CpuContext) {
|
||||
match user_event {
|
||||
UserEvent::Syscall => handle_syscall(context),
|
||||
UserEvent::Fault => todo!(),
|
||||
UserEvent::Exception => handle_exception(context),
|
||||
}
|
||||
}
|
20
src/services/libs/jinux-std/src/thread/thread_table.rs
Normal file
@ -0,0 +1,20 @@
|
||||
use crate::prelude::*;
|
||||
|
||||
use super::{Thread, Tid};
|
||||
|
||||
lazy_static! {
|
||||
static ref THREAD_TABLE: Mutex<BTreeMap<Tid, Arc<Thread>>> = Mutex::new(BTreeMap::new());
|
||||
}
|
||||
|
||||
pub fn add_thread(thread: Arc<Thread>) {
|
||||
let tid = thread.tid();
|
||||
THREAD_TABLE.lock().insert(tid, thread);
|
||||
}
|
||||
|
||||
pub fn remove_thread(tid: Tid) {
|
||||
THREAD_TABLE.lock().remove(&tid);
|
||||
}
|
||||
|
||||
pub fn tid_to_thread(tid: Tid) -> Option<Arc<Thread>> {
|
||||
THREAD_TABLE.lock().get(&tid).map(|thread| thread.clone())
|
||||
}
|
66
src/services/libs/jinux-std/src/time/mod.rs
Normal file
@ -0,0 +1,66 @@
|
||||
#![allow(non_camel_case_types)]
|
||||
use core::time::Duration;
|
||||
|
||||
use crate::prelude::*;
|
||||
|
||||
pub type clockid_t = i32;
|
||||
pub type time_t = i64;
|
||||
pub type suseconds_t = i64;
|
||||
pub type clock_t = i64;
|
||||
|
||||
#[derive(Debug, Copy, Clone, Pod)]
|
||||
#[repr(i32)]
|
||||
pub enum ClockID {
|
||||
CLOCK_REALTIME = 0,
|
||||
CLOCK_MONOTONIC = 1,
|
||||
CLOCK_PROCESS_CPUTIME_ID = 2,
|
||||
CLOCK_THREAD_CPUTIME_ID = 3,
|
||||
CLOCK_MONOTONIC_RAW = 4,
|
||||
CLOCK_REALTIME_COARSE = 5,
|
||||
CLOCK_MONOTONIC_COARSE = 6,
|
||||
CLOCK_BOOTTIME = 7,
|
||||
}
|
||||
|
||||
impl TryFrom<clockid_t> for ClockID {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(value: clockid_t) -> Result<Self> {
|
||||
Ok(match value as i32 {
|
||||
0 => ClockID::CLOCK_REALTIME,
|
||||
1 => ClockID::CLOCK_MONOTONIC,
|
||||
2 => ClockID::CLOCK_PROCESS_CPUTIME_ID,
|
||||
3 => ClockID::CLOCK_THREAD_CPUTIME_ID,
|
||||
4 => ClockID::CLOCK_MONOTONIC_RAW,
|
||||
5 => ClockID::CLOCK_REALTIME_COARSE,
|
||||
6 => ClockID::CLOCK_MONOTONIC_COARSE,
|
||||
7 => ClockID::CLOCK_BOOTTIME,
|
||||
_ => return_errno_with_message!(Errno::EINVAL, "invalid clockid"),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Debug, Default, Copy, Clone, Pod)]
|
||||
|
||||
pub struct timespec_t {
|
||||
sec: time_t,
|
||||
nsec: i64,
|
||||
}
|
||||
|
||||
impl From<Duration> for timespec_t {
|
||||
fn from(duration: Duration) -> timespec_t {
|
||||
let sec = duration.as_secs() as time_t;
|
||||
let nsec = duration.subsec_nanos() as i64;
|
||||
debug_assert!(sec >= 0); // nsec >= 0 always holds
|
||||
timespec_t { sec, nsec }
|
||||
}
|
||||
}
|
||||
|
||||
impl From<timespec_t> for Duration {
|
||||
fn from(timespec: timespec_t) -> Self {
|
||||
Duration::new(timespec.sec as u64, timespec.nsec as u32)
|
||||
}
|
||||
}
|
||||
|
||||
/// The various flags for setting POSIX.1b interval timers:
|
||||
pub const TIMER_ABSTIME: i32 = 0x01;
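A quick round-trip check of the Duration/timespec conversions defined above, using a local mirror of timespec_t so the sketch stands alone:

use std::time::Duration;

#[derive(Debug, PartialEq)]
struct TimespecT {
    sec: i64,  // mirrors timespec_t::sec
    nsec: i64, // mirrors timespec_t::nsec
}

impl From<Duration> for TimespecT {
    fn from(d: Duration) -> Self {
        TimespecT { sec: d.as_secs() as i64, nsec: d.subsec_nanos() as i64 }
    }
}

impl From<TimespecT> for Duration {
    fn from(t: TimespecT) -> Self {
        Duration::new(t.sec as u64, t.nsec as u32)
    }
}

fn main() {
    let d = Duration::new(3, 500_000_000);
    let ts = TimespecT::from(d);
    assert_eq!(ts, TimespecT { sec: 3, nsec: 500_000_000 });
    assert_eq!(Duration::from(ts), d);
}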
|
@ -1,58 +1,256 @@
|
||||
use crate::{prelude::*, process::Pgid};
|
||||
use crate::fs::events::IoEvents;
|
||||
use crate::process::signal::constants::{SIGINT, SIGQUIT};
|
||||
use crate::{
|
||||
prelude::*,
|
||||
process::{process_table, signal::signals::kernel::KernelSignal, Pgid},
|
||||
};
|
||||
use jinux_frame::sync::WaitQueue;
|
||||
use ringbuffer::{ConstGenericRingBuffer, RingBuffer, RingBufferRead, RingBufferWrite};
|
||||
|
||||
use super::termio::KernelTermios;
|
||||
use super::termio::{KernelTermios, CC_C_CHAR};
|
||||
|
||||
// This implementation refers to the Linux implementation:
|
||||
// https://elixir.bootlin.com/linux/latest/source/include/linux/tty_ldisc.h
|
||||
|
||||
const BUFFER_CAPACITY: usize = 4096;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct LineDiscipline {
|
||||
/// The write buffer
|
||||
buffer: ConstGenericRingBuffer<u8, BUFFER_CAPACITY>,
|
||||
/// current line
|
||||
current_line: CurrentLine,
|
||||
/// The read buffer
|
||||
read_buffer: Mutex<ConstGenericRingBuffer<u8, BUFFER_CAPACITY>>,
|
||||
/// The foreground process group
|
||||
foreground: Option<Pgid>,
|
||||
/// termios
|
||||
termios: KernelTermios,
|
||||
/// wait until self is readable
|
||||
read_wait_queue: WaitQueue,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct CurrentLine {
|
||||
buffer: ConstGenericRingBuffer<u8, BUFFER_CAPACITY>,
|
||||
}
|
||||
|
||||
impl CurrentLine {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
buffer: ConstGenericRingBuffer::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// read all bytes inside current line and clear current line
|
||||
pub fn drain(&mut self) -> Vec<u8> {
|
||||
self.buffer.drain().collect()
|
||||
}
|
||||
|
||||
pub fn push_char(&mut self, char: u8) {
|
||||
// What should we do if line is full?
|
||||
debug_assert!(!self.is_full());
|
||||
self.buffer.push(char);
|
||||
}
|
||||
|
||||
pub fn backspace(&mut self) {
|
||||
self.buffer.dequeue();
|
||||
}
|
||||
|
||||
pub fn is_full(&self) -> bool {
|
||||
self.buffer.is_full()
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.buffer.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
impl LineDiscipline {
|
||||
/// create a new line discipline
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
buffer: ConstGenericRingBuffer::new(),
|
||||
current_line: CurrentLine::new(),
|
||||
read_buffer: Mutex::new(ConstGenericRingBuffer::new()),
|
||||
foreground: None,
|
||||
termios: KernelTermios::default(),
|
||||
read_wait_queue: WaitQueue::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// push char to buffer
|
||||
/// Push a char to the line discipline. This function should be called in the input interrupt handler.
|
||||
pub fn push_char(&mut self, mut item: u8) {
|
||||
if self.termios.is_cooked_mode() {
|
||||
todo!("We only support raw mode now. Cooked mode will be supported further.");
|
||||
}
|
||||
if self.termios.contains_icrnl() {
|
||||
if item == b'\r' {
|
||||
item = b'\n'
|
||||
}
|
||||
}
|
||||
self.buffer.push(item);
|
||||
if self.termios.is_canonical_mode() {
|
||||
if item == *self.termios.get_special_char(CC_C_CHAR::VINTR) {
|
||||
// type Ctrl + C, signal SIGINT
|
||||
if self.termios.contains_isig() {
|
||||
if let Some(fg) = self.foreground {
|
||||
let kernel_signal = KernelSignal::new(SIGINT);
|
||||
let fg_group = process_table::pgid_to_process_group(fg).unwrap();
|
||||
fg_group.kernel_signal(kernel_signal);
|
||||
}
|
||||
}
|
||||
} else if item == *self.termios.get_special_char(CC_C_CHAR::VQUIT) {
|
||||
// type Ctrl + \, signal SIGQUIT
|
||||
if self.termios.contains_isig() {
|
||||
if let Some(fg) = self.foreground {
|
||||
let kernel_signal = KernelSignal::new(SIGQUIT);
|
||||
let fg_group = process_table::pgid_to_process_group(fg).unwrap();
|
||||
fg_group.kernel_signal(kernel_signal);
|
||||
}
|
||||
}
|
||||
} else if item == *self.termios.get_special_char(CC_C_CHAR::VKILL) {
|
||||
// erase current line
|
||||
self.current_line.drain();
|
||||
} else if item == *self.termios.get_special_char(CC_C_CHAR::VERASE) {
|
||||
// type backspace
|
||||
if !self.current_line.is_empty() {
|
||||
self.current_line.backspace();
|
||||
}
|
||||
} else if meet_new_line(item, &self.get_termios()) {
|
||||
// A new line was met. We currently add the item to the buffer;
// when reading the content, the item should be skipped if it is EOF.
|
||||
self.current_line.push_char(item);
|
||||
let current_line_chars = self.current_line.drain();
|
||||
for char in current_line_chars {
|
||||
self.read_buffer.lock().push(char);
|
||||
}
|
||||
} else if item >= 0x20 && item < 0x7f {
|
||||
// printable character
|
||||
self.current_line.push_char(item);
|
||||
}
|
||||
} else {
|
||||
// raw mode
|
||||
self.read_buffer.lock().push(item);
|
||||
// debug!("push char: {}", char::from(item))
|
||||
}
|
||||
|
||||
if self.termios.contain_echo() {
|
||||
self.output_char(item);
|
||||
}
|
||||
|
||||
if self.is_readable() {
|
||||
self.read_wait_queue.wake_all();
|
||||
}
|
||||
}
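A stripped-down sketch of the canonical-mode flow in push_char above: printable bytes accumulate in the current line, and a newline flushes the whole line into the read buffer that readers consume (special characters such as VINTR and VERASE are ignored here for brevity):

struct Ldisc {
    current_line: Vec<u8>,
    read_buffer: Vec<u8>,
}

impl Ldisc {
    fn push_char(&mut self, item: u8) {
        if item == b'\n' {
            // End of line: make the whole line visible to readers.
            self.current_line.push(item);
            self.read_buffer.append(&mut self.current_line);
        } else if (0x20..0x7f).contains(&item) {
            // Printable character: keep buffering the current line.
            self.current_line.push(item);
        }
    }
}

fn main() {
    let mut ldisc = Ldisc { current_line: Vec::new(), read_buffer: Vec::new() };
    for &b in b"ls" {
        ldisc.push_char(b);
    }
    assert!(ldisc.read_buffer.is_empty()); // nothing readable until the line ends
    ldisc.push_char(b'\n');
    assert_eq!(ldisc.read_buffer, b"ls\n".to_vec());
}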
|
||||
|
||||
/// whether self is readable
|
||||
fn is_readable(&self) -> bool {
|
||||
self.read_buffer.lock().len() > 0
|
||||
}
|
||||
|
||||
// TODO: respect output flags
|
||||
fn output_char(&self, item: u8) {
|
||||
if 0x20 <= item && item < 0x7f {
|
||||
let ch = char::from(item);
|
||||
print!("{}", ch);
|
||||
}
|
||||
if item == *self.termios.get_special_char(CC_C_CHAR::VERASE) {
|
||||
// write a space to overwrite current character
|
||||
let bytes: [u8; 3] = [b'\x08', b' ', b'\x08'];
|
||||
let backspace = core::str::from_utf8(&bytes).unwrap();
|
||||
print!("{}", backspace);
|
||||
}
|
||||
if self.termios.contains_echo_ctl() {
|
||||
// The unprintable chars between 1-31 are mapped to ctrl characters between 65-95.
|
||||
// e.g., 0x3 is mapped to 0x43, which is C. So, we will print ^C when 0x3 is met.
|
||||
if 0 < item && item < 0x20 {
|
||||
let ctrl_char_ascii = item + 0x40;
|
||||
let ctrl_char = char::from(ctrl_char_ascii);
|
||||
print!("^{ctrl_char}");
|
||||
}
|
||||
}
|
||||
}
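The ECHOCTL branch above maps control bytes to caret notation. A tiny stand-alone sketch of that mapping:

// Control bytes 0x01..0x1f echo as '^' plus the character 0x40 higher,
// so 0x03 (Ctrl+C) echoes as "^C".
fn echo_ctl(item: u8) -> Option<String> {
    if item > 0 && item < 0x20 {
        Some(format!("^{}", char::from(item + 0x40)))
    } else {
        None
    }
}

fn main() {
    assert_eq!(echo_ctl(0x03).as_deref(), Some("^C"));
    assert_eq!(echo_ctl(b'a'), None); // printable characters are echoed as-is
}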
|
||||
|
||||
/// read all bytes buffered to dst, return the actual read length.
|
||||
pub fn read(&mut self, dst: &mut [u8]) -> Result<usize> {
|
||||
let len = self.buffer.len();
|
||||
let read_len = len.min(dst.len());
|
||||
for i in 0..read_len {
|
||||
if let Some(content) = self.buffer.dequeue() {
|
||||
dst[i] = content;
|
||||
let vmin = *self.termios.get_special_char(CC_C_CHAR::VMIN);
|
||||
let vtime = *self.termios.get_special_char(CC_C_CHAR::VTIME);
|
||||
let read_len: usize = self.read_wait_queue.wait_until(|| {
|
||||
// If the current process does not belong to the foreground process group,
// block until the current process becomes foreground.
|
||||
if !self.current_belongs_to_foreground() {
|
||||
warn!("current process does not belong to foreground process group");
|
||||
return None;
|
||||
}
|
||||
let len = self.read_buffer.lock().len();
|
||||
let max_read_len = len.min(dst.len());
|
||||
if vmin == 0 && vtime == 0 {
|
||||
// poll read
|
||||
return self.poll_read(dst);
|
||||
}
|
||||
if vmin > 0 && vtime == 0 {
|
||||
// block read
|
||||
return self.block_read(dst, vmin);
|
||||
}
|
||||
if vmin == 0 && vtime > 0 {
|
||||
todo!()
|
||||
}
|
||||
if vmin > 0 && vtime > 0 {
|
||||
todo!()
|
||||
}
|
||||
unreachable!()
|
||||
});
|
||||
Ok(read_len)
|
||||
}
|
||||
|
||||
pub fn poll(&self) -> IoEvents {
|
||||
if self.is_empty() {
|
||||
IoEvents::empty()
|
||||
} else {
|
||||
IoEvents::POLLIN
|
||||
}
|
||||
}
|
||||
|
||||
/// returns immediately with the lesser of the number of bytes available or the number of bytes requested.
|
||||
/// If no bytes are available, completes immediately, returning 0.
|
||||
fn poll_read(&self, dst: &mut [u8]) -> Option<usize> {
|
||||
let mut buffer = self.read_buffer.lock();
|
||||
let len = buffer.len();
|
||||
let max_read_len = len.min(dst.len());
|
||||
if max_read_len == 0 {
|
||||
return Some(0);
|
||||
}
|
||||
let mut read_len = 0;
|
||||
for i in 0..max_read_len {
|
||||
if let Some(next_char) = buffer.dequeue() {
|
||||
if self.termios.is_canonical_mode() {
|
||||
// canonical mode, read until meet new line
|
||||
if meet_new_line(next_char, self.get_termios()) {
|
||||
if !should_not_be_read(next_char, self.get_termios()) {
|
||||
dst[i] = next_char;
|
||||
read_len += 1;
|
||||
}
|
||||
break;
|
||||
} else {
|
||||
dst[i] = next_char;
|
||||
read_len += 1;
|
||||
}
|
||||
} else {
|
||||
// raw mode
|
||||
// FIXME: avoid additional bounds check
|
||||
dst[i] = next_char;
|
||||
read_len += 1;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(read_len)
|
||||
|
||||
Some(read_len)
|
||||
}
|
||||
|
||||
// The read() blocks until the lesser of the number of bytes requested or
|
||||
// MIN bytes are available, and returns the lesser of the two values.
|
||||
pub fn block_read(&self, dst: &mut [u8], vmin: u8) -> Option<usize> {
|
||||
let min_read_len = (vmin as usize).min(dst.len());
|
||||
let buffer_len = self.read_buffer.lock().len();
|
||||
if buffer_len < min_read_len {
|
||||
return None;
|
||||
}
|
||||
return self.poll_read(&mut dst[..min_read_len]);
|
||||
}
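The read path above dispatches on the four VMIN/VTIME combinations of termios non-canonical mode; only the first two are implemented so far. A stand-alone sketch of that dispatch:

#[derive(Debug, PartialEq)]
enum ReadStrategy {
    Poll,            // VMIN == 0, VTIME == 0: return whatever is buffered, possibly 0 bytes
    BlockForMin,     // VMIN > 0,  VTIME == 0: block until at least MIN bytes are buffered
    ReadWithTimeout, // VMIN == 0, VTIME > 0:  wait up to the timeout for a single byte (todo!)
    InterbyteTimer,  // VMIN > 0,  VTIME > 0:  interbyte timer between received bytes (todo!)
}

fn strategy(vmin: u8, vtime: u8) -> ReadStrategy {
    match (vmin, vtime) {
        (0, 0) => ReadStrategy::Poll,
        (_, 0) => ReadStrategy::BlockForMin,
        (0, _) => ReadStrategy::ReadWithTimeout,
        (_, _) => ReadStrategy::InterbyteTimer,
    }
}

fn main() {
    assert_eq!(strategy(1, 0), ReadStrategy::BlockForMin); // the default c_cc (VMIN = 1, VTIME = 0)
    assert_eq!(strategy(0, 0), ReadStrategy::Poll);
}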
|
||||
|
||||
/// write bytes to buffer, if flush to console, then write the content to console
|
||||
@ -60,9 +258,27 @@ impl LineDiscipline {
|
||||
todo!()
|
||||
}
|
||||
|
||||
/// whether the current process belongs to foreground process group
|
||||
fn current_belongs_to_foreground(&self) -> bool {
|
||||
let current = current!();
|
||||
if let Some(fg_pgid) = self.foreground {
|
||||
if let Some(process_group) = process_table::pgid_to_process_group(fg_pgid) {
|
||||
if process_group.contains_process(current.pid()) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
/// set foreground process group
|
||||
pub fn set_fg(&mut self, fg_pgid: Pgid) {
|
||||
self.foreground = Some(fg_pgid);
|
||||
// Some background processes may be waiting on the wait queue; after set_fg, they may become able to read.
|
||||
if self.is_readable() {
|
||||
self.read_wait_queue.wake_all();
|
||||
}
|
||||
}
|
||||
|
||||
/// get foreground process group id
|
||||
@ -72,7 +288,7 @@ impl LineDiscipline {
|
||||
|
||||
/// whether there is buffered data
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.buffer.len() == 0
|
||||
self.read_buffer.lock().len() == 0
|
||||
}
|
||||
|
||||
pub fn get_termios(&self) -> &KernelTermios {
|
||||
@ -83,3 +299,27 @@ impl LineDiscipline {
|
||||
self.termios = termios;
|
||||
}
|
||||
}
|
||||
|
||||
fn meet_new_line(item: u8, termios: &KernelTermios) -> bool {
|
||||
if item == b'\n'
|
||||
|| item == *termios.get_special_char(CC_C_CHAR::VEOF)
|
||||
|| item == *termios.get_special_char(CC_C_CHAR::VEOL)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
if termios.contains_iexten() && item == *termios.get_special_char(CC_C_CHAR::VEOL2) {
|
||||
return true;
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
/// Whether the special char should not be delivered to the reading process
|
||||
fn should_not_be_read(item: u8, termios: &KernelTermios) -> bool {
|
||||
if item == *termios.get_special_char(CC_C_CHAR::VEOF) {
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
@ -1,9 +1,9 @@
|
||||
|
||||
use self::line_discipline::LineDiscipline;
|
||||
use crate::driver::console::receive_console_char;
|
||||
use crate::driver::tty::TtyDriver;
|
||||
use crate::fs::events::IoEvents;
|
||||
|
||||
use crate::fs::ioctl::IoctlCmd;
|
||||
use crate::process::Pgid;
|
||||
use crate::process::{process_table, Pgid};
|
||||
use crate::util::{read_val_from_user, write_val_to_user};
|
||||
use crate::{fs::file::File, prelude::*};
|
||||
|
||||
@ -22,6 +22,8 @@ pub struct Tty {
|
||||
name: CString,
|
||||
/// line discipline
|
||||
ldisc: Mutex<LineDiscipline>,
|
||||
/// driver
|
||||
driver: Mutex<Weak<TtyDriver>>,
|
||||
}
|
||||
|
||||
impl Tty {
|
||||
@ -29,12 +31,34 @@ impl Tty {
|
||||
Tty {
|
||||
name,
|
||||
ldisc: Mutex::new(LineDiscipline::new()),
|
||||
driver: Mutex::new(Weak::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Set foreground process group
|
||||
pub fn set_fg(&self, pgid: Pgid) {
|
||||
self.ldisc.lock().set_fg(pgid);
|
||||
}
|
||||
|
||||
pub fn set_driver(&self, driver: Weak<TtyDriver>) {
|
||||
*self.driver.lock() = driver;
|
||||
}
|
||||
|
||||
/// Wake up the foreground process group waiting on IO events.
/// This function should be called from the interrupt handler of IO events.
|
||||
fn wake_fg_proc_grp(&self) {
|
||||
let ldisc = self.ldisc.lock();
|
||||
if let Some(fg_pgid) = ldisc.get_fg() {
|
||||
if let Some(fg_proc_grp) = process_table::pgid_to_process_group(*fg_pgid) {
|
||||
fg_proc_grp.wake_all_polling_procs();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn receive_char(&self, item: u8) {
|
||||
self.ldisc.lock().push_char(item);
|
||||
self.wake_fg_proc_grp();
|
||||
}
|
||||
}
|
||||
|
||||
impl File for Tty {
|
||||
@ -52,13 +76,7 @@ impl File for Tty {
|
||||
}
|
||||
|
||||
fn poll(&self) -> IoEvents {
|
||||
if !self.ldisc.lock().is_empty() {
|
||||
return IoEvents::POLLIN;
|
||||
}
|
||||
// receive keyboard input
|
||||
let byte = receive_console_char();
|
||||
self.ldisc.lock().push_char(byte);
|
||||
return IoEvents::POLLIN;
|
||||
self.ldisc.lock().poll()
|
||||
}
|
||||
|
||||
fn ioctl(&self, cmd: IoctlCmd, arg: usize) -> Result<i32> {
|
||||
@ -108,6 +126,7 @@ impl File for Tty {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_console() -> &'static Arc<Tty> {
|
||||
/// FIXME: should we maintain a static console?
|
||||
pub fn get_n_tty() -> &'static Arc<Tty> {
|
||||
&N_TTY
|
||||
}
|
||||
|
@ -113,27 +113,103 @@ pub enum CC_C_CHAR {
|
||||
VEOL2 = 16,
|
||||
}
|
||||
|
||||
impl CC_C_CHAR {
|
||||
// The special chars are the same as on Ubuntu.
|
||||
pub fn char(&self) -> u8 {
|
||||
match self {
|
||||
CC_C_CHAR::VINTR => 3,
|
||||
CC_C_CHAR::VQUIT => 28,
|
||||
CC_C_CHAR::VERASE => 127,
|
||||
CC_C_CHAR::VKILL => 21,
|
||||
CC_C_CHAR::VEOF => 4,
|
||||
CC_C_CHAR::VTIME => 0,
|
||||
CC_C_CHAR::VMIN => 1,
|
||||
CC_C_CHAR::VSWTC => 0,
|
||||
CC_C_CHAR::VSTART => 17,
|
||||
CC_C_CHAR::VSTOP => 19,
|
||||
CC_C_CHAR::VSUSP => 26,
|
||||
CC_C_CHAR::VEOL => 255,
|
||||
CC_C_CHAR::VREPRINT => 18,
|
||||
CC_C_CHAR::VDISCARD => 15,
|
||||
CC_C_CHAR::VWERASE => 23,
|
||||
CC_C_CHAR::VLNEXT => 22,
|
||||
CC_C_CHAR::VEOL2 => 255,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_usize(&self) -> usize {
|
||||
*self as usize
|
||||
}
|
||||
|
||||
pub fn from_char(item: u8) -> Result<Self> {
|
||||
if item == Self::VINTR.char() {
|
||||
return Ok(Self::VINTR);
|
||||
}
|
||||
if item == Self::VQUIT.char() {
|
||||
return Ok(Self::VQUIT);
|
||||
}
|
||||
|
||||
if item == Self::VERASE.char() {
|
||||
return Ok(Self::VERASE);
|
||||
}
|
||||
if item == Self::VEOF.char() {
|
||||
return Ok(Self::VEOF);
|
||||
}
|
||||
if item == Self::VSTART.char() {
|
||||
return Ok(Self::VSTART);
|
||||
}
|
||||
if item == Self::VSTOP.char() {
|
||||
return Ok(Self::VSTOP);
|
||||
}
|
||||
if item == Self::VSUSP.char() {
|
||||
return Ok(Self::VSUSP);
|
||||
}
|
||||
|
||||
return_errno_with_message!(Errno::EINVAL, "Not a valid cc_char");
|
||||
}
|
||||
}
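The default special characters above follow the usual "letter & 0x1f" control-character encoding, e.g. VINTR is Ctrl+C. A quick stand-alone check:

fn ctrl(c: char) -> u8 {
    (c as u8) & 0x1f
}

fn main() {
    assert_eq!(ctrl('C'), 3);   // VINTR  (Ctrl+C)
    assert_eq!(ctrl('\\'), 28); // VQUIT  (Ctrl+\)
    assert_eq!(ctrl('U'), 21);  // VKILL  (Ctrl+U)
    assert_eq!(ctrl('D'), 4);   // VEOF   (Ctrl+D)
    assert_eq!(ctrl('Z'), 26);  // VSUSP  (Ctrl+Z)
}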
|
||||
|
||||
#[derive(Debug, Clone, Copy, Pod)]
|
||||
#[repr(C)]
|
||||
pub struct KernelTermios {
|
||||
pub c_iflags: C_IFLAGS,
|
||||
pub c_oflags: C_OFLAGS,
|
||||
pub c_cflags: C_CFLAGS,
|
||||
pub c_lflags: C_LFLAGS,
|
||||
pub c_line: CcT,
|
||||
pub c_cc: [CcT; KERNEL_NCCS],
|
||||
c_iflags: C_IFLAGS,
|
||||
c_oflags: C_OFLAGS,
|
||||
c_cflags: C_CFLAGS,
|
||||
c_lflags: C_LFLAGS,
|
||||
c_line: CcT,
|
||||
c_cc: [CcT; KERNEL_NCCS],
|
||||
}
|
||||
|
||||
impl KernelTermios {
|
||||
pub fn default() -> Self {
|
||||
Self {
|
||||
let mut termios = Self {
|
||||
c_iflags: C_IFLAGS::ICRNL,
|
||||
c_oflags: C_OFLAGS::empty(),
|
||||
c_cflags: C_CFLAGS::B0,
|
||||
c_lflags: C_LFLAGS::ICANON | C_LFLAGS::ECHO,
|
||||
c_line: 0,
|
||||
c_cc: [0; KERNEL_NCCS],
|
||||
}
|
||||
};
|
||||
*termios.get_special_char_mut(CC_C_CHAR::VINTR) = CC_C_CHAR::VINTR.char();
|
||||
*termios.get_special_char_mut(CC_C_CHAR::VQUIT) = CC_C_CHAR::VQUIT.char();
|
||||
*termios.get_special_char_mut(CC_C_CHAR::VERASE) = CC_C_CHAR::VERASE.char();
|
||||
*termios.get_special_char_mut(CC_C_CHAR::VKILL) = CC_C_CHAR::VKILL.char();
|
||||
*termios.get_special_char_mut(CC_C_CHAR::VEOF) = CC_C_CHAR::VEOF.char();
|
||||
*termios.get_special_char_mut(CC_C_CHAR::VTIME) = CC_C_CHAR::VTIME.char();
|
||||
*termios.get_special_char_mut(CC_C_CHAR::VMIN) = CC_C_CHAR::VMIN.char();
|
||||
*termios.get_special_char_mut(CC_C_CHAR::VSWTC) = CC_C_CHAR::VSWTC.char();
|
||||
*termios.get_special_char_mut(CC_C_CHAR::VSTART) = CC_C_CHAR::VSTART.char();
|
||||
*termios.get_special_char_mut(CC_C_CHAR::VSTOP) = CC_C_CHAR::VSTOP.char();
|
||||
*termios.get_special_char_mut(CC_C_CHAR::VSUSP) = CC_C_CHAR::VSUSP.char();
|
||||
*termios.get_special_char_mut(CC_C_CHAR::VEOL) = CC_C_CHAR::VEOL.char();
|
||||
*termios.get_special_char_mut(CC_C_CHAR::VREPRINT) = CC_C_CHAR::VREPRINT.char();
|
||||
*termios.get_special_char_mut(CC_C_CHAR::VDISCARD) = CC_C_CHAR::VDISCARD.char();
|
||||
*termios.get_special_char_mut(CC_C_CHAR::VWERASE) = CC_C_CHAR::VWERASE.char();
|
||||
*termios.get_special_char_mut(CC_C_CHAR::VLNEXT) = CC_C_CHAR::VLNEXT.char();
|
||||
*termios.get_special_char_mut(CC_C_CHAR::VEOL2) = CC_C_CHAR::VEOL2.char();
|
||||
termios
|
||||
}
|
||||
|
||||
fn new() -> Self {
|
||||
@ -147,7 +223,16 @@ impl KernelTermios {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_cooked_mode(&self) -> bool {
|
||||
pub fn get_special_char(&self, cc_c_char: CC_C_CHAR) -> &CcT {
|
||||
&self.c_cc[cc_c_char.as_usize()]
|
||||
}
|
||||
|
||||
pub fn get_special_char_mut(&mut self, cc_c_char: CC_C_CHAR) -> &mut CcT {
|
||||
&mut self.c_cc[cc_c_char.as_usize()]
|
||||
}
|
||||
|
||||
/// Canonical mode means we handle input line by line, not character by character
|
||||
pub fn is_canonical_mode(&self) -> bool {
|
||||
self.c_lflags.contains(C_LFLAGS::ICANON)
|
||||
}
|
||||
|
||||
@ -155,4 +240,20 @@ impl KernelTermios {
|
||||
pub fn contains_icrnl(&self) -> bool {
|
||||
self.c_iflags.contains(C_IFLAGS::ICRNL)
|
||||
}
|
||||
|
||||
pub fn contains_isig(&self) -> bool {
|
||||
self.c_lflags.contains(C_LFLAGS::ISIG)
|
||||
}
|
||||
|
||||
pub fn contain_echo(&self) -> bool {
|
||||
self.c_lflags.contains(C_LFLAGS::ECHO)
|
||||
}
|
||||
|
||||
pub fn contains_echo_ctl(&self) -> bool {
|
||||
self.c_lflags.contains(C_LFLAGS::ECHOCTL)
|
||||
}
|
||||
|
||||
pub fn contains_iexten(&self) -> bool {
|
||||
self.c_lflags.contains(C_LFLAGS::IEXTEN)
|
||||
}
|
||||
}
|
||||
|
@ -1,17 +1,17 @@
|
||||
use crate::prelude::*;
|
||||
|
||||
pub struct UserApp {
|
||||
pub app_name: CString,
|
||||
pub elf_path: CString,
|
||||
pub app_content: &'static [u8],
|
||||
pub argv: Vec<CString>,
|
||||
pub envp: Vec<CString>,
|
||||
}
|
||||
|
||||
impl UserApp {
|
||||
pub fn new(app_name: &str, app_content: &'static [u8]) -> Self {
|
||||
let app_name = CString::new(app_name).unwrap();
|
||||
pub fn new(elf_path: &str, app_content: &'static [u8]) -> Self {
|
||||
let app_name = CString::new(elf_path).unwrap();
|
||||
UserApp {
|
||||
app_name,
|
||||
elf_path: app_name,
|
||||
app_content,
|
||||
argv: Vec::new(),
|
||||
envp: Vec::new(),
|
||||
@ -35,7 +35,7 @@ pub fn get_all_apps() -> Vec<UserApp> {
|
||||
res.push(asm_hello_world);
|
||||
|
||||
// Hello world, written in C language.
|
||||
// Since glibc requires the app name starts with "/", and we don't have filesystem now.
|
||||
// Since glibc requires the elf path starts with "/", and we don't have filesystem now.
|
||||
// So we manually add a leading "/" for app written in C language.
|
||||
let hello_c = UserApp::new("/hello_c", read_hello_c_content());
|
||||
res.push(hello_c);
|
||||
@ -56,6 +56,10 @@ pub fn get_all_apps() -> Vec<UserApp> {
|
||||
let signal_test = UserApp::new("/signal_test", read_signal_test_content());
|
||||
res.push(signal_test);
|
||||
|
||||
// pthread test
|
||||
let pthread_test = UserApp::new("/pthread_test", read_pthread_test_content());
|
||||
res.push(pthread_test);
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
@ -109,6 +113,10 @@ fn read_signal_test_content() -> &'static [u8] {
|
||||
include_bytes!("../../../../apps/signal_c/signal_test")
|
||||
}
|
||||
|
||||
fn read_pthread_test_content() -> &'static [u8] {
|
||||
include_bytes!("../../../../apps/pthread/pthread_test")
|
||||
}
|
||||
|
||||
fn read_busybox_content() -> &'static [u8] {
|
||||
include_bytes!("../../../../apps/busybox/busybox")
|
||||
}
|
||||
|
@ -5,28 +5,28 @@ use pod::Pod;
|
||||
/// Copy bytes from the user space of the current process. The number of bytes copied is the length of dest.
|
||||
pub fn read_bytes_from_user(src: Vaddr, dest: &mut [u8]) -> Result<()> {
|
||||
let current = current!();
|
||||
let root_vmar = current.root_vmar().unwrap();
|
||||
let root_vmar = current.root_vmar();
|
||||
Ok(root_vmar.read_bytes(src, dest)?)
|
||||
}
|
||||
|
||||
/// Copy a val (plain-old-data type) from the user space of the current process.
|
||||
pub fn read_val_from_user<T: Pod>(src: Vaddr) -> Result<T> {
|
||||
let current = current!();
|
||||
let root_vmar = current.root_vmar().unwrap();
|
||||
let root_vmar = current.root_vmar();
|
||||
Ok(root_vmar.read_val(src)?)
|
||||
}
|
||||
|
||||
/// Write bytes to the user space of the current process. The number of bytes written is the length of src.
|
||||
pub fn write_bytes_to_user(dest: Vaddr, src: &[u8]) -> Result<()> {
|
||||
let current = current!();
|
||||
let root_vmar = current.root_vmar().unwrap();
|
||||
let root_vmar = current.root_vmar();
|
||||
Ok(root_vmar.write_bytes(dest, src)?)
|
||||
}
|
||||
|
||||
/// Write a val (plain-old-data type) to the user space of the current process.
|
||||
pub fn write_val_to_user<T: Pod>(dest: Vaddr, val: &T) -> Result<()> {
|
||||
let current = current!();
|
||||
let root_vmar = current.root_vmar().unwrap();
|
||||
let root_vmar = current.root_vmar();
|
||||
Ok(root_vmar.write_val(dest, val)?)
|
||||
}
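The helpers above require T: Pod because the value must be reconstructible from a raw byte copy out of user memory. A stand-alone sketch of that idea, copying through a byte buffer instead of a VMAR (the Timeval layout is just an example type, not a kernel definition):

use std::mem::size_of;

// For illustration only: a real Pod bound guarantees that every bit pattern is a valid T.
fn read_val<T: Copy>(bytes: &[u8]) -> T {
    assert!(bytes.len() >= size_of::<T>());
    unsafe { std::ptr::read_unaligned(bytes.as_ptr() as *const T) }
}

#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq)]
struct Timeval {
    sec: i64,
    usec: i64,
}

fn main() {
    let val = Timeval { sec: 1, usec: 250 };
    let bytes = unsafe {
        std::slice::from_raw_parts(&val as *const Timeval as *const u8, size_of::<Timeval>())
    };
    assert_eq!(read_val::<Timeval>(bytes), val);
}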
|
||||
|
||||
|
@ -1,11 +1,10 @@
|
||||
use crate::prelude::*;
|
||||
|
||||
/// This trait is implemented by structs which can handle a user space page fault.
|
||||
/// In current implementation, they are vmars and vmos.
|
||||
pub trait PageFaultHandler {
|
||||
/// Handle a page fault at a specific addr. if not_present is true, the page fault is caused by page not present.
|
||||
/// Otherwise, it's caused by page protection error.
|
||||
/// if write is true, means the page fault is caused by a write access,
|
||||
/// if write is true, the page fault is caused by a write access,
|
||||
/// otherwise, the page fault is caused by a read access.
|
||||
/// If the page fault can be handled successfully, this function will return Ok(()).
|
||||
/// Otherwise, this function will return Err.
|
||||
|
@ -20,7 +20,6 @@ use jinux_frame::AlignExt;
|
||||
use self::vm_mapping::VmMapping;
|
||||
|
||||
use super::page_fault_handler::PageFaultHandler;
|
||||
use super::vmo::Vmo;
|
||||
|
||||
/// Virtual Memory Address Regions (VMARs) are a type of capability that manages
|
||||
/// user address spaces.
|
||||
@ -228,7 +227,7 @@ impl Vmar_ {
|
||||
// FIXME: If multiple vmos are mapped to the addr, should we allow all vmos to handle page fault?
|
||||
for (vm_mapping_base, vm_mapping) in &inner.vm_mappings {
|
||||
if *vm_mapping_base <= page_fault_addr
|
||||
&& page_fault_addr <= *vm_mapping_base + vm_mapping.size()
|
||||
&& page_fault_addr < *vm_mapping_base + vm_mapping.size()
|
||||
{
|
||||
return vm_mapping.handle_page_fault(page_fault_addr, not_present, write);
|
||||
}
|
||||
@ -639,10 +638,10 @@ impl Vmar_ {
|
||||
}
|
||||
|
||||
/// get mapped vmo at given offset
|
||||
pub fn get_mapped_vmo(&self, offset: Vaddr) -> Result<Vmo<Rights>> {
|
||||
pub fn get_vm_mapping(&self, offset: Vaddr) -> Result<Arc<VmMapping>> {
|
||||
for (vm_mapping_base, vm_mapping) in &self.inner.lock().vm_mappings {
|
||||
if *vm_mapping_base <= offset && offset < *vm_mapping_base + vm_mapping.size() {
|
||||
return Ok(vm_mapping.vmo().dup()?);
|
||||
return Ok(vm_mapping.clone());
|
||||
}
|
||||
}
|
||||
return_errno_with_message!(Errno::EACCES, "No mapped vmo at this offset");
|
||||
@ -671,10 +670,10 @@ impl<R> Vmar<R> {
|
||||
}
|
||||
|
||||
/// get a mapped vmo
|
||||
pub fn get_mapped_vmo(&self, offset: Vaddr) -> Result<Vmo<Rights>> {
|
||||
pub fn get_vm_mapping(&self, offset: Vaddr) -> Result<Arc<VmMapping>> {
|
||||
let rights = Rights::all();
|
||||
self.check_rights(rights)?;
|
||||
self.0.get_mapped_vmo(offset)
|
||||
self.0.get_vm_mapping(offset)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -106,7 +106,7 @@ impl VmMapping {
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) fn vmo(&self) -> &Vmo<Rights> {
|
||||
pub fn vmo(&self) -> &Vmo<Rights> {
|
||||
&self.vmo
|
||||
}
|
||||
|
||||
@ -143,11 +143,11 @@ impl VmMapping {
|
||||
}
|
||||
|
||||
/// the mapping's start address
|
||||
pub(super) fn map_to_addr(&self) -> Vaddr {
|
||||
pub fn map_to_addr(&self) -> Vaddr {
|
||||
self.map_to_addr
|
||||
}
|
||||
|
||||
pub(super) fn size(&self) -> usize {
|
||||
pub fn size(&self) -> usize {
|
||||
self.map_size
|
||||
}
|
||||
|
||||
@ -176,6 +176,13 @@ impl VmMapping {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn unmap_and_decommit(&self, range: Range<usize>) -> Result<()> {
|
||||
let vmo_range = (range.start - self.map_to_addr)..(range.end - self.map_to_addr);
|
||||
self.unmap(range, false)?;
|
||||
self.vmo.decommit(vmo_range)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn is_destroyed(&self) -> bool {
|
||||
self.inner.lock().is_destroyed
|
||||
}
|
||||
@ -206,11 +213,25 @@ impl VmMapping {
|
||||
pub(super) fn protect(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
|
||||
let rights = Rights::from(perms);
|
||||
self.vmo().check_rights(rights)?;
|
||||
// FIXME: should we commit and map these pages before protect vmspace?
|
||||
debug_assert!(range.start % PAGE_SIZE == 0);
|
||||
debug_assert!(range.end % PAGE_SIZE == 0);
|
||||
let start_page = (range.start - self.map_to_addr) / PAGE_SIZE;
|
||||
let end_page = (range.end - self.map_to_addr) / PAGE_SIZE;
|
||||
let vmar = self.parent.upgrade().unwrap();
|
||||
let vm_space = vmar.vm_space();
|
||||
let perm = VmPerm::from(perms);
|
||||
vm_space.protect(&range, perm)?;
|
||||
let mut inner = self.inner.lock();
|
||||
for page_idx in start_page..end_page {
|
||||
inner.page_perms.insert(page_idx, perm);
|
||||
let page_addr = page_idx * PAGE_SIZE + self.map_to_addr;
|
||||
if vm_space.is_mapped(page_addr) {
|
||||
// if the page is already mapped, we will modify page table
|
||||
let perm = VmPerm::from(perms);
|
||||
let page_range = page_addr..(page_addr + PAGE_SIZE);
|
||||
vm_space.protect(&page_range, perm)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -29,7 +29,7 @@ fn panic(info: &PanicInfo) -> ! {
|
||||
fn test_input() {
|
||||
jinux_frame::enable_interrupts();
|
||||
println!("please input value into console to pass this test");
|
||||
jinux_std::driver::console::register_console_callback(Arc::new(input_callback));
|
||||
jinux_std::driver::console::register_serial_input_callback(Arc::new(input_callback));
|
||||
unsafe {
|
||||
while INPUT_VALUE == 0 {
|
||||
jinux_frame::hlt();
|
||||
|
@ -26,5 +26,5 @@ fn panic(info: &PanicInfo) -> ! {
|
||||
|
||||
#[test_case]
|
||||
fn test_rtc() {
|
||||
println!("real time:{:?}",jinux_frame::time::get_real_time());
|
||||
println!("real time:{:?}", jinux_frame::time::get_real_time());
|
||||
}
|
||||
|