Reorganize current code and rename the project to jinux

This commit is contained in:
Jianfeng Jiang
2022-11-22 16:42:26 +08:00
parent f3ab8219bc
commit 41b79cf823
245 changed files with 608 additions and 578 deletions

View File

@ -0,0 +1,29 @@
[package]
name = "jinux-frame"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bitflags = "1.3"
x86_64 = "0.14.2"
spin = "0.9.4"
volatile = { version = "0.4.5", features = ["unstable"] }
buddy_system_allocator = "0.6"
linked_list_allocator = "0.9.0"
bootloader = { version = "0.10.12" }
font8x8 = { version = "0.2.5", default-features = false, features = ["unicode"] }
uart_16550 = "0.2.0"
pod = { path = "../pod" }
pod-derive = { path = "../pod-derive" }
[dependencies.lazy_static]
version = "1.0"
features = ["spin_no_std"]
[features]
default = ["serial_print"]
serial_print = []

View File

@ -0,0 +1,39 @@
use core::{
cell::UnsafeCell,
ops::{Deref, DerefMut},
};
#[derive(Debug, Default)]
#[repr(transparent)]
pub struct Cell<T>(UnsafeCell<T>);
unsafe impl<T> Sync for Cell<T> {}
impl<T> Cell<T> {
/// The user is responsible for guaranteeing that the inner value is only
/// accessed on a single processor (uniprocessor).
#[inline(always)]
pub const fn new(val: T) -> Self {
Self(UnsafeCell::new(val))
}
#[inline(always)]
pub fn get(&self) -> &mut T {
unsafe { &mut *self.0.get() }
}
}
impl<T> Deref for Cell<T> {
type Target = T;
#[inline(always)]
fn deref(&self) -> &Self::Target {
self.get()
}
}
impl<T> DerefMut for Cell<T> {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
self.get()
}
}
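// Hedged usage sketch (illustrative, not part of this commit): a
// uniprocessor-only global counter relying on the contract documented on
// `Cell::new`. The name `TICK_COUNT` is made up for the example.
static TICK_COUNT: Cell<u64> = Cell::new(0);

fn bump_tick_count() {
    // Sound only while a single CPU ever touches `TICK_COUNT`.
    *TICK_COUNT.get() += 1;
}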

View File

@ -0,0 +1,18 @@
#![allow(unused)]
use crate::log::LogLevel;
pub const USER_STACK_SIZE: usize = PAGE_SIZE * 2;
pub const KERNEL_STACK_SIZE: usize = PAGE_SIZE * 16;
pub const KERNEL_HEAP_SIZE: usize = 0x1_000_000;
pub const KERNEL_OFFSET: usize = 0xffffff00_00000000;
pub const PHYS_OFFSET: usize = 0xFFFF800000000000;
pub const ENTRY_COUNT: usize = 512;
pub const PAGE_SIZE: usize = 0x1000;
pub const PAGE_SIZE_BITS: usize = 0xc;
pub const KVA_START: usize = (usize::MAX) << PAGE_SIZE_BITS;
pub const DEFAULT_LOG_LEVEL: LogLevel = LogLevel::Close;

View File

@ -0,0 +1,298 @@
//! CPU.
use core::arch::x86_64::{_fxrstor, _fxsave};
use core::fmt::Debug;
use core::mem::MaybeUninit;
use crate::debug;
use crate::trap::{CalleeRegs, CallerRegs, SyscallFrame, TrapFrame};
use pod::Pod;
/// Defines a CPU-local variable.
#[macro_export]
macro_rules! cpu_local {
() => {
todo!()
};
}
/// Returns the number of CPUs.
pub fn num_cpus() -> u32 {
// FIXME: we only start one cpu now.
1
}
/// Returns the ID of this CPU.
pub fn this_cpu() -> u32 {
todo!()
}
/// CPU context, including both general-purpose registers and floating-point registers.
#[derive(Clone, Default, Copy, Debug)]
#[repr(C)]
pub struct CpuContext {
pub fp_regs: FpRegs,
pub gp_regs: GpRegs,
pub fs_base: u64,
/// Trap information; this field is all zeros when the context comes from a syscall
pub trap_information: TrapInformation,
}
#[derive(Clone, Default, Copy, Debug)]
#[repr(C)]
pub struct TrapInformation {
pub cr2: u64,
pub id: u64,
pub err: u64,
pub cs: u64,
pub ss: u64,
}
/// The general-purpose registers of CPU.
#[derive(Clone, Copy, Debug, Default)]
#[repr(C)]
pub struct GpRegs {
pub r8: u64,
pub r9: u64,
pub r10: u64,
pub r11: u64,
pub r12: u64,
pub r13: u64,
pub r14: u64,
pub r15: u64,
pub rdi: u64,
pub rsi: u64,
pub rbp: u64,
pub rbx: u64,
pub rdx: u64,
pub rax: u64,
pub rcx: u64,
pub rsp: u64,
pub rip: u64,
pub rflag: u64,
}
unsafe impl Pod for GpRegs {}
unsafe impl Pod for TrapInformation {}
unsafe impl Pod for CpuContext {}
unsafe impl Pod for FpRegs {}
impl From<SyscallFrame> for CpuContext {
fn from(syscall: SyscallFrame) -> Self {
Self {
gp_regs: GpRegs {
r8: syscall.caller.r8,
r9: syscall.caller.r9,
r10: syscall.caller.r10,
r11: syscall.caller.r11,
r12: syscall.callee.r12,
r13: syscall.callee.r13,
r14: syscall.callee.r14,
r15: syscall.callee.r15,
rdi: syscall.caller.rdi,
rsi: syscall.caller.rsi,
rbp: syscall.callee.rbp,
rbx: syscall.callee.rbx,
rdx: syscall.caller.rdx,
rax: syscall.caller.rax,
rcx: syscall.caller.rcx,
rsp: syscall.callee.rsp,
rip: syscall.caller.rcx, // the `syscall` instruction saves the return RIP in RCX
rflag: 0,
},
fs_base: 0,
fp_regs: FpRegs::default(),
trap_information: TrapInformation::default(),
}
}
}
impl Into<SyscallFrame> for CpuContext {
fn into(self) -> SyscallFrame {
SyscallFrame {
caller: CallerRegs {
rax: self.gp_regs.rax,
rcx: self.gp_regs.rcx,
rdx: self.gp_regs.rdx,
rsi: self.gp_regs.rsi,
rdi: self.gp_regs.rdi,
r8: self.gp_regs.r8,
r9: self.gp_regs.r9,
r10: self.gp_regs.r10,
r11: self.gp_regs.r11,
},
callee: CalleeRegs {
rsp: self.gp_regs.rsp,
rbx: self.gp_regs.rbx,
rbp: self.gp_regs.rbp,
r12: self.gp_regs.r12,
r13: self.gp_regs.r13,
r14: self.gp_regs.r14,
r15: self.gp_regs.r15,
},
}
}
}
impl From<TrapFrame> for CpuContext {
fn from(trap: TrapFrame) -> Self {
Self {
gp_regs: GpRegs {
r8: trap.caller.r8,
r9: trap.caller.r9,
r10: trap.caller.r10,
r11: trap.caller.r11,
r12: trap.callee.r12,
r13: trap.callee.r13,
r14: trap.callee.r14,
r15: trap.callee.r15,
rdi: trap.caller.rdi,
rsi: trap.caller.rsi,
rbp: trap.callee.rbp,
rbx: trap.callee.rbx,
rdx: trap.caller.rdx,
rax: trap.caller.rax,
rcx: trap.caller.rcx,
rsp: trap.rsp,
rip: trap.rip,
rflag: trap.rflags,
},
fs_base: 0,
fp_regs: FpRegs::default(),
trap_information: TrapInformation {
cr2: trap.cr2,
id: trap.id,
err: trap.err,
cs: trap.cs,
ss: trap.ss,
},
}
}
}
impl Into<TrapFrame> for CpuContext {
fn into(self) -> TrapFrame {
let trap_information = self.trap_information;
TrapFrame {
caller: CallerRegs {
rax: self.gp_regs.rax,
rcx: self.gp_regs.rcx,
rdx: self.gp_regs.rdx,
rsi: self.gp_regs.rsi,
rdi: self.gp_regs.rdi,
r8: self.gp_regs.r8,
r9: self.gp_regs.r9,
r10: self.gp_regs.r10,
r11: self.gp_regs.r11,
},
callee: CalleeRegs {
rsp: self.gp_regs.rsp,
rbx: self.gp_regs.rbx,
rbp: self.gp_regs.rbp,
r12: self.gp_regs.r12,
r13: self.gp_regs.r13,
r14: self.gp_regs.r14,
r15: self.gp_regs.r15,
},
id: trap_information.id,
err: trap_information.err,
cr2: trap_information.cr2,
rip: self.gp_regs.rip,
cs: trap_information.cs,
rflags: self.gp_regs.rflag,
rsp: self.gp_regs.rsp,
ss: trap_information.ss,
}
}
}
/// The floating-point state of the CPU.
#[derive(Clone, Copy, Debug)]
#[repr(C)]
pub struct FpRegs {
buf: FxsaveArea,
is_valid: bool,
}
impl FpRegs {
/// Create a new instance.
///
/// Note that a newly-created instance's floating point state is not
/// initialized, thus considered invalid (i.e., `self.is_valid() == false`).
pub fn new() -> Self {
// The buffer address requires 16-byte alignment.
Self {
buf: unsafe { MaybeUninit::uninit().assume_init() },
is_valid: false,
}
}
/// Save the CPU's current floating-point state into this instance.
pub fn save(&mut self) {
debug!("save fpregs");
debug!("write addr = 0x{:x}", (&mut self.buf) as *mut _ as usize);
unsafe {
_fxsave((&mut self.buf.data).as_mut_ptr() as *mut u8);
}
debug!("save fpregs success");
self.is_valid = true;
}
/// Save the floating-point state given by a slice of u8.
///
/// After calling this method, the state of the instance will be considered valid.
///
/// # Safety
///
/// It is the caller's responsibility to ensure that the source slice contains
/// data that is in xsave/xrstor format. The slice must have a length of 512 bytes.
pub unsafe fn save_from_slice(&mut self, src: &[u8]) {
(&mut self.buf.data).copy_from_slice(src);
self.is_valid = true;
}
/// Returns whether the instance contains data in a valid xsave/xrstor format.
pub fn is_valid(&self) -> bool {
self.is_valid
}
/// Clear the state of the instance.
///
/// This method does not reset the underlying buffer that contains the floating
/// point state; it only marks the buffer __invalid__.
pub fn clear(&mut self) {
self.is_valid = false;
}
/// Restore the CPU's floating-point state from this instance.
///
/// # Panics
///
/// If the current state is invalid, the method will panic.
pub fn restore(&self) {
debug!("restore fpregs");
assert!(self.is_valid);
unsafe { _fxrstor((&self.buf.data).as_ptr()) };
debug!("restore fpregs success");
}
/// Returns the floating point state as a slice.
///
/// Note that the slice may contain garbage if `self.is_valid() == false`.
pub fn as_slice(&self) -> &[u8] {
&self.buf.data
}
}
impl Default for FpRegs {
fn default() -> Self {
Self::new()
}
}
#[repr(C, align(16))]
#[derive(Debug, Clone, Copy)]
struct FxsaveArea {
data: [u8; 512], // 512 bytes
}
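// Hedged usage sketch (illustrative, not part of this commit): save the
// current floating-point state, let other code run, then restore it. Only the
// `FpRegs` API defined above is used.
fn demo_fp_round_trip() {
    let mut regs = FpRegs::new();
    regs.save();
    assert!(regs.is_valid());
    // ... code that may clobber the FPU/SSE state runs here ...
    regs.restore();
}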

View File

@ -0,0 +1,151 @@
use bootloader::boot_info::PixelFormat;
use core::fmt;
use font8x8::UnicodeFonts;
use spin::Mutex;
use volatile::Volatile;
pub(crate) static WRITER: Mutex<Option<Writer>> = Mutex::new(None);
pub fn init(framebuffer: &'static mut bootloader::boot_info::FrameBuffer) {
let mut writer = Writer {
info: framebuffer.info(),
buffer: Volatile::new(framebuffer.buffer_mut()),
x_pos: 0,
y_pos: 0,
};
writer.clear();
// global writer should not be locked here
let mut global_writer = WRITER.try_lock().unwrap();
assert!(global_writer.is_none(), "Global writer already initialized");
*global_writer = Some(writer);
}
pub(crate) struct Writer {
buffer: Volatile<&'static mut [u8]>,
info: bootloader::boot_info::FrameBufferInfo,
x_pos: usize,
y_pos: usize,
}
impl Writer {
fn newline(&mut self) {
self.y_pos += 8;
self.carriage_return();
}
fn carriage_return(&mut self) {
self.x_pos = 0;
}
/// Erases all text on the screen
pub fn clear(&mut self) {
self.x_pos = 0;
self.y_pos = 0;
self.buffer.fill(0);
}
fn shift_lines_up(&mut self) {
let offset = self.info.stride * self.info.bytes_per_pixel * 8;
self.buffer.copy_within(offset.., 0);
self.y_pos -= 8;
}
fn width(&self) -> usize {
self.info.horizontal_resolution
}
fn height(&self) -> usize {
self.info.vertical_resolution
}
fn write_char(&mut self, c: char) {
match c {
'\n' => self.newline(),
'\r' => self.carriage_return(),
c => {
if self.x_pos >= self.width() {
self.newline();
}
while self.y_pos >= (self.height() - 8) {
self.shift_lines_up();
}
let rendered = font8x8::BASIC_FONTS
.get(c)
.expect("character not found in basic font");
self.write_rendered_char(rendered);
}
}
}
fn write_rendered_char(&mut self, rendered_char: [u8; 8]) {
for (y, byte) in rendered_char.iter().enumerate() {
for (x, bit) in (0..8).enumerate() {
let on = *byte & (1 << bit) != 0;
self.write_pixel(self.x_pos + x, self.y_pos + y, on);
}
}
self.x_pos += 8;
}
fn write_pixel(&mut self, x: usize, y: usize, on: bool) {
let pixel_offset = y * self.info.stride + x;
let color = if on {
match self.info.pixel_format {
PixelFormat::RGB => [0x33, 0xff, 0x66, 0],
PixelFormat::BGR => [0x66, 0xff, 0x33, 0],
_other => [0xff, 0xff, 0xff, 0],
}
} else {
[0, 0, 0, 0]
};
let bytes_per_pixel = self.info.bytes_per_pixel;
let byte_offset = pixel_offset * bytes_per_pixel;
self.buffer
.index_mut(byte_offset..(byte_offset + bytes_per_pixel))
.copy_from_slice(&color[..bytes_per_pixel]);
}
/// Writes the given string to the framebuffer.
///
/// Wraps lines at the screen width and supports the `\n` newline character.
/// Characters that are missing from the basic 8x8 font will cause a panic.
fn write_string(&mut self, s: &str) {
for char in s.chars() {
self.write_char(char);
}
}
}
impl fmt::Write for Writer {
fn write_str(&mut self, s: &str) -> fmt::Result {
self.write_string(s);
Ok(())
}
}
/// Like the `print!` macro in the standard library, but prints to the framebuffer.
#[macro_export]
macro_rules! screen_print {
($($arg:tt)*) => ($crate::device::framebuffer::_print(format_args!($($arg)*)));
}
/// Like the `println!` macro in the standard library, but prints to the framebuffer.
#[macro_export]
macro_rules! screen_println {
() => ($crate::screen_print!("\n"));
($($arg:tt)*) => ($crate::screen_print!("{}\n", format_args!($($arg)*)));
}
/// Prints the given formatted string to the framebuffer
/// through the global `WRITER` instance.
#[doc(hidden)]
pub fn _print(args: fmt::Arguments) {
use core::fmt::Write;
use x86_64::instructions::interrupts;
interrupts::without_interrupts(|| {
WRITER.lock().as_mut().unwrap().write_fmt(args).unwrap();
});
}
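// Hedged usage sketch: once `init` has installed the global `Writer`, text can
// be drawn with the `screen_println!` macro defined above. The function name
// and message below are illustrative only.
fn demo_screen_output(boot_stage: &str) {
    crate::screen_println!("[boot] entering stage: {}", boot_stage);
}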

View File

@ -0,0 +1,55 @@
use crate::{prelude::*, x86_64_util};
/// An I/O port, representing a specific address in the x86 I/O address space.
pub struct IoPort {
addr: u16,
}
impl IoPort {
/// Create an I/O port.
///
/// # Safety
///
/// This function is marked unsafe as creating an I/O port is considered
/// a privileged operation.
pub unsafe fn new(addr: u16) -> Result<Self> {
Ok(Self { addr })
}
}
impl IoPort {
/// Get the address of this I/O port.
pub fn addr(&self) -> u16 {
self.addr
}
/// Read a value of `u32`.
pub fn read_u32(&self) -> u32 {
x86_64_util::in32(self.addr)
}
/// Write a value of `u32`.
pub fn write_u32(&self, val: u32) {
x86_64_util::out32(self.addr, val)
}
/// Read a value of `u16`.
pub fn read_u16(&self) -> u16 {
x86_64_util::in16(self.addr)
}
/// Write a value of `u16`.
pub fn write_u16(&self, val: u16) {
x86_64_util::out16(self.addr, val)
}
/// Read a value of `u8`.
pub fn read_u8(&self) -> u8 {
x86_64_util::in8(self.addr)
}
/// Write a value of `u8`.
pub fn write_u8(&self, val: u8) {
x86_64_util::out8(self.addr, val)
}
}

View File

@ -0,0 +1,16 @@
//! Device-related APIs.
pub mod framebuffer;
mod io_port;
pub mod pci;
mod pic;
pub mod serial;
pub use self::io_port::IoPort;
pub(crate) use pic::{add_timeout_list, TICK};
pub use pic::{TimerCallback, TIMER_FREQ};
pub(crate) fn init(framebuffer: &'static mut bootloader::boot_info::FrameBuffer) {
framebuffer::init(framebuffer);
pic::init();
}

View File

@ -0,0 +1,12 @@
//! PCI bus I/O ports.
use super::io_port::IoPort;
use lazy_static::lazy_static;
const CONFIG_ADDRESS: u16 = 0x0CF8;
const CONFIG_DATA: u16 = 0x0CFC;
lazy_static! {
pub static ref PCI_ADDRESS_PORT: IoPort = unsafe { IoPort::new(CONFIG_ADDRESS).unwrap() };
pub static ref PCI_DATA_PORT: IoPort = unsafe { IoPort::new(CONFIG_DATA).unwrap() };
}
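// Hedged sketch (not part of this commit): reading a 32-bit PCI configuration
// register through the two ports above, following the legacy CF8h/CFCh
// mechanism. The function name is illustrative only.
fn read_pci_config(bus: u8, device: u8, function: u8, offset: u8) -> u32 {
    let address = 0x8000_0000u32 // enable bit
        | ((bus as u32) << 16)
        | ((device as u32) << 11)
        | ((function as u32) << 8)
        | ((offset as u32) & 0xFC); // dword-aligned register offset
    PCI_ADDRESS_PORT.write_u32(address);
    PCI_DATA_PORT.read_u32()
}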

View File

@ -0,0 +1,178 @@
use crate::cell::Cell;
use crate::x86_64_util::out8;
use crate::{IrqAllocateHandle, TrapFrame};
use alloc::sync::Arc;
use alloc::vec::Vec;
use alloc::{boxed::Box, collections::BinaryHeap};
use core::any::Any;
use lazy_static::lazy_static;
use spin::Mutex;
const MASTER_CMD: u16 = 0x20;
const MASTER_DATA: u16 = MASTER_CMD + 1;
const SLAVE_CMD: u16 = 0xA0;
const SLAVE_DATA: u16 = SLAVE_CMD + 1;
const TIMER_RATE: u32 = 1193182;
/// This value represents the timer interrupt frequency in Hz.
pub const TIMER_FREQ: u64 = 100;
const TIMER_PERIOD_IO_PORT: u16 = 0x40;
const TIMER_MODE_IO_PORT: u16 = 0x43;
const TIMER_SQUARE_WAVE: u8 = 0x36;
const TIMER_IRQ_NUM: u8 = 32;
pub static mut TICK: u64 = 0;
lazy_static! {
static ref TIMER_IRQ: Mutex<IrqAllocateHandle> = Mutex::new(
crate::trap::allocate_target_irq(TIMER_IRQ_NUM).expect("Timer irq Allocate error")
);
}
pub fn init() {
// Start initialization
out8(MASTER_CMD, 0x11);
out8(SLAVE_CMD, 0x11);
// Set offsets
// map master PIC vector 0x00~0x07 to 0x20~0x27 IRQ number
out8(MASTER_DATA, 0x20);
// map slave PIC vector 0x00~0x07 to 0x28~0x2f IRQ number
out8(SLAVE_DATA, 0x28);
// Set up cascade, there is slave at IRQ2
out8(MASTER_DATA, 4);
out8(SLAVE_DATA, 2);
// Set up interrupt mode (1 is 8086/88 mode, 2 is auto EOI)
out8(MASTER_DATA, 1);
out8(SLAVE_DATA, 1);
// Unmask timer interrupt
out8(MASTER_DATA, 0xFE);
out8(SLAVE_DATA, 0xFF);
// Ack remaining interrupts
out8(MASTER_CMD, 0x20);
out8(SLAVE_CMD, 0x20);
// Initialize timer.
let cycle = TIMER_RATE / TIMER_FREQ as u32; // TIMER_FREQ interrupts per second (10 ms apart at 100 Hz).
out8(TIMER_MODE_IO_PORT, TIMER_SQUARE_WAVE);
out8(TIMER_PERIOD_IO_PORT, (cycle & 0xFF) as _);
out8(TIMER_PERIOD_IO_PORT, (cycle >> 8) as _);
TIMER_IRQ.lock().on_active(timer_callback);
}
#[inline(always)]
fn ack() {
out8(MASTER_CMD, 0x20);
}
fn timer_callback(trap_frame: &TrapFrame) {
// FIXME: disabling and re-enabling interrupts here causes an infinite loop
// x86_64_util::disable_interrupts();
ack();
let current_ms;
unsafe {
current_ms = TICK;
TICK += 1;
}
let timeout_list = TIMEOUT_LIST.get();
let mut callbacks: Vec<Arc<TimerCallback>> = Vec::new();
while let Some(t) = timeout_list.peek() {
if t.expire_ms <= current_ms {
callbacks.push(timeout_list.pop().unwrap());
} else {
break;
}
}
for callback in callbacks {
if callback.is_enable() {
callback.callback.call((&callback,));
}
}
// x86_64_util::enable_interrupts();
}
lazy_static! {
static ref TIMEOUT_LIST: Cell<BinaryHeap<Arc<TimerCallback>>> = Cell::new(BinaryHeap::new());
}
pub struct TimerCallback {
expire_ms: u64,
data: Arc<dyn Any + Send + Sync>,
callback: Box<dyn Fn(&TimerCallback) + Send + Sync>,
enable: Cell<bool>,
}
impl TimerCallback {
fn new(
timeout_ms: u64,
data: Arc<dyn Any + Send + Sync>,
callback: Box<dyn Fn(&TimerCallback) + Send + Sync>,
) -> Self {
Self {
expire_ms: timeout_ms,
data,
callback,
enable: Cell::new(true),
}
}
pub fn data(&self) -> &Arc<dyn Any + Send + Sync> {
&self.data
}
/// Disable this timeout.
pub fn disable(&self) {
*self.enable.get() = false;
}
/// Enable this timeout.
pub fn enable(&self) {
*self.enable.get() = true;
}
pub fn is_enable(&self) -> bool {
*self.enable
}
}
impl PartialEq for TimerCallback {
fn eq(&self, other: &Self) -> bool {
self.expire_ms == other.expire_ms
}
}
impl Eq for TimerCallback {}
impl PartialOrd for TimerCallback {
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for TimerCallback {
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
self.expire_ms.cmp(&other.expire_ms).reverse()
}
}
/// Add a timeout task to the timeout list; ticks occur at the frequency given by `TIMER_FREQ`.
///
/// The caller should ensure that the callback does not take too much time,
/// since it runs inside the timer interrupt.
pub fn add_timeout_list<F, T>(timeout: u64, data: T, callback: F) -> Arc<TimerCallback>
where
F: Fn(&TimerCallback) + Send + Sync + 'static,
T: Any + Send + Sync,
{
unsafe {
let timer_callback = TimerCallback::new(TICK + timeout, Arc::new(data), Box::new(callback));
let arc = Arc::new(timer_callback);
TIMEOUT_LIST.get().push(arc.clone());
arc
}
}
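// Hedged usage sketch: schedule a callback roughly one second from now
// (`TIMER_FREQ` ticks at 100 Hz). The callback runs inside the timer
// interrupt, so it must stay short. Names and data are illustrative.
fn demo_timeout() {
    let handle = add_timeout_list(TIMER_FREQ, "one second elapsed", |cb| {
        // e.g. wake up a sleeping task here.
        let _msg = cb.data();
    });
    // A pending timeout can still be cancelled before it fires.
    handle.disable();
}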

View File

@ -0,0 +1,47 @@
use lazy_static::lazy_static;
use spin::Mutex;
use uart_16550::SerialPort;
lazy_static! {
pub static ref SERIAL: Mutex<SerialPort> = {
let mut serial_port = unsafe { SerialPort::new(0x3F8) };
serial_port.init();
Mutex::new(serial_port)
};
}
/// Read a char from the console (serial) input.
/// FIXME: this function should **NOT** block. If no char has been received, it should return `None` immediately.
/// However, the `receive` function on SERIAL blocks until a char arrives, which blocks the whole kernel.
/// A more correct implementation should be added once interrupts are ready: register the keyboard/serial interrupt
/// handler to wake up foreground processes that wait on IO events.
pub fn receive_char() -> Option<u8> {
let byte = SERIAL.lock().receive();
Some(byte)
}
#[doc(hidden)]
pub fn _print(args: ::core::fmt::Arguments) {
use core::fmt::Write;
SERIAL
.lock()
.write_fmt(args)
.expect("Printing to serial failed");
}
/// Prints to the host through the serial interface.
#[macro_export]
macro_rules! serial_print {
($($arg:tt)*) => {
$crate::device::serial::_print(format_args!($($arg)*));
};
}
/// Prints to the host through the serial interface, appending a newline.
#[macro_export]
macro_rules! serial_println {
() => ($crate::serial_print!("\n"));
($fmt:expr) => ($crate::serial_print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => ($crate::serial_print!(
concat!($fmt, "\n"), $($arg)*));
}

View File

@ -0,0 +1,12 @@
/// The error type which is returned from the APIs of this crate.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum Error {
InvalidArgs,
NoMemory,
PageFault,
AccessDenied,
IoError,
InvalidVmpermBits,
NotEnoughResources,
NoChild,
}

View File

@ -0,0 +1,152 @@
//! The framework part of Jinux.
#![no_std]
#![allow(dead_code)]
#![allow(unused_variables)]
#![feature(negative_impls)]
#![feature(fn_traits)]
#![feature(const_maybe_uninit_zeroed)]
#![feature(alloc_error_handler)]
#![feature(core_intrinsics)]
#![feature(new_uninit)]
#![feature(link_llvm_intrinsics)]
extern crate alloc;
pub(crate) mod cell;
pub mod config;
pub mod cpu;
pub mod device;
mod error;
pub mod log;
pub(crate) mod mm;
pub mod prelude;
pub mod sync;
pub mod task;
pub mod timer;
pub mod trap;
pub mod user;
mod util;
pub mod vm;
pub(crate) mod x86_64_util;
use core::{mem, panic::PanicInfo};
pub use self::error::Error;
pub use self::prelude::Result;
pub(crate) use self::sync::up::UPSafeCell;
use alloc::vec::Vec;
use bootloader::{
boot_info::{FrameBuffer, MemoryRegionKind},
BootInfo,
};
pub use device::serial::receive_char;
pub use mm::address::{align_down, align_up, is_aligned, virt_to_phys};
pub use trap::{allocate_irq, IrqAllocateHandle, TrapFrame};
use trap::{IrqCallbackHandle, IrqLine};
pub use util::AlignExt;
use x86_64_util::enable_common_cpu_features;
static mut IRQ_CALLBACK_LIST: Vec<IrqCallbackHandle> = Vec::new();
#[cfg(not(feature = "serial_print"))]
pub use crate::screen_print as print;
#[cfg(not(feature = "serial_print"))]
pub use crate::screen_println as println;
#[cfg(feature = "serial_print")]
pub use crate::serial_print as print;
#[cfg(feature = "serial_print")]
pub use crate::serial_println as println;
pub fn init(boot_info: &'static mut BootInfo) {
let siz = boot_info.framebuffer.as_ref().unwrap() as *const FrameBuffer as usize;
let mut memory_init = false;
// memory
for region in boot_info.memory_regions.iter() {
if region.kind == MemoryRegionKind::Usable {
let start: u64 = region.start;
let size: u64 = region.end - region.start;
println!(
"[kernel] physical frames start = {:x}, size = {:x}",
start, size
);
mm::init(start, size);
memory_init = true;
break;
}
}
if !memory_init {
panic!("memory init failed");
}
device::init(boot_info.framebuffer.as_mut().unwrap());
device::framebuffer::WRITER.lock().as_mut().unwrap().clear();
trap::init();
enable_common_cpu_features();
unsafe {
for i in 0..256 {
IRQ_CALLBACK_LIST.push(IrqLine::acquire(i as u8).on_active(general_handler))
}
}
// Uncomment the code below to enable timer interrupts.
// x86_64_util::enable_interrupts_and_hlt();
}
fn general_handler(trap_frame: &TrapFrame) {
println!("{:#x?}", trap_frame);
println!("rip = 0x{:x}", trap_frame.rip);
println!("rsp = 0x{:x}", trap_frame.rsp);
println!("cr2 = 0x{:x}", trap_frame.cr2);
// println!("rbx = 0x{:x}", trap_frame.)
panic!("couldn't handler trap right now");
}
#[inline(always)]
pub(crate) const fn zero<T>() -> T {
unsafe { mem::MaybeUninit::zeroed().assume_init() }
}
pub trait Testable {
fn run(&self) -> ();
}
impl<T> Testable for T
where
T: Fn(),
{
fn run(&self) {
serial_print!("{}...\n", core::any::type_name::<T>());
self();
serial_println!("[ok]");
}
}
pub fn test_runner(tests: &[&dyn Testable]) {
serial_println!("Running {} tests", tests.len());
for test in tests {
test.run();
}
exit_qemu(QemuExitCode::Success);
}
pub fn test_panic_handler(info: &PanicInfo) -> ! {
serial_println!("[failed]");
serial_println!("Error: {}", info);
exit_qemu(QemuExitCode::Failed);
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u32)]
pub enum QemuExitCode {
Success = 0x10,
Failed = 0x11,
}
pub fn exit_qemu(exit_code: QemuExitCode) -> ! {
use x86_64::instructions::port::Port;
unsafe {
let mut port = Port::new(0xf4);
port.write(exit_code as u32);
}
unreachable!()
}
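// Hedged note: `exit_qemu` assumes QEMU is launched with the debug-exit device
// mapped at port 0xf4, e.g. `-device isa-debug-exit,iobase=0xf4,iosize=0x04`.
// QEMU then terminates with host exit status `(code << 1) | 1`, so `Success`
// (0x10) shows up as 33.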

View File

@ -0,0 +1,29 @@
ENTRY(_start)
KERNEL_BEGIN = 0xffffff0000000000;
SECTIONS {
. = KERNEL_BEGIN;
.rodata ALIGN(4K): {
*(.rodata .rodata.*)
}
.text ALIGN(4K): {
*(.text .text.*)
}
.data ALIGN(4K): {
*(.data .data.*)
*(.sdata .sdata.*)
}
.got ALIGN(4K): {
*(.got .got.*)
}
.bss ALIGN(4K): {
*(.bss .bss.*)
*(.sbss .sbss.*)
}
}

View File

@ -0,0 +1,142 @@
use core::fmt::Arguments;
use crate::config::DEFAULT_LOG_LEVEL;
/// Print a log message.
/// This function should *NOT* be called directly.
/// Instead, print logs with the macros.
#[cfg(not(feature = "serial_print"))]
#[doc(hidden)]
pub fn log_print(args: Arguments) {
use crate::device::framebuffer::WRITER;
use core::fmt::Write;
use x86_64::instructions::interrupts;
interrupts::without_interrupts(|| {
WRITER.lock().as_mut().unwrap().write_fmt(args).unwrap();
});
}
/// Print a log message.
/// This function should *NOT* be called directly.
/// Instead, print logs with the macros.
#[cfg(feature = "serial_print")]
#[doc(hidden)]
pub fn log_print(args: Arguments) {
use crate::device::serial::SERIAL;
use core::fmt::Write;
use x86_64::instructions::interrupts;
interrupts::without_interrupts(|| {
SERIAL
.lock()
.write_fmt(args)
.expect("Printing to serial failed");
});
}
/// This macro should not be directly called.
#[macro_export]
macro_rules! log_print {
($($arg:tt)*) => {
$crate::log::log_print(format_args!($($arg)*))
};
}
#[macro_export]
macro_rules! trace {
($($arg:tt)*) => {
if $crate::log::Logger::trace() {
$crate::log_print!("[trace]:");
$crate::log_print!($($arg)*);
$crate::log_print!("\n");
}
};
}
#[macro_export]
macro_rules! debug {
($($arg:tt)*) => {
if $crate::log::Logger::debug() {
$crate::log_print!("[debug]:");
$crate::log_print!($($arg)*);
$crate::log_print!("\n");
}
};
}
#[macro_export]
macro_rules! info {
($($arg:tt)*) => {
if $crate::log::Logger::info() {
($crate::log_print!("[info]:"));
($crate::log_print!($($arg)*));
($crate::log_print!("\n"));
}
};
}
#[macro_export]
macro_rules! warn {
($($arg:tt)*) => {
if $crate::log::Logger::warn() {
$crate::log_print!("[warn]:");
$crate::log_print!($($arg)*);
$crate::log_print!("\n");
}
};
}
#[macro_export]
macro_rules! error {
($($arg:tt)*) => {
if $crate::log::Logger::error() {
$crate::log_print!("[error]:");
$crate::log_print!($($arg)*);
$crate::log_print!("\n");
}
};
}
pub const LOGGER: Logger = Logger::default_log_level();
pub struct Logger {
log_level: LogLevel,
}
impl Logger {
pub const fn default_log_level() -> Logger {
Logger {
log_level: DEFAULT_LOG_LEVEL,
}
}
pub fn trace() -> bool {
LOGGER.log_level <= LogLevel::Trace
}
pub fn debug() -> bool {
LOGGER.log_level <= LogLevel::Debug
}
pub fn info() -> bool {
LOGGER.log_level <= LogLevel::Info
}
pub fn warn() -> bool {
LOGGER.log_level <= LogLevel::Warn
}
pub fn error() -> bool {
LOGGER.log_level <= LogLevel::Error
}
}
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
pub enum LogLevel {
Trace,
Debug,
Info,
Warn,
Error,
Close,
}
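// Hedged usage sketch: with `DEFAULT_LOG_LEVEL` lowered to `LogLevel::Debug`
// in the config, the macros above print leveled, guarded messages. The values
// logged here are illustrative.
fn demo_logging() {
    crate::info!("frame allocator initialized");
    crate::debug!("mapping page at va {:#x}", 0xffff_ff00_0000_0000usize);
}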

View File

@ -0,0 +1,247 @@
use core::ops::{Add, AddAssign, Sub, SubAssign};
use alloc::fmt;
use crate::config::{PAGE_SIZE, PHYS_OFFSET};
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
#[repr(transparent)]
pub struct PhysAddr(pub usize);
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
#[repr(transparent)]
pub struct VirtAddr(pub usize);
pub const fn phys_to_virt(pa: usize) -> usize {
pa + PHYS_OFFSET
}
pub const fn virt_to_phys(va: usize) -> usize {
va - PHYS_OFFSET
}
impl PhysAddr {
pub const fn kvaddr(self) -> VirtAddr {
VirtAddr(phys_to_virt(self.0))
}
pub const fn align_down(self) -> Self {
Self(align_down(self.0))
}
pub const fn align_up(self) -> Self {
Self(align_up(self.0))
}
pub const fn page_offset(self) -> usize {
page_offset(self.0)
}
pub const fn is_aligned(self) -> bool {
is_aligned(self.0)
}
}
impl fmt::Debug for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("PhysAddr")
.field(&format_args!("{:#x}", self.0))
.finish()
}
}
impl fmt::Binary for PhysAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Binary::fmt(&self.0, f)
}
}
impl fmt::LowerHex for PhysAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::LowerHex::fmt(&self.0, f)
}
}
impl fmt::Octal for PhysAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Octal::fmt(&self.0, f)
}
}
impl fmt::UpperHex for PhysAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::UpperHex::fmt(&self.0, f)
}
}
impl fmt::Pointer for PhysAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Pointer::fmt(&(self.0 as *const ()), f)
}
}
impl Add<usize> for PhysAddr {
type Output = Self;
#[inline]
fn add(self, rhs: usize) -> Self::Output {
PhysAddr(self.0 + rhs)
}
}
impl AddAssign<usize> for PhysAddr {
#[inline]
fn add_assign(&mut self, rhs: usize) {
*self = *self + rhs;
}
}
impl Sub<usize> for PhysAddr {
type Output = Self;
#[inline]
fn sub(self, rhs: usize) -> Self::Output {
PhysAddr(self.0 - rhs)
}
}
impl SubAssign<usize> for PhysAddr {
#[inline]
fn sub_assign(&mut self, rhs: usize) {
*self = *self - rhs;
}
}
impl Sub<PhysAddr> for PhysAddr {
type Output = u64;
#[inline]
fn sub(self, rhs: PhysAddr) -> Self::Output {
self.0.checked_sub(rhs.0).unwrap().try_into().unwrap()
}
}
impl VirtAddr {
pub const fn as_ptr(self) -> *mut u8 {
self.0 as _
}
pub const fn align_down(self) -> Self {
Self(align_down(self.0))
}
pub const fn align_up(self) -> Self {
Self(align_up(self.0))
}
pub const fn page_offset(self) -> usize {
page_offset(self.0)
}
pub const fn is_aligned(self) -> bool {
is_aligned(self.0)
}
}
impl VirtAddr {
pub fn get_bytes_array(&self) -> &'static mut [u8] {
unsafe { core::slice::from_raw_parts_mut(self.0 as *mut u8, 4096) }
}
pub fn get_ref<T>(&self) -> &'static T {
unsafe { (self.0 as *const T).as_ref().unwrap() }
}
pub fn get_mut<T>(&self) -> &'static mut T {
unsafe { (self.0 as *mut T).as_mut().unwrap() }
}
}
impl fmt::Debug for VirtAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("VirtAddr")
.field(&format_args!("{:#x}", self.0))
.finish()
}
}
impl fmt::Binary for VirtAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Binary::fmt(&self.0, f)
}
}
impl fmt::LowerHex for VirtAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::LowerHex::fmt(&self.0, f)
}
}
impl fmt::Octal for VirtAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Octal::fmt(&self.0, f)
}
}
impl fmt::UpperHex for VirtAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::UpperHex::fmt(&self.0, f)
}
}
impl fmt::Pointer for VirtAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Pointer::fmt(&(self.0 as *const ()), f)
}
}
impl Add<usize> for VirtAddr {
type Output = Self;
#[inline]
fn add(self, rhs: usize) -> Self::Output {
VirtAddr(self.0 + rhs)
}
}
impl AddAssign<usize> for VirtAddr {
#[inline]
fn add_assign(&mut self, rhs: usize) {
*self = *self + rhs;
}
}
impl Sub<usize> for VirtAddr {
type Output = Self;
#[inline]
fn sub(self, rhs: usize) -> Self::Output {
VirtAddr(self.0 - rhs)
}
}
impl SubAssign<usize> for VirtAddr {
#[inline]
fn sub_assign(&mut self, rhs: usize) {
*self = *self - rhs;
}
}
impl Sub<VirtAddr> for VirtAddr {
type Output = u64;
#[inline]
fn sub(self, rhs: VirtAddr) -> Self::Output {
self.0.checked_sub(rhs.0).unwrap().try_into().unwrap()
}
}
pub const fn align_down(p: usize) -> usize {
p & !(PAGE_SIZE - 1)
}
pub const fn align_up(p: usize) -> usize {
(p + PAGE_SIZE - 1) & !(PAGE_SIZE - 1)
}
pub const fn page_offset(p: usize) -> usize {
p & (PAGE_SIZE - 1)
}
pub const fn is_aligned(p: usize) -> bool {
page_offset(p) == 0
}
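// Hedged sketch (not part of this commit): sanity checks for the helpers
// above, assuming `PAGE_SIZE` = 0x1000 as set in the config. This mirrors the
// `#[cfg(test)]` convention used in `sync/atomic_bits.rs`.
#[cfg(test)]
mod test {
    use super::*;
    #[test]
    fn align_helpers() {
        assert_eq!(align_down(0x1234), 0x1000);
        assert_eq!(align_up(0x1234), 0x2000);
        assert_eq!(page_offset(0x1234), 0x234);
        assert!(is_aligned(0x2000));
        assert!(!is_aligned(0x2001));
    }
}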

View File

@ -0,0 +1,103 @@
use alloc::vec::Vec;
use crate::{config::PAGE_SIZE, vm::Paddr, UPSafeCell};
use super::address::PhysAddr;
use lazy_static::lazy_static;
lazy_static! {
static ref FRAME_ALLOCATOR: UPSafeCell<FreeListAllocator> = unsafe {
UPSafeCell::new(FreeListAllocator {
current: 0,
end: 0,
free_list: Vec::new(),
})
};
}
trait FrameAllocator {
fn alloc(&mut self) -> Option<usize>;
fn dealloc(&mut self, value: usize);
}
pub struct FreeListAllocator {
current: usize,
end: usize,
free_list: Vec<usize>,
}
impl FreeListAllocator {
fn alloc(&mut self) -> Option<usize> {
if let Some(x) = self.free_list.pop() {
return Some(x);
}
if self.current < self.end {
let ret = self.current;
self.current += PAGE_SIZE;
return Some(ret);
}
// The free list is empty and the initial range is exhausted.
None
}
fn dealloc(&mut self, value: usize) {
assert!(!self.free_list.contains(&value));
self.free_list.push(value);
}
}
#[derive(Debug, Clone)]
// #[repr(transparent)]
pub struct PhysFrame {
start_pa: usize,
}
impl PhysFrame {
pub const fn start_pa(&self) -> PhysAddr {
PhysAddr(self.start_pa)
}
pub const fn end_pa(&self) -> PhysAddr {
PhysAddr(self.start_pa + PAGE_SIZE)
}
pub fn alloc() -> Option<Self> {
FRAME_ALLOCATOR
.exclusive_access()
.alloc()
.map(|pa| Self { start_pa: pa })
}
pub fn alloc_with_paddr(paddr: Paddr) -> Option<Self> {
// FIXME: need to check whether the physical address is invalid or not
Some(Self { start_pa: paddr })
}
pub fn dealloc(pa: usize) {
FRAME_ALLOCATOR.exclusive_access().dealloc(pa)
}
pub fn alloc_zero() -> Option<Self> {
let mut f = Self::alloc()?;
f.zero();
Some(f)
}
pub fn zero(&mut self) {
unsafe { core::ptr::write_bytes(self.start_pa().kvaddr().as_ptr(), 0, PAGE_SIZE) }
}
pub fn as_slice(&self) -> &mut [u8] {
unsafe { core::slice::from_raw_parts_mut(self.start_pa().kvaddr().as_ptr(), PAGE_SIZE) }
}
}
impl Drop for PhysFrame {
fn drop(&mut self) {
FRAME_ALLOCATOR.exclusive_access().dealloc(self.start_pa);
}
}
pub(crate) fn init(start: usize, size: usize) {
FRAME_ALLOCATOR.exclusive_access().current = start;
FRAME_ALLOCATOR.exclusive_access().end = start + size;
}

View File

@ -0,0 +1,20 @@
use crate::config::KERNEL_HEAP_SIZE;
use buddy_system_allocator::LockedHeap;
#[global_allocator]
static HEAP_ALLOCATOR: LockedHeap = LockedHeap::empty();
#[alloc_error_handler]
pub fn handle_alloc_error(layout: core::alloc::Layout) -> ! {
panic!("Heap allocation error, layout = {:?}", layout);
}
static mut HEAP_SPACE: [u8; KERNEL_HEAP_SIZE] = [0; KERNEL_HEAP_SIZE];
pub fn init() {
unsafe {
HEAP_ALLOCATOR
.lock()
.init(HEAP_SPACE.as_ptr() as usize, KERNEL_HEAP_SIZE);
}
}

View File

@ -0,0 +1,332 @@
use super::{page_table::PageTable, *};
use crate::prelude::*;
use crate::{
config::PAGE_SIZE,
mm::address::is_aligned,
vm::{VmFrame, VmFrameVec},
*,
};
use alloc::collections::{btree_map::Entry, BTreeMap};
use core::fmt;
pub struct MapArea {
/// flags
pub flags: PTFlags,
/// start virtual address
pub start_va: VirtAddr,
/// the size of this area
pub size: usize,
/// all the map information
pub mapper: BTreeMap<VirtAddr, VmFrame>,
}
pub struct MemorySet {
pub pt: PageTable,
/// all the map areas, sorted by start virtual address
areas: BTreeMap<VirtAddr, MapArea>,
}
impl MapArea {
pub fn mapped_size(&self) -> usize {
self.size
}
pub fn clone(&self) -> Self {
let mut mapper = BTreeMap::new();
for (&va, old) in &self.mapper {
let new = PhysFrame::alloc().unwrap();
new.as_slice()
.copy_from_slice(old.physical_frame.as_slice());
mapper.insert(va, unsafe { VmFrame::new(new) });
}
Self {
start_va: self.start_va,
size: self.size,
flags: self.flags,
mapper,
}
}
/// This function will map the virtual address to the given physical frames
pub fn new(
start_va: VirtAddr,
size: usize,
flags: PTFlags,
physical_frames: VmFrameVec,
) -> Self {
assert!(
start_va.is_aligned()
&& is_aligned(size)
&& physical_frames.len() == (size / PAGE_SIZE)
);
let mut map_area = Self {
flags,
start_va,
size,
mapper: BTreeMap::new(),
};
let mut current_va = start_va.clone();
let page_size = size / PAGE_SIZE;
let mut phy_frame_iter = physical_frames.iter();
for i in 0..page_size {
let vm_frame = phy_frame_iter.next().unwrap();
map_area.map_with_physical_address(current_va, vm_frame.clone());
current_va += PAGE_SIZE;
}
map_area
}
pub fn map_with_physical_address(&mut self, va: VirtAddr, pa: VmFrame) -> PhysAddr {
assert!(va.is_aligned());
match self.mapper.entry(va) {
Entry::Occupied(_) => panic!("virtual address {:#x?} is already mapped", va),
Entry::Vacant(e) => e.insert(pa).physical_frame.start_pa(),
}
}
pub fn map(&mut self, va: VirtAddr) -> PhysAddr {
assert!(va.is_aligned());
match self.mapper.entry(va) {
Entry::Occupied(e) => e.get().physical_frame.start_pa(),
Entry::Vacant(e) => e
.insert(VmFrame::alloc_zero().unwrap())
.physical_frame
.start_pa(),
}
}
pub fn unmap(&mut self, va: VirtAddr) -> Option<VmFrame> {
self.mapper.remove(&va)
}
pub fn write_data(&mut self, addr: usize, data: &[u8]) {
let mut current_start_address = addr;
let mut remain = data.len();
let mut processed = 0;
for (va, pa) in self.mapper.iter() {
if current_start_address >= va.0 && current_start_address < va.0 + PAGE_SIZE {
let offset = current_start_address - va.0;
let copy_len = (va.0 + PAGE_SIZE - current_start_address).min(remain);
let src = &data[processed..processed + copy_len];
let dst =
&mut pa.start_pa().kvaddr().get_bytes_array()[offset..(offset + copy_len)];
dst.copy_from_slice(src);
processed += copy_len;
remain -= copy_len;
if remain == 0 {
return;
}
current_start_address = va.0 + PAGE_SIZE;
}
}
}
pub fn read_data(&self, addr: usize, data: &mut [u8]) {
let mut start = addr;
let mut remain = data.len();
let mut processed = 0;
for (va, pa) in self.mapper.iter() {
if start >= va.0 && start < va.0 + PAGE_SIZE {
let offset = start - va.0;
let copy_len = (va.0 + PAGE_SIZE - start).min(remain);
let src = &mut data[processed..processed + copy_len];
let dst = &pa.start_pa().kvaddr().get_bytes_array()[offset..(offset + copy_len)];
src.copy_from_slice(dst);
processed += copy_len;
remain -= copy_len;
if remain == 0 {
return;
}
start = va.0 + PAGE_SIZE;
}
}
}
}
// impl Clone for MapArea {
// fn clone(&self) -> Self {
// let mut mapper = BTreeMap::new();
// for (&va, old) in &self.mapper {
// let new = VmFrame::alloc().unwrap();
// new.physical_frame
// .exclusive_access()
// .as_slice()
// .copy_from_slice(old.physical_frame.exclusive_access().as_slice());
// mapper.insert(va, new);
// }
// Self {
// flags: self.flags,
// mapper,
// }
// }
// }
impl MemorySet {
pub fn map(&mut self, area: MapArea) {
if area.size > 0 {
// TODO: check overlap
if let Entry::Vacant(e) = self.areas.entry(area.start_va) {
self.pt.map_area(e.insert(area));
} else {
panic!(
"MemorySet::map: MapArea starts from {:#x?} is existed!",
area.start_va
);
}
}
}
/// Determine whether a virtual address lies in a mapped area
pub fn is_mapped(&self, vaddr: VirtAddr) -> bool {
for (start_address, map_area) in self.areas.iter() {
if *start_address > vaddr {
break;
}
if *start_address <= vaddr && vaddr < *start_address + map_area.mapped_size() {
return true;
}
}
false
}
pub fn new() -> Self {
Self {
pt: PageTable::new(),
areas: BTreeMap::new(),
}
}
pub fn unmap(&mut self, va: VirtAddr) -> Result<()> {
if let Some(area) = self.areas.remove(&va) {
self.pt.unmap_area(&area);
Ok(())
} else {
Err(Error::PageFault)
}
}
pub fn clear(&mut self) {
for area in self.areas.values_mut() {
self.pt.unmap_area(area);
}
self.areas.clear();
}
pub fn write_bytes(&mut self, addr: usize, data: &[u8]) -> Result<()> {
let mut current_addr = addr;
let mut remain = data.len();
let start_write = false;
let mut offset = 0usize;
for (va, area) in self.areas.iter_mut() {
if current_addr >= va.0 && current_addr < area.size + va.0 {
if !area.flags.contains(PTFlags::WRITABLE) {
return Err(Error::PageFault);
}
let write_len = remain.min(area.size + va.0 - current_addr);
area.write_data(current_addr, &data[offset..(offset + write_len)]);
offset += write_len;
remain -= write_len;
// remain -= (va.0 + area.size - current_addr).min(remain);
if remain == 0 {
return Ok(());
}
current_addr = va.0 + area.size;
} else if start_write {
return Err(Error::PageFault);
}
}
Err(Error::PageFault)
}
pub fn read_bytes(&self, addr: usize, data: &mut [u8]) -> Result<()> {
let mut current_addr = addr;
let mut remain = data.len();
let mut offset = 0usize;
let start_read = false;
for (va, area) in self.areas.iter() {
if current_addr >= va.0 && current_addr < area.size + va.0 {
let read_len = remain.min(area.size + va.0 - current_addr);
area.read_data(current_addr, &mut data[offset..(offset + read_len)]);
remain -= read_len;
offset += read_len;
// remain -= (va.0 + area.size - current_addr).min(remain);
if remain == 0 {
return Ok(());
}
current_addr = va.0 + area.size;
} else if start_read {
return Err(Error::PageFault);
}
}
Err(Error::PageFault)
}
}
impl Clone for MemorySet {
fn clone(&self) -> Self {
let mut ms = Self::new();
for area in self.areas.values() {
ms.map(area.clone());
}
ms
}
}
impl Drop for MemorySet {
fn drop(&mut self) {
self.clear();
}
}
impl fmt::Debug for MapArea {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("MapArea")
.field("flags", &self.flags)
.field("mapped area", &self.mapper)
.finish()
}
}
impl fmt::Debug for MemorySet {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("MemorySet")
.field("areas", &self.areas)
.field("page_table_root", &self.pt.root_pa)
.finish()
}
}
// pub fn load_app(elf_data: &[u8]) -> (usize, MemorySet) {
// let elf = ElfFile::new(elf_data).expect("invalid ELF file");
// assert_eq!(elf.header.pt1.class(), header::Class::SixtyFour, "64-bit ELF required");
// assert_eq!(elf.header.pt2.type_().as_type(), header::Type::Executable, "ELF is not an executable object");
// assert_eq!(elf.header.pt2.machine().as_machine(), header::Machine::X86_64, "invalid ELF arch");
// let mut ms = MemorySet::new();
// for ph in elf.program_iter() {
// if ph.get_type() != Ok(Type::Load) {
// continue;
// }
// let va = VirtAddr(ph.virtual_addr() as _);
// let offset = va.page_offset();
// let area_start = va.align_down();
// let area_end = VirtAddr((ph.virtual_addr() + ph.mem_size()) as _).align_up();
// let data = match ph.get_data(&elf).unwrap() {
// SegmentData::Undefined(data) => data,
// _ => panic!("failed to get ELF segment data"),
// };
// let mut flags = PTFlags::PRESENT | PTFlags::USER;
// if ph.flags().is_write() {
// flags |= PTFlags::WRITABLE;
// }
// let mut area = MapArea::new(area_start, area_end.0 - area_start.0, flags);
// area.write_data(offset, data);
// ms.insert(area);
// }
// ms.insert(MapArea::new(VirtAddr(USTACK_TOP - USTACK_SIZE), USTACK_SIZE,
// PTFlags::PRESENT | PTFlags::WRITABLE | PTFlags::USER));
// (elf.header.pt2.entry_point() as usize, ms)
// }

View File

@ -0,0 +1,38 @@
//! memory management.
pub mod address;
mod frame_allocator;
mod heap_allocator;
mod memory_set;
mod page_table;
use address::PhysAddr;
use address::VirtAddr;
pub use self::{frame_allocator::*, memory_set::*, page_table::*};
bitflags::bitflags! {
/// Possible flags for a page table entry.
pub struct PTFlags: usize {
/// Specifies whether the mapped frame or page table is loaded in memory.
const PRESENT = 1;
/// Controls whether writes to the mapped frames are allowed.
const WRITABLE = 1 << 1;
/// Controls whether accesses from userspace (i.e. ring 3) are permitted.
const USER = 1 << 2;
/// If this bit is set, a “write-through” policy is used for the cache, else a “write-back”
/// policy is used.
const WRITE_THROUGH = 1 << 3;
/// Disables caching for the pointed entry.
const NO_CACHE = 1 << 4;
/// Indicates that the mapping is present in all address spaces, so it isn't flushed from
/// the TLB on an address space switch.
const GLOBAL = 1 << 8;
}
}
pub(crate) fn init(start: u64, size: u64) {
heap_allocator::init();
frame_allocator::init(start as usize, size as usize);
page_table::init();
}

View File

@ -0,0 +1,210 @@
use super::{memory_set::MapArea, *};
use crate::{
config::{ENTRY_COUNT, PAGE_SIZE, PHYS_OFFSET},
vm::VmFrame,
*,
};
use alloc::{collections::BTreeMap, vec, vec::Vec};
use core::fmt;
use lazy_static::lazy_static;
lazy_static! {
pub(crate) static ref ALL_MAPPED_PTE: UPSafeCell<BTreeMap<usize, PageTableEntry>> =
unsafe { UPSafeCell::new(BTreeMap::new()) };
}
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct PageTableEntry(usize);
impl PageTableEntry {
const PHYS_ADDR_MASK: usize = !(PAGE_SIZE - 1);
pub const fn new_page(pa: PhysAddr, flags: PTFlags) -> Self {
Self((pa.0 & Self::PHYS_ADDR_MASK) | flags.bits)
}
const fn pa(self) -> PhysAddr {
PhysAddr(self.0 as usize & Self::PHYS_ADDR_MASK)
}
const fn flags(self) -> PTFlags {
PTFlags::from_bits_truncate(self.0)
}
const fn is_unused(self) -> bool {
self.0 == 0
}
const fn is_present(self) -> bool {
(self.0 & PTFlags::PRESENT.bits) != 0
}
}
impl fmt::Debug for PageTableEntry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut f = f.debug_struct("PageTableEntry");
f.field("raw", &self.0)
.field("pa", &self.pa())
.field("flags", &self.flags())
.finish()
}
}
pub struct PageTable {
pub root_pa: PhysAddr,
/// Stores all the physical frames that back the page table itself, e.g. the frame holding `root_pa`
tables: Vec<VmFrame>,
}
impl PageTable {
pub fn new() -> Self {
let root_frame = VmFrame::alloc_zero().unwrap();
let p4 = table_of(root_frame.start_pa());
let map_pte = ALL_MAPPED_PTE.exclusive_access();
for (index, pte) in map_pte.iter() {
p4[*index] = *pte;
}
Self {
root_pa: root_frame.start_pa(),
tables: vec![root_frame],
}
}
pub fn print_kernel(&self) {
let p4 = table_of(self.root_pa);
for i in 0..(256) {
let phys = PhysAddr(i << (12 + 27));
let a = p4[p4_index(phys.kvaddr())];
if a.is_present() {
println!("index:{:?},PTE:{:?}", i, a);
}
}
}
pub fn map(&mut self, va: VirtAddr, pa: PhysAddr, flags: PTFlags) {
let entry = self.get_entry_or_create(va).unwrap();
if !entry.is_unused() {
panic!("{:#x?} is mapped before mapping", va);
}
*entry = PageTableEntry::new_page(pa.align_down(), flags);
}
pub fn unmap(&mut self, va: VirtAddr) {
let entry = get_entry(self.root_pa, va).unwrap();
if entry.is_unused() {
panic!("{:#x?} is invalid before unmapping", va);
}
entry.0 = 0;
}
pub fn map_area(&mut self, area: &MapArea) {
for (va, pa) in area.mapper.iter() {
assert!(pa.start_pa().0 < PHYS_OFFSET);
self.map(*va, pa.start_pa(), area.flags);
}
}
pub fn unmap_area(&mut self, area: &MapArea) {
for (va, _) in area.mapper.iter() {
self.unmap(*va);
}
}
}
impl PageTable {
fn alloc_table(&mut self) -> PhysAddr {
let frame = VmFrame::alloc_zero().unwrap();
let pa = frame.start_pa();
self.tables.push(frame);
pa
}
fn get_entry_or_create(&mut self, va: VirtAddr) -> Option<&mut PageTableEntry> {
let p4 = table_of(self.root_pa);
let p4e = &mut p4[p4_index(va)];
let p3 = next_table_or_create(p4e, || self.alloc_table())?;
let p3e = &mut p3[p3_index(va)];
let p2 = next_table_or_create(p3e, || self.alloc_table())?;
let p2e = &mut p2[p2_index(va)];
let p1 = next_table_or_create(p2e, || self.alloc_table())?;
let p1e = &mut p1[p1_index(va)];
Some(p1e)
}
}
const fn p4_index(va: VirtAddr) -> usize {
(va.0 >> (12 + 27)) & (ENTRY_COUNT - 1)
}
const fn p3_index(va: VirtAddr) -> usize {
(va.0 >> (12 + 18)) & (ENTRY_COUNT - 1)
}
const fn p2_index(va: VirtAddr) -> usize {
(va.0 >> (12 + 9)) & (ENTRY_COUNT - 1)
}
const fn p1_index(va: VirtAddr) -> usize {
(va.0 >> 12) & (ENTRY_COUNT - 1)
}
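// Hedged worked example: with ENTRY_COUNT = 512, the kernel base address
// KERNEL_OFFSET = 0xffff_ff00_0000_0000 decomposes into
//   p4_index = 510, p3_index = 0, p2_index = 0, p1_index = 0,
// i.e. the kernel image lives under the 510th entry of the top-level table.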
pub fn query(root_pa: PhysAddr, va: VirtAddr) -> Option<(PhysAddr, PTFlags)> {
let entry = get_entry(root_pa, va)?;
if entry.is_unused() {
return None;
}
let off = va.page_offset();
Some((PhysAddr(entry.pa().0 + off), entry.flags()))
}
fn get_entry(root_pa: PhysAddr, va: VirtAddr) -> Option<&'static mut PageTableEntry> {
let p4 = table_of(root_pa);
let p4e = &mut p4[p4_index(va)];
let p3 = next_table(p4e)?;
let p3e = &mut p3[p3_index(va)];
let p2 = next_table(p3e)?;
let p2e = &mut p2[p2_index(va)];
let p1 = next_table(p2e)?;
let p1e = &mut p1[p1_index(va)];
Some(p1e)
}
fn table_of<'a>(pa: PhysAddr) -> &'a mut [PageTableEntry] {
let ptr = pa.kvaddr().as_ptr() as *mut _;
unsafe { core::slice::from_raw_parts_mut(ptr, ENTRY_COUNT) }
}
fn next_table<'a>(entry: &PageTableEntry) -> Option<&'a mut [PageTableEntry]> {
if entry.is_present() {
Some(table_of(entry.pa()))
} else {
None
}
}
fn next_table_or_create<'a>(
entry: &mut PageTableEntry,
mut alloc: impl FnMut() -> PhysAddr,
) -> Option<&'a mut [PageTableEntry]> {
if entry.is_unused() {
let pa = alloc();
*entry = PageTableEntry::new_page(pa, PTFlags::PRESENT | PTFlags::WRITABLE | PTFlags::USER);
Some(table_of(pa))
} else {
next_table(entry)
}
}
pub(crate) fn init() {
let cr3 = x86_64_util::get_cr3();
let p4 = table_of(PhysAddr(cr3));
// Cancel mapping in lowest addresses.
p4[0].0 = 0;
// There are existing mappings at indexes 1, 2 and 3, so user space must not use those ranges.
let mut map_pte = ALL_MAPPED_PTE.exclusive_access();
for i in 0..512 {
if !p4[i].flags().is_empty() {
map_pte.insert(i, p4[i]);
}
}
}

View File

@ -0,0 +1,11 @@
//! The prelude.
pub type Result<T> = core::result::Result<T, crate::error::Error>;
pub(crate) use alloc::boxed::Box;
pub(crate) use alloc::sync::Arc;
pub(crate) use alloc::vec::Vec;
pub(crate) use core::any::Any;
pub(crate) use crate::util::AlignExt;
pub use crate::vm::{Paddr, Vaddr};

View File

@ -0,0 +1,389 @@
use core::fmt::{self};
use core::sync::atomic::{AtomicU64, Ordering::Relaxed};
use crate::prelude::*;
/// A fixed number of bits that can be safely shared between threads.
pub struct AtomicBits {
num_bits: usize,
u64s: Box<[AtomicU64]>,
}
impl AtomicBits {
/// Create a given number of bit 0s.
pub fn new_zeroes(num_bits: usize) -> Self {
Self::new(0, num_bits)
}
/// Create a given number of bit 1s.
pub fn new_ones(num_bits: usize) -> Self {
Self::new(!0, num_bits)
}
fn new(u64_val: u64, num_bits: usize) -> Self {
let num_u64s = num_bits.align_up(64) / 64;
let u64s = {
let mut u64s = Vec::with_capacity(num_u64s);
for _ in 0..num_u64s {
u64s.push(AtomicU64::new(u64_val));
}
u64s.into_boxed_slice()
};
Self { num_bits, u64s }
}
/// Returns the length in bits.
pub fn len(&self) -> usize {
self.num_bits
}
/// Get the bit at a given position.
pub fn get(&self, index: usize) -> bool {
assert!(index < self.num_bits);
let i = index / 64;
let j = index % 64;
// SAFETY: `i` is in range because `index` is in range.
let u64_atomic = unsafe { self.u64s.get_unchecked(i) };
(u64_atomic.load(Relaxed) & 1 << j) != 0
}
/// Set the bit at a given position.
pub fn set(&self, index: usize, new_bit: bool) {
assert!(index < self.num_bits);
let i = index / 64;
let j = index % 64;
// SAFETY: `i` is in range because `index` is in range.
let u64_atomic = unsafe { self.u64s.get_unchecked(i) };
if new_bit {
u64_atomic.fetch_or(1 << j, Relaxed);
} else {
u64_atomic.fetch_and(!(1 << j), Relaxed);
}
}
/// Clear all the bits.
pub fn clear(&self) {
todo!()
}
/// Returns whether all bits are ones.
pub fn is_full(&self) -> bool {
self.match_pattern(!0)
}
/// Returns whether all bits are zeroes.
pub fn is_empty(&self) -> bool {
self.match_pattern(0)
}
fn match_pattern(&self, pattern: u64) -> bool {
todo!()
}
/// Get an iterator for the bits.
pub fn iter<'a>(&'a self) -> Iter<'a> {
Iter::new(self)
}
/// Get an iterator that gives the positions of all 1s in the bits.
pub fn iter_ones<'a>(&'a self) -> OnesIter<'a> {
OnesIter::new(self)
}
/// Get an iterator that gives the positions of all 0s in the bits.
pub fn iter_zeroes<'a>(&'a self) -> ZeroesIter<'a> {
ZeroesIter::new(self)
}
}
/// An iterator that accesses the bits of an `AtomicBits`.
pub struct Iter<'a> {
bits: &'a AtomicBits,
bit_i: usize,
}
impl<'a> Iter<'a> {
fn new(bits: &'a AtomicBits) -> Self {
Self { bits, bit_i: 0 }
}
}
impl<'a> Iterator for Iter<'a> {
type Item = bool;
fn next(&mut self) -> Option<bool> {
if self.bit_i < self.bits.len() {
let bit = self.bits.get(self.bit_i);
self.bit_i += 1;
Some(bit)
} else {
None
}
}
}
/// An iterator that returns the positions of 1s in an `AtomicBits`.
pub struct OnesIter<'a> {
bits: &'a AtomicBits,
u64_idx: usize,
u64_val: u64,
num_garbage_bits_in_last_u64: u8,
}
impl<'a> OnesIter<'a> {
fn new(bits: &'a AtomicBits) -> Self {
let num_garbage_bits_in_last_u64 = {
if bits.len() % 64 != 0 {
64 - ((bits.len() % 64) as u8)
} else {
0
}
};
let mut new_self = Self {
bits,
u64_idx: 0,
u64_val: 0, // NOT initialized yet!
num_garbage_bits_in_last_u64,
};
new_self.u64_val = new_self.get_u64_val(0);
new_self
}
/// Get the u64 value at the given position, removing the garbage bits if any.
fn get_u64_val(&self, idx: usize) -> u64 {
let mut u64_val = self.bits.u64s[idx].load(Relaxed);
// Clear the garbage bits, if any, in the last u64 so that they
// won't affect the result of the iterator.
if idx == self.bits.u64s.len() - 1 && self.num_garbage_bits_in_last_u64 > 0 {
let num_valid_bits_in_last_u64 = 64 - self.num_garbage_bits_in_last_u64;
let valid_bits_mask = (1 << num_valid_bits_in_last_u64) - 1;
u64_val &= valid_bits_mask;
}
u64_val
}
}
impl<'a> Iterator for OnesIter<'a> {
type Item = usize;
fn next(&mut self) -> Option<usize> {
loop {
if self.u64_idx >= self.bits.u64s.len() {
return None;
}
let first_one_in_u64 = self.u64_val.trailing_zeros() as usize;
if first_one_in_u64 < 64 {
self.u64_val &= !(1 << first_one_in_u64);
let one_pos = self.u64_idx * 64 + first_one_in_u64;
return Some(one_pos);
}
self.u64_idx += 1;
if self.u64_idx < self.bits.u64s.len() {
self.u64_val = self.get_u64_val(self.u64_idx);
}
}
}
}
/// An iterator that returns the positions of 0s in an `AtomicBits`.
pub struct ZeroesIter<'a> {
bits: &'a AtomicBits,
u64_idx: usize,
u64_val: u64,
num_garbage_bits_in_last_u64: u8,
}
impl<'a> ZeroesIter<'a> {
fn new(bits: &'a AtomicBits) -> Self {
let num_garbage_bits_in_last_u64 = {
if bits.len() % 64 != 0 {
64 - ((bits.len() % 64) as u8)
} else {
0
}
};
let mut new_self = Self {
bits,
u64_idx: 0,
u64_val: 0, // NOT initialized yet!
num_garbage_bits_in_last_u64,
};
new_self.u64_val = new_self.get_u64_val(0);
new_self
}
/// Get the u64 value at the given position, removing the garbage bits if any.
fn get_u64_val(&self, idx: usize) -> u64 {
let mut u64_val = self.bits.u64s[idx].load(Relaxed);
// Set all garbage bits, if any, in the last u64 so that they
// won't affect the result of the iterator.
if idx == self.bits.u64s.len() - 1 && self.num_garbage_bits_in_last_u64 > 0 {
let num_valid_bits_in_last_u64 = 64 - self.num_garbage_bits_in_last_u64;
let garbage_bits_mask = !((1 << num_valid_bits_in_last_u64) - 1);
u64_val |= garbage_bits_mask;
}
u64_val
}
}
impl<'a> Iterator for ZeroesIter<'a> {
type Item = usize;
fn next(&mut self) -> Option<usize> {
loop {
if self.u64_idx >= self.bits.u64s.len() {
return None;
}
let first_zero_in_u64 = self.u64_val.trailing_ones() as usize;
if first_zero_in_u64 < 64 {
self.u64_val |= 1 << first_zero_in_u64;
let zero_pos = self.u64_idx * 64 + first_zero_in_u64;
return Some(zero_pos);
}
self.u64_idx += 1;
if self.u64_idx < self.bits.u64s.len() {
self.u64_val = self.get_u64_val(self.u64_idx);
}
}
}
}
impl Clone for AtomicBits {
fn clone(&self) -> Self {
let num_bits = self.num_bits;
let num_u64s = self.u64s.len();
let u64s = {
let mut u64s = Vec::with_capacity(num_u64s);
for u64_i in 0..num_u64s {
let u64_val = self.u64s[u64_i].load(Relaxed);
u64s.push(AtomicU64::new(u64_val));
}
u64s.into_boxed_slice()
};
Self { num_bits, u64s }
}
}
impl fmt::Debug for AtomicBits {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "AtomicBits(")?;
for bit in self.iter() {
if bit {
write!(f, "1")?;
} else {
write!(f, "0")?;
}
}
write!(f, ")")
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn new() {
let bits = AtomicBits::new_zeroes(1);
assert!(bits.len() == 1);
let bits = AtomicBits::new_zeroes(128);
assert!(bits.len() == 128);
let bits = AtomicBits::new_ones(7);
assert!(bits.len() == 7);
let bits = AtomicBits::new_zeroes(65);
assert!(bits.len() == 65);
}
#[test]
fn set_get() {
let bits = AtomicBits::new_zeroes(128);
for i in 0..bits.len() {
assert!(bits.get(i) == false);
bits.set(i, true);
assert!(bits.get(i) == true);
bits.set(i, false);
assert!(bits.get(i) == false);
}
let bits = AtomicBits::new_ones(128);
for i in 0..bits.len() {
assert!(bits.get(i) == true);
bits.set(i, false);
assert!(bits.get(i) == false);
bits.set(i, true);
assert!(bits.get(i) == true);
}
}
#[test]
fn iter_ones() {
let bits = AtomicBits::new_zeroes(1);
assert!(bits.iter_ones().count() == 0);
let bits = AtomicBits::new_zeroes(400);
assert!(bits.iter_ones().count() == 0);
let bits = AtomicBits::new_ones(1);
assert!(bits.iter_ones().count() == 1);
let bits = AtomicBits::new_ones(24);
assert!(bits.iter_ones().count() == 24);
let bits = AtomicBits::new_ones(64);
assert!(bits.iter_ones().count() == 64);
let bits = AtomicBits::new_ones(77);
assert!(bits.iter_ones().count() == 77);
let bits = AtomicBits::new_ones(128);
assert!(bits.iter_ones().count() == 128);
let bits = AtomicBits::new_zeroes(8);
bits.set(1, true);
bits.set(3, true);
bits.set(5, true);
assert!(bits.iter_ones().count() == 3);
}
#[test]
fn iter_zeroes() {
let bits = AtomicBits::new_ones(1);
assert!(bits.iter_zeroes().count() == 0);
let bits = AtomicBits::new_ones(130);
assert!(bits.iter_zeroes().count() == 0);
let bits = AtomicBits::new_zeroes(1);
assert!(bits.iter_zeroes().count() == 1);
let bits = AtomicBits::new_zeroes(24);
assert!(bits.iter_zeroes().count() == 24);
let bits = AtomicBits::new_zeroes(64);
assert!(bits.iter_zeroes().count() == 64);
let bits = AtomicBits::new_zeroes(77);
assert!(bits.iter_zeroes().count() == 77);
let bits = AtomicBits::new_zeroes(128);
assert!(bits.iter_zeroes().count() == 128);
let bits = AtomicBits::new_ones(96);
bits.set(1, false);
bits.set(3, false);
bits.set(5, false);
bits.set(64, false);
bits.set(76, false);
assert!(bits.iter_zeroes().count() == 5);
}
#[test]
fn iter() {
let bits = AtomicBits::new_zeroes(7);
assert!(bits.iter().all(|bit| bit == false));
let bits = AtomicBits::new_ones(128);
assert!(bits.iter().all(|bit| bit == true));
}
}

View File

@ -0,0 +1,10 @@
mod atomic_bits;
mod rcu;
mod spin;
pub(crate) mod up;
mod wait;
pub use self::atomic_bits::AtomicBits;
pub use self::rcu::{pass_quiescent_state, OwnerPtr, Rcu, RcuReadGuard, RcuReclaimer};
pub use self::spin::{SpinLock, SpinLockGuard};
pub use self::wait::WaitQueue;

View File

@ -0,0 +1,102 @@
//! Read-copy update (RCU).
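//!
//! A minimal usage sketch (hedged: reclamation depends on the RCU monitor,
//! whose singleton is still a `todo!` below, and the paths assume the
//! `crate::sync` re-exports):
//!
//! ```ignore
//! use crate::sync::Rcu;
//!
//! // Publish an initial value behind an RCU cell.
//! let config = Rcu::new(Box::new(42u32));
//!
//! // Readers dereference a cheap read guard.
//! let guard = config.get();
//! assert_eq!(*guard, 42);
//! drop(guard);
//!
//! // A writer swaps in a new value and defers freeing the old one
//! // until a grace period has elapsed.
//! config.replace(Box::new(43u32)).delay();
//! ```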
use core::marker::PhantomData;
use core::ops::Deref;
use core::sync::atomic::{
AtomicPtr,
Ordering::{AcqRel, Acquire},
};
use self::monitor::RcuMonitor;
use crate::prelude::*;
use crate::sync::WaitQueue;
mod monitor;
mod owner_ptr;
pub use owner_ptr::OwnerPtr;
pub struct Rcu<P: OwnerPtr> {
ptr: AtomicPtr<<P as OwnerPtr>::Target>,
marker: PhantomData<P::Target>,
}
impl<P: OwnerPtr> Rcu<P> {
pub fn new(ptr: P) -> Self {
let ptr = AtomicPtr::new(OwnerPtr::into_raw(ptr) as *mut _);
Self {
ptr,
marker: PhantomData,
}
}
pub fn get(&self) -> RcuReadGuard<'_, P> {
let obj = unsafe { &*self.ptr.load(Acquire) };
RcuReadGuard { obj, rcu: self }
}
}
impl<P: OwnerPtr + Send> Rcu<P> {
pub fn replace(&self, new_ptr: P) -> RcuReclaimer<P> {
let new_ptr = <P as OwnerPtr>::into_raw(new_ptr) as *mut _;
let old_ptr = {
let old_raw_ptr = self.ptr.swap(new_ptr, AcqRel);
unsafe { <P as OwnerPtr>::from_raw(old_raw_ptr) }
};
RcuReclaimer { ptr: old_ptr }
}
}
pub struct RcuReadGuard<'a, P: OwnerPtr> {
obj: &'a <P as OwnerPtr>::Target,
rcu: &'a Rcu<P>,
}
impl<'a, P: OwnerPtr> Deref for RcuReadGuard<'a, P> {
type Target = <P as OwnerPtr>::Target;
fn deref(&self) -> &Self::Target {
self.obj
}
}
#[repr(transparent)]
pub struct RcuReclaimer<P> {
ptr: P,
}
impl<P: Send + 'static> RcuReclaimer<P> {
pub fn delay(self) {
// Move `ptr` out of `self` without running `Drop for RcuReclaimer`,
// which would otherwise block for a whole grace period.
let this = core::mem::ManuallyDrop::new(self);
// SAFETY: `this` is never dropped, so `ptr` is read out exactly once.
let ptr: P = unsafe { core::ptr::read(&this.ptr) };
get_singleton().after_grace_period(move || {
drop(ptr);
});
}
}
impl<P> Drop for RcuReclaimer<P> {
fn drop(&mut self) {
let wq = Arc::new(WaitQueue::new());
get_singleton().after_grace_period({
let wq = wq.clone();
move || {
wq.wake_one();
}
});
wq.wait_until(|| Some(0u8));
}
}
pub unsafe fn pass_quiescent_state() {
get_singleton().pass_quiescent_state()
}
fn get_singleton() -> &'static RcuMonitor {
todo!()
}

View File

@ -0,0 +1,138 @@
use alloc::collections::VecDeque;
use core::sync::atomic::{
AtomicBool,
Ordering::{Acquire, Relaxed, Release},
};
use crate::cpu;
use crate::prelude::*;
use crate::sync::AtomicBits;
use crate::sync::SpinLock;
/// An RCU monitor ensures the completion of _grace periods_ by keeping track
/// of the _quiescent states_ passed by each CPU.
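///
/// A conceptual sketch of how the monitor is driven (the call sites are
/// hypothetical; the real hook points live elsewhere in the kernel):
///
/// ```ignore
/// // Each CPU periodically reports that it has reached a quiescent state,
/// // e.g., on a timer tick or a context switch.
/// unsafe { rcu_monitor.pass_quiescent_state() };
///
/// // A writer that has unpublished an object registers a callback that runs
/// // only after every CPU has passed a quiescent state (a full grace period).
/// rcu_monitor.after_grace_period(move || drop(old_object));
/// ```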
pub struct RcuMonitor {
is_monitoring: AtomicBool,
state: SpinLock<State>,
}
impl RcuMonitor {
pub fn new(num_cpus: u32) -> Self {
Self {
is_monitoring: AtomicBool::new(false),
state: SpinLock::new(State::new(num_cpus)),
}
}
pub unsafe fn pass_quiescent_state(&self) {
// Fast path
if !self.is_monitoring.load(Relaxed) {
return;
}
// Check if the current GP is complete after passing the quiescent state
// on the current CPU. If GP is complete, take the callbacks of the current
// GP.
let callbacks = {
let mut state = self.state.lock();
if state.current_gp.is_complete() {
return;
}
state.current_gp.pass_quiescent_state();
if !state.current_gp.is_complete() {
return;
}
// Now that the current GP is complete, take its callbacks
let current_callbacks = state.current_gp.take_callbacks();
// Check if we need to watch for a next GP
if !state.next_callbacks.is_empty() {
let callbacks = core::mem::take(&mut state.next_callbacks);
state.current_gp.restart(callbacks);
} else {
self.is_monitoring.store(false, Relaxed);
}
current_callbacks
};
// Invoke the callbacks to notify the completion of GP
for f in callbacks {
(f)();
}
}
pub fn after_grace_period<F>(&self, f: F)
where
F: FnOnce() -> () + Send + 'static,
{
let mut state = self.state.lock();
state.next_callbacks.push_back(Box::new(f));
if !state.current_gp.is_complete() {
return;
}
let callbacks = core::mem::take(&mut state.next_callbacks);
state.current_gp.restart(callbacks);
self.is_monitoring.store(true, Relaxed);
}
}
struct State {
current_gp: GracePeriod,
next_callbacks: Callbacks,
}
impl State {
pub fn new(num_cpus: u32) -> Self {
Self {
current_gp: GracePeriod::new(num_cpus),
next_callbacks: VecDeque::new(),
}
}
}
type Callbacks = VecDeque<Box<dyn FnOnce() -> () + Send + 'static>>;
struct GracePeriod {
callbacks: Callbacks,
cpu_mask: AtomicBits,
is_complete: bool,
}
impl GracePeriod {
pub fn new(num_cpus: u32) -> Self {
Self {
callbacks: Default::default(),
cpu_mask: AtomicBits::new_zeroes(num_cpus as usize),
is_complete: false,
}
}
pub fn is_complete(&self) -> bool {
self.is_complete
}
pub unsafe fn pass_quiescent_state(&mut self) {
let this_cpu = cpu::this_cpu();
self.cpu_mask.set(this_cpu as usize, true);
if self.cpu_mask.is_full() {
self.is_complete = true;
}
}
pub fn take_callbacks(&mut self) -> Callbacks {
core::mem::take(&mut self.callbacks)
}
pub fn restart(&mut self, callbacks: Callbacks) {
self.is_complete = false;
self.cpu_mask.clear();
self.callbacks = callbacks;
}
}

View File

@ -0,0 +1,78 @@
use core::ptr::NonNull;
use crate::prelude::*;
/// A trait that abstracts pointers that have the ownership of the objects they
/// refer to.
///
/// The most typical examples are smart pointer types like `Box<T>` and `Arc<T>`,
/// which can be converted to and from the raw pointer type `*const T`.
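///
/// A round-trip sketch with `Box<T>`, pairing `into_raw` with `from_raw` so
/// the allocation is not leaked:
///
/// ```ignore
/// use crate::sync::OwnerPtr;
///
/// let raw = <Box<u32> as OwnerPtr>::into_raw(Box::new(7));
/// // SAFETY: `raw` was just produced by `into_raw` and is consumed exactly once.
/// let boxed = unsafe { <Box<u32> as OwnerPtr>::from_raw(raw) };
/// assert_eq!(*boxed, 7);
/// ```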
pub trait OwnerPtr {
/// The target type that this pointer refers to.
// TODO: allow ?Sized
type Target;
/// Converts to a raw pointer.
///
/// If `Self` owns the object that it refers to (e.g., `Box<_>`), then
/// each call to `into_raw` must be paired with a call to `from_raw`
/// in order to avoid memory leakage.
fn into_raw(self) -> *const Self::Target;
/// Converts back from a raw pointer.
///
/// # Safety
///
/// The raw pointer must have been previously returned by a call to `into_raw`.
unsafe fn from_raw(ptr: *const Self::Target) -> Self;
}
impl<T> OwnerPtr for Box<T> {
type Target = T;
fn into_raw(self) -> *const Self::Target {
Box::into_raw(self) as *const _
}
unsafe fn from_raw(ptr: *const Self::Target) -> Self {
Box::from_raw(ptr as *mut _)
}
}
impl<T> OwnerPtr for Arc<T> {
type Target = T;
fn into_raw(self) -> *const Self::Target {
Arc::into_raw(self)
}
unsafe fn from_raw(ptr: *const Self::Target) -> Self {
Arc::from_raw(ptr)
}
}
impl<P> OwnerPtr for Option<P>
where
P: OwnerPtr,
// We cannot support fat pointers, e.g., when `Target: dyn Trait`.
// This is because Rust does not allow fat null pointers. Yet,
// we need the null pointer to represent `None`.
// See https://github.com/rust-lang/rust/issues/66316.
<P as OwnerPtr>::Target: Sized,
{
type Target = P::Target;
fn into_raw(self) -> *const Self::Target {
self.map(|p| <P as OwnerPtr>::into_raw(p))
.unwrap_or(core::ptr::null())
}
unsafe fn from_raw(ptr: *const Self::Target) -> Self {
if ptr.is_null() {
None
} else {
Some(<P as OwnerPtr>::from_raw(ptr))
}
}
}

View File

@ -0,0 +1,47 @@
use core::ops::{Deref, DerefMut};
/// A spin lock.
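///
/// A usage sketch (the methods below are still stubs, so this only shows the
/// intended API shape):
///
/// ```ignore
/// let counter = SpinLock::new(0u32);
/// {
///     // Spins until the lock is acquired; interrupts are disabled while held.
///     let mut guard = counter.lock();
///     *guard += 1;
/// } // the guard is dropped here, releasing the lock
/// ```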
pub struct SpinLock<T: ?Sized> {
val: T,
}
impl<T> SpinLock<T> {
/// Creates a new spin lock.
pub fn new(val: T) -> Self {
todo!()
}
/// Acquire the spin lock.
///
/// This method runs in a busy loop until the lock can be acquired.
/// After acquiring the spin lock, all interrupts are disabled.
pub fn lock<'a>(&self) -> SpinLockGuard<'a, T> {
todo!()
}
}
unsafe impl<T: ?Sized + Send> Send for SpinLock<T> {}
unsafe impl<T: ?Sized + Send> Sync for SpinLock<T> {}
/// The guard of a spin lock.
pub struct SpinLockGuard<'a, T: ?Sized + 'a> {
lock: &'a SpinLock<T>,
}
impl<'a, T> Deref for SpinLockGuard<'a, T> {
type Target = T;
fn deref(&self) -> &T {
todo!()
}
}
impl<'a, T> DerefMut for SpinLockGuard<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
todo!()
}
}
impl<'a, T: ?Sized> !Send for SpinLockGuard<'a, T> {}
unsafe impl<T: ?Sized + Sync> Sync for SpinLockGuard<'_, T> {}

View File

@ -0,0 +1,36 @@
use core::cell::{Ref, RefCell, RefMut};
#[derive(Debug)]
/// Wraps a data structure so that it can be accessed without any `unsafe`.
///
/// It should only be used on a uniprocessor.
///
/// To get a mutable reference to the inner data, call `exclusive_access`.
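///
/// A usage sketch (single-core only; the caller upholds the safety contract
/// of `new`):
///
/// ```ignore
/// let cell = unsafe { UPSafeCell::new(Vec::<u32>::new()) };
/// cell.exclusive_access().push(1);
/// assert_eq!(cell.get_ref().len(), 1);
/// ```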
pub(crate) struct UPSafeCell<T> {
/// inner data
inner: RefCell<T>,
}
unsafe impl<T> Sync for UPSafeCell<T> {}
impl<T> UPSafeCell<T> {
/// User is responsible to guarantee that inner struct is only used in
/// uniprocessor.
pub unsafe fn new(value: T) -> Self {
Self {
inner: RefCell::new(value),
}
}
/// Panics if the data is already borrowed.
pub fn exclusive_access(&self) -> RefMut<'_, T> {
self.inner.borrow_mut()
}
/// Panics if the data is already mutably borrowed.
pub fn get_ref(&self) -> Ref<'_, T> {
self.inner.borrow()
}
}

View File

@ -0,0 +1,140 @@
use core::sync::atomic::{AtomicBool, Ordering};
use alloc::{collections::VecDeque, sync::Arc};
use bitflags::bitflags;
use spin::mutex::Mutex;
use crate::task::schedule;
/// A wait queue.
///
/// One may wait on a wait queue to put its executing thread to sleep.
/// Multiple threads may be the waiters of a wait queue.
/// Other threads may invoke the `wake`-family methods of a wait queue to
/// wake up one or many waiter threads.
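///
/// A sketch of the intended wait/wake protocol (assumes `wq` is a `WaitQueue`
/// shared between two tasks, e.g., via an `Arc`; `READY` is an illustrative name):
///
/// ```ignore
/// use core::sync::atomic::{AtomicBool, Ordering};
///
/// static READY: AtomicBool = AtomicBool::new(false);
///
/// // Waiter task: sleeps until the condition is observed.
/// wq.wait_until(|| READY.load(Ordering::Acquire).then_some(()));
///
/// // Waker task: publishes the condition first, then wakes.
/// READY.store(true, Ordering::Release);
/// wq.wake_one();
/// ```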
pub struct WaitQueue {
waiters: Mutex<VecDeque<Arc<Waiter>>>,
}
impl WaitQueue {
/// Creates a new instance.
pub fn new() -> Self {
WaitQueue {
waiters: Mutex::new(VecDeque::new()),
}
}
/// Wait until some condition becomes true.
///
/// This method takes a closure that tests a user-given condition.
/// The method only returns if the condition returns Some(_).
/// A waker thread should first make the condition `Some(_)`, then invoke a
/// `wake`-family method. This ordering is important to ensure that waiter
/// threads do not lose any wakeup notifications.
///
/// By taking a condition closure, this wait-wakeup mechanism becomes
/// more efficient and robust.
pub fn wait_until<F, R>(&self, mut cond: F) -> R
where
F: FnMut() -> Option<R>,
{
let waiter = Arc::new(Waiter::new());
self.enqueue(&waiter);
loop {
if let Some(res) = cond() {
waiter.set_finished();
self.finish_wait();
return res;
};
waiter.wait();
}
}
/// Wake one waiter thread, if there is one.
pub fn wake_one(&self) {
if let Some(waiter) = self.waiters.lock().front() {
waiter.wake_up();
}
}
/// Wake all not-exclusive waiter threads and at most one exclusive waiter.
pub fn wake_all(&self) {
for waiter in self.waiters.lock().iter() {
waiter.wake_up();
if waiter.is_exclusive() {
break;
}
}
}
// Enqueues a waiter into the wait queue. An exclusive waiter is added to the
// back of the queue; a non-exclusive one is added to the front.
fn enqueue(&self, waiter: &Arc<Waiter>) {
if waiter.is_exclusive() {
self.waiters.lock().push_back(waiter.clone())
} else {
self.waiters.lock().push_front(waiter.clone());
}
}
/// Removes all waiters that have finished waiting.
fn finish_wait(&self) {
self.waiters.lock().retain(|waiter| !waiter.is_finished())
}
}
#[derive(Debug)]
struct Waiter {
/// Whether the waiter has been woken up
is_woken_up: AtomicBool,
/// Flags that describe how the waiter behaves (e.g., whether it is exclusive)
flag: WaiterFlag,
/// If the wait condition is true, the waiter is finished and can be removed from the wait queue
wait_finished: AtomicBool,
}
impl Waiter {
pub fn new() -> Self {
Waiter {
is_woken_up: AtomicBool::new(false),
flag: WaiterFlag::empty(),
wait_finished: AtomicBool::new(false),
}
}
/// Puts the waiter into the waiting state until `wake_up` is called.
pub fn wait(&self) {
self.is_woken_up.store(false, Ordering::SeqCst);
while !self.is_woken_up.load(Ordering::SeqCst) {
// yield the execution, to allow other task to continue
schedule();
}
}
pub fn is_woken_up(&self) -> bool {
self.is_woken_up.load(Ordering::SeqCst)
}
pub fn wake_up(&self) {
self.is_woken_up.store(true, Ordering::SeqCst);
}
pub fn set_finished(&self) {
self.wait_finished.store(true, Ordering::SeqCst);
}
pub fn is_finished(&self) -> bool {
self.wait_finished.load(Ordering::SeqCst)
}
pub fn is_exclusive(&self) -> bool {
self.flag.contains(WaiterFlag::EXCLUSIVE)
}
}
bitflags! {
pub struct WaiterFlag: u32 {
const EXCLUSIVE = 0x1;
const INTERRUPTIABLE = 0x10;
}
}

View File

@ -0,0 +1,13 @@
//! Tasks are the unit of code execution.
mod processor;
mod scheduler;
#[allow(clippy::module_inception)]
mod task;
pub(crate) use self::processor::{get_idle_task_cx_ptr, schedule};
pub use self::scheduler::{set_scheduler, Scheduler};
pub(crate) use self::task::context_switch;
pub(crate) use self::task::TaskContext;
pub(crate) use self::task::SWITCH_TO_USER_SPACE_TASK;
pub use self::task::{Task, TaskStatus};

View File

@ -0,0 +1,87 @@
use super::{
scheduler::{fetch_task, GLOBAL_SCHEDULER},
task::{context_switch, TaskContext},
Task, TaskStatus,
};
use crate::UPSafeCell;
use alloc::sync::Arc;
use lazy_static::*;
pub struct Processor {
current: Option<Arc<Task>>,
idle_task_cx: TaskContext,
}
impl Processor {
pub fn new() -> Self {
Self {
current: None,
idle_task_cx: TaskContext::default(),
}
}
fn get_idle_task_cx_ptr(&mut self) -> *mut TaskContext {
&mut self.idle_task_cx as *mut _
}
pub fn take_current(&mut self) -> Option<Arc<Task>> {
self.current.take()
}
pub fn current(&self) -> Option<Arc<Task>> {
self.current.as_ref().map(Arc::clone)
}
pub fn set_current_task(&mut self, task: Arc<Task>) {
self.current = Some(task.clone());
}
}
lazy_static! {
static ref PROCESSOR: UPSafeCell<Processor> = unsafe { UPSafeCell::new(Processor::new()) };
}
pub fn take_current_task() -> Option<Arc<Task>> {
PROCESSOR.exclusive_access().take_current()
}
pub fn current_task() -> Option<Arc<Task>> {
PROCESSOR.exclusive_access().current()
}
pub(crate) fn get_idle_task_cx_ptr() -> *mut TaskContext {
PROCESSOR.exclusive_access().get_idle_task_cx_ptr()
}
/// Switches to another task fetched from the `GLOBAL_SCHEDULER`, if any.
pub fn schedule() {
if let Some(task) = fetch_task() {
switch_to_task(task);
}
}
/// Switches from the current task to `next_task`.
///
/// If there is no current task, the idle task context is used and control will not return to this function.
///
/// If the current task has exited, it is not re-enqueued into the scheduler.
///
/// The processor's current task is updated to `next_task` before the context switch happens.
pub fn switch_to_task(next_task: Arc<Task>) {
let current_task_option = current_task();
let next_task_cx_ptr = &next_task.inner_ctx() as *const TaskContext;
let current_task: Arc<Task>;
let current_task_cx_ptr = if current_task_option.is_none() {
PROCESSOR.exclusive_access().get_idle_task_cx_ptr()
} else {
current_task = current_task_option.unwrap();
if current_task.status() != TaskStatus::Exited {
GLOBAL_SCHEDULER
.exclusive_access()
.enqueue(current_task.clone());
}
&mut current_task.inner_exclusive_access().ctx as *mut TaskContext
};
// change the current task to the next task
PROCESSOR.exclusive_access().current = Some(next_task.clone());
unsafe {
context_switch(current_task_cx_ptr, next_task_cx_ptr);
}
}

View File

@ -0,0 +1,54 @@
use crate::task::Task;
use crate::{prelude::*, UPSafeCell};
use lazy_static::lazy_static;
lazy_static! {
pub(crate) static ref GLOBAL_SCHEDULER: UPSafeCell<GlobalScheduler> =
unsafe { UPSafeCell::new(GlobalScheduler { scheduler: None }) };
}
/// A scheduler for tasks.
///
/// An implementation of scheduler can attach scheduler-related information
/// with the `TypeMap` returned from `task.data()`.
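///
/// A minimal FIFO implementation sketch (illustrative only; it reuses the
/// crate's `SpinLock`, and a production scheduler would likely keep per-CPU
/// run queues):
///
/// ```ignore
/// use alloc::collections::VecDeque;
/// use alloc::sync::Arc;
/// use crate::sync::SpinLock;
/// use crate::task::Task;
///
/// struct FifoScheduler {
///     queue: SpinLock<VecDeque<Arc<Task>>>,
/// }
///
/// impl Scheduler for FifoScheduler {
///     fn enqueue(&self, task: Arc<Task>) {
///         self.queue.lock().push_back(task);
///     }
///     fn dequeue(&self) -> Option<Arc<Task>> {
///         self.queue.lock().pop_front()
///     }
/// }
///
/// // `set_scheduler` expects a `&'static dyn Scheduler`.
/// ```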
pub trait Scheduler: Sync + Send {
fn enqueue(&self, task: Arc<Task>);
fn dequeue(&self) -> Option<Arc<Task>>;
}
pub struct GlobalScheduler {
scheduler: Option<&'static dyn Scheduler>,
}
impl GlobalScheduler {
pub fn new() -> Self {
Self { scheduler: None }
}
/// Dequeues a task using the underlying scheduler.
/// Requires that the scheduler has been set.
pub fn dequeue(&mut self) -> Option<Arc<Task>> {
self.scheduler.unwrap().dequeue()
}
/// Enqueues a task using the underlying scheduler.
/// Requires that the scheduler has been set.
pub fn enqueue(&mut self, task: Arc<Task>) {
self.scheduler.unwrap().enqueue(task)
}
}
/// Set the global task scheduler.
///
/// This must be called before invoking `Task::spawn`.
pub fn set_scheduler(scheduler: &'static dyn Scheduler) {
GLOBAL_SCHEDULER.exclusive_access().scheduler = Some(scheduler);
}
pub fn fetch_task() -> Option<Arc<Task>> {
GLOBAL_SCHEDULER.exclusive_access().dequeue()
}
pub fn add_task(task: Arc<Task>) {
GLOBAL_SCHEDULER.exclusive_access().enqueue(task);
}

View File

@ -0,0 +1,24 @@
.text
.global context_switch
context_switch: # (cur: *mut TaskContext, nxt: *TaskContext)
# Save cur's register
mov rax, [rsp] # return address
mov [rdi + 56], rax # 56 = offsetof(Context, rip)
mov [rdi + 0], rsp
mov [rdi + 8], rbx
mov [rdi + 16], rbp
mov [rdi + 24], r12
mov [rdi + 32], r13
mov [rdi + 40], r14
mov [rdi + 48], r15
# Restore nxt's registers
mov rsp, [rsi + 0]
mov rbx, [rsi + 8]
mov rbp, [rsi + 16]
mov r12, [rsi + 24]
mov r13, [rsi + 32]
mov r14, [rsi + 40]
mov r15, [rsi + 48]
mov rax, [rsi + 56] # restore return address
mov [rsp], rax # for stack balance, must use mov instead of push
ret

View File

@ -0,0 +1,292 @@
use core::cell::RefMut;
use core::mem::size_of;
use lazy_static::lazy_static;
use crate::cell::Cell;
use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE};
use crate::task::processor::switch_to_task;
use crate::trap::{CalleeRegs, SyscallFrame, TrapFrame};
use crate::user::{syscall_switch_to_user_space, trap_switch_to_user_space, UserSpace};
use crate::vm::{VmAllocOptions, VmFrameVec};
use crate::{prelude::*, UPSafeCell};
use super::processor::{current_task, schedule};
core::arch::global_asm!(include_str!("switch.S"));
#[derive(Debug, Default, Clone, Copy)]
#[repr(C)]
pub(crate) struct TaskContext {
pub regs: CalleeRegs,
pub rip: usize,
}
extern "C" {
pub(crate) fn context_switch(cur: *mut TaskContext, nxt: *const TaskContext);
}
fn context_switch_to_user_space() {
let task = Task::current();
let switch_space_task = SWITCH_TO_USER_SPACE_TASK.get();
if task.inner_exclusive_access().is_from_trap {
*switch_space_task.trap_frame() = *task.trap_frame();
unsafe {
trap_switch_to_user_space(
&task.user_space.as_ref().unwrap().cpu_ctx,
switch_space_task.trap_frame(),
);
}
} else {
*switch_space_task.syscall_frame() = *task.syscall_frame();
unsafe {
syscall_switch_to_user_space(
&task.user_space.as_ref().unwrap().cpu_ctx,
switch_space_task.syscall_frame(),
);
}
}
}
lazy_static! {
/// This task is used to switch to user space and then switch back in `UserMode::execute`.
///
/// When context switching to this task, there is no need to set the current task.
pub(crate) static ref SWITCH_TO_USER_SPACE_TASK : Cell<Task> =
Cell::new({
let task = Task{
func: Box::new(context_switch_to_user_space),
data: Box::new(None::<u8>),
user_space: None,
task_inner: unsafe {
UPSafeCell::new(TaskInner {
task_status: TaskStatus::Runnable,
ctx: TaskContext::default(),
is_from_trap:false,
})
},
exit_code: usize::MAX,
kstack: KernelStack::new(),
};
task.task_inner.exclusive_access().task_status = TaskStatus::Runnable;
task.task_inner.exclusive_access().ctx.rip = context_switch_to_user_space as usize;
task.task_inner.exclusive_access().ctx.regs.rsp = (task.kstack.frame.end_pa().unwrap().kvaddr().0
- size_of::<usize>()
- size_of::<SyscallFrame>()) as u64;
task
});
}
pub struct KernelStack {
frame: VmFrameVec,
}
impl KernelStack {
pub fn new() -> Self {
Self {
frame: VmFrameVec::allocate(&VmAllocOptions::new(KERNEL_STACK_SIZE / PAGE_SIZE))
.expect("out of memory"),
}
}
}
/// A task that executes a function to the end.
pub struct Task {
func: Box<dyn Fn() + Send + Sync>,
data: Box<dyn Any + Send + Sync>,
user_space: Option<Arc<UserSpace>>,
task_inner: UPSafeCell<TaskInner>,
exit_code: usize,
/// kernel stack, note that the top is SyscallFrame/TrapFrame
kstack: KernelStack,
}
pub(crate) struct TaskInner {
pub task_status: TaskStatus,
pub ctx: TaskContext,
/// Whether the task entered the kernel from a trap. If so, read the `TrapFrame` instead of the `SyscallFrame`.
pub is_from_trap: bool,
}
impl Task {
/// Gets the current task.
pub fn current() -> Arc<Task> {
current_task().unwrap()
}
/// Gets exclusive access to the task's inner state.
pub(crate) fn inner_exclusive_access(&self) -> RefMut<'_, TaskInner> {
self.task_inner.exclusive_access()
}
/// Gets a copy of the task context.
pub(crate) fn inner_ctx(&self) -> TaskContext {
self.task_inner.exclusive_access().ctx
}
/// Yields execution so that another task may be scheduled.
///
/// Note that this method cannot be simply named "yield" as the name is
/// a Rust keyword.
pub fn yield_now() {
schedule();
}
/// Spawns a task that executes a function.
///
/// Each task is associated with a per-task data and an optional user space.
/// If having a user space, then the task can switch to the user space to
/// execute user code. Multiple tasks can share a single user space.
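///
/// A usage sketch (assumes `set_scheduler` has already been called; the
/// per-task data `None::<u8>` mirrors what the kernel itself uses for its
/// internal switch task):
///
/// ```ignore
/// let task = Task::spawn(
///     || {
///         // Kernel work runs here; the task exits when the closure returns.
///     },
///     None::<u8>,
///     None,
/// )?;
/// ```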
pub fn spawn<F, T>(
task_fn: F,
task_data: T,
user_space: Option<Arc<UserSpace>>,
) -> Result<Arc<Self>>
where
F: Fn() + Send + Sync + 'static,
T: Any + Send + Sync,
{
/// Every spawned task enters the kernel through this function,
/// which executes the `func` stored in the `Task` and then exits.
fn kernel_task_entry() {
let current_task = current_task()
.expect("no current task, it should have current task in kernel task entry");
current_task.func.call(());
current_task.exit();
}
let result = Self {
func: Box::new(task_fn),
data: Box::new(task_data),
user_space,
task_inner: unsafe {
UPSafeCell::new(TaskInner {
task_status: TaskStatus::Runnable,
ctx: TaskContext::default(),
is_from_trap: false,
})
},
exit_code: 0,
kstack: KernelStack::new(),
};
result.task_inner.exclusive_access().task_status = TaskStatus::Runnable;
result.task_inner.exclusive_access().ctx.rip = kernel_task_entry as usize;
result.task_inner.exclusive_access().ctx.regs.rsp =
(result.kstack.frame.end_pa().unwrap().kvaddr().0
- size_of::<usize>()
- size_of::<SyscallFrame>()) as u64;
let arc_self = Arc::new(result);
switch_to_task(arc_self.clone());
Ok(arc_self)
}
pub fn new<F, T>(
task_fn: F,
task_data: T,
user_space: Option<Arc<UserSpace>>,
) -> Result<Arc<Self>>
where
F: Fn() + Send + Sync + 'static,
T: Any + Send + Sync,
{
/// Every task created here enters the kernel through this function,
/// which executes the `func` stored in the `Task` and then exits.
fn kernel_task_entry() {
let current_task = current_task()
.expect("no current task, it should have current task in kernel task entry");
current_task.func.call(());
current_task.exit();
}
let result = Self {
func: Box::new(task_fn),
data: Box::new(task_data),
user_space,
task_inner: unsafe {
UPSafeCell::new(TaskInner {
task_status: TaskStatus::Runnable,
ctx: TaskContext::default(),
is_from_trap: false,
})
},
exit_code: 0,
kstack: KernelStack::new(),
};
result.task_inner.exclusive_access().task_status = TaskStatus::Runnable;
result.task_inner.exclusive_access().ctx.rip = kernel_task_entry as usize;
result.task_inner.exclusive_access().ctx.regs.rsp =
(result.kstack.frame.end_pa().unwrap().kvaddr().0
- size_of::<usize>()
- size_of::<SyscallFrame>()) as u64;
Ok(Arc::new(result))
}
pub fn send_to_scheduler(self: &Arc<Self>) {
switch_to_task(self.clone());
}
pub(crate) fn syscall_frame(&self) -> &mut SyscallFrame {
unsafe {
&mut *(self
.kstack
.frame
.end_pa()
.unwrap()
.kvaddr()
.get_mut::<SyscallFrame>() as *mut SyscallFrame)
.sub(1)
}
}
pub(crate) fn trap_frame(&self) -> &mut TrapFrame {
unsafe {
&mut *(self
.kstack
.frame
.end_pa()
.unwrap()
.kvaddr()
.get_mut::<TrapFrame>() as *mut TrapFrame)
.sub(1)
}
}
/// Returns the task status.
pub fn status(&self) -> TaskStatus {
self.task_inner.exclusive_access().task_status
}
/// Returns the task data.
pub fn data(&self) -> &Box<dyn Any + Send + Sync> {
&self.data
}
/// Returns the user space of this task, if it has.
pub fn user_space(&self) -> Option<&Arc<UserSpace>> {
self.user_space.as_ref()
}
pub fn exit(&self) -> ! {
self.inner_exclusive_access().task_status = TaskStatus::Exited;
schedule();
unreachable!()
}
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
/// The status of a task.
pub enum TaskStatus {
/// The task is runnable.
Runnable,
/// The task is running.
Running,
/// The task is sleeping.
Sleeping,
/// The task has exited.
Exited,
}

View File

@ -0,0 +1,110 @@
//! Timer.
use crate::{
device::{TimerCallback, TICK, TIMER_FREQ},
prelude::*,
};
use core::time::Duration;
use spin::Mutex;
/// A timer invokes a callback function after a specified span of time elapsed.
///
/// A new timer is initially inactive. Only after a timeout value is set with
/// the `set` method can the timer become active and the callback function
/// be triggered.
///
/// Timers are one-shot. If the time is out, one has to set the timer again
/// in order to trigger the callback again.
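///
/// A usage sketch (assumes the timer interrupt source in `crate::device` has
/// been initialized):
///
/// ```ignore
/// use core::time::Duration;
///
/// let timer = Timer::new(|timer| {
///     // Runs once after the timeout; re-arm here for periodic behavior.
///     timer.set(Duration::from_millis(100));
/// })?;
/// timer.clone().set(Duration::from_millis(100));
/// ```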
pub struct Timer {
function: Arc<dyn Fn(Arc<Self>) + Send + Sync>,
inner: Mutex<TimerInner>,
}
#[derive(Default)]
struct TimerInner {
start_tick: u64,
timeout_tick: u64,
timer_callback: Option<Arc<TimerCallback>>,
}
fn timer_callback(callback: &TimerCallback) {
let data = callback.data();
if data.is::<Arc<Timer>>() {
let timer = data.downcast_ref::<Arc<Timer>>().unwrap();
timer.function.call((timer.clone(),));
} else {
panic!("the timer callback is not Timer structure");
}
}
const NANOS_DIVIDE: u64 = 1_000_000_000 / TIMER_FREQ;
impl Timer {
/// Creates a new instance, given a callback function.
pub fn new<F>(f: F) -> Result<Arc<Self>>
where
F: Fn(Arc<Timer>) + Send + Sync + 'static,
{
Ok(Arc::new(Self {
function: Arc::new(f),
inner: Mutex::new(TimerInner::default()),
}))
}
/// Set a timeout value.
///
/// If a timeout value is already set, the timeout value will be refreshed.
///
pub fn set(self: Arc<Self>, timeout: Duration) {
let mut lock = self.inner.lock();
match &lock.timer_callback {
Some(callback) => {
callback.disable();
}
None => {}
}
let tick_count = timeout.as_secs() * TIMER_FREQ
+ if timeout.subsec_nanos() != 0 {
(timeout.subsec_nanos() as u64 - 1) / NANOS_DIVIDE + 1
} else {
0
};
unsafe {
lock.start_tick = TICK;
lock.timeout_tick = TICK + tick_count;
}
lock.timer_callback = Some(crate::device::add_timeout_list(
tick_count,
self.clone(),
timer_callback,
));
}
/// Returns the remaining timeout value.
///
/// If the timer is not set, then the remaining timeout value is zero.
pub fn remain(&self) -> Duration {
let lock = self.inner.lock();
let tick_remain;
unsafe {
tick_remain = lock.timeout_tick as i64 - TICK as i64;
}
if tick_remain <= 0 {
Duration::new(0, 0)
} else {
let second_count = tick_remain as u64 / TIMER_FREQ;
let remain_count = tick_remain as u64 % TIMER_FREQ;
Duration::new(second_count, (remain_count * NANOS_DIVIDE) as u32)
}
}
/// Clear the timeout value.
pub fn clear(&self) {
let mut lock = self.inner.lock();
if let Some(callback) = &lock.timer_callback {
callback.disable();
}
lock.timeout_tick = 0;
lock.start_tick = 0;
lock.timer_callback = None;
}
}

View File

@ -0,0 +1,94 @@
use crate::task::{
context_switch, get_idle_task_cx_ptr, Task, TaskContext, SWITCH_TO_USER_SPACE_TASK,
};
use super::{irq::IRQ_LIST, *};
#[no_mangle]
pub(crate) extern "C" fn syscall_handler(f: &mut SyscallFrame) -> isize {
let r = &f.caller;
let current = Task::current();
current.inner_exclusive_access().is_from_trap = false;
*current.syscall_frame() = *SWITCH_TO_USER_SPACE_TASK.get().syscall_frame();
unsafe {
context_switch(
get_idle_task_cx_ptr() as *mut TaskContext,
&Task::current().inner_ctx() as *const TaskContext,
)
}
-1
}
#[no_mangle]
pub(crate) extern "C" fn trap_handler(f: &mut TrapFrame) {
if !is_from_kernel(f.cs) {
let current = Task::current();
current.inner_exclusive_access().is_from_trap = true;
*current.trap_frame() = *SWITCH_TO_USER_SPACE_TASK.trap_frame();
if is_cpu_fault(current.trap_frame()) {
// If it is a CPU fault, pass control to the trap handler in jinux-std.
unsafe {
context_switch(
get_idle_task_cx_ptr() as *mut TaskContext,
&Task::current().inner_ctx() as *const TaskContext,
)
}
} else {
let irq_line = IRQ_LIST.get(f.id as usize).unwrap();
let callback_functions = irq_line.callback_list();
for callback_function in callback_functions.iter() {
callback_function.call(f);
}
}
} else {
if is_cpu_fault(f) {
panic!("cannot handle kernel cpu fault now");
}
let irq_line = IRQ_LIST.get(f.id as usize).unwrap();
let callback_functions = irq_line.callback_list();
for callback_function in callback_functions.iter() {
callback_function.call(f);
}
}
}
fn is_from_kernel(cs: u64) -> bool {
cs & 0x3 == 0
}
/// As the OSDev Wiki defines (https://wiki.osdev.org/Exceptions),
/// CPU exceptions are classified as:
/// Faults: These can be corrected and the program may continue as if nothing happened.
/// Traps: Traps are reported immediately after the execution of the trapping instruction.
/// Aborts: Some severe unrecoverable error.
/// This function determines whether a trap is a CPU **fault**.
/// Control is passed to jinux-std only for faults.
pub fn is_cpu_fault(trap_frame: &TrapFrame) -> bool {
match trap_frame.id {
DIVIDE_BY_ZERO
| DEBUG
| BOUND_RANGE_EXCEEDED
| INVALID_OPCODE
| DEVICE_NOT_AVAILABLE
| INVAILD_TSS
| SEGMENT_NOT_PRESENT
| STACK_SEGMENT_FAULT
| GENERAL_PROTECTION_FAULT
| PAGE_FAULT
| X87_FLOATING_POINT_EXCEPTION
| ALIGNMENT_CHECK
| SIMD_FLOATING_POINT_EXCEPTION
| VIRTUALIZATION_EXCEPTION
| CONTROL_PROTECTION_EXCEPTION
| HYPERVISOR_INJECTION_EXCEPTION
| VMM_COMMUNICATION_EXCEPTION
| SECURITY_EXCEPTION => true,
_ => false,
}
}

View File

@ -0,0 +1,186 @@
use crate::{prelude::*, Error};
use super::TrapFrame;
use crate::util::recycle_allocator::RecycleAllocator;
use core::fmt::Debug;
use lazy_static::lazy_static;
use spin::{Mutex, MutexGuard};
lazy_static! {
/// The IRQ numbers that are not currently in use
static ref NOT_USING_IRQ: Mutex<RecycleAllocator> = Mutex::new(RecycleAllocator::with_start_max(32,256));
}
pub fn allocate_irq() -> Result<IrqAllocateHandle> {
let irq_num = NOT_USING_IRQ.lock().alloc();
if irq_num == usize::MAX {
Err(Error::NotEnoughResources)
} else {
Ok(IrqAllocateHandle::new(irq_num as u8))
}
}
pub(crate) fn allocate_target_irq(target_irq: u8) -> Result<IrqAllocateHandle> {
if NOT_USING_IRQ.lock().get_target(target_irq as usize) {
Ok(IrqAllocateHandle::new(target_irq))
} else {
Err(Error::NotEnoughResources)
}
}
/// The handle to an allocated IRQ number in [32, 256), used in jinux-std and other parts of jinux.
///
/// When the handle is dropped, all the callbacks registered through it will be unregistered automatically.
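///
/// A usage sketch (illustrative only):
///
/// ```ignore
/// let mut irq = allocate_irq()?;
/// irq.on_active(|_trap_frame| {
///     // Runs every time this IRQ line fires.
/// });
/// // Dropping `irq` unregisters the callback and recycles the IRQ number.
/// ```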
#[derive(Debug)]
#[must_use]
pub struct IrqAllocateHandle {
irq_num: u8,
irq: Arc<&'static IrqLine>,
callbacks: Vec<IrqCallbackHandle>,
}
impl IrqAllocateHandle {
fn new(irq_num: u8) -> Self {
Self {
irq_num,
irq: unsafe { IrqLine::acquire(irq_num) },
callbacks: Vec::new(),
}
}
/// Get the IRQ number.
pub fn num(&self) -> u8 {
self.irq_num
}
/// Register a callback that will be invoked when the IRQ is active.
///
/// For each IRQ line, multiple callbacks may be registered.
pub fn on_active<F>(&mut self, callback: F)
where
F: Fn(&TrapFrame) + Sync + Send + 'static,
{
self.callbacks.push(self.irq.on_active(callback))
}
pub fn is_empty(&self) -> bool {
self.callbacks.is_empty()
}
}
impl Drop for IrqAllocateHandle {
fn drop(&mut self) {
for callback in &self.callbacks {
drop(callback)
}
NOT_USING_IRQ.lock().dealloc(self.irq_num as usize);
}
}
lazy_static! {
pub(crate) static ref IRQ_LIST: Vec<IrqLine> = {
let mut list: Vec<IrqLine> = Vec::new();
for i in 0..256 {
list.push(IrqLine {
irq_num: i as u8,
callback_list: Mutex::new(Vec::new()),
});
}
list
};
}
lazy_static! {
static ref ID_ALLOCATOR: Mutex<RecycleAllocator> = Mutex::new(RecycleAllocator::new());
}
pub struct CallbackElement {
function: Box<dyn Fn(&TrapFrame) + Send + Sync + 'static>,
id: usize,
}
impl CallbackElement {
pub fn call(&self, element: &TrapFrame) {
self.function.call((element,));
}
}
impl Debug for CallbackElement {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("CallbackElement")
.field("id", &self.id)
.finish()
}
}
/// An interrupt request (IRQ) line.
#[derive(Debug)]
pub(crate) struct IrqLine {
irq_num: u8,
callback_list: Mutex<Vec<CallbackElement>>,
}
impl IrqLine {
/// Acquire an interrupt request line.
///
/// # Safety
///
/// This function is marked unsafe as manipulating interrupt lines is
/// considered a dangerous operation.
pub unsafe fn acquire(irq_num: u8) -> Arc<&'static Self> {
Arc::new(IRQ_LIST.get(irq_num as usize).unwrap())
}
/// Get the IRQ number.
pub fn num(&self) -> u8 {
self.irq_num
}
pub fn callback_list(&self) -> MutexGuard<'_, alloc::vec::Vec<CallbackElement>> {
self.callback_list.lock()
}
/// Register a callback that will be invoked when the IRQ is active.
///
/// A handle to the callback is returned. Dropping the handle
/// automatically unregisters the callback.
///
/// For each IRQ line, multiple callbacks may be registered.
pub fn on_active<F>(&self, callback: F) -> IrqCallbackHandle
where
F: Fn(&TrapFrame) + Sync + Send + 'static,
{
let allocate_id = ID_ALLOCATOR.lock().alloc();
self.callback_list.lock().push(CallbackElement {
function: Box::new(callback),
id: allocate_id,
});
IrqCallbackHandle {
irq_num: self.irq_num,
id: allocate_id,
}
}
}
/// The handle to a registered callback for a IRQ line.
///
/// When the handle is dropped, the callback will be unregistered automatically.
#[must_use]
#[derive(Debug)]
pub struct IrqCallbackHandle {
irq_num: u8,
id: usize,
// cursor: CursorMut<'a, Box<dyn Fn(&IrqLine)+Sync+Send+'static>>
}
impl Drop for IrqCallbackHandle {
fn drop(&mut self) {
let mut a = IRQ_LIST
.get(self.irq_num as usize)
.unwrap()
.callback_list
.lock();
a.retain(|item| item.id != self.id);
ID_ALLOCATOR.lock().dealloc(self.id);
}
}

View File

@ -0,0 +1,230 @@
mod handler;
mod irq;
use crate::cell::Cell;
use lazy_static::lazy_static;
use x86_64::{
registers::{
model_specific::{self, EferFlags},
rflags::RFlags,
},
structures::{gdt::*, tss::TaskStateSegment},
};
pub use self::irq::{allocate_irq, IrqAllocateHandle};
pub(crate) use self::irq::{allocate_target_irq, IrqCallbackHandle, IrqLine};
use core::{fmt::Debug, mem::size_of_val};
use crate::{x86_64_util::*, *};
core::arch::global_asm!(include_str!("trap.S"));
core::arch::global_asm!(include_str!("vector.S"));
#[derive(Default, Clone, Copy)]
#[repr(C)]
pub struct CallerRegs {
pub rax: u64,
pub rcx: u64,
pub rdx: u64,
pub rsi: u64,
pub rdi: u64,
pub r8: u64,
pub r9: u64,
pub r10: u64,
pub r11: u64,
}
impl Debug for CallerRegs {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.write_fmt(format_args!("rax: 0x{:x}, rcx: 0x{:x}, rdx: 0x{:x}, rsi: 0x{:x}, rdi: 0x{:x}, r8: 0x{:x}, r9: 0x{:x}, r10: 0x{:x}, r11: 0x{:x}",
self.rax, self.rcx, self.rdx, self.rsi, self.rdi, self.r8, self.r9, self.r10, self.r11))?;
Ok(())
}
}
#[derive(Default, Clone, Copy)]
#[repr(C)]
pub struct CalleeRegs {
pub rsp: u64,
pub rbx: u64,
pub rbp: u64,
pub r12: u64,
pub r13: u64,
pub r14: u64,
pub r15: u64,
}
impl Debug for CalleeRegs {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.write_fmt(format_args!("rsp: 0x{:x}, rbx: 0x{:x}, rbp: 0x{:x}, r12: 0x{:x}, r13: 0x{:x}, r14: 0x{:x}, r15: 0x{:x}", self.rsp, self.rbx, self.rbp, self.r12, self.r13, self.r14, self.r15))?;
Ok(())
}
}
#[derive(Debug, Default, Clone, Copy)]
#[repr(C)]
pub struct SyscallFrame {
pub caller: CallerRegs,
pub callee: CalleeRegs,
}
#[derive(Debug, Default, Clone, Copy)]
#[repr(C)]
pub struct TrapFrame {
pub cr2: u64,
pub caller: CallerRegs,
// do not use the rsp inside the callee, use another rsp instead
pub callee: CalleeRegs,
pub id: u64,
pub err: u64,
// Pushed by CPU
pub rip: u64,
pub cs: u64,
pub rflags: u64,
pub rsp: u64,
pub ss: u64,
}
const TSS_SIZE: usize = 104;
extern "C" {
/// TSS
static TSS: [u8; TSS_SIZE];
/// All interrupt vectors push an id and then jump to the trap entry (`__trap_entry`)
static __vectors: [usize; 256];
fn syscall_entry();
}
lazy_static! {
static ref GDT: Cell<GlobalDescriptorTable> = Cell::new(GlobalDescriptorTable::new());
}
#[repr(C, align(16))]
struct IDT {
/**
* The structure of all entries in IDT are shown below:
* related link: https://wiki.osdev.org/IDT#Structure_on_x86-64
* Low 64 bits of entry:
* |0-------------------------------------15|16------------------------------31|
* | Low 16 bits of target address | Segment Selector |
* |32-34|35------39|40-------43|44|45-46|47|48------------------------------63|
* | IST | Reserved | Gate Type | 0| DPL |P | Middle 16 bits of target address |
* |---------------------------------------------------------------------------|
* High 64 bits of entry:
* |64-----------------------------------------------------------------------95|
* | High 32 bits of target address |
* |96----------------------------------------------------------------------127|
* | Reserved |
* |---------------------------------------------------------------------------|
*/
entries: [[usize; 2]; 256],
}
impl IDT {
const fn default() -> Self {
Self {
entries: [[0; 2]; 256],
}
}
}
static mut IDT: IDT = IDT::default();
pub(crate) fn init() {
// FIXME: use the GDT provided by the x86_64 crate
let tss = unsafe { &*(TSS.as_ptr() as *const TaskStateSegment) };
let gdt = GDT.get();
let kcs = gdt.add_entry(Descriptor::kernel_code_segment());
let kss = gdt.add_entry(Descriptor::kernel_data_segment());
let uss = gdt.add_entry(Descriptor::user_data_segment());
let ucs = gdt.add_entry(Descriptor::user_code_segment());
let tss_load = gdt.add_entry(Descriptor::tss_segment(tss));
gdt.load();
x86_64_util::set_cs(kcs.0);
x86_64_util::set_ss(kss.0);
load_tss(tss_load.0);
unsafe {
// enable syscall extensions
model_specific::Efer::update(|efer_flags| {
efer_flags.insert(EferFlags::SYSTEM_CALL_EXTENSIONS);
});
}
model_specific::Star::write(ucs, uss, kcs, kss)
.expect("error when configure star msr register");
// set the syscall entry
model_specific::LStar::write(x86_64::VirtAddr::new(syscall_entry as u64));
model_specific::SFMask::write(
RFlags::TRAP_FLAG
| RFlags::DIRECTION_FLAG
| RFlags::INTERRUPT_FLAG
| RFlags::IOPL_LOW
| RFlags::IOPL_HIGH
| RFlags::NESTED_TASK
| RFlags::ALIGNMENT_CHECK,
);
// initialize the trap entry for all irq number
for i in 0..256 {
let p = unsafe { __vectors[i] };
// set gate type to 1110: 64 bit Interrupt Gate, Present bit to 1, DPL to Ring 0
let p_low = (((p >> 16) & 0xFFFF) << 48) | (p & 0xFFFF);
let trap_entry_option: usize = 0b1000_1110_0000_0000;
let low = (trap_entry_option << 32)
| ((kcs.0 as usize) << 16)
| p_low;
let high = p >> 32;
unsafe {
IDT.entries[i] = [low, high];
}
}
unsafe {
lidt(&DescriptorTablePointer {
limit: size_of_val(&IDT) as u16 - 1,
base: &IDT as *const _ as _,
})
}
}
macro_rules! define_cpu_exception {
( $( $name: ident = $exception_num: expr ),* ) => {
$(
pub const $name : u64 = $exception_num;
)*
}
}
define_cpu_exception!(
DIVIDE_BY_ZERO = 0,
DEBUG = 1,
NON_MASKABLE_INTERRUPT = 2,
BREAKPOINT = 3,
OVERFLOW = 4,
BOUND_RANGE_EXCEEDED = 5,
INVALID_OPCODE = 6,
DEVICE_NOT_AVAILABLE = 7,
DOUBLE_FAULT = 8,
COPROCESSOR_SEGMENT_OVERRUN = 9,
INVAILD_TSS = 10,
SEGMENT_NOT_PRESENT = 11,
STACK_SEGMENT_FAULT = 12,
GENERAL_PROTECTION_FAULT = 13,
PAGE_FAULT = 14,
// 15 reserved
X87_FLOATING_POINT_EXCEPTION = 16,
ALIGNMENT_CHECK = 17,
MACHINE_CHECK = 18,
SIMD_FLOATING_POINT_EXCEPTION = 19,
VIRTUALIZATION_EXCEPTION = 20,
CONTROL_PROTECTION_EXCEPTION = 21,
// 22-27 reserved
HYPERVISOR_INJECTION_EXCEPTION = 28,
VMM_COMMUNICATION_EXCEPTION = 29,
SECURITY_EXCEPTION = 30 // 31 reserved
);

View File

@ -0,0 +1,126 @@
.data
.align 4
TSS:
.space 104
.text
.macro save
push r11
push r10
push r9
push r8
push rdi
push rsi
push rdx
push rcx
push rax
.endm
.macro restore
pop rax
pop rcx
pop rdx
pop rsi
pop rdi
pop r8
pop r9
pop r10
pop r11
.endm
.global __trap_entry
__trap_entry:
#
push r15
push r14
push r13
push r12
push rbp
push rbx
mov rdi, 0
push rdi
save
# save cr2
mov rdi, cr2
push rdi
# trap_handler
mov rdi, rsp
call trap_handler
__trap_return:
# judge whether the trap from kernel mode
mov rax, [rsp + 160] # 160 = offsetof(TrapFrame, cs)
and rax, 0x3
jz __from_kernel
lea rax, [rsp + 192] # prepare new TSS.sp0, 192 = sizeof(TrapFrame)
mov [TSS + rip + 4], rax
__from_kernel:
add rsp, 8 # skip cr2
restore
add rsp,8 # skip rsp in callee
pop rbx
pop rbp
pop r12
pop r13
pop r14
pop r15
add rsp, 16 # skip TrapFrame.err and id
iretq
.global syscall_entry
syscall_entry:
# syscall instruction do:
# - load cs, ss from STAR MSR
# - r11 <- rflags, mask rflags from RFMASK MSR
# - rcx <- rip, load rip from LSTAR MSR
# temporarily store user rsp into TSS.sp0 and load kernel rsp from it.
xchg rsp, [TSS + rip + 4]
push r15
push r14
push r13
push r12
push rbp
push rbx
push [TSS + rip + 4] # store user rsp into SyscallFrame.rsp
save
mov rdi, rsp
call syscall_handler
mov [rsp], rax # CallerRegs.rax is at offset 0
jmp __syscall_return
.global syscall_return
syscall_return: # (SyscallFrame *)
mov rsp, rdi
__syscall_return:
lea rax, [rsp + 128] # prepare new TSS.sp0, 128 = sizeof(SyscallFrame)
# store the rsp in TSS
mov [TSS + rip + 4], rax
restore
mov rbx, [rsp + 8]
mov rbp, [rsp + 16]
mov r12, [rsp + 24]
mov r13, [rsp + 32]
mov r14, [rsp + 40]
mov r15, [rsp + 48]
mov rsp, [rsp + 0]
sysretq
.global syscall_switch_to_user_space
syscall_switch_to_user_space: # (cpu_context: *CpuContext,reg: *SyscallFrame)
# mov rflag, [rdi+136]
mov rdi, rsi
jmp syscall_return
.global trap_switch_to_user_space
trap_switch_to_user_space: # (cpu_context: *CpuContext,reg: *TrapFrame)
# mov rflag, [rdi+136]
mov rdi, rsi
mov rsp, rdi
jmp __trap_return

File diff suppressed because it is too large

View File

@ -0,0 +1,200 @@
//! User space.
use crate::debug;
use crate::x86_64_util::{rdfsbase, wrfsbase};
use crate::cpu::CpuContext;
use crate::prelude::*;
use crate::task::{context_switch, Task, TaskContext, SWITCH_TO_USER_SPACE_TASK};
use crate::trap::{SyscallFrame, TrapFrame};
use crate::vm::VmSpace;
extern "C" {
pub(crate) fn syscall_switch_to_user_space(
cpu_context: &CpuContext,
syscall_frame: &SyscallFrame,
);
/// `cpu_context` may be removed in the future
pub(crate) fn trap_switch_to_user_space(cpu_context: &CpuContext, trap_frame: &TrapFrame);
}
/// A user space.
///
/// Each user space has a VM address space and allows a task to execute in
/// user mode.
pub struct UserSpace {
/// vm space
vm_space: VmSpace,
/// cpu context before entering user space
pub cpu_ctx: CpuContext,
}
impl UserSpace {
/// Creates a new instance.
///
/// Each instance maintains a VM address space and the CPU state to enable
/// execution in the user space.
pub fn new(vm_space: VmSpace, cpu_ctx: CpuContext) -> Self {
Self {
vm_space: vm_space,
cpu_ctx: cpu_ctx,
}
}
/// Returns the VM address space.
pub fn vm_space(&self) -> &VmSpace {
&self.vm_space
}
/// Returns the user mode that is bound to the current task and user space.
///
/// See `UserMode` on how to use it to execute user code.
///
/// # Panic
///
/// This method is intended to only allow each task to have at most one
/// instance of `UserMode` initiated. If this method is called again before
/// the first instance for the current task is dropped, then the method
/// panics.
pub fn user_mode(&self) -> UserMode<'_> {
todo!()
}
}
/// Code execution in the user mode.
///
/// This type enables executing the code in user space from a task in the kernel
/// space safely.
///
/// Here is a sample code on how to use `UserMode`.
///
/// ```no_run
/// use jinux_frame::task::Task;
///
/// let current = Task::current();
/// let user_space = current.user_space()
/// .expect("the current task is associated with a user space");
/// let mut user_mode = user_space.user_mode();
/// loop {
/// // Execute in the user space until some interesting user event occurs
/// let user_event = user_mode.execute();
/// todo!("handle the user event, e.g., syscall");
/// }
/// ```
pub struct UserMode<'a> {
current: Arc<Task>,
user_space: &'a Arc<UserSpace>,
context: CpuContext,
executed: bool,
}
// An instance of `UserMode` is bound to the current task, so it cannot be sent to or used by other tasks.
impl<'a> !Send for UserMode<'a> {}
impl<'a> UserMode<'a> {
pub fn new(user_space: &'a Arc<UserSpace>) -> Self {
Self {
current: Task::current(),
user_space,
context: CpuContext::default(),
executed: false,
}
}
/// Starts executing in the user mode. Make sure current task is the task in `UserMode`.
///
/// The method returns for one of three possible reasons indicated by `UserEvent`.
/// 1. The user invokes a system call;
/// 2. The user triggers an exception;
/// 3. The user triggers a fault.
///
/// After handling the user event and updating the user-mode CPU context,
/// this method can be invoked again to go back to the user space.
pub fn execute(&mut self) -> UserEvent {
unsafe {
self.user_space.vm_space().activate();
}
if !self.executed {
*self.current.syscall_frame() = self.user_space.cpu_ctx.into();
self.current.syscall_frame().caller.rcx = self.user_space.cpu_ctx.gp_regs.rip;
// write fsbase
wrfsbase(self.user_space.cpu_ctx.fs_base);
let fp_regs = self.user_space.cpu_ctx.fp_regs;
if fp_regs.is_valid() {
fp_regs.restore();
}
self.executed = true;
} else {
if self.current.inner_exclusive_access().is_from_trap {
*self.current.trap_frame() = self.context.into();
} else {
*self.current.syscall_frame() = self.context.into();
self.current.syscall_frame().caller.rcx = self.context.gp_regs.rip;
}
// write fsbase
if rdfsbase() != self.context.fs_base {
debug!("write fsbase: 0x{:x}", self.context.fs_base);
wrfsbase(self.context.fs_base);
}
// write fp_regs
// let fp_regs = self.context.fp_regs;
// if fp_regs.is_valid() {
// fp_regs.restore();
// }
}
let mut current_task_inner = self.current.inner_exclusive_access();
let binding = SWITCH_TO_USER_SPACE_TASK.get();
let next_task_inner = binding.inner_exclusive_access();
let current_ctx = &mut current_task_inner.ctx as *mut TaskContext;
let next_ctx = &next_task_inner.ctx as *const TaskContext;
drop(current_task_inner);
drop(next_task_inner);
drop(binding);
unsafe {
context_switch(current_ctx, next_ctx);
// switch_to_user_space(&self.user_space.cpu_ctx, self.current.syscall_frame());
}
if self.current.inner_exclusive_access().is_from_trap {
self.context = CpuContext::from(*self.current.trap_frame());
self.context.fs_base = rdfsbase();
// self.context.fp_regs.save();
UserEvent::Exception
} else {
self.context = CpuContext::from(*self.current.syscall_frame());
self.context.fs_base = rdfsbase();
// self.context.fp_regs.save();
// debug!("[kernel] syscall id:{}", self.context.gp_regs.rax);
// debug!("[kernel] rsp: 0x{:x}", self.context.gp_regs.rsp);
// debug!("[kernel] rcx: 0x{:x}", self.context.gp_regs.rcx);
// debug!("[kernel] rip: 0x{:x}", self.context.gp_regs.rip);
UserEvent::Syscall
}
}
/// Returns an immutable reference to the user-mode CPU context.
pub fn context(&self) -> &CpuContext {
&self.context
}
/// Returns a mutable reference to the user-mode CPU context.
pub fn context_mut(&mut self) -> &mut CpuContext {
&mut self.context
}
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug)]
/// A user event is what brings back the control of the CPU back from
/// the user space to the kernel space.
///
/// Note that hardware interrupts are not considered user events as they
/// are triggered by devices and not visible to user programs.
/// To handle interrupts, one should register callback functions for
/// IRQ lines (`IrqLine`).
pub enum UserEvent {
Syscall,
Exception,
Fault,
}

View File

@ -0,0 +1,104 @@
/// An extension trait for Rust integer types, including `u8`, `u16`, `u32`,
/// `u64`, and `usize`, to provide methods to make integers aligned to a
/// power of two.
pub trait AlignExt {
/// Returns whether the number is a power of two.
fn is_power_of_two(&self) -> bool;
/// Returns the smallest number that is greater than or equal to
/// `self` and is a multiple of the given power of two.
///
/// The method panics if `power_of_two` is not a
/// power of two or is smaller than 2 or the calculation overflows
/// because `self` is too large.
///
/// # Examples
///
/// ```
/// assert_eq!(12u32.align_up(2), 12);
/// assert_eq!(12u32.align_up(4), 12);
/// assert_eq!(12u32.align_up(8), 16);
/// assert_eq!(12u32.align_up(16), 16);
/// ```
fn align_up(self, power_of_two: Self) -> Self;
/// Returns the greatest number that is smaller than or equal to
/// `self` and is a multiple of the given power of two.
///
/// The method panics if `power_of_two` is not a
/// power of two, or is smaller than 2, or the calculation overflows
/// because `self` is too large.
///
/// # Examples
///
/// ```
/// assert_eq!(12u32.align_down(2), 12);
/// assert_eq!(12u32.align_down(4), 12);
/// assert_eq!(12u32.align_down(8), 8);
/// assert_eq!(12u32.align_down(16), 0);
/// ```
fn align_down(self, power_of_two: Self) -> Self;
}
macro_rules! impl_align_ext {
($( $uint_type:ty ),+,) => {
$(
impl AlignExt for $uint_type {
fn is_power_of_two(&self) -> bool {
(*self != 0) && ((*self & (*self - 1)) == 0)
}
fn align_up(self, align: Self) -> Self {
assert!(align.is_power_of_two() && align >= 2);
self.checked_add(align - 1).unwrap() & !(align - 1)
}
fn align_down(self, align: Self) -> Self {
assert!(align.is_power_of_two() && align >= 2);
self & !(align - 1)
}
}
)*
}
}
impl_align_ext! {
u8,
u16,
u32,
u64,
usize,
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_align_up() {
let input_ns = [0usize, 1, 2, 9, 15, 21, 32, 47, 50];
let input_as = [2usize, 2, 2, 2, 4, 4, 8, 8, 8];
let output_ns = [0usize, 2, 2, 10, 16, 24, 32, 48, 56];
for i in 0..input_ns.len() {
let n = input_ns[i];
let a = input_as[i];
let n2 = output_ns[i];
assert!(n.align_up(a) == n2);
}
}
#[test]
fn test_align_down() {
let input_ns = [0usize, 1, 2, 9, 15, 21, 32, 47, 50];
let input_as = [2usize, 2, 2, 2, 4, 4, 8, 8, 8];
let output_ns = [0usize, 0, 2, 8, 12, 20, 32, 40, 48];
for i in 0..input_ns.len() {
let n = input_ns[i];
let a = input_as[i];
let n2 = output_ns[i];
assert!(n.align_down(a) == n2);
}
}
}

View File

@ -0,0 +1,6 @@
mod align_ext;
pub mod recycle_allocator;
mod type_map;
pub use self::align_ext::AlignExt;
pub use self::type_map::TypeMap;

View File

@ -0,0 +1,93 @@
use alloc::vec::Vec;
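/// A simple id allocator that hands out increasing ids, recycles freed ids,
/// and can reserve specific target ids.
///
/// A usage sketch (the expected values follow from the allocation rules below):
///
/// ```ignore
/// let mut ids = RecycleAllocator::with_start_max(32, 256);
/// let a = ids.alloc();           // 32
/// let _b = ids.alloc();          // 33
/// ids.dealloc(a);
/// assert_eq!(ids.alloc(), a);    // recycled ids are reused first
/// assert!(ids.get_target(200));  // reserve an id above `current`
/// ```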
pub struct RecycleAllocator {
current: usize,
recycled: Vec<usize>,
skip: Vec<usize>,
max: usize,
}
impl RecycleAllocator {
pub fn new() -> Self {
RecycleAllocator {
current: 0,
recycled: Vec::new(),
skip: Vec::new(),
max: usize::MAX - 1,
}
}
pub fn with_start_max(start: usize, max: usize) -> Self {
RecycleAllocator {
current: start,
recycled: Vec::new(),
skip: Vec::new(),
max: max,
}
}
#[allow(unused)]
pub fn alloc(&mut self) -> usize {
if let Some(id) = self.recycled.pop() {
return id;
}
// recycle list is empty, need to use current to allocate an id.
// it should skip the element in skip list
while self.skip.contains(&self.current) {
self.current += 1;
}
if self.current == self.max {
return usize::MAX;
}
self.current += 1;
self.current - 1
}
/// Deallocates an id. The id must meet one of the following requirements, otherwise this method panics:
///
/// 1. It is in the skip list
///
/// 2. It is smaller than `current` and not in the recycled list
#[allow(unused)]
pub fn dealloc(&mut self, id: usize) {
if !self.skip.contains(&id) {
assert!(id < self.current);
assert!(
!self.recycled.iter().any(|i| *i == id),
"id {} has been deallocated!",
id
);
} else {
// if the value is in skip list, then remove it from the skip list
self.skip.retain(|value| *value != id);
}
self.recycled.push(id);
}
/// Reserves the target id. Returns true if the target can be used, false otherwise.
/// The target must meet one of the following requirements to be usable:
///
/// 1. It is in the recycled list
///
/// 2. It is greater than or equal to `current`, smaller than `max`, and not in the skip list
///
pub fn get_target(&mut self, target: usize) -> bool {
if target >= self.max {
return false;
}
if target >= self.current {
if self.skip.contains(&target) {
false
} else {
self.skip.push(target);
true
}
} else {
if self.recycled.contains(&target) {
self.recycled.retain(|value| *value != target);
true
} else {
false
}
}
}
}

View File

@ -0,0 +1,26 @@
/// A type map is a collection whose keys are types, rather than values.
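///
/// A sketch of the intended API (the methods below are still `todo!`; `Stats`
/// is a hypothetical per-task type):
///
/// ```ignore
/// struct Stats {
///     switches: u64,
/// }
/// impl Any for Stats {}
///
/// let mut map = TypeMap::new();
/// map.insert(Stats { switches: 0 });
/// let stats: Option<&Stats> = map.get::<Stats>();
/// ```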
pub struct TypeMap {}
pub trait Any: core::any::Any + Send + Sync {}
impl TypeMap {
/// Creates an empty typed map.
pub fn new() -> Self {
todo!()
}
/// Inserts a new item of type `T`.
pub fn insert<T: Any>(&mut self, val: T) -> Option<T> {
todo!()
}
/// Gets an item of type `T`.
pub fn get<T: Any>(&self) -> Option<&T> {
todo!()
}
/// Gets an item of type `T`.
pub fn remove<T: Any>(&self) -> Option<T> {
todo!()
}
}

View File

@ -0,0 +1,338 @@
use core::iter::Iterator;
use crate::{config::PAGE_SIZE, mm::address::PhysAddr, prelude::*, Error};
use pod::Pod;
use super::VmIo;
use crate::mm::PhysFrame;
/// A collection of page frames (physical memory pages).
///
/// For the most parts, `VmFrameVec` is like `Vec<VmFrame>`. But the
/// implementation may or may not be based on `Vec`. Having a dedicated
/// type to represent a series of page frames is convenient because,
/// more often than not, one needs to operate on a batch of frames rather
/// than on a single frame.
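///
/// A usage sketch (assumes the frame allocator has been initialized and that
/// the `VmIo` trait is in scope):
///
/// ```ignore
/// use crate::vm::{VmAllocOptions, VmFrameVec, VmIo};
///
/// let frames = VmFrameVec::allocate(&VmAllocOptions::new(2))?;
/// frames.write_bytes(0, b"hello")?;
/// let mut buf = [0u8; 5];
/// frames.read_bytes(0, &mut buf)?;
/// assert_eq!(&buf, b"hello");
/// ```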
pub struct VmFrameVec(Vec<VmFrame>);
impl VmFrameVec {
/// Allocate a collection of free frames according to the given options.
///
/// All returned frames are safe to use in the sense that they are
/// not _typed memory_. We define typed memory as the memory that
/// may store Rust objects or affect Rust memory safety, e.g.,
/// the code and data segments of the OS kernel, the stack and heap
/// allocated for the OS kernel.
///
/// For more information, see `VmAllocOptions`.
pub fn allocate(options: &VmAllocOptions) -> Result<Self> {
// `page_size` in the options is the number of frames to allocate; see `VmAllocOptions::new`.
let nframes = options.page_size;
let mut frame_list = Vec::new();
for _ in 0..nframes {
let vm_frame = if let Some(paddr) = options.paddr {
VmFrame::alloc_with_paddr(paddr)
} else {
VmFrame::alloc()
};
if vm_frame.is_none() {
return Err(Error::NoMemory);
}
frame_list.push(vm_frame.unwrap());
}
Ok(Self(frame_list))
}
/// Pushes a new frame to the collection.
pub fn push(&mut self, new_frame: VmFrame) {
self.0.push(new_frame);
}
/// Gets the physical address just past the last frame in the collection.
pub fn end_pa(&self) -> Option<PhysAddr> {
self.0.last().map(|frame| PhysAddr(frame.paddr() + PAGE_SIZE))
}
/// Pop a frame from the collection.
pub fn pop(&mut self) -> Option<VmFrame> {
self.0.pop()
}
/// Removes a frame at a position.
pub fn remove(&mut self, at: usize) -> VmFrame {
self.0.remove(at)
}
/// Append some frames.
pub fn append(&mut self, more: &mut VmFrameVec) -> Result<()> {
self.0.append(&mut more.0);
Ok(())
}
/// Truncate some frames.
///
/// If `new_len >= self.len()`, then this method has no effect.
pub fn truncate(&mut self, new_len: usize) {
if new_len >= self.0.len() {
return;
}
self.0.truncate(new_len)
}
/// Returns an iterator over the frames.
pub fn iter(&self) -> core::slice::Iter<'_, VmFrame> {
self.0.iter()
}
/// Returns the number of frames.
pub fn len(&self) -> usize {
self.0.len()
}
/// Returns whether the frame collection is empty.
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Returns the number of bytes.
///
/// This method is equivalent to `self.len() * PAGE_SIZE`.
pub fn nbytes(&self) -> usize {
self.0.len() * PAGE_SIZE
}
}
impl VmIo for VmFrameVec {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
let mut start = offset;
let mut remain = buf.len();
let mut processed = 0;
for pa in self.0.iter() {
if start >= PAGE_SIZE {
start -= PAGE_SIZE;
} else {
let copy_len = (PAGE_SIZE - start).min(remain);
let src = &mut buf[processed..processed + copy_len];
let dst = &pa.start_pa().kvaddr().get_bytes_array()[start..src.len() + start];
src.copy_from_slice(dst);
processed += copy_len;
remain -= copy_len;
start = 0;
if remain == 0 {
break;
}
}
}
Ok(())
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
let mut start = offset;
let mut remain = buf.len();
let mut processed = 0;
for pa in self.0.iter() {
if start >= PAGE_SIZE {
start -= PAGE_SIZE;
} else {
let copy_len = (PAGE_SIZE - start).min(remain);
let src = &buf[processed..processed + copy_len];
let dst = &mut pa.start_pa().kvaddr().get_bytes_array()[start..src.len() + start];
dst.copy_from_slice(src);
processed += copy_len;
remain -= copy_len;
start = 0;
if remain == 0 {
break;
}
}
}
Ok(())
}
}
/// An iterator for frames.
pub struct VmFrameVecIter<'a> {
frames: &'a VmFrameVec,
current: usize,
// more...
}
impl<'a> VmFrameVecIter<'a> {
pub fn new(frames: &'a VmFrameVec) -> Self {
Self { frames, current: 0 }
}
}
impl<'a> Iterator for VmFrameVecIter<'a> {
type Item = &'a VmFrame;
fn next(&mut self) -> Option<Self::Item> {
if self.current >= self.frames.0.len() {
return None;
}
let item = self.frames.0.get(self.current);
// Advance the cursor so that the iterator terminates.
self.current += 1;
item
}
}
/// Options for allocating physical memory pages (or frames).
/// See `VmFrameVec::alloc`.
pub struct VmAllocOptions {
page_size: usize,
paddr: Option<Paddr>,
}
impl VmAllocOptions {
/// Creates new options for allocating the specified number of frames.
pub fn new(len: usize) -> Self {
Self {
page_size: len,
paddr: None,
}
}
/// Sets the physical address of the first frame.
///
/// If the physical address is given, then the allocated frames will be
/// contiguous.
///
/// The default value is `None`.
pub fn paddr(&mut self, paddr: Option<Paddr>) -> &mut Self {
self.paddr = paddr;
self
}
/// Sets whether the allocated frames should be contiguous.
///
/// If the physical address is set, then the frames must be contiguous.
///
/// The default value is `false`.
pub fn is_contiguous(&mut self, is_contiguous: bool) -> &mut Self {
todo!()
}
/// Sets whether the pages can be accessed by devices through
/// Direct Memory Access (DMA).
///
/// In a TEE environment, DMAable pages are untrusted pages shared with
/// the VMM.
pub fn can_dma(&mut self, can_dma: bool) -> &mut Self {
todo!()
}
}
#[derive(Debug, Clone)]
/// A handle to a page frame.
///
/// An instance of `VmFrame` is a handle to a page frame (a physical memory
/// page). A cloned `VmFrame` refers to the same page frame as the original.
/// As the original and cloned instances point to the same physical address,
/// they are treated as equal to each other. Behind the scenes,
/// a reference counter is maintained for each page frame so that
/// when all instances of `VmFrame` that refer to the
/// same page frame are dropped, the page frame will be freed.
/// Free page frames are allocated in bulk by `VmFrameVec::allocate`.
pub struct VmFrame {
pub(crate) physical_frame: Arc<PhysFrame>,
}
impl VmFrame {
/// Creates a new VmFrame.
///
/// # Safety
///
/// The given physical address must be valid for use.
pub(crate) unsafe fn new(physical_frame: PhysFrame) -> Self {
Self {
physical_frame: Arc::new(physical_frame),
}
}
    /// Allocates a new `VmFrame`.
    pub(crate) fn alloc() -> Option<Self> {
        PhysFrame::alloc().map(|phys| Self {
            physical_frame: Arc::new(phys),
        })
    }
    /// Allocates a new `VmFrame` filled with zeros.
    pub(crate) fn alloc_zero() -> Option<Self> {
        PhysFrame::alloc_zero().map(|phys| Self {
            physical_frame: Arc::new(phys),
        })
    }
    /// Allocates a new `VmFrame` at the given physical address.
    pub(crate) fn alloc_with_paddr(paddr: Paddr) -> Option<Self> {
        PhysFrame::alloc_with_paddr(paddr).map(|phys| Self {
            physical_frame: Arc::new(phys),
        })
    }
/// Returns the physical address of the page frame.
pub fn paddr(&self) -> Paddr {
self.physical_frame.start_pa().0
}
    /// Returns the start physical address of the page frame as a `PhysAddr`.
    pub fn start_pa(&self) -> PhysAddr {
        self.physical_frame.start_pa()
    }
/// Returns whether the page frame is accessible by DMA.
///
/// In a TEE environment, DMAable pages are untrusted pages shared with
/// the VMM.
pub fn can_dma(&self) -> bool {
todo!()
}
}
impl VmIo for VmFrame {
    fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
        if offset >= PAGE_SIZE || buf.len() + offset > PAGE_SIZE {
            Err(Error::InvalidArgs)
        } else {
            let src = &self.start_pa().kvaddr().get_bytes_array()[offset..offset + buf.len()];
            buf.copy_from_slice(src);
            Ok(())
        }
    }
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
if offset >= PAGE_SIZE || buf.len() + offset > PAGE_SIZE {
Err(Error::InvalidArgs)
} else {
let dst = &mut self.start_pa().kvaddr().get_bytes_array()[offset..buf.len() + offset];
dst.copy_from_slice(buf);
Ok(())
}
}
    /// Read a value of a specified type at a specified offset.
    fn read_val<T: Pod>(&self, offset: usize) -> Result<T> {
        if offset + core::mem::size_of::<T>() > PAGE_SIZE {
            return Err(Error::InvalidArgs);
        }
        let paddr = self.paddr() + offset;
        // The value may not be naturally aligned at `offset`, so read it
        // through an unaligned pointer read instead of a reference.
        let val = unsafe { (crate::mm::address::phys_to_virt(paddr) as *const T).read_unaligned() };
        Ok(val)
    }
    /// Write a value of a specified type at a specified offset.
    fn write_val<T: Pod>(&self, offset: usize, new_val: &T) -> Result<()> {
        if offset + core::mem::size_of::<T>() > PAGE_SIZE {
            return Err(Error::InvalidArgs);
        }
        let paddr = self.paddr() + offset;
        unsafe { (crate::mm::address::phys_to_virt(paddr) as *mut T).write_unaligned(*new_val) };
        Ok(())
    }
}
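// A minimal usage sketch of the frame API above (an illustration, not from the
// original sources): allocate two frames and access them through the `VmIo`
// methods of `VmFrameVec`. The function and its literal values are hypothetical.
fn frame_alloc_usage_sketch() -> Result<()> {
    // Allocate two frames at any free physical addresses.
    let options = VmAllocOptions::new(2);
    let frames = VmFrameVec::allocate(&options)?;
    assert_eq!(frames.nbytes(), 2 * PAGE_SIZE);
    // Write a few bytes at the start of the first frame and read them back.
    let data: [u8; 4] = [0xde, 0xad, 0xbe, 0xef];
    frames.write_bytes(0, &data)?;
    let mut readback = [0u8; 4];
    frames.read_bytes(0, &mut readback)?;
    assert_eq!(readback, data);
    Ok(())
}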


@ -0,0 +1,64 @@
use crate::prelude::*;
use pod::Pod;
/// A trait that enables reading/writing data from/to a VM object,
/// e.g., `VmSpace`, `VmFrameVec`, and `VmFrame`.
///
/// # Concurrency
///
/// The methods may be executed by multiple concurrent reader and writer
/// threads. In this case, if the results of concurrent reads or writes
/// need to be predictable or atomic, the users should provide an extra
/// synchronization mechanism to guarantee such properties.
pub trait VmIo: Send + Sync {
/// Read a specified number of bytes at a specified offset into a given buffer.
///
/// # No short reads
///
/// On success, the output `buf` must be filled with the requested data
/// completely. If, for any reason, the requested data is only partially
/// available, then the method shall return an error.
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()>;
/// Read a value of a specified type at a specified offset.
fn read_val<T: Pod>(&self, offset: usize) -> Result<T> {
let mut val = T::new_uninit();
self.read_bytes(offset, val.as_bytes_mut())?;
Ok(val)
}
/// Read a slice of a specified type at a specified offset.
///
/// # No short reads
///
/// Similar to `read_bytes`.
    fn read_slice<T: Pod>(&self, offset: usize, slice: &mut [T]) -> Result<()> {
        // Rebuild the byte slice with the correct byte length; transmuting the
        // slice reference would keep the element count as the length.
        let len = slice.len() * core::mem::size_of::<T>();
        let buf = unsafe { core::slice::from_raw_parts_mut(slice.as_mut_ptr() as *mut u8, len) };
        self.read_bytes(offset, buf)
    }
/// Write a specified number of bytes from a given buffer at a specified offset.
///
/// # No short writes
///
/// On success, the input `buf` must be written to the VM object entirely.
/// If, for any reason, the input data can only be written partially,
/// then the method shall return an error.
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()>;
/// Write a value of a specified type at a specified offset.
fn write_val<T: Pod>(&self, offset: usize, new_val: &T) -> Result<()> {
self.write_bytes(offset, new_val.as_bytes())?;
Ok(())
}
/// Write a slice of a specified type at a specified offset.
///
    /// # No short writes
    ///
    /// Similar to `write_bytes`.
    fn write_slice<T: Pod>(&self, offset: usize, slice: &[T]) -> Result<()> {
        // See `read_slice` for why the byte slice is rebuilt explicitly.
        let len = slice.len() * core::mem::size_of::<T>();
        let buf = unsafe { core::slice::from_raw_parts(slice.as_ptr() as *const u8, len) };
        self.write_bytes(offset, buf)
    }
}
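// A minimal usage sketch of the `VmIo` trait (an illustration, not from the
// original sources): round-trip a plain-old-data value through the default
// `read_val`/`write_val` methods. `Header` and `vm_obj` are hypothetical.
fn vm_io_usage_sketch(vm_obj: &impl VmIo) -> Result<()> {
    #[repr(C)]
    #[derive(Clone, Copy, Debug)]
    struct Header {
        magic: u32,
        len: u32,
    }
    // `Header` is plain old data: every bit pattern of its fields is valid.
    unsafe impl Pod for Header {}
    let header = Header { magic: 0x1234_5678, len: 16 };
    vm_obj.write_val(0, &header)?;
    let read_back: Header = vm_obj.read_val(0)?;
    assert_eq!(read_back.magic, header.magic);
    Ok(())
}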


@ -0,0 +1,16 @@
//! Virtual memory (VM).
/// Virtual addresses.
pub type Vaddr = usize;
/// Physical addresses.
pub type Paddr = usize;
mod frame;
mod io;
mod offset;
mod space;
pub use self::frame::{VmAllocOptions, VmFrame, VmFrameVec, VmFrameVecIter};
pub use self::io::VmIo;
pub use self::space::{VmMapOptions, VmPerm, VmSpace};


@ -0,0 +1,35 @@
/// Get the offset of a field within a type as a pointer.
///
/// ```rust
/// #[repr(C)]
/// pub struct Foo {
/// first: u8,
/// second: u32,
/// }
///
/// assert!(offset_of!(Foo, first) == (0 as *const u8));
/// assert!(offset_of!(Foo, second) == (4 as *const u32));
/// ```
#[macro_export]
macro_rules! offset_of {
($container:ty, $($field:tt)+) => ({
        // SAFETY. It is ok to have this uninitialized value because
        // 1) Its memory won't be accessed;
        // 2) It will be forgotten rather than being dropped;
        // 3) Before it gets forgotten, the code won't return prematurely or panic.
let tmp: $container = unsafe { core::mem::MaybeUninit::uninit().assume_init() };
let container_addr = &tmp as *const _;
let field_addr = &tmp.$($field)* as *const _;
::core::mem::forget(tmp);
let field_offset = (field_addr as usize - container_addr as usize) as *const _;
// Let Rust compiler infer our intended pointer type of field_offset
// by comparing it with another pointer.
let _: bool = field_offset == field_addr;
field_offset
});
}
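// A minimal usage sketch of the macro above (an illustration, not from the
// original sources): the macro yields the field offset as a typed pointer, so
// callers usually cast it to `usize`. `Foo` mirrors the hypothetical struct in
// the doc comment.
fn offset_of_usage_sketch() {
    #[repr(C)]
    struct Foo {
        first: u8,
        second: u32,
    }
    let second_offset = crate::offset_of!(Foo, second) as usize;
    // With `repr(C)`, `second` is aligned to 4 bytes, so its offset is 4.
    assert_eq!(second_offset, 4);
}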


@ -0,0 +1,229 @@
use crate::config::PAGE_SIZE;
use crate::x86_64_util;
use bitflags::bitflags;
use core::ops::Range;
use spin::Mutex;
use crate::mm::address::{is_aligned, VirtAddr};
use crate::mm::{MapArea, MemorySet, PTFlags};
use crate::vm::VmFrameVec;
use crate::{prelude::*, Error};
use super::VmIo;
/// Virtual memory space.
///
/// A virtual memory space (`VmSpace`) can be created and assigned to a user space so that
/// the virtual memory of the user space can be manipulated safely. For example,
/// given an arbitrary user-space pointer, one can read and write the memory
/// location referred to by the user-space pointer without the risk of breaking the
/// memory safety of the kernel space.
///
/// A newly-created `VmSpace` is not backed by any physical memory pages.
/// To provide memory pages for a `VmSpace`, one can allocate and map
/// physical memory (`VmFrames`) to the `VmSpace`.
pub struct VmSpace {
memory_set: Mutex<MemorySet>,
}
impl VmSpace {
/// Creates a new VM address space.
pub fn new() -> Self {
Self {
memory_set: Mutex::new(MemorySet::new()),
}
}
    /// Activates the page table by loading its root physical address into CR3.
pub unsafe fn activate(&self) {
x86_64_util::set_cr3(self.memory_set.lock().pt.root_pa.0);
}
/// Maps some physical memory pages into the VM space according to the given
/// options, returning the address where the mapping is created.
///
    /// Note that `frames` is consumed by this call: ownership of the mapped
    /// frames is transferred to the VM space.
    ///
    /// For more information, see `VmMapOptions`.
    pub fn map(&self, frames: VmFrameVec, options: &VmMapOptions) -> Result<Vaddr> {
        let mut flags = PTFlags::PRESENT;
        if options.perm.contains(VmPerm::W) {
            flags.insert(PTFlags::WRITABLE);
        }
        // TODO: respect `VmPerm::U`; for now every mapping is user-accessible.
        flags.insert(PTFlags::USER);
        let addr = match options.addr {
            Some(addr) => addr,
            None => return Err(Error::InvalidArgs),
        };
        self.memory_set.lock().map(MapArea::new(
            VirtAddr(addr),
            frames.len() * PAGE_SIZE,
            flags,
            frames,
        ));
        Ok(addr)
    }
    /// Determines whether a virtual address is already mapped.
pub fn is_mapped(&self, vaddr: Vaddr) -> bool {
let memory_set = self.memory_set.lock();
memory_set.is_mapped(VirtAddr(vaddr))
}
/// Unmaps the physical memory pages within the VM address range.
///
/// The range is allowed to contain gaps, where no physical memory pages
/// are mapped.
    pub fn unmap(&self, range: &Range<Vaddr>) -> Result<()> {
        assert!(is_aligned(range.start) && is_aligned(range.end));
        let mut start_va = VirtAddr(range.start);
        let nr_pages = (range.end - range.start) / PAGE_SIZE;
        let mut inner = self.memory_set.lock();
        for _ in 0..nr_pages {
            inner.unmap(start_va)?;
            start_va += PAGE_SIZE;
        }
        Ok(())
    }
    /// Clears all mappings and flushes the TLB.
pub fn clear(&self) {
self.memory_set.lock().clear();
crate::x86_64_util::flush_tlb();
}
/// Update the VM protection permissions within the VM address range.
///
/// The entire specified VM range must have been mapped with physical
/// memory pages.
pub fn protect(&self, range: &Range<Vaddr>, perm: VmPerm) -> Result<()> {
todo!()
}
}
impl Default for VmSpace {
fn default() -> Self {
Self::new()
}
}
impl Clone for VmSpace {
fn clone(&self) -> Self {
let memory_set = self.memory_set.lock().clone();
VmSpace {
memory_set: Mutex::new(memory_set),
}
}
}
impl VmIo for VmSpace {
fn read_bytes(&self, vaddr: usize, buf: &mut [u8]) -> Result<()> {
self.memory_set.lock().read_bytes(vaddr, buf)
}
fn write_bytes(&self, vaddr: usize, buf: &[u8]) -> Result<()> {
self.memory_set.lock().write_bytes(vaddr, buf)
}
}
/// Options for mapping physical memory pages into a VM address space.
/// See `VmSpace::map`.
pub struct VmMapOptions {
    /// The start virtual address of the mapping.
    addr: Option<Vaddr>,
    /// The memory protection permissions of the mapping.
    perm: VmPerm,
}
impl VmMapOptions {
/// Creates the default options.
pub fn new() -> Self {
Self {
addr: None,
perm: VmPerm::empty(),
}
}
/// Sets the alignment of the address of the mapping.
///
/// The alignment must be a power-of-2 and greater than or equal to the
/// page size.
///
/// The default value of this option is the page size.
pub fn align(&mut self, align: usize) -> &mut Self {
todo!()
}
/// Sets the permissions of the mapping, which affects whether
/// the mapping can be read, written, or executed.
///
/// The default value of this option is read-only.
pub fn perm(&mut self, perm: VmPerm) -> &mut Self {
self.perm = perm;
self
}
/// Sets the address of the new mapping.
///
/// The default value of this option is `None`.
pub fn addr(&mut self, addr: Option<Vaddr>) -> &mut Self {
        if addr.is_none() {
            return self;
        }
        self.addr = addr;
        self
}
/// Sets whether the mapping can overwrite any existing mappings.
///
/// If this option is `true`, then the address option must be `Some(_)`.
///
/// The default value of this option is `false`.
pub fn can_overwrite(&mut self, can_overwrite: bool) -> &mut Self {
todo!()
}
}
impl Default for VmMapOptions {
fn default() -> Self {
Self::new()
}
}
bitflags! {
/// Virtual memory protection permissions.
pub struct VmPerm: u8 {
/// Readable.
const R = 0b00000001;
/// Writable.
const W = 0b00000010;
/// Executable.
const X = 0b00000100;
/// User
const U = 0b00001000;
/// Readable + writable.
const RW = Self::R.bits | Self::W.bits;
        /// Readable + executable.
const RX = Self::R.bits | Self::X.bits;
/// Readable + writable + executable.
const RWX = Self::R.bits | Self::W.bits | Self::X.bits;
/// Readable + writable + user.
const RWU = Self::R.bits | Self::W.bits | Self::U.bits;
        /// Readable + executable + user.
const RXU = Self::R.bits | Self::X.bits | Self::U.bits;
/// Readable + writable + executable + user.
const RWXU = Self::R.bits | Self::W.bits | Self::X.bits | Self::U.bits;
}
}
impl TryFrom<u64> for VmPerm {
type Error = Error;
fn try_from(value: u64) -> Result<Self> {
        VmPerm::from_bits(value as u8).ok_or(Error::InvalidVmpermBits)
}
}
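// A minimal usage sketch of `VmSpace` (an illustration, not from the original
// sources): map one freshly allocated frame into a new address space and copy
// data in through `VmIo`. The chosen address, permissions and the function
// itself are hypothetical.
fn vm_space_usage_sketch() -> Result<()> {
    let space = VmSpace::new();
    // One frame, mapped read/write and user-accessible at a fixed address.
    let frames = VmFrameVec::allocate(&crate::vm::VmAllocOptions::new(1))?;
    let mut options = VmMapOptions::new();
    options.addr(Some(0x4000_0000)).perm(VmPerm::RWU);
    let vaddr = space.map(frames, &options)?;
    // Data can then be copied into the space through its `VmIo` implementation.
    space.write_bytes(vaddr, &[0u8; 8])?;
    Ok(())
}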


@ -0,0 +1,232 @@
//! Utilities for x86_64. This module will be renamed to `x86_64` once the dependency on the `x86_64` crate is no longer necessary.
use core::arch::asm;
use x86_64::registers::{control::Cr4Flags, segmentation::Segment64, xcontrol::XCr0Flags};
#[inline(always)]
pub fn read_rsp() -> usize {
let val: usize;
unsafe {
asm!("mov {}, rsp", out(reg) val);
}
val
}
#[inline(always)]
pub fn in8(port: u16) -> u8 {
// ::x86_64::instructions::port::Port::read()
let val: u8;
unsafe {
asm!("in al, dx", out("al") val, in("dx") port, options(nomem, nostack, preserves_flags));
}
val
}
#[inline(always)]
pub fn in16(port: u16) -> u16 {
let val: u16;
unsafe {
asm!("in ax, dx", out("ax") val, in("dx") port, options(nomem, nostack, preserves_flags));
}
val
}
#[inline(always)]
pub fn in32(port: u16) -> u32 {
let val: u32;
unsafe {
asm!("in eax, dx", out("eax") val, in("dx") port, options(nomem, nostack, preserves_flags));
}
val
}
#[inline(always)]
pub fn out8(port: u16, val: u8) {
unsafe {
asm!("out dx, al", in("dx") port, in("al") val, options(nomem, nostack, preserves_flags));
}
}
#[inline(always)]
pub fn out16(port: u16, val: u16) {
unsafe {
asm!("out dx, ax", in("dx") port, in("ax") val, options(nomem, nostack, preserves_flags));
}
}
#[inline(always)]
pub fn out32(port: u16, val: u32) {
unsafe {
asm!("out dx, eax", in("dx") port, in("eax") val, options(nomem, nostack, preserves_flags));
}
}
#[inline(always)]
pub fn disable_interrupts() {
unsafe {
asm!("cli", options(nomem, nostack));
}
}
#[inline(always)]
pub fn enable_interrupts_and_hlt() {
unsafe {
asm!("sti; hlt", options(nomem, nostack));
}
}
#[inline]
pub fn enable_interrupts() {
unsafe {
asm!("sti", options(nomem, nostack));
}
}
pub const RING0: u16 = 0;
pub const RING3: u16 = 3;
pub const RFLAGS_IF: usize = 1 << 9;
#[inline(always)]
pub fn get_msr(id: u32) -> usize {
let (high, low): (u32, u32);
unsafe {
asm!("rdmsr", in("ecx") id, out("eax") low, out("edx") high, options(nomem, nostack, preserves_flags));
}
((high as usize) << 32) | (low as usize)
}
#[inline(always)]
pub fn set_msr(id: u32, val: usize) {
let low = val as u32;
let high = (val >> 32) as u32;
unsafe {
asm!("wrmsr", in("ecx") id, in("eax") low, in("edx") high, options(nostack, preserves_flags));
}
}
pub const EFER_MSR: u32 = 0xC000_0080;
pub const STAR_MSR: u32 = 0xC000_0081;
pub const LSTAR_MSR: u32 = 0xC000_0082;
pub const SFMASK_MSR: u32 = 0xC000_0084;
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
pub struct DescriptorTablePointer {
/// Size of the DT.
pub limit: u16,
/// Pointer to the memory region containing the DT.
pub base: usize,
}
/// Load a GDT.
#[inline(always)]
pub fn lgdt(gdt: &DescriptorTablePointer) {
unsafe {
asm!("lgdt [{}]", in(reg) gdt, options(readonly, nostack, preserves_flags));
}
}
/// Load an IDT.
#[inline(always)]
pub fn lidt(idt: &DescriptorTablePointer) {
unsafe {
asm!("lidt [{}]", in(reg) idt, options(readonly, nostack, preserves_flags));
}
}
/// Load the task state register using the `ltr` instruction.
#[inline(always)]
pub fn load_tss(sel: u16) {
unsafe {
asm!("ltr {0:x}", in(reg) sel, options(nomem, nostack, preserves_flags));
}
}
#[inline(always)]
pub fn set_cs(sel: u16) {
unsafe {
asm!(
"push {sel}",
"lea {tmp}, [1f + rip]",
"push {tmp}",
"retfq",
"1:",
sel = in(reg) sel as usize,
tmp = lateout(reg) _,
options(preserves_flags),
);
}
}
#[inline(always)]
pub fn set_ss(sel: u16) {
unsafe {
asm!("mov ss, {0:x}", in(reg) sel, options(nostack, preserves_flags));
}
}
#[inline(always)]
pub fn get_cr3() -> usize {
let val: usize;
unsafe {
asm!("mov {}, cr3", out(reg) val, options(nomem, nostack, preserves_flags));
}
// Mask top bits and flags.
val & 0x_000f_ffff_ffff_f000
}
#[inline(always)]
pub fn get_cr3_raw() -> usize {
let val: usize;
unsafe {
asm!("mov {}, cr3", out(reg) val, options(nomem, nostack, preserves_flags));
}
    // Return the raw value, including the flag bits.
    val
}
#[inline(always)]
pub fn get_return_address() -> usize {
let val: usize;
unsafe {
asm!("mov {}, [rsp]", out(reg) val);
}
val
}
#[inline(always)]
pub fn set_cr3(pa: usize) {
unsafe {
asm!("mov cr3, {}", in(reg) pa, options(nostack, preserves_flags));
}
}
#[inline(always)]
pub fn wrfsbase(base: u64) {
unsafe { asm!("wrfsbase {0}", in(reg) base, options(att_syntax)) }
}
#[inline(always)]
pub fn rdfsbase() -> u64 {
let fs_base = x86_64::registers::segmentation::FS::read_base();
fs_base.as_u64()
}
pub fn enable_common_cpu_features() {
let mut cr4 = x86_64::registers::control::Cr4::read();
cr4 |= Cr4Flags::FSGSBASE | Cr4Flags::OSXSAVE | Cr4Flags::OSFXSR | Cr4Flags::OSXMMEXCPT_ENABLE;
unsafe {
x86_64::registers::control::Cr4::write(cr4);
}
let mut xcr0 = x86_64::registers::xcontrol::XCr0::read();
xcr0 |= XCr0Flags::AVX | XCr0Flags::SSE;
unsafe {
x86_64::registers::xcontrol::XCr0::write(xcr0);
}
}
pub fn flush_tlb() {
x86_64::instructions::tlb::flush_all();
}
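// A minimal usage sketch of the MSR helpers above (an illustration, not from
// the original sources): read-modify-write the EFER MSR. Whether the kernel
// actually enables SCE at this point is an assumption for the example.
fn enable_syscall_extensions_sketch() {
    let efer = get_msr(EFER_MSR);
    // EFER.SCE (bit 0) enables the `syscall`/`sysret` instructions.
    set_msr(EFER_MSR, efer | 1);
}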


@ -0,0 +1,14 @@
[package]
name = "pod-derive"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
proc-macro = true
[dependencies]
proc-macro2 = "1.0"
quote = "1.0"
syn = {version = "1.0.90", features = ["extra-traits"]}


@ -0,0 +1,117 @@
//! This crate provides a procedural macro to derive the `Pod` trait defined in `framework/pod`.
//! When using this crate, `framework/pod` should also be added as a dependency.
//! This macro should only be used outside the `pod` crate itself.
//! Since `Pod` is an unsafe trait, the macro checks whether the derive is safe.
//! For structs, it checks that the struct has a valid repr (e.g., `repr(C)`, `repr(u8)`) and that each field is a `Pod` type.
//! For unions and enums, it only checks that the repr is valid.
use proc_macro2::{Ident, TokenStream};
use quote::quote;
use syn::{parse_macro_input, Attribute, Data, DataStruct, DeriveInput, Fields, Generics};
#[proc_macro_derive(Pod)]
pub fn derive_pod(input_token: proc_macro::TokenStream) -> proc_macro::TokenStream {
let input = parse_macro_input!(input_token as DeriveInput);
expand_derive_pod(input).into()
}
const ALLOWED_REPRS: [&'static str; 13] = [
"C", "u8", "i8", "u16", "i16", "u32", "i32", "u64", "i64", "usize", "isize", "u128", "i128",
];
fn expand_derive_pod(input: DeriveInput) -> TokenStream {
let attrs = input.attrs;
let ident = input.ident;
let generics = input.generics;
match input.data {
Data::Struct(data_struct) => impl_pod_for_struct(data_struct, generics, ident, attrs),
Data::Enum(..) | Data::Union(..) => impl_pod_for_enum_or_union(attrs, generics, ident),
}
}
fn impl_pod_for_struct(
data_struct: DataStruct,
generics: Generics,
ident: Ident,
attrs: Vec<Attribute>,
) -> TokenStream {
if !has_valid_repr(attrs) {
panic!("{} has invalid repr to implement Pod", ident.to_string());
}
let DataStruct { fields, .. } = data_struct;
let fields = match fields {
Fields::Named(fields_named) => fields_named.named,
Fields::Unnamed(fields_unnamed) => fields_unnamed.unnamed,
Fields::Unit => panic!("derive pod does not work for struct with unit field"),
};
// deal with generics
let (impl_generics, type_generics, where_clause) = generics.split_for_impl();
let pod_where_predicates = fields
.into_iter()
.map(|field| {
let field_ty = field.ty;
quote! {
#field_ty: ::pod::Pod
}
})
.collect::<Vec<_>>();
    // If the struct has no `where` clause, the `where` keyword must be added manually.
    if where_clause.is_none() {
        quote! {
            #[automatically_derived]
            unsafe impl #impl_generics ::pod::Pod for #ident #type_generics where #(#pod_where_predicates),* {}
        }
    } else {
        quote! {
            #[automatically_derived]
            unsafe impl #impl_generics ::pod::Pod for #ident #type_generics #where_clause, #(#pod_where_predicates),* {}
        }
    }
}
fn impl_pod_for_enum_or_union(
attrs: Vec<Attribute>,
generics: Generics,
ident: Ident,
) -> TokenStream {
if !has_valid_repr(attrs) {
panic!(
"{} does not have invalid repr to implement Pod.",
ident.to_string()
);
}
// deal with generics
let (impl_generics, type_generics, where_clause) = generics.split_for_impl();
quote! {
#[automatically_derived]
        unsafe impl #impl_generics ::pod::Pod for #ident #type_generics #where_clause {}
}
}
fn has_valid_repr(attrs: Vec<Attribute>) -> bool {
    for attr in attrs {
        if let Some(ident) = attr.path.get_ident() {
            if "repr" == ident.to_string().as_str() {
                let repr = attr.tokens.to_string();
                let repr = repr.replace("(", "").replace(")", "");
                let reprs = repr
                    .split(',')
                    .map(|one_repr| one_repr.trim())
                    .collect::<Vec<_>>();
                if ALLOWED_REPRS
                    .iter()
                    .any(|allowed_repr| reprs.contains(allowed_repr))
                {
                    return true;
                }
            }
        }
    }
    false
}
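// A minimal usage sketch of this derive macro (an illustration, not from the
// original sources), as it would appear in a downstream crate that depends on
// both `pod` and `pod-derive`. The struct and its fields are hypothetical; the
// checks above require a valid repr and `Pod`-typed fields.
#[repr(C)]
#[derive(Clone, Copy, Debug, pod_derive::Pod)]
struct PacketHeader {
    kind: u16,
    len: u16,
    checksum: u32,
}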


@ -0,0 +1,8 @@
[package]
name = "pod"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]


@ -0,0 +1,65 @@
#![no_std]
use core::{fmt::Debug, mem::MaybeUninit};
/// A marker trait for plain old data (POD).
///
/// A POD type `T:Pod` supports converting to and from arbitrary
/// `mem::size_of::<T>()` bytes _safely_.
/// For example, simple primitive types like `u8` and `i16`
/// are POD types. But perhaps surprisingly, `bool` is not POD
/// because the Rust compiler makes the implicit assumption that
/// a byte of `bool` has a value of either `0` or `1`.
/// Interpreting a byte of value `3` as a `bool` value is
/// undefined behavior.
///
/// # Safety
///
/// Marking a non-POD type as POD may cause undefined behaviors.
pub unsafe trait Pod: Copy + Sized + Debug {
/// Creates a new instance of Pod type that is filled with zeroes.
fn new_zeroed() -> Self {
// SAFETY. An all-zero value of `T: Pod` is always valid.
unsafe { core::mem::zeroed() }
}
/// Creates a new instance of Pod type with uninitialized content.
fn new_uninit() -> Self {
// SAFETY. A value of `T: Pod` can have arbitrary bits.
#[allow(clippy::uninit_assumed_init)]
unsafe {
MaybeUninit::uninit().assume_init()
}
}
/// Creates a new instance from the given bytes.
fn from_bytes(bytes: &[u8]) -> Self {
let mut new_self = Self::new_uninit();
new_self.as_bytes_mut().copy_from_slice(bytes);
new_self
}
/// As a slice of bytes.
fn as_bytes(&self) -> &[u8] {
let ptr = self as *const Self as *const u8;
let len = core::mem::size_of::<Self>();
unsafe { core::slice::from_raw_parts(ptr, len) }
}
/// As a mutable slice of bytes.
fn as_bytes_mut(&mut self) -> &mut [u8] {
let ptr = self as *mut Self as *mut u8;
let len = core::mem::size_of::<Self>();
unsafe { core::slice::from_raw_parts_mut(ptr, len) }
}
}
macro_rules! impl_pod_for {
($($pod_ty:ty),*) => {
$(unsafe impl Pod for $pod_ty {})*
};
}
// impl Pod for primitive types
impl_pod_for!(u8, u16, u32, u64, i8, i16, i32, i64, isize, usize);
// impl Pod for array
unsafe impl<T: Pod, const N: usize> Pod for [T; N] {}
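// A minimal usage sketch of the trait above (an illustration, not from the
// original sources): round-trip a primitive value through its raw byte
// representation. The function itself is hypothetical.
fn pod_roundtrip_sketch() {
    let value: u32 = 0xdead_beef;
    // Copy the byte representation into a fixed-size buffer...
    let mut buf = [0u8; core::mem::size_of::<u32>()];
    buf.copy_from_slice(value.as_bytes());
    // ...and reconstruct the value from those bytes.
    let restored = u32::from_bytes(&buf);
    assert_eq!(value, restored);
}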