Commit 0c8ba636 authored by Jeremy Soller

Cleanup Redox repo, update Rust, remove old target

parent 04ed7002
# Manifest for the Redox kernel crate.
[package]
name = "kernel"
version = "0.1.0"
# Built as a static library that the final kernel image links against.
[lib]
name = "kernel"
path = "src/lib.rs"
crate-type = ["staticlib"]
# NOTE(review): "*" wildcard versions accept any published release —
# consider pinning for reproducible builds.
[dependencies]
bitflags = "*"
spin = "*"
redox_syscall = { path = "../syscall/" }
# goblin: ELF parsing, with only the elf32/elf64 features enabled.
[dependencies.goblin]
git = "https://github.com/m4b/goblin.git"
default-features = false
features = ["elf32", "elf64"]
[dev-dependencies]
arch_test = { path = "arch/test" }
# Architecture support crates, selected by the compilation target.
[target.'cfg(target_arch = "arm")'.dependencies]
arch_arm = { path = "arch/arm" }
[target.'cfg(target_arch = "x86_64")'.dependencies]
arch_x86_64 = { path = "arch/x86_64" }
[features]
default = []
live = []
# Panic strategy: unwind in dev builds, abort in release builds.
[profile.dev]
panic = "unwind"
[profile.release]
panic = "abort"
# Manifest for the arch_arm crate: ARM architecture support for the kernel.
[package]
name = "arch_arm"
version = "0.1.0"
# NOTE(review): "*" wildcard versions accept any published release.
[dependencies]
bitflags = "*"
hole_list_allocator = { path = "../../../crates/hole_list_allocator"}
spin = "*"
/// Saved CPU state for a context switch.
///
/// The ARM port does not implement context switching yet, so this is an
/// empty placeholder type. `Default` is derived because the type has a
/// parameterless `new` (clippy: `new_without_default`).
#[derive(Debug, Default)]
pub struct Context;

impl Context {
    /// Construct a new (empty) context.
    pub fn new() -> Self {
        Context
    }
}
/// Memcpy
///
/// Copy N bytes of memory from one location to another.
///
/// Safety: `src` must be readable and `dest` writable for `n` bytes, and
/// the two regions must not overlap (use `memmove` for overlapping copies).
#[no_mangle]
pub unsafe extern fn memcpy(dest: *mut u8, src: *const u8,
                            n: usize) -> *mut u8 {
    for off in 0..n {
        *dest.offset(off as isize) = *src.offset(off as isize);
    }
    dest
}
/// Memmove
///
/// Copy N bytes of memory from src to dest. The memory areas may overlap.
///
/// When `dest` lies above `src` the copy runs backwards so every source
/// byte is read before it can be overwritten; otherwise a plain forward
/// copy is safe.
#[no_mangle]
pub unsafe extern fn memmove(dest: *mut u8, src: *const u8,
                             n: usize) -> *mut u8 {
    if src < dest as *const u8 {
        for off in (0..n).rev() {
            *dest.offset(off as isize) = *src.offset(off as isize);
        }
    } else {
        for off in 0..n {
            *dest.offset(off as isize) = *src.offset(off as isize);
        }
    }
    dest
}
/// Memset
///
/// Fill a block of memory with a specified value.
///
/// Only the low byte of `c` is used, matching the C `memset` contract.
#[no_mangle]
pub unsafe extern fn memset(s: *mut u8, c: i32, n: usize) -> *mut u8 {
    let byte = c as u8;
    for off in 0..n {
        *s.offset(off as isize) = byte;
    }
    s
}
/// Memcmp
///
/// Compare two blocks of memory.
///
/// Returns 0 when the first `n` bytes are equal, otherwise the difference
/// between the first pair of bytes that differ (negative when the byte in
/// `s1` is smaller).
#[no_mangle]
pub unsafe extern fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
    for off in 0..n {
        let x = *s1.offset(off as isize);
        let y = *s2.offset(off as isize);
        if x != y {
            return i32::from(x) - i32::from(y);
        }
    }
    0
}
//! Interrupt instructions
/// Clear interrupts
///
/// No-op stub: interrupt masking is not implemented for this ARM port yet.
#[inline(always)]
pub unsafe fn disable() {
}
/// Set interrupts
///
/// No-op stub: interrupt unmasking is not implemented for this ARM port yet.
#[inline(always)]
pub unsafe fn enable() {
}
/// Set interrupts and halt
///
/// Enabling interrupts is currently a no-op on this port, so this simply
/// halts.
#[inline(always)]
pub unsafe fn enable_and_halt() {
    halt();
}
/// Halt instruction
///
/// The `wfi` (wait-for-interrupt) instruction is commented out; a `nop` is
/// issued instead, so callers spin rather than sleeping the core.
#[inline(always)]
pub unsafe fn halt() {
    //asm!("wfi" : : : : "volatile");
    asm!("nop" : : : : "volatile");
}
/// Get a stack trace
//TODO: Check for stack being mapped before dereferencing
/// Stub: stack walking is not implemented on this ARM port; does nothing.
#[inline(never)]
pub unsafe fn stack_trace() {
}
//! Architecture support for ARM
#![feature(asm)]
#![feature(lang_items)]
#![feature(naked_functions)]
#![no_std]
extern crate hole_list_allocator as allocator;
#[macro_use]
extern crate bitflags;
extern crate spin;
/// Print to console
///
/// No console is wired up on this ARM port yet: the macro swallows its
/// arguments and expands to an empty block.
#[macro_export]
macro_rules! print {
    ($($arg:tt)*) => ({});
}
/// Print with new line to console
///
/// Forwards to `print!` with a newline appended to the format string.
#[macro_export]
macro_rules! println {
    ($fmt:expr) => (print!(concat!($fmt, "\n")));
    ($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
/// Context switching
pub mod context;
/// Memset, memcpy, etc.
pub mod externs;
/// Interrupt handling
pub mod interrupt;
/// Panic support
pub mod panic;
/// Initialization function
pub mod start;
/* Linker script for the ARM kernel image (elf32-littlearm, entry kstart).
   Every loadable section is page-aligned (4096) and bracketed by
   __<name>_start / __<name>_end symbols so the kernel can locate its own
   segments at runtime. */
ENTRY(kstart)
OUTPUT_ARCH(arm)
OUTPUT_FORMAT(elf32-littlearm)
/* The kernel is linked at address 0 — no virtual/physical offset, so the
   AT(...) load addresses below equal the virtual addresses. */
KERNEL_OFFSET = 0;
SECTIONS {
    . = KERNEL_OFFSET;
    .text : AT(ADDR(.text) - KERNEL_OFFSET) {
        __text_start = .;
        *(.text*)
        . = ALIGN(4096);
        __text_end = .;
    }
    .rodata : AT(ADDR(.rodata) - KERNEL_OFFSET) {
        __rodata_start = .;
        *(.rodata*)
        . = ALIGN(4096);
        __rodata_end = .;
    }
    .data : AT(ADDR(.data) - KERNEL_OFFSET) {
        __data_start = .;
        *(.data*)
        . = ALIGN(4096);
        __data_end = .;
    }
    /* Thread-local data and thread-local BSS share one output section.
       NOTE(review): the ". += 8" pads 8 bytes after .tbss — presumably room
       for TLS control words; the intent is undocumented upstream, confirm. */
    .tdata : AT(ADDR(.tdata) - KERNEL_OFFSET) {
        __tdata_start = .;
        *(.tdata*)
        . = ALIGN(4096);
        __tdata_end = .;
        __tbss_start = .;
        *(.tbss*)
        . += 8;
        . = ALIGN(4096);
        __tbss_end = .;
    }
    .bss : AT(ADDR(.bss) - KERNEL_OFFSET) {
        __bss_start = .;
        *(.bss*)
        . = ALIGN(4096);
        __bss_end = .;
    }
    __end = .;
    /* Metadata sections that serve no purpose in a raw kernel image. */
    /DISCARD/ : {
        *(.comment*)
        *(.debug*)
        *(.eh_frame*)
        *(.gcc_except_table*)
        *(.note*)
        *(.rel.eh_frame*)
    }
}
//! Intrinsics for panic handling
use interrupt;
#[cfg(not(test))]
/// Lang item the compiler requires for unwinding support; empty because
/// this kernel never unwinds (panics halt instead — see `panic_fmt`).
#[lang = "eh_personality"]
extern "C" fn eh_personality() {}
#[cfg(not(test))]
/// Required to handle panics
///
/// Lang-item panic handler: prints the panic message and source location,
/// dumps a stack trace, then halts the CPU in an infinite loop (never
/// returns).
#[lang = "panic_fmt"]
extern "C" fn panic_fmt(fmt: ::core::fmt::Arguments, file: &str, line: u32) -> ! {
    println!("PANIC: {}", fmt);
    println!("FILE: {}", file);
    println!("LINE: {}", line);
    unsafe { interrupt::stack_trace(); }
    println!("HALT");
    loop {
        unsafe { interrupt::halt(); }
    }
}
#[allow(non_snake_case)]
#[no_mangle]
/// Required to handle panics
///
/// Unwinder entry point the linker expects; never actually resumes
/// unwinding — it just halts the CPU forever.
pub extern "C" fn _Unwind_Resume() -> ! {
    loop {
        unsafe { interrupt::halt(); }
    }
}
/// Required for linker
///
/// ARM EABI unwinding personality stub; spins forever if ever invoked.
#[no_mangle]
pub extern "C" fn __aeabi_unwind_cpp_pr0() {
    loop {}
}
// Memory-mapped serial port registers.
// NOTE(review): 0x16000000 looks like an ARM Integrator-style UART region —
// confirm against the target board's memory map.
const SERIAL_BASE: *mut u8 = 0x16000000 as *mut u8;
const SERIAL_FLAG_REGISTER: *const u8 = 0x16000018 as *const u8;
/// Flag-register bit that reads as set while the transmit buffer is full.
const SERIAL_BUFFER_FULL: u8 = (1 << 5);
/// Write one byte to the serial port, busy-waiting until the transmit
/// buffer has room.
unsafe fn putc (c: u8)
{
    /* Wait until the serial buffer is empty */
    while *SERIAL_FLAG_REGISTER & SERIAL_BUFFER_FULL == SERIAL_BUFFER_FULL {}
    /* Put our character, c, into the serial buffer */
    *SERIAL_BASE = c;
}
/// Transmit every byte of `string` over the serial port, in order.
unsafe fn puts(string: &str)
{
    for &byte in string.as_bytes() {
        putc(byte);
    }
}
#[no_mangle]
#[naked]
/// Boot entry point (named in the linker script's ENTRY).
///
/// The function is naked, so the first thing it does is set up a stack
/// pointer by hand; it then prints a test banner over serial and spins.
pub unsafe extern fn kstart() -> ! {
    asm!("mov sp, 0x18000" : : : : "volatile");
    puts("TEST\r\n");
    loop {}
}
# Manifest for the arch_test crate: host-side test stand-in for the
# architecture support crates (no dependencies).
[package]
name = "arch_test"
version = "0.1.0"
//! Interrupt instructions
// Simulated interrupt-enable flag for the test architecture; checked by
// `halt` below.
static mut INTERRUPTS_ENABLED: bool = false;
/// Clear interrupts
///
/// Logs the transition and records interrupts as disabled.
#[inline(always)]
pub unsafe fn disable() {
    println!("CLEAR INTERRUPTS");
    INTERRUPTS_ENABLED = false;
}
/// Set interrupts
///
/// Logs the transition and records interrupts as enabled.
#[inline(always)]
pub unsafe fn enable() {
    println!("SET INTERRUPTS");
    INTERRUPTS_ENABLED = true;
}
/// Halt instruction
///
/// On the test architecture "halting" just yields the host thread. Asserts
/// interrupts were enabled first, since a real halt with interrupts masked
/// could never wake up.
#[inline(always)]
pub unsafe fn halt() {
    assert!(INTERRUPTS_ENABLED);
    ::std::thread::yield_now();
}
/// Pause instruction
///
/// No-op on the test architecture.
#[inline(always)]
pub unsafe fn pause() {
}
/// Set interrupts and nop
///
/// Enables interrupts; the "nop" half needs no emulation.
#[inline(always)]
pub unsafe fn enable_and_nop() {
    enable();
}
/// Set interrupts and halt
///
/// Enables interrupts, then yields via `halt` (which asserts the flag this
/// call just set).
#[inline(always)]
pub unsafe fn enable_and_halt() {
    enable();
    halt();
}
//! Architecture support for testing
pub use std::io;
/// Print to console
///
/// Writes the formatted arguments to the host's stdout through the
/// re-exported `io` module; write errors are deliberately ignored.
#[macro_export]
macro_rules! print {
    ($($arg:tt)*) => ({
        use $crate::io::Write;
        let _ = write!($crate::io::stdout(), $($arg)*);
    });
}
/// Print with new line to console
///
/// Forwards to `print!` with a newline appended to the format string.
#[macro_export]
macro_rules! println {
    ($fmt:expr) => (print!(concat!($fmt, "\n")));
    ($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
/// Create an interrupt function that can safely run rust code
///
/// Expands to an `unsafe extern fn $name` that wraps `$func` in an inner
/// Rust function; on the test architecture no real interrupt prologue or
/// epilogue is emitted.
#[macro_export]
macro_rules! interrupt {
    ($name:ident, $func:block) => {
        pub unsafe extern fn $name () {
            unsafe fn inner() {
                $func
            }
            // Call inner rust function
            inner();
        }
    };
}
/// Interrupt instructions
pub mod interrupt;
/// Initialization and main function
pub mod main;
/// Time functions
pub mod time;
/// This function is where the kernel sets up IRQ handlers
/// It is incredibly unsafe, and should be minimal in nature
extern {
    // Provided by the kernel crate; never returns.
    fn kmain() -> !;
}
/// Test-architecture entry point: hands control straight to the kernel's
/// `kmain`.
#[no_mangle]
pub unsafe extern fn kstart() -> ! {
    kmain();
}
/// Monotonic clock stub for the test architecture.
///
/// Always reports `(0, 0)` — no clock is wired up. NOTE(review): the tuple
/// is presumably (seconds, nanoseconds); confirm with callers.
pub fn monotonic() -> (u64, u64) {
    let (high, low) = (0, 0);
    (high, low)
}
/// Realtime clock stub for the test architecture.
///
/// Always reports `(0, 0)` — no clock is wired up. NOTE(review): the tuple
/// is presumably (seconds, nanoseconds); confirm with callers.
pub fn realtime() -> (u64, u64) {
    let (high, low) = (0, 0);
    (high, low)
}
# Manifest for the arch_x86_64 crate: x86_64 architecture support for the
# kernel.
[package]
name = "arch_x86_64"
version = "0.1.0"
# NOTE(review): "*" wildcard versions accept any published release.
[dependencies]
bitflags = "*"
hole_list_allocator = { path = "../../../crates/hole_list_allocator/" }
io = { path = "../../../crates/io/" }
raw-cpuid = { git = "https://github.com/gz/rust-cpuid" }
spin = "*"
redox_syscall = { path = "../../../syscall/" }
# x86 structure/register definitions with default features disabled.
[dependencies.x86]
version = "0.7"
default-features = false
/// Fault-recording register block of a DMA remapping hardware unit.
/// `repr(packed)` because the layout mirrors a hardware register file;
/// packed fields may be unaligned — take care when borrowing them.
#[repr(packed)]
pub struct DrhdFault {
    pub sts: u32,
    pub ctrl: u32,
    pub data: u32,
    pub addr: [u32; 2],
    _rsv: [u64; 2],
    pub log: u64,
}
/// Protected-memory region registers (enable + low/high base and limit).
#[repr(packed)]
pub struct DrhdProtectedMemory {
    pub en: u32,
    pub low_base: u32,
    pub low_limit: u32,
    pub high_base: u64,
    pub high_limit: u64,
}
/// Invalidation-queue registers.
#[repr(packed)]
pub struct DrhdInvalidation {
    pub queue_head: u64,
    pub queue_tail: u64,
    pub queue_addr: u64,
    _rsv: u32,
    pub cmpl_sts: u32,
    pub cmpl_ctrl: u32,
    pub cmpl_data: u32,
    pub cmpl_addr: [u32; 2],
}
/// Page-request-queue registers.
#[repr(packed)]
pub struct DrhdPageRequest {
    pub queue_head: u64,
    pub queue_tail: u64,
    pub queue_addr: u64,
    _rsv: u32,
    pub sts: u32,
    pub ctrl: u32,
    pub data: u32,
    pub addr: [u32; 2],
}
/// One variable-range MTRR (base/mask register pair).
#[repr(packed)]
pub struct DrhdMtrrVariable {
    pub base: u64,
    pub mask: u64,
}
/// MTRR capability and default-type registers plus the fixed and variable
/// range arrays.
#[repr(packed)]
pub struct DrhdMtrr {
    pub cap: u64,
    pub def_type: u64,
    pub fixed: [u64; 11],
    pub variable: [DrhdMtrrVariable; 10],
}
/// Full register file of a DMA remapping hardware unit (DRHD).
/// NOTE(review): offsets presumably follow the Intel VT-d register map —
/// verify each `_rsv` gap against the spec before relying on positions.
#[repr(packed)]
pub struct Drhd {
    pub version: u32,
    _rsv: u32,
    pub cap: u64,
    pub ext_cap: u64,
    pub gl_cmd: u32,
    pub gl_sts: u32,
    pub root_table: u64,
    pub ctx_cmd: u64,
    _rsv1: u32,
    pub fault: DrhdFault,
    _rsv2: u32,
    pub pm: DrhdProtectedMemory,
    pub invl: DrhdInvalidation,
    _rsv3: u64,
    pub intr_table: u64,
    pub page_req: DrhdPageRequest,
    pub mtrr: DrhdMtrr,
}
use core::mem;
use super::sdt::Sdt;
use self::drhd::Drhd;
use memory::Frame;
use paging::{entry, ActivePageTable, PhysicalAddress};
pub mod drhd;
/// The DMA Remapping Table
///
/// Header fields of the ACPI "DMAR" table; the remapping structures that
/// follow the header are exposed through `Dmar::iter`.
#[derive(Debug)]
pub struct Dmar {
    sdt: &'static Sdt,
    pub addr_width: u8,
    pub flags: u8,
    _rsv: [u8; 10],
}
impl Dmar {
    /// Parse a DMAR table out of `sdt`.
    ///
    /// Returns `None` unless the SDT signature is "DMAR" and the payload
    /// holds at least the 12-byte header (address width, flags, 10
    /// reserved bytes), which are read with raw pointer loads below.
    pub fn new(sdt: &'static Sdt) -> Option<Dmar> {
        if &sdt.signature == b"DMAR" && sdt.data_len() >= 12 { //Not valid if no local address and flags
            let addr_width = unsafe { *(sdt.data_address() as *const u8) };
            let flags = unsafe { *(sdt.data_address() as *const u8).offset(1) };
            let rsv: [u8; 10] = unsafe { *((sdt.data_address() as *const u8).offset(2) as *const [u8; 10]) };
            Some(Dmar {
                sdt: sdt,
                addr_width: addr_width,
                flags: flags,
                _rsv: rsv,
            })
        } else {
            None
        }
    }
    /// Iterate over the remapping structures that follow the 12-byte header.
    pub fn iter(&self) -> DmarIter {
        DmarIter {
            sdt: self.sdt,
            i: 12 // Skip address width and flags
        }
    }
}
/// DMAR DMA Remapping Hardware Unit Definition
///
/// One DRHD entry in the DMAR table; `base` holds the physical address of
/// the unit's register page (see `DmarDrhd::get`).
// TODO: Implement iterator on DmarDrhd scope
#[derive(Debug)]
#[repr(packed)]
pub struct DmarDrhd {
    kind: u16,
    length: u16,
    flags: u8,
    _rsv: u8,
    segment: u16,
    base: u64,
}
impl DmarDrhd {
    /// Identity-map the unit's register page (present, writable, NX) and
    /// return a reference to the registers at `self.base`.
    ///
    /// NOTE(review): this hands out `&'static mut` built from a raw
    /// physical address — repeated calls would alias; confirm callers
    /// invoke it at most once per unit.
    pub fn get(&self, active_table: &mut ActivePageTable) -> &'static mut Drhd {
        let result = active_table.identity_map(Frame::containing_address(PhysicalAddress::new(self.base as usize)), entry::PRESENT | entry::WRITABLE | entry::NO_EXECUTE);
        result.flush(active_table);
        unsafe { &mut *(self.base as *mut Drhd) }
    }
}
/// DMAR Reserved Memory Region Reporting
///
/// Like every DMAR entry, begins with a 16-bit type (`kind`) and a 16-bit
/// byte `length` header.
// TODO: Implement iterator on DmarRmrr scope
#[derive(Debug)]
#[repr(packed)]
pub struct DmarRmrr {
    kind: u16,
    length: u16,
    _rsv: u16,
    segment: u16,
    base: u64,
    limit: u64,
}
/// DMAR Root Port ATS Capability Reporting
// TODO: Implement iterator on DmarAtsr scope
#[derive(Debug)]
#[repr(packed)]
pub struct DmarAtsr {
    kind: u16,
    length: u16,
    flags: u8,
    _rsv: u8,
    segment: u16,
}
/// DMAR Remapping Hardware Static Affinity
#[derive(Debug)]
#[repr(packed)]
pub struct DmarRhsa {
    kind: u16,
    length: u16,
    _rsv: u32,
    base: u64,
    domain: u32,
}
/// DMAR ACPI Name-space Device Declaration
// TODO: Implement iterator on DmarAndd object name
#[derive(Debug)]
#[repr(packed)]
pub struct DmarAndd {
    kind: u16,
    length: u16,
    _rsv: [u8; 3],
    acpi_dev: u8,
}
/// DMAR Entries
///
/// Typed view of one remapping structure. The `Invalid*` variants carry
/// the declared entry length for entries too small (or, for RHSA, not
/// exactly sized) to reinterpret as the typed struct.
#[derive(Debug)]
pub enum DmarEntry {
    Drhd(&'static DmarDrhd),
    InvalidDrhd(usize),
    Rmrr(&'static DmarRmrr),
    InvalidRmrr(usize),
    Atsr(&'static DmarAtsr),
    InvalidAtsr(usize),
    Rhsa(&'static DmarRhsa),
    InvalidRhsa(usize),
    Andd(&'static DmarAndd),
    InvalidAndd(usize),
    Unknown(u16)
}
/// Cursor over the remapping structures in a DMAR table's payload.
pub struct DmarIter {
    sdt: &'static Sdt,
    // Byte offset into the SDT payload; starts at 12 to skip the header.
    i: usize
}
impl Iterator for DmarIter {
type Item = DmarEntry;
fn next(&mut self) -> Option<Self::Item> {
if self.i + 4 <= self.sdt.data_len() {
let entry_type = unsafe { *((self.sdt.data_address() as *const u8).offset(self.i as isize) as *const u16) };
let entry_len = unsafe { *((self.sdt.data_address() as *const u8).offset(self.i as isize + 2) as *const u16) } as usize;
if self.i + entry_len <= self.sdt.data_len() {
let item = match entry_type {
0 => if entry_len >= mem::size_of::<DmarDrhd>() {
DmarEntry::Drhd(unsafe { &*((self.sdt.data_address() + self.i) as *const DmarDrhd) })
} else {
DmarEntry::InvalidDrhd(entry_len)
},
1 => if entry_len >= mem::size_of::<DmarRmrr>() {
DmarEntry::Rmrr(unsafe { &*((self.sdt.data_address() + self.i) as *const DmarRmrr) })
} else {
DmarEntry::InvalidRmrr(entry_len)
},
2 => if entry_len >= mem::size_of::<DmarAtsr>() {
DmarEntry::Atsr(unsafe { &*((self.sdt.data_address() + self.i) as *const DmarAtsr) })
} else {
DmarEntry::InvalidAtsr(entry_len)
},
3 => if entry_len == mem::size_of::<DmarRhsa>() {
DmarEntry::Rhsa(unsafe { &*((self.sdt.data_address() + self.i) as *const DmarRhsa) })
} else {
DmarEntry::InvalidRhsa(entry_len)
},
4 => if entry_len >= mem::size_of::<DmarAndd>() {
DmarEntry::Andd(unsafe { &*((self.sdt.data_address() + self.i) as *const DmarAndd) })
} else {
DmarEntry::InvalidAndd(entry_len)
},