@@ -9,10 +9,10 @@ path = "src/lib.rs"
crate-type = ["staticlib"]
[dependencies]
bitfield = "0.13.1"
bitflags = "1.0.3"
clippy = { version = "0.0.209", optional = true }
linked_list_allocator = "0.6.2"
raw-cpuid = "4.0.0"
redox_syscall = { path = "syscall" }
slab_allocator = { path = "slab_allocator", optional = true }
spin = "0.4.8"
@@ -26,9 +26,17 @@ features = ["elf32", "elf64"]
version = "0.1.13"
default-features = false
[dependencies.x86]
version = "0.9.0"
default-features = false
[target.'cfg(target_arch = "aarch64")'.dependencies]
byteorder = { version = "1", default-features = false }
fdt = { git = "https://gitlab.redox-os.org/thomhuds/fdt.git", default-features = false }
[target.'cfg(target_arch = "x86_64")'.dependencies]
x86 = { version = "0.9.0", default-features = false }
raw-cpuid = "4.0.0"
[build-dependencies]
cc = "1.0.3"
rustc-cfg = "0.3.0"
[features]
default = []
@@ -3,7 +3,10 @@ use std::fs;
use std::io::{Error, Write};
use std::path::Path;
use std::collections::HashMap;
use rustc_cfg::Cfg;
extern crate cc;
extern crate rustc_cfg;
// Walk the `loc` folder and its subfolders, collecting listings
// Returns a tuple (folder_map, file_list)
@@ -117,4 +120,12 @@ mod gen {
}
}
").unwrap();
// Build the pre-kstart init asm code for aarch64
let cfg = Cfg::new(env::var_os("TARGET").unwrap()).unwrap();
if cfg.target_arch == "aarch64" {
cc::Build::new()
.file("src/arch/aarch64/init/pre_kstart/early_init.S")
.compile("early_init");
}
}
ENTRY(early_init)
OUTPUT_FORMAT("elf64-littleaarch64", "elf64-littleaarch64", "elf64-littleaarch64")
KERNEL_OFFSET = 0xffffff0000000000;
SECTIONS {
. = KERNEL_OFFSET;
. += SIZEOF_HEADERS;
. = ALIGN(4096);
.text : AT(ADDR(.text) - KERNEL_OFFSET) {
__text_start = .;
*(.early_init.text*)
. = ALIGN(4096);
*(.text*)
. = ALIGN(4096);
__text_end = .;
}
.rodata : AT(ADDR(.rodata) - KERNEL_OFFSET) {
__rodata_start = .;
*(.rodata*)
. = ALIGN(4096);
__rodata_end = .;
}
.data : AT(ADDR(.data) - KERNEL_OFFSET) {
__data_start = .;
*(.data*)
. = ALIGN(4096);
__data_end = .;
__bss_start = .;
*(.bss*)
. = ALIGN(4096);
__bss_end = .;
}
.tdata : AT(ADDR(.tdata) - KERNEL_OFFSET) {
__tdata_start = .;
*(.tdata*)
. = ALIGN(4096);
__tdata_end = .;
__tbss_start = .;
*(.tbss*)
. += 8;
. = ALIGN(4096);
__tbss_end = .;
}
__end = .;
/DISCARD/ : {
*(.comment*)
*(.eh_frame*)
*(.gcc_except_table*)
*(.note*)
*(.rel.eh_frame*)
}
}
@@ -3,7 +3,7 @@ use core::ptr::NonNull;
use linked_list_allocator::Heap;
use spin::Mutex;
use paging::ActivePageTable;
use paging::{ActivePageTable, PageTableType};
static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
@@ -32,7 +32,7 @@ unsafe impl GlobalAlloc for Allocator {
panic!("__rust_allocate: heap not initialized");
};
super::map_heap(&mut ActivePageTable::new(), ::KERNEL_HEAP_OFFSET + size, ::KERNEL_HEAP_SIZE);
super::map_heap(&mut ActivePageTable::new(PageTableType::Kernel), ::KERNEL_HEAP_OFFSET + size, ::KERNEL_HEAP_SIZE);
if let Some(ref mut heap) = *HEAP.lock() {
heap.extend(::KERNEL_HEAP_SIZE);
// Because the memory map must never be aliased, it is defined here, in one place
// The lower 256 PML4 entries are reserved for userspace
// Each PML4 entry references up to 512 GB of memory
// The top (511) PML4 is reserved for recursive mapping
// The second from the top (510) PML4 is reserved for the kernel
/// The size of a single PML4
pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
pub const PML4_MASK: usize = 0x0000_ff80_0000_0000;
/// Size of a page and frame
pub const PAGE_SIZE: usize = 4096;
/// Offset of recursive paging
pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
pub const RECURSIVE_PAGE_PML4: usize = (RECURSIVE_PAGE_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset of kernel
pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
pub const KERNEL_PML4: usize = (KERNEL_OFFSET & PML4_MASK)/PML4_SIZE;
/// Kernel stack size - must be kept in sync with early_init.S. Used by memory::init
pub const KERNEL_STACK_SIZE: usize = PAGE_SIZE;
/// Offset to kernel heap
pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE;
pub const KERNEL_HEAP_PML4: usize = (KERNEL_HEAP_OFFSET & PML4_MASK)/PML4_SIZE;
/// Size of kernel heap
pub const KERNEL_HEAP_SIZE: usize = 1 * 1024 * 1024; // 1 MB
/// Offset of device map region
pub const KERNEL_DEVMAP_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
/// Offset of environment region
pub const KERNEL_ENV_OFFSET: usize = KERNEL_DEVMAP_OFFSET - PML4_SIZE;
/// Offset of temporary mapping for misc kernel bring-up actions
pub const KERNEL_TMP_MISC_OFFSET: usize = KERNEL_ENV_OFFSET - PML4_SIZE;
/// Offset of FDT DTB image
pub const KERNEL_DTB_OFFSET: usize = KERNEL_TMP_MISC_OFFSET - PML4_SIZE;
pub const KERNEL_DTB_MAX_SIZE: usize = 2 * 1024 * 1024; // 2 MB
/// Offset to kernel percpu variables
//TODO: Use 64-bit fs offset to enable this pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_DTB_OFFSET - PML4_SIZE;
/// Size of kernel percpu variables
pub const KERNEL_PERCPU_SIZE: usize = 64 * 1024; // 64 KB
/// Offset to user image
pub const USER_OFFSET: usize = 0;
pub const USER_PML4: usize = (USER_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset to user TCB
pub const USER_TCB_OFFSET: usize = 0xB000_0000;
/// Offset to user arguments
pub const USER_ARG_OFFSET: usize = USER_OFFSET + PML4_SIZE/2;
/// Offset to user heap
pub const USER_HEAP_OFFSET: usize = USER_OFFSET + PML4_SIZE;
pub const USER_HEAP_PML4: usize = (USER_HEAP_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset to user grants
pub const USER_GRANT_OFFSET: usize = USER_HEAP_OFFSET + PML4_SIZE;
pub const USER_GRANT_PML4: usize = (USER_GRANT_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset to user stack
pub const USER_STACK_OFFSET: usize = USER_GRANT_OFFSET + PML4_SIZE;
pub const USER_STACK_PML4: usize = (USER_STACK_OFFSET & PML4_MASK)/PML4_SIZE;
/// Size of user stack
pub const USER_STACK_SIZE: usize = 1024 * 1024; // 1 MB
/// Offset to user sigstack
pub const USER_SIGSTACK_OFFSET: usize = USER_STACK_OFFSET + PML4_SIZE;
pub const USER_SIGSTACK_PML4: usize = (USER_SIGSTACK_OFFSET & PML4_MASK)/PML4_SIZE;
/// Size of user sigstack
pub const USER_SIGSTACK_SIZE: usize = 256 * 1024; // 256 KB
/// Offset to user TLS
pub const USER_TLS_OFFSET: usize = USER_SIGSTACK_OFFSET + PML4_SIZE;
pub const USER_TLS_PML4: usize = (USER_TLS_OFFSET & PML4_MASK)/PML4_SIZE;
pub const USER_TLS_SIZE: usize = 64 * 1024;
/// Offset to user temporary image (used when cloning)
pub const USER_TMP_OFFSET: usize = USER_TLS_OFFSET + PML4_SIZE;
pub const USER_TMP_PML4: usize = (USER_TMP_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset to user temporary heap (used when cloning)
pub const USER_TMP_HEAP_OFFSET: usize = USER_TMP_OFFSET + PML4_SIZE;
pub const USER_TMP_HEAP_PML4: usize = (USER_TMP_HEAP_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset to user temporary page for grants
pub const USER_TMP_GRANT_OFFSET: usize = USER_TMP_HEAP_OFFSET + PML4_SIZE;
pub const USER_TMP_GRANT_PML4: usize = (USER_TMP_GRANT_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset to user temporary stack (used when cloning)
pub const USER_TMP_STACK_OFFSET: usize = USER_TMP_GRANT_OFFSET + PML4_SIZE;
pub const USER_TMP_STACK_PML4: usize = (USER_TMP_STACK_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset to user temporary sigstack (used when cloning)
pub const USER_TMP_SIGSTACK_OFFSET: usize = USER_TMP_STACK_OFFSET + PML4_SIZE;
pub const USER_TMP_SIGSTACK_PML4: usize = (USER_TMP_SIGSTACK_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset to user temporary tls (used when cloning)
pub const USER_TMP_TLS_OFFSET: usize = USER_TMP_SIGSTACK_OFFSET + PML4_SIZE;
pub const USER_TMP_TLS_PML4: usize = (USER_TMP_TLS_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset for usage in other temporary pages
pub const USER_TMP_MISC_OFFSET: usize = USER_TMP_TLS_OFFSET + PML4_SIZE;
pub const USER_TMP_MISC_PML4: usize = (USER_TMP_MISC_OFFSET & PML4_MASK)/PML4_SIZE;
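// Illustrative sketch (not part of the original file): the *_PML4 constants above are
// simply bits 39..47 of the corresponding virtual addresses. Assuming the constants
// defined in this file, the layout matches the comments at the top: userspace in the
// low slots, the kernel in slot 510, and the recursive mapping in slot 511.
#[allow(dead_code)]
fn check_pml4_layout() {
    let pml4_index = |addr: usize| (addr & PML4_MASK) / PML4_SIZE;
    debug_assert_eq!(pml4_index(USER_OFFSET), 0);             // userspace starts at slot 0
    debug_assert_eq!(pml4_index(KERNEL_OFFSET), 510);         // second from the top: kernel
    debug_assert_eq!(pml4_index(RECURSIVE_PAGE_OFFSET), 511); // top slot: recursive mapping
}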
use core::fmt;
use spin::MutexGuard;
use devices::uart_pl011::SerialPort;
use super::device::serial::COM1;
pub struct Writer<'a> {
serial: MutexGuard<'a, Option<SerialPort>>,
}
impl<'a> Writer<'a> {
pub fn new() -> Writer<'a> {
Writer {
serial: unsafe { COM1.lock() },
}
}
}
impl<'a> fmt::Write for Writer<'a> {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
if let Some(ref mut serial_port) = *self.serial {
serial_port.write_str(s);
Ok(())
} else {
Err(fmt::Error)
}
}
}
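// Illustrative usage sketch (not part of the original file): anything that can format
// text can be sent to the serial console through the fmt::Write impl above, e.g. from
// a kernel print!-style macro. Only Writer::new and core's write! macro are used here.
#[allow(dead_code)]
fn debug_print_example() {
    use core::fmt::Write;
    let mut writer = Writer::new();
    // If COM1 has not been initialised yet, write_str returns Err and the output is dropped.
    let _ = write!(writer, "hello from {}\n", "kmain");
}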
use core::fmt::{Result, Write};
use device::cpu::registers::{control_regs};
pub mod registers;
bitfield! {
pub struct MachineId(u32);
get_implementer, _: 31, 24;
get_variant, _: 23, 20;
get_architecture, _: 19, 16;
get_part_number, _: 15, 4;
get_revision, _: 3, 0;
}
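// Illustrative sketch (not part of the original file): decoding a sample MIDR value with
// the bitfield accessors above. 0x410f_d034 is used purely as an example value; it decodes
// to implementer 0x41 (Arm), part number 0xd03 (Cortex-A53) and revision 4.
#[allow(dead_code)]
fn decode_midr_example() {
    let midr = MachineId(0x410f_d034);
    debug_assert_eq!(midr.get_implementer(), 0x41);  // matched to "Arm" in CpuInfo::new
    debug_assert_eq!(midr.get_part_number(), 0xd03); // matched to "Cortex-A53"
    debug_assert_eq!(midr.get_revision(), 0x4);
}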
enum ImplementerID {
Unknown,
Arm,
Broadcom,
Cavium,
Digital,
Infineon,
Motorola,
Nvidia,
AMCC,
Qualcomm,
Marvell,
Intel,
}
const IMPLEMENTERS: [&'static str; 12] = [
"Unknown",
"Arm",
"Broadcom",
"Cavium",
"Digital",
"Infineon",
"Motorola",
"Nvidia",
"AMCC",
"Qualcomm",
"Marvell",
"Intel",
];
enum VariantID {
Unknown,
}
const VARIANTS: [&'static str; 1] = [
"Unknown",
];
enum ArchitectureID {
Unknown,
V4,
V4T,
V5,
V5T,
V5TE,
V5TEJ,
V6,
}
const ARCHITECTURES: [&'static str; 8] = [
"Unknown",
"v4",
"v4T",
"v5",
"v5T",
"v5TE",
"v5TEJ",
"v6",
];
enum PartNumberID {
Unknown,
Thunder,
Foundation,
CortexA35,
CortexA53,
CortexA55,
CortexA57,
CortexA72,
CortexA73,
CortexA75,
}
const PART_NUMBERS: [&'static str; 10] = [
"Unknown",
"Thunder",
"Foundation",
"Cortex-A35",
"Cortex-A53",
"Cortex-A55",
"Cortex-A57",
"Cortex-A72",
"Cortex-A73",
"Cortex-A75",
];
enum RevisionID {
Unknown,
Thunder1_0,
Thunder1_1,
}
const REVISIONS: [&'static str; 3] = [
"Unknown",
"Thunder-1.0",
"Thunder-1.1",
];
struct CpuInfo {
implementer: &'static str,
variant: &'static str,
architecture: &'static str,
part_number: &'static str,
revision: &'static str,
}
impl CpuInfo {
fn new() -> CpuInfo {
let midr = unsafe { control_regs::midr() };
println!("MIDR: 0x{:x}", midr);
let midr = MachineId(midr);
let implementer = match midr.get_implementer() {
0x41 => IMPLEMENTERS[ImplementerID::Arm as usize],
0x42 => IMPLEMENTERS[ImplementerID::Broadcom as usize],
0x43 => IMPLEMENTERS[ImplementerID::Cavium as usize],
0x44 => IMPLEMENTERS[ImplementerID::Digital as usize],
0x49 => IMPLEMENTERS[ImplementerID::Infineon as usize],
0x4d => IMPLEMENTERS[ImplementerID::Motorola as usize],
0x4e => IMPLEMENTERS[ImplementerID::Nvidia as usize],
0x50 => IMPLEMENTERS[ImplementerID::AMCC as usize],
0x51 => IMPLEMENTERS[ImplementerID::Qualcomm as usize],
0x56 => IMPLEMENTERS[ImplementerID::Marvell as usize],
0x69 => IMPLEMENTERS[ImplementerID::Intel as usize],
_ => IMPLEMENTERS[ImplementerID::Unknown as usize],
};
let variant = match midr.get_variant() {
_ => VARIANTS[VariantID::Unknown as usize],
};
let architecture = match midr.get_architecture() {
0b0001 => ARCHITECTURES[ArchitectureID::V4 as usize],
0b0010 => ARCHITECTURES[ArchitectureID::V4T as usize],
0b0011 => ARCHITECTURES[ArchitectureID::V5 as usize],
0b0100 => ARCHITECTURES[ArchitectureID::V5T as usize],
0b0101 => ARCHITECTURES[ArchitectureID::V5TE as usize],
0b0110 => ARCHITECTURES[ArchitectureID::V5TEJ as usize],
0b0111 => ARCHITECTURES[ArchitectureID::V6 as usize],
_ => ARCHITECTURES[ArchitectureID::Unknown as usize],
};
let part_number = match midr.get_part_number() {
0x0a1 => PART_NUMBERS[PartNumberID::Thunder as usize],
0xd00 => PART_NUMBERS[PartNumberID::Foundation as usize],
0xd04 => PART_NUMBERS[PartNumberID::CortexA35 as usize],
0xd03 => PART_NUMBERS[PartNumberID::CortexA53 as usize],
0xd05 => PART_NUMBERS[PartNumberID::CortexA55 as usize],
0xd07 => PART_NUMBERS[PartNumberID::CortexA57 as usize],
0xd08 => PART_NUMBERS[PartNumberID::CortexA72 as usize],
0xd09 => PART_NUMBERS[PartNumberID::CortexA73 as usize],
0xd0a => PART_NUMBERS[PartNumberID::CortexA75 as usize],
_ => PART_NUMBERS[PartNumberID::Unknown as usize],
};
let revision = match part_number {
"Thunder" => {
let val = match midr.get_revision() {
0x00 => REVISIONS[RevisionID::Thunder1_0 as usize],
0x01 => REVISIONS[RevisionID::Thunder1_1 as usize],
_ => REVISIONS[RevisionID::Unknown as usize],
};
val
},
_ => REVISIONS[RevisionID::Unknown as usize],
};
CpuInfo {
implementer,
variant,
architecture,
part_number,
revision,
}
}
}
pub fn cpu_info<W: Write>(w: &mut W) -> Result {
let cpuinfo = CpuInfo::new();
write!(w, "Implementer: {}\n", cpuinfo.implementer)?;
write!(w, "Variant: {}\n", cpuinfo.variant)?;
write!(w, "Architecture version: {}\n", cpuinfo.architecture)?;
write!(w, "Part Number: {}\n", cpuinfo.part_number)?;
write!(w, "Revision: {}\n", cpuinfo.revision)?;
write!(w, "\n")?;
Ok(())
}
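// Illustrative usage sketch (not part of the original file): cpu_info only needs a
// core::fmt::Write sink, so besides the kernel console it can also write into a plain
// byte buffer. BufWriter below is a hypothetical helper defined just for this example.
#[allow(dead_code)]
fn dump_cpu_info_example() -> Result {
    use core::fmt::Error;
    struct BufWriter<'a> { buf: &'a mut [u8], pos: usize }
    impl<'a> Write for BufWriter<'a> {
        fn write_str(&mut self, s: &str) -> Result {
            let bytes = s.as_bytes();
            let end = self.pos + bytes.len();
            if end > self.buf.len() { return Err(Error); }
            self.buf[self.pos..end].copy_from_slice(bytes);
            self.pos = end;
            Ok(())
        }
    }
    let mut storage = [0u8; 256];
    let mut sink = BufWriter { buf: &mut storage, pos: 0 };
    cpu_info(&mut sink)
}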
//! Functions to read and write control registers.
bitflags! {
pub struct MairEl1: u64 {
const DEVICE_MEMORY = 0x00;
const NORMAL_UNCACHED_MEMORY = 0x44 << 8;
const NORMAL_WRITEBACK_MEMORY = 0xff << 16;
}
}
pub unsafe fn ttbr0_el1() -> u64 {
let ret: u64;
asm!("mrs $0, ttbr0_el1" : "=r" (ret));
ret
}
pub unsafe fn ttbr0_el1_write(val: u64) {
asm!("msr ttbr0_el1, $0" :: "r" (val) : "memory");
}
pub unsafe fn ttbr1_el1() -> u64 {
let ret: u64;
asm!("mrs $0, ttbr1_el1" : "=r" (ret));
ret
}
pub unsafe fn ttbr1_el1_write(val: u64) {
asm!("msr ttbr1_el1, $0" :: "r" (val) : "memory");
}
pub unsafe fn mair_el1() -> MairEl1 {
let ret: u64;
asm!("mrs $0, mair_el1" : "=r" (ret));
MairEl1::from_bits_truncate(ret)
}
pub unsafe fn mair_el1_write(val: MairEl1) {
asm!("msr mair_el1, $0" :: "r" (val.bits()) : "memory");
}
pub unsafe fn tpidr_el0_write(val: u64) {
asm!("msr tpidr_el0, $0" :: "r" (val) : "memory");
}
pub unsafe fn tpidr_el1_write(val: u64) {
asm!("msr tpidr_el1, $0" :: "r" (val) : "memory");
}
pub unsafe fn esr_el1() -> u32 {
let ret: u32;
asm!("mrs $0, esr_el1" : "=r" (ret));
ret
}
pub unsafe fn cntfreq_el0() -> u32 {
let ret: u32;
asm!("mrs $0, cntfrq_el0" : "=r" (ret));
ret
}
pub unsafe fn tmr_ctrl() -> u32 {
let ret: u32;
asm!("mrs $0, cntp_ctl_el0" : "=r" (ret));
ret
}
pub unsafe fn tmr_ctrl_write(val: u32) {
asm!("msr cntp_ctl_el0, $0" :: "r" (val) : "memory");
}
pub unsafe fn tmr_tval() -> u32 {
let ret: u32;
asm!("mrs $0, cntp_tval_el0" : "=r" (ret));
ret
}
pub unsafe fn tmr_tval_write(val: u32) {
asm!("msr cntp_tval_el0, $0" :: "r" (val) : "memory");
}
pub unsafe fn midr() -> u32 {
let ret: u32;
asm!("mrs $0, midr_el1" : "=r" (ret));
ret
}
pub mod control_regs;
pub mod tlb;
//! Functions to flush the translation lookaside buffer (TLB).
pub unsafe fn flush(_addr: usize) {
// Note: the address is currently ignored; this invalidates the entire TLB
asm!("tlbi vmalle1is");
}
pub unsafe fn flush_all() {
asm!("tlbi vmalle1is");
}
use arch::device::gic;
use device::cpu::registers::{control_regs};
bitflags! {
struct TimerCtrlFlags: u32 {
const ENABLE = 1 << 0;
const IMASK = 1 << 1;
const ISTATUS = 1 << 2;
}
}
pub static mut GENTIMER: GenericTimer = GenericTimer {
clk_freq: 0,
reload_count: 0,
};
pub unsafe fn init() {
GENTIMER.init();
}
/*
pub unsafe fn clear_irq() {
GENTIMER.clear_irq();
}
pub unsafe fn reload() {
GENTIMER.reload_count();
}
*/
pub struct GenericTimer {
pub clk_freq: u32,
pub reload_count: u32,
}
impl GenericTimer {
pub fn init(&mut self) {
let clk_freq = unsafe { control_regs::cntfreq_el0() };
self.clk_freq = clk_freq;
self.reload_count = clk_freq / 100; // 100 Hz tick
unsafe { control_regs::tmr_tval_write(self.reload_count) };
let mut ctrl = TimerCtrlFlags::from_bits_truncate(unsafe { control_regs::tmr_ctrl() });
ctrl.insert(TimerCtrlFlags::ENABLE);
ctrl.remove(TimerCtrlFlags::IMASK);
unsafe { control_regs::tmr_ctrl_write(ctrl.bits()) };
gic::irq_enable(30);
}
fn disable() {
let mut ctrl = TimerCtrlFlags::from_bits_truncate(unsafe { control_regs::tmr_ctrl() });
ctrl.remove(TimerCtrlFlags::ENABLE);
unsafe { control_regs::tmr_ctrl_write(ctrl.bits()) };
}
pub fn set_irq(&mut self) {
let mut ctrl = TimerCtrlFlags::from_bits_truncate(unsafe { control_regs::tmr_ctrl() });
ctrl.remove(TimerCtrlFlags::IMASK);
unsafe { control_regs::tmr_ctrl_write(ctrl.bits()) };
}
pub fn clear_irq(&mut self) {
let mut ctrl = TimerCtrlFlags::from_bits_truncate(unsafe { control_regs::tmr_ctrl() });
if ctrl.contains(TimerCtrlFlags::ISTATUS) {
ctrl.insert(TimerCtrlFlags::IMASK);
unsafe { control_regs::tmr_ctrl_write(ctrl.bits()) };
}
}
pub fn reload_count(&mut self) {
let mut ctrl = TimerCtrlFlags::from_bits_truncate(unsafe { control_regs::tmr_ctrl() });
ctrl.insert(TimerCtrlFlags::ENABLE);
ctrl.remove(TimerCtrlFlags::IMASK);
unsafe { control_regs::tmr_tval_write(self.reload_count) };
unsafe { control_regs::tmr_ctrl_write(ctrl.bits()) };
}
}
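// Illustrative sketch (not part of the original file): a timer interrupt handler would
// typically use the driver above roughly like this; the handler itself and any scheduler
// call are assumptions for illustration only.
#[allow(dead_code)]
unsafe fn timer_irq_example() {
    GENTIMER.clear_irq();    // mask the pending interrupt (ISTATUS set)
    // ... timekeeping / scheduler tick would go here ...
    GENTIMER.reload_count(); // re-arm the countdown and unmask the interrupt
}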
use core::intrinsics::{volatile_load, volatile_store};
use memory::Frame;
use paging::{ActivePageTable, PhysicalAddress, Page, PageTableType, VirtualAddress};
use paging::entry::EntryFlags;
static GICD_CTLR: u32 = 0x000;
static GICD_TYPER: u32 = 0x004;
static GICD_ISENABLER: u32 = 0x100;
static GICD_ICENABLER: u32 = 0x180;
static GICD_IPRIORITY: u32 = 0x400;
static GICD_ITARGETSR: u32 = 0x800;
static GICD_ICFGR: u32 = 0xc00;
static GICC_EOIR: u32 = 0x0010;
static GICC_IAR: u32 = 0x000c;
static GICC_CTLR: u32 = 0x0000;
static GICC_PMR: u32 = 0x0004;
static mut GIC_DIST_IF: GicDistIf = GicDistIf {
address: 0,
ncpus: 0,
nirqs: 0,
};
static mut GIC_CPU_IF: GicCpuIf = GicCpuIf {
address: 0,
};
pub unsafe fn init() {
GIC_DIST_IF.init();
GIC_CPU_IF.init();
}
pub fn irq_enable(irq_num: u32) {
unsafe { GIC_DIST_IF.irq_enable(irq_num) };
}
pub fn irq_disable(irq_num: u32) {
unsafe { GIC_DIST_IF.irq_disable(irq_num) };
}
pub unsafe fn irq_ack() -> u32 {
GIC_CPU_IF.irq_ack()
}
pub unsafe fn irq_eoi(irq_num: u32) {
GIC_CPU_IF.irq_eoi(irq_num);
}
pub struct GicDistIf {
pub address: usize,
pub ncpus: u32,
pub nirqs: u32,
}
impl GicDistIf {
unsafe fn init(&mut self) {
// Map in the Distributor interface
let mut active_table = ActivePageTable::new(PageTableType::Kernel);
let start_frame = Frame::containing_address(PhysicalAddress::new(0x08000000));
let end_frame = Frame::containing_address(PhysicalAddress::new(0x08000000 + 0x10000 - 1));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_DEVMAP_OFFSET));
let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
result.flush(&mut active_table);
}
self.address = ::KERNEL_DEVMAP_OFFSET + 0x08000000;
// Map in CPU0's interface
let start_frame = Frame::containing_address(PhysicalAddress::new(0x08010000));
let end_frame = Frame::containing_address(PhysicalAddress::new(0x08010000 + 0x10000 - 1));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_DEVMAP_OFFSET));
let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
result.flush(&mut active_table);
}
GIC_CPU_IF.address = ::KERNEL_DEVMAP_OFFSET + 0x08010000;
// Disable IRQ Distribution
self.write(GICD_CTLR, 0);
let typer = self.read(GICD_TYPER);
self.ncpus = ((typer & (0x7 << 5)) >> 5) + 1;
self.nirqs = ((typer & 0x1f) + 1) * 32;
println!("gic: Distributor supports {:?} CPUs and {:?} IRQs", self.ncpus, self.nirqs);
// Set all SPIs to level triggered
for irq in (32..self.nirqs).step_by(16) {
self.write(GICD_ICFGR + ((irq / 16) * 4), 0);
}
// Disable all SPIs
for irq in (32..self.nirqs).step_by(32) {
self.write(GICD_ICENABLER + ((irq / 32) * 4), 0xffff_ffff);
}
// Target all SPIs at CPU0 and set priorities for all IRQs
for irq in 0..self.nirqs {
if irq > 31 {
let ext_offset = GICD_ITARGETSR + (4 * (irq / 4));
let int_offset = irq % 4;
let mut val = self.read(ext_offset);
val |= 0b0000_0001 << (8 * int_offset);
self.write(ext_offset, val);
}
let ext_offset = GICD_IPRIORITY + (4 * (irq / 4));
let int_offset = irq % 4;
let mut val = self.read(ext_offset);
val |= 0b0000_0000 << (8 * int_offset);
self.write(ext_offset, val);
}
// Enable CPU0's GIC interface
GIC_CPU_IF.write(GICC_CTLR, 1);
// Set CPU0's Interrupt Priority Mask
GIC_CPU_IF.write(GICC_PMR, 0xff);
// Enable IRQ distribution
self.write(GICD_CTLR, 0x1);
}
unsafe fn irq_enable(&mut self, irq: u32) {
let offset = GICD_ISENABLER + (4 * (irq / 32));
let shift = 1 << (irq % 32);
let mut val = self.read(offset);
val |= shift;
self.write(offset, val);
}
unsafe fn irq_disable(&mut self, irq: u32) {
let offset = GICD_ICENABLER + (4 * (irq / 32));
let shift = 1 << (irq % 32);
let mut val = self.read(offset);
val |= shift;
self.write(offset, val);
}
unsafe fn read(&self, reg: u32) -> u32 {
let val = volatile_load((self.address + reg as usize) as *const u32);
val
}
unsafe fn write(&mut self, reg: u32, value: u32) {
volatile_store((self.address + reg as usize) as *mut u32, value);
}
}
pub struct GicCpuIf {
pub address: usize,
}
impl GicCpuIf {
unsafe fn init(&mut self) {
}
unsafe fn irq_ack(&mut self) -> u32 {
let irq = self.read(GICC_IAR) & 0x1ff;
if irq == 1023 {
panic!("irq_ack: got ID 1023!!!");
}
irq
}
unsafe fn irq_eoi(&mut self, irq: u32) {
self.write(GICC_EOIR, irq);
}
unsafe fn read(&self, reg: u32) -> u32 {
let val = volatile_load((self.address + reg as usize) as *const u32);
val
}
unsafe fn write(&mut self, reg: u32, value: u32) {
volatile_store((self.address + reg as usize) as *mut u32, value);
}
}
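// Illustrative sketch (not part of the original file): an IRQ exception entry path would
// pair irq_ack and irq_eoi roughly like this; the dispatch in the middle is an assumption
// for illustration only.
#[allow(dead_code)]
unsafe fn irq_entry_example() {
    let irq = irq_ack();  // read GICC_IAR: which interrupt is pending?
    match irq {
        30 => { /* generic timer tick: clear and re-arm in the timer driver */ }
        _ => { /* look up and call the registered handler for `irq` */ }
    }
    irq_eoi(irq);         // write GICC_EOIR: signal end of interrupt
}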
use paging::ActivePageTable;
pub mod cpu;
pub mod gic;
pub mod generic_timer;
pub mod serial;
pub mod rtc;
pub unsafe fn init(_active_table: &mut ActivePageTable) {
gic::init();
generic_timer::init();
}
pub unsafe fn init_noncore() {
serial::init();
rtc::init();
}
pub unsafe fn init_ap() {
}
use time;
use core::intrinsics::{volatile_load, volatile_store};
use memory::Frame;
use paging::{ActivePageTable, PhysicalAddress, Page, PageTableType, VirtualAddress};
use paging::entry::EntryFlags;
static RTC_DR: u32 = 0x000;
static RTC_MR: u32 = 0x004;
static RTC_LR: u32 = 0x008;
static RTC_CR: u32 = 0x00c;
static RTC_IMSC: u32 = 0x010;
static RTC_RIS: u32 = 0x014;
static RTC_MIS: u32 = 0x018;
static RTC_ICR: u32 = 0x01c;
static mut PL031_RTC: Pl031rtc = Pl031rtc {
address: 0,
};
pub unsafe fn init() {
PL031_RTC.init();
time::START.lock().0 = PL031_RTC.time();
}
struct Pl031rtc {
pub address: usize,
}
impl Pl031rtc {
unsafe fn init(&mut self) {
let mut active_table = ActivePageTable::new(PageTableType::Kernel);
let start_frame = Frame::containing_address(PhysicalAddress::new(0x09010000));
let end_frame = Frame::containing_address(PhysicalAddress::new(0x09010000 + 0x1000 - 1));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_DEVMAP_OFFSET));
let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
result.flush(&mut active_table);
}
self.address = ::KERNEL_DEVMAP_OFFSET + 0x09010000;
}
unsafe fn read(&self, reg: u32) -> u32 {
let val = volatile_load((self.address + reg as usize) as *const u32);
val
}
unsafe fn write(&mut self, reg: u32, value: u32) {
volatile_store((self.address + reg as usize) as *mut u32, value);
}
pub fn time(&mut self) -> u64 {
let seconds = unsafe { self.read(RTC_DR) } as u64;
seconds
}
}
use devices::uart_pl011::SerialPort;
use core::sync::atomic::{Ordering};
use init::device_tree;
use memory::Frame;
use paging::mapper::{MapperFlushAll, MapperType};
use paging::{ActivePageTable, Page, PageTableType, PhysicalAddress, VirtualAddress};
use paging::entry::EntryFlags;
use spin::Mutex;
pub static COM1: Mutex<Option<SerialPort>> = Mutex::new(None);
pub unsafe fn init() {
if let Some(ref mut serial_port) = *COM1.lock() {
return;
}
let (base, size) = device_tree::diag_uart_range(::KERNEL_DTB_OFFSET, ::KERNEL_DTB_MAX_SIZE).unwrap();
let mut active_ktable = unsafe { ActivePageTable::new(PageTableType::Kernel) };
let mut flush_all = MapperFlushAll::new();
let start_frame = Frame::containing_address(PhysicalAddress::new(base));
let end_frame = Frame::containing_address(PhysicalAddress::new(base + size - 1));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_DEVMAP_OFFSET));
let result = active_ktable.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE);
flush_all.consume(result);
};
flush_all.flush(&mut active_ktable);
let start_frame = Frame::containing_address(PhysicalAddress::new(base));
let vaddr = start_frame.start_address().get() + ::KERNEL_DEVMAP_OFFSET;
*COM1.lock() = Some(SerialPort::new(vaddr));
if let Some(ref mut serial_port) = *COM1.lock() {
serial_port.init();
}
}
extern crate fdt;
extern crate byteorder;
use alloc::vec::Vec;
use core::slice;
use memory;
use self::byteorder::{ByteOrder, BE};
fn root_cell_sz(dt: &fdt::DeviceTree) -> Option<(u32, u32)> {
let root_node = dt.nodes().nth(0).unwrap();
let address_cells = root_node.properties().find(|p| p.name.contains("#address-cells")).unwrap();
let size_cells = root_node.properties().find(|p| p.name.contains("#size-cells")).unwrap();
Some((BE::read_u32(&address_cells.data), BE::read_u32(&size_cells.data)))
}
fn memory_ranges(dt: &fdt::DeviceTree, address_cells: usize, size_cells: usize, ranges: &mut [(usize, usize); 10]) -> usize {
let memory_node = dt.find_node("/memory").unwrap();
let reg = memory_node.properties().find(|p| p.name.contains("reg")).unwrap();
let chunk_sz = (address_cells + size_cells) * 4;
let chunk_count = reg.data.len() / chunk_sz;
let mut index = 0;
for chunk in reg.data.chunks(chunk_sz as usize) {
if index == chunk_count {
return index;
}
let (base, size) = chunk.split_at((address_cells * 4) as usize);
let mut b = 0;
for base_chunk in base.rchunks(4) {
b += BE::read_u32(base_chunk);
}
let mut s = 0;
for sz_chunk in size.rchunks(4) {
s += BE::read_u32(sz_chunk);
}
ranges[index] = (b as usize, s as usize);
index += 1;
}
index
}
pub fn diag_uart_range(dtb_base: usize, dtb_size: usize) -> Option<(usize, usize)> {
let data = unsafe { slice::from_raw_parts(dtb_base as *const u8, dtb_size) };
let dt = fdt::DeviceTree::new(data).unwrap();
let chosen_node = dt.find_node("/chosen").unwrap();
let stdout_path = chosen_node.properties().find(|p| p.name.contains("stdout-path")).unwrap();
let uart_node_name = core::str::from_utf8(stdout_path.data).unwrap()
.split('/')
.collect::<Vec<&str>>()[1].trim_end();
let len = uart_node_name.len();
let uart_node_name = &uart_node_name[0..len-1];
let uart_node = dt.nodes().find(|n| n.name.contains(uart_node_name)).unwrap();
let reg = uart_node.properties().find(|p| p.name.contains("reg")).unwrap();
let (address_cells, size_cells) = root_cell_sz(&dt).unwrap();
let chunk_sz = (address_cells + size_cells) * 4;
let (base, size) = reg.data.split_at((address_cells * 4) as usize);
let mut b = 0;
for base_chunk in base.rchunks(4) {
b += BE::read_u32(base_chunk);
}
let mut s = 0;
for sz_chunk in size.rchunks(4) {
s += BE::read_u32(sz_chunk);
}
Some((b as usize, s as usize))
}
fn compatible_node_present<'a>(dt: &fdt::DeviceTree<'a>, compat_string: &str) -> bool {
for node in dt.nodes() {
if let Some(compatible) = node.properties().find(|p| p.name.contains("compatible")) {
let s = core::str::from_utf8(compatible.data).unwrap();
if s.contains(compat_string) {
return true;
}
}
}
false
}
pub fn fill_env_data(dtb_base: usize, dtb_size: usize, env_base: usize) -> usize {
let data = unsafe { slice::from_raw_parts(dtb_base as *const u8, dtb_size) };
let dt = fdt::DeviceTree::new(data).unwrap();
let chosen_node = dt.find_node("/chosen").unwrap();
let bootargs = chosen_node.properties().find(|p| p.name.contains("bootargs")).unwrap();
let bootargs_len = bootargs.data.len();
let env_base_slice = unsafe { slice::from_raw_parts_mut(env_base as *mut u8, bootargs_len) };
env_base_slice[..bootargs_len].clone_from_slice(bootargs.data);
bootargs_len
}
pub fn fill_memory_map(dtb_base: usize, dtb_size: usize) {
let data = unsafe { slice::from_raw_parts(dtb_base as *const u8, dtb_size) };
let dt = fdt::DeviceTree::new(data).unwrap();
let (address_cells, size_cells) = root_cell_sz(&dt).unwrap();
let mut ranges: [(usize, usize); 10] = [(0,0); 10];
let nranges = memory_ranges(&dt, address_cells as usize, size_cells as usize, &mut ranges);
for index in 0..nranges {
let (base, size) = ranges[index];
unsafe {
memory::MEMORY_MAP[index] = memory::MemoryArea {
base_addr: base as u64,
length: size as u64,
_type: memory::MEMORY_AREA_FREE,
acpi: memory::MEMORY_AREA_FREE
};
}
}
}
pub mod device_tree;
// Early initialisation for AArch64 systems.
//
// This code is responsible for taking over control of the boot CPU from
// the bootloader and setting up enough of the CPU so Rust code can take
// over (in kstart).
//
// Readers are recommended to refer to the Arm Architecture Reference Manual
// when studying this code. The latest version of the Arm Arm can be found at:
//
// https://developer.arm.com/products/architecture/cpu-architecture/a-profile/docs
//
// The code is structured such that different phases/functionality are
// in separate files included by this central one.
//
// This is hopefully easier to grok and study than one gigantic file.
//
// The emphasis is on clarity and not optimisation. Clarity is hard without
// a decent understanding of the Arm architecture.
//
// Optimisation is not too much of a concern given that this is boot code.
// That said, future revisions will aim to optimise.
#include "helpers/consts.h"
#include "helpers/pre_mmu_enabled.S"
#include "helpers/build_page_tables.S"
#include "helpers/post_mmu_enabled.S"
#include "helpers/vectors.S"
// Entry point for the boot CPU. We assume that x0 contains the physical address of a DTB image
// passed in by the bootloader.
//
// Note that the kernel linker script arranges for this code to lie at the start of the kernel
// image.
.text
.align 2
.pushsection ".early_init.text", "ax"
.globl early_init
early_init:
bl early_setup
bl disable_mmu
bl create_page_tables
bl enable_mmu
b mmu_on_trampoline // With the mmu now on, this returns below to
// mmu_on using Virtual Addressing
mmu_on:
bl setup_kstart_context // Setup environment for kstart
b kstart // Let the show begin! :)
.popsection
// Creates the following MMU mappings:
//
// 1. Identity mapping for the kernel (VA == PA) to be able to switch on the MMU
// 2. Mapping for the kernel with high VAs from KERNEL_OFFSET onwards
// 3. Mapping for the kernel stack
// 4. Mapping for the DTB Image
// 5. Optional Mapping for a diagnostic UART
create_page_tables:
mov x22, x30
adr x0, addr_marker // x0: Physical address of addr_marker
ldr x1, [x0] // x1: Virtual address of addr_marker
ldr x2, =KERNEL_OFFSET // x2: Virtual address of kernel base
sub x3, x1, x2 // x3: 'Distance' of addr_marker from kernel base
sub x0, x0, x3 // x0: Physical address of kernel base
mov x11,x0 // x11: Stash away the Physical address of the kernel image base
ldr x1, =KERNEL_OFFSET // x1: Virtual address of kernel start addr
ldr x2, =__end // x2: Virtual address of kernel end addr
sub x12, x2, x1 // x12: Size of the kernel image
add x12, x12, #(0x200000) // x12: Align to 2MB (Add 2MB, then clear low bits if any)
and x3, x12, #0xffffffffffe00000
cmp x12, #0x200, lsl #12
csel x12, x3, x12, hi
add x13, x1, x12 // x13: Stack top vaddr (kbase.vaddr + ksize)
mov x14, #(EARLY_KSTACK_SIZE) // x14: Stack size
ldr x15, =KERNEL_OFFSET // x15: Kernel base vaddr
// From this point on, the following registers are not to be modified for convenience:
// x11: PA of kernel image base
// x12: Kernel image size (2MB aligned)
// x13: VA of stack top
// x14: Stack size
// x15: VA of kernel Base
// Zero out all the tables
zero_tables:
adr x0, identkmap_l0_ptable
mov x1, #(PAGE_SIZE)
mov x2, #(NUM_L2_TABLES) // There are normally 12 tables to clear (2 L0, 5 L1, 5 L2, 1 env)
mul x1, x1, x2
lsr x1, x1, #3
mov x2, xzr
zero_loop:
str xzr, [x0, x2]
add x2, x2, #8
cmp x1, x2
b.ne zero_loop
// Identity map the kernel
mov x0, x11 // x0: Paddr of kernel image base
mov x1, x11 // x1: Paddr of kernel image base
mov x2, x12 // x2: Kernel image size
mov x3, #(NORMAL_UNCACHED_MEM) // x3: Attributes to apply
adr x4, identkmap_l0_ptable // x4: Ptr to L0 table for identity mapping the kernel
adr x5, identkmap_l1_ptable // x5: Ptr to L1 table for identity mapping the kernel
adr x6, identkmap_l2_ptable // x6: Ptr to L2 table for identity mapping the kernel
bl build_map
// Map the kernel
ldr x0, =KERNEL_OFFSET // x0: Vaddr of kernel base
mov x1, x11 // x1: Paddr of kernel base
mov x2, x12 // x2: Kernel image size
mov x3, #(NORMAL_CACHED_MEM) // x3: Attributes to apply
adr x4, kernmap_l0_ptable // x4: Ptr to L0 table for mapping the kernel
adr x5, kernmap_l1_ptable // x5: Ptr to L1 table for mapping the kernel
adr x6, kernmap_l2_ptable // x6: Ptr to L2 table for mapping the kernel
bl build_map
// Map the kernel stack
ldr x0, =KERNEL_OFFSET // x0: Vaddr of kernel stack top
add x0, x0, x12
sub x1, x11, x14 // x1: Paddr of kernel stack top (kbase.paddr - kstack size)
mov x2, #(EARLY_KSTACK_SIZE) // x2: Size of kernel stack
mov x3, #(NORMAL_CACHED_MEM) // x3: Attributes to apply
adr x4, kernmap_l0_ptable // x4: Ptr to the kernel L0 table
adr x5, kstack_l1_ptable // x5: Ptr to L1 table for mapping the kernel stack
adr x6, kstack_l2_ptable // x6: Ptr to L2 table for mapping the kernel stack
bl build_map
// Map the DTB image
ldr x0, =DTB_VBASE // x0: Vaddr of DTB Image
mov x1, x19 // x1: Paddr of DTB Image
mov x2, x21 // x2: Size of DTB Image
mov x3, #(NORMAL_CACHED_MEM) // x3: Attributes to apply
adr x4, kernmap_l0_ptable // x4: Ptr to the kernel L0 table
adr x5, dtbmap_l1_ptable // x5: Ptr to L1 table for mapping the DTB Image
adr x6, dtbmap_l2_ptable // x6: Ptr to L2 table for mapping the DTB Image
bl build_map
#ifdef DEBUG_UART
// This is a temporary 'static' mapping to enable the use of the UART on
// qemu-system-aarch64's virt machine. This should be dynamically
// mapped using the DTB info and exists purely to help bring up the
// core kernel on qemu-system-aarch64. This code should be removed once
// the kstart MMU setup code is done and kstart is able to create
// its own mappings to access the UART (and other devices in this
// region).
ldr x0, =UART_VBASE
ldr x1, =UART_PBASE
mov x2, #(UART_SIZE)
mov x3, #(DEVICE_MEM)
adr x4, kernmap_l0_ptable
adr x5, devmap_l1_ptable
adr x6, devmap_l2_ptable
bl build_map
#endif
// Set up recursive paging for TTBR1
adr x0, kernmap_l0_ptable
add x1, x0, #(511 * 8)
orr x0, x0, #((DESC_TYPE_TABLE << DESC_TYPE_BIT) | (DESC_VALID << DESC_VALID_BIT))
orr x0, x0, #(ACCESS_FLAG_BIT)
str x0, [x1]
// Set up recursive paging for TTBR0
adr x0, identkmap_l0_ptable
add x1, x0, #(511 * 8)
orr x0, x0, #((DESC_TYPE_TABLE << DESC_TYPE_BIT) | (DESC_VALID << DESC_VALID_BIT))
orr x0, x0, #(ACCESS_FLAG_BIT)
str x0, [x1]
mov x30, x22
ret
// Generic routine to build mappings. Requires the following inputs:
//
// x0: Vaddr to map to Paddr
// x1: Paddr to map Vaddr to
// x2: Length (in bytes) of region to map
// x3: Region attributes
// x4: Paddr of L0 table to use for mapping
// x5: Paddr of L1 table to use for mapping
// x6: Paddr of L2 table to use for mapping
//
// To keep things simple, everything is mapped using 2MB L2 block translations; the length
// is therefore aligned up to 2MB to avoid any translation aliases. Wasteful perhaps, but
// at this stage it is convenient, and these mappings will in any case be ripped out and
// reprogrammed in kstart.
build_map:
lsr x8, x0, #39 // First group of 9 bits of VA
and x8, x8, #0x1ff
lsl x8, x8, #3 // x8: Index into L0 table
ldr x9, [x4, x8]
cbnz x9, l1_idx_prefilled
mov x9, x5 // Get L1 base
bfm w9, wzr, #0, #11
orr x9, x9, #((DESC_TYPE_TABLE << DESC_TYPE_BIT) | (DESC_VALID << DESC_VALID_BIT))
orr x9, x9, #(ACCESS_FLAG_BIT)
str x9, [x4, x8] // L0[Index]: L1
l1_idx_prefilled:
lsr x8, x0, #30 // Second group of 9 bits of VA
and x8, x8, #0x1ff
lsl x8, x8, #3 // x8: Index into L1 table
ldr x9, [x5, x8]
cbnz x9, l2_idx_prefilled
build_map_l2:
mov x9, x6 // Get L2 base
bfm w9, wzr, #0, #11
orr x9, x9, #((DESC_TYPE_TABLE << DESC_TYPE_BIT) | (DESC_VALID << DESC_VALID_BIT))
orr x9, x9, #(ACCESS_FLAG_BIT)
lsl x4, x3, #2
orr x9, x9, x4
str x9, [x5, x8] // L1[Index]: Base of L2 table
l2_idx_prefilled:
lsr x2, x2, #21 // Number of 2MB blocks needed
add x2, x2, #1
lsr x8, x0, #21 // Third group of 9 bits of VA
and x8, x8, #0x1ff
lsl x8, x8, #3 // x8: Index into L2 table
ldr x9, [x6, x8]
cbnz x9, build_map_error
build_map_l2_loop:
mov x9, x1
bfm w9, wzr, #0, #11
orr x9, x9, #((DESC_TYPE_BLOCK << DESC_TYPE_BIT) | (DESC_VALID << DESC_VALID_BIT))
orr x9, x9, #(ACCESS_FLAG_BIT)
lsl x4, x3, #2
orr x9, x9, x4
ldr x10, [x6, x8]
mov x7, #(DESC_VALID << DESC_VALID_BIT)
and x10, x10, x7
cmp x10, x7
b.eq build_map_error
str x9, [x6, x8] // L2[Index]: PA of 2MB region to map to
mov x9, #1
add x1, x1, x9, lsl #21
add x8, x8, #8
sub x2, x2, #1
cbnz x2, build_map_l2_loop
ret
build_map_error:
wfi
b build_map_error
// Statically allocated tables consumed by build_map.
.align 12
identkmap_l0_ptable:
.space PAGE_SIZE
identkmap_l1_ptable:
.space PAGE_SIZE
identkmap_l2_ptable:
.space PAGE_SIZE
kernmap_l0_ptable:
.space PAGE_SIZE
kernmap_l1_ptable:
.space PAGE_SIZE
kernmap_l2_ptable:
.space PAGE_SIZE
kstack_l1_ptable:
.space PAGE_SIZE
kstack_l2_ptable:
.space PAGE_SIZE
devmap_l1_ptable:
.space PAGE_SIZE
devmap_l2_ptable: