Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • redox-os/kernel
  • deepaksirone/kernel
  • microcolonel/kernel
  • jD91mZM2/kernel
  • liamnprg/kernel
  • ids1024/kernel
  • luojia/kernel
  • efikarl/kernel
  • jferard/kernel
  • rosehuds/kernel
  • xTibor/kernel
  • Ano-Nymus/kernel
  • carrot93/kernel
  • noam93k/kernel
  • corvus_corax/kernel
  • antares/kernel
  • nrdxp/kernel
  • SoyaOhnishi/kernel
  • potatogim/kernel
  • bsjung/kernel
  • batzor/kernel
  • retrocoder68/kernel
  • kal/kernel
  • jabedude/kernel
  • 4lDO2/kernel
  • cherusk/kernel
  • sudoamin/kernel
  • chetankhilosiya/kernel
  • t-nil/kernel
  • Majoneza/kernel
  • wiredtv/kernel
  • tijlleenders/kernel
  • Mottl/kernel
  • usapmz/kernel
  • kamirr/kernel
  • CodingRays/kernel
  • Ivan/kernel
  • zacklukem/kernel
  • devnexen/kernel
  • uuuvn/kernel
  • rw_van/kernel
  • freewilll/kernel
  • ebalalic/kernel
  • henritel/kernel
  • dahc/kernel
  • Forest0923/kernel
  • andrey.turkin/kernel
  • amidamaru/kernel
  • gmacd/kernel
  • jinb-park/kernel
  • bjorn3/kernel
  • neallred/kernel
  • hmcmillan/kernel
  • jmaine/kernel
  • wt/kernel
  • aaronjanse/kernel
  • Skallwar/kernel
  • NateDogg1232/kernel
  • maxtnuk/kernel
  • Vladimare/kernel
  • ylz0923/kernel
  • wheatfox/kernel
  • mjdr/kernel
  • adi-g15/kernel
  • heghe/kernel
  • enygmator/kernel
  • vincent/kernel
  • StaringAtEditor/redox-os-kernel
  • zhaozhao/kernel
  • arthurpaulino/kernel
  • andypython/kernel
  • LLeny/kernel
  • Seti/kernel
73 results
Show changes
Commits on Source (164)
Showing
with 334 additions and 252 deletions
......@@ -64,7 +64,7 @@ dependencies = [
[[package]]
name = "kernel"
version = "0.2.12"
version = "0.3.4"
dependencies = [
"bitfield",
"bitflags",
......@@ -75,8 +75,8 @@ dependencies = [
"linked_list_allocator 0.9.1",
"log",
"memoffset",
"paste",
"raw-cpuid",
"redox-initfs",
"redox_syscall",
"rmm",
"rustc-cfg",
......@@ -106,9 +106,9 @@ dependencies = [
[[package]]
name = "lock_api"
version = "0.4.7"
version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53"
checksum = "9f80bf5aacaf25cbfc8210d1cfb718f2bf3b11c4c54e5afe36c236853a8ec390"
dependencies = [
"autocfg",
"scopeguard",
......@@ -116,9 +116,9 @@ dependencies = [
[[package]]
name = "log"
version = "0.4.16"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6389c490849ff5bc16be905ae24bc913a9c8892e19b2341dbc175e14c341c2b8"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
......@@ -132,6 +132,12 @@ dependencies = [
"autocfg",
]
[[package]]
name = "paste"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1"
[[package]]
name = "plain"
version = "0.2.3"
......@@ -140,24 +146,16 @@ checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6"
[[package]]
name = "raw-cpuid"
version = "10.3.0"
version = "10.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "738bc47119e3eeccc7e94c4a506901aea5e7b4944ecd0829cbebf4af04ceda12"
checksum = "6aa2540135b6a94f74c7bc90ad4b794f822026a894f3d7bcd185c100d13d4ad6"
dependencies = [
"bitflags",
]
[[package]]
name = "redox-initfs"
version = "0.1.0"
source = "git+https://gitlab.redox-os.org/redox-os/redox-initfs.git#89b8fb8984cf96c418880b7dcd9ce3d6afc3f71c"
dependencies = [
"plain",
]
[[package]]
name = "redox_syscall"
version = "0.2.12"
version = "0.3.4"
dependencies = [
"bitflags",
]
......
[package]
name = "kernel"
version = "0.2.12"
version = "0.3.4"
build = "build.rs"
edition = "2018"
......@@ -24,7 +24,6 @@ slab_allocator = { path = "slab_allocator", optional = true }
# FIXME: There is some undefined behavior probably in the kernel, which forces us to use spin 0.9.0 and not 0.9.2.
spin = "=0.9.0"
rmm = { path = "rmm", default-features = false }
redox-initfs = { git = "https://gitlab.redox-os.org/redox-os/redox-initfs.git", features = ["kernel"], default-features = false }
[dependencies.goblin]
version = "0.2.1"
......@@ -38,8 +37,9 @@ default-features = false
[target.'cfg(target_arch = "aarch64")'.dependencies]
byteorder = { version = "1", default-features = false }
fdt = { git = "https://gitlab.redox-os.org/thomhuds/fdt.git", default-features = false }
paste = "1.0.7"
[target.'cfg(target_arch = "x86_64")'.dependencies]
[target.'cfg(any(target_arch = "x86", target_arch = "x86_64"))'.dependencies]
raw-cpuid = "10.2.0"
x86 = { version = "0.47.0", default-features = false }
......
use rustc_cfg::Cfg;
use std::env;
#[cfg(not(target_arch = "x86_64"))]
fn asm(_out_dir: &str) {}
#[cfg(target_arch = "x86_64")]
fn asm(out_dir: &str) {
use std::process::Command;
println!("cargo:rerun-if-changed=src/asm/x86_64/trampoline.asm");
let status = Command::new("nasm")
.arg("-f").arg("bin")
.arg("-o").arg(format!("{}/trampoline", out_dir))
.arg("src/asm/x86_64/trampoline.asm")
.status()
.expect("failed to run nasm");
if ! status.success() {
panic!("nasm failed with exit status {}", status);
}
}
use std::process::Command;
fn main() {
println!("cargo:rustc-env=TARGET={}", env::var("TARGET").unwrap());
let out_dir = env::var("OUT_DIR").unwrap();
asm(&out_dir);
// Build pre kstart init asm code for aarch64
let cfg = Cfg::new(env::var_os("TARGET").unwrap()).unwrap();
if cfg.target_arch == "aarch64" {
println!("cargo:rerun-if-changed=src/arch/aarch64/init/pre_kstart/early_init.S");
cc::Build::new()
.file("src/arch/aarch64/init/pre_kstart/early_init.S")
.target("aarch64-unknown-redox")
.compile("early_init");
match cfg.target_arch.as_str() {
"aarch64" => {
// Build pre kstart init asm code for aarch64
/*TODO: do we need any of this?
println!("cargo:rerun-if-changed=src/arch/aarch64/init/pre_kstart/early_init.S");
cc::Build::new()
.file("src/arch/aarch64/init/pre_kstart/early_init.S")
.target("aarch64-unknown-redox")
.compile("early_init");
*/
},
"x86" => {
println!("cargo:rerun-if-changed=src/asm/x86/trampoline.asm");
let status = Command::new("nasm")
.arg("-f").arg("bin")
.arg("-o").arg(format!("{}/trampoline", out_dir))
.arg("src/asm/x86/trampoline.asm")
.status()
.expect("failed to run nasm");
if ! status.success() {
panic!("nasm failed with exit status {}", status);
}
},
"x86_64" => {
println!("cargo:rerun-if-changed=src/asm/x86_64/trampoline.asm");
let status = Command::new("nasm")
.arg("-f").arg("bin")
.arg("-o").arg(format!("{}/trampoline", out_dir))
.arg("src/asm/x86_64/trampoline.asm")
.status()
.expect("failed to run nasm");
if ! status.success() {
panic!("nasm failed with exit status {}", status);
}
}
_ => (),
}
}
......@@ -2,15 +2,6 @@
set -e
# https://github.com/rust-lang/rust-clippy/issues/4579
export RUSTUP_TOOLCHAIN="nightly-2019-07-19"
rustup update "${RUSTUP_TOOLCHAIN}"
rustup component add clippy --toolchain "${RUSTUP_TOOLCHAIN}"
rustup component add rust-src --toolchain "${RUSTUP_TOOLCHAIN}"
# Cause recompilation
touch src/lib.rs
export RUST_TARGET_PATH="${PWD}/targets"
export RUSTFLAGS="-C soft-float -C debuginfo=2"
xargo clippy --lib --release --target x86_64-unknown-none
cargo clippy --lib --release --target x86_64-unknown-none "$@"
ENTRY(early_init)
ENTRY(kstart)
OUTPUT_FORMAT("elf64-littleaarch64", "elf64-littleaarch64", "elf64-littleaarch64")
KERNEL_OFFSET = 0xFFFFFF0000000000;
......
ENTRY(kstart)
OUTPUT_FORMAT(elf32-i386)
KERNEL_OFFSET = 0xC0000000;
SECTIONS {
. = KERNEL_OFFSET;
. += SIZEOF_HEADERS;
. = ALIGN(4096);
.text : AT(ADDR(.text) - KERNEL_OFFSET) {
__text_start = .;
*(.text*)
. = ALIGN(4096);
__text_end = .;
}
.rodata : AT(ADDR(.rodata) - KERNEL_OFFSET) {
__rodata_start = .;
*(.rodata*)
. = ALIGN(4096);
__rodata_end = .;
}
.data : AT(ADDR(.data) - KERNEL_OFFSET) {
__data_start = .;
*(.data*)
. = ALIGN(4096);
__data_end = .;
__bss_start = .;
*(.bss*)
. = ALIGN(4096);
__bss_end = .;
}
.tdata : AT(ADDR(.tdata) - KERNEL_OFFSET) {
__tdata_start = .;
*(.tdata*)
. = ALIGN(4096);
__tdata_end = .;
__tbss_start = .;
*(.tbss*)
. += 8;
. = ALIGN(4096);
__tbss_end = .;
}
__end = .;
/DISCARD/ : {
*(.comment*)
*(.eh_frame*)
*(.gcc_except_table*)
*(.note*)
*(.rel.eh_frame*)
}
}
Subproject commit 507f7ccd4a33eee5ca442803c91e19b3d2095f39
Subproject commit 81b03cc69397d8f729c5fd199574ba9c29d3aa26
......@@ -3,7 +3,8 @@ use core::{mem, ptr};
use core::intrinsics::{volatile_load, volatile_store};
use crate::memory::Frame;
use crate::paging::{ActivePageTable, PhysicalAddress, Page, PageFlags, VirtualAddress};
use crate::paging::{KernelMapper, PhysicalAddress, PageFlags};
use crate::paging::entry::EntryFlags;
use super::sdt::Sdt;
use super::{ACPI_TABLE, find_sdt};
......@@ -11,10 +12,10 @@ use super::{ACPI_TABLE, find_sdt};
#[repr(packed)]
#[derive(Clone, Copy, Debug, Default)]
pub struct GenericAddressStructure {
address_space: u8,
bit_width: u8,
bit_offset: u8,
access_size: u8,
_address_space: u8,
_bit_width: u8,
_bit_offset: u8,
_access_size: u8,
pub address: u64,
}
......@@ -35,10 +36,10 @@ pub struct Hpet {
}
impl Hpet {
pub fn init(active_table: &mut ActivePageTable) {
pub fn init() {
let hpet_sdt = find_sdt("HPET");
let hpet = if hpet_sdt.len() == 1 {
Hpet::new(hpet_sdt[0], active_table)
Hpet::new(hpet_sdt[0])
} else {
println!("Unable to find HPET");
return;
......@@ -52,10 +53,10 @@ impl Hpet {
}
}
pub fn new(sdt: &'static Sdt, active_table: &mut ActivePageTable) -> Option<Hpet> {
pub fn new(sdt: &'static Sdt) -> Option<Hpet> {
if &sdt.signature == b"HPET" && sdt.length as usize >= mem::size_of::<Hpet>() {
let s = unsafe { ptr::read((sdt as *const Sdt) as *const Hpet) };
unsafe { s.base_address.init(active_table) };
unsafe { s.base_address.init(&mut KernelMapper::lock()) };
Some(s)
} else {
None
......@@ -63,19 +64,49 @@ impl Hpet {
}
}
//TODO: x86 use assumes only one HPET and only one GenericAddressStructure
#[cfg(target_arch = "x86")]
impl GenericAddressStructure {
pub unsafe fn init(&self, active_table: &mut ActivePageTable) {
let page = Page::containing_address(VirtualAddress::new(self.address as usize));
pub unsafe fn init(&self, mapper: &mut KernelMapper) {
use crate::paging::{Page, VirtualAddress};
let frame = Frame::containing_address(PhysicalAddress::new(self.address as usize));
let page = Page::containing_address(VirtualAddress::new(crate::HPET_OFFSET));
mapper
.get_mut()
.expect("KernelMapper locked re-entrant while mapping memory for GenericAddressStructure")
.map_phys(page.start_address(), frame.start_address(), PageFlags::new().write(true).custom_flag(EntryFlags::NO_CACHE.bits(), true))
.expect("failed to map memory for GenericAddressStructure")
.flush();
}
pub unsafe fn read_u64(&self, offset: usize) -> u64{
volatile_load((crate::HPET_OFFSET + offset) as *const u64)
}
pub unsafe fn write_u64(&mut self, offset: usize, value: u64) {
volatile_store((crate::HPET_OFFSET + offset) as *mut u64, value);
}
}
#[cfg(not(target_arch = "x86"))]
impl GenericAddressStructure {
pub unsafe fn init(&self, mapper: &mut KernelMapper) {
let frame = Frame::containing_address(PhysicalAddress::new(self.address as usize));
let result = active_table.map_to(page, frame, PageFlags::new().write(true));
let (_, result) = mapper
.get_mut()
.expect("KernelMapper locked re-entrant while mapping memory for GenericAddressStructure")
.map_linearly(frame.start_address(), PageFlags::new().write(true).custom_flag(EntryFlags::NO_CACHE.bits(), true))
.expect("failed to map memory for GenericAddressStructure");
result.flush();
}
pub unsafe fn read_u64(&self, offset: usize) -> u64{
volatile_load((self.address as usize + offset) as *const u64)
volatile_load((self.address as usize + offset + crate::PHYS_OFFSET) as *const u64)
}
pub unsafe fn write_u64(&mut self, offset: usize, value: u64) {
volatile_store((self.address as usize + offset) as *mut u64, value);
volatile_store((self.address as usize + offset + crate::PHYS_OFFSET) as *mut u64, value);
}
}
use core::mem;
use crate::memory::{allocate_frames, Frame};
use crate::paging::{ActivePageTable, Page, PageFlags, PhysicalAddress, VirtualAddress};
use crate::paging::{KernelMapper, Page, PageFlags, PhysicalAddress, RmmA, RmmArch, VirtualAddress};
use super::sdt::Sdt;
use super::find_sdt;
......@@ -28,7 +28,7 @@ pub static mut MADT: Option<Madt> = None;
pub const FLAG_PCAT: u32 = 1;
impl Madt {
pub fn init(active_table: &mut ActivePageTable) {
pub fn init() {
let madt_sdt = find_sdt("APIC");
let madt = if madt_sdt.len() == 1 {
Madt::new(madt_sdt[0])
......@@ -56,7 +56,18 @@ impl Madt {
// Map trampoline
let trampoline_frame = Frame::containing_address(PhysicalAddress::new(TRAMPOLINE));
let trampoline_page = Page::containing_address(VirtualAddress::new(TRAMPOLINE));
let result = active_table.map_to(trampoline_page, trampoline_frame, PageFlags::new().execute(true).write(true)); //TODO: do not have writable and executable!
let (result, page_table_physaddr) = unsafe {
//TODO: do not have writable and executable!
let mut mapper = KernelMapper::lock();
let result = mapper
.get_mut()
.expect("expected kernel page table not to be recursively locked while initializing MADT")
.map_phys(trampoline_page.start_address(), trampoline_frame.start_address(), PageFlags::new().execute(true).write(true))
.expect("failed to map trampoline");
(result, mapper.table().phys().data())
};
result.flush();
// Write trampoline, make sure TRAMPOLINE page is free for use
......@@ -90,7 +101,7 @@ impl Madt {
// Set the ap_ready to 0, volatile
unsafe { atomic_store(ap_ready, 0) };
unsafe { atomic_store(ap_cpu_id, ap_local_apic.id as u64) };
unsafe { atomic_store(ap_page_table, active_table.address() as u64) };
unsafe { atomic_store(ap_page_table, page_table_physaddr as u64) };
unsafe { atomic_store(ap_stack_start, stack_start as u64) };
unsafe { atomic_store(ap_stack_end, stack_end as u64) };
unsafe { atomic_store(ap_code, kstart_ap as u64) };
......@@ -137,7 +148,7 @@ impl Madt {
}
println!(" Ready");
active_table.flush_all();
unsafe { RmmA::invalidate_all(); }
} else {
println!(" CPU Disabled");
}
......@@ -147,8 +158,14 @@ impl Madt {
}
// Unmap trampoline
let (result, _frame) = active_table.unmap_return(trampoline_page, false);
result.flush();
let (_frame, _, flush) = unsafe {
KernelMapper::lock()
.get_mut()
.expect("expected kernel page table not to be recursively locked while initializing MADT")
.unmap_phys(trampoline_page.start_address(), true)
.expect("failed to unmap trampoline page")
};
flush.flush();
}
}
}
......@@ -159,9 +176,9 @@ impl Madt {
let flags = unsafe { *(sdt.data_address() as *const u32).offset(1) };
Some(Madt {
sdt: sdt,
local_address: local_address,
flags: flags
sdt,
local_address,
flags
})
} else {
None
......@@ -195,7 +212,7 @@ pub struct MadtIoApic {
/// I/O APIC ID
pub id: u8,
/// reserved
reserved: u8,
_reserved: u8,
/// I/O APIC address
pub address: u32,
/// Global system interrupt base
......
......@@ -9,8 +9,7 @@ use alloc::boxed::Box;
use spin::{Once, RwLock};
use crate::log::info;
use crate::memory::Frame;
use crate::paging::{ActivePageTable, Page, PageFlags, PhysicalAddress, VirtualAddress};
use crate::paging::{KernelMapper, PageFlags, PhysicalAddress, RmmA, RmmArch};
use self::madt::Madt;
use self::rsdt::Rsdt;
......@@ -28,31 +27,33 @@ mod xsdt;
mod rxsdt;
mod rsdp;
pub fn get_sdt(sdt_address: usize, active_table: &mut ActivePageTable) -> &'static Sdt {
{
let page = Page::containing_address(VirtualAddress::new(sdt_address));
if active_table.translate_page(page).is_none() {
let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().data()));
let result = active_table.map_to(page, frame, PageFlags::new());
result.flush();
}
unsafe fn map_linearly(addr: PhysicalAddress, len: usize, mapper: &mut crate::paging::PageMapper) {
let base = PhysicalAddress::new(crate::paging::round_down_pages(addr.data()));
let aligned_len = crate::paging::round_up_pages(len + (addr.data() - base.data()));
for page_idx in 0..aligned_len / crate::memory::PAGE_SIZE {
let (_, flush) = mapper.map_linearly(base.add(page_idx * crate::memory::PAGE_SIZE), PageFlags::new()).expect("failed to linearly map SDT");
flush.flush();
}
}
let sdt = unsafe { &*(sdt_address as *const Sdt) };
pub fn get_sdt(sdt_address: usize, mapper: &mut KernelMapper) -> &'static Sdt {
let mapper = mapper
.get_mut()
.expect("KernelMapper mapper locked re-entrant in get_sdt");
// Map extra SDT frames if required
{
let start_page = Page::containing_address(VirtualAddress::new(sdt_address + 4096));
let end_page = Page::containing_address(VirtualAddress::new(sdt_address + sdt.length as usize));
for page in Page::range_inclusive(start_page, end_page) {
if active_table.translate_page(page).is_none() {
let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().data()));
let result = active_table.map_to(page, frame, PageFlags::new());
result.flush();
}
}
}
let physaddr = PhysicalAddress::new(sdt_address);
let sdt;
unsafe {
const SDT_SIZE: usize = core::mem::size_of::<Sdt>();
map_linearly(physaddr, SDT_SIZE, mapper);
sdt = &*(RmmA::phys_to_virt(physaddr).data() as *const Sdt);
map_linearly(physaddr.add(SDT_SIZE), sdt.length as usize - SDT_SIZE, mapper);
}
sdt
}
......@@ -72,16 +73,18 @@ impl Rxsdt for RxsdtEnum {
pub static RXSDT_ENUM: Once<RxsdtEnum> = Once::new();
/// Parse the ACPI tables to gather CPU, interrupt, and timer information
pub unsafe fn init(active_table: &mut ActivePageTable, already_supplied_rsdps: Option<(u64, u64)>) {
pub unsafe fn init(already_supplied_rsdps: Option<(u64, u64)>) {
{
let mut sdt_ptrs = SDT_POINTERS.write();
*sdt_ptrs = Some(BTreeMap::new());
}
// Search for RSDP
if let Some(rsdp) = RSDP::get_rsdp(active_table, already_supplied_rsdps) {
let rsdp_opt = RSDP::get_rsdp(&mut KernelMapper::lock(), already_supplied_rsdps);
if let Some(rsdp) = rsdp_opt {
info!("RSDP: {:?}", rsdp);
let rxsdt = get_sdt(rsdp.sdt_address(), active_table);
let rxsdt = get_sdt(rsdp.sdt_address(), &mut KernelMapper::lock());
for &c in rxsdt.signature.iter() {
print!("{}", c as char);
......@@ -122,10 +125,10 @@ pub unsafe fn init(active_table: &mut ActivePageTable, already_supplied_rsdps: O
// TODO: Don't touch ACPI tables in kernel?
rxsdt.map_all(active_table);
rxsdt.map_all();
for sdt_address in rxsdt.iter() {
let sdt = &*(sdt_address as *const Sdt);
let sdt = &*((sdt_address + crate::PHYS_OFFSET) as *const Sdt);
let signature = get_sdt_signature(sdt);
if let Some(ref mut ptrs) = *(SDT_POINTERS.write()) {
......@@ -135,10 +138,10 @@ pub unsafe fn init(active_table: &mut ActivePageTable, already_supplied_rsdps: O
// TODO: Enumerate processors in userspace, and then provide an ACPI-independent interface
// to initialize enumerated processors to userspace?
Madt::init(active_table);
Madt::init();
// TODO: Let userspace setup HPET, and then provide an interface to specify which timer to
// use?
Hpet::init(active_table);
Hpet::init();
} else {
println!("NO RSDP FOUND");
}
......
......@@ -2,21 +2,21 @@ use core::convert::TryFrom;
use core::mem;
use crate::memory::Frame;
use crate::paging::{ActivePageTable, Page, PageFlags, PhysicalAddress, VirtualAddress};
use crate::paging::{KernelMapper, Page, PageFlags, PhysicalAddress, VirtualAddress};
/// RSDP
#[derive(Copy, Clone, Debug)]
#[repr(packed)]
pub struct RSDP {
signature: [u8; 8],
checksum: u8,
oemid: [u8; 6],
_checksum: u8,
_oemid: [u8; 6],
revision: u8,
rsdt_address: u32,
length: u32,
_length: u32,
xsdt_address: u64,
extended_checksum: u8,
reserved: [u8; 3]
_extended_checksum: u8,
_reserved: [u8; 3]
}
impl RSDP {
......@@ -71,16 +71,16 @@ impl RSDP {
None
}
pub fn get_rsdp(active_table: &mut ActivePageTable, already_supplied_rsdps: Option<(u64, u64)>) -> Option<RSDP> {
pub fn get_rsdp(mapper: &mut KernelMapper, already_supplied_rsdps: Option<(u64, u64)>) -> Option<RSDP> {
if let Some((base, size)) = already_supplied_rsdps {
let area = unsafe { core::slice::from_raw_parts(base as usize as *const u8, size as usize) };
Self::get_already_supplied_rsdps(area).or_else(|| Self::get_rsdp_by_searching(active_table))
Self::get_already_supplied_rsdps(area).or_else(|| Self::get_rsdp_by_searching(mapper))
} else {
Self::get_rsdp_by_searching(active_table)
Self::get_rsdp_by_searching(mapper)
}
}
/// Search for the RSDP
pub fn get_rsdp_by_searching(active_table: &mut ActivePageTable) -> Option<RSDP> {
pub fn get_rsdp_by_searching(mapper: &mut KernelMapper) -> Option<RSDP> {
let start_addr = 0xE_0000;
let end_addr = 0xF_FFFF;
......@@ -90,7 +90,9 @@ impl RSDP {
let end_frame = Frame::containing_address(PhysicalAddress::new(end_addr));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().data()));
let result = active_table.map_to(page, frame, PageFlags::new());
let result = unsafe {
mapper.get_mut().expect("KernelMapper locked re-entrant while locating RSDPs").map_phys(page.start_address(), frame.start_address(), PageFlags::new()).expect("failed to map page while searching for RSDP")
};
result.flush();
}
}
......
use alloc::boxed::Box;
use crate::paging::ActivePageTable;
use crate::paging::KernelMapper;
use super::sdt::Sdt;
use super::get_sdt;
......@@ -8,9 +8,10 @@ use super::get_sdt;
pub trait Rxsdt {
fn iter(&self) -> Box<dyn Iterator<Item = usize>>;
fn map_all(&self, active_table: &mut ActivePageTable) {
fn map_all(&self) {
let mut mapper = KernelMapper::lock();
for sdt in self.iter() {
get_sdt(sdt, active_table);
get_sdt(sdt, &mut mapper);
}
}
......
......@@ -3,7 +3,7 @@ use core::ptr::{self, NonNull};
use linked_list_allocator::Heap;
use spin::Mutex;
use crate::paging::{ActivePageTable, TableKind};
use crate::paging::KernelMapper;
static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
......@@ -21,7 +21,7 @@ unsafe impl GlobalAlloc for Allocator {
match heap.allocate_first_fit(layout) {
Err(()) => {
let size = heap.size();
super::map_heap(&mut ActivePageTable::new(TableKind::Kernel), crate::KERNEL_HEAP_OFFSET + size, crate::KERNEL_HEAP_SIZE);
super::map_heap(&mut KernelMapper::lock(), crate::KERNEL_HEAP_OFFSET + size, crate::KERNEL_HEAP_SIZE);
heap.extend(crate::KERNEL_HEAP_SIZE);
},
other => return other.ok().map_or(ptr::null_mut(), |allocation| allocation.as_ptr()),
......
use crate::paging::{ActivePageTable, Page, PageFlags, VirtualAddress, mapper::PageFlushAll, entry::EntryFlags};
use rmm::Flusher;
use crate::paging::{KernelMapper, Page, PageFlags, VirtualAddress, mapper::PageFlushAll, entry::EntryFlags};
#[cfg(not(feature="slab"))]
pub use self::linked_list::Allocator;
......@@ -12,13 +13,14 @@ mod linked_list;
#[cfg(feature="slab")]
mod slab;
unsafe fn map_heap(active_table: &mut ActivePageTable, offset: usize, size: usize) {
let flush_all = PageFlushAll::new();
unsafe fn map_heap(mapper: &mut KernelMapper, offset: usize, size: usize) {
let mapper = mapper.get_mut().expect("failed to obtain exclusive access to KernelMapper while extending heap");
let mut flush_all = PageFlushAll::new();
let heap_start_page = Page::containing_address(VirtualAddress::new(offset));
let heap_end_page = Page::containing_address(VirtualAddress::new(offset + size-1));
for page in Page::range_inclusive(heap_start_page, heap_end_page) {
let result = active_table.map(page, PageFlags::new().write(true).custom_flag(EntryFlags::GLOBAL.bits(), cfg!(not(feature = "pti"))))
let result = mapper.map(page.start_address(), PageFlags::new().write(true).custom_flag(EntryFlags::GLOBAL.bits(), cfg!(not(feature = "pti"))))
.expect("failed to map kernel heap");
flush_all.consume(result);
}
......@@ -26,12 +28,12 @@ unsafe fn map_heap(active_table: &mut ActivePageTable, offset: usize, size: usiz
flush_all.flush();
}
pub unsafe fn init(active_table: &mut ActivePageTable) {
pub unsafe fn init() {
let offset = crate::KERNEL_HEAP_OFFSET;
let size = crate::KERNEL_HEAP_SIZE;
// Map heap pages
map_heap(active_table, offset, size);
map_heap(&mut KernelMapper::lock(), offset, size);
// Initialize global heap
Allocator::init(offset, size);
......
// Because the memory map is so important to not be aliased, it is defined here, in one place
// The lower 256 PML4 entries are reserved for userspace
// Each PML4 entry references up to 512 GB of memory
// The top (511) PML4 is reserved for recursive mapping
// The second from the top (510) PML4 is reserved for the kernel
/// The size of a single PML4
pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
pub const PML4_MASK: usize = 0x0000_ff80_0000_0000;
/// Size of a page and frame
pub const PAGE_SIZE: usize = 4096;
/// Offset of recursive paging
/// Offset of recursive paging (deprecated, but still reserved)
pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
pub const RECURSIVE_PAGE_PML4: usize = (RECURSIVE_PAGE_OFFSET & PML4_MASK)/PML4_SIZE;
......@@ -18,97 +14,29 @@
pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
pub const KERNEL_PML4: usize = (KERNEL_OFFSET & PML4_MASK)/PML4_SIZE;
/// Kernel stack size - must be kept in sync with early_init.S. Used by memory::init
pub const KERNEL_STACK_SIZE: usize = PAGE_SIZE;
/// Offset to kernel heap
pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE;
pub const KERNEL_HEAP_PML4: usize = (KERNEL_HEAP_OFFSET & PML4_MASK)/PML4_SIZE;
/// Size of kernel heap
pub const KERNEL_HEAP_SIZE: usize = 1 * 1024 * 1024; // 1 MB
/// Offset of device map region
pub const KERNEL_DEVMAP_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
/// Offset of environment region
pub const KERNEL_ENV_OFFSET: usize = KERNEL_DEVMAP_OFFSET - PML4_SIZE;
/// Offset of temporary mapping for misc kernel bring-up actions
pub const KERNEL_TMP_MISC_OFFSET: usize = KERNEL_ENV_OFFSET - PML4_SIZE;
pub const KERNEL_TMP_MISC_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
/// Offset to kernel percpu variables
pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_TMP_MISC_OFFSET - PML4_SIZE;
pub const KERNEL_PERCPU_PML4: usize = (KERNEL_PERCPU_OFFSET & PML4_MASK) / PML4_SIZE;
pub const KERNEL_PERCPU_PML4: usize = (KERNEL_PERCPU_OFFSET & PML4_MASK)/PML4_SIZE;
/// Size of kernel percpu variables
pub const KERNEL_PERCPU_SIZE: usize = 64 * 1024; // 64 KB
pub const KERNEL_PERCPU_SHIFT: u8 = 16; // 2^16 = 64 KiB
pub const KERNEL_PERCPU_SIZE: usize = 1_usize << KERNEL_PERCPU_SHIFT;
/// Offset of physmap
// This needs to match RMM's PHYS_OFFSET
pub const PHYS_OFFSET: usize = 0xFFFF_FE00_0000_0000;
pub const PHYS_OFFSET: usize = 0xFFFF_8000_0000_0000;
pub const PHYS_PML4: usize = (PHYS_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset to user image
pub const USER_OFFSET: usize = 0;
pub const USER_PML4: usize = (USER_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset to user TCB
pub const USER_TCB_OFFSET: usize = 0xB000_0000;
/// Offset to user arguments
pub const USER_ARG_OFFSET: usize = USER_OFFSET + PML4_SIZE/2;
/// Offset to user heap
pub const USER_HEAP_OFFSET: usize = USER_OFFSET + PML4_SIZE;
pub const USER_HEAP_PML4: usize = (USER_HEAP_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset to user grants
pub const USER_GRANT_OFFSET: usize = USER_HEAP_OFFSET + PML4_SIZE;
pub const USER_GRANT_PML4: usize = (USER_GRANT_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset to user stack
pub const USER_STACK_OFFSET: usize = USER_GRANT_OFFSET + PML4_SIZE;
pub const USER_STACK_PML4: usize = (USER_STACK_OFFSET & PML4_MASK)/PML4_SIZE;
/// Size of user stack
pub const USER_STACK_SIZE: usize = 1024 * 1024; // 1 MB
/// Offset to user sigstack
pub const USER_SIGSTACK_OFFSET: usize = USER_STACK_OFFSET + PML4_SIZE;
pub const USER_SIGSTACK_PML4: usize = (USER_SIGSTACK_OFFSET & PML4_MASK)/PML4_SIZE;
/// Size of user sigstack
pub const USER_SIGSTACK_SIZE: usize = 256 * 1024; // 256 KB
/// Offset to user TLS
pub const USER_TLS_OFFSET: usize = USER_SIGSTACK_OFFSET + PML4_SIZE;
pub const USER_TLS_PML4: usize = (USER_TLS_OFFSET & PML4_MASK)/PML4_SIZE;
// Maximum TLS allocated to each PID, should be approximately 8 MB
pub const USER_TLS_SIZE: usize = PML4_SIZE / 65536;
/// Offset to user temporary image (used when cloning)
pub const USER_TMP_OFFSET: usize = USER_TLS_OFFSET + PML4_SIZE;
pub const USER_TMP_PML4: usize = (USER_TMP_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset to user temporary heap (used when cloning)
pub const USER_TMP_HEAP_OFFSET: usize = USER_TMP_OFFSET + PML4_SIZE;
pub const USER_TMP_HEAP_PML4: usize = (USER_TMP_HEAP_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset to user temporary page for grants
pub const USER_TMP_GRANT_OFFSET: usize = USER_TMP_HEAP_OFFSET + PML4_SIZE;
pub const USER_TMP_GRANT_PML4: usize = (USER_TMP_GRANT_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset to user temporary stack (used when cloning)
pub const USER_TMP_STACK_OFFSET: usize = USER_TMP_GRANT_OFFSET + PML4_SIZE;
pub const USER_TMP_STACK_PML4: usize = (USER_TMP_STACK_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset to user temporary sigstack (used when cloning)
pub const USER_TMP_SIGSTACK_OFFSET: usize = USER_TMP_STACK_OFFSET + PML4_SIZE;
pub const USER_TMP_SIGSTACK_PML4: usize = (USER_TMP_SIGSTACK_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset to user temporary tls (used when cloning)
pub const USER_TMP_TLS_OFFSET: usize = USER_TMP_SIGSTACK_OFFSET + PML4_SIZE;
pub const USER_TMP_TLS_PML4: usize = (USER_TMP_TLS_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset for usage in other temporary pages
pub const USER_TMP_MISC_OFFSET: usize = USER_TMP_TLS_OFFSET + PML4_SIZE;
pub const USER_TMP_MISC_PML4: usize = (USER_TMP_MISC_OFFSET & PML4_MASK)/PML4_SIZE;
/// End offset of the user image, i.e. kernel start
pub const USER_END_OFFSET: usize = 256 * PML4_SIZE;
......@@ -3,6 +3,8 @@ use spin::MutexGuard;
use crate::log::{LOG, Log};
#[cfg(feature = "graphical_debug")]
use crate::devices::graphical_debug::{DEBUG_DISPLAY, DebugDisplay};
#[cfg(feature = "serial_debug")]
use super::device::{
serial::COM1,
......@@ -11,6 +13,8 @@ use super::device::{
pub struct Writer<'a> {
log: MutexGuard<'a, Option<Log>>,
#[cfg(feature = "graphical_debug")]
display: MutexGuard<'a, Option<DebugDisplay>>,
#[cfg(feature = "serial_debug")]
serial: MutexGuard<'a, Option<SerialPort>>,
}
......@@ -19,6 +23,8 @@ impl<'a> Writer<'a> {
pub fn new() -> Writer<'a> {
Writer {
log: LOG.lock(),
#[cfg(feature = "graphical_debug")]
display: DEBUG_DISPLAY.lock(),
#[cfg(feature = "serial_debug")]
serial: COM1.lock(),
}
......@@ -31,6 +37,13 @@ impl<'a> Writer<'a> {
}
}
#[cfg(feature = "graphical_debug")]
{
if let Some(ref mut display) = *self.display {
let _ = display.write(buf);
}
}
#[cfg(feature = "serial_debug")]
{
if let Some(ref mut serial) = *self.serial {
......
......@@ -4,7 +4,7 @@ use crate::device::cpu::registers::{control_regs};
pub mod registers;
bitfield! {
bitfield::bitfield! {
pub struct MachineId(u32);
get_implementer, _: 31, 24;
get_variant, _: 23, 20;
......@@ -194,12 +194,12 @@ impl CpuInfo {
pub fn cpu_info<W: Write>(w: &mut W) -> Result {
let cpuinfo = CpuInfo::new();
write!(w, "Implementer: {}\n", cpuinfo.implementer)?;
write!(w, "Variant: {}\n", cpuinfo.variant)?;
write!(w, "Architecture version: {}\n", cpuinfo.architecture)?;
write!(w, "Part Number: {}\n", cpuinfo.part_number)?;
write!(w, "Revision: {}\n", cpuinfo.revision)?;
write!(w, "\n")?;
writeln!(w, "Implementer: {}", cpuinfo.implementer)?;
writeln!(w, "Variant: {}", cpuinfo.variant)?;
writeln!(w, "Architecture version: {}", cpuinfo.architecture)?;
writeln!(w, "Part Number: {}", cpuinfo.part_number)?;
writeln!(w, "Revision: {}", cpuinfo.revision)?;
writeln!(w)?;
Ok(())
}
//! Functions to read and write control registers.
use core::arch::asm;
bitflags! {
pub struct MairEl1: u64 {
const DEVICE_MEMORY = 0x00;
......@@ -10,76 +12,92 @@ bitflags! {
pub unsafe fn ttbr0_el1() -> u64 {
let ret: u64;
llvm_asm!("mrs $0, ttbr0_el1" : "=r" (ret));
asm!("mrs {}, ttbr0_el1", out(reg) ret);
ret
}
pub unsafe fn ttbr0_el1_write(val: u64) {
llvm_asm!("msr ttbr0_el1, $0" :: "r" (val) : "memory");
asm!("msr ttbr0_el1, {}", in(reg) val);
}
/// Read `TTBR1_EL1` (Translation Table Base Register 1).
///
/// # Safety
/// Executes a privileged `mrs`; the caller must be at an exception level
/// permitted to access this register.
pub unsafe fn ttbr1_el1() -> u64 {
    let ret: u64;
    // Stale pre-migration `llvm_asm!` duplicate removed.
    asm!("mrs {}, ttbr1_el1", out(reg) ret);
    ret
}
/// Write `val` to `TTBR1_EL1` (Translation Table Base Register 1).
///
/// # Safety
/// Privileged `msr`; the caller must ensure `val` points at a valid
/// translation table.
pub unsafe fn ttbr1_el1_write(val: u64) {
    // Stale `llvm_asm!` duplicate removed.
    asm!("msr ttbr1_el1, {}", in(reg) val);
}
/// Read `MAIR_EL1` (Memory Attribute Indirection Register) as a typed
/// `MairEl1` flag set; unknown bits are dropped by `from_bits_truncate`.
///
/// # Safety
/// Executes a privileged `mrs`; the caller must be at an exception level
/// permitted to access this register.
pub unsafe fn mair_el1() -> MairEl1 {
    let ret: u64;
    // Stale pre-migration `llvm_asm!` duplicate removed.
    asm!("mrs {}, mair_el1", out(reg) ret);
    MairEl1::from_bits_truncate(ret)
}
pub unsafe fn mair_el1_write(val: MairEl1) {
llvm_asm!("msr mair_el1, $0" :: "r" (val.bits()) : "memory");
asm!("msr mair_el1, {}", in(reg) val.bits());
}
/// Read the software thread ID register `TPIDR_EL0`.
///
/// # Safety
/// The caller must be at an exception level permitted to access this
/// system register.
pub unsafe fn tpidr_el0() -> u64 {
    let value: u64;
    asm!("mrs {v}, tpidr_el0", v = out(reg) value);
    value
}
/// Write `val` to the software thread ID register `TPIDR_EL0`.
///
/// # Safety
/// The caller must be at an exception level permitted to access this
/// system register.
pub unsafe fn tpidr_el0_write(val: u64) {
    // Stale pre-migration `llvm_asm!` duplicate removed.
    asm!("msr tpidr_el0, {}", in(reg) val);
}
/// Write `val` to the EL1 software thread ID register `TPIDR_EL1`.
///
/// # Safety
/// Privileged `msr`; must run at EL1 (or higher).
pub unsafe fn tpidr_el1_write(val: u64) {
    // Stale pre-migration `llvm_asm!` duplicate removed.
    asm!("msr tpidr_el1, {}", in(reg) val);
}
/// Read the read-only software thread ID register `TPIDRRO_EL0`.
///
/// # Safety
/// The caller must be at an exception level permitted to access this
/// system register.
pub unsafe fn tpidrro_el0() -> u64 {
    let value: u64;
    asm!("mrs {v}, tpidrro_el0", v = out(reg) value);
    value
}
/// Write `val` to `TPIDRRO_EL0` (read-only at EL0, writable from EL1).
///
/// # Safety
/// Privileged `msr`; the caller must be at an exception level permitted to
/// write this register.
pub unsafe fn tpidrro_el0_write(val: u64) {
    asm!("msr tpidrro_el0, {v}", v = in(reg) val);
}
/// Read `ESR_EL1` (Exception Syndrome Register) as a 32-bit value.
///
/// # Safety
/// Privileged `mrs`; must run at EL1 (or higher).
pub unsafe fn esr_el1() -> u32 {
    let ret: u32;
    // Stale pre-migration `llvm_asm!` duplicate removed.
    asm!("mrs {}, esr_el1", out(reg) ret);
    ret
}
/// Read the generic timer counter frequency from `CNTFRQ_EL0`.
/// (The function name spells it `cntfreq`; the register mnemonic is
/// `cntfrq_el0` — kept as-is for callers.)
///
/// # Safety
/// The caller must be at an exception level permitted to access this
/// system register.
pub unsafe fn cntfreq_el0() -> u32 {
    let ret: u32;
    // Stale pre-migration `llvm_asm!` duplicate removed.
    asm!("mrs {}, cntfrq_el0", out(reg) ret);
    ret
}
/// Read the physical timer control register `CNTP_CTL_EL0`.
///
/// # Safety
/// The caller must be at an exception level permitted to access this
/// system register.
pub unsafe fn tmr_ctrl() -> u32 {
    let ret: u32;
    // Stale pre-migration `llvm_asm!` duplicate removed.
    asm!("mrs {}, cntp_ctl_el0", out(reg) ret);
    ret
}
/// Write `val` to the physical timer control register `CNTP_CTL_EL0`.
///
/// # Safety
/// The caller must be at an exception level permitted to access this
/// system register; this can enable/disable the timer interrupt source.
pub unsafe fn tmr_ctrl_write(val: u32) {
    // Stale pre-migration `llvm_asm!` duplicate removed.
    asm!("msr cntp_ctl_el0, {}", in(reg) val);
}
/// Read the physical timer's timer-value register `CNTP_TVAL_EL0`.
///
/// # Safety
/// The caller must be at an exception level permitted to access this
/// system register.
pub unsafe fn tmr_tval() -> u32 {
    let ret: u32;
    // Stale pre-migration `llvm_asm!` duplicate removed.
    asm!("mrs {}, cntp_tval_el0", out(reg) ret);
    ret
}
/// Write `val` to the physical timer's timer-value register `CNTP_TVAL_EL0`,
/// arming the countdown.
///
/// # Safety
/// The caller must be at an exception level permitted to access this
/// system register.
pub unsafe fn tmr_tval_write(val: u32) {
    // Stale pre-migration `llvm_asm!` duplicate removed.
    asm!("msr cntp_tval_el0, {}", in(reg) val);
}
/// Read `MIDR_EL1` (Main ID Register), which identifies implementer,
/// variant, architecture, part number and revision of the CPU.
///
/// # Safety
/// Privileged `mrs`; must run at EL1 (or higher).
pub unsafe fn midr() -> u32 {
    let ret: u32;
    // Stale pre-migration `llvm_asm!` duplicate removed.
    asm!("mrs {}, midr_el1", out(reg) ret);
    ret
}
//! Functions to flush the translation lookaside buffer (TLB).
use core::arch::asm;
/// Flush the TLB for `_addr`.
///
/// NOTE(review): the address is currently ignored — `tlbi vmalle1is`
/// invalidates all stage-1 EL1 TLB entries (Inner Shareable), so this is a
/// full flush rather than a per-address one. A per-VA invalidate would be a
/// behavior change; left as-is.
///
/// # Safety
/// Privileged TLB maintenance; must run at EL1. Callers presumably issue any
/// required `dsb`/`isb` barriers themselves — TODO confirm.
pub unsafe fn flush(_addr: usize) {
    // Stale pre-migration `llvm_asm!` duplicate removed.
    asm!("tlbi vmalle1is");
}
/// Flush the entire TLB (`tlbi vmalle1is`: all stage-1 EL1 entries,
/// Inner Shareable domain).
///
/// # Safety
/// Privileged TLB maintenance; must run at EL1. Callers presumably issue any
/// required `dsb`/`isb` barriers themselves — TODO confirm.
pub unsafe fn flush_all() {
    // Stale pre-migration `llvm_asm!` duplicate removed.
    asm!("tlbi vmalle1is");
}
use core::intrinsics::{volatile_load, volatile_store};
use crate::memory::Frame;
use crate::paging::{ActivePageTable, PhysicalAddress, Page, PageFlags, TableKind, VirtualAddress};
use crate::paging::{KernelMapper, PhysicalAddress, Page, PageFlags, TableKind, VirtualAddress};
// Byte offsets of memory-mapped GIC Distributor (GICD) registers, relative to
// the distributor base address mapped in `GicDistIf::init`.
static GICD_CTLR: u32 = 0x000; // Distributor Control Register (written to enable/disable IRQ distribution)
static GICD_TYPER: u32 = 0x004; // Interrupt Controller Type Register — NOTE(review): not used in the visible code; confirm against the GIC spec
......@@ -56,28 +56,36 @@ pub struct GicDistIf {
impl GicDistIf {
unsafe fn init(&mut self) {
// Map in the Distributor interface
let mut active_table = ActivePageTable::new(TableKind::Kernel);
let mut mapper = KernelMapper::lock();
let start_frame = Frame::containing_address(PhysicalAddress::new(0x08000000));
let end_frame = Frame::containing_address(PhysicalAddress::new(0x08000000 + 0x10000 - 1));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().data() + crate::KERNEL_DEVMAP_OFFSET));
let result = active_table.map_to(page, frame, PageFlags::new().write(true));
result.flush();
let page = Page::containing_address(VirtualAddress::new(frame.start_address().data() + crate::PHYS_OFFSET));
mapper
.get_mut()
.expect("failed to access KernelMapper for mapping GIC distributor")
.map_phys(page.start_address(), frame.start_address(), PageFlags::new().write(true))
.expect("failed to map GIC distributor")
.flush();
}
self.address = crate::KERNEL_DEVMAP_OFFSET + 0x08000000;
self.address = crate::PHYS_OFFSET + 0x08000000;
// Map in CPU0's interface
let start_frame = Frame::containing_address(PhysicalAddress::new(0x08010000));
let end_frame = Frame::containing_address(PhysicalAddress::new(0x08010000 + 0x10000 - 1));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().data() + crate::KERNEL_DEVMAP_OFFSET));
let result = active_table.map_to(page, frame, PageFlags::new().write(true));
result.flush();
let page = Page::containing_address(VirtualAddress::new(frame.start_address().data() + crate::PHYS_OFFSET));
mapper
.get_mut()
.expect("failed to access KernelMapper for mapping GIC interface")
.map_phys(page.start_address(), frame.start_address(), PageFlags::new().write(true))
.expect("failed to map GIC interface")
.flush();
}
GIC_CPU_IF.address = crate::KERNEL_DEVMAP_OFFSET + 0x08010000;
GIC_CPU_IF.address = crate::PHYS_OFFSET + 0x08010000;
// Disable IRQ Distribution
self.write(GICD_CTLR, 0);
......