Commit c28c147a authored by Jeremy Soller

Update to new dependencies

parent fc0db71d
......@@ -9,21 +9,21 @@ path = "src/lib.rs"
crate-type = ["staticlib"]
[dependencies]
bitflags = "1"
clippy = { version = "*", optional = true }
linked_list_allocator = "0.6"
raw-cpuid = "3.0"
bitflags = "1.0.3"
clippy = { version = "0.0.209", optional = true }
linked_list_allocator = "0.6.2"
raw-cpuid = "4.0.0"
redox_syscall = { path = "syscall" }
slab_allocator = { path = "slab_allocator", optional = true }
spin = "0.4"
spin = "0.4.8"
[dependencies.goblin]
version = "0.0.10"
version = "0.0.15"
default-features = false
features = ["elf32", "elf64"]
[dependencies.x86]
version = "0.7"
version = "0.9.0"
default-features = false
[features]
......
use alloc::heap::{AllocErr, GlobalAlloc, Layout, Opaque};
use core::alloc::{AllocErr, GlobalAlloc, Layout};
use core::ptr::NonNull;
use linked_list_allocator::Heap;
use spin::Mutex;
......@@ -16,7 +16,7 @@ impl Allocator {
}
unsafe impl GlobalAlloc for Allocator {
unsafe fn alloc(&self, layout: Layout) -> *mut Opaque {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
loop {
let res = if let Some(ref mut heap) = *HEAP.lock() {
heap.allocate_first_fit(layout)
......@@ -40,12 +40,12 @@ unsafe impl GlobalAlloc for Allocator {
panic!("__rust_allocate: heap not initialized");
}
},
other => return other.ok().map_or(0 as *mut Opaque, |allocation| allocation.as_ptr()),
other => return other.ok().map_or(0 as *mut u8, |allocation| allocation.as_ptr()),
}
}
}
unsafe fn dealloc(&self, ptr: *mut Opaque, layout: Layout) {
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.deallocate(NonNull::new_unchecked(ptr), layout)
} else {
......
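
Note on the allocator hunks above: the allocator traits moved from the unstable alloc::heap module into core::alloc, the Opaque pointer type is gone, and GlobalAlloc::alloc/dealloc now work directly on *mut u8, with a null pointer signalling failure. A minimal sketch of the locked linked_list_allocator-backed allocator under the new signatures (the HEAP static and its initialization are taken as given, and the kernel's retry-and-grow-the-heap loop is omitted):

    use core::alloc::{GlobalAlloc, Layout};
    use core::ptr::{self, NonNull};
    use linked_list_allocator::Heap;
    use spin::Mutex;

    // Assumed global heap; the kernel initializes it elsewhere with its heap range.
    static HEAP: Mutex<Option<Heap>> = Mutex::new(None);

    pub struct Allocator;

    unsafe impl GlobalAlloc for Allocator {
        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
            if let Some(ref mut heap) = *HEAP.lock() {
                // allocate_first_fit returns Result<NonNull<u8>, AllocErr>;
                // GlobalAlloc reports failure as a null pointer instead.
                heap.allocate_first_fit(layout)
                    .ok()
                    .map_or(ptr::null_mut(), |allocation| allocation.as_ptr())
            } else {
                panic!("__rust_allocate: heap not initialized");
            }
        }

        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
            if let Some(ref mut heap) = *HEAP.lock() {
                heap.deallocate(NonNull::new_unchecked(ptr), layout)
            } else {
                panic!("__rust_deallocate: heap not initialized");
            }
        }
    }
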
use alloc::heap::{Alloc, AllocErr, Layout};
use core::alloc::{Alloc, AllocErr, Layout};
use spin::Mutex;
use slab_allocator::Heap;
......
......@@ -115,7 +115,7 @@ pub fn cpu_info<W: Write>(w: &mut W) -> Result {
if info.has_rep_movsb_stosb() { write!(w, " erms")? };
if info.has_invpcid() { write!(w, " invpcid")? };
if info.has_rtm() { write!(w, " rtm")? };
if info.has_qm() { write!(w, " qm")? };
//if info.has_qm() { write!(w, " qm")? };
if info.has_fpu_cs_ds_deprecated() { write!(w, " fpu_seg")? };
if info.has_mpx() { write!(w, " mpx")? };
}
......
use core::intrinsics::{volatile_load, volatile_store};
use x86::cpuid::CpuId;
use x86::msr::*;
use x86::shared::cpuid::CpuId;
use x86::shared::msr::*;
use memory::Frame;
use paging::{ActivePageTable, PhysicalAddress, Page, VirtualAddress};
......
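
x86 0.9 splits the crate into width-independent shared modules and pointer-width-specific current modules, so the flat x86::cpuid and x86::msr paths become x86::shared::cpuid and x86::shared::msr. An illustrative read-modify-write of the local APIC base MSR under the new layout (the enable_local_apic helper itself is not part of this commit):

    use x86::shared::msr::{rdmsr, wrmsr, IA32_APIC_BASE};

    /// Illustrative only: set the APIC global enable bit (bit 11) in IA32_APIC_BASE.
    pub unsafe fn enable_local_apic() -> u64 {
        let base = rdmsr(IA32_APIC_BASE);
        wrmsr(IA32_APIC_BASE, base | (1 << 11));
        rdmsr(IA32_APIC_BASE)
    }
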
//! Global descriptor table
use core::mem;
use x86::dtables::{self, DescriptorTablePointer};
use x86::segmentation::{self, SegmentSelector};
use x86::task::{self, TaskStateSegment};
use x86::current::segmentation::set_cs;
use x86::current::task::TaskStateSegment;
use x86::shared::PrivilegeLevel;
use x86::shared::dtables::{self, DescriptorTablePointer};
use x86::shared::segmentation::{self, SegmentDescriptor, SegmentSelector};
use x86::shared::task;
pub const GDT_NULL: usize = 0;
pub const GDT_KERNEL_CODE: usize = 1;
......@@ -33,9 +36,9 @@ pub const GDT_F_PAGE_SIZE: u8 = 1 << 7;
pub const GDT_F_PROTECTED_MODE: u8 = 1 << 6;
pub const GDT_F_LONG_MODE: u8 = 1 << 5;
static mut INIT_GDTR: DescriptorTablePointer = DescriptorTablePointer {
static mut INIT_GDTR: DescriptorTablePointer<SegmentDescriptor> = DescriptorTablePointer {
limit: 0,
base: 0
base: 0 as *const SegmentDescriptor
};
static mut INIT_GDT: [GdtEntry; 4] = [
......@@ -50,9 +53,9 @@ static mut INIT_GDT: [GdtEntry; 4] = [
];
#[thread_local]
pub static mut GDTR: DescriptorTablePointer = DescriptorTablePointer {
pub static mut GDTR: DescriptorTablePointer<SegmentDescriptor> = DescriptorTablePointer {
limit: 0,
base: 0
base: 0 as *const SegmentDescriptor
};
#[thread_local]
......@@ -105,18 +108,18 @@ pub unsafe fn init() {
// Setup the initial GDT with TLS, so we can setup the TLS GDT (a little confusing)
// This means that each CPU will have its own GDT, but we only need to define it once as a thread local
INIT_GDTR.limit = (INIT_GDT.len() * mem::size_of::<GdtEntry>() - 1) as u16;
INIT_GDTR.base = INIT_GDT.as_ptr() as u64;
INIT_GDTR.base = INIT_GDT.as_ptr() as *const SegmentDescriptor;
// Load the initial GDT, before we have access to thread locals
dtables::lgdt(&INIT_GDTR);
// Load the segment descriptors
segmentation::load_cs(SegmentSelector::new(GDT_KERNEL_CODE as u16));
segmentation::load_ds(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_es(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_fs(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_gs(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_ss(SegmentSelector::new(GDT_KERNEL_DATA as u16));
set_cs(SegmentSelector::new(GDT_KERNEL_CODE as u16, PrivilegeLevel::Ring0));
segmentation::load_ds(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
segmentation::load_es(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
segmentation::load_fs(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
segmentation::load_gs(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
segmentation::load_ss(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
}
/// Initialize GDT with TLS
......@@ -128,11 +131,11 @@ pub unsafe fn init_paging(tcb_offset: usize, stack_offset: usize) {
dtables::lgdt(&INIT_GDTR);
// Load the segment descriptors
segmentation::load_fs(SegmentSelector::new(GDT_KERNEL_TLS as u16));
segmentation::load_fs(SegmentSelector::new(GDT_KERNEL_TLS as u16, PrivilegeLevel::Ring0));
// Now that we have access to thread locals, setup the AP's individual GDT
GDTR.limit = (GDT.len() * mem::size_of::<GdtEntry>() - 1) as u16;
GDTR.base = GDT.as_ptr() as u64;
GDTR.base = GDT.as_ptr() as *const SegmentDescriptor;
// Set the TLS segment to the offset of the Thread Control Block
GDT[GDT_KERNEL_TLS].set_offset(tcb_offset as u32);
......@@ -151,15 +154,15 @@ pub unsafe fn init_paging(tcb_offset: usize, stack_offset: usize) {
dtables::lgdt(&GDTR);
// Reload the segment descriptors
segmentation::load_cs(SegmentSelector::new(GDT_KERNEL_CODE as u16));
segmentation::load_ds(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_es(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_fs(SegmentSelector::new(GDT_KERNEL_TLS as u16));
segmentation::load_gs(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_ss(SegmentSelector::new(GDT_KERNEL_DATA as u16));
set_cs(SegmentSelector::new(GDT_KERNEL_CODE as u16, PrivilegeLevel::Ring0));
segmentation::load_ds(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
segmentation::load_es(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
segmentation::load_fs(SegmentSelector::new(GDT_KERNEL_TLS as u16, PrivilegeLevel::Ring0));
segmentation::load_gs(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
segmentation::load_ss(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
// Load the task register
task::load_ltr(SegmentSelector::new(GDT_TSS as u16));
task::load_tr(SegmentSelector::new(GDT_TSS as u16, PrivilegeLevel::Ring0));
}
#[derive(Copy, Clone, Debug)]
......
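
Two x86 0.9 changes drive the GDT rewrite above: DescriptorTablePointer is now generic over the entry type, so its base is a typed pointer rather than a u64, and the segment-load helpers take an explicit PrivilegeLevel, with CS reloaded through x86::current::segmentation::set_cs. A condensed sketch of loading a ring-0 GDT under the new API (the raw descriptor values and table size here are placeholders, not the kernel's real GDT):

    use core::mem;
    use x86::current::segmentation::set_cs;
    use x86::shared::PrivilegeLevel;
    use x86::shared::dtables::{self, DescriptorTablePointer};
    use x86::shared::segmentation::{self, SegmentDescriptor, SegmentSelector};

    // Assumed GDT contents, stored as raw 8-byte descriptors:
    // null, 64-bit ring-0 code, ring-0 data.
    static mut GDT: [u64; 3] = [
        0,
        0x00af_9a00_0000_ffff,
        0x00cf_9200_0000_ffff,
    ];

    const GDT_KERNEL_CODE: usize = 1;
    const GDT_KERNEL_DATA: usize = 2;

    pub unsafe fn load_gdt() {
        // The pointer is typed now, so base is *const SegmentDescriptor
        // instead of a bare u64 address.
        let gdtr: DescriptorTablePointer<SegmentDescriptor> = DescriptorTablePointer {
            limit: (GDT.len() * mem::size_of::<u64>() - 1) as u16,
            base: GDT.as_ptr() as *const SegmentDescriptor,
        };
        dtables::lgdt(&gdtr);

        // Every selector names its requested privilege level explicitly, and
        // CS is reloaded through the width-specific x86::current helper.
        set_cs(SegmentSelector::new(GDT_KERNEL_CODE as u16, PrivilegeLevel::Ring0));
        segmentation::load_ds(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
        segmentation::load_es(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
        segmentation::load_ss(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
    }
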
use alloc::allocator::{Alloc, Layout};
use alloc::heap::Heap;
use core::alloc::{Alloc, GlobalAlloc, Layout};
use core::{cmp, slice};
use super::FONT;
......@@ -16,7 +15,7 @@ pub struct Display {
impl Display {
pub fn new(width: usize, height: usize, onscreen: usize) -> Display {
let size = width * height;
let offscreen = unsafe { Heap.alloc(Layout::from_size_align_unchecked(size * 4, 4096)).unwrap() };
let offscreen = unsafe { ::ALLOCATOR.alloc(Layout::from_size_align_unchecked(size * 4, 4096)).unwrap() };
unsafe { fast_set64(offscreen as *mut u64, 0, size/2) };
Display {
width: width,
......@@ -145,6 +144,6 @@ impl Display {
impl Drop for Display {
fn drop(&mut self) {
unsafe { Heap.dealloc(self.offscreen.as_mut_ptr() as *mut u8, Layout::from_size_align_unchecked(self.offscreen.len() * 4, 4096)) };
unsafe { ::ALLOCATOR.dealloc(self.offscreen.as_mut_ptr() as *mut u8, Layout::from_size_align_unchecked(self.offscreen.len() * 4, 4096)) };
}
}
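
With alloc::heap::Heap removed, one-off buffers like the display's off-screen framebuffer are carved directly out of the kernel's global allocator through the GlobalAlloc trait, which hands back a bare *mut u8. A sketch of that pattern; the allocator parameter stands in for the kernel's ::ALLOCATOR static, and alloc_zeroed replaces the explicit fast_set64 zeroing used above:

    use core::alloc::{GlobalAlloc, Layout};
    use core::slice;

    /// Allocate a zeroed, page-aligned pixel buffer of `size` 32-bit cells.
    unsafe fn alloc_offscreen<A: GlobalAlloc>(allocator: &A, size: usize) -> &'static mut [u32] {
        let layout = Layout::from_size_align_unchecked(size * 4, 4096);
        let ptr = allocator.alloc_zeroed(layout) as *mut u32;
        assert!(!ptr.is_null(), "display: out of memory");
        slice::from_raw_parts_mut(ptr, size)
    }

    /// Hand the buffer back with the same layout it was allocated with.
    unsafe fn free_offscreen<A: GlobalAlloc>(allocator: &A, buf: &mut [u32]) {
        let layout = Layout::from_size_align_unchecked(buf.len() * 4, 4096);
        allocator.dealloc(buf.as_mut_ptr() as *mut u8, layout);
    }
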
use core::mem;
use x86::dtables::{self, DescriptorTablePointer};
use x86::current::irq::IdtEntry as X86IdtEntry;
use x86::shared::dtables::{self, DescriptorTablePointer};
use interrupt::*;
pub static mut INIT_IDTR: DescriptorTablePointer = DescriptorTablePointer {
pub static mut INIT_IDTR: DescriptorTablePointer<X86IdtEntry> = DescriptorTablePointer {
limit: 0,
base: 0
base: 0 as *const X86IdtEntry
};
pub static mut IDTR: DescriptorTablePointer = DescriptorTablePointer {
pub static mut IDTR: DescriptorTablePointer<X86IdtEntry> = DescriptorTablePointer {
limit: 0,
base: 0
base: 0 as *const X86IdtEntry
};
pub static mut IDT: [IdtEntry; 256] = [IdtEntry::new(); 256];
......@@ -21,7 +22,7 @@ pub unsafe fn init() {
pub unsafe fn init_paging() {
IDTR.limit = (IDT.len() * mem::size_of::<IdtEntry>() - 1) as u16;
IDTR.base = IDT.as_ptr() as u64;
IDTR.base = IDT.as_ptr() as *const X86IdtEntry;
// Set up exceptions
IDT[0].set_func(exception::divide_by_zero);
......
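
The IDT sees the same typed-pointer change: DescriptorTablePointer<X86IdtEntry> carries its entry type, and base is a *const X86IdtEntry instead of a raw address. A short sketch of building and loading such a pointer (the load_idt helper is illustrative; in the kernel the table is the IDT static above):

    use core::mem;
    use x86::current::irq::IdtEntry as X86IdtEntry;
    use x86::shared::dtables::{self, DescriptorTablePointer};

    /// Build a typed IDTR for `count` entries starting at `idt` and load it.
    /// (`idt` would be the kernel's IDT static, cast with `as *const X86IdtEntry`.)
    pub unsafe fn load_idt(idt: *const X86IdtEntry, count: usize) {
        let idtr: DescriptorTablePointer<X86IdtEntry> = DescriptorTablePointer {
            limit: (count * mem::size_of::<X86IdtEntry>() - 1) as u16,
            base: idt,
        };
        dtables::lidt(&idtr);
    }
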
......@@ -3,7 +3,7 @@
use core::{mem, ptr};
use core::ops::{Deref, DerefMut};
use x86::{msr, tlb};
use x86::shared::{control_regs, msr, tlb};
use memory::{allocate_frames, Frame};
......@@ -283,15 +283,13 @@ impl ActivePageTable {
}
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
use x86::controlregs;
let old_table = InactivePageTable {
p4_frame: Frame::containing_address(
PhysicalAddress::new(unsafe { controlregs::cr3() } as usize)
PhysicalAddress::new(unsafe { control_regs::cr3() } as usize)
),
};
unsafe {
controlregs::cr3_write(new_table.p4_frame.start_address().get() as u64);
control_regs::cr3_write(new_table.p4_frame.start_address().get() as u64);
}
old_table
}
......@@ -307,10 +305,8 @@ impl ActivePageTable {
pub fn with<F>(&mut self, table: &mut InactivePageTable, temporary_page: &mut TemporaryPage, f: F)
where F: FnOnce(&mut Mapper)
{
use x86::controlregs;
{
let backup = Frame::containing_address(PhysicalAddress::new(unsafe { controlregs::cr3() as usize }));
let backup = Frame::containing_address(PhysicalAddress::new(unsafe { control_regs::cr3() as usize }));
// map temporary_page to current p4 table
let p4_table = temporary_page.map_table_frame(backup.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE, self);
......@@ -331,8 +327,7 @@ impl ActivePageTable {
}
pub unsafe fn address(&self) -> usize {
use x86::controlregs;
controlregs::cr3() as usize
control_regs::cr3() as usize
}
}
......
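
The control-register helpers moved from x86::controlregs to x86::shared::control_regs (note the underscore), which is the only substantive change in this paging hunk. A compact sketch of the CR3 read/write pair the switch logic relies on:

    use x86::shared::control_regs;

    /// Physical address of the currently active P4 (PML4) table.
    pub unsafe fn current_p4() -> usize {
        control_regs::cr3() as usize
    }

    /// Switch to a new P4 table, returning the physical address of the old one.
    pub unsafe fn switch_p4(new_p4: usize) -> usize {
        let old = control_regs::cr3() as usize;
        control_regs::cr3_write(new_p4 as u64);
        old
    }
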
......@@ -5,7 +5,6 @@ use event;
use spin::RwLock;
use scheme::{self, SchemeId};
use syscall::error::{Result, Error, EBADF};
use scheme::FileHandle;
/// A file description
#[derive(Debug)]
......
use alloc::allocator::{Alloc, Layout};
use alloc::arc::Arc;
use alloc::boxed::Box;
use alloc::heap::Heap;
use alloc::BTreeMap;
use core::alloc::{Alloc, GlobalAlloc, Layout};
use core::mem;
use core::sync::atomic::Ordering;
use paging;
......@@ -67,7 +66,7 @@ impl ContextList {
let context_lock = self.new_context()?;
{
let mut context = context_lock.write();
let mut fx = unsafe { Box::from_raw(Heap.alloc(Layout::from_size_align_unchecked(512, 16)).unwrap().as_ptr() as *mut [u8; 512]) };
let mut fx = unsafe { Box::from_raw(::ALLOCATOR.alloc(Layout::from_size_align_unchecked(512, 16)) as *mut [u8; 512]) };
for b in fx.iter_mut() {
*b = 0;
}
......
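
The FX-save area previously came from alloc::heap::Heap, whose Alloc::alloc returned Result<NonNull<Opaque>, AllocErr>; GlobalAlloc::alloc returns a plain *mut u8, so the .unwrap().as_ptr() step disappears before the cast to *mut [u8; 512]. A sketch of that allocation pattern (the allocator parameter stands in for the kernel's global ::ALLOCATOR, the null check is added here for illustration, and Box::from_raw is only sound because the pointer comes from the global allocator):

    use alloc::boxed::Box;
    use core::alloc::{GlobalAlloc, Layout};

    /// Allocate the 512-byte, 16-byte-aligned FXSAVE area as an owned, zeroed Box.
    unsafe fn new_fx_area<A: GlobalAlloc>(allocator: &A) -> Box<[u8; 512]> {
        let ptr = allocator.alloc(Layout::from_size_align_unchecked(512, 16)) as *mut [u8; 512];
        assert!(!ptr.is_null(), "context: out of memory for kfx");
        let mut fx = Box::from_raw(ptr);
        for b in fx.iter_mut() {
            *b = 0;
        }
        fx
    }
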
//! Context management
use alloc::allocator::{Alloc, Layout};
use alloc::boxed::Box;
use alloc::heap::Heap;
use core::alloc::{Alloc, GlobalAlloc, Layout};
use core::sync::atomic::Ordering;
use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
......@@ -49,7 +48,7 @@ pub fn init() {
let mut contexts = contexts_mut();
let context_lock = contexts.new_context().expect("could not initialize first context");
let mut context = context_lock.write();
let mut fx = unsafe { Box::from_raw(Heap.alloc(Layout::from_size_align_unchecked(512, 16)).unwrap().as_ptr() as *mut [u8; 512]) };
let mut fx = unsafe { Box::from_raw(::ALLOCATOR.alloc(Layout::from_size_align_unchecked(512, 16)) as *mut [u8; 512]) };
for b in fx.iter_mut() {
*b = 0;
}
......
......@@ -27,6 +27,7 @@
#![feature(lang_items)]
#![feature(naked_functions)]
#![feature(never_type)]
#![feature(panic_implementation)]
#![feature(ptr_internals)]
#![feature(thread_local)]
#![feature(unique)]
......
//! Intrinsics for panic handling
use core::panic::PanicInfo;
use interrupt;
#[lang = "eh_personality"]
......@@ -7,12 +9,10 @@ use interrupt;
pub extern "C" fn rust_eh_personality() {}
/// Required to handle panics
#[lang = "panic_fmt"]
#[panic_implementation]
#[no_mangle]
pub extern "C" fn rust_begin_unwind(fmt: ::core::fmt::Arguments, file: &str, line: u32) -> ! {
println!("PANIC: {}", fmt);
println!("FILE: {}", file);
println!("LINE: {}", line);
pub extern "C" fn rust_begin_unwind(info: &PanicInfo) -> ! {
println!("KERNEL PANIC: {}", info);
unsafe { interrupt::stack_trace(); }
......
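
The #[lang = "panic_fmt"] hook, which received the message, file, and line as separate arguments, is replaced by a #[panic_implementation] function (later stabilized as #[panic_handler]) taking a single &PanicInfo whose Display output carries all three. A minimal sketch of such a handler for a no_std kernel; println! is the kernel's own macro, and the crate root needs the #![feature(panic_implementation)] gate added above:

    use core::panic::PanicInfo;

    #[panic_implementation]
    #[no_mangle]
    pub extern "C" fn rust_begin_unwind(info: &PanicInfo) -> ! {
        // PanicInfo's Display impl prints the message together with file and line.
        println!("KERNEL PANIC: {}", info);

        loop {}
    }
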
use alloc::allocator::{Alloc, Layout};
use alloc::arc::Arc;
use alloc::boxed::Box;
use alloc::heap::Heap;
use alloc::{BTreeMap, Vec};
use core::alloc::{Alloc, GlobalAlloc, Layout};
use core::{intrinsics, mem, str};
use core::ops::DerefMut;
use spin::Mutex;
......@@ -113,7 +112,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
arch = context.arch.clone();
if let Some(ref fx) = context.kfx {
let mut new_fx = unsafe { Box::from_raw(Heap.alloc(Layout::from_size_align_unchecked(512, 16)).unwrap().as_ptr() as *mut [u8; 512]) };
let mut new_fx = unsafe { Box::from_raw(::ALLOCATOR.alloc(Layout::from_size_align_unchecked(512, 16)) as *mut [u8; 512]) };
for (new_b, b) in new_fx.iter_mut().zip(fx.iter()) {
*new_b = *b;
}
......