From 3e06a372156f05caf106d350073c2d8292b94b20 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Robert=20W=C4=99c=C5=82awski?= <r.weclawski@gmail.com>
Date: Wed, 17 Jan 2018 23:31:27 +0100
Subject: [PATCH] Use slab allocator for kernel heap

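Replace the in-tree alloc_kernel crate, which wrapped linked_list_allocator,
with the external slab_allocator crate (version 0.3.0). The allocator glue
moves from alloc_kernel/src/lib.rs to src/memory/slab.rs, the init function
is renamed to init_heap, and the Alloc impl additionally forwards usable_size
to the heap and panics with the AllocErr on oom.

A rough sketch of the resulting call sites, taken directly from the hunks
below:

    // arch/x86_64/start.rs: initialize the slab heap once the heap pages
    // have been mapped
    allocator::init_heap(::KERNEL_HEAP_OFFSET, ::KERNEL_HEAP_SIZE);

    // lib.rs: register the wrapper as the global allocator
    #[global_allocator]
    static ALLOCATOR: memory::slab::Allocator = memory::slab::Allocator;
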
---
 Cargo.toml                                    |  2 +-
 alloc_kernel/Cargo.toml                       |  8 -----
 src/arch/x86_64/start.rs                      |  4 +--
 src/lib.rs                                    |  4 +--
 src/memory/mod.rs                             |  1 +
 alloc_kernel/src/lib.rs => src/memory/slab.rs | 30 ++++++++++---------
 6 files changed, 22 insertions(+), 27 deletions(-)
 delete mode 100644 alloc_kernel/Cargo.toml
 rename alloc_kernel/src/lib.rs => src/memory/slab.rs (60%)

diff --git a/Cargo.toml b/Cargo.toml
index 5ac75ded..a1d8cfbd 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -9,9 +9,9 @@ path = "src/lib.rs"
 crate-type = ["staticlib"]
 
 [dependencies]
-alloc_kernel = { path = "alloc_kernel" }
 bitflags = "1"
 clippy = { version = "*", optional = true }
+slab_allocator = "0.3.0"
 spin = "0.4"
 raw-cpuid = "3.0"
 redox_syscall = { path = "syscall" }
diff --git a/alloc_kernel/Cargo.toml b/alloc_kernel/Cargo.toml
deleted file mode 100644
index bbd1212c..00000000
--- a/alloc_kernel/Cargo.toml
+++ /dev/null
@@ -1,8 +0,0 @@
-[package]
-authors = ["Philipp Oppermann <dev@phil-opp.com>"]
-name = "alloc_kernel"
-version = "0.1.0"
-
-[dependencies]
-linked_list_allocator = { git = "https://github.com/redox-os/linked-list-allocator.git" }
-spin = "*"
diff --git a/src/arch/x86_64/start.rs b/src/arch/x86_64/start.rs
index ddc7b61d..41991c77 100644
--- a/src/arch/x86_64/start.rs
+++ b/src/arch/x86_64/start.rs
@@ -7,13 +7,13 @@ use core::slice;
 use core::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
 
 use acpi;
-use allocator;
 use arch::x86_64::pti;
 use device;
 use gdt;
 use idt;
 use interrupt;
 use memory;
+use memory::slab as allocator;
 use paging::{self, Page, VirtualAddress};
 use paging::entry::EntryFlags;
 use paging::mapper::MapperFlushAll;
@@ -113,7 +113,7 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
             flush_all.flush(&mut active_table);
 
             // Init the allocator
-            allocator::init(::KERNEL_HEAP_OFFSET, ::KERNEL_HEAP_SIZE);
+            allocator::init_heap(::KERNEL_HEAP_OFFSET, ::KERNEL_HEAP_SIZE);
         }
 
         // Initialize devices
diff --git a/src/lib.rs b/src/lib.rs
index 69727a62..34629d5b 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -32,7 +32,6 @@
 #![feature(const_size_of)]
 #![no_std]
 
-extern crate alloc_kernel as allocator;
 pub extern crate x86;
 
 #[macro_use]
@@ -42,6 +41,7 @@ extern crate alloc;
 extern crate bitflags;
 extern crate goblin;
 extern crate spin;
+extern crate slab_allocator;
 
 use alloc::arc::Arc;
 use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
@@ -103,7 +103,7 @@ pub mod time;
 pub mod tests;
 
 #[global_allocator]
-static ALLOCATOR: allocator::Allocator = allocator::Allocator;
+static ALLOCATOR: memory::slab::Allocator = memory::slab::Allocator;
 
 /// A unique number that identifies the current CPU - used for scheduling
 #[thread_local]
diff --git a/src/memory/mod.rs b/src/memory/mod.rs
index 6ab6f0fb..39cdf5db 100644
--- a/src/memory/mod.rs
+++ b/src/memory/mod.rs
@@ -10,6 +10,7 @@ use spin::Mutex;
 
 pub mod bump;
 pub mod recycle;
+pub mod slab;
 
 /// The current memory map. It's size is maxed out to 512 entries, due to it being
 /// from 0x500 to 0x5000 (800 is the absolute total)
diff --git a/alloc_kernel/src/lib.rs b/src/memory/slab.rs
similarity index 60%
rename from alloc_kernel/src/lib.rs
rename to src/memory/slab.rs
index 9caeb286..286e146d 100644
--- a/alloc_kernel/src/lib.rs
+++ b/src/memory/slab.rs
@@ -1,20 +1,10 @@
-#![deny(warnings)]
-#![feature(alloc)]
-#![feature(allocator_api)]
-#![feature(const_fn)]
-#![no_std]
-
-extern crate alloc;
-extern crate spin;
-extern crate linked_list_allocator;
-
 use alloc::heap::{Alloc, AllocErr, Layout};
 use spin::Mutex;
-use linked_list_allocator::Heap;
+use slab_allocator::Heap;
 
 static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
 
-pub unsafe fn init(offset: usize, size: usize) {
+pub unsafe fn init_heap(offset: usize, size: usize) {
     *HEAP.lock() = Some(Heap::new(offset, size));
 }
 
@@ -23,7 +13,7 @@ pub struct Allocator;
 unsafe impl<'a> Alloc for &'a Allocator {
     unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
         if let Some(ref mut heap) = *HEAP.lock() {
-            heap.allocate_first_fit(layout)
+            heap.allocate(layout)
         } else {
             panic!("__rust_allocate: heap not initialized");
         }
@@ -36,4 +26,16 @@ unsafe impl<'a> Alloc for &'a Allocator {
             panic!("__rust_deallocate: heap not initialized");
         }
     }
-}
+
+    fn oom(&mut self, error: AllocErr) -> ! {
+        panic!("Out of memory: {:?}", error);
+    }
+
+    fn usable_size(&self, layout: &Layout) -> (usize, usize) {
+        if let Some(ref mut heap) = *HEAP.lock() {
+            heap.usable_size(layout)
+        } else {
+            panic!("__rust_usable_size: heap not initialized");
+        }
+    }
+}
\ No newline at end of file
-- 
GitLab