Commit a9e2bb08 authored by Jeremy Soller
Parents: 64676e79, 7721c551
[package]
name = "slab_allocator"
version = "0.3.0"
version = "0.3.1"
authors = ["Robert Węcławski <r.weclawski@gmail.com>"]
license = "MIT"
......
#![feature(alloc, allocator_api)]
#![feature(attr_literals)]
#![feature(const_fn)]
#![feature(ptr_internals)]
-#![feature(unique)]
#![feature(pointer_methods)]
#![no_std]
extern crate alloc;
@@ -15,7 +14,6 @@ mod slab;
use core::ops::Deref;
use slab::Slab;
use alloc::allocator::{Alloc, AllocErr, Layout};
@@ -84,7 +82,7 @@ impl Heap {
}
/// Adds memory to the heap. The start address must be valid
-/// and the memory in the `[heap_start_addr, heap_start_addr + heap_size)` range must not be used for
+/// and the memory in the `[mem_start_addr, mem_start_addr + heap_size)` range must not be used for
/// anything else.
/// In the case of the linked list allocator, the memory can only be extended.
/// This function is unsafe because it can cause undefined behavior if the
@@ -107,24 +105,16 @@ impl Heap {
/// This function finds the slab of the lowest size which can still accommodate the given chunk.
/// The runtime is `O(1)` for chunks of size <= 4096, and `O(n)` when the chunk size is > 4096.
pub fn allocate(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
-        if layout.size() > 4096 {
-            return self.linked_list_allocator.allocate_first_fit(layout)
-        } else if layout.size() <= 64 && layout.align() <= 64 {
-            return self.slab_64_bytes.allocate(layout)
-        } else if layout.size() <= 128 && layout.align() <= 128 {
-            return self.slab_128_bytes.allocate(layout)
-        } else if layout.size() <= 256 && layout.align() <= 256 {
-            return self.slab_256_bytes.allocate(layout)
-        } else if layout.size() <= 512 && layout.align() <= 512 {
-            return self.slab_512_bytes.allocate(layout)
-        } else if layout.size() <= 1024 && layout.align() <= 1024 {
-            return self.slab_1024_bytes.allocate(layout)
-        } else if layout.size() <= 2048 && layout.align() <= 2048 {
-            return self.slab_2048_bytes.allocate(layout)
-        } else if layout.size() <= 4096 && layout.align() <= 4096 {
-            return self.slab_4096_bytes.allocate(layout)
-        }
-        Err(AllocErr::Exhausted { request: layout })
+        match Heap::layout_to_allocator(&layout) {
+            HeapAllocator::Slab64Bytes => self.slab_64_bytes.allocate(layout),
+            HeapAllocator::Slab128Bytes => self.slab_128_bytes.allocate(layout),
+            HeapAllocator::Slab256Bytes => self.slab_256_bytes.allocate(layout),
+            HeapAllocator::Slab512Bytes => self.slab_512_bytes.allocate(layout),
+            HeapAllocator::Slab1024Bytes => self.slab_1024_bytes.allocate(layout),
+            HeapAllocator::Slab2048Bytes => self.slab_2048_bytes.allocate(layout),
+            HeapAllocator::Slab4096Bytes => self.slab_4096_bytes.allocate(layout),
+            HeapAllocator::LinkedListAllocator => self.linked_list_allocator.allocate_first_fit(layout),
+        }
}
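For context, here is a minimal usage sketch of the dispatch above. It assumes the pre-stabilization `allocator_api` types this crate builds against (`alloc::allocator::Layout`) and a `Heap` that has already been given memory; the `demo` function and the layout values are hypothetical, not part of the patch.

use alloc::allocator::Layout;

// Hypothetical caller; `heap` is assumed to be initialized with memory elsewhere.
unsafe fn demo(heap: &mut Heap) {
    // 100 bytes with 8-byte alignment falls into the 128-byte slab,
    // so the request is served in O(1).
    let layout = Layout::from_size_align(100, 8).unwrap();
    if let Ok(ptr) = heap.allocate(layout.clone()) {
        // ... use the block ...
        heap.deallocate(ptr, layout);
    }

    // Anything larger than 4096 bytes bypasses the slabs and goes to
    // the linked list allocator (first fit, O(n)).
    let big = Layout::from_size_align(8192, 4096).unwrap();
    let _ = heap.allocate(big);
}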
/// Frees the given allocation. `ptr` must be a pointer returned
@@ -135,46 +125,51 @@ impl Heap {
/// with `ptr` address to the list of free blocks.
/// This operation is in `O(1)` for blocks <= 4096 bytes and `O(n)` for blocks > 4096 bytes.
pub unsafe fn deallocate(&mut self, ptr: *mut u8, layout: Layout) {
-        if layout.size() > 4096 {
-            self.linked_list_allocator.deallocate(ptr, layout)
-        } else if layout.size() <= 64 && layout.align() <= 64 {
-            self.slab_64_bytes.deallocate(ptr)
-        } else if layout.size() <= 128 && layout.align() <= 128 {
-            self.slab_128_bytes.deallocate(ptr)
-        } else if layout.size() <= 256 && layout.align() <= 256 {
-            self.slab_256_bytes.deallocate(ptr)
-        } else if layout.size() <= 512 && layout.align() <= 512 {
-            self.slab_512_bytes.deallocate(ptr)
-        } else if layout.size() <= 1024 && layout.align() <= 1024 {
-            self.slab_1024_bytes.deallocate(ptr)
-        } else if layout.size() <= 2048 && layout.align() <= 2048 {
-            self.slab_2048_bytes.deallocate(ptr)
-        } else if layout.size() <= 4096 && layout.align() <= 4096 {
-            self.slab_4096_bytes.deallocate(ptr)
-        }
+        match Heap::layout_to_allocator(&layout) {
+            HeapAllocator::Slab64Bytes => self.slab_64_bytes.deallocate(ptr),
+            HeapAllocator::Slab128Bytes => self.slab_128_bytes.deallocate(ptr),
+            HeapAllocator::Slab256Bytes => self.slab_256_bytes.deallocate(ptr),
+            HeapAllocator::Slab512Bytes => self.slab_512_bytes.deallocate(ptr),
+            HeapAllocator::Slab1024Bytes => self.slab_1024_bytes.deallocate(ptr),
+            HeapAllocator::Slab2048Bytes => self.slab_2048_bytes.deallocate(ptr),
+            HeapAllocator::Slab4096Bytes => self.slab_4096_bytes.deallocate(ptr),
+            HeapAllocator::LinkedListAllocator => self.linked_list_allocator.deallocate(ptr, layout),
+        }
    }

    /// Returns bounds on the guaranteed usable size of a successful
    /// allocation created with the specified `layout`.
    pub fn usable_size(&self, layout: &Layout) -> (usize, usize) {
-        if layout.size() <= 32 {
-            (layout.size(), 32)
-        } else if layout.size() <= 64 {
-            (layout.size(), 64)
-        } else if layout.size() <= 128 {
-            (layout.size(), 128)
-        } else if layout.size() <= 256 {
-            (layout.size(), 256)
-        } else if layout.size() <= 512 {
-            (layout.size(), 512)
-        } else if layout.size() <= 1024 {
-            (layout.size(), 1024)
-        } else if layout.size() <= 2048 {
-            (layout.size(), 2048)
-        } else if layout.size() <= 4096 {
-            (layout.size(), 4096)
-        } else {
-            (layout.size(), layout.size())
-        }
+        match Heap::layout_to_allocator(&layout) {
+            HeapAllocator::Slab64Bytes => (layout.size(), 64),
+            HeapAllocator::Slab128Bytes => (layout.size(), 128),
+            HeapAllocator::Slab256Bytes => (layout.size(), 256),
+            HeapAllocator::Slab512Bytes => (layout.size(), 512),
+            HeapAllocator::Slab1024Bytes => (layout.size(), 1024),
+            HeapAllocator::Slab2048Bytes => (layout.size(), 2048),
+            HeapAllocator::Slab4096Bytes => (layout.size(), 4096),
+            HeapAllocator::LinkedListAllocator => (layout.size(), layout.size()),
+        }
    }

+    /// Finds the allocator to use based on layout size and alignment
+    pub fn layout_to_allocator(layout: &Layout) -> HeapAllocator {
+        if layout.size() > 4096 {
+            HeapAllocator::LinkedListAllocator
+        } else if layout.size() <= 64 && layout.align() <= 64 {
+            HeapAllocator::Slab64Bytes
+        } else if layout.size() <= 128 && layout.align() <= 128 {
+            HeapAllocator::Slab128Bytes
+        } else if layout.size() <= 256 && layout.align() <= 256 {
+            HeapAllocator::Slab256Bytes
+        } else if layout.size() <= 512 && layout.align() <= 512 {
+            HeapAllocator::Slab512Bytes
+        } else if layout.size() <= 1024 && layout.align() <= 1024 {
+            HeapAllocator::Slab1024Bytes
+        } else if layout.size() <= 2048 && layout.align() <= 2048 {
+            HeapAllocator::Slab2048Bytes
+        } else {
+            HeapAllocator::Slab4096Bytes
+        }
+    }
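The subtlety in `layout_to_allocator` is that size and alignment are tested together, so a large alignment can push a small request into a bigger slab class. A sketch of the resulting classification, assuming `HeapAllocator` derives `Debug` and `PartialEq` (the excerpt does not show its definition):

use alloc::allocator::Layout;

fn classification_examples() {
    // 40 bytes, align 8: both bounds fit the 64-byte class.
    let a = Layout::from_size_align(40, 8).unwrap();
    assert_eq!(Heap::layout_to_allocator(&a), HeapAllocator::Slab64Bytes);

    // The same 40 bytes with 128-byte alignment is forced into the
    // 128-byte class purely by alignment, wasting 88 bytes per block.
    let b = Layout::from_size_align(40, 128).unwrap();
    assert_eq!(Heap::layout_to_allocator(&b), HeapAllocator::Slab128Bytes);

    // Anything over 4096 bytes falls through to the linked list allocator.
    let c = Layout::from_size_align(8192, 8).unwrap();
    assert_eq!(Heap::layout_to_allocator(&c), HeapAllocator::LinkedListAllocator);
}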
......
-use core::ptr::Unique;
use alloc::allocator::{AllocErr, Layout};
pub struct Slab {
@@ -29,29 +28,29 @@ impl Slab {
pub fn allocate(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
match self.free_block_list.pop() {
-            Some(block) => Ok(block.as_ptr() as *mut u8),
+            Some(block) => Ok(block.addr() as *mut u8),
None => Err(AllocErr::Exhausted { request: layout }),
}
}
pub fn deallocate(&mut self, ptr: *mut u8) {
-        self.free_block_list.push(unsafe { Unique::new_unchecked(ptr as *mut FreeBlock) });
+        let ptr = ptr as *mut FreeBlock;
+        unsafe {self.free_block_list.push(&mut *ptr);}
}
}
struct FreeBlockList {
len: usize,
-    head: Option<Unique<FreeBlock>>,
+    head: Option<&'static mut FreeBlock>,
}
impl FreeBlockList {
unsafe fn new(start_addr: usize, block_size: usize, num_of_blocks: usize) -> FreeBlockList {
let mut new_list = FreeBlockList::new_empty();
for i in (0..num_of_blocks).rev() {
-            new_list.push(Unique::new_unchecked(
-                (start_addr + i * block_size) as *mut FreeBlock,
-            ));
+            let new_block = (start_addr + i * block_size) as *mut FreeBlock;
+            new_list.push(&mut *new_block);
}
new_list
}
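Note the `.rev()` in `new`: blocks are pushed highest address first, so after construction the head of the free list is the block at `start_addr` and blocks are handed out in ascending address order. A standalone sketch of the same arithmetic, with hypothetical values, runnable on a hosted target:

fn block_addrs(start_addr: usize, block_size: usize, num_of_blocks: usize) -> Vec<usize> {
    // Mirrors FreeBlockList::new: the last address pushed (i = 0)
    // ends up at the head of the list.
    (0..num_of_blocks).rev().map(|i| start_addr + i * block_size).collect()
}

// block_addrs(0x1000, 64, 4) yields [0x10C0, 0x1080, 0x1040, 0x1000] in push
// order, so the first pop() returns the block at 0x1000.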
@@ -67,20 +66,18 @@ impl FreeBlockList {
self.len
}
-    fn pop(&mut self) -> Option<Unique<FreeBlock>> {
-        self.head.take().map(|mut node| unsafe {
-            self.head = node.as_mut().next;
+    fn pop(&mut self) -> Option<&'static mut FreeBlock> {
+        self.head.take().map(|node| {
+            self.head = node.next.take();
self.len -= 1;
node
})
}
-    fn push(&mut self, mut free_block: Unique<FreeBlock>) {
-        unsafe {
-            free_block.as_mut().next = self.head.take();
-            self.len += 1;
-            self.head = Some(free_block);
-        }
+    fn push(&mut self, free_block: &'static mut FreeBlock) {
+        free_block.next = self.head.take();
+        self.len += 1;
+        self.head = Some(free_block);
}
fn is_empty(&self) -> bool {
@@ -96,8 +93,7 @@ impl Drop for FreeBlockList {
}
struct FreeBlock {
-    next: Option<Unique<FreeBlock>>,
-    size: usize,
+    next: Option<&'static mut FreeBlock>,
}
impl FreeBlock {
......
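Taken together, the slab.rs changes replace `Unique<FreeBlock>` with plain `&'static mut FreeBlock` links threaded through the free blocks themselves: an intrusive singly linked stack that needs no storage beyond the blocks it tracks. A minimal self-contained sketch of that technique (illustrative names, not the crate's API):

struct Node {
    next: Option<&'static mut Node>,
}

struct Stack {
    head: Option<&'static mut Node>,
}

impl Stack {
    const fn new() -> Stack {
        Stack { head: None }
    }

    fn push(&mut self, node: &'static mut Node) {
        // The link lives inside the node itself: no allocation needed.
        node.next = self.head.take();
        self.head = Some(node);
    }

    fn pop(&mut self) -> Option<&'static mut Node> {
        self.head.take().map(|node| {
            // take() detaches the rest of the list before the node is
            // handed out, exactly as in FreeBlockList::pop above.
            self.head = node.next.take();
            node
        })
    }
}

As in the patch, `pop` must `take()` the `next` link so the returned block no longer aliases the list; the caller then owns that memory until it is pushed back.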