Verified Commit 92ca1d73 authored by Tom Almeida

Run rustfmt on all files

parent c90145ab
@@ -3,8 +3,8 @@
extern crate ralloc;
extern crate test;
use std::thread;
use std::sync::mpsc;
use std::thread;
#[bench]
fn bench_mpsc(b: &mut test::Bencher) {
......
@@ -5,7 +5,5 @@ extern crate test;
#[bench]
fn bench_sbrk(b: &mut test::Bencher) {
b.iter(|| {
ralloc::sbrk(200).unwrap()
});
b.iter(|| ralloc::sbrk(200).unwrap());
}
@@ -8,7 +8,9 @@ fn bench_vec(b: &mut test::Bencher) {
b.iter(|| {
let mut stuff = Vec::with_capacity(10);
for i in 0..10000 { stuff.push(i) }
for i in 0..10000 {
stuff.push(i)
}
stuff.reserve(100000);
......
wrap_comments = true
max_width = 80
error_on_line_overflow = true
\ No newline at end of file
@@ -6,22 +6,25 @@ use prelude::*;
use core::{mem, ops};
use bookkeeper::{self, Allocator, Bookkeeper};
use {brk, sync};
use bookkeeper::{self, Bookkeeper, Allocator};
use shim::config;
#[cfg(feature = "tls")]
use tls;
/// Alias for the wrapper type of the thread-local variable holding the local allocator.
/// Alias for the wrapper type of the thread-local variable holding the local
/// allocator.
#[cfg(feature = "tls")]
type ThreadLocalAllocator = MoveCell<Option<LazyInit<fn() -> LocalAllocator, LocalAllocator>>>;
type ThreadLocalAllocator =
MoveCell<Option<LazyInit<fn() -> LocalAllocator, LocalAllocator>>>;
/// The global default allocator.
// TODO: Remove these filthy function pointers.
static GLOBAL_ALLOCATOR: sync::Mutex<LazyInit<fn() -> GlobalAllocator, GlobalAllocator>> =
sync::Mutex::new(LazyInit::new(GlobalAllocator::init));
static GLOBAL_ALLOCATOR: sync::Mutex<
LazyInit<fn() -> GlobalAllocator, GlobalAllocator>,
> = sync::Mutex::new(LazyInit::new(GlobalAllocator::init));
#[cfg(feature = "tls")]
tls! {
/// The thread-local allocator.
@@ -30,18 +33,19 @@ tls! {
/// Temporarily get the allocator.
///
/// This is simply to avoid repeating ourself, so we let this take care of the hairy stuff:
/// This is simply to avoid repeating ourselves, so we let this take care of
/// the hairy stuff:
///
/// 1. Initialize the allocator if needed.
/// 2. If the allocator is not yet initialized, fallback to the global allocator.
/// 3. Unlock/move temporarily out of reference.
/// 2. If the allocator is not yet initialized, fall back to the global
///    allocator.
/// 3. Unlock/move temporarily out of reference.
///
/// This is a macro due to the lack of generic closure, which makes it impossible to have one
/// closure for both cases (global and local).
// TODO: Instead of falling back to the global allocator, the thread dtor should be set such that
// it run after the TLS keys that might be declared.
/// This is a macro due to the lack of generic closures, which makes it
/// impossible to have one closure for both cases (global and local).
// TODO: Instead of falling back to the global allocator, the thread dtor
// should be set such that it runs after the TLS keys that might be declared.
macro_rules! get_allocator {
(|$v:ident| $b:expr) => {{
(| $v:ident | $b:expr) => {{
// Get the thread allocator, if TLS is enabled
#[cfg(feature = "tls")]
{
@@ -58,9 +62,12 @@ macro_rules! get_allocator {
res
} else {
// The local allocator seems to have been deinitialized, for this reason we fallback to
// the global allocator.
log!(WARNING, "Accessing the allocator after deinitialization of the local allocator.");
// The local allocator seems to have been deinitialized; for this reason we
// fall back to the global allocator.
log!(
WARNING,
"Accessing the allocator after deinitialization of the local allocator."
);
// Lock the global allocator.
let mut guard = GLOBAL_ALLOCATOR.lock();
@@ -82,7 +89,7 @@ macro_rules! get_allocator {
let $v = guard.get();
$b
}
}}
}};
}
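// For orientation, the call sites later in this file take roughly this shape
// (sketch only):
//
//     get_allocator!(|alloc| Pointer::from(alloc.alloc(size, align)).get());
//     get_allocator!(|alloc| alloc.free(Block::from_raw_parts(Pointer::new(ptr), size)));
//
// The body is evaluated with `$v` bound either to the thread-local allocator
// or, when that is unavailable, to the global one.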
/// Derives `Deref` and `DerefMut` to the `inner` field.
@@ -108,9 +115,9 @@ macro_rules! derive_deref {
/// Global SBRK-based allocator.
///
/// This will extend the data segment whenever new memory is needed. Since this includes leaving
/// userspace, this shouldn't be used when other allocators are available (i.e. the bookkeeper is
/// local).
/// This will extend the data segment whenever new memory is needed. Since this
/// includes leaving userspace, this shouldn't be used when other allocators
/// are available (i.e. the bookkeeper is local).
struct GlobalAllocator {
// The inner bookkeeper.
inner: Bookkeeper,
@@ -123,8 +130,10 @@ impl GlobalAllocator {
log!(NOTE, "Initializing the global allocator.");
// The initial acquired segment.
let (aligner, initial_segment, excessive) =
brk::lock().canonical_brk(4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(), mem::align_of::<Block>());
let (aligner, initial_segment, excessive) = brk::lock().canonical_brk(
4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(),
mem::align_of::<Block>(),
);
// Initialize the new allocator.
let mut res = GlobalAllocator {
@@ -149,11 +158,13 @@ impl Allocator for GlobalAllocator {
#[inline]
fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
// Obtain what you need.
let (alignment_block, res, excessive) = brk::lock().canonical_brk(size, align);
let (alignment_block, res, excessive) =
brk::lock().canonical_brk(size, align);
// Add it to the list. This will not change the order, since the pointer is higher than all
// the previous blocks (BRK extends the data segment). Although, it is worth noting that
// the stack is higher than the program break.
// Add it to the list. This will not change the order, since the
// pointer is higher than all the previous blocks (BRK extends
// the data segment). Although, it is worth noting that the
// stack is higher than the program break.
self.push(alignment_block);
self.push(excessive);
@@ -165,7 +176,9 @@ impl Allocator for GlobalAllocator {
// memtrim the fack outta 'em.
// Pop the last block.
let block = self.pop().expect("The byte count on the global allocator is invalid.");
let block = self
.pop()
.expect("The byte count on the global allocator is invalid.");
// Check if the memtrim is worth it.
if block.size() >= config::OS_MEMTRIM_WORTHY {
@@ -179,9 +192,10 @@ impl Allocator for GlobalAllocator {
self.push(block);
}
// Note that this block is the only block next to the program break, due to the
// segments being as long as possible. For that reason, repeating to push and
// release would fail.
// Note that this block is the only block next to the program break,
// due to the segments being as long as possible. For that reason,
// repeating to push and release would fail.
} else {
/// Logging...
log!(WARNING, "Memtrimming for the global allocator failed.");
@@ -196,7 +210,8 @@ impl Allocator for GlobalAllocator {
/// A local allocator.
///
/// This acquires memory from the upstream (global) allocator, which is protected by a `Mutex`.
/// This acquires memory from the upstream (global) allocator, which is
/// protected by a `Mutex`.
#[cfg(feature = "tls")]
pub struct LocalAllocator {
// The inner bookkeeper.
@@ -211,36 +226,45 @@ impl LocalAllocator {
/// The destructor of the local allocator.
///
/// This will simply free everything to the global allocator.
extern fn dtor(alloc: &ThreadLocalAllocator) {
extern "C" fn dtor(alloc: &ThreadLocalAllocator) {
/// Logging...
log!(NOTE, "Deinitializing and freeing the local allocator.");
// This is important! The thread destructors guarantee no other, and thus one could use the
// allocator _after_ this destructor have been finished. In fact, this is a real problem,
// and happens when using `Arc` and terminating the main thread, for this reason we place
// `None` as a permanent marker indicating that the allocator is deinitialized. After such
// a state is in place, all allocation calls will be redirected to the global allocator,
// which is of course still usable at this moment.
let alloc = alloc.replace(None).expect("Thread-local allocator is already freed.");
// This is important! The thread destructors guarantee no ordering,
// and thus one could use the allocator _after_ this destructor has
// finished. In fact, this is a real problem, and happens when using
// `Arc` and terminating the main thread; for this reason we place
// `None` as a permanent marker indicating that the allocator is
// deinitialized. After such a state is in place, all allocation calls
// will be redirected to the global allocator, which is of course still
// usable at this moment.
let alloc = alloc
.replace(None)
.expect("Thread-local allocator is already freed.");
// Lock the global allocator.
let mut global_alloc = GLOBAL_ALLOCATOR.lock();
let global_alloc = global_alloc.get();
// TODO: we know this is sorted, so we could abuse that fact to faster insertion in the
// global allocator.
// TODO: we know this is sorted, so we could abuse that fact for
// faster insertion in the global allocator.
alloc.into_inner().inner.for_each(move |block| global_alloc.free(block));
alloc
.into_inner()
.inner
.for_each(move |block| global_alloc.free(block));
}
/// Logging...
log!(NOTE, "Initializing the local allocator.");
// The initial acquired segment.
let initial_segment = GLOBAL_ALLOCATOR
.lock()
.get()
.alloc(4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(), mem::align_of::<Block>());
let initial_segment = GLOBAL_ALLOCATOR.lock().get().alloc(
4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(),
mem::align_of::<Block>(),
);
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
@@ -262,17 +286,19 @@ derive_deref!(LocalAllocator, Bookkeeper);
impl Allocator for LocalAllocator {
#[inline]
fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
// Get the block from the global allocator. Please note that we cannot canonicalize `size`,
// due to freeing excessive blocks would change the order.
// Get the block from the global allocator. Please note that we cannot
// canonicalize `size`, since freeing the excessive blocks would change
// the order.
GLOBAL_ALLOCATOR.lock().get().alloc(size, align)
}
#[inline]
fn on_new_memory(&mut self) {
// The idea is to free memory to the global allocator to unify small stubs and avoid
// fragmentation and thread accumulation.
// The idea is to free memory to the global allocator to unify small
// stubs and avoid fragmentation and thread accumulation.
if self.total_bytes() < config::FRAGMENTATION_SCALE * self.len()
|| self.total_bytes() > config::LOCAL_MEMTRIM_LIMIT {
|| self.total_bytes() > config::LOCAL_MEMTRIM_LIMIT
{
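// Reading of the condition above: memtrim when the average block size
// drops below `FRAGMENTATION_SCALE` bytes, or when the local allocator
// holds more than `LOCAL_MEMTRIM_LIMIT` bytes in total.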
// Log stuff.
log!(NOTE, "Memtrimming the local allocator.");
@@ -285,7 +311,9 @@ impl Allocator for LocalAllocator {
global_alloc.free(block);
// Memtrim 'till we won't memtrim anymore.
if self.total_bytes() < config::LOCAL_MEMTRIM_STOP { break; }
if self.total_bytes() < config::LOCAL_MEMTRIM_STOP {
break;
}
}
}
}
@@ -298,20 +326,25 @@ impl Allocator for LocalAllocator {
/// The OOM handler handles out-of-memory conditions.
#[inline]
pub fn alloc(size: usize, align: usize) -> *mut u8 {
log!(CALL, "Allocating buffer of size {} (align {}).", size, align);
log!(
CALL,
"Allocating buffer of size {} (align {}).",
size,
align
);
get_allocator!(|alloc| Pointer::from(alloc.alloc(size, align)).get())
}
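// A minimal usage sketch for the two entry points above and below
// (hypothetical size and alignment; the caller must uphold the documented
// safety requirements):
//
//     let ptr = alloc(64, 8);      // 64 bytes, aligned to 8
//     unsafe { free(ptr, 64); }    // pass the size of the allocation back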
/// Free a buffer.
///
/// Note that this do not have to be a buffer allocated through ralloc. The only requirement is
/// that it is not used after the free.
/// Note that this does not have to be a buffer allocated through ralloc. The
/// only requirement is that it is not used after the free.
///
/// # Important!
///
/// You should only allocate buffers allocated through `ralloc`. Anything else is considered
/// invalid.
/// You should only free buffers allocated through `ralloc`. Anything else
/// is considered invalid.
///
/// # Errors
///
@@ -319,26 +352,28 @@ pub fn alloc(size: usize, align: usize) -> *mut u8 {
///
/// # Safety
///
/// Rust assume that the allocation symbols returns correct values. For this reason, freeing
/// invalid pointers might introduce memory unsafety.
/// Rust assumes that the allocation symbols return correct values. For this
/// reason, freeing invalid pointers might introduce memory unsafety.
///
/// Secondly, freeing a buffer that is still in use can introduce use-after-free.
#[inline]
pub unsafe fn free(ptr: *mut u8, size: usize) {
log!(CALL, "Freeing buffer of size {}.", size);
get_allocator!(|alloc| alloc.free(Block::from_raw_parts(Pointer::new(ptr), size)))
get_allocator!(
|alloc| alloc.free(Block::from_raw_parts(Pointer::new(ptr), size))
)
}
/// Reallocate memory.
///
/// Reallocate the buffer starting at `ptr` with size `old_size`, to a buffer starting at the
/// returned pointer with size `size`.
/// Reallocate the buffer starting at `ptr` with size `old_size`, to a buffer
/// starting at the returned pointer with size `size`.
///
/// # Important!
///
/// You should only reallocate buffers allocated through `ralloc`. Anything else is considered
/// invalid.
/// You should only reallocate buffers allocated through `ralloc`. Anything
/// else is considered invalid.
///
/// # Errors
///
@@ -346,39 +381,61 @@ pub unsafe fn free(ptr: *mut u8, size: usize) {
///
/// # Safety
///
/// Due to being able to potentially memcpy an arbitrary buffer, as well as shrinking a buffer,
/// this is marked unsafe.
/// Due to being able to potentially memcpy an arbitrary buffer, as well as
/// shrinking a buffer, this is marked unsafe.
#[inline]
pub unsafe fn realloc(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
log!(CALL, "Reallocating buffer of size {} to new size {}.", old_size, size);
get_allocator!(|alloc| {
Pointer::from(alloc.realloc(
Block::from_raw_parts(Pointer::new(ptr), old_size),
size,
align
)).get()
})
pub unsafe fn realloc(
ptr: *mut u8,
old_size: usize,
size: usize,
align: usize,
) -> *mut u8 {
log!(
CALL,
"Reallocating buffer of size {} to new size {}.",
old_size,
size
);
get_allocator!(|alloc| Pointer::from(alloc.realloc(
Block::from_raw_parts(Pointer::new(ptr), old_size),
size,
align
)).get())
}
/// Try to reallocate the buffer _inplace_.
///
/// In case of success, return the new buffer's size. On failure, return the old size.
/// In case of success, return the new buffer's size. On failure, return the
/// old size.
///
/// This can be used to shrink (truncate) a buffer as well.
///
/// # Safety
///
/// Due to being able to shrink (and thus free) the buffer, this is marked unsafe.
/// Due to being able to shrink (and thus free) the buffer, this is marked
/// unsafe.
#[inline]
pub unsafe fn realloc_inplace(ptr: *mut u8, old_size: usize, size: usize) -> Result<(), ()> {
log!(CALL, "Inplace reallocating buffer of size {} to new size {}.", old_size, size);
pub unsafe fn realloc_inplace(
ptr: *mut u8,
old_size: usize,
size: usize,
) -> Result<(), ()> {
log!(
CALL,
"Inplace reallocating buffer of size {} to new size {}.",
old_size,
size
);
get_allocator!(|alloc| {
if alloc.realloc_inplace(
Block::from_raw_parts(Pointer::new(ptr), old_size),
size
).is_ok() {
if alloc
.realloc_inplace(
Block::from_raw_parts(Pointer::new(ptr), old_size),
size,
)
.is_ok()
{
Ok(())
} else {
Err(())
......
//! Memory blocks.
//!
//! Blocks are the main unit for the memory bookkeeping. A block is a simple construct with a
//! `Pointer` pointer and a size. Occupied (non-free) blocks are represented by a zero-sized block.
//! Blocks are the main unit for the memory bookkeeping. A block is a simple
//! construct with a `Pointer` pointer and a size. Occupied (non-free) blocks
//! are represented by a zero-sized block.
// TODO: Check the allow(cast_possible_wrap)s again.
use prelude::*;
use core::{ptr, cmp, mem, fmt};
use core::{cmp, fmt, mem, ptr};
/// A contiguous memory block.
///
/// This provides a number of guarantees,
///
/// 1. The buffer is valid for the block's lifetime, but not necessarily initialized.
/// 2. The Block "owns" the inner data.
/// 3. There is no interior mutability. Mutation requires either mutable access or ownership over
/// the block.
/// 4. The buffer is not aliased. That is, it do not overlap with other blocks or is aliased in any
/// way.
/// 1. The buffer is valid for the block's lifetime, but not necessarily
///    initialized.
/// 2. The Block "owns" the inner data.
/// 3. There is no interior mutability. Mutation requires either mutable access
///    or ownership over the block.
/// 4. The buffer is not aliased. That is, it does not overlap with other
///    blocks, nor is it aliased in any way.
///
/// All this is enforced through the type system. These invariants can only be broken through
/// unsafe code.
/// All this is enforced through the type system. These invariants can only be
/// broken through unsafe code.
///
/// Accessing it through an immutable reference does not break these guarantees. That is, you are
/// not able to read/mutate without acquiring a _mutable_ reference.
/// Accessing it through an immutable reference does not break these
/// guarantees. That is, you are not able to read/mutate without acquiring a
/// _mutable_ reference.
#[must_use]
pub struct Block {
/// The size of this block, in bytes.
@@ -67,8 +69,8 @@ impl Block {
ptr: unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// By the invariants of this type (the end is addressable), this conversion isn't
// overflowing.
// By the invariants of this type (the end is addressable),
// this conversion isn't overflowing.
self.ptr.clone().offset(self.size as isize)
},
}
@@ -76,23 +78,26 @@ impl Block {
/// Merge this block with a block to the right.
///
/// This will simply extend the block, adding the size of the block, and then set the size to
/// zero. The return value is `Ok(())` on success, and `Err(())` on failure (e.g., the blocks
/// are not adjacent).
/// This will simply extend the block, adding the size of the block, and
/// then set the size to zero. The return value is `Ok(())` on success,
/// and `Err(())` on failure (e.g., the blocks are not adjacent).
///
/// If you merge with a zero sized block, it will succeed, even if they are not adjacent.
/// If you merge with a zero sized block, it will succeed, even if they are
/// not adjacent.
#[inline]
pub fn merge_right(&mut self, block: &mut Block) -> Result<(), ()> {
if block.is_empty() {
Ok(())
} else if self.left_to(block) {
// Since the end of `block` is bounded by the address space, adding them cannot
// overflow.
// Since the end of `block` is bounded by the address space, adding
// them cannot overflow.
self.size += block.pop().size;
// We pop it to make sure it isn't aliased.
Ok(())
} else { Err(()) }
} else {
Err(())
}
}
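// A worked example (hypothetical addresses): a block at 0x1000 with size
// 0x10 ends exactly where a 0x10-byte block at 0x1010 begins, so merging
// extends the left block to size 0x20 and pops the right block down to
// size zero.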
/// Is this block empty/free?
@@ -128,7 +133,11 @@ impl Block {
// LAST AUDIT: 2016-08-21 (Ticki).
// From the invariants of `Block`, this copy is well-defined.
ptr::copy_nonoverlapping(self.ptr.get(), block.ptr.get(), self.size);
ptr::copy_nonoverlapping(
self.ptr.get(),
block.ptr.get(),
self.size,
);
}
}
@@ -142,8 +151,8 @@ impl Block {
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// Since the memory of the block is inaccessible (read-wise), zeroing it is fully
// safe.
// Since the memory of the block is inaccessible (read-wise),
// zeroing it is fully safe.
intrinsics::volatile_set_memory(self.ptr.get(), 0, self.size);
}
}
@@ -160,7 +169,8 @@ impl Block {
/// Is this block placed left to the given other block?
#[inline]
pub fn left_to(&self, to: &Block) -> bool {
// This won't overflow due to the end being bounded by the address space.
// This won't overflow due to the end being bounded by the address
// space.
self.size + self.ptr.get() as usize == to.ptr.get() as usize
}
@@ -171,7 +181,12 @@ impl Block {
/// Panics if `pos` is out of bound.
#[inline]
pub fn split(self, pos: usize) -> (Block, Block) {
assert!(pos <= self.size, "Split {} out of bound (size is {})!", pos, self.size);
assert!(
pos <= self.size,
"Split {} out of bound (size is {})!",
pos,
self.size
);
(
Block {
@@ -183,11 +198,12 @@ impl Block {
ptr: unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// This won't overflow due to the assertion above, ensuring that it is bounded
// by the address space. See the `split_at_mut` source from libcore.
// This won't overflow due to the assertion above, ensuring that it
// is bounded by the address space. See the `split_at_mut` source
// from libcore.
self.ptr.offset(pos as isize)
},
}
},
)
}
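// For example (hypothetical sizes): splitting a 64-byte block at `pos == 16`
// yields a 16-byte block at the original pointer and a 48-byte block starting
// 16 bytes further in; a `pos` above 64 trips the assertion above.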
@@ -199,15 +215,15 @@ impl Block {
// Logging.
log!(INTERNAL, "Padding {:?} to align {}", self, align);
// TODO: This functions suffers from external fragmentation. Leaving bigger segments might
// increase performance.
// TODO: This function suffers from external fragmentation. Leaving
// bigger segments might increase performance.
// Calculate the aligner, which defines the smallest size required as precursor to align
// the block to `align`.
// Calculate the aligner, which defines the smallest size required as
// precursor to align the block to `align`.
let aligner = (align - self.ptr.get() as usize % align) % align;
// ^^^^^^^^
// To avoid wasting space on the case where the block is already aligned, we calculate it
// modulo `align`.
// To avoid wasting space on the case where the block is already
// aligned, we calculate it modulo `align`.
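// As a concrete illustration (hypothetical address): a pointer at address
// 13 with `align == 8` gives an aligner of (8 - 13 % 8) % 8 == 3, so three
// padding bytes bring the next address to 16, while an already aligned
// pointer gives (8 - 0) % 8 == 0 and no padding is wasted.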
// Bound check.
if aligner < self.size {
@@ -224,11 +240,12 @@ impl Block {
ptr: unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// The aligner is bounded by the size, which itself is bounded by the
// address space. Therefore, this conversion cannot overflow.
// The aligner is bounded by the size, which itself is
// bounded by the address space.
// Therefore, this conversion cannot overflow.
old.ptr.offset(aligner as isize)
},
}
},
))
} else {
// Logging.
@@ -240,8 +257,8 @@ impl Block {
/// Mark this block free to the debugger.
///
/// The debugger might do things like memleak and use-after-free checks. This methods informs
/// the debugger that this block is freed.
/// The debugger might do things like memleak and use-after-free checks.
/// This method informs the debugger that this block is freed.
#[inline]