diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..f39aaadd3b685a9f67dc0ad6531d7e4dd4ba85b9 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,25 @@ +# This file is a template, and might need editing before it works on your project. +# Official language image. Look for the different tagged releases at: +# https://hub.docker.com/r/library/rust/tags/ +image: "rust:latest" + +# Optional: Pick zero or more services to be used on all builds. +# Only needed when using a docker container to run your tests in. +# Check out: http://docs.gitlab.com/ce/ci/docker/using_docker_images.html#what-is-a-service +#services: +# - mysql:latest +# - redis:latest +# - postgres:latest + +# Optional: Install a C compiler, cmake and git into the container. +# You will often need this when you (or any of your dependencies) depends on C code. +#before_script: +#- apt-get update -yqq +#- apt-get install -yqq --no-install-recommends build-essential + +# Use cargo to test the project +test:cargo: + script: + - rustup toolchain install nightly + - rustc --version && cargo --version # Print version info for debugging + - cargo +nightly test --all --verbose diff --git a/Cargo.toml b/Cargo.toml index 452db24108763f45d65c398ed1f5ff8ac9752fb4..ff0fb7829f41ad9bec923d8a4d87d86fb49971a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ authors = ["ticki <ticki@users.noreply.github.com>"] # URLs and paths description = "An efficient alternative platform-agnostic allocator." -repository = "https://gitlab.redox-os.org/redox-os/ralloc" +repository = "https://github.com/redox-os/ralloc" readme = "README.md" # Metadata diff --git a/benches/mpsc.rs b/benches/mpsc.rs index ec9bfb9b589a1d93b2bcaa367dde4ac7d9f5baa5..af01d5657ddae3d09b9f7a789a6a8a4562b3ea49 100644 --- a/benches/mpsc.rs +++ b/benches/mpsc.rs @@ -3,8 +3,8 @@ extern crate ralloc; extern crate test; -use std::thread; use std::sync::mpsc; +use std::thread; #[bench] fn bench_mpsc(b: &mut test::Bencher) { diff --git a/benches/sbrk.rs b/benches/sbrk.rs index 4242a7c48778ead7d1d973814ecfc0e831af7073..69aa6373cbee0b668c6b2b87fa4f8f6de6c10a73 100644 --- a/benches/sbrk.rs +++ b/benches/sbrk.rs @@ -5,7 +5,5 @@ extern crate test; #[bench] fn bench_sbrk(b: &mut test::Bencher) { - b.iter(|| { - ralloc::sbrk(200).unwrap() - }); + b.iter(|| ralloc::sbrk(200).unwrap()); } diff --git a/benches/vec.rs b/benches/vec.rs index d6a2d779be7d5ff0178f56f8ba6a326b38c37e6c..524030aa546bee97a3d77564f88f8fba638dd6d1 100644 --- a/benches/vec.rs +++ b/benches/vec.rs @@ -8,7 +8,9 @@ fn bench_vec(b: &mut test::Bencher) { b.iter(|| { let mut stuff = Vec::with_capacity(10); - for i in 0..10000 { stuff.push(i) } + for i in 0..10000 { + stuff.push(i) + } stuff.reserve(100000); diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 0000000000000000000000000000000000000000..ad743bdc5732ed555ed1c9575a1c9eeb45c5cf0a --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,3 @@ +wrap_comments = true +max_width = 80 +error_on_line_overflow = true \ No newline at end of file diff --git a/src/allocator.rs b/src/allocator.rs index 68a2afc92a4fa3508af8d74b4f0a55caf5ae7a6c..2d9ad7011795ba3741e6d166e1ed80687a679e46 100644 --- a/src/allocator.rs +++ b/src/allocator.rs @@ -6,22 +6,25 @@ use prelude::*; use core::{mem, ops}; +use bookkeeper::{self, Allocator, Bookkeeper}; use {brk, sync}; -use bookkeeper::{self, Bookkeeper, Allocator}; use shim::config; #[cfg(feature = "tls")] use tls; -/// Alias for the wrapper type of the thread-local variable holding the local 
allocator.
+/// Alias for the wrapper type of the thread-local variable holding the local
+/// allocator.
 #[cfg(feature = "tls")]
-type ThreadLocalAllocator = MoveCell<Option<LazyInit<fn() -> LocalAllocator, LocalAllocator>>>;
+type ThreadLocalAllocator =
+    MoveCell<Option<LazyInit<fn() -> LocalAllocator, LocalAllocator>>>;
 
 /// The global default allocator.
 // TODO: Remove these filthy function pointers.
-static GLOBAL_ALLOCATOR: sync::Mutex<LazyInit<fn() -> GlobalAllocator, GlobalAllocator>> =
-    sync::Mutex::new(LazyInit::new(GlobalAllocator::init));
+static GLOBAL_ALLOCATOR: sync::Mutex<
+    LazyInit<fn() -> GlobalAllocator, GlobalAllocator>,
+> = sync::Mutex::new(LazyInit::new(GlobalAllocator::init));
 #[cfg(feature = "tls")]
 tls! {
     /// The thread-local allocator.
@@ -30,18 +33,19 @@ tls! {
 
 /// Temporarily get the allocator.
 ///
-/// This is simply to avoid repeating ourself, so we let this take care of the hairy stuff:
+/// This is simply to avoid repeating ourselves, so we let this take care of
+/// the hairy stuff:
 ///
 /// 1. Initialize the allocator if needed.
-/// 2. If the allocator is not yet initialized, fallback to the global allocator.
-/// 3. Unlock/move temporarily out of reference.
+/// 2. If the allocator is not yet initialized, fall back to the global allocator.
+/// 3. Unlock/move temporarily out of reference.
 ///
-/// This is a macro due to the lack of generic closure, which makes it impossible to have one
-/// closure for both cases (global and local).
-// TODO: Instead of falling back to the global allocator, the thread dtor should be set such that
-// it run after the TLS keys that might be declared.
+/// This is a macro due to the lack of generic closures, which makes it
+/// impossible to have one closure for both cases (global and local).
+// TODO: Instead of falling back to the global allocator, the thread dtor
+// should be set such that it runs after the TLS keys that might be declared.
 macro_rules! get_allocator {
-    (|$v:ident| $b:expr) => {{
+    (| $v:ident | $b:expr) => {{
         // Get the thread allocator, if TLS is enabled
         #[cfg(feature = "tls")]
         {
@@ -58,9 +62,12 @@ macro_rules! get_allocator {
 
                 res
             } else {
-                // The local allocator seems to have been deinitialized, for this reason we fallback to
-                // the global allocator.
-                log!(WARNING, "Accessing the allocator after deinitialization of the local allocator.");
+                // The local allocator seems to have been deinitialized; for
+                // this reason we fall back to the global allocator.
+                log!(
+                    WARNING,
+                    "Accessing the allocator after deinitialization of the local allocator."
+                );
 
                 // Lock the global allocator.
                 let mut guard = GLOBAL_ALLOCATOR.lock();
@@ -82,7 +89,7 @@ macro_rules! get_allocator {
             let $v = guard.get();
             $b
         }
-    }}
+    }};
 }
 
 /// Derives `Deref` and `DerefMut` to the `inner` field.
@@ -108,9 +115,9 @@ macro_rules! derive_deref {
 
 /// Global SBRK-based allocator.
 ///
-/// This will extend the data segment whenever new memory is needed. Since this includes leaving
-/// userspace, this shouldn't be used when other allocators are available (i.e. the bookkeeper is
-/// local).
+/// This will extend the data segment whenever new memory is needed. Since this
+/// includes leaving userspace, this shouldn't be used when other allocators
+/// are available (i.e. the bookkeeper is local).
 struct GlobalAllocator {
     // The inner bookkeeper.
     inner: Bookkeeper,
@@ -123,8 +130,10 @@ impl GlobalAllocator {
         log!(NOTE, "Initializing the global allocator.");
 
         // The initial acquired segment.
- let (aligner, initial_segment, excessive) = - brk::lock().canonical_brk(4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(), mem::align_of::<Block>()); + let (aligner, initial_segment, excessive) = brk::lock().canonical_brk( + 8 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(), + mem::align_of::<Block>(), + ); // Initialize the new allocator. let mut res = GlobalAllocator { @@ -149,11 +158,13 @@ impl Allocator for GlobalAllocator { #[inline] fn alloc_fresh(&mut self, size: usize, align: usize) -> Block { // Obtain what you need. - let (alignment_block, res, excessive) = brk::lock().canonical_brk(size, align); + let (alignment_block, res, excessive) = + brk::lock().canonical_brk(size, align); - // Add it to the list. This will not change the order, since the pointer is higher than all - // the previous blocks (BRK extends the data segment). Although, it is worth noting that - // the stack is higher than the program break. + // Add it to the list. This will not change the order, since the + // pointer is higher than all the previous blocks (BRK extends + // the data segment). Although, it is worth noting that the + // stack is higher than the program break. self.push(alignment_block); self.push(excessive); @@ -165,7 +176,9 @@ impl Allocator for GlobalAllocator { // memtrim the fack outta 'em. // Pop the last block. - let block = self.pop().expect("The byte count on the global allocator is invalid."); + let block = self + .pop() + .expect("The byte count on the global allocator is invalid."); // Check if the memtrim is worth it. if block.size() >= config::OS_MEMTRIM_WORTHY { @@ -179,9 +192,10 @@ impl Allocator for GlobalAllocator { self.push(block); } - // Note that this block is the only block next to the program break, due to the - // segments being as long as possible. For that reason, repeating to push and - // release would fail. + // Note that this block is the only block next to the program + // break, due to the segments being as long as + // possible. For that reason, repeating to push and + // release would fail. } else { /// Logging... log!(WARNING, "Memtrimming for the global allocator failed."); @@ -196,7 +210,8 @@ impl Allocator for GlobalAllocator { /// A local allocator. /// -/// This acquires memory from the upstream (global) allocator, which is protected by a `Mutex`. +/// This acquires memory from the upstream (global) allocator, which is +/// protected by a `Mutex`. #[cfg(feature = "tls")] pub struct LocalAllocator { // The inner bookkeeper. @@ -211,36 +226,45 @@ impl LocalAllocator { /// The destructor of the local allocator. /// /// This will simply free everything to the global allocator. - extern fn dtor(alloc: &ThreadLocalAllocator) { + extern "C" fn dtor(alloc: &ThreadLocalAllocator) { /// Logging... log!(NOTE, "Deinitializing and freeing the local allocator."); - // This is important! The thread destructors guarantee no other, and thus one could use the - // allocator _after_ this destructor have been finished. In fact, this is a real problem, - // and happens when using `Arc` and terminating the main thread, for this reason we place - // `None` as a permanent marker indicating that the allocator is deinitialized. After such - // a state is in place, all allocation calls will be redirected to the global allocator, - // which is of course still usable at this moment. - let alloc = alloc.replace(None).expect("Thread-local allocator is already freed."); + // This is important! 
The thread destructors guarantee no ordering,
+        // and thus one could use the allocator _after_ this
+        // destructor has finished. In fact, this is a
+        // real problem, and happens when using `Arc` and
+        // terminating the main thread; for this reason we place
+        // `None` as a permanent marker indicating that the allocator is
+        // deinitialized. After such a state is in place, all
+        // allocation calls will be redirected to the global
+        // allocator, which is of course still usable at this
+        // moment.
+        let alloc = alloc
+            .replace(None)
+            .expect("Thread-local allocator is already freed.");
 
         // Lock the global allocator.
         let mut global_alloc = GLOBAL_ALLOCATOR.lock();
         let global_alloc = global_alloc.get();
 
-        // TODO: we know this is sorted, so we could abuse that fact to faster insertion in the
-        // global allocator.
+        // TODO: we know this is sorted, so we could abuse that fact for
+        // faster insertion in the global allocator.
 
-        alloc.into_inner().inner.for_each(move |block| global_alloc.free(block));
+        alloc
+            .into_inner()
+            .inner
+            .for_each(move |block| global_alloc.free(block));
     }
 
     /// Logging...
     log!(NOTE, "Initializing the local allocator.");
 
     // The initial acquired segment.
-    let initial_segment = GLOBAL_ALLOCATOR
-        .lock()
-        .get()
-        .alloc(4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(), mem::align_of::<Block>());
+    let initial_segment = GLOBAL_ALLOCATOR.lock().get().alloc(
+        8 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(),
+        mem::align_of::<Block>(),
+    );
 
     unsafe {
         // LAST AUDIT: 2016-08-21 (Ticki).
@@ -262,17 +286,19 @@ derive_deref!(LocalAllocator, Bookkeeper);
 impl Allocator for LocalAllocator {
     #[inline]
     fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
-        // Get the block from the global allocator. Please note that we cannot canonicalize `size`,
-        // due to freeing excessive blocks would change the order.
+        // Get the block from the global allocator. Please note that we cannot
+        // canonicalize `size`, because freeing excessive blocks would change
+        // the order.
         GLOBAL_ALLOCATOR.lock().get().alloc(size, align)
     }
 
     #[inline]
     fn on_new_memory(&mut self) {
-        // The idea is to free memory to the global allocator to unify small stubs and avoid
-        // fragmentation and thread accumulation.
+        // The idea is to free memory to the global allocator to unify small
+        // stubs and avoid fragmentation and thread accumulation.
         if self.total_bytes() < config::FRAGMENTATION_SCALE * self.len()
-            || self.total_bytes() > config::LOCAL_MEMTRIM_LIMIT {
+            || self.total_bytes() > config::LOCAL_MEMTRIM_LIMIT
+        {
             // Log stuff.
             log!(NOTE, "Memtrimming the local allocator.");
@@ -285,7 +311,9 @@ impl Allocator for LocalAllocator {
             global_alloc.free(block);
 
             // Memtrim 'till we won't memtrim anymore.
-            if self.total_bytes() < config::LOCAL_MEMTRIM_STOP { break; }
+            if self.total_bytes() < config::LOCAL_MEMTRIM_STOP {
+                break;
+            }
         }
     }
 }
@@ -298,20 +326,25 @@ impl Allocator for LocalAllocator {
 /// The OOM handler handles out-of-memory conditions.
 #[inline]
 pub fn alloc(size: usize, align: usize) -> *mut u8 {
-    log!(CALL, "Allocating buffer of size {} (align {}).", size, align);
+    log!(
+        CALL,
+        "Allocating buffer of size {} (align {}).",
+        size,
+        align
+    );
 
     get_allocator!(|alloc| Pointer::from(alloc.alloc(size, align)).get())
 }
 
 /// Free a buffer.
 ///
-/// Note that this do not have to be a buffer allocated through ralloc. The only requirement is
-/// that it is not used after the free.
+/// Note that this does not have to be a buffer allocated through ralloc. 
The
+/// only requirement is that it is not used after the free.
 ///
 /// # Important!
 ///
-/// You should only allocate buffers allocated through `ralloc`. Anything else is considered
-/// invalid.
+/// You should only free buffers allocated through `ralloc`. Anything else
+/// is considered invalid.
 ///
 /// # Errors
 ///
@@ -319,26 +352,28 @@ pub fn alloc(size: usize, align: usize) -> *mut u8 {
 ///
 /// # Safety
 ///
-/// Rust assume that the allocation symbols returns correct values. For this reason, freeing
-/// invalid pointers might introduce memory unsafety.
+/// Rust assumes that the allocation symbols return correct values. For this
+/// reason, freeing invalid pointers might introduce memory unsafety.
 ///
 /// Secondly, freeing an used buffer can introduce use-after-free.
 #[inline]
 pub unsafe fn free(ptr: *mut u8, size: usize) {
     log!(CALL, "Freeing buffer of size {}.", size);
 
-    get_allocator!(|alloc| alloc.free(Block::from_raw_parts(Pointer::new(ptr), size)))
+    get_allocator!(
+        |alloc| alloc.free(Block::from_raw_parts(Pointer::new(ptr), size))
+    )
 }
 
 /// Reallocate memory.
 ///
-/// Reallocate the buffer starting at `ptr` with size `old_size`, to a buffer starting at the
-/// returned pointer with size `size`.
+/// Reallocate the buffer starting at `ptr` with size `old_size`, to a buffer
+/// starting at the returned pointer with size `size`.
 ///
 /// # Important!
 ///
-/// You should only reallocate buffers allocated through `ralloc`. Anything else is considered
-/// invalid.
+/// You should only reallocate buffers allocated through `ralloc`. Anything
+/// else is considered invalid.
 ///
 /// # Errors
 ///
@@ -346,39 +381,61 @@ pub unsafe fn free(ptr: *mut u8, size: usize) {
 ///
 /// # Safety
 ///
-/// Due to being able to potentially memcpy an arbitrary buffer, as well as shrinking a buffer,
-/// this is marked unsafe.
+/// Due to being able to potentially memcpy an arbitrary buffer, as well as
+/// shrinking a buffer, this is marked unsafe.
 #[inline]
-pub unsafe fn realloc(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
-    log!(CALL, "Reallocating buffer of size {} to new size {}.", old_size, size);
-
-    get_allocator!(|alloc| {
-        Pointer::from(alloc.realloc(
-            Block::from_raw_parts(Pointer::new(ptr), old_size),
-            size,
-            align
-        )).get()
-    })
+pub unsafe fn realloc(
+    ptr: *mut u8,
+    old_size: usize,
+    size: usize,
+    align: usize,
+) -> *mut u8 {
+    log!(
+        CALL,
+        "Reallocating buffer of size {} to new size {}.",
+        old_size,
+        size
+    );
+
+    get_allocator!(|alloc| Pointer::from(alloc.realloc(
+        Block::from_raw_parts(Pointer::new(ptr), old_size),
+        size,
+        align
+    )).get())
 }
 
 /// Try to reallocate the buffer _inplace_.
 ///
-/// In case of success, return the new buffer's size. On failure, return the old size.
+/// On success, return `Ok(())`; on failure, return `Err(())` and leave the
+/// buffer at its old size.
 ///
 /// This can be used to shrink (truncate) a buffer as well.
 ///
 /// # Safety
 ///
-/// Due to being able to shrink (and thus free) the buffer, this is marked unsafe.
+/// Due to being able to shrink (and thus free) the buffer, this is marked
+/// unsafe.
 #[inline]
-pub unsafe fn realloc_inplace(ptr: *mut u8, old_size: usize, size: usize) -> Result<(), ()> {
-    log!(CALL, "Inplace reallocating buffer of size {} to new size {}.", old_size, size);
+pub unsafe fn realloc_inplace(
+    ptr: *mut u8,
+    old_size: usize,
+    size: usize,
+) -> Result<(), ()> {
+    log!(
+        CALL,
+        "Inplace reallocating buffer of size {} to new size {}.",
+        old_size,
+        size
+    );
 
     get_allocator!(|alloc| {
-        if alloc.realloc_inplace(
-            Block::from_raw_parts(Pointer::new(ptr), old_size),
-            size
-        ).is_ok() {
+        if alloc
+            .realloc_inplace(
+                Block::from_raw_parts(Pointer::new(ptr), old_size),
+                size,
+            )
+            .is_ok()
+        {
             Ok(())
         } else {
             Err(())
diff --git a/src/block.rs b/src/block.rs
index 5926ea1edc33e68173f296d7e93903e53cb132f4..9f8577168f14f0a636d6d7d086b5365d8a142eb5 100644
--- a/src/block.rs
+++ b/src/block.rs
@@ -1,30 +1,32 @@
 //! Memory blocks.
 //!
-//! Blocks are the main unit for the memory bookkeeping. A block is a simple construct with a
-//! `Pointer` pointer and a size. Occupied (non-free) blocks are represented by a zero-sized block.
+//! Blocks are the main unit for the memory bookkeeping. A block is a simple
+//! construct with a `Pointer` pointer and a size. Occupied (non-free) blocks
+//! are represented by a zero-sized block.
 // TODO: Check the allow(cast_possible_wrap)s again.
 
 use prelude::*;
 
-use core::{ptr, cmp, mem, fmt};
+use core::{cmp, fmt, mem, ptr};
 
 /// A contiguous memory block.
 ///
 /// This provides a number of guarantees,
 ///
-/// 1. The buffer is valid for the block's lifetime, but not necessarily initialized.
-/// 2. The Block "owns" the inner data.
-/// 3. There is no interior mutability. Mutation requires either mutable access or ownership over
-/// the block.
-/// 4. The buffer is not aliased. That is, it do not overlap with other blocks or is aliased in any
-/// way.
+/// 1. The buffer is valid for the block's lifetime, but not necessarily initialized.
+/// 2. The Block "owns" the inner data.
+/// 3. There is no interior mutability. Mutation requires either mutable access
+/// or ownership over the block.
+/// 4. The buffer is not aliased. That is, it does not overlap with other
+/// blocks, nor is it aliased in any way.
 ///
-/// All this is enforced through the type system. These invariants can only be broken through
-/// unsafe code.
+/// All this is enforced through the type system. These invariants can only be
+/// broken through unsafe code.
 ///
-/// Accessing it through an immutable reference does not break these guarantees. That is, you are
-/// not able to read/mutate without acquiring a _mutable_ reference.
+/// Accessing it through an immutable reference does not break these
+/// guarantees. That is, you are not able to read/mutate without acquiring a
+/// _mutable_ reference.
 #[must_use]
 pub struct Block {
     /// The size of this block, in bytes.
@@ -61,15 +63,14 @@ impl Block {
     /// Create an empty block representing the right edge of this block
     #[inline]
-    #[allow(cast_possible_wrap)]
     pub fn empty_right(&self) -> Block {
         Block {
             size: 0,
             ptr: unsafe {
                 // LAST AUDIT: 2016-08-21 (Ticki).
 
-                // By the invariants of this type (the end is addressable), this conversion isn't
-                // overflowing.
+                // By the invariants of this type (the end is addressable),
+                // this conversion isn't overflowing.
                 self.ptr.clone().offset(self.size as isize)
             },
         }
@@ -77,23 +78,26 @@ impl Block {
 
     /// Merge this block with a block to the right.
     ///
-    /// This will simply extend the block, adding the size of the block, and then set the size to
-    /// zero. 
The return value is `Ok(())` on success, and `Err(())` on failure (e.g., the blocks - /// are not adjacent). + /// This will simply extend the block, adding the size of the block, and + /// then set the size to zero. The return value is `Ok(())` on success, + /// and `Err(())` on failure (e.g., the blocks are not adjacent). /// - /// If you merge with a zero sized block, it will succeed, even if they are not adjacent. + /// If you merge with a zero sized block, it will succeed, even if they are + /// not adjacent. #[inline] pub fn merge_right(&mut self, block: &mut Block) -> Result<(), ()> { if block.is_empty() { Ok(()) } else if self.left_to(block) { - // Since the end of `block` is bounded by the address space, adding them cannot - // overflow. + // Since the end of `block` is bounded by the address space, adding + // them cannot overflow. self.size += block.pop().size; // We pop it to make sure it isn't aliased. Ok(()) - } else { Err(()) } + } else { + Err(()) + } } /// Is this block empty/free? @@ -129,7 +133,11 @@ impl Block { // LAST AUDIT: 2016-08-21 (Ticki). // From the invariants of `Block`, this copy is well-defined. - ptr::copy_nonoverlapping(self.ptr.get(), block.ptr.get(), self.size); + ptr::copy_nonoverlapping( + self.ptr.get(), + block.ptr.get(), + self.size, + ); } } @@ -143,8 +151,8 @@ impl Block { unsafe { // LAST AUDIT: 2016-08-21 (Ticki). - // Since the memory of the block is inaccessible (read-wise), zeroing it is fully - // safe. + // Since the memory of the block is inaccessible (read-wise), + // zeroing it is fully safe. intrinsics::volatile_set_memory(self.ptr.get(), 0, self.size); } } @@ -161,7 +169,8 @@ impl Block { /// Is this block placed left to the given other block? #[inline] pub fn left_to(&self, to: &Block) -> bool { - // This won't overflow due to the end being bounded by the address space. + // This won't overflow due to the end being bounded by the address + // space. self.size + self.ptr.get() as usize == to.ptr.get() as usize } @@ -171,9 +180,13 @@ impl Block { /// /// Panics if `pos` is out of bound. #[inline] - #[allow(cast_possible_wrap)] pub fn split(self, pos: usize) -> (Block, Block) { - assert!(pos <= self.size, "Split {} out of bound (size is {})!", pos, self.size); + assert!( + pos <= self.size, + "Split {} out of bound (size is {})!", + pos, + self.size + ); ( Block { @@ -185,11 +198,12 @@ impl Block { ptr: unsafe { // LAST AUDIT: 2016-08-21 (Ticki). - // This won't overflow due to the assertion above, ensuring that it is bounded - // by the address space. See the `split_at_mut` source from libcore. + // This won't overflow due to the assertion above, ensuring + // that it is bounded by the address + // space. See the `split_at_mut` source from libcore. self.ptr.offset(pos as isize) }, - } + }, ) } @@ -197,20 +211,19 @@ impl Block { /// /// Returns an `None` holding the intact block if `align` is out of bounds. #[inline] - #[allow(cast_possible_wrap)] pub fn align(&mut self, align: usize) -> Option<(Block, Block)> { // Logging. log!(INTERNAL, "Padding {:?} to align {}", self, align); - // TODO: This functions suffers from external fragmentation. Leaving bigger segments might - // increase performance. + // TODO: This functions suffers from external fragmentation. Leaving + // bigger segments might increase performance. - // Calculate the aligner, which defines the smallest size required as precursor to align - // the block to `align`. 
+ // Calculate the aligner, which defines the smallest size required as + // precursor to align the block to `align`. let aligner = (align - self.ptr.get() as usize % align) % align; // ^^^^^^^^ - // To avoid wasting space on the case where the block is already aligned, we calculate it - // modulo `align`. + // To avoid wasting space on the case where the block is already + // aligned, we calculate it modulo `align`. // Bound check. if aligner < self.size { @@ -227,11 +240,12 @@ impl Block { ptr: unsafe { // LAST AUDIT: 2016-08-21 (Ticki). - // The aligner is bounded by the size, which itself is bounded by the - // address space. Therefore, this conversion cannot overflow. + // The aligner is bounded by the size, which itself is + // bounded by the address space. + // Therefore, this conversion cannot overflow. old.ptr.offset(aligner as isize) }, - } + }, )) } else { // Logging. @@ -243,8 +257,8 @@ impl Block { /// Mark this block free to the debugger. /// - /// The debugger might do things like memleak and use-after-free checks. This methods informs - /// the debugger that this block is freed. + /// The debugger might do things like memleak and use-after-free checks. + /// This methods informs the debugger that this block is freed. #[inline] pub fn mark_free(self) -> Block { #[cfg(feature = "debugger")] @@ -310,7 +324,10 @@ mod test { fn test_array() { let arr = b"Lorem ipsum dolor sit amet"; let block = unsafe { - Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len()) + Block::from_raw_parts( + Pointer::new(arr.as_ptr() as *mut u8), + arr.len(), + ) }; // Test split. @@ -323,14 +340,20 @@ mod test { assert!(!rest.is_empty()); assert!(lorem.align(2).unwrap().1.aligned_to(2)); assert!(rest.align(15).unwrap().1.aligned_to(15)); - assert_eq!(Pointer::from(lorem).get() as usize + 5, Pointer::from(rest).get() as usize); + assert_eq!( + Pointer::from(lorem).get() as usize + 5, + Pointer::from(rest).get() as usize + ); } #[test] fn test_merge() { let arr = b"Lorem ipsum dolor sit amet"; let block = unsafe { - Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len()) + Block::from_raw_parts( + Pointer::new(arr.as_ptr() as *mut u8), + arr.len(), + ) }; let (mut lorem, mut rest) = block.split(5); @@ -346,7 +369,10 @@ mod test { fn test_oob() { let arr = b"lorem"; let block = unsafe { - Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len()) + Block::from_raw_parts( + Pointer::new(arr.as_ptr() as *mut u8), + arr.len(), + ) }; // Test OOB. 
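The `align` hunks above rewrap the key comment: the padding in front of a misaligned block is computed as `(align - ptr % align) % align`, where the outer modulo makes an already-aligned pointer cost zero bytes. A standalone sketch of that arithmetic (illustrative only; the `aligner` helper and the sample addresses are made up here, not ralloc API):

```rust
// Sketch of the aligner arithmetic used by `Block::align` above.
// `aligner` is a hypothetical helper, not part of ralloc.
fn aligner(ptr: usize, align: usize) -> usize {
    // The outer `% align` makes an already-aligned pointer need zero padding.
    (align - ptr % align) % align
}

fn main() {
    assert_eq!(aligner(0x1000, 16), 0); // already aligned
    assert_eq!(aligner(0x1001, 16), 15); // pad 15 bytes up to 0x1010
    assert_eq!(aligner(0x100F, 16), 1); // pad 1 byte up to 0x1010
    // `Block::align` then splits the block in two: the padding becomes a
    // separate (freed) block, and the remainder starts at an aligned address.
}
```
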
@@ -371,12 +397,18 @@ mod test { fn test_empty_lr() { let arr = b"Lorem ipsum dolor sit amet"; let block = unsafe { - Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len()) + Block::from_raw_parts( + Pointer::new(arr.as_ptr() as *mut u8), + arr.len(), + ) }; assert!(block.empty_left().is_empty()); assert!(block.empty_right().is_empty()); - assert_eq!(Pointer::from(block.empty_left()).get() as *const u8, arr.as_ptr()); + assert_eq!( + Pointer::from(block.empty_left()).get() as *const u8, + arr.as_ptr() + ); assert_eq!(block.empty_right(), block.split(arr.len()).1); } } diff --git a/src/bookkeeper.rs b/src/bookkeeper.rs index d3d85ef1c77db7f850e5d93a7cedeef8113778e3..3b2272bc663bdce264693be4169ea548e2f019ba 100644 --- a/src/bookkeeper.rs +++ b/src/bookkeeper.rs @@ -3,7 +3,7 @@ use prelude::*; use core::ops::Range; -use core::{ptr, mem, ops}; +use core::{mem, ops, ptr}; use shim::config; @@ -69,12 +69,14 @@ pub struct Bookkeeper { id: usize, } -#[allow(len_without_is_empty)] impl Bookkeeper { /// Create a new bookkeeper with some initial vector. pub fn new(vec: Vec<Block>) -> Bookkeeper { // Make sure the assumptions are satisfied. - debug_assert!(vec.capacity() >= EXTRA_ELEMENTS, "Not enough initial capacity of the vector."); + debug_assert!( + vec.capacity() >= EXTRA_ELEMENTS, + "Not enough initial capacity of the vector." + ); debug_assert!(vec.is_empty(), "Initial vector isn't empty."); // TODO: When added use expr field attributes. @@ -114,7 +116,9 @@ impl Bookkeeper { let len = self.pool.len(); // Move left. - ind - self.pool.iter_mut() + ind - self + .pool + .iter_mut() .rev() .skip(len - ind) .take_while(|x| x.is_empty()) @@ -137,7 +141,9 @@ impl Bookkeeper { let len = self.pool.len(); // Move left. - left_ind -= self.pool.iter_mut() + left_ind -= self + .pool + .iter_mut() .rev() .skip(len - left_ind) .take_while(|x| x.is_empty()) @@ -148,7 +154,9 @@ impl Bookkeeper { }; // Move right. - right_ind += self.pool.iter() + right_ind += self + .pool + .iter() .skip(right_ind) .take_while(|x| x.is_empty()) .count(); @@ -215,9 +223,11 @@ impl Bookkeeper { let mut it = self.pool.iter().enumerate().rev(); // Check that the capacity is large enough. - assert!(self.reserving || self.pool.len() + EXTRA_ELEMENTS <= self.pool.capacity(), - "The capacity should be at least {} more than the length of the pool.", - EXTRA_ELEMENTS); + assert!( + self.reserving || self.pool.len() + EXTRA_ELEMENTS <= self.pool.capacity(), + "The capacity should be at least {} more than the length of the pool.", + EXTRA_ELEMENTS + ); if let Some((_, x)) = it.next() { // Make sure there are no leading empty blocks. @@ -230,26 +240,51 @@ impl Bookkeeper { total_bytes += i.size(); // Check if sorted. - assert!(next >= i, "The block pool is not sorted at index, {} ({:?} < {:?}).", - n, next, i); + assert!( + next >= i, + "The block pool is not sorted at index, {} ({:?} < {:?}).", + n, + next, + i + ); // Make sure no blocks are adjacent. - assert!(!i.left_to(next) || i.is_empty(), "Adjacent blocks at index, {} ({:?} and \ - {:?})", n, i, next); + assert!( + !i.left_to(next) || i.is_empty(), + "Adjacent blocks at index, {} ({:?} and \ + {:?})", + n, + i, + next + ); // Make sure an empty block has the same address as its right neighbor. 
- assert!(!i.is_empty() || i == next, "Empty block not adjacent to right neighbor \ - at index {} ({:?} and {:?})", n, i, next); + assert!( + !i.is_empty() || i == next, + "Empty block not adjacent to right neighbor \ + at index {} ({:?} and {:?})", + n, + i, + next + ); // Set the variable tracking the previous block. next = i; } // Check for trailing empty blocks. - assert!(!self.pool.last().unwrap().is_empty(), "Trailing empty blocks."); + assert!( + !self.pool.last().unwrap().is_empty(), + "Trailing empty blocks." + ); } // Make sure the sum is maintained properly. - assert!(total_bytes == self.total_bytes, "The sum is not equal to the 'total_bytes' \ - field: {} ≠{}.", total_bytes, self.total_bytes); + assert!( + total_bytes == self.total_bytes, + "The sum is not equal to the 'total_bytes' \ + field: {} ≠{}.", + total_bytes, + self.total_bytes + ); } } } @@ -332,25 +367,31 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> { // Logging. bk_log!(self, "Allocating {} bytes with alignment {}.", size, align); - if let Some((n, b)) = self.pool.iter_mut().enumerate().filter_map(|(n, i)| { - if i.size() >= size { - // Try to split at the aligner. - i.align(align).and_then(|(mut a, mut b)| { - if b.size() >= size { - // Override the old block. - *i = a; - Some((n, b)) - } else { - // Put the split block back together and place it back in its spot. - a.merge_right(&mut b).expect("Unable to merge block right."); - *i = a; - None - } - }) - } else { - None - } - }).next() { + if let Some((n, b)) = self + .pool + .iter_mut() + .enumerate() + .filter_map(|(n, i)| { + if i.size() >= size { + // Try to split at the aligner. + i.align(align).and_then(|(mut a, mut b)| { + if b.size() >= size { + // Override the old block. + *i = a; + Some((n, b)) + } else { + // Put the split block back together and place it back in its spot. + a.merge_right(&mut b).expect("Unable to merge block right."); + *i = a; + None + } + }) + } else { + None + } + }) + .next() + { // Update the pool byte count. self.total_bytes -= b.size(); @@ -369,8 +410,11 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> { // Check consistency. self.check(); debug_assert!(res.aligned_to(align), "Alignment failed."); - debug_assert!(res.size() == size, "Requested space does not match with the returned \ - block."); + debug_assert!( + res.size() == size, + "Requested space does not match with the returned \ + block." + ); res } else { @@ -490,11 +534,14 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> { // Check consistency. self.check(); debug_assert!(res.aligned_to(align), "Alignment failed."); - debug_assert!(res.size() >= new_size, "Requested space does not match with the \ - returned block."); + debug_assert!( + res.size() >= new_size, + "Requested space does not match with the \ + returned block." + ); res - }, + } } } @@ -520,8 +567,11 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> { let res = self.realloc_inplace_bound(bound, block, new_size); // Check consistency. - debug_assert!(res.as_ref().ok().map_or(true, |x| x.size() == new_size), "Requested space \ - does not match with the returned block."); + debug_assert!( + res.as_ref().ok().map_or(true, |x| x.size() == new_size), + "Requested space \ + does not match with the returned block." + ); res } @@ -529,13 +579,21 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> { /// Reallocate a block on a know index bound inplace. /// /// See [`realloc_inplace`](#method.realloc_inplace.html) for more information. 
- fn realloc_inplace_bound(&mut self, ind: Range<usize>, mut block: Block, new_size: usize) -> Result<Block, Block> { + fn realloc_inplace_bound( + &mut self, + ind: Range<usize>, + mut block: Block, + new_size: usize, + ) -> Result<Block, Block> { // Logging. bk_log!(self;ind, "Try inplace reallocating {:?} to size {}.", block, new_size); /// Assertions... - debug_assert!(self.find(&block) == ind.start, "Block is not inserted at the appropriate \ - index."); + debug_assert!( + self.find(&block) == ind.start, + "Block is not inserted at the appropriate \ + index." + ); if new_size <= block.size() { // Shrink the block. @@ -554,7 +612,7 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> { return Ok(block); - // We check if `ind` is the end of the array. + // We check if `ind` is the end of the array. } else { let mut mergable = false; if let Some(entry) = self.pool.get_mut(ind.end) { @@ -568,7 +626,8 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> { bk_log!(self;ind, "Merging {:?} to the right.", block); // We'll merge it with the block at the end of the range. - block.merge_right(&mut self.remove_at(ind.end)) + block + .merge_right(&mut self.remove_at(ind.end)) .expect("Unable to merge block right, to the end of the range."); // Merge succeeded. @@ -604,7 +663,9 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> { bk_log!(self;ind, "Freeing {:?}.", block); // Short circuit in case of empty block. - if block.is_empty() { return; } + if block.is_empty() { + return; + } // When compiled with `security`, we zero this block. block.sec_zero(); @@ -615,24 +676,34 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> { } // Assertions... - debug_assert!(self.find(&block) == ind.start, "Block is not inserted at the appropriate \ - index."); + debug_assert!( + self.find(&block) == ind.start, + "Block is not inserted at the appropriate \ + index." + ); // Try to merge it with the block to the right. if ind.end < self.pool.len() && block.left_to(&self.pool[ind.end]) { // Merge the block with the rightmost block in the range. - block.merge_right(&mut self.remove_at(ind.end)) + block + .merge_right(&mut self.remove_at(ind.end)) .expect("Unable to merge block right to the block at the end of the range"); // The merging succeeded. We proceed to try to close in the possible gap. + let size = block.size(); if ind.start != 0 && self.pool[ind.start - 1].merge_right(&mut block).is_ok() { - // Check consistency. - self.check(); - - return; + self.total_bytes += size; } + // Check consistency. + self.check(); + + return; // Dammit, let's try to merge left. - } else if ind.start != 0 && self.pool[ind.start - 1].merge_right(&mut block).is_ok() { + } else if ind.start != 0 && self.pool[ind.start - 1].left_to(&block) { + let size = block.size(); + if self.pool[ind.start - 1].merge_right(&mut block).is_ok() { + self.total_bytes += size; + } // Check consistency. self.check(); @@ -653,7 +724,12 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> { /// The returned pointer is guaranteed to be aligned to `align`. fn alloc_external(&mut self, size: usize, align: usize) -> Block { // Logging. - bk_log!(self, "Fresh allocation of size {} with alignment {}.", size, align); + bk_log!( + self, + "Fresh allocation of size {} with alignment {}.", + size, + align + ); // Break it to me! let res = self.alloc_fresh(size, align); @@ -682,8 +758,11 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> { self.total_bytes += block.size(); // Some assertions... 
- debug_assert!(self.pool.is_empty() || &block > self.pool.last().unwrap(), "Pushing will \ - make the list unsorted."); + debug_assert!( + self.pool.is_empty() || &block > self.pool.last().unwrap(), + "Pushing will \ + make the list unsorted." + ); // We will try to simply merge it with the last block. if let Some(x) = self.pool.last_mut() { @@ -708,7 +787,6 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> { } } - // Merging failed. Note that trailing empty blocks are not allowed, hence the last block is // the only non-empty candidate which may be adjacent to `block`. @@ -743,7 +821,10 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> { // Logging. bk_log!(self;min_cap, "Reserving {}.", min_cap); - if !self.reserving && (self.pool.capacity() < self.pool.len() + EXTRA_ELEMENTS || self.pool.capacity() < min_cap + EXTRA_ELEMENTS) { + if !self.reserving + && (self.pool.capacity() < self.pool.len() + EXTRA_ELEMENTS + || self.pool.capacity() < min_cap + EXTRA_ELEMENTS) + { // Reserve a little extra for performance reasons. // TODO: This should be moved to some new method. let new_cap = min_cap + EXTRA_ELEMENTS + config::extra_fresh(min_cap); @@ -755,7 +836,8 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> { self.reserving = true; // Break it to me! - let new_buf = self.alloc_external(new_cap * mem::size_of::<Block>(), mem::align_of::<Block>()); + let new_buf = + self.alloc_external(new_cap * mem::size_of::<Block>(), mem::align_of::<Block>()); // Go back to the original state. self.reserving = false; @@ -842,9 +924,16 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> { assert!(self.pool.len() >= ind, "Insertion out of bounds."); // Some assertions... - debug_assert!(self.pool.len() <= ind || block <= self.pool[ind], "Inserting at {} will make \ - the list unsorted.", ind); - debug_assert!(self.find(&block) == ind, "Block is not inserted at the appropriate index."); + debug_assert!( + self.pool.len() <= ind || block <= self.pool[ind], + "Inserting at {} will make \ + the list unsorted.", + ind + ); + debug_assert!( + self.find(&block) == ind, + "Block is not inserted at the appropriate index." + ); debug_assert!(!block.is_empty(), "Inserting an empty block."); // Trigger the new memory event handler. @@ -872,29 +961,31 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> { // LAST AUDIT: 2016-08-21 (Ticki). // Memmove the elements to make a gap to the new block. - ptr::copy(self.pool.get_unchecked(ind) as *const Block, - self.pool.get_unchecked_mut(ind + 1) as *mut Block, - // The gap defaults to the end of the pool. - gap.unwrap_or_else(|| { - // We will only extend the length if we were unable to fit it into the current length. + ptr::copy( + self.pool.get_unchecked(ind) as *const Block, + self.pool.get_unchecked_mut(ind + 1) as *mut Block, + // The gap defaults to the end of the pool. + gap.unwrap_or_else(|| { + // We will only extend the length if we were unable to fit it into the current length. - // Loooooooging... - bk_log!(self;ind, "Block pool not long enough for shift. Extending."); + // Loooooooging... + bk_log!(self;ind, "Block pool not long enough for shift. Extending."); - // Reserve space. This does not break order, due to the assumption that - // `reserve` never breaks order. - old_buf = unborrow!(self.reserve(self.pool.len() + 1)); + // Reserve space. This does not break order, due to the assumption that + // `reserve` never breaks order. 
+ old_buf = unborrow!(self.reserve(self.pool.len() + 1)); - // We will move a block into reserved memory but outside of the vec's bounds. For - // that reason, we push an uninitialized element to extend the length, which will - // be assigned in the memcpy. - let res = self.pool.push(mem::uninitialized()); + // We will move a block into reserved memory but outside of the vec's bounds. For + // that reason, we push an uninitialized element to extend the length, which will + // be assigned in the memcpy. + let res = self.pool.push(mem::uninitialized()); - // Just some assertions... - debug_assert!(res.is_ok(), "Push failed (buffer full)."); + // Just some assertions... + debug_assert!(res.is_ok(), "Push failed (buffer full)."); - self.pool.len() - 1 - }) - ind); + self.pool.len() - 1 + }) - ind, + ); // Update the pool byte count. self.total_bytes += block.size(); @@ -920,7 +1011,8 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> { let res = if ind + 1 == self.pool.len() { let block = self.pool[ind].pop(); // Make sure there are no trailing empty blocks. - let new_len = self.pool.len() - self.pool.iter().rev().take_while(|x| x.is_empty()).count(); + let new_len = + self.pool.len() - self.pool.iter().rev().take_while(|x| x.is_empty()).count(); // Truncate the vector. self.pool.truncate(new_len); @@ -936,7 +1028,13 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> { // Iterate over the pool from `ind` and down and set it to the empty of our block. let skip = self.pool.len() - ind; - for place in self.pool.iter_mut().rev().skip(skip).take_while(|x| x.is_empty()) { + for place in self + .pool + .iter_mut() + .rev() + .skip(skip) + .take_while(|x| x.is_empty()) + { // Empty the blocks. *place = empty2.empty_left(); } diff --git a/src/brk.rs b/src/brk.rs index fc2ce14d75e8306c56f2879b37fa443137b4de8c..1536eda3c90db555b5d2ab38f4b6acbb5a59c55b 100644 --- a/src/brk.rs +++ b/src/brk.rs @@ -4,19 +4,17 @@ use prelude::*; -use core::ptr; use core::convert::TryInto; +use core::ptr; -use shim::{syscalls, config}; +use shim::{config, syscalls}; -use {sync, fail}; +use {fail, sync}; /// The BRK mutex. /// /// This is used for avoiding data races in multiple allocator. -static BRK_MUTEX: Mutex<BrkState> = Mutex::new(BrkState { - current_brk: None, -}); +static BRK_MUTEX: Mutex<BrkState> = Mutex::new(BrkState { current_brk: None }); /// A cache of the BRK state. /// @@ -73,7 +71,6 @@ impl BrkLock { /// Safely release memory to the OS. /// /// If failed, we return the memory. - #[allow(cast_possible_wrap)] pub fn release(&mut self, block: Block) -> Result<(), Block> { // Check if we are actually next to the program break. if self.current_brk() == Pointer::from(block.empty_right()) { @@ -109,9 +106,12 @@ impl BrkLock { if let Some(ref cur) = self.state.current_brk { let res = cur.clone(); // Make sure that the break is set properly (i.e. there is no libc interference). - debug_assert!(res == current_brk(), "The cached program break is out of sync with the \ - actual program break. Are you interfering with BRK? If so, prefer the \ - provided 'sbrk' instead, then."); + debug_assert!( + res == current_brk(), + "The cached program break is out of sync with the \ + actual program break. Are you interfering with BRK? If so, prefer the \ + provided 'sbrk' instead, then." + ); return res; } @@ -147,17 +147,22 @@ impl BrkLock { Block::from_raw_parts( // Important! The conversion is failable to avoid arithmetic overflow-based // attacks. 
- self.sbrk(brk_size.try_into().unwrap()).unwrap_or_else(|()| fail::oom()), + self.sbrk(brk_size.try_into().unwrap()) + .unwrap_or_else(|()| fail::oom()), brk_size, ) - }.align(align).unwrap(); + }.align(align) + .unwrap(); // Split the block to leave the excessive space. let (res, excessive) = rest.split(size); // Make some assertions. debug_assert!(res.aligned_to(align), "Alignment failed."); - debug_assert!(res.size() + alignment_block.size() + excessive.size() == brk_size, "BRK memory leak."); + debug_assert!( + res.size() + alignment_block.size() + excessive.size() == brk_size, + "BRK memory leak." + ); (alignment_block, res, excessive) } @@ -181,8 +186,11 @@ pub fn lock() -> BrkLock { /// # Failure /// /// On failure the maximum pointer (`!0 as *mut u8`) is returned. -pub unsafe extern fn sbrk(size: isize) -> *mut u8 { - lock().sbrk(size).unwrap_or_else(|()| Pointer::new(!0 as *mut u8)).get() +pub unsafe extern "C" fn sbrk(size: isize) -> *mut u8 { + lock() + .sbrk(size) + .unwrap_or_else(|()| Pointer::new(!0 as *mut u8)) + .get() } /// Get the current program break. diff --git a/src/cell.rs b/src/cell.rs index 713e2a32b5c1580537a8f30866ef6179046c7f5f..8be0d5c2a25b532db8fe2cc1d652dd5635ae03b8 100644 --- a/src/cell.rs +++ b/src/cell.rs @@ -25,13 +25,16 @@ impl<T> MoveCell<T> { /// Replace the inner data and return the old. #[inline] pub fn replace(&self, new: T) -> T { - mem::replace(unsafe { - // LAST AUDIT: 2016-08-21 (Ticki). + mem::replace( + unsafe { + // LAST AUDIT: 2016-08-21 (Ticki). - // This is safe due to never aliasing the value, but simply transfering ownership to - // the caller. - &mut *self.inner.get() - }, new) + // This is safe due to never aliasing the value, but simply transfering ownership to + // the caller. + &mut *self.inner.get() + }, + new, + ) } } diff --git a/src/fail.rs b/src/fail.rs index 4caa01a470c024e21d501ad1ed3f4063c9177134..ae17c8818e8a0fb27e62a7533c01d78ddbc68398 100644 --- a/src/fail.rs +++ b/src/fail.rs @@ -2,8 +2,8 @@ use prelude::*; -use core::sync::atomic::{self, AtomicPtr}; use core::mem; +use core::sync::atomic::{self, AtomicPtr}; use shim::config; diff --git a/src/lib.rs b/src/lib.rs index 463e7b653fd1107a35f5208f19b9f3cb5dd9c570..b03089511eff694e130e3a8e35f940d24459643c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -11,19 +11,14 @@ #![cfg_attr(feature = "clippy", feature(plugin))] #![cfg_attr(feature = "clippy", plugin(clippy))] - #![no_std] +#![feature( + allocator_api, const_fn, core_intrinsics, stmt_expr_attributes, optin_builtin_traits, + type_ascription, thread_local, linkage, try_from, const_unsafe_cell_new, const_atomic_bool_new, + const_nonzero_new, const_atomic_ptr_new +)] +#![warn(missing_docs)] -#![feature(alloc, allocator_api, const_fn, core_intrinsics, stmt_expr_attributes, drop_types_in_const, - nonzero, optin_builtin_traits, type_ascription, thread_local, linkage, - try_from, const_unsafe_cell_new, const_atomic_bool_new, const_nonzero_new, - const_atomic_ptr_new)] -#![warn(missing_docs, cast_precision_loss, cast_sign_loss, cast_possible_wrap, - cast_possible_truncation, filter_map, if_not_else, items_after_statements, - invalid_upcast_comparisons, mutex_integer, nonminimal_bool, shadow_same, shadow_unrelated, - single_match_else, string_add, string_add_assign, wrong_pub_self_convention)] - -extern crate alloc; extern crate ralloc_shim as shim; #[macro_use] @@ -48,7 +43,9 @@ mod ptr; mod sync; mod vec; -use alloc::heap::{Alloc, AllocErr, Layout, CannotReallocInPlace}; +use core::alloc::GlobalAlloc; +use core::alloc::{Alloc, 
AllocErr, CannotReallocInPlace, Layout}; +use core::ptr::NonNull; pub use allocator::{alloc, free, realloc, realloc_inplace}; pub use brk::sbrk; @@ -56,31 +53,57 @@ pub use fail::set_oom_handler; #[cfg(feature = "tls")] pub use fail::set_thread_oom_handler; +/// The rallocator pub struct Allocator; unsafe impl<'a> Alloc for &'a Allocator { - unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { - Ok(allocator::alloc(layout.size(), layout.align())) + unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> { + let ptr = allocator::alloc(layout.size(), layout.align()); + if ptr.is_null() { + Err(AllocErr) + } else { + Ok(NonNull::new_unchecked(ptr)) + } } - unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) { - allocator::free(ptr, layout.size()); + unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) { + allocator::free(ptr.as_ptr(), layout.size()); } - unsafe fn realloc(&mut self, ptr: *mut u8, layout: Layout, new_layout: Layout) -> Result<*mut u8, AllocErr> { - Ok(allocator::realloc(ptr, layout.size(), new_layout.size(), new_layout.align())) + unsafe fn realloc( + &mut self, + ptr: NonNull<u8>, + layout: Layout, + new_size: usize, + ) -> Result<NonNull<u8>, AllocErr> { + let ptr = allocator::realloc(ptr.as_ptr(), layout.size(), new_size, layout.align()); + if ptr.is_null() { + Err(AllocErr) + } else { + Ok(NonNull::new_unchecked(ptr)) + } } - unsafe fn grow_in_place(&mut self, ptr: *mut u8, layout: Layout, new_layout: Layout) -> Result<(), CannotReallocInPlace> { - if allocator::realloc_inplace(ptr, layout.size(), new_layout.size()).is_ok() { + unsafe fn grow_in_place( + &mut self, + ptr: NonNull<u8>, + layout: Layout, + new_size: usize, + ) -> Result<(), CannotReallocInPlace> { + if allocator::realloc_inplace(ptr.as_ptr(), layout.size(), new_size).is_ok() { Ok(()) } else { Err(CannotReallocInPlace) } } - unsafe fn shrink_in_place(&mut self, ptr: *mut u8, layout: Layout, new_layout: Layout) -> Result<(), CannotReallocInPlace> { - if allocator::realloc_inplace(ptr, layout.size(), new_layout.size()).is_ok() { + unsafe fn shrink_in_place( + &mut self, + ptr: NonNull<u8>, + layout: Layout, + new_size: usize, + ) -> Result<(), CannotReallocInPlace> { + if allocator::realloc_inplace(ptr.as_ptr(), layout.size(), new_size).is_ok() { Ok(()) } else { Err(CannotReallocInPlace) @@ -92,3 +115,12 @@ unsafe impl<'a> Alloc for &'a Allocator { (layout.size(), layout.size()) } } + +unsafe impl GlobalAlloc for Allocator { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + allocator::alloc(layout.size(), layout.align()) + } + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + allocator::free(ptr, layout.size()); + } +} diff --git a/src/log.rs b/src/log.rs index 4d2fdc99c78058e186b50822001cea71a0abda5e..0bff30b7d0f5dff78a6684a0a1a4a02769dc2fbe 100644 --- a/src/log.rs +++ b/src/log.rs @@ -125,13 +125,13 @@ macro_rules! debug_assert { #[cfg(feature = "write")] #[macro_export] macro_rules! assert_eq { - ($left:expr, $right:expr) => ({ + ($left:expr, $right:expr) => {{ // We evaluate _once_. let left = &$left; let right = &$right; assert!(left == right, "(left: '{:?}', right: '{:?}')", left, right) - }) + }}; } /// Top-secret module. @@ -139,8 +139,8 @@ macro_rules! 
assert_eq { pub mod internal { use prelude::*; - use core::fmt; use core::cell::Cell; + use core::fmt; use core::ops::Range; use shim::config; @@ -179,7 +179,11 @@ pub mod internal { impl fmt::Write for LogWriter { fn write_str(&mut self, s: &str) -> fmt::Result { - if config::log(s) == !0 { Err(fmt::Error) } else { Ok(()) } + if config::log(s) == !0 { + Err(fmt::Error) + } else { + Ok(()) + } } } @@ -254,9 +258,13 @@ pub mod internal { } impl Cursor for () { - fn at(&self, _: &mut fmt::Formatter, _: usize) -> fmt::Result { Ok(()) } + fn at(&self, _: &mut fmt::Formatter, _: usize) -> fmt::Result { + Ok(()) + } - fn after(&self, _: &mut fmt::Formatter) -> fmt::Result { Ok(()) } + fn after(&self, _: &mut fmt::Formatter) -> fmt::Result { + Ok(()) + } } impl IntoCursor for () { @@ -286,16 +294,16 @@ pub mod internal { Ok(()) } - fn after(&self, _: &mut fmt::Formatter) -> fmt::Result { Ok(()) } + fn after(&self, _: &mut fmt::Formatter) -> fmt::Result { + Ok(()) + } } impl IntoCursor for Range<usize> { type Cursor = RangeCursor; fn into_cursor(self) -> RangeCursor { - RangeCursor { - range: self, - } + RangeCursor { range: self } } } @@ -341,7 +349,6 @@ pub mod internal { } /// Check if this log level is enabled. - #[allow(absurd_extreme_comparisons)] #[inline] pub fn level(lv: u8) -> bool { lv >= config::MIN_LOG_LEVEL diff --git a/src/prelude.rs b/src/prelude.rs index 05d748a2d1b4e2a3d1b1204fcb625518e1a47428..f22327556f3d9a7d5281d17599e381ba4c742484 100644 --- a/src/prelude.rs +++ b/src/prelude.rs @@ -5,6 +5,6 @@ pub use block::Block; pub use cell::MoveCell; pub use lazy_init::LazyInit; -pub use sync::Mutex; pub use ptr::Pointer; +pub use sync::Mutex; pub use vec::Vec; diff --git a/src/ptr.rs b/src/ptr.rs index 65b38dc04f202489c454319a44b38fed873d42ce..e6b44984f8376a280a31c43fb0cc9b0345758a74 100644 --- a/src/ptr.rs +++ b/src/ptr.rs @@ -1,7 +1,7 @@ //! Pointer wrappers. -use core::nonzero::NonZero; -use core::{ops, marker}; +use core::marker; +use core::ptr::NonNull; /// A pointer wrapper type. /// @@ -10,7 +10,7 @@ use core::{ops, marker}; #[derive(PartialEq, Eq, Debug, Clone)] pub struct Pointer<T> { /// The internal pointer. - ptr: NonZero<*mut T>, + ptr: NonNull<T>, /// Associated phantom data. /// /// This indicates that we _own_ T. @@ -30,7 +30,7 @@ impl<T> Pointer<T> { debug_assert!(!ptr.is_null(), "Null pointer!"); Pointer { - ptr: NonZero::new_unchecked(ptr), + ptr: NonNull::new_unchecked(ptr), _phantom: marker::PhantomData, } } @@ -45,7 +45,7 @@ impl<T> Pointer<T> { // LAST AUDIT: 2016-08-21 (Ticki). // 0x1 is non-zero. - NonZero::new_unchecked(0x1 as *mut T) + NonNull::new_unchecked(0x1 as *mut T) }, _phantom: marker::PhantomData, } @@ -61,7 +61,7 @@ impl<T> Pointer<T> { // LAST AUDIT: 2016-08-21 (Ticki). // Casting the pointer will preserve its nullable state. - NonZero::new_unchecked(self.get() as *mut U) + NonNull::new_unchecked(self.get() as *mut U) }, _phantom: marker::PhantomData, } @@ -76,11 +76,11 @@ impl<T> Pointer<T> { /// This is unsafe, due to OOB offsets being undefined behavior. #[inline] pub unsafe fn offset(self, diff: isize) -> Pointer<T> { - Pointer::new(self.ptr.get().offset(diff)) + Pointer::new(self.ptr.as_ptr().offset(diff)) } pub fn get(&self) -> *mut T { - self.ptr.get() + self.ptr.as_ptr() } } diff --git a/src/sync.rs b/src/sync.rs index 8b2b2c66c3c69dc1a1ea81fc120716ea665838d8..548132eb297a057de7aff20caacb23fc294635f9 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -1,8 +1,8 @@ //! Synchronization primitives. 
use core::cell::UnsafeCell; -use core::sync::atomic::{self, AtomicBool}; use core::ops; +use core::sync::atomic::{self, AtomicBool}; use shim; @@ -37,7 +37,10 @@ impl<T> Mutex<T> { pub fn lock(&self) -> MutexGuard<T> { // Lock the mutex. #[cfg(not(feature = "unsafe_no_mutex_lock"))] - while self.locked.compare_and_swap(false, true, atomic::Ordering::SeqCst) { + while self + .locked + .compare_and_swap(false, true, atomic::Ordering::SeqCst) + { // ,___, // {O,o} // |)``) @@ -45,9 +48,7 @@ impl<T> Mutex<T> { shim::syscalls::sched_yield(); } - MutexGuard { - mutex: self, - } + MutexGuard { mutex: self } } } diff --git a/src/tls.rs b/src/tls.rs index 4939239caf55d751ecec73d83371a11cca66d673..ca6ae42c7d229558ee6402558712f482a69ba8a3 100644 --- a/src/tls.rs +++ b/src/tls.rs @@ -30,7 +30,9 @@ impl<T: 'static> Key<T> { /// another thread. #[inline] pub fn with<F, R>(&self, f: F) -> R - where F: FnOnce(&T) -> R { + where + F: FnOnce(&T) -> R, + { // Logging. log!(INTERNAL, "Accessing TLS variable."); @@ -42,7 +44,7 @@ impl<T: 'static> Key<T> { /// Note that this has to be registered for every thread, it is needed for. // TODO: Make this automatic on `Drop`. #[inline] - pub fn register_thread_destructor(&self, dtor: extern fn(&T)) { + pub fn register_thread_destructor(&self, dtor: extern "C" fn(&T)) { // Logging. log!(INTERNAL, "Registering thread destructor."); diff --git a/src/vec.rs b/src/vec.rs index 5d3c8d15520f1599ed52f741700788aa926defb8..445c1168b64ca1cff40450564ceb7f474e764af5 100644 --- a/src/vec.rs +++ b/src/vec.rs @@ -2,7 +2,7 @@ use prelude::*; -use core::{slice, ops, mem, ptr}; +use core::{mem, ops, ptr, slice}; use leak::Leak; @@ -54,7 +54,10 @@ impl<T: Leak> Vec<T> { let new_cap = block.size() / mem::size_of::<T>(); // Make some assertions. - assert!(self.len <= new_cap, "Block not large enough to cover the vector."); + assert!( + self.len <= new_cap, + "Block not large enough to cover the vector." + ); assert!(block.aligned_to(mem::align_of::<T>()), "Block not aligned."); let old = mem::replace(self, Vec::default()); @@ -84,7 +87,6 @@ impl<T: Leak> Vec<T> { /// /// On success, return `Ok(())`. On failure (not enough capacity), return `Err(())`. #[inline] - #[allow(cast_possible_wrap)] pub fn push(&mut self, elem: T) -> Result<(), ()> { if self.len == self.cap { Err(()) @@ -141,9 +143,7 @@ impl<T: Leak> Vec<T> { /// Yield an iterator popping from the vector. pub fn pop_iter(&mut self) -> PopIter<T> { - PopIter { - vec: self, - } + PopIter { vec: self } } } @@ -220,7 +220,7 @@ mod test { let mut vec = unsafe { Vec::from_raw_parts( Block::from_raw_parts(Pointer::new(&mut buffer[0] as *mut u8), 32), - 16 + 16, ) }; @@ -233,8 +233,11 @@ mod test { assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc"); unsafe { - assert_eq!(vec.refill( - Block::from_raw_parts(Pointer::new(&mut buffer[0] as *mut u8), 32)).size(), + assert_eq!( + vec.refill(Block::from_raw_parts( + Pointer::new(&mut buffer[0] as *mut u8), + 32 + )).size(), 32 ); } @@ -247,13 +250,14 @@ mod test { assert_eq!(vec.pop().unwrap(), b'_'); vec.push(b'@').unwrap(); - vec.push(b'!').unwrap_err(); assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc_____________@"); assert_eq!(vec.capacity(), 32); - for _ in 0..32 { vec.pop().unwrap(); } + for _ in 0..32 { + vec.pop().unwrap(); + } assert!(vec.pop().is_none()); assert!(vec.pop().is_none()); diff --git a/tests/arc.rs b/tests/arc.rs index 4e109e5e20d7f7994cae81e722b6783d5132cc7d..d3d3158faee545973dbe4eeb912503bc21bf731a 100644 --- a/tests/arc.rs +++ b/tests/arc.rs @@ -1,4 +1,5 @@ -//! 
This test is a more subtle one. It is one which can hit thread destructors unexpectedly. +//! This test is a more subtle one. It is one which can hit thread destructors +//! unexpectedly. extern crate ralloc; diff --git a/tests/cross_thread_drop.rs b/tests/cross_thread_drop.rs index 5c270f54aac03d28d74a33b50da471f3a77fffcb..4c50a194ef41f9a22484ca81ac60adf8a6607973 100644 --- a/tests/cross_thread_drop.rs +++ b/tests/cross_thread_drop.rs @@ -30,7 +30,8 @@ fn cross_thread_drop() { fn cross_thread_drop_2() { util::multiply(|| { for _ in 0..10 { - let bx = thread::spawn(|| Box::new(0x11FE15C001u64)).join().unwrap(); + let bx = + thread::spawn(|| Box::new(0x11FE15C001u64)).join().unwrap(); thread::spawn(move || { util::acid(|| { diff --git a/tests/mpsc.rs b/tests/mpsc.rs index 90366f4df8f4eb1a41f80147165867a6c1396e6d..579e639676657956fc6905b57954f2330213db6c 100644 --- a/tests/mpsc.rs +++ b/tests/mpsc.rs @@ -2,8 +2,8 @@ extern crate ralloc; mod util; -use std::thread; use std::sync::mpsc; +use std::thread; #[test] fn mpsc_queue() { diff --git a/tests/util/mod.rs b/tests/util/mod.rs index 2f369799a9c291db81ff1e95ca3975a5cdae9dfe..b6576713e0ef277a2dc64f11b7cb34c12d0be597 100644 --- a/tests/util/mod.rs +++ b/tests/util/mod.rs @@ -1,6 +1,6 @@ //! Test automation. -use std::{thread, mem}; +use std::{mem, thread}; /// Magic trait for boxed `FnOnce`s. /// @@ -11,11 +11,15 @@ trait FnBox { } impl<F: FnOnce()> FnBox for F { - fn call_box(self: Box<Self>) { (*self)() } + fn call_box(self: Box<Self>) { + (*self)() + } } /// Like `std::thread::spawn`, but without the closure bounds. -unsafe fn spawn_unsafe<'a, F: FnOnce() + Send + 'a>(func: F) -> thread::JoinHandle<()> { +unsafe fn spawn_unsafe<'a, F: FnOnce() + Send + 'a>( + func: F, +) -> thread::JoinHandle<()> { let closure: Box<FnBox + 'a> = Box::new(func); let closure: Box<FnBox + Send> = mem::transmute(closure); thread::spawn(move || closure.call_box()) @@ -46,12 +50,14 @@ pub fn multiply<F: Fn() + Sync + Send + 'static>(func: F) { /// Wrap a block in acid tests. /// -/// This performs a number of temporary allocations to try to detect inconsistency. +/// This performs a number of temporary allocations to try to detect +/// inconsistency. /// -/// The basic idea is that if the allocator is broken, it might allocate the same memory twice, or -/// corrupt when allocating. Thus, we allocate some temporary segment and override it. This way we -/// might be able to detect memory corruption through asserting memory consistency after the -/// closure is completed. +/// The basic idea is that if the allocator is broken, it might allocate the +/// same memory twice, or corrupt when allocating. Thus, we allocate some +/// temporary segment and override it. This way we might be able to detect +/// memory corruption through asserting memory consistency after the closure is +/// completed. 
#[allow(dead_code)] pub fn acid<F: FnOnce()>(func: F) { let mut vec = vec!["something", "yep", "yup"]; @@ -70,7 +76,19 @@ pub fn acid<F: FnOnce()>(func: F) { vec.push("heyaya"); *bx = 55; - assert_eq!(vec, ["something", "yep", "yup", "lol", "lulz", "we", "are", "heyaya"]); + assert_eq!( + vec, + [ + "something", + "yep", + "yup", + "lol", + "lulz", + "we", + "are", + "heyaya" + ] + ); assert_eq!(*bx, 55); assert_eq!(*abc, "abc"); } diff --git a/tests/vec.rs b/tests/vec.rs index 5dc1b3446a14416a33167566f83788b4d7c2d4f5..76365cd71861d66a8156ea225df3ead356b50f59 100644 --- a/tests/vec.rs +++ b/tests/vec.rs @@ -8,7 +8,8 @@ fn simple_vec() { let mut vec = Vec::new(); for i in 0..0xFFFF { - // We're going to annoy the allocator by allocating a small chunk, after which we push. + // We're going to annoy the allocator by allocating a small chunk, + // after which we push. let _bx = Box::new(4); vec.push(i); } @@ -30,5 +31,4 @@ fn simple_vec() { assert_eq!(vec[i], 0); } }); - }
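
With the `GlobalAlloc` implementation added in src/lib.rs, a downstream crate can install ralloc as its global allocator. A minimal usage sketch (hypothetical consumer crate, assuming a nightly toolchain compatible with the feature gates above; the `#[global_allocator]` attribute itself is stable):

```rust
// Hypothetical consumer of the `GlobalAlloc` impl added in src/lib.rs.
extern crate ralloc;

// Route every heap allocation in this binary through ralloc.
#[global_allocator]
static ALLOCATOR: ralloc::Allocator = ralloc::Allocator;

fn main() {
    let v: Vec<u32> = (0..1_000).collect(); // allocated via ralloc
    assert_eq!(v.len(), 1_000);
}
```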