diff --git a/src/allocator.rs b/src/allocator.rs
index c50cc2f59c810496e91eae486da079725d83bbf6..71c2939491a9a34aa02a966399263c0690213a14 100644
--- a/src/allocator.rs
+++ b/src/allocator.rs
@@ -7,8 +7,31 @@ use crate::{BlockAddr, BlockLevel, BlockPtr, BlockTrait, BLOCK_SIZE};
 pub const ALLOC_LIST_ENTRIES: usize =
     (BLOCK_SIZE as usize - mem::size_of::<BlockPtr<AllocList>>()) / mem::size_of::<AllocEntry>();
 
+/// The RedoxFS block allocator. This struct manages all "data" blocks in RedoxFS
+/// (i.e., all blocks that aren't reserved or part of the header chain).
+///
+/// [`Allocator`] can allocate blocks of many "levels"---that is, it can
+/// allocate multiple consecutive [`BLOCK_SIZE`] blocks in one operation.
+///
+/// This reduces the amount of memory that the [`Allocator`] uses:
+/// Instead of storing the index of each free [`BLOCK_SIZE`] block,
+/// the `levels` array can keep track of higher-level blocks, splitting
+/// them when a smaller block is requested.
+///
+/// Higher-level blocks also allow us to more efficiently allocate memory
+/// for large files.
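+///
+/// For example (a rough sketch of the splitting behavior, with illustrative
+/// numbers): if only a level 2 block (four consecutive blocks) is free,
+/// allocating a level 0 block splits it into one level 1 block and two
+/// level 0 blocks; one level 0 block is returned, and the rest remain free.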
 #[derive(Clone, Default)]
 pub struct Allocator {
+    /// This array keeps track of all free blocks of each level,
+    /// and is initialized using the AllocList chain when we open the filesystem.
+    ///
+    /// Every element of the outer array represents a block level:
+    /// - item 0: free level 0 blocks (with size [`BLOCK_SIZE`])
+    /// - item 1: free level 1 blocks (with size 2*[`BLOCK_SIZE`])
+    /// - item 2: free level 2 blocks (with size 4*[`BLOCK_SIZE`])
+    /// ...and so on.
+    ///
+    /// Each inner array contains the indices of the free blocks at that level.
     levels: Vec<Vec<u64>>,
 }
 
@@ -17,6 +40,7 @@ impl Allocator {
         &self.levels
     }
 
+    /// Count the number of free [`BLOCK_SIZE`] blocks available to this [`Allocator`].
     pub fn free(&self) -> u64 {
         let mut free = 0;
         for level in 0..self.levels.len() {
@@ -26,10 +50,13 @@ impl Allocator {
         free
     }
 
+    /// Find a free block of the given level, mark it as "used", and return its address.
+    /// Returns [`None`] if no block of this level (or higher) is free.
     pub fn allocate(&mut self, block_level: BlockLevel) -> Option<BlockAddr> {
         // First, find the lowest level with a free block
         let mut index_opt = None;
         let mut level = block_level.0;
+        // Start searching at the level we want. Smaller levels are too small!
         while level < self.levels.len() {
             if !self.levels[level].is_empty() {
                 index_opt = self.levels[level].pop();
@@ -38,7 +65,8 @@ impl Allocator {
             level += 1;
         }
 
-        // Next, if a free block was found, split it up until you have a usable block of the right level
+        // If a free block was found, split it until we have a usable block of the right level.
+        // Each split keeps one half free; the other half is split further or returned.
         let index = index_opt?;
         while level > block_level.0 {
             level -= 1;
@@ -49,6 +77,10 @@ impl Allocator {
         Some(unsafe { BlockAddr::new(index, block_level) })
     }
 
+    /// Try to allocate the exact block specified, making all necessary splits.
+    /// Returns [`None`] if some (or all) of this block is already allocated.
+    ///
+    /// Note that [`BlockAddr`] encodes the block's location _and_ level.
     pub fn allocate_exact(&mut self, exact_addr: BlockAddr) -> Option<BlockAddr> {
         // This function only supports level 0 right now
         assert_eq!(exact_addr.level().0, 0);
@@ -83,9 +115,12 @@ impl Allocator {
         Some(unsafe { BlockAddr::new(index_opt?, exact_addr.level()) })
     }
 
+    /// Deallocate the given block, marking it "free" so that it can be re-used later.
     pub fn deallocate(&mut self, addr: BlockAddr) {
-        // See if block matches with a sibling - if so, join them into a larger block, and populate
-        // this all the way to the top level
+        // When we deallocate, we check whether the block being deallocated has a free sibling.
+        // If it does, we join the two to create one free block in the next (higher) level.
+        //
+        // We repeat this until we no longer have a sibling to join.
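+        //
+        // For example (indices illustrative only): freeing level 0 block 11
+        // while block 10 is free merges them into a level 1 block at 10;
+        // if a level 1 block at 8 is also free, they merge again into a
+        // level 2 block at 8.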
         let mut index = addr.index();
         let mut level = addr.level().0;
         loop {
@@ -98,26 +133,39 @@ impl Allocator {
 
             let mut found = false;
             let mut i = 0;
+            // look at all free blocks in the current level...
             while i < self.levels[level].len() {
+                // Index of the free block we're comparing against
                 let level_index = self.levels[level][i];
+
+                // Merge if:
+                // - the block we just freed aligns with the next largest block, and
+                // - the block we're looking at is its right sibling
                 if index % next_size == 0 && index + level_size == level_index {
+                    // "alloc" the next highest block, repeat deallocation process.
                     self.levels[level].remove(i);
                     found = true;
                     break;
+                // Merge if:
+                // - the block we're looking at aligns with the next largest block, and
+                // - it is the left sibling of the block we just freed
                 } else if level_index % next_size == 0 && level_index + level_size == index {
+                    // "alloc" the next highest block, repeat deallocation process.
                     self.levels[level].remove(i);
-                    index = level_index;
+                    index = level_index; // index moves to left block
                     found = true;
                     break;
                 }
                 i += 1;
             }
 
+            // No free sibling was found; mark this
+            // block as free and finish.
             if !found {
                 self.levels[level].push(index);
                 return;
             }
 
+            // repeat deallocation process on the
+            // higher-level block we just created.
             level += 1;
         }
     }
@@ -125,7 +173,11 @@ impl Allocator {
 
 #[repr(C, packed)]
 pub struct AllocEntry {
+    /// The index of the first block this [`AllocEntry`] refers to
     index: Le<u64>,
+
+    /// The number of blocks after (and including) `index` that are free or used.
+    /// If `count` is negative, the blocks are used; if positive, they are free.
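+    ///
+    /// For example (values illustrative only): an entry with `index = 100` and
+    /// `count = 4` marks blocks 100 through 103 as free, while `count = -4`
+    /// would mark those same blocks as used.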
     count: Le<i64>,
 }
 
@@ -186,10 +238,14 @@ impl fmt::Debug for AllocEntry {
     }
 }
 
-/// Alloc log node
+/// A node in the allocation chain.
 #[repr(C, packed)]
 pub struct AllocList {
+    /// A pointer to the previous [`AllocList`].
+    /// If this is the null pointer, this is the first element of the chain.
     pub prev: BlockPtr<AllocList>,
+
+    /// Allocation entries.
     pub entries: [AllocEntry; ALLOC_LIST_ENTRIES],
 }
 
diff --git a/src/bin/mount.rs b/src/bin/mount.rs
index 58391c2620b9c07b04fa9531324f61b677a5eade..6da747bd0a69622b46eeb33a6c1962270fd3560e 100644
--- a/src/bin/mount.rs
+++ b/src/bin/mount.rs
@@ -81,13 +81,13 @@ fn bootloader_password() -> Option<Vec<u8>> {
         addr_env.to_str().expect("REDOXFS_PASSWORD_ADDR not valid"),
         16,
     )
-        .expect("failed to parse REDOXFS_PASSWORD_ADDR");
+    .expect("failed to parse REDOXFS_PASSWORD_ADDR");
 
     let size = usize::from_str_radix(
         size_env.to_str().expect("REDOXFS_PASSWORD_SIZE not valid"),
         16,
     )
-        .expect("failed to parse REDOXFS_PASSWORD_SIZE");
+    .expect("failed to parse REDOXFS_PASSWORD_SIZE");
 
     let mut password = Vec::with_capacity(size);
     unsafe {
@@ -103,7 +103,9 @@ fn bootloader_password() -> Option<Vec<u8>> {
             flags: libredox::flag::MAP_SHARED,
             fd: fd.raw(),
             offset: addr as u64,
-        }).expect("failed to map REDOXFS_PASSWORD").cast::<u8>();
+        })
+        .expect("failed to map REDOXFS_PASSWORD")
+        .cast::<u8>();
 
         for i in 0..size {
             password.push(password_map.add(i).read());
@@ -272,7 +274,12 @@ fn filesystem_by_uuid(
     None
 }
 
-fn daemon(disk_id: &DiskId, mountpoint: &str, block_opt: Option<u64>, mut write: Option<File>) -> ! {
+fn daemon(
+    disk_id: &DiskId,
+    mountpoint: &str,
+    block_opt: Option<u64>,
+    mut write: Option<File>,
+) -> ! {
     setsig();
 
     let filesystem_opt = match *disk_id {
@@ -337,11 +344,15 @@ fn main() {
             "--no-daemon" | "-d" => daemonise = false,
 
             "--uuid" if disk_id.is_none() => {
-                disk_id = Some(DiskId::Uuid(match args.next().as_deref().map(Uuid::parse_str) {
-                    Some(Ok(uuid)) => uuid,
-                    Some(Err(err)) => print_err_exit(format!("redoxfs: invalid uuid '{}': {}", arg, err)),
-                    None => print_err_exit("redoxfs: no uuid provided")
-                }));
+                disk_id = Some(DiskId::Uuid(
+                    match args.next().as_deref().map(Uuid::parse_str) {
+                        Some(Ok(uuid)) => uuid,
+                        Some(Err(err)) => {
+                            print_err_exit(format!("redoxfs: invalid uuid '{}': {}", arg, err))
+                        }
+                        None => print_err_exit("redoxfs: no uuid provided"),
+                    },
+                ));
             }
 
             disk if disk_id.is_none() => disk_id = Some(DiskId::Path(disk.to_owned())),
@@ -350,10 +361,10 @@ fn main() {
 
             opts if mountpoint.is_some() => match u64::from_str_radix(opts, 16) {
                 Ok(block) => block_opt = Some(block),
-                Err(err) => print_err_exit(format!("redoxfs: invalid block '{}': {}", opts, err))
+                Err(err) => print_err_exit(format!("redoxfs: invalid block '{}': {}", opts, err)),
             },
 
-            _ => print_usage_exit()
+            _ => print_usage_exit(),
         }
     }
 
diff --git a/src/block.rs b/src/block.rs
index 722fb9d37e4154e1b77736fa83930581e355ffc8..b66a66d2f161a0ab486e222a1d19694cbc6e2774 100644
--- a/src/block.rs
+++ b/src/block.rs
@@ -5,6 +5,11 @@ use crate::BLOCK_SIZE;
 
 const BLOCK_LIST_ENTRIES: usize = BLOCK_SIZE as usize / mem::size_of::<BlockPtr<BlockRaw>>();
 
+/// An address of a data block.
+///
+/// This encodes a block's position _and_ [`BlockLevel`]:
+/// the first four bits of this `u64` encode the block's level,
+/// the rest encode its index.
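+///
+/// A sketch of the encoding, assuming the level occupies the low-order four bits:
+///
+/// ```text
+/// addr  = (index << 4) | level
+/// index = addr >> 4
+/// level = addr & 0xF
+/// ```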
 #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
 pub struct BlockAddr(u64);
 
@@ -43,35 +48,50 @@ impl BlockAddr {
     }
 }
 
+/// The size of a block.
+///
+/// Level 0 blocks are blocks of [`BLOCK_SIZE`] bytes.
+/// A level 1 block consists of two consecutive level 0 blocks.
+/// A level n block consists of two consecutive level n-1 blocks.
+///
+/// See [`crate::Allocator`] docs for more details.
 #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
 pub struct BlockLevel(pub(crate) usize);
 
 impl BlockLevel {
+    /// Returns the smallest block level that can contain
+    /// the given number of bytes.
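+    ///
+    /// A rough sketch of the mapping, assuming `BLOCK_SIZE` is 4096:
+    ///
+    /// ```ignore
+    /// BlockLevel::for_bytes(1);    // level 0 (one block)
+    /// BlockLevel::for_bytes(4096); // level 0 (exactly one block)
+    /// BlockLevel::for_bytes(4097); // level 1 (two blocks)
+    /// BlockLevel::for_bytes(8193); // level 2 (three blocks round up to four)
+    /// ```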
     pub(crate) fn for_bytes(bytes: u64) -> Self {
         if bytes == 0 {
             return BlockLevel(0);
         }
-        let level = bytes.div_ceil(BLOCK_SIZE)
+        let level = bytes
+            .div_ceil(BLOCK_SIZE)
             .next_power_of_two()
             .trailing_zeros() as usize;
         BlockLevel(level)
     }
 
+    /// The number of [`BLOCK_SIZE`] blocks (i.e., level 0 blocks)
+    /// in a block of this level.
     pub fn blocks(self) -> i64 {
         1 << self.0
     }
 
+    /// The number of bytes in a block of this level
     pub fn bytes(self) -> u64 {
         BLOCK_SIZE << self.0
     }
 }
 
 pub unsafe trait BlockTrait {
+    /// Create an empty block of this type.
     fn empty(level: BlockLevel) -> Option<Self>
     where
         Self: Sized;
 }
 
+/// A [`BlockAddr`] and the data it points to.
 #[derive(Clone, Copy, Debug, Default)]
 pub struct BlockData<T> {
     addr: BlockAddr,
@@ -87,15 +107,6 @@ impl<T> BlockData<T> {
         self.addr
     }
 
-    #[must_use = "don't forget to de-allocate old block address"]
-    pub fn swap_addr(&mut self, addr: BlockAddr) -> BlockAddr {
-        // Address levels must match
-        assert_eq!(self.addr.level(), addr.level());
-        let old = self.addr;
-        self.addr = addr;
-        old
-    }
-
     pub fn data(&self) -> &T {
         &self.data
     }
@@ -107,6 +118,19 @@ impl<T> BlockData<T> {
     pub(crate) unsafe fn into_parts(self) -> (BlockAddr, T) {
         (self.addr, self.data)
     }
+
+    /// Set the address of this [`BlockData`] to `addr`, returning this
+    /// block's old address. This method does not update block data.
+    ///
+    /// `addr` must point to a block with the same level as this block.
+    #[must_use = "don't forget to de-allocate old block address"]
+    pub fn swap_addr(&mut self, addr: BlockAddr) -> BlockAddr {
+        // Address levels must match
+        assert_eq!(self.addr.level(), addr.level());
+        let old = self.addr;
+        self.addr = addr;
+        old
+    }
 }
 
 impl<T: BlockTrait> BlockData<T> {
@@ -177,6 +201,13 @@ impl<T> ops::DerefMut for BlockList<T> {
     }
 }
 
+/// An address of a data block, along with a checksum of its data.
+///
+/// This encodes a block's position _and_ [`BlockLevel`]:
+/// the first four bits of `addr` encode the block's level,
+/// the rest encode its index.
+///
+/// Also see [`BlockAddr`].
 #[repr(C, packed)]
 pub struct BlockPtr<T> {
     addr: Le<u64>,
diff --git a/src/dir.rs b/src/dir.rs
index 1a93c1a553676ba26e749024aa115fd47f025af9..63846516b228f61d471e9f4cfd772d3b7ed770cd 100644
--- a/src/dir.rs
+++ b/src/dir.rs
@@ -1,7 +1,7 @@
 use alloc::{boxed::Box, vec};
 use core::{mem, ops, slice, str};
 
-use crate::{BlockLevel, BlockTrait, Node, TreePtr, RECORD_LEVEL, DIR_ENTRY_MAX_LENGTH};
+use crate::{BlockLevel, BlockTrait, Node, TreePtr, DIR_ENTRY_MAX_LENGTH, RECORD_LEVEL};
 
 #[repr(C, packed)]
 pub struct DirEntry {
diff --git a/src/disk/file.rs b/src/disk/file.rs
index 1a46baf11ff4c2939e41906cdf32a94b7847e61e..55445ce42b39e5f78f715cc7722c85a757af31bc 100644
--- a/src/disk/file.rs
+++ b/src/disk/file.rs
@@ -3,7 +3,7 @@ use std::io::{Seek, SeekFrom};
 use std::os::unix::fs::FileExt;
 use std::path::Path;
 
-use syscall::error::{Result, Error, EIO};
+use syscall::error::{Error, Result, EIO};
 
 use crate::disk::Disk;
 use crate::BLOCK_SIZE;
@@ -43,7 +43,11 @@ impl<T> ResultExt for std::io::Result<T> {
 
 impl DiskFile {
     pub fn open(path: impl AsRef<Path>) -> Result<DiskFile> {
-        let file = OpenOptions::new().read(true).write(true).open(path).or_eio()?;
+        let file = OpenOptions::new()
+            .read(true)
+            .write(true)
+            .open(path)
+            .or_eio()?;
         Ok(DiskFile { file })
     }
 
diff --git a/src/filesystem.rs b/src/filesystem.rs
index 3644ede4bf2cdda777608d1a1f8e7fa960330ec6..8df906eba7ccffb472697752164f0bdefbb79f8a 100644
--- a/src/filesystem.rs
+++ b/src/filesystem.rs
@@ -2,10 +2,9 @@ use aes::{Aes128, BlockDecrypt, BlockEncrypt};
 use alloc::{collections::VecDeque, vec::Vec};
 use syscall::error::{Error, Result, EKEYREJECTED, ENOENT, ENOKEY};
 
-use crate::{Allocator, BlockAddr, BlockLevel, Disk, Header, Transaction, BLOCK_SIZE, HEADER_RING};
 #[cfg(feature = "std")]
-use crate::{AllocEntry, AllocList, BlockData, BlockTrait, Key, KeySlot, Node, Salt,  TreeList};
-
+use crate::{AllocEntry, AllocList, BlockData, BlockTrait, Key, KeySlot, Node, Salt, TreeList};
+use crate::{Allocator, BlockAddr, BlockLevel, Disk, Header, Transaction, BLOCK_SIZE, HEADER_RING};
 
 /// A file system
 pub struct FileSystem<D: Disk> {
@@ -121,94 +120,96 @@ impl<D: Disk> FileSystem<D> {
         let size = disk.size()?;
         let block_offset = (reserved.len() as u64 + BLOCK_SIZE - 1) / BLOCK_SIZE;
 
-        if size >= (block_offset + HEADER_RING + 4) * BLOCK_SIZE {
-            for block in 0..block_offset as usize {
-                let mut data = [0; BLOCK_SIZE as usize];
+        if size < (block_offset + HEADER_RING + 4) * BLOCK_SIZE {
+            return Err(Error::new(syscall::error::ENOSPC));
+        }
 
-                let mut i = 0;
-                while i < data.len() && block * BLOCK_SIZE as usize + i < reserved.len() {
-                    data[i] = reserved[block * BLOCK_SIZE as usize + i];
-                    i += 1;
-                }
+        // Fill reserved data, pad with zeroes
+        for block in 0..block_offset as usize {
+            let mut data = [0; BLOCK_SIZE as usize];
 
-                unsafe {
-                    disk.write_at(block as u64, &data)?;
-                }
+            let mut i = 0;
+            while i < data.len() && block * BLOCK_SIZE as usize + i < reserved.len() {
+                data[i] = reserved[block * BLOCK_SIZE as usize + i];
+                i += 1;
             }
 
-            let mut header = Header::new(size);
-
-            let aes_opt = match password_opt {
-                Some(password) => {
-                    //TODO: handle errors
-                    header.key_slots[0] =
-                        KeySlot::new(password, Salt::new().unwrap(), Key::new().unwrap()).unwrap();
-                    Some(header.key_slots[0].key(password).unwrap().into_aes())
-                }
-                None => None,
-            };
+            unsafe {
+                disk.write_at(block as u64, &data)?;
+            }
+        }
 
-            let mut fs = FileSystem {
-                disk,
-                block: block_offset,
-                header,
-                allocator: Allocator::default(),
-                aes_opt,
-                aes_blocks: Vec::with_capacity(BLOCK_SIZE as usize / aes::BLOCK_SIZE),
-            };
+        let mut header = Header::new(size);
 
-            // Write header generation zero
-            let count = unsafe { fs.disk.write_at(fs.block, &fs.header)? };
-            if count != core::mem::size_of_val(&fs.header) {
-                // Wrote wrong number of bytes
-                #[cfg(feature = "log")]
-                log::error!("CREATE: WRONG NUMBER OF BYTES");
-                return Err(Error::new(syscall::error::EIO));
+        let aes_opt = match password_opt {
+            Some(password) => {
+                //TODO: handle errors
+                header.key_slots[0] =
+                    KeySlot::new(password, Salt::new().unwrap(), Key::new().unwrap()).unwrap();
+                Some(header.key_slots[0].key(password).unwrap().into_aes())
             }
+            None => None,
+        };
+
+        let mut fs = FileSystem {
+            disk,
+            block: block_offset,
+            header,
+            allocator: Allocator::default(),
+            aes_opt,
+            aes_blocks: Vec::with_capacity(BLOCK_SIZE as usize / aes::BLOCK_SIZE),
+        };
+
+        // Write header generation zero
+        let count = unsafe { fs.disk.write_at(fs.block, &fs.header)? };
+        if count != core::mem::size_of_val(&fs.header) {
+            // Wrote wrong number of bytes
+            #[cfg(feature = "log")]
+            log::error!("CREATE: WRONG NUMBER OF BYTES");
+            return Err(Error::new(syscall::error::EIO));
+        }
 
-            // Set tree and alloc pointers and write header generation one
-            fs.tx(|tx| unsafe {
-                let tree = BlockData::new(
-                    BlockAddr::new(HEADER_RING + 1, BlockLevel::default()),
-                    TreeList::empty(BlockLevel::default()).unwrap(),
-                );
+        // Set tree and alloc pointers and write header generation one
+        fs.tx(|tx| unsafe {
+            let tree = BlockData::new(
+                BlockAddr::new(HEADER_RING + 1, BlockLevel::default()),
+                TreeList::empty(BlockLevel::default()).unwrap(),
+            );
 
-                let mut alloc = BlockData::new(
-                    BlockAddr::new(HEADER_RING + 2, BlockLevel::default()),
-                    AllocList::empty(BlockLevel::default()).unwrap(),
-                );
-                let alloc_free = size / BLOCK_SIZE - (block_offset + HEADER_RING + 4);
-                alloc.data_mut().entries[0] = AllocEntry::new(HEADER_RING + 4, alloc_free as i64);
+            let mut alloc = BlockData::new(
+                BlockAddr::new(HEADER_RING + 2, BlockLevel::default()),
+                AllocList::empty(BlockLevel::default()).unwrap(),
+            );
 
-                tx.header.tree = tx.write_block(tree)?;
-                tx.header.alloc = tx.write_block(alloc)?;
-                tx.header_changed = true;
+            let alloc_free = size / BLOCK_SIZE - (block_offset + HEADER_RING + 4);
+            alloc.data_mut().entries[0] = AllocEntry::new(HEADER_RING + 4, alloc_free as i64);
 
-                Ok(())
-            })?;
+            tx.header.tree = tx.write_block(tree)?;
+            tx.header.alloc = tx.write_block(alloc)?;
+            tx.header_changed = true;
 
-            unsafe {
-                fs.reset_allocator()?;
-            }
+            Ok(())
+        })?;
 
-            fs.tx(|tx| unsafe {
-                let mut root = BlockData::new(
-                    BlockAddr::new(HEADER_RING + 3, BlockLevel::default()),
-                    Node::new(Node::MODE_DIR | 0o755, 0, 0, ctime, ctime_nsec),
-                );
-                root.data_mut().set_links(1);
-                let root_ptr = tx.write_block(root)?;
-                assert_eq!(tx.insert_tree(root_ptr)?.id(), 1);
-                Ok(())
-            })?;
-
-            // Make sure everything is synced and squash allocations
-            Transaction::new(&mut fs).commit(true)?;
-
-            Ok(fs)
-        } else {
-            Err(Error::new(syscall::error::ENOSPC))
+        unsafe {
+            fs.reset_allocator()?;
         }
+
+        fs.tx(|tx| unsafe {
+            let mut root = BlockData::new(
+                BlockAddr::new(HEADER_RING + 3, BlockLevel::default()),
+                Node::new(Node::MODE_DIR | 0o755, 0, 0, ctime, ctime_nsec),
+            );
+            root.data_mut().set_links(1);
+            let root_ptr = tx.write_block(root)?;
+            assert_eq!(tx.insert_tree(root_ptr)?.id(), 1);
+            Ok(())
+        })?;
+
+        // Make sure everything is synced and squash allocations
+        Transaction::new(&mut fs).commit(true)?;
+
+        Ok(fs)
     }
 
     /// start a filesystem transaction, required for making any changes
@@ -226,7 +227,7 @@ impl<D: Disk> FileSystem<D> {
     /// Reset allocator to state stored on disk
     ///
     /// # Safety
-    /// Unsafe, it must only be called when openning the filesystem
+    /// Unsafe, it must only be called when opening the filesystem
     unsafe fn reset_allocator(&mut self) -> Result<()> {
         self.allocator = Allocator::default();
 
@@ -267,52 +268,58 @@ impl<D: Disk> FileSystem<D> {
     }
 
     pub(crate) fn decrypt(&mut self, data: &mut [u8]) -> bool {
-        if let Some(ref aes) = self.aes_opt {
-            assert_eq!(data.len() % aes::BLOCK_SIZE, 0);
-
-            self.aes_blocks.clear();
-            for i in 0..data.len() / aes::BLOCK_SIZE {
-                self.aes_blocks.push(aes::Block::clone_from_slice(
-                    &data[i * aes::BLOCK_SIZE..(i + 1) * aes::BLOCK_SIZE],
-                ));
-            }
+        let aes = if let Some(ref aes) = self.aes_opt {
+            aes
+        } else {
+            // Do nothing if encryption is disabled
+            return false;
+        };
 
-            aes.decrypt_blocks(&mut self.aes_blocks);
+        assert_eq!(data.len() % aes::BLOCK_SIZE, 0);
 
-            for i in 0..data.len() / aes::BLOCK_SIZE {
-                data[i * aes::BLOCK_SIZE..(i + 1) * aes::BLOCK_SIZE]
-                    .copy_from_slice(&self.aes_blocks[i]);
-            }
-            self.aes_blocks.clear();
+        self.aes_blocks.clear();
+        for i in 0..data.len() / aes::BLOCK_SIZE {
+            self.aes_blocks.push(aes::Block::clone_from_slice(
+                &data[i * aes::BLOCK_SIZE..(i + 1) * aes::BLOCK_SIZE],
+            ));
+        }
 
-            true
-        } else {
-            false
+        aes.decrypt_blocks(&mut self.aes_blocks);
+
+        for i in 0..data.len() / aes::BLOCK_SIZE {
+            data[i * aes::BLOCK_SIZE..(i + 1) * aes::BLOCK_SIZE]
+                .copy_from_slice(&self.aes_blocks[i]);
         }
+        self.aes_blocks.clear();
+
+        true
     }
 
     pub(crate) fn encrypt(&mut self, data: &mut [u8]) -> bool {
-        if let Some(ref aes) = self.aes_opt {
-            assert_eq!(data.len() % aes::BLOCK_SIZE, 0);
-
-            self.aes_blocks.clear();
-            for i in 0..data.len() / aes::BLOCK_SIZE {
-                self.aes_blocks.push(aes::Block::clone_from_slice(
-                    &data[i * aes::BLOCK_SIZE..(i + 1) * aes::BLOCK_SIZE],
-                ));
-            }
+        let aes = if let Some(ref aes) = self.aes_opt {
+            aes
+        } else {
+            // Do nothing if encryption is disabled
+            return false;
+        };
 
-            aes.encrypt_blocks(&mut self.aes_blocks);
+        assert_eq!(data.len() % aes::BLOCK_SIZE, 0);
 
-            for i in 0..data.len() / aes::BLOCK_SIZE {
-                data[i * aes::BLOCK_SIZE..(i + 1) * aes::BLOCK_SIZE]
-                    .copy_from_slice(&self.aes_blocks[i]);
-            }
-            self.aes_blocks.clear();
+        self.aes_blocks.clear();
+        for i in 0..data.len() / aes::BLOCK_SIZE {
+            self.aes_blocks.push(aes::Block::clone_from_slice(
+                &data[i * aes::BLOCK_SIZE..(i + 1) * aes::BLOCK_SIZE],
+            ));
+        }
 
-            true
-        } else {
-            false
+        aes.encrypt_blocks(&mut self.aes_blocks);
+
+        for i in 0..data.len() / aes::BLOCK_SIZE {
+            data[i * aes::BLOCK_SIZE..(i + 1) * aes::BLOCK_SIZE]
+                .copy_from_slice(&self.aes_blocks[i]);
         }
+        self.aes_blocks.clear();
+
+        true
     }
 }
diff --git a/src/mount/redox/mod.rs b/src/mount/redox/mod.rs
index 53ab9ba9470c02cfdd0d240b723c6d4f4449693f..0c86ac3dc19784e3f5a21cd2f503818ce102879d 100644
--- a/src/mount/redox/mod.rs
+++ b/src/mount/redox/mod.rs
@@ -1,7 +1,7 @@
+use redox_scheme::{RequestKind, SignalBehavior, Socket, V2};
 use std::io;
 use std::path::Path;
 use std::sync::atomic::Ordering;
-use redox_scheme::{RequestKind, SignalBehavior, Socket, V2};
 
 use crate::{Disk, FileSystem, Transaction, IS_UMT};
 
@@ -26,12 +26,14 @@ where
     while IS_UMT.load(Ordering::SeqCst) == 0 {
         let req = match socket.next_request(SignalBehavior::Restart)? {
             None => break,
-            Some(req) => if let RequestKind::Call(r) = req.kind() {
-                r
-            } else {
-                // TODO: Redoxfs does not yet support asynchronous file IO. It might still make
-                // sense to implement cancellation for huge buffers, e.g. dd bs=1G
-                continue;
+            Some(req) => {
+                if let RequestKind::Call(r) = req.kind() {
+                    r
+                } else {
+                    // TODO: Redoxfs does not yet support asynchronous file IO. It might still make
+                    // sense to implement cancellation for huge buffers, e.g. dd bs=1G
+                    continue;
+                }
             }
         };
         let response = req.handle_scheme_mut(&mut scheme);
diff --git a/src/mount/redox/resource.rs b/src/mount/redox/resource.rs
index 424acf4cc8e743414ac0aa490d56513ecfdf11e1..8d59693c36e13e3f8b3e800b267482e0ad0f196c 100644
--- a/src/mount/redox/resource.rs
+++ b/src/mount/redox/resource.rs
@@ -9,7 +9,7 @@ use syscall::data::{Stat, TimeSpec};
 use syscall::error::{Error, Result, EBADF, EINVAL, EISDIR, EPERM};
 use syscall::flag::{
     MapFlags, F_GETFL, F_SETFL, MODE_PERM, O_ACCMODE, O_APPEND, O_RDONLY, O_RDWR, O_WRONLY,
-    PROT_READ, PROT_WRITE
+    PROT_READ, PROT_WRITE,
 };
 use syscall::{EBADFD, PAGE_SIZE};
 
@@ -192,7 +192,10 @@ impl<D: Disk> Resource<D> for DirResource {
 
     fn read(&mut self, buf: &mut [u8], offset: u64, _tx: &mut Transaction<D>) -> Result<usize> {
         let data = self.data.as_ref().ok_or(Error::new(EISDIR))?;
-        let src = usize::try_from(offset).ok().and_then(|o| data.get(o..)).unwrap_or(&[]);
+        let src = usize::try_from(offset)
+            .ok()
+            .and_then(|o| data.get(o..))
+            .unwrap_or(&[]);
 
         let byte_count = core::cmp::min(src.len(), buf.len());
         buf[..byte_count].copy_from_slice(&src[..byte_count]);
@@ -462,15 +465,13 @@ impl<D: Disk> Resource<D> for FileResource {
                         length: new_size,
                         // PRIVATE/SHARED doesn't matter once the pages are passed in the fmap
                         // handler.
-                        prot: libredox::flag::PROT_READ
-                            | libredox::flag::PROT_WRITE,
+                        prot: libredox::flag::PROT_READ | libredox::flag::PROT_WRITE,
                         flags: libredox::flag::MAP_PRIVATE,
 
                         offset: 0,
                         fd: !0,
                         addr: core::ptr::null_mut(),
-                    }
-                    )? as *mut u8
+                    })? as *mut u8
                 }
             } else {
                 unsafe {
diff --git a/src/node.rs b/src/node.rs
index 44933b9de9f3e42c2918cc164e51f0dec5ffe1e6..b2566a37e4e6217029e970a227bfb9ade9894938 100644
--- a/src/node.rs
+++ b/src/node.rs
@@ -3,6 +3,7 @@ use endian_num::Le;
 
 use crate::{BlockLevel, BlockList, BlockPtr, BlockTrait, RecordRaw, BLOCK_SIZE, RECORD_LEVEL};
 
+/// An index into a [`Node`]'s block table.
 pub enum NodeLevel {
     L0(usize),
     L1(usize, usize),
@@ -13,6 +14,11 @@ pub enum NodeLevel {
 
 impl NodeLevel {
     // Warning: this uses constant record offsets, make sure to sync with Node
+
+    /// Return the [`NodeLevel`] of the record with the given index.
+    /// - the first 128 are level 0,
+    /// - the next 64*256 are level 1,
+    /// - ...and so on.
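+    ///
+    /// A few illustrative mappings (following the constants in the function body):
+    ///
+    /// ```ignore
+    /// NodeLevel::new(0);   // Some(L0(0))
+    /// NodeLevel::new(127); // Some(L0(127))
+    /// NodeLevel::new(128); // Some(L1(0, 0))
+    /// NodeLevel::new(129); // Some(L1(0, 1))
+    /// ```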
     pub fn new(mut record_offset: u64) -> Option<Self> {
         // 1 << 8 = 256, this is the number of entries in a BlockList
         const SHIFT: u64 = 8;
@@ -82,28 +88,65 @@ type BlockListL4 = BlockList<BlockListL3>;
 /// A file/folder node
 #[repr(C, packed)]
 pub struct Node {
+    /// This node's type & permissions.
+    /// - The top four bits encode the node type
+    ///   (see [`Node::MODE_FILE`], [`Node::MODE_DIR`], [`Node::MODE_SYMLINK`]).
+    /// - The remaining twelve bits are Unix-style permission bits
+    ///   (see [`Node::MODE_PERM`]).
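+    ///
+    /// For example, `Node::MODE_DIR | 0o755` (as used for the root directory)
+    /// describes a directory with `rwxr-xr-x` permissions.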
     pub mode: Le<u16>,
+
+    /// The uid that owns this file
     pub uid: Le<u32>,
+
+    /// The gid that owns this file
     pub gid: Le<u32>,
+
+    /// The number of links to this file
+    /// (i.e., the number of directory entries that refer to it)
     pub links: Le<u32>,
+
+    /// The length of this file, in bytes
     pub size: Le<u64>,
+
     pub ctime: Le<u64>,
     pub ctime_nsec: Le<u32>,
     pub mtime: Le<u64>,
     pub mtime_nsec: Le<u32>,
     pub atime: Le<u64>,
     pub atime_nsec: Le<u32>,
+
     pub record_level: Le<u32>,
+
     pub padding: [u8; BLOCK_SIZE as usize - 4094],
-    // 128 * RECORD_SIZE (16 MiB, 128 KiB each)
+
+    /// The first 128 records of this file.
+    ///
+    /// Total size: 128 * RECORD_SIZE (16 MiB, 128 KiB each)
     pub level0: [BlockPtr<RecordRaw>; 128],
-    // 64 * 256 * RECORD_SIZE (2 GiB, 32 MiB each)
+
+    /// The next 64 * 256 records of this file,
+    /// stored behind 64 level one tables.
+    ///
+    /// Total size: 64 * 256 * RECORD_SIZE (2 GiB, 32 MiB each)
     pub level1: [BlockPtr<BlockListL1>; 64],
-    // 32 * 256 * 256 * RECORD_SIZE (256 GiB, 8 GiB each)
+
+    /// The next 32 * 256 * 256 records of this file,
+    /// stored behind 32 level two tables.
+    /// Each level two table points to 256 level one tables.
+    ///
+    /// Total size: 32 * 256 * 256 * RECORD_SIZE (256 GiB, 8 GiB each)
     pub level2: [BlockPtr<BlockListL2>; 32],
-    // 16 * 256 * 256 * 256 * RECORD_SIZE (32 TiB, 2 TiB each)
+
+    /// The next 16 * 256 * 256 * 256 records of this file,
+    /// stored behind 16 level three tables.
+    ///
+    /// Total size: 16 * 256 * 256 * 256 * RECORD_SIZE (32 TiB, 2 TiB each)
     pub level3: [BlockPtr<BlockListL3>; 16],
-    // 12 * 256 * 256 * 256 * 256 * RECORD_SIZE (6 PiB, 512 TiB each)
+
+    /// The next 12 * 256 * 256 * 256 * 256 records of this file,
+    /// stored behind 12 level four tables.
+    ///
+    /// Total size: 12 * 256 * 256 * 256 * 256 * RECORD_SIZE (6 PiB, 512 TiB each)
     pub level4: [BlockPtr<BlockListL4>; 12],
 }
 
@@ -148,11 +191,13 @@ impl Node {
     pub const MODE_DIR: u16 = 0x4000;
     pub const MODE_SYMLINK: u16 = 0xA000;
 
+    /// Mask for node permission bits
     pub const MODE_PERM: u16 = 0x0FFF;
     pub const MODE_EXEC: u16 = 0o1;
     pub const MODE_WRITE: u16 = 0o2;
     pub const MODE_READ: u16 = 0o4;
 
+    /// Create a new, empty node with the given metadata
     pub fn new(mode: u16, uid: u32, gid: u32, ctime: u64, ctime_nsec: u32) -> Self {
         Self {
             mode: mode.into(),
@@ -177,22 +222,32 @@ impl Node {
         }
     }
 
+    /// This node's type & permissions.
+    /// - The top four bits encode the node type
+    ///   (see [`Node::MODE_FILE`], [`Node::MODE_DIR`], [`Node::MODE_SYMLINK`]).
+    /// - The remaining twelve bits are Unix-style permission bits
+    ///   (see [`Node::MODE_PERM`]).
     pub fn mode(&self) -> u16 {
         self.mode.to_ne()
     }
 
+    /// The uid that owns this file
     pub fn uid(&self) -> u32 {
         self.uid.to_ne()
     }
 
+    /// The gid that owns this file
     pub fn gid(&self) -> u32 {
         self.gid.to_ne()
     }
 
+    /// The number of links to this file
+    /// (i.e., the number of directory entries that refer to it)
     pub fn links(&self) -> u32 {
         self.links.to_ne()
     }
 
+    /// The length of this file, in bytes.
     pub fn size(&self) -> u64 {
         self.size.to_ne()
     }
diff --git a/src/tests.rs b/src/tests.rs
index 989d042a20e2f156ba8697e03b1f4acedbe58392..b9d809648bcfec8880f6a0800a289cf951cfaa89 100644
--- a/src/tests.rs
+++ b/src/tests.rs
@@ -1,9 +1,9 @@
+use crate::{unmount_path, DiskSparse, FileSystem, Node, TreePtr};
 use std::path::Path;
 use std::process::Command;
-use std::{fs, thread, time};
 use std::sync::atomic::AtomicUsize;
 use std::sync::atomic::Ordering::Relaxed;
-use crate::{unmount_path, DiskSparse, FileSystem, Node, TreePtr};
+use std::{fs, thread, time};
 
 static IMAGE_SEQ: AtomicUsize = AtomicUsize::new(0);
 
@@ -147,16 +147,12 @@ fn create_remove_should_not_increase_size() {
 
         let tree_ptr = TreePtr::<Node>::root();
         let name = "test";
-        let _ = fs.tx(|tx| {
-            tx.create_node(
-                tree_ptr,
-                name,
-                Node::MODE_FILE | 0644,
-                1,
-                0,
-            )?;
-            tx.remove_node(tree_ptr, name, Node::MODE_FILE)
-        }).unwrap();
+        let _ = fs
+            .tx(|tx| {
+                tx.create_node(tree_ptr, name, Node::MODE_FILE | 0644, 1, 0)?;
+                tx.remove_node(tree_ptr, name, Node::MODE_FILE)
+            })
+            .unwrap();
 
         assert_eq!(fs.allocator().free(), initially_free);
     });
diff --git a/src/transaction.rs b/src/transaction.rs
index 82e5e40b54dc76f2c92814c1b74884fa2452751c..b547b473b4b96e257963243f907a8521cbf3b1ca 100644
--- a/src/transaction.rs
+++ b/src/transaction.rs
@@ -12,7 +12,11 @@ use syscall::error::{
     Error, Result, EEXIST, EINVAL, EIO, EISDIR, ENOENT, ENOSPC, ENOTDIR, ENOTEMPTY, ERANGE,
 };
 
-use crate::{AllocEntry, AllocList, Allocator, BlockAddr, BlockData, BlockLevel, BlockPtr, BlockTrait, DirEntry, DirList, Disk, FileSystem, Header, Node, NodeLevel, RecordRaw, TreeData, TreePtr, ALLOC_LIST_ENTRIES, HEADER_RING, DIR_ENTRY_MAX_LENGTH};
+use crate::{
+    AllocEntry, AllocList, Allocator, BlockAddr, BlockData, BlockLevel, BlockPtr, BlockTrait,
+    DirEntry, DirList, Disk, FileSystem, Header, Node, NodeLevel, RecordRaw, TreeData, TreePtr,
+    ALLOC_LIST_ENTRIES, DIR_ENTRY_MAX_LENGTH, HEADER_RING,
+};
 
 pub struct Transaction<'a, D: Disk> {
     fs: &'a mut FileSystem<D>,
@@ -48,7 +52,13 @@ impl<'a, D: Disk> Transaction<'a, D> {
         Ok(())
     }
 
-    // Unsafe because order must be done carefully and changes must be flushed to disk
+    //
+    // MARK: block operations
+    //
+
+    /// Allocate a new block of the given level, returning its address.
+    /// - returns `Err(ENOSPC)` if a block of this level could not be allocated.
+    /// - unsafe because order must be done carefully and changes must be flushed to disk
     unsafe fn allocate(&mut self, level: BlockLevel) -> Result<BlockAddr> {
         match self.allocator.allocate(level) {
             Some(addr) => {
@@ -59,7 +69,8 @@ impl<'a, D: Disk> Transaction<'a, D> {
         }
     }
 
-    // Unsafe because order must be done carefully and changes must be flushed to disk
+    /// Deallocate the given block.
+    /// - unsafe because order must be done carefully and changes must be flushed to disk
     unsafe fn deallocate(&mut self, addr: BlockAddr) {
         //TODO: should we use some sort of not-null abstraction?
         assert!(!addr.is_null());
@@ -96,6 +107,14 @@ impl<'a, D: Disk> Transaction<'a, D> {
         }
     }
 
+    /// Drain `self.allocator_log` and `self.deallocate`,
+    /// updating the [`AllocList`] with the resulting state.
+    ///
+    /// This method does not write anything to disk;
+    /// all writes are cached.
+    ///
+    /// If `squash` is true, fully rebuild the allocator log
+    /// using the state of `self.allocator`.
     fn sync_allocator(&mut self, squash: bool) -> Result<bool> {
         let mut prev_ptr = BlockPtr::default();
         if squash {
@@ -185,14 +204,18 @@ impl<'a, D: Disk> Transaction<'a, D> {
         Ok(true)
     }
 
-    //TODO: change this function, provide another way to squash, only write header in commit
+    // TODO: change this function, provide another way to squash, only write header in commit
+    /// Write all changes cached in this [`Transaction`] to disk.
     pub fn sync(&mut self, squash: bool) -> Result<bool> {
         // Make sure alloc is synced
         self.sync_allocator(squash)?;
 
         // Write all items in write cache
         for (addr, raw) in self.write_cache.iter_mut() {
+            // sync_allocator must have changed the alloc block pointer
+            // if we have any blocks to write
             assert!(self.header_changed);
+
             self.fs.encrypt(raw);
             let count = unsafe { self.fs.disk.write_at(self.fs.block + addr.index(), raw)? };
             if count != raw.len() {
@@ -204,6 +227,10 @@ impl<'a, D: Disk> Transaction<'a, D> {
         }
         self.write_cache.clear();
 
+        // Do nothing if there are no changes to write.
+        //
+        // This only happens if `self.write_cache` was empty,
+        // and the fs header wasn't changed by another operation.
         if !self.header_changed {
             return Ok(false);
         }
@@ -314,7 +341,9 @@ impl<'a, D: Disk> Transaction<'a, D> {
             return Ok(record);
         }
 
-        // Expand record if larger level requested
+        // If a larger level was requested,
+        // create a new record of the requested level
+        // and fill it with the data from the original record.
         let (_old_addr, old_raw) = unsafe { record.into_parts() };
         let mut raw = match T::empty(level) {
             Some(empty) => empty,
@@ -372,6 +401,12 @@ impl<'a, D: Disk> Transaction<'a, D> {
         Ok(block.create_ptr())
     }
 
+    //
+    // MARK: tree operations
+    //
+
+    /// Walk the tree and return the contents and address
+    /// of the data block that `ptr` points to.
     fn read_tree_and_addr<T: BlockTrait + DerefMut<Target = [u8]>>(
         &mut self,
         ptr: TreePtr<T>,
@@ -404,6 +439,7 @@ impl<'a, D: Disk> Transaction<'a, D> {
         Ok((TreeData::new(ptr.id(), data), raw.addr()))
     }
 
+    /// Walk the tree and return the contents of the data block that `ptr` points to.
     pub fn read_tree<T: BlockTrait + DerefMut<Target = [u8]>>(
         &mut self,
         ptr: TreePtr<T>,
@@ -411,11 +447,14 @@ impl<'a, D: Disk> Transaction<'a, D> {
         Ok(self.read_tree_and_addr(ptr)?.0)
     }
 
-    //TODO: improve performance, reduce writes
+    /// Insert `block_ptr` into the first free slot in the tree,
+    /// returning a pointer to that slot.
     pub fn insert_tree<T: Deref<Target = [u8]>>(
         &mut self,
         block_ptr: BlockPtr<T>,
     ) -> Result<TreePtr<T>> {
+        // TODO: improve performance, reduce writes
+
         // Remember that if there is a free block at any level it will always sync when it
         // allocates at the lowest level, so we can save a write by not writing each level as it
         // is allocated.
@@ -442,6 +481,7 @@ impl<'a, D: Disk> Transaction<'a, D> {
                                 continue;
                             }
 
+                            // TODO: do we need to write all of these?
                             // Write updates to newly allocated blocks
                             l0.data_mut().ptrs[i0] = block_ptr.cast();
                             l1.data_mut().ptrs[i1] = self.sync_block(l0)?;
@@ -503,7 +543,13 @@ impl<'a, D: Disk> Transaction<'a, D> {
         self.sync_trees(&[node])
     }
 
-    //TODO: use more efficient methods for reading directories
+    //
+    // MARK: node operations
+    //
+
+    // TODO: use more efficient methods for reading directories
+    /// Write all children of `parent_ptr` to `children`.
+    /// `parent_ptr` must point to a directory node.
     pub fn child_nodes(
         &mut self,
         parent_ptr: TreePtr<Node>,
@@ -513,6 +559,8 @@ impl<'a, D: Disk> Transaction<'a, D> {
         let record_level = parent.data().record_level();
         for record_offset in 0..(parent.data().size() / record_level.bytes()) {
             let block_ptr = self.node_record_ptr(&parent, record_offset)?;
+            // TODO: is this safe? what if child_nodes is called on
+            // a node that isn't a directory?
             let dir_ptr: BlockPtr<DirList> = unsafe { block_ptr.cast() };
             let dir = self.read_block(dir_ptr)?;
             for entry in dir.data().entries.iter() {
@@ -531,6 +579,8 @@ impl<'a, D: Disk> Transaction<'a, D> {
     }
 
     //TODO: improve performance (h-tree?)
+    /// Find a node that is a child of `parent_ptr` and is named `name`.
+    /// Returns `ENOENT` if no such node is found.
     pub fn find_node(&mut self, parent_ptr: TreePtr<Node>, name: &str) -> Result<TreeData<Node>> {
         let parent = self.read_tree(parent_ptr)?;
         let record_level = parent.data().record_level();
@@ -559,7 +609,8 @@ impl<'a, D: Disk> Transaction<'a, D> {
         Err(Error::new(ENOENT))
     }
 
-    //TODO: improve performance (h-tree?)
+    // TODO: improve performance (h-tree?)
+    /// Create a new node in the tree with the given parameters.
     pub fn create_node(
         &mut self,
         parent_ptr: TreePtr<Node>,
@@ -598,37 +649,36 @@ impl<'a, D: Disk> Transaction<'a, D> {
         name: &str,
         node_ptr: TreePtr<Node>,
     ) -> Result<()> {
-       self.check_name(&parent_ptr, name)?;
-
-        let entry = DirEntry::new(node_ptr, name);
+        self.check_name(&parent_ptr, name)?;
 
         let mut parent = self.read_tree(parent_ptr)?;
-
         let mut node = self.read_tree(node_ptr)?;
+
+        // Increment node reference counter
         let links = node.data().links();
         node.data_mut().set_links(links + 1);
 
+        let entry = DirEntry::new(node_ptr, name);
+
         let record_level = parent.data().record_level();
         let record_end = parent.data().size() / record_level.bytes();
         for record_offset in 0..record_end {
             let mut dir_record_ptr = self.node_record_ptr(&parent, record_offset)?;
             let mut dir_ptr: BlockPtr<DirList> = unsafe { dir_record_ptr.cast() };
             let mut dir = self.read_block(dir_ptr)?;
-            let mut dir_changed = false;
+
             for old_entry in dir.data_mut().entries.iter_mut() {
-                // Skip filled entries
                 if !old_entry.node_ptr().is_null() {
                     continue;
                 }
 
+                // Write our new entry into the first
+                // free slot in this directory
                 *old_entry = entry;
-                dir_changed = true;
-                break;
-            }
-            if dir_changed {
+
+                // Write updated blocks
                 dir_ptr = self.sync_block(dir)?;
                 dir_record_ptr = unsafe { dir_ptr.cast() };
-
                 self.sync_node_record_ptr(&mut parent, record_offset, dir_record_ptr)?;
                 self.sync_trees(&[parent, node])?;
 
@@ -636,7 +686,10 @@ impl<'a, D: Disk> Transaction<'a, D> {
             }
         }
 
-        // Append a new dirlist, with first entry set to new entry
+        // We couldn't find a free dir entry slot; this directory is full.
+        // We now need to add a new dirlist block to the parent node,
+        // with `entry` as its first member.
+
         let mut dir =
             BlockData::<DirList>::empty(unsafe { self.allocate(BlockLevel::default())? }).unwrap();
         dir.data_mut().entries[0] = entry;
@@ -763,7 +816,7 @@ impl<'a, D: Disk> Transaction<'a, D> {
     ) -> Result<()> {
         let orig = self.find_node(orig_parent_ptr, orig_name)?;
 
-        //TODO: only allow ENOENT as an error?
+        // TODO: only allow ENOENT as an error?
         if let Ok(new) = self.find_node(new_parent_ptr, new_name) {
             // Move to same name, return
             if new.id() == orig.id() {
@@ -771,6 +824,7 @@ impl<'a, D: Disk> Transaction<'a, D> {
             }
 
             // Remove new name
+            // (we renamed onto a node that already exists; overwrite it)
             self.remove_node(
                 new_parent_ptr,
                 new_name,
@@ -791,9 +845,7 @@ impl<'a, D: Disk> Transaction<'a, D> {
         Ok(())
     }
 
-    fn check_name(&mut self,
-                  parent_ptr: &TreePtr<Node>,
-                  name: &str) -> Result<()> {
+    fn check_name(&mut self, parent_ptr: &TreePtr<Node>, name: &str) -> Result<()> {
         if name.contains(':') {
             return Err(Error::new(EINVAL));
         }
@@ -809,6 +861,8 @@ impl<'a, D: Disk> Transaction<'a, D> {
         Ok(())
     }
 
+    /// Get a pointer to the record of `node` at the given offset
+    /// (i.e., to the `record_offset`th record of `node`).
     fn node_record_ptr(
         &mut self,
         node: &TreeData<Node>,
@@ -931,6 +985,7 @@ impl<'a, D: Disk> Transaction<'a, D> {
         }
     }
 
+    /// Set record `record_offset` of `node` to the record pointed to by `ptr`.
     fn sync_node_record_ptr(
         &mut self,
         node: &mut TreeData<Node>,
@@ -992,24 +1047,35 @@ impl<'a, D: Disk> Transaction<'a, D> {
     ) -> Result<usize> {
         let node_size = node.data().size();
         let record_level = node.data().record_level();
-        let mut i = 0;
-        while i < buf.len() && offset < node_size {
+
+        let mut bytes_read = 0;
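+
+        // A worked example (illustrative numbers, assuming 128 KiB records):
+        // reading 10,000 bytes starting at byte offset 130,000 first copies
+        // the last 1,072 bytes of record 0, then the first 8,928 bytes of
+        // record 1.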
+        while bytes_read < buf.len() && offset < node_size {
+            // Byte offset into the current record at which reading starts
             let j = (offset % record_level.bytes()) as usize;
+
+            // Number of bytes to read in this iteration
             let len = min(
-                buf.len() - i,
-                min(record_level.bytes() - j as u64, node_size - offset) as usize,
+                buf.len() - bytes_read, // number of bytes we have left in `buf`
+                min(
+                    record_level.bytes() - j as u64, // number of bytes we haven't read in this record
+                    node_size - offset,              // number of bytes left in this node
+                ) as usize,
             );
+
+            let record_idx = offset / record_level.bytes();
+            let record_ptr = self.node_record_ptr(node, record_idx)?;
+
+            // The level of the record to read.
+            // This is at most `record_level` due to the way `len` is computed.
             let level = BlockLevel::for_bytes((j + len) as u64);
 
-            let record_ptr = self.node_record_ptr(node, offset / record_level.bytes())?;
             let record = unsafe { self.read_record(record_ptr, level)? };
+            buf[bytes_read..bytes_read + len].copy_from_slice(&record.data()[j..j + len]);
 
-            buf[i..i + len].copy_from_slice(&record.data()[j..j + len]);
-
-            i += len;
+            bytes_read += len;
             offset += len as u64;
         }
-        Ok(i)
+        Ok(bytes_read)
     }
 
     pub fn read_node(
@@ -1052,7 +1118,8 @@ impl<'a, D: Disk> Transaction<'a, D> {
         }
 
         if old_size < size {
-            // If size is smaller, write zeroes until the size matches
+            // If we're "truncating" to a larger size,
+            // write zeroes until the size matches
             let zeroes = RecordRaw::empty(record_level).unwrap();
 
             let mut offset = old_size;
@@ -1088,6 +1155,10 @@ impl<'a, D: Disk> Transaction<'a, D> {
         Ok(true)
     }
 
+    /// Truncate the given node to the given size.
+    ///
+    /// If `size` is larger than the node's current size,
+    /// expand the node with zeroes.
     pub fn truncate_node(
         &mut self,
         node_ptr: TreePtr<Node>,
@@ -1166,6 +1237,7 @@ impl<'a, D: Disk> Transaction<'a, D> {
         Ok(node_changed)
     }
 
+    /// Write the bytes in `buf` to `node`, starting at `offset`.
     pub fn write_node(
         &mut self,
         node_ptr: TreePtr<Node>,
diff --git a/src/tree.rs b/src/tree.rs
index 63df18318b73ea3f822a908736ac8fb065641725..90a2cb2e110c6122229afbbd3254fffd936f0e7d 100644
--- a/src/tree.rs
+++ b/src/tree.rs
@@ -7,12 +7,16 @@ use crate::{BlockLevel, BlockPtr, BlockRaw, BlockTrait};
 const TREE_LIST_SHIFT: u32 = 8;
 const TREE_LIST_ENTRIES: usize = 1 << TREE_LIST_SHIFT;
 
-// Tree with 4 levels
+/// A tree with 4 levels
 pub type Tree = TreeList<TreeList<TreeList<TreeList<BlockRaw>>>>;
 
+/// A [`TreePtr`] and the contents of the block it references.
 #[derive(Clone, Copy, Debug, Default)]
 pub struct TreeData<T> {
+    /// The value of the [`TreePtr`]
     id: u32,
+
+    /// The data itself.
     data: T,
 }
 
@@ -45,6 +49,8 @@ impl<T> TreeData<T> {
     }
 }
 
+/// A list of pointers to blocks of type `T`.
+/// This is one level of a [`Tree`], defined above.
 #[repr(C, packed)]
 pub struct TreeList<T> {
     pub ptrs: [BlockPtr<T>; TREE_LIST_ENTRIES],
@@ -85,6 +91,7 @@ impl<T> ops::DerefMut for TreeList<T> {
     }
 }
 
+/// A pointer to an entry in a [`Tree`].
 #[repr(C, packed)]
 pub struct TreePtr<T> {
     id: Le<u32>,
@@ -92,6 +99,8 @@ pub struct TreePtr<T> {
 }
 
 impl<T> TreePtr<T> {
+    /// Get a [`TreePtr`] to the filesystem root
+    /// directory's node.
     pub fn root() -> Self {
         Self::new(1)
     }
@@ -103,6 +112,11 @@ impl<T> TreePtr<T> {
         }
     }
 
+    /// Create a [`TreePtr`] from [`Tree`] indices,
+    /// where `indexes` is `(i3, i2, i1, i0)`:
+    /// - `i3` is the index into the level 3 table,
+    /// - `i2` is the index into the level 2 table at `i3`,
+    /// - ...and so on.
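+    ///
+    /// Roughly, since `TREE_LIST_SHIFT` is 8:
+    ///
+    /// ```ignore
+    /// id == (i3 << 24) | (i2 << 16) | (i1 << 8) | i0
+    /// ```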
     pub fn from_indexes(indexes: (usize, usize, usize, usize)) -> Self {
         const SHIFT: u32 = TREE_LIST_SHIFT;
         let id = ((indexes.0 << (3 * SHIFT)) as u32)
@@ -123,17 +137,23 @@ impl<T> TreePtr<T> {
         self.id() == 0
     }
 
+    /// Get the indices of this [`TreePtr`] in a [`Tree`].
+    /// Returns `(i3, i2, i1, i0)`:
+    /// - `i3` is the index into the level 3 table,
+    /// - `i2` is the index into the level 2 table at `i3`,
+    /// - ...and so on.
     pub fn indexes(&self) -> (usize, usize, usize, usize) {
         const SHIFT: u32 = TREE_LIST_SHIFT;
         const NUM: u32 = 1 << SHIFT;
         const MASK: u32 = NUM - 1;
         let id = self.id();
-        (
-            ((id >> (3 * SHIFT)) & MASK) as usize,
-            ((id >> (2 * SHIFT)) & MASK) as usize,
-            ((id >> SHIFT) & MASK) as usize,
-            (id & MASK) as usize,
-        )
+
+        let i3 = ((id >> (3 * SHIFT)) & MASK) as usize;
+        let i2 = ((id >> (2 * SHIFT)) & MASK) as usize;
+        let i1 = ((id >> SHIFT) & MASK) as usize;
+        let i0 = (id & MASK) as usize;
+
+        (i3, i2, i1, i0)
     }
 }