diff --git a/src/lib.rs b/src/lib.rs
index 4f18ee3cbe75fc9c1ad04ea9384fa2c2ea9da54c..862794080a53943bf3f5e1646f05cdb65c11be2f 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -48,6 +48,7 @@
 #![feature(integer_atomics)]
 #![feature(lang_items)]
 #![feature(naked_functions)]
+#![feature(matches_macro)]
 #![feature(ptr_internals)]
 #![feature(thread_local)]
 #![no_std]
diff --git a/src/memory/bump.rs b/src/memory/bump.rs
index 5ffb4ad2b6504a11974b096581ff81750277c66e..e824fa4cd60dbb07062ca785fa84c1b6babb11c5 100644
--- a/src/memory/bump.rs
+++ b/src/memory/bump.rs
@@ -2,9 +2,9 @@
 //! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/allocating-frames.html)
 
 use crate::paging::PhysicalAddress;
-
 use super::{Frame, FrameAllocator, MemoryArea, MemoryAreaIter};
 
+use syscall::{PartialAllocStrategy, PhysallocFlags};
 
 pub struct BumpAllocator {
     next_free_frame: Frame,
@@ -88,14 +88,27 @@ impl FrameAllocator for BumpAllocator {
         count
     }
 
-    fn allocate_frames(&mut self, count: usize) -> Option<Frame> {
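+    // Allocate `count` frames if possible; with PhysallocFlags::PARTIAL_ALLOC set,
+    // as few as `min` frames may be returned instead. The caller gets the first
+    // frame and the number of frames actually allocated.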
+    fn allocate_frames3(&mut self, count: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, min: usize) -> Option<(Frame, usize)> {
+        // TODO: Comply with flags and allocation strategies better.
         if count == 0 {
-            None
+            return None;
         } else if let Some(area) = self.current_area {
+            let space32 = flags.contains(PhysallocFlags::SPACE_32);
+            let partial_alloc = flags.contains(PhysallocFlags::PARTIAL_ALLOC);
+            let mut actual_size = count;
+
             // "Clone" the frame to return it if it's free. Frame doesn't
             // implement Clone, but we can construct an identical frame.
-            let start_frame = Frame{ number: self.next_free_frame.number };
-            let end_frame = Frame { number: self.next_free_frame.number + (count - 1) };
+            let start_frame = Frame { number: self.next_free_frame.number };
+            let mut end_frame = Frame { number: self.next_free_frame.number + (count - 1) };
+            let min_end_frame = if partial_alloc {
+                Frame { number: self.next_free_frame.number + (min - 1) }
+            } else {
+                Frame { number: self.next_free_frame.number + (count - 1) }
+            };
 
             // the last frame of the current area
             let current_area_last_frame = {
@@ -103,24 +116,39 @@ impl FrameAllocator for BumpAllocator {
                 Frame::containing_address(PhysicalAddress::new(address as usize))
             };
 
-            if end_frame > current_area_last_frame {
+            if end_frame > current_area_last_frame && min_end_frame > current_area_last_frame {
                 // all frames of current area are used, switch to next area
                 self.choose_next_area();
-            } else if (start_frame >= self.kernel_start && start_frame <= self.kernel_end)
+                return self.allocate_frames3(count, flags, strategy, min);
+            } else if partial_alloc && end_frame > current_area_last_frame {
+                end_frame = Frame { number: self.next_free_frame.number + (min - 1) };
+                actual_size = min;
+            }
+
+            if space32 && end_frame.start_address().get() + super::PAGE_SIZE > 0x1_0000_0000 {
+                // The bump allocator only advances and the memory map is sorted in
+                // ascending order, so once the current free range ends above
+                // 0xFFFF_FFFF, no later range can satisfy a 32-bit allocation.
+                // Return None here instead of calling choose_next_area(), which would
+                // skip the rest of this memory region for every later allocation.
+                return None;
+            }
+
+            if (start_frame >= self.kernel_start && start_frame <= self.kernel_end)
                     || (end_frame >= self.kernel_start && end_frame <= self.kernel_end) {
                 // `frame` is used by the kernel
                 self.next_free_frame = Frame {
                     number: self.kernel_end.number + 1
                 };
-            } else {
-                // frame is unused, increment `next_free_frame` and return it
-                self.next_free_frame.number += count;
-                return Some(start_frame);
+                // `frame` was not valid, try it again with the updated `next_free_frame`
+                return self.allocate_frames3(count, flags, strategy, min);
             }
-            // `frame` was not valid, try it again with the updated `next_free_frame`
-            self.allocate_frames(count)
+
+            // frame is unused, increment `next_free_frame` and return it
+            self.next_free_frame.number += actual_size;
+            return Some((start_frame, actual_size));
         } else {
-            None // no free frames left
+            None // no free memory areas left, and thus no frames left
         }
     }
 
diff --git a/src/memory/mod.rs b/src/memory/mod.rs
index 146ff4209b5d8eb6f3dbd25ec116cc282f0d4c81..bb45e939aef0c4cc69b4a0da5e43a241dc2ceb5d 100644
--- a/src/memory/mod.rs
+++ b/src/memory/mod.rs
@@ -7,6 +7,7 @@ use self::bump::BumpAllocator;
 use self::recycle::RecycleAllocator;
 
 use spin::Mutex;
+use syscall::{PartialAllocStrategy, PhysallocFlags};
 
 pub mod bump;
 pub mod recycle;
@@ -118,6 +119,30 @@ pub fn allocate_frames(count: usize) -> Option<Frame> {
         panic!("frame allocator not initialized");
     }
 }
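+
+/// Allocate a range of frames, with `flags` optionally restricting the result to
+/// 32-bit physical space and/or permitting a partial allocation of at least `min`
+/// frames. Returns the first frame and the number of frames actually allocated.
+///
+/// A hypothetical caller asking for up to 16 frames below 4 GiB, accepting as few
+/// as 4:
+///
+/// ```ignore
+/// let (frame, got) = allocate_frames_complex(
+///     16,
+///     PhysallocFlags::SPACE_32 | PhysallocFlags::PARTIAL_ALLOC,
+///     Some(PartialAllocStrategy::Greedy),
+///     4,
+/// ).expect("out of frames");
+/// assert!((4..=16).contains(&got));
+/// ```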
+pub fn allocate_frames_complex(count: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, min: usize) -> Option<(Frame, usize)> {
+    if let Some(ref mut allocator) = *ALLOCATOR.lock() {
+        allocator.allocate_frames3(count, flags, strategy, min)
+    } else {
+        panic!("frame allocator not initialized");
+    }
+}
 
 /// Deallocate a range of frames frame
 pub fn deallocate_frames(frame: Frame, count: usize) {
@@ -184,6 +209,17 @@ pub trait FrameAllocator {
     fn set_noncore(&mut self, noncore: bool);
     fn free_frames(&self) -> usize;
     fn used_frames(&self) -> usize;
-    fn allocate_frames(&mut self, size: usize) -> Option<Frame>;
+    fn allocate_frames(&mut self, size: usize) -> Option<Frame> {
+        self.allocate_frames2(size, PhysallocFlags::SPACE_64)
+    }
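+    /// As `allocate_frames`, but with allocation flags. The default implementation
+    /// forwards to `allocate_frames3` with partial allocation disabled (`min == size`).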
+    fn allocate_frames2(&mut self, size: usize, flags: PhysallocFlags) -> Option<Frame> {
+        self.allocate_frames3(size, flags, None, size).map(|(s, _)| s)
+    }
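+    /// The most general form: allocate `size` frames, or between `min` and `size`
+    /// frames when `PhysallocFlags::PARTIAL_ALLOC` is set, returning the first
+    /// frame and the number of frames actually allocated.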
+    fn allocate_frames3(&mut self, size: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, min: usize) -> Option<(Frame, usize)>;
     fn deallocate_frames(&mut self, frame: Frame, size: usize);
 }
diff --git a/src/memory/recycle.rs b/src/memory/recycle.rs
index 55a4c81f2b379d7b65c823eb517aad861228b47b..aac6bb901bb2634087d2fdb6e6f3e5558af4540a 100644
--- a/src/memory/recycle.rs
+++ b/src/memory/recycle.rs
@@ -4,13 +4,21 @@
 use alloc::vec::Vec;
 
 use crate::paging::PhysicalAddress;
-
 use super::{Frame, FrameAllocator};
 
+use syscall::{PartialAllocStrategy, PhysallocFlags};
+
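+/// A contiguous range of free physical frames: `count` page-sized frames starting
+/// at physical address `base`.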
+struct Range {
+    base: usize,
+    count: usize,
+}
+
 pub struct RecycleAllocator<T: FrameAllocator> {
     inner: T,
     noncore: bool,
-    free: Vec<(usize, usize)>,
+    free: Vec<Range>,
 }
 
 impl<T: FrameAllocator> RecycleAllocator<T> {
@@ -23,23 +31,19 @@ impl<T: FrameAllocator> RecycleAllocator<T> {
     }
 
     fn free_count(&self) -> usize {
-        let mut count = 0;
-        for free in self.free.iter() {
-            count += free.1;
-        }
-        count
+        self.free.iter().map(|range| range.count).sum()
     }
 
     fn merge(&mut self, address: usize, count: usize) -> bool {
         for i in 0 .. self.free.len() {
             let changed = {
                 let free = &mut self.free[i];
-                if address + count * 4096 == free.0 {
-                    free.0 = address;
-                    free.1 += count;
+                if address + count * super::PAGE_SIZE == free.base {
+                    free.base = address;
+                    free.count += count;
                     true
-                } else if free.0 + free.1 * 4096 == address {
-                    free.1 += count;
+                } else if free.base + free.count * super::PAGE_SIZE == address {
+                    free.count += count;
                     true
                 } else {
                     false
@@ -48,7 +52,7 @@ impl<T: FrameAllocator> RecycleAllocator<T> {
 
             if changed {
                 //TODO: Use do not use recursion
-                let (address, count) = self.free[i];
+                let Range { base: address, count } = self.free[i];
                 if self.merge(address, count) {
                     self.free.remove(i);
                 }
@@ -58,6 +62,52 @@ impl<T: FrameAllocator> RecycleAllocator<T> {
 
         false
     }
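+
+    /// Find the best free range for a request of `count` frames, returning
+    /// `(actual_size, index)` into `self.free` without modifying it.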
+    fn try_recycle(&mut self, count: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, min: usize) -> Option<(usize, usize)> {
+        let space32 = flags.contains(PhysallocFlags::SPACE_32);
+        let partial_alloc = flags.contains(PhysallocFlags::PARTIAL_ALLOC);
+
+        let mut actual_size = count;
+        let mut current_optimal_index = None;
+        let mut current_optimal: Option<&Range> = None;
+
+        for (free_range_index, free_range) in self.free.iter().enumerate() {
+            if space32 && free_range.base + free_range.count * super::PAGE_SIZE > 0x1_0000_0000 {
+                // We need a 32-bit physical address, and since the allocation is carved
+                // from the end of a free range, the whole range has to end below 4 GiB.
+                continue;
+            }
+
+            if free_range.count < count {
+                if partial_alloc && free_range.count >= min && matches!(strategy, Some(PartialAllocStrategy::Greedy)) {
+                    // The free range does not fit the entire requested range, but is still
+                    // at least as large as the minimum range. When using the "greedy"
+                    // strategy, we return immediately.
+                    current_optimal_index = Some(free_range_index);
+                    actual_size = free_range.count;
+                    break;
+                }
+
+                // Otherwise, the range has to fit the entire requested count.
+                continue;
+            }
+            if let Some(optimal) = current_optimal {
+                if free_range.count > optimal.count {
+                    // Prefer the smallest range that still fits, to keep fragmentation
+                    // as low as possible. On a tie, take the later entry: entries near
+                    // the end of the Vec are cheaper to remove.
+                    continue;
+                }
+            }
+
+            // This range fits and is the best candidate so far.
+            current_optimal_index = Some(free_range_index);
+            current_optimal = Some(free_range);
+        }
+        current_optimal_index.map(|idx| (actual_size, idx))
+    }
 }
 
 impl<T: FrameAllocator> FrameAllocator for RecycleAllocator<T> {
@@ -73,38 +123,27 @@ impl<T: FrameAllocator> FrameAllocator for RecycleAllocator<T> {
         self.inner.used_frames() - self.free_count()
     }
 
-    fn allocate_frames(&mut self, count: usize) -> Option<Frame> {
-        let mut small_i = None;
-        {
-            let mut small = (0, 0);
-            for i in 0..self.free.len() {
-                let free = self.free[i];
-                // Later entries can be removed faster
-                if free.1 >= count {
-                    if free.1 <= small.1 || small_i.is_none() {
-                        small_i = Some(i);
-                        small = free;
-                    }
-                }
-            }
-        }
+    fn allocate_frames3(&mut self, count: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, min: usize) -> Option<(Frame, usize)> {
+        // TODO: Cover all different strategies.
 
-        if let Some(i) = small_i {
+        if let Some((actual_size, free_range_idx_to_use)) = self.try_recycle(count, flags, strategy, min) {
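+            // Carve the allocation out of the end of the chosen range: shrink the
+            // range by `actual_size` frames and hand out the frames past its new end.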
             let (address, remove) = {
-                let free = &mut self.free[i];
-                free.1 -= count;
-                (free.0 + free.1 * 4096, free.1 == 0)
+                let free_range = &mut self.free[free_range_idx_to_use];
+                free_range.count -= actual_size;
+                (free_range.base + free_range.count * super::PAGE_SIZE, free_range.count == 0)
             };
 
             if remove {
-                self.free.remove(i);
+                self.free.remove(free_range_idx_to_use);
             }
 
             //println!("Restoring frame {:?}, {}", frame, count);
-            Some(Frame::containing_address(PhysicalAddress::new(address)))
+            Some((Frame::containing_address(PhysicalAddress::new(address)), actual_size))
         } else {
             //println!("No saved frames {}", count);
-            self.inner.allocate_frames(count)
+            self.inner.allocate_frames3(count, flags, strategy, min)
         }
     }
 
@@ -112,7 +151,7 @@ impl<T: FrameAllocator> FrameAllocator for RecycleAllocator<T> {
         if self.noncore {
             let address = frame.start_address().get();
             if ! self.merge(address, count) {
-                self.free.push((address, count));
+                self.free.push(Range { base: address, count });
             }
         } else {
             //println!("Could not save frame {:?}, {}", frame, count);
diff --git a/src/syscall/debug.rs b/src/syscall/debug.rs
index e62d034d2347dc402457a727ca938595b3fd99b4..8b42526bc19a0e51fa967b52bfed34bc9a095aa8 100644
--- a/src/syscall/debug.rs
+++ b/src/syscall/debug.rs
@@ -271,6 +271,10 @@ pub fn format_call(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) -
             "physalloc({})",
             b
         ),
+        SYS_PHYSALLOC3 => format!(
+            "physalloc3({}, {:#X}, {:#X})",
+            b, c, d
+        ),
         SYS_PHYSFREE => format!(
             "physfree({:#X}, {})",
             b,
diff --git a/src/syscall/driver.rs b/src/syscall/driver.rs
index e1655fdb514c1430972d4b321d2afc76a84a9ed0..7cf168db4f5a896566e36799527946912b68539a 100644
--- a/src/syscall/driver.rs
+++ b/src/syscall/driver.rs
@@ -1,11 +1,11 @@
 use crate::macros::InterruptStack;
-use crate::memory::{allocate_frames, deallocate_frames, Frame};
+use crate::memory::{allocate_frames_complex, deallocate_frames, Frame};
 use crate::paging::{ActivePageTable, PhysicalAddress, VirtualAddress};
 use crate::paging::entry::EntryFlags;
 use crate::context;
 use crate::context::memory::Grant;
 use crate::syscall::error::{Error, EFAULT, EINVAL, ENOMEM, EPERM, ESRCH, Result};
-use crate::syscall::flag::{PhysmapFlags, PHYSMAP_WRITE, PHYSMAP_WRITE_COMBINE, PHYSMAP_NO_CACHE};
+use crate::syscall::flag::{PhysallocFlags, PartialAllocStrategy, PhysmapFlags, PHYSMAP_WRITE, PHYSMAP_WRITE_COMBINE, PHYSMAP_NO_CACHE};
 
 fn enforce_root() -> Result<()> {
     let contexts = context::contexts();
@@ -30,12 +30,32 @@ pub fn iopl(level: usize, stack: &mut InterruptStack) -> Result<usize> {
     Ok(0)
 }
 
-pub fn inner_physalloc(size: usize) -> Result<usize> {
-    allocate_frames((size + 4095)/4096).ok_or(Error::new(ENOMEM)).map(|frame| frame.start_address().get())
+pub fn inner_physalloc(size: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, min: usize) -> Result<(usize, usize)> {
+    if flags.contains(PhysallocFlags::SPACE_32 | PhysallocFlags::SPACE_64) {
+        return Err(Error::new(EINVAL));
+    }
+    allocate_frames_complex((size + 4095) / 4096, flags, strategy, (min + 4095) / 4096)
+        .ok_or(Error::new(ENOMEM))
+        .map(|(frame, count)| (frame.start_address().get(), count * 4096))
 }
 pub fn physalloc(size: usize) -> Result<usize> {
     enforce_root()?;
-    inner_physalloc(size)
+    inner_physalloc(size, PhysallocFlags::SPACE_64, None, size).map(|(base, _)| base)
+}
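+/// Handle `SYS_PHYSALLOC3`. `flags_raw` packs the allocation flags together with
+/// the partial-allocation strategy bits (selected by `PARTIAL_ALLOC_STRATEGY_MASK`);
+/// on success, `*min` is overwritten with the number of bytes actually allocated.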
+pub fn physalloc3(size: usize, flags_raw: usize, min: &mut usize) -> Result<usize> {
+    enforce_root()?;
+    let flags = PhysallocFlags::from_bits(flags_raw & !syscall::PARTIAL_ALLOC_STRATEGY_MASK).ok_or(Error::new(EINVAL))?;
+    let strategy = if flags.contains(PhysallocFlags::PARTIAL_ALLOC) {
+        Some(PartialAllocStrategy::from_raw(flags_raw & syscall::PARTIAL_ALLOC_STRATEGY_MASK).ok_or(Error::new(EINVAL))?)
+    } else {
+        None
+    };
+    let (base, count) = inner_physalloc(size, flags, strategy, *min)?;
+    *min = count;
+    Ok(base)
 }
 
 pub fn inner_physfree(physical_address: usize, size: usize) -> Result<usize> {
diff --git a/src/syscall/mod.rs b/src/syscall/mod.rs
index de4014a0bccf6130ec6725ffd567d77b8506fe2e..bb01f60d53cc344cb49d110a64cad9af599e6849 100644
--- a/src/syscall/mod.rs
+++ b/src/syscall/mod.rs
@@ -152,6 +152,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u
                 SYS_SIGRETURN => sigreturn(),
                 SYS_PIPE2 => pipe2(validate_slice_mut(b as *mut usize, 2)?, c),
                 SYS_PHYSALLOC => physalloc(b),
+                SYS_PHYSALLOC3 => physalloc3(b, c, &mut validate_slice_mut(d as *mut usize, 1)?[0]),
                 SYS_PHYSFREE => physfree(b, c),
                 SYS_PHYSMAP => physmap(b, c, PhysmapFlags::from_bits_truncate(d)),
                 SYS_PHYSUNMAP => physunmap(b),
diff --git a/syscall b/syscall
index 1c637e72b2f3be8e8f942372e8414101e463df98..9ecdc11d73677477b37567a47af1633478093cbb 160000
--- a/syscall
+++ b/syscall
@@ -1 +1 @@
-Subproject commit 1c637e72b2f3be8e8f942372e8414101e463df98
+Subproject commit 9ecdc11d73677477b37567a47af1633478093cbb