Commit 3efc8d06 authored by 4lDO2 🖖, committed by Jeremy Soller

Add a more complex physalloc syscall.

parent 8d0015be
......@@ -251,7 +251,7 @@ pub fn open<T: AsRef<[u8]>>(path: T, flags: usize) -> Result<usize> {
unsafe { syscall3(SYS_OPEN, path.as_ref().as_ptr() as usize, path.as_ref().len(), flags) }
}
/// Allocate pages, linearly in physical memory
/// Allocate frames, linearly in physical memory.
///
/// # Errors
///
......@@ -261,6 +261,37 @@ pub unsafe fn physalloc(size: usize) -> Result<usize> {
syscall1(SYS_PHYSALLOC, size)
}
/// Allocate frames, linearly in physical memory, with an extra set of flags. If the flags
/// contain [`PARTIAL_ALLOC`], this behaves like calling [`physalloc3`] with `min = 1`.
///
/// Refer to the simpler [`physalloc`] and the more complex [`physalloc3`], which this
/// convenience function is based on.
///
/// # Errors
///
/// * `EPERM` - `uid != 0`
/// * `ENOMEM` - the system has run out of available memory
pub unsafe fn physalloc2(size: usize, flags: usize) -> Result<usize> {
let mut ret = 1usize;
physalloc3(size, flags, &mut ret)
}
/// Allocate frames, linearly in physical memory, with an extra set of flags. If the flags contain
/// [`PARTIAL_ALLOC`], the `min` parameter specifies the number of frames that have to be allocated
/// for this operation to succeed. The return value is the offset of the first frame, and `min` is
/// overwritten with the number of frames actually allocated.
///
/// Refer to the simpler [`physalloc`] and the convenience wrapper [`physalloc2`].
///
/// # Errors
///
/// * `EPERM` - `uid != 0`
/// * `ENOMEM` - the system has run out of available memory
/// * `EINVAL` - `min = 0`
pub unsafe fn physalloc3(size: usize, flags: usize, min: &mut usize) -> Result<usize> {
syscall3(SYS_PHYSALLOC3, size, flags, min as *mut usize as usize)
}
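// A minimal, hypothetical usage sketch (not part of this commit) showing how a caller might
// combine the new flags with `physalloc3`. The function name, the requested frame counts, and
// the assumption that `PhysallocFlags`/`PartialAllocStrategy` are importable from the crate
// root are illustrative only.
pub unsafe fn example_partial_physalloc() -> Result<(usize, usize)> {
    use crate::{PartialAllocStrategy, PhysallocFlags};
    // Optimistically ask for 64 frames, but succeed as long as at least 16 are available.
    let mut min = 16;
    let flags = PhysallocFlags::PARTIAL_ALLOC.bits() | PartialAllocStrategy::Optimal as usize;
    let base = physalloc3(64, flags, &mut min)?;
    // `base` is the address of the first frame; `min` now holds the count actually allocated.
    Ok((base, min))
}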
/// Free physically allocated pages
///
/// # Errors
......
......@@ -114,6 +114,70 @@ bitflags! {
const PHYSMAP_NO_CACHE = 0x0000_0004;
}
}
bitflags! {
/// Extra flags for [`physalloc2`] or [`physalloc3`].
///
/// [`physalloc2`]: ../call/fn.physalloc2.html
/// [`physalloc3`]: ../call/fn.physalloc3.html
pub struct PhysallocFlags: usize {
/// Only allocate memory within the 32-bit physical memory space. This is necessary for
/// some devices that do not support 64-bit memory addresses.
const SPACE_32 = 0x0000_0001;
/// The frames that will be allocated may reside anywhere in the 64-bit physical memory
/// space. This flag is redundant for the most part, except when overriding some other default.
const SPACE_64 = 0x0000_0002;
/// Do a "partial allocation", which means that not all of the frames specified in the
/// frame count `size` actually have to be allocated. This means that if the allocator was
/// unable to find a physical memory range large enough, it can instead return whatever
/// range it decides is optimal. Thus, instead of letting one driver get an expensive
/// 128MiB physical memory range when the physical memory has become fragmented, and
/// failing, it can instead be given a more optimal range. If the device supports
/// scatter-gather lists, then the driver only has to allocate more ranges, and the device
/// will do vectored I/O.
///
/// PARTIAL_ALLOC supports different allocation strategies, refer to
/// [`Optimal`], [`GreatestRange`].
///
/// [`Optimal`]: ./enum.PartialAllocStrategy.html
/// [`GreatestRange`]: ./enum.PartialAllocStrategy.html
const PARTIAL_ALLOC = 0x0000_0004;
}
}
/// The bitmask of the partial allocation strategy. Currently three different strategies are
/// supported. These bits are only reserved when [`PARTIAL_ALLOC`] is set.
pub const PARTIAL_ALLOC_STRATEGY_MASK: usize = 0x0003_0000;
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[repr(usize)]
pub enum PartialAllocStrategy {
/// The allocator decides the size of the memory range itself, based on e.g. the available
/// free ranges and other processes that require large physical memory chunks.
Optimal = 0x0001_0000,
/// The allocator returns the absolute greatest range it can find.
GreatestRange = 0x0002_0000,
/// The allocator returns the first range that fits the minimum count, without searching further.
Greedy = 0x0003_0000,
}
impl Default for PartialAllocStrategy {
fn default() -> Self {
Self::Optimal
}
}
impl PartialAllocStrategy {
pub fn from_raw(raw: usize) -> Option<Self> {
match raw {
0x0001_0000 => Some(Self::Optimal),
0x0002_0000 => Some(Self::GreatestRange),
0x0003_0000 => Some(Self::Greedy),
_ => None,
}
}
}
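// A hypothetical round-trip sketch (not part of this commit): the chosen strategy lives in the
// bits covered by `PARTIAL_ALLOC_STRATEGY_MASK`, so it can be recovered from the raw syscall
// flags. The test name is illustrative.
#[test]
fn partial_alloc_strategy_round_trip() {
    let raw = PhysallocFlags::PARTIAL_ALLOC.bits() | PartialAllocStrategy::GreatestRange as usize;
    let decoded = PartialAllocStrategy::from_raw(raw & PARTIAL_ALLOC_STRATEGY_MASK);
    assert_eq!(decoded, Some(PartialAllocStrategy::GreatestRange));
}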
// The top 48 bits of PTRACE_* are reserved, for now
......
use core::{mem, ptr};
use core::mem::{self, MaybeUninit};
use core::ops::{Deref, DerefMut};
use core::{ptr, slice};
use crate::Result;
use crate::{PartialAllocStrategy, PhysallocFlags};
struct PhysBox {
/// An RAII guard of a physical memory allocation. Currently all physically allocated memory is
/// page-aligned and takes up at least 4k of space (on x86_64).
#[derive(Debug)]
pub struct PhysBox {
address: usize,
size: usize
}
impl PhysBox {
fn new(size: usize) -> Result<PhysBox> {
/// Construct a PhysBox from an address and a size.
///
/// # Safety
/// This function is unsafe because, when dropped, `Self` must refer to a valid physical allocation.
pub unsafe fn from_raw_parts(address: usize, size: usize) -> Self {
Self {
address,
size,
}
}
/// Retrieve the byte address in physical memory of this allocation.
pub fn address(&self) -> usize {
self.address
}
/// Retrieve the size of this allocation, in bytes.
pub fn size(&self) -> usize {
self.size
}
/// Allocate physical memory that must reside in 32-bit space.
pub fn new_in_32bit_space(size: usize) -> Result<Self> {
Self::new_with_flags(size, PhysallocFlags::SPACE_32)
}
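/// Allocate physical memory with the given flags. The flags must not contain
/// `PhysallocFlags::PARTIAL_ALLOC`; use `new_partial_allocation` for partial allocations.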
pub fn new_with_flags(size: usize, flags: PhysallocFlags) -> Result<Self> {
assert!(!flags.contains(PhysallocFlags::PARTIAL_ALLOC));
let address = unsafe { crate::physalloc2(size, flags.bits())? };
Ok(Self {
address,
size,
})
}
/// "Partially" allocate physical memory, in the sense that the allocation may be smaller than
/// expected, but still with a minimum limit. This is particularly useful when the physical
/// memory space is fragmented, and a device supports scatter-gather I/O. In that case, the
/// driver can optimistically request e.g. 1 alloc of 1 MiB, with the minimum of 512 KiB. If
/// that first allocation only returns half the size, the driver can do another allocation
/// and then let the device use both buffers.
pub fn new_partial_allocation(size: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, mut min: usize) -> Result<Self> {
debug_assert!(!(flags.contains(PhysallocFlags::PARTIAL_ALLOC) && strategy.is_none()));
let address = unsafe { crate::physalloc3(size, flags.bits() | strategy.map(|s| s as usize).unwrap_or(0), &mut min)? };
Ok(Self {
address,
size: min,
})
}
pub fn new(size: usize) -> Result<Self> {
let address = unsafe { crate::physalloc(size)? };
Ok(PhysBox {
address: address,
size: size
Ok(Self {
address,
size,
})
}
}
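// A hypothetical usage sketch (not part of this commit), mirroring the scenario from the
// `new_partial_allocation` doc comment above: optimistically request 1 MiB, accepting as
// little as 512 KiB. The function name is illustrative.
fn example_partial_physbox() -> Result<PhysBox> {
    PhysBox::new_partial_allocation(
        1024 * 1024,                          // requested allocation size
        PhysallocFlags::PARTIAL_ALLOC,
        Some(PartialAllocStrategy::Optimal),
        512 * 1024,                           // smallest acceptable allocation
    )
}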
......@@ -26,52 +83,99 @@ impl Drop for PhysBox {
pub struct Dma<T: ?Sized> {
phys: PhysBox,
virt: *mut T
virt: *mut T,
}
impl<T> Dma<T> {
pub fn new(value: T) -> Result<Dma<T>> {
let phys = PhysBox::new(mem::size_of::<T>())?;
let virt = unsafe { crate::physmap(phys.address, phys.size, crate::PHYSMAP_WRITE)? } as *mut T;
unsafe { ptr::write(virt, value); }
pub fn from_physbox_uninit(phys: PhysBox) -> Result<Dma<MaybeUninit<T>>> {
let virt = unsafe { crate::physmap(phys.address, phys.size, crate::PHYSMAP_WRITE)? } as *mut MaybeUninit<T>;
Ok(Dma {
phys: phys,
virt: virt
phys,
virt,
})
}
pub fn from_physbox_zeroed(phys: PhysBox) -> Result<Dma<MaybeUninit<T>>> {
let this = Self::from_physbox_uninit(phys)?;
unsafe { ptr::write_bytes(this.virt as *mut MaybeUninit<u8>, 0, this.phys.size) }
Ok(this)
}
pub fn zeroed() -> Result<Dma<T>> {
let phys = PhysBox::new(mem::size_of::<T>())?;
let virt = unsafe { crate::physmap(phys.address, phys.size, crate::PHYSMAP_WRITE)? } as *mut T;
unsafe { ptr::write_bytes(virt as *mut u8, 0, phys.size); }
Ok(Dma {
phys: phys,
virt: virt
pub fn from_physbox(phys: PhysBox, value: T) -> Result<Self> {
let this = Self::from_physbox_uninit(phys)?;
Ok(unsafe {
ptr::write(this.virt, MaybeUninit::new(value));
this.assume_init()
})
}
pub fn new(value: T) -> Result<Self> {
let phys = PhysBox::new(mem::size_of::<T>())?;
Self::from_physbox(phys, value)
}
pub fn zeroed() -> Result<Dma<MaybeUninit<T>>> {
let phys = PhysBox::new(mem::size_of::<T>())?;
Self::from_physbox_zeroed(phys)
}
}
impl<T> Dma<MaybeUninit<T>> {
pub unsafe fn assume_init(self) -> Dma<T> {
let &Dma { phys: PhysBox { address, size }, virt } = &self;
mem::forget(self);
Dma {
phys: PhysBox { address, size },
virt: virt as *mut T,
}
}
}
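// A hypothetical usage sketch (not part of this commit): allocate a zero-initialised DMA
// region for a plain-data hardware structure, then convert it with `assume_init`.
// `ExampleCommandTable` and the function name are illustrative.
#[repr(C)]
struct ExampleCommandTable {
    entries: [u64; 32],
}

unsafe fn example_command_table() -> Result<Dma<ExampleCommandTable>> {
    // All-zero bytes are a valid `ExampleCommandTable`, so `assume_init` is sound here.
    let table = Dma::<ExampleCommandTable>::zeroed()?.assume_init();
    // A real driver would now program `table.physical()` into the device's registers.
    Ok(table)
}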
impl<T: ?Sized> Dma<T> {
pub fn physical(&self) -> usize {
self.phys.address
self.phys.address()
}
pub fn size(&self) -> usize {
self.phys.size()
}
pub fn phys(&self) -> &PhysBox {
&self.phys
}
}
impl<T> Dma<[T]> {
/// Creates a new DMA buffer with a size only known at runtime.
pub fn from_physbox_uninit_unsized(phys: PhysBox, len: usize) -> Result<Dma<[MaybeUninit<T>]>> {
let max_len = phys.size() / mem::size_of::<T>();
assert!(len <= max_len);
Ok(Dma {
virt: unsafe { slice::from_raw_parts_mut(crate::physmap(phys.address, phys.size, crate::PHYSMAP_WRITE)? as *mut MaybeUninit<T>, len) } as *mut [MaybeUninit<T>],
phys,
})
}
pub fn from_physbox_zeroed_unsized(phys: PhysBox, len: usize) -> Result<Dma<[MaybeUninit<T>]>> {
let this = Self::from_physbox_uninit_unsized(phys, len)?;
unsafe { ptr::write_bytes(this.virt as *mut MaybeUninit<u8>, 0, this.phys.size()) }
Ok(this)
}
/// Creates a new DMA buffer with a size only known at runtime.
/// # Safety
/// * `T` must be properly aligned.
/// * `T` must be valid as zeroed (i.e. no NonNull pointers).
pub unsafe fn zeroed_unsized(count: usize) -> Result<Self> {
let phys = PhysBox::new(mem::size_of::<T>() * count)?;
let virt_ptr = crate::physmap(phys.address, phys.size, crate::PHYSMAP_WRITE)? as *mut T;
ptr::write_bytes(virt_ptr, 0, count);
let virt = core::slice::from_raw_parts_mut(virt_ptr, count);
Ok(Self::from_physbox_zeroed_unsized(phys, count)?.assume_init())
}
}
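// A hypothetical usage sketch (not part of this commit): a runtime-sized, zero-initialised
// DMA ring built with `zeroed_unsized`. `ExampleDescriptor` is an illustrative name.
#[repr(C)]
struct ExampleDescriptor {
    addr: u64,
    len: u32,
    flags: u32,
}

unsafe fn example_descriptor_ring(count: usize) -> Result<Dma<[ExampleDescriptor]>> {
    // Zeroed memory is a valid `[ExampleDescriptor]`, satisfying `zeroed_unsized`'s contract.
    Dma::zeroed_unsized(count)
}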
impl<T> Dma<[MaybeUninit<T>]> {
pub unsafe fn assume_init(self) -> Dma<[T]> {
let &Dma { phys: PhysBox { address, size }, virt } = &self;
mem::forget(self);
Ok(Dma {
phys,
virt,
})
Dma {
phys: PhysBox { address, size },
virt: virt as *mut [T],
}
}
}
......
......@@ -59,6 +59,7 @@ pub const SYS_MPROTECT: usize = 125;
pub const SYS_MKNS: usize = 984;
pub const SYS_NANOSLEEP: usize = 162;
pub const SYS_PHYSALLOC: usize = 945;
pub const SYS_PHYSALLOC3: usize = 9453;
pub const SYS_PHYSFREE: usize = 946;
pub const SYS_PHYSMAP: usize = 947;
pub const SYS_PHYSUNMAP: usize = 948;
......