Commit 52c2357f authored by John Hodge

Expand to support x86 and be testable

parent 4d96144a
@@ -3,3 +3,5 @@ name = "va_list"
version = "0.0.1"
authors = [ "John Hodge <tpg@mutabah.net>" ]
[dev-dependencies.va_list-helper]
path = "deps/helper/"
Rust implementation of C's `va_list` type
# Status
- x86-64 linux/ELF ABI (aka Itanium)
- x86-64 linux/ELF ABI (aka System-V) : Tested in the wild, works relatively well
- x86 linux/ELF ABI (sys-v) : Unit tested only
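# Example
A minimal usage sketch (the exported function name and its C caller are illustrative):

```rust
extern crate va_list;

// Matching C prototype: void print_ints_va(unsigned count, va_list args);
// The C side calls va_start and hands the va_list across the FFI boundary.
#[no_mangle]
pub extern "C" fn print_ints_va(count: u32, mut args: va_list::va_list) {
    for i in 0..count {
        println!("arg {} = {}", i, unsafe { args.get::<i32>() });
    }
}
```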
[package]
name = "va_list-helper"
version = "0.0.1"
authors = [ "John Hodge <tpg@mutabah.net>" ]
build = "build.rs"
[build-dependencies]
gcc = "*"
extern crate gcc;

// Compile the C helper (src/helper.c) into a static library that cargo links into this crate.
fn main() {
::gcc::compile_library("libva_list_test.a", &["src/helper.c"]);
}
/*
A quick and evil C file to convert a rust FFI va-args function into a call with va_list passed
*/
#include <stdio.h>
#include <stdarg.h>
extern void inbound(void *context, unsigned int count, va_list args);
void dispatch(void* context, unsigned int count, ...)
{
va_list args;
va_start(args, count);
inbound(context, count, args);
va_end(args);
}
// Stub file to make cargo happy
/*
*
*/
#[cfg(nightly)] use core::prelude::*;
#[cfg(nightly)] use core::{mem,ptr};
#[cfg(not(nightly))] use std::{mem,ptr};
use super::VaPrimitive;
#[allow(non_camel_case_types)]
pub struct va_list(*const u8);
impl va_list
{
pub unsafe fn get<T: VaPrimitive>(&mut self) -> T {
T::get(self)
}
// Read a raw value from the list
unsafe fn get_raw<T: 'static>(&mut self) -> T {
assert_eq!(self.0 as usize % mem::min_align_of::<T>(), 0);
let rv = ptr::read(self.0 as *const T);
self.0 = self.0.offset( mem::size_of::<T>() as isize );
rv
}
}
impl<T: 'static> VaPrimitive for *const T
{
unsafe fn get(list: &mut va_list) -> Self {
<usize>::get(list) as *const T
}
}
impl VaPrimitive for usize { unsafe fn get(l: &mut va_list) -> Self { l.get_raw() } }
impl VaPrimitive for isize { unsafe fn get(l: &mut va_list) -> Self { l.get_raw() } }
impl VaPrimitive for u64 { unsafe fn get(l: &mut va_list) -> Self { l.get_raw() } }
impl VaPrimitive for i64 { unsafe fn get(l: &mut va_list) -> Self { l.get_raw() } }
impl VaPrimitive for u32 { unsafe fn get(l: &mut va_list) -> Self { l.get_raw() } }
impl VaPrimitive for i32 { unsafe fn get(l: &mut va_list) -> Self { l.get_raw() } }
//impl VaPrimitive for u16 { unsafe fn get(l: &mut va_list) -> Self { l.get_raw() } }
//impl VaPrimitive for i16 { unsafe fn get(l: &mut va_list) -> Self { l.get_raw() } }
//impl VaPrimitive for u8 { unsafe fn get(l: &mut va_list) -> Self { l.get_raw() } }
//impl VaPrimitive for i8 { unsafe fn get(l: &mut va_list) -> Self { l.get_raw() } }
impl VaPrimitive for f64 { unsafe fn get(l: &mut va_list) -> Self { l.get_raw() } }
impl VaPrimitive for f32 { unsafe fn get(l: &mut va_list) -> Self { l.get_raw() } }
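// Standalone demo of the "read, then bump the cursor by the value's size"
// technique that get_raw() above relies on. The repr(C) struct is a stand-in
// for the caller's argument area; its layout is an illustrative assumption
// chosen so every read is properly aligned.
#[cfg(test)]
mod pointer_bump_demo {
    use std::{mem, ptr};

    #[repr(C)]
    struct DemoArgs {
        a: u32,
        b: u32,
        c: u64,
    }

    // Read a T at the cursor and advance it, mirroring va_list::get_raw().
    unsafe fn read_and_advance<T>(cursor: &mut *const u8) -> T {
        let value = ptr::read(*cursor as *const T);
        *cursor = (*cursor).offset(mem::size_of::<T>() as isize);
        value
    }

    #[test]
    fn walks_arguments_in_order() {
        let args = DemoArgs { a: 1, b: 2, c: 3 };
        let mut cursor = &args as *const DemoArgs as *const u8;
        unsafe {
            assert_eq!(read_and_advance::<u32>(&mut cursor), 1);
            assert_eq!(read_and_advance::<u32>(&mut cursor), 2);
            assert_eq!(read_and_advance::<u64>(&mut cursor), 3);
        }
    }
}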
#[cfg(nightly)] use core::prelude::*;
#[cfg(nightly)] use core::{mem,ptr};
#[cfg(not(nightly))] use std::{mem,ptr};
use super::VaPrimitive;
#[allow(non_camel_case_types)]
/// Core type as passed through the FFI
pub struct va_list(*mut VaListInner);
// /// Saves the state of the va_list, similar to va_copy
//impl Clone for va_list { fn clone(&self) -> Self { va_list(self.0) } }
#[repr(C)]
#[derive(Debug)]
#[allow(raw_pointer_derive)]
#[doc(hidden)]
pub struct VaListInner
{
gp_offset: u32,
fp_offset: u32,
overflow_arg_area: *const (),
reg_save_area: *const (),
}
impl va_list
{
/// Read a value from the va_list
///
/// Users should take care that they are reading the correct type
pub unsafe fn get<T: VaPrimitive>(&mut self) -> T {
//log_debug!("inner = {:p} {:?}", self.0, *self.0);
T::get(self)
}
fn inner(&mut self) -> &mut VaListInner {
// This pointer should be valid
unsafe { &mut *self.0 }
}
}
#[doc(hidden)]
impl VaListInner
{
fn check_space(&self, num_gp: u32, num_fp: u32) -> bool {
!(self.gp_offset > 48 - num_gp * 8 || self.fp_offset > 304 - num_fp * 16)
}
unsafe fn get_gp<T>(&mut self) -> T {
let n_gp = (mem::size_of::<T>()+7)/8;
assert!( self.check_space(n_gp as u32, 0) );
let rv = ptr::read( (self.reg_save_area as usize + self.gp_offset as usize) as *const _ );
self.gp_offset += (8*n_gp) as u32;
rv
}
unsafe fn get_overflow<T>(&mut self) -> T {
let align = mem::min_align_of::<T>();
// 7. Align overflow_arg_area upwards to a 16-byte boundary if the alignment
// needed by T exceeds 8 bytes
let addr = self.overflow_arg_area as usize;
if align > 8 {
if addr % 16 != 0 {
self.overflow_arg_area = ((addr + 15) & !(16-1)) as *const _;
}
}
else {
if addr % 8 != 0 {
self.overflow_arg_area = ((addr + 7) & !(8-1)) as *const _;
}
}
// 8. Fetch from the overflow area
let rv = ptr::read( self.overflow_arg_area as *const _ );
self.overflow_arg_area = ((self.overflow_arg_area as usize) + mem::size_of::<T>()) as *const _;
rv
}
}
impl<T: 'static> VaPrimitive for *const T
{
unsafe fn get(list: &mut va_list) -> Self {
<usize>::get(list) as *const T
}
}
macro_rules! impl_va_prim {
($u:ty, $s:ty) => {
impl VaPrimitive for $u {
unsafe fn get(list: &mut va_list) -> Self {
let inner = list.inner();
// See the ELF AMD64 ABI document for a description of how this should act
if ! inner.check_space(1, 0) {
inner.get_overflow()
}
else {
inner.get_gp()
}
}
}
impl VaPrimitive for $s {
unsafe fn get(list: &mut va_list) -> Self {
mem::transmute( <$u>::get(list) )
}
}
};
}
impl_va_prim!{ usize, isize }
impl_va_prim!{ u64, i64 }
impl_va_prim!{ u32, i32 }
impl_va_prim!{ u16, i16 }
impl_va_prim!{ u8, i8 }
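// A tiny standalone model (illustrative and simplified) of the gp_offset
// bookkeeping above. Constants follow the AMD64 SysV ABI: the register save
// area holds 6 general-purpose registers (6 * 8 = 48 bytes) followed by 16
// SSE registers (ending at offset 304), which is where the 48 and 304 in
// check_space() come from.
#[cfg(test)]
mod gp_offset_model {
    // Mirrors check_space(num_gp, 0): is there still room in the GP register area?
    fn uses_register_area(gp_offset: u32, num_gp: u32) -> bool {
        gp_offset <= 48 - num_gp * 8
    }

    #[test]
    fn integer_args_spill_after_six_gp_registers() {
        // For a call like `dispatch(context, count, a1, ..., a7)` the two named
        // arguments consume rdi and rsi, so va_start leaves gp_offset at 16.
        let mut gp_offset = 16u32;
        let mut from_registers = 0;
        let mut from_overflow = 0;
        for _ in 0..7 {
            if uses_register_area(gp_offset, 1) {
                gp_offset += 8;     // get_gp(): consume one 8-byte GP slot
                from_registers += 1;
            } else {
                from_overflow += 1; // get_overflow(): read from the caller's stack
            }
        }
        // rdx, rcx, r8 and r9 carry the first four variadic integers; the
        // remaining three are fetched from overflow_arg_area.
        assert_eq!((from_registers, from_overflow), (4, 3));
    }
}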
@@ -8,117 +8,26 @@
#![crate_type="lib"]
#![crate_name="va_list"]
#[cfg(nightly)] use core::prelude::*;
#[cfg(nightly)] use core::{mem,ptr};
#[cfg(not(nightly))] use std::{mem,ptr};
#[cfg(nightly)]
#[macro_use]
extern crate core;
#[allow(non_camel_case_types)]
/// Core type as passed through the FFI
pub struct va_list(*mut VaListInner);
/// Saves the state of the va_list, similar to va_copy
impl Copy for va_list {}
impl Clone for va_list { fn clone(&self) -> Self { *self } }
// x86_64 on unix platforms is _usually_ ELF.
#[cfg(target_arch="x86_64")] #[cfg(target_family="unix")]
#[path="impl-x86_64-elf.rs"] mod imp;
//// x86_64 on windows is special
//#[cfg(target_arch="x86_64")] #[cfg(target_family="windows")]
//#[path="impl-x86_64-elf.rs"] mod imp;
// x86+unix = cdecl
#[cfg(target_arch="x86")] #[cfg(target_family="unix")]
#[path="impl-x86-sysv.rs"] mod imp;
#[repr(C)]
#[derive(Debug)]
#[allow(raw_pointer_derive)]
#[doc(hidden)]
pub struct VaListInner
{
gp_offset: u32,
fp_offset: u32,
overflow_arg_area: *const (),
reg_save_area: *const (),
}
pub use imp::va_list;
/// Trait implemented on types that can be read from a va_list
pub trait VaPrimitive
pub trait VaPrimitive: 'static
{
#[doc(hidden)]
unsafe fn get(&mut VaListInner) -> Self;
}
impl va_list
{
/// Read a value from the va_list
///
/// Users should take care that they are reading the correct type
pub unsafe fn get<T: VaPrimitive>(&mut self) -> T {
//log_debug!("inner = {:p} {:?}", self.0, *self.0);
T::get(&mut *self.0)
}
unsafe fn get(&mut va_list) -> Self;
}
#[doc(hidden)]
impl VaListInner
{
fn check_space(&self, num_gp: u32, num_fp: u32) -> bool {
!(self.gp_offset > 48 - num_gp * 8 || self.fp_offset > 304 - num_fp * 16)
}
unsafe fn get_gp<T>(&mut self) -> T {
let n_gp = (mem::size_of::<T>()+7)/8;
assert!( self.check_space(n_gp as u32, 0) );
let rv = ptr::read( (self.reg_save_area as usize + self.gp_offset as usize) as *const _ );
self.gp_offset += (8*n_gp) as u32;
rv
}
unsafe fn get_overflow<T>(&mut self) -> T {
let align = mem::min_align_of::<T>();
// 7. Align overflow_arg_area upwards to a 16-byte boundary if the alignment
// needed by T exceeds 8 bytes
let addr = self.overflow_arg_area as usize;
if align > 8 {
if addr % 16 != 0 {
self.overflow_arg_area = ((addr + 15) & !(16-1)) as *const _;
}
}
else {
if addr % 8 != 0 {
self.overflow_arg_area = ((addr + 7) & !(8-1)) as *const _;
}
}
// 8. Fetch from the overflow area
let rv = ptr::read( self.overflow_arg_area as *const _ );
self.overflow_arg_area = ((self.overflow_arg_area as usize) + mem::size_of::<T>()) as *const _;
rv
}
}
impl<T> VaPrimitive for *const T
{
unsafe fn get(inner: &mut VaListInner) -> Self {
<usize>::get(inner) as *const T
}
}
macro_rules! impl_va_prim {
($u:ty, $s:ty) => {
impl VaPrimitive for $u {
unsafe fn get(inner: &mut VaListInner) -> Self {
// See the ELF AMD64 ABI document for a description of how this should act
if ! inner.check_space(1, 0) {
inner.get_overflow()
}
else {
inner.get_gp()
}
}
}
impl VaPrimitive for $s {
unsafe fn get(inner: &mut VaListInner) -> Self {
mem::transmute( <$u>::get(inner) )
}
}
};
}
impl_va_prim!{ usize, isize }
impl_va_prim!{ u64, i64 }
impl_va_prim!{ u32, i32 }
impl_va_prim!{ u16, i16 }
impl_va_prim!{ u8, i8 }
extern crate va_list;
extern "C" {
fn dispatch(context: *mut (), count: u32, ...);
}
type CbType<'a> = &'a mut FnMut(u32, va_list::va_list);
#[no_mangle]
/// Method called by 'dispatch'
pub extern "C" fn inbound(context: *mut (), count: u32, args: va_list::va_list) {
let cb_ptr = unsafe { ::std::ptr::read(context as *mut CbType ) };
// call passed closure
(cb_ptr)(count, args);
}
macro_rules! test_va_list {
($int:expr, ($($args:expr),*), $code:expr) => ({
let mut cb = $code;
let mut cb_ref: CbType = &mut cb;
unsafe {
dispatch(&mut cb_ref as *mut _ as *mut (), $int, $($args),*);
}
});
}
#[test]
fn trivial_values() {
// Trivial test: Pass four random-ish sized integers
test_va_list!(4, (123456u32, 2u64, 1i32, -23i64),
|_count, mut list: va_list::va_list| { unsafe {
assert_eq!( list.get::<u32>(), 123456u32 );
assert_eq!( list.get::<u64>(), 2u64 );
assert_eq!( list.get::<i32>(), 1i32 );
assert_eq!( list.get::<i64>(), -23i64 );
} });
}
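// A further sketch: pointers and usize round-trip through the same machinery,
// via the crate's `*const T` and `usize` impls. (Values here are arbitrary.)
#[test]
fn pointer_and_usize() {
    let msg = b"hello\0";
    let expected = msg.as_ptr();
    test_va_list!(2, (expected, 42usize),
        |_count, mut list: va_list::va_list| { unsafe {
            assert_eq!( list.get::<*const u8>(), expected );
            assert_eq!( list.get::<usize>(), 42usize );
        } });
}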