Commit 1ead8f81 authored by Dan Robertson

Post merge updates

 - Enable floating point test on AMD64
 - Run formatter on code
parent 952bf100
@@ -26,46 +26,46 @@ impl VaList {
 #[doc(hidden)]
 impl VaListInner {
-    fn check_space(&self, num_gp: u32, num_fp: u32) -> bool {
-        !(self.gp_offset > 48 - num_gp * 8 || self.fp_offset > 304 - num_fp * 16)
-    }
+    fn check_space(&self, num_gp: u32, num_fp: u32) -> bool {
+        !(self.gp_offset > 48 - num_gp * 8 || self.fp_offset > 304 - num_fp * 16)
+    }
-    unsafe fn get_gp<T>(&mut self) -> T {
-        let n_gp = (mem::size_of::<T>()+7)/8;
-        assert!( self.check_space(n_gp as u32, 0) );
-        let rv = ptr::read( (self.reg_save_area as usize + self.gp_offset as usize) as *const _ );
-        self.gp_offset += (8*n_gp) as u32;
-        rv
-    }
+    unsafe fn get_gp<T>(&mut self) -> T {
+        let n_gp = (mem::size_of::<T>() + 7) / 8;
+        assert!(self.check_space(n_gp as u32, 0));
+        let rv = ptr::read((self.reg_save_area as usize + self.gp_offset as usize) as *const _);
+        self.gp_offset += (8 * n_gp) as u32;
+        rv
+    }
-    unsafe fn get_fp<T>(&mut self) -> T {
-        let n_fp = (mem::size_of::<T>()+15)/16;
-        assert!( self.check_space(0, n_fp as u32) );
-        let rv = ptr::read( (self.reg_save_area as usize + self.fp_offset as usize) as *const _ );
-        self.fp_offset += (16*n_fp) as u32;
-        rv
-    }
+    unsafe fn get_fp<T>(&mut self) -> T {
+        let n_fp = (mem::size_of::<T>() + 15) / 16;
+        assert!(self.check_space(0, n_fp as u32));
+        let rv = ptr::read((self.reg_save_area as usize + self.fp_offset as usize) as *const _);
+        self.fp_offset += (16 * n_fp) as u32;
+        rv
+    }
-    unsafe fn get_overflow<T>(&mut self) -> T {
-        let align = mem::align_of::<T>();
-        // 7. Align overflow_arg_area upwards to a 16-byte boundary if alignment
-        //    needed by T exceeds 8 bytes
-        let addr = self.overflow_arg_area as usize;
-        if align > 8 {
-            if addr % 16 != 0 {
-                self.overflow_arg_area = ((addr + 15) & !(16-1)) as *const _;
-            }
-        }
-        else {
-            if addr % 8 != 0 {
-                self.overflow_arg_area = ((addr + 7) & !(8-1)) as *const _;
-            }
-        }
-        // 8. Fetch from overflow area
-        let rv = ptr::read( self.overflow_arg_area as *const _ );
-        self.overflow_arg_area = ((self.overflow_arg_area as usize) + mem::size_of::<T>()) as *const _;
-        rv
-    }
+    unsafe fn get_overflow<T>(&mut self) -> T {
+        let align = mem::align_of::<T>();
+        // 7. Align overflow_arg_area upwards to a 16-byte boundary if alignment
+        //    needed by T exceeds 8 bytes
+        let addr = self.overflow_arg_area as usize;
+        if align > 8 {
+            if addr % 16 != 0 {
+                self.overflow_arg_area = ((addr + 15) & !(16 - 1)) as *const _;
+            }
+        } else {
+            if addr % 8 != 0 {
+                self.overflow_arg_area = ((addr + 7) & !(8 - 1)) as *const _;
+            }
+        }
+        // 8. Fetch from overflow area
+        let rv = ptr::read(self.overflow_arg_area as *const _);
+        self.overflow_arg_area =
+            ((self.overflow_arg_area as usize) + mem::size_of::<T>()) as *const _;
+        rv
+    }
 }
 impl<T: 'static> VaPrimitive for *const T {
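
The get_overflow change above is formatting only, but the expressions it touches are the stack-argument alignment step of the ABI's va_arg algorithm: (addr + 15) & !(16 - 1) rounds an address up to the next 16-byte boundary, and (addr + 7) & !(8 - 1) to the next 8-byte boundary. A minimal standalone sketch of that round-up arithmetic (the align_up helper is illustrative only, not part of this crate):

    // Round `addr` up to the next multiple of `align` (a power of two),
    // the same bit trick get_overflow uses inline above.
    fn align_up(addr: usize, align: usize) -> usize {
        (addr + align - 1) & !(align - 1)
    }

    fn main() {
        assert_eq!(align_up(0x1001, 8), 0x1008);  // align <= 8: next 8-byte boundary
        assert_eq!(align_up(0x1008, 16), 0x1010); // align > 8: next 16-byte boundary
        assert_eq!(align_up(0x1010, 16), 0x1010); // already aligned: unchanged
    }
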
@@ -102,14 +102,13 @@ impl_va_prim!{ u32, i32 }
 //impl_va_prim!{ u8, i8 }
 impl VaPrimitive for f64 {
-    unsafe fn get(list: &mut VaList) -> Self {
-        let inner = list.inner();
-        // See the ELF AMD64 ABI document for a description of how this should act
-        if ! inner.check_space(0, 1) {
-            inner.get_overflow()
-        }
-        else {
-            inner.get_fp()
-        }
-    }
+    unsafe fn get(list: &mut VaList) -> Self {
+        let inner = list.inner();
+        // See the ELF AMD64 ABI document for a description of how this should act
+        if !inner.check_space(0, 1) {
+            inner.get_overflow()
+        } else {
+            inner.get_fp()
+        }
+    }
 }
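
For context on the constants used by check_space and the f64 path above: in the System V AMD64 ABI's va_arg algorithm, gp_offset can grow to 48 (six 8-byte general-purpose register slots) and fp_offset to 304 (the end of the register save area, whose layout reserves sixteen 16-byte SSE slots after the GP slots); an argument that no longer fits is taken from the overflow area instead. A rough sketch restating those bounds, with illustrative names that are not part of the crate:

    // Illustrative constants per the System V AMD64 ABI register save area.
    const GP_AREA_END: u32 = 6 * 8;                 // 48: slots for %rdi..%r9
    const FP_AREA_END: u32 = GP_AREA_END + 16 * 16; // 304: slots for %xmm0..%xmm15

    // Mirrors VaListInner::check_space: the argument still fits in registers
    // if neither offset would run past the end of its save area.
    fn fits_in_registers(gp_offset: u32, fp_offset: u32, num_gp: u32, num_fp: u32) -> bool {
        !(gp_offset > GP_AREA_END - num_gp * 8 || fp_offset > FP_AREA_END - num_fp * 16)
    }

    fn main() {
        // An f64 occupies one SSE slot: at fp_offset 288 it still fits,
        // at 304 it must come from the overflow area.
        assert!(fits_in_registers(0, 288, 0, 1));
        assert!(!fits_in_registers(0, 304, 0, 1));
    }
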
@@ -42,7 +42,6 @@ fn trivial_values() {
 }
 #[test]
-#[cfg(not(all(target_arch = "x86_64", target_family = "unix")))] // TODO: Float on AMD64
 fn floating_point() {
     test_va_list!(
         4,
......
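
With the cfg gate removed, the floating-point test also runs on x86-64 unix targets, exercising the f64 register/overflow path from the first file. As a rough illustration of the calling pattern involved (a sketch only: the function name is hypothetical, the va_list::{VaList, VaPrimitive} import assumes both items are publicly exported, and the by-value VaList parameter assumes the callback pattern the crate is designed around; in practice the variadic side is written in C, which builds the va_list and passes it in):

    extern crate va_list;
    use va_list::{VaList, VaPrimitive};

    // Hypothetical C-callable helper, not the crate's test code.
    #[no_mangle]
    pub extern "C" fn sum_doubles(count: u32, mut args: VaList) -> f64 {
        let mut total = 0.0;
        for _ in 0..count {
            // Each f64 is read from the SSE register save area while space
            // remains, then from the stack overflow area, as implemented above.
            total += unsafe { <f64 as VaPrimitive>::get(&mut args) };
        }
        total
    }
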