Commit 1d067e51 authored by Tom Almeida's avatar Tom Almeida
Browse files

Merge branch 'fix_unaligned_reads' into 'master'

Fix: use read_unaligned instead of dereferencing pointers

See merge request !5
parents db709305 27ec8280
Pipeline #8762 failed with stage
in 9 seconds
...@@ -285,6 +285,7 @@ mod tests { ...@@ -285,6 +285,7 @@ mod tests {
} }
#[test] #[test]
#[cfg_attr(miri, ignore)] // very slow to run on miri
fn zero() { fn zero() {
let arr = [0; 4096]; let arr = [0; 4096];
for n in 0..4096 { for n in 0..4096 {
......
...@@ -16,34 +16,34 @@ pub fn read_int(buf: &[u8]) -> u64 { ...@@ -16,34 +16,34 @@ pub fn read_int(buf: &[u8]) -> u64 {
// u8. // u8.
1 => *ptr as u64, 1 => *ptr as u64,
// u16. // u16.
2 => (*(ptr as *const u16)).to_le() as u64, 2 => (ptr as *const u16).read_unaligned().to_le() as u64,
// u16 + u8. // u16 + u8.
3 => { 3 => {
let a = (*(ptr as *const u16)).to_le() as u64; let a = (ptr as *const u16).read_unaligned().to_le() as u64;
let b = *ptr.offset(2) as u64; let b = *ptr.offset(2) as u64;
a | (b << 16) a | (b << 16)
}, },
// u32. // u32.
4 => (*(ptr as *const u32)).to_le() as u64, 4 => (ptr as *const u32).read_unaligned().to_le() as u64,
// u32 + u8. // u32 + u8.
5 => { 5 => {
let a = (*(ptr as *const u32)).to_le() as u64; let a = (ptr as *const u32).read_unaligned().to_le() as u64;
let b = *ptr.offset(4) as u64; let b = *ptr.offset(4) as u64;
a | (b << 32) a | (b << 32)
}, },
// u32 + u16. // u32 + u16.
6 => { 6 => {
let a = (*(ptr as *const u32)).to_le() as u64; let a = (ptr as *const u32).read_unaligned().to_le() as u64;
let b = (*(ptr.offset(4) as *const u16)).to_le() as u64; let b = (ptr.offset(4) as *const u16).read_unaligned().to_le() as u64;
a | (b << 32) a | (b << 32)
}, },
// u32 + u16 + u8. // u32 + u16 + u8.
7 => { 7 => {
let a = (*(ptr as *const u32)).to_le() as u64; let a = (ptr as *const u32).read_unaligned().to_le() as u64;
let b = (*(ptr.offset(4) as *const u16)).to_le() as u64; let b = (ptr.offset(4) as *const u16).read_unaligned().to_le() as u64;
let c = *ptr.offset(6) as u64; let c = *ptr.offset(6) as u64;
a | (b << 32) | (c << 48) a | (b << 32) | (c << 48)
...@@ -60,12 +60,15 @@ pub unsafe fn read_u64(ptr: *const u8) -> u64 { ...@@ -60,12 +60,15 @@ pub unsafe fn read_u64(ptr: *const u8) -> u64 {
{ {
// We cannot be sure about the memory layout of a potentially emulated 64-bit integer, so // We cannot be sure about the memory layout of a potentially emulated 64-bit integer, so
// we read it manually. If possible, the compiler should emit proper instructions. // we read it manually. If possible, the compiler should emit proper instructions.
(*(ptr as *const u32)).to_le() as u64 | ((*(ptr.offset(4) as *const u32)).to_le() as u64) << 32 let a = (ptr as *const u32).read_unaligned().to_le();
let b = (ptr.offset(4) as *const u32).read_unaligned().to_le();
a as u64 | ((b as u64) << 32)
} }
#[cfg(target_pointer_width = "64")] #[cfg(target_pointer_width = "64")]
{ {
(*(ptr as *const u64)).to_le() (ptr as *const u64).read_unaligned().to_le()
} }
} }
......
...@@ -36,6 +36,7 @@ fn hash_chunking_vs_not() { ...@@ -36,6 +36,7 @@ fn hash_chunking_vs_not() {
assert_eq!(hash1, hash2); assert_eq!(hash1, hash2);
assert_eq!(hash1, reference); assert_eq!(hash1, reference);
assert_eq!(hash1, buffer); assert_eq!(hash1, buffer);
assert_eq!(hash1, 0xa06e72e1b06144a0);
} }
#[test] #[test]
......
...@@ -11,6 +11,7 @@ use std::hash::Hasher; ...@@ -11,6 +11,7 @@ use std::hash::Hasher;
use std::num::{NonZeroUsize, NonZeroU8}; use std::num::{NonZeroUsize, NonZeroU8};
quickcheck! { quickcheck! {
#[cfg_attr(miri, ignore)] // very slow to run on miri
fn chunked_matches_buffered(xs: Vec<u8>, chunk_size: NonZeroUsize, times: NonZeroU8, additional: u8) -> TestResult { fn chunked_matches_buffered(xs: Vec<u8>, chunk_size: NonZeroUsize, times: NonZeroU8, additional: u8) -> TestResult {
let target_size = xs.len() * times.get() as usize + additional as usize; let target_size = xs.len() * times.get() as usize + additional as usize;
if xs.is_empty() || target_size > 10_000_000 { if xs.is_empty() || target_size > 10_000_000 {
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment