Commit df565f17 authored by Alex Hornby's avatar Alex Hornby

Fix for case where calling shrink_to_fit could make the map larger

Reproduced the issue by adding a new test, shrink_to_fit_after_insert, and checked that it failed before making any changes.

With the test in place fixes were:

1. Make CHashMap::reserve be the place the amortizing growth factor LENGTH_MULTIPLIER is applied, rather than Table::with_capacity ( non-CHashMap::reserve users of Table::with_capacity are requesting a specific capacity and just need load factor applied, not amortizing growth multiplier )

2. Apply load factor consistently in Table::with_capacity() vs CHashMap::capacity()

3. Apply MINIMUM_CAPACITY in CHashMap::capacity to stop the reported capacity from being below the requested capacity.

4. Add test known_size_with_capacity_matches_shrink to test the initial capacity+shrink case

Tested via cargo test
parent b580b4c0
......@@ -204,7 +204,8 @@ impl<K, V> Table<K, V> {
/// Create a table with at least some capacity.
fn with_capacity(cap: usize) -> Table<K, V> {
Table::new(cmp::max(MINIMUM_CAPACITY, cap * LENGTH_MULTIPLIER))
// The + 1 is needed to avoid losing a fractional bucket to integer division.
Table::new(cmp::max(MINIMUM_CAPACITY, cap * MAX_LOAD_FACTOR_DENOM / MAX_LOAD_FACTOR_NUM + 1))
}
}
......@@ -602,7 +603,7 @@ impl<K, V> CHashMap<K, V> {
///
/// The capacity is equal to the number of entries the table can hold before reallocating.
pub fn capacity(&self) -> usize {
self.buckets() * MAX_LOAD_FACTOR_NUM / MAX_LOAD_FACTOR_DENOM
cmp::max(MINIMUM_CAPACITY, self.buckets()) * MAX_LOAD_FACTOR_NUM / MAX_LOAD_FACTOR_DENOM
}
/// Get the number of buckets of the hash table.
......@@ -677,7 +678,7 @@ impl<K, V> CHashMap<K, V> {
}
}
}
impl<K: PartialEq + Hash, V> CHashMap<K, V> {
/// Get the value of some key.
///
......@@ -917,12 +918,12 @@ impl<K: PartialEq + Hash, V> CHashMap<K, V> {
/// in order make reallocation less common.
pub fn reserve(&self, additional: usize) {
// Get the new length.
let len = self.len() + additional;
let len = (self.len() + additional) * LENGTH_MULTIPLIER;
// Acquire the write lock (needed because we'll mess with the table).
let mut lock = self.table.write();
// Handle the case where another thread has resized the table while we were acquiring the
// lock.
if lock.buckets.len() < len * LENGTH_MULTIPLIER {
if lock.buckets.len() < len {
// Swap the table out with a new table of desired size (multiplied by some factor).
let table = mem::replace(&mut *lock, Table::with_capacity(len));
// Fill the new table with the data from the old table.
......
......@@ -12,6 +12,7 @@ use std::thread;
use std::cell::RefCell;
use std::sync::Arc;
use CHashMap;
use MAX_LOAD_FACTOR_DENOM;
#[test]
fn spam_insert() {
......@@ -621,6 +622,52 @@ fn find() {
}
}
#[test]
/// Constructing a map with a known capacity, filling it exactly, and then
/// shrinking it must never change the reported capacity at any step.
fn known_size_with_capacity_matches_shrink() {
    for i in 0..(MAX_LOAD_FACTOR_DENOM * 4) {
        // Setup a map where we know the number of entries we will store
        let m = CHashMap::with_capacity(i);
        let original_capacity = m.capacity();
        let buckets = m.buckets();
        // The requested capacity must be honored.
        assert!(i <= original_capacity, "Expected {} <= {} for {} buckets",
                i, original_capacity, buckets);
        for j in 0..i {
            m.insert(j, j);
        }
        // Make sure inserting didn't increase capacity given we already knew
        // number of entries planned on map construction
        let grown_capacity = m.capacity();
        assert_eq!(original_capacity, grown_capacity,
                   " for {} inserts", i);
        // Shrink it and check that capacity is the same
        m.shrink_to_fit();
        let shrunken_capacity = m.capacity();
        // Bug fix: the failure message previously printed `shrunken_capacity`
        // for BOTH placeholders, hiding the expected value; report each side
        // of the comparison once.
        assert_eq!(shrunken_capacity, original_capacity,
                   "Expected {} == {} ", shrunken_capacity, original_capacity);
    }
}
#[test]
/// Shrinking after a series of inserts must never report a capacity larger
/// than the capacity observed immediately before the shrink.
fn shrink_to_fit_after_insert() {
    for count in 0..(MAX_LOAD_FACTOR_DENOM * 4) {
        // Build a map by inserting `count` entries one at a time.
        let map = CHashMap::new();
        for key in 0..count {
            map.insert(key, key);
        }
        let before = map.capacity();

        // Shrinking is allowed to keep the capacity, but never to grow it.
        map.shrink_to_fit();
        let after = map.capacity();
        assert!(after <= before,
                "Unexpected capacity after shrink given {} inserts. Expected {} <= {}",
                count, after, before );
    }
}
#[test]
fn reserve_shrink_to_fit() {
let m = CHashMap::new();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment