Commit b4746252 authored by Mike Kolupaev, committed by Dave Watson

folly::AtomicHashMap: fixed race between erase() and find()

Summary: advancePastEmpty() was called for all created iterators, but it only makes sense for begin(). For find() it is harmful: if the key has just been erased, the iterator advances past the emptied slot, so find() returns an iterator to the next occupied element instead of end(). I suspect the same race was possible for insert(), but I haven't tried to reproduce it.
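To see the race concretely, here is a minimal standalone sketch (not folly's code; TinyTable, buggyFind, and kEmpty are invented names that model the same open-addressing layout) of how skipping empty cells inside find() returns the wrong element when a concurrent erase() empties the probed slot:

#include <cassert>
#include <cstdint>
#include <vector>

// Minimal model of an open-addressing table whose find() wrongly
// skips empty cells, as the old iterator constructor did.
struct Cell { uint64_t key; uint64_t value; };
constexpr uint64_t kEmpty = 0;  // sentinel for an unoccupied cell

struct TinyTable {
  std::vector<Cell> cells;
  explicit TinyTable(size_t cap) : cells(cap, Cell{kEmpty, 0}) {}
  size_t slot(uint64_t k) const { return k % cells.size(); }

  // Buggy find(): if the probed cell is empty (e.g. just erased by
  // another thread), advancing to the next occupied cell returns an
  // unrelated element instead of "not found".
  const Cell* buggyFind(uint64_t k) const {
    size_t i = slot(k);
    while (i < cells.size() && cells[i].key == kEmpty) {
      ++i;  // advancePastEmpty() behavior -- correct only for begin()
    }
    return i < cells.size() ? &cells[i] : nullptr;
  }
};

int main() {
  TinyTable t(8);
  t.cells[t.slot(3)] = Cell{3, 3};
  t.cells[t.slot(4)] = Cell{4, 4};
  t.cells[t.slot(3)].key = kEmpty;      // a concurrent erase(3) lands here
  const Cell* c = t.buggyFind(3);       // the racing find(3)
  assert(c != nullptr && c->key == 4);  // "found" key 4 while looking up 3
  return 0;
}

With the fix below, the iterator constructor does nothing and the skip lives in an explicit advancePastEmpty() call that only begin() performs, so find() reports exactly the cell it probed.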

Test Plan: Added a test that reproduces the race; it fails without these changes.

Reviewed By: delong.j@fb.com

Subscribers: folly-diffs@, lovro

FB internal diff: D1751280

Tasks: 5841499

Signature: t1:1751280:1419107193:71311ff68d92d0a4dcf1941dacdfdc23c25255cc
parent cc42e3ad
@@ -353,15 +353,19 @@ struct AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::aha_iterator
   explicit aha_iterator(ContT* array, size_t offset)
     : aha_(array)
     , offset_(offset)
-  {
-    advancePastEmpty();
-  }
+  {}

   // Returns unique index that can be used with findAt().
   // WARNING: The following function will fail silently for hashtable
   //          with capacity > 2^32
   uint32_t getIndex() const { return offset_; }

+  void advancePastEmpty() {
+    while (offset_ < aha_->capacity_ && !isValid()) {
+      ++offset_;
+    }
+  }
+
  private:
   friend class AtomicHashArray;
   friend class boost::iterator_core_access;

@@ -379,12 +383,6 @@ struct AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::aha_iterator
     return aha_->cells_[offset_];
   }

-  void advancePastEmpty() {
-    while (offset_ < aha_->capacity_ && !isValid()) {
-      ++offset_;
-    }
-  }
-
   bool isValid() const {
     KeyT key = acquireLoadKey(aha_->cells_[offset_]);
     return key != aha_->kEmptyKey_ &&
@@ -183,9 +183,18 @@ class AtomicHashArray : boost::noncopyable {
   bool empty() const { return size() == 0; }

-  iterator begin() { return iterator(this, 0); }
+  iterator begin() {
+    iterator it(this, 0);
+    it.advancePastEmpty();
+    return it;
+  }
+  const_iterator begin() const {
+    const_iterator it(this, 0);
+    it.advancePastEmpty();
+    return it;
+  }
   iterator end() { return iterator(this, capacity_); }
-  const_iterator begin() const { return const_iterator(this, 0); }
   const_iterator end() const { return const_iterator(this, capacity_); }

   // See AtomicHashMap::findAt - access elements directly
@@ -370,9 +370,7 @@ struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::ahm_iterator
     : ahm_(ahm)
     , subMap_(subMap)
     , subIt_(subIt)
-  {
-    checkAdvanceToNextSubmap();
-  }
+  {}

   friend class boost::iterator_core_access;
@@ -318,17 +318,21 @@ class AtomicHashMap : boost::noncopyable {
   }

   iterator begin() {
-    return iterator(this, 0,
-      subMaps_[0].load(std::memory_order_relaxed)->begin());
-  }
-
-  iterator end() {
-    return iterator();
+    iterator it(this, 0,
+      subMaps_[0].load(std::memory_order_relaxed)->begin());
+    it.checkAdvanceToNextSubmap();
+    return it;
   }

   const_iterator begin() const {
-    return const_iterator(this, 0,
-      subMaps_[0].load(std::memory_order_relaxed)->begin());
+    const_iterator it(this, 0,
+      subMaps_[0].load(std::memory_order_relaxed)->begin());
+    it.checkAdvanceToNextSubmap();
+    return it;
+  }
+
+  iterator end() {
+    return iterator();
   }

   const_iterator end() const {
@@ -618,6 +618,44 @@ TEST(Ahm, atomic_hash_array_insert_race) {
   }
 }

+// Repro for T#5841499. Race between erase() and find() on the same key.
+TEST(Ahm, erase_find_race) {
+  const uint64_t limit = 10000;
+  AtomicHashMap<uint64_t, uint64_t> map(limit + 10);
+  std::atomic<uint64_t> key {1};
+
+  // Invariant: all values are equal to their keys.
+  // At any moment there is one or two consecutive keys in the map.
+
+  std::thread write_thread([&]() {
+    while (true) {
+      uint64_t k = ++key;
+      if (k > limit) {
+        break;
+      }
+      map.insert(k + 1, k + 1);
+      map.erase(k);
+    }
+  });
+
+  std::thread read_thread([&]() {
+    while (true) {
+      uint64_t k = key.load();
+      if (k > limit) {
+        break;
+      }
+
+      auto it = map.find(k);
+      if (it != map.end()) {
+        ASSERT_EQ(k, it->second);
+      }
+    }
+  });
+
+  read_thread.join();
+  write_thread.join();
+}
+
 // Repro for a bug when iterator didn't skip empty submaps.
 TEST(Ahm, iterator_skips_empty_submaps) {
   AtomicHashMap<uint64_t, uint64_t>::Config config;