Commit 83a22fdd authored by Dan Melnic, committed by Facebook Github Bot

Make ThreadEntry::elementsCapacity atomic

Summary: Make ThreadEntry::elementsCapacity atomic

Reviewed By: yfeldblum

Differential Revision: D8963304

fbshipit-source-id: 0b37bbe97475a400e9b574b8285bde8f38a479f5
parent acdd0d80
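
For context, the header-comment change below documents the access pattern that motivates this commit: elementsCapacity is written only by the owning thread (under the lock), but it is also read from other threads, and StaticMetaBase::head_'s capacity is read without the lock during reallocate. The sketch below is not folly code; it is a minimal, hypothetical illustration of the pattern the diff introduces: keep the capacity in a std::atomic and go through relaxed-ordering accessors, so the unlocked cross-thread reads are no longer data races while the loads stay as cheap as plain reads on common hardware.

#include <atomic>
#include <cstddef>

// Hypothetical stand-in for the relevant part of ThreadEntry (not the real
// folly type). The capacity lives in a std::atomic so a concurrent reader on
// another thread and the single writer never form a data race. Relaxed
// ordering suffices because callers either hold the meta lock or only need a
// point-in-time snapshot of the value; the atomic is about avoiding undefined
// behavior, not about ordering surrounding memory accesses.
struct EntrySketch {
  std::atomic<std::size_t> elementsCapacity{0};

  std::size_t getElementsCapacity() const noexcept {
    return elementsCapacity.load(std::memory_order_relaxed);
  }

  void setElementsCapacity(std::size_t capacity) noexcept {
    elementsCapacity.store(capacity, std::memory_order_relaxed);
  }
};
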
@@ -125,7 +125,8 @@ void StaticMetaBase::onThreadExit(void* ptr) {
       // mark it as removed
       threadEntry->removed_ = true;
       meta.erase(&(*threadEntry));
-      for (size_t i = 0u; i < threadEntry->elementsCapacity; ++i) {
+      auto elementsCapacity = threadEntry->getElementsCapacity();
+      for (size_t i = 0u; i < elementsCapacity; ++i) {
         threadEntry->elements[i].node.eraseZero();
       }
       // No need to hold the lock any longer; the ThreadEntry is private to this
@@ -137,7 +138,8 @@ void StaticMetaBase::onThreadExit(void* ptr) {
     // may be required.
     for (bool shouldRun = true; shouldRun;) {
       shouldRun = false;
-      FOR_EACH_RANGE (i, 0, threadEntry->elementsCapacity) {
+      auto elementsCapacity = threadEntry->getElementsCapacity();
+      FOR_EACH_RANGE (i, 0, elementsCapacity) {
         if (threadEntry->elements[i].dispose(TLPDestructionMode::THIS_THREAD)) {
           threadEntry->elements[i].cleanup();
           shouldRun = true;
@@ -169,7 +171,8 @@ void StaticMetaBase::onThreadExit(void* ptr) {
         }
         for (bool shouldRunInner = true; shouldRunInner;) {
           shouldRunInner = false;
-          FOR_EACH_RANGE (i, 0, tmp->elementsCapacity) {
+          auto elementsCapacity = tmp->getElementsCapacity();
+          FOR_EACH_RANGE (i, 0, elementsCapacity) {
             if (tmp->elements[i].dispose(TLPDestructionMode::THIS_THREAD)) {
               tmp->elements[i].cleanup();
               shouldRunInner = true;
@@ -191,7 +194,7 @@ void StaticMetaBase::onThreadExit(void* ptr) {
         if (tmp->elements) {
           free(tmp->elements);
           tmp->elements = nullptr;
-          tmp->elementsCapacity = 0;
+          tmp->setElementsCapacity(0);
         }

 #ifndef FOLLY_TLD_USE_FOLLY_TLS
@@ -207,7 +210,7 @@ void StaticMetaBase::onThreadExit(void* ptr) {
 uint32_t StaticMetaBase::elementsCapacity() const {
   ThreadEntry* threadEntry = (*threadEntry_)();

-  return FOLLY_LIKELY(!!threadEntry) ? threadEntry->elementsCapacity : 0;
+  return FOLLY_LIKELY(!!threadEntry) ? threadEntry->getElementsCapacity() : 0;
 }

 uint32_t StaticMetaBase::allocate(EntryID* ent) {
@@ -268,8 +271,8 @@ void StaticMetaBase::destroy(EntryID* ent) {
         next->eraseZero();

         ThreadEntry* e = next->parent;
-        if (id < e->elementsCapacity && e->elements[id].ptr) {
+        auto elementsCapacity = e->getElementsCapacity();
+        if (id < elementsCapacity && e->elements[id].ptr) {
           elements.push_back(e->elements[id]);

           /*
@@ -306,15 +309,16 @@ ElementWrapper* StaticMetaBase::reallocate(
     ThreadEntry* threadEntry,
     uint32_t idval,
     size_t& newCapacity) {
-  size_t prevCapacity = threadEntry->elementsCapacity;
+  size_t prevCapacity = threadEntry->getElementsCapacity();

   // Growth factor < 2, see folly/docs/FBVector.md; + 5 to prevent
   // very slow start.
   auto smallCapacity = static_cast<size_t>((idval + 5) * kSmallGrowthFactor);
   auto bigCapacity = static_cast<size_t>((idval + 5) * kBigGrowthFactor);

-  newCapacity = (threadEntry->meta &&
-                 (bigCapacity <= threadEntry->meta->head_.elementsCapacity))
+  newCapacity =
+      (threadEntry->meta &&
+       (bigCapacity <= threadEntry->meta->head_.getElementsCapacity()))
       ? bigCapacity
       : smallCapacity;
@@ -377,7 +381,7 @@ ElementWrapper* StaticMetaBase::reallocate(
 void StaticMetaBase::reserve(EntryID* id) {
   auto& meta = *this;
   ThreadEntry* threadEntry = (*threadEntry_)();
-  size_t prevCapacity = threadEntry->elementsCapacity;
+  size_t prevCapacity = threadEntry->getElementsCapacity();

   uint32_t idval = id->getOrAllocate(meta);
   if (prevCapacity > idval) {
@@ -415,15 +419,15 @@ void StaticMetaBase::reserve(EntryID* id) {
       threadEntry->elements[i].node.initZero(threadEntry, i);
     }

-    threadEntry->elementsCapacity = newCapacity;
+    threadEntry->setElementsCapacity(newCapacity);
   }

   free(reallocated);
 }

 void StaticMetaBase::reserveHeadUnlocked(uint32_t id) {
-  if (head_.elementsCapacity <= id) {
-    size_t prevCapacity = head_.elementsCapacity;
+  if (head_.getElementsCapacity() <= id) {
+    size_t prevCapacity = head_.getElementsCapacity();
     size_t newCapacity;
     ElementWrapper* reallocated = reallocate(&head_, id, newCapacity);
@@ -439,7 +443,7 @@ void StaticMetaBase::reserveHeadUnlocked(uint32_t id) {
       head_.elements[i].node.init(&head_, i);
     }

-    head_.elementsCapacity = newCapacity;
+    head_.setElementsCapacity(newCapacity);
     free(reallocated);
   }
 }
@@ -203,16 +203,26 @@ struct ThreadEntryList;
  * This is written from the owning thread only (under the lock), read
  * from the owning thread (no lock necessary), and read from other threads
  * (under the lock).
+ * StaticMetaBase::head_ elementsCapacity can be read from any thread on
+ * reallocate (no lock)
  */
 struct ThreadEntry {
   ElementWrapper* elements{nullptr};
-  size_t elementsCapacity{0};
+  std::atomic<size_t> elementsCapacity{0};
   ThreadEntry* next{nullptr};
   ThreadEntry* prev{nullptr};
   ThreadEntryList* list{nullptr};
   ThreadEntry* listNext{nullptr};
   StaticMetaBase* meta{nullptr};
   bool removed_{false};
+
+  size_t getElementsCapacity() const noexcept {
+    return elementsCapacity.load(std::memory_order_relaxed);
+  }
+
+  void setElementsCapacity(size_t capacity) noexcept {
+    elementsCapacity.store(capacity, std::memory_order_relaxed);
+  }
 };

 struct ThreadEntryList {
@@ -448,11 +458,11 @@ struct StaticMeta : StaticMetaBase {
       size_t& capacity) {
     auto& inst = instance();
     threadEntry = inst.threadEntry_();
-    if (UNLIKELY(threadEntry->elementsCapacity <= id)) {
+    if (UNLIKELY(threadEntry->getElementsCapacity() <= id)) {
       inst.reserve(ent);
       id = ent->getOrInvalid();
     }
-    capacity = threadEntry->elementsCapacity;
+    capacity = threadEntry->getElementsCapacity();
     assert(capacity > id);
   }
@@ -505,12 +515,14 @@ struct StaticMeta : StaticMetaBase {
     // init the head list
     head.next = head.prev = &head;
     // init the circular lists
-    for (size_t i = 0u; i < head.elementsCapacity; ++i) {
+    auto elementsCapacity = head.getElementsCapacity();
+    for (size_t i = 0u; i < elementsCapacity; ++i) {
       head.elements[i].node.init(&head, static_cast<uint32_t>(i));
     }
     // init the thread entry
     ThreadEntry* threadEntry = instance().threadEntry_();
-    for (size_t i = 0u; i < threadEntry->elementsCapacity; ++i) {
+    elementsCapacity = threadEntry->getElementsCapacity();
+    for (size_t i = 0u; i < elementsCapacity; ++i) {
       if (!threadEntry->elements[i].node.zero()) {
         threadEntry->elements[i].node.initZero(
             threadEntry, static_cast<uint32_t>(i));
@@ -519,7 +531,7 @@ struct StaticMeta : StaticMetaBase {
     }

     // If this thread was in the list before the fork, add it back.
-    if (threadEntry->elementsCapacity != 0) {
+    if (elementsCapacity != 0) {
       instance().push_back(threadEntry);
     }
     instance().lock_.unlock();
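
The .cpp hunks above all follow the same mechanical pattern: take one relaxed snapshot of the capacity into a local before a loop, rather than re-reading the member on every iteration. A short usage sketch against the hypothetical EntrySketch type from the example near the top (per-element details elided):

// Snapshot the capacity once, then loop over that snapshot. One relaxed load
// per call keeps the loop body free of atomic operations, and the local copy
// makes the loop bound explicit to the optimizer.
void visitAll(EntrySketch& entry) {
  auto elementsCapacity = entry.getElementsCapacity();
  for (std::size_t i = 0; i < elementsCapacity; ++i) {
    // ... per-element work (dispose/cleanup in the real code) ...
  }
}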