Commit 791b61f0 authored by Anton Likhtarov, committed by Facebook Github Bot

Fibers: allow more than one guard page per stack

Summary: Guard pages are cheap (virtual memory plus mapping state only). More than one is occasionally useful: for example, with a 10k object on the stack that is never touched, subsequent function calls will create a frame that jumps over a single 4K guard page, and we get hard-to-debug memory corruption instead of a clean segfault.
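
A minimal sketch (not part of this commit) of the failure mode the summary describes, assuming a fiber stack that is nearly exhausted with a single 4 KiB guard page below it; the function and variable names are hypothetical:

// Sketch of the "frame jumps over the guard page" scenario.
#include <cstring>

void callee() {
  // Because the caller reserved ~10 KiB it never touches, this frame can be
  // placed entirely below a single 4 KiB guard page.  Writes here would then
  // silently corrupt whatever is mapped past the guard page instead of
  // triggering a clean segfault.
  char frame[256];
  std::memset(frame, 0, sizeof(frame));
}

void caller() {
  char big[10000]; // large local that is never written, so the guard page
  (void)big;       // is never touched by the caller itself
  callee();        // the stack pointer skips over a single guard page
}

int main() {
  caller(); // harmless here; illustrates the stack layout, not a real crash
  return 0;
}

With several guard pages per stack, the skipped region is still backed by PROT_NONE pages, so the callee's first write faults immediately.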

Reviewed By: andriigrynenko

Differential Revision: D15367640

fbshipit-source-id: 8f1d42a71926653929dd847eeba172c680f98589
parent 74588469
@@ -577,7 +577,7 @@ FiberManager::FiberManager(
     std::unique_ptr<LoopController> loopController__,
     Options options)
     : loopController_(std::move(loopController__)),
-      stackAllocator_(options.useGuardPages),
+      stackAllocator_(options.guardPagesPerStack),
       options_(preprocessOptions(std::move(options))),
       exceptionCallback_([](std::exception_ptr eptr, std::string context) {
         try {
...
@@ -110,9 +110,9 @@ class FiberManager : public ::folly::Executor {
     size_t maxFibersPoolSize{1000};

     /**
-     * Protect limited amount of fiber stacks with guard pages.
+     * Protect a small number of fiber stacks with this many guard pages.
      */
-    bool useGuardPages{true};
+    size_t guardPagesPerStack{1};

     /**
      * Free unnecessary fibers in the fibers pool every fibersPoolResizePeriodMs
@@ -129,7 +129,7 @@ class FiberManager : public ::folly::Executor {
           stackSizeMultiplier,
           recordStackEvery,
           maxFibersPoolSize,
-          useGuardPages,
+          guardPagesPerStack,
           fibersPoolResizePeriodMs);
     }
   };
...
@@ -59,7 +59,9 @@ constexpr size_t kMaxInUse = 100;
  */
 class StackCache {
  public:
-  explicit StackCache(size_t stackSize) : allocSize_(allocSize(stackSize)) {
+  explicit StackCache(size_t stackSize, size_t guardPagesPerStack)
+      : allocSize_(allocSize(stackSize, guardPagesPerStack)),
+        guardPagesPerStack_(guardPagesPerStack) {
     auto p = ::mmap(
         nullptr,
         allocSize_ * kNumGuarded,
@@ -82,15 +84,17 @@ class StackCache {
     assert(storage_);

-    auto as = allocSize(size);
+    auto as = allocSize(size, guardPagesPerStack_);
     if (as != allocSize_ || freeList_.empty()) {
       return nullptr;
     }

     auto p = freeList_.back().first;
     if (!freeList_.back().second) {
-      PCHECK(0 == ::mprotect(p, pagesize(), PROT_NONE));
-      protectedPages().wlock()->insert(reinterpret_cast<intptr_t>(p));
+      PCHECK(0 == ::mprotect(p, pagesize() * guardPagesPerStack_, PROT_NONE));
+      protectedRanges().wlock()->insert(std::make_pair(
+          reinterpret_cast<intptr_t>(p),
+          reinterpret_cast<intptr_t>(p + pagesize() * guardPagesPerStack_)));
     }
     freeList_.pop_back();
@@ -106,7 +110,7 @@ class StackCache {
        limit -^
     */
     auto limit = p + allocSize_ - size;
-    assert(limit >= p + pagesize());
+    assert(limit >= p + pagesize() * guardPagesPerStack_);
     return limit;
   }
@@ -115,7 +119,7 @@ class StackCache {
     assert(storage_);

-    auto as = allocSize(size);
+    auto as = allocSize(size, guardPagesPerStack_);
     if (std::less_equal<void*>{}(limit, storage_) ||
         std::less_equal<void*>{}(storage_ + allocSize_ * kNumGuarded, limit)) {
       /* not mine */
@@ -131,9 +135,12 @@ class StackCache {
   ~StackCache() {
     assert(storage_);
-    protectedPages().withWLock([&](auto& pages) {
+    protectedRanges().withWLock([&](auto& ranges) {
       for (const auto& item : freeList_) {
-        pages.erase(reinterpret_cast<intptr_t>(item.first));
+        ranges.erase(std::make_pair(
+            reinterpret_cast<intptr_t>(item.first),
+            reinterpret_cast<intptr_t>(
+                item.first + pagesize() * guardPagesPerStack_)));
       }
     });
     PCHECK(0 == ::munmap(storage_, allocSize_ * kNumGuarded));
@@ -141,10 +148,9 @@ class StackCache {
   static bool isProtected(intptr_t addr) {
     // Use a read lock for reading.
-    return protectedPages().withRLock([&](auto const& pages) {
-      for (const auto& page : pages) {
-        intptr_t pageEnd = intptr_t(page + pagesize());
-        if (page <= addr && addr < pageEnd) {
+    return protectedRanges().withRLock([&](auto const& ranges) {
+      for (const auto& range : ranges) {
+        if (range.first <= addr && addr < range.second) {
           return true;
         }
       }
@@ -155,7 +161,8 @@ class StackCache {
  private:
   folly::SpinLock lock_;
   unsigned char* storage_{nullptr};
-  size_t allocSize_{0};
+  const size_t allocSize_{0};
+  const size_t guardPagesPerStack_{0};

   /**
    * LIFO free list. Each pair contains stack pointer and protected flag.
@@ -167,14 +174,20 @@ class StackCache {
     return pagesize;
   }

-  /* Returns a multiple of pagesize() enough to store size + one guard page */
-  static size_t allocSize(size_t size) {
-    return pagesize() * ((size + pagesize() - 1) / pagesize() + 1);
+  /**
+   * Returns a multiple of pagesize() enough to store size + a few guard pages
+   */
+  static size_t allocSize(size_t size, size_t guardPages) {
+    return pagesize() * ((size + pagesize() * guardPages - 1) / pagesize() + 1);
   }

-  static folly::Synchronized<std::unordered_set<intptr_t>>& protectedPages() {
-    static auto instance =
-        new folly::Synchronized<std::unordered_set<intptr_t>>();
+  /**
+   * For each [b, e) range in this set, the bytes in the range were mprotected.
+   */
+  static folly::Synchronized<std::unordered_set<std::pair<intptr_t, intptr_t>>>&
+  protectedRanges() {
+    static auto instance = new folly::Synchronized<
+        std::unordered_set<std::pair<intptr_t, intptr_t>>>();
     return *instance;
   }
 };
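
For illustration only (not part of the diff), the reworked allocSize() helper above rounds the requested stack size up to a page multiple and then reserves guardPages extra pages. A standalone sketch of the same arithmetic, assuming a 4 KiB page size:

#include <cassert>
#include <cstddef>

// Same formula as the allocSize() helper above, reproduced standalone with an
// assumed 4 KiB page size.
constexpr size_t kPageSize = 4096;

constexpr size_t allocSize(size_t size, size_t guardPages) {
  return kPageSize * ((size + kPageSize * guardPages - 1) / kPageSize + 1);
}

int main() {
  // 16 KiB stack + 1 guard page -> 5 pages (20 KiB)
  assert(allocSize(16 * 1024, 1) == 5 * kPageSize);
  // 16 KiB stack + 2 guard pages -> 6 pages (24 KiB)
  assert(allocSize(16 * 1024, 2) == 6 * kPageSize);
  // Non-page-multiple sizes round up: 10000 bytes + 2 guard pages -> 5 pages
  assert(allocSize(10000, 2) == 5 * kPageSize);
  return 0;
}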
@@ -240,11 +253,13 @@ class CacheManager {
     return *inst;
   }

-  std::unique_ptr<StackCacheEntry> getStackCache(size_t stackSize) {
+  std::unique_ptr<StackCacheEntry> getStackCache(
+      size_t stackSize,
+      size_t guardPagesPerStack) {
     std::lock_guard<folly::SpinLock> lg(lock_);
     if (inUse_ < kMaxInUse) {
       ++inUse_;
-      return std::make_unique<StackCacheEntry>(stackSize);
+      return std::make_unique<StackCacheEntry>(stackSize, guardPagesPerStack);
     }

     return nullptr;
@@ -275,8 +290,9 @@ class CacheManager {
  */
 class StackCacheEntry {
  public:
-  explicit StackCacheEntry(size_t stackSize)
-      : stackCache_(std::make_unique<StackCache>(stackSize)) {}
+  explicit StackCacheEntry(size_t stackSize, size_t guardPagesPerStack)
+      : stackCache_(
+            std::make_unique<StackCache>(stackSize, guardPagesPerStack)) {}

   StackCache& cache() const noexcept {
     return *stackCache_;
@@ -290,8 +306,8 @@ class StackCacheEntry {
   std::unique_ptr<StackCache> stackCache_;
 };

-GuardPageAllocator::GuardPageAllocator(bool useGuardPages)
-    : useGuardPages_(useGuardPages) {
+GuardPageAllocator::GuardPageAllocator(size_t guardPagesPerStack)
+    : guardPagesPerStack_(guardPagesPerStack) {
 #ifndef _WIN32
   installSignalHandler();
 #endif
@@ -300,8 +316,9 @@ GuardPageAllocator::GuardPageAllocator(bool useGuardPages)
 GuardPageAllocator::~GuardPageAllocator() = default;

 unsigned char* GuardPageAllocator::allocate(size_t size) {
-  if (useGuardPages_ && !stackCache_) {
-    stackCache_ = CacheManager::instance().getStackCache(size);
+  if (guardPagesPerStack_ && !stackCache_) {
+    stackCache_ =
+        CacheManager::instance().getStackCache(size, guardPagesPerStack_);
   }

   if (stackCache_) {
...
@@ -31,10 +31,10 @@ class StackCacheEntry;
 class GuardPageAllocator {
  public:
   /**
-   * @param useGuardPages if true, protect limited amount of stacks with guard
-   * pages, otherwise acts as std::allocator.
+   * @param guardPagesPerStack Protect a small number of fiber stacks
+   * with this many guard pages. If 0, acts as std::allocator.
    */
-  explicit GuardPageAllocator(bool useGuardPages);
+  explicit GuardPageAllocator(size_t guardPagesPerStack);
   ~GuardPageAllocator();

   /**
@@ -50,7 +50,7 @@ class GuardPageAllocator {
  private:
   std::unique_ptr<StackCacheEntry> stackCache_;
   std::allocator<unsigned char> fallbackAllocator_;
-  bool useGuardPages_{true};
+  size_t guardPagesPerStack_{0};
 };

 } // namespace fibers
 } // namespace folly
@@ -449,7 +449,7 @@
 <h3 id="stack-overflow-detection">Stack overflow detection <a href="#stack-overflow-detection" class="headerLink">#</a></h3>
-<p>By default every fiber-task stack is allocated with a special guard page next to it (this can be controlled via <tt>useGuardPages</tt> option of <tt>FiberManager</tt>). If a stack overflow happens - this guard page will be accessed, which will result in immediate segmentation fault.</p>
+<p>By default every fiber-task stack is allocated with a special guard page next to it (this can be controlled via <tt>guardPagesPerStack</tt> option of <tt>FiberManager</tt>). If a stack overflow happens - this guard page will be accessed, which will result in immediate segmentation fault.</p>
 <div class="remarkup-important"><span class="remarkup-note-word">IMPORTANT:</span> disabling guard page protection may result in unnoticed stack overflows. Those will inevitably cause memory corruptions, which are usually very hard to debug.</div></section><section class="dex_document"><h1>Event Loops</h1><p class="dex_introduction"></p><p>folly::fibers library doesn&#039;t implement it&#039;s own event system. Instead it allows <tt>fibers::FiberManager</tt> to work with any other event system by implementing <tt>fibers::LoopController</tt> interface.</p>
...
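
As a usage note (not part of this commit), a minimal sketch of how the renamed option could be set, assuming a folly build that includes this change:

// Request two guard pages per fiber stack instead of the default one.
#include <folly/fibers/FiberManagerMap.h>
#include <folly/io/async/EventBase.h>

int main() {
  folly::EventBase evb;

  folly::fibers::FiberManager::Options opts;
  opts.guardPagesPerStack = 2; // 0 disables guard-page protection entirely

  auto& fm = folly::fibers::getFiberManager(evb, opts);
  fm.addTask([] {
    // Fiber code runs on a stack backed by two trailing guard pages.
  });

  evb.loop();
  return 0;
}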