Commit 614f17b5 authored by Nathan Bronson, committed by facebook-github-bot-1

fix initializer order bug in MemoryIdler

Summary: Make sure page size has been fetched from the operating system
before it is used in asserts.

Reviewed By: meyering

Differential Revision: D2551368

fb-gh-sync-id: d3735571e2a45f613bbbd7e0f158a755d36b376c
parent bab85e25
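
The bug class here is C++ static initialization order: a namespace-scope static in one translation unit may be read (for example, from another translation unit's static initializer or very early thread startup) before its own initializer has run, so the old s_pageSize could still be 0 when the asserts fired. Below is a minimal, stand-alone sketch of the function-local-static pattern the diff adopts; it is not folly code, and the main() harness and the sample address are illustrative assumptions only.

#include <unistd.h>   // sysconf, _SC_PAGESIZE
#include <cassert>
#include <cstddef>
#include <cstdint>

// Problematic form: a namespace-scope static is initialized at an
// unspecified point relative to statics in other translation units,
// so early callers can observe it as 0:
//   static const size_t s_pageSize = sysconf(_SC_PAGESIZE);

// Fixed form: a function-local static is guaranteed to be initialized
// on first use (thread-safely since C++11), so the value has always
// been fetched from the OS before anyone reads it.
static size_t pageSize() {
  static const size_t s_pageSize = sysconf(_SC_PAGESIZE);
  return s_pageSize;
}

int main() {
  // pageSize() is a power of two, so (pageSize() - 1) masks the
  // low-order bits; a page-aligned address therefore yields 0.
  const uintptr_t pageAlignedAddr = 4 * pageSize();  // illustrative value
  assert((pageAlignedAddr & (pageSize() - 1)) == 0);
  return 0;
}
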
@@ -94,10 +94,14 @@ void MemoryIdler::flushLocalMallocCaches() {
 // platforms could be increased if it was useful.
 #if (FOLLY_X64 || FOLLY_PPC64 ) && defined(_GNU_SOURCE) && defined(__linux__)
 
-static const size_t s_pageSize = sysconf(_SC_PAGESIZE);
 static FOLLY_TLS uintptr_t tls_stackLimit;
 static FOLLY_TLS size_t tls_stackSize;
 
+static size_t pageSize() {
+  static const size_t s_pageSize = sysconf(_SC_PAGESIZE);
+  return s_pageSize;
+}
+
 static void fetchStackLimits() {
   pthread_attr_t attr;
   pthread_getattr_np(pthread_self(), &attr);
@@ -128,7 +132,7 @@ static void fetchStackLimits() {
   tls_stackLimit = uintptr_t(addr) + guardSize;
   tls_stackSize = rawSize - guardSize;
 
-  assert((tls_stackLimit & (s_pageSize - 1)) == 0);
+  assert((tls_stackLimit & (pageSize() - 1)) == 0);
 }
 
 FOLLY_NOINLINE static uintptr_t getStackPtr() {
@@ -150,14 +154,14 @@ void MemoryIdler::unmapUnusedStack(size_t retain) {
   assert(sp >= tls_stackLimit);
   assert(sp - tls_stackLimit < tls_stackSize);
 
-  auto end = (sp - retain) & ~(s_pageSize - 1);
+  auto end = (sp - retain) & ~(pageSize() - 1);
   if (end <= tls_stackLimit) {
     // no pages are eligible for unmapping
     return;
   }
 
   size_t len = end - tls_stackLimit;
-  assert((len & (s_pageSize - 1)) == 0);
+  assert((len & (pageSize() - 1)) == 0);
   if (madvise((void*)tls_stackLimit, len, MADV_DONTNEED) != 0) {
     // It is likely that the stack vma hasn't been fully grown. In this
     // case madvise will apply dontneed to the present vmas, then return
...