Commit 40233942 authored by Andre Nash, committed by Facebook GitHub Bot

Provide a new optimized AVX memcpy with prefetch

Summary:
This introduces `__folly_memcpy`, which is an implementation of `memcpy` that uses prefetch to speed up cold copies (data absent from L1) and uses overlapping copies to avoid as much branching as possible to speed up hot copies (data present in L1).

A description of the core ideas for this memcpy is in the file comment at the top of folly_memcpy.S.

`__folly_memcpy` *does* act as a `memmove`, although it isn't optimized for that purpose: copies of up to 256 bytes are naturally overlap-safe, and overlapping copies of 257 or more bytes are detected and dispatched to `memmove`. This masks some undefined behavior bugs where code calls `memcpy` on overlapping regions of data, but `perf` samples will show `memmove` being called by `__folly_memcpy`, which will help identify these undefined behavior bugs for copies of 257 bytes or more.
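The overlapping-copy trick can be sketched in plain C++ (illustration only, not code from this commit; copy_8_to_16 is a made-up helper name): for any length from 8 through 16, load the first and last 8 bytes before writing either, then store both. One branch-free path covers the whole range, and because every load happens before any store, the copy is also overlap-safe as a side effect.

#include <cstddef>
#include <cstdint>
#include <cstring>

// Hypothetical sketch of the overlapping-copy technique for 8..16 byte copies.
void copy_8_to_16(void* dst, const void* src, size_t size) {
  // Load both ends first; for size < 16 the two 8-byte windows overlap.
  uint64_t head, tail;
  std::memcpy(&head, src, 8);
  std::memcpy(&tail, static_cast<const char*>(src) + size - 8, 8);
  // Store both ends; the stores may overlap, which is harmless since they
  // carry identical data in the overlapping bytes.
  std::memcpy(dst, &head, 8);
  std::memcpy(static_cast<char*>(dst) + size - 8, &tail, 8);
}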

Reviewed By: yfeldblum

Differential Revision: D23629205

fbshipit-source-id: 61ed66122cc8edf33154ea6e8b87f4223c0ffcc0
parent fef59129
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstring>

#if !defined(__AVX2__)
void* __folly_memcpy(
    void* __restrict dst, const void* __restrict src, size_t size) {
  return std::memcpy(dst, src, size);
}
#endif
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// @author: Logan Evans <lpe@fb.com>
#include <stdlib.h>

extern "C" {
void* __folly_memcpy(
    void* __restrict dst, const void* __restrict src, size_t size);
}
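For context, a minimal caller of the declaration above might look like the following (a sketch, not part of the commit; it assumes the symbol is linked in from either the assembly implementation or the non-AVX2 fallback shown earlier):

#include <cstddef>
#include <vector>

// Same declaration as in the header above.
extern "C" void* __folly_memcpy(
    void* __restrict dst, const void* __restrict src, size_t size);

int main() {
  std::vector<char> src(1024, 's');
  std::vector<char> dst(1024, '\0');
  // Contract matches memcpy: copies size bytes and returns dst.
  void* ret = __folly_memcpy(dst.data(), src.data(), src.size());
  return (ret == dst.data() && dst[0] == 's') ? 0 : 1;
}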
@@ -15,164 +15,315 @@
 */

/*
 * __folly_memcpy: An optimized memcpy implementation that uses prefetch and
 * AVX2 instructions.
 *
 * This implementation of memcpy acts as a memmove, but it is not optimized for
 * this purpose. While overlapping copies are undefined in memcpy, this
 * implementation acts like memmove for sizes up through 256 bytes and will
 * detect overlapping copies and call memmove for overlapping copies of 257 or
 * more bytes.
 *
 * This implementation uses prefetch to avoid dtlb misses. This can
 * substantially reduce dtlb store misses in cases where the destination
 * location is absent from L1 cache and where the copy size is small enough
 * that the hardware prefetcher doesn't have a large impact.
 *
 * The number of branches is limited by the use of overlapping copies. This
 * helps with copies where the source and destination cache lines are already
 * present in L1 because there are fewer instructions to execute and fewer
 * branches to potentially mispredict.
 *
 * Vector operations up to 32-bytes are used (avx2 instruction set). Larger
 * mov operations (avx512) are not used.
 *
 * Large copies make use of aligned store operations. This operation is
 * observed to always be faster than rep movsb, so the rep movsb instruction
 * is not used.
 *
 * If the copy size is humongous and the source and destination are both
 * aligned, this memcpy will use non-temporal operations. This can have
 * a substantial speedup for copies where data is absent from L1, but it
 * is significantly slower if the source and destination data were already
 * in L1. The use of non-temporal operations also has the effect that after
 * the copy is complete, the data will be moved out of L1, even if the data was
 * present before the copy started.
 *
 * @author Logan Evans <lpe@fb.com>
 */

#if defined(__AVX2__)

// This threshold is half of L1 cache on a Skylake machine, which means that
// potentially all of L1 will be populated by this copy once it is executed
// (dst and src are cached for temporal copies).
#define NON_TEMPORAL_STORE_THRESHOLD $32768

        .file       "memcpy.S"
        .section    .text,"ax"

        .type       __folly_memcpy_short, @function
__folly_memcpy_short:
        .cfi_startproc

.L_GE1_LE7:
        cmp         $1, %rdx
        je          .L_EQ1

        cmp         $4, %rdx
        jae         .L_GE4_LE7

.L_GE2_LE3:
        movw        (%rsi), %r8w
        movw        -2(%rsi,%rdx), %r9w
        movw        %r8w, (%rdi)
        movw        %r9w, -2(%rdi,%rdx)
        ret

        .align      2
.L_EQ1:
        movb        (%rsi), %r8b
        movb        %r8b, (%rdi)
        ret

        // Aligning the target of a jump to an even address has a measurable
        // speedup in microbenchmarks.
        .align      2
.L_GE4_LE7:
        movl        (%rsi), %r8d
        movl        -4(%rsi,%rdx), %r9d
        movl        %r8d, (%rdi)
        movl        %r9d, -4(%rdi,%rdx)
        ret

        .cfi_endproc
        .size       __folly_memcpy_short, .-__folly_memcpy_short

// memcpy is an alternative entrypoint into the function named __folly_memcpy.
// The compiler is able to call memcpy since the name is global while
// stacktraces will show __folly_memcpy since that is the name of the function.
// This is intended to aid in debugging by making it obvious which version of
// memcpy is being used.
        .align      64
        .globl      __folly_memcpy
        .type       __folly_memcpy, @function

__folly_memcpy:
        .cfi_startproc

        mov         %rdi, %rax

        test        %rdx, %rdx
        je          .L_EQ0

        prefetchw   (%rdi)
        prefetchw   -1(%rdi,%rdx)

        cmp         $8, %rdx
        jb          .L_GE1_LE7

.L_GE8:
        cmp         $32, %rdx
        ja          .L_GE33

.L_GE8_LE32:
        cmp         $16, %rdx
        ja          .L_GE17_LE32

.L_GE8_LE16:
        mov         (%rsi), %r8
        mov         -8(%rsi,%rdx), %r9
        mov         %r8, (%rdi)
        mov         %r9, -8(%rdi,%rdx)
.L_EQ0:
        ret

        .align      2
.L_GE17_LE32:
        movdqu      (%rsi), %xmm0
        movdqu      -16(%rsi,%rdx), %xmm1
        movdqu      %xmm0, (%rdi)
        movdqu      %xmm1, -16(%rdi,%rdx)
        ret

        .align      2
.L_GE193_LE256:
        vmovdqu     %ymm3, 96(%rdi)
        vmovdqu     %ymm4, -128(%rdi,%rdx)

.L_GE129_LE192:
        vmovdqu     %ymm2, 64(%rdi)
        vmovdqu     %ymm5, -96(%rdi,%rdx)

.L_GE65_LE128:
        vmovdqu     %ymm1, 32(%rdi)
        vmovdqu     %ymm6, -64(%rdi,%rdx)

.L_GE33_LE64:
        vmovdqu     %ymm0, (%rdi)
        vmovdqu     %ymm7, -32(%rdi,%rdx)

        vzeroupper
        ret

        .align      2
.L_GE33:
        vmovdqu     (%rsi), %ymm0
        vmovdqu     -32(%rsi,%rdx), %ymm7

        cmp         $64, %rdx
        jbe         .L_GE33_LE64

        prefetchw   64(%rdi)

        vmovdqu     32(%rsi), %ymm1
        vmovdqu     -64(%rsi,%rdx), %ymm6

        cmp         $128, %rdx
        jbe         .L_GE65_LE128

        prefetchw   128(%rdi)

        vmovdqu     64(%rsi), %ymm2
        vmovdqu     -96(%rsi,%rdx), %ymm5

        cmp         $192, %rdx
        jbe         .L_GE129_LE192

        prefetchw   192(%rdi)

        vmovdqu     96(%rsi), %ymm3
        vmovdqu     -128(%rsi,%rdx), %ymm4

        cmp         $256, %rdx
        jbe         .L_GE193_LE256

.L_GE257:
        prefetchw   256(%rdi)

        // Check if there is an overlap. If there is an overlap then the caller
        // has a bug since this is undefined behavior. However, for legacy
        // reasons this behavior is expected by some callers.
        //
        // All copies through 256 bytes will operate as a memmove since for
        // those sizes all reads are performed before any writes.
        //
        // This check uses the idea that there is an overlap if
        // (%rdi < (%rsi + %rdx)) && (%rsi < (%rdi + %rdx)),
        // or equivalently, there is no overlap if
        // ((%rsi + %rdx) <= %rdi) || ((%rdi + %rdx) <= %rsi).
        //
        // %r9 will be used after .L_ALIGNED_DST_LOOP to calculate how many
        // bytes remain to be copied.
        lea         (%rsi,%rdx), %r9
        cmp         %rdi, %r9
        jbe         .L_NO_OVERLAP

        lea         (%rdi,%rdx), %r8
        cmp         %rsi, %r8
        // This is a forward jump so that the branch predictor will not predict
        // a memmove.
        ja          .L_MEMMOVE

        .align      2
.L_NO_OVERLAP:
        vmovdqu     %ymm0, (%rdi)
        vmovdqu     %ymm1, 32(%rdi)
        vmovdqu     %ymm2, 64(%rdi)
        vmovdqu     %ymm3, 96(%rdi)

        // Align %rdi to a 32 byte boundary.
        // %rcx = 128 - 31 & %rdi
        mov         $128, %rcx
        and         $31, %rdi
        sub         %rdi, %rcx

        lea         (%rsi,%rcx), %rsi
        lea         (%rax,%rcx), %rdi
        sub         %rcx, %rdx

        // %r8 is the end condition for the loop.
        lea         -128(%rsi,%rdx), %r8

        cmp         NON_TEMPORAL_STORE_THRESHOLD, %rdx
        jae         .L_NON_TEMPORAL_LOOP

        .align      2
.L_ALIGNED_DST_LOOP:
        prefetchw   128(%rdi)
        prefetchw   192(%rdi)

        vmovdqu     (%rsi), %ymm0
        vmovdqu     32(%rsi), %ymm1
        vmovdqu     64(%rsi), %ymm2
        vmovdqu     96(%rsi), %ymm3
        add         $128, %rsi

        vmovdqa     %ymm0, (%rdi)
        vmovdqa     %ymm1, 32(%rdi)
        vmovdqa     %ymm2, 64(%rdi)
        vmovdqa     %ymm3, 96(%rdi)
        add         $128, %rdi

        cmp         %r8, %rsi
        jb          .L_ALIGNED_DST_LOOP

.L_ALIGNED_DST_LOOP_END:
        sub         %rsi, %r9
        mov         %r9, %rdx

        vmovdqu     %ymm4, -128(%rdi,%rdx)
        vmovdqu     %ymm5, -96(%rdi,%rdx)
        vmovdqu     %ymm6, -64(%rdi,%rdx)
        vmovdqu     %ymm7, -32(%rdi,%rdx)

        vzeroupper
        ret

        .align      2
.L_NON_TEMPORAL_LOOP:
        testb       $31, %sil
        jne         .L_ALIGNED_DST_LOOP
        // This is prefetching the source data unlike ALIGNED_DST_LOOP which
        // prefetches the destination data. This choice is again informed by
        // benchmarks. With a non-temporal store the entirety of the cache line
        // is being written so the previous data can be discarded without being
        // fetched.
        prefetchnta 128(%rsi)
        prefetchnta 196(%rsi)

        vmovntdqa   (%rsi), %ymm0
        vmovntdqa   32(%rsi), %ymm1
        vmovntdqa   64(%rsi), %ymm2
        vmovntdqa   96(%rsi), %ymm3
        add         $128, %rsi

        vmovntdq    %ymm0, (%rdi)
        vmovntdq    %ymm1, 32(%rdi)
        vmovntdq    %ymm2, 64(%rdi)
        vmovntdq    %ymm3, 96(%rdi)
        add         $128, %rdi

        cmp         %r8, %rsi
        jb          .L_NON_TEMPORAL_LOOP

        sfence
        jmp         .L_ALIGNED_DST_LOOP_END

.L_MEMMOVE:
        call        memmove
        ret

        .cfi_endproc
        .size       __folly_memcpy, .-__folly_memcpy

#ifdef FOLLY_MEMCPY_IS_MEMCPY
        .weak       memcpy
        memcpy = __folly_memcpy
#endif

        .ident "GCC: (GNU) 4.8.2"
#ifdef __linux__
        .section .note.GNU-stack,"",@progbits
#endif

#endif
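To summarize the large-copy path, here is the dispatch logic for copies of 257 bytes or more rewritten as C++ (a sketch that mirrors the assembly's comments, not code from this commit; the loop helpers referenced in comments are placeholders):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Mirrors NON_TEMPORAL_STORE_THRESHOLD above: half of L1 on a Skylake machine.
constexpr size_t kNonTemporalStoreThreshold = 32768;

// Illustrative only; assumes size >= 257. By this point the assembly has
// already loaded the first 128 bytes into ymm0..ymm3 and the last 128 bytes
// into ymm4..ymm7, but has written nothing yet.
void large_copy_dispatch(char* dst, const char* src, size_t size) {
  // Overlap check from .L_GE257: the regions overlap iff
  // (dst < src + size) && (src < dst + size). Overlapping large copies are
  // handed to memmove, as in .L_MEMMOVE.
  if (dst < src + size && src < dst + size) {
    std::memmove(dst, src, size);
    return;
  }
  // .L_NO_OVERLAP stores the first 128 bytes with unaligned 32-byte stores,
  // then advances to a 32-byte-aligned destination within that head
  // (%rcx = 128 - (%rdi & 31)); any re-copied bytes are overwritten with the
  // same data.
  size_t adjust = 128 - (reinterpret_cast<uintptr_t>(dst) & 31);
  dst += adjust;
  src += adjust;
  size -= adjust;
  if (size >= kNonTemporalStoreThreshold &&
      (reinterpret_cast<uintptr_t>(src) & 31) == 0) {
    // .L_NON_TEMPORAL_LOOP: 128-byte iterations of vmovntdqa/vmovntdq with
    // prefetchnta on the source, followed by sfence.
  } else {
    // .L_ALIGNED_DST_LOOP: 128-byte iterations of unaligned loads and
    // aligned stores, with prefetchw on the destination.
  }
  // The tail is finished with the last 128 bytes loaded up front
  // (ymm4..ymm7), stored unaligned relative to the end of the buffer.
}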
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <string.h>
#include <chrono>
#include <random>
#include <folly/Benchmark.h>
#include <folly/FollyMemcpy.h>
#include <folly/portability/Unistd.h>
void bench(
uint32_t iters,
void*(memcpy_func)(void*, const void*, size_t),
size_t min,
size_t max,
size_t align,
bool hot) {
static std::string dst_buffer;
static std::string src_buffer;
static std::vector<size_t> sizes;
static std::vector<size_t> dst_offsets;
static std::vector<size_t> src_offsets;
BENCHMARK_SUSPEND {
size_t src_buffer_size = sysconf(_SC_PAGE_SIZE) *
std::ceil(static_cast<double>(max + 2 * align) /
sysconf(_SC_PAGE_SIZE));
size_t dst_buffer_size;
if (hot) {
dst_buffer_size = src_buffer_size;
} else {
dst_buffer_size = 1024 * 1024 * 1024; // 1 GiB
}
dst_buffer.resize(dst_buffer_size);
memset(dst_buffer.data(), 'd', dst_buffer.size());
src_buffer.resize(src_buffer_size);
memset(src_buffer.data(), 's', src_buffer.size());
std::default_random_engine gen;
sizes.resize(4095);
std::uniform_int_distribution<size_t> size_dist(min, max);
for (size_t i = 0; i < sizes.size(); i++) {
sizes[i] = size_dist(gen);
}
src_offsets.resize(4096);
dst_offsets.resize(4096);
std::uniform_int_distribution<size_t> src_offset_dist(
0, (src_buffer_size - max) / align);
std::uniform_int_distribution<size_t> dst_offset_dist(
0, (dst_buffer_size - max) / align);
for (size_t i = 0; i < src_offsets.size(); i++) {
src_offsets[i] = align * src_offset_dist(gen);
dst_offsets[i] = align * dst_offset_dist(gen);
}
}
size_t size_idx = 0;
size_t offset_idx = 0;
for (unsigned int i = 0; i < iters; i++) {
if (size_idx + 1 == sizes.size()) {
size_idx = 0;
}
if (offset_idx >= src_offsets.size()) {
offset_idx = 0;
}
void* dst = &dst_buffer[dst_offsets[offset_idx]];
const void* src = &src_buffer[src_offsets[offset_idx]];
size_t size = sizes[size_idx];
memcpy_func(dst, src, size);
size_idx++;
offset_idx++;
}
}
#define BENCH_BOTH(MIN, MAX, HOT, HOT_STR) \
BENCHMARK_NAMED_PARAM( \
bench, \
MIN##_to_##MAX##_##HOT_STR##_glibc, \
/*memcpy_func=*/memcpy, \
/*min=*/MIN, \
/*max=*/MAX, \
/*align=*/1, \
/*hot=*/HOT) \
BENCHMARK_RELATIVE_NAMED_PARAM( \
bench, \
MIN##_to_##MAX##_##HOT_STR##_folly, \
/*memcpy_func=*/__folly_memcpy, \
/*min=*/MIN, \
/*max=*/MAX, \
/*align=*/1, \
/*hot=*/HOT)
BENCH_BOTH(0, 7, true, HOT)
BENCH_BOTH(0, 16, true, HOT)
BENCH_BOTH(0, 32, true, HOT)
BENCH_BOTH(0, 64, true, HOT)
BENCH_BOTH(0, 128, true, HOT)
BENCH_BOTH(0, 256, true, HOT)
BENCH_BOTH(0, 512, true, HOT)
BENCH_BOTH(0, 1024, true, HOT)
BENCH_BOTH(0, 32768, true, HOT)
BENCH_BOTH(8, 16, true, HOT)
BENCH_BOTH(16, 32, true, HOT)
BENCH_BOTH(32, 256, true, HOT)
BENCH_BOTH(256, 1024, true, HOT)
BENCH_BOTH(1024, 8192, true, HOT)
BENCHMARK_DRAW_LINE();
BENCH_BOTH(0, 7, false, COLD)
BENCH_BOTH(0, 16, false, COLD)
BENCH_BOTH(0, 32, false, COLD)
BENCH_BOTH(0, 64, false, COLD)
BENCH_BOTH(0, 128, false, COLD)
BENCH_BOTH(0, 256, false, COLD)
BENCH_BOTH(0, 512, false, COLD)
BENCH_BOTH(0, 1024, false, COLD)
BENCH_BOTH(0, 32768, false, COLD)
BENCH_BOTH(8, 16, false, COLD)
BENCH_BOTH(16, 32, false, COLD)
BENCH_BOTH(32, 256, false, COLD)
BENCH_BOTH(256, 1024, false, COLD)
BENCH_BOTH(1024, 8192, false, COLD)
BENCHMARK_DRAW_LINE();
BENCHMARK_NAMED_PARAM(
bench,
64k_to_1024k_unaligned_cold_glibc,
/*memcpy_func=*/memcpy,
/*min=*/65536,
/*max=*/1048576,
/*align=*/1,
/*hot=*/false)
BENCHMARK_RELATIVE_NAMED_PARAM(
bench,
64k_to_1024k_unaligned_cold_folly,
/*memcpy_func=*/__folly_memcpy,
/*min=*/65536,
/*max=*/1048576,
/*align=*/1,
/*hot=*/false)
BENCHMARK_NAMED_PARAM(
bench,
64k_to_1024k_aligned_cold_glibc,
/*memcpy_func=*/memcpy,
/*min=*/65536,
/*max=*/1048576,
/*align=*/32,
/*hot=*/false)
BENCHMARK_RELATIVE_NAMED_PARAM(
bench,
64k_to_1024k_aligned_cold_folly,
/*memcpy_func=*/__folly_memcpy,
/*min=*/65536,
/*max=*/1048576,
/*align=*/32,
/*hot=*/false)
// Benchmark results (Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz, Linux x86_64)
// Buck build mode: @mode/opt-lto
// ============================================================================
// folly/test/MemcpyBenchmark.cpp relative time/iter iters/s
// ============================================================================
// bench(0_to_7_HOT_glibc) 9.51ns 105.19M
// bench(0_to_7_HOT_folly) 142.33% 6.68ns 149.72M
// bench(0_to_16_HOT_glibc) 8.98ns 111.30M
// bench(0_to_16_HOT_folly) 153.23% 5.86ns 170.55M
// bench(0_to_32_HOT_glibc) 9.08ns 110.08M
// bench(0_to_32_HOT_folly) 166.79% 5.45ns 183.61M
// bench(0_to_64_HOT_glibc) 8.35ns 119.79M
// bench(0_to_64_HOT_folly) 124.48% 6.71ns 149.11M
// bench(0_to_128_HOT_glibc) 8.20ns 122.00M
// bench(0_to_128_HOT_folly) 121.55% 6.74ns 148.29M
// bench(0_to_256_HOT_glibc) 8.64ns 115.68M
// bench(0_to_256_HOT_folly) 95.85% 9.02ns 110.88M
// bench(0_to_512_HOT_glibc) 13.05ns 76.61M
// bench(0_to_512_HOT_folly) 110.04% 11.86ns 84.31M
// bench(0_to_1024_HOT_glibc) 16.00ns 62.50M
// bench(0_to_1024_HOT_folly) 100.53% 15.91ns 62.83M
// bench(0_to_32768_HOT_glibc) 658.76ns 1.52M
// bench(0_to_32768_HOT_folly) 112.30% 586.62ns 1.70M
// bench(8_to_16_HOT_glibc) 5.18ns 193.08M
// bench(8_to_16_HOT_folly) 162.18% 3.19ns 313.13M
// bench(16_to_32_HOT_glibc) 4.55ns 219.65M
// bench(16_to_32_HOT_folly) 117.18% 3.89ns 257.39M
// bench(32_to_256_HOT_glibc) 8.70ns 114.98M
// bench(32_to_256_HOT_folly) 95.64% 9.09ns 109.97M
// bench(256_to_1024_HOT_glibc) 16.59ns 60.28M
// bench(256_to_1024_HOT_folly) 96.15% 17.25ns 57.96M
// bench(1024_to_8192_HOT_glibc) 111.93ns 8.93M
// bench(1024_to_8192_HOT_folly) 135.92% 82.35ns 12.14M
// ----------------------------------------------------------------------------
// bench(0_to_7_COLD_glibc) 101.72ns 9.83M
// bench(0_to_7_COLD_folly) 242.15% 42.01ns 23.81M
// bench(0_to_16_COLD_glibc) 105.14ns 9.51M
// bench(0_to_16_COLD_folly) 244.61% 42.98ns 23.26M
// bench(0_to_32_COLD_glibc) 108.45ns 9.22M
// bench(0_to_32_COLD_folly) 238.48% 45.48ns 21.99M
// bench(0_to_64_COLD_glibc) 102.38ns 9.77M
// bench(0_to_64_COLD_folly) 192.08% 53.30ns 18.76M
// bench(0_to_128_COLD_glibc) 122.86ns 8.14M
// bench(0_to_128_COLD_folly) 198.17% 62.00ns 16.13M
// bench(0_to_256_COLD_glibc) 125.43ns 7.97M
// bench(0_to_256_COLD_folly) 154.93% 80.96ns 12.35M
// bench(0_to_512_COLD_glibc) 161.50ns 6.19M
// bench(0_to_512_COLD_folly) 149.92% 107.72ns 9.28M
// bench(0_to_1024_COLD_glibc) 229.68ns 4.35M
// bench(0_to_1024_COLD_folly) 141.36% 162.48ns 6.15M
// bench(0_to_32768_COLD_glibc) 2.91us 343.90K
// bench(0_to_32768_COLD_folly) 138.83% 2.09us 477.42K
// bench(8_to_16_COLD_glibc) 115.47ns 8.66M
// bench(8_to_16_COLD_folly) 242.11% 47.69ns 20.97M
// bench(16_to_32_COLD_glibc) 103.71ns 9.64M
// bench(16_to_32_COLD_folly) 207.16% 50.06ns 19.98M
// bench(32_to_256_COLD_glibc) 141.85ns 7.05M
// bench(32_to_256_COLD_folly) 179.79% 78.90ns 12.67M
// bench(256_to_1024_COLD_glibc) 236.81ns 4.22M
// bench(256_to_1024_COLD_folly) 110.72% 213.88ns 4.68M
// bench(1024_to_8192_COLD_glibc) 911.56ns 1.10M
// bench(1024_to_8192_COLD_folly) 120.27% 757.90ns 1.32M
// ----------------------------------------------------------------------------
// bench(64k_to_1024k_unaligned_cold_glibc) 70.17us 14.25K
// bench(64k_to_1024k_unaligned_cold_folly) 129.15% 54.34us 18.40K
// bench(64k_to_1024k_aligned_cold_glibc) 69.28us 14.43K
// bench(64k_to_1024k_aligned_cold_folly) 246.52% 28.10us 35.58K
// ============================================================================
int main(int argc, char** argv) {
gflags::ParseCommandLineFlags(&argc, &argv, true);
folly::runBenchmarks();
return 0;
}
@@ -14,65 +14,156 @@
 * limitations under the License.
 */

#include <array>

#include <folly/FollyMemcpy.h>
#include <folly/Portability.h>
#include <folly/portability/GTest.h>

namespace {

constexpr size_t kSize = 4096 * 4;
constexpr size_t kBufSize = 4096 * 50;
std::array<char, kBufSize> src;
std::array<char, kBufSize> dst;

char expected_src(size_t offset) {
  return static_cast<char>(offset % 128);
}

char expected_dst(size_t offset) {
  // Don't use any common characters with src.
  return static_cast<char>((offset % 128) + 128);
}

void init(size_t size) {
  for (size_t i = 0; i < size; ++i) {
    src[i] = expected_src(i);
    dst[i] = expected_dst(i);
  }
}

} // namespace

TEST(folly_memcpy, zero_len)
FOLLY_DISABLE_UNDEFINED_BEHAVIOR_SANITIZER("nonnull-attribute") {
  // If length is 0, we shouldn't touch any memory. So this should
  // not crash.
  char* srcNull = nullptr;
  char* dstNull = nullptr;
  __folly_memcpy(dstNull, srcNull, 0);
}

// Test copy `len' bytes and verify that exactly `len' bytes are copied.
void testLen(size_t len, size_t dst_offset = 0, size_t src_offset = 0) {
  if (len + std::max(dst_offset, src_offset) + 1 > kBufSize) {
    return;
  }
  init(len + std::max(dst_offset, src_offset) + 1);
  void* ret =
      __folly_memcpy(dst.data() + dst_offset, src.data() + src_offset, len);
  ASSERT_EQ(ret, dst.data() + dst_offset);
  for (size_t i = 0; i < len; ++i) {
    ASSERT_EQ(src[i + src_offset], expected_src(i + src_offset))
        << "__folly_memcpy(dst+" << dst_offset << ", src+" << src_offset
        << ", " << len << "), i = " << i;
    ASSERT_EQ(src[i + src_offset], dst[i + dst_offset])
        << "__folly_memcpy(dst+" << dst_offset << ", src+" << src_offset
        << ", " << len << "), i = " << i;
  }
  if (len + dst_offset < kBufSize) {
    ASSERT_EQ(dst[len + dst_offset], expected_dst(len + dst_offset))
        << "__folly_memcpy(dst+" << dst_offset << ", src+" << src_offset
        << ", " << len << "), overwrote";
  }
  if (src_offset > 0) {
    ASSERT_EQ(src[src_offset - 1], expected_src(src_offset - 1));
  }
  if (dst_offset > 0) {
    ASSERT_EQ(dst[dst_offset - 1], expected_dst(dst_offset - 1));
  }
}

TEST(folly_memcpy, small) {
  for (size_t len = 1; len < 8; ++len) {
    testLen(len);
  }
}

TEST(folly_memcpy, offset) {
  for (size_t dst_offset = 0; dst_offset < 32; dst_offset += 7) {
    for (size_t src_offset = 0; src_offset < 32; src_offset += 7) {
      for (size_t len = 8; len < 1000; len += 7) {
        testLen(len, dst_offset, src_offset);
      }
    }
  }
}

TEST(folly_memcpy, offset_huge) {
  size_t kHuge = 49 * 4096;
  testLen(kHuge, 0, 0);
  testLen(kHuge, 16, 16);
  testLen(kHuge, 32, 32);
  testLen(kHuge, 7, 31);
  testLen(kHuge, 31, 2);
  testLen(kHuge, 0, 31);
  testLen(kHuge, 7, 0);
}

TEST(folly_memcpy, overlap) {
  static constexpr ssize_t kStartIndex = 1000;
  std::array<char, 2000> copy_buf;
  std::array<char, 2000> check_buf;

  for (ssize_t copy_size = 0; copy_size < 300; copy_size++) {
    for (ssize_t overlap_offset = -copy_size - 1;
         overlap_offset <= copy_size + 1;
         overlap_offset++) {
      for (size_t i = 0; i < check_buf.size(); i++) {
        copy_buf[i] = static_cast<char>(i % 128);
      }
      memset(check_buf.data(), static_cast<char>(-1), check_buf.size());
      memmove(check_buf.data(), copy_buf.data() + kStartIndex, copy_size);

      // Call __folly_memcpy directly so that asan doesn't complain about the
      // overlapping memcpy.
      __folly_memcpy(
          copy_buf.data() + kStartIndex + overlap_offset,
          copy_buf.data() + kStartIndex,
          copy_size);

      for (ssize_t i = 1000 + overlap_offset - 1;
           i < 100 + overlap_offset + copy_size + 1;
           i++) {
        printf(
            "i: %zd, val: %c\n",
            i,
            *(copy_buf.data() + kStartIndex + overlap_offset + i));
      }

      for (ssize_t i = 0; i < copy_size; i++) {
        ASSERT_EQ(
            *(copy_buf.data() + kStartIndex + overlap_offset + i),
            *(check_buf.data() + i))
            << "Error after __folly_memcpy(src + " << kStartIndex << " + "
            << overlap_offset << ", src + " << kStartIndex << ", "
            << copy_size << ") at index i = " << i;
      }
    }
  }
}

TEST(folly_memcpy, main) {
  for (size_t len = 8; len <= 128; ++len) {
    testLen(len);
  }

  for (size_t len = 128; len <= kSize; len += 128) {
    testLen(len);
  }

  for (size_t len = 128; len <= kSize; len += 73) {
    testLen(len);
  }
}