#include "benchmark/benchmark.h"

#include <cassert>      // assert() in the KeepRunning/KeepRunningBatch/RangedFor tests below
#include <iterator>     // std::iterator_traits in the static_assert below
#include <type_traits>  // std::is_same in the static_assert below

#define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)

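// Timed loop that only passes state.iterations() to DoNotOptimize, so the
// otherwise empty body cannot be optimized away.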
void BM_empty(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());
  }
}
BENCHMARK(BM_empty);
BENCHMARK(BM_empty)->ThreadPerCpu();

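// Spins over state.range(0) values inside the timed loop.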
void BM_spin_empty(benchmark::State& state) {
  for (auto _ : state) {
    for (int x = 0; x < state.range(0); ++x) {
      benchmark::DoNotOptimize(x);
    }
  }
}
BASIC_BENCHMARK_TEST(BM_spin_empty);
BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();

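// Does spin work before entering the timed loop.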
void BM_spin_pause_before(benchmark::State& state) {
  for (int i = 0; i < state.range(0); ++i) {
    benchmark::DoNotOptimize(i);
  }
  for (auto _ : state) {
    for (int i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
  }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before);
BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();

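// Uses PauseTiming()/ResumeTiming() to exclude the first spin loop of each
// iteration from the measurement.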
void BM_spin_pause_during(benchmark::State& state) {
  for (auto _ : state) {
    state.PauseTiming();
    for (int i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
    state.ResumeTiming();
    for (int i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
  }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_during);
BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();

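// Pauses and immediately resumes the timer on every iteration, with no other
// work in the loop.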
void BM_pause_during(benchmark::State& state) {
  for (auto _ : state) {
    state.PauseTiming();
    state.ResumeTiming();
  }
}
BENCHMARK(BM_pause_during);
BENCHMARK(BM_pause_during)->ThreadPerCpu();
BENCHMARK(BM_pause_during)->UseRealTime();
BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();

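// Does spin work after the timed loop has finished.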
void BM_spin_pause_after(benchmark::State& state) {
  for (auto _ : state) {
    for (int i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
  }
  for (int i = 0; i < state.range(0); ++i) {
    benchmark::DoNotOptimize(i);
  }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();

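// Does spin work both before and after the timed loop.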
void BM_spin_pause_before_and_after(benchmark::State& state) {
  for (int i = 0; i < state.range(0); ++i) {
    benchmark::DoNotOptimize(i);
  }
  for (auto _ : state) {
    for (int i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
  }
  for (int i = 0; i < state.range(0); ++i) {
    benchmark::DoNotOptimize(i);
  }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();

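// Timed loop with a completely empty body.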
void BM_empty_stop_start(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_empty_stop_start);
BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();

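// Exercises the KeepRunning() interface and checks that state.iterations()
// matches the number of iterations the loop actually executed.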
void BM_KeepRunning(benchmark::State& state) {
  benchmark::IterationCount iter_count = 0;
  assert(iter_count == state.iterations());
  while (state.KeepRunning()) {
    ++iter_count;
  }
  assert(iter_count == state.iterations());
}
BENCHMARK(BM_KeepRunning);

void BM_KeepRunningBatch(benchmark::State& state) {
  // Choose a batch size >1000 to skip the typical runs with iteration
  // targets of 10, 100 and 1000. If these are not actually skipped, the
  // bug would be detectable as consecutive runs with the same iteration
  // count. Below we assert that this does not happen.
  const benchmark::IterationCount batch_size = 1009;

  static benchmark::IterationCount prior_iter_count = 0;
  benchmark::IterationCount iter_count = 0;
  while (state.KeepRunningBatch(batch_size)) {
    iter_count += batch_size;
  }
  assert(state.iterations() == iter_count);

  // Verify that the iteration count always increases across runs (see
  // comment above).
  assert(iter_count == batch_size              // max_iterations == 1
         || iter_count > prior_iter_count);    // max_iterations > batch_size
  prior_iter_count = iter_count;
}
// Register with a fixed repetition count to establish the invariant that
// the iteration count should always change across runs. This overrides
// the --benchmark_repetitions command-line flag, which would otherwise
// cause this test to fail if set > 1.
BENCHMARK(BM_KeepRunningBatch)->Repetitions(1);

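// Checks that the ranged-for interface performs exactly state.max_iterations
// iterations.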
void BM_RangedFor(benchmark::State& state) {
  benchmark::IterationCount iter_count = 0;
  for (auto _ : state) {
    ++iter_count;
  }
  assert(iter_count == state.max_iterations);
}
BENCHMARK(BM_RangedFor);

// Ensure that StateIterator provides all the necessary typedefs required to
// instantiate std::iterator_traits.
static_assert(std::is_same<
    typename std::iterator_traits<benchmark::State::StateIterator>::value_type,
    typename benchmark::State::StateIterator::value_type>::value, "");

BENCHMARK_MAIN();