// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// This file has been modified for use by the TinyGo compiler.

package testing

import (
	"time"
)

var (
	benchTime = benchTimeFlag{d: 1 * time.Second} // changed during test of testing package
)

type benchTimeFlag struct {
	d time.Duration
}

// InternalBenchmark is an internal type but exported because it is cross-package;
// it is part of the implementation of the "go test" command.
type InternalBenchmark struct {
	Name string
	F    func(b *B)
}

// B is a type passed to Benchmark functions to manage benchmark
// timing and to specify the number of iterations to run.
//
// A benchmark ends when its Benchmark function returns or calls any of the methods
// FailNow, Fatal, Fatalf, SkipNow, Skip, or Skipf. Those methods must be called
// only from the goroutine running the Benchmark function.
// The other reporting methods, such as the variations of Log and Error,
// may be called simultaneously from multiple goroutines.
//
// Like in tests, benchmark logs are accumulated during execution
// and dumped to standard output when done. Unlike in tests, benchmark logs
// are always printed, so as not to hide output whose existence may be
// affecting benchmark results.
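//
// The benchmark function must run the target code b.N times. A minimal
// sketch (fmt.Sprintf stands in for the code under test):
//
//	func BenchmarkHello(b *testing.B) {
//		for i := 0; i < b.N; i++ {
//			fmt.Sprintf("hello")
//		}
//	}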
type B struct {
	common
	hasSub    bool          // TODO: should be in common, and atomic
	start     time.Time     // TODO: should be in common
	duration  time.Duration // TODO: should be in common
	N         int
	benchFunc func(b *B)
	benchTime benchTimeFlag
	timerOn   bool
	result    BenchmarkResult
}

// StartTimer starts timing a benchmark. It is called automatically
// before a benchmark starts, but it can also be used to resume timing after
// a call to StopTimer.
func (b *B) StartTimer() {
	if !b.timerOn {
		b.start = time.Now()
		b.timerOn = true
	}
}

// StopTimer stops timing a benchmark. This can be used to pause the timer
// while performing complex initialization that you don't
// want to measure.
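//
// An illustrative pattern (expensiveSetup is a hypothetical helper):
//
//	b.StopTimer()
//	expensiveSetup()
//	b.StartTimer()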
func (b *B) StopTimer() {
	if b.timerOn {
		b.duration += time.Since(b.start)
		b.timerOn = false
	}
}

// ResetTimer zeroes the elapsed benchmark time. Memory allocation counters
// and user-reported metrics are not tracked by this implementation.
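//
// A typical use is to exclude one-time setup from the measurement
// (buildTestData is a hypothetical helper):
//
//	func BenchmarkLookup(b *testing.B) {
//		data := buildTestData()
//		b.ResetTimer()
//		for i := 0; i < b.N; i++ {
//			data.Lookup("key")
//		}
//	}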
func (b *B) ResetTimer() {
	if b.timerOn {
		b.start = time.Now()
	}
	b.duration = 0
}

// SetBytes records the number of bytes processed in a single operation.
// If this is called, the benchmark will report ns/op and MB/s.
func (b *B) SetBytes(n int64) {
	panic("testing: unimplemented: B.SetBytes")
}

// ReportAllocs enables malloc statistics for this benchmark.
// It is equivalent to setting -test.benchmem, but it only affects the
// benchmark function that calls ReportAllocs.
func (b *B) ReportAllocs() {
	panic("testing: unimplemented: B.ReportAllocs")
}

// runN runs a single benchmark for the specified number of iterations.
func (b *B) runN(n int) {
	b.N = n
	b.ResetTimer()
	b.StartTimer()
	b.benchFunc(b)
	b.StopTimer()
}

func min(x, y int64) int64 {
	if x > y {
		return y
	}
	return x
}

func max(x, y int64) int64 {
	if x < y {
		return y
	}
	return x
}

// run1 runs the first iteration of benchFunc. It reports whether more
// iterations of this benchmark should be run.
func (b *B) run1() bool {
	b.runN(1)
	return !b.hasSub
}

// run executes the benchmark.
func (b *B) run() {
	b.launch()
}

// launch launches the benchmark function. It gradually increases the number
// of benchmark iterations until the benchmark runs for the requested benchtime.
// run1 must have been called on b.
func (b *B) launch() {
	d := b.benchTime.d
	for n := int64(1); !b.failed && b.duration < d && n < 1e9; {
		last := n
		// Predict required iterations.
		goalns := d.Nanoseconds()
		prevIters := int64(b.N)
		prevns := b.duration.Nanoseconds()
		if prevns <= 0 {
			// Round up, to avoid div by zero.
			prevns = 1
		}
		// Order of operations matters.
		// For very fast benchmarks, prevIters ~= prevns.
		// If you divide first, you get 0 or 1,
		// which can hide an order of magnitude in execution time.
		// So multiply first, then divide.
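		// For example, with prevIters = 1000 and prevns = 1500
		// (1.5ns/op), computing prevns/prevIters first would truncate
		// the per-op cost to 1ns and overshoot the prediction by 50%.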
		n = goalns * prevIters / prevns
		// Run more iterations than we think we'll need (1.2x).
		n += n / 5
		// Don't grow too fast in case we had timing errors previously.
		n = min(n, 100*last)
		// Be sure to run at least one more than last time.
		n = max(n, last+1)
		// Don't run more than 1e9 times. (This also keeps n in int range on 32-bit platforms.)
		n = min(n, 1e9)
		b.runN(int(n))
	}
	b.result = BenchmarkResult{b.N, b.duration}
}

// BenchmarkResult contains the results of a benchmark run.
type BenchmarkResult struct {
	N int           // The number of iterations.
	T time.Duration // The total time taken.
}

// NsPerOp returns the "ns/op" metric.
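//
// For example, a result with N = 1000 and T = 2ms works out to 2000 ns/op.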
func (r BenchmarkResult) NsPerOp() int64 {
	if r.N <= 0 {
		return 0
	}
	return r.T.Nanoseconds() / int64(r.N)
}

// AllocsPerOp returns the "allocs/op" metric,
// which is calculated as r.MemAllocs / r.N.
func (r BenchmarkResult) AllocsPerOp() int64 {
	return 0 // Dummy version to allow running e.g. golang.org/test/fibo.go
}

// AllocedBytesPerOp returns the "B/op" metric,
// which is calculated as r.MemBytes / r.N.
func (r BenchmarkResult) AllocedBytesPerOp() int64 {
	return 0 // Dummy version to allow running e.g. golang.org/test/fibo.go
}

// Run benchmarks f as a subbenchmark with the given name. It reports
// whether the subbenchmark succeeded.
//
// A subbenchmark is like any other benchmark. A benchmark that calls Run at
// least once will not be measured itself and will be called once with N=1.
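//
// An illustrative sketch (the sizes and the hypothetical process function
// are assumptions):
//
//	func BenchmarkProcess(b *testing.B) {
//		for _, size := range []int{1, 10, 100} {
//			b.Run(fmt.Sprint(size), func(b *testing.B) {
//				for i := 0; i < b.N; i++ {
//					process(size)
//				}
//			})
//		}
//	}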
func (b *B) Run(name string, f func(b *B)) bool {
	b.hasSub = true
	sub := &B{
		common:    common{name: name},
		benchFunc: f,
		benchTime: b.benchTime,
	}
	if sub.run1() {
		sub.run()
	}
	b.add(sub.result)
	return !sub.failed
}

// add simulates running benchmarks in sequence in a single iteration. It is
// used to give some meaningful results in case func Benchmark is used in
// combination with Run.
func (b *B) add(other BenchmarkResult) {
	r := &b.result
	// The aggregated BenchmarkResults resemble running all subbenchmarks
	// in sequence in a single benchmark.
	r.N = 1
	r.T += time.Duration(other.NsPerOp())
}

// A PB is used by RunParallel for running parallel benchmarks.
type PB struct {
}

// Next reports whether there are more iterations to execute. In this
// stub implementation it always reports false.
func (pb *PB) Next() bool {
	return false
}

// RunParallel runs a benchmark in parallel.
//
// Not implemented: this stub returns immediately without invoking body.
func (b *B) RunParallel(body func(*PB)) {
}

// Benchmark benchmarks a single function. It is useful for creating
// custom benchmarks that do not use the "go test" command.
//
// If f calls Run, the result will be an estimate of running all its
// subbenchmarks that don't call Run in sequence in a single benchmark.
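//
// A minimal sketch of standalone use (fib is a hypothetical function under
// test):
//
//	result := testing.Benchmark(func(b *testing.B) {
//		for i := 0; i < b.N; i++ {
//			fib(20)
//		}
//	})
//	fmt.Println(result.NsPerOp(), "ns/op")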
func Benchmark(f func(b *B)) BenchmarkResult {
	b := &B{
		benchFunc: f,
		benchTime: benchTime,
	}
	if b.run1() {
		b.run()
	}
	return b.result
}