import (
"flag"
"fmt"
- "internal/race"
+ "internal/sysinfo"
"io"
"math"
"os"
func initBenchmarkFlags() {
matchBenchmarks = flag.String("test.bench", "", "run only benchmarks matching `regexp`")
benchmarkMemory = flag.Bool("test.benchmem", false, "print memory allocations for benchmarks")
- flag.Var(&benchTime, "test.benchtime", "run each benchmark for duration `d`")
+ flag.Var(&benchTime, "test.benchtime", "run each benchmark for duration `d` or N times if `d` is of the form Nx")
}
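// A minimal usage sketch (illustrative, not part of this patch): with the
// extended flag above, -benchtime accepts either a duration or an
// iteration count.
//
//	go test -bench=. -benchtime=10s   // run each benchmark for ~10 seconds
//	go test -bench=. -benchtime=100x  // run each benchmark exactly 100 times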
var (
matchBenchmarks *string
benchmarkMemory *bool
- benchTime = benchTimeFlag{d: 1 * time.Second} // changed during test of testing package
+ benchTime = durationOrCountFlag{d: 1 * time.Second} // changed during test of testing package
)
-type benchTimeFlag struct {
- d time.Duration
- n int
+type durationOrCountFlag struct {
+ d time.Duration
+ n int
+ allowZero bool
}
-func (f *benchTimeFlag) String() string {
+func (f *durationOrCountFlag) String() string {
if f.n > 0 {
return fmt.Sprintf("%dx", f.n)
}
- return time.Duration(f.d).String()
+ return f.d.String()
}
-func (f *benchTimeFlag) Set(s string) error {
+func (f *durationOrCountFlag) Set(s string) error {
if strings.HasSuffix(s, "x") {
n, err := strconv.ParseInt(s[:len(s)-1], 10, 0)
- if err != nil || n <= 0 {
+ if err != nil || n < 0 || (!f.allowZero && n == 0) {
return fmt.Errorf("invalid count")
}
- *f = benchTimeFlag{n: int(n)}
+ *f = durationOrCountFlag{n: int(n)}
return nil
}
d, err := time.ParseDuration(s)
- if err != nil || d <= 0 {
+ if err != nil || d < 0 || (!f.allowZero && d == 0) {
return fmt.Errorf("invalid duration")
}
- *f = benchTimeFlag{d: d}
+ *f = durationOrCountFlag{d: d}
return nil
}
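// A minimal sketch of the accepted forms, assuming only the behavior
// implemented above (the variable name is illustrative):
//
//	var f durationOrCountFlag
//	_ = f.Set("100x")  // count form: f.n == 100, f.d == 0
//	_ = f.Set("2s")    // duration form: f.d == 2 * time.Second, f.n == 0
//	err := f.Set("0x") // rejected unless f.allowZero is set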
F func(b *B)
}
-// B is a type passed to Benchmark functions to manage benchmark
+// B is a type passed to [Benchmark] functions to manage benchmark
// timing and to specify the number of iterations to run.
//
// A benchmark ends when its Benchmark function returns or calls any of the methods
// FailNow, Fatal, Fatalf, SkipNow, Skip, or Skipf. Those methods must be
// called only from the goroutine running the Benchmark function.
// The other reporting methods, such as the variations of Log and Error,
// may be called simultaneously from multiple goroutines.
//
// Like in tests, benchmark logs are accumulated during execution
-// and dumped to standard error when done. Unlike in tests, benchmark logs
+// and dumped to standard output when done. Unlike in tests, benchmark logs
// are always printed, so as not to hide output whose existence may be
// affecting benchmark results.
type B struct {
previousN int // number of iterations in the previous run
previousDuration time.Duration // total duration of the previous run
benchFunc func(b *B)
- benchTime benchTimeFlag
+ benchTime durationOrCountFlag
bytes int64
missingBytes bool // one of the subbenchmarks does not have bytes set.
timerOn bool
// StartTimer starts timing a test. This function is called automatically
// before a benchmark starts, but it can also be used to resume timing after
-// a call to StopTimer.
+// a call to [B.StopTimer].
func (b *B) StartTimer() {
if !b.timerOn {
runtime.ReadMemStats(&memStats)
// Pre-size it to make more allocation unlikely.
b.extra = make(map[string]float64, 16)
} else {
- for k := range b.extra {
- delete(b.extra, k)
- }
+ clear(b.extra)
}
if b.timerOn {
runtime.ReadMemStats(&memStats)
func (b *B) runN(n int) {
benchmarkLock.Lock()
defer benchmarkLock.Unlock()
+ defer func() {
+ b.runCleanup(normalPanic)
+ b.checkRaces()
+ }()
// Try to get a comparable environment for each run
// by clearing garbage from previous runs.
runtime.GC()
- b.raceErrors = -race.Errors()
+ b.resetRaces()
b.N = n
b.parallelism = 1
b.ResetTimer()
b.StopTimer()
b.previousN = n
b.previousDuration = b.duration
- b.raceErrors += race.Errors()
- if b.raceErrors > 0 {
- b.Errorf("race detected during execution of benchmark")
- }
-}
-
-func min(x, y int64) int64 {
- if x > y {
- return y
- }
- return x
-}
-
-func max(x, y int64) int64 {
- if x < y {
- return y
- }
- return x
}
// run1 runs the first iteration of benchFunc. It reports whether more
// iterations of this benchmark should be run.
}()
<-b.signal
if b.failed {
- fmt.Fprintf(b.w, "--- FAIL: %s\n%s", b.name, b.output)
+ fmt.Fprintf(b.w, "%s--- FAIL: %s\n%s", b.chatty.prefix(), b.name, b.output)
return false
}
// Only print the output if we know we are not going to proceed.
// Otherwise it is printed in processBench.
- if atomic.LoadInt32(&b.hasSub) != 0 || b.finished {
+ b.mu.RLock()
+ finished := b.finished
+ b.mu.RUnlock()
+ if b.hasSub.Load() || finished {
tag := "BENCH"
if b.skipped {
tag = "SKIP"
}
- if b.chatty && (len(b.output) > 0 || b.finished) {
+ if b.chatty != nil && (len(b.output) > 0 || finished) {
b.trimOutput()
- fmt.Fprintf(b.w, "--- %s: %s\n%s", tag, b.name, b.output)
+ fmt.Fprintf(b.w, "%s--- %s: %s\n%s", b.chatty.prefix(), tag, b.name, b.output)
}
return false
}
if b.importPath != "" {
fmt.Fprintf(b.w, "pkg: %s\n", b.importPath)
}
+ if cpu := sysinfo.CPUName(); cpu != "" {
+ fmt.Fprintf(b.w, "cpu: %s\n", cpu)
+ }
})
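// Sketch of the resulting header (values are illustrative, not from this
// patch; goos/goarch are printed by the elided lines above):
//
//	goos: linux
//	goarch: amd64
//	pkg: example.com/mypkg
//	cpu: Intel(R) Xeon(R) CPU @ 2.80GHz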
if b.context != nil {
// Running go test --test.bench
// Run the benchmark for at least the specified amount of time.
if b.benchTime.n > 0 {
- b.runN(b.benchTime.n)
+ // We already ran a single iteration in run1.
+ // If -benchtime=1x was requested, use that result.
+ // See https://golang.org/issue/32051.
+ if b.benchTime.n > 1 {
+ b.runN(b.benchTime.n)
+ }
} else {
d := b.benchTime.d
for n := int64(1); !b.failed && b.duration < d && n < 1e9; {
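// The elided loop body predicts the next n from the previous run,
// roughly (a paraphrase, not the verbatim code of this patch):
//
//	n = goalns * prevIters / prevns // scale iterations to the time goal d
//	n += n / 5                      // run ~1.2x the prediction
//	n = min(n, 100*last)            // don't grow too fast
//	n = max(n, last+1)              // always make progress
//	n = min(n, 1e9)                 // cap total iterations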
b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes, b.extra}
}
+// Elapsed returns the measured elapsed time of the benchmark.
+// The duration reported by Elapsed matches the one measured by
+// [B.StartTimer], [B.StopTimer], and [B.ResetTimer].
+func (b *B) Elapsed() time.Duration {
+ d := b.duration
+ if b.timerOn {
+ d += time.Since(b.start)
+ }
+ return d
+}
+
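// A minimal sketch pairing the new Elapsed accessor with ReportMetric to
// derive a throughput-style metric ("ops" is an illustrative counter):
//
//	b.ReportMetric(float64(ops)/b.Elapsed().Seconds(), "ops/s")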
// ReportMetric adds "n unit" to the reported benchmark results.
// If the metric is per-iteration, the caller should divide by b.N,
// and by convention units should end in "/op".
// benchmark name.
// Extra metrics override built-in metrics of the same name.
// String does not include allocs/op or B/op, since those are reported
-// by MemString.
+// by [BenchmarkResult.MemString].
func (r BenchmarkResult) String() string {
buf := new(strings.Builder)
fmt.Fprintf(buf, "%8d", r.N)
func prettyPrint(w io.Writer, x float64, unit string) {
// Print all numbers with 10 places before the decimal point
- // and small numbers with three sig figs.
+ // and small numbers with four sig figs. Field widths are
+ // chosen to fit the whole part in 10 places while aligning
+ // the decimal point of all fractional formats.
var format string
switch y := math.Abs(x); {
- case y == 0 || y >= 99.95:
+ case y == 0 || y >= 999.95:
format = "%10.0f %s"
- case y >= 9.995:
+ case y >= 99.995:
format = "%12.1f %s"
- case y >= 0.9995:
+ case y >= 9.9995:
format = "%13.2f %s"
- case y >= 0.09995:
+ case y >= 0.99995:
format = "%14.3f %s"
- case y >= 0.009995:
+ case y >= 0.099995:
format = "%15.4f %s"
- case y >= 0.0009995:
+ case y >= 0.0099995:
format = "%16.5f %s"
- default:
+ case y >= 0.00099995:
format = "%17.6f %s"
+ default:
+ format = "%18.7f %s"
}
fmt.Fprintf(w, format, x, unit)
}
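// Sketch of the widened formats above (illustrative; field widths keep
// the decimal points aligned):
//
//	prettyPrint(w, 123.456, "ns/op") // "       123.5 ns/op"    via %12.1f
//	prettyPrint(w, 0.5, "ns/op")     // "         0.5000 ns/op" via %15.4f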
}
}
ctx := &benchContext{
- match: newMatcher(matchString, *matchBenchmarks, "-test.bench"),
+ match: newMatcher(matchString, *matchBenchmarks, "-test.bench", *skip),
extLen: len(benchmarkName("", maxprocs)),
}
var bs []InternalBenchmark
}
main := &B{
common: common{
- name: "Main",
- w: os.Stdout,
- chatty: *chatty,
+ name: "Main",
+ w: os.Stdout,
+ bench: true,
},
importPath: importPath,
benchFunc: func(b *B) {
benchTime: benchTime,
context: ctx,
}
+ if Verbose() {
+ main.chatty = newChattyPrinter(main.w)
+ }
main.runN(1)
return !main.failed
}
benchName := benchmarkName(b.name, procs)
// If it's chatty, we've already printed this information.
- if !b.chatty {
+ if b.chatty == nil {
fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
}
// Recompute the running time for all but the first iteration.
name: b.name,
w: b.w,
chatty: b.chatty,
+ bench: true,
},
benchFunc: b.benchFunc,
benchTime: b.benchTime,
// The output could be very long here, but probably isn't.
// We print it all, regardless, because we don't want to trim the reason
// the benchmark failed.
- fmt.Fprintf(b.w, "--- FAIL: %s\n%s", benchName, b.output)
+ fmt.Fprintf(b.w, "%s--- FAIL: %s\n%s", b.chatty.prefix(), benchName, b.output)
continue
}
results := r.String()
- if b.chatty {
+ if b.chatty != nil {
fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
}
if *benchmarkMemory || b.showAllocResult {
// benchmarks since the output generation time will skew the results.
if len(b.output) > 0 {
b.trimOutput()
- fmt.Fprintf(b.w, "--- BENCH: %s\n%s", benchName, b.output)
+ fmt.Fprintf(b.w, "%s--- BENCH: %s\n%s", b.chatty.prefix(), benchName, b.output)
}
if p := runtime.GOMAXPROCS(-1); p != procs {
fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
}
+ if b.chatty != nil && b.chatty.json {
+ b.chatty.Updatef("", "=== NAME  %s\n", "")
+ }
}
}
}
+// If hideStdoutForTesting is true, Run does not print the benchName.
+// This avoids a spurious print during 'go test' on package testing itself,
+// which invokes b.Run in its own tests (see sub_test.go).
+var hideStdoutForTesting = false
+
// Run benchmarks f as a subbenchmark with the given name. It reports
// whether there were any failures.
//
func (b *B) Run(name string, f func(b *B)) bool {
// Since b has subbenchmarks, we will no longer run it as a benchmark itself.
// Release the lock and acquire it on exit to ensure locks stay paired.
- atomic.StoreInt32(&b.hasSub, 1)
+ b.hasSub.Store(true)
benchmarkLock.Unlock()
defer benchmarkLock.Lock()
creator: pc[:n],
w: b.w,
chatty: b.chatty,
+ bench: true,
},
importPath: b.importPath,
benchFunc: f,
if partial {
// Partial name match, like -bench=X/Y matching BenchmarkX.
// Only process sub-benchmarks, if any.
- atomic.StoreInt32(&sub.hasSub, 1)
+ sub.hasSub.Store(true)
}
- if b.chatty {
+ if b.chatty != nil {
labelsOnce.Do(func() {
fmt.Printf("goos: %s\n", runtime.GOOS)
fmt.Printf("goarch: %s\n", runtime.GOARCH)
if b.importPath != "" {
fmt.Printf("pkg: %s\n", b.importPath)
}
+ if cpu := sysinfo.CPUName(); cpu != "" {
+ fmt.Printf("cpu: %s\n", cpu)
+ }
})
- fmt.Println(benchName)
+ if !hideStdoutForTesting {
+ if b.chatty.json {
+ b.chatty.Updatef(benchName, "=== RUN   %s\n", benchName)
+ }
+ fmt.Println(benchName)
+ }
}
if sub.run1() {
// RunParallel runs a benchmark in parallel.
// It creates multiple goroutines and distributes b.N iterations among them.
// The number of goroutines defaults to GOMAXPROCS. To increase parallelism for
-// non-CPU-bound benchmarks, call SetParallelism before RunParallel.
+// non-CPU-bound benchmarks, call [B.SetParallelism] before RunParallel.
// RunParallel is usually used with the go test -cpu flag.
//
// The body function will be run in each goroutine. It should set up any
// goroutine-local state and then iterate until pb.Next returns false.
-// It should not use the StartTimer, StopTimer, or ResetTimer functions,
-// because they have global effect. It should also not call Run.
+// It should not use the [B.StartTimer], [B.StopTimer], or [B.ResetTimer] functions,
+// because they have global effect. It should also not call [B.Run].
+//
+// RunParallel reports ns/op values as wall time for the benchmark as a whole,
+// not the sum of wall time or CPU time over each parallel goroutine.
func (b *B) RunParallel(body func(*PB)) {
if b.N == 0 {
return // Nothing to do when probing.
}
}
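// A minimal sketch of the canonical body shape described above: each
// goroutine sets up its own state and loops on pb.Next (the buffer work
// is illustrative):
//
//	b.RunParallel(func(pb *testing.PB) {
//		var buf bytes.Buffer // goroutine-local state
//		for pb.Next() {
//			buf.Reset()
//			buf.WriteString("hello")
//		}
//	})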
-// SetParallelism sets the number of goroutines used by RunParallel to p*GOMAXPROCS.
+// SetParallelism sets the number of goroutines used by [B.RunParallel] to p*GOMAXPROCS.
// There is usually no need to call SetParallelism for CPU-bound benchmarks.
// If p is less than 1, this call will have no effect.
func (b *B) SetParallelism(p int) {
// Benchmark benchmarks a single function. It is useful for creating
// custom benchmarks that do not use the "go test" command.
//
-// If f depends on testing flags, then Init must be used to register
-// those flags before calling Benchmark and before calling flag.Parse.
+// If f depends on testing flags, then [Init] must be used to register
+// those flags before calling Benchmark and before calling [flag.Parse].
//
// If f calls Run, the result will be an estimate of running all its
// subbenchmarks that don't call Run in sequence in a single benchmark.
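// A minimal sketch of driving Benchmark outside "go test", per the doc
// above; Init must run before flag.Parse (the loop body is illustrative):
//
//	testing.Init()
//	flag.Parse()
//	result := testing.Benchmark(func(b *testing.B) {
//		for i := 0; i < b.N; i++ {
//			fmt.Sprintf("%d", i)
//		}
//	})
//	fmt.Println(result)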