1 // Copyright 2020 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
7 // Metrics implementation exported to runtime/metrics.
15 // metrics is a map of runtime/metrics keys to data used by the runtime
16 // to sample each metric's value. metricsInit indicates it has been
19 // These fields are protected by metricsSema which should be
20 // locked/unlocked with metricsLock() / metricsUnlock().
21 metricsSema uint32 = 1
23 metrics map[string]metricData
25 sizeClassBuckets []float64
26 timeHistBuckets []float64
29 type metricData struct {
30 // deps is the set of runtime statistics that this metric
31 // depends on. Before compute is called, the statAggregate
32 // which will be passed must ensure() these dependencies.
35 // compute is a function that populates a metricValue
36 // given a populated statAggregate structure.
37 compute func(in *statAggregate, out *metricValue)
41 // Acquire the metricsSema but with handoff. Operations are typically
42 // expensive enough that queueing up goroutines and handing off between
43 // them will be noticeably better-behaved.
44 semacquire1(&metricsSema, true, 0, 0, waitReasonSemacquire)
46 raceacquire(unsafe.Pointer(&metricsSema))
50 func metricsUnlock() {
52 racerelease(unsafe.Pointer(&metricsSema))
54 semrelease(&metricsSema)
57 // initMetrics initializes the metrics map if it hasn't been yet.
59 // metricsSema must be held.
65 sizeClassBuckets = make([]float64, _NumSizeClasses, _NumSizeClasses+1)
66 // Skip size class 0 which is a stand-in for large objects, but large
67 // objects are tracked separately (and they actually get placed in
68 // the last bucket, not the first).
69 sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
70 for i := 1; i < _NumSizeClasses; i++ {
71 // Size classes have an inclusive upper-bound
72 // and exclusive lower bound (e.g. 48-byte size class is
73 // (32, 48]) whereas we want and inclusive lower-bound
74 // and exclusive upper-bound (e.g. 48-byte size class is
75 // [33, 49). We can achieve this by shifting all bucket
76 // boundaries up by 1.
78 // Also, a float64 can precisely represent integers with
79 // value up to 2^53 and size classes are relatively small
80 // (nowhere near 2^48 even) so this will give us exact
82 sizeClassBuckets[i] = float64(class_to_size[i] + 1)
84 sizeClassBuckets = append(sizeClassBuckets, float64Inf())
86 timeHistBuckets = timeHistogramMetricsBuckets()
87 metrics = map[string]metricData{
88 "/cgo/go-to-c-calls:calls": {
89 compute: func(_ *statAggregate, out *metricValue) {
90 out.kind = metricKindUint64
91 out.scalar = uint64(NumCgoCall())
94 "/cpu/classes/gc/mark/assist:cpu-seconds": {
95 deps: makeStatDepSet(cpuStatsDep),
96 compute: func(in *statAggregate, out *metricValue) {
97 out.kind = metricKindFloat64
98 out.scalar = float64bits(nsToSec(in.cpuStats.gcAssistTime))
101 "/cpu/classes/gc/mark/dedicated:cpu-seconds": {
102 deps: makeStatDepSet(cpuStatsDep),
103 compute: func(in *statAggregate, out *metricValue) {
104 out.kind = metricKindFloat64
105 out.scalar = float64bits(nsToSec(in.cpuStats.gcDedicatedTime))
108 "/cpu/classes/gc/mark/idle:cpu-seconds": {
109 deps: makeStatDepSet(cpuStatsDep),
110 compute: func(in *statAggregate, out *metricValue) {
111 out.kind = metricKindFloat64
112 out.scalar = float64bits(nsToSec(in.cpuStats.gcIdleTime))
115 "/cpu/classes/gc/pause:cpu-seconds": {
116 deps: makeStatDepSet(cpuStatsDep),
117 compute: func(in *statAggregate, out *metricValue) {
118 out.kind = metricKindFloat64
119 out.scalar = float64bits(nsToSec(in.cpuStats.gcPauseTime))
122 "/cpu/classes/gc/total:cpu-seconds": {
123 deps: makeStatDepSet(cpuStatsDep),
124 compute: func(in *statAggregate, out *metricValue) {
125 out.kind = metricKindFloat64
126 out.scalar = float64bits(nsToSec(in.cpuStats.gcTotalTime))
129 "/cpu/classes/idle:cpu-seconds": {
130 deps: makeStatDepSet(cpuStatsDep),
131 compute: func(in *statAggregate, out *metricValue) {
132 out.kind = metricKindFloat64
133 out.scalar = float64bits(nsToSec(in.cpuStats.idleTime))
136 "/cpu/classes/scavenge/assist:cpu-seconds": {
137 deps: makeStatDepSet(cpuStatsDep),
138 compute: func(in *statAggregate, out *metricValue) {
139 out.kind = metricKindFloat64
140 out.scalar = float64bits(nsToSec(in.cpuStats.scavengeAssistTime))
143 "/cpu/classes/scavenge/background:cpu-seconds": {
144 deps: makeStatDepSet(cpuStatsDep),
145 compute: func(in *statAggregate, out *metricValue) {
146 out.kind = metricKindFloat64
147 out.scalar = float64bits(nsToSec(in.cpuStats.scavengeBgTime))
150 "/cpu/classes/scavenge/total:cpu-seconds": {
151 deps: makeStatDepSet(cpuStatsDep),
152 compute: func(in *statAggregate, out *metricValue) {
153 out.kind = metricKindFloat64
154 out.scalar = float64bits(nsToSec(in.cpuStats.scavengeTotalTime))
157 "/cpu/classes/total:cpu-seconds": {
158 deps: makeStatDepSet(cpuStatsDep),
159 compute: func(in *statAggregate, out *metricValue) {
160 out.kind = metricKindFloat64
161 out.scalar = float64bits(nsToSec(in.cpuStats.totalTime))
164 "/cpu/classes/user:cpu-seconds": {
165 deps: makeStatDepSet(cpuStatsDep),
166 compute: func(in *statAggregate, out *metricValue) {
167 out.kind = metricKindFloat64
168 out.scalar = float64bits(nsToSec(in.cpuStats.userTime))
171 "/gc/cycles/automatic:gc-cycles": {
172 deps: makeStatDepSet(sysStatsDep),
173 compute: func(in *statAggregate, out *metricValue) {
174 out.kind = metricKindUint64
175 out.scalar = in.sysStats.gcCyclesDone - in.sysStats.gcCyclesForced
178 "/gc/cycles/forced:gc-cycles": {
179 deps: makeStatDepSet(sysStatsDep),
180 compute: func(in *statAggregate, out *metricValue) {
181 out.kind = metricKindUint64
182 out.scalar = in.sysStats.gcCyclesForced
185 "/gc/cycles/total:gc-cycles": {
186 deps: makeStatDepSet(sysStatsDep),
187 compute: func(in *statAggregate, out *metricValue) {
188 out.kind = metricKindUint64
189 out.scalar = in.sysStats.gcCyclesDone
192 "/gc/heap/allocs-by-size:bytes": {
193 deps: makeStatDepSet(heapStatsDep),
194 compute: func(in *statAggregate, out *metricValue) {
195 hist := out.float64HistOrInit(sizeClassBuckets)
196 hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeAllocCount)
197 // Cut off the first index which is ostensibly for size class 0,
198 // but large objects are tracked separately so it's actually unused.
199 for i, count := range in.heapStats.smallAllocCount[1:] {
200 hist.counts[i] = uint64(count)
204 "/gc/heap/allocs:bytes": {
205 deps: makeStatDepSet(heapStatsDep),
206 compute: func(in *statAggregate, out *metricValue) {
207 out.kind = metricKindUint64
208 out.scalar = in.heapStats.totalAllocated
211 "/gc/heap/allocs:objects": {
212 deps: makeStatDepSet(heapStatsDep),
213 compute: func(in *statAggregate, out *metricValue) {
214 out.kind = metricKindUint64
215 out.scalar = in.heapStats.totalAllocs
218 "/gc/heap/frees-by-size:bytes": {
219 deps: makeStatDepSet(heapStatsDep),
220 compute: func(in *statAggregate, out *metricValue) {
221 hist := out.float64HistOrInit(sizeClassBuckets)
222 hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeFreeCount)
223 // Cut off the first index which is ostensibly for size class 0,
224 // but large objects are tracked separately so it's actually unused.
225 for i, count := range in.heapStats.smallFreeCount[1:] {
226 hist.counts[i] = uint64(count)
230 "/gc/heap/frees:bytes": {
231 deps: makeStatDepSet(heapStatsDep),
232 compute: func(in *statAggregate, out *metricValue) {
233 out.kind = metricKindUint64
234 out.scalar = in.heapStats.totalFreed
237 "/gc/heap/frees:objects": {
238 deps: makeStatDepSet(heapStatsDep),
239 compute: func(in *statAggregate, out *metricValue) {
240 out.kind = metricKindUint64
241 out.scalar = in.heapStats.totalFrees
244 "/gc/heap/goal:bytes": {
245 deps: makeStatDepSet(sysStatsDep),
246 compute: func(in *statAggregate, out *metricValue) {
247 out.kind = metricKindUint64
248 out.scalar = in.sysStats.heapGoal
251 "/gc/heap/objects:objects": {
252 deps: makeStatDepSet(heapStatsDep),
253 compute: func(in *statAggregate, out *metricValue) {
254 out.kind = metricKindUint64
255 out.scalar = in.heapStats.numObjects
258 "/gc/heap/tiny/allocs:objects": {
259 deps: makeStatDepSet(heapStatsDep),
260 compute: func(in *statAggregate, out *metricValue) {
261 out.kind = metricKindUint64
262 out.scalar = uint64(in.heapStats.tinyAllocCount)
265 "/gc/limiter/last-enabled:gc-cycle": {
266 compute: func(_ *statAggregate, out *metricValue) {
267 out.kind = metricKindUint64
268 out.scalar = uint64(gcCPULimiter.lastEnabledCycle.Load())
271 "/gc/pauses:seconds": {
272 compute: func(_ *statAggregate, out *metricValue) {
273 hist := out.float64HistOrInit(timeHistBuckets)
274 // The bottom-most bucket, containing negative values, is tracked
275 // as a separately as underflow, so fill that in manually and then
276 // iterate over the rest.
277 hist.counts[0] = memstats.gcPauseDist.underflow.Load()
278 for i := range memstats.gcPauseDist.counts {
279 hist.counts[i+1] = memstats.gcPauseDist.counts[i].Load()
281 hist.counts[len(hist.counts)-1] = memstats.gcPauseDist.overflow.Load()
284 "/gc/stack/starting-size:bytes": {
285 compute: func(in *statAggregate, out *metricValue) {
286 out.kind = metricKindUint64
287 out.scalar = uint64(startingStackSize)
290 "/memory/classes/heap/free:bytes": {
291 deps: makeStatDepSet(heapStatsDep),
292 compute: func(in *statAggregate, out *metricValue) {
293 out.kind = metricKindUint64
294 out.scalar = uint64(in.heapStats.committed - in.heapStats.inHeap -
295 in.heapStats.inStacks - in.heapStats.inWorkBufs -
296 in.heapStats.inPtrScalarBits)
299 "/memory/classes/heap/objects:bytes": {
300 deps: makeStatDepSet(heapStatsDep),
301 compute: func(in *statAggregate, out *metricValue) {
302 out.kind = metricKindUint64
303 out.scalar = in.heapStats.inObjects
306 "/memory/classes/heap/released:bytes": {
307 deps: makeStatDepSet(heapStatsDep),
308 compute: func(in *statAggregate, out *metricValue) {
309 out.kind = metricKindUint64
310 out.scalar = uint64(in.heapStats.released)
313 "/memory/classes/heap/stacks:bytes": {
314 deps: makeStatDepSet(heapStatsDep),
315 compute: func(in *statAggregate, out *metricValue) {
316 out.kind = metricKindUint64
317 out.scalar = uint64(in.heapStats.inStacks)
320 "/memory/classes/heap/unused:bytes": {
321 deps: makeStatDepSet(heapStatsDep),
322 compute: func(in *statAggregate, out *metricValue) {
323 out.kind = metricKindUint64
324 out.scalar = uint64(in.heapStats.inHeap) - in.heapStats.inObjects
327 "/memory/classes/metadata/mcache/free:bytes": {
328 deps: makeStatDepSet(sysStatsDep),
329 compute: func(in *statAggregate, out *metricValue) {
330 out.kind = metricKindUint64
331 out.scalar = in.sysStats.mCacheSys - in.sysStats.mCacheInUse
334 "/memory/classes/metadata/mcache/inuse:bytes": {
335 deps: makeStatDepSet(sysStatsDep),
336 compute: func(in *statAggregate, out *metricValue) {
337 out.kind = metricKindUint64
338 out.scalar = in.sysStats.mCacheInUse
341 "/memory/classes/metadata/mspan/free:bytes": {
342 deps: makeStatDepSet(sysStatsDep),
343 compute: func(in *statAggregate, out *metricValue) {
344 out.kind = metricKindUint64
345 out.scalar = in.sysStats.mSpanSys - in.sysStats.mSpanInUse
348 "/memory/classes/metadata/mspan/inuse:bytes": {
349 deps: makeStatDepSet(sysStatsDep),
350 compute: func(in *statAggregate, out *metricValue) {
351 out.kind = metricKindUint64
352 out.scalar = in.sysStats.mSpanInUse
355 "/memory/classes/metadata/other:bytes": {
356 deps: makeStatDepSet(heapStatsDep, sysStatsDep),
357 compute: func(in *statAggregate, out *metricValue) {
358 out.kind = metricKindUint64
359 out.scalar = uint64(in.heapStats.inWorkBufs+in.heapStats.inPtrScalarBits) + in.sysStats.gcMiscSys
362 "/memory/classes/os-stacks:bytes": {
363 deps: makeStatDepSet(sysStatsDep),
364 compute: func(in *statAggregate, out *metricValue) {
365 out.kind = metricKindUint64
366 out.scalar = in.sysStats.stacksSys
369 "/memory/classes/other:bytes": {
370 deps: makeStatDepSet(sysStatsDep),
371 compute: func(in *statAggregate, out *metricValue) {
372 out.kind = metricKindUint64
373 out.scalar = in.sysStats.otherSys
376 "/memory/classes/profiling/buckets:bytes": {
377 deps: makeStatDepSet(sysStatsDep),
378 compute: func(in *statAggregate, out *metricValue) {
379 out.kind = metricKindUint64
380 out.scalar = in.sysStats.buckHashSys
383 "/memory/classes/total:bytes": {
384 deps: makeStatDepSet(heapStatsDep, sysStatsDep),
385 compute: func(in *statAggregate, out *metricValue) {
386 out.kind = metricKindUint64
387 out.scalar = uint64(in.heapStats.committed+in.heapStats.released) +
388 in.sysStats.stacksSys + in.sysStats.mSpanSys +
389 in.sysStats.mCacheSys + in.sysStats.buckHashSys +
390 in.sysStats.gcMiscSys + in.sysStats.otherSys
393 "/sched/gomaxprocs:threads": {
394 compute: func(_ *statAggregate, out *metricValue) {
395 out.kind = metricKindUint64
396 out.scalar = uint64(gomaxprocs)
399 "/sched/goroutines:goroutines": {
400 compute: func(_ *statAggregate, out *metricValue) {
401 out.kind = metricKindUint64
402 out.scalar = uint64(gcount())
405 "/sched/latencies:seconds": {
406 compute: func(_ *statAggregate, out *metricValue) {
407 hist := out.float64HistOrInit(timeHistBuckets)
408 hist.counts[0] = sched.timeToRun.underflow.Load()
409 for i := range sched.timeToRun.counts {
410 hist.counts[i+1] = sched.timeToRun.counts[i].Load()
412 hist.counts[len(hist.counts)-1] = sched.timeToRun.overflow.Load()
415 "/sync/mutex/wait/total:seconds": {
416 compute: func(_ *statAggregate, out *metricValue) {
417 out.kind = metricKindFloat64
418 out.scalar = float64bits(nsToSec(sched.totalMutexWaitTime.Load()))
423 for _, info := range godebugs.All {
425 metrics["/godebug/non-default-behavior/"+info.Name+":events"] = metricData{compute: compute0}
432 func compute0(_ *statAggregate, out *metricValue) {
433 out.kind = metricKindUint64
437 type metricReader func() uint64
439 func (f metricReader) compute(_ *statAggregate, out *metricValue) {
440 out.kind = metricKindUint64
444 //go:linkname godebug_registerMetric internal/godebug.registerMetric
445 func godebug_registerMetric(name string, read func() uint64) {
448 d, ok := metrics[name]
450 throw("runtime: unexpected metric registration for " + name)
452 d.compute = metricReader(read).compute
// statDep is a dependency on a group of statistics
// that a metric might have.
type statDep int

const (
	heapStatsDep statDep = iota // corresponds to heapStatsAggregate
	sysStatsDep                 // corresponds to sysStatsAggregate
	cpuStatsDep                 // corresponds to cpuStatsAggregate
	numStatsDeps
)
// statDepSet represents a set of statDeps.
//
// Under the hood, it's a bitmap.
type statDepSet [1]uint64
473 // makeStatDepSet creates a new statDepSet from a list of statDeps.
474 func makeStatDepSet(deps ...statDep) statDepSet {
476 for _, d := range deps {
477 s[d/64] |= 1 << (d % 64)
482 // difference returns set difference of s from b as a new set.
483 func (s statDepSet) difference(b statDepSet) statDepSet {
491 // union returns the union of the two sets as a new set.
492 func (s statDepSet) union(b statDepSet) statDepSet {
500 // empty returns true if there are no dependencies in the set.
501 func (s *statDepSet) empty() bool {
502 for _, c := range s {
510 // has returns true if the set contains a given statDep.
511 func (s *statDepSet) has(d statDep) bool {
512 return s[d/64]&(1<<(d%64)) != 0
515 // heapStatsAggregate represents memory stats obtained from the
516 // runtime. This set of stats is grouped together because they
517 // depend on each other in some way to make sense of the runtime's
518 // current heap memory use. They're also sharded across Ps, so it
519 // makes sense to grab them all at once.
520 type heapStatsAggregate struct {
523 // Derived from values in heapStatsDelta.
525 // inObjects is the bytes of memory occupied by objects,
528 // numObjects is the number of live objects in the heap.
531 // totalAllocated is the total bytes of heap objects allocated
532 // over the lifetime of the program.
533 totalAllocated uint64
535 // totalFreed is the total bytes of heap objects freed
536 // over the lifetime of the program.
539 // totalAllocs is the number of heap objects allocated over
540 // the lifetime of the program.
543 // totalFrees is the number of heap objects freed over
544 // the lifetime of the program.
548 // compute populates the heapStatsAggregate with values from the runtime.
549 func (a *heapStatsAggregate) compute() {
550 memstats.heapStats.read(&a.heapStatsDelta)
552 // Calculate derived stats.
553 a.totalAllocs = a.largeAllocCount
554 a.totalFrees = a.largeFreeCount
555 a.totalAllocated = a.largeAlloc
556 a.totalFreed = a.largeFree
557 for i := range a.smallAllocCount {
558 na := a.smallAllocCount[i]
559 nf := a.smallFreeCount[i]
562 a.totalAllocated += na * uint64(class_to_size[i])
563 a.totalFreed += nf * uint64(class_to_size[i])
565 a.inObjects = a.totalAllocated - a.totalFreed
566 a.numObjects = a.totalAllocs - a.totalFrees
// sysStatsAggregate represents system memory stats obtained
// from the runtime. This set of stats is grouped together because
// they're all relatively cheap to acquire and generally independent
// of one another and other runtime memory stats. The fact that they
// may be acquired at different times, especially with respect to
// heapStatsAggregate, means there could be some skew, but because of
// these stats are independent, there's no real consistency issue here.
type sysStatsAggregate struct {
	stacksSys      uint64
	mSpanSys       uint64
	mSpanInUse     uint64
	mCacheSys      uint64
	mCacheInUse    uint64
	buckHashSys    uint64
	gcMiscSys      uint64
	otherSys       uint64
	heapGoal       uint64
	gcCyclesDone   uint64
	gcCyclesForced uint64
}
590 // compute populates the sysStatsAggregate with values from the runtime.
591 func (a *sysStatsAggregate) compute() {
592 a.stacksSys = memstats.stacks_sys.load()
593 a.buckHashSys = memstats.buckhash_sys.load()
594 a.gcMiscSys = memstats.gcMiscSys.load()
595 a.otherSys = memstats.other_sys.load()
596 a.heapGoal = gcController.heapGoal()
597 a.gcCyclesDone = uint64(memstats.numgc)
598 a.gcCyclesForced = uint64(memstats.numforcedgc)
602 a.mSpanSys = memstats.mspan_sys.load()
603 a.mSpanInUse = uint64(mheap_.spanalloc.inuse)
604 a.mCacheSys = memstats.mcache_sys.load()
605 a.mCacheInUse = uint64(mheap_.cachealloc.inuse)
610 // cpuStatsAggregate represents CPU stats obtained from the runtime
611 // acquired together to avoid skew and inconsistencies.
612 type cpuStatsAggregate struct {
616 // compute populates the cpuStatsAggregate with values from the runtime.
617 func (a *cpuStatsAggregate) compute() {
618 a.cpuStats = work.cpuStats
// nsToSec takes a duration in nanoseconds and converts it to seconds as
// a float64.
func nsToSec(ns int64) float64 {
	return float64(ns) / 1e9
}
627 // statAggregate is the main driver of the metrics implementation.
629 // It contains multiple aggregates of runtime statistics, as well
630 // as a set of these aggregates that it has populated. The aggregates
631 // are populated lazily by its ensure method.
632 type statAggregate struct {
634 heapStats heapStatsAggregate
635 sysStats sysStatsAggregate
636 cpuStats cpuStatsAggregate
639 // ensure populates statistics aggregates determined by deps if they
640 // haven't yet been populated.
641 func (a *statAggregate) ensure(deps *statDepSet) {
642 missing := deps.difference(a.ensured)
646 for i := statDep(0); i < numStatsDeps; i++ {
652 a.heapStats.compute()
659 a.ensured = a.ensured.union(missing)
// metricKind is a runtime copy of runtime/metrics.ValueKind and
// must be kept structurally identical to that type.
type metricKind int

const (
	// These values must be kept identical to their corresponding Kind* values
	// in the runtime/metrics package.
	metricKindBad metricKind = iota
	metricKindUint64
	metricKindFloat64
	metricKindFloat64Histogram
)
675 // metricSample is a runtime copy of runtime/metrics.Sample and
676 // must be kept structurally identical to that type.
677 type metricSample struct {
682 // metricValue is a runtime copy of runtime/metrics.Sample and
683 // must be kept structurally identical to that type.
684 type metricValue struct {
686 scalar uint64 // contains scalar values for scalar Kinds.
687 pointer unsafe.Pointer // contains non-scalar values.
690 // float64HistOrInit tries to pull out an existing float64Histogram
691 // from the value, but if none exists, then it allocates one with
692 // the given buckets.
693 func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogram {
694 var hist *metricFloat64Histogram
695 if v.kind == metricKindFloat64Histogram && v.pointer != nil {
696 hist = (*metricFloat64Histogram)(v.pointer)
698 v.kind = metricKindFloat64Histogram
699 hist = new(metricFloat64Histogram)
700 v.pointer = unsafe.Pointer(hist)
702 hist.buckets = buckets
703 if len(hist.counts) != len(hist.buckets)-1 {
704 hist.counts = make([]uint64, len(buckets)-1)
// metricFloat64Histogram is a runtime copy of runtime/metrics.Float64Histogram
// and must be kept structurally identical to that type.
type metricFloat64Histogram struct {
	counts  []uint64
	buckets []float64
}
// agg is used by readMetrics, and is protected by metricsSema.
//
// Managed as a global variable because its pointer will be
// an argument to a dynamically-defined function, and we'd
// like to avoid it escaping to the heap.
var agg statAggregate
723 type metricName struct {
728 // readMetricNames is the implementation of runtime/metrics.readMetricNames,
729 // used by the runtime/metrics test and otherwise unreferenced.
731 //go:linkname readMetricNames runtime/metrics_test.runtime_readMetricNames
732 func readMetricNames() []string {
738 list := make([]string, 0, n)
741 for name := range metrics {
742 list = append(list, name)
749 // readMetrics is the implementation of runtime/metrics.Read.
751 //go:linkname readMetrics runtime/metrics.runtime_readMetrics
752 func readMetrics(samplesp unsafe.Pointer, len int, cap int) {
753 // Construct a slice from the args.
754 sl := slice{samplesp, len, cap}
755 samples := *(*[]metricSample)(unsafe.Pointer(&sl))
759 // Ensure the map is initialized.
762 // Clear agg defensively.
763 agg = statAggregate{}
766 for i := range samples {
767 sample := &samples[i]
768 data, ok := metrics[sample.name]
770 sample.value.kind = metricKindBad
773 // Ensure we have all the stats we need.
774 // agg is populated lazily.
775 agg.ensure(&data.deps)
777 // Compute the value based on the stats we have.
778 data.compute(&agg, &sample.value)