},
},
"/gc/scan/globals:bytes": {
+ deps: makeStatDepSet(gcStatsDep),
// Reports the bytes of scannable global variable space, read from the
// gcStats sample (globalsScan) captured in statAggregate.
compute: func(in *statAggregate, out *metricValue) {
out.kind = metricKindUint64
out.scalar = in.gcStats.globalsScan
},
},
"/gc/scan/heap:bytes": {
+ deps: makeStatDepSet(gcStatsDep),
// Reports the bytes of scannable heap space, read from the gcStats
// sample (heapScan) captured in statAggregate.
compute: func(in *statAggregate, out *metricValue) {
out.kind = metricKindUint64
out.scalar = in.gcStats.heapScan
},
},
"/gc/scan/stack:bytes": {
+ deps: makeStatDepSet(gcStatsDep),
// Reports the bytes of scannable stack space, read from the gcStats
// sample (stackScan) captured in statAggregate.
compute: func(in *statAggregate, out *metricValue) {
out.kind = metricKindUint64
out.scalar = in.gcStats.stackScan
},
},
"/gc/scan/total:bytes": {
+ deps: makeStatDepSet(gcStatsDep),
// Reports the total scannable space (totalScan); the test further down
// cross-checks this against the sum of the individual /gc/scan/* metrics.
compute: func(in *statAggregate, out *metricValue) {
out.kind = metricKindUint64
out.scalar = in.gcStats.totalScan
// NOTE(review): the commented-out cpuStats.accumulate line below appears
// to be stray scratch from an unrelated part of the file — it does not
// belong in this compute func; confirm against the full source. This
// entry also seems to be missing its closing "}," — likely an artifact
// of how this chunk was cut, verify before applying the patch.
// a.cpuStats.accumulate(nanotime(), gcphase == _GCmark)
}
-// cpuStatsAggregate represents various GC stats obtained from the runtime
+// gcStatsAggregate represents various GC stats obtained from the runtime
// acquired together to avoid skew and inconsistencies.
type gcStatsAggregate struct {
heapScan uint64
// Each GC cycle is expected to contribute at least two pause events.
if gc.pauses < gc.numGC*2 {
t.Errorf("fewer pauses than expected: got %d, want at least %d", gc.pauses, gc.numGC*2)
}
+ // New check added by this patch: a live program always has some
+ // scannable GC space, so a non-positive total means the metric is
+ // not being populated at all.
+ if totalScan.got <= 0 {
+ t.Errorf("scannable GC space is empty: %d", totalScan.got)
+ }
// /gc/scan/total:bytes must equal the sum of the individual scan metrics.
if totalScan.got != totalScan.want {
t.Errorf("/gc/scan/total:bytes doesn't line up with sum of /gc/scan*: total %d vs. sum %d", totalScan.got, totalScan.want)
}