1 // Copyright 2020 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
7 // Metrics implementation exported to runtime/metrics.
14 // metrics is a map of runtime/metrics keys to data used by the runtime
15 // to sample each metric's value. metricsInit indicates it has been
18 // These fields are protected by metricsSema which should be
19 // locked/unlocked with metricsLock() / metricsUnlock().
// metricsSema is a runtime semaphore guarding the fields below
// (1 = available/unlocked).
20 metricsSema uint32 = 1
// metrics maps runtime/metrics key strings to their sampling metadata;
// populated lazily by initMetrics.
22 metrics map[string]metricData
// sizeClassBuckets holds histogram bucket boundaries derived from the
// allocator's size classes (built in initMetrics).
24 sizeClassBuckets []float64
// timeHistBuckets holds bucket boundaries for time-based histograms
// (built in initMetrics via timeHistogramMetricsBuckets).
25 timeHistBuckets []float64
// metricData describes how to sample a single runtime/metrics metric.
28 type metricData struct {
29 // deps is the set of runtime statistics that this metric
30 // depends on. Before compute is called, the statAggregate
31 // which will be passed must ensure() these dependencies.
// NOTE(review): the deps field declaration itself appears elided from
// this view — confirm against the full file.
34 // compute is a function that populates a metricValue
35 // given a populated statAggregate structure.
36 compute func(in *statAggregate, out *metricValue)
// metricsLock acquires metricsSema, serializing access to the metrics
// map and bucket slices. NOTE(review): the func declaration line is
// elided from this view — confirm against the full file.
40 // Acquire the metricsSema but with handoff. Operations are typically
41 // expensive enough that queueing up goroutines and handing off between
42 // them will be noticeably better-behaved.
43 semacquire1(&metricsSema, true, 0, 0, waitReasonSemacquire)
// NOTE(review): the raceacquire call is presumably guarded by an
// `if raceenabled` check on an elided line — confirm.
45 raceacquire(unsafe.Pointer(&metricsSema))
// metricsUnlock releases metricsSema; pairs with metricsLock.
49 func metricsUnlock() {
// NOTE(review): racerelease is presumably guarded by an elided
// `if raceenabled` check — confirm against the full file.
51 racerelease(unsafe.Pointer(&metricsSema))
53 semrelease(&metricsSema)
56 // initMetrics initializes the metrics map if it hasn't been yet.
58 // metricsSema must be held.
// NOTE(review): the func declaration and the metricsInit fast-path
// guard appear elided from this view — confirm against the full file.
64 sizeClassBuckets = make([]float64, _NumSizeClasses, _NumSizeClasses+1)
65 // Skip size class 0 which is a stand-in for large objects, but large
66 // objects are tracked separately (and they actually get placed in
67 // the last bucket, not the first).
68 sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
69 for i := 1; i < _NumSizeClasses; i++ {
70 // Size classes have an inclusive upper-bound
71 // and exclusive lower bound (e.g. 48-byte size class is
72 // (32, 48]) whereas we want an inclusive lower-bound
73 // and exclusive upper-bound (e.g. 48-byte size class is
74 // [33, 49). We can achieve this by shifting all bucket
75 // boundaries up by 1.
77 // Also, a float64 can precisely represent integers with
78 // value up to 2^53 and size classes are relatively small
79 // (nowhere near 2^48 even) so this will give us exact
81 sizeClassBuckets[i] = float64(class_to_size[i] + 1)
83 sizeClassBuckets = append(sizeClassBuckets, float64Inf())
85 timeHistBuckets = timeHistogramMetricsBuckets()
86 metrics = map[string]metricData{
87 "/cgo/go-to-c-calls:calls": {
88 compute: func(_ *statAggregate, out *metricValue) {
89 out.kind = metricKindUint64
90 out.scalar = uint64(NumCgoCall())
93 "/cpu/classes/gc/mark/assist:cpu-seconds": {
94 deps: makeStatDepSet(cpuStatsDep),
95 compute: func(in *statAggregate, out *metricValue) {
96 out.kind = metricKindFloat64
97 out.scalar = float64bits(nsToSec(in.cpuStats.gcAssistTime))
100 "/cpu/classes/gc/mark/dedicated:cpu-seconds": {
101 deps: makeStatDepSet(cpuStatsDep),
102 compute: func(in *statAggregate, out *metricValue) {
103 out.kind = metricKindFloat64
104 out.scalar = float64bits(nsToSec(in.cpuStats.gcDedicatedTime))
107 "/cpu/classes/gc/mark/idle:cpu-seconds": {
108 deps: makeStatDepSet(cpuStatsDep),
109 compute: func(in *statAggregate, out *metricValue) {
110 out.kind = metricKindFloat64
111 out.scalar = float64bits(nsToSec(in.cpuStats.gcIdleTime))
114 "/cpu/classes/gc/pause:cpu-seconds": {
115 deps: makeStatDepSet(cpuStatsDep),
116 compute: func(in *statAggregate, out *metricValue) {
117 out.kind = metricKindFloat64
118 out.scalar = float64bits(nsToSec(in.cpuStats.gcPauseTime))
121 "/cpu/classes/gc/total:cpu-seconds": {
122 deps: makeStatDepSet(cpuStatsDep),
123 compute: func(in *statAggregate, out *metricValue) {
124 out.kind = metricKindFloat64
125 out.scalar = float64bits(nsToSec(in.cpuStats.gcTotalTime))
128 "/cpu/classes/idle:cpu-seconds": {
129 deps: makeStatDepSet(cpuStatsDep),
130 compute: func(in *statAggregate, out *metricValue) {
131 out.kind = metricKindFloat64
132 out.scalar = float64bits(nsToSec(in.cpuStats.idleTime))
135 "/cpu/classes/scavenge/assist:cpu-seconds": {
136 deps: makeStatDepSet(cpuStatsDep),
137 compute: func(in *statAggregate, out *metricValue) {
138 out.kind = metricKindFloat64
139 out.scalar = float64bits(nsToSec(in.cpuStats.scavengeAssistTime))
142 "/cpu/classes/scavenge/background:cpu-seconds": {
143 deps: makeStatDepSet(cpuStatsDep),
144 compute: func(in *statAggregate, out *metricValue) {
145 out.kind = metricKindFloat64
146 out.scalar = float64bits(nsToSec(in.cpuStats.scavengeBgTime))
149 "/cpu/classes/scavenge/total:cpu-seconds": {
150 deps: makeStatDepSet(cpuStatsDep),
151 compute: func(in *statAggregate, out *metricValue) {
152 out.kind = metricKindFloat64
153 out.scalar = float64bits(nsToSec(in.cpuStats.scavengeTotalTime))
156 "/cpu/classes/total:cpu-seconds": {
157 deps: makeStatDepSet(cpuStatsDep),
158 compute: func(in *statAggregate, out *metricValue) {
159 out.kind = metricKindFloat64
160 out.scalar = float64bits(nsToSec(in.cpuStats.totalTime))
163 "/cpu/classes/user:cpu-seconds": {
164 deps: makeStatDepSet(cpuStatsDep),
165 compute: func(in *statAggregate, out *metricValue) {
166 out.kind = metricKindFloat64
167 out.scalar = float64bits(nsToSec(in.cpuStats.userTime))
170 "/gc/cycles/automatic:gc-cycles": {
171 deps: makeStatDepSet(sysStatsDep),
172 compute: func(in *statAggregate, out *metricValue) {
173 out.kind = metricKindUint64
174 out.scalar = in.sysStats.gcCyclesDone - in.sysStats.gcCyclesForced
177 "/gc/cycles/forced:gc-cycles": {
178 deps: makeStatDepSet(sysStatsDep),
179 compute: func(in *statAggregate, out *metricValue) {
180 out.kind = metricKindUint64
181 out.scalar = in.sysStats.gcCyclesForced
184 "/gc/cycles/total:gc-cycles": {
185 deps: makeStatDepSet(sysStatsDep),
186 compute: func(in *statAggregate, out *metricValue) {
187 out.kind = metricKindUint64
188 out.scalar = in.sysStats.gcCyclesDone
191 "/gc/heap/allocs-by-size:bytes": {
192 deps: makeStatDepSet(heapStatsDep),
193 compute: func(in *statAggregate, out *metricValue) {
194 hist := out.float64HistOrInit(sizeClassBuckets)
195 hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeAllocCount)
196 // Cut off the first index which is ostensibly for size class 0,
197 // but large objects are tracked separately so it's actually unused.
198 for i, count := range in.heapStats.smallAllocCount[1:] {
199 hist.counts[i] = uint64(count)
203 "/gc/heap/allocs:bytes": {
204 deps: makeStatDepSet(heapStatsDep),
205 compute: func(in *statAggregate, out *metricValue) {
206 out.kind = metricKindUint64
207 out.scalar = in.heapStats.totalAllocated
210 "/gc/heap/allocs:objects": {
211 deps: makeStatDepSet(heapStatsDep),
212 compute: func(in *statAggregate, out *metricValue) {
213 out.kind = metricKindUint64
214 out.scalar = in.heapStats.totalAllocs
217 "/gc/heap/frees-by-size:bytes": {
218 deps: makeStatDepSet(heapStatsDep),
219 compute: func(in *statAggregate, out *metricValue) {
220 hist := out.float64HistOrInit(sizeClassBuckets)
221 hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeFreeCount)
222 // Cut off the first index which is ostensibly for size class 0,
223 // but large objects are tracked separately so it's actually unused.
224 for i, count := range in.heapStats.smallFreeCount[1:] {
225 hist.counts[i] = uint64(count)
229 "/gc/heap/frees:bytes": {
230 deps: makeStatDepSet(heapStatsDep),
231 compute: func(in *statAggregate, out *metricValue) {
232 out.kind = metricKindUint64
233 out.scalar = in.heapStats.totalFreed
236 "/gc/heap/frees:objects": {
237 deps: makeStatDepSet(heapStatsDep),
238 compute: func(in *statAggregate, out *metricValue) {
239 out.kind = metricKindUint64
240 out.scalar = in.heapStats.totalFrees
243 "/gc/heap/goal:bytes": {
244 deps: makeStatDepSet(sysStatsDep),
245 compute: func(in *statAggregate, out *metricValue) {
246 out.kind = metricKindUint64
247 out.scalar = in.sysStats.heapGoal
250 "/gc/heap/objects:objects": {
251 deps: makeStatDepSet(heapStatsDep),
252 compute: func(in *statAggregate, out *metricValue) {
253 out.kind = metricKindUint64
254 out.scalar = in.heapStats.numObjects
257 "/gc/heap/tiny/allocs:objects": {
258 deps: makeStatDepSet(heapStatsDep),
259 compute: func(in *statAggregate, out *metricValue) {
260 out.kind = metricKindUint64
261 out.scalar = uint64(in.heapStats.tinyAllocCount)
264 "/gc/limiter/last-enabled:gc-cycle": {
265 compute: func(_ *statAggregate, out *metricValue) {
266 out.kind = metricKindUint64
267 out.scalar = uint64(gcCPULimiter.lastEnabledCycle.Load())
270 "/gc/pauses:seconds": {
271 compute: func(_ *statAggregate, out *metricValue) {
272 hist := out.float64HistOrInit(timeHistBuckets)
273 // The bottom-most bucket, containing negative values, is tracked
274 // separately as underflow, so fill that in manually and then
275 // iterate over the rest.
276 hist.counts[0] = memstats.gcPauseDist.underflow.Load()
277 for i := range memstats.gcPauseDist.counts {
278 hist.counts[i+1] = memstats.gcPauseDist.counts[i].Load()
280 hist.counts[len(hist.counts)-1] = memstats.gcPauseDist.overflow.Load()
283 "/gc/stack/starting-size:bytes": {
284 compute: func(in *statAggregate, out *metricValue) {
285 out.kind = metricKindUint64
286 out.scalar = uint64(startingStackSize)
289 "/godebug/non-default-behavior/execerrdot:events": {compute: compute0},
290 "/godebug/non-default-behavior/http2client:events": {compute: compute0},
291 "/godebug/non-default-behavior/http2server:events": {compute: compute0},
292 "/godebug/non-default-behavior/installgoroot:events": {compute: compute0},
293 "/godebug/non-default-behavior/panicnil:events": {compute: compute0},
294 "/godebug/non-default-behavior/randautoseed:events": {compute: compute0},
295 "/godebug/non-default-behavior/tarinsecurepath:events": {compute: compute0},
296 "/godebug/non-default-behavior/x509sha1:events": {compute: compute0},
297 "/godebug/non-default-behavior/x509usefallbackroots:events": {compute: compute0},
298 "/godebug/non-default-behavior/zipinsecurepath:events": {compute: compute0},
299 "/memory/classes/heap/free:bytes": {
300 deps: makeStatDepSet(heapStatsDep),
301 compute: func(in *statAggregate, out *metricValue) {
302 out.kind = metricKindUint64
303 out.scalar = uint64(in.heapStats.committed - in.heapStats.inHeap -
304 in.heapStats.inStacks - in.heapStats.inWorkBufs -
305 in.heapStats.inPtrScalarBits)
308 "/memory/classes/heap/objects:bytes": {
309 deps: makeStatDepSet(heapStatsDep),
310 compute: func(in *statAggregate, out *metricValue) {
311 out.kind = metricKindUint64
312 out.scalar = in.heapStats.inObjects
315 "/memory/classes/heap/released:bytes": {
316 deps: makeStatDepSet(heapStatsDep),
317 compute: func(in *statAggregate, out *metricValue) {
318 out.kind = metricKindUint64
319 out.scalar = uint64(in.heapStats.released)
322 "/memory/classes/heap/stacks:bytes": {
323 deps: makeStatDepSet(heapStatsDep),
324 compute: func(in *statAggregate, out *metricValue) {
325 out.kind = metricKindUint64
326 out.scalar = uint64(in.heapStats.inStacks)
329 "/memory/classes/heap/unused:bytes": {
330 deps: makeStatDepSet(heapStatsDep),
331 compute: func(in *statAggregate, out *metricValue) {
332 out.kind = metricKindUint64
333 out.scalar = uint64(in.heapStats.inHeap) - in.heapStats.inObjects
336 "/memory/classes/metadata/mcache/free:bytes": {
337 deps: makeStatDepSet(sysStatsDep),
338 compute: func(in *statAggregate, out *metricValue) {
339 out.kind = metricKindUint64
340 out.scalar = in.sysStats.mCacheSys - in.sysStats.mCacheInUse
343 "/memory/classes/metadata/mcache/inuse:bytes": {
344 deps: makeStatDepSet(sysStatsDep),
345 compute: func(in *statAggregate, out *metricValue) {
346 out.kind = metricKindUint64
347 out.scalar = in.sysStats.mCacheInUse
350 "/memory/classes/metadata/mspan/free:bytes": {
351 deps: makeStatDepSet(sysStatsDep),
352 compute: func(in *statAggregate, out *metricValue) {
353 out.kind = metricKindUint64
354 out.scalar = in.sysStats.mSpanSys - in.sysStats.mSpanInUse
357 "/memory/classes/metadata/mspan/inuse:bytes": {
358 deps: makeStatDepSet(sysStatsDep),
359 compute: func(in *statAggregate, out *metricValue) {
360 out.kind = metricKindUint64
361 out.scalar = in.sysStats.mSpanInUse
364 "/memory/classes/metadata/other:bytes": {
365 deps: makeStatDepSet(heapStatsDep, sysStatsDep),
366 compute: func(in *statAggregate, out *metricValue) {
367 out.kind = metricKindUint64
368 out.scalar = uint64(in.heapStats.inWorkBufs+in.heapStats.inPtrScalarBits) + in.sysStats.gcMiscSys
371 "/memory/classes/os-stacks:bytes": {
372 deps: makeStatDepSet(sysStatsDep),
373 compute: func(in *statAggregate, out *metricValue) {
374 out.kind = metricKindUint64
375 out.scalar = in.sysStats.stacksSys
378 "/memory/classes/other:bytes": {
379 deps: makeStatDepSet(sysStatsDep),
380 compute: func(in *statAggregate, out *metricValue) {
381 out.kind = metricKindUint64
382 out.scalar = in.sysStats.otherSys
385 "/memory/classes/profiling/buckets:bytes": {
386 deps: makeStatDepSet(sysStatsDep),
387 compute: func(in *statAggregate, out *metricValue) {
388 out.kind = metricKindUint64
389 out.scalar = in.sysStats.buckHashSys
392 "/memory/classes/total:bytes": {
393 deps: makeStatDepSet(heapStatsDep, sysStatsDep),
394 compute: func(in *statAggregate, out *metricValue) {
395 out.kind = metricKindUint64
396 out.scalar = uint64(in.heapStats.committed+in.heapStats.released) +
397 in.sysStats.stacksSys + in.sysStats.mSpanSys +
398 in.sysStats.mCacheSys + in.sysStats.buckHashSys +
399 in.sysStats.gcMiscSys + in.sysStats.otherSys
402 "/sched/gomaxprocs:threads": {
403 compute: func(_ *statAggregate, out *metricValue) {
404 out.kind = metricKindUint64
405 out.scalar = uint64(gomaxprocs)
408 "/sched/goroutines:goroutines": {
409 compute: func(_ *statAggregate, out *metricValue) {
410 out.kind = metricKindUint64
411 out.scalar = uint64(gcount())
414 "/sched/latencies:seconds": {
415 compute: func(_ *statAggregate, out *metricValue) {
416 hist := out.float64HistOrInit(timeHistBuckets)
417 hist.counts[0] = sched.timeToRun.underflow.Load()
418 for i := range sched.timeToRun.counts {
419 hist.counts[i+1] = sched.timeToRun.counts[i].Load()
421 hist.counts[len(hist.counts)-1] = sched.timeToRun.overflow.Load()
424 "/sync/mutex/wait/total:seconds": {
425 compute: func(_ *statAggregate, out *metricValue) {
426 out.kind = metricKindFloat64
427 out.scalar = float64bits(nsToSec(sched.totalMutexWaitTime.Load()))
// compute0 is the placeholder compute function installed for /godebug
// metrics until godebug_registerMetric swaps in a real reader.
// NOTE(review): the scalar assignment (presumably out.scalar = 0) is
// on an elided line — confirm against the full file.
434 func compute0(_ *statAggregate, out *metricValue) {
435 out.kind = metricKindUint64
// metricReader adapts a plain func() uint64 into a metricData.compute
// implementation (used by godebug_registerMetric).
439 type metricReader func() uint64
// compute reports the reader's current value as a uint64 metric.
// NOTE(review): the out.scalar = f() assignment appears elided from
// this view — confirm against the full file.
441 func (f metricReader) compute(_ *statAggregate, out *metricValue) {
442 out.kind = metricKindUint64
// godebugNonDefaults lists GODEBUG setting names; the initializer
// elements are elided from this view — confirm against the full file.
446 var godebugNonDefaults = []string{
// godebug_registerMetric installs a live reader for an existing
// /godebug metric; called from internal/godebug via linkname. The
// metric name must already be present in the metrics map — an unknown
// name is a fatal runtime error (throw).
// NOTE(review): the locking calls and the write-back of d into the
// metrics map appear on elided lines — confirm against the full file.
450 //go:linkname godebug_registerMetric internal/godebug.registerMetric
451 func godebug_registerMetric(name string, read func() uint64) {
454 d, ok := metrics[name]
456 throw("runtime: unexpected metric registration for " + name)
458 d.compute = metricReader(read).compute
463 // statDep is a dependency on a group of statistics
464 // that a metric might have.
// NOTE(review): the type declaration and const block header, plus a
// trailing numStatsDeps sentinel (referenced by ensure), appear on
// elided lines — confirm against the full file.
468 heapStatsDep statDep = iota // corresponds to heapStatsAggregate
469 sysStatsDep // corresponds to sysStatsAggregate
470 cpuStatsDep // corresponds to cpuStatsAggregate
474 // statDepSet represents a set of statDeps.
476 // Under the hood, it's a bitmap.
// One uint64 word is enough for the three statDeps defined above.
477 type statDepSet [1]uint64
479 // makeStatDepSet creates a new statDepSet from a list of statDeps.
// Each dep d sets bit d%64 of word d/64 in the bitmap.
// NOTE(review): the declaration of s and the return appear on elided
// lines — confirm against the full file.
480 func makeStatDepSet(deps ...statDep) statDepSet {
482 for _, d := range deps {
483 s[d/64] |= 1 << (d % 64)
488 // difference returns set difference of s from b as a new set.
// NOTE(review): the body (bitwise and-not over the words) is elided
// from this view — confirm against the full file.
489 func (s statDepSet) difference(b statDepSet) statDepSet {
497 // union returns the union of the two sets as a new set.
// NOTE(review): the body (bitwise or over the words) is elided from
// this view — confirm against the full file.
498 func (s statDepSet) union(b statDepSet) statDepSet {
506 // empty returns true if there are no dependencies in the set.
// NOTE(review): the loop body (return false on any nonzero word) and
// the final return true are elided — confirm against the full file.
507 func (s *statDepSet) empty() bool {
508 for _, c := range s {
516 // has returns true if the set contains a given statDep.
// Tests bit d%64 of word d/64, mirroring makeStatDepSet's encoding.
517 func (s *statDepSet) has(d statDep) bool {
518 return s[d/64]&(1<<(d%64)) != 0
521 // heapStatsAggregate represents memory stats obtained from the
522 // runtime. This set of stats is grouped together because they
523 // depend on each other in some way to make sense of the runtime's
524 // current heap memory use. They're also sharded across Ps, so it
525 // makes sense to grab them all at once.
526 type heapStatsAggregate struct {
// NOTE(review): the embedded heapStatsDelta field and several derived
// field declarations (inObjects, numObjects, totalFreed, totalAllocs,
// totalFrees) appear on elided lines — confirm against the full file.
529 // Derived from values in heapStatsDelta.
531 // inObjects is the bytes of memory occupied by objects,
534 // numObjects is the number of live objects in the heap.
537 // totalAllocated is the total bytes of heap objects allocated
538 // over the lifetime of the program.
539 totalAllocated uint64
541 // totalFreed is the total bytes of heap objects freed
542 // over the lifetime of the program.
545 // totalAllocs is the number of heap objects allocated over
546 // the lifetime of the program.
549 // totalFrees is the number of heap objects freed over
550 // the lifetime of the program.
554 // compute populates the heapStatsAggregate with values from the runtime.
555 func (a *heapStatsAggregate) compute() {
// Snapshot the sharded per-P heap stats into the embedded delta.
556 memstats.heapStats.read(&a.heapStatsDelta)
558 // Calculate derived stats.
// Start from the large-object totals, then fold in each small size
// class, converting counts to bytes via class_to_size.
559 a.totalAllocs = a.largeAllocCount
560 a.totalFrees = a.largeFreeCount
561 a.totalAllocated = a.largeAlloc
562 a.totalFreed = a.largeFree
563 for i := range a.smallAllocCount {
564 na := a.smallAllocCount[i]
565 nf := a.smallFreeCount[i]
// NOTE(review): the accumulation of na/nf into totalAllocs/totalFrees
// appears on elided lines — confirm against the full file.
568 a.totalAllocated += na * uint64(class_to_size[i])
569 a.totalFreed += nf * uint64(class_to_size[i])
// Live bytes/objects are lifetime allocations minus lifetime frees.
571 a.inObjects = a.totalAllocated - a.totalFreed
572 a.numObjects = a.totalAllocs - a.totalFrees
575 // sysStatsAggregate represents system memory stats obtained
576 // from the runtime. This set of stats is grouped together because
577 // they're all relatively cheap to acquire and generally independent
578 // of one another and other runtime memory stats. The fact that they
579 // may be acquired at different times, especially with respect to
580 // heapStatsAggregate, means there could be some skew, but because of
581 // these stats are independent, there's no real consistency issue here.
582 type sysStatsAggregate struct {
// NOTE(review): most field declarations (stacksSys, mSpanSys,
// mSpanInUse, mCacheSys, mCacheInUse, buckHashSys, gcMiscSys,
// otherSys, heapGoal, gcCyclesDone) are elided — confirm against the
// full file; only gcCyclesForced is visible here.
593 gcCyclesForced uint64
596 // compute populates the sysStatsAggregate with values from the runtime.
597 func (a *sysStatsAggregate) compute() {
598 a.stacksSys = memstats.stacks_sys.load()
599 a.buckHashSys = memstats.buckhash_sys.load()
600 a.gcMiscSys = memstats.gcMiscSys.load()
601 a.otherSys = memstats.other_sys.load()
602 a.heapGoal = gcController.heapGoal()
603 a.gcCyclesDone = uint64(memstats.numgc)
604 a.gcCyclesForced = uint64(memstats.numforcedgc)
// NOTE(review): the mheap_ reads below are presumably wrapped in a
// locked systemstack section on elided lines — confirm against the
// full file.
608 a.mSpanSys = memstats.mspan_sys.load()
609 a.mSpanInUse = uint64(mheap_.spanalloc.inuse)
610 a.mCacheSys = memstats.mcache_sys.load()
611 a.mCacheInUse = uint64(mheap_.cachealloc.inuse)
616 // cpuStatsAggregate represents CPU stats obtained from the runtime
617 // acquired together to avoid skew and inconsistencies.
// NOTE(review): the embedded cpuStats field declaration is elided —
// confirm against the full file (compute assigns a.cpuStats).
618 type cpuStatsAggregate struct {
622 // compute populates the cpuStatsAggregate with values from the runtime.
// Takes a single struct-copy snapshot of the GC worker CPU stats.
623 func (a *cpuStatsAggregate) compute() {
624 a.cpuStats = work.cpuStats
627 // nsToSec takes a duration in nanoseconds and converts it to seconds as
// a float64.
629 func nsToSec(ns int64) float64 {
630 return float64(ns) / 1e9
633 // statAggregate is the main driver of the metrics implementation.
635 // It contains multiple aggregates of runtime statistics, as well
636 // as a set of these aggregates that it has populated. The aggregates
637 // are populated lazily by its ensure method.
638 type statAggregate struct {
// NOTE(review): the ensured statDepSet field (read/written by ensure)
// is declared on an elided line — confirm against the full file.
640 heapStats heapStatsAggregate
641 sysStats sysStatsAggregate
642 cpuStats cpuStatsAggregate
645 // ensure populates statistics aggregates determined by deps if they
646 // haven't yet been populated.
647 func (a *statAggregate) ensure(deps *statDepSet) {
// Only compute the aggregates not already marked in a.ensured.
648 missing := deps.difference(a.ensured)
// NOTE(review): the empty-set early return, the missing.has(i) check,
// and the switch dispatching to sysStats/cpuStats.compute appear on
// elided lines — confirm against the full file.
652 for i := statDep(0); i < numStatsDeps; i++ {
658 a.heapStats.compute()
// Record what was just populated so repeated calls are cheap.
665 a.ensured = a.ensured.union(missing)
668 // metricKind is a runtime copy of runtime/metrics.ValueKind and
669 // must be kept structurally identical to that type.
// NOTE(review): the type declaration, const block header, and the
// metricKindUint64/metricKindFloat64 constants appear on elided
// lines — confirm against the full file.
673 // These values must be kept identical to their corresponding Kind* values
674 // in the runtime/metrics package.
675 metricKindBad metricKind = iota
678 metricKindFloat64Histogram
681 // metricSample is a runtime copy of runtime/metrics.Sample and
682 // must be kept structurally identical to that type.
// NOTE(review): the name/value field declarations are elided —
// confirm against the full file (readMetrics uses sample.name and
// sample.value).
683 type metricSample struct {
688 // metricValue is a runtime copy of runtime/metrics.Value and
689 // must be kept structurally identical to that type.
690 type metricValue struct {
// NOTE(review): the kind field declaration appears on an elided
// line — confirm against the full file.
692 scalar uint64 // contains scalar values for scalar Kinds.
693 pointer unsafe.Pointer // contains non-scalar values.
696 // float64HistOrInit tries to pull out an existing float64Histogram
697 // from the value, but if none exists, then it allocates one with
698 // the given buckets.
699 func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogram {
700 var hist *metricFloat64Histogram
// Reuse the histogram already stored in the value when possible, to
// avoid reallocating on repeated reads of the same sample.
701 if v.kind == metricKindFloat64Histogram && v.pointer != nil {
702 hist = (*metricFloat64Histogram)(v.pointer)
// NOTE(review): the else branch header is elided — the lines below
// allocate a fresh histogram; confirm against the full file.
704 v.kind = metricKindFloat64Histogram
705 hist = new(metricFloat64Histogram)
706 v.pointer = unsafe.Pointer(hist)
// Always install the caller's buckets; only reallocate counts when
// the bucket count changed (len(counts) must be len(buckets)-1).
708 hist.buckets = buckets
709 if len(hist.counts) != len(hist.buckets)-1 {
710 hist.counts = make([]uint64, len(buckets)-1)
715 // metricFloat64Histogram is a runtime copy of runtime/metrics.Float64Histogram
716 // and must be kept structurally identical to that type.
// NOTE(review): the counts/buckets field declarations are elided —
// confirm against the full file (float64HistOrInit uses both).
717 type metricFloat64Histogram struct {
722 // agg is used by readMetrics, and is protected by metricsSema.
724 // Managed as a global variable because its pointer will be
725 // an argument to a dynamically-defined function, and we'd
726 // like to avoid it escaping to the heap.
727 var agg statAggregate
// metricName pairs a metric's key with its kind; field declarations
// are elided from this view — confirm against the full file.
729 type metricName struct {
734 // readMetricNames is the implementation of runtime/metrics.readMetricNames,
735 // used by the runtime/metrics test and otherwise unreferenced.
737 //go:linkname readMetricNames runtime/metrics_test.runtime_readMetricNames
738 func readMetricNames() []string {
// NOTE(review): the metricsLock/initMetrics calls, the computation of
// n, and the unlock/return appear on elided lines — confirm against
// the full file.
744 list := make([]string, 0, n)
// Collect all registered metric keys; map iteration order is random,
// so the result is unordered.
747 for name := range metrics {
748 list = append(list, name)
755 // readMetrics is the implementation of runtime/metrics.Read.
757 //go:linkname readMetrics runtime/metrics.runtime_readMetrics
758 func readMetrics(samplesp unsafe.Pointer, len int, cap int) {
759 // Construct a slice from the args.
760 sl := slice{samplesp, len, cap}
761 samples := *(*[]metricSample)(unsafe.Pointer(&sl))
765 // Ensure the map is initialized.
768 // Clear agg defensively.
769 agg = statAggregate{}
772 for i := range samples {
773 sample := &samples[i]
774 data, ok := metrics[sample.name]
776 sample.value.kind = metricKindBad
779 // Ensure we have all the stats we need.
780 // agg is populated lazily.
781 agg.ensure(&data.deps)
783 // Compute the value based on the stats we have.
784 data.compute(&agg, &sample.value)