unsafe.Offsetof(p{}.timer0When),
unsafe.Offsetof(p{}.timerModifiedEarliest),
unsafe.Offsetof(p{}.gcFractionalMarkTime),
- unsafe.Offsetof(schedt{}.timeToRun),
- unsafe.Offsetof(timeHistogram{}.underflow),
unsafe.Offsetof(profBuf{}.overflow),
unsafe.Offsetof(profBuf{}.overflowTime),
unsafe.Offsetof(heapStatsDelta{}.tinyAllocCount),
unsafe.Offsetof(lfnode{}.next),
unsafe.Offsetof(mstats{}.last_gc_nanotime),
unsafe.Offsetof(mstats{}.last_gc_unix),
- unsafe.Offsetof(mstats{}.gcPauseDist),
unsafe.Offsetof(ticksType{}.val),
unsafe.Offsetof(workType{}.bytesMarked),
- unsafe.Offsetof(timeHistogram{}.counts),
}
// AtomicVariables is the set of global variables on which we perform
t := (*timeHistogram)(th)
i := bucket*TimeHistNumSubBuckets + subBucket
if i >= uint(len(t.counts)) {
- return t.underflow, false
+ return t.underflow.Load(), false
}
- return t.counts[i], true
+ return t.counts[i].Load(), true
}
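// Illustrative sketch only, not part of this change: a test in package
// runtime_test (importing "runtime" and "testing") could exercise the exported
// wrappers above by recording a few durations and walking every bucket through
// Count until it reports false, at which point Count returns the underflow
// counter. The test name and the sample durations are made up for the example.
//
//	func TestTimeHistogramSketch(t *testing.T) {
//		var h runtime.TimeHistogram
//		for _, d := range []int64{-1, 0, 1, 500000, 2000000000} {
//			h.Record(d)
//		}
//		var total uint64
//		for i := uint(0); ; i++ {
//			c, ok := h.Count(i/runtime.TimeHistNumSubBuckets, i%runtime.TimeHistNumSubBuckets)
//			if !ok {
//				// Out of range: Count hands back the underflow counter.
//				if c != 1 {
//					t.Errorf("underflow = %d, want 1", c)
//				}
//				break
//			}
//			total += c
//		}
//		if total != 4 {
//			t.Errorf("total count = %d, want 4", total)
//		}
//	}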
func (th *TimeHistogram) Record(duration int64) {
// It is an HDR histogram with exponentially-distributed
// buckets and linearly distributed sub-buckets.
//
-// Counts in the histogram are updated atomically, so it is safe
-// for concurrent use. It is also safe to read all the values
-// atomically.
+// The histogram is safe for concurrent reads and writes.
type timeHistogram struct {
- counts [timeHistNumSuperBuckets * timeHistNumSubBuckets]uint64
+ counts [timeHistNumSuperBuckets * timeHistNumSubBuckets]atomic.Uint64
// underflow counts all the times we got a negative duration
// sample. Because of how time works on some platforms, it's
// possible to measure negative durations. We could ignore them,
// but we record them anyway because it's better to have some
// signal that it's happening than just missing samples.
- underflow uint64
+ underflow atomic.Uint64
}
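// For illustration only: the runtime-internal atomic.Uint64 fields above follow
// the same Load/Add pattern as the public sync/atomic.Uint64 type (Go 1.19+),
// so code outside the runtime that wants an analogous lock-free set of counters
// could be sketched like this (the type name and bucket count are made up for
// the example, not the runtime's values):
//
//	import "sync/atomic"
//
//	type concurrentCounts struct {
//		counts    [64]atomic.Uint64 // per-bucket counters, updated without locks
//		underflow atomic.Uint64     // negative samples, tracked separately
//	}
//
//	func (c *concurrentCounts) add(bucket int)         { c.counts[bucket].Add(1) }
//	func (c *concurrentCounts) read(bucket int) uint64 { return c.counts[bucket].Load() }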
// record adds the given duration to the distribution.
//go:nosplit
func (h *timeHistogram) record(duration int64) {
if duration < 0 {
- atomic.Xadd64(&h.underflow, 1)
+ h.underflow.Add(1)
return
}
// The index of the exponential bucket is just the index
} else {
subBucket = uint(duration)
}
- atomic.Xadd64(&h.counts[superBucket*timeHistNumSubBuckets+subBucket], 1)
+ h.counts[superBucket*timeHistNumSubBuckets+subBucket].Add(1)
}
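// Illustrative sketch, not the runtime's code: the bucketing above derives the
// exponential super-bucket from the position of the duration's highest set bit
// and the linear sub-bucket from the next few bits below it. A self-contained
// version with assumed constants (4 sub-bucket bits and 45 super-buckets are
// placeholders, since the hunk does not show the real values) could look like:
//
//	import "math/bits"
//
//	const (
//		subBucketBits   = 4
//		numSubBuckets   = 1 << subBucketBits // 16
//		numSuperBuckets = 45
//	)
//
//	func bucketOf(d uint64) (super, sub uint) {
//		if d < numSubBuckets {
//			return 0, uint(d) // small values land in the 0th, purely linear bucket
//		}
//		super = uint(bits.Len64(d)) - subBucketBits
//		if super >= numSuperBuckets {
//			return numSuperBuckets - 1, numSubBuckets - 1 // clamp into the top bucket
//		}
//		// The sub-bucket is the subBucketBits bits just below the top set bit.
//		sub = uint((d >> (super - 1)) % numSubBuckets)
//		return super, sub
//	}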
const (
// Metrics implementation exported to runtime/metrics.
import (
- "runtime/internal/atomic"
"unsafe"
)
// The bottom-most bucket, containing negative values, is tracked
// separately as underflow, so fill that in manually and then
// iterate over the rest.
- hist.counts[0] = atomic.Load64(&memstats.gcPauseDist.underflow)
+ hist.counts[0] = memstats.gcPauseDist.underflow.Load()
for i := range memstats.gcPauseDist.counts {
- hist.counts[i+1] = atomic.Load64(&memstats.gcPauseDist.counts[i])
+ hist.counts[i+1] = memstats.gcPauseDist.counts[i].Load()
}
},
},
"/sched/latencies:seconds": {
compute: func(_ *statAggregate, out *metricValue) {
hist := out.float64HistOrInit(timeHistBuckets)
- hist.counts[0] = atomic.Load64(&sched.timeToRun.underflow)
+ hist.counts[0] = sched.timeToRun.underflow.Load()
for i := range sched.timeToRun.counts {
- hist.counts[i+1] = atomic.Load64(&sched.timeToRun.counts[i])
+ hist.counts[i+1] = sched.timeToRun.counts[i].Load()
}
},
},
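// For illustration: these compute functions feed the public runtime/metrics
// package, so a consumer could observe the same histogram like this (a minimal
// sketch; checking for unsupported metrics is omitted):
//
//	import (
//		"fmt"
//		"runtime/metrics"
//	)
//
//	func dumpSchedLatencies() {
//		s := []metrics.Sample{{Name: "/sched/latencies:seconds"}}
//		metrics.Read(s)
//		h := s[0].Value.Float64Histogram()
//		for i, count := range h.Counts {
//			// Buckets has one more entry than Counts: count i covers
//			// the interval [Buckets[i], Buckets[i+1]).
//			fmt.Printf("[%g, %g) -> %d\n", h.Buckets[i], h.Buckets[i+1], count)
//		}
//	}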
println(offset)
throw("memstats.heapStats not aligned to 8 bytes")
}
- if offset := unsafe.Offsetof(memstats.gcPauseDist); offset%8 != 0 {
- println(offset)
- throw("memstats.gcPauseDist not aligned to 8 bytes")
- }
// Ensure the size of heapStatsDelta causes adjacent fields/slots (e.g.
// [3]heapStatsDelta) to be 8-byte aligned.
if size := unsafe.Sizeof(heapStatsDelta{}); size%8 != 0 {
sigsave(&gp.m.sigmask)
initSigmask = gp.m.sigmask
- if offset := unsafe.Offsetof(sched.timeToRun); offset%8 != 0 {
- println(offset)
- throw("sched.timeToRun not aligned to 8 bytes")
- }
-
goargs()
goenvs()
parsedebugvars()
// timeToRun is a distribution of scheduling latencies, defined
// as the sum of time a G spends in the _Grunnable state before
// it transitions to _Grunning.
- //
- // timeToRun is protected by sched.lock.
timeToRun timeHistogram
}
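// Conceptually (an illustrative sketch, not the runtime's actual bookkeeping):
// each time a goroutine moves from _Grunnable to _Grunning, the latency it just
// accumulated can be recorded into this histogram. The tracker type and its
// field names below are hypothetical; only timeHistogram and its record method
// come from the code above.
//
//	type runnableTracker struct {
//		hist          timeHistogram
//		runnableSince int64 // nanotime when the goroutine became runnable
//	}
//
//	func (t *runnableTracker) becameRunnable(now int64) { t.runnableSince = now }
//	func (t *runnableTracker) startedRunning(now int64) { t.hist.record(now - t.runnableSince) }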