Cypherpunks.ru repositories - gostls13.git/commitdiff
runtime: convert timeHistogram to atomic types
author     Michael Pratt <mpratt@google.com>
           Mon, 25 Jul 2022 19:58:23 +0000 (15:58 -0400)
committer  Michael Pratt <mpratt@google.com>
           Fri, 12 Aug 2022 01:53:11 +0000 (01:53 +0000)
I've dropped the note that sched.timeToRun is protected by sched.lock,
as it does not seem to be true.

For #53821.

Change-Id: I03f8dc6ca0bcd4ccf3ec113010a0aa39c6f7d6ef
Reviewed-on: https://go-review.googlesource.com/c/go/+/419449
Reviewed-by: Austin Clements <austin@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Run-TryBot: Michael Pratt <mpratt@google.com>
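
The change follows one mechanical pattern in every file below: a plain uint64 field updated through runtime/internal/atomic free functions becomes an atomic.Uint64 whose methods perform the same operations. A minimal sketch of that pattern, using the public sync/atomic wrapper types (added in Go 1.19) rather than the runtime-internal ones, with hypothetical type names:

    package main

    import "sync/atomic"

    // Before: a bare uint64 plus free functions; on 32-bit platforms
    // the field must additionally be kept 8-byte aligned by hand.
    type histogramBefore struct {
            count uint64
    }

    func (h *histogramBefore) inc() { atomic.AddUint64(&h.count, 1) }

    // After: the wrapper type makes every access atomic by construction
    // and carries its own 64-bit alignment guarantee.
    type histogramAfter struct {
            count atomic.Uint64
    }

    func (h *histogramAfter) inc() { h.count.Add(1) }

    func main() {
            var h histogramAfter
            h.inc()
            _ = h.count.Load()
    }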

src/runtime/align_runtime_test.go
src/runtime/export_test.go
src/runtime/histogram.go
src/runtime/metrics.go
src/runtime/mstats.go
src/runtime/proc.go
src/runtime/runtime2.go

diff --git a/src/runtime/align_runtime_test.go b/src/runtime/align_runtime_test.go
index e7af4cd6ff277338369171826af78cc763b4d5b5..2c448d4a0904fc4ecfb399930efc435f229d6947 100644
@@ -17,8 +17,6 @@ var AtomicFields = []uintptr{
        unsafe.Offsetof(p{}.timer0When),
        unsafe.Offsetof(p{}.timerModifiedEarliest),
        unsafe.Offsetof(p{}.gcFractionalMarkTime),
-       unsafe.Offsetof(schedt{}.timeToRun),
-       unsafe.Offsetof(timeHistogram{}.underflow),
        unsafe.Offsetof(profBuf{}.overflow),
        unsafe.Offsetof(profBuf{}.overflowTime),
        unsafe.Offsetof(heapStatsDelta{}.tinyAllocCount),
@@ -37,10 +35,8 @@ var AtomicFields = []uintptr{
        unsafe.Offsetof(lfnode{}.next),
        unsafe.Offsetof(mstats{}.last_gc_nanotime),
        unsafe.Offsetof(mstats{}.last_gc_unix),
-       unsafe.Offsetof(mstats{}.gcPauseDist),
        unsafe.Offsetof(ticksType{}.val),
        unsafe.Offsetof(workType{}.bytesMarked),
-       unsafe.Offsetof(timeHistogram{}.counts),
 }
 
 // AtomicVariables is the set of global variables on which we perform
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 81f60b3ada6909a14f012977e4dd98ffdeff29ed..d9f36c06c24c740a0990e60b4a9528cbd62d1bdc 100644
@@ -1244,9 +1244,9 @@ func (th *TimeHistogram) Count(bucket, subBucket uint) (uint64, bool) {
        t := (*timeHistogram)(th)
        i := bucket*TimeHistNumSubBuckets + subBucket
        if i >= uint(len(t.counts)) {
-               return t.underflow, false
+               return t.underflow.Load(), false
        }
-       return t.counts[i], true
+       return t.counts[i].Load(), true
 }
 
 func (th *TimeHistogram) Record(duration int64) {
diff --git a/src/runtime/histogram.go b/src/runtime/histogram.go
index eddfbab3bc564a675bce4211eb3f2dbd53545bd2..d2e6367c84b637641db69c84c712fa80acb1e50c 100644
@@ -66,18 +66,16 @@ const (
 // It is an HDR histogram with exponentially-distributed
 // buckets and linearly distributed sub-buckets.
 //
-// Counts in the histogram are updated atomically, so it is safe
-// for concurrent use. It is also safe to read all the values
-// atomically.
+// The histogram is safe for concurrent reads and writes.
 type timeHistogram struct {
-       counts [timeHistNumSuperBuckets * timeHistNumSubBuckets]uint64
+       counts [timeHistNumSuperBuckets * timeHistNumSubBuckets]atomic.Uint64
 
        // underflow counts all the times we got a negative duration
        // sample. Because of how time works on some platforms, it's
        // possible to measure negative durations. We could ignore them,
        // but we record them anyway because it's better to have some
        // signal that it's happening than just missing samples.
-       underflow uint64
+       underflow atomic.Uint64
 }
 
 // record adds the given duration to the distribution.
@@ -88,7 +86,7 @@ type timeHistogram struct {
 //go:nosplit
 func (h *timeHistogram) record(duration int64) {
        if duration < 0 {
-               atomic.Xadd64(&h.underflow, 1)
+               h.underflow.Add(1)
                return
        }
        // The index of the exponential bucket is just the index
@@ -116,7 +114,7 @@ func (h *timeHistogram) record(duration int64) {
        } else {
                subBucket = uint(duration)
        }
-       atomic.Xadd64(&h.counts[superBucket*timeHistNumSubBuckets+subBucket], 1)
+       h.counts[superBucket*timeHistNumSubBuckets+subBucket].Add(1)
 }
 
 const (
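
For reference, the bucketing scheme that record implements can be sketched outside the runtime: the super-bucket index comes from the position of the duration's highest set bit, and the sub-bucket index from the next few bits below it. The constants here are illustrative, not the runtime's actual values, and the sketch omits the clamping to the final bucket that the real code performs:

    package main

    import (
            "fmt"
            "math/bits"
    )

    const (
            subBucketBits = 4                  // assumed value, for illustration
            numSubBuckets = 1 << subBucketBits // linear sub-buckets per super-bucket
    )

    // bucketOf maps a non-negative duration to (superBucket, subBucket):
    // exponentially spaced super-buckets, linearly spaced sub-buckets.
    func bucketOf(duration int64) (uint, uint) {
            if duration < numSubBuckets {
                    // Small values all land in super-bucket 0, indexed linearly.
                    return 0, uint(duration)
            }
            super := uint(bits.Len64(uint64(duration))) - subBucketBits
            sub := uint((duration >> (super - 1)) & (numSubBuckets - 1))
            return super, sub
    }

    func main() {
            for _, d := range []int64{3, 17, 100, 1000000} {
                    super, sub := bucketOf(d)
                    fmt.Printf("duration %7d -> super %2d, sub %2d\n", d, super, sub)
            }
    }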
diff --git a/src/runtime/metrics.go b/src/runtime/metrics.go
index 986121b9c2f976a68b25586fb01e29836d2adbcd..313850a3a0054fbad85be36898740382008950f2 100644
@@ -7,7 +7,6 @@ package runtime
 // Metrics implementation exported to runtime/metrics.
 
 import (
-       "runtime/internal/atomic"
        "unsafe"
 )
 
@@ -197,9 +196,9 @@ func initMetrics() {
                                // The bottom-most bucket, containing negative values, is tracked
                                // separately as underflow, so fill that in manually and then
                                // iterate over the rest.
-                               hist.counts[0] = atomic.Load64(&memstats.gcPauseDist.underflow)
+                               hist.counts[0] = memstats.gcPauseDist.underflow.Load()
                                for i := range memstats.gcPauseDist.counts {
-                                       hist.counts[i+1] = atomic.Load64(&memstats.gcPauseDist.counts[i])
+                                       hist.counts[i+1] = memstats.gcPauseDist.counts[i].Load()
                                }
                        },
                },
@@ -327,9 +326,9 @@ func initMetrics() {
                "/sched/latencies:seconds": {
                        compute: func(_ *statAggregate, out *metricValue) {
                                hist := out.float64HistOrInit(timeHistBuckets)
-                               hist.counts[0] = atomic.Load64(&sched.timeToRun.underflow)
+                               hist.counts[0] = sched.timeToRun.underflow.Load()
                                for i := range sched.timeToRun.counts {
-                                       hist.counts[i+1] = atomic.Load64(&sched.timeToRun.counts[i])
+                                       hist.counts[i+1] = sched.timeToRun.counts[i].Load()
                                }
                        },
                },
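
Outside the runtime, both histograms touched here are visible through the public runtime/metrics API; for example, the scheduling-latency distribution backed by sched.timeToRun can be read as follows (a usage sketch, not part of this CL):

    package main

    import (
            "fmt"
            "runtime/metrics"
    )

    func main() {
            // "/sched/latencies:seconds" is backed by sched.timeToRun;
            // "/gc/pauses:seconds" is backed by memstats.gcPauseDist.
            samples := []metrics.Sample{{Name: "/sched/latencies:seconds"}}
            metrics.Read(samples)
            if samples[0].Value.Kind() != metrics.KindFloat64Histogram {
                    fmt.Println("metric not supported by this toolchain")
                    return
            }
            hist := samples[0].Value.Float64Histogram()
            for i, count := range hist.Counts {
                    // Buckets[i] and Buckets[i+1] bound bucket i; the
                    // outermost bounds may be infinite.
                    fmt.Printf("[%g, %g): %d\n", hist.Buckets[i], hist.Buckets[i+1], count)
            }
    }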
diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go
index 0029ea956c5afe58e9a094db08cff1adb2928a09..458350da02ec8d015e01bba7c8b0db2303c00da0 100644
@@ -334,10 +334,6 @@ func init() {
                println(offset)
                throw("memstats.heapStats not aligned to 8 bytes")
        }
-       if offset := unsafe.Offsetof(memstats.gcPauseDist); offset%8 != 0 {
-               println(offset)
-               throw("memstats.gcPauseDist not aligned to 8 bytes")
-       }
        // Ensure the size of heapStatsDelta causes adjacent fields/slots (e.g.
        // [3]heapStatsDelta) to be 8-byte aligned.
        if size := unsafe.Sizeof(heapStatsDelta{}); size%8 != 0 {
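
These manual alignment checks (and the entries removed from align_runtime_test.go above) become unnecessary because the atomic wrapper types guarantee 64-bit alignment of their own storage, even as struct fields on 32-bit platforms. A small demonstration using the public sync/atomic types, which make the same guarantee as the runtime-internal ones:

    package main

    import (
            "fmt"
            "sync/atomic"
            "unsafe"
    )

    type s struct {
            flag bool          // would leave a plain uint64 at offset 4 on 32-bit
            n    atomic.Uint64 // compiler pads so this field is 8-byte aligned
    }

    func main() {
            var v s
            fmt.Println(unsafe.Offsetof(v.n)%8 == 0) // true on all platforms
    }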
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index a2a02ebf9abc6b4d86febcac2cbdfb5b0ea47a39..c3144b4ddedee4565a3fdbf4d20268e3492cda8b 100644
@@ -703,11 +703,6 @@ func schedinit() {
        sigsave(&gp.m.sigmask)
        initSigmask = gp.m.sigmask
 
-       if offset := unsafe.Offsetof(sched.timeToRun); offset%8 != 0 {
-               println(offset)
-               throw("sched.timeToRun not aligned to 8 bytes")
-       }
-
        goargs()
        goenvs()
        parsedebugvars()
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 9216765fc62b8879087cedd2109539b9991fa5f1..e706cf735453f461b77f1673424d0bba4545f757 100644
@@ -843,8 +843,6 @@ type schedt struct {
        // timeToRun is a distribution of scheduling latencies, defined
        // as the sum of time a G spends in the _Grunnable state before
        // it transitions to _Grunning.
-       //
-       // timeToRun is protected by sched.lock.
        timeToRun timeHistogram
 }