runtime: replace raw traceEv with traceBlockReason in gopark
author    Michael Anthony Knyszek <mknyszek@google.com>
          Wed, 17 May 2023 14:22:55 +0000 (14:22 +0000)
committer Michael Knyszek <mknyszek@google.com>
          Fri, 19 May 2023 20:47:25 +0000 (20:47 +0000)
This change adds traceBlockReason, an abstraction that leaks fewer
implementation details of the tracer into the rest of the runtime.
Currently, gopark is called with an explicit trace event, which spreads
knowledge of trace internals throughout the runtime.

This change will make it easier to change out the trace implementation.

Change-Id: Id633e1704d2c8838c6abd1214d9695537c4ac7db
Reviewed-on: https://go-review.googlesource.com/c/go/+/494185
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Michael Pratt <mpratt@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
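
For orientation, here is a small self-contained sketch of the pattern this
commit introduces. The names mirror the runtime's (gopark, traceGoPark,
traceBlockReason, traceEv*), but the types, values, and function bodies below
are illustrative stand-ins rather than the real runtime code: call sites name
a semantic block reason, and only the tracer knows how that reason maps onto
a wire-format event.

	package main

	import "fmt"

	// Illustrative event codes standing in for the tracer's internal wire
	// format (the real traceEv* constants live in src/runtime/trace.go;
	// the values here are placeholders).
	const (
		traceEvGoStop      = 16
		traceEvGoBlock     = 20
		traceEvGoBlockSend = 22
	)

	// traceBlockReason is the semantic reason a goroutine blocked; the rest
	// of the runtime passes this around instead of a raw event byte.
	type traceBlockReason uint8

	const (
		traceBlockForever  traceBlockReason = traceEvGoStop
		traceBlockGeneric  traceBlockReason = traceEvGoBlock
		traceBlockChanSend traceBlockReason = traceEvGoBlockSend
	)

	// traceGoPark is the only place that knows how a reason becomes an event.
	func traceGoPark(reason traceBlockReason, skip int) {
		fmt.Printf("emit trace event %d (skip %d frames)\n", byte(reason), skip)
	}

	// gopark stands in for the scheduler entry point: callers state why they
	// block, never which trace event to emit.
	func gopark(reason traceBlockReason, traceskip int) {
		traceGoPark(reason, traceskip)
	}

	func main() {
		gopark(traceBlockChanSend, 2) // e.g. a goroutine blocking on a channel send
	}

In the diff below, each gopark/goparkunlock call site is rewritten in exactly
this way, and trace.go maps each reason back onto the old event codes.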

15 files changed:
src/runtime/chan.go
src/runtime/debugcall.go
src/runtime/lock_js.go
src/runtime/mfinal.go
src/runtime/mgc.go
src/runtime/mgcmark.go
src/runtime/mgcscavenge.go
src/runtime/mgcsweep.go
src/runtime/netpoll.go
src/runtime/proc.go
src/runtime/runtime2.go
src/runtime/select.go
src/runtime/sema.go
src/runtime/time.go
src/runtime/trace.go

diff --git a/src/runtime/chan.go b/src/runtime/chan.go
index aff4cf87b723d18df8cc2af81fae2897ff785f1a..ff9e2a9155af22cfccfdbbebb0bdd56df91d551c 100644
@@ -162,7 +162,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
                if !block {
                        return false
                }
-               gopark(nil, nil, waitReasonChanSendNilChan, traceEvGoStop, 2)
+               gopark(nil, nil, waitReasonChanSendNilChan, traceBlockForever, 2)
                throw("unreachable")
        }
 
@@ -256,7 +256,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
        // changes and when we set gp.activeStackChans is not safe for
        // stack shrinking.
        gp.parkingOnChan.Store(true)
-       gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanSend, traceEvGoBlockSend, 2)
+       gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanSend, traceBlockChanSend, 2)
        // Ensure the value being sent is kept alive until the
        // receiver copies it out. The sudog has a pointer to the
        // stack object, but sudogs aren't considered as roots of the
@@ -466,7 +466,7 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool)
                if !block {
                        return
                }
-               gopark(nil, nil, waitReasonChanReceiveNilChan, traceEvGoStop, 2)
+               gopark(nil, nil, waitReasonChanReceiveNilChan, traceBlockForever, 2)
                throw("unreachable")
        }
 
@@ -580,7 +580,7 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool)
        // changes and when we set gp.activeStackChans is not safe for
        // stack shrinking.
        gp.parkingOnChan.Store(true)
-       gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanReceive, traceEvGoBlockRecv, 2)
+       gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanReceive, traceBlockChanRecv, 2)
 
        // someone woke us up
        if mysg != gp.waiting {
diff --git a/src/runtime/debugcall.go b/src/runtime/debugcall.go
index b78663715f8753a6bdfe8c3ba475400712cd4a2e..ea413bd0c5644a980d34e70bbf34821648e8b547 100644
@@ -162,7 +162,7 @@ func debugCallWrap(dispatch uintptr) {
 
                // Park the calling goroutine.
                if traceEnabled() {
-                       traceGoPark(traceEvGoBlock, 1)
+                       traceGoPark(traceBlockDebugCall, 1)
                }
                casGToWaiting(gp, _Grunning, waitReasonDebugCall)
                dropg()
diff --git a/src/runtime/lock_js.go b/src/runtime/lock_js.go
index f87a94a8490fca8c6d00d39c8ce9e7ffcf612f17..ae2bb3db4750674246ffcbbe9870fe842ae712ee 100644
@@ -114,7 +114,7 @@ func notetsleepg(n *note, ns int64) bool {
                notesWithTimeout[n] = noteWithTimeout{gp: gp, deadline: deadline}
                releasem(mp)
 
-               gopark(nil, nil, waitReasonSleep, traceEvNone, 1)
+               gopark(nil, nil, waitReasonSleep, traceBlockSleep, 1)
 
                clearTimeoutEvent(id) // note might have woken early, clear timeout
                clearIdleID()
@@ -132,7 +132,7 @@ func notetsleepg(n *note, ns int64) bool {
                notes[n] = gp
                releasem(mp)
 
-               gopark(nil, nil, waitReasonZero, traceEvNone, 1)
+               gopark(nil, nil, waitReasonZero, traceBlockGeneric, 1)
 
                mp = acquirem()
                delete(notes, n)
@@ -256,7 +256,7 @@ func handleEvent() {
 
        // wait until all goroutines are idle
        e.returned = true
-       gopark(nil, nil, waitReasonZero, traceEvNone, 1)
+       gopark(nil, nil, waitReasonZero, traceBlockGeneric, 1)
 
        events[len(events)-1] = nil
        events = events[:len(events)-1]
diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go
index cfdbd79af66d811948153633a3e3e1817c8dd374..650db181056fb8e5fe07fd166b134068192be4ac 100644
@@ -190,7 +190,7 @@ func runfinq() {
                fb := finq
                finq = nil
                if fb == nil {
-                       gopark(finalizercommit, unsafe.Pointer(&finlock), waitReasonFinalizerWait, traceEvGoBlock, 1)
+                       gopark(finalizercommit, unsafe.Pointer(&finlock), waitReasonFinalizerWait, traceBlockSystemGoroutine, 1)
                        continue
                }
                argRegs = intArgRegs
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 599f688e6f8080a0e9da7cf4a1b1c637b7487586..c44b1164d389abdd1fff0f20633b5ba6b5f47a36 100644
@@ -500,7 +500,7 @@ func gcWaitOnMark(n uint32) {
                // Wait until sweep termination, mark, and mark
                // termination of cycle N complete.
                work.sweepWaiters.list.push(getg())
-               goparkunlock(&work.sweepWaiters.lock, waitReasonWaitForGCCycle, traceEvGoBlock, 1)
+               goparkunlock(&work.sweepWaiters.lock, waitReasonWaitForGCCycle, traceBlockUntilGCEnds, 1)
        }
 }
 
@@ -1315,7 +1315,7 @@ func gcBgMarkWorker() {
                        // Note that at this point, the G may immediately be
                        // rescheduled and may be running.
                        return true
-               }, unsafe.Pointer(node), waitReasonGCWorkerIdle, traceEvGoBlock, 0)
+               }, unsafe.Pointer(node), waitReasonGCWorkerIdle, traceBlockSystemGoroutine, 0)
 
                // Preemption must not occur here, or another G might see
                // p.gcMarkWorkerMode.
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index 1a45847208f228e5a000ff5739a103014477ac46..2ed411ae6140a7931f5c7dda7ceaa67873144cf6 100644
@@ -649,7 +649,7 @@ func gcParkAssist() bool {
                return false
        }
        // Park.
-       goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceEvGoBlockGC, 2)
+       goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceBlockGCMarkAssist, 2)
        return true
 }
 
diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go
index 3f95bb04655f2bd7093fbcf587ad464b6697ae8c..10e93a13d38635cf90652d0a67e7526879225477 100644
@@ -422,7 +422,7 @@ func (s *scavengerState) park() {
                throw("tried to park scavenger from another goroutine")
        }
        s.parked = true
-       goparkunlock(&s.lock, waitReasonGCScavengeWait, traceEvGoBlock, 2)
+       goparkunlock(&s.lock, waitReasonGCScavengeWait, traceBlockSystemGoroutine, 2)
 }
 
 // ready signals to sysmon that the scavenger should be awoken.
@@ -501,7 +501,7 @@ func (s *scavengerState) sleep(worked float64) {
 
                // Mark ourselves as asleep and go to sleep.
                s.parked = true
-               goparkunlock(&s.lock, waitReasonSleep, traceEvGoSleep, 2)
+               goparkunlock(&s.lock, waitReasonSleep, traceBlockSleep, 2)
 
                // How long we actually slept for.
                slept = nanotime() - start
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index e0e5bf0aefa276ecf2da54745bb3a6adbe345497..728a5bad7ed9d3b7dfa61a4147feac56ec054d52 100644
@@ -277,7 +277,7 @@ func bgsweep(c chan int) {
        lock(&sweep.lock)
        sweep.parked = true
        c <- 1
-       goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
+       goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceBlockGCSweep, 1)
 
        for {
                // bgsweep attempts to be a "low priority" goroutine by intentionally
@@ -318,7 +318,7 @@ func bgsweep(c chan int) {
                        continue
                }
                sweep.parked = true
-               goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
+               goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceBlockGCSweep, 1)
        }
 }
 
diff --git a/src/runtime/netpoll.go b/src/runtime/netpoll.go
index a2b0be22611404f38f450f67000f10ab36885a9b..3e6a6961e37097816ed11c39ad78f20e36c17c3f 100644
@@ -561,7 +561,7 @@ func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
        // this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
        // do the opposite: store to closing/rd/wd, publishInfo, load of rg/wg
        if waitio || netpollcheckerr(pd, mode) == pollNoError {
-               gopark(netpollblockcommit, unsafe.Pointer(gpp), waitReasonIOWait, traceEvGoBlockNet, 5)
+               gopark(netpollblockcommit, unsafe.Pointer(gpp), waitReasonIOWait, traceBlockNet, 5)
        }
        // be careful to not lose concurrent pdReady notification
        old := gpp.Swap(pdNil)
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 5ac32fb259731392fbbf0acfbc7deaf876f2953d..276d7355e9cc3f754c79dc12e5e175278ae9d58a 100644
@@ -284,7 +284,7 @@ func main() {
                }
        }
        if panicking.Load() != 0 {
-               gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
+               gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
        }
        runExitHooks(0)
 
@@ -319,7 +319,7 @@ func forcegchelper() {
                        throw("forcegc: phase error")
                }
                forcegc.idle.Store(true)
-               goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceEvGoBlock, 1)
+               goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
                // this goroutine is explicitly resumed by sysmon
                if debug.gctrace > 0 {
                        println("GC forced")
@@ -378,7 +378,7 @@ func goschedIfBusy() {
 // Reason explains why the goroutine has been parked. It is displayed in stack
 // traces and heap dumps. Reasons should be unique and descriptive. Do not
 // re-use reasons, add new ones.
-func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
+func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
        if reason != waitReasonSleep {
                checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
        }
@@ -391,8 +391,8 @@ func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason w
        mp.waitlock = lock
        mp.waitunlockf = unlockf
        gp.waitreason = reason
-       mp.waittraceev = traceEv
-       mp.waittraceskip = traceskip
+       mp.waitTraceBlockReason = traceReason
+       mp.waitTraceSkip = traceskip
        releasem(mp)
        // can't do anything that might move the G between Ms here.
        mcall(park_m)
@@ -400,8 +400,8 @@ func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason w
 
 // Puts the current goroutine into a waiting state and unlocks the lock.
 // The goroutine can be made runnable again by calling goready(gp).
-func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
-       gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
+func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
+       gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
 }
 
 func goready(gp *g, traceskip int) {
@@ -3678,7 +3678,7 @@ func park_m(gp *g) {
        mp := getg().m
 
        if traceEnabled() {
-               traceGoPark(mp.waittraceev, mp.waittraceskip)
+               traceGoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
        }
 
        // N.B. Not using casGToWaiting here because the waitreason is
@@ -3749,7 +3749,7 @@ func gopreempt_m(gp *g) {
 //go:systemstack
 func preemptPark(gp *g) {
        if traceEnabled() {
-               traceGoPark(traceEvGoBlock, 0)
+               traceGoPark(traceBlockPreempted, 0)
        }
        status := readgstatus(gp)
        if status&^_Gscan != _Grunning {
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index a2075dddef080fabcc405fbad28ca8e1b851d372..59271a600125dfbf61f0a29b0e2d19115576cd38 100644
@@ -581,10 +581,10 @@ type m struct {
 
        // wait* are used to carry arguments from gopark into park_m, because
        // there's no stack to put them on. That is their sole purpose.
-       waitunlockf   func(*g, unsafe.Pointer) bool
-       waitlock      unsafe.Pointer
-       waittraceev   byte
-       waittraceskip int
+       waitunlockf          func(*g, unsafe.Pointer) bool
+       waitlock             unsafe.Pointer
+       waitTraceBlockReason traceBlockReason
+       waitTraceSkip        int
 
        syscalltick uint32
        freelink    *m // on sched.freem
diff --git a/src/runtime/select.go b/src/runtime/select.go
index 339db75d4a7eb5a63253a9d5daa51357984f5f5f..34c06375c23a7c82bc327622fcead86afb145b40 100644
@@ -100,7 +100,7 @@ func selparkcommit(gp *g, _ unsafe.Pointer) bool {
 }
 
 func block() {
-       gopark(nil, nil, waitReasonSelectNoCases, traceEvGoStop, 1) // forever
+       gopark(nil, nil, waitReasonSelectNoCases, traceBlockForever, 1) // forever
 }
 
 // selectgo implements the select statement.
@@ -324,7 +324,7 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo
        // changes and when we set gp.activeStackChans is not safe for
        // stack shrinking.
        gp.parkingOnChan.Store(true)
-       gopark(selparkcommit, nil, waitReasonSelect, traceEvGoBlockSelect, 1)
+       gopark(selparkcommit, nil, waitReasonSelect, traceBlockSelect, 1)
        gp.activeStackChans = false
 
        sellock(scases, lockorder)
diff --git a/src/runtime/sema.go b/src/runtime/sema.go
index bc23a85e348948e4b4c271e318c1391f52e321a6..d0a81170c3c25631287e640dad1c6f8ea2d303e0 100644
@@ -157,7 +157,7 @@ func semacquire1(addr *uint32, lifo bool, profile semaProfileFlags, skipframes i
                // Any semrelease after the cansemacquire knows we're waiting
                // (we set nwait above), so go to sleep.
                root.queue(addr, s, lifo)
-               goparkunlock(&root.lock, reason, traceEvGoBlockSync, 4+skipframes)
+               goparkunlock(&root.lock, reason, traceBlockSync, 4+skipframes)
                if s.ticket != 0 || cansemacquire(addr) {
                        break
                }
@@ -524,7 +524,7 @@ func notifyListWait(l *notifyList, t uint32) {
                l.tail.next = s
        }
        l.tail = s
-       goparkunlock(&l.lock, waitReasonSyncCondWait, traceEvGoBlockCond, 3)
+       goparkunlock(&l.lock, waitReasonSyncCondWait, traceBlockCondWait, 3)
        if t0 != 0 {
                blockevent(s.releasetime-t0, 2)
        }
diff --git a/src/runtime/time.go b/src/runtime/time.go
index 6cd70b7aedae92be5508a3e40bddd379fa77b542..93c927f57c08846610a2c70473da149a7f768273 100644
@@ -192,7 +192,7 @@ func timeSleep(ns int64) {
        if t.nextwhen < 0 { // check for overflow.
                t.nextwhen = maxWhen
        }
-       gopark(resetForSleep, unsafe.Pointer(t), waitReasonSleep, traceEvGoSleep, 1)
+       gopark(resetForSleep, unsafe.Pointer(t), waitReasonSleep, traceBlockSleep, 1)
 }
 
 // resetForSleep is called after the goroutine is parked for timeSleep.
diff --git a/src/runtime/trace.go b/src/runtime/trace.go
index 703a4476742493dc19c9bf6148d549b5f4e54b84..0bf7c272c4324837a75830d87be6112419288979 100644
@@ -78,6 +78,35 @@ const (
        // That means, the max event type value is 63.
 )
 
+// traceBlockReason is an enumeration of reasons a goroutine might block.
+// This is the interface the rest of the runtime uses to tell the
+// tracer why a goroutine blocked. The tracer then propagates this information
+// into the trace however it sees fit.
+//
+// Note that traceBlockReasons should not be compared, since reasons that are
+// distinct by name may *not* be distinct by value.
+type traceBlockReason uint8
+
+// For maximal efficiency, just map the trace block reason directly to a trace
+// event.
+const (
+       traceBlockGeneric         traceBlockReason = traceEvGoBlock
+       traceBlockForever                          = traceEvGoStop
+       traceBlockNet                              = traceEvGoBlockNet
+       traceBlockSelect                           = traceEvGoBlockSelect
+       traceBlockCondWait                         = traceEvGoBlockCond
+       traceBlockSync                             = traceEvGoBlockSync
+       traceBlockChanSend                         = traceEvGoBlockSend
+       traceBlockChanRecv                         = traceEvGoBlockRecv
+       traceBlockGCMarkAssist                     = traceEvGoBlockGC
+       traceBlockGCSweep                          = traceEvGoBlock
+       traceBlockSystemGoroutine                  = traceEvGoBlock
+       traceBlockPreempted                        = traceEvGoBlock
+       traceBlockDebugCall                        = traceEvGoBlock
+       traceBlockUntilGCEnds                      = traceEvGoBlock
+       traceBlockSleep                            = traceEvGoSleep
+)
+
 const (
        // Timestamps in trace are cputicks/traceTickDiv.
        // This makes absolute values of timestamp diffs smaller,
@@ -511,7 +540,7 @@ top:
                        }
 
                        return true
-               }, nil, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
+               }, nil, waitReasonTraceReaderBlocked, traceBlockSystemGoroutine, 2)
                goto top
        }
 
@@ -1588,8 +1617,10 @@ func traceGoPreempt() {
        traceEvent(traceEvGoPreempt, 1)
 }
 
-func traceGoPark(traceEv byte, skip int) {
-       traceEvent(traceEv, skip)
+func traceGoPark(reason traceBlockReason, skip int) {
+       // Convert the block reason directly to a trace event type.
+       // See traceBlockReason for more information.
+       traceEvent(byte(reason), skip)
 }
 
 func traceGoUnpark(gp *g, skip int) {
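
One consequence of mapping reasons straight onto event values, called out in
the new comment in trace.go, is that reasons which are distinct by name may
share a value (for example, traceBlockGCSweep, traceBlockSystemGoroutine,
traceBlockPreempted, and several others all map to traceEvGoBlock), so they
must never be compared. A minimal standalone sketch of the pitfall, using a
placeholder event value rather than the runtime's real constants:

	package main

	import "fmt"

	// Placeholder event code; the concrete value does not matter for the point.
	const traceEvGoBlock = 20

	type traceBlockReason uint8

	// Mirrors the aliasing in the const block above: several reasons share one event.
	const (
		traceBlockGCSweep         traceBlockReason = traceEvGoBlock
		traceBlockSystemGoroutine traceBlockReason = traceEvGoBlock
	)

	func main() {
		// Distinct by name, identical by value: never branch on reason equality.
		fmt.Println(traceBlockGCSweep == traceBlockSystemGoroutine) // prints true
	}

In the runtime itself the reason is only ever handed to traceGoPark, which
converts it back to an event byte; nothing else inspects it.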