runtime: refactor runtime->tracer API to appear more like a lock
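
The pattern this commit applies throughout debugcall.go is the move from
guarding trace events with a bare trace.enabled check (the old
"if trace.enabled { traceGoPark(...) }" shape) to a lock-like protocol:
traceAcquire, an ok() check, the event call, then traceRelease. The sketch
below is a minimal, self-contained model of that acquire/release shape, not
the runtime's real implementation; the tracer and traceLocker types and their
methods are hypothetical stand-ins used only to illustrate the flow.

        package main

        import (
                "fmt"
                "sync/atomic"
        )

        // tracer and traceLocker are hypothetical stand-ins for the runtime's
        // private trace state; they only model the acquire/release protocol.
        type tracer struct{ enabled atomic.Bool }

        type traceLocker struct{ ok bool }

        // traceAcquire snapshots "is tracing on?" and returns a handle that,
        // in the real runtime, also keeps the tracer from stopping until
        // traceRelease is called.
        func traceAcquire(t *tracer) traceLocker {
                return traceLocker{ok: t.enabled.Load()}
        }

        func (tl traceLocker) Ok() bool { return tl.ok }

        // GoPark stands in for emitting a "goroutine parked" trace event.
        func (tl traceLocker) GoPark(reason string) { fmt.Println("GoPark:", reason) }

        // traceRelease ends the critical section opened by traceAcquire.
        func traceRelease(tl traceLocker) {}

        func main() {
                t := &tracer{}
                t.enabled.Store(true)

                // Old style: if enabled, emit the event directly.
                // New style (this diff): acquire, change status, check, emit, release.
                trace := traceAcquire(t)
                // ...the goroutine status transition (casGToWaiting in the diff)
                // happens here, while the handle is held...
                if trace.Ok() {
                        trace.GoPark("debug call")
                        traceRelease(trace)
                }
        }

In the diff below, the status transitions (casGToWaiting, casgstatus) sit
between traceAcquire and traceRelease; the apparent point of the lock-like
shape is that the transition and its trace event form one critical section
that starting or stopping the tracer can synchronize with.
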
index faddf59eed6b405b11709b69012d1148af9845c6..5dd83063ff9cb8bb88f6e34d42ab162c9f106812 100644 (file)
@@ -2,12 +2,17 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build amd64
-// +build amd64
+// Though the debug call function feature is not enabled on
+// ppc64, ppc64 is included to avoid a missing Go declaration
+// error for debugCallPanicked while building runtime.test.
+//go:build amd64 || arm64 || ppc64le || ppc64
 
 package runtime
 
-import "unsafe"
+import (
+       "internal/abi"
+       "unsafe"
+)
 
 const (
        debugCallSystemStack = "executing on Go runtime stack"
@@ -17,7 +22,7 @@ const (
 )
 
 func debugCallV2()
-func debugCallPanicked(val interface{})
+func debugCallPanicked(val any)
 
 // debugCallCheck checks whether it is safe to inject a debugger
 // function call with return PC pc. If not, it returns a string
@@ -78,11 +83,11 @@ func debugCallCheck(pc uintptr) string {
                }
 
                // Check that this isn't an unsafe-point.
-               if pc != f.entry {
+               if pc != f.entry() {
                        pc--
                }
-               up := pcdatavalue(f, _PCDATA_UnsafePoint, pc, nil)
-               if up != _PCDATA_UnsafePointSafe {
+               up := pcdatavalue(f, abi.PCDATA_UnsafePoint, pc)
+               if up != abi.UnsafePointSafe {
                        // Not at a safe point.
                        ret = debugCallUnsafePoint
                }
@@ -100,11 +105,18 @@ func debugCallCheck(pc uintptr) string {
 //
 //go:nosplit
 func debugCallWrap(dispatch uintptr) {
-       var lockedm bool
        var lockedExt uint32
        callerpc := getcallerpc()
        gp := getg()
 
+       // Lock ourselves to the OS thread.
+       //
+       // Debuggers rely on us running on the same thread until we get to
+       // dispatch the function they asked us to.
+       //
+       // We're going to transfer this to the new G we just created.
+       lockOSThread()
+
        // Create a new goroutine to execute the call on. Run this on
        // the system stack to avoid growing our stack.
        systemstack(func() {
@@ -112,34 +124,29 @@ func debugCallWrap(dispatch uintptr) {
                // closure and start the goroutine with that closure, but the compiler disallows
                // implicit closure allocation in the runtime.
                fn := debugCallWrap1
-               newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), nil, 0, gp, callerpc)
+               newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), gp, callerpc)
                args := &debugCallWrapArgs{
                        dispatch: dispatch,
                        callingG: gp,
                }
                newg.param = unsafe.Pointer(args)
 
-               // If the current G is locked, then transfer that
-               // locked-ness to the new goroutine.
-               if gp.lockedm != 0 {
-                       // Save lock state to restore later.
-                       mp := gp.m
-                       if mp != gp.lockedm.ptr() {
-                               throw("inconsistent lockedm")
-                       }
-
-                       lockedm = true
-                       lockedExt = mp.lockedExt
-
-                       // Transfer external lock count to internal so
-                       // it can't be unlocked from the debug call.
-                       mp.lockedInt++
-                       mp.lockedExt = 0
-
-                       mp.lockedg.set(newg)
-                       newg.lockedm.set(mp)
-                       gp.lockedm = 0
+               // Transfer locked-ness to the new goroutine.
+               // Save lock state to restore later.
+               mp := gp.m
+               if mp != gp.lockedm.ptr() {
+                       throw("inconsistent lockedm")
                }
+               // Save the external lock count and clear it so
+               // that it can't be unlocked from the debug call.
+               // Note: we already locked internally to the thread,
+               // so if we were locked before we're still locked now.
+               lockedExt = mp.lockedExt
+               mp.lockedExt = 0
+
+               mp.lockedg.set(newg)
+               newg.lockedm.set(mp)
+               gp.lockedm = 0
 
                // Mark the calling goroutine as being at an async
                // safe-point, since it has a few conservative frames
@@ -159,11 +166,12 @@ func debugCallWrap(dispatch uintptr) {
                gp.schedlink = 0
 
                // Park the calling goroutine.
-               gp.waitreason = waitReasonDebugCall
-               if trace.enabled {
-                       traceGoPark(traceEvGoBlock, 1)
+               trace := traceAcquire()
+               casGToWaiting(gp, _Grunning, waitReasonDebugCall)
+               if trace.ok() {
+                       trace.GoPark(traceBlockDebugCall, 1)
+                       traceRelease(trace)
                }
-               casgstatus(gp, _Grunning, _Gwaiting)
                dropg()
 
                // Directly execute the new goroutine. The debug
@@ -176,13 +184,13 @@ func debugCallWrap(dispatch uintptr) {
        // We'll resume here when the call returns.
 
        // Restore locked state.
-       if lockedm {
-               mp := gp.m
-               mp.lockedExt = lockedExt
-               mp.lockedInt--
-               mp.lockedg.set(gp)
-               gp.lockedm.set(mp)
-       }
+       mp := gp.m
+       mp.lockedExt = lockedExt
+       mp.lockedg.set(gp)
+       gp.lockedm.set(mp)
+
+       // Undo the lockOSThread we did earlier.
+       unlockOSThread()
 
        gp.asyncSafePoint = false
 }
@@ -219,19 +227,23 @@ func debugCallWrap1() {
                // Switch back to the calling goroutine. At some point
                // the scheduler will schedule us again and we'll
                // finish exiting.
-               if trace.enabled {
-                       traceGoSched()
-               }
+               trace := traceAcquire()
                casgstatus(gp, _Grunning, _Grunnable)
+               if trace.ok() {
+                       trace.GoSched()
+                       traceRelease(trace)
+               }
                dropg()
                lock(&sched.lock)
                globrunqput(gp)
                unlock(&sched.lock)
 
-               if trace.enabled {
-                       traceGoUnpark(callingG, 0)
-               }
+               trace = traceAcquire()
                casgstatus(callingG, _Gwaiting, _Grunnable)
+               if trace.ok() {
+                       trace.GoUnpark(callingG, 0)
+                       traceRelease(trace)
+               }
                execute(callingG, true)
        })
 }
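
For the thread-locking half of the change: debugCallWrap now calls
lockOSThread() unconditionally before parking, because debuggers rely on the
injected call being dispatched from the same thread, and the wrapper then
hands the locked-ness over to the new goroutine that runs the call. Those
hooks are runtime-internal; the closest public analogue is
runtime.LockOSThread, shown below as a minimal illustration of pinning a
goroutine to its OS thread (this is not the code path debugger injection
actually uses).

        package main

        import (
                "fmt"
                "runtime"
        )

        func main() {
                // Pin this goroutine to its current OS thread. Until
                // UnlockOSThread runs, the scheduler will not run any other
                // goroutine on this thread and will not migrate this
                // goroutine to another thread.
                runtime.LockOSThread()
                defer runtime.UnlockOSThread()

                // While pinned, per-thread state that a debugger (or a C
                // library) depends on stays on this one thread.
                fmt.Println("pinned to one OS thread; goroutines:", runtime.NumGoroutine())
        }

The deferred UnlockOSThread mirrors the unlockOSThread() call the wrapper
makes once the injected call returns and the saved lock state has been
restored.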