runtime: refactor runtime->tracer API to appear more like a lock
diff --git a/src/runtime/debugcall.go b/src/runtime/debugcall.go
index 5cbe382ce7aac5e39f5856c3c11f1b151bfa3f65..5dd83063ff9cb8bb88f6e34d42ab162c9f106812 100644
--- a/src/runtime/debugcall.go
+++ b/src/runtime/debugcall.go
@@ -2,11 +2,17 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build amd64
+// Though the debug call function feature is not enabled on ppc64,
+// ppc64 is listed here to avoid a "missing Go declaration" error for
+// debugCallPanicked when building runtime.test.
+//go:build amd64 || arm64 || ppc64le || ppc64
 
 package runtime
 
-import "unsafe"
+import (
+       "internal/abi"
+       "unsafe"
+)
 
 const (
        debugCallSystemStack = "executing on Go runtime stack"
@@ -15,8 +21,8 @@ const (
        debugCallUnsafePoint = "call not at safe point"
 )
 
-func debugCallV1()
-func debugCallPanicked(val interface{})
+func debugCallV2()
+func debugCallPanicked(val any)
 
 // debugCallCheck checks whether it is safe to inject a debugger
 // function call with return PC pc. If not, it returns a string
@@ -61,7 +67,7 @@ func debugCallCheck(pc uintptr) string {
                        "debugCall16384",
                        "debugCall32768",
                        "debugCall65536":
-                       // These functions are whitelisted so that the debugger can initiate multiple function calls.
+                       // These functions are allowed so that the debugger can initiate multiple function calls.
                        // See: https://golang.org/cl/161137/
                        return
                }
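
The names allowed above correspond to power-of-two frame sizes (32 through 65536 bytes) generated in the per-architecture assembly; they reserve stack space for an injected call's arguments, and a debugger is expected to pick a variant large enough for the callee. A hedged sketch of that selection, where pickDebugCall is a hypothetical helper and not runtime API:

    package main

    import "fmt"

    // pickDebugCall returns the name of the smallest debugCall<N> whose
    // power-of-two frame size fits argBytes, mirroring the sizes allowed
    // in the switch above. Illustrative helper only, not runtime API.
    func pickDebugCall(argBytes int) (string, bool) {
    	for n := 32; n <= 65536; n *= 2 {
    		if argBytes <= n {
    			return fmt.Sprintf("debugCall%d", n), true
    		}
    	}
    	return "", false // too large for any generated variant
    }

    func main() {
    	fmt.Println(pickDebugCall(100)) // debugCall128 true
    }
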
@@ -76,32 +82,14 @@ func debugCallCheck(pc uintptr) string {
                        return
                }
 
-               if !go115ReduceLiveness {
-                       // Look up PC's register map.
-                       pcdata := int32(-1)
-                       if pc != f.entry {
-                               pc--
-                               pcdata = pcdatavalue(f, _PCDATA_RegMapIndex, pc, nil)
-                       }
-                       if pcdata == -1 {
-                               pcdata = 0 // in prologue
-                       }
-                       stkmap := (*stackmap)(funcdata(f, _FUNCDATA_RegPointerMaps))
-                       if pcdata == -2 || stkmap == nil {
-                               // Not at a safe point.
-                               ret = debugCallUnsafePoint
-                               return
-                       }
-               } else {
-                       // Check that this isn't an unsafe-point.
-                       if pc != f.entry {
-                               pc--
-                       }
-                       up := pcdatavalue(f, _PCDATA_UnsafePoint, pc, nil)
-                       if up != _PCDATA_UnsafePointSafe {
-                               // Not at a safe point.
-                               ret = debugCallUnsafePoint
-                       }
+               // Check that this isn't an unsafe-point.
+               if pc != f.entry() {
+                       pc--
+               }
+               up := pcdatavalue(f, abi.PCDATA_UnsafePoint, pc)
+               if up != abi.UnsafePointSafe {
+                       // Not at a safe point.
+                       ret = debugCallUnsafePoint
                }
        })
        return ret
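
The replacement above drops the old register-map branch (the !go115ReduceLiveness path) and keeps only the unsafe-point check: the PC is stepped back by one byte unless it is the function entry, so the PCDATA lookup describes the call instruction itself, and anything other than abi.UnsafePointSafe rejects the injection point. A hedged illustration of the pc adjustment, with lookupPC as a hypothetical stand-in for the runtime's lookup:

    package main

    import "fmt"

    // lookupPC mirrors the "if pc != f.entry() { pc-- }" step above: a
    // return PC points at the instruction after a call, so metadata
    // lookups step back one byte to land inside the call itself. Entry
    // PCs are left alone. Hypothetical helper, not a runtime function.
    func lookupPC(entry, retPC uintptr) uintptr {
    	if retPC != entry {
    		return retPC - 1
    	}
    	return retPC
    }

    func main() {
    	fmt.Printf("%#x\n", lookupPC(0x1000, 0x1040)) // 0x103f
    }
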
@@ -113,48 +101,52 @@ func debugCallCheck(pc uintptr) string {
 // function at PC dispatch.
 //
 // This must be deeply nosplit because there are untyped values on the
-// stack from debugCallV1.
+// stack from debugCallV2.
 //
 //go:nosplit
 func debugCallWrap(dispatch uintptr) {
-       var lockedm bool
        var lockedExt uint32
        callerpc := getcallerpc()
        gp := getg()
 
+       // Lock ourselves to the OS thread.
+       //
+       // Debuggers rely on us running on the same thread until we get to
+       // dispatch the function they asked us to.
+       //
+       // We're going to transfer this to the new G we just created.
+       lockOSThread()
+
        // Create a new goroutine to execute the call on. Run this on
        // the system stack to avoid growing our stack.
        systemstack(func() {
-               var args struct {
-                       dispatch uintptr
-                       callingG *g
-               }
-               args.dispatch = dispatch
-               args.callingG = gp
+               // TODO(mknyszek): It would be nice to wrap these arguments in an allocated
+               // closure and start the goroutine with that closure, but the compiler disallows
+               // implicit closure allocation in the runtime.
                fn := debugCallWrap1
-               newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), unsafe.Pointer(&args), int32(unsafe.Sizeof(args)), gp, callerpc)
+               newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), gp, callerpc)
+               args := &debugCallWrapArgs{
+                       dispatch: dispatch,
+                       callingG: gp,
+               }
+               newg.param = unsafe.Pointer(args)
 
-               // If the current G is locked, then transfer that
-               // locked-ness to the new goroutine.
-               if gp.lockedm != 0 {
-                       // Save lock state to restore later.
-                       mp := gp.m
-                       if mp != gp.lockedm.ptr() {
-                               throw("inconsistent lockedm")
-                       }
-
-                       lockedm = true
-                       lockedExt = mp.lockedExt
-
-                       // Transfer external lock count to internal so
-                       // it can't be unlocked from the debug call.
-                       mp.lockedInt++
-                       mp.lockedExt = 0
-
-                       mp.lockedg.set(newg)
-                       newg.lockedm.set(mp)
-                       gp.lockedm = 0
+               // Transfer locked-ness to the new goroutine.
+               // Save lock state to restore later.
+               mp := gp.m
+               if mp != gp.lockedm.ptr() {
+                       throw("inconsistent lockedm")
                }
+               // Save the external lock count and clear it so
+               // that it can't be unlocked from the debug call.
+               // Note: we already locked internally to the thread,
+               // so if we were locked before we're still locked now.
+               lockedExt = mp.lockedExt
+               mp.lockedExt = 0
+
+               mp.lockedg.set(newg)
+               newg.lockedm.set(mp)
+               gp.lockedm = 0
 
                // Mark the calling goroutine as being at an async
                // safe-point, since it has a few conservative frames
@@ -174,11 +166,12 @@ func debugCallWrap(dispatch uintptr) {
                gp.schedlink = 0
 
                // Park the calling goroutine.
-               gp.waitreason = waitReasonDebugCall
-               if trace.enabled {
-                       traceGoPark(traceEvGoBlock, 1)
+               trace := traceAcquire()
+               casGToWaiting(gp, _Grunning, waitReasonDebugCall)
+               if trace.ok() {
+                       trace.GoPark(traceBlockDebugCall, 1)
+                       traceRelease(trace)
                }
-               casgstatus(gp, _Grunning, _Gwaiting)
                dropg()
 
                // Directly execute the new goroutine. The debug
@@ -191,20 +184,30 @@ func debugCallWrap(dispatch uintptr) {
        // We'll resume here when the call returns.
 
        // Restore locked state.
-       if lockedm {
-               mp := gp.m
-               mp.lockedExt = lockedExt
-               mp.lockedInt--
-               mp.lockedg.set(gp)
-               gp.lockedm.set(mp)
-       }
+       mp := gp.m
+       mp.lockedExt = lockedExt
+       mp.lockedg.set(gp)
+       gp.lockedm.set(mp)
+
+       // Undo the lockOSThread we did earlier.
+       unlockOSThread()
 
        gp.asyncSafePoint = false
 }
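
debugCallWrap now pins the goroutine to its OS thread up front and transfers that locked-ness to the goroutine running the injected call, so the debugger keeps observing the same thread. Only the thread pinning itself is visible outside the runtime; a hedged, user-space sketch of that piece using the public runtime API (the lockedExt/lockedg bookkeeping has no public counterpart):

    package main

    import (
    	"fmt"
    	"runtime"
    )

    // withPinnedThread runs f while the calling goroutine is pinned to its
    // OS thread, then releases the pin: the user-visible analogue of the
    // lockOSThread/unlockOSThread bracket added around the debug call above.
    func withPinnedThread(f func()) {
    	runtime.LockOSThread()
    	defer runtime.UnlockOSThread()
    	f()
    }

    func main() {
    	withPinnedThread(func() {
    		fmt.Println("running pinned to one OS thread")
    	})
    }
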
 
+type debugCallWrapArgs struct {
+       dispatch uintptr
+       callingG *g
+}
+
 // debugCallWrap1 is the continuation of debugCallWrap on the callee
 // goroutine.
-func debugCallWrap1(dispatch uintptr, callingG *g) {
+func debugCallWrap1() {
+       gp := getg()
+       args := (*debugCallWrapArgs)(gp.param)
+       dispatch, callingG := args.dispatch, args.callingG
+       gp.param = nil
+
        // Dispatch call and trap panics.
        debugCallWrap2(dispatch)
 
@@ -224,19 +227,23 @@ func debugCallWrap1(dispatch uintptr, callingG *g) {
                // Switch back to the calling goroutine. At some point
                // the scheduler will schedule us again and we'll
                // finish exiting.
-               if trace.enabled {
-                       traceGoSched()
-               }
+               trace := traceAcquire()
                casgstatus(gp, _Grunning, _Grunnable)
+               if trace.ok() {
+                       trace.GoSched()
+                       traceRelease(trace)
+               }
                dropg()
                lock(&sched.lock)
                globrunqput(gp)
                unlock(&sched.lock)
 
-               if trace.enabled {
-                       traceGoUnpark(callingG, 0)
-               }
+               trace = traceAcquire()
                casgstatus(callingG, _Gwaiting, _Grunnable)
+               if trace.ok() {
+                       trace.GoUnpark(callingG, 0)
+                       traceRelease(trace)
+               }
                execute(callingG, true)
        })
 }
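
The trace changes in the last hunks are the point of the commit message at the top: emitting a trace event is now bracketed like a lock. traceAcquire returns a handle whose ok method reports whether tracing is on, the goroutine status change happens while the handle is held, and traceRelease closes the critical section. A hedged, self-contained sketch of that shape; all names are illustrative stand-ins, not the runtime's tracer types:

    package main

    import (
    	"fmt"
    	"sync"
    )

    // tracer is a toy stand-in for the runtime's tracer state.
    type tracer struct {
    	mu      sync.Mutex
    	enabled bool
    }

    // traceLocker mimics the handle returned by traceAcquire: ok reports
    // whether tracing was on when the handle was taken, and release ends
    // the critical section.
    type traceLocker struct{ t *tracer }

    func (t *tracer) acquire() traceLocker {
    	t.mu.Lock()
    	if !t.enabled {
    		t.mu.Unlock()
    		return traceLocker{} // not ok: nothing held, nothing to release
    	}
    	return traceLocker{t: t}
    }

    func (l traceLocker) ok() bool       { return l.t != nil }
    func (l traceLocker) goSched()       { fmt.Println("event: GoSched") }
    func (l traceLocker) goUnpark(g int) { fmt.Println("event: GoUnpark", g) }
    func (l traceLocker) release()       { l.t.mu.Unlock() }

    func main() {
    	t := &tracer{enabled: true}

    	tl := t.acquire()
    	// The status transition (casgstatus in the diff) happens while the
    	// handle is held, so the tracer observes a consistent state.
    	if tl.ok() {
    		tl.goSched()
    		tl.release()
    	}

    	tl = t.acquire()
    	if tl.ok() {
    		tl.goUnpark(42)
    		tl.release()
    	}
    }
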