// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build amd64
-// +build amd64
+// The debug call function feature is not enabled on ppc64,
+// but ppc64 is included here to avoid a "missing Go declaration"
+// error for debugCallPanicked while building runtime.test.
+//go:build amd64 || arm64 || ppc64le || ppc64
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
const (
debugCallSystemStack = "executing on Go runtime stack"
)
func debugCallV2()
-func debugCallPanicked(val interface{})
+func debugCallPanicked(val any)
// debugCallCheck checks whether it is safe to inject a debugger
// function call with return PC pc. If not, it returns a string
}
// Check that this isn't an unsafe-point.
- if pc != f.entry {
+ if pc != f.entry() {
pc--
}
- up := pcdatavalue(f, _PCDATA_UnsafePoint, pc, nil)
- if up != _PCDATA_UnsafePointSafe {
+ up := pcdatavalue(f, abi.PCDATA_UnsafePoint, pc)
+ if up != abi.UnsafePointSafe {
// Not at a safe point.
ret = debugCallUnsafePoint
}
//
//go:nosplit
func debugCallWrap(dispatch uintptr) {
- var lockedm bool
var lockedExt uint32
callerpc := getcallerpc()
gp := getg()
+ // Lock ourselves to the OS thread.
+ //
+ // Debuggers rely on us running on the same thread until we get to
+ // dispatch the function they asked us to.
+ //
+ // We're going to transfer this to the new G we just created.
+ lockOSThread()
+
// Create a new goroutine to execute the call on. Run this on
// the system stack to avoid growing our stack.
systemstack(func() {
}
newg.param = unsafe.Pointer(args)
- // If the current G is locked, then transfer that
- // locked-ness to the new goroutine.
- if gp.lockedm != 0 {
- // Save lock state to restore later.
- mp := gp.m
- if mp != gp.lockedm.ptr() {
- throw("inconsistent lockedm")
- }
-
- lockedm = true
- lockedExt = mp.lockedExt
-
- // Transfer external lock count to internal so
- // it can't be unlocked from the debug call.
- mp.lockedInt++
- mp.lockedExt = 0
-
- mp.lockedg.set(newg)
- newg.lockedm.set(mp)
- gp.lockedm = 0
+ // Transfer locked-ness to the new goroutine.
+ // Save lock state to restore later.
+ mp := gp.m
+ if mp != gp.lockedm.ptr() {
+ throw("inconsistent lockedm")
}
+ // Save the external lock count and clear it so
+ // that it can't be unlocked from the debug call.
+ // Note: we already locked internally to the thread,
+ // so if we were locked before we're still locked now.
+ lockedExt = mp.lockedExt
+ mp.lockedExt = 0
+
+ mp.lockedg.set(newg)
+ newg.lockedm.set(mp)
+ gp.lockedm = 0
// Mark the calling goroutine as being at an async
// safe-point, since it has a few conservative frames
gp.schedlink = 0
// Park the calling goroutine.
- gp.waitreason = waitReasonDebugCall
- if trace.enabled {
- traceGoPark(traceEvGoBlock, 1)
+ trace := traceAcquire()
+ casGToWaiting(gp, _Grunning, waitReasonDebugCall)
+ if trace.ok() {
+ trace.GoPark(traceBlockDebugCall, 1)
+ traceRelease(trace)
}
- casgstatus(gp, _Grunning, _Gwaiting)
dropg()
// Directly execute the new goroutine. The debug
// We'll resume here when the call returns.
// Restore locked state.
- if lockedm {
- mp := gp.m
- mp.lockedExt = lockedExt
- mp.lockedInt--
- mp.lockedg.set(gp)
- gp.lockedm.set(mp)
- }
+ mp := gp.m
+ mp.lockedExt = lockedExt
+ mp.lockedg.set(gp)
+ gp.lockedm.set(mp)
+
+ // Undo the lockOSThread we did earlier.
+ unlockOSThread()
gp.asyncSafePoint = false
}
// Switch back to the calling goroutine. At some point
// the scheduler will schedule us again and we'll
// finish exiting.
- if trace.enabled {
- traceGoSched()
- }
+ trace := traceAcquire()
casgstatus(gp, _Grunning, _Grunnable)
+ if trace.ok() {
+ trace.GoSched()
+ traceRelease(trace)
+ }
dropg()
lock(&sched.lock)
globrunqput(gp)
unlock(&sched.lock)
- if trace.enabled {
- traceGoUnpark(callingG, 0)
- }
+ trace = traceAcquire()
casgstatus(callingG, _Gwaiting, _Grunnable)
+ if trace.ok() {
+ trace.GoUnpark(callingG, 0)
+ traceRelease(trace)
+ }
execute(callingG, true)
})
}