1 // Copyright 2018 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Though the debug call function feature is not enabled on
6 // ppc64, ppc64 is listed in the build constraint to avoid a missing
7 // Go declaration error for debugCallPanicked when building runtime.test.
8 //go:build amd64 || arm64 || ppc64le || ppc64
// Reason strings returned by debugCallCheck when it is not safe to
// inject a debugger call at the current PC. An empty result from
// debugCallCheck means injection is allowed; these strings name the
// specific check that failed.
18 debugCallSystemStack = "executing on Go runtime stack"
19 debugCallUnknownFunc = "call from unknown function"
20 debugCallRuntime = "call from within the Go runtime"
21 debugCallUnsafePoint = "call not at safe point"
// debugCallPanicked reports a panic value recovered from a
// debugger-injected call. It has no Go body, so it is presumably
// implemented in assembly per architecture — the file header notes that
// ppc64 is included in the build constraint purely so this declaration
// exists when building runtime.test. NOTE(review): exact semantics of
// how the debugger observes val are not visible here — confirm against
// the asm_*.s debugCall stubs.
25 func debugCallPanicked(val any)
27 // debugCallCheck checks whether it is safe to inject a debugger
28 // function call with return PC pc. If not, it returns a string
// NOTE(review): presumably the empty string is returned when every
// check passes — confirm; the debugCall* reason constants above are
// the possible failure values assigned below.
32 func debugCallCheck(pc uintptr) string {
33 // No user calls from the system stack.
34 if getg() != getg().m.curg {
35 return debugCallSystemStack
37 if sp := getcallersp(); !(getg().stack.lo < sp && sp <= getg().stack.hi) {
38 // Fast syscalls (nanotime) and racecall switch to the
39 // g0 stack without switching g. We can't safely make
40 // a call in this state. (We can't even safely
42 return debugCallSystemStack
45 // Switch to the system stack to avoid overflowing the user
// ret accumulates the failure reason while running on the system
// stack; it is the function's eventual return value.
51 ret = debugCallUnknownFunc
70 // These functions are allowed so that the debugger can initiate multiple function calls.
71 // See: https://golang.org/cl/161137/
75 // Disallow calls from the runtime. We could
76 // potentially make this condition tighter (e.g., not
77 // when locks are held), but there are enough tightly
78 // coded sequences (e.g., defer handling) that it's
79 // better to play it safe.
// Prefix match on the symbol name: anything in package runtime is
// rejected wholesale rather than trying to enumerate safe functions.
80 if pfx := "runtime."; len(name) > len(pfx) && name[:len(pfx)] == pfx {
81 ret = debugCallRuntime
85 // Check that this isn't an unsafe-point.
// Consult the PCDATA unsafe-point table for the function containing pc.
89 up := pcdatavalue(f, abi.PCDATA_UnsafePoint, pc)
90 if up != abi.UnsafePointSafe {
91 // Not at a safe point.
92 ret = debugCallUnsafePoint
98 // debugCallWrap starts a new goroutine to run a debug call and blocks
99 // the calling goroutine. On the goroutine, it prepares to recover
100 // panics from the debug call, and then calls the call dispatching
101 // function at PC dispatch.
103 // This must be deeply nosplit because there are untyped values on the
104 // stack from debugCallV2.
107 func debugCallWrap(dispatch uintptr) {
109 callerpc := getcallerpc()
112 // Lock ourselves to the OS thread.
114 // Debuggers rely on us running on the same thread until we get to
115 // dispatch the function they asked us to.
117 // We're going to transfer this to the new G we just created.
120 // Create a new goroutine to execute the call on. Run this on
121 // the system stack to avoid growing our stack.
123 // TODO(mknyszek): It would be nice to wrap these arguments in an allocated
124 // closure and start the goroutine with that closure, but the compiler disallows
125 // implicit closure allocation in the runtime.
// newproc1 takes a *funcval; fn is reinterpreted through unsafe because
// the runtime cannot form a closure here (see TODO above).
127 newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), gp, callerpc)
128 args := &debugCallWrapArgs{
// Hand the arguments to the new goroutine through its param slot;
// debugCallWrap1 reads them back out of gp.param.
132 newg.param = unsafe.Pointer(args)
134 // Transfer locked-ness to the new goroutine.
135 // Save lock state to restore later.
137 if mp != gp.lockedm.ptr() {
138 throw("inconsistent lockedm")
140 // Save the external lock count and clear it so
141 // that it can't be unlocked from the debug call.
142 // Note: we already locked internally to the thread,
143 // so if we were locked before we're still locked now.
// lockedExt is restored below once the call returns.
144 lockedExt = mp.lockedExt
151 // Mark the calling goroutine as being at an async
152 // safe-point, since it has a few conservative frames
153 // at the bottom of the stack. This also prevents
155 gp.asyncSafePoint = true
157 // Stash newg away so we can execute it below (mcall's
158 // closure can't capture anything).
159 gp.schedlink.set(newg)
162 // Switch to the new goroutine.
165 newg := gp.schedlink.ptr()
168 // Park the calling goroutine.
169 trace := traceAcquire()
// Transition this G from running to waiting; it stays parked until
// debugCallWrap1 makes it runnable again.
170 casGToWaiting(gp, _Grunning, waitReasonDebugCall)
172 trace.GoPark(traceBlockDebugCall, 1)
177 // Directly execute the new goroutine. The debug
178 // protocol will continue on the new goroutine, so
179 // it's important we not just let the scheduler do
180 // this or it may resume a different goroutine.
184 // We'll resume here when the call returns.
186 // Restore locked state.
188 mp.lockedExt = lockedExt
192 // Undo the lockOSThread we did earlier.
195 gp.asyncSafePoint = false
// debugCallWrapArgs carries the parameters from debugCallWrap to
// debugCallWrap1 through the new goroutine's param slot (see the
// newg.param assignment in debugCallWrap and the gp.param read in
// debugCallWrap1): the dispatch PC and the calling goroutine to resume.
198 type debugCallWrapArgs struct {
203 // debugCallWrap1 is the continuation of debugCallWrap on the callee
205 func debugCallWrap1() {
// Recover the arguments debugCallWrap stashed in our param slot.
207 args := (*debugCallWrapArgs)(gp.param)
208 dispatch, callingG := args.dispatch, args.callingG
211 // Dispatch call and trap panics.
212 debugCallWrap2(dispatch)
214 // Resume the caller goroutine.
// Stash callingG in schedlink for the same reason debugCallWrap does:
// an mcall closure can't capture anything.
215 getg().schedlink.set(callingG)
217 callingG := gp.schedlink.ptr()
220 // Unlock this goroutine from the M if necessary. The
221 // calling G will relock.
227 // Switch back to the calling goroutine. At some point
228 // the scheduler will schedule us again and we'll
// This goroutine goes back to runnable rather than exiting here.
230 trace := traceAcquire()
231 casgstatus(gp, _Grunning, _Grunnable)
// Wake the parked caller and hand the thread directly to it, mirroring
// the direct execute in debugCallWrap so the debug protocol stays on
// the expected goroutine.
241 trace = traceAcquire()
242 casgstatus(callingG, _Gwaiting, _Grunnable)
244 trace.GoUnpark(callingG, 0)
247 execute(callingG, true)
251 func debugCallWrap2(dispatch uintptr) {
252 // Call the dispatch function and trap panics.
// Build a callable func value from the raw dispatch PC by aliasing a
// funcval through unsafe — the runtime cannot construct closures, and
// noescape keeps the stack-allocated funcval from escaping.
254 dispatchFV := funcval{dispatch}
255 *(*unsafe.Pointer)(unsafe.Pointer(&dispatchF)) = noescape(unsafe.Pointer(&dispatchFV))
// A trapped panic value is reported to the debugger via the assembly
// hook declared above.
261 debugCallPanicked(err)