1 // Copyright 2018 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 //go:build amd64 || arm64
// Reason strings returned by debugCallCheck to describe why an
// injected debugger call is unsafe at the current PC. (The enclosing
// const block's opening line is elided in this excerpt.)
15 debugCallSystemStack = "executing on Go runtime stack"
16 debugCallUnknownFunc = "call from unknown function"
17 debugCallRuntime = "call from within the Go runtime"
18 debugCallUnsafePoint = "call not at safe point"
// debugCallPanicked reports a panic value trapped during a debugger-injected
// call (see debugCallWrap2, which invokes it with the recovered value).
// Declared without a body, so the implementation is provided outside this
// file — presumably in assembly; confirm against the per-arch debugcall files.
22 func debugCallPanicked(val any)
24 // debugCallCheck checks whether it is safe to inject a debugger
25 // function call with return PC pc. If not, it returns a string
// explaining why (one of the debugCall* reason constants above); the
// remainder of this doc comment is elided in this excerpt.
29 func debugCallCheck(pc uintptr) string {
30 // No user calls from the system stack.
31 if getg() != getg().m.curg {
32 return debugCallSystemStack
34 if sp := getcallersp(); !(getg().stack.lo < sp && sp <= getg().stack.hi) {
35 // Fast syscalls (nanotime) and racecall switch to the
36 // g0 stack without switching g. We can't safely make
37 // a call in this state. (We can't even safely
39 return debugCallSystemStack
42 // Switch to the system stack to avoid overflowing the user
// Reported when pc does not resolve to a known Go function
// (the lookup that establishes this is elided in this excerpt).
48 ret = debugCallUnknownFunc
67 // These functions are allowed so that the debugger can initiate multiple function calls.
68 // See: https://golang.org/cl/161137/
72 // Disallow calls from the runtime. We could
73 // potentially make this condition tighter (e.g., not
74 // when locks are held), but there are enough tightly
75 // coded sequences (e.g., defer handling) that it's
76 // better to play it safe.
// Manual prefix comparison (equivalent to a HasPrefix check on "runtime.").
77 if pfx := "runtime."; len(name) > len(pfx) && name[:len(pfx)] == pfx {
78 ret = debugCallRuntime
82 // Check that this isn't an unsafe-point.
// Consult the PCDATA unsafe-point table for this PC.
86 up := pcdatavalue(f, abi.PCDATA_UnsafePoint, pc)
87 if up != abi.UnsafePointSafe {
88 // Not at a safe point.
89 ret = debugCallUnsafePoint
95 // debugCallWrap starts a new goroutine to run a debug call and blocks
96 // the calling goroutine. On the goroutine, it prepares to recover
97 // panics from the debug call, and then calls the call dispatching
98 // function at PC dispatch.
100 // This must be deeply nosplit because there are untyped values on the
101 // stack from debugCallV2.
104 func debugCallWrap(dispatch uintptr) {
106 callerpc := getcallerpc()
109 // Lock ourselves to the OS thread.
111 // Debuggers rely on us running on the same thread until we get to
112 // dispatch the function they asked us to.
114 // We're going to transfer this to the new G we just created.
117 // Create a new goroutine to execute the call on. Run this on
118 // the system stack to avoid growing our stack.
120 // TODO(mknyszek): It would be nice to wrap these arguments in an allocated
121 // closure and start the goroutine with that closure, but the compiler disallows
122 // implicit closure allocation in the runtime.
// fn (declaration elided in this excerpt) is reinterpreted as a
// *funcval to serve as the new goroutine's entry function.
124 newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), gp, callerpc)
125 args := &debugCallWrapArgs{
// Hand the arguments to the new goroutine through its param slot;
// debugCallWrap1 reads them back out of gp.param.
129 newg.param = unsafe.Pointer(args)
131 // Transfer locked-ness to the new goroutine.
132 // Save lock state to restore later.
134 if mp != gp.lockedm.ptr() {
135 throw("inconsistent lockedm")
137 // Save the external lock count and clear it so
138 // that it can't be unlocked from the debug call.
139 // Note: we already locked internally to the thread,
140 // so if we were locked before we're still locked now.
141 lockedExt = mp.lockedExt
148 // Mark the calling goroutine as being at an async
149 // safe-point, since it has a few conservative frames
150 // at the bottom of the stack. This also prevents
152 gp.asyncSafePoint = true
154 // Stash newg away so we can execute it below (mcall's
155 // closure can't capture anything).
156 gp.schedlink.set(newg)
159 // Switch to the new goroutine.
// Recover the goroutine stashed in schedlink above.
162 newg := gp.schedlink.ptr()
165 // Park the calling goroutine.
167 traceGoPark(traceBlockDebugCall, 1)
169 casGToWaiting(gp, _Grunning, waitReasonDebugCall)
172 // Directly execute the new goroutine. The debug
173 // protocol will continue on the new goroutine, so
174 // it's important we not just let the scheduler do
175 // this or it may resume a different goroutine.
179 // We'll resume here when the call returns.
181 // Restore locked state.
183 mp.lockedExt = lockedExt
187 // Undo the lockOSThread we did earlier.
190 gp.asyncSafePoint = false
// debugCallWrapArgs carries the dispatch PC and the calling goroutine
// from debugCallWrap to debugCallWrap1, passed via the new goroutine's
// param slot. (Field declarations are elided in this excerpt; see the
// dispatch/callingG accesses in debugCallWrap1.)
193 type debugCallWrapArgs struct {
198 // debugCallWrap1 is the continuation of debugCallWrap on the callee
// goroutine: it runs the dispatch function and then resumes the
// parked caller. (Doc comment truncated in this excerpt.)
200 func debugCallWrap1() {
// Recover the arguments stashed by debugCallWrap in gp.param.
202 args := (*debugCallWrapArgs)(gp.param)
203 dispatch, callingG := args.dispatch, args.callingG
206 // Dispatch call and trap panics.
207 debugCallWrap2(dispatch)
209 // Resume the caller goroutine.
210 getg().schedlink.set(callingG)
212 callingG := gp.schedlink.ptr()
215 // Unlock this goroutine from the M if necessary. The
216 // calling G will relock.
222 // Switch back to the calling goroutine. At some point
223 // the scheduler will schedule us again and we'll
228 casgstatus(gp, _Grunning, _Grunnable)
235 traceGoUnpark(callingG, 0)
// Make the parked caller runnable again and run it directly on this M.
237 casgstatus(callingG, _Gwaiting, _Grunnable)
238 execute(callingG, true)
// debugCallWrap2 calls the dispatch function at PC dispatch and
// reports any panic it recovers via debugCallPanicked.
242 func debugCallWrap2(dispatch uintptr) {
243 // Call the dispatch function and trap panics.
// Build a callable func value from the raw PC: wrap it in a funcval
// and reinterpret the funcval's address as a func-typed variable.
245 dispatchFV := funcval{dispatch}
246 *(*unsafe.Pointer)(unsafe.Pointer(&dispatchF)) = noescape(unsafe.Pointer(&dispatchFV))
252 debugCallPanicked(err)