1 // Copyright 2018 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
	// Reason strings returned by debugCallCheck when a debugger call
	// injection cannot proceed. These are part of the debugger
	// protocol (reported verbatim to the debugger), so their exact
	// text matters — do not reword them.
	// NOTE(review): the enclosing `const (` opener is outside this
	// excerpt.
	debugCallSystemStack = "executing on Go runtime stack"
	debugCallUnknownFunc = "call from unknown function"
	debugCallRuntime     = "call from within the Go runtime"
	debugCallUnsafePoint = "call not at safe point"
19 func debugCallPanicked(val interface{})
// debugCallCheck checks whether it is safe to inject a debugger
// function call with return PC pc. If not, it returns a string
// explaining why; the strings are the debugCall* reason constants.
//
// NOTE(review): this excerpt is missing interior lines (closing
// braces, the declaration of ret, the systemstack switch, the
// findfunc lookup). The comments below describe only the code that
// is visible here.
func debugCallCheck(pc uintptr) string {
	// No user calls from the system stack.
	if getg() != getg().m.curg {
		return debugCallSystemStack
	// The caller's SP must lie within the user g's stack bounds.
	// If it doesn't, we are executing on some other stack (g0) even
	// though g still points at the user goroutine.
	if sp := getcallersp(); !(getg().stack.lo < sp && sp <= getg().stack.hi) {
		// Fast syscalls (nanotime) and racecall switch to the
		// g0 stack without switching g. We can't safely make
		// a call in this state. (We can't even safely
		return debugCallSystemStack

	// Switch to the system stack to avoid overflowing the user
	// Default to rejection: an unresolvable PC is not callable-from.
	ret = debugCallUnknownFunc
	// These functions are allowed so that the debugger can initiate multiple function calls.
	// See: https://golang.org/cl/161137/
	// Disallow calls from the runtime. We could
	// potentially make this condition tighter (e.g., not
	// when locks are held), but there are enough tightly
	// coded sequences (e.g., defer handling) that it's
	// better to play it safe.
	if pfx := "runtime."; len(name) > len(pfx) && name[:len(pfx)] == pfx {
		ret = debugCallRuntime
	// Legacy (pre-regabi-liveness) scheme: judge safety by the
	// presence of a register pointer map at pc.
	if !go115ReduceLiveness {
		// Look up PC's register map.
		pcdata = pcdatavalue(f, _PCDATA_RegMapIndex, pc, nil)
		pcdata = 0 // in prologue
		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_RegPointerMaps))
		// pcdata == -2 marks an explicit unsafe point; a nil
		// register-map table means registers can't be described
		// here at all. Either way, injection is unsafe.
		if pcdata == -2 || stkmap == nil {
			// Not at a safe point.
			ret = debugCallUnsafePoint
	// New scheme: consult the dedicated unsafe-point PCDATA table.
	// Check that this isn't an unsafe-point.
	up := pcdatavalue(f, _PCDATA_UnsafePoint, pc, nil)
	if up != _PCDATA_UnsafePointSafe {
		// Not at a safe point.
		ret = debugCallUnsafePoint
// debugCallWrap starts a new goroutine to run a debug call and blocks
// the calling goroutine. On the goroutine, it prepares to recover
// panics from the debug call, and then calls the call dispatching
// function at PC dispatch.
//
// This must be deeply nosplit because there are untyped values on the
// stack from debugCallV1.
//
// NOTE(review): this excerpt is missing interior lines (the args/fn
// declarations, the systemstack/mcall closures and their braces, the
// lock-state bookkeeping). Comments describe only the visible code.
func debugCallWrap(dispatch uintptr) {
	callerpc := getcallerpc()
	// Create a new goroutine to execute the call on. Run this on
	// the system stack to avoid growing our stack.
	args.dispatch = dispatch
	// Hand-roll goroutine creation so the new g starts at fn with
	// args copied onto its stack, inheriting callerpc for traceback.
	newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), unsafe.Pointer(&args), int32(unsafe.Sizeof(args)), gp, callerpc)
	// If the current G is locked, then transfer that
	// locked-ness to the new goroutine.
	// Save lock state to restore later.
	if mp != gp.lockedm.ptr() {
		throw("inconsistent lockedm")
	lockedExt = mp.lockedExt
	// Transfer external lock count to internal so
	// it can't be unlocked from the debug call.
	// Mark the calling goroutine as being at an async
	// safe-point, since it has a few conservative frames
	// at the bottom of the stack. This also prevents
	gp.asyncSafePoint = true
	// Stash newg away so we can execute it below (mcall's
	// closure can't capture anything).
	gp.schedlink.set(newg)
	// Switch to the new goroutine.
	// Recover newg from schedlink, where it was stashed above.
	newg := gp.schedlink.ptr()
	// Park the calling goroutine.
	gp.waitreason = waitReasonDebugCall
	traceGoPark(traceEvGoBlock, 1)
	casgstatus(gp, _Grunning, _Gwaiting)
	// Directly execute the new goroutine. The debug
	// protocol will continue on the new goroutine, so
	// it's important we not just let the scheduler do
	// this or it may resume a different goroutine.
	// We'll resume here when the call returns.
	// Restore locked state.
	mp.lockedExt = lockedExt
	// We are back on the caller's frames; conservative scanning of
	// them is no longer required.
	gp.asyncSafePoint = false
// debugCallWrap1 is the continuation of debugCallWrap on the callee
// goroutine: it runs the dispatched call, then hands control back to
// the goroutine that initiated the debug call.
//
// NOTE(review): interior lines (braces, the gp/mp declarations, the
// lock-transfer code) are missing from this excerpt; comments cover
// only the visible code.
func debugCallWrap1(dispatch uintptr, callingG *g) {
	// Dispatch call and trap panics.
	debugCallWrap2(dispatch)
	// Resume the caller goroutine.
	// Stash callingG in schedlink so the mcall continuation (which
	// cannot capture variables) can retrieve it below.
	getg().schedlink.set(callingG)
	callingG := gp.schedlink.ptr()
	// Unlock this goroutine from the M if necessary. The
	// calling G will relock.
	// Switch back to the calling goroutine. At some point
	// the scheduler will schedule us again and we'll
	// This (callee) goroutine stops running but stays runnable...
	casgstatus(gp, _Grunning, _Grunnable)
	// ...while the parked caller is woken and run directly, so the
	// debug protocol continues on the correct goroutine.
	traceGoUnpark(callingG, 0)
	casgstatus(callingG, _Gwaiting, _Grunnable)
	execute(callingG, true)
// debugCallWrap2 invokes the dispatch function (given as a raw code
// pointer) and traps any panic it raises, reporting it via
// debugCallPanicked.
//
// NOTE(review): the function body is truncated in this excerpt (the
// deferred recover and the call itself are not visible).
func debugCallWrap2(dispatch uintptr) {
	// Call the dispatch function and trap panics.
	// Build a funcval whose code pointer is dispatch, then launder
	// its address into the func variable dispatchF via unsafe so it
	// can be invoked like an ordinary closure. noescape hides the
	// pointer from escape analysis.
	dispatchFV := funcval{dispatch}
	*(*unsafe.Pointer)(unsafe.Pointer(&dispatchF)) = noescape(unsafe.Pointer(&dispatchFV))
	debugCallPanicked(err)