src/runtime/debugcall.go
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build amd64 || arm64

package runtime

import (
        "internal/abi"
        "unsafe"
)

const (
        debugCallSystemStack = "executing on Go runtime stack"
        debugCallUnknownFunc = "call from unknown function"
        debugCallRuntime     = "call from within the Go runtime"
        debugCallUnsafePoint = "call not at safe point"
)

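// debugCallV2 and debugCallPanicked are defined in assembly for the
// supported architectures (see asm_amd64.s and asm_arm64.s); the
// declarations below only expose their signatures to Go code.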
func debugCallV2()
func debugCallPanicked(val any)

// debugCallCheck checks whether it is safe to inject a debugger
// function call with return PC pc. If not, it returns a string
// explaining why; if it is safe, it returns the empty string.
//
//go:nosplit
func debugCallCheck(pc uintptr) string {
        // No user calls from the system stack.
        if getg() != getg().m.curg {
                return debugCallSystemStack
        }
        if sp := getcallersp(); !(getg().stack.lo < sp && sp <= getg().stack.hi) {
                // Fast syscalls (nanotime) and racecall switch to the
                // g0 stack without switching g. We can't safely make
                // a call in this state. (We can't even safely
                // systemstack.)
                return debugCallSystemStack
        }

        // Switch to the system stack to avoid overflowing the user
        // stack.
        var ret string
        systemstack(func() {
                f := findfunc(pc)
                if !f.valid() {
                        ret = debugCallUnknownFunc
                        return
                }

                name := funcname(f)

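                // The debugCallN functions are assembly stubs that are part
                // of the injected-call machinery itself, named for the size
                // of the argument space they reserve.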
                switch name {
                case "debugCall32",
                        "debugCall64",
                        "debugCall128",
                        "debugCall256",
                        "debugCall512",
                        "debugCall1024",
                        "debugCall2048",
                        "debugCall4096",
                        "debugCall8192",
                        "debugCall16384",
                        "debugCall32768",
                        "debugCall65536":
                        // These functions are allowed so that the debugger can initiate multiple function calls.
                        // See: https://golang.org/cl/161137/
                        return
                }

                // Disallow calls from the runtime. We could
                // potentially make this condition tighter (e.g., not
                // when locks are held), but there are enough tightly
                // coded sequences (e.g., defer handling) that it's
                // better to play it safe.
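                // This is a hand-rolled prefix check; package strings is
                // not available inside the runtime.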
                if pfx := "runtime."; len(name) > len(pfx) && name[:len(pfx)] == pfx {
                        ret = debugCallRuntime
                        return
                }

                // Check that this isn't an unsafe-point.
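                // pc is a return PC, which points just past the call
                // instruction; back it up by one so the PCDATA lookup falls
                // within the call itself (unless pc is the function entry,
                // where there is nothing to back up over).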
                if pc != f.entry() {
                        pc--
                }
                up := pcdatavalue(f, abi.PCDATA_UnsafePoint, pc)
                if up != abi.UnsafePointSafe {
                        // Not at a safe point.
                        ret = debugCallUnsafePoint
                }
        })
        return ret
}

// debugCallWrap starts a new goroutine to run a debug call and blocks
// the calling goroutine. On the new goroutine, it prepares to recover
// panics from the debug call, and then calls the call dispatching
// function at PC dispatch.
//
// This must be deeply nosplit because there are untyped values on the
// stack from debugCallV2.
//
//go:nosplit
func debugCallWrap(dispatch uintptr) {
        var lockedm bool
        var lockedExt uint32
        callerpc := getcallerpc()
        gp := getg()

        // Create a new goroutine to execute the call on. Run this on
        // the system stack to avoid growing our stack.
        systemstack(func() {
                // TODO(mknyszek): It would be nice to wrap these arguments in an allocated
                // closure and start the goroutine with that closure, but the compiler disallows
                // implicit closure allocation in the runtime.
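                // A func value is represented as a pointer to a funcval, so
                // reinterpreting &fn as **funcval yields the *funcval that
                // newproc1 expects.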
                fn := debugCallWrap1
                newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), gp, callerpc)
                args := &debugCallWrapArgs{
                        dispatch: dispatch,
                        callingG: gp,
                }
                newg.param = unsafe.Pointer(args)

                // If the current G is locked, then transfer that
                // locked-ness to the new goroutine.
                if gp.lockedm != 0 {
                        // Save lock state to restore later.
                        mp := gp.m
                        if mp != gp.lockedm.ptr() {
                                throw("inconsistent lockedm")
                        }

                        lockedm = true
                        lockedExt = mp.lockedExt

                        // Transfer external lock count to internal so
                        // it can't be unlocked from the debug call.
                        mp.lockedInt++
                        mp.lockedExt = 0

                        mp.lockedg.set(newg)
                        newg.lockedm.set(mp)
                        gp.lockedm = 0
                }

                // Mark the calling goroutine as being at an async
                // safe-point, since it has a few conservative frames
                // at the bottom of the stack. This also prevents
                // stack shrinks.
                gp.asyncSafePoint = true

                // Stash newg away so we can execute it below (mcall's
                // closure can't capture anything).
                gp.schedlink.set(newg)
        })

        // Switch to the new goroutine.
        mcall(func(gp *g) {
                // Get newg.
                newg := gp.schedlink.ptr()
                gp.schedlink = 0

                // Park the calling goroutine.
                if traceEnabled() {
                        traceGoPark(traceBlockDebugCall, 1)
                }
                casGToWaiting(gp, _Grunning, waitReasonDebugCall)
                dropg()

                // Directly execute the new goroutine. The debug
                // protocol will continue on the new goroutine, so
                // it's important we not just let the scheduler do
                // this or it may resume a different goroutine.
                execute(newg, true)
        })

        // We'll resume here when the call returns.

        // Restore locked state.
        if lockedm {
                mp := gp.m
                mp.lockedExt = lockedExt
                mp.lockedInt--
                mp.lockedg.set(gp)
                gp.lockedm.set(mp)
        }

        gp.asyncSafePoint = false
}

type debugCallWrapArgs struct {
        dispatch uintptr
        callingG *g
}

// debugCallWrap1 is the continuation of debugCallWrap on the callee
// goroutine.
func debugCallWrap1() {
        gp := getg()
        args := (*debugCallWrapArgs)(gp.param)
        dispatch, callingG := args.dispatch, args.callingG
        gp.param = nil

        // Dispatch call and trap panics.
        debugCallWrap2(dispatch)

        // Resume the caller goroutine.
        getg().schedlink.set(callingG)
        mcall(func(gp *g) {
                callingG := gp.schedlink.ptr()
                gp.schedlink = 0

                // Unlock this goroutine from the M if necessary. The
                // calling G will relock.
                if gp.lockedm != 0 {
                        gp.lockedm = 0
                        gp.m.lockedg = 0
                }

                // Switch back to the calling goroutine. At some point
                // the scheduler will schedule us again and we'll
                // finish exiting.
                if traceEnabled() {
                        traceGoSched()
                }
                casgstatus(gp, _Grunning, _Grunnable)
                dropg()
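                // Park this goroutine on the global run queue (globrunqput
                // requires sched.lock) so a P eventually picks it up and it
                // can return from mcall and finish exiting.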
                lock(&sched.lock)
                globrunqput(gp)
                unlock(&sched.lock)

                if traceEnabled() {
                        traceGoUnpark(callingG, 0)
                }
                casgstatus(callingG, _Gwaiting, _Grunnable)
                execute(callingG, true)
        })
}

func debugCallWrap2(dispatch uintptr) {
        // Call the dispatch function and trap panics.
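        // Manually build a func() value whose code pointer is dispatch:
        // a Go func value is a pointer to a funcval, so construct one on
        // the stack and store its address into dispatchF. noescape keeps
        // escape analysis from concluding that dispatchFV escapes.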
        var dispatchF func()
        dispatchFV := funcval{dispatch}
        *(*unsafe.Pointer)(unsafe.Pointer(&dispatchF)) = noescape(unsafe.Pointer(&dispatchFV))

        var ok bool
        defer func() {
                if !ok {
                        err := recover()
                        debugCallPanicked(err)
                }
        }()
        dispatchF()
        ok = true
}
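
// Overall flow, as a rough sketch: the debugger stops a goroutine at a
// point that debugCallCheck accepts and injects a call to debugCallV2
// (assembly). debugCallV2 invokes debugCallWrap with the PC of the
// debugger-provided dispatch function; debugCallWrap hands execution to
// a fresh goroutine running debugCallWrap1, which calls the dispatch
// function via debugCallWrap2, reports any panic through
// debugCallPanicked, and finally switches back to the original
// goroutine.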