1 // Copyright 2018 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // TODO: This test could be implemented on all (most?) UNIXes if we
6 // added syscall.Tgkill more widely.
8 // We skip all of these tests under race mode because our test thread
9 // spends all of its time in the race runtime, which isn't a safe
12 //go:build (amd64 || arm64 || ppc64le) && linux && !race
// startDebugCallWorker spins up a debugCallWorker goroutine pinned to an OS
// thread, waits for its *runtime.G, and returns that G plus an `after`
// cleanup func that stops the worker and restores GOMAXPROCS/GC settings.
// NOTE(review): this listing is elided — several original lines are missing
// (e.g. the skipUnderDebugger call, the `var stop uint32` declaration, the
// receive from `ready`, and the closure/return structure around lines 60-71).
29 func startDebugCallWorker(t *testing.T) (g *runtime.G, after func()) {
30 // This can deadlock if run under a debugger because it
31 // depends on catching SIGTRAP, which is usually swallowed by
35 // This can deadlock if there aren't enough threads or if a GC
36 // tries to interrupt an atomic loop (see issue #10958). Execute
37 // an extra GC to ensure even the sweep phase is done (out of
38 // caution to prevent #49370 from happening).
39 // TODO(mknyszek): This extra GC cycle is likely unnecessary
40 // because preemption (which may happen during the sweep phase)
41 // isn't much of an issue anymore thanks to asynchronous preemption.
42 // The biggest risk is having a write barrier in the debug call
43 // injection test code fire, because it runs in a signal handler
44 // and may not have a P.
46 // We use 8 Ps so there's room for the debug call worker,
47 // something that's trying to preempt the call worker, and the
48 // goroutine that's trying to stop the call worker.
// Save old settings so the elided cleanup func can restore them below.
49 ogomaxprocs := runtime.GOMAXPROCS(8)
50 ogcpercent := debug.SetGCPercent(-1)
53 // ready is a buffered channel so debugCallWorker won't block
54 // on sending to it. This makes it less likely we'll catch
55 // debugCallWorker while it's in the runtime.
56 ready := make(chan *runtime.G, 1)
// NOTE(review): `stop` is declared on an elided line (orig. 57).
58 done := make(chan error)
59 go debugCallWorker(ready, &stop, done)
// NOTE(review): the lines below belong to the returned `after` closure in
// the original file; the closure boundaries are elided here.
62 atomic.StoreUint32(&stop, 1)
// Restore the GOMAXPROCS / GC-percent values saved above.
67 runtime.GOMAXPROCS(ogomaxprocs)
68 debug.SetGCPercent(ogcpercent)
// debugCallWorker is the target goroutine for call injection. It locks
// itself to an OS thread (required so debugCallTKill can target its tid),
// publishes its G on `ready`, then runs debugCallWorker2 until *stop is set,
// reporting a verification failure (if any) on `done`.
// NOTE(review): elided listing — the declaration of `x`, the check that
// x == 2, and the surrounding control flow (orig. lines 75-85) are missing.
72 func debugCallWorker(ready chan<- *runtime.G, stop *uint32, done chan<- error) {
73 runtime.LockOSThread()
74 defer runtime.UnlockOSThread()
76 ready <- runtime.Getg()
79 debugCallWorker2(stop, &x)
// Fires only when the injected-call machinery failed to adjust the pointer
// register holding &x during stack/register adjustment.
81 done <- fmt.Errorf("want x = 2, got %d; register pointer not adjusted?", x)
86 // Don't inline this function, since we want to test adjusting
87 // pointers in the arguments.
// debugCallWorker2 spins until *stop becomes nonzero, keeping x live in a
// register so the debug-call injector must adjust that pointer register.
// NOTE(review): elided — the //go:noinline directive (implied by the comment
// above), the loop body, and the final write to *x are not shown here.
90 func debugCallWorker2(stop *uint32, x *int) {
91 for atomic.LoadUint32(stop) == 0 {
92 // Strongly encourage x to live in a register so we
93 // can test pointer register adjustment.
// debugCallTKill sends SIGTRAP to the given thread (tid) of this process via
// tgkill(2); InjectDebugCall uses it to interrupt the target goroutine's
// thread. NOTE(review): closing brace elided in this listing.
99 func debugCallTKill(tid int) error {
100 return syscall.Tgkill(syscall.Getpid(), tid, syscall.SIGTRAP)
103 // skipUnderDebugger skips the current test when running under a
104 // debugger (specifically if this process has a tracer). This is
// Reads the TracerPid field of /proc/<pid>/status; a nonzero value means a
// tracer (debugger) is attached, in which case the test is skipped because
// the tracer would swallow the SIGTRAP this test depends on.
// NOTE(review): elided listing — the early-return branches after each Logf
// and the function's closing braces are not shown.
106 func skipUnderDebugger(t *testing.T) {
107 pid := syscall.Getpid()
108 status, err := os.ReadFile(fmt.Sprintf("/proc/%d/status", pid))
// Best effort: if /proc is unavailable, log and (per elided code) bail out.
110 t.Logf("couldn't get proc tracer: %s", err)
113 re := regexp.MustCompile(`TracerPid:\s+([0-9]+)`)
114 sub := re.FindSubmatch(status)
116 t.Logf("couldn't find proc tracer PID")
// TracerPid of "0" means no tracer attached; elided code returns here.
119 if string(sub[1]) == "0" {
122 t.Skip("test will deadlock under a debugger")
// TestDebugCall injects a simple function call into the debugCallWorker
// goroutine and verifies argument and result passing through both the
// register ABI (regs.Ints/regs.Floats) and the stack (stackArgs).
// NOTE(review): elided listing — the stackArgs fields, the regs/args setup,
// the stack-argument fallback branches, and several braces are not shown.
// Fix applied: line 156 contained mojibake `®s` (HTML entity `&reg;`
// collapsed); restored to `&regs`, the address of the register state passed
// to runtime.InjectDebugCall.
125 func TestDebugCall(t *testing.T) {
126 g, after := startDebugCallWorker(t)
129 type stackArgs struct {
136 // Inject a call into the debugCallWorker goroutine and test
137 // basic argument and result passing.
138 fn := func(x int, y float64) (y0Ret int, y1Ret float64) {
139 return x + 1, y + 1.0
143 intRegs := regs.Ints[:]
144 floatRegs := regs.Floats[:]
145 fval := float64(42.0)
// Platforms with a register ABI pass args in regs; otherwise (elided branch)
// they go on the stack via `args`.
146 if len(intRegs) > 0 {
148 floatRegs[0] = math.Float64bits(fval)
156 if _, err := runtime.InjectDebugCall(g, fn, &regs, args, debugCallTKill, false); err != nil {
161 if len(intRegs) > 0 {
162 result0 = int(intRegs[0])
163 result1 = math.Float64frombits(floatRegs[0])
169 t.Errorf("want 43, got %d", result0)
171 if result1 != fval+1 {
172 t.Errorf("want 43, got %f", result1)
// TestDebugCallLarge injects a call whose argument/result frame is large
// ([N]int in, [N]int out), exercising debug-call frame copying beyond what
// registers can hold. NOTE(review): elided listing — N's definition, the
// args/want setup, and the loop/branch bodies are not shown.
176 func TestDebugCallLarge(t *testing.T) {
177 g, after := startDebugCallWorker(t)
180 // Inject a call with a large call frame.
186 fn := func(in [N]int) (out [N]int) {
193 for i := range args.in {
197 if _, err := runtime.InjectDebugCall(g, fn, nil, &args, debugCallTKill, false); err != nil {
200 if want != args.out {
201 t.Fatalf("want %v, got %v", want, args.out)
// TestDebugCallGC injects runtime.GC itself as the debug call, verifying a
// full collection can run from an injected call without breaking the target
// goroutine. NOTE(review): elided — the error handling body and closing
// braces are not shown.
205 func TestDebugCallGC(t *testing.T) {
206 g, after := startDebugCallWorker(t)
209 // Inject a call that performs a GC.
210 if _, err := runtime.InjectDebugCall(g, runtime.GC, nil, nil, debugCallTKill, false); err != nil {
// TestDebugCallGrowStack injects a call that forces a stack growth
// (growStack), checking that stack copying during an injected call does not
// corrupt the worker's pointers. NOTE(review): elided — error handling and
// closing braces are not shown; growStack is defined elsewhere in the file.
215 func TestDebugCallGrowStack(t *testing.T) {
216 g, after := startDebugCallWorker(t)
219 // Inject a call that grows the stack. debugCallWorker checks
220 // for stack pointer breakage.
221 if _, err := runtime.InjectDebugCall(g, func() { growStack(nil) }, nil, nil, debugCallTKill, false); err != nil {
// debugCallUnsafePointWorker parks a goroutine in a loop that never reaches
// a safe-point, so TestDebugCallUnsafePoint can verify that call injection
// is refused there. NOTE(review): elided — the //go:nosplit directive
// (implied by the comment below) and part of the loop/brace structure are
// not shown.
227 func debugCallUnsafePointWorker(gpp **runtime.G, ready, stop *uint32) {
228 // The nosplit causes this function to not contain safe-points
230 runtime.LockOSThread()
231 defer runtime.UnlockOSThread()
// Publish our G to the test, then signal readiness each iteration until
// the test sets *stop.
233 *gpp = runtime.Getg()
235 for atomic.LoadUint32(stop) == 0 {
236 atomic.StoreUint32(ready, 1)
// TestDebugCallUnsafePoint verifies that runtime.InjectDebugCall refuses to
// inject while the target goroutine is stopped at an unsafe point, returning
// the exact error "call not at safe point". NOTE(review): elided listing —
// the skipUnderDebugger call, the GC-wait, the `var g *runtime.G`
// declaration, and loop/brace bodies are not shown.
240 func TestDebugCallUnsafePoint(t *testing.T) {
243 // This can deadlock if there aren't enough threads or if a GC
244 // tries to interrupt an atomic loop (see issue #10958).
245 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))
247 // InjectDebugCall cannot be executed while a GC is actively in
248 // progress. Wait until the current GC is done, and turn it off.
252 defer debug.SetGCPercent(debug.SetGCPercent(-1))
254 // Test that the runtime refuses call injection at unsafe points.
256 var ready, stop uint32
257 defer atomic.StoreUint32(&stop, 1)
258 go debugCallUnsafePointWorker(&g, &ready, &stop)
// Spin until the worker has entered its unsafe-point loop.
259 for atomic.LoadUint32(&ready) == 0 {
// The final `true` asks InjectDebugCall to return an error (rather than
// retry) when the goroutine is not at a safe point.
263 _, err := runtime.InjectDebugCall(g, func() {}, nil, nil, debugCallTKill, true)
264 if msg := "call not at safe point"; err == nil || err.Error() != msg {
265 t.Fatalf("want %q, got %s", msg, err)
// TestDebugCallPanic injects a call that panics and verifies the panic value
// (the string "test") is recovered and returned by InjectDebugCall rather
// than crashing the target goroutine. NOTE(review): elided listing — the
// skipUnderDebugger call, the `var stop uint32` and `var g *runtime.G`
// declarations, the goroutine literal wrapping lines 292-296, the receive
// from `ready`, and the err check before line 304 are not shown.
269 func TestDebugCallPanic(t *testing.T) {
272 // This can deadlock if there aren't enough threads.
273 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))
275 // InjectDebugCall cannot be executed while a GC is actively in
276 // progress. Wait until the current GC is done, and turn it off.
278 // See #10958 and #49370.
279 defer debug.SetGCPercent(debug.SetGCPercent(-1))
280 // TODO(mknyszek): This extra GC cycle is likely unnecessary
281 // because preemption (which may happen during the sweep phase)
282 // isn't much of an issue anymore thanks to asynchronous preemption.
283 // The biggest risk is having a write barrier in the debug call
284 // injection test code fire, because it runs in a signal handler
285 // and may not have a P.
288 ready := make(chan *runtime.G)
290 defer atomic.StoreUint32(&stop, 1)
// NOTE(review): the next four lines run inside an elided goroutine literal
// that publishes its G and spins until stop is set.
292 runtime.LockOSThread()
293 defer runtime.UnlockOSThread()
294 ready <- runtime.Getg()
295 for atomic.LoadUint32(&stop) == 0 {
// p receives the recovered panic value from the injected call.
300 p, err := runtime.InjectDebugCall(g, func() { panic("test") }, nil, nil, debugCallTKill, false)
304 if ps, ok := p.(string); !ok || ps != "test" {
305 t.Fatalf("wanted panic %v, got %v", "test", p)