// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build race

#include "go_asm.h"
#include "funcdata.h"
#include "textflag.h"

// The following thunks allow calling the gcc-compiled race runtime directly
// from Go code without going all the way through cgo.
// First, it's much faster (up to 50% speedup for real Go programs).
// Second, it eliminates race-related special cases from cgocall and the scheduler.
// Third, in the long term it will allow removing the cyclic runtime/race dependency on cmd/go.

// A brief recap of the s390x C calling convention.
// Arguments are passed in R2...R6, the rest are passed on the stack.
// Callee-saved registers are: R6...R13, R15.
// Temporary registers are: R0...R5, R14.

// When calling racecalladdr, R1 is the call target address.

// The race ctx, ThreadState *thr below, is passed in R2 and loaded in racecalladdr.
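
// Illustrative mapping, derived from the convention above (not additional
// behavior): for a call like __tsan_read_pc(thr, addr, callpc, pc), the
// thunks below end up with thr in R2, addr in R3, callpc in R4 and pc in R5,
// while R1 holds the callee address and R14 the return address.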

// func runtime·raceread(addr uintptr)
// Called from instrumented code.
TEXT	runtime·raceread(SB), NOSPLIT, $0-8
	// void __tsan_read(ThreadState *thr, void *addr, void *pc);
	MOVD	$__tsan_read(SB), R1
	MOVD	addr+0(FP), R3
	MOVD	R14, R4
	JMP	racecalladdr<>(SB)

// func runtime·RaceRead(addr uintptr)
TEXT	runtime·RaceRead(SB), NOSPLIT, $0-8
	// This needs to be a tail call, because raceread reads caller pc.
	JMP	runtime·raceread(SB)

// func runtime·racereadpc(void *addr, void *callpc, void *pc)
TEXT	runtime·racereadpc(SB), NOSPLIT, $0-24
	// void __tsan_read_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
	MOVD	$__tsan_read_pc(SB), R1
	LMG	addr+0(FP), R3, R5
	JMP	racecalladdr<>(SB)

// func runtime·racewrite(addr uintptr)
// Called from instrumented code.
TEXT	runtime·racewrite(SB), NOSPLIT, $0-8
	// void __tsan_write(ThreadState *thr, void *addr, void *pc);
	MOVD	$__tsan_write(SB), R1
	MOVD	addr+0(FP), R3
	MOVD	R14, R4
	JMP	racecalladdr<>(SB)

// func runtime·RaceWrite(addr uintptr)
TEXT	runtime·RaceWrite(SB), NOSPLIT, $0-8
	// This needs to be a tail call, because racewrite reads caller pc.
	JMP	runtime·racewrite(SB)

// func runtime·racewritepc(void *addr, void *callpc, void *pc)
TEXT	runtime·racewritepc(SB), NOSPLIT, $0-24
	// void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
	MOVD	$__tsan_write_pc(SB), R1
	LMG	addr+0(FP), R3, R5
	JMP	racecalladdr<>(SB)

// func runtime·racereadrange(addr, size uintptr)
// Called from instrumented code.
TEXT	runtime·racereadrange(SB), NOSPLIT, $0-16
	// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_read_range(SB), R1
	LMG	addr+0(FP), R3, R4
	MOVD	R14, R5
	JMP	racecalladdr<>(SB)

// func runtime·RaceReadRange(addr, size uintptr)
TEXT	runtime·RaceReadRange(SB), NOSPLIT, $0-16
	// This needs to be a tail call, because racereadrange reads caller pc.
	JMP	runtime·racereadrange(SB)

// func runtime·racereadrangepc1(void *addr, uintptr sz, void *pc)
TEXT	runtime·racereadrangepc1(SB), NOSPLIT, $0-24
	// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_read_range(SB), R1
	LMG	addr+0(FP), R3, R5
	// pc is an interceptor address, but TSan expects it to point to the
	// middle of an interceptor (see LLVM's SCOPED_INTERCEPTOR_RAW).
	ADD	$2, R5
	JMP	racecalladdr<>(SB)

// func runtime·racewriterange(addr, size uintptr)
// Called from instrumented code.
TEXT	runtime·racewriterange(SB), NOSPLIT, $0-16
	// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_write_range(SB), R1
	LMG	addr+0(FP), R3, R4
	MOVD	R14, R5
	JMP	racecalladdr<>(SB)

// func runtime·RaceWriteRange(addr, size uintptr)
TEXT	runtime·RaceWriteRange(SB), NOSPLIT, $0-16
	// This needs to be a tail call, because racewriterange reads caller pc.
	JMP	runtime·racewriterange(SB)

// func runtime·racewriterangepc1(void *addr, uintptr sz, void *pc)
TEXT	runtime·racewriterangepc1(SB), NOSPLIT, $0-24
	// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_write_range(SB), R1
	LMG	addr+0(FP), R3, R5
	// pc is an interceptor address, but TSan expects it to point to the
	// middle of an interceptor (see LLVM's SCOPED_INTERCEPTOR_RAW).
	ADD	$2, R5
	JMP	racecalladdr<>(SB)

// If R3 is out of range, do nothing. Otherwise, set up the goroutine context
// and invoke racecall. Other arguments are already set.
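// In other words (a summary of the checks below, not extra logic): the race
// runtime is called only when R3 lies in the instrumented heap range
// [racearenastart, racearenaend) or in the instrumented data/bss range
// [racedatastart, racedataend); all other addresses are silently ignored.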
TEXT	racecalladdr<>(SB), NOSPLIT, $0-0
	MOVD	runtime·racearenastart(SB), R0
	CMPUBLT	R3, R0, data		// Before racearena start?
	MOVD	runtime·racearenaend(SB), R0
	CMPUBLT	R3, R0, call		// Before racearena end?
data:
	MOVD	runtime·racedatastart(SB), R0
	CMPUBLT	R3, R0, ret		// Before racedata start?
	MOVD	runtime·racedataend(SB), R0
	CMPUBGE	R3, R0, ret		// At or after racedata end?
call:
	MOVD	g_racectx(g), R2	// ThreadState *.
	BR	racecall<>(SB)
ret:
	RET

// func runtime·racefuncenter(pc uintptr)
// Called from instrumented code.
TEXT	runtime·racefuncenter(SB), NOSPLIT, $0-8
	MOVD	callpc+0(FP), R3
	JMP	racefuncenter<>(SB)

// Common code for racefuncenter
// R3 = caller's return address
TEXT	racefuncenter<>(SB), NOSPLIT, $0-0
	// void __tsan_func_enter(ThreadState *thr, void *pc);
	MOVD	$__tsan_func_enter(SB), R1
	MOVD	g_racectx(g), R2
	BL	racecall<>(SB)
	RET

// func runtime·racefuncexit()
// Called from instrumented code.
TEXT	runtime·racefuncexit(SB), NOSPLIT, $0-0
	// void __tsan_func_exit(ThreadState *thr);
	MOVD	$__tsan_func_exit(SB), R1
	MOVD	g_racectx(g), R2
	JMP	racecall<>(SB)

// Atomic operations for sync/atomic package.
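//
// Rough call flow for the wrappers below (descriptive, inferred from the code
// itself): with the race detector enabled, a call such as
// sync/atomic.LoadInt32 lands in the corresponding wrapper here; the wrapper
// selects the matching __tsan_go_atomic* entry point in R1 and branches to
// racecallatomic, which hands TSan the caller PC and a pointer to the Go
// argument frame so that TSan performs the atomic operation and stores the
// result back into that frame.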

// Load

TEXT	sync∕atomic·LoadInt32(SB), NOSPLIT, $0-12
	GO_ARGS
	MOVD	$__tsan_go_atomic32_load(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·LoadInt64(SB), NOSPLIT, $0-16
	GO_ARGS
	MOVD	$__tsan_go_atomic64_load(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·LoadUint32(SB), NOSPLIT, $0-12
	GO_ARGS
	JMP	sync∕atomic·LoadInt32(SB)

TEXT	sync∕atomic·LoadUint64(SB), NOSPLIT, $0-16
	GO_ARGS
	JMP	sync∕atomic·LoadInt64(SB)

TEXT	sync∕atomic·LoadUintptr(SB), NOSPLIT, $0-16
	GO_ARGS
	JMP	sync∕atomic·LoadInt64(SB)

TEXT	sync∕atomic·LoadPointer(SB), NOSPLIT, $0-16
	GO_ARGS
	JMP	sync∕atomic·LoadInt64(SB)

// Store

TEXT	sync∕atomic·StoreInt32(SB), NOSPLIT, $0-12
	GO_ARGS
	MOVD	$__tsan_go_atomic32_store(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·StoreInt64(SB), NOSPLIT, $0-16
	GO_ARGS
	MOVD	$__tsan_go_atomic64_store(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·StoreUint32(SB), NOSPLIT, $0-12
	GO_ARGS
	JMP	sync∕atomic·StoreInt32(SB)

TEXT	sync∕atomic·StoreUint64(SB), NOSPLIT, $0-16
	GO_ARGS
	JMP	sync∕atomic·StoreInt64(SB)

TEXT	sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-16
	GO_ARGS
	JMP	sync∕atomic·StoreInt64(SB)

// Swap

TEXT	sync∕atomic·SwapInt32(SB), NOSPLIT, $0-20
	GO_ARGS
	MOVD	$__tsan_go_atomic32_exchange(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·SwapInt64(SB), NOSPLIT, $0-24
	GO_ARGS
	MOVD	$__tsan_go_atomic64_exchange(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·SwapUint32(SB), NOSPLIT, $0-20
	GO_ARGS
	JMP	sync∕atomic·SwapInt32(SB)

TEXT	sync∕atomic·SwapUint64(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·SwapInt64(SB)

TEXT	sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·SwapInt64(SB)

// Add

TEXT	sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
	GO_ARGS
	MOVD	$__tsan_go_atomic32_fetch_add(SB), R1
	BL	racecallatomic<>(SB)
	// TSan performed fetch_add, but Go needs add_fetch.
	MOVW	add+8(FP), R0
	MOVW	ret+16(FP), R1
	ADD	R1, R0, R0
	MOVW	R0, ret+16(FP)
	RET
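
// Worked example (illustrative only): if *addr held 5 and delta is 3,
// __tsan_go_atomic32_fetch_add leaves the old value 5 in the result slot,
// while Go's AddInt32 must return the new value 8, which is why delta is
// added back into the result above.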

TEXT	sync∕atomic·AddInt64(SB), NOSPLIT, $0-24
	GO_ARGS
	MOVD	$__tsan_go_atomic64_fetch_add(SB), R1
	BL	racecallatomic<>(SB)
	// TSan performed fetch_add, but Go needs add_fetch.
	MOVD	add+8(FP), R0
	MOVD	ret+16(FP), R1
	ADD	R1, R0, R0
	MOVD	R0, ret+16(FP)
	RET

TEXT	sync∕atomic·AddUint32(SB), NOSPLIT, $0-20
	GO_ARGS
	JMP	sync∕atomic·AddInt32(SB)

TEXT	sync∕atomic·AddUint64(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·AddInt64(SB)

TEXT	sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·AddInt64(SB)

// CompareAndSwap

TEXT	sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
	GO_ARGS
	MOVD	$__tsan_go_atomic32_compare_exchange(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-25
	GO_ARGS
	MOVD	$__tsan_go_atomic64_compare_exchange(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0-17
	GO_ARGS
	JMP	sync∕atomic·CompareAndSwapInt32(SB)

TEXT	sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0-25
	GO_ARGS
	JMP	sync∕atomic·CompareAndSwapInt64(SB)

TEXT	sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-25
	GO_ARGS
	JMP	sync∕atomic·CompareAndSwapInt64(SB)

// Common code for atomic operations. Calls R1.
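// Note (a restatement of the comments in the body): racecallatomic is reached
// by BL from one of the sync/atomic wrappers above, which was in turn reached
// by BL from user code, so the original caller's PC is found at 8(R15) and the
// wrapper's argument block starts at 24(R15).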
TEXT	racecallatomic<>(SB), NOSPLIT, $0
	MOVD	24(R15), R5			// Address (arg1, after 2xBL).
	// If we pass an invalid pointer to the TSan runtime, it will cause a
	// "fatal error: unknown caller pc". So trigger a SEGV here instead.
	MOVB	(R5), R0
	MOVD	runtime·racearenastart(SB), R0
	CMPUBLT	R5, R0, racecallatomic_data	// Before racearena start?
	MOVD	runtime·racearenaend(SB), R0
	CMPUBLT	R5, R0, racecallatomic_ok	// Before racearena end?
racecallatomic_data:
	MOVD	runtime·racedatastart(SB), R0
	CMPUBLT	R5, R0, racecallatomic_ignore	// Before racedata start?
	MOVD	runtime·racedataend(SB), R0
	CMPUBGE	R5, R0, racecallatomic_ignore	// At or after racedata end?
racecallatomic_ok:
	MOVD	g_racectx(g), R2		// ThreadState *.
	MOVD	8(R15), R3			// Caller PC.
	MOVD	R14, R4				// PC.
	ADD	$24, R15, R5			// Arguments.
	// Tail call fails to restore R15, so use a normal one.
	BL	racecall<>(SB)
	RET
racecallatomic_ignore:
	// Call __tsan_go_ignore_sync_begin to ignore synchronization during
	// the atomic op. An attempt to synchronize on the address would cause
	// a crash.
	MOVD	R1, R6				// Save target function.
	MOVD	R14, R7				// Save PC.
	MOVD	$__tsan_go_ignore_sync_begin(SB), R1
	MOVD	g_racectx(g), R2		// ThreadState *.
	BL	racecall<>(SB)
	MOVD	R6, R1				// Restore target function.
	MOVD	g_racectx(g), R2		// ThreadState *.
	MOVD	8(R15), R3			// Caller PC.
	MOVD	R7, R4				// PC.
	ADD	$24, R15, R5			// Arguments.
	BL	racecall<>(SB)
	MOVD	$__tsan_go_ignore_sync_end(SB), R1
	MOVD	g_racectx(g), R2		// ThreadState *.
	BL	racecall<>(SB)
	RET

// func runtime·racecall(void(*f)(...), ...)
// Calls C function f from race runtime and passes up to 4 arguments to it.
// The arguments are never heap-object-preserving pointers, so we pretend there
// are no arguments.
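// On the Go side this is declared (in runtime/race.go) roughly as
//	func racecall(fn *byte, arg0, arg1, arg2, arg3 uintptr)
// The declaration there is authoritative; this note only records the argument
// layout that the code below expects.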
TEXT	runtime·racecall(SB), NOSPLIT, $0-0
	MOVD	fn+0(FP), R1
	MOVD	arg0+8(FP), R2
	MOVD	arg1+16(FP), R3
	MOVD	arg2+24(FP), R4
	MOVD	arg3+32(FP), R5
	JMP	racecall<>(SB)

// Switches SP to g0 stack and calls R1. Arguments are already set.
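// (Context, not specific to this file: the C race runtime expects a
// conventional, sufficiently large system stack, while goroutine stacks are
// small and can move, so the call is made on the g0 stack.)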
TEXT	racecall<>(SB), NOSPLIT, $0-0
	BL	runtime·save_g(SB)		// Save g for callbacks.
	MOVD	R15, R7				// Save SP.
	MOVD	g_m(g), R8			// R8 = thread.
	MOVD	m_g0(R8), R8			// R8 = g0.
	CMPBEQ	R8, g, call			// Already on g0?
	MOVD	(g_sched+gobuf_sp)(R8), R15	// Switch SP to g0.
call:	SUB	$160, R15			// Allocate C frame.
	BL	R1				// Call C code.
	MOVD	R7, R15				// Restore SP.
	RET

// C->Go callback thunk that allows C code to call runtime·racesymbolize.
// racecall has only switched SP; finish the g->g0 switch by setting the
// correct g. R2 contains the command code, R3 contains command-specific
// context. See racecallback for command codes.
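//
// Sketch of the two paths below (descriptive only): command 0, raceGetProcCmd,
// is answered in place by copying the current P's raceprocctx into *R3 and
// returning straight to C; any other command switches to g0 and forwards the
// (command, context) pair to runtime·racecallback as a regular Go call.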
TEXT	runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0
	STMG	R6, R15, 48(R15)		// Save non-volatile regs.
	BL	runtime·load_g(SB)		// Saved by racecall.
	CMPBNE	R2, $0, rest			// raceGetProcCmd?
	MOVD	g_m(g), R2			// R2 = thread.
	MOVD	m_p(R2), R2			// R2 = processor.
	MVC	$8, p_raceprocctx(R2), (R3)	// *R3 = ThreadState *.
	LMG	48(R15), R6, R15		// Restore non-volatile regs.
	BR	R14				// Return to C.
rest:	MOVD	g_m(g), R4			// R4 = current thread.
	MOVD	m_g0(R4), g			// Switch to g0.
	SUB	$24, R15			// Allocate Go argument slots.
	STMG	R2, R3, 8(R15)			// Fill Go frame.
	BL	runtime·racecallback(SB)	// Call Go code.
	LMG	72(R15), R6, R15		// Restore non-volatile regs.
	BR	R14				// Return to C.