1 // Copyright 2012 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
// Public race detection API, present iff built with -race.
// RaceRead informs the race detector of a read of the byte at addr.
// Bodyless declaration: implemented in assembly (race_amd64.s and friends).
func RaceRead(addr unsafe.Pointer)

// RaceWrite informs the race detector of a write of the byte at addr.
func RaceWrite(addr unsafe.Pointer)

// RaceReadRange informs the race detector of a read of len bytes starting at addr.
func RaceReadRange(addr unsafe.Pointer, len int)

// RaceWriteRange informs the race detector of a write of len bytes starting at addr.
func RaceWriteRange(addr unsafe.Pointer, len int)
// RaceErrors returns the number of reports the race detector has made so
// far (the TSan report count).
func RaceErrors() int {
	// Have the TSan runtime store its current report count into n.
	racecall(&__tsan_report_count, uintptr(unsafe.Pointer(&n)), 0, 0, 0)
27 // RaceAcquire/RaceRelease/RaceReleaseMerge establish happens-before relations
28 // between goroutines. These inform the race detector about actual synchronization
29 // that it can't see for some reason (e.g. synchronization within RaceDisable/RaceEnable
31 // RaceAcquire establishes a happens-before relation with the preceding
32 // RaceReleaseMerge on addr up to and including the last RaceRelease on addr.
33 // In terms of the C memory model (C11 §5.1.2.4, §7.17.3),
34 // RaceAcquire is equivalent to atomic_load(memory_order_acquire).
37 func RaceAcquire(addr unsafe.Pointer) {
41 // RaceRelease performs a release operation on addr that
42 // can synchronize with a later RaceAcquire on addr.
44 // In terms of the C memory model, RaceRelease is equivalent to
45 // atomic_store(memory_order_release).
48 func RaceRelease(addr unsafe.Pointer) {
52 // RaceReleaseMerge is like RaceRelease, but also establishes a happens-before
53 // relation with the preceding RaceRelease or RaceReleaseMerge on addr.
55 // In terms of the C memory model, RaceReleaseMerge is equivalent to
56 // atomic_exchange(memory_order_release).
func RaceReleaseMerge(addr unsafe.Pointer) {
	// Forward to the internal implementation shared with the runtime itself.
	racereleasemerge(addr)
63 // RaceDisable disables handling of race synchronization events in the current goroutine.
64 // Handling is re-enabled with RaceEnable. RaceDisable/RaceEnable can be nested.
65 // Non-synchronization events (memory accesses, function entry/exit) still affect
	// raceignore == 0 means no RaceDisable is outstanding: this is the
	// outermost call, so tell TSan to start ignoring sync events.
	if gp.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_begin, gp.racectx, 0, 0, 0)
77 // RaceEnable re-enables handling of race events in the current goroutine.
	// raceignore back at zero: every RaceDisable has been matched, so tell
	// TSan to resume handling synchronization events.
	if gp.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_end, gp.racectx, 0, 0, 0)
88 // Private interface for the runtime.
90 const raceenabled = true
92 // For all functions accepting callerpc and pc,
93 // callerpc is a return PC of the function that calls this function,
// pc is the start PC of the function that calls this function.
// raceReadObjectPC informs the race detector of a read covering the object
// of type t at addr; callerpc/pc follow the convention documented above.
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.Kind_ & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to read every address
		// because a write might happen to any subobject.
		racereadrangepc(addr, t.Size_, callerpc, pc)
		// for non-composite objects we can read just the start
		// address, as any write must write the first byte.
		racereadpc(addr, callerpc, pc)
// raceWriteObjectPC informs the race detector of a write covering the object
// of type t at addr; callerpc/pc follow the convention documented above.
func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.Kind_ & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to write every address
		// because a write might happen to any subobject.
		racewriterangepc(addr, t.Size_, callerpc, pc)
		// for non-composite objects we can write just the start
		// address, as any write must write the first byte.
		racewritepc(addr, callerpc, pc)
// racereadpc informs the race detector of a read at addr, attributed to call
// site callpc within the function starting at pc (implemented in assembly).
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

// racewritepc is the write counterpart of racereadpc (implemented in assembly).
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)
127 type symbolizeCodeContext struct {
// C-style NUL-terminated placeholder strings ("??" and "-"), presumably
// handed back to the race runtime when symbolization fails — see the
// "Failure to symbolize" path in raceSymbolizeCode; TODO confirm.
var qq = [...]byte{'?', '?', 0}
var dash = [...]byte{'-', 0}
	// Command codes the race runtime passes to racecallback.
	raceGetProcCmd = iota
// Callback from C into Go, runs on g0.
// Dispatches on cmd to the Go-side symbolization handlers.
func racecallback(cmd uintptr, ctx unsafe.Pointer) {
		// The assembly thunk services this command itself;
		// reaching the Go handler for it is a bug.
		throw("should have been handled by racecallbackthunk")
	case raceSymbolizeCodeCmd:
		raceSymbolizeCode((*symbolizeCodeContext)(ctx))
	case raceSymbolizeDataCmd:
		raceSymbolizeData((*symbolizeDataContext)(ctx))
		throw("unknown command")
159 // raceSymbolizeCode reads ctx.pc and populates the rest of *ctx with
160 // information about the code at that pc.
162 // The race detector has already subtracted 1 from pcs, so they point to the last
163 // byte of call instructions (including calls to runtime.racewrite and friends).
165 // If the incoming pc is part of an inlined function, *ctx is populated
166 // with information about the inlined function, and on return ctx.pc is set
167 // to a pc in the logically containing function. (The race detector should call this
168 // function again with that pc.)
170 // If the incoming pc is not part of an inlined function, the return pc is unchanged.
func raceSymbolizeCode(ctx *symbolizeCodeContext) {
	u, uf := newInlineUnwinder(fi, pc)
	for ; uf.valid(); uf = u.next(uf) {
		if sf.funcID == abi.FuncIDWrapper && u.isInlined(uf) {
			// Ignore wrappers, unless we're at the outermost frame of u.
			// A non-inlined wrapper frame always means we have a physical
			// frame consisting entirely of wrappers, in which case we'll
			// take an outermost wrapper over nothing.

		file, line := u.fileLine(uf)
		// Failure to symbolize
		ctx.fn = &bytes(name)[0] // assume NUL-terminated
		ctx.line = uintptr(line)
		ctx.file = &bytes(file)[0] // assume NUL-terminated
		// off is pc's offset from the function's entry point.
		ctx.off = pc - fi.entry()
		// Set ctx.pc to the "caller" so the race detector calls this again
		// to further unwind.
213 type symbolizeDataContext struct {
// raceSymbolizeData reports, for an address inside a heap object, the
// containing object's location back to the race runtime.
func raceSymbolizeData(ctx *symbolizeDataContext) {
	// findObject returns a nonzero base iff ctx.addr points into a heap object.
	if base, span, _ := findObject(ctx.addr, 0, 0); base != 0 {
		ctx.size = span.elemsize
233 // Race runtime functions called via runtime·racecall.
235 //go:linkname __tsan_init __tsan_init
238 //go:linkname __tsan_fini __tsan_fini
241 //go:linkname __tsan_proc_create __tsan_proc_create
242 var __tsan_proc_create byte
244 //go:linkname __tsan_proc_destroy __tsan_proc_destroy
245 var __tsan_proc_destroy byte
247 //go:linkname __tsan_map_shadow __tsan_map_shadow
248 var __tsan_map_shadow byte
250 //go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
251 var __tsan_finalizer_goroutine byte
253 //go:linkname __tsan_go_start __tsan_go_start
254 var __tsan_go_start byte
256 //go:linkname __tsan_go_end __tsan_go_end
257 var __tsan_go_end byte
259 //go:linkname __tsan_malloc __tsan_malloc
260 var __tsan_malloc byte
262 //go:linkname __tsan_free __tsan_free
265 //go:linkname __tsan_acquire __tsan_acquire
266 var __tsan_acquire byte
268 //go:linkname __tsan_release __tsan_release
269 var __tsan_release byte
271 //go:linkname __tsan_release_acquire __tsan_release_acquire
272 var __tsan_release_acquire byte
274 //go:linkname __tsan_release_merge __tsan_release_merge
275 var __tsan_release_merge byte
277 //go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
278 var __tsan_go_ignore_sync_begin byte
280 //go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
281 var __tsan_go_ignore_sync_end byte
283 //go:linkname __tsan_report_count __tsan_report_count
284 var __tsan_report_count byte
286 // Mimic what cmd/cgo would do.
288 //go:cgo_import_static __tsan_init
289 //go:cgo_import_static __tsan_fini
290 //go:cgo_import_static __tsan_proc_create
291 //go:cgo_import_static __tsan_proc_destroy
292 //go:cgo_import_static __tsan_map_shadow
293 //go:cgo_import_static __tsan_finalizer_goroutine
294 //go:cgo_import_static __tsan_go_start
295 //go:cgo_import_static __tsan_go_end
296 //go:cgo_import_static __tsan_malloc
297 //go:cgo_import_static __tsan_free
298 //go:cgo_import_static __tsan_acquire
299 //go:cgo_import_static __tsan_release
300 //go:cgo_import_static __tsan_release_acquire
301 //go:cgo_import_static __tsan_release_merge
302 //go:cgo_import_static __tsan_go_ignore_sync_begin
303 //go:cgo_import_static __tsan_go_ignore_sync_end
304 //go:cgo_import_static __tsan_report_count
306 // These are called from race_amd64.s.
308 //go:cgo_import_static __tsan_read
309 //go:cgo_import_static __tsan_read_pc
310 //go:cgo_import_static __tsan_read_range
311 //go:cgo_import_static __tsan_write
312 //go:cgo_import_static __tsan_write_pc
313 //go:cgo_import_static __tsan_write_range
314 //go:cgo_import_static __tsan_func_enter
315 //go:cgo_import_static __tsan_func_exit
317 //go:cgo_import_static __tsan_go_atomic32_load
318 //go:cgo_import_static __tsan_go_atomic64_load
319 //go:cgo_import_static __tsan_go_atomic32_store
320 //go:cgo_import_static __tsan_go_atomic64_store
321 //go:cgo_import_static __tsan_go_atomic32_exchange
322 //go:cgo_import_static __tsan_go_atomic64_exchange
323 //go:cgo_import_static __tsan_go_atomic32_fetch_add
324 //go:cgo_import_static __tsan_go_atomic64_fetch_add
325 //go:cgo_import_static __tsan_go_atomic32_compare_exchange
326 //go:cgo_import_static __tsan_go_atomic64_compare_exchange
// start/end of global data (data+bss).
// Computed and registered with the TSan shadow by raceinit;
// consulted by isvalidaddr.
var racedatastart uintptr
var racedataend uintptr

// start/end of heap for race_amd64.s
// Grown by racemapshadow as new heap regions are mapped.
var racearenastart uintptr
var racearenaend uintptr
// Race instrumentation entry points; bodyless declarations implemented
// in assembly (race_amd64.s and friends).
func racefuncenter(callpc uintptr)
func racefuncenterfp(fp uintptr)

func raceread(addr uintptr)
func racewrite(addr uintptr)
func racereadrange(addr, size uintptr)
func racewriterange(addr, size uintptr)
func racereadrangepc1(addr, size, pc uintptr)
func racewriterangepc1(addr, size, pc uintptr)
func racecallbackthunk(uintptr)

// racecall allows calling an arbitrary function fn from C race runtime
// with up to 4 uintptr arguments.
func racecall(fn *byte, arg0, arg1, arg2, arg3 uintptr)
// checks if the address has shadow (i.e. heap or data/bss).
// Reports whether addr lies within the race-instrumented heap arena
// [racearenastart, racearenaend) or the global data/bss region
// [racedatastart, racedataend).
func isvalidaddr(addr unsafe.Pointer) bool {
	return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
		racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
// raceinit initializes the TSan runtime and returns the race context of
// the initial goroutine (gctx) and the initial processor context (pctx).
func raceinit() (gctx, pctx uintptr) {
	lockInit(&raceFiniLock, lockRankRaceFini)

	// On most machines, cgo is required to initialize libc, which is used by race runtime.
	if !iscgo && GOOS != "darwin" {
		throw("raceinit: race build must use cgo")

	racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), abi.FuncPCABI0(racecallbackthunk), 0)

	// Round data segment to page boundaries, because it's used in mmap().
	// Lower start to the smallest start address among the module's
	// data/bss sections...
	if start > firstmoduledata.noptrdata {
		start = firstmoduledata.noptrdata
	if start > firstmoduledata.data {
		start = firstmoduledata.data
	if start > firstmoduledata.noptrbss {
		start = firstmoduledata.noptrbss
	if start > firstmoduledata.bss {
		start = firstmoduledata.bss
	// ...and raise end to the largest end address among them.
	if end < firstmoduledata.enoptrdata {
		end = firstmoduledata.enoptrdata
	if end < firstmoduledata.edata {
		end = firstmoduledata.edata
	if end < firstmoduledata.enoptrbss {
		end = firstmoduledata.enoptrbss
	if end < firstmoduledata.ebss {
		end = firstmoduledata.ebss
	// Map TSan shadow for the page-aligned data region and record its
	// bounds for isvalidaddr.
	size := alignUp(end-start, _PageSize)
	racecall(&__tsan_map_shadow, start, size, 0, 0)
	racedatastart = start
	racedataend = start + size
407 // racefini() can only be called once to avoid races.
408 // This eventually (via __tsan_fini) calls C.exit which has
409 // undefined behavior if called more than once. If the lock is
410 // already held it's assumed that the first caller exits the program
411 // so other calls can hang forever without an issue.
414 // __tsan_fini will run C atexit functions and C++ destructors,
415 // which can theoretically call back into Go.
// Tell the scheduler we're entering external code.
419 // We're entering external code that may call ExitProcess on
421 osPreemptExtEnter(getg().m)
423 racecall(&__tsan_fini, 0, 0, 0, 0)
// raceproccreate asks TSan for a new processor context and returns it.
func raceproccreate() uintptr {
	racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0)

// raceprocdestroy releases a processor context obtained from raceproccreate.
func raceprocdestroy(ctx uintptr) {
	racecall(&__tsan_proc_destroy, ctx, 0, 0, 0)
// racemapshadow tells TSan to map shadow memory for the heap region
// [addr, addr+size), extending the recorded arena bounds as needed.
func racemapshadow(addr unsafe.Pointer, size uintptr) {
	// First heap mapping: record the arena start.
	if racearenastart == 0 {
		racearenastart = uintptr(addr)
	// Grow the recorded arena end if this region extends past it.
	if racearenaend < uintptr(addr)+size {
		racearenaend = uintptr(addr) + size
	racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)

// racemalloc informs TSan of a heap allocation of sz bytes at p.
func racemalloc(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_malloc, 0, 0, uintptr(p), sz)

// racefree informs TSan that the sz bytes at p have been freed.
func racefree(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_free, uintptr(p), sz, 0, 0)
// racegostart informs TSan that a goroutine starting at pc has been
// created and returns the new goroutine's race context.
func racegostart(pc uintptr) uintptr {
	if gp.m.curg != nil {
	racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)

	// Tell TSan the current goroutine is ending.
	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)

// racectxend tells TSan that the goroutine owning racectx has ended.
func racectxend(racectx uintptr) {
	racecall(&__tsan_go_end, racectx, 0, 0, 0)
// racewriterangepc reports a write of [addr, addr+sz), attributed to call
// site callpc within the function starting at pc.
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	// The call is coming from manual instrumentation of Go code running on g0/gsignal.
	racefuncenter(callpc)
	racewriterangepc1(uintptr(addr), sz, pc)

// racereadrangepc is the read counterpart of racewriterangepc.
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	// The call is coming from manual instrumentation of Go code running on g0/gsignal.
	racefuncenter(callpc)
	racereadrangepc1(uintptr(addr), sz, pc)
// raceacquire establishes a happens-before edge on addr for the current goroutine.
func raceacquire(addr unsafe.Pointer) {
	raceacquireg(getg(), addr)

// raceacquireg is like raceacquire, but acts on behalf of goroutine gp.
func raceacquireg(gp *g, addr unsafe.Pointer) {
	// Skip when this goroutine is ignoring sync events or addr has no shadow.
	if getg().raceignore != 0 || !isvalidaddr(addr) {
	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)

// raceacquirectx is like raceacquireg, but takes a raw race context instead
// of a goroutine (so no raceignore check is performed).
func raceacquirectx(racectx uintptr, addr unsafe.Pointer) {
	if !isvalidaddr(addr) {
	racecall(&__tsan_acquire, racectx, uintptr(addr), 0, 0)

// racerelease performs a release operation on addr for the current goroutine.
func racerelease(addr unsafe.Pointer) {
	racereleaseg(getg(), addr)

// racereleaseg is like racerelease, but acts on behalf of goroutine gp.
func racereleaseg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
	racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
// racereleaseacquire performs a combined release+acquire operation on addr
// (TSan __tsan_release_acquire) for the current goroutine.
func racereleaseacquire(addr unsafe.Pointer) {
	racereleaseacquireg(getg(), addr)

// racereleaseacquireg is like racereleaseacquire, but acts on behalf of gp.
func racereleaseacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
	racecall(&__tsan_release_acquire, gp.racectx, uintptr(addr), 0, 0)

// racereleasemerge backs RaceReleaseMerge; see that function's documentation.
func racereleasemerge(addr unsafe.Pointer) {
	racereleasemergeg(getg(), addr)

// racereleasemergeg is like racereleasemerge, but acts on behalf of gp.
func racereleasemergeg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
	racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)

	// Mark the current goroutine as the finalizer goroutine for TSan.
	racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
583 // The declarations below generate ABI wrappers for functions
584 // implemented in assembly in this package but declared in another
587 //go:linkname abigen_sync_atomic_LoadInt32 sync/atomic.LoadInt32
588 func abigen_sync_atomic_LoadInt32(addr *int32) (val int32)
590 //go:linkname abigen_sync_atomic_LoadInt64 sync/atomic.LoadInt64
591 func abigen_sync_atomic_LoadInt64(addr *int64) (val int64)
593 //go:linkname abigen_sync_atomic_LoadUint32 sync/atomic.LoadUint32
594 func abigen_sync_atomic_LoadUint32(addr *uint32) (val uint32)
596 //go:linkname abigen_sync_atomic_LoadUint64 sync/atomic.LoadUint64
597 func abigen_sync_atomic_LoadUint64(addr *uint64) (val uint64)
599 //go:linkname abigen_sync_atomic_LoadUintptr sync/atomic.LoadUintptr
600 func abigen_sync_atomic_LoadUintptr(addr *uintptr) (val uintptr)
602 //go:linkname abigen_sync_atomic_LoadPointer sync/atomic.LoadPointer
603 func abigen_sync_atomic_LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
605 //go:linkname abigen_sync_atomic_StoreInt32 sync/atomic.StoreInt32
606 func abigen_sync_atomic_StoreInt32(addr *int32, val int32)
608 //go:linkname abigen_sync_atomic_StoreInt64 sync/atomic.StoreInt64
609 func abigen_sync_atomic_StoreInt64(addr *int64, val int64)
611 //go:linkname abigen_sync_atomic_StoreUint32 sync/atomic.StoreUint32
612 func abigen_sync_atomic_StoreUint32(addr *uint32, val uint32)
614 //go:linkname abigen_sync_atomic_StoreUint64 sync/atomic.StoreUint64
615 func abigen_sync_atomic_StoreUint64(addr *uint64, val uint64)
617 //go:linkname abigen_sync_atomic_SwapInt32 sync/atomic.SwapInt32
618 func abigen_sync_atomic_SwapInt32(addr *int32, new int32) (old int32)
620 //go:linkname abigen_sync_atomic_SwapInt64 sync/atomic.SwapInt64
621 func abigen_sync_atomic_SwapInt64(addr *int64, new int64) (old int64)
623 //go:linkname abigen_sync_atomic_SwapUint32 sync/atomic.SwapUint32
624 func abigen_sync_atomic_SwapUint32(addr *uint32, new uint32) (old uint32)
626 //go:linkname abigen_sync_atomic_SwapUint64 sync/atomic.SwapUint64
627 func abigen_sync_atomic_SwapUint64(addr *uint64, new uint64) (old uint64)
629 //go:linkname abigen_sync_atomic_AddInt32 sync/atomic.AddInt32
630 func abigen_sync_atomic_AddInt32(addr *int32, delta int32) (new int32)
632 //go:linkname abigen_sync_atomic_AddUint32 sync/atomic.AddUint32
633 func abigen_sync_atomic_AddUint32(addr *uint32, delta uint32) (new uint32)
635 //go:linkname abigen_sync_atomic_AddInt64 sync/atomic.AddInt64
636 func abigen_sync_atomic_AddInt64(addr *int64, delta int64) (new int64)
638 //go:linkname abigen_sync_atomic_AddUint64 sync/atomic.AddUint64
639 func abigen_sync_atomic_AddUint64(addr *uint64, delta uint64) (new uint64)
641 //go:linkname abigen_sync_atomic_AddUintptr sync/atomic.AddUintptr
642 func abigen_sync_atomic_AddUintptr(addr *uintptr, delta uintptr) (new uintptr)
644 //go:linkname abigen_sync_atomic_CompareAndSwapInt32 sync/atomic.CompareAndSwapInt32
645 func abigen_sync_atomic_CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)
647 //go:linkname abigen_sync_atomic_CompareAndSwapInt64 sync/atomic.CompareAndSwapInt64
648 func abigen_sync_atomic_CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)
650 //go:linkname abigen_sync_atomic_CompareAndSwapUint32 sync/atomic.CompareAndSwapUint32
651 func abigen_sync_atomic_CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)
653 //go:linkname abigen_sync_atomic_CompareAndSwapUint64 sync/atomic.CompareAndSwapUint64
654 func abigen_sync_atomic_CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)