1 // Copyright 2012 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
14 // Public race detection API, present iff built with -race.
// RaceRead/RaceWrite report to the race detector a read/write of the
// memory at addr. No Go body: the implementation is provided elsewhere
// (assembly in this package).
16 func RaceRead(addr unsafe.Pointer)
17 func RaceWrite(addr unsafe.Pointer)
// RaceReadRange/RaceWriteRange report a read/write of the len bytes
// starting at addr.
18 func RaceReadRange(addr unsafe.Pointer, len int)
19 func RaceWriteRange(addr unsafe.Pointer, len int)
// RaceErrors returns the number of races reported so far, fetched from
// the race runtime via __tsan_report_count.
21 func RaceErrors() int {
23 racecall(&__tsan_report_count, uintptr(unsafe.Pointer(&n)), 0, 0, 0)
27 // RaceAcquire/RaceRelease/RaceReleaseMerge establish happens-before relations
28 // between goroutines. These inform the race detector about actual synchronization
29 // that it can't see for some reason (e.g. synchronization within RaceDisable/RaceEnable
// sections of code).
31 // RaceAcquire establishes a happens-before relation with the preceding
32 // RaceReleaseMerge on addr up to and including the last RaceRelease on addr.
33 // In terms of the C memory model (C11 §5.1.2.4, §7.17.3),
34 // RaceAcquire is equivalent to atomic_load(memory_order_acquire).
37 func RaceAcquire(addr unsafe.Pointer) {
41 // RaceRelease performs a release operation on addr that
42 // can synchronize with a later RaceAcquire on addr.
44 // In terms of the C memory model, RaceRelease is equivalent to
45 // atomic_store(memory_order_release).
48 func RaceRelease(addr unsafe.Pointer) {
52 // RaceReleaseMerge is like RaceRelease, but also establishes a happens-before
53 // relation with the preceding RaceRelease or RaceReleaseMerge on addr.
55 // In terms of the C memory model, RaceReleaseMerge is equivalent to
56 // atomic_exchange(memory_order_release).
59 func RaceReleaseMerge(addr unsafe.Pointer) {
// Delegates to the runtime-internal implementation on the current goroutine.
60 racereleasemerge(addr)
63 // RaceDisable disables handling of race synchronization events in the current goroutine.
64 // Handling is re-enabled with RaceEnable. RaceDisable/RaceEnable can be nested.
65 // Non-synchronization events (memory accesses, function entry/exit) still affect
// the race detector.
// Only notify tsan on the outermost disable; the nesting depth is
// tracked in gp.raceignore.
71 if gp.raceignore == 0 {
72 racecall(&__tsan_go_ignore_sync_begin, gp.racectx, 0, 0, 0)
77 // RaceEnable re-enables handling of race events in the current goroutine.
// Only notify tsan when the outermost RaceDisable is undone (counter
// back to zero).
83 if gp.raceignore == 0 {
84 racecall(&__tsan_go_ignore_sync_end, gp.racectx, 0, 0, 0)
88 // Private interface for the runtime.
// raceenabled is true in this file because it is only compiled into
// binaries built with -race.
90 const raceenabled = true
92 // For all functions accepting callerpc and pc,
93 // callerpc is a return PC of the function that calls this function,
94 // pc is start PC of the function that calls this function.
// raceReadObjectPC reports to the race detector a read of the object of
// type t at addr, attributed to callerpc/pc.
95 func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
96 kind := t.Kind_ & kindMask
97 if kind == kindArray || kind == kindStruct {
98 // for composite objects we have to read every address
99 // because a write might happen to any subobject.
100 racereadrangepc(addr, t.Size_, callerpc, pc)
102 // for non-composite objects we can read just the start
103 // address, as any write must write the first byte.
104 racereadpc(addr, callerpc, pc)
// raceWriteObjectPC reports to the race detector a write of the object of
// type t at addr, attributed to callerpc/pc.
108 func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
109 kind := t.Kind_ & kindMask
110 if kind == kindArray || kind == kindStruct {
111 // for composite objects we have to write every address
112 // because a write might happen to any subobject.
113 racewriterangepc(addr, t.Size_, callerpc, pc)
115 // for non-composite objects we can write just the start
116 // address, as any write must write the first byte.
117 racewritepc(addr, callerpc, pc)
// racereadpc/racewritepc report a single read/write at addr with explicit
// call PC and function PC. Implemented outside Go (assembly).
122 func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)
125 func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)
// symbolizeCodeContext is the argument record exchanged with the C race
// runtime when it asks Go to symbolize a code address.
127 type symbolizeCodeContext struct {
// NUL-terminated placeholder byte strings handed to the C side
// (presumably used when symbolization fails — confirm against callers).
136 var qq = [...]byte{'?', '?', 0}
137 var dash = [...]byte{'-', 0}
// Command codes for racecallback, shared with the C race runtime.
140 raceGetProcCmd = iota
145 // Callback from C into Go, runs on g0.
// racecallback dispatches a command from the C race runtime: symbolize a
// code address, symbolize a data address, or fail loudly for anything else
// (raceGetProcCmd is expected to be intercepted by racecallbackthunk).
146 func racecallback(cmd uintptr, ctx unsafe.Pointer) {
149 throw("should have been handled by racecallbackthunk")
150 case raceSymbolizeCodeCmd:
151 raceSymbolizeCode((*symbolizeCodeContext)(ctx))
152 case raceSymbolizeDataCmd:
153 raceSymbolizeData((*symbolizeDataContext)(ctx))
155 throw("unknown command")
159 // raceSymbolizeCode reads ctx.pc and populates the rest of *ctx with
160 // information about the code at that pc.
162 // The race detector has already subtracted 1 from pcs, so they point to the last
163 // byte of call instructions (including calls to runtime.racewrite and friends).
165 // If the incoming pc is part of an inlined function, *ctx is populated
166 // with information about the inlined function, and on return ctx.pc is set
167 // to a pc in the logically containing function. (The race detector should call this
168 // function again with that pc.)
170 // If the incoming pc is not part of an inlined function, the return pc is unchanged.
171 func raceSymbolizeCode(ctx *symbolizeCodeContext) {
// Walk the inline tree at pc, innermost to outermost.
175 u, uf := newInlineUnwinder(fi, pc)
176 for ; uf.valid(); uf = u.next(uf) {
178 if sf.funcID == abi.FuncIDWrapper && u.isInlined(uf) {
179 // Ignore wrappers, unless we're at the outermost frame of u.
180 // A non-inlined wrapper frame always means we have a physical
181 // frame consisting entirely of wrappers, in which case we'll
182 // take an outermost wrapper over nothing.
187 file, line := u.fileLine(uf)
189 // Failure to symbolize
// Hand the C side pointers to NUL-terminated name/file strings plus
// line and function-relative offset.
192 ctx.fn = &bytes(name)[0] // assume NUL-terminated
193 ctx.line = uintptr(line)
194 ctx.file = &bytes(file)[0] // assume NUL-terminated
195 ctx.off = pc - fi.entry()
198 // Set ctx.pc to the "caller" so the race detector calls this again
199 // to further unwind.
// symbolizeDataContext is the argument record exchanged with the C race
// runtime when it asks Go to describe a data (heap) address.
213 type symbolizeDataContext struct {
// raceSymbolizeData populates *ctx with the containing heap object's
// base and size, if ctx.addr points into a live heap object.
224 func raceSymbolizeData(ctx *symbolizeDataContext) {
225 if base, span, _ := findObject(ctx.addr, 0, 0); base != 0 {
226 // TODO: Does this need to handle malloc headers?
229 ctx.size = span.elemsize
234 // Race runtime functions called via runtime·racecall.
// Each var below is a zero-size anchor whose address is the entry point of
// the corresponding C function in the linked-in race runtime; racecall
// takes &__tsan_xxx as its fn argument.
236 //go:linkname __tsan_init __tsan_init
239 //go:linkname __tsan_fini __tsan_fini
242 //go:linkname __tsan_proc_create __tsan_proc_create
243 var __tsan_proc_create byte
245 //go:linkname __tsan_proc_destroy __tsan_proc_destroy
246 var __tsan_proc_destroy byte
248 //go:linkname __tsan_map_shadow __tsan_map_shadow
249 var __tsan_map_shadow byte
251 //go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
252 var __tsan_finalizer_goroutine byte
254 //go:linkname __tsan_go_start __tsan_go_start
255 var __tsan_go_start byte
257 //go:linkname __tsan_go_end __tsan_go_end
258 var __tsan_go_end byte
260 //go:linkname __tsan_malloc __tsan_malloc
261 var __tsan_malloc byte
263 //go:linkname __tsan_free __tsan_free
266 //go:linkname __tsan_acquire __tsan_acquire
267 var __tsan_acquire byte
269 //go:linkname __tsan_release __tsan_release
270 var __tsan_release byte
272 //go:linkname __tsan_release_acquire __tsan_release_acquire
273 var __tsan_release_acquire byte
275 //go:linkname __tsan_release_merge __tsan_release_merge
276 var __tsan_release_merge byte
278 //go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
279 var __tsan_go_ignore_sync_begin byte
281 //go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
282 var __tsan_go_ignore_sync_end byte
284 //go:linkname __tsan_report_count __tsan_report_count
285 var __tsan_report_count byte
287 // Mimic what cmd/cgo would do.
289 //go:cgo_import_static __tsan_init
290 //go:cgo_import_static __tsan_fini
291 //go:cgo_import_static __tsan_proc_create
292 //go:cgo_import_static __tsan_proc_destroy
293 //go:cgo_import_static __tsan_map_shadow
294 //go:cgo_import_static __tsan_finalizer_goroutine
295 //go:cgo_import_static __tsan_go_start
296 //go:cgo_import_static __tsan_go_end
297 //go:cgo_import_static __tsan_malloc
298 //go:cgo_import_static __tsan_free
299 //go:cgo_import_static __tsan_acquire
300 //go:cgo_import_static __tsan_release
301 //go:cgo_import_static __tsan_release_acquire
302 //go:cgo_import_static __tsan_release_merge
303 //go:cgo_import_static __tsan_go_ignore_sync_begin
304 //go:cgo_import_static __tsan_go_ignore_sync_end
305 //go:cgo_import_static __tsan_report_count
307 // These are called from race_amd64.s.
309 //go:cgo_import_static __tsan_read
310 //go:cgo_import_static __tsan_read_pc
311 //go:cgo_import_static __tsan_read_range
312 //go:cgo_import_static __tsan_write
313 //go:cgo_import_static __tsan_write_pc
314 //go:cgo_import_static __tsan_write_range
315 //go:cgo_import_static __tsan_func_enter
316 //go:cgo_import_static __tsan_func_exit
318 //go:cgo_import_static __tsan_go_atomic32_load
319 //go:cgo_import_static __tsan_go_atomic64_load
320 //go:cgo_import_static __tsan_go_atomic32_store
321 //go:cgo_import_static __tsan_go_atomic64_store
322 //go:cgo_import_static __tsan_go_atomic32_exchange
323 //go:cgo_import_static __tsan_go_atomic64_exchange
324 //go:cgo_import_static __tsan_go_atomic32_fetch_add
325 //go:cgo_import_static __tsan_go_atomic64_fetch_add
326 //go:cgo_import_static __tsan_go_atomic32_compare_exchange
327 //go:cgo_import_static __tsan_go_atomic64_compare_exchange
329 // start/end of global data (data+bss).
330 var racedatastart uintptr
331 var racedataend uintptr
333 // start/end of heap for race_amd64.s
334 var racearenastart uintptr
335 var racearenaend uintptr
// The functions below have no Go bodies; they are implemented in this
// package's assembly and thunk into the race runtime.
337 func racefuncenter(callpc uintptr)
338 func racefuncenterfp(fp uintptr)
340 func raceread(addr uintptr)
341 func racewrite(addr uintptr)
342 func racereadrange(addr, size uintptr)
343 func racewriterange(addr, size uintptr)
344 func racereadrangepc1(addr, size, pc uintptr)
345 func racewriterangepc1(addr, size, pc uintptr)
346 func racecallbackthunk(uintptr)
348 // racecall allows calling an arbitrary function fn from C race runtime
349 // with up to 4 uintptr arguments.
350 func racecall(fn *byte, arg0, arg1, arg2, arg3 uintptr)
352 // isvalidaddr checks if the address has shadow (i.e. heap or data/bss):
// it reports whether addr falls in [racearenastart, racearenaend) or
// [racedatastart, racedataend).
355 func isvalidaddr(addr unsafe.Pointer) bool {
356 return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
357 racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
// raceinit initializes the race runtime and maps shadow memory for the
// data+bss segments. It returns the tsan goroutine and proc contexts for
// the initial goroutine/P.
361 func raceinit() (gctx, pctx uintptr) {
362 lockInit(&raceFiniLock, lockRankRaceFini)
364 // On most machines, cgo is required to initialize libc, which is used by race runtime.
365 if !iscgo && GOOS != "darwin" {
366 throw("raceinit: race build must use cgo")
369 racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), abi.FuncPCABI0(racecallbackthunk), 0)
371 // Round data segment to page boundaries, because it's used in mmap().
// Take the minimum start and maximum end over all of the module's
// data-like sections (noptrdata, data, noptrbss, bss) so the mapped
// shadow covers every global.
374 if start > firstmoduledata.noptrdata {
375 start = firstmoduledata.noptrdata
377 if start > firstmoduledata.data {
378 start = firstmoduledata.data
380 if start > firstmoduledata.noptrbss {
381 start = firstmoduledata.noptrbss
383 if start > firstmoduledata.bss {
384 start = firstmoduledata.bss
386 if end < firstmoduledata.enoptrdata {
387 end = firstmoduledata.enoptrdata
389 if end < firstmoduledata.edata {
390 end = firstmoduledata.edata
392 if end < firstmoduledata.enoptrbss {
393 end = firstmoduledata.enoptrbss
395 if end < firstmoduledata.ebss {
396 end = firstmoduledata.ebss
// Map shadow for the page-aligned [start, start+size) range and record
// it for isvalidaddr.
398 size := alignUp(end-start, _PageSize)
399 racecall(&__tsan_map_shadow, start, size, 0, 0)
400 racedatastart = start
401 racedataend = start + size
408 // racefini() can only be called once to avoid races.
409 // This eventually (via __tsan_fini) calls C.exit which has
410 // undefined behavior if called more than once. If the lock is
411 // already held it's assumed that the first caller exits the program
412 // so other calls can hang forever without an issue.
415 // __tsan_fini will run C atexit functions and C++ destructors,
416 // which can theoretically call back into Go.
417 // Tell the scheduler we are entering external code.
420 // We're entering external code that may call ExitProcess on
422 osPreemptExtEnter(getg().m)
// __tsan_fini flushes pending reports and ultimately exits the process.
424 racecall(&__tsan_fini, 0, 0, 0, 0)
// raceproccreate allocates a new tsan proc context and returns it.
428 func raceproccreate() uintptr {
430 racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0)
// raceprocdestroy releases a tsan proc context previously returned by
// raceproccreate.
435 func raceprocdestroy(ctx uintptr) {
436 racecall(&__tsan_proc_destroy, ctx, 0, 0, 0)
// racemapshadow maps race-detector shadow for a newly created heap
// region [addr, addr+size) and widens the recorded arena bounds.
440 func racemapshadow(addr unsafe.Pointer, size uintptr) {
441 if racearenastart == 0 {
442 racearenastart = uintptr(addr)
444 if racearenaend < uintptr(addr)+size {
445 racearenaend = uintptr(addr) + size
447 racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
// racemalloc/racefree inform tsan of heap object allocation and freeing.
451 func racemalloc(p unsafe.Pointer, sz uintptr) {
452 racecall(&__tsan_malloc, 0, 0, uintptr(p), sz)
456 func racefree(p unsafe.Pointer, sz uintptr) {
457 racecall(&__tsan_free, uintptr(p), sz, 0, 0)
// racegostart tells tsan a new goroutine is starting at pc and returns
// the new goroutine's race context.
461 func racegostart(pc uintptr) uintptr {
464 if gp.m.curg != nil {
471 racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
// Tell tsan the current goroutine is ending.
477 racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
// racectxend ends an explicitly-managed race context (not tied to the
// current goroutine).
481 func racectxend(racectx uintptr) {
482 racecall(&__tsan_go_end, racectx, 0, 0, 0)
// racewriterangepc reports a write of [addr, addr+sz) attributed to
// callpc/pc, bracketing it with a synthetic function-enter when callpc
// is provided.
486 func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
489 // The call is coming from manual instrumentation of Go code running on g0/gsignal.
494 racefuncenter(callpc)
496 racewriterangepc1(uintptr(addr), sz, pc)
// racereadrangepc is the read counterpart of racewriterangepc.
503 func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
506 // The call is coming from manual instrumentation of Go code running on g0/gsignal.
511 racefuncenter(callpc)
513 racereadrangepc1(uintptr(addr), sz, pc)
// raceacquire performs an acquire on addr for the current goroutine.
520 func raceacquire(addr unsafe.Pointer) {
521 raceacquireg(getg(), addr)
// raceacquireg performs an acquire on addr using gp's race context.
// Skipped while the current goroutine has race handling disabled or when
// addr has no shadow.
525 func raceacquireg(gp *g, addr unsafe.Pointer) {
526 if getg().raceignore != 0 || !isvalidaddr(addr) {
529 racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
// raceacquirectx performs an acquire on addr for an explicit race context
// (no goroutine raceignore check — there is no associated g).
533 func raceacquirectx(racectx uintptr, addr unsafe.Pointer) {
534 if !isvalidaddr(addr) {
537 racecall(&__tsan_acquire, racectx, uintptr(addr), 0, 0)
// racerelease performs a release on addr for the current goroutine.
541 func racerelease(addr unsafe.Pointer) {
542 racereleaseg(getg(), addr)
// racereleaseg performs a release on addr using gp's race context.
546 func racereleaseg(gp *g, addr unsafe.Pointer) {
547 if getg().raceignore != 0 || !isvalidaddr(addr) {
550 racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
// racereleaseacquire performs a combined release+acquire on addr for the
// current goroutine.
554 func racereleaseacquire(addr unsafe.Pointer) {
555 racereleaseacquireg(getg(), addr)
// racereleaseacquireg is the explicit-goroutine form of racereleaseacquire.
559 func racereleaseacquireg(gp *g, addr unsafe.Pointer) {
560 if getg().raceignore != 0 || !isvalidaddr(addr) {
563 racecall(&__tsan_release_acquire, gp.racectx, uintptr(addr), 0, 0)
// racereleasemerge performs a merging release on addr for the current
// goroutine (see RaceReleaseMerge above).
567 func racereleasemerge(addr unsafe.Pointer) {
568 racereleasemergeg(getg(), addr)
// racereleasemergeg is the explicit-goroutine form of racereleasemerge.
572 func racereleasemergeg(gp *g, addr unsafe.Pointer) {
573 if getg().raceignore != 0 || !isvalidaddr(addr) {
576 racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
// Mark the current goroutine as the finalizer goroutine for tsan.
581 racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
584 // The declarations below generate ABI wrappers for functions
585 // implemented in assembly in this package but declared in another
// package (sync/atomic).
// Each declaration below is linknamed to the corresponding exported
// sync/atomic function so the toolchain emits ABI wrappers for the
// race-instrumented assembly implementations. Bodiless by design.
588 //go:linkname abigen_sync_atomic_LoadInt32 sync/atomic.LoadInt32
589 func abigen_sync_atomic_LoadInt32(addr *int32) (val int32)
591 //go:linkname abigen_sync_atomic_LoadInt64 sync/atomic.LoadInt64
592 func abigen_sync_atomic_LoadInt64(addr *int64) (val int64)
594 //go:linkname abigen_sync_atomic_LoadUint32 sync/atomic.LoadUint32
595 func abigen_sync_atomic_LoadUint32(addr *uint32) (val uint32)
597 //go:linkname abigen_sync_atomic_LoadUint64 sync/atomic.LoadUint64
598 func abigen_sync_atomic_LoadUint64(addr *uint64) (val uint64)
600 //go:linkname abigen_sync_atomic_LoadUintptr sync/atomic.LoadUintptr
601 func abigen_sync_atomic_LoadUintptr(addr *uintptr) (val uintptr)
603 //go:linkname abigen_sync_atomic_LoadPointer sync/atomic.LoadPointer
604 func abigen_sync_atomic_LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
606 //go:linkname abigen_sync_atomic_StoreInt32 sync/atomic.StoreInt32
607 func abigen_sync_atomic_StoreInt32(addr *int32, val int32)
609 //go:linkname abigen_sync_atomic_StoreInt64 sync/atomic.StoreInt64
610 func abigen_sync_atomic_StoreInt64(addr *int64, val int64)
612 //go:linkname abigen_sync_atomic_StoreUint32 sync/atomic.StoreUint32
613 func abigen_sync_atomic_StoreUint32(addr *uint32, val uint32)
615 //go:linkname abigen_sync_atomic_StoreUint64 sync/atomic.StoreUint64
616 func abigen_sync_atomic_StoreUint64(addr *uint64, val uint64)
618 //go:linkname abigen_sync_atomic_SwapInt32 sync/atomic.SwapInt32
619 func abigen_sync_atomic_SwapInt32(addr *int32, new int32) (old int32)
621 //go:linkname abigen_sync_atomic_SwapInt64 sync/atomic.SwapInt64
622 func abigen_sync_atomic_SwapInt64(addr *int64, new int64) (old int64)
624 //go:linkname abigen_sync_atomic_SwapUint32 sync/atomic.SwapUint32
625 func abigen_sync_atomic_SwapUint32(addr *uint32, new uint32) (old uint32)
627 //go:linkname abigen_sync_atomic_SwapUint64 sync/atomic.SwapUint64
628 func abigen_sync_atomic_SwapUint64(addr *uint64, new uint64) (old uint64)
630 //go:linkname abigen_sync_atomic_AddInt32 sync/atomic.AddInt32
631 func abigen_sync_atomic_AddInt32(addr *int32, delta int32) (new int32)
633 //go:linkname abigen_sync_atomic_AddUint32 sync/atomic.AddUint32
634 func abigen_sync_atomic_AddUint32(addr *uint32, delta uint32) (new uint32)
636 //go:linkname abigen_sync_atomic_AddInt64 sync/atomic.AddInt64
637 func abigen_sync_atomic_AddInt64(addr *int64, delta int64) (new int64)
639 //go:linkname abigen_sync_atomic_AddUint64 sync/atomic.AddUint64
640 func abigen_sync_atomic_AddUint64(addr *uint64, delta uint64) (new uint64)
642 //go:linkname abigen_sync_atomic_AddUintptr sync/atomic.AddUintptr
643 func abigen_sync_atomic_AddUintptr(addr *uintptr, delta uintptr) (new uintptr)
645 //go:linkname abigen_sync_atomic_CompareAndSwapInt32 sync/atomic.CompareAndSwapInt32
646 func abigen_sync_atomic_CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)
648 //go:linkname abigen_sync_atomic_CompareAndSwapInt64 sync/atomic.CompareAndSwapInt64
649 func abigen_sync_atomic_CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)
651 //go:linkname abigen_sync_atomic_CompareAndSwapUint32 sync/atomic.CompareAndSwapUint32
652 func abigen_sync_atomic_CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)
654 //go:linkname abigen_sync_atomic_CompareAndSwapUint64 sync/atomic.CompareAndSwapUint64
655 func abigen_sync_atomic_CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)