1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
8 "runtime/internal/atomic"
// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
	// First constant of the packed-flags const block; the sibling
	// constants (tracebackAll, tracebackShift) are on elided lines.
	tracebackCrash = 1 << iota

// traceback_cache holds the packed setting described above.
// Default: level 2 (include system frames), no "all", no "crash".
var traceback_cache uint32 = 2 << tracebackShift

// traceback_env holds the setting derived from the GOTRACEBACK
// environment variable; it is snapshotted from traceback_cache in
// parsedebugvars (see below).
var traceback_env uint32
// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
func gotraceback() (level int32, all, crash bool) {
	// NOTE(review): _g_ is initialized on an elided line (presumably
	// _g_ := getg()) — confirm against the full file.
	// While this M is in the middle of throwing, always dump all goroutines.
	all = _g_.m.throwing > 0
	if _g_.m.traceback != 0 {
		// A per-M override (set during crash handling) takes precedence
		// over the process-wide cached setting.
		level = int32(_g_.m.traceback)
	// Decode the packed process-wide setting: crash/all flags in the
	// low bits, the traceback level in the remaining high bits.
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = all || t&tracebackAll != 0
	level = int32(t >> tracebackShift)
// argv_index returns argv[i] as a *byte, using raw pointer arithmetic
// over the C-style argument vector.
// nosplit for use in linux startup sysargs
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
// args records the OS-supplied argument count and vector at startup.
// NOTE(review): most lines in this span are elided; the statements below
// appear to belong to several adjacent startup helpers (argument and
// environment slurping), not all to args itself — consult the full file.
func args(c int32, v **byte) {
	// Windows obtains its arguments elsewhere.
	if GOOS == "windows" {
	// Copy each C argument string into the Go argslice.
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// Count environment entries: on Unix, env[] follows argv after its
	// NULL terminator, hence the argc+1 base offset.
	for argv_index(argv, argc+1+n) != nil {
	// Copy the environment strings into envs.
	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
// environ returns the process environment as a string slice
// (body elided in this excerpt; presumably returns envs).
func environ() []string {
// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
// Package-level placement gives these the 8-byte alignment that the
// 64-bit atomic operations below require.
var test_z64, test_x64 uint64
// testAtomic64 sanity-checks the 64-bit atomic operations at startup.
// NOTE(review): the initialization of test_z64/test_x64 and several
// guarding if-statements/closing braces are on elided lines; the checks
// below assume the two variables start out unequal — confirm against
// the full file.
func testAtomic64() {
	// Exercise the prefetch hooks (advisory hints; safe even as no-ops).
	prefetcht0(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht1(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht2(uintptr(unsafe.Pointer(&test_z64)))
	prefetchnta(uintptr(unsafe.Pointer(&test_z64)))
	// This CAS must fail: test_x64 does not match *(&test_z64) here.
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	// NOTE(review): the if-condition guarding this throw is elided.
	throw("cas64 failed")
	// After the elided update to test_x64, this CAS must succeed.
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	// Use values above 2^32 so a truncating 32-bit implementation fails.
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	// Xchg64 returns the old value and installs the new one.
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	// NOTE(review): the lines below are the interior of a startup
	// self-check routine; its signature, the declarations of the locals
	// it tests (a..l, x1, y1, e, z, k, k1, m, i, i1, j, j1, ...), most
	// throw calls, and many closing braces are on elided lines. Code
	// tokens are kept exactly as extracted.
	// Verify unsafe.Sizeof agrees with the expected ABI for basic types.
	if unsafe.Sizeof(a) != 1 {
	if unsafe.Sizeof(b) != 1 {
	if unsafe.Sizeof(c) != 2 {
	if unsafe.Sizeof(d) != 2 {
	if unsafe.Sizeof(e) != 4 {
	if unsafe.Sizeof(f) != 4 {
	if unsafe.Sizeof(g) != 8 {
	if unsafe.Sizeof(h) != 8 {
	if unsafe.Sizeof(i) != 4 {
	if unsafe.Sizeof(j) != 8 {
	// Pointer-shaped types must match the platform word size.
	if unsafe.Sizeof(k) != sys.PtrSize {
	if unsafe.Sizeof(l) != sys.PtrSize {
	// Struct layout checks: sizes and field offsets.
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	// timediv must produce the right quotient and remainder.
	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
	// 32-bit compare-and-swap: expected success, expected failure,
	// and an all-ones boundary value.
	if !atomic.Cas(&z, 1, 2) {
	if atomic.Cas(&z, 5, 6) {
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
	// Pointer CAS via casp; the pattern is widened on 64-bit platforms.
	k = unsafe.Pointer(uintptr(0xfedcb123))
	if sys.PtrSize == 8 {
		k = unsafe.Pointer(uintptr(k) << 10)
	if casp(&k, nil, nil) {
	if !casp(&k, k, k1) {
	// Or8 must modify only the addressed byte, not its neighbors.
	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
	// Write all-ones-style bit patterns through pointer reinterpretation;
	// presumably i/i1/j/j1 are floats and the NaN comparisons are on
	// elided lines — confirm against the full file.
	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	// The fixed stack size must be a power of two.
	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	// NOTE(review): the condition guarding this throw is elided.
	throw("assembly checks failed")
// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
	// NOTE(review): these are fields of a struct variable whose
	// declaration and remaining fields are on elided lines. Each field
	// is filled from the GODEBUG key of the same name via the dbgvars
	// table below.
	gcshrinkstackoff int32
	gcstackbarrieroff int32
	gcstackbarrierall int32
// dbgvars maps each GODEBUG key name to the int32 setting it controls;
// parsedebugvars scans this table when processing GODEBUG.
// (Closing brace of the literal is on an elided line.)
var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstackbarrieroff", &debug.gcstackbarrieroff},
	{"gcstackbarrierall", &debug.gcstackbarrierall},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"wbshadow", &debug.wbshadow},
// parsedebugvars parses the GODEBUG and GOTRACEBACK environment
// variables into the runtime's debug settings.
// NOTE(review): several parsing details (splitting on commas, the first
// computation of i, defaults, and closing braces) are on elided lines.
func parsedebugvars() {
	// GODEBUG is a comma-separated list of name=value pairs.
	for p := gogetenv("GODEBUG"); p != ""; {
		field, p = p[:i], p[i+1:]
		i = index(field, "=")
		key, value := field[:i], field[i+1:]
		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			MemProfileRate = atoi(value)
		// Otherwise look the key up in the dbgvars table and store the
		// parsed value into the matching debug field.
		for _, v := range dbgvars {
			*v.value = int32(atoi(value))
	setTraceback(gogetenv("GOTRACEBACK"))
	// Snapshot the environment-derived traceback setting so later
	// programmatic changes can be distinguished from it.
	traceback_env = traceback_cache
	if debug.gcstackbarrierall > 0 {
		// Install stack barriers at every frame rather than sparsely.
		firstStackBarrierOffset = 0
	// For cgocheck > 1, we turn on the write barrier at all times
	// and check all pointer writes.
	if debug.cgocheck > 1 {
		writeBarrier.cgo = true
		writeBarrier.enabled = true
// setTraceback updates the cached traceback setting from a
// GOTRACEBACK-style string (e.g. "none", "all", "system", "crash",
// or a decimal level).
// NOTE(review): the switch dispatching on level is elided in this
// excerpt; only the per-case assignments to t are visible.
//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	t = 1 << tracebackShift
	t = 1<<tracebackShift | tracebackAll
	t = 2<<tracebackShift | tracebackAll
	t = 2<<tracebackShift | tracebackAll | tracebackCrash
	// Unrecognized name: treat it as a numeric level, with "all" implied.
	t = uint32(atoi(level))<<tracebackShift | tracebackAll
	// when C owns the process, simply exit'ing the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
	// Publish atomically so concurrent gotraceback readers see a
	// consistent packed value.
	atomic.Store(&traceback_cache, t)
// Poor man's 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
// NOTE(review): the declaration of res, the overflow check, the store
// through rem, and the return statement are on elided lines.
func timediv(v int64, div int32, rem *int32) int32 {
	// Schoolbook long division, one quotient bit at a time from bit 30
	// down to bit 0; v is reduced in place as bits are accepted.
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			res += 1 << uint(bit)
// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
// releasem undoes an acquirem on mp and, once the last lock is dropped,
// re-arms any pending preemption request.
func releasem(mp *m) {
	// NOTE(review): the initialization of _g_ and the decrement of
	// mp.locks are on elided lines — confirm against the full file.
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
// gomcache returns the mcache of the current goroutine's M.
func gomcache() *mcache {
	return getg().m.mcache
// reflect_typelinks returns, for every loaded module, the base pointer
// of its type data section and its typelink offset table. Linknamed
// into package reflect as typelinks.
// (Return statement and closing brace are on elided lines.)
//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	// Start with the main module, then append every other module in
	// the moduledata list.
	sections := []unsafe.Pointer{unsafe.Pointer(firstmoduledata.types)}
	ret := [][]int32{firstmoduledata.typelinks}
	for datap := firstmoduledata.next; datap != nil; datap = datap.next {
		sections = append(sections, unsafe.Pointer(datap.types))
		ret = append(ret, datap.typelinks)
// reflect_resolveNameOff resolves a name offset from a base pointer.
// Linknamed into package reflect; returns the raw bytes of the
// resolved name record.
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
// reflect_resolveTypeOff resolves an *rtype offset from a base type.
// Linknamed into package reflect; rtype must point at a runtime _type.
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
// reflect_resolveTextOff resolves a function pointer offset from a base type.
// Linknamed into package reflect.
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return (*_type)(rtype).textOff(textOff(off))
// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
// It returns a stable (negative) int32 ID for ptr, reusing the ID if the
// pointer was registered before.
// NOTE(review): any locking around reflectOffs, the `if !found` guard for
// the new-entry path, and the return statement are on elided lines.
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	// Lazily create the forward (id -> ptr) and inverse (ptr -> id) maps.
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	// Reuse the existing ID for a previously registered pointer.
	id, found := reflectOffs.minv[ptr]
	// The lines below run only for pointers not seen before (guard elided).
	id = reflectOffs.next
	reflectOffs.next-- // use negative offsets as IDs to aid debugging
	reflectOffs.m[id] = ptr
	reflectOffs.minv[ptr] = id