1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
10 "runtime/internal/atomic"
14 // Keep a cached value to make gotraceback fast,
15 // since we call it on every call to gentraceback.
16 // The cached value is a uint32 in which the low bits
17 // are the "crash" and "all" settings and the remaining
18 // bits are the traceback value (0 off, 1 on, 2 include system).
// NOTE(review): excerpt gap — the enclosing const block (with the sibling
// constants tracebackAll and tracebackShift) is not visible here.
20 tracebackCrash = 1 << iota
// Default cache value: traceback level 2 ("include system" per the encoding
// above) with the crash and all bits clear.
25 var traceback_cache uint32 = 2 << tracebackShift
// traceback_env records the setting derived from the GOTRACEBACK environment
// variable at startup (see parsedebugvars, which copies traceback_cache into
// it), so later runtime/debug.SetTraceback calls can layer on top of it.
26 var traceback_env uint32
28 // gotraceback returns the current traceback settings.
30 // If level is 0, suppress all tracebacks.
31 // If level is 1, show tracebacks, but exclude runtime frames.
32 // If level is 2, show tracebacks including runtime frames.
33 // If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
34 // If crash is set, crash (core dump, etc) after tracebacking.
37 func gotraceback() (level int32, all, crash bool) {
// NOTE(review): excerpt gap — the line obtaining gp (presumably gp := getg())
// is not visible here.
// Load the packed settings atomically and decode the crash bit.
39 t := atomic.Load(&traceback_cache)
40 crash = t&tracebackCrash != 0
// A user-level throw forces all goroutine stacks regardless of the cache.
41 all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0
// A per-M override (m.traceback) takes priority over the cached level.
42 if gp.m.traceback != 0 {
43 level = int32(gp.m.traceback)
44 } else if gp.m.throwing >= throwTypeRuntime {
45 // Always include runtime frames in runtime throws unless
46 // otherwise overridden by m.traceback.
// NOTE(review): excerpt gap — the runtime-throw assignment (presumably
// level = 2) and the surrounding else/return are not visible; line 49 below
// is the fallback that decodes the level from the cache.
49 level = int32(t >> tracebackShift)
59 // nosplit for use in linux startup sysargs.
// argv_index returns argv[i], treating argv as a contiguous C-style array of
// byte pointers (pointer arithmetic via add and goarch.PtrSize).
// NOTE(review): the //go:nosplit directive and closing brace are elided from
// this excerpt.
62 func argv_index(argv **byte, i int32) *byte {
63 return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
// args records the argument count and vector handed to the runtime at
// startup.
// NOTE(review): excerpt gaps throughout — the assignments into argc/argv and
// the function boundaries are elided; lines 76-78 and 91-93 below appear to
// belong to separate argument/environment capture routines whose headers are
// not visible here. Comments are kept local to what each line shows.
66 func args(c int32, v **byte) {
73 if GOOS == "windows" {
// Copy each argument into argslice; gostringnocopy aliases the original
// argv memory rather than copying it.
76 argslice = make([]string, argc)
77 for i := int32(0); i < argc; i++ {
78 argslice[i] = gostringnocopy(argv_index(argv, i))
83 // TODO(austin): ppc64 in dynamic linking mode doesn't
84 // guarantee env[] will immediately follow argv. Might cause
// Count environment entries: env[] is assumed to follow argv[] after the
// terminating NULL at argv[argc], hence the argc+1+n indexing.
87 for argv_index(argv, argc+1+n) != nil {
91 envs = make([]string, n)
92 for i := int32(0); i < n; i++ {
// Environment strings are copied (gostring), unlike the aliased argslice
// above — presumably so the runtime owns this memory; confirm upstream.
93 envs[i] = gostring(argv_index(argv, argc+1+i))
// environ returns the runtime's captured environment — presumably the envs
// slice populated above; the body is not visible in this excerpt.
97 func environ() []string {
101 // TODO: These should be locals in testAtomic64, but we don't 8-byte
102 // align stack variables on 386.
// Package-level scratch operands for the 64-bit atomic self-tests below.
103 var test_z64, test_x64 uint64
// testAtomic64 sanity-checks the 64-bit atomic primitives (Cas64, Load64,
// Store64, Xadd64, Xchg64), throwing on any failure.
105 func testAtomic64() {
// NOTE(review): excerpt gap — the initialization of test_z64/test_x64
// (presumably making them unequal) is not visible, so this first Cas64 is
// expected to FAIL; a success indicates a broken compare-and-swap.
108 if atomic.Cas64(&test_z64, test_x64, 1) {
109 throw("cas64 failed")
112 throw("cas64 failed")
// After the (elided) setup, this Cas64 is expected to succeed.
115 if !atomic.Cas64(&test_z64, test_x64, 1) {
116 throw("cas64 failed")
// Verify the swap wrote 1 and left the comparand (42) untouched.
118 if test_x64 != 42 || test_z64 != 1 {
119 throw("cas64 failed")
121 if atomic.Load64(&test_z64) != 1 {
122 throw("load64 failed")
// Values above 2^32 catch implementations that only honor the low 32 bits.
124 atomic.Store64(&test_z64, (1<<40)+1)
125 if atomic.Load64(&test_z64) != (1<<40)+1 {
126 throw("store64 failed")
// Xadd64 returns the new value: (1<<40)+1 added to itself.
128 if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
129 throw("xadd64 failed")
131 if atomic.Load64(&test_z64) != (2<<40)+2 {
132 throw("xadd64 failed")
// Xchg64 returns the old value and installs the new one.
134 if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
135 throw("xchg64 failed")
137 if atomic.Load64(&test_z64) != (3<<40)+3 {
138 throw("xchg64 failed")
// NOTE(review): interior of the runtime's startup self-check routine; the
// enclosing function header and the declarations of a..l, x1, y1, z, m, i1,
// j1 are elided from this excerpt. It validates the sizes the runtime/ABI
// assumes for primitive types and the behavior of basic operations,
// throwing on any mismatch. Per-line throw bodies are also partly elided.
// Sizes: a/b are 1 byte, c/d 2 bytes, e/f/i 4 bytes, g/h/j 8 bytes, and
// k/l are pointer-sized (types themselves not visible here).
168 if unsafe.Sizeof(a) != 1 {
171 if unsafe.Sizeof(b) != 1 {
174 if unsafe.Sizeof(c) != 2 {
177 if unsafe.Sizeof(d) != 2 {
180 if unsafe.Sizeof(e) != 4 {
183 if unsafe.Sizeof(f) != 4 {
186 if unsafe.Sizeof(g) != 8 {
189 if unsafe.Sizeof(h) != 8 {
192 if unsafe.Sizeof(i) != 4 {
195 if unsafe.Sizeof(j) != 8 {
198 if unsafe.Sizeof(k) != goarch.PtrSize {
201 if unsafe.Sizeof(l) != goarch.PtrSize {
// Struct layout checks: x1 and y1 exercise field offset/size assumptions.
204 if unsafe.Sizeof(x1) != 1 {
205 throw("bad unsafe.Sizeof x1")
207 if unsafe.Offsetof(y1.y) != 1 {
208 throw("bad offsetof y1.y")
210 if unsafe.Sizeof(y1) != 2 {
211 throw("bad unsafe.Sizeof y1")
// Sanity-check timediv: quotient 12345, remainder 54321 via the out param.
214 if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
// 32-bit Cas checks, including values near the uint32 boundary.
220 if !atomic.Cas(&z, 1, 2) {
228 if atomic.Cas(&z, 5, 6) {
236 if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
// Or8/And8 must modify only the addressed byte, not its neighbors.
243 m = [4]byte{1, 1, 1, 1}
244 atomic.Or8(&m[1], 0xf0)
245 if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
249 m = [4]byte{0xff, 0xff, 0xff, 0xff}
250 atomic.And8(&m[1], 0x1)
251 if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
// Bit-pattern writes into float storage — presumably producing NaNs whose
// comparison semantics are checked by elided lines; confirm upstream.
255 *(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
263 *(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
271 *(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
279 *(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
// The fixed stack size must be a power of two (round2 is its rounding).
289 if _FixedStack != round2(_FixedStack) {
290 throw("FixedStack is not power-of-2")
294 throw("assembly checks failed")
// Fields of the dbgVar descriptor (the struct header is elided from this
// excerpt). Per parsedebugvars, every var has exactly one of value/atomic set.
300 value *int32 // for variables that can only be set at startup
301 atomic *atomic.Int32 // for variables that can be changed during execution
302 def int32 // default value (ideally zero)
305 // Holds variables parsed from GODEBUG env var,
306 // except for "memprofilerate" since there is an
307 // existing int var for that value, which may
308 // already have an initial value.
// Fields of the debug variable struct (declaration header and many sibling
// fields are elided from this excerpt).
315 gcshrinkstackoff int32
319 madvdontneed int32 // for Linux; issue 28466
323 tracebackancestors int32
324 asyncpreemptoff int32
326 adaptivestackstart int32
328 // debug.malloc is used as a combined debug check
329 // in the malloc function and should be set
330 // if any of the below debug options is != 0.
// panicnil is atomic because it can be toggled at runtime (see dbgvars).
336 panicnil atomic.Int32
// dbgvars maps GODEBUG setting names to the runtime variables they control.
// Startup-only settings bind via value; runtime-changeable ones via atomic
// (in this excerpt only "panicnil"). The closing brace is elided.
339 var dbgvars = []*dbgVar{
340 {name: "allocfreetrace", value: &debug.allocfreetrace},
341 {name: "clobberfree", value: &debug.clobberfree},
342 {name: "cgocheck", value: &debug.cgocheck},
343 {name: "efence", value: &debug.efence},
344 {name: "gccheckmark", value: &debug.gccheckmark},
345 {name: "gcpacertrace", value: &debug.gcpacertrace},
346 {name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff},
347 {name: "gcstoptheworld", value: &debug.gcstoptheworld},
348 {name: "gctrace", value: &debug.gctrace},
349 {name: "invalidptr", value: &debug.invalidptr},
350 {name: "madvdontneed", value: &debug.madvdontneed},
351 {name: "sbrk", value: &debug.sbrk},
352 {name: "scavtrace", value: &debug.scavtrace},
353 {name: "scheddetail", value: &debug.scheddetail},
354 {name: "schedtrace", value: &debug.schedtrace},
355 {name: "tracebackancestors", value: &debug.tracebackancestors},
356 {name: "asyncpreemptoff", value: &debug.asyncpreemptoff},
357 {name: "inittrace", value: &debug.inittrace},
358 {name: "harddecommit", value: &debug.harddecommit},
359 {name: "adaptivestackstart", value: &debug.adaptivestackstart},
360 {name: "panicnil", atomic: &debug.panicnil},
// parsedebugvars initializes the runtime debug settings at startup:
// hard-coded defaults first, then compile-time GODEBUG defaults, then the
// GODEBUG environment variable, and finally GOTRACEBACK. Several lines
// (e.g. other non-zero defaults and the v.value branch) are elided from
// this excerpt.
363 func parsedebugvars() {
367 debug.adaptivestackstart = 1 // set this to 0 to turn larger initial goroutine stacks off
369 // On Linux, MADV_FREE is faster than MADV_DONTNEED,
370 // but doesn't affect many of the statistics that
371 // MADV_DONTNEED does until the memory is actually
372 // reclaimed. This generally leads to poor user
373 // experience, like confusing stats in top and other
374 // monitoring tools; and bad integration with
375 // management systems that respond to memory usage.
376 // Hence, default to MADV_DONTNEED.
377 debug.madvdontneed = 1
380 godebug := gogetenv("GODEBUG")
386 // apply runtime defaults, if any
387 for _, v := range dbgvars {
389 // Every var should have either v.value or v.atomic set.
// NOTE(review): excerpt gap — the branch storing v.def through v.value is
// not visible; only the atomic branch appears below.
392 } else if v.atomic != nil {
393 v.atomic.Store(v.def)
398 // apply compile-time GODEBUG settings
399 parsegodebug(godebugDefault, nil)
401 // apply environment settings
402 parsegodebug(godebug, nil)
// malloc debugging is on iff any of these individual options is set; this
// single flag is what the malloc fast path consults.
404 debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0
// GOTRACEBACK is handled separately from GODEBUG; remember the
// environment-derived value so SetTraceback can layer on it later.
406 setTraceback(gogetenv("GOTRACEBACK"))
407 traceback_env = traceback_cache
410 // reparsedebugvars reparses the runtime's debug variables
411 // because the environment variable has been changed to env.
412 func reparsedebugvars(env string) {
// seen tracks which names have already been applied, so later passes
// (compile-time defaults, then hard defaults) only fill in the gaps.
413 seen := make(map[string]bool)
414 // apply environment settings
415 parsegodebug(env, seen)
416 // apply compile-time GODEBUG settings for as-yet-unseen variables
417 parsegodebug(godebugDefault, seen)
418 // apply defaults for as-yet-unseen variables
// Only atomic vars are reset here: value-backed vars are startup-only.
419 for _, v := range dbgvars {
// NOTE(review): excerpt gap — the body storing v.def (presumably
// v.atomic.Store(v.def)) and the function's close are not visible.
420 if v.atomic != nil && !seen[v.name] {
426 // parsegodebug parses the godebug string, updating variables listed in dbgvars.
427 // If seen == nil, this is startup time and we process the string left to right
428 // overwriting older settings with newer ones.
429 // If seen != nil, $GODEBUG has changed and we are doing an
430 // incremental update. To avoid flapping in the case where a value is
431 // set multiple times (perhaps in the default and the environment,
432 // or perhaps twice in the environment), we process the string right-to-left
433 // and only change values not already seen. After doing this for both
434 // the environment and the default settings, the caller must also call
435 // cleargodebug(seen) to reset any now-unset values back to their defaults.
// NOTE(review): several interior lines (branch boundaries, the value-store
// statements, loop closes) are elided from this excerpt; comments below are
// limited to what the visible lines show.
436 func parsegodebug(godebug string, seen map[string]bool) {
// p holds the unconsumed remainder; field holds the current name=value pair.
437 for p := godebug; p != ""; {
440 // startup: process left to right, overwriting older settings with newer
441 i := bytealg.IndexByteString(p, ',')
445 field, p = p[:i], p[i+1:]
448 // incremental update: process right to left, updating and skipping seen
// Scan backwards for the last comma to peel the rightmost field off p.
450 for i >= 0 && p[i] != ',' {
456 p, field = p[:i], p[i+1:]
// Split the field at '=' into key and value.
459 i := bytealg.IndexByteString(field, '=')
463 key, value := field[:i], field[i+1:]
471 // Update MemProfileRate directly here since it
472 // is int, not int32, and should only be updated
473 // if specified in GODEBUG.
474 if seen == nil && key == "memprofilerate" {
475 if n, ok := atoi(value); ok {
// Match the key against the dbgvars table; startup writes go through
// v.value, incremental updates through v.atomic.
479 for _, v := range dbgvars {
481 if n, ok := atoi32(value); ok {
482 if seen == nil && v.value != nil {
484 } else if v.atomic != nil {
// cgocheck levels above 1 moved to a build-time experiment; reject them.
493 if debug.cgocheck > 1 {
494 throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.")
498 //go:linkname setTraceback runtime/debug.SetTraceback
// setTraceback parses a GOTRACEBACK-style level string and publishes the
// packed result to traceback_cache. The switch's case labels are elided
// from this excerpt; the visible arms appear to correspond (in order) to
// level 1, "all", "system", and "crash" — confirm against upstream.
499 func setTraceback(level string) {
505 t = 1 << tracebackShift
507 t = 1<<tracebackShift | tracebackAll
509 t = 2<<tracebackShift | tracebackAll
511 t = 2<<tracebackShift | tracebackAll | tracebackCrash
// Numeric fallback: accept any level that round-trips through uint32.
514 if n, ok := atoi(level); ok && n == int(uint32(n)) {
515 t |= uint32(n) << tracebackShift
518 // when C owns the process, simply exit'ing the process on fatal errors
519 // and panics is surprising. Be louder and abort instead.
// NOTE(review): the body of this branch (presumably t |= tracebackCrash)
// is not visible in this excerpt.
517 if islibrary || isarchive {
// Publish atomically so concurrent gotraceback callers see a whole value.
526 atomic.Store(&traceback_cache, t)
529 // Poor mans 64-bit division.
530 // This is a very special function, do not use it if you are not sure what you are doing.
531 // int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
532 // Handles overflow in a time-specific manner.
533 // This keeps us within no-split stack limits on 32-bit processors.
// NOTE(review): excerpt gaps — the initialization of res, the overflow
// check, the remainder store through rem, and the return are not visible.
536 func timediv(v int64, div int32, rem *int32) int32 {
// Shift-and-subtract long division: try each bit of the quotient from
// high (30) to low (0).
538 for bit := 30; bit >= 0; bit-- {
539 if v >= int64(div)<<uint(bit) {
540 v = v - (int64(div) << uint(bit))
541 // Before this for loop, res was 0, thus all these
542 // power of 2 increments are now just bitsets.
543 res |= 1 << uint(bit)
558 // Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
// releasem releases a hold on mp. The decrement of mp.locks is elided from
// this excerpt; once the M holds no locks and the current G has a pending
// preemption request, the request is restored by poisoning stackguard0.
568 func releasem(mp *m) {
571 if mp.locks == 0 && gp.preempt {
572 // restore the preemption request in case we've cleared it in newstack
573 gp.stackguard0 = stackPreempt
577 //go:linkname reflect_typelinks reflect.typelinks
// reflect_typelinks gathers, for every active module, the base pointer of
// its type data section and its typelink offsets, for use by package
// reflect (linknamed as reflect.typelinks). The return statement is elided
// from this excerpt.
578 func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
579 modules := activeModules()
// Seed both slices with the first module, then append the rest; indexing
// modules[0] presumably relies on there always being at least one active
// module (the program itself) — confirm upstream.
580 sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
581 ret := [][]int32{modules[0].typelinks}
582 for _, md := range modules[1:] {
583 sections = append(sections, unsafe.Pointer(md.types))
584 ret = append(ret, md.typelinks)
589 // reflect_resolveNameOff resolves a name offset from a base pointer.
591 //go:linkname reflect_resolveNameOff reflect.resolveNameOff
// The int32 offset is converted to the runtime's nameOff type and resolved
// relative to the module containing ptrInModule; the raw name bytes are
// returned as an unsafe.Pointer for package reflect.
592 func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
593 return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
596 // reflect_resolveTypeOff resolves an *rtype offset from a base type.
598 //go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
// off is reinterpreted as a typeOff and resolved via the base type's
// typeOff method; the resulting *_type is handed back as unsafe.Pointer.
599 func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
600 return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
603 // reflect_resolveTextOff resolves a function pointer offset from a base type.
605 //go:linkname reflect_resolveTextOff reflect.resolveTextOff
// off is reinterpreted as a textOff and resolved via the base type's
// textOff method, yielding a code pointer (already unsafe.Pointer, so no
// extra conversion is needed here, unlike the name/type variants above).
606 func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
607 return (*_type)(rtype).textOff(textOff(off))
611 // reflectlite_resolveNameOff resolves a name offset from a base pointer.
613 //go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
// Same resolution as reflect_resolveNameOff, exposed separately for
// internal/reflectlite via its own linkname.
614 func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
615 return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
618 // reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
620 //go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
// Same resolution as reflect_resolveTypeOff, exposed separately for
// internal/reflectlite via its own linkname.
621 func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
622 return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
625 // reflect_addReflectOff adds a pointer to the reflection offset lookup map.
627 //go:linkname reflect_addReflectOff reflect.addReflectOff
// NOTE(review): this function continues past the visible excerpt (the lock
// acquisition/release around reflectOffs and the return are elided).
628 func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
// Lazily initialize the forward (id -> ptr) and inverse (ptr -> id) maps.
630 if reflectOffs.m == nil {
631 reflectOffs.m = make(map[int32]unsafe.Pointer)
632 reflectOffs.minv = make(map[unsafe.Pointer]int32)
633 reflectOffs.next = -1
// Reuse the existing id if this pointer was registered before; otherwise
// allocate the next id and record it in both directions.
635 id, found := reflectOffs.minv[ptr]
637 id = reflectOffs.next
638 reflectOffs.next-- // use negative offsets as IDs to aid debugging
639 reflectOffs.m[id] = ptr
640 reflectOffs.minv[ptr] = id