1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
10 "runtime/internal/atomic"
// NOTE(review): this excerpt is heavily elided — the const block header and
// the sibling constants (presumably tracebackAll and tracebackShift, which
// are referenced below) are not visible here; confirm against the full file.
14 // Keep a cached value to make gotraceback fast,
15 // since we call it on every call to gentraceback.
16 // The cached value is a uint32 in which the low bits
17 // are the "crash" and "all" settings and the remaining
18 // bits are the traceback value (0 off, 1 on, 2 include system).
20 tracebackCrash = 1 << iota
// Default: traceback level 2 ("include system" frames), crash/all bits unset.
25 var traceback_cache uint32 = 2 << tracebackShift
// Snapshot of the environment-derived setting; written in parsedebugvars
// (traceback_env = traceback_cache) after GOTRACEBACK is parsed.
26 var traceback_env uint32
28 // gotraceback returns the current traceback settings.
30 // If level is 0, suppress all tracebacks.
31 // If level is 1, show tracebacks, but exclude runtime frames.
32 // If level is 2, show tracebacks including runtime frames.
33 // If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
34 // If crash is set, crash (core dump, etc) after tracebacking.
37 func gotraceback() (level int32, all, crash bool) {
// NOTE(review): a line is elided here — `gp` is used below but its
// declaration (presumably gp := getg()) is not visible; confirm.
39 t := atomic.Load(&traceback_cache)
40 crash = t&tracebackCrash != 0
// A user-level throw forces printing all goroutines regardless of cached bits.
41 all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0
// A per-M traceback override takes precedence over the cached setting.
42 if gp.m.traceback != 0 {
43 level = int32(gp.m.traceback)
44 } else if gp.m.throwing >= throwTypeRuntime {
45 // Always include runtime frames in runtime throws unless
46 // otherwise overridden by m.traceback.
// NOTE(review): the runtime-throw branch body and the final else/return
// (original lines 47-48 and 50+) are elided from this excerpt.
49 level = int32(t >> tracebackShift)
// argv_index returns argv[i] by raw pointer arithmetic over the C-style
// argv vector (entries are *byte, PtrSize bytes apart).
59 // nosplit for use in linux startup sysargs.
// NOTE(review): the //go:nosplit directive and the closing brace are
// elided from this excerpt.
62 func argv_index(argv **byte, i int32) *byte {
63 return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
// args records the C argc/argv passed to the process at startup.
// NOTE(review): this excerpt is heavily elided; the lines below appear to
// span several distinct startup helpers (args itself, then the argv-to-Go
// string copy and the environment scan, which in the Go runtime live in
// separate functions) — confirm against the full file.
66 func args(c int32, v **byte) {
73 if GOOS == "windows" {
// Build Go strings that alias the OS-provided argv (gostringnocopy: no copy).
76 argslice = make([]string, argc)
77 for i := int32(0); i < argc; i++ {
78 argslice[i] = gostringnocopy(argv_index(argv, i))
83 // TODO(austin): ppc64 in dynamic linking mode doesn't
84 // guarantee env[] will immediately follow argv. Might cause
// Count env entries: env[] is scanned past argv[] (offset argc+1) until nil.
87 for argv_index(argv, argc+1+n) != nil {
// Copy the environment into Go-owned strings (gostring copies the bytes).
91 envs = make([]string, n)
92 for i := int32(0); i < n; i++ {
93 envs[i] = gostring(argv_index(argv, argc+1+i))
// environ returns the runtime's cached environment strings.
// NOTE(review): the body is elided from this excerpt; presumably it
// returns the envs slice populated above — confirm.
97 func environ() []string {
101 // TODO: These should be locals in testAtomic64, but we don't 8-byte
102 // align stack variables on 386.
// Scratch operands for the startup 64-bit atomics self-test below.
103 var test_z64, test_x64 uint64
// testAtomic64 is a startup self-test of the 64-bit atomic primitives
// (Cas64, Load64, Store64, Xadd64, Xchg64); any failure throws.
// NOTE(review): several lines are elided from this excerpt — the
// assignments resetting test_z64/test_x64 between steps, closing braces,
// and some condition lines — so adjacent throws below lack their guards.
105 func testAtomic64() {
// CAS must fail while the current value differs from the old value.
108 if atomic.Cas64(&test_z64, test_x64, 1) {
109 throw("cas64 failed")
112 throw("cas64 failed")
// After the (elided) reset, CAS with a matching old value must succeed.
115 if !atomic.Cas64(&test_z64, test_x64, 1) {
116 throw("cas64 failed")
118 if test_x64 != 42 || test_z64 != 1 {
119 throw("cas64 failed")
121 if atomic.Load64(&test_z64) != 1 {
122 throw("load64 failed")
// Values above 2^32 catch accidental truncation to 32 bits.
124 atomic.Store64(&test_z64, (1<<40)+1)
125 if atomic.Load64(&test_z64) != (1<<40)+1 {
126 throw("store64 failed")
128 if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
129 throw("xadd64 failed")
131 if atomic.Load64(&test_z64) != (2<<40)+2 {
132 throw("xadd64 failed")
134 if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
135 throw("xchg64 failed")
137 if atomic.Load64(&test_z64) != (3<<40)+3 {
138 throw("xchg64 failed")
// NOTE(review): interior of a larger startup sanity-check function whose
// header is elided from this excerpt (presumably the runtime's check());
// the declarations of a..l, x1, y1, z, m, j, j1, i, i1 used below are also
// elided, as are most throw bodies and closing braces — confirm against
// the full file.
// Verify the compiler's sizes for the basic scalar types.
168 if unsafe.Sizeof(a) != 1 {
171 if unsafe.Sizeof(b) != 1 {
174 if unsafe.Sizeof(c) != 2 {
177 if unsafe.Sizeof(d) != 2 {
180 if unsafe.Sizeof(e) != 4 {
183 if unsafe.Sizeof(f) != 4 {
186 if unsafe.Sizeof(g) != 8 {
189 if unsafe.Sizeof(h) != 8 {
192 if unsafe.Sizeof(i) != 4 {
195 if unsafe.Sizeof(j) != 8 {
// Pointer-sized values must match the target's PtrSize.
198 if unsafe.Sizeof(k) != goarch.PtrSize {
201 if unsafe.Sizeof(l) != goarch.PtrSize {
204 if unsafe.Sizeof(x1) != 1 {
205 throw("bad unsafe.Sizeof x1")
// Struct layout check: a second 1-byte field must sit at offset 1.
207 if unsafe.Offsetof(y1.y) != 1 {
208 throw("bad offsetof y1.y")
210 if unsafe.Sizeof(y1) != 2 {
211 throw("bad unsafe.Sizeof y1")
// Sanity-check the nosplit-safe division helper timediv (defined below).
214 if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
// 32-bit CAS behavior, including an all-ones boundary value.
220 if !atomic.Cas(&z, 1, 2) {
228 if atomic.Cas(&z, 5, 6) {
236 if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
// Byte-granular atomic OR must not disturb neighboring bytes.
243 m = [4]byte{1, 1, 1, 1}
244 atomic.Or8(&m[1], 0xf0)
245 if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
// Likewise for byte-granular atomic AND.
249 m = [4]byte{0xff, 0xff, 0xff, 0xff}
250 atomic.And8(&m[1], 0x1)
251 if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
// Bit-pattern punning stores; the comparisons that consume these values
// are elided from this excerpt.
255 *(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
263 *(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
271 *(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
279 *(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
// The fixed stack size must be a power of two (round2 is its own fixpoint).
289 if fixedStack != round2(fixedStack) {
290 throw("FixedStack is not power-of-2")
294 throw("assembly checks failed")
// NOTE(review): fields of the runtime's GODEBUG variable descriptor
// (presumably type dbgVar, per the dbgvars slice below); the struct header
// and the name field are elided from this excerpt. Exactly one of
// value/atomic appears to be set per variable — confirm.
300 value *int32 // for variables that can only be set at startup
301 atomic *atomic.Int32 // for variables that can be changed during execution
302 def int32 // default value (ideally zero)
305 // Holds variables parsed from GODEBUG env var,
306 // except for "memprofilerate" since there is an
307 // existing int var for that value, which may
308 // already have an initial value.
// NOTE(review): the declaration header (presumably `var debug struct {`)
// and many sibling fields are elided from this excerpt.
312 dontfreezetheworld int32
316 gcshrinkstackoff int32
320 madvdontneed int32 // for Linux; issue 28466
324 tracebackancestors int32
325 asyncpreemptoff int32
327 adaptivestackstart int32
328 tracefpunwindoff int32
329 traceadvanceperiod int32
331 // debug.malloc is used as a combined debug check
332 // in the malloc function and should be set
333 // if any of the below debug options is != 0.
// Atomic because panicnil is registered with the atomic field in dbgvars,
// i.e. it may be changed after startup.
339 panicnil atomic.Int32
// dbgvars maps GODEBUG option names to the corresponding debug fields.
// Startup-only options register value; runtime-changeable ones (panicnil)
// register atomic. Consumed by parsedebugvars/parsegodebug below.
// NOTE(review): the closing brace of this slice literal is elided.
342 var dbgvars = []*dbgVar{
343 {name: "allocfreetrace", value: &debug.allocfreetrace},
344 {name: "clobberfree", value: &debug.clobberfree},
345 {name: "cgocheck", value: &debug.cgocheck},
346 {name: "dontfreezetheworld", value: &debug.dontfreezetheworld},
347 {name: "efence", value: &debug.efence},
348 {name: "gccheckmark", value: &debug.gccheckmark},
349 {name: "gcpacertrace", value: &debug.gcpacertrace},
350 {name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff},
351 {name: "gcstoptheworld", value: &debug.gcstoptheworld},
352 {name: "gctrace", value: &debug.gctrace},
353 {name: "invalidptr", value: &debug.invalidptr},
354 {name: "madvdontneed", value: &debug.madvdontneed},
355 {name: "sbrk", value: &debug.sbrk},
356 {name: "scavtrace", value: &debug.scavtrace},
357 {name: "scheddetail", value: &debug.scheddetail},
358 {name: "schedtrace", value: &debug.schedtrace},
359 {name: "tracebackancestors", value: &debug.tracebackancestors},
360 {name: "asyncpreemptoff", value: &debug.asyncpreemptoff},
361 {name: "inittrace", value: &debug.inittrace},
362 {name: "harddecommit", value: &debug.harddecommit},
363 {name: "adaptivestackstart", value: &debug.adaptivestackstart},
364 {name: "tracefpunwindoff", value: &debug.tracefpunwindoff},
365 {name: "panicnil", atomic: &debug.panicnil},
366 {name: "traceadvanceperiod", value: &debug.traceadvanceperiod},
// parsedebugvars initializes the debug settings at startup: non-zero
// runtime defaults first, then compile-time //go:debug settings
// (godebugDefault), then the GODEBUG environment variable — later layers
// overwrite earlier ones.
// NOTE(review): several lines are elided from this excerpt, including
// other defaults, any platform condition guarding madvdontneed, and the
// v.value branch of the default-application loop.
369 func parsedebugvars() {
373 debug.adaptivestackstart = 1 // set this to 0 to turn larger initial goroutine stacks off
375 // On Linux, MADV_FREE is faster than MADV_DONTNEED,
376 // but doesn't affect many of the statistics that
377 // MADV_DONTNEED does until the memory is actually
378 // reclaimed. This generally leads to poor user
379 // experience, like confusing stats in top and other
380 // monitoring tools; and bad integration with
381 // management systems that respond to memory usage.
382 // Hence, default to MADV_DONTNEED.
383 debug.madvdontneed = 1
385 debug.traceadvanceperiod = defaultTraceAdvancePeriod
387 godebug := gogetenv("GODEBUG")
393 // apply runtime defaults, if any
394 for _, v := range dbgvars {
396 // Every var should have either v.value or v.atomic set.
399 } else if v.atomic != nil {
400 v.atomic.Store(v.def)
405 // apply compile-time GODEBUG settings
406 parsegodebug(godebugDefault, nil)
408 // apply environment settings
409 parsegodebug(godebug, nil)
// malloc's combined fast-path debug flag (see the debug struct comment).
411 debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0
413 setTraceback(gogetenv("GOTRACEBACK"))
// Snapshot the env-derived traceback bits; presumably consulted by later
// setTraceback calls — confirm against the elided part of setTraceback.
414 traceback_env = traceback_cache
417 // reparsedebugvars reparses the runtime's debug variables
418 // because the environment variable has been changed to env.
// Only atomic (runtime-changeable) variables can be updated here; the
// layering mirrors parsedebugvars but runs right-to-left via seen.
419 func reparsedebugvars(env string) {
420 seen := make(map[string]bool)
421 // apply environment settings
422 parsegodebug(env, seen)
423 // apply compile-time GODEBUG settings for as-yet-unseen variables
424 parsegodebug(godebugDefault, seen)
425 // apply defaults for as-yet-unseen variables
426 for _, v := range dbgvars {
// NOTE(review): the loop body (presumably v.atomic.Store(v.def)) and the
// function's closing braces are elided from this excerpt.
427 if v.atomic != nil && !seen[v.name] {
433 // parsegodebug parses the godebug string, updating variables listed in dbgvars.
434 // If seen == nil, this is startup time and we process the string left to right
435 // overwriting older settings with newer ones.
436 // If seen != nil, $GODEBUG has changed and we are doing an
437 // incremental update. To avoid flapping in the case where a value is
438 // set multiple times (perhaps in the default and the environment,
439 // or perhaps twice in the environment), we process the string right-to-left
440 // and only change values not already seen. After doing this for both
441 // the environment and the default settings, the caller must also call
442 // cleargodebug(seen) to reset any now-unset values back to their defaults.
// NOTE(review): many interior lines are elided from this excerpt — the
// field variable declaration, the seen==nil/seen!=nil branch structure,
// the comma-not-found cases, the key match against v.name, the actual
// assignments (value store vs atomic store) and seen bookkeeping.
443 func parsegodebug(godebug string, seen map[string]bool) {
444 for p := godebug; p != ""; {
447 // startup: process left to right, overwriting older settings with newer
448 i := bytealg.IndexByteString(p, ',')
// Split off the next comma-separated field and advance past it.
452 field, p = p[:i], p[i+1:]
455 // incremental update: process right to left, updating and skipping seen
// Scan backwards for the last comma to peel the rightmost field.
457 for i >= 0 && p[i] != ',' {
463 p, field = p[:i], p[i+1:]
// Each field has the form key=value; fields without '=' are skipped
// (handling elided here).
466 i := bytealg.IndexByteString(field, '=')
470 key, value := field[:i], field[i+1:]
478 // Update MemProfileRate directly here since it
479 // is int, not int32, and should only be updated
480 // if specified in GODEBUG.
481 if seen == nil && key == "memprofilerate" {
482 if n, ok := atoi(value); ok {
486 for _, v := range dbgvars {
488 if n, ok := atoi32(value); ok {
489 if seen == nil && v.value != nil {
491 } else if v.atomic != nil {
// cgocheck levels above 1 moved to a build-time experiment; reject them.
500 if debug.cgocheck > 1 {
501 throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.")
505 //go:linkname setTraceback runtime/debug.SetTraceback
// setTraceback parses a GOTRACEBACK-style level string into the packed
// traceback_cache encoding (level bits above tracebackShift, plus the
// tracebackAll/tracebackCrash flag bits).
// NOTE(review): the switch statement and its case labels are elided from
// this excerpt — only the case bodies are visible, so the exact string
// each assignment corresponds to cannot be confirmed from here.
506 func setTraceback(level string) {
512 t = 1 << tracebackShift
514 t = 1<<tracebackShift | tracebackAll
516 t = 2<<tracebackShift | tracebackAll
518 t = 2<<tracebackShift | tracebackAll | tracebackCrash
520 if GOOS == "windows" {
521 t = 2<<tracebackShift | tracebackAll | tracebackCrash
// Fallback: any other value is parsed as a numeric traceback level, kept
// only if it round-trips through uint32.
528 if n, ok := atoi(level); ok && n == int(uint32(n)) {
529 t |= uint32(n) << tracebackShift
532 // when C owns the process, simply exit'ing the process on fatal errors
533 // and panics is surprising. Be louder and abort instead.
534 if islibrary || isarchive {
// Publish atomically: gotraceback reads traceback_cache locklessly.
540 atomic.Store(&traceback_cache, t)
543 // Poor mans 64-bit division.
544 // This is a very special function, do not use it if you are not sure what you are doing.
545 // int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
546 // Handles overflow in a time-specific manner.
547 // This keeps us within no-split stack limits on 32-bit processors.
// NOTE(review): the //go:nosplit directive, the initialization of res, and
// the post-loop overflow/remainder handling and return are all elided from
// this excerpt.
550 func timediv(v int64, div int32, rem *int32) int32 {
// Binary long division: for each bit from high to low, subtract div<<bit
// when it fits and record that quotient bit in res.
552 for bit := 30; bit >= 0; bit-- {
553 if v >= int64(div)<<uint(bit) {
554 v = v - (int64(div) << uint(bit))
555 // Before this for loop, res was 0, thus all these
556 // power of 2 increments are now just bitsets.
557 res |= 1 << uint(bit)
572 // Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
// releasem undoes an acquirem: when the M's lock count returns to zero and
// a preemption is pending, re-arm the preemption request via stackguard0.
// NOTE(review): the decrement of mp.locks and the declaration of gp
// (presumably getg()), along with any nosplit directive and the closing
// braces, are elided from this excerpt.
582 func releasem(mp *m) {
585 if mp.locks == 0 && gp.preempt {
586 // restore the preemption request in case we've cleared it in newstack
587 gp.stackguard0 = stackPreempt
591 //go:linkname reflect_typelinks reflect.typelinks
// reflect_typelinks returns, for each active module, the base pointer of
// its type data section and its typelink offsets; linknamed into package
// reflect. The first module is seeded before the loop appends the rest.
// NOTE(review): the trailing return statement and closing brace are elided
// from this excerpt.
592 func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
593 modules := activeModules()
594 sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
595 ret := [][]int32{modules[0].typelinks}
596 for _, md := range modules[1:] {
597 sections = append(sections, unsafe.Pointer(md.types))
598 ret = append(ret, md.typelinks)
603 // reflect_resolveNameOff resolves a name offset from a base pointer.
605 //go:linkname reflect_resolveNameOff reflect.resolveNameOff
// NOTE(review): the closing brace is elided from this excerpt.
606 func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
607 return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
610 // reflect_resolveTypeOff resolves an *rtype offset from a base type.
612 //go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
// NOTE(review): the closing brace is elided from this excerpt.
613 func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
614 return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
617 // reflect_resolveTextOff resolves a function pointer offset from a base type.
619 //go:linkname reflect_resolveTextOff reflect.resolveTextOff
// NOTE(review): the closing brace is elided from this excerpt.
620 func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
621 return toRType((*_type)(rtype)).textOff(textOff(off))
625 // reflectlite_resolveNameOff resolves a name offset from a base pointer.
627 //go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
// Same implementation as reflect_resolveNameOff, linknamed for reflectlite.
// NOTE(review): the closing brace is elided from this excerpt.
628 func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
629 return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
632 // reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
634 //go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
// Same implementation as reflect_resolveTypeOff, linknamed for reflectlite.
// NOTE(review): the closing brace is elided from this excerpt.
635 func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
636 return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
639 // reflect_addReflectOff adds a pointer to the reflection offset lookup map.
641 //go:linkname reflect_addReflectOff reflect.addReflectOff
// NOTE(review): interior lines are elided from this excerpt — presumably
// locking around reflectOffs, the `if !found` guard before the id
// assignment, and the unlock/return tail; confirm against the full file.
642 func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
// Lazily initialize the forward (id->ptr) and inverse (ptr->id) maps.
644 if reflectOffs.m == nil {
645 reflectOffs.m = make(map[int32]unsafe.Pointer)
646 reflectOffs.minv = make(map[unsafe.Pointer]int32)
647 reflectOffs.next = -1
// Reuse the existing id when this pointer was registered before.
649 id, found := reflectOffs.minv[ptr]
651 id = reflectOffs.next
652 reflectOffs.next-- // use negative offsets as IDs to aid debugging
653 reflectOffs.m[id] = ptr
654 reflectOffs.minv[ptr] = id