1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
10 "runtime/internal/atomic"
// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

// traceback_cache holds the packed traceback settings; the default is
// level 2 ("system") with neither "all" nor "crash" set.
var traceback_cache uint32 = 2 << tracebackShift

// traceback_env preserves the bits derived from the GOTRACEBACK
// environment variable so they can be re-applied by setTraceback.
var traceback_env uint32
28 // gotraceback returns the current traceback settings.
30 // If level is 0, suppress all tracebacks.
31 // If level is 1, show tracebacks, but exclude runtime frames.
32 // If level is 2, show tracebacks including runtime frames.
33 // If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
34 // If crash is set, crash (core dump, etc) after tracebacking.
37 func gotraceback() (level int32, all, crash bool) {
39 t := atomic.Load(&traceback_cache)
40 crash = t&tracebackCrash != 0
41 all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0
42 if gp.m.traceback != 0 {
43 level = int32(gp.m.traceback)
44 } else if gp.m.throwing >= throwTypeRuntime {
45 // Always include runtime frames in runtime throws unless
46 // otherwise overridden by m.traceback.
49 level = int32(t >> tracebackShift)
59 // nosplit for use in linux startup sysargs.
62 func argv_index(argv **byte, i int32) *byte {
63 return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
66 func args(c int32, v **byte) {
73 if GOOS == "windows" {
76 argslice = make([]string, argc)
77 for i := int32(0); i < argc; i++ {
78 argslice[i] = gostringnocopy(argv_index(argv, i))
83 // TODO(austin): ppc64 in dynamic linking mode doesn't
84 // guarantee env[] will immediately follow argv. Might cause
87 for argv_index(argv, argc+1+n) != nil {
91 envs = make([]string, n)
92 for i := int32(0); i < n; i++ {
93 envs[i] = gostring(argv_index(argv, argc+1+i))
97 func environ() []string {
// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64
105 func testAtomic64() {
108 if atomic.Cas64(&test_z64, test_x64, 1) {
109 throw("cas64 failed")
112 throw("cas64 failed")
115 if !atomic.Cas64(&test_z64, test_x64, 1) {
116 throw("cas64 failed")
118 if test_x64 != 42 || test_z64 != 1 {
119 throw("cas64 failed")
121 if atomic.Load64(&test_z64) != 1 {
122 throw("load64 failed")
124 atomic.Store64(&test_z64, (1<<40)+1)
125 if atomic.Load64(&test_z64) != (1<<40)+1 {
126 throw("store64 failed")
128 if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
129 throw("xadd64 failed")
131 if atomic.Load64(&test_z64) != (2<<40)+2 {
132 throw("xadd64 failed")
134 if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
135 throw("xchg64 failed")
137 if atomic.Load64(&test_z64) != (3<<40)+3 {
138 throw("xchg64 failed")
168 if unsafe.Sizeof(a) != 1 {
171 if unsafe.Sizeof(b) != 1 {
174 if unsafe.Sizeof(c) != 2 {
177 if unsafe.Sizeof(d) != 2 {
180 if unsafe.Sizeof(e) != 4 {
183 if unsafe.Sizeof(f) != 4 {
186 if unsafe.Sizeof(g) != 8 {
189 if unsafe.Sizeof(h) != 8 {
192 if unsafe.Sizeof(i) != 4 {
195 if unsafe.Sizeof(j) != 8 {
198 if unsafe.Sizeof(k) != goarch.PtrSize {
201 if unsafe.Sizeof(l) != goarch.PtrSize {
204 if unsafe.Sizeof(x1) != 1 {
205 throw("bad unsafe.Sizeof x1")
207 if unsafe.Offsetof(y1.y) != 1 {
208 throw("bad offsetof y1.y")
210 if unsafe.Sizeof(y1) != 2 {
211 throw("bad unsafe.Sizeof y1")
214 if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
220 if !atomic.Cas(&z, 1, 2) {
228 if atomic.Cas(&z, 5, 6) {
236 if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
243 m = [4]byte{1, 1, 1, 1}
244 atomic.Or8(&m[1], 0xf0)
245 if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
249 m = [4]byte{0xff, 0xff, 0xff, 0xff}
250 atomic.And8(&m[1], 0x1)
251 if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
255 *(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
263 *(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
271 *(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
279 *(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
289 if fixedStack != round2(fixedStack) {
290 throw("FixedStack is not power-of-2")
294 throw("assembly checks failed")
300 value *int32 // for variables that can only be set at startup
301 atomic *atomic.Int32 // for variables that can be changed during execution
302 def int32 // default value (ideally zero)
305 // Holds variables parsed from GODEBUG env var,
306 // except for "memprofilerate" since there is an
307 // existing int var for that value, which may
308 // already have an initial value.
312 dontfreezetheworld int32
316 gcshrinkstackoff int32
320 madvdontneed int32 // for Linux; issue 28466
324 tracebackancestors int32
325 asyncpreemptoff int32
327 adaptivestackstart int32
328 tracefpunwindoff int32
330 // debug.malloc is used as a combined debug check
331 // in the malloc function and should be set
332 // if any of the below debug options is != 0.
338 panicnil atomic.Int32
341 var dbgvars = []*dbgVar{
342 {name: "allocfreetrace", value: &debug.allocfreetrace},
343 {name: "clobberfree", value: &debug.clobberfree},
344 {name: "cgocheck", value: &debug.cgocheck},
345 {name: "dontfreezetheworld", value: &debug.dontfreezetheworld},
346 {name: "efence", value: &debug.efence},
347 {name: "gccheckmark", value: &debug.gccheckmark},
348 {name: "gcpacertrace", value: &debug.gcpacertrace},
349 {name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff},
350 {name: "gcstoptheworld", value: &debug.gcstoptheworld},
351 {name: "gctrace", value: &debug.gctrace},
352 {name: "invalidptr", value: &debug.invalidptr},
353 {name: "madvdontneed", value: &debug.madvdontneed},
354 {name: "sbrk", value: &debug.sbrk},
355 {name: "scavtrace", value: &debug.scavtrace},
356 {name: "scheddetail", value: &debug.scheddetail},
357 {name: "schedtrace", value: &debug.schedtrace},
358 {name: "tracebackancestors", value: &debug.tracebackancestors},
359 {name: "asyncpreemptoff", value: &debug.asyncpreemptoff},
360 {name: "inittrace", value: &debug.inittrace},
361 {name: "harddecommit", value: &debug.harddecommit},
362 {name: "adaptivestackstart", value: &debug.adaptivestackstart},
363 {name: "tracefpunwindoff", value: &debug.tracefpunwindoff},
364 {name: "panicnil", atomic: &debug.panicnil},
367 func parsedebugvars() {
371 debug.adaptivestackstart = 1 // set this to 0 to turn larger initial goroutine stacks off
373 // On Linux, MADV_FREE is faster than MADV_DONTNEED,
374 // but doesn't affect many of the statistics that
375 // MADV_DONTNEED does until the memory is actually
376 // reclaimed. This generally leads to poor user
377 // experience, like confusing stats in top and other
378 // monitoring tools; and bad integration with
379 // management systems that respond to memory usage.
380 // Hence, default to MADV_DONTNEED.
381 debug.madvdontneed = 1
384 godebug := gogetenv("GODEBUG")
390 // apply runtime defaults, if any
391 for _, v := range dbgvars {
393 // Every var should have either v.value or v.atomic set.
396 } else if v.atomic != nil {
397 v.atomic.Store(v.def)
402 // apply compile-time GODEBUG settings
403 parsegodebug(godebugDefault, nil)
405 // apply environment settings
406 parsegodebug(godebug, nil)
408 debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0
410 setTraceback(gogetenv("GOTRACEBACK"))
411 traceback_env = traceback_cache
414 // reparsedebugvars reparses the runtime's debug variables
415 // because the environment variable has been changed to env.
416 func reparsedebugvars(env string) {
417 seen := make(map[string]bool)
418 // apply environment settings
419 parsegodebug(env, seen)
420 // apply compile-time GODEBUG settings for as-yet-unseen variables
421 parsegodebug(godebugDefault, seen)
422 // apply defaults for as-yet-unseen variables
423 for _, v := range dbgvars {
424 if v.atomic != nil && !seen[v.name] {
430 // parsegodebug parses the godebug string, updating variables listed in dbgvars.
431 // If seen == nil, this is startup time and we process the string left to right
432 // overwriting older settings with newer ones.
433 // If seen != nil, $GODEBUG has changed and we are doing an
434 // incremental update. To avoid flapping in the case where a value is
435 // set multiple times (perhaps in the default and the environment,
436 // or perhaps twice in the environment), we process the string right-to-left
437 // and only change values not already seen. After doing this for both
438 // the environment and the default settings, the caller must also call
439 // cleargodebug(seen) to reset any now-unset values back to their defaults.
440 func parsegodebug(godebug string, seen map[string]bool) {
441 for p := godebug; p != ""; {
444 // startup: process left to right, overwriting older settings with newer
445 i := bytealg.IndexByteString(p, ',')
449 field, p = p[:i], p[i+1:]
452 // incremental update: process right to left, updating and skipping seen
454 for i >= 0 && p[i] != ',' {
460 p, field = p[:i], p[i+1:]
463 i := bytealg.IndexByteString(field, '=')
467 key, value := field[:i], field[i+1:]
475 // Update MemProfileRate directly here since it
476 // is int, not int32, and should only be updated
477 // if specified in GODEBUG.
478 if seen == nil && key == "memprofilerate" {
479 if n, ok := atoi(value); ok {
483 for _, v := range dbgvars {
485 if n, ok := atoi32(value); ok {
486 if seen == nil && v.value != nil {
488 } else if v.atomic != nil {
497 if debug.cgocheck > 1 {
498 throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.")
502 //go:linkname setTraceback runtime/debug.SetTraceback
503 func setTraceback(level string) {
509 t = 1 << tracebackShift
511 t = 1<<tracebackShift | tracebackAll
513 t = 2<<tracebackShift | tracebackAll
515 t = 2<<tracebackShift | tracebackAll | tracebackCrash
517 if GOOS == "windows" {
518 t = 2<<tracebackShift | tracebackAll | tracebackCrash
525 if n, ok := atoi(level); ok && n == int(uint32(n)) {
526 t |= uint32(n) << tracebackShift
529 // when C owns the process, simply exit'ing the process on fatal errors
530 // and panics is surprising. Be louder and abort instead.
531 if islibrary || isarchive {
537 atomic.Store(&traceback_cache, t)
// Poor mans 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner: if the quotient does not
// fit in an int32, it returns 0x7fffffff and sets *rem to 0.
// This keeps us within no-split stack limits on 32-bit processors.
//
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			// Before this for loop, res was 0, thus all these
			// power of 2 increments are now just bitsets.
			res |= 1 << uint(bit)
		}
	}
	// If div still fits into v, the quotient needed more than 31 bits:
	// saturate instead of returning a wrong value.
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}
569 // Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
579 func releasem(mp *m) {
582 if mp.locks == 0 && gp.preempt {
583 // restore the preemption request in case we've cleared it in newstack
584 gp.stackguard0 = stackPreempt
588 //go:linkname reflect_typelinks reflect.typelinks
589 func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
590 modules := activeModules()
591 sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
592 ret := [][]int32{modules[0].typelinks}
593 for _, md := range modules[1:] {
594 sections = append(sections, unsafe.Pointer(md.types))
595 ret = append(ret, md.typelinks)
600 // reflect_resolveNameOff resolves a name offset from a base pointer.
602 //go:linkname reflect_resolveNameOff reflect.resolveNameOff
603 func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
604 return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
607 // reflect_resolveTypeOff resolves an *rtype offset from a base type.
609 //go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
610 func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
611 return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
614 // reflect_resolveTextOff resolves a function pointer offset from a base type.
616 //go:linkname reflect_resolveTextOff reflect.resolveTextOff
617 func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
618 return toRType((*_type)(rtype)).textOff(textOff(off))
622 // reflectlite_resolveNameOff resolves a name offset from a base pointer.
624 //go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
625 func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
626 return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
629 // reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
631 //go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
632 func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
633 return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
636 // reflect_addReflectOff adds a pointer to the reflection offset lookup map.
638 //go:linkname reflect_addReflectOff reflect.addReflectOff
639 func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
641 if reflectOffs.m == nil {
642 reflectOffs.m = make(map[int32]unsafe.Pointer)
643 reflectOffs.minv = make(map[unsafe.Pointer]int32)
644 reflectOffs.next = -1
646 id, found := reflectOffs.minv[ptr]
648 id = reflectOffs.next
649 reflectOffs.next-- // use negative offsets as IDs to aid debugging
650 reflectOffs.m[id] = ptr
651 reflectOffs.minv[ptr] = id