1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bit
// is the "crash" setting and the top 31 bits are the
// gotraceback value (see gotraceback, which decodes it as
// traceback_cache&1 and traceback_cache>>1).
var traceback_cache uint32 = 2 << 1
// The GOTRACEBACK environment variable controls the
// behavior of a Go program that is crashing and exiting.
// GOTRACEBACK=0 suppress all tracebacks
// GOTRACEBACK=1 default behavior - show tracebacks but exclude runtime frames
// GOTRACEBACK=2 show tracebacks including runtime frames
// GOTRACEBACK=crash show tracebacks including runtime frames, then crash (core dump etc)
//
// gotraceback returns the current traceback level and reports in
// *crash whether the "crash" setting (low bit of traceback_cache)
// is in effect.
func gotraceback(crash *bool) int32 {
	// A nonzero per-m traceback override takes precedence over the
	// cached GOTRACEBACK setting.
	if _g_.m.traceback != 0 {
		return int32(_g_.m.traceback)
	// Decode traceback_cache: bit 0 is the crash flag, the remaining
	// 31 bits hold the traceback level.
	*crash = traceback_cache&1 != 0
	return int32(traceback_cache >> 1)
// nosplit for use in linux/386 startup linux_setup_vdso
//
// argv_index returns the i'th pointer of the C-style argv array,
// i.e. *(argv + i) with pointer-size stride.
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*ptrSize))
48 func args(c int32, v **byte) {
// TODO: Retire in favor of GOOS == checks.
61 // Information about what cpu features are available.
62 // Set on startup in asm_{x86/amd64}.s.
	// On Windows the command line is handled elsewhere, so skip the
	// argv conversion there.
	if GOOS == "windows" {
	// Convert the C argv strings into a Go []string; gostringnocopy
	// aliases the original C memory rather than copying the bytes.
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	// The environment block follows argv: entries start at
	// argv[argc+1] and run until a nil pointer; count them first.
	for argv_index(argv, argc+1+n) != nil {
	// Then convert each entry without copying the underlying bytes.
	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostringnocopy(argv_index(argv, argc+1+i))
91 func environ() []string {
	// --- 64-bit atomic primitives self-test ---
	// x64 is expected to hold 42 (asserted below). Exercise cas64,
	// atomicload64, atomicstore64, xadd64 and xchg64 on z64, checking
	// each observable result; values above 1<<32 verify that full
	// 64-bit width is handled.
	// TODO: PREFETCH((unsafe.Pointer)(&z64))
	if cas64(&z64, x64, 1) {
		gothrow("cas64 failed")
		gothrow("cas64 failed")
	if !cas64(&z64, x64, 1) {
		gothrow("cas64 failed")
	if x64 != 42 || z64 != 1 {
		gothrow("cas64 failed")
	if atomicload64(&z64) != 1 {
		gothrow("load64 failed")
	atomicstore64(&z64, (1<<40)+1)
	if atomicload64(&z64) != (1<<40)+1 {
		gothrow("store64 failed")
	// xadd64 returns the new value; the old value (1<<40)+1 plus the
	// delta (1<<40)+1 must equal (2<<40)+2.
	if xadd64(&z64, (1<<40)+1) != (2<<40)+2 {
		gothrow("xadd64 failed")
	if atomicload64(&z64) != (2<<40)+2 {
		gothrow("xadd64 failed")
	// xchg64 returns the previous value.
	if xchg64(&z64, (3<<40)+3) != (2<<40)+2 {
		gothrow("xchg64 failed")
	if atomicload64(&z64) != (3<<40)+3 {
		gothrow("xchg64 failed")

	// --- compiler/ABI sanity checks ---
	// Fixed sizes of the basic types (the declarations of a..l are in
	// lines not visible here).
	if unsafe.Sizeof(a) != 1 {
	if unsafe.Sizeof(b) != 1 {
	if unsafe.Sizeof(c) != 2 {
	if unsafe.Sizeof(d) != 2 {
	if unsafe.Sizeof(e) != 4 {
	if unsafe.Sizeof(f) != 4 {
	if unsafe.Sizeof(g) != 8 {
	if unsafe.Sizeof(h) != 8 {
	if unsafe.Sizeof(i) != 4 {
	if unsafe.Sizeof(j) != 8 {
	if unsafe.Sizeof(k) != ptrSize {
	if unsafe.Sizeof(l) != ptrSize {
	// Struct layout: y1's field y must sit at offset 1, so y1 is
	// presumably a 1-byte field followed by y — confirm in full file.
	if unsafe.Sizeof(x1) != 1 {
		gothrow("bad unsafe.Sizeof x1")
	if unsafe.Offsetof(y1.y) != 1 {
		gothrow("bad offsetof y1.y")
	if unsafe.Sizeof(y1) != 2 {
		gothrow("bad unsafe.Sizeof y1")

	// timediv must produce quotient 12345 with remainder 54321 stored
	// through the rem pointer.
	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		gothrow("bad timediv")

	// --- 32-bit cas and pointer cas (casp) ---
	if !cas(&z, 0xffffffff, 0xfffffffe) {
	k = unsafe.Pointer(uintptr(0xfedcb123))
	// Shift to form a pointer value wider than 32 bits (presumably
	// only on 64-bit platforms — the guarding condition is not
	// visible here).
	k = unsafe.Pointer(uintptr(unsafe.Pointer(k)) << 10)
	// casp with a non-matching old value must fail; with the matching
	// old value it must succeed.
	if casp(&k, nil, nil) {
	if !casp(&k, k, k1) {

	// atomicor8 must OR only the addressed byte (0x01|0xf0 = 0xf1),
	// leaving the neighboring bytes untouched.
	m = [4]byte{1, 1, 1, 1}
	atomicor8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {

	// --- float NaN semantics ---
	// All-ones bit patterns are NaNs; the (unseen) comparisons
	// presumably verify NaN compares unequal to everything including
	// itself — gothrow messages float64nan..float32nan3 mark each case.
	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
		gothrow("float64nan")
		gothrow("float64nan1")
	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
		gothrow("float64nan2")
		gothrow("float64nan3")
	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
		gothrow("float32nan")
		gothrow("float32nan1")
	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
		gothrow("float32nan2")
		gothrow("float32nan3")

	// The fixed stack size must be a power of two (round2 presumably
	// rounds up to the next power of two).
	if _FixedStack != round2(_FixedStack) {
		gothrow("FixedStack is not power-of-2")
// Do we report invalid pointers found during stack or heap scans?
//var invalidptr int32 = 1

// dbgvars lists the recognized GODEBUG option names and the int32
// runtime variables they set; parsedebugvars looks names up in this
// table and stores the parsed values through the pointers.
var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"invalidptr", &invalidptr},
	{"efence", &debug.efence},
	{"gctrace", &debug.gctrace},
	{"gcdead", &debug.gcdead},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"scavenge", &debug.scavenge},
// parsedebugvars parses the GODEBUG and GOTRACEBACK environment
// variables at startup, storing GODEBUG settings through the dbgvars
// table and encoding GOTRACEBACK into traceback_cache.
func parsedebugvars() {
	// GODEBUG is a comma-separated list of name=value pairs.
	for p := gogetenv("GODEBUG"); p != ""; {
		// Peel off the next field (the comma search that sets i is in
		// lines not visible here).
		field, p = p[:i], p[i+1:]
		// Split the field into name and value at '='.
		i = index(field, "=")
		key, value := field[:i], field[i+1:]
		// Match against the known GODEBUG variables and store the
		// parsed integer value.
		for _, v := range dbgvars {
			*v.value = int32(goatoi(value))
	// Encode GOTRACEBACK: the level goes in the high 31 bits of
	// traceback_cache, the "crash" flag in bit 0 (the case labels
	// selecting among these assignments are not visible here).
	switch p := gogetenv("GOTRACEBACK"); p {
		traceback_cache = 1 << 1
		traceback_cache = 2<<1 | 1
		traceback_cache = uint32(goatoi(p)) << 1
// Poor man's 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
//
// timediv returns v/div and stores the remainder through rem,
// using only shifts, compares and subtraction (no hardware divide).
func timediv(v int64, div int32, rem *int32) int32 {
	// Binary long division: for each bit position from high to low,
	// subtract div<<bit whenever it still fits and record that bit of
	// the quotient in res. 30 is the highest bit of a positive int32
	// result.
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			res += 1 << uint(bit)
// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

// releasem undoes an acquirem on mp: once the m's lock count is zero,
// if a preemption request arrived while the hold was in effect,
// re-arm the preemption trap on the current g.
func releasem(mp *m) {
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
// gomcache returns the mcache of the current g's m.
func gomcache() *mcache {
	return getg().m.mcache
395 var typelink, etypelink [0]byte
// typelinks returns a []*_type spanning the linker's typelink section,
// by hand-building the slice header over the symbol addresses.
func typelinks() []*_type {
	// ret is declared in a line not visible here; aim its header at
	// the typelink symbol and size it by the byte distance between the
	// markers divided by the element size.
	sp := (*slice)(unsafe.Pointer(&ret))
	sp.array = (*byte)(unsafe.Pointer(&typelink))
	sp.len = uint((uintptr(unsafe.Pointer(&etypelink)) - uintptr(unsafe.Pointer(&typelink))) / unsafe.Sizeof(ret[0]))
// TODO: move back into mgc0.c when converted to Go
//
// readgogc reads the GOGC environment variable and returns it as an
// integer percentage (special handling of the empty and "off" values
// is in lines not visible here).
func readgogc() int32 {
	p := gogetenv("GOGC")
	return int32(goatoi(p))