// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32
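
// Editor's sketch (not in the original source): with the iota values above,
// tracebackCrash == 1, tracebackAll == 2, and tracebackShift == 2, so the
// default 2<<tracebackShift stores level 2 with "crash" and "all" clear.
// Decoding mirrors gotraceback below:
//
//	t := uint32(2)<<tracebackShift | tracebackAll // level 2, all goroutines
//	crash := t&tracebackCrash != 0                // false
//	all := t&tracebackAll != 0                    // true
//	level := int32(t >> tracebackShift)           // 2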

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	all = _g_.m.throwing > 0
	if _g_.m.traceback != 0 {
		level = int32(_g_.m.traceback)
		return
	}
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = all || t&tracebackAll != 0
	level = int32(t >> tracebackShift)
	return
}
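
// Illustrative caller, sketched by the editor (the real callers live in the
// traceback code): a crash path would consult the settings like this:
//
//	level, all, crash := gotraceback()
//	if level > 0 {
//		// print stacks; include runtime frames when level >= 2,
//		// dump every goroutine when all is set, and force a core
//		// dump afterwards when crash is set.
//	}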

var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}
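
// A sketch of the pointer arithmetic in argv_index: argv is a C-style array
// of byte pointers, so element i lives i*sys.PtrSize bytes past argv. The
// add call above is equivalent to:
//
//	p := unsafe.Pointer(uintptr(unsafe.Pointer(argv)) + uintptr(i)*sys.PtrSize)
//	elem := *(**byte)(p) // argv[i]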

func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}
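
// goenvs_unix relies on the conventional Unix process image, where the
// environment block immediately follows argv and both are NULL-terminated:
//
//	argv[0] ... argv[argc-1] NULL env[0] ... env[n-1] NULL
//
// so env[i] is argv_index(argv, argc+1+i) and the first loop counts entries
// until it hits the terminating NULL. (See the TODO above for the ppc64
// dynamic-linking caveat.)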

func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	prefetcht0(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht1(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht2(uintptr(unsafe.Pointer(&test_z64)))
	prefetchnta(uintptr(unsafe.Pointer(&test_z64)))
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
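
// The semantics exercised above, sketched for reference: Cas64(addr, old, new)
// atomically performs
//
//	if *addr == old {
//		*addr = new
//		return true
//	}
//	return false
//
// so the first call (z64 == 42, old == 0) must fail and leave z64 untouched,
// while the second (old == 42) must succeed and leave z64 == 1.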

func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k, k1 unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	k = unsafe.Pointer(uintptr(0xfedcb123))
	if sys.PtrSize == 8 {
		k = unsafe.Pointer(uintptr(k) << 10)
	}
	if casp(&k, nil, nil) {
		throw("casp1")
	}
	k1 = add(k, 1)
	if !casp(&k, k, k1) {
		throw("casp2")
	}
	if k != k1 {
		throw("casp3")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}
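	// Note on the stores above: ^uint32(0) and ^uint32(1) both have an
	// all-ones exponent field and a nonzero mantissa, so each bit pattern
	// reinterprets as a float32 NaN (likewise ^uint64(0) and ^uint64(1)
	// for float64). IEEE 754 makes every comparison with a NaN except !=
	// evaluate to false, which is exactly what these checks verify.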

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}

type dbgVar struct {
	name  string
	value *int32
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	allocfreetrace    int32
	cgocheck          int32
	efence            int32
	gccheckmark       int32
	gcpacertrace      int32
	gcshrinkstackoff  int32
	gcstackbarrieroff int32
	gcstackbarrierall int32
	gcstoptheworld    int32
	gctrace           int32
	invalidptr        int32
	sbrk              int32
	scavenge          int32
	scheddetail       int32
	schedtrace        int32
	wbshadow          int32
}

var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstackbarrieroff", &debug.gcstackbarrieroff},
	{"gcstackbarrierall", &debug.gcstackbarrierall},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"wbshadow", &debug.wbshadow},
}

func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := index(p, ",")
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = index(field, "=")
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			MemProfileRate = atoi(value)
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					*v.value = int32(atoi(value))
				}
			}
		}
	}

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache

	if debug.gcstackbarrierall > 0 {
		firstStackBarrierOffset = 0
	}

	// For cgocheck > 1, we turn on the write barrier at all times
	// and check all pointer writes.
	if debug.cgocheck > 1 {
		writeBarrier.cgo = true
		writeBarrier.enabled = true
	}
}
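
// A worked example of the loop above (hypothetical environment value, for
// illustration only): GODEBUG=gctrace=1,schedtrace=1000,bogus is split on
// commas, then each field on its first '=':
//
//	"gctrace=1"       -> debug.gctrace = 1
//	"schedtrace=1000" -> debug.schedtrace = 1000
//	"bogus"           -> no '=', skipped by the continue
//
// A key with an '=' that matches no dbgvars entry is silently ignored.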

//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = uint32(atoi(level))<<tracebackShift | tracebackAll
	}
	// When C owns the process, simply exiting the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
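
// For reference, the encodings produced by the switch above (before the
// sticky traceback_env bits are OR'd back in):
//
//	GOTRACEBACK=none   -> 0
//	GOTRACEBACK=single -> 1<<tracebackShift
//	GOTRACEBACK=all    -> 1<<tracebackShift | tracebackAll
//	GOTRACEBACK=system -> 2<<tracebackShift | tracebackAll
//	GOTRACEBACK=crash  -> 2<<tracebackShift | tracebackAll | tracebackCrash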

// Poor man's 64-bit division.
// This is a very special function; do not use it unless you are sure what you are doing.
// int64 division is lowered into a _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			res += 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}
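
// A worked example of the long division above, using the values from check:
// timediv(12345*1000000000+54321, 1000000000, &e) peels off div<<bit for
// every bit of the quotient that fits:
//
//	bit 13: v >= 1e9<<13, subtract 8192e9, res += 8192
//	bit 12: v >= 1e9<<12, subtract 4096e9, res += 4096
//	... (bits 5, 4, 3, 0 also fit) ...
//	res == 12345, v == 54321 -> *rem = 54321
//
// The loop only covers quotient bits 30..0, so anything that would need
// bit 31 or higher leaves v >= div afterwards and is reported as the
// saturated value 0x7fffffff.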

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}

//go:nosplit
func releasem(mp *m) {
	_g_ := getg()
	mp.locks--
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}
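
// Typical pairing, sketched by the editor (not from the original file):
// code that must not be preempted or migrated off its m brackets the
// critical region with
//
//	mp := acquirem()
//	// ... work that relies on staying on this m ...
//	releasem(mp)
//
// On the final release, releasem restores the preemption request that
// newstack may have cleared while locks was held.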

//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}

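// reflect_typelinks returns, for every loaded module, the base address of
// its type data and its typelinks section, so that reflect can resolve the
// int32 offsets below into concrete type pointers.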
//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	sections := []unsafe.Pointer{unsafe.Pointer(firstmoduledata.types)}
	ret := [][]int32{firstmoduledata.typelinks}
	for datap := firstmoduledata.next; datap != nil; datap = datap.next {
		sections = append(sections, unsafe.Pointer(datap.types))
		ret = append(ret, datap.typelinks)
	}
	return sections, ret
}

// reflect_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}

// reflect_resolveTextOff resolves a function pointer offset from a base type.
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return (*_type)(rtype).textOff(textOff(off))
}

// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}
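
// A sketch of the ID scheme above: genuine name/type/text offsets are
// non-negative offsets into a module's data, so dynamically registered
// pointers get negative IDs starting at -1, and the two maps keep the
// assignment stable across calls:
//
//	id1 := reflect_addReflectOff(p) // -1 on first registration
//	id2 := reflect_addReflectOff(q) // -2
//	id3 := reflect_addReflectOff(p) // -1 again, found in minv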