1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
15 // If you add to this list, add to the list
16 // of "okay during garbage collection" status
19 _Grunnable // 1 runnable and on a run queue
23 _Gmoribund_unused // 5 currently unused, but hardcoded in gdb scripts
25 _Genqueue // 7 Only the Gscanenqueue is used.
26 _Gcopystack // 8 in this state when newstack is moving the stack
27 // the following encode that the GC is scanning the stack and what to do when it is done
28 _Gscan = 0x1000 // atomicstatus&~Gscan = the non-scan state,
29 // _Gscanidle = _Gscan + _Gidle, // Not used. Gidle only used with newly malloced gs
30 _Gscanrunnable = _Gscan + _Grunnable // 0x1001 When scanning completes make Grunnable (it is already on run queue)
31 _Gscanrunning = _Gscan + _Grunning // 0x1002 Used to tell preemption newstack routine to scan preempted stack.
32 _Gscansyscall = _Gscan + _Gsyscall // 0x1003 When scanning completes make it Gsyscall
33 _Gscanwaiting = _Gscan + _Gwaiting // 0x1004 When scanning completes make it Gwaiting
34 // _Gscanmoribund_unused, // not possible
35 // _Gscandead, // not possible
36 _Gscanenqueue = _Gscan + _Genqueue // When scanning completes make it Grunnable and put on runqueue
48 // The next line makes 'go generate' write the zgen_*.go files with
49 // per-OS and per-arch information, including constants
50 // named goos_$GOOS and goarch_$GOARCH for every
51 // known GOOS and GOARCH. The constant is 1 on the
52 // current system, 0 otherwise; multiplying by them is
53 // useful for defining GOOS- or GOARCH-specific constants.
54 //go:generate go run gengoos.go
57 // Futex-based impl treats it as uint32 key,
58 // while sema-based impl as M* waitm.
59 // Used to be a union, but unions break precise GC.
64 // Futex-based impl treats it as uint32 key,
65 // while sema-based impl as M* waitm.
66 // Used to be a union, but unions break precise GC.
77 // variable-size, fn-specific data here
91 array *byte // actual data
92 len uint // number of elements
93 cap uint // allocated number of elements
97 // The offsets of sp, pc, and g are known to (hard-coded in) libmach.
101 ctxt unsafe.Pointer // this has to be a pointer so that gc scans it
106 // Known to compiler.
107 // Changes here must also be made in src/cmd/gc/select.c's selecttype.
113 elem unsafe.Pointer // data element
115 nrelease int32 // -1 for acquire
116 waitlink *sudog // g.waiting list
119 type gcstats struct {
120 // the struct must consist of only uint64's,
121 // because it is casted to uint64[].
129 type libcall struct {
131 n uintptr // number of parameters
132 args uintptr // parameters
133 r1 uintptr // return values
135 err uintptr // error number
138 // describes how to handle callback
139 type wincallbackcontext struct {
140 gobody unsafe.Pointer // go function to call
141 argsize uintptr // callback arguments size (in bytes)
142 restorestack uintptr // adjust stack on return by (in bytes) (386 only)
146 // Stack describes a Go execution stack.
147 // The bounds of the stack are exactly [lo, hi),
148 // with no implicit data structures on either side.
156 // stack describes the actual stack memory: [stack.lo, stack.hi).
157 // stackguard0 is the stack pointer compared in the Go stack growth prologue.
158 // It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
159 // stackguard1 is the stack pointer compared in the C stack growth prologue.
160 // It is stack.lo+StackGuard on g0 and gsignal stacks.
161 // It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
162 stack stack // offset known to runtime/cgo
163 stackguard0 uintptr // offset known to liblink
164 stackguard1 uintptr // offset known to liblink
166 _panic *_panic // innermost panic - offset known to liblink
167 _defer *_defer // innermost defer
169 syscallsp uintptr // if status==gsyscall, syscallsp = sched.sp to use during gc
170 syscallpc uintptr // if status==gsyscall, syscallpc = sched.pc to use during gc
171 param unsafe.Pointer // passed parameter on wakeup
174 waitsince int64 // approx time when the g became blocked
175 waitreason string // if status==gwaiting
177 issystem bool // do not output in stack dump, ignore in deadlock detector
178 preempt bool // preemption signal, duplicates stackguard0 = stackpreempt
179 paniconfault bool // panic (instead of crash) on unexpected fault address
180 preemptscan bool // preempted g does scan for gc
181 gcworkdone bool // debug: cleared at beginning of gc work phase cycle, set by gcphasework, tested at end of cycle
182 throwsplit bool // must not split stack
183 raceignore int8 // ignore race detection events
184 m *m // for debuggers, but offset not hard-coded
191 gopc uintptr // pc of go statement that created this goroutine
193 waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr)
202 type mscratch struct {
207 g0 *g // goroutine with scheduling stack
208 morebuf gobuf // gobuf arg to morestack
210 // Fields not known to debuggers.
211 procid uint64 // for debuggers, but offset not hard-coded
212 gsignal *g // signal-handling g
213 tls [4]uintptr // thread-local storage (for x86 extern register)
214 mstartfn unsafe.Pointer // todo go func()
215 curg *g // current running goroutine
216 caughtsig *g // goroutine running during fatal signal
217 p *p // attached p for executing go code (nil if not executing go code)
228 spinning bool // m is out of work and is actively looking for work
229 blocked bool // m is blocked on a note
231 ncgocall uint64 // number of cgo calls in total
232 ncgo int32 // number of cgo calls currently in progress
235 alllink *m // on allm
237 machport uint32 // return address for mach ipc (os x)
240 createstack [32]uintptr // stack that created this thread.
241 freglo [16]uint32 // d[i] lsb and f[i]
242 freghi [16]uint32 // d[i] msb and f[i+16]
243 fflag uint32 // floating point compare flags
244 locked uint32 // tracking for lockosthread
245 nextwaitm *m // next m waiting for lock
246 waitsema uintptr // semaphore for parking on locks
252 waitunlockf unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
253 waitlock unsafe.Pointer
254 //#ifdef GOOS_windows
255 thread uintptr // thread handle
256 // these are here because they are too large to be on the stack
257 // of low-level NOSPLIT functions.
259 libcallpc uintptr // for cpu profiler
263 //#ifdef GOOS_solaris
264 perrno *int32 // pointer to tls errno
265 // these are here because they are too large to be on the stack
266 // of low-level NOSPLIT functions.
282 status uint32 // one of pidle/prunning/...
284 schedtick uint32 // incremented on every scheduler call
285 syscalltick uint32 // incremented on every system call
286 m *m // back-link to associated m (nil if idle)
288 deferpool [5]*_defer // pool of available defer structs of different sizes (see panic.c)
290 // Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
294 // Queue of runnable goroutines.
299 // Available G's (status == Gdead)
307 // The max value of GOMAXPROCS.
308 // There are no fundamental restrictions on the value.
309 _MaxGomaxprocs = 1 << 8
317 midle *m // idle m's waiting for work
318 nmidle int32 // number of idle m's waiting for work
319 nmidlelocked int32 // number of locked m's waiting for work
320 mcount int32 // number of m's that have been created
321 maxmcount int32 // maximum number of m's allowed (or die)
327 // Global runnable queue.
332 // Global cache of dead G's.
337 gcwaiting uint32 // gc is waiting to run
344 profilehz int32 // cpu profiling rate
347 // The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
348 // The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
349 // External locks are not recursive; a second lock is silently ignored.
350 // The upper bits of m->locked record the nesting depth of calls to lockOSThread
351 // (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
352 // Internal locks can be recursive. For instance, a lock for cgo can occur while the main
353 // goroutine is holding the lock during the initialization phase.
359 type sigtabtt struct {
365 _SigNotify = 1 << 0 // let signal.Notify have signal, even if from kernel
366 _SigKill = 1 << 1 // if signal.Notify doesn't take it, exit quietly
367 _SigThrow = 1 << 2 // if signal.Notify doesn't take it, exit loudly
368 _SigPanic = 1 << 3 // if the signal is from the kernel, panic
369 _SigDefault = 1 << 4 // if the signal isn't explicitly requested, don't monitor it
370 _SigHandling = 1 << 5 // our signal handler is registered
371 _SigIgnored = 1 << 6 // the signal was ignored before we registered for it
372 _SigGoExit = 1 << 7 // cause all runtime procs to exit (only used on Plan 9).
375 // Layout of in-memory per-function information prepared by linker
376 // See http://golang.org/s/go12symtab.
377 // Keep in sync with linker and with ../../libmach/sym.c
378 // and with package debug/gosym and with symtab.go in package runtime.
380 entry uintptr // start pc
381 nameoff int32 // function name
383 args int32 // in/out args size
384 frame int32 // legacy frame size; use pcsp if possible
393 // layout of Itab known to compilers
394 // allocated in non-garbage-collected memory
404 // Lock-free stack node.
410 // Parallel for descriptor.
412 body unsafe.Pointer // go func(*parfor, uint32), executed for each element
413 done uint32 // number of idle threads
414 nthr uint32 // total number of threads
415 nthrmax uint32 // maximum number of threads
416 thrseq uint32 // thread id sequencer
417 cnt uint32 // iteration space [0, cnt)
418 ctx unsafe.Pointer // arbitrary user context
419 wait bool // if true, wait while all threads finish processing,
420 // otherwise parfor may return while other threads are still working
421 thr *parforthread // array of thread descriptors
422 pad uint32 // to align parforthread.pos for 64-bit atomic operations
431 // Track memory allocated by code not written in Go during a cgo call,
432 // so that the garbage collector can see them.
438 // Holds variables parsed from GODEBUG env var.
439 type debugvars struct {
449 // Indicates to the write barrier and synchronization task to perform.
451 _GCoff = iota // stop and start nop
452 _GCquiesce // stop and start nop
453 _GCstw // stop the ps nop
454 _GCmark // scan the stacks and start no white to black
455 _GCsweep // stop and start nop
458 type forcegcstate struct {
473 var startup_random_data *byte
474 var startup_random_data_len uint32
479 // hashinit wants this many random bytes
480 _HashRandomBytes = 32
484 * deferred subroutine calls
489 argp uintptr // where args were copied from
492 _panic *_panic // panic that is running defer
500 argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
501 arg interface{} // argument to panic
502 link *_panic // link to earlier panic
503 recovered bool // whether this panic is over
504 aborted bool // the panic was aborted
511 type stkframe struct {
512 fn *_func // function being run
513 pc uintptr // program counter within fn
514 continpc uintptr // program counter where execution can continue, or 0 if not
515 lr uintptr // program counter at caller aka link register
516 sp uintptr // stack pointer at pc
517 fp uintptr // stack pointer at caller aka frame pointer
518 varp uintptr // top of local variables
519 argp uintptr // pointer to function arguments
520 arglen uintptr // number of bytes at argp
521 argmap *bitvector // force use of this argmap
525 _TraceRuntimeFrames = 1 << 0 // include frames for internal runtime functions.
526 _TraceTrap = 1 << 1 // the initial PC, SP are from a trap, not a return PC from a call
530 // The maximum number of frames we print for a traceback
531 _TracebackMaxFrames = 100
540 allp [_MaxGomaxprocs + 1]*p
557 * mutual exclusion locks. in the uncontended case,
558 * as fast as spin locks (just a few user-level instructions),
559 * but on the contention path they sleep in the kernel.
560 * a zeroed Mutex is unlocked (no need to initialize each lock).
564 * sleep and wakeup on one-time events.
565 * before any calls to notesleep or notewakeup,
566 * must call noteclear to initialize the Note.
567 * then, exactly one thread can call notesleep
568 * and exactly one thread can call notewakeup (once).
569 * once notewakeup has been called, the notesleep
570 * will return. future notesleep will return immediately.
571 * subsequent noteclear must be called only after
572 * previous notesleep has returned, e.g. it's disallowed
573 * to call noteclear straight after notewakeup.
575 * notetsleep is like notesleep but wakes up after
576 * a given number of nanoseconds even if the event
577 * has not yet happened. if a goroutine uses notetsleep to
578 * wake up early, it must wait to call noteclear until it
579 * can be sure that no other goroutine is calling
582 * notesleep/notetsleep are generally called on g0,
583 * notetsleepg is similar to notetsleep but is called on user g.
585 // bool runtime·notetsleep(Note*, int64); // false - timeout
586 // bool runtime·notetsleepg(Note*, int64); // false - timeout
590 * Initialize uint64 head to 0, compare with 0 to test for emptiness.
591 * The stack does not keep pointers to nodes,
592 * so they can be garbage collected if there are no other pointers to nodes.
596 * Parallel for over [0, n).
597 * body() is executed for each iteration.
598 * nthr - total number of worker threads.
599 * ctx - arbitrary user context.
600 * if wait=true, threads return from parfor() when all work is done;
601 * otherwise, threads can return while other threads are still finishing processing.
604 // for mmap, we only pass the lower 32 bits of file offset to the
605 // assembly routine; the higher bits (if required), should be provided
606 // by the assembly routine as 0.