1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
15 // If you add to this list, add to the list
16 // of "okay during garbage collection" status
19 _Grunnable // 1 runnable and on a run queue
23 _Gmoribund_unused // 5 currently unused, but hardcoded in gdb scripts
25 _Genqueue // 7 Only the Gscanenqueue is used.
26 _Gcopystack // 8 in this state when newstack is moving the stack
27 // the following encode that the GC is scanning the stack and what to do when it is done
28 _Gscan = 0x1000 // atomicstatus&~Gscan = the non-scan state,
29 // _Gscanidle = _Gscan + _Gidle, // Not used. Gidle only used with newly malloced gs
30 _Gscanrunnable = _Gscan + _Grunnable // 0x1001 When scanning completes make Grunnable (it is already on run queue)
31 _Gscanrunning = _Gscan + _Grunning // 0x1002 Used to tell preemption newstack routine to scan preempted stack.
32 _Gscansyscall = _Gscan + _Gsyscall // 0x1003 When scanning completes make it Gsyscall
33 _Gscanwaiting = _Gscan + _Gwaiting // 0x1004 When scanning completes make it Gwaiting
34 // _Gscanmoribund_unused, // not possible
35 // _Gscandead, // not possible
36 _Gscanenqueue = _Gscan + _Genqueue // When scanning completes make it Grunnable and put on runqueue
48 // XXX inserting below here
51 // Futex-based impl treats it as uint32 key,
52 // while sema-based impl as M* waitm.
53 // Used to be a union, but unions break precise GC.
58 // Futex-based impl treats it as uint32 key,
59 // while sema-based impl as M* waitm.
60 // Used to be a union, but unions break precise GC.
71 // variable-size, fn-specific data here
85 array *byte // actual data
86 len uint // number of elements
87 cap uint // allocated number of elements
91 // The offsets of sp, pc, and g are known to (hard-coded in) libmach.
95 ctxt unsafe.Pointer // this has to be a pointer so that gc scans it
100 // Known to compiler.
101 // Changes here must also be made in src/cmd/gc/select.c's selecttype.
107 elem unsafe.Pointer // data element
109 nrelease int32 // -1 for acquire
110 waitlink *sudog // g.waiting list
113 type gcstats struct {
114 // the struct must consist of only uint64's,
115 // because it is casted to uint64[].
123 type libcall struct {
125 n uintptr // number of parameters
126 args uintptr // parameters
127 r1 uintptr // return values
129 err uintptr // error number
132 // describes how to handle callback
133 type wincallbackcontext struct {
134 gobody unsafe.Pointer // go function to call
135 argsize uintptr // callback arguments size (in bytes)
136 restorestack uintptr // adjust stack on return by (in bytes) (386 only)
140 // Stack describes a Go execution stack.
141 // The bounds of the stack are exactly [lo, hi),
142 // with no implicit data structures on either side.
150 // stack describes the actual stack memory: [stack.lo, stack.hi).
151 // stackguard0 is the stack pointer compared in the Go stack growth prologue.
152 // It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
153 // stackguard1 is the stack pointer compared in the C stack growth prologue.
154 // It is stack.lo+StackGuard on g0 and gsignal stacks.
155 // It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
156 stack stack // offset known to runtime/cgo
157 stackguard0 uintptr // offset known to liblink
158 stackguard1 uintptr // offset known to liblink
160 _panic *_panic // innermost panic - offset known to liblink
161 _defer *_defer // innermost defer
163 syscallsp uintptr // if status==gsyscall, syscallsp = sched.sp to use during gc
164 syscallpc uintptr // if status==gsyscall, syscallpc = sched.pc to use during gc
165 param unsafe.Pointer // passed parameter on wakeup
168 waitsince int64 // approx time when the g became blocked
169 waitreason string // if status==gwaiting
171 issystem bool // do not output in stack dump, ignore in deadlock detector
172 preempt bool // preemption signal, duplicates stackguard0 = stackpreempt
173 paniconfault bool // panic (instead of crash) on unexpected fault address
174 preemptscan bool // preempted g does scan for gc
175 gcworkdone bool // debug: cleared at beginning of gc work phase cycle, set by gcphasework, tested at end of cycle
176 throwsplit bool // must not split stack
177 raceignore int8 // ignore race detection events
178 m *m // for debuggers, but offset not hard-coded
185 gopc uintptr // pc of go statement that created this goroutine
187 waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr)
196 type mscratch struct {
201 g0 *g // goroutine with scheduling stack
202 morebuf gobuf // gobuf arg to morestack
204 // Fields not known to debuggers.
205 procid uint64 // for debuggers, but offset not hard-coded
206 gsignal *g // signal-handling g
207 tls [4]uintptr // thread-local storage (for x86 extern register)
208 mstartfn unsafe.Pointer // todo go func()
209 curg *g // current running goroutine
210 caughtsig *g // goroutine running during fatal signal
211 p *p // attached p for executing go code (nil if not executing go code)
222 spinning bool // m is out of work and is actively looking for work
223 blocked bool // m is blocked on a note
224 inwb bool // m is executing a write barrier
227 ncgocall uint64 // number of cgo calls in total
228 ncgo int32 // number of cgo calls currently in progress
231 alllink *m // on allm
233 machport uint32 // return address for mach ipc (os x)
236 createstack [32]uintptr // stack that created this thread.
237 freglo [16]uint32 // d[i] lsb and f[i]
238 freghi [16]uint32 // d[i] msb and f[i+16]
239 fflag uint32 // floating point compare flags
240 locked uint32 // tracking for lockosthread
241 nextwaitm *m // next m waiting for lock
242 waitsema uintptr // semaphore for parking on locks
248 waitunlockf unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
249 waitlock unsafe.Pointer
250 //#ifdef GOOS_windows
251 thread uintptr // thread handle
252 // these are here because they are too large to be on the stack
253 // of low-level NOSPLIT functions.
255 libcallpc uintptr // for cpu profiler
259 //#ifdef GOOS_solaris
260 perrno *int32 // pointer to tls errno
261 // these are here because they are too large to be on the stack
262 // of low-level NOSPLIT functions.
278 status uint32 // one of pidle/prunning/...
280 schedtick uint32 // incremented on every scheduler call
281 syscalltick uint32 // incremented on every system call
282 m *m // back-link to associated m (nil if idle)
284 deferpool [5]*_defer // pool of available defer structs of different sizes (see panic.c)
286 // Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
290 // Queue of runnable goroutines.
295 // Available G's (status == Gdead)
303 // The max value of GOMAXPROCS.
304 // There are no fundamental restrictions on the value.
305 _MaxGomaxprocs = 1 << 8
313 midle *m // idle m's waiting for work
314 nmidle int32 // number of idle m's waiting for work
315 nmidlelocked int32 // number of locked m's waiting for work
316 mcount int32 // number of m's that have been created
317 maxmcount int32 // maximum number of m's allowed (or die)
323 // Global runnable queue.
328 // Global cache of dead G's.
333 gcwaiting uint32 // gc is waiting to run
340 profilehz int32 // cpu profiling rate
343 // The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
344 // The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
345 // External locks are not recursive; a second lock is silently ignored.
346 // The upper bits of m->lockedcount record the nesting depth of calls to lockOSThread
347 // (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
348 // Internal locks can be recursive. For instance, a lock for cgo can occur while the main
349 // goroutine is holding the lock during the initialization phase.
355 type sigtabtt struct {
361 _SigNotify = 1 << 0 // let signal.Notify have signal, even if from kernel
362 _SigKill = 1 << 1 // if signal.Notify doesn't take it, exit quietly
363 _SigThrow = 1 << 2 // if signal.Notify doesn't take it, exit loudly
364 _SigPanic = 1 << 3 // if the signal is from the kernel, panic
365 _SigDefault = 1 << 4 // if the signal isn't explicitly requested, don't monitor it
366 _SigHandling = 1 << 5 // our signal handler is registered
367 _SigIgnored = 1 << 6 // the signal was ignored before we registered for it
368 _SigGoExit = 1 << 7 // cause all runtime procs to exit (only used on Plan 9).
371 // Layout of in-memory per-function information prepared by linker
372 // See http://golang.org/s/go12symtab.
373 // Keep in sync with linker and with ../../libmach/sym.c
374 // and with package debug/gosym and with symtab.go in package runtime.
376 entry uintptr // start pc
377 nameoff int32 // function name
379 args int32 // in/out args size
380 frame int32 // legacy frame size; use pcsp if possible
389 // layout of Itab known to compilers
390 // allocated in non-garbage-collected memory
401 // TODO: Generate in cmd/dist.
408 // Lock-free stack node.
409 // // Also known to export_test.go.
415 // Parallel for descriptor.
417 body unsafe.Pointer // go func(*parfor, uint32), executed for each element
418 done uint32 // number of idle threads
419 nthr uint32 // total number of threads
420 nthrmax uint32 // maximum number of threads
421 thrseq uint32 // thread id sequencer
422 cnt uint32 // iteration space [0, cnt)
423 ctx unsafe.Pointer // arbitrary user context
424 wait bool // if true, wait while all threads finish processing,
425 // otherwise parfor may return while other threads are still working
426 thr *parforthread // array of thread descriptors
427 pad uint32 // to align parforthread.pos for 64-bit atomic operations
436 // Track memory allocated by code not written in Go during a cgo call,
437 // so that the garbage collector can see them.
443 // Holds variables parsed from GODEBUG env var.
444 type debugvars struct {
454 // Indicates to the write barrier and synchronization code which task to perform.
456 _GCoff = iota // GC not running, write barrier disabled
457 _GCquiesce // unused state
458 _GCstw // unused state
459 _GCscan // GC collecting roots into workbufs, write barrier disabled
460 _GCmark // GC marking from workbufs, write barrier ENABLED
461 _GCmarktermination // GC mark termination: allocate black, P's help GC, write barrier ENABLED
462 _GCsweep // GC mark completed; sweeping in background, write barrier disabled
465 type forcegcstate struct {
480 var startup_random_data *byte
481 var startup_random_data_len uint32
486 // hashinit wants this many random bytes
487 _HashRandomBytes = 32
491 * deferred subroutine calls
496 argp uintptr // where args were copied from
499 _panic *_panic // panic that is running defer
507 argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
508 arg interface{} // argument to panic
509 link *_panic // link to earlier panic
510 recovered bool // whether this panic is over
511 aborted bool // the panic was aborted
518 type stkframe struct {
519 fn *_func // function being run
520 pc uintptr // program counter within fn
521 continpc uintptr // program counter where execution can continue, or 0 if not
522 lr uintptr // program counter at caller aka link register
523 sp uintptr // stack pointer at pc
524 fp uintptr // stack pointer at caller aka frame pointer
525 varp uintptr // top of local variables
526 argp uintptr // pointer to function arguments
527 arglen uintptr // number of bytes at argp
528 argmap *bitvector // force use of this argmap
532 _TraceRuntimeFrames = 1 << 0 // include frames for internal runtime functions.
533 _TraceTrap = 1 << 1 // the initial PC, SP are from a trap, not a return PC from a call
537 // The maximum number of frames we print for a traceback
538 _TracebackMaxFrames = 100
547 allp [_MaxGomaxprocs + 1]*p
564 * mutual exclusion locks. in the uncontended case,
565 * as fast as spin locks (just a few user-level instructions),
566 * but on the contention path they sleep in the kernel.
567 * a zeroed Mutex is unlocked (no need to initialize each lock).
571 * sleep and wakeup on one-time events.
572 * before any calls to notesleep or notewakeup,
573 * must call noteclear to initialize the Note.
574 * then, exactly one thread can call notesleep
575 * and exactly one thread can call notewakeup (once).
576 * once notewakeup has been called, the notesleep
577 * will return. future notesleep will return immediately.
578 * subsequent noteclear must be called only after
579 * previous notesleep has returned, e.g. it's disallowed
580 * to call noteclear straight after notewakeup.
582 * notetsleep is like notesleep but wakes up after
583 * a given number of nanoseconds even if the event
584 * has not yet happened. if a goroutine uses notetsleep to
585 * wake up early, it must wait to call noteclear until it
586 * can be sure that no other goroutine is calling
589 * notesleep/notetsleep are generally called on g0,
590 * notetsleepg is similar to notetsleep but is called on user g.
592 // bool runtime·notetsleep(Note*, int64); // false - timeout
593 // bool runtime·notetsleepg(Note*, int64); // false - timeout
597 * Initialize uint64 head to 0, compare with 0 to test for emptiness.
598 * The stack does not keep pointers to nodes,
599 * so they can be garbage collected if there are no other pointers to nodes.
603 * Parallel for over [0, n).
604 * body() is executed for each iteration.
605 * nthr - total number of worker threads.
606 * ctx - arbitrary user context.
607 * if wait=true, threads return from parfor() when all work is done;
608 * otherwise, threads can return while other threads are still finishing processing.
611 // for mmap, we only pass the lower 32 bits of file offset to the
612 // assembly routine; the higher bits (if required), should be provided
613 // by the assembly routine as 0.