1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
15 // If you add to this list, add to the list
16 // of "okay during garbage collection" status
19 _Grunnable // 1 runnable and on a run queue
23 _Gmoribund_unused // 5 currently unused, but hardcoded in gdb scripts
25 _Genqueue // 7 Only the Gscanenqueue is used.
26 _Gcopystack // 8 in this state when newstack is moving the stack
27 // the following encode that the GC is scanning the stack and what to do when it is done
28 _Gscan = 0x1000 // atomicstatus&~Gscan = the non-scan state,
29 // _Gscanidle = _Gscan + _Gidle, // Not used. Gidle only used with newly malloced gs
30 _Gscanrunnable = _Gscan + _Grunnable // 0x1001 When scanning completes make Grunnable (it is already on run queue)
31 _Gscanrunning = _Gscan + _Grunning // 0x1002 Used to tell preemption newstack routine to scan preempted stack.
32 _Gscansyscall = _Gscan + _Gsyscall // 0x1003 When scanning completes make it Gsyscall
33 _Gscanwaiting = _Gscan + _Gwaiting // 0x1004 When scanning completes make it Gwaiting
34 // _Gscanmoribund_unused, // not possible
35 // _Gscandead, // not possible
36 _Gscanenqueue = _Gscan + _Genqueue // When scanning completes make it Grunnable and put on runqueue
48 // The next line makes 'go generate' write the zgen_*.go files with
49 // per-OS and per-arch information, including constants
50 // named goos_$GOOS and goarch_$GOARCH for every
51 // known GOOS and GOARCH. The constant is 1 on the
52 // current system, 0 otherwise; multiplying by them is
53 // useful for defining GOOS- or GOARCH-specific constants.
54 //go:generate go run gengoos.go
57 // Futex-based impl treats it as uint32 key,
58 // while sema-based impl as M* waitm.
59 // Used to be a union, but unions break precise GC.
64 // Futex-based impl treats it as uint32 key,
65 // while sema-based impl as M* waitm.
66 // Used to be a union, but unions break precise GC.
77 // variable-size, fn-specific data here
91 array *byte // actual data
92 len uint // number of elements
93 cap uint // allocated number of elements
97 // The offsets of sp, pc, and g are known to (hard-coded in) libmach.
101 ctxt unsafe.Pointer // this has to be a pointer so that gc scans it
106 // Known to compiler.
107 // Changes here must also be made in src/cmd/gc/select.c's selecttype.
113 elem unsafe.Pointer // data element
115 nrelease int32 // -1 for acquire
116 waitlink *sudog // g.waiting list
119 type gcstats struct {
120 // the struct must consist of only uint64's,
121 // because it is cast to uint64[].
129 type libcall struct {
131 n uintptr // number of parameters
132 args uintptr // parameters
133 r1 uintptr // return values
135 err uintptr // error number
138 // describes how to handle callback
139 type wincallbackcontext struct {
140 gobody unsafe.Pointer // go function to call
141 argsize uintptr // callback arguments size (in bytes)
142 restorestack uintptr // adjust stack on return by (in bytes) (386 only)
146 // Stack describes a Go execution stack.
147 // The bounds of the stack are exactly [lo, hi),
148 // with no implicit data structures on either side.
156 // stack describes the actual stack memory: [stack.lo, stack.hi).
157 // stackguard0 is the stack pointer compared in the Go stack growth prologue.
158 // It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
159 // stackguard1 is the stack pointer compared in the C stack growth prologue.
160 // It is stack.lo+StackGuard on g0 and gsignal stacks.
161 // It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
162 stack stack // offset known to runtime/cgo
163 stackguard0 uintptr // offset known to liblink
164 stackguard1 uintptr // offset known to liblink
166 _panic *_panic // innermost panic - offset known to liblink
167 _defer *_defer // innermost defer
169 syscallsp uintptr // if status==gsyscall, syscallsp = sched.sp to use during gc
170 syscallpc uintptr // if status==gsyscall, syscallpc = sched.pc to use during gc
171 param unsafe.Pointer // passed parameter on wakeup
174 waitsince int64 // approx time when the g became blocked
175 waitreason string // if status==gwaiting
177 issystem bool // do not output in stack dump, ignore in deadlock detector
178 preempt bool // preemption signal, duplicates stackguard0 = stackpreempt
179 paniconfault bool // panic (instead of crash) on unexpected fault address
180 preemptscan bool // preempted g does scan for gc
181 gcworkdone bool // debug: cleared at beginning of gc work phase cycle, set by gcphasework, tested at end of cycle
182 throwsplit bool // must not split stack
183 raceignore int8 // ignore race detection events
184 m *m // for debuggers, but offset not hard-coded
191 gopc uintptr // pc of go statement that created this goroutine
193 waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr)
202 type mscratch struct {
207 g0 *g // goroutine with scheduling stack
208 morebuf gobuf // gobuf arg to morestack
210 // Fields not known to debuggers.
211 procid uint64 // for debuggers, but offset not hard-coded
212 gsignal *g // signal-handling g
213 tls [4]uintptr // thread-local storage (for x86 extern register)
214 mstartfn unsafe.Pointer // todo go func()
215 curg *g // current running goroutine
216 caughtsig *g // goroutine running during fatal signal
217 p *p // attached p for executing go code (nil if not executing go code)
228 spinning bool // m is out of work and is actively looking for work
229 blocked bool // m is blocked on a note
230 inwb bool // m is executing a write barrier
233 ncgocall uint64 // number of cgo calls in total
234 ncgo int32 // number of cgo calls currently in progress
237 alllink *m // on allm
239 machport uint32 // return address for mach ipc (os x)
242 createstack [32]uintptr // stack that created this thread.
243 freglo [16]uint32 // d[i] lsb and f[i]
244 freghi [16]uint32 // d[i] msb and f[i+16]
245 fflag uint32 // floating point compare flags
246 locked uint32 // tracking for lockosthread
247 nextwaitm *m // next m waiting for lock
248 waitsema uintptr // semaphore for parking on locks
254 waitunlockf unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
255 waitlock unsafe.Pointer
256 //#ifdef GOOS_windows
257 thread uintptr // thread handle
258 // these are here because they are too large to be on the stack
259 // of low-level NOSPLIT functions.
261 libcallpc uintptr // for cpu profiler
265 //#ifdef GOOS_solaris
266 perrno *int32 // pointer to tls errno
267 // these are here because they are too large to be on the stack
268 // of low-level NOSPLIT functions.
284 status uint32 // one of pidle/prunning/...
286 schedtick uint32 // incremented on every scheduler call
287 syscalltick uint32 // incremented on every system call
288 m *m // back-link to associated m (nil if idle)
290 deferpool [5]*_defer // pool of available defer structs of different sizes (see panic.c)
292 // Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
296 // Queue of runnable goroutines.
301 // Available G's (status == Gdead)
309 // The max value of GOMAXPROCS.
310 // There are no fundamental restrictions on the value.
311 _MaxGomaxprocs = 1 << 8
319 midle *m // idle m's waiting for work
320 nmidle int32 // number of idle m's waiting for work
321 nmidlelocked int32 // number of locked m's waiting for work
322 mcount int32 // number of m's that have been created
323 maxmcount int32 // maximum number of m's allowed (or die)
329 // Global runnable queue.
334 // Global cache of dead G's.
339 gcwaiting uint32 // gc is waiting to run
346 profilehz int32 // cpu profiling rate
349 // The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
350 // The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
351 // External locks are not recursive; a second lock is silently ignored.
352 // The upper bits of m->lockedcount record the nesting depth of calls to lockOSThread
353 // (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
354 // Internal locks can be recursive. For instance, a lock for cgo can occur while the main
355 // goroutine is holding the lock during the initialization phase.
361 type sigtabtt struct {
367 _SigNotify = 1 << 0 // let signal.Notify have signal, even if from kernel
368 _SigKill = 1 << 1 // if signal.Notify doesn't take it, exit quietly
369 _SigThrow = 1 << 2 // if signal.Notify doesn't take it, exit loudly
370 _SigPanic = 1 << 3 // if the signal is from the kernel, panic
371 _SigDefault = 1 << 4 // if the signal isn't explicitly requested, don't monitor it
372 _SigHandling = 1 << 5 // our signal handler is registered
373 _SigIgnored = 1 << 6 // the signal was ignored before we registered for it
374 _SigGoExit = 1 << 7 // cause all runtime procs to exit (only used on Plan 9).
377 // Layout of in-memory per-function information prepared by linker
378 // See http://golang.org/s/go12symtab.
379 // Keep in sync with linker and with ../../libmach/sym.c
380 // and with package debug/gosym and with symtab.go in package runtime.
382 entry uintptr // start pc
383 nameoff int32 // function name
385 args int32 // in/out args size
386 frame int32 // legacy frame size; use pcsp if possible
395 // layout of Itab known to compilers
396 // allocated in non-garbage-collected memory
406 // Lock-free stack node.
407 // // Also known to export_test.go.
413 // Parallel for descriptor.
415 body unsafe.Pointer // go func(*parfor, uint32), executed for each element
416 done uint32 // number of idle threads
417 nthr uint32 // total number of threads
418 nthrmax uint32 // maximum number of threads
419 thrseq uint32 // thread id sequencer
420 cnt uint32 // iteration space [0, cnt)
421 ctx unsafe.Pointer // arbitrary user context
422 wait bool // if true, wait while all threads finish processing,
423 // otherwise parfor may return while other threads are still working
424 thr *parforthread // array of thread descriptors
425 pad uint32 // to align parforthread.pos for 64-bit atomic operations
434 // Track memory allocated by code not written in Go during a cgo call,
435 // so that the garbage collector can see them.
441 // Holds variables parsed from GODEBUG env var.
442 type debugvars struct {
452 // Indicates the write barrier and synchronization tasks to perform.
454 _GCoff = iota // GC not running, write barrier disabled
455 _GCquiesce // unused state
456 _GCstw // unused state
457 _GCscan // GC collecting roots into workbufs, write barrier disabled
458 _GCmark // GC marking from workbufs, write barrier ENABLED
459 _GCmarktermination // GC mark termination: allocate black, P's help GC, write barrier ENABLED
460 _GCsweep // GC mark completed; sweeping in background, write barrier disabled
463 type forcegcstate struct {
// startup_random_data points at random bytes handed to the runtime at
// process startup; startup_random_data_len is how many bytes are there.
// NOTE(review): presumably populated by platform-specific startup code
// (e.g. from the ELF auxv AT_RANDOM vector) and consumed by hashinit,
// which wants _HashRandomBytes of entropy — confirm per platform.
478 var startup_random_data *byte
479 var startup_random_data_len uint32
484 // hashinit wants this many random bytes
485 _HashRandomBytes = 32
489 * deferred subroutine calls
494 argp uintptr // where args were copied from
497 _panic *_panic // panic that is running defer
505 argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
506 arg interface{} // argument to panic
507 link *_panic // link to earlier panic
508 recovered bool // whether this panic is over
509 aborted bool // the panic was aborted
516 type stkframe struct {
517 fn *_func // function being run
518 pc uintptr // program counter within fn
519 continpc uintptr // program counter where execution can continue, or 0 if not
520 lr uintptr // program counter at caller aka link register
521 sp uintptr // stack pointer at pc
522 fp uintptr // stack pointer at caller aka frame pointer
523 varp uintptr // top of local variables
524 argp uintptr // pointer to function arguments
525 arglen uintptr // number of bytes at argp
526 argmap *bitvector // force use of this argmap
530 _TraceRuntimeFrames = 1 << 0 // include frames for internal runtime functions.
531 _TraceTrap = 1 << 1 // the initial PC, SP are from a trap, not a return PC from a call
535 // The maximum number of frames we print for a traceback
536 _TracebackMaxFrames = 100
545 allp [_MaxGomaxprocs + 1]*p
562 * mutual exclusion locks. in the uncontended case,
563 * as fast as spin locks (just a few user-level instructions),
564 * but on the contention path they sleep in the kernel.
565 * a zeroed Mutex is unlocked (no need to initialize each lock).
569 * sleep and wakeup on one-time events.
570 * before any calls to notesleep or notewakeup,
571 * must call noteclear to initialize the Note.
572 * then, exactly one thread can call notesleep
573 * and exactly one thread can call notewakeup (once).
574 * once notewakeup has been called, the notesleep
575 * will return. future notesleep will return immediately.
576 * subsequent noteclear must be called only after
577 * previous notesleep has returned, e.g. it's disallowed
578 * to call noteclear straight after notewakeup.
580 * notetsleep is like notesleep but wakes up after
581 * a given number of nanoseconds even if the event
582 * has not yet happened. if a goroutine uses notetsleep to
583 * wake up early, it must wait to call noteclear until it
584 * can be sure that no other goroutine is calling
587 * notesleep/notetsleep are generally called on g0,
588 * notetsleepg is similar to notetsleep but is called on user g.
590 // bool runtime·notetsleep(Note*, int64); // false - timeout
591 // bool runtime·notetsleepg(Note*, int64); // false - timeout
595 * Initialize uint64 head to 0, compare with 0 to test for emptiness.
596 * The stack does not keep pointers to nodes,
597 * so they can be garbage collected if there are no other pointers to nodes.
601 * Parallel for over [0, n).
602 * body() is executed for each iteration.
603 * nthr - total number of worker threads.
604 * ctx - arbitrary user context.
605 * if wait=true, threads return from parfor() when all work is done;
606 * otherwise, threads can return while other threads are still finishing processing.
609 // for mmap, we only pass the lower 32 bits of file offset to the
610 // assembly routine; the higher bits (if required), should be provided
611 // by the assembly routine as 0.