// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
10 "runtime/internal/atomic"
11 "runtime/internal/sys"
	// Beyond indicating the general state of a G, the G status
	// acts like a lock on the goroutine's stack (and hence its
	// ability to execute user code).
	//
	// If you add to this list, add to the list
24 // of "okay during garbage collection" status
	//
	// TODO(austin): The _Gscan bit could be much lighter-weight.
	// For example, we could choose not to run _Gscanrunnable
	// goroutines found in the run queue, rather than CAS-looping
	// until they become _Grunnable. And transitions like
	// _Gscanwaiting -> _Gscanrunnable are actually okay because
	// they don't affect stack ownership.
	// _Gidle means this goroutine was just allocated and has not
	// yet been initialized.
	//
	// _Grunnable means this goroutine is on a run queue. It is
	// not currently executing user code. The stack is not owned.
	//
	// _Grunning means this goroutine may execute user code. The
	// stack is owned by this goroutine. It is not on a run queue.
	// It is assigned an M and a P (g.m and g.m.p are valid).
	//
	// _Gsyscall means this goroutine is executing a system call.
	// It is not executing user code. The stack is owned by this
	// goroutine. It is not on a run queue. It is assigned an M.
	//
	// _Gwaiting means this goroutine is blocked in the runtime.
	// It is not executing user code. It is not on a run queue,
	// but should be recorded somewhere (e.g., a channel wait
	// queue) so it can be ready()d when necessary. The stack is
	// not owned *except* that a channel operation may read or
	// write parts of the stack under the appropriate channel
	// lock. Otherwise, it is not safe to access the stack after a
	// goroutine enters _Gwaiting (e.g., it may get moved).
	// _Gmoribund_unused is currently unused, but hardcoded in gdb scripts.
	_Gmoribund_unused // 5
	// _Gdead means this goroutine is currently unused. It may be
	// just exited, on a free list, or just being initialized. It
	// is not executing user code. It may or may not have a stack
	// allocated. The G and its stack (if any) are owned by the M
	// that is exiting the G or that obtained the G from the free list.
	// _Genqueue_unused is currently unused.
	// _Gcopystack means this goroutine's stack is being moved. It
	// is not executing user code and is not on a run queue. The
	// stack is owned by the goroutine that put it in _Gcopystack.
	// _Gpreempted means this goroutine stopped itself for a
	// suspendG preemption. It is like _Gwaiting, but nothing is
	// yet responsible for ready()ing it. Some suspendG must CAS
	// the status to _Gwaiting to take responsibility for ready()ing this G.
	// _Gscan combined with one of the above states other than
	// _Grunning indicates that GC is scanning the stack. The
	// goroutine is not executing user code and the stack is owned
	// by the goroutine that set the _Gscan bit.
	//
	// _Gscanrunning is different: it is used to briefly block
	// state transitions while GC signals the G to scan its own
	// stack. This is otherwise like _Grunning.
	// atomicstatus &^ _Gscan gives the state the goroutine will
	// return to when the scan completes.
	_Gscanrunnable  = _Gscan + _Grunnable  // 0x1001
	_Gscanrunning   = _Gscan + _Grunning   // 0x1002
	_Gscansyscall   = _Gscan + _Gsyscall   // 0x1003
	_Gscanwaiting   = _Gscan + _Gwaiting   // 0x1004
	_Gscanpreempted = _Gscan + _Gpreempted // 0x1009
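	// A minimal sketch (not actual runtime code) of recovering the
	// base state from a scan state, using the atomicstatus field
	// defined on g below:
	//
	//	status := gp.atomicstatus.Load()
	//	base := status &^ _Gscan // e.g. _Gscanrunnable yields _Grunnable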
	// _Pidle means a P is not being used to run user code or the
	// scheduler. Typically, it's on the idle P list and available
	// to the scheduler, but it may just be transitioning between
	// other states.
	//
	// The P is owned by the idle list or by whatever is
	// transitioning its state. Its run queue is empty.
	// _Prunning means a P is owned by an M and is being used to
	// run user code or the scheduler. Only the M that owns this P
	// is allowed to change the P's status from _Prunning. The M
	// may transition the P to _Pidle (if it has no more work to
	// do), _Psyscall (when entering a syscall), or _Pgcstop (to
	// halt for the GC). The M may also hand ownership of the P
	// off directly to another M (e.g., to schedule a locked G).
	// _Psyscall means a P is not running user code. It has
	// affinity to an M in a syscall but is not owned by it and
	// may be stolen by another M. This is similar to _Pidle but
	// uses lightweight transitions and maintains M affinity.
	//
	// Leaving _Psyscall must be done with a CAS, either to steal
	// or retake the P. Note that there's an ABA hazard: even if
	// an M successfully CASes its original P back to _Prunning
	// after a syscall, it must understand the P may have been
	// used by another M in the interim.
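	//
	// A minimal sketch of leaving _Psyscall, assuming the Cas helper
	// from runtime/internal/atomic (illustrative; not the actual
	// retake/steal code):
	//
	//	if atomic.Cas(&pp.status, _Psyscall, _Pidle) {
	//		// This M now owns pp and may reuse or release it.
	//	}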
	// _Pgcstop means a P is halted for STW and owned by the M
	// that stopped the world. The M that stopped the world
	// continues to use its P, even in _Pgcstop. Transitioning
	// from _Prunning to _Pgcstop causes an M to release its P and park.
	//
	// The P retains its run queue and startTheWorld will restart
	// the scheduler on Ps with non-empty run queues.
	// _Pdead means a P is no longer used (GOMAXPROCS shrank). We
	// reuse Ps if GOMAXPROCS increases. A dead P is mostly
	// stripped of its resources, though a few things remain
	// (e.g., trace buffers).
// Mutual exclusion locks. In the uncontended case,
// as fast as spin locks (just a few user-level instructions),
// but on the contention path they sleep in the kernel.
// A zeroed Mutex is unlocked (no need to initialize each lock).
// Initialization is helpful for static lock ranking, but not required.
	// Empty struct if lock ranking is disabled, otherwise includes the lock rank

	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
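// A typical use inside the runtime looks like the following sketch
// (sched.lock is the scheduler's mutex; see proc.go):
//
//	lock(&sched.lock)
//	// ... read or mutate state protected by sched.lock ...
//	unlock(&sched.lock)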
// sleep and wakeup on one-time events.
// before any calls to notesleep or notewakeup,
// must call noteclear to initialize the Note.
// then, exactly one thread can call notesleep
// and exactly one thread can call notewakeup (once).
// once notewakeup has been called, the notesleep
// will return. future notesleep will return immediately.
// subsequent noteclear must be called only after
// previous notesleep has returned, e.g. it's disallowed
// to call noteclear straight after notewakeup.
//
// notetsleep is like notesleep but wakes up after
// a given number of nanoseconds even if the event
// has not yet happened. if a goroutine uses notetsleep to
// wake up early, it must wait to call noteclear until it
// can be sure that no other goroutine is calling notewakeup.
//
// notesleep/notetsleep are generally called on g0;
// notetsleepg is similar to notetsleep but is called on user g.
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
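// The intended sequence, as a sketch:
//
//	var n note
//	noteclear(&n)  // initialize before any sleep/wakeup
//	// ... the sleeping thread (on g0) calls:
//	notesleep(&n)  // blocks until the event fires
//	// ... exactly one other thread calls:
//	notewakeup(&n) // fires the event; the sleeper returns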
type funcval struct {
	// variable-size, fn-specific data here
func efaceOf(ep *any) *eface {
	return (*eface)(unsafe.Pointer(ep))
}
// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs and Ps are always reachable via true pointers in the
// allgs and allp lists or (during allocation before they reach those lists)
// from stack variables.
//
// Ms are always reachable via true pointers either from allm or
// freem. Unlike Gs and Ps we do free Ms, so it's important that
// nothing ever hold an muintptr across a safe point.
// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
// Note that pollDesc.rg, pollDesc.wg also store g in uintptr form,
// so they would need to be updated too if g's start moving.
type guintptr uintptr
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }

func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }
func (gp *guintptr) cas(old, new guintptr) bool {
	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}
func (gp *g) guintptr() guintptr {
	return guintptr(unsafe.Pointer(gp))
}
// setGNoWB performs *gp = new without a write barrier.
// For times when it's impractical to use a guintptr.
func setGNoWB(gp **g, new *g) {
	(*guintptr)(unsafe.Pointer(gp)).set(new)
}
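// For illustration, code that must stash a G without a write barrier can
// use either form (a sketch, not a specific runtime call site):
//
//	var head guintptr
//	head.set(gp)      // store the *g as a uintptr; no write barrier
//	gp2 := head.ptr() // recover the *g to use it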
type puintptr uintptr

func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }

func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }
// muintptr is a *m that is not tracked by the garbage collector.
// Because we do free Ms, there are some additional constraints on
// muintptrs:
// 1. Never hold an muintptr locally across a safe point.
//
// 2. Any muintptr in the heap must be owned by the M itself so it can
//    ensure it is not in use when the last true *m is released.
type muintptr uintptr
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }

func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }
// setMNoWB performs *mp = new without a write barrier.
// For times when it's impractical to use an muintptr.
func setMNoWB(mp **m, new *m) {
	(*muintptr)(unsafe.Pointer(mp)).set(new)
}
	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	//
	// ctxt is unusual with respect to GC: it may be a
	// heap-allocated funcval, so GC needs to track it, but it
	// needs to be set and cleared from assembly, where it's
	// difficult to have write barriers. However, ctxt is really a
	// saved, live register, and we only ever exchange it between
	// the real register and the gobuf. Hence, we treat it as a
	// root during stack scanning, which means assembly that saves
	// and restores it doesn't need write barriers. It's still
	// typed as a pointer so that any other writes from Go get
	// write barriers.
	bp uintptr // for framepointer-enabled architectures
// sudog (pseudo-g) represents a g in a wait list, such as for sending/receiving
// on a channel.
//
// sudog is necessary because the g ↔ synchronization object relation
// is many-to-many. A g can be on many wait lists, so there may be
// many sudogs for one g; and many gs may be waiting on the same
// synchronization object, so there may be many sudogs for one object.
//
// sudogs are allocated from a special pool. Use acquireSudog and
// releaseSudog to allocate and free them.
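//
// Schematically, a blocking channel operation does something like the
// following, where ep is the element pointer (a sketch; see chan.go for
// the real sequence):
//
//	sg := acquireSudog()
//	sg.g = gp
//	sg.elem = ep // may point into gp's stack
//	// ... enqueue sg on the channel's wait queue and park ...
//	releaseSudog(sg)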
	// The following fields are protected by the hchan.lock of the
	// channel this sudog is blocking on. shrinkstack depends on
	// this for sudogs involved in channel ops.
	elem unsafe.Pointer // data element (may point to stack)
	// The following fields are never accessed concurrently.
	// For channels, waitlink is only accessed by g.
	// For semaphores, all fields (including the ones above)
	// are only accessed when holding a semaRoot lock.
	// isSelect indicates g is participating in a select, so
	// g.selectDone must be CAS'd to win the wake-up race.
	// success indicates whether communication over channel c
	// succeeded. It is true if the goroutine was awoken because a
	// value was delivered over channel c, and false if awoken
	// because c was closed.
	// waiters is a count of the entries on the semaRoot waiting
	// list other than the head of the list, clamped to a uint16
	// to fit in unused space.
	// Only meaningful at the head of the list.
	// (If we wanted to be overly clever, we could store a high 16 bits
	// in the second entry in the list.)
	parent   *sudog // semaRoot binary tree
	waitlink *sudog // g.waiting list or semaRoot
	waittail *sudog // semaRoot
type libcall struct {
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	err  uintptr // error number
// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
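//
// For example, given a stack s and a stack pointer sp, the size and a
// bounds check are simply (illustrative):
//
//	size := s.hi - s.lo
//	inStack := s.lo <= sp && sp < s.hi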
// heldLockInfo gives info on a held lock and the rank of that lock
type heldLockInfo struct {
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the //go:systemstack stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink

	_panic *_panic // innermost panic - offset known to liblink
	_defer *_defer // innermost defer
	m      *m      // current m; offset known to arm liblink
	syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
	stktopsp  uintptr // expected sp at top of stack, to check in traceback

	// param is a generic pointer parameter field used to pass
	// values in particular contexts where other storage for the
	// parameter would be difficult to find. It is currently used
	// in four ways:
	// 1. When a channel operation wakes up a blocked goroutine, it sets param to
	//    point to the sudog of the completed blocking operation.
	// 2. By gcAssistAlloc1 to signal back to its caller that the goroutine completed
	//    the GC cycle. It is unsafe to do so in any other way, because the goroutine's
	//    stack may have moved in the meantime.
	// 3. By debugCallWrap to pass parameters to a new goroutine because allocating a
	//    closure in the runtime is forbidden.
	// 4. When a panic is recovered and control returns to the respective frame,
	//    param may point to a savedOpenDeferState.
	atomicstatus atomic.Uint32
	stackLock    uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	waitsince  int64      // approx time when the g became blocked
	waitreason waitReason // if status==Gwaiting
	preempt       bool // preemption signal, duplicates stackguard0 = stackpreempt
	preemptStop   bool // transition to _Gpreempted on preemption; otherwise, just deschedule
	preemptShrink bool // shrink stack at synchronous safe point
	// asyncSafePoint is set if g is stopped at an asynchronous
	// safe point. This means there are frames on the stack
	// without precise pointer information.
	paniconfault bool // panic (instead of crash) on unexpected fault address
	gcscandone   bool // g has scanned stack; protected by _Gscan bit in status
	throwsplit   bool // must not split stack
	// activeStackChans indicates that there are unlocked channels
	// pointing into this goroutine's stack. If true, stack
	// copying needs to acquire channel locks to protect these
	// areas of the stack.
	activeStackChans bool
	// parkingOnChan indicates that the goroutine is about to
	// park on a chansend or chanrecv. Used to signal an unsafe point
	// for stack shrinking.
	parkingOnChan atomic.Bool
	raceignore int8 // ignore race detection events
	nocgocallback bool // whether to disable callbacks from C
	tracking      bool  // whether we're tracking this G for sched latency statistics
	trackingSeq   uint8 // used to decide whether to track this G
	trackingStamp int64 // timestamp of when the G last started being tracked
	runnableTime  int64 // the amount of time spent runnable, cleared when running, only used when tracking
	parentGoid uint64  // goid of goroutine that created this goroutine
	gopc       uintptr // pc of go statement that created this goroutine
	ancestors *[]ancestorInfo // ancestor information of the goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
	startpc    uintptr        // pc of goroutine function
	waiting    *sudog         // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	cgoCtxt    []uintptr      // cgo traceback context
	labels     unsafe.Pointer // profiler labels
	timer      *timer         // cached timer for time.Sleep
	selectDone atomic.Uint32  // are we participating in a select and did someone win the race?
	// goroutineProfiled indicates the status of this goroutine's stack for the
	// current in-progress goroutine profile
	goroutineProfiled goroutineProfileStateHolder

	// Per-G tracer state.
	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
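	//
	// Schematically, the malloc hot path checks for debt like this
	// (a sketch of the logic, not the exact code in malloc.go):
	//
	//	gp.gcAssistBytes -= int64(size)
	//	if gp.gcAssistBytes < 0 {
	//		// In debt: perform scan work before allocating.
	//		gcAssistAlloc(gp)
	//	}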
// gTrackingPeriod is the number of transitions out of _Grunning between
// latency tracking runs.
const gTrackingPeriod = 8
	// tlsSlots is the number of pointer-sized slots reserved for TLS on some platforms,
	// like Windows.
	tlsSize = tlsSlots * goarch.PtrSize
// Values for m.freeWait.
	freeMStack = 0 // M done, free stack and reference.
	freeMRef   = 1 // M done, free reference.
	freeMWait  = 2 // M still in use.
	g0      *g     // goroutine with scheduling stack
	morebuf gobuf  // gobuf arg to morestack
	divmod  uint32 // div/mod denominator for arm - known to liblink
	_       uint32 // align next field to 8 bytes

	// Fields not known to debuggers.
	procid     uint64            // for debuggers, but offset not hard-coded
	gsignal    *g                // signal-handling g
	goSigStack gsignalStack      // Go-allocated signal handling stack
	sigmask    sigset            // storage for saved signal mask
	tls        [tlsSlots]uintptr // thread-local storage (for x86 extern register)
	curg      *g       // current running goroutine
	caughtsig guintptr // goroutine running during fatal signal
	p         puintptr // attached p for executing go code (nil if not executing go code)
	oldp      puintptr // the p that was attached before executing a syscall

	preemptoff string // if != "", keep curg running on this m
	spinning    bool          // m is out of work and is actively looking for work
	blocked     bool          // m is blocked on a note
	newSigstack bool          // minit on C thread called sigaltstack
	incgo       bool          // m is executing a cgo call
	isextra     bool          // m is an extra m
	isExtraInC  bool          // m is an extra m that is not executing Go code
	freeWait    atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait)
	ncgocall      uint64        // number of cgo calls in total
	ncgo          int32         // number of cgo calls currently in progress
	cgoCallersUse atomic.Uint32 // if non-zero, cgoCallers in use temporarily
	cgoCallers    *cgoCallers   // cgo traceback if crashing in cgo call
	alllink *m // on allm
	createstack [32]uintptr // stack that created this thread; it's used for StackRecord.Stack0, so it must align with it
	lockedExt uint32   // tracking for external LockOSThread
	lockedInt uint32   // tracking for internal lockOSThread
	nextwaitm muintptr // next m waiting for lock
	// wait* are used to carry arguments from gopark into park_m, because
	// there's no stack to put them on. That is their sole purpose.
	waitunlockf          func(*g, unsafe.Pointer) bool
	waitlock             unsafe.Pointer
	waitTraceBlockReason traceBlockReason
	freelink *m // on sched.freem

	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	libcallpc uintptr // for cpu profiler
	syscall   libcall // stores syscall parameters on windows

	vdsoSP uintptr // SP for traceback while in VDSO call (0 if not in call)
	vdsoPC uintptr // PC for traceback while in VDSO call
	// preemptGen counts the number of completed preemption
	// signals. This is used to detect when a preemption is
	// requested, but fails.
	preemptGen atomic.Uint32

	// Whether this is a pending preemption signal on this M.
	signalPending atomic.Uint32

	// pcvalue lookup cache
	pcvalueCache pcvalueCache

	// Up to 10 locks held by this m, maintained by the lock ranking code.
	locksHeld [10]heldLockInfo
	status      uint32     // one of pidle/prunning/...
	schedtick   uint32     // incremented on every scheduler call
	syscalltick uint32     // incremented on every system call
	sysmontick  sysmontick // last tick observed by sysmon
	m           muintptr   // back-link to associated m (nil if idle)

	deferpool    []*_defer // pool of available defer structs (see panic.go)
	deferpoolbuf [32]*_defer
	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.

	// Queue of runnable goroutines. Accessed without lock.
	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue.
	//
	// Note that while other P's may atomically CAS this to zero,
	// only the owner P can CAS it to a valid G.
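	//
	// Schematically, readying gp into runnext goes through a CAS
	// (a sketch of the pattern used by runqput in proc.go):
	//
	//	oldnext := pp.runnext
	//	if pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
	//		// gp runs next; the old runnext is kicked back to runq.
	//	}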
	// Available G's (status == Gdead)

	// Cache of mspan objects from the heap.

		// We need an explicit length here because this field is used
		// in allocation codepaths where write barriers are not allowed,
		// and eliminating the write barrier/keeping it eliminated from
		// slice updates is tricky, more so than just managing the length
		// ourselves.
	// Cache of a single pinner object to reduce allocations from repeated
	// pinner creation.
	palloc persistentAlloc // per-P to avoid mutex

	// The when field of the first entry on the timer heap.
	// This is 0 if the timer heap is empty.
	timer0When atomic.Int64

	// The earliest known nextwhen field of a timer with
	// timerModifiedEarlier status. Because the timer may have been
	// modified again, there need not be any timer with this value.
	// This is 0 if there are no timerModifiedEarlier timers.
	timerModifiedEarliest atomic.Int64
	gcAssistTime         int64 // Nanoseconds in assistAlloc
	gcFractionalMarkTime int64 // Nanoseconds in fractional mark worker (atomic)

	// limiterEvent tracks events for the GC CPU limiter.
	limiterEvent limiterEvent

	// gcMarkWorkerMode is the mode for the next mark worker to run in.
	// That is, this is used to communicate with the worker goroutine
	// selected for immediate execution by
	// gcController.findRunnableGCWorker. When scheduling other goroutines,
	// this field must be set to gcMarkWorkerNotWorker.
	gcMarkWorkerMode gcMarkWorkerMode
	// gcMarkWorkerStartTime is the nanotime() at which the most recent
	// mark worker started.
	gcMarkWorkerStartTime int64
	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
	// disposed on certain GC state transitions.

	// wbBuf is this P's GC write barrier buffer.
	//
	// TODO: Consider caching this in the running G.

	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	// statsSeq is a counter indicating whether this P is currently
	// writing any stats. Its value is even when not, odd when it is.
	statsSeq atomic.Uint32
	// Lock for timers. We normally access the timers while running
	// on this P, but the scheduler can also do it from a different P.

	// Actions to take at some time. This is used to implement the
	// standard library's time package.
	// Must hold timersLock to access.

	// Number of timers in P's heap.
	numTimers atomic.Uint32

	// Number of timerDeleted timers in P's heap.
	deletedTimers atomic.Uint32
	// Race context used while executing timer functions.

	// maxStackScanDelta accumulates the amount of stack space held by
	// live goroutines (i.e. those eligible for stack scanning).
	// Flushed to gcController.maxStackScan once maxStackScanSlack
	// or -maxStackScanSlack is reached.
	maxStackScanDelta int64

	// gc-time statistics about current goroutines
	// Note that this differs from maxStackScan in that this
	// accumulates the actual stack observed to be used at GC time (hi - sp),
	// not an instantaneous measure of the total stack size that might need
	// to be scanned (hi - lo).
	scannedStackSize uint64 // stack size of goroutines scanned by this P
	scannedStacks    uint64 // number of goroutines scanned by this P
	// preempt is set to indicate that this P should enter the
	// scheduler ASAP (regardless of what G is running on it).
	// pageTraceBuf is a buffer for writing out page allocation/free/scavenge traces.
	//
	// Used only if GOEXPERIMENT=pagetrace.
	pageTraceBuf pageTraceBuf

	// Padding is no longer needed. False sharing is now not a worry because p is large enough
	// that its size class is an integer multiple of the cache line size (for any of our architectures).
	goidgen   atomic.Uint64
	lastpoll  atomic.Int64 // time of last network poll, 0 if currently polling
	pollUntil atomic.Int64 // time to which current poll is sleeping
	// When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
	// sure to call checkdead().
	midle        muintptr // idle m's waiting for work
	nmidle       int32    // number of idle m's waiting for work
	nmidlelocked int32    // number of locked m's waiting for work
	mnext        int64    // number of m's that have been created and next M ID
	maxmcount    int32    // maximum number of m's allowed (or die)
	nmsys        int32    // number of system m's not counted for deadlock
	nmfreed      int64    // cumulative number of freed m's

	ngsys atomic.Int32 // number of system goroutines

	pidle        puintptr      // idle p's
	nmspinning   atomic.Int32  // See "Worker thread parking/unparking" comment in proc.go.
	needspinning atomic.Uint32 // See "Delicate dance" comment in proc.go. Boolean. Must hold sched.lock to set to 1.
	// Global runnable queue.

	// disable controls selective disabling of the scheduler.
	//
	// Use schedEnableUser to control this.
	//
	// disable is protected by sched.lock.

		// user disables scheduling of user goroutines.
		runnable gQueue // pending runnable Gs
		n        int32  // length of runnable
	// Global cache of dead G's.
		stack   gList // Gs with stacks
		noStack gList // Gs without stacks

	// Central cache of sudog structs.

	// Central pool of available defer structs.

	// freem is the list of m's waiting to be freed when their
	// m.exited is set. Linked through m.freelink.
	gcwaiting  atomic.Bool // gc is waiting to run
	sysmonwait atomic.Bool
	// safepointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime
	// sysmonlock protects sysmon's actions on the runtime.
	//
	// Acquire and hold this mutex to block sysmon from interacting
	// with the rest of the runtime.

	// timeToRun is a distribution of scheduling latencies, defined
	// as the sum of time a G spends in the _Grunnable state before
	// it transitions to _Grunning.
	timeToRun timeHistogram

	// idleTime is the total CPU time Ps have "spent" idle.
	//
	// Reset on each GC cycle.
	idleTime atomic.Int64

	// totalMutexWaitTime is the sum of time goroutines have spent in _Gwaiting
	// with a waitreason of the form waitReasonSync{RW,}Mutex{R,}Lock.
	totalMutexWaitTime atomic.Int64
// Values for the flags field of a sigTabT.
	_SigNotify   = 1 << iota // let signal.Notify have signal, even if from kernel
	_SigKill                 // if signal.Notify doesn't take it, exit quietly
	_SigThrow                // if signal.Notify doesn't take it, exit loudly
	_SigPanic                // if the signal is from the kernel, panic
	_SigDefault              // if the signal isn't explicitly requested, don't monitor it
	_SigGoExit               // cause all runtime procs to exit (only used on Plan 9).
	_SigSetStack             // Don't explicitly install handler, but add SA_ONSTACK to existing libc handler
	_SigUnblock              // always unblock; see blockableSig
	_SigIgn                  // _SIG_DFL action is to ignore the signal
// Layout of in-memory per-function information prepared by linker
// See https://golang.org/s/go12symtab.
// Keep in sync with linker (../cmd/link/internal/ld/pcln.go:/pclntab)
// and with package debug/gosym and with symtab.go in package runtime.
	sys.NotInHeap // Only in static data

	entryOff uint32 // start pc, as offset from moduledata.text/pcHeader.textStart
	nameOff  int32  // function name, as index into moduledata.funcnametab.

	args        int32  // in/out args size
	deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.

	cuOffset  uint32     // runtime.cutab offset of this function's CU
	startLine int32      // line number of start of function (func keyword/TEXT directive)
	funcID    abi.FuncID // set for certain special runtime functions

	nfuncdata uint8 // must be last, must end on a uint32-aligned boundary
	// The end of the struct is followed immediately by two variable-length
	// arrays that reference the pcdata and funcdata locations for this
	// function.
	// pcdata contains the offset into moduledata.pctab for the start of
	// that index's table. e.g.,
	// &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of
	// the unsafe point table.
	//
	// An offset of 0 indicates that there is no table.
	//
	// pcdata [npcdata]uint32
	//
	// funcdata contains the offset past moduledata.gofunc which contains a
	// pointer to that index's funcdata. e.g.,
	// *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is
	// the argument pointer map.
	//
	// An offset of ^uint32(0) indicates that there is no entry.
	//
	// funcdata [nfuncdata]uint32
// Pseudo-Func that is returned for PCs that occur in inlined code.
// A *Func can be either a *_func or a *funcinl, and they are distinguished
// by the first uintptr.
//
// TODO(austin): Can we merge this with inlinedCall?
type funcinl struct {
	ones  uint32  // set to ^0 to distinguish from _func
	entry uintptr // entry of the real (the "outermost") frame
// layout of Itab known to compilers
// allocated in non-garbage-collected memory
// Needs to be in sync with
// ../cmd/compile/internal/reflectdata/reflect.go:/^func.WritePluginTable.
	hash uint32     // copy of _type.hash. Used for type switches.
	fun  [1]uintptr // variable sized. fun[0]==0 means _type does not implement inter.
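	// For example, a missing method table is detected as (illustrative):
	//
	//	if tab.fun[0] == 0 {
	//		// _type does not implement inter (e.g. failed assertion).
	//	}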
// Lock-free stack node.
// Also known to export_test.go.

type forcegcstate struct {
// extendRandom extends the random numbers in r[:n] to the whole slice r.
// Treats n<0 as n==0.
func extendRandom(r []byte, n int) {
		// Extend random bits using hash function & time seed
		h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
		for i := 0; i < goarch.PtrSize && n < len(r); i++ {
// A _defer holds an entry on the list of deferred calls.
// If you add a field here, add code to clear it in deferProcStack.
// This struct must match the code in cmd/compile/internal/ssagen/ssa.go:deferstruct
// and cmd/compile/internal/ssagen/ssa.go:(*state).call.
// Some defers will be allocated on the stack and some on the heap.
// All defers are logically part of the stack, so write barriers to
// initialize them are not required. All defers must be manually scanned,
// and for heap defers, marked.
type _defer struct {
	rangefunc bool    // true for rangefunc list
	sp        uintptr // sp at time of defer
	pc        uintptr // pc at time of defer
	fn        func()  // can be nil for open-coded defers
	link      *_defer // next defer on G; can point to either heap or stack!

	// If rangefunc is true, *head is the head of the atomic linked list
	// during a range-over-func execution.
	head *atomic.Pointer[_defer]
// A _panic holds information about an active panic.
//
// A _panic value must only ever live on the stack.
//
// The argp and link fields are stack pointers, but don't need special
// handling during stack growth: because they are pointer-typed and
// _panic values only live on the stack, regular stack pointer
// adjustment takes care of them.
type _panic struct {
	argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
	arg  any            // argument to panic
	link *_panic        // link to earlier panic
	// startPC and startSP track where _panic.start was called.
	startSP unsafe.Pointer

	// The current stack frame that we're running deferred calls for.

	// retpc stores the PC where the panic should jump back to, if the
	// function last returned by _panic.next() recovers the panic.

	// Extra state for handling open-coded defers.
	slotsPtr unsafe.Pointer

	recovered bool // whether this panic has been recovered
// savedOpenDeferState tracks the extra state from _panic that's
// necessary for deferreturn to pick up where gopanic left off,
// without needing to unwind the stack.
type savedOpenDeferState struct {
	deferBitsOffset uintptr
// ancestorInfo records details of where a goroutine was started.
type ancestorInfo struct {
	pcs  []uintptr // pcs from the stack of this goroutine
	goid uint64    // goroutine id of this goroutine; original goroutine possibly dead
	gopc uintptr   // pc of go statement that created this goroutine
// A waitReason explains why a goroutine has been stopped.
// See gopark. Do not re-use waitReasons, add new ones.
type waitReason uint8
	waitReasonZero                  waitReason = iota // ""
	waitReasonGCAssistMarking                         // "GC assist marking"
	waitReasonIOWait                                  // "IO wait"
	waitReasonChanReceiveNilChan                      // "chan receive (nil chan)"
	waitReasonChanSendNilChan                         // "chan send (nil chan)"
	waitReasonDumpingHeap                             // "dumping heap"
	waitReasonGarbageCollection                       // "garbage collection"
	waitReasonGarbageCollectionScan                   // "garbage collection scan"
	waitReasonPanicWait                               // "panicwait"
	waitReasonSelect                                  // "select"
	waitReasonSelectNoCases                           // "select (no cases)"
	waitReasonGCAssistWait                            // "GC assist wait"
	waitReasonGCSweepWait                             // "GC sweep wait"
	waitReasonGCScavengeWait                          // "GC scavenge wait"
	waitReasonChanReceive                             // "chan receive"
	waitReasonChanSend                                // "chan send"
	waitReasonFinalizerWait                           // "finalizer wait"
	waitReasonForceGCIdle                             // "force gc (idle)"
	waitReasonSemacquire                              // "semacquire"
	waitReasonSleep                                   // "sleep"
	waitReasonSyncCondWait                            // "sync.Cond.Wait"
	waitReasonSyncMutexLock                           // "sync.Mutex.Lock"
	waitReasonSyncRWMutexRLock                        // "sync.RWMutex.RLock"
	waitReasonSyncRWMutexLock                         // "sync.RWMutex.Lock"
	waitReasonTraceReaderBlocked                      // "trace reader (blocked)"
	waitReasonWaitForGCCycle                          // "wait for GC cycle"
	waitReasonGCWorkerIdle                            // "GC worker (idle)"
	waitReasonGCWorkerActive                          // "GC worker (active)"
	waitReasonPreempted                               // "preempted"
	waitReasonDebugCall                               // "debug call"
	waitReasonGCMarkTermination                       // "GC mark termination"
	waitReasonStoppingTheWorld                        // "stopping the world"
var waitReasonStrings = [...]string{
	waitReasonGCAssistMarking:       "GC assist marking",
	waitReasonIOWait:                "IO wait",
	waitReasonChanReceiveNilChan:    "chan receive (nil chan)",
	waitReasonChanSendNilChan:       "chan send (nil chan)",
	waitReasonDumpingHeap:           "dumping heap",
	waitReasonGarbageCollection:     "garbage collection",
	waitReasonGarbageCollectionScan: "garbage collection scan",
	waitReasonPanicWait:             "panicwait",
	waitReasonSelect:                "select",
	waitReasonSelectNoCases:         "select (no cases)",
	waitReasonGCAssistWait:          "GC assist wait",
	waitReasonGCSweepWait:           "GC sweep wait",
	waitReasonGCScavengeWait:        "GC scavenge wait",
	waitReasonChanReceive:           "chan receive",
	waitReasonChanSend:              "chan send",
	waitReasonFinalizerWait:         "finalizer wait",
	waitReasonForceGCIdle:           "force gc (idle)",
	waitReasonSemacquire:            "semacquire",
	waitReasonSleep:                 "sleep",
	waitReasonSyncCondWait:          "sync.Cond.Wait",
	waitReasonSyncMutexLock:         "sync.Mutex.Lock",
	waitReasonSyncRWMutexRLock:      "sync.RWMutex.RLock",
	waitReasonSyncRWMutexLock:       "sync.RWMutex.Lock",
	waitReasonTraceReaderBlocked:    "trace reader (blocked)",
	waitReasonWaitForGCCycle:        "wait for GC cycle",
	waitReasonGCWorkerIdle:          "GC worker (idle)",
	waitReasonGCWorkerActive:        "GC worker (active)",
	waitReasonPreempted:             "preempted",
	waitReasonDebugCall:             "debug call",
	waitReasonGCMarkTermination:     "GC mark termination",
	waitReasonStoppingTheWorld:      "stopping the world",
}
func (w waitReason) String() string {
	// waitReason is unsigned, so only the upper bound needs checking.
	if w >= waitReason(len(waitReasonStrings)) {
		return "unknown wait reason"
	}
	return waitReasonStrings[w]
}
func (w waitReason) isMutexWait() bool {
	return w == waitReasonSyncMutexLock ||
		w == waitReasonSyncRWMutexRLock ||
		w == waitReasonSyncRWMutexLock
}
	forcegc forcegcstate

	// allpLock protects P-less reads and size changes of allp, idlepMask,
	// and timerpMask, and all writes to allp.
	// len(allp) == gomaxprocs; may change at safe points, otherwise
	// immutable.
	// Bitmask of Ps in _Pidle list, one bit per P. Reads and writes must
	// be atomic. Length may change at safe points.
	//
	// Each P must update only its own bit. In order to maintain
	// consistency, a P going idle must update the idle mask simultaneously with
	// updates to the idle P list under the sched.lock, otherwise a racing
	// pidleget may clear the mask before pidleput sets the mask,
	// corrupting the bitmap.
	//
	// N.B., procresize takes ownership of all Ps in stopTheWorldWithSema.

	// Bitmask of Ps that may have a timer, one bit per P. Reads and writes
	// must be atomic. Length may change at safe points.

	// Pool of GC parked background workers. Entries are type
	// *gcBgMarkWorkerNode.
	gcBgMarkWorkerPool lfstack

	// Total number of gcBgMarkWorker goroutines. Protected by worldsema.
	gcBgMarkWorkerCount int32

	// Information about what cpu features are available.
	// Packages outside the runtime should not use these
	// as they are not an external api.
	// Set on startup in asm_{386,amd64}.s
	processorVersionInfo uint32
	goarm uint8 // set by cmd/link on arm systems
// Set by the linker so the runtime can determine the buildmode.
	islibrary bool // -buildmode=c-shared
	isarchive bool // -buildmode=c-archive
// Must agree with internal/buildcfg.FramePointerEnabled.
const framepointer_enabled = GOARCH == "amd64" || GOARCH == "arm64"