package runtime
import (
+ "internal/abi"
"internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
bp uintptr // for framepointer-enabled architectures
}
-// sudog represents a g in a wait list, such as for sending/receiving
+// sudog (pseudo-g) represents a g in a wait list, such as for sending/receiving
// on a channel.
//
// sudog is necessary because the g ↔ synchronization object relation
// because c was closed.
success bool
+ // waiters is a count of entries on the semaRoot waiting list other than
+ // the head of the list, clamped to a uint16 to fit in unused space.
+ // Only meaningful at the head of the list.
+ // (If we wanted to be overly clever, we could store a high 16-bit
+ // count in the second entry in the list.)
+ waiters uint16
+
parent *sudog // semaRoot binary tree
waitlink *sudog // g.waiting list or semaRoot
waittail *sudog // semaRoot
// stack describes the actual stack memory: [stack.lo, stack.hi).
// stackguard0 is the stack pointer compared in the Go stack growth prologue.
// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
- // stackguard1 is the stack pointer compared in the C stack growth prologue.
+ // stackguard1 is the stack pointer compared in the //go:systemstack stack growth prologue.
// It is stack.lo+StackGuard on g0 and gsignal stacks.
// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
stack stack // offset known to runtime/cgo
// param is a generic pointer parameter field used to pass
// values in particular contexts where other storage for the
// parameter would be difficult to find. It is currently used
- // in three ways:
+ // in four ways:
// 1. When a channel operation wakes up a blocked goroutine, it sets param to
// point to the sudog of the completed blocking operation.
// 2. By gcAssistAlloc1 to signal back to its caller that the goroutine completed
// stack may have moved in the meantime.
// 3. By debugCallWrap to pass parameters to a new goroutine because allocating a
// closure in the runtime is forbidden.
+ // 4. When a panic is recovered and control returns to the respective frame,
+ // param may point to a savedOpenDeferState.
param unsafe.Pointer
atomicstatus atomic.Uint32
stackLock uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
// for stack shrinking.
parkingOnChan atomic.Bool
- raceignore int8 // ignore race detection events
- sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine
- tracking bool // whether we're tracking this G for sched latency statistics
- trackingSeq uint8 // used to decide whether to track this G
- trackingStamp int64 // timestamp of when the G last started being tracked
- runnableTime int64 // the amount of time spent runnable, cleared when running, only used when tracking
- sysexitticks int64 // cputicks when syscall has returned (for tracing)
- traceseq uint64 // trace event sequencer
- tracelastp puintptr // last P emitted an event for this goroutine
- lockedm muintptr
- sig uint32
- writebuf []byte
- sigcode0 uintptr
- sigcode1 uintptr
- sigpc uintptr
- parentGoid uint64 // goid of goroutine that created this goroutine
- gopc uintptr // pc of go statement that created this goroutine
- ancestors *[]ancestorInfo // ancestor information goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
- startpc uintptr // pc of goroutine function
- racectx uintptr
- waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
- cgoCtxt []uintptr // cgo traceback context
- labels unsafe.Pointer // profiler labels
- timer *timer // cached timer for time.Sleep
- selectDone atomic.Uint32 // are we participating in a select and did someone win the race?
+ raceignore int8 // ignore race detection events
+ nocgocallback bool // whether to disable callbacks from C
+ tracking bool // whether we're tracking this G for sched latency statistics
+ trackingSeq uint8 // used to decide whether to track this G
+ trackingStamp int64 // timestamp of when the G last started being tracked
+ runnableTime int64 // the amount of time spent runnable, cleared when running, only used when tracking
+ lockedm muintptr
+ sig uint32
+ writebuf []byte
+ sigcode0 uintptr
+ sigcode1 uintptr
+ sigpc uintptr
+ parentGoid uint64 // goid of goroutine that created this goroutine
+ gopc uintptr // pc of go statement that created this goroutine
+ ancestors *[]ancestorInfo // ancestor information goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
+ startpc uintptr // pc of goroutine function
+ racectx uintptr
+ waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
+ cgoCtxt []uintptr // cgo traceback context
+ labels unsafe.Pointer // profiler labels
+ timer *timer // cached timer for time.Sleep
+ selectDone atomic.Uint32 // are we participating in a select and did someone win the race?
// goroutineProfiled indicates the status of this goroutine's stack for the
// current in-progress goroutine profile
goroutineProfiled goroutineProfileStateHolder
+ // Per-G tracer state.
+ trace gTraceState
+
// Per-G GC state
// gcAssistBytes is this G's GC assist credit in terms of
alllink *m // on allm
schedlink muintptr
lockedg guintptr
- createstack [32]uintptr // stack that created this thread.
+ createstack [32]uintptr // stack that created this thread; it is used for StackRecord.Stack0, so it must align with that.
lockedExt uint32 // tracking for external LockOSThread
lockedInt uint32 // tracking for internal lockOSThread
nextwaitm muintptr // next m waiting for lock
- waitunlockf func(*g, unsafe.Pointer) bool
- waitlock unsafe.Pointer
- waittraceev byte
- waittraceskip int
- startingtrace bool
- syscalltick uint32
- freelink *m // on sched.freem
+
+ // wait* are used to carry arguments from gopark into park_m, because
+ // there's no stack to put them on. That is their sole purpose.
+ waitunlockf func(*g, unsafe.Pointer) bool
+ waitlock unsafe.Pointer
+ waitTraceBlockReason traceBlockReason
+ waitTraceSkip int
+
+ syscalltick uint32
+ freelink *m // on sched.freem
+ trace mTraceState
// these are here because they are too large to be on the stack
// of low-level NOSPLIT functions.
// Whether this is a pending preemption signal on this M.
signalPending atomic.Uint32
+ // pcvalue lookup cache
+ pcvalueCache pcvalueCache
+
dlogPerM
mOS
// We need an explicit length here because this field is used
// in allocation codepaths where write barriers are not allowed,
// and eliminating the write barrier/keeping it eliminated from
- // slice updates is tricky, moreso than just managing the length
+ // slice updates is tricky, more so than just managing the length
// ourselves.
len int
buf [128]*mspan
}
- tracebuf traceBufPtr
+ // Cache of a single pinner object to reduce allocations from repeated
+ // pinner creation.
+ pinnerCache *pinner
- // traceSweep indicates the sweep events should be traced.
- // This is used to defer the sweep start event until a span
- // has actually been swept.
- traceSweep bool
- // traceSwept and traceReclaimed track the number of bytes
- // swept and reclaimed by sweeping in the current sweep loop.
- traceSwept, traceReclaimed uintptr
+ trace pTraceState
palloc persistentAlloc // per-P to avoid mutex
pcfile uint32
pcln uint32
npcdata uint32
- cuOffset uint32 // runtime.cutab offset of this function's CU
- startLine int32 // line number of start of function (func keyword/TEXT directive)
- funcID funcID // set for certain special runtime functions
- flag funcFlag
+ cuOffset uint32 // runtime.cutab offset of this function's CU
+ startLine int32 // line number of start of function (func keyword/TEXT directive)
+ funcID abi.FuncID // set for certain special runtime functions
+ flag abi.FuncFlag
_ [1]byte // pad
nfuncdata uint8 // must be last, must end on a uint32-aligned boundary
// layout of Itab known to compilers
// allocated in non-garbage-collected memory
// Needs to be in sync with
-// ../cmd/compile/internal/reflectdata/reflect.go:/^func.WriteTabs.
+// ../cmd/compile/internal/reflectdata/reflect.go:/^func.WritePluginTable.
type itab struct {
inter *interfacetype
_type *_type
// initialize them are not required. All defers must be manually scanned,
// and for heap defers, marked.
type _defer struct {
- started bool
- heap bool
- // openDefer indicates that this _defer is for a frame with open-coded
- // defers. We have only one defer record for the entire frame (which may
- // currently have 0, 1, or more defers active).
- openDefer bool
+ heap bool
+ rangefunc bool // true for rangefunc list
sp uintptr // sp at time of defer
pc uintptr // pc at time of defer
fn func() // can be nil for open-coded defers
- _panic *_panic // panic that is running defer
link *_defer // next defer on G; can point to either heap or stack!
- // If openDefer is true, the fields below record values about the stack
- // frame and associated function that has the open-coded defer(s). sp
- // above will be the sp for the frame, and pc will be address of the
- // deferreturn call in the function.
- fd unsafe.Pointer // funcdata for the function associated with the frame
- varp uintptr // value of varp for the stack frame
- // framepc is the current pc associated with the stack frame. Together,
- // with sp above (which is the sp associated with the stack frame),
- // framepc/sp can be used as pc/sp pair to continue a stack trace via
- // gentraceback().
- framepc uintptr
+ // If rangefunc is true, *head is the head of the atomic linked list
+ // during a range-over-func execution.
+ head *atomic.Pointer[_defer]
}
// A _panic holds information about an active panic.
// _panic values only live on the stack, regular stack pointer
// adjustment takes care of them.
type _panic struct {
- argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
- arg any // argument to panic
- link *_panic // link to earlier panic
- pc uintptr // where to return to in runtime if this panic is bypassed
- sp unsafe.Pointer // where to return to in runtime if this panic is bypassed
- recovered bool // whether this panic is over
- aborted bool // the panic was aborted
- goexit bool
+ argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
+ arg any // argument to panic
+ link *_panic // link to earlier panic
+
+ // startPC and startSP track where _panic.start was called.
+ startPC uintptr
+ startSP unsafe.Pointer
+
+ // The current stack frame that we're running deferred calls for.
+ sp unsafe.Pointer
+ lr uintptr
+ fp unsafe.Pointer
+
+ // retpc stores the PC where the panic should jump back to, if the
+ // function last returned by _panic.next() recovers the panic.
+ retpc uintptr
+
+ // Extra state for handling open-coded defers.
+ deferBitsPtr *uint8
+ slotsPtr unsafe.Pointer
+
+ recovered bool // whether this panic has been recovered
+ goexit bool
+ deferreturn bool
+}
+
+// savedOpenDeferState tracks the extra state from _panic that's
+// necessary for deferreturn to pick up where gopanic left off,
+// without needing to unwind the stack.
+type savedOpenDeferState struct {
+ retpc uintptr
+ deferBitsOffset uintptr
+ slotsOffset uintptr
}
// ancestorInfo records details of where a goroutine was started.
waitReasonDebugCall // "debug call"
waitReasonGCMarkTermination // "GC mark termination"
waitReasonStoppingTheWorld // "stopping the world"
+ waitReasonFlushProcCaches // "flushing proc caches"
)
var waitReasonStrings = [...]string{
waitReasonDebugCall: "debug call",
waitReasonGCMarkTermination: "GC mark termination",
waitReasonStoppingTheWorld: "stopping the world",
+ waitReasonFlushProcCaches: "flushing proc caches",
}
func (w waitReason) String() string {