1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
6 // Patterned after tcmalloc's algorithms; shorter code.
12 "runtime/internal/atomic"
13 "runtime/internal/sys"
17 // NOTE(rsc): Everything here could use cas if contention became an issue.
19 // profInsertLock protects changes to the start of all *bucket linked lists
21 // profBlockLock protects the contents of every blockRecord struct
23 // profMemActiveLock protects the active field of every memRecord struct
24 profMemActiveLock mutex
25 // profMemFutureLock is a set of locks that protect the respective elements
26 // of the future array of every memRecord struct
27 profMemFutureLock [len(memRecord{}.future)]mutex
30 // All memory allocations are local and do not escape outside of the profiler.
31 // The profiler is forbidden from referring to garbage-collected memory.
35 memProfile bucketType = 1 + iota
39 // size of bucket hash table
42 // max depth of stack to record in bucket
48 // A bucket holds per-call-stack profiling information.
49 // The representation is a bit sleazy, inherited from C.
50 // This struct defines the bucket header. It is followed in
51 // memory by the stack words and then the actual record
52 // data, either a memRecord or a blockRecord.
54 // Per-call-stack profiling information.
55 // Lookup by hashing call stack into a linked-list hash table.
57 // None of the fields in this bucket header are modified after
58 // creation, including its next and allnext links.
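// Illustrative layout sketch (not a real declaration): a bucket with nstk
// stack entries is allocated as a single block, roughly
//
//	[ bucket header | nstk × uintptr stack | memRecord or blockRecord ]
//
// which is the offset arithmetic that newBucket, stk, mp, and bp below rely on.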
65 typ bucketType // memBucket or blockBucket (includes mutexProfile)
71 // A memRecord is the bucket data for a bucket of type memProfile,
72 // part of the memory profile.
73 type memRecord struct {
74 // The following complex 3-stage scheme of stats accumulation
75 // is required to obtain a consistent picture of mallocs and frees
76 // for some point in time.
77 // The problem is that mallocs come in real time, while frees
78 // come only after a GC during concurrent sweeping. So if we counted
79 // them naively, the profile would be skewed toward mallocs.
81 // Hence, we delay information to get consistent snapshots as
82 // of mark termination. Allocations count toward the next mark
83 // termination's snapshot, while sweep frees count toward the
84 // previous mark termination's snapshot:
88 //    (timeline diagram elided: "MT" marks successive mark terminations,
89 //    "P" the point at which each snapshot is published, and "C", "C+1",
90 //    "C+2" the heap profile cycles referenced below.)
100 // Since we can't publish a consistent snapshot until all of
101 // the sweep frees are accounted for, we wait until the next
102 // mark termination ("MT" above) to publish the previous mark
103 // termination's snapshot ("P" above). To do this, allocation
104 // and free events are accounted to *future* heap profile
105 // cycles ("C+n" above) and we only publish a cycle once all
106 // of the events from that cycle are known to be done. Specifically:
108 // Mallocs are accounted to cycle C+2.
109 // Explicit frees are accounted to cycle C+2.
110 // GC frees (done during sweeping) are accounted to cycle C+1.
112 // After mark termination, we increment the global heap
113 // profile cycle counter and accumulate the stats from cycle C
114 // into the active profile.
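// Illustrative indexing summary (matching mProf_Malloc, mProf_Free, and
// mProf_FlushLocked below), writing C for the current value of mProfCycle:
//
//	malloc (mProf_Malloc):        future[(C+2) % len(future)]
//	sweep free (mProf_Free):      future[(C+1) % len(future)]
//	publish (mProf_FlushLocked):  active.add(&future[C % len(future)])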
116 // active is the currently published profile. A profiling
117 // cycle can be accumulated into active once it's complete.
118 active memRecordCycle
120 // future records the profile events we're counting for cycles
121 // that have not yet been published. This is a ring buffer
122 // indexed by the global heap profile cycle C and stores
123 // cycles C, C+1, and C+2. Unlike active, these counts are
124 // only for a single cycle; they are not cumulative across
127 // We store cycle C here because there's a window between when
128 // C becomes the active cycle and when we've flushed it to
130 future [3]memRecordCycle
134 type memRecordCycle struct {
135 allocs, frees uintptr
136 alloc_bytes, free_bytes uintptr
139 // add accumulates b into a. It does not zero b.
140 func (a *memRecordCycle) add(b *memRecordCycle) {
143 a.alloc_bytes += b.alloc_bytes
144 a.free_bytes += b.free_bytes
147 // A blockRecord is the bucket data for a bucket of type blockProfile,
148 // which is used in blocking and mutex profiles.
149 type blockRecord struct {
155 mbuckets atomic.UnsafePointer // *bucket, memory profile buckets
156 bbuckets atomic.UnsafePointer // *bucket, blocking profile buckets
157 xbuckets atomic.UnsafePointer // *bucket, mutex profile buckets
158 buckhash atomic.UnsafePointer // *buckhashArray
160 mProfCycle mProfCycleHolder
163 type buckhashArray [buckHashSize]atomic.UnsafePointer // *bucket
165 const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)
167 // mProfCycleHolder holds the global heap profile cycle number (wrapped at
168 // mProfCycleWrap, stored starting at bit 1), and a flag (stored at bit 0) to
169 // indicate whether future[cycle] in all buckets has been queued to flush into
170 // the active profile.
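// Illustrative packing sketch (variable names here are for explanation only):
//
//	value   = cycle<<1 | flushedBit
//	cycle   = value >> 1
//	flushed = value&1 != 0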
171 type mProfCycleHolder struct {
175 // read returns the current cycle count.
176 func (c *mProfCycleHolder) read() (cycle uint32) {
182 // setFlushed sets the flushed flag. It returns the current cycle count and the
183 // previous value of the flushed flag.
184 func (c *mProfCycleHolder) setFlushed() (cycle uint32, alreadyFlushed bool) {
186 prev := c.value.Load()
188 alreadyFlushed = (prev & 0x1) != 0
190 if c.value.CompareAndSwap(prev, next) {
191 return cycle, alreadyFlushed
196 // increment increases the cycle count by one, wrapping the value at
197 // mProfCycleWrap. It clears the flushed flag.
198 func (c *mProfCycleHolder) increment() {
199 // We explicitly wrap mProfCycle rather than depending on
200 // uint wraparound because the memRecord.future ring does not
201 // itself wrap at a power of two.
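// (Illustrative: len(future) is 3 and 2^32 is not a multiple of 3, so
// relying on natural uint32 wraparound would disrupt the future-ring
// indexing at the wrap point; mProfCycleWrap is a multiple of 3, so
// wrapping there keeps the indexing continuous.)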
203 prev := c.value.Load()
205 cycle = (cycle + 1) % mProfCycleWrap
207 if c.value.CompareAndSwap(prev, next) {
213 // newBucket allocates a bucket with the given type and number of stack entries.
214 func newBucket(typ bucketType, nstk int) *bucket {
215 size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
218 throw("invalid profile bucket type")
220 size += unsafe.Sizeof(memRecord{})
221 case blockProfile, mutexProfile:
222 size += unsafe.Sizeof(blockRecord{})
225 b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
227 b.nstk = uintptr(nstk)
231 // stk returns the slice in b holding the stack.
232 func (b *bucket) stk() []uintptr {
233 stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
234 return stk[:b.nstk:b.nstk]
237 // mp returns the memRecord associated with the memProfile bucket b.
238 func (b *bucket) mp() *memRecord {
239 if b.typ != memProfile {
240 throw("bad use of bucket.mp")
242 data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
243 return (*memRecord)(data)
246 // bp returns the blockRecord associated with the blockProfile bucket b.
247 func (b *bucket) bp() *blockRecord {
248 if b.typ != blockProfile && b.typ != mutexProfile {
249 throw("bad use of bucket.bp")
251 data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
252 return (*blockRecord)(data)
255 // Return the bucket for stk[0:nstk], allocating a new bucket if needed.
256 func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
257 bh := (*buckhashArray)(buckhash.Load())
259 lock(&profInsertLock)
260 // check again under the lock
261 bh = (*buckhashArray)(buckhash.Load())
263 bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys))
265 throw("runtime: cannot allocate memory")
267 buckhash.StoreNoWB(unsafe.Pointer(bh))
269 unlock(&profInsertLock)
274 for _, pc := range stk {
287 i := int(h % buckHashSize)
288 // first check optimistically, without the lock
289 for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
290 if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
299 lock(&profInsertLock)
300 // check again under the insertion lock
301 for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
302 if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
303 unlock(&profInsertLock)
308 // Create new bucket.
309 b := newBucket(typ, len(stk))
314 var allnext *atomic.UnsafePointer
315 if typ == memProfile {
317 } else if typ == mutexProfile {
323 b.next = (*bucket)(bh[i].Load())
324 b.allnext = (*bucket)(allnext.Load())
326 bh[i].StoreNoWB(unsafe.Pointer(b))
327 allnext.StoreNoWB(unsafe.Pointer(b))
329 unlock(&profInsertLock)
333 func eqslice(x, y []uintptr) bool {
334 if len(x) != len(y) {
337 for i, xi := range x {
345 // mProf_NextCycle publishes the next heap profile cycle and creates a
346 // fresh heap profile cycle. This operation is fast and can be done
347 // during STW. The caller must call mProf_Flush before calling
348 // mProf_NextCycle again.
350 // This is called by mark termination during STW so allocations and
351 // frees after the world is started again count towards a new heap
353 func mProf_NextCycle() {
354 mProfCycle.increment()
357 // mProf_Flush flushes the events from the current heap profiling
358 // cycle into the active profile. After this it is safe to start a new
359 // heap profiling cycle with mProf_NextCycle.
361 // This is called by GC after mark termination starts the world. In
362 // contrast with mProf_NextCycle, this is somewhat expensive, but safe
363 // to do concurrently.
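// Call-order sketch for a single GC, as described in these comments:
// mProf_NextCycle runs during mark termination (STW), the world restarts,
// mProf_Flush then runs concurrently, and mProf_PostSweep runs once all
// sweep frees for the cycle have completed.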
365 cycle, alreadyFlushed := mProfCycle.setFlushed()
370 index := cycle % uint32(len(memRecord{}.future))
371 lock(&profMemActiveLock)
372 lock(&profMemFutureLock[index])
373 mProf_FlushLocked(index)
374 unlock(&profMemFutureLock[index])
375 unlock(&profMemActiveLock)
378 // mProf_FlushLocked flushes the events from the heap profiling cycle at index
379 // into the active profile. The caller must hold the lock for the active profile
380 // (profMemActiveLock) and for the profiling cycle at index
381 // (profMemFutureLock[index]).
382 func mProf_FlushLocked(index uint32) {
383 assertLockHeld(&profMemActiveLock)
384 assertLockHeld(&profMemFutureLock[index])
385 head := (*bucket)(mbuckets.Load())
386 for b := head; b != nil; b = b.allnext {
389 // Flush cycle C into the published profile and clear
391 mpc := &mp.future[index]
393 *mpc = memRecordCycle{}
397 // mProf_PostSweep records that all sweep frees for this GC cycle have
398 // completed. This has the effect of publishing the heap profile
399 // snapshot as of the last mark termination without advancing the heap
401 func mProf_PostSweep() {
402 // Flush cycle C+1 to the active profile so everything as of
403 // the last mark termination becomes visible. *Don't* advance
404 // the cycle, since we're still accumulating allocs in cycle
405 // C+2, which have to become C+1 in the next mark termination
407 cycle := mProfCycle.read() + 1
409 index := cycle % uint32(len(memRecord{}.future))
410 lock(&profMemActiveLock)
411 lock(&profMemFutureLock[index])
412 mProf_FlushLocked(index)
413 unlock(&profMemFutureLock[index])
414 unlock(&profMemActiveLock)
417 // Called by malloc to record a profiled block.
418 func mProf_Malloc(p unsafe.Pointer, size uintptr) {
419 var stk [maxStack]uintptr
420 nstk := callers(4, stk[:])
422 index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future))
424 b := stkbucket(memProfile, size, stk[:nstk], true)
426 mpc := &mp.future[index]
428 lock(&profMemFutureLock[index])
430 mpc.alloc_bytes += size
431 unlock(&profMemFutureLock[index])
433 // Setprofilebucket locks a bunch of other mutexes, so we call it outside of
434 // the profiler locks. This reduces potential contention and chances of
435 // deadlocks. Since the object must be alive during the call to
436 // mProf_Malloc, it's fine to do this non-atomically.
438 setprofilebucket(p, b)
442 // Called when freeing a profiled block.
443 func mProf_Free(b *bucket, size uintptr) {
444 index := (mProfCycle.read() + 1) % uint32(len(memRecord{}.future))
447 mpc := &mp.future[index]
449 lock(&profMemFutureLock[index])
451 mpc.free_bytes += size
452 unlock(&profMemFutureLock[index])
455 var blockprofilerate uint64 // in CPU ticks
457 // SetBlockProfileRate controls the fraction of goroutine blocking events
458 // that are reported in the blocking profile. The profiler aims to sample
459 // an average of one blocking event per rate nanoseconds spent blocked.
461 // To include every blocking event in the profile, pass rate = 1.
462 // To turn off profiling entirely, pass rate <= 0.
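// For example (usage sketch from outside the runtime):
//
//	runtime.SetBlockProfileRate(1)         // record every blocking event
//	runtime.SetBlockProfileRate(1_000_000) // ~one sample per millisecond spent blocked
//	runtime.SetBlockProfileRate(0)         // disable block profiling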
463 func SetBlockProfileRate(rate int) {
466 r = 0 // disable profiling
467 } else if rate == 1 {
468 r = 1 // profile everything
470 // convert ns to cycles, use float64 to prevent overflow during multiplication
471 r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
477 atomic.Store64(&blockprofilerate, uint64(r))
480 func blockevent(cycles int64, skip int) {
485 rate := int64(atomic.Load64(&blockprofilerate))
486 if blocksampled(cycles, rate) {
487 saveblockevent(cycles, rate, skip+1, blockProfile)
491 // blocksampled returns true for all events where cycles >= rate. Shorter
492 // events have a cycles/rate random chance of returning true.
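// For example, with rate=100 an event that blocked for cycles=25 is kept
// with probability 25/100; saveblockevent then scales kept samples back up
// so the profile remains unbiased in expectation.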
493 func blocksampled(cycles, rate int64) bool {
494 if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
500 func saveblockevent(cycles, rate int64, skip int, which bucketType) {
503 var stk [maxStack]uintptr
504 if gp.m.curg == nil || gp.m.curg == gp {
505 nstk = callers(skip, stk[:])
507 nstk = gcallers(gp.m.curg, skip, stk[:])
509 b := stkbucket(which, 0, stk[:nstk], true)
513 // We want to up-scale the count and cycles according to the
514 // probability that the event was sampled. For block profile events,
515 // the sample probability is 1 if cycles >= rate, and cycles / rate
516 // otherwise. For mutex profile events, the sample probability is 1 / rate.
517 // We scale the events by 1 / (probability the event was sampled).
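// (Illustrative: a block event with cycles=25 sampled at rate=100 had a
// 1/4 chance of being kept, so it is recorded with count 4 rather than 1.)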
518 if which == blockProfile && cycles < rate {
519 // Remove sampling bias, see discussion on http://golang.org/cl/299991.
520 bp.count += float64(rate) / float64(cycles)
522 } else if which == mutexProfile {
523 bp.count += float64(rate)
524 bp.cycles += rate * cycles
529 unlock(&profBlockLock)
532 var mutexprofilerate uint64 // fraction sampled
534 // SetMutexProfileFraction controls the fraction of mutex contention events
535 // that are reported in the mutex profile. On average 1/rate events are
536 // reported. The previous rate is returned.
538 // To turn off profiling entirely, pass rate 0.
539 // To just read the current rate, pass rate < 0.
540 // (For n>1 the details of sampling may change.)
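// For example (usage sketch):
//
//	prev := runtime.SetMutexProfileFraction(5) // report ~1 in 5 contention events
//	defer runtime.SetMutexProfileFraction(prev)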
541 func SetMutexProfileFraction(rate int) int {
543 return int(mutexprofilerate)
545 old := mutexprofilerate
546 atomic.Store64(&mutexprofilerate, uint64(rate))
550 //go:linkname mutexevent sync.event
551 func mutexevent(cycles int64, skip int) {
555 rate := int64(atomic.Load64(&mutexprofilerate))
556 if rate > 0 && int64(fastrand())%rate == 0 {
557 saveblockevent(cycles, rate, skip+1, mutexProfile)
561 // Go interface to profile data.
563 // A StackRecord describes a single execution stack.
564 type StackRecord struct {
565 Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
568 // Stack returns the stack trace associated with the record,
569 // a prefix of r.Stack0.
570 func (r *StackRecord) Stack() []uintptr {
571 for i, v := range r.Stack0 {
579 // MemProfileRate controls the fraction of memory allocations
580 // that are recorded and reported in the memory profile.
581 // The profiler aims to sample an average of
582 // one allocation per MemProfileRate bytes allocated.
584 // To include every allocated block in the profile, set MemProfileRate to 1.
585 // To turn off profiling entirely, set MemProfileRate to 0.
587 // The tools that process the memory profiles assume that the
588 // profile rate is constant across the lifetime of the program
589 // and equal to the current value. Programs that change the
590 // memory profiling rate should do so just once, as early as
591 // possible in the execution of the program (for example,
592 // at the beginning of main).
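// For example (usage sketch), a program that wants every allocation in its
// profile sets the rate once, at the very start of main:
//
//	func main() {
//		runtime.MemProfileRate = 1
//		// ... rest of the program ...
//	}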
593 var MemProfileRate int = 512 * 1024
595 // disableMemoryProfiling is set by the linker if runtime.MemProfile
596 // is not used and the link type guarantees nobody else could use it
598 var disableMemoryProfiling bool
600 // A MemProfileRecord describes the live objects allocated
601 // by a particular call sequence (stack trace).
602 type MemProfileRecord struct {
603 AllocBytes, FreeBytes int64 // number of bytes allocated, freed
604 AllocObjects, FreeObjects int64 // number of objects allocated, freed
605 Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
608 // InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
609 func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
611 // InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
612 func (r *MemProfileRecord) InUseObjects() int64 {
613 return r.AllocObjects - r.FreeObjects
616 // Stack returns the stack trace associated with the record,
617 // a prefix of r.Stack0.
618 func (r *MemProfileRecord) Stack() []uintptr {
619 for i, v := range r.Stack0 {
627 // MemProfile returns a profile of memory allocated and freed per allocation
630 // MemProfile returns n, the number of records in the current memory profile.
631 // If len(p) >= n, MemProfile copies the profile into p and returns n, true.
632 // If len(p) < n, MemProfile does not change p and returns n, false.
634 // If inuseZero is true, the profile includes allocation records
635 // where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
636 // These are sites where memory was allocated, but it has all
637 // been released back to the runtime.
639 // The returned profile may be up to two garbage collection cycles old.
640 // This is to avoid skewing the profile toward allocations; because
641 // allocations happen in real time but frees are delayed until the garbage
642 // collector performs sweeping, the profile only accounts for allocations
643 // that have had a chance to be freed by the garbage collector.
645 // Most clients should use the runtime/pprof package or
646 // the testing package's -test.memprofile flag instead
647 // of calling MemProfile directly.
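// A typical direct-call sketch (runtime/pprof performs the equivalent):
//
//	n, _ := runtime.MemProfile(nil, true)
//	p := make([]runtime.MemProfileRecord, n+50) // headroom for new records
//	if n, ok := runtime.MemProfile(p, true); ok {
//		p = p[:n]
//	}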
648 func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
649 cycle := mProfCycle.read()
650 // If we're between mProf_NextCycle and mProf_Flush, take care
651 // of flushing to the active profile so we only have to look
652 // at the active profile below.
653 index := cycle % uint32(len(memRecord{}.future))
654 lock(&profMemActiveLock)
655 lock(&profMemFutureLock[index])
656 mProf_FlushLocked(index)
657 unlock(&profMemFutureLock[index])
659 head := (*bucket)(mbuckets.Load())
660 for b := head; b != nil; b = b.allnext {
662 if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
665 if mp.active.allocs != 0 || mp.active.frees != 0 {
670 // Absolutely no data, suggesting that a garbage collection
671 // has not yet happened. In order to allow profiling when
672 // garbage collection is disabled from the beginning of execution,
673 // accumulate all of the cycles, and recount buckets.
675 for b := head; b != nil; b = b.allnext {
677 for c := range mp.future {
678 lock(&profMemFutureLock[c])
679 mp.active.add(&mp.future[c])
680 mp.future[c] = memRecordCycle{}
681 unlock(&profMemFutureLock[c])
683 if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
691 for b := head; b != nil; b = b.allnext {
693 if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
699 unlock(&profMemActiveLock)
703 // Write b's data to r.
704 func record(r *MemProfileRecord, b *bucket) {
706 r.AllocBytes = int64(mp.active.alloc_bytes)
707 r.FreeBytes = int64(mp.active.free_bytes)
708 r.AllocObjects = int64(mp.active.allocs)
709 r.FreeObjects = int64(mp.active.frees)
711 racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
714 msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
717 asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
719 copy(r.Stack0[:], b.stk())
720 for i := int(b.nstk); i < len(r.Stack0); i++ {
725 func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
726 lock(&profMemActiveLock)
727 head := (*bucket)(mbuckets.Load())
728 for b := head; b != nil; b = b.allnext {
730 fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
732 unlock(&profMemActiveLock)
735 // BlockProfileRecord describes blocking events originating
736 // at a particular call sequence (stack trace).
737 type BlockProfileRecord struct {
743 // BlockProfile returns n, the number of records in the current blocking profile.
744 // If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
745 // If len(p) < n, BlockProfile does not change p and returns n, false.
747 // Most clients should use the runtime/pprof package or
748 // the testing package's -test.blockprofile flag instead
749 // of calling BlockProfile directly.
750 func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
752 head := (*bucket)(bbuckets.Load())
753 for b := head; b != nil; b = b.allnext {
758 for b := head; b != nil; b = b.allnext {
761 r.Count = int64(bp.count)
762 // Prevent callers from having to worry about division by zero errors.
763 // See discussion on http://golang.org/cl/299991.
769 racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
772 msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
775 asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
777 i := copy(r.Stack0[:], b.stk())
778 for ; i < len(r.Stack0); i++ {
784 unlock(&profBlockLock)
788 // MutexProfile returns n, the number of records in the current mutex profile.
789 // If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
790 // Otherwise, MutexProfile does not change p, and returns n, false.
792 // Most clients should use the runtime/pprof package
793 // instead of calling MutexProfile directly.
794 func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
796 head := (*bucket)(xbuckets.Load())
797 for b := head; b != nil; b = b.allnext {
802 for b := head; b != nil; b = b.allnext {
805 r.Count = int64(bp.count)
807 i := copy(r.Stack0[:], b.stk())
808 for ; i < len(r.Stack0); i++ {
814 unlock(&profBlockLock)
818 // ThreadCreateProfile returns n, the number of records in the thread creation profile.
819 // If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
820 // If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
822 // Most clients should use the runtime/pprof package instead
823 // of calling ThreadCreateProfile directly.
824 func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
825 first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
826 for mp := first; mp != nil; mp = mp.alllink {
832 for mp := first; mp != nil; mp = mp.alllink {
833 p[i].Stack0 = mp.createstack
840 //go:linkname runtime_goroutineProfileWithLabels runtime/pprof.runtime_goroutineProfileWithLabels
841 func runtime_goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
842 return goroutineProfileWithLabels(p, labels)
845 // labels may be nil. If labels is non-nil, it must have the same length as p.
846 func goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
847 if labels != nil && len(labels) != len(p) {
851 return goroutineProfileWithLabelsConcurrent(p, labels)
854 var goroutineProfile = struct {
858 records []StackRecord
859 labels []unsafe.Pointer
864 // goroutineProfileState indicates the status of a goroutine's stack for the
865 // current in-progress goroutine profile. Goroutines' stacks are initially
866 // "Absent" from the profile, and end up "Satisfied" by the time the profile is
867 // complete. While a goroutine's stack is being captured, its
868 // goroutineProfileState will be "InProgress" and it will not be able to run
869 // until the capture completes and the state moves to "Satisfied".
871 // Some goroutines (the finalizer goroutine, which at various times can be
872 // either a "system" or a "user" goroutine, and the goroutine that is
873 // coordinating the profile, and any goroutines created during the profile) move
874 // directly to the "Satisfied" state.
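// Sketch of the transitions described above:
//
//	Absent → InProgress → Satisfied   (stack captured during this profile)
//	Absent → Satisfied                (coordinator, finalizer, and goroutines
//	                                   created while the profile is active)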
875 type goroutineProfileState uint32
878 goroutineProfileAbsent goroutineProfileState = iota
879 goroutineProfileInProgress
880 goroutineProfileSatisfied
883 type goroutineProfileStateHolder atomic.Uint32
885 func (p *goroutineProfileStateHolder) Load() goroutineProfileState {
886 return goroutineProfileState((*atomic.Uint32)(p).Load())
889 func (p *goroutineProfileStateHolder) Store(value goroutineProfileState) {
890 (*atomic.Uint32)(p).Store(uint32(value))
893 func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileState) bool {
894 return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new))
897 func goroutineProfileWithLabelsConcurrent(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
898 semacquire(&goroutineProfile.sema)
902 stopTheWorld(stwGoroutineProfile)
903 // Using gcount while the world is stopped should give us a consistent view
904 // of the number of live goroutines, minus the number of goroutines that are
905 // alive and permanently marked as "system". But to make this count agree
906 // with what we'd get from isSystemGoroutine, we need special handling for
907 // goroutines that can vary between user and system to ensure that the count
908 // doesn't change during the collection. So, check the finalizer goroutine
911 if fingStatus.Load()&fingRunningFinalizer != 0 {
916 // There's not enough space in p to store the whole profile, so (per the
917 // contract of runtime.GoroutineProfile) we're not allowed to write to p
918 // at all and must return n, false.
920 semrelease(&goroutineProfile.sema)
924 // Save current goroutine.
928 saveg(pc, sp, ourg, &p[0])
930 ourg.goroutineProfiled.Store(goroutineProfileSatisfied)
931 goroutineProfile.offset.Store(1)
933 // Prepare for all other goroutines to enter the profile. Aside from ourg,
934 // every goroutine struct in the allgs list has its goroutineProfiled field
935 // cleared. Any goroutine created from this point on (while
936 // goroutineProfile.active is set) will start with its goroutineProfiled
937 // field set to goroutineProfileSatisfied.
938 goroutineProfile.active = true
939 goroutineProfile.records = p
940 goroutineProfile.labels = labels
941 // The finalizer goroutine needs special handling because it can vary over
942 // time between being a user goroutine (eligible for this profile) and a
943 // system goroutine (to be excluded). Pick one before restarting the world.
945 fing.goroutineProfiled.Store(goroutineProfileSatisfied)
946 if readgstatus(fing) != _Gdead && !isSystemGoroutine(fing, false) {
947 doRecordGoroutineProfile(fing)
952 // Visit each goroutine that existed as of the startTheWorld call above.
954 // New goroutines may not be in this list, but we didn't want to know about
955 // them anyway. If they do appear in this list (via reusing a dead goroutine
956 // struct, or racing to launch between the world restarting and us getting
957 // the list), they will already have their goroutineProfiled field set to
958 // goroutineProfileSatisfied before their state transitions out of _Gdead.
960 // Any goroutine that the scheduler tries to execute concurrently with this
961 // call will start by adding itself to the profile (before the act of
962 // executing can cause any changes in its stack).
963 forEachGRace(func(gp1 *g) {
964 tryRecordGoroutineProfile(gp1, Gosched)
967 stopTheWorld(stwGoroutineProfileCleanup)
968 endOffset := goroutineProfile.offset.Swap(0)
969 goroutineProfile.active = false
970 goroutineProfile.records = nil
971 goroutineProfile.labels = nil
974 // Restore the invariant that every goroutine struct in allgs has its
975 // goroutineProfiled field cleared.
976 forEachGRace(func(gp1 *g) {
977 gp1.goroutineProfiled.Store(goroutineProfileAbsent)
981 raceacquire(unsafe.Pointer(&labelSync))
984 if n != int(endOffset) {
985 // It's a big surprise that the number of goroutines changed while we
986 // were collecting the profile. But probably better to return a
987 // truncated profile than to crash the whole process.
989 // For instance, needm moves a goroutine out of the _Gdead state and so
990 // might be able to change the goroutine count without interacting with
991 // the scheduler. For code like that, the race windows are small and the
992 // combination of features is uncommon, so it's hard to be (and remain)
993 // sure we've caught them all.
996 semrelease(&goroutineProfile.sema)
1000 // tryRecordGoroutineProfileWB asserts that write barriers are allowed and calls
1001 // tryRecordGoroutineProfile.
1003 //go:yeswritebarrierrec
1004 func tryRecordGoroutineProfileWB(gp1 *g) {
1005 if getg().m.p.ptr() == nil {
1006 throw("no P available, write barriers are forbidden")
1008 tryRecordGoroutineProfile(gp1, osyield)
1011 // tryRecordGoroutineProfile ensures that gp1 has the appropriate representation
1012 // in the current goroutine profile: either that it should not be profiled, or
1013 // that a snapshot of its call stack and labels is now in the profile.
1014 func tryRecordGoroutineProfile(gp1 *g, yield func()) {
1015 if readgstatus(gp1) == _Gdead {
1016 // Dead goroutines should not appear in the profile. Goroutines that
1017 // start while profile collection is active will get goroutineProfiled
1018 // set to goroutineProfileSatisfied before transitioning out of _Gdead,
1019 // so here we check _Gdead first.
1022 if isSystemGoroutine(gp1, true) {
1023 // System goroutines should not appear in the profile. (The finalizer
1024 // goroutine is marked as "already profiled".)
1029 prev := gp1.goroutineProfiled.Load()
1030 if prev == goroutineProfileSatisfied {
1031 // This goroutine is already in the profile (or is new since the
1032 // start of collection, so shouldn't appear in the profile).
1035 if prev == goroutineProfileInProgress {
1036 // Something else is adding gp1 to the goroutine profile right now.
1037 // Give that a moment to finish.
1042 // While we have gp1.goroutineProfiled set to
1043 // goroutineProfileInProgress, gp1 may appear _Grunnable but will not
1044 // actually be able to run. Disable preemption for ourselves, to make
1045 // sure we finish profiling gp1 right away instead of leaving it stuck
1048 if gp1.goroutineProfiled.CompareAndSwap(goroutineProfileAbsent, goroutineProfileInProgress) {
1049 doRecordGoroutineProfile(gp1)
1050 gp1.goroutineProfiled.Store(goroutineProfileSatisfied)
1056 // doRecordGoroutineProfile writes gp1's call stack and labels to an in-progress
1057 // goroutine profile. Preemption is disabled.
1059 // This may be called via tryRecordGoroutineProfile in two ways: by the
1060 // goroutine that is coordinating the goroutine profile (running on its own
1061 // stack), or from the scheduler in preparation to execute gp1 (running on the
1063 func doRecordGoroutineProfile(gp1 *g) {
1064 if readgstatus(gp1) == _Grunning {
1065 print("doRecordGoroutineProfile gp1=", gp1.goid, "\n")
1066 throw("cannot read stack of running goroutine")
1069 offset := int(goroutineProfile.offset.Add(1)) - 1
1071 if offset >= len(goroutineProfile.records) {
1072 // Should be impossible, but better to return a truncated profile than
1073 // to crash the entire process at this point. Instead, deal with it in
1074 // goroutineProfileWithLabelsConcurrent where we have more context.
1078 // saveg calls gentraceback, which may call cgo traceback functions. When
1079 // called from the scheduler, this is on the system stack already so
1080 // traceback.go:cgoContextPCs will avoid calling back into the scheduler.
1082 // When called from the goroutine coordinating the profile, we still have
1083 // set gp1.goroutineProfiled to goroutineProfileInProgress and so are still
1084 // preventing it from being truly _Grunnable. So we'll use the system stack
1085 // to avoid schedule delays.
1086 systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &goroutineProfile.records[offset]) })
1088 if goroutineProfile.labels != nil {
1089 goroutineProfile.labels[offset] = gp1.labels
1093 func goroutineProfileWithLabelsSync(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
1096 isOK := func(gp1 *g) bool {
1097 // Checking isSystemGoroutine here makes GoroutineProfile
1098 // consistent with both NumGoroutine and Stack.
1099 return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false)
1102 stopTheWorld(stwGoroutineProfile)
1104 // World is stopped, no locking required.
1106 forEachGRace(func(gp1 *g) {
1116 // Save current goroutine.
1119 systemstack(func() {
1120 saveg(pc, sp, gp, &r[0])
1124 // If we have a place to put our goroutine labelmap, insert it there.
1130 // Save other goroutines.
1131 forEachGRace(func(gp1 *g) {
1137 // Should be impossible, but better to return a
1138 // truncated profile than to crash the entire process.
1141 // saveg calls gentraceback, which may call cgo traceback functions.
1142 // The world is stopped, so it cannot use cgocall (which will be
1143 // blocked at exitsyscall). Do it on the system stack so it won't
1144 // call into the scheduler (see traceback.go:cgoContextPCs).
1145 systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &r[0]) })
1155 raceacquire(unsafe.Pointer(&labelSync))
1162 // GoroutineProfile returns n, the number of records in the active goroutine stack profile.
1163 // If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
1164 // If len(p) < n, GoroutineProfile does not change p and returns n, false.
1166 // Most clients should use the runtime/pprof package instead
1167 // of calling GoroutineProfile directly.
1168 func GoroutineProfile(p []StackRecord) (n int, ok bool) {
1170 return goroutineProfileWithLabels(p, nil)
1173 func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
1175 u.initAt(pc, sp, 0, gp, unwindSilentErrors)
1176 n := tracebackPCs(&u, 0, r.Stack0[:])
1177 if n < len(r.Stack0) {
1182 // Stack formats a stack trace of the calling goroutine into buf
1183 // and returns the number of bytes written to buf.
1184 // If all is true, Stack formats stack traces of all other goroutines
1185 // into buf after the trace for the current goroutine.
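// For example (usage sketch):
//
//	buf := make([]byte, 64<<10)
//	n := runtime.Stack(buf, false) // current goroutine only
//	os.Stderr.Write(buf[:n])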
1186 func Stack(buf []byte, all bool) int {
1188 stopTheWorld(stwAllGoroutinesStack)
1196 systemstack(func() {
1198 // Force traceback=1 to override GOTRACEBACK setting,
1199 // so that Stack's results are consistent.
1200 // GOTRACEBACK is only about crash dumps.
1202 g0.writebuf = buf[0:0:len(buf)]
1204 traceback(pc, sp, 0, gp)
1209 n = len(g0.writebuf)
1220 // Tracing of alloc/free/gc.
1224 func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
1229 print("tracealloc(", p, ", ", hex(size), ")\n")
1231 print("tracealloc(", p, ", ", hex(size), ", ", toRType(typ).string(), ")\n")
1233 if gp.m.curg == nil || gp == gp.m.curg {
1237 systemstack(func() {
1238 traceback(pc, sp, 0, gp)
1241 goroutineheader(gp.m.curg)
1242 traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
1249 func tracefree(p unsafe.Pointer, size uintptr) {
1253 print("tracefree(", p, ", ", hex(size), ")\n")
1257 systemstack(func() {
1258 traceback(pc, sp, 0, gp)
1269 print("tracegc()\n")
1270 // running on m->g0 stack; show all non-g0 goroutines
1272 print("end tracegc\n")