1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
6 // Patterned after tcmalloc's algorithms; shorter code.
12 "runtime/internal/atomic"
13 "runtime/internal/sys"
17 // NOTE(rsc): Everything here could use cas if contention became an issue.
19 // profInsertLock protects changes to the start of all *bucket linked lists
21 // profBlockLock protects the contents of every blockRecord struct
23 // profMemActiveLock protects the active field of every memRecord struct
24 profMemActiveLock mutex
25 // profMemFutureLock is a set of locks that protect the respective elements
26 // of the future array of every memRecord struct
27 profMemFutureLock [len(memRecord{}.future)]mutex
30 // All memory allocations are local and do not escape outside of the profiler.
31 // The profiler is forbidden from referring to garbage-collected memory.
35 memProfile bucketType = 1 + iota
39 // size of bucket hash table
42 // maxStack is the maximum depth of stack to record in a bucket.
43 // Note that it's only used internally as a guard against
44 // wildly out-of-bounds slicing of the PCs that come after
45 // a bucket struct, and it could increase in the future.
51 // A bucket holds per-call-stack profiling information.
52 // The representation is a bit sleazy, inherited from C.
53 // This struct defines the bucket header. It is followed in
54 // memory by the stack words and then the actual record
55 // data, either a memRecord or a blockRecord.
57 // Per-call-stack profiling information.
58 // Lookup by hashing call stack into a linked-list hash table.
60 // None of the fields in this bucket header are modified after
61 // creation, including its next and allnext links.
68 typ bucketType // memBucket or blockBucket (includes mutexProfile)
74 // A memRecord is the bucket data for a bucket of type memProfile,
75 // part of the memory profile.
76 type memRecord struct {
77 // The following complex 3-stage scheme of stats accumulation
78 // is required to obtain a consistent picture of mallocs and frees
79 // for some point in time.
80 // The problem is that mallocs come in real time, while frees
81 // come only after a GC during concurrent sweeping. So if we counted
82 // them naively, we would get a skew toward mallocs.
84 // Hence, we delay information to get consistent snapshots as
85 // of mark termination. Allocations count toward the next mark
86 // termination's snapshot, while sweep frees count toward the
87 // previous mark termination's snapshot:
91 // (ASCII diagram elided: mallocs accumulate toward each mark termination, frees arrive during the following sweep.)
103 // Since we can't publish a consistent snapshot until all of
104 // the sweep frees are accounted for, we wait until the next
105 // mark termination ("MT" above) to publish the previous mark
106 // termination's snapshot ("P" above). To do this, allocation
107 // and free events are accounted to *future* heap profile
108 // cycles ("C+n" above) and we only publish a cycle once all
109 // of the events from that cycle are known to be done. Specifically:
111 // Mallocs are accounted to cycle C+2.
112 // Explicit frees are accounted to cycle C+2.
113 // GC frees (done during sweeping) are accounted to cycle C+1.
115 // After mark termination, we increment the global heap
116 // profile cycle counter and accumulate the stats from cycle C
117 // into the active profile.
119 // active is the currently published profile. A profiling
120 // cycle can be accumulated into active once it's complete.
121 active memRecordCycle
123 // future records the profile events we're counting for cycles
124 // that have not yet been published. This is a ring buffer
125 // indexed by the global heap profile cycle C and stores
126 // cycles C, C+1, and C+2. Unlike active, these counts are
127 // only for a single cycle; they are not cumulative across cycles.
130 // We store cycle C here because there's a window between when
131 // C becomes the active cycle and when we've flushed it to active.
133 future [3]memRecordCycle
137 type memRecordCycle struct {
138 allocs, frees uintptr
139 alloc_bytes, free_bytes uintptr
142 // add accumulates b into a. It does not zero b.
143 func (a *memRecordCycle) add(b *memRecordCycle) {
146 a.alloc_bytes += b.alloc_bytes
147 a.free_bytes += b.free_bytes
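// memRecordCycleSketch is an editorial sketch, not called by the runtime: it
// walks one sampled allocation through the 3-entry future ring described in
// the memRecord comment above, assuming a hypothetical caller that supplies
// the current global heap profile cycle c.
func memRecordCycleSketch(rec *memRecord, c uint32, size uintptr) {
	n := uint32(len(rec.future))
	// Malloc: accounted to cycle C+2.
	rec.future[(c+2)%n].allocs++
	rec.future[(c+2)%n].alloc_bytes += size
	// Sweep free: accounted to cycle C+1.
	rec.future[(c+1)%n].frees++
	rec.future[(c+1)%n].free_bytes += size
	// Flush: publish cycle C into the active profile and clear it for reuse.
	rec.active.add(&rec.future[c%n])
	rec.future[c%n] = memRecordCycle{}
}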
150 // A blockRecord is the bucket data for a bucket of type blockProfile,
151 // which is used in blocking and mutex profiles.
152 type blockRecord struct {
158 mbuckets atomic.UnsafePointer // *bucket, memory profile buckets
159 bbuckets atomic.UnsafePointer // *bucket, blocking profile buckets
160 xbuckets atomic.UnsafePointer // *bucket, mutex profile buckets
161 buckhash atomic.UnsafePointer // *buckhashArray
163 mProfCycle mProfCycleHolder
166 type buckhashArray [buckHashSize]atomic.UnsafePointer // *bucket
168 const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)
170 // mProfCycleHolder holds the global heap profile cycle number (wrapped at
171 // mProfCycleWrap, stored starting at bit 1), and a flag (stored at bit 0) to
172 // indicate whether future[cycle] in all buckets has been queued to flush into
173 // the active profile.
174 type mProfCycleHolder struct {
178 // read returns the current cycle count.
179 func (c *mProfCycleHolder) read() (cycle uint32) {
185 // setFlushed sets the flushed flag. It returns the current cycle count and the
186 // previous value of the flushed flag.
187 func (c *mProfCycleHolder) setFlushed() (cycle uint32, alreadyFlushed bool) {
189 prev := c.value.Load()
191 alreadyFlushed = (prev & 0x1) != 0
193 if c.value.CompareAndSwap(prev, next) {
194 return cycle, alreadyFlushed
199 // increment increases the cycle count by one, wrapping the value at
200 // mProfCycleWrap. It clears the flushed flag.
201 func (c *mProfCycleHolder) increment() {
202 // We explicitly wrap mProfCycle rather than depending on
203 // uint wraparound because the memRecord.future ring does not
204 // itself wrap at a power of two.
206 prev := c.value.Load()
208 cycle = (cycle + 1) % mProfCycleWrap
210 if c.value.CompareAndSwap(prev, next) {
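// packProfCycle and unpackProfCycle are editorial sketches (not used by the
// runtime) of the encoding documented on mProfCycleHolder above: the flushed
// flag occupies bit 0 and the wrapped cycle count is stored starting at bit 1.
func packProfCycle(cycle uint32, flushed bool) uint32 {
	v := cycle << 1
	if flushed {
		v |= 0x1
	}
	return v
}

func unpackProfCycle(v uint32) (cycle uint32, flushed bool) {
	return v >> 1, v&0x1 != 0
}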
216 // newBucket allocates a bucket with the given type and number of stack entries.
217 func newBucket(typ bucketType, nstk int) *bucket {
218 size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
221 throw("invalid profile bucket type")
223 size += unsafe.Sizeof(memRecord{})
224 case blockProfile, mutexProfile:
225 size += unsafe.Sizeof(blockRecord{})
228 b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
230 b.nstk = uintptr(nstk)
234 // stk returns the slice in b holding the stack.
235 func (b *bucket) stk() []uintptr {
236 stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
237 return stk[:b.nstk:b.nstk]
240 // mp returns the memRecord associated with the memProfile bucket b.
241 func (b *bucket) mp() *memRecord {
242 if b.typ != memProfile {
243 throw("bad use of bucket.mp")
245 data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
246 return (*memRecord)(data)
249 // bp returns the blockRecord associated with the blockProfile bucket b.
250 func (b *bucket) bp() *blockRecord {
251 if b.typ != blockProfile && b.typ != mutexProfile {
252 throw("bad use of bucket.bp")
254 data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
255 return (*blockRecord)(data)
258 // Return the bucket for stk[0:nstk], allocating a new bucket if needed.
259 func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
260 bh := (*buckhashArray)(buckhash.Load())
262 lock(&profInsertLock)
263 // check again under the lock
264 bh = (*buckhashArray)(buckhash.Load())
266 bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys))
268 throw("runtime: cannot allocate memory")
270 buckhash.StoreNoWB(unsafe.Pointer(bh))
272 unlock(&profInsertLock)
277 for _, pc := range stk {
290 i := int(h % buckHashSize)
291 // first check optimistically, without the lock
292 for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
293 if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
302 lock(&profInsertLock)
303 // check again under the insertion lock
304 for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
305 if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
306 unlock(&profInsertLock)
311 // Create new bucket.
312 b := newBucket(typ, len(stk))
317 var allnext *atomic.UnsafePointer
318 if typ == memProfile {
320 } else if typ == mutexProfile {
326 b.next = (*bucket)(bh[i].Load())
327 b.allnext = (*bucket)(allnext.Load())
329 bh[i].StoreNoWB(unsafe.Pointer(b))
330 allnext.StoreNoWB(unsafe.Pointer(b))
332 unlock(&profInsertLock)
336 func eqslice(x, y []uintptr) bool {
337 if len(x) != len(y) {
340 for i, xi := range x {
348 // mProf_NextCycle publishes the next heap profile cycle and creates a
349 // fresh heap profile cycle. This operation is fast and can be done
350 // during STW. The caller must call mProf_Flush before calling
351 // mProf_NextCycle again.
353 // This is called by mark termination during STW so allocations and
354 // frees after the world is started again count towards a new heap profiling cycle.
356 func mProf_NextCycle() {
357 mProfCycle.increment()
360 // mProf_Flush flushes the events from the current heap profiling
361 // cycle into the active profile. After this it is safe to start a new
362 // heap profiling cycle with mProf_NextCycle.
364 // This is called by GC after mark termination starts the world. In
365 // contrast with mProf_NextCycle, this is somewhat expensive, but safe
366 // to do concurrently.
368 cycle, alreadyFlushed := mProfCycle.setFlushed()
373 index := cycle % uint32(len(memRecord{}.future))
374 lock(&profMemActiveLock)
375 lock(&profMemFutureLock[index])
376 mProf_FlushLocked(index)
377 unlock(&profMemFutureLock[index])
378 unlock(&profMemActiveLock)
381 // mProf_FlushLocked flushes the events from the heap profiling cycle at index
382 // into the active profile. The caller must hold the lock for the active profile
383 // (profMemActiveLock) and for the profiling cycle at index
384 // (profMemFutureLock[index]).
385 func mProf_FlushLocked(index uint32) {
386 assertLockHeld(&profMemActiveLock)
387 assertLockHeld(&profMemFutureLock[index])
388 head := (*bucket)(mbuckets.Load())
389 for b := head; b != nil; b = b.allnext {
392 // Flush cycle C into the published profile and clear it for reuse.
394 mpc := &mp.future[index]
396 *mpc = memRecordCycle{}
400 // mProf_PostSweep records that all sweep frees for this GC cycle have
401 // completed. This has the effect of publishing the heap profile
402 // snapshot as of the last mark termination without advancing the heap profile cycle.
404 func mProf_PostSweep() {
405 // Flush cycle C+1 to the active profile so everything as of
406 // the last mark termination becomes visible. *Don't* advance
407 // the cycle, since we're still accumulating allocs in cycle
408 // C+2, which have to become C+1 in the next mark termination, and so on.
410 cycle := mProfCycle.read() + 1
412 index := cycle % uint32(len(memRecord{}.future))
413 lock(&profMemActiveLock)
414 lock(&profMemFutureLock[index])
415 mProf_FlushLocked(index)
416 unlock(&profMemFutureLock[index])
417 unlock(&profMemActiveLock)
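// Editorial sketch of how one GC cycle drives the three entry points above,
// pieced together from their doc comments; the stop/start lines are
// placeholders, not the real call sites:
//
//	// stop the world (mark termination)
//	mProf_NextCycle()  // open a fresh heap profile cycle; fast, STW-safe
//	// start the world
//	mProf_Flush()      // publish the cycle closed by mProf_NextCycle
//	// ... concurrent sweeping completes ...
//	mProf_PostSweep()  // publish the snapshot as of the last mark termination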
420 // Called by malloc to record a profiled block.
421 func mProf_Malloc(p unsafe.Pointer, size uintptr) {
422 var stk [maxStack]uintptr
423 nstk := callers(4, stk[:])
425 index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future))
427 b := stkbucket(memProfile, size, stk[:nstk], true)
429 mpc := &mp.future[index]
431 lock(&profMemFutureLock[index])
433 mpc.alloc_bytes += size
434 unlock(&profMemFutureLock[index])
436 // Setprofilebucket locks a bunch of other mutexes, so we call it outside of
437 // the profiler locks. This reduces potential contention and chances of
438 // deadlocks. Since the object must be alive during the call to
439 // mProf_Malloc, it's fine to do this non-atomically.
441 setprofilebucket(p, b)
445 // Called when freeing a profiled block.
446 func mProf_Free(b *bucket, size uintptr) {
447 index := (mProfCycle.read() + 1) % uint32(len(memRecord{}.future))
450 mpc := &mp.future[index]
452 lock(&profMemFutureLock[index])
454 mpc.free_bytes += size
455 unlock(&profMemFutureLock[index])
458 var blockprofilerate uint64 // in CPU ticks
460 // SetBlockProfileRate controls the fraction of goroutine blocking events
461 // that are reported in the blocking profile. The profiler aims to sample
462 // an average of one blocking event per rate nanoseconds spent blocked.
464 // To include every blocking event in the profile, pass rate = 1.
465 // To turn off profiling entirely, pass rate <= 0.
466 func SetBlockProfileRate(rate int) {
469 r = 0 // disable profiling
470 } else if rate == 1 {
471 r = 1 // profile everything
473 // convert ns to cycles, use float64 to prevent overflow during multiplication
474 r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
480 atomic.Store64(&blockprofilerate, uint64(r))
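// Illustrative client-side use (names f and err are assumed); most programs
// enable this once near the start of main and retrieve the result through the
// runtime/pprof "block" profile:
//
//	runtime.SetBlockProfileRate(1) // record every blocking event
//	// ... run the workload ...
//	err := pprof.Lookup("block").WriteTo(f, 0)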
483 func blockevent(cycles int64, skip int) {
488 rate := int64(atomic.Load64(&blockprofilerate))
489 if blocksampled(cycles, rate) {
490 saveblockevent(cycles, rate, skip+1, blockProfile)
494 // blocksampled returns true for all events where cycles >= rate. Shorter
495 // events have a cycles/rate random chance of returning true.
496 func blocksampled(cycles, rate int64) bool {
497 if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
503 func saveblockevent(cycles, rate int64, skip int, which bucketType) {
506 var stk [maxStack]uintptr
507 if gp.m.curg == nil || gp.m.curg == gp {
508 nstk = callers(skip, stk[:])
510 nstk = gcallers(gp.m.curg, skip, stk[:])
512 b := stkbucket(which, 0, stk[:nstk], true)
516 // We want to up-scale the count and cycles according to the
517 // probability that the event was sampled. For block profile events,
518 // the sample probability is 1 if cycles >= rate, and cycles / rate
519 // otherwise. For mutex profile events, the sample probability is 1 / rate.
520 // We scale the events by 1 / (probability the event was sampled).
521 if which == blockProfile && cycles < rate {
522 // Remove sampling bias, see discussion on http://golang.org/cl/299991.
523 bp.count += float64(rate) / float64(cycles)
525 } else if which == mutexProfile {
526 bp.count += float64(rate)
527 bp.cycles += rate * cycles
532 unlock(&profBlockLock)
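// Worked example of the scaling above, with made-up numbers: for a block
// profile event with rate=100 and cycles=25, the sample probability was
// 25/100, so the record gets count += 100/25 = 4 and cycles += 100 (rate),
// standing in for the ~4 similar events that were likely not sampled. A mutex
// profile event, sampled with probability 1/rate, gets count += rate and
// cycles += rate*cycles.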
535 var mutexprofilerate uint64 // fraction sampled
537 // SetMutexProfileFraction controls the fraction of mutex contention events
538 // that are reported in the mutex profile. On average 1/rate events are
539 // reported. The previous rate is returned.
541 // To turn off profiling entirely, pass rate 0.
542 // To just read the current rate, pass rate < 0.
543 // (For rate > 1, the details of sampling may change.)
544 func SetMutexProfileFraction(rate int) int {
546 return int(mutexprofilerate)
548 old := mutexprofilerate
549 atomic.Store64(&mutexprofilerate, uint64(rate))
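// Illustrative client-side use (hypothetical surrounding code): enable mutex
// profiling for a region, then restore the previous fraction:
//
//	prev := runtime.SetMutexProfileFraction(5) // report ~1/5 of contention events
//	defer runtime.SetMutexProfileFraction(prev)
//	// ... run the workload, then write out pprof.Lookup("mutex") ...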
553 //go:linkname mutexevent sync.event
554 func mutexevent(cycles int64, skip int) {
558 rate := int64(atomic.Load64(&mutexprofilerate))
559 if rate > 0 && int64(fastrand())%rate == 0 {
560 saveblockevent(cycles, rate, skip+1, mutexProfile)
564 // Go interface to profile data.
566 // A StackRecord describes a single execution stack.
567 type StackRecord struct {
568 Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
571 // Stack returns the stack trace associated with the record,
572 // a prefix of r.Stack0.
573 func (r *StackRecord) Stack() []uintptr {
574 for i, v := range r.Stack0 {
582 // MemProfileRate controls the fraction of memory allocations
583 // that are recorded and reported in the memory profile.
584 // The profiler aims to sample an average of
585 // one allocation per MemProfileRate bytes allocated.
587 // To include every allocated block in the profile, set MemProfileRate to 1.
588 // To turn off profiling entirely, set MemProfileRate to 0.
590 // The tools that process the memory profiles assume that the
591 // profile rate is constant across the lifetime of the program
592 // and equal to the current value. Programs that change the
593 // memory profiling rate should do so just once, as early as
594 // possible in the execution of the program (for example,
595 // at the beginning of main).
596 var MemProfileRate int = 512 * 1024
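// Illustrative client-side use, following the guidance above to change the
// rate once, as early as possible:
//
//	func main() {
//		runtime.MemProfileRate = 1 // record every allocated block (expensive)
//		// ...
//	}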
598 // disableMemoryProfiling is set by the linker if runtime.MemProfile
599 // is not used and the link type guarantees nobody else could use it elsewhere.
601 var disableMemoryProfiling bool
603 // A MemProfileRecord describes the live objects allocated
604 // by a particular call sequence (stack trace).
605 type MemProfileRecord struct {
606 AllocBytes, FreeBytes int64 // number of bytes allocated, freed
607 AllocObjects, FreeObjects int64 // number of objects allocated, freed
608 Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
611 // InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
612 func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
614 // InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
615 func (r *MemProfileRecord) InUseObjects() int64 {
616 return r.AllocObjects - r.FreeObjects
619 // Stack returns the stack trace associated with the record,
620 // a prefix of r.Stack0.
621 func (r *MemProfileRecord) Stack() []uintptr {
622 for i, v := range r.Stack0 {
630 // MemProfile returns a profile of memory allocated and freed per allocation site.
633 // MemProfile returns n, the number of records in the current memory profile.
634 // If len(p) >= n, MemProfile copies the profile into p and returns n, true.
635 // If len(p) < n, MemProfile does not change p and returns n, false.
637 // If inuseZero is true, the profile includes allocation records
638 // where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
639 // These are sites where memory was allocated, but it has all
640 // been released back to the runtime.
642 // The returned profile may be up to two garbage collection cycles old.
643 // This is to avoid skewing the profile toward allocations; because
644 // allocations happen in real time but frees are delayed until the garbage
645 // collector performs sweeping, the profile only accounts for allocations
646 // that have had a chance to be freed by the garbage collector.
648 // Most clients should use the runtime/pprof package or
649 // the testing package's -test.memprofile flag instead
650 // of calling MemProfile directly.
651 func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
652 cycle := mProfCycle.read()
653 // If we're between mProf_NextCycle and mProf_Flush, take care
654 // of flushing to the active profile so we only have to look
655 // at the active profile below.
656 index := cycle % uint32(len(memRecord{}.future))
657 lock(&profMemActiveLock)
658 lock(&profMemFutureLock[index])
659 mProf_FlushLocked(index)
660 unlock(&profMemFutureLock[index])
662 head := (*bucket)(mbuckets.Load())
663 for b := head; b != nil; b = b.allnext {
665 if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
668 if mp.active.allocs != 0 || mp.active.frees != 0 {
673 // Absolutely no data, suggesting that a garbage collection
674 // has not yet happened. In order to allow profiling when
675 // garbage collection is disabled from the beginning of execution,
676 // accumulate all of the cycles, and recount buckets.
678 for b := head; b != nil; b = b.allnext {
680 for c := range mp.future {
681 lock(&profMemFutureLock[c])
682 mp.active.add(&mp.future[c])
683 mp.future[c] = memRecordCycle{}
684 unlock(&profMemFutureLock[c])
686 if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
694 for b := head; b != nil; b = b.allnext {
696 if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
702 unlock(&profMemActiveLock)
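// Illustrative client-side use of the n/ok protocol described above (most
// programs should use runtime/pprof instead of calling MemProfile directly):
//
//	n, _ := runtime.MemProfile(nil, true)
//	p := make([]runtime.MemProfileRecord, n+50) // headroom: n may have grown
//	if n, ok := runtime.MemProfile(p, true); ok {
//		p = p[:n]
//	}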
706 // Write b's data to r.
707 func record(r *MemProfileRecord, b *bucket) {
709 r.AllocBytes = int64(mp.active.alloc_bytes)
710 r.FreeBytes = int64(mp.active.free_bytes)
711 r.AllocObjects = int64(mp.active.allocs)
712 r.FreeObjects = int64(mp.active.frees)
714 racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
717 msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
720 asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
722 copy(r.Stack0[:], b.stk())
723 for i := int(b.nstk); i < len(r.Stack0); i++ {
728 func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
729 lock(&profMemActiveLock)
730 head := (*bucket)(mbuckets.Load())
731 for b := head; b != nil; b = b.allnext {
733 fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
735 unlock(&profMemActiveLock)
738 // BlockProfileRecord describes blocking events originating
739 // at a particular call sequence (stack trace).
740 type BlockProfileRecord struct {
746 // BlockProfile returns n, the number of records in the current blocking profile.
747 // If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
748 // If len(p) < n, BlockProfile does not change p and returns n, false.
750 // Most clients should use the runtime/pprof package or
751 // the testing package's -test.blockprofile flag instead
752 // of calling BlockProfile directly.
753 func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
755 head := (*bucket)(bbuckets.Load())
756 for b := head; b != nil; b = b.allnext {
761 for b := head; b != nil; b = b.allnext {
764 r.Count = int64(bp.count)
765 // Prevent callers from having to worry about division by zero errors.
766 // See discussion on http://golang.org/cl/299991.
772 racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
775 msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
778 asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
780 i := copy(r.Stack0[:], b.stk())
781 for ; i < len(r.Stack0); i++ {
787 unlock(&profBlockLock)
791 // MutexProfile returns n, the number of records in the current mutex profile.
792 // If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
793 // Otherwise, MutexProfile does not change p, and returns n, false.
795 // Most clients should use the runtime/pprof package
796 // instead of calling MutexProfile directly.
797 func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
799 head := (*bucket)(xbuckets.Load())
800 for b := head; b != nil; b = b.allnext {
805 for b := head; b != nil; b = b.allnext {
808 r.Count = int64(bp.count)
810 i := copy(r.Stack0[:], b.stk())
811 for ; i < len(r.Stack0); i++ {
817 unlock(&profBlockLock)
821 // ThreadCreateProfile returns n, the number of records in the thread creation profile.
822 // If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
823 // If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
825 // Most clients should use the runtime/pprof package instead
826 // of calling ThreadCreateProfile directly.
827 func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
828 first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
829 for mp := first; mp != nil; mp = mp.alllink {
835 for mp := first; mp != nil; mp = mp.alllink {
836 p[i].Stack0 = mp.createstack
843 //go:linkname runtime_goroutineProfileWithLabels runtime/pprof.runtime_goroutineProfileWithLabels
844 func runtime_goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
845 return goroutineProfileWithLabels(p, labels)
848 // labels may be nil. If labels is non-nil, it must have the same length as p.
849 func goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
850 if labels != nil && len(labels) != len(p) {
854 return goroutineProfileWithLabelsConcurrent(p, labels)
857 var goroutineProfile = struct {
861 records []StackRecord
862 labels []unsafe.Pointer
867 // goroutineProfileState indicates the status of a goroutine's stack for the
868 // current in-progress goroutine profile. Goroutines' stacks are initially
869 // "Absent" from the profile, and end up "Satisfied" by the time the profile is
870 // complete. While a goroutine's stack is being captured, its
871 // goroutineProfileState will be "InProgress" and it will not be able to run
872 // until the capture completes and the state moves to "Satisfied".
874 // Some goroutines (the finalizer goroutine, which at various times can be
875 // either a "system" or a "user" goroutine, and the goroutine that is
876 // coordinating the profile, and any goroutines created during the profile) move
877 // directly to the "Satisfied" state.
878 type goroutineProfileState uint32
881 goroutineProfileAbsent goroutineProfileState = iota
882 goroutineProfileInProgress
883 goroutineProfileSatisfied
886 type goroutineProfileStateHolder atomic.Uint32
888 func (p *goroutineProfileStateHolder) Load() goroutineProfileState {
889 return goroutineProfileState((*atomic.Uint32)(p).Load())
892 func (p *goroutineProfileStateHolder) Store(value goroutineProfileState) {
893 (*atomic.Uint32)(p).Store(uint32(value))
896 func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileState) bool {
897 return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new))
900 func goroutineProfileWithLabelsConcurrent(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
901 semacquire(&goroutineProfile.sema)
905 stopTheWorld(stwGoroutineProfile)
906 // Using gcount while the world is stopped should give us a consistent view
907 // of the number of live goroutines, minus the number of goroutines that are
908 // alive and permanently marked as "system". But to make this count agree
909 // with what we'd get from isSystemGoroutine, we need special handling for
910 // goroutines that can vary between user and system to ensure that the count
911 // doesn't change during the collection. So, check the finalizer goroutine in particular.
914 if fingStatus.Load()&fingRunningFinalizer != 0 {
919 // There's not enough space in p to store the whole profile, so (per the
920 // contract of runtime.GoroutineProfile) we're not allowed to write to p
921 // at all and must return n, false.
923 semrelease(&goroutineProfile.sema)
927 // Save current goroutine.
931 saveg(pc, sp, ourg, &p[0])
934 labels[0] = ourg.labels
936 ourg.goroutineProfiled.Store(goroutineProfileSatisfied)
937 goroutineProfile.offset.Store(1)
939 // Prepare for all other goroutines to enter the profile. Aside from ourg,
940 // every goroutine struct in the allgs list has its goroutineProfiled field
941 // cleared. Any goroutine created from this point on (while
942 // goroutineProfile.active is set) will start with its goroutineProfiled
943 // field set to goroutineProfileSatisfied.
944 goroutineProfile.active = true
945 goroutineProfile.records = p
946 goroutineProfile.labels = labels
947 // The finalizer goroutine needs special handling because it can vary over
948 // time between being a user goroutine (eligible for this profile) and a
949 // system goroutine (to be excluded). Pick one before restarting the world.
951 fing.goroutineProfiled.Store(goroutineProfileSatisfied)
952 if readgstatus(fing) != _Gdead && !isSystemGoroutine(fing, false) {
953 doRecordGoroutineProfile(fing)
958 // Visit each goroutine that existed as of the startTheWorld call above.
960 // New goroutines may not be in this list, but we didn't want to know about
961 // them anyway. If they do appear in this list (via reusing a dead goroutine
962 // struct, or racing to launch between the world restarting and us getting
963 // the list), they will already have their goroutineProfiled field set to
964 // goroutineProfileSatisfied before their state transitions out of _Gdead.
966 // Any goroutine that the scheduler tries to execute concurrently with this
967 // call will start by adding itself to the profile (before the act of
968 // executing can cause any changes in its stack).
969 forEachGRace(func(gp1 *g) {
970 tryRecordGoroutineProfile(gp1, Gosched)
973 stopTheWorld(stwGoroutineProfileCleanup)
974 endOffset := goroutineProfile.offset.Swap(0)
975 goroutineProfile.active = false
976 goroutineProfile.records = nil
977 goroutineProfile.labels = nil
980 // Restore the invariant that every goroutine struct in allgs has its
981 // goroutineProfiled field cleared.
982 forEachGRace(func(gp1 *g) {
983 gp1.goroutineProfiled.Store(goroutineProfileAbsent)
987 raceacquire(unsafe.Pointer(&labelSync))
990 if n != int(endOffset) {
991 // It's a big surprise that the number of goroutines changed while we
992 // were collecting the profile. But probably better to return a
993 // truncated profile than to crash the whole process.
995 // For instance, needm moves a goroutine out of the _Gdead state and so
996 // might be able to change the goroutine count without interacting with
997 // the scheduler. For code like that, the race windows are small and the
998 // combination of features is uncommon, so it's hard to be (and remain)
999 // sure we've caught them all.
1002 semrelease(&goroutineProfile.sema)
1006 // tryRecordGoroutineProfileWB asserts that write barriers are allowed and calls
1007 // tryRecordGoroutineProfile.
1009 //go:yeswritebarrierrec
1010 func tryRecordGoroutineProfileWB(gp1 *g) {
1011 if getg().m.p.ptr() == nil {
1012 throw("no P available, write barriers are forbidden")
1014 tryRecordGoroutineProfile(gp1, osyield)
1017 // tryRecordGoroutineProfile ensures that gp1 has the appropriate representation
1018 // in the current goroutine profile: either that it should not be profiled, or
1019 // that a snapshot of its call stack and labels is now in the profile.
1020 func tryRecordGoroutineProfile(gp1 *g, yield func()) {
1021 if readgstatus(gp1) == _Gdead {
1022 // Dead goroutines should not appear in the profile. Goroutines that
1023 // start while profile collection is active will get goroutineProfiled
1024 // set to goroutineProfileSatisfied before transitioning out of _Gdead,
1025 // so here we check _Gdead first.
1028 if isSystemGoroutine(gp1, true) {
1029 // System goroutines should not appear in the profile. (The finalizer
1030 // goroutine is marked as "already profiled".)
1035 prev := gp1.goroutineProfiled.Load()
1036 if prev == goroutineProfileSatisfied {
1037 // This goroutine is already in the profile (or is new since the
1038 // start of collection, so shouldn't appear in the profile).
1041 if prev == goroutineProfileInProgress {
1042 // Something else is adding gp1 to the goroutine profile right now.
1043 // Give that a moment to finish.
1048 // While we have gp1.goroutineProfiled set to
1049 // goroutineProfileInProgress, gp1 may appear _Grunnable but will not
1050 // actually be able to run. Disable preemption for ourselves, to make
1051 // sure we finish profiling gp1 right away instead of leaving it stuck in this limbo.
1054 if gp1.goroutineProfiled.CompareAndSwap(goroutineProfileAbsent, goroutineProfileInProgress) {
1055 doRecordGoroutineProfile(gp1)
1056 gp1.goroutineProfiled.Store(goroutineProfileSatisfied)
1062 // doRecordGoroutineProfile writes gp1's call stack and labels to an in-progress
1063 // goroutine profile. Preemption is disabled.
1065 // This may be called via tryRecordGoroutineProfile in two ways: by the
1066 // goroutine that is coordinating the goroutine profile (running on its own
1067 // stack), or from the scheduler in preparation to execute gp1 (running on the system stack).
1069 func doRecordGoroutineProfile(gp1 *g) {
1070 if readgstatus(gp1) == _Grunning {
1071 print("doRecordGoroutineProfile gp1=", gp1.goid, "\n")
1072 throw("cannot read stack of running goroutine")
1075 offset := int(goroutineProfile.offset.Add(1)) - 1
1077 if offset >= len(goroutineProfile.records) {
1078 // Should be impossible, but better to return a truncated profile than
1079 // to crash the entire process at this point. Instead, deal with it in
1080 // goroutineProfileWithLabelsConcurrent where we have more context.
1084 // saveg calls gentraceback, which may call cgo traceback functions. When
1085 // called from the scheduler, this is on the system stack already so
1086 // traceback.go:cgoContextPCs will avoid calling back into the scheduler.
1088 // When called from the goroutine coordinating the profile, we still have
1089 // set gp1.goroutineProfiled to goroutineProfileInProgress and so are still
1090 // preventing it from being truly _Grunnable. So we'll use the system stack
1091 // to avoid schedule delays.
1092 systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &goroutineProfile.records[offset]) })
1094 if goroutineProfile.labels != nil {
1095 goroutineProfile.labels[offset] = gp1.labels
1099 func goroutineProfileWithLabelsSync(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
1102 isOK := func(gp1 *g) bool {
1103 // Checking isSystemGoroutine here makes GoroutineProfile
1104 // consistent with both NumGoroutine and Stack.
1105 return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false)
1108 stopTheWorld(stwGoroutineProfile)
1110 // World is stopped, no locking required.
1112 forEachGRace(func(gp1 *g) {
1122 // Save current goroutine.
1125 systemstack(func() {
1126 saveg(pc, sp, gp, &r[0])
1130 // If we have a place to put our goroutine labelmap, insert it there.
1136 // Save other goroutines.
1137 forEachGRace(func(gp1 *g) {
1143 // Should be impossible, but better to return a
1144 // truncated profile than to crash the entire process.
1147 // saveg calls gentraceback, which may call cgo traceback functions.
1148 // The world is stopped, so it cannot use cgocall (which will be
1149 // blocked at exitsyscall). Do it on the system stack so it won't
1150 // call into the scheduler (see traceback.go:cgoContextPCs).
1151 systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &r[0]) })
1161 raceacquire(unsafe.Pointer(&labelSync))
1168 // GoroutineProfile returns n, the number of records in the active goroutine stack profile.
1169 // If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
1170 // If len(p) < n, GoroutineProfile does not change p and returns n, false.
1172 // Most clients should use the runtime/pprof package instead
1173 // of calling GoroutineProfile directly.
1174 func GoroutineProfile(p []StackRecord) (n int, ok bool) {
1176 return goroutineProfileWithLabels(p, nil)
1179 func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
1181 u.initAt(pc, sp, 0, gp, unwindSilentErrors)
1182 n := tracebackPCs(&u, 0, r.Stack0[:])
1183 if n < len(r.Stack0) {
1188 // Stack formats a stack trace of the calling goroutine into buf
1189 // and returns the number of bytes written to buf.
1190 // If all is true, Stack formats stack traces of all other goroutines
1191 // into buf after the trace for the current goroutine.
1192 func Stack(buf []byte, all bool) int {
1194 stopTheWorld(stwAllGoroutinesStack)
1202 systemstack(func() {
1204 // Force traceback=1 to override GOTRACEBACK setting,
1205 // so that Stack's results are consistent.
1206 // GOTRACEBACK is only about crash dumps.
1208 g0.writebuf = buf[0:0:len(buf)]
1210 traceback(pc, sp, 0, gp)
1215 n = len(g0.writebuf)
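// Illustrative client-side use (buffer size is an arbitrary choice; Stack
// truncates the trace if buf is too small):
//
//	buf := make([]byte, 1<<20)
//	n := runtime.Stack(buf, true) // true: include all goroutines
//	os.Stderr.Write(buf[:n])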
1226 // Tracing of alloc/free/gc.
1230 func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
1235 print("tracealloc(", p, ", ", hex(size), ")\n")
1237 print("tracealloc(", p, ", ", hex(size), ", ", toRType(typ).string(), ")\n")
1239 if gp.m.curg == nil || gp == gp.m.curg {
1243 systemstack(func() {
1244 traceback(pc, sp, 0, gp)
1247 goroutineheader(gp.m.curg)
1248 traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
1255 func tracefree(p unsafe.Pointer, size uintptr) {
1259 print("tracefree(", p, ", ", hex(size), ")\n")
1263 systemstack(func() {
1264 traceback(pc, sp, 0, gp)
1275 print("tracegc()\n")
1276 // running on m->g0 stack; show all non-g0 goroutines
1278 print("end tracegc\n")