1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
6 // Patterned after tcmalloc's algorithms; shorter code.
12 "runtime/internal/atomic"
16 // NOTE(rsc): Everything here could use cas if contention became an issue.
19 // All memory allocations are local and do not escape outside of the profiler.
20 // The profiler is forbidden from referring to garbage-collected memory.
24 memProfile bucketType = 1 + iota
28 // size of bucket hash table
31 // max depth of stack to record in bucket
37 // A bucket holds per-call-stack profiling information.
38 // The representation is a bit sleazy, inherited from C.
39 // This struct defines the bucket header. It is followed in
40 // memory by the stack words and then the actual record
41 // data, either a memRecord or a blockRecord.
43 // Per-call-stack profiling information.
44 // Lookup by hashing call stack into a linked-list hash table.
52 typ bucketType // memBucket or blockBucket (includes mutexProfile)
58 // A memRecord is the bucket data for a bucket of type memProfile,
59 // part of the memory profile.
60 type memRecord struct {
61 // The following complex 3-stage scheme of stats accumulation
62 // is required to obtain a consistent picture of mallocs and frees
63 // for some point in time.
64 // The problem is that mallocs come in real time, while frees
65 // come only after a GC during concurrent sweeping. So if we counted
66 // them naively, we would get a skew toward mallocs.
68 // Hence, we delay information to get consistent snapshots as
69 // of mark termination. Allocations count toward the next mark
70 // termination's snapshot, while sweep frees count toward the
71 // previous mark termination's snapshot:
75 // [ASCII diagram elided: it showed the mark terminations ("MT"), the
76 // publish point ("P"), and alloc/free events feeding cycles C, C+1, C+2.]
87 // Since we can't publish a consistent snapshot until all of
88 // the sweep frees are accounted for, we wait until the next
89 // mark termination ("MT" above) to publish the previous mark
90 // termination's snapshot ("P" above). To do this, allocation
91 // and free events are accounted to *future* heap profile
92 // cycles ("C+n" above) and we only publish a cycle once all
93 // of the events from that cycle are guaranteed to be done. Specifically:
95 // Mallocs are accounted to cycle C+2.
96 // Explicit frees are accounted to cycle C+2.
97 // GC frees (done during sweeping) are accounted to cycle C+1.
99 // After mark termination, we increment the global heap
100 // profile cycle counter and accumulate the stats from cycle C
101 // into the active profile.
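//
// As an illustrative walk-through of that indexing (a sketch of the
// existing scheme, not new behavior): with the current cycle counter c,
//
//	// mProf_Malloc (and explicit frees): mp.future[(c+2)%3]
//	// mProf_Free (sweep frees):          mp.future[(c+1)%3]
//	// mProf_FlushLocked:                 mp.future[c%3] -> added to active, then zeroed
//
// Only future[c%3] receives no new events while c is current, so it is
// the one slot that is safe to publish.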
103 // active is the currently published profile. A profiling
104 // cycle can be accumulated into active once it's complete.
105 active memRecordCycle
107 // future records the profile events we're counting for cycles
108 // that have not yet been published. This is a ring buffer
109 // indexed by the global heap profile cycle C and stores
110 // cycles C, C+1, and C+2. Unlike active, these counts are
111 // only for a single cycle; they are not cumulative across cycles.
114 // We store cycle C here because there's a window between when
115 // C becomes the active cycle and when we've flushed it to the active profile.
117 future [3]memRecordCycle
121 type memRecordCycle struct {
122 allocs, frees uintptr
123 alloc_bytes, free_bytes uintptr
126 // add accumulates b into a. It does not zero b.
127 func (a *memRecordCycle) add(b *memRecordCycle) {
130 a.alloc_bytes += b.alloc_bytes
131 a.free_bytes += b.free_bytes
134 // A blockRecord is the bucket data for a bucket of type blockProfile,
135 // which is used in blocking and mutex profiles.
136 type blockRecord struct {
142 mbuckets *bucket // memory profile buckets
143 bbuckets *bucket // blocking profile buckets
144 xbuckets *bucket // mutex profile buckets
145 buckhash *[buckHashSize]*bucket
149 // All fields in mProf are protected by proflock.
151 // cycle is the global heap profile cycle. This wraps
152 // at mProfCycleWrap.
154 // flushed indicates that future[cycle] in all buckets
155 // has been flushed to the active profile.
160 const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)
162 // newBucket allocates a bucket with the given type and number of stack entries.
163 func newBucket(typ bucketType, nstk int) *bucket {
164 size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
167 throw("invalid profile bucket type")
169 size += unsafe.Sizeof(memRecord{})
170 case blockProfile, mutexProfile:
171 size += unsafe.Sizeof(blockRecord{})
174 b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
177 b.nstk = uintptr(nstk)
181 // stk returns the slice in b holding the stack.
182 func (b *bucket) stk() []uintptr {
183 stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
184 return stk[:b.nstk:b.nstk]
187 // mp returns the memRecord associated with the memProfile bucket b.
188 func (b *bucket) mp() *memRecord {
189 if b.typ != memProfile {
190 throw("bad use of bucket.mp")
192 data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
193 return (*memRecord)(data)
196 // bp returns the blockRecord associated with the blockProfile bucket b.
197 func (b *bucket) bp() *blockRecord {
198 if b.typ != blockProfile && b.typ != mutexProfile {
199 throw("bad use of bucket.bp")
201 data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
202 return (*blockRecord)(data)
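// An illustrative view of the layout assumed by stk, mp, and bp above
// (a sketch of the existing scheme, not a new API): newBucket allocates
// a single block laid out as
//
//	| bucket header | nstk stack PCs | memRecord or blockRecord |
//
// so the offsets used above are
//
//	stackOff := unsafe.Sizeof(bucket{})
//	recordOff := stackOff + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
//
// matching the size arithmetic in newBucket.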
205 // Return the bucket for stk[0:nstk], allocating a new bucket if needed.
206 func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
208 buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
210 throw("runtime: cannot allocate memory")
216 for _, pc := range stk {
229 i := int(h % buckHashSize)
230 for b := buckhash[i]; b != nil; b = b.next {
231 if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
240 // Create new bucket.
241 b := newBucket(typ, len(stk))
247 if typ == memProfile {
250 } else if typ == mutexProfile {
260 func eqslice(x, y []uintptr) bool {
261 if len(x) != len(y) {
264 for i, xi := range x {
272 // mProf_NextCycle publishes the next heap profile cycle and creates a
273 // fresh heap profile cycle. This operation is fast and can be done
274 // during STW. The caller must call mProf_Flush before calling
275 // mProf_NextCycle again.
277 // This is called by mark termination during STW so allocations and
278 // frees after the world is started again count towards a new heap profile cycle.
280 func mProf_NextCycle() {
282 // We explicitly wrap mProf.cycle rather than depending on
283 // uint wraparound because the memRecord.future ring does not
284 // itself wrap at a power of two.
285 mProf.cycle = (mProf.cycle + 1) % mProfCycleWrap
286 mProf.flushed = false
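// Illustrative note on the wrap value (a sketch of why it is chosen,
// grounded in the constant above): mProfCycleWrap is a multiple of
// len(memRecord{}.future) == 3, so wrapping keeps the future-ring
// indexing consecutive:
//
//	old := mProfCycleWrap - 1          // old%3 == 2
//	next := (old + 1) % mProfCycleWrap // next == 0, and 0%3 is the next slot
//
// A wrap that was not a multiple of 3 could skip or repeat a ring slot.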
290 // mProf_Flush flushes the events from the current heap profiling
291 // cycle into the active profile. After this it is safe to start a new
292 // heap profiling cycle with mProf_NextCycle.
294 // This is called by GC after mark termination starts the world. In
295 // contrast with mProf_NextCycle, this is somewhat expensive, but safe
296 // to do concurrently.
306 func mProf_FlushLocked() {
308 for b := mbuckets; b != nil; b = b.allnext {
311 // Flush cycle C into the published profile and clear it for reuse.
313 mpc := &mp.future[c%uint32(len(mp.future))]
315 *mpc = memRecordCycle{}
319 // mProf_PostSweep records that all sweep frees for this GC cycle have
320 // completed. This has the effect of publishing the heap profile
321 // snapshot as of the last mark termination without advancing the heap profile cycle.
323 func mProf_PostSweep() {
325 // Flush cycle C+1 to the active profile so everything as of
326 // the last mark termination becomes visible. *Don't* advance
327 // the cycle, since we're still accumulating allocs in cycle
328 // C+2, which have to become C+1 in the next mark termination
331 for b := mbuckets; b != nil; b = b.allnext {
333 mpc := &mp.future[(c+1)%uint32(len(mp.future))]
335 *mpc = memRecordCycle{}
340 // Called by malloc to record a profiled block.
341 func mProf_Malloc(p unsafe.Pointer, size uintptr) {
342 var stk [maxStack]uintptr
343 nstk := callers(4, stk[:])
345 b := stkbucket(memProfile, size, stk[:nstk], true)
348 mpc := &mp.future[(c+2)%uint32(len(mp.future))]
350 mpc.alloc_bytes += size
353 // Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
354 // This reduces potential contention and chances of deadlocks.
355 // Since the object must be alive during call to mProf_Malloc,
356 // it's fine to do this non-atomically.
358 setprofilebucket(p, b)
362 // Called when freeing a profiled block.
363 func mProf_Free(b *bucket, size uintptr) {
367 mpc := &mp.future[(c+1)%uint32(len(mp.future))]
369 mpc.free_bytes += size
373 var blockprofilerate uint64 // in CPU ticks
375 // SetBlockProfileRate controls the fraction of goroutine blocking events
376 // that are reported in the blocking profile. The profiler aims to sample
377 // an average of one blocking event per rate nanoseconds spent blocked.
379 // To include every blocking event in the profile, pass rate = 1.
380 // To turn off profiling entirely, pass rate <= 0.
381 func SetBlockProfileRate(rate int) {
384 r = 0 // disable profiling
385 } else if rate == 1 {
386 r = 1 // profile everything
388 // convert ns to cycles, use float64 to prevent overflow during multiplication
389 r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
395 atomic.Store64(&blockprofilerate, uint64(r))
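// A minimal caller-side usage sketch (outside the runtime; "block" is
// the profile name exposed by runtime/pprof):
//
//	runtime.SetBlockProfileRate(1) // sample every blocking event
//	// ... run the workload of interest ...
//	pprof.Lookup("block").WriteTo(os.Stderr, 1)
//	runtime.SetBlockProfileRate(0) // turn block profiling back off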
398 func blockevent(cycles int64, skip int) {
403 rate := int64(atomic.Load64(&blockprofilerate))
404 if blocksampled(cycles, rate) {
405 saveblockevent(cycles, rate, skip+1, blockProfile)
409 // blocksampled returns true for all events where cycles >= rate. Shorter
410 // events have a cycles/rate random chance of returning true.
411 func blocksampled(cycles, rate int64) bool {
412 if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
418 func saveblockevent(cycles, rate int64, skip int, which bucketType) {
421 var stk [maxStack]uintptr
422 if gp.m.curg == nil || gp.m.curg == gp {
423 nstk = callers(skip, stk[:])
425 nstk = gcallers(gp.m.curg, skip, stk[:])
428 b := stkbucket(which, 0, stk[:nstk], true)
430 if which == blockProfile && cycles < rate {
431 // Remove sampling bias, see discussion on http://golang.org/cl/299991.
432 b.bp().count += float64(rate) / float64(cycles)
433 b.bp().cycles += rate
436 b.bp().cycles += cycles
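// Worked example of the bias correction above (illustrative numbers):
// with rate = 1000 and an event of cycles = 100, blocksampled keeps the
// event with probability ~cycles/rate = 0.1; when kept it is recorded
// with weight rate/cycles = 10 and rate = 1000 cycles, so the expected
// contribution is 0.1*10 = 1 event and 0.1*1000 = 100 cycles, the same
// as recording every such event exactly once.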
441 var mutexprofilerate uint64 // fraction sampled
443 // SetMutexProfileFraction controls the fraction of mutex contention events
444 // that are reported in the mutex profile. On average 1/rate events are
445 // reported. The previous rate is returned.
447 // To turn off profiling entirely, pass rate 0.
448 // To just read the current rate, pass rate < 0.
449 // (For n>1 the details of sampling may change.)
450 func SetMutexProfileFraction(rate int) int {
452 return int(mutexprofilerate)
454 old := mutexprofilerate
455 atomic.Store64(&mutexprofilerate, uint64(rate))
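// A minimal caller-side usage sketch (outside the runtime; "mutex" is
// the profile name exposed by runtime/pprof):
//
//	prev := runtime.SetMutexProfileFraction(5) // report ~1/5 of contention events
//	// ... run the workload of interest ...
//	pprof.Lookup("mutex").WriteTo(os.Stderr, 1)
//	runtime.SetMutexProfileFraction(prev) // restore the previous rate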
459 //go:linkname mutexevent sync.event
460 func mutexevent(cycles int64, skip int) {
464 rate := int64(atomic.Load64(&mutexprofilerate))
465 // TODO(pjw): measure impact of always calling fastrand vs using something
466 // like malloc.go:nextSample()
467 if rate > 0 && int64(fastrand())%rate == 0 {
468 saveblockevent(cycles, rate, skip+1, mutexProfile)
472 // Go interface to profile data.
474 // A StackRecord describes a single execution stack.
475 type StackRecord struct {
476 Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
479 // Stack returns the stack trace associated with the record,
480 // a prefix of r.Stack0.
481 func (r *StackRecord) Stack() []uintptr {
482 for i, v := range r.Stack0 {
490 // MemProfileRate controls the fraction of memory allocations
491 // that are recorded and reported in the memory profile.
492 // The profiler aims to sample an average of
493 // one allocation per MemProfileRate bytes allocated.
495 // To include every allocated block in the profile, set MemProfileRate to 1.
496 // To turn off profiling entirely, set MemProfileRate to 0.
498 // The tools that process the memory profiles assume that the
499 // profile rate is constant across the lifetime of the program
500 // and equal to the current value. Programs that change the
501 // memory profiling rate should do so just once, as early as
502 // possible in the execution of the program (for example,
503 // at the beginning of main).
504 var MemProfileRate int = defaultMemProfileRate(512 * 1024)
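// A minimal caller-side sketch (per the note above, change the rate once,
// as early as possible; 4096 is an arbitrary example value):
//
//	func main() {
//		runtime.MemProfileRate = 4096 // ~1 sample per 4 KiB allocated
//		// ... rest of the program ...
//	}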
506 // defaultMemProfileRate returns 0 if disableMemoryProfiling is set.
507 // It exists primarily for the godoc rendering of MemProfileRate
509 func defaultMemProfileRate(v int) int {
510 if disableMemoryProfiling {
516 // disableMemoryProfiling is set by the linker if runtime.MemProfile
517 // is not used and the link type guarantees nobody else could use it elsewhere.
519 var disableMemoryProfiling bool
521 // A MemProfileRecord describes the live objects allocated
522 // by a particular call sequence (stack trace).
523 type MemProfileRecord struct {
524 AllocBytes, FreeBytes int64 // number of bytes allocated, freed
525 AllocObjects, FreeObjects int64 // number of objects allocated, freed
526 Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
529 // InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
530 func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
532 // InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
533 func (r *MemProfileRecord) InUseObjects() int64 {
534 return r.AllocObjects - r.FreeObjects
537 // Stack returns the stack trace associated with the record,
538 // a prefix of r.Stack0.
539 func (r *MemProfileRecord) Stack() []uintptr {
540 for i, v := range r.Stack0 {
548 // MemProfile returns a profile of memory allocated and freed per allocation site.
551 // MemProfile returns n, the number of records in the current memory profile.
552 // If len(p) >= n, MemProfile copies the profile into p and returns n, true.
553 // If len(p) < n, MemProfile does not change p and returns n, false.
555 // If inuseZero is true, the profile includes allocation records
556 // where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
557 // These are sites where memory was allocated, but it has all
558 // been released back to the runtime.
560 // The returned profile may be up to two garbage collection cycles old.
561 // This is to avoid skewing the profile toward allocations; because
562 // allocations happen in real time but frees are delayed until the garbage
563 // collector performs sweeping, the profile only accounts for allocations
564 // that have had a chance to be freed by the garbage collector.
566 // Most clients should use the runtime/pprof package or
567 // the testing package's -test.memprofile flag instead
568 // of calling MemProfile directly.
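//
// For direct callers, a sketch of the usual grow-and-retry pattern
// (caller-side; the retry absorbs records that appear between calls,
// and the +50 slack is an arbitrary choice):
//
//	var recs []runtime.MemProfileRecord
//	n, ok := runtime.MemProfile(nil, true)
//	for !ok {
//		recs = make([]runtime.MemProfileRecord, n+50)
//		n, ok = runtime.MemProfile(recs, true)
//	}
//	recs = recs[:n]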
569 func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
571 // If we're between mProf_NextCycle and mProf_Flush, take care
572 // of flushing to the active profile so we only have to look
573 // at the active profile below.
576 for b := mbuckets; b != nil; b = b.allnext {
578 if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
581 if mp.active.allocs != 0 || mp.active.frees != 0 {
586 // Absolutely no data, suggesting that a garbage collection
587 // has not yet happened. In order to allow profiling when
588 // garbage collection is disabled from the beginning of execution,
589 // accumulate all of the cycles, and recount buckets.
591 for b := mbuckets; b != nil; b = b.allnext {
593 for c := range mp.future {
594 mp.active.add(&mp.future[c])
595 mp.future[c] = memRecordCycle{}
597 if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
605 for b := mbuckets; b != nil; b = b.allnext {
607 if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
617 // Write b's data to r.
618 func record(r *MemProfileRecord, b *bucket) {
620 r.AllocBytes = int64(mp.active.alloc_bytes)
621 r.FreeBytes = int64(mp.active.free_bytes)
622 r.AllocObjects = int64(mp.active.allocs)
623 r.FreeObjects = int64(mp.active.frees)
625 racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
628 msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
630 copy(r.Stack0[:], b.stk())
631 for i := int(b.nstk); i < len(r.Stack0); i++ {
636 func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
638 for b := mbuckets; b != nil; b = b.allnext {
640 fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
645 // BlockProfileRecord describes blocking events originating
646 // at a particular call sequence (stack trace).
647 type BlockProfileRecord struct {
653 // BlockProfile returns n, the number of records in the current blocking profile.
654 // If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
655 // If len(p) < n, BlockProfile does not change p and returns n, false.
657 // Most clients should use the runtime/pprof package or
658 // the testing package's -test.blockprofile flag instead
659 // of calling BlockProfile directly.
660 func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
662 for b := bbuckets; b != nil; b = b.allnext {
667 for b := bbuckets; b != nil; b = b.allnext {
670 r.Count = int64(bp.count)
671 // Prevent callers from having to worry about division by zero errors.
672 // See discussion on http://golang.org/cl/299991.
678 racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
681 msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
683 i := copy(r.Stack0[:], b.stk())
684 for ; i < len(r.Stack0); i++ {
694 // MutexProfile returns n, the number of records in the current mutex profile.
695 // If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
696 // Otherwise, MutexProfile does not change p, and returns n, false.
698 // Most clients should use the runtime/pprof package
699 // instead of calling MutexProfile directly.
700 func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
702 for b := xbuckets; b != nil; b = b.allnext {
707 for b := xbuckets; b != nil; b = b.allnext {
710 r.Count = int64(bp.count)
712 i := copy(r.Stack0[:], b.stk())
713 for ; i < len(r.Stack0); i++ {
723 // ThreadCreateProfile returns n, the number of records in the thread creation profile.
724 // If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
725 // If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
727 // Most clients should use the runtime/pprof package instead
728 // of calling ThreadCreateProfile directly.
729 func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
730 first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
731 for mp := first; mp != nil; mp = mp.alllink {
737 for mp := first; mp != nil; mp = mp.alllink {
738 p[i].Stack0 = mp.createstack
745 //go:linkname runtime_goroutineProfileWithLabels runtime/pprof.runtime_goroutineProfileWithLabels
746 func runtime_goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
747 return goroutineProfileWithLabels(p, labels)
750 // labels may be nil. If labels is non-nil, it must have the same length as p.
751 func goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
752 if labels != nil && len(labels) != len(p) {
757 isOK := func(gp1 *g) bool {
758 // Checking isSystemGoroutine here makes GoroutineProfile
759 // consistent with both NumGoroutine and Stack.
760 return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false)
763 stopTheWorld("profile")
765 // World is stopped, no locking required.
767 forEachGRace(func(gp1 *g) {
777 // Save current goroutine.
781 saveg(pc, sp, gp, &r[0])
785 // If we have a place to put our goroutine labelmap, insert it there.
791 // Save other goroutines.
792 forEachGRace(func(gp1 *g) {
798 // Should be impossible, but better to return a
799 // truncated profile than to crash the entire process.
802 saveg(^uintptr(0), ^uintptr(0), gp1, &r[0])
815 // GoroutineProfile returns n, the number of records in the active goroutine stack profile.
816 // If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
817 // If len(p) < n, GoroutineProfile does not change p and returns n, false.
819 // Most clients should use the runtime/pprof package instead
820 // of calling GoroutineProfile directly.
821 func GoroutineProfile(p []StackRecord) (n int, ok bool) {
823 return goroutineProfileWithLabels(p, nil)
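// A caller-side sketch of GoroutineProfile using the same grow-and-retry
// pattern as MemProfile (the +10 slack is an arbitrary choice):
//
//	n, _ := runtime.GoroutineProfile(nil)
//	p := make([]runtime.StackRecord, n+10)
//	if n, ok := runtime.GoroutineProfile(p); ok {
//		p = p[:n]
//	}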
826 func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
827 n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, 0)
828 if n < len(r.Stack0) {
833 // Stack formats a stack trace of the calling goroutine into buf
834 // and returns the number of bytes written to buf.
835 // If all is true, Stack formats stack traces of all other goroutines
836 // into buf after the trace for the current goroutine.
837 func Stack(buf []byte, all bool) int {
839 stopTheWorld("stack trace")
849 // Force traceback=1 to override GOTRACEBACK setting,
850 // so that Stack's results are consistent.
851 // GOTRACEBACK is only about crash dumps.
853 g0.writebuf = buf[0:0:len(buf)]
855 traceback(pc, sp, 0, gp)
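// A minimal caller-side usage sketch of Stack (the buffer size is the
// caller's choice; grow and retry if the trace fills it):
//
//	buf := make([]byte, 1<<16)
//	n := runtime.Stack(buf, true) // true: include all goroutines
//	os.Stderr.Write(buf[:n])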
871 // Tracing of alloc/free/gc.
875 func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
880 print("tracealloc(", p, ", ", hex(size), ")\n")
882 print("tracealloc(", p, ", ", hex(size), ", ", typ.string(), ")\n")
884 if gp.m.curg == nil || gp == gp.m.curg {
889 traceback(pc, sp, 0, gp)
892 goroutineheader(gp.m.curg)
893 traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
900 func tracefree(p unsafe.Pointer, size uintptr) {
904 print("tracefree(", p, ", ", hex(size), ")\n")
909 traceback(pc, sp, 0, gp)
921 // running on m->g0 stack; show all non-g0 goroutines
923 print("end tracegc\n")