// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Malloc profiling.
// Patterned after tcmalloc's algorithms; shorter code.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)
// NOTE(rsc): Everything here could use cas if contention became an issue.
var proflock mutex

// All memory allocations are local and do not escape outside of the profiler.
// The profiler is forbidden from referring to garbage-collected memory.

const (
	// profile types
	memProfile bucketType = 1 + iota
	blockProfile
	mutexProfile

	// size of bucket hash table
	buckHashSize = 179999

	// max depth of stack to record in bucket
	maxStack = 32
)

type bucketType int
// A bucket holds per-call-stack profiling information.
// The representation is a bit sleazy, inherited from C.
// This struct defines the bucket header. It is followed in
// memory by the stack words and then the actual record
// data, either a memRecord or a blockRecord.
//
// Per-call-stack profiling information.
// Lookup by hashing call stack into a linked-list hash table.
type bucket struct {
	next    *bucket
	allnext *bucket
	typ     bucketType // memBucket or blockBucket (includes mutexProfile)
	hash    uintptr
	size    uintptr
	nstk    uintptr
}
// A memRecord is the bucket data for a bucket of type memProfile,
// part of the memory profile.
type memRecord struct {
	// The following complex 3-stage scheme of stats accumulation
	// is required to obtain a consistent picture of mallocs and frees
	// for some point in time.
	// The problem is that mallocs come in real time, while frees
	// come only after a GC during concurrent sweeping. So if we naively
	// counted them, we would get a skew toward mallocs.
	//
	// Mallocs are accounted in recent stats.
	// Explicit frees are accounted in recent stats.
	// GC frees are accounted in prev stats.
	// After GC prev stats are added to final stats and
	// recent stats are moved into prev stats.

	// total stats
	allocs      uintptr
	frees       uintptr
	alloc_bytes uintptr
	free_bytes  uintptr

	// changes between next-to-last GC and last GC
	prev_allocs      uintptr
	prev_frees       uintptr
	prev_alloc_bytes uintptr
	prev_free_bytes  uintptr

	// changes since last GC
	recent_allocs      uintptr
	recent_frees       uintptr
	recent_alloc_bytes uintptr
	recent_free_bytes  uintptr
}
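
// An illustrative trace of the 3-stage scheme above (not part of the
// runtime; the numbers are invented). Suppose a site performs 5 mallocs
// and then no further activity:
//
//	                 allocs  prev_allocs  recent_allocs
//	5 mallocs arrive      0            0              5
//	GC 1 flush            0            5              0   // recent -> prev
//	GC 2 flush            5            0              0   // prev -> final
//
// Readers report only the final stats, so a malloc never becomes visible
// before the matching free has had a chance to be counted.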
// A blockRecord is the bucket data for a bucket of type blockProfile,
// which is used in blocking and mutex profiles.
type blockRecord struct {
	count  int64
	cycles int64
}

var (
	mbuckets *bucket // memory profile buckets
	bbuckets *bucket // blocking profile buckets
	xbuckets *bucket // mutex profile buckets
	buckhash *[179999]*bucket
)
// newBucket allocates a bucket with the given type and number of stack entries.
func newBucket(typ bucketType, nstk int) *bucket {
	size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
	switch typ {
	default:
		throw("invalid profile bucket type")
	case memProfile:
		size += unsafe.Sizeof(memRecord{})
	case blockProfile, mutexProfile:
		size += unsafe.Sizeof(blockRecord{})
	}

	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
	b.typ = typ
	b.nstk = uintptr(nstk)
	return b
}
// stk returns the slice in b holding the stack.
func (b *bucket) stk() []uintptr {
	stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
	return stk[:b.nstk:b.nstk]
}
// mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord {
	if b.typ != memProfile {
		throw("bad use of bucket.mp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*memRecord)(data)
}
// bp returns the blockRecord associated with the blockProfile bucket b.
func (b *bucket) bp() *blockRecord {
	if b.typ != blockProfile && b.typ != mutexProfile {
		throw("bad use of bucket.bp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*blockRecord)(data)
}
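
// An illustrative picture of the layout that stk, mp, and bp decode (not
// part of the runtime). For a memProfile bucket with nstk == 3 on a
// 64-bit machine, persistentalloc returns one contiguous block:
//
//	offset 0                         bucket header (unsafe.Sizeof(bucket{}))
//	offset Sizeof(bucket{})          3 stack PCs, 8 bytes each
//	offset Sizeof(bucket{}) + 3*8    memRecord
//
// The accessors recompute these offsets with pointer arithmetic rather
// than storing them.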
// Return the bucket for stk[0:nstk], allocating new bucket if needed.
func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
	if buckhash == nil {
		buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
		if buckhash == nil {
			throw("runtime: cannot allocate memory")
		}
	}

	// Hash stack.
	var h uintptr
	for _, pc := range stk {
		h += pc
		h += h << 10
		h ^= h >> 6
	}
	// hash in size
	h += size
	h += h << 10
	h ^= h >> 6
	// finalize
	h += h << 3
	h ^= h >> 11

	i := int(h % buckHashSize)
	for b := buckhash[i]; b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			return b
		}
	}

	if !alloc {
		return nil
	}

	// Create new bucket.
	b := newBucket(typ, len(stk))
	copy(b.stk(), stk)
	b.hash = h
	b.size = size
	b.next = buckhash[i]
	buckhash[i] = b
	if typ == memProfile {
		b.allnext = mbuckets
		mbuckets = b
	} else if typ == mutexProfile {
		b.allnext = xbuckets
		xbuckets = b
	} else {
		b.allnext = bbuckets
		bbuckets = b
	}
	return b
}
func eqslice(x, y []uintptr) bool {
	if len(x) != len(y) {
		return false
	}
	for i, xi := range x {
		if xi != y[i] {
			return false
		}
	}
	return true
}

func mprof_GC() {
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		mp.allocs += mp.prev_allocs
		mp.frees += mp.prev_frees
		mp.alloc_bytes += mp.prev_alloc_bytes
		mp.free_bytes += mp.prev_free_bytes

		mp.prev_allocs = mp.recent_allocs
		mp.prev_frees = mp.recent_frees
		mp.prev_alloc_bytes = mp.recent_alloc_bytes
		mp.prev_free_bytes = mp.recent_free_bytes

		mp.recent_allocs = 0
		mp.recent_frees = 0
		mp.recent_alloc_bytes = 0
		mp.recent_free_bytes = 0
	}
}

// Record that a gc just happened: all the 'recent' statistics are now real.
func mProf_GC() {
	lock(&proflock)
	mprof_GC()
	unlock(&proflock)
}
// Called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
	var stk [maxStack]uintptr
	nstk := callers(4, stk[:])
	lock(&proflock)
	b := stkbucket(memProfile, size, stk[:nstk], true)
	mp := b.mp()
	mp.recent_allocs++
	mp.recent_alloc_bytes += size
	unlock(&proflock)

	// Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
	// This reduces potential contention and chances of deadlocks.
	// Since the object must be alive during the call to mProf_Malloc,
	// it's fine to do this non-atomically.
	systemstack(func() {
		setprofilebucket(p, b)
	})
}
// Called when freeing a profiled block.
func mProf_Free(b *bucket, size uintptr) {
	lock(&proflock)
	mp := b.mp()
	mp.prev_frees++
	mp.prev_free_bytes += size
	unlock(&proflock)
}
var blockprofilerate uint64 // in CPU ticks

// SetBlockProfileRate controls the fraction of goroutine blocking events
// that are reported in the blocking profile. The profiler aims to sample
// an average of one blocking event per rate nanoseconds spent blocked.
//
// To include every blocking event in the profile, pass rate = 1.
// To turn off profiling entirely, pass rate <= 0.
func SetBlockProfileRate(rate int) {
	var r int64
	if rate <= 0 {
		r = 0 // disable profiling
	} else if rate == 1 {
		r = 1 // profile everything
	} else {
		// convert ns to cycles, use float64 to prevent overflow during multiplication
		r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
		if r == 0 {
			r = 1
		}
	}

	atomic.Store64(&blockprofilerate, uint64(r))
}
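
// Illustrative use (not part of the runtime): enable block profiling at
// program start and dump the profile on exit. As a worked example of the
// conversion above, rate = 1e6 ns (one sample per millisecond blocked) on
// a hypothetical 2.4 GHz machine gives r = 1e6 * 2.4e9 / 1e9 = 2.4e6 cycles.
//
//	package main
//
//	import (
//		"os"
//		"runtime"
//		"runtime/pprof"
//	)
//
//	func main() {
//		runtime.SetBlockProfileRate(1) // record every blocking event
//		defer pprof.Lookup("block").WriteTo(os.Stdout, 1)
//		// ... code whose blocking behavior is being profiled ...
//	}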
func blockevent(cycles int64, skip int) {
	if cycles <= 0 {
		cycles = 1
	}
	if blocksampled(cycles) {
		saveblockevent(cycles, skip+1, blockProfile, &blockprofilerate)
	}
}
func blocksampled(cycles int64) bool {
	rate := int64(atomic.Load64(&blockprofilerate))
	if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
		return false
	}
	return true
}
func saveblockevent(cycles int64, skip int, which bucketType, ratep *uint64) {
	gp := getg()
	var nstk int
	var stk [maxStack]uintptr
	if gp.m.curg == nil || gp.m.curg == gp {
		nstk = callers(skip, stk[:])
	} else {
		nstk = gcallers(gp.m.curg, skip, stk[:])
	}
	lock(&proflock)
	b := stkbucket(which, 0, stk[:nstk], true)
	b.bp().count++
	b.bp().cycles += cycles
	unlock(&proflock)
}
var mutexprofilerate uint64 // fraction sampled

// SetMutexProfileFraction controls the fraction of mutex contention events
// that are reported in the mutex profile. On average 1/rate events are
// reported. The previous rate is returned.
//
// To turn off profiling entirely, pass rate 0.
// To just read the current rate, pass rate -1.
// (For n>1 the details of sampling may change.)
func SetMutexProfileFraction(rate int) int {
	if rate < 0 {
		return int(mutexprofilerate)
	}
	old := mutexprofilerate
	atomic.Store64(&mutexprofilerate, uint64(rate))
	return int(old)
}
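
// Illustrative use (not part of the runtime): sample roughly one in five
// contention events for the duration of a test, then restore the old rate.
//
//	old := runtime.SetMutexProfileFraction(5)
//	defer runtime.SetMutexProfileFraction(old)
//	// ... exercise the contended code paths ...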
//go:linkname mutexevent sync.event
func mutexevent(cycles int64, skip int) {
	if cycles < 0 {
		cycles = 0
	}
	rate := int64(atomic.Load64(&mutexprofilerate))
	// TODO(pjw): measure impact of always calling fastrand vs using something
	// like malloc.go:nextSample()
	if rate > 0 && int64(fastrand())%rate == 0 {
		saveblockevent(cycles, skip+1, mutexProfile, &mutexprofilerate)
	}
}
// Go interface to profile data.

// A StackRecord describes a single execution stack.
type StackRecord struct {
	Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *StackRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}
// MemProfileRate controls the fraction of memory allocations
// that are recorded and reported in the memory profile.
// The profiler aims to sample an average of
// one allocation per MemProfileRate bytes allocated.
//
// To include every allocated block in the profile, set MemProfileRate to 1.
// To turn off profiling entirely, set MemProfileRate to 0.
//
// The tools that process the memory profiles assume that the
// profile rate is constant across the lifetime of the program
// and equal to the current value. Programs that change the
// memory profiling rate should do so just once, as early as
// possible in the execution of the program (for example,
// at the beginning of main).
var MemProfileRate int = 512 * 1024
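
// Illustrative use (not part of the runtime): because the processing tools
// assume a constant rate, set it once before any allocation of interest.
//
//	func main() {
//		runtime.MemProfileRate = 1 // sample every allocation (expensive)
//		// ... rest of the program ...
//	}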
// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
type MemProfileRecord struct {
	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
	AllocObjects, FreeObjects int64       // number of objects allocated, freed
	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
}

// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }

// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (r *MemProfileRecord) InUseObjects() int64 {
	return r.AllocObjects - r.FreeObjects
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *MemProfileRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}
// MemProfile returns a profile of memory allocated and freed per allocation
// site.
//
// MemProfile returns n, the number of records in the current memory profile.
// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
// If len(p) < n, MemProfile does not change p and returns n, false.
//
// If inuseZero is true, the profile includes allocation records
// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
// These are sites where memory was allocated, but it has all
// been released back to the runtime.
//
// The returned profile may be up to two garbage collection cycles old.
// This is to avoid skewing the profile toward allocations; because
// allocations happen in real time but frees are delayed until the garbage
// collector performs sweeping, the profile only accounts for allocations
// that have had a chance to be freed by the garbage collector.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.memprofile flag instead
// of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
	lock(&proflock)
	clear := true
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		if inuseZero || mp.alloc_bytes != mp.free_bytes {
			n++
		}
		if mp.allocs != 0 || mp.frees != 0 {
			clear = false
		}
	}
	if clear {
		// Absolutely no data, suggesting that a garbage collection
		// has not yet happened. In order to allow profiling when
		// garbage collection is disabled from the beginning of execution,
		// accumulate stats as if a GC just happened, and recount buckets.
		mprof_GC()
		mprof_GC()
		n = 0
		for b := mbuckets; b != nil; b = b.allnext {
			mp := b.mp()
			if inuseZero || mp.alloc_bytes != mp.free_bytes {
				n++
			}
		}
	}
	if n <= len(p) {
		ok = true
		idx := 0
		for b := mbuckets; b != nil; b = b.allnext {
			mp := b.mp()
			if inuseZero || mp.alloc_bytes != mp.free_bytes {
				record(&p[idx], b)
				idx++
			}
		}
	}
	unlock(&proflock)
	return
}
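
// Illustrative use (not part of the runtime): the usual retry loop around
// MemProfile, with headroom because the profile can grow between calls,
// followed by a sum of live bytes.
//
//	var p []runtime.MemProfileRecord
//	n, ok := runtime.MemProfile(nil, true)
//	for {
//		p = make([]runtime.MemProfileRecord, n+50)
//		n, ok = runtime.MemProfile(p, true)
//		if ok {
//			p = p[:n]
//			break
//		}
//	}
//	var inUse int64
//	for i := range p {
//		inUse += p[i].InUseBytes()
//	}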
// Write b's data to r.
func record(r *MemProfileRecord, b *bucket) {
	mp := b.mp()
	r.AllocBytes = int64(mp.alloc_bytes)
	r.FreeBytes = int64(mp.free_bytes)
	r.AllocObjects = int64(mp.allocs)
	r.FreeObjects = int64(mp.frees)
	if raceenabled {
		racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(unsafe.Pointer(&r)), funcPC(MemProfile))
	}
	if msanenabled {
		msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
	}
	copy(r.Stack0[:], b.stk())
	for i := int(b.nstk); i < len(r.Stack0); i++ {
		r.Stack0[i] = 0
	}
}
func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
	lock(&proflock)
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		fn(b, b.nstk, &b.stk()[0], b.size, mp.allocs, mp.frees)
	}
	unlock(&proflock)
}
// BlockProfileRecord describes blocking events originating
// at a particular call sequence (stack trace).
type BlockProfileRecord struct {
	Count  int64
	Cycles int64
	StackRecord
}
// BlockProfile returns n, the number of records in the current blocking profile.
// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
// If len(p) < n, BlockProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.blockprofile flag instead
// of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&proflock)
	for b := bbuckets; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := bbuckets; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = int64(bp.count)
			r.Cycles = bp.cycles
			if raceenabled {
				racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(unsafe.Pointer(&p)), funcPC(BlockProfile))
			}
			if msanenabled {
				msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
			}
			i := copy(r.Stack0[:], b.stk())
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}
	}
	unlock(&proflock)
	return
}
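
// Illustrative use (not part of the runtime): the same two-call sizing
// pattern as MemProfile, with a little headroom in case records appear
// between the calls.
//
//	n, _ := runtime.BlockProfile(nil)
//	p := make([]runtime.BlockProfileRecord, n+10)
//	n, _ = runtime.BlockProfile(p)
//	p = p[:n]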
// MutexProfile returns n, the number of records in the current mutex profile.
// If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
// Otherwise, MutexProfile does not change p, and returns n, false.
//
// Most clients should use the runtime/pprof package
// instead of calling MutexProfile directly.
func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&proflock)
	for b := xbuckets; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := xbuckets; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = int64(bp.count)
			r.Cycles = bp.cycles
			i := copy(r.Stack0[:], b.stk())
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}
	}
	unlock(&proflock)
	return
}
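
// Illustrative use (not part of the runtime): mutex profiles are usually
// consumed through runtime/pprof rather than read directly; this assumes
// imports of os, runtime, and runtime/pprof.
//
//	runtime.SetMutexProfileFraction(5)
//	// ... run the workload ...
//	pprof.Lookup("mutex").WriteTo(os.Stdout, 1)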
// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
	first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
	for mp := first; mp != nil; mp = mp.alllink {
		n++
	}
	if n <= len(p) {
		ok = true
		i := 0
		for mp := first; mp != nil; mp = mp.alllink {
			p[i].Stack0 = mp.createstack
			i++
		}
	}
	return
}
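
// Illustrative use (not part of the runtime):
//
//	n, _ := runtime.ThreadCreateProfile(nil)
//	p := make([]runtime.StackRecord, n+10)
//	n, _ = runtime.ThreadCreateProfile(p)
//	for _, r := range p[:n] {
//		_ = r.Stack() // PCs at the point each thread was created
//	}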
// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool) {
	gp := getg()
	isOK := func(gp1 *g) bool {
		// Checking isSystemGoroutine here makes GoroutineProfile
		// consistent with both NumGoroutine and Stack.
		return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1)
	}

	stopTheWorld("profile")

	n = 1
	for _, gp1 := range allgs {
		if isOK(gp1) {
			n++
		}
	}

	if n <= len(p) {
		ok = true
		r := p

		// Save current goroutine.
		sp := getcallersp(unsafe.Pointer(&p))
		pc := getcallerpc(unsafe.Pointer(&p))
		systemstack(func() {
			saveg(pc, sp, gp, &r[0])
		})
		r = r[1:]

		// Save other goroutines.
		for _, gp1 := range allgs {
			if isOK(gp1) {
				if len(r) == 0 {
					// Should be impossible, but better to return a
					// truncated profile than to crash the entire process.
					break
				}
				saveg(^uintptr(0), ^uintptr(0), gp1, &r[0])
				r = r[1:]
			}
		}
	}

	startTheWorld()

	return n, ok
}
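
// Illustrative use (not part of the runtime): allow headroom, since
// goroutines can be created between the sizing call and the copy.
//
//	n, _ := runtime.GoroutineProfile(nil)
//	p := make([]runtime.StackRecord, n+10)
//	if n, ok := runtime.GoroutineProfile(p); ok {
//		p = p[:n]
//	}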
// saveg saves gp's stack trace into r, zero-terminating it if it is
// shorter than r.Stack0.
func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
	n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, 0)
	if n < len(r.Stack0) {
		r.Stack0[n] = 0
	}
}
// Stack formats a stack trace of the calling goroutine into buf
// and returns the number of bytes written to buf.
// If all is true, Stack formats stack traces of all other goroutines
// into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int {
	if all {
		stopTheWorld("stack trace")
	}

	n := 0
	if len(buf) > 0 {
		gp := getg()
		sp := getcallersp(unsafe.Pointer(&buf))
		pc := getcallerpc(unsafe.Pointer(&buf))
		systemstack(func() {
			g0 := getg()
			// Force traceback=1 to override GOTRACEBACK setting,
			// so that Stack's results are consistent.
			// GOTRACEBACK is only about crash dumps.
			g0.m.traceback = 1
			g0.writebuf = buf[0:0:len(buf)]
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
			if all {
				tracebackothers(gp)
			}
			g0.m.traceback = 0
			n = len(g0.writebuf)
			g0.writebuf = nil
		})
	}

	if all {
		startTheWorld()
	}
	return n
}
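
// Illustrative use (not part of the runtime; assumes an os import): dump
// all goroutine stacks, growing the buffer until the trace fits.
//
//	buf := make([]byte, 1<<16)
//	for {
//		n := runtime.Stack(buf, true)
//		if n < len(buf) {
//			os.Stderr.Write(buf[:n])
//			break
//		}
//		buf = make([]byte, 2*len(buf))
//	}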
// Tracing of alloc/free/gc.

var tracelock mutex

func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	if typ == nil {
		print("tracealloc(", p, ", ", hex(size), ")\n")
	} else {
		print("tracealloc(", p, ", ", hex(size), ", ", typ.string(), ")\n")
	}
	if gp.m.curg == nil || gp == gp.m.curg {
		goroutineheader(gp)
		pc := getcallerpc(unsafe.Pointer(&p))
		sp := getcallersp(unsafe.Pointer(&p))
		systemstack(func() {
			traceback(pc, sp, 0, gp)
		})
	} else {
		goroutineheader(gp.m.curg)
		traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
	}
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

func tracefree(p unsafe.Pointer, size uintptr) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracefree(", p, ", ", hex(size), ")\n")
	goroutineheader(gp)
	pc := getcallerpc(unsafe.Pointer(&p))
	sp := getcallersp(unsafe.Pointer(&p))
	systemstack(func() {
		traceback(pc, sp, 0, gp)
	})
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

func tracegc() {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracegc()\n")
	// running on m->g0 stack; show all non-g0 goroutines
	tracebackothers(gp)
	print("end tracegc\n")
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}