// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Malloc profiling.
// Patterned after tcmalloc's algorithms; shorter code.

package runtime

import "unsafe"

// NOTE(rsc): Everything here could use cas if contention became an issue.
var proflock mutex

// All memory allocations are local and do not escape outside of the profiler.
// The profiler is forbidden from referring to garbage-collected memory.

const (
	// profile types
	memProfile bucketType = 1 + iota
	blockProfile

	// size of bucket hash table
	buckHashSize = 179999

	// max depth of stack to record in bucket
	maxStack = 32
)

type bucketType int

// A bucket holds per-call-stack profiling information.
// The representation is a bit sleazy, inherited from C.
// This struct defines the bucket header. It is followed in
// memory by the stack words and then the actual record
// data, either a memRecord or a blockRecord.
//
// Buckets are looked up by hashing the call stack into a
// linked-list hash table.
type bucket struct {
	next    *bucket
	allnext *bucket
	typ     bucketType // memProfile or blockProfile
	hash    uintptr
	size    uintptr
	nstk    uintptr
}

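// Illustrative layout (a comment sketch, not a declaration): each bucket is a
// single allocation of the form
//
//	+---------------+------------------+---------------------------+
//	| bucket header | nstk stack words | memRecord or blockRecord  |
//	+---------------+------------------+---------------------------+
//
// The stk, mp, and bp methods below recover the second and third pieces from
// the header pointer using the offsets implied by this layout.
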
// A memRecord is the bucket data for a bucket of type memProfile,
// part of the memory profile.
type memRecord struct {
	// The following complex 3-stage scheme of stats accumulation
	// is required to obtain a consistent picture of mallocs and frees
	// for some point in time.
	// The problem is that mallocs come in real time, while frees
	// come only after a GC during concurrent sweeping. So if we counted
	// them naively, the profile would be skewed toward mallocs.
	//
	// Mallocs are accounted in recent stats.
	// Explicit frees are accounted in recent stats.
	// GC frees are accounted in prev stats.
	// After a GC, prev stats are added to final stats and
	// recent stats are moved into prev stats.
	allocs      uintptr
	frees       uintptr
	alloc_bytes uintptr
	free_bytes  uintptr

	// changes between next-to-last GC and last GC
	prev_allocs      uintptr
	prev_frees       uintptr
	prev_alloc_bytes uintptr
	prev_free_bytes  uintptr

	// changes since last GC
	recent_allocs      uintptr
	recent_frees       uintptr
	recent_alloc_bytes uintptr
	recent_free_bytes  uintptr
}

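// Worked example of the scheme above (a sketch, not runtime code): suppose a
// single sampled 64-byte object is allocated and later collected.
//
//	malloc:           recent_allocs=1, recent_alloc_bytes=64
//	GC #1 (mProf_GC): recent moves to prev: prev_allocs=1, prev_alloc_bytes=64
//	sweep frees it:   mProf_Free credits prev: prev_frees=1, prev_free_bytes=64
//	GC #2 (mProf_GC): prev is added to the final stats: allocs=1 and frees=1
//	                  are published together
//
// The allocation and its matching free therefore surface in the same
// snapshot, which keeps the in-use numbers reported by MemProfile consistent.
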
// A blockRecord is the bucket data for a bucket of type blockProfile,
// part of the blocking profile.
type blockRecord struct {
	count  int64
	cycles int64
}

var (
	mbuckets  *bucket // memory profile buckets
	bbuckets  *bucket // blocking profile buckets
	buckhash  *[179999]*bucket
	bucketmem uintptr
)

// newBucket allocates a bucket with the given type and number of stack entries.
func newBucket(typ bucketType, nstk int) *bucket {
	size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
	switch typ {
	default:
		throw("invalid profile bucket type")
	case memProfile:
		size += unsafe.Sizeof(memRecord{})
	case blockProfile:
		size += unsafe.Sizeof(blockRecord{})
	}

	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
	bucketmem += size
	b.typ = typ
	b.nstk = uintptr(nstk)
	return b
}

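// Note: bucket memory comes from persistentalloc, so it is never freed and is
// not scanned by the garbage collector. That is what makes it safe for the
// profiler to hold bucket pointers (see setprofilebucket in mProf_Malloc)
// while obeying the rule above that it must not refer to garbage-collected
// memory.
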
// stk returns the slice in b holding the stack.
func (b *bucket) stk() []uintptr {
	stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
	return stk[:b.nstk:b.nstk]
}

// mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord {
	if b.typ != memProfile {
		throw("bad use of bucket.mp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*memRecord)(data)
}

// bp returns the blockRecord associated with the blockProfile bucket b.
func (b *bucket) bp() *blockRecord {
	if b.typ != blockProfile {
		throw("bad use of bucket.bp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*blockRecord)(data)
}

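// For example (illustrative only): on a 64-bit system, a memProfile bucket
// recording a 3-frame stack keeps its memRecord at
// unsafe.Sizeof(bucket{}) + 3*8 bytes past b, which is exactly the address
// that mp computes above.
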
// Return the bucket for stk[0:nstk], allocating a new bucket if needed.
func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
	if buckhash == nil {
		buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
		if buckhash == nil {
			throw("runtime: cannot allocate memory")
		}
	}

	// Hash stack.
	var h uintptr
	for _, pc := range stk {
		h += pc
		h += h << 10
		h ^= h >> 6
	}
	// hash in size
	h += size
	h += h << 10
	h ^= h >> 6
	// finalize
	h += h << 3
	h ^= h >> 11

	i := int(h % buckHashSize)
	for b := buckhash[i]; b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			return b
		}
	}

	if !alloc {
		return nil
	}

	// Create new bucket.
	b := newBucket(typ, len(stk))
	copy(b.stk(), stk)
	b.hash = h
	b.size = size
	b.next = buckhash[i]
	buckhash[i] = b
	if typ == memProfile {
		b.allnext = mbuckets
		mbuckets = b
	} else {
		b.allnext = bbuckets
		bbuckets = b
	}
	return b
}

func eqslice(x, y []uintptr) bool {
	if len(x) != len(y) {
		return false
	}
	for i, xi := range x {
		if xi != y[i] {
			return false
		}
	}
	return true
}

func mprof_GC() {
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		mp.allocs += mp.prev_allocs
		mp.frees += mp.prev_frees
		mp.alloc_bytes += mp.prev_alloc_bytes
		mp.free_bytes += mp.prev_free_bytes

		mp.prev_allocs = mp.recent_allocs
		mp.prev_frees = mp.recent_frees
		mp.prev_alloc_bytes = mp.recent_alloc_bytes
		mp.prev_free_bytes = mp.recent_free_bytes

		mp.recent_allocs = 0
		mp.recent_frees = 0
		mp.recent_alloc_bytes = 0
		mp.recent_free_bytes = 0
	}
}

// Record that a gc just happened: all the 'recent' statistics are now real.
func mProf_GC() {
	lock(&proflock)
	mprof_GC()
	unlock(&proflock)
}

// Called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
	var stk [maxStack]uintptr
	nstk := callers(4, stk[:])
	lock(&proflock)
	b := stkbucket(memProfile, size, stk[:nstk], true)
	mp := b.mp()
	mp.recent_allocs++
	mp.recent_alloc_bytes += size
	unlock(&proflock)

	// setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
	// This reduces potential contention and chances of deadlocks.
	// Since the object must be alive during the call to mProf_Malloc,
	// it's fine to do this non-atomically.
	systemstack(func() {
		setprofilebucket(p, b)
	})
}

// Called when freeing a profiled block.
func mProf_Free(b *bucket, size uintptr) {
	lock(&proflock)
	mp := b.mp()
	mp.prev_frees++
	mp.prev_free_bytes += size
	unlock(&proflock)
}

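// The bucket passed to mProf_Free is the one that setprofilebucket attached to
// the sampled object in mProf_Malloc; the sweeper looks that association up
// when the object is reclaimed, so the free is charged to the same call stack
// as the allocation.
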
var blockprofilerate uint64 // in CPU ticks

// SetBlockProfileRate controls the fraction of goroutine blocking events
// that are reported in the blocking profile. The profiler aims to sample
// an average of one blocking event per rate nanoseconds spent blocked.
//
// To include every blocking event in the profile, pass rate = 1.
// To turn off profiling entirely, pass rate <= 0.
func SetBlockProfileRate(rate int) {
	var r int64
	if rate <= 0 {
		r = 0 // disable profiling
	} else if rate == 1 {
		r = 1 // profile everything
	} else {
		// convert ns to cycles, use float64 to prevent overflow during multiplication
		r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
		if r == 0 {
			r = 1
		}
	}

	atomicstore64(&blockprofilerate, uint64(r))
}

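// Example use from client code (an illustrative sketch, not part of the
// runtime): record every blocking event and dump the profile with
// runtime/pprof:
//
//	runtime.SetBlockProfileRate(1) // sample every blocking event
//	defer runtime.SetBlockProfileRate(0)
//	// ... run the workload of interest ...
//	pprof.Lookup("block").WriteTo(os.Stdout, 1) // debug=1 prints a readable form
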
func blockevent(cycles int64, skip int) {
	if cycles <= 0 {
		cycles = 1
	}
	rate := int64(atomicload64(&blockprofilerate))
	if rate <= 0 || (rate > cycles && int64(fastrand1())%rate > cycles) {
		return
	}
	gp := getg()
	var nstk int
	var stk [maxStack]uintptr
	if gp.m.curg == nil || gp.m.curg == gp {
		nstk = callers(skip, stk[:])
	} else {
		nstk = gcallers(gp.m.curg, skip, stk[:])
	}
	lock(&proflock)
	b := stkbucket(blockProfile, 0, stk[:nstk], true)
	b.bp().count++
	b.bp().cycles += cycles
	unlock(&proflock)
}

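// Sampling note: an event that blocked for fewer cycles than the rate is kept
// with probability roughly cycles/rate (the fastrand1 check above), while
// longer events are always kept, so on average one event is recorded per rate
// cycles spent blocked.
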
// Go interface to profile data.

// A StackRecord describes a single execution stack.
type StackRecord struct {
	Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *StackRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfileRate controls the fraction of memory allocations
// that are recorded and reported in the memory profile.
// The profiler aims to sample an average of
// one allocation per MemProfileRate bytes allocated.
//
// To include every allocated block in the profile, set MemProfileRate to 1.
// To turn off profiling entirely, set MemProfileRate to 0.
//
// The tools that process the memory profiles assume that the
// profile rate is constant across the lifetime of the program
// and equal to the current value. Programs that change the
// memory profiling rate should do so just once, as early as
// possible in the execution of the program (for example,
// at the beginning of main).
var MemProfileRate int = 512 * 1024

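// Example use from client code (an illustrative sketch, not part of the
// runtime): sample every allocation and write a heap profile on exit:
//
//	func main() {
//		runtime.MemProfileRate = 1 // set before the allocations of interest
//		defer func() {
//			f, _ := os.Create("mem.pprof")
//			pprof.WriteHeapProfile(f)
//			f.Close()
//		}()
//		// ... rest of the program ...
//	}
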
// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
type MemProfileRecord struct {
	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
	AllocObjects, FreeObjects int64       // number of objects allocated, freed
	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
}

// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }

// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (r *MemProfileRecord) InUseObjects() int64 {
	return r.AllocObjects - r.FreeObjects
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *MemProfileRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfile returns n, the number of records in the current memory profile.
// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
// If len(p) < n, MemProfile does not change p and returns n, false.
//
// If inuseZero is true, the profile includes allocation records
// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
// These are sites where memory was allocated, but it has all
// been released back to the runtime.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.memprofile flag instead
// of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
	lock(&proflock)
	clear := true
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		if inuseZero || mp.alloc_bytes != mp.free_bytes {
			n++
		}
		if mp.allocs != 0 || mp.frees != 0 {
			clear = false
		}
	}
	if clear {
		// Absolutely no data, suggesting that a garbage collection
		// has not yet happened. In order to allow profiling when
		// garbage collection is disabled from the beginning of execution,
		// accumulate stats as if a GC just happened, and recount buckets.
		mprof_GC()
		mprof_GC()
		n = 0
		for b := mbuckets; b != nil; b = b.allnext {
			mp := b.mp()
			if inuseZero || mp.alloc_bytes != mp.free_bytes {
				n++
			}
		}
	}
	if n <= len(p) {
		ok = true
		idx := 0
		for b := mbuckets; b != nil; b = b.allnext {
			mp := b.mp()
			if inuseZero || mp.alloc_bytes != mp.free_bytes {
				record(&p[idx], b)
				idx++
			}
		}
	}
	unlock(&proflock)
	return
}

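// Example use from client code (an illustrative sketch of the n, ok contract;
// essentially what runtime/pprof does): grow the slice until the profile
// fits, since records can be added between calls:
//
//	n, ok := runtime.MemProfile(nil, true)
//	var p []runtime.MemProfileRecord
//	for !ok {
//		p = make([]runtime.MemProfileRecord, n+50)
//		n, ok = runtime.MemProfile(p, true)
//	}
//	p = p[:n]
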
// Write b's data to r.
func record(r *MemProfileRecord, b *bucket) {
	mp := b.mp()
	r.AllocBytes = int64(mp.alloc_bytes)
	r.FreeBytes = int64(mp.free_bytes)
	r.AllocObjects = int64(mp.allocs)
	r.FreeObjects = int64(mp.frees)
	copy(r.Stack0[:], b.stk())
	for i := int(b.nstk); i < len(r.Stack0); i++ {
		r.Stack0[i] = 0
	}
}

func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
	lock(&proflock)
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		fn(b, uintptr(b.nstk), &b.stk()[0], b.size, mp.allocs, mp.frees)
	}
	unlock(&proflock)
}

// BlockProfileRecord describes blocking events originating
// at a particular call sequence (stack trace).
type BlockProfileRecord struct {
	Count  int64
	Cycles int64
	StackRecord
}

// BlockProfile returns n, the number of records in the current blocking profile.
// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
// If len(p) < n, BlockProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.blockprofile flag instead
// of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&proflock)
	for b := bbuckets; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := bbuckets; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = int64(bp.count)
			r.Cycles = int64(bp.cycles)
			i := copy(r.Stack0[:], b.stk())
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}
	}
	unlock(&proflock)
	return
}

// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
	first := (*m)(atomicloadp(unsafe.Pointer(&allm)))
	for mp := first; mp != nil; mp = mp.alllink {
		n++
	}
	if n <= len(p) {
		ok = true
		i := 0
		for mp := first; mp != nil; mp = mp.alllink {
			for s := range mp.createstack {
				p[i].Stack0[s] = uintptr(mp.createstack[s])
			}
			i++
		}
	}
	return
}

// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool) {
	gp := getg()

	n = NumGoroutine()
	if n <= len(p) {
		stopTheWorld("profile")

		n = NumGoroutine()
		if n <= len(p) {
			ok = true
			r := p
			sp := getcallersp(unsafe.Pointer(&p))
			pc := getcallerpc(unsafe.Pointer(&p))
			systemstack(func() {
				saveg(pc, sp, gp, &r[0])
			})
			r = r[1:]
			for _, gp1 := range allgs {
				if gp1 == gp || readgstatus(gp1) == _Gdead {
					continue
				}
				saveg(^uintptr(0), ^uintptr(0), gp1, &r[0])
				r = r[1:]
			}
		}

		startTheWorld()
	}

	return n, ok
}

func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
	n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, 0)
	if n < len(r.Stack0) {
		r.Stack0[n] = 0
	}
}

// Stack formats a stack trace of the calling goroutine into buf
// and returns the number of bytes written to buf.
// If all is true, Stack formats stack traces of all other goroutines
// into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int {
	if all {
		stopTheWorld("stack trace")
	}

	n := 0
	if len(buf) > 0 {
		gp := getg()
		sp := getcallersp(unsafe.Pointer(&buf))
		pc := getcallerpc(unsafe.Pointer(&buf))
		systemstack(func() {
			g0 := getg()
			g0.writebuf = buf[0:0:len(buf)]
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
			if all {
				tracebackothers(gp)
			}
			n = len(g0.writebuf)
			g0.writebuf = nil
		})
	}

	if all {
		startTheWorld()
	}
	return n
}

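// Example use from client code (an illustrative sketch, not part of the
// runtime): dump the stacks of all goroutines, growing the buffer until the
// dump fits:
//
//	buf := make([]byte, 1<<16)
//	for {
//		n := runtime.Stack(buf, true)
//		if n < len(buf) {
//			os.Stderr.Write(buf[:n])
//			break
//		}
//		buf = make([]byte, 2*len(buf))
//	}
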
// Tracing of alloc/free/gc.

var tracelock mutex

func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	if typ == nil {
		print("tracealloc(", p, ", ", hex(size), ")\n")
	} else {
		print("tracealloc(", p, ", ", hex(size), ", ", *typ._string, ")\n")
	}
	if gp.m.curg == nil || gp == gp.m.curg {
		goroutineheader(gp)
		pc := getcallerpc(unsafe.Pointer(&p))
		sp := getcallersp(unsafe.Pointer(&p))
		systemstack(func() {
			traceback(pc, sp, 0, gp)
		})
	} else {
		goroutineheader(gp.m.curg)
		traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
	}
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

func tracefree(p unsafe.Pointer, size uintptr) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracefree(", p, ", ", hex(size), ")\n")
	goroutineheader(gp)
	pc := getcallerpc(unsafe.Pointer(&p))
	sp := getcallersp(unsafe.Pointer(&p))
	systemstack(func() {
		traceback(pc, sp, 0, gp)
	})
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

func tracegc() {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracegc()\n")
	// running on m->g0 stack; show all non-g0 goroutines
	tracebackothers(gp)
	print("end tracegc\n")
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}