// size of bucket hash table
buckHashSize = 179999
- // max depth of stack to record in bucket
+ // maxStack is the max depth of stack to record in bucket.
+ // Note that it's only used internally as a guard against
+ // wildly out-of-bounds slicing of the PCs that come after
+ // a bucket struct, and it could increase in the future.
maxStack = 32
)
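// A minimal sketch (not verbatim from the record path, which lives in
// saveblockevent and friends): maxStack caps how many PCs one profiling
// event can store after its bucket struct, so deeper stacks are
// truncated rather than sliced out of bounds.
//
//	var stk [maxStack]uintptr
//	nstk := callers(skip, stk[:]) // callers stops at len(stk)
//	// only stk[:nstk] is copied into the bucket's trailing PC array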
r = 1 // profile everything
} else {
// convert ns to cycles; use float64 to prevent overflow during multiplication
- r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
+ r = int64(float64(rate) * float64(ticksPerSecond()) / (1000 * 1000 * 1000))
if r == 0 {
r = 1
}
cycles = 0
}
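// Worked example with illustrative numbers (not from the source): for
// rate = 100ns on a machine where ticksPerSecond() reports 2.5e9, this
// yields r = int64(100 * 2.5e9 / 1e9) = 250 ticks. The float64 detour
// matters for large rates: the intermediate int64 product for
// rate = 1e10 would be 1e10 * 2.5e9 = 2.5e19, which overflows int64,
// while the final quotient 2.5e10 fits comfortably.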
rate := int64(atomic.Load64(&mutexprofilerate))
- // TODO(pjw): measure impact of always calling fastrand vs using something
- // like malloc.go:nextSample()
if rate > 0 && int64(fastrand())%rate == 0 {
saveblockevent(cycles, rate, skip+1, mutexProfile)
}
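// Caller-side sketch: the check above fires on average once per rate
// events, and the public knob maps directly onto it. For example,
//
//	runtime.SetMutexProfileFraction(5) // sample ~1 in 5 contention events
//
// leaves roughly 80% of contended unlocks out of the profile.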
// If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
// Otherwise, MutexProfile does not change p, and returns n, false.
//
-// Most clients should use the runtime/pprof package
+// Most clients should use the [runtime/pprof] package
// instead of calling MutexProfile directly.
func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
lock(&profBlockLock)
systemstack(func() {
saveg(pc, sp, ourg, &p[0])
})
+ if labels != nil {
+ labels[0] = ourg.labels
+ }
ourg.goroutineProfiled.Store(goroutineProfileSatisfied)
goroutineProfile.offset.Store(1)
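// Grow-and-retry sketch for the snapshot contract documented above
// (GoroutineProfile below follows the same shape): size the slice with
// a nil call, then retry with headroom until the profile fits.
//
//	n, ok := runtime.MutexProfile(nil)
//	var p []runtime.BlockProfileRecord
//	for !ok {
//		p = make([]runtime.BlockProfileRecord, n+10)
//		n, ok = runtime.MutexProfile(p)
//	}
//	p = p[:n]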
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
//
-// Most clients should use the runtime/pprof package instead
+// Most clients should use the [runtime/pprof] package instead
// of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool) {
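// The labels plumbing added above surfaces through runtime/pprof rather
// than this API: StackRecord carries no label field, so labeled
// goroutine profiles come from the pprof writer. Caller-side sketch:
//
//	pprof.Do(ctx, pprof.Labels("worker", "7"), func(ctx context.Context) {
//		// goroutines started here inherit the label, and it shows up
//		// in goroutine profiles rendered by runtime/pprof
//	})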