// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// CPU profiling.
// Based on algorithms and data structures used in
// http://code.google.com/p/google-perftools/.
//
// The main difference between this code and the google-perftools
// code is that this code is written to allow copying the profile data
// to an arbitrary io.Writer, while the google-perftools code always
// writes to an operating system file.
//
// The signal handler for the profiling clock tick adds a new stack trace
// to a hash table tracking counts for recent traces. Most clock ticks
// hit in the cache. In the event of a cache miss, an entry must be
// evicted from the hash table and copied to a log that will eventually be
// written as profile data. The google-perftools code flushed the
// log itself during the signal handler. This code cannot do that, because
// the io.Writer might block or need system calls or locks that are not
// safe to use from within the signal handler. Instead, we split the log
// into two halves and let the signal handler fill one half while a goroutine
// is writing out the other half. When the signal handler fills its half, it
// offers to swap with the goroutine. If the writer is not done with its half,
// we lose the stack trace for this clock tick (and record that loss).
// The goroutine interacts with the signal handler by calling getprofile() to
// get the next log piece to write, implicitly handing back the last log
// piece.
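//
// Sketched with this file's own names (illustrative, not normative), the
// "offer to swap" is the single compare-and-swap that flushlog performs
// when the handler's half fills:
//
//	// signal handler side: publish the nlog entries and wake the writer
//	if atomic.Cas(&p.handoff, 0, uint32(p.nlog)) {
//		notewakeup(&p.wait)
//		p.toggle = 1 - p.toggle // start filling the other half
//	}
//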
// The state of this dance between the signal handler and the goroutine
// is encoded in the cpuProfile.handoff field. If handoff == 0, then the goroutine
// is not using either log half and is waiting (or will soon be waiting) for
// a new piece by calling notesleep(&p.wait). If the signal handler
// changes handoff from 0 to non-zero, it must call notewakeup(&p.wait)
// to wake the goroutine. The value indicates the number of entries in the
// log half being handed off. The goroutine leaves the non-zero value in
// place until it has finished processing the log half and then flips the number
// back to zero. Setting the high bit in handoff means that the profiling is over,
// and the goroutine is now in charge of flushing the data left in the hash table
// to the log and returning that data.
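//
// For example (an illustrative run, numbers arbitrary): the signal handler
// fills its half with 1500 entries, changes handoff from 0 to 1500, and
// wakes the goroutine; the goroutine drains log[wtoggle][:1500] and then
// sets handoff back to 0. A value of 0x80000000|1500 would instead mean
// "profiling is over; 1500 entries remain to be written before the final
// hash-table flush."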
//
// The handoff field is manipulated using atomic operations.
// For the most part, the manipulation of handoff is orderly: if handoff == 0
// then the signal handler owns it and can change it to non-zero.
// If handoff != 0 then the goroutine owns it and can change it to zero.
// If that were the end of the story then we would not need to manipulate
// handoff using atomic operations. The operations are needed, however,
// in order to let the log closer set the high bit to indicate "EOF" safely
// in the situation when normally the goroutine "owns" handoff.
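//
// Concretely (a sketch mirroring the shutdown path in SetCPUProfileRate
// below), the log closer retries its CAS until it lands, whichever side
// currently owns handoff:
//
//	for {
//		n := cpuprof.handoff
//		if atomic.Cas(&cpuprof.handoff, n, n|0x80000000) {
//			if n == 0 {
//				// 0 -> non-zero transition: the closer must wake the goroutine.
//				notewakeup(&cpuprof.wait)
//			}
//			break
//		}
//	}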
54 "runtime/internal/atomic"

type cpuprofEntry struct {
	count uintptr
	depth int
	stack [maxCPUProfStack]uintptr
}

type cpuProfile struct {
	on     bool    // profiling is on
	wait   note    // goroutine waits here
	count  uintptr // tick count
	evicts uintptr // eviction count
	lost   uintptr // lost ticks that need to be logged

	// Active recent stack traces.
	hash [numBuckets]struct {
		entry [assoc]cpuprofEntry
	}

	// Log of traces evicted from hash.
	// Signal handler has filled log[toggle][:nlog].
	// Goroutine is writing log[1-toggle][:handoff].
	log     [2][logSize / 2]uintptr
	nlog    int
	toggle  int32
	handoff uint32

	// Writer state.
	// Writer maintains its own toggle to avoid races
	// looking at signal handler's toggle.
	wtoggle  uint32
	wholding bool // holding & need to release a log half
	flushing bool // flushing hash table - profile is over
	eodSent  bool // special end-of-data record sent; => flushing
}
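
// For example, while toggle == 0 the signal handler appends evicted
// records to log[0]; the writer, having flipped its private wtoggle after
// the previous handoff, drains log[1]. Keeping two separate toggles means
// neither side ever reads a field the other side mutates.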

var (
	cpuprofLock mutex
	cpuprof     *cpuProfile

	// eod is the end-of-data marker: a record with count 0,
	// depth 1, and a single zero "PC".
	eod = [3]uintptr{0, 1, 0}
)

func setcpuprofilerate(hz int32) {
	systemstack(func() {
		setcpuprofilerate_m(hz)
	})
}

// lostProfileData is a no-op function used in profiles
// to mark the number of profiling stack traces that were
// discarded due to slow data writers.
func lostProfileData() {}

// SetCPUProfileRate sets the CPU profiling rate to hz samples per second.
// If hz <= 0, SetCPUProfileRate turns off profiling.
// If the profiler is on, the rate cannot be changed without first turning it off.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.cpuprofile flag instead of calling
// SetCPUProfileRate directly.
func SetCPUProfileRate(hz int) {
	// Clamp hz to something reasonable.
	if hz < 0 {
		hz = 0
	}
	if hz > 1000000 {
		hz = 1000000
	}

	lock(&cpuprofLock)
	if hz > 0 {
		if cpuprof == nil {
			cpuprof = (*cpuProfile)(sysAlloc(unsafe.Sizeof(cpuProfile{}), &memstats.other_sys))
			if cpuprof == nil {
				print("runtime: cpu profiling cannot allocate memory\n")
				unlock(&cpuprofLock)
				return
			}
		}
		if cpuprof.on || cpuprof.handoff != 0 {
			print("runtime: cannot set cpu profile rate until previous profile has finished.\n")
			unlock(&cpuprofLock)
			return
		}

		cpuprof.on = true
		// pprof binary header format.
		// http://code.google.com/p/google-perftools/source/browse/trunk/src/profiledata.cc#117
		p := &cpuprof.log[0]
		p[0] = 0                 // count for header
		p[1] = 3                 // depth for header
		p[2] = 0                 // version number
		p[3] = uintptr(1e6 / hz) // period (microseconds)
		p[4] = 0
		cpuprof.nlog = 5
		cpuprof.toggle = 0
		cpuprof.wholding = false
		cpuprof.wtoggle = 0
		cpuprof.flushing = false
		cpuprof.eodSent = false
		noteclear(&cpuprof.wait)

		setcpuprofilerate(int32(hz))
	} else if cpuprof != nil && cpuprof.on {
		setcpuprofilerate(0)
		cpuprof.on = false

		// Now add is not running anymore, and getprofile owns the entire log.
		// Set the high bit in cpuprof.handoff to tell getprofile.
		for {
			n := cpuprof.handoff
			if n&0x80000000 != 0 {
				print("runtime: setcpuprofile(off) twice\n")
			}
			if atomic.Cas(&cpuprof.handoff, n, n|0x80000000) {
				if n == 0 {
					// we did the transition from 0 -> nonzero so we wake getprofile
					notewakeup(&cpuprof.wait)
				}
				break
			}
		}
	}
	unlock(&cpuprofLock)
}
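
// parseProfileHeader is an illustrative sketch, not part of the runtime:
// it shows how a consumer of the binary stream would validate the legacy
// pprof header written above (count 0, depth 3, version 0, sampling
// period in microseconds, then one zero pad word).
func parseProfileHeader(words []uintptr) (periodMicros uintptr, ok bool) {
	if len(words) < 5 || words[0] != 0 || words[1] != 3 || words[2] != 0 {
		return 0, false
	}
	return words[3], true
}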

// add adds the stack trace to the profile.
// It is called from signal handlers and other limited environments
// and cannot allocate memory or acquire locks that might be
// held at the time of the signal, nor can it use substantial amounts
// of stack. It is allowed to call evict.
func (p *cpuProfile) add(pc []uintptr) {
	if len(pc) > maxCPUProfStack {
		pc = pc[:maxCPUProfStack]
	}

	// Compute hash: rotate h left by one byte, then mix in each PC.
	h := uintptr(0)
	for _, x := range pc {
		h = h<<8 | (h >> (8 * (unsafe.Sizeof(h) - 1)))
		h += x * 41
	}
	p.count++

	// Add to entry count if already present in table.
	b := &p.hash[h%numBuckets]
Assoc:
	for i := range b.entry {
		e := &b.entry[i]
		if e.depth != len(pc) {
			continue
		}
		for j := range pc {
			if e.stack[j] != pc[j] {
				continue Assoc
			}
		}
		e.count++
		return
	}

	// Evict entry with smallest count.
	var e *cpuprofEntry
	for i := range b.entry {
		if e == nil || b.entry[i].count < e.count {
			e = &b.entry[i]
		}
	}
	if e.count > 0 {
		if !p.evict(e) {
			// Could not evict entry. Record lost stack.
			p.lost++
			return
		}
		p.evicts++
	}

	// Reuse the newly evicted entry.
	e.depth = len(pc)
	e.count = 1
	copy(e.stack[:], pc)
}
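
// For example, if a bucket's four entries have counts 5, 2, 9, and 1,
// a miss in that bucket evicts the count-1 entry: its record is copied
// to the log by evict and its slot is reused for the new trace.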

// evict copies the given entry's data into the log, so that
// the entry can be reused. evict is called from add, which
// is called from the profiling signal handler, so it must not
// allocate memory or block. It is safe to call flushlog.
// evict returns true if the entry was copied to the log,
// false if there was no room available.
func (p *cpuProfile) evict(e *cpuprofEntry) bool {
	d := e.depth
	nslot := d + 2
	log := &p.log[p.toggle]
	if p.nlog+nslot > len(log) {
		if !p.flushlog() {
			return false
		}
		log = &p.log[p.toggle]
	}

	q := p.nlog
	log[q] = e.count
	q++
	log[q] = uintptr(d)
	q++
	copy(log[q:], e.stack[:d])
	q += d
	p.nlog = q
	e.count = 0
	return true
}
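
// appendProfileRecord is an illustrative sketch, not used by the runtime:
// it spells out the record layout that evict produces in the log, namely
// count, then depth, then depth PCs, in d+2 consecutive words.
func appendProfileRecord(log []uintptr, count uintptr, stk []uintptr) []uintptr {
	log = append(log, count, uintptr(len(stk)))
	return append(log, stk...)
}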

// flushlog tries to flush the current log and switch to the other one.
// flushlog is called from evict, called from add, called from the signal handler,
// so it cannot allocate memory or block. It can try to swap logs with
// the writing goroutine, as explained in the comment at the top of this file.
func (p *cpuProfile) flushlog() bool {
	if !atomic.Cas(&p.handoff, 0, uint32(p.nlog)) {
		return false
	}
	notewakeup(&p.wait)

	p.toggle = 1 - p.toggle
	log := &p.log[p.toggle]
	q := 0
	if p.lost > 0 {
		lostPC := funcPC(lostProfileData)
		log[0] = p.lost
		log[1] = 1
		log[2] = lostPC
		q = 3
		p.lost = 0
	}
	p.nlog = q
	return true
}
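
// When ticks were dropped, the fresh half therefore starts with a synthetic
// record: count = number of lost ticks, depth = 1, and a single PC naming
// lostProfileData. In a decoded profile this surfaces as, for example,
// "37 samples in runtime.lostProfileData" (count illustrative), making the
// loss visible instead of silent.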

// getprofile blocks until the next block of profiling data is available
// and returns it as a []byte. It is called from the writing goroutine.
func (p *cpuProfile) getprofile() []byte {
	if p == nil {
		return nil
	}

	if p.wholding {
		// Release previous log to signal handling side.
		// Loop because we are racing against SetCPUProfileRate(0).
		for {
			n := p.handoff
			if n == 0 {
				print("runtime: phase error during cpu profile handoff\n")
				return nil
			}
			if n&0x80000000 != 0 {
				p.wtoggle = 1 - p.wtoggle
				p.wholding = false
				p.flushing = true
				goto Flush
			}
			if atomic.Cas(&p.handoff, n, 0) {
				break
			}
		}
		p.wtoggle = 1 - p.wtoggle
		p.wholding = false
	}

	if p.flushing {
		goto Flush
	}

	if !p.on && p.handoff == 0 {
		return nil
	}

	// Wait for new log.
	notetsleepg(&p.wait, -1)
	noteclear(&p.wait)

	switch n := p.handoff; {
	case n == 0:
		print("runtime: phase error during cpu profile wait\n")
		return nil
	case n == 0x80000000:
		p.flushing = true
		goto Flush
	default:
		n &^= 0x80000000

		// Return new log to caller.
		p.wholding = true

		return uintptrBytes(p.log[p.wtoggle][:n])
	}

	// In flush mode.
	// Add is no longer being called. We own the log.
	// Also, p.handoff is non-zero, so flushlog will return false.
	// Evict the hash table into the log and return it.
Flush:
	for i := range p.hash {
		b := &p.hash[i]
		for j := range b.entry {
			e := &b.entry[j]
			if e.count > 0 && !p.evict(e) {
				// Filled the log. Stop the loop and return what we've got.
				break Flush
			}
		}
	}

	// Return pending log data.
	if p.nlog > 0 {
		// Note that we're using toggle now, not wtoggle,
		// because we're working on the log directly.
		n := p.nlog
		p.nlog = 0
		return uintptrBytes(p.log[p.toggle][:n])
	}

	// Made it through the table without finding anything to log.
	// Finish the profile with an end-of-data marker, sent only once.
	if !p.eodSent {
		// We may not have space to append this to the partial log buf,
		// so we always return a new slice for the end-of-data marker.
		p.eodSent = true
		return uintptrBytes(eod[:])
	}

	// Finally done. Clean up and return nil.
	p.flushing = false
	if !atomic.Cas(&p.handoff, p.handoff, 0) {
		print("runtime: profile flush racing with something\n")
	}
	return nil
}

func uintptrBytes(p []uintptr) (ret []byte) {
	pp := (*slice)(unsafe.Pointer(&p))
	rp := (*slice)(unsafe.Pointer(&ret))

	rp.array = pp.array
	rp.len = pp.len * int(unsafe.Sizeof(p[0]))
	rp.cap = rp.len

	return
}
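
// For example, on a 64-bit system uintptrBytes of a []uintptr with length
// 100 yields a []byte with length 800 that shares the same backing array;
// no data is copied.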

// CPUProfile returns the next chunk of binary CPU profiling stack trace data,
// blocking until data is available. If profiling is turned off and all the profile
// data accumulated while it was on has been returned, CPUProfile returns nil.
// The caller must save the returned data before calling CPUProfile again.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.cpuprofile flag instead of calling
// CPUProfile directly.
func CPUProfile() []byte {
	return cpuprof.getprofile()
}
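
// An illustrative reading loop (a sketch of how a caller such as
// runtime/pprof's writer goroutine drains the profile; w is any io.Writer
// supplied by that caller):
//
//	for {
//		data := CPUProfile()
//		if data == nil {
//			break // profiling stopped; all data has been returned
//		}
//		w.Write(data)
//	}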

//go:linkname runtime_pprof_runtime_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond
func runtime_pprof_runtime_cyclesPerSecond() int64 {
	return tickspersecond()
}