// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc., and writes them to a buffer
// in a compact form. A precise nanosecond-precision timestamp and a stack
// trace are captured for most events.
// See https://golang.org/s/go15trace for more info.
16 "runtime/internal/atomic"

// Event types in the trace, args are given in square brackets.
const (
	traceEvNone           = 0  // unused
	traceEvBatch          = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency      = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack          = 3  // stack [stack id, number of PCs, array of PCs]
	traceEvGomaxprocs     = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart      = 5  // start of P [timestamp, thread id]
	traceEvProcStop       = 6  // stop of P [timestamp]
	traceEvGCStart        = 7  // GC start [timestamp, stack id]
	traceEvGCDone         = 8  // GC done [timestamp]
	traceEvGCScanStart    = 9  // GC scan start [timestamp]
	traceEvGCScanDone     = 10 // GC scan done [timestamp]
	traceEvGCSweepStart   = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone    = 12 // GC sweep done [timestamp]
	traceEvGoCreate       = 13 // goroutine creation [timestamp, new goroutine id, start PC, stack id]
	traceEvGoStart        = 14 // goroutine starts running [timestamp, goroutine id]
	traceEvGoEnd          = 15 // goroutine ends [timestamp]
	traceEvGoStop         = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched        = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt      = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep        = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock        = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock      = 21 // goroutine is unblocked [timestamp, goroutine id, stack]
	traceEvGoBlockSend    = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv    = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect  = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync    = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond    = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet     = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall      = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit      = 29 // syscall exit [timestamp, goroutine id, real timestamp]
	traceEvGoSysBlock     = 30 // syscall blocks [timestamp]
	traceEvGoWaiting      = 31 // denotes that goroutine is blocked when tracing starts [goroutine id]
	traceEvGoInSyscall    = 32 // denotes that goroutine is in syscall when tracing starts [goroutine id]
	traceEvHeapAlloc      = 33 // memstats.heap_live change [timestamp, heap_alloc]
	traceEvNextGC         = 34 // memstats.next_gc change [timestamp, next_gc]
	traceEvTimerGoroutine = 35 // denotes timer goroutine [timer goroutine id]
	traceEvFutileWakeup   = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
)

const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in fewer bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	// The suggested increment frequency for PowerPC's time base register is
	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
	// and ppc64le.
	// Tracing won't work reliably for architectures where cputicks is emulated
	// by nanotime, so the value doesn't matter for those architectures.
	traceTickDiv = 16 + 48*(goarch_386|goarch_amd64|goarch_amd64p32)
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine is scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting for the end user.
	traceFutileWakeup byte = 128
)
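
// A worked example of the traceTickDiv arithmetic above (illustrative,
// relying on the goarch_* constants being 1 on the matching architecture
// and 0 elsewhere): on 386/amd64, traceTickDiv = 16 + 48*1 = 64, so one
// encoded timestamp unit covers 64 CPU ticks, roughly 20ns on a 3GHz machine.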

// trace is global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock acquisition
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        *g              // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}

var traceseq uint64 // global trace sequence number

// tracestamp returns a consistent sequence number, time stamp pair
// for use in a trace. We need to make sure that time stamp ordering
// (assuming synchronized CPUs) and sequence ordering match.
// To do that, we increment traceseq, grab ticks, and increment traceseq again.
// We treat odd traceseq as a sign that another thread is in the middle
// of the sequence and spin until it is done.
// Not splitting stack to avoid preemption, just in case the call sites
// that used to call xadd64 and cputicks are sensitive to that.
//go:nosplit
func tracestamp() (seq uint64, ts int64) {
	seq = atomic.Load64(&traceseq)
	for seq&1 != 0 || !atomic.Cas64(&traceseq, seq, seq+1) {
		seq = atomic.Load64(&traceseq)
	}
	ts = cputicks()
	// The store makes traceseq even again, releasing the "lock".
	atomic.Store64(&traceseq, seq+2)
	return seq >> 1, ts
}

// traceBufHeader is per-P tracing buffer.
type traceBufHeader struct {
	link      traceBufPtr             // in trace.empty/full
	lastSeq   uint64                  // sequence number of last event
	lastTicks uint64                  // when we wrote the last event
	pos       int                     // next write offset in arr
	stk       [traceStackSize]uintptr // scratch buffer for traceback
}

// traceBuf is per-P tracing buffer.
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying event buffer, written at traceBufHeader.pos
}

// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}

// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world, so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	stopTheWorld("start tracing")

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such a syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		startTheWorld()
		return errorString("tracing is already enabled")
	}

	trace.seqStart, trace.ticksStart = tracestamp()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// To instruct traceEvent that it must not ignore events below, we set startingtrace.
	// trace.enabled is set afterwards once we have emitted all preliminary events.
	_g_ := getg()
	_g_.m.startingtrace = true
	for _, gp := range allgs {
		status := readgstatus(gp)
		if status != _Gdead {
			traceGoCreate(gp, gp.startpc)
		}
		if status == _Gwaiting {
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	}
	traceProcStart()
	traceGoStart()
	_g_.m.startingtrace = false
	trace.enabled = true

	unlock(&trace.bufLock)

	startTheWorld()
	return nil
}

// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stopTheWorld("stop tracing")

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		startTheWorld()
		return
	}

	traceGoSched()

	// Collect all per-P buffers and the global buffer into the full queue.
	for _, p := range &allp {
		if p == nil {
			break
		}
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 && trace.buf.ptr().pos != 0 {
		buf := trace.buf
		trace.buf = 0
		traceFullQueue(buf)
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms, wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	trace.stackTab.dump()

	unlock(&trace.bufLock)

	startTheWorld()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema, false)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range &allp {
		if p != nil && p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 && trace.buf.ptr().pos != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != nil {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}
	trace.shutdown = false
	unlock(&trace.lock)
}

// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != nil {
		// More than one goroutine reads trace. This is bad.
		// But we would rather not crash the program because of tracing,
		// since tracing can be enabled at runtime on production servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.5 trace\x00\x00\x00\x00")
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader = getg()
		goparkunlock(&trace.lock, "trace reader (blocked)", traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		data = traceAppend(data, 0)
		if timers.gp != nil {
			data = append(data, traceEvTimerGoroutine|0<<traceArgCountShift)
			data = traceAppend(data, uint64(timers.gp.goid))
			data = traceAppend(data, 0)
		}
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which race
			// detector does not see. This is required to avoid false
			// race reports on writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also bad, but see the comment above.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}
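
// traceReaderLoop is an illustrative sketch, not used by the runtime, of the
// contract documented on ReadTrace: a single goroutine reads chunks in a loop
// and copies each one before the next call, since the returned slice aliases
// an internal buffer that is recycled. The process callback stands in for
// user code.
func traceReaderLoop(process func([]byte)) {
	for {
		data := ReadTrace()
		if data == nil {
			// Tracing is off and all buffered data has been returned.
			return
		}
		chunk := make([]byte, len(data))
		copy(chunk, data) // must copy before calling ReadTrace again
		process(chunk)
	}
}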

// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == nil || (trace.fullHead == 0 && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	// Re-check under the lock.
	if trace.reader == nil || (trace.fullHead == 0 && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader
	trace.reader = nil
	unlock(&trace.lock)
	return gp
}

// traceProcFree frees trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}

// traceFullQueue queues buf into queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}
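
// Taken together, these helpers give each buffer a simple life cycle:
// traceFlush hands out a buffer from the empty stack (allocating one if
// needed) and queues filled buffers here; ReadTrace dequeues a full buffer
// into trace.reading and recycles it back onto the empty stack on the
// next call.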

// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
// ev is the event type.
// If skip > 0, write current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.
	//
	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceLockBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceLockBuffer.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}
	buf := (*bufp).ptr()
	const maxSize = 2 + 5*traceBytesPerNumber // event type, length, sequence, timestamp, stack id and two additional params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf)).ptr()
		(*bufp).set(buf)
	}

	seq, ticksraw := tracestamp()
	seqDiff := seq - buf.lastSeq
	ticks := uint64(ticksraw) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	if buf.pos == 0 {
		// A fresh buffer: emit a batch header with the absolute pid,
		// sequence number and timestamp; subsequent events store deltas.
		buf.byte(traceEvBatch | 1<<traceArgCountShift)
		buf.varint(uint64(pid))
		buf.varint(seq)
		buf.varint(ticks)
		seqDiff = 0
		tickDiff = 0
	}
	buf.lastSeq = seq
	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for number of arguments.
	// If number is >= 3, then the event type is followed by event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(seqDiff)
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		_g_ := getg()
		gp := mp.curg
		var nstk int
		if gp == _g_ {
			nstk = callers(skip, buf.stk[:])
		} else if gp != nil {
			nstk = gcallers(gp, skip, buf.stk[:])
		}
		if nstk > 0 {
			nstk-- // skip runtime.goexit
		}
		if nstk > 0 && gp.goid == 1 {
			nstk-- // skip runtime.main
		}
		id := trace.stackTab.put(buf.stk[:nstk])
		buf.varint(uint64(id))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
	traceReleaseBuffer(pid)
}
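
// To illustrate the encoding: traceGoSched calls traceEvent(traceEvGoSched, 1),
// so narg is 1 (just the stack id) and the event is the single byte
// 17|1<<traceArgCountShift followed by varints for the sequence delta, the
// timestamp delta and the stack id. Only events with narg == 3 carry an
// explicit length byte.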

// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}

// traceFlush puts buf onto stack of full buffers and returns an empty buffer.
func traceFlush(buf traceBufPtr) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0
	bufp.lastSeq = 0
	bufp.lastTicks = 0
	if dolock {
		unlock(&trace.lock)
	}
	return buf
}

// traceAppend appends v to buf in little-endian-base-128 encoding.
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}
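
// traceReadNumber is an illustrative counterpart to traceAppend, not used by
// the runtime: it decodes one little-endian-base-128 number, returning the
// value and the count of bytes consumed.
func traceReadNumber(buf []byte) (v uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		b := buf[n]
		n++
		v |= uint64(b&0x7f) << shift
		if b < 0x80 { // high bit clear marks the last byte
			return
		}
	}
}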

// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}

// traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns slice of PCs.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		stkpc[i] = pc
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	// Publish the fully initialized record; lock-free readers in find
	// must never observe a partially written traceStack.
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}

// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*ptrSize))
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + traceStackSize) * traceBytesPerNumber]byte
	buf := traceFlush(0).ptr()
	for _, stk := range tab.tab {
		stk := stk.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			maxSize := 1 + (3+stk.n)*traceBytesPerNumber
			if len(buf.arr)-buf.pos < maxSize {
				buf = traceFlush(traceBufPtrOf(buf)).ptr()
			}
			// Form the event in the temp buffer; we need to know the actual length.
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			tmpbuf = traceAppend(tmpbuf, uint64(stk.n))
			for _, pc := range stk.stack() {
				tmpbuf = traceAppend(tmpbuf, uint64(pc))
			}
			// Now copy to the buffer.
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(traceBufPtrOf(buf))
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
}

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head *traceAllocBlock
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
type traceAllocBlock struct {
	next *traceAllocBlock
	data [64<<10 - ptrSize]byte
}

// alloc allocates an n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = round(n, ptrSize)
	if a.head == nil || a.off+n > uintptr(len(a.head.data)) {
		if n > uintptr(len(a.head.data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next = a.head
		a.head = block
		a.off = 0
	}
	p := &a.head.data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != nil {
		block := a.head
		a.head = block.next
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}

// The following functions write specific events to trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func traceGCStart() {
	traceEvent(traceEvGCStart, 3)
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCScanStart() {
	traceEvent(traceEvGCScanStart, -1)
}

func traceGCScanDone() {
	traceEvent(traceEvGCScanDone, -1)
}

func traceGCSweepStart() {
	traceEvent(traceEvGCSweepStart, 1)
}

func traceGCSweepDone() {
	traceEvent(traceEvGCSweepDone, -1)
}

func traceGoCreate(newg *g, pc uintptr) {
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(pc))
}

func traceGoStart() {
	traceEvent(traceEvGoStart, -1, uint64(getg().m.curg.goid))
}

func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	traceEvent(traceEvGoPreempt, 1)
}

func traceGoPark(traceEv byte, skip int, gp *g) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv&^traceFutileWakeup, skip)
}
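
// For illustration: a park site that detects a futile wakeup passes the flag
// alongside the event type, e.g. traceGoPark(traceEvGoBlockSync|traceFutileWakeup, 2, gp),
// which emits a FutileWakeup event followed by the plain GoBlockSync event.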

func traceGoUnpark(gp *g, skip int) {
	traceEvent(traceEvGoUnblock, skip, uint64(gp.goid))
}

func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}

func traceGoSysExit(seq uint64, ts int64) {
	if int64(seq)-int64(trace.seqStart) < 0 {
		// The sequence number and timestamp were obtained during
		// a previous tracing session; ignore the event.
		return
	}
	traceEvent(traceEvGoSysExit, -1, uint64(getg().m.curg.goid), seq, uint64(ts)/traceTickDiv)
}

func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
}

func traceNextGC() {
	traceEvent(traceEvNextGC, -1, memstats.next_gc)
}