1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 //go:build !goexperiment.exectracer2
7 // Go execution tracer.
8 // The tracer captures a wide range of execution events like goroutine
9 // creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
10 // changes of heap size, processor start/stop, etc., and writes them to a buffer
11 // in a compact form. A nanosecond-precision timestamp and a stack
12 // trace are captured for most events.
13 // See https://golang.org/s/go15trace for more info.
21 "runtime/internal/atomic"
22 "runtime/internal/sys"
26 // Event types in the trace; args are given in square brackets.
28 traceEvNone = 0 // unused
29 traceEvBatch = 1 // start of per-P batch of events [pid, timestamp]
30 traceEvFrequency = 2 // contains tracer timer frequency [frequency (ticks per second)]
31 traceEvStack = 3 // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
32 traceEvGomaxprocs = 4 // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
33 traceEvProcStart = 5 // start of P [timestamp, thread id]
34 traceEvProcStop = 6 // stop of P [timestamp]
35 traceEvGCStart = 7 // GC start [timestamp, seq, stack id]
36 traceEvGCDone = 8 // GC done [timestamp]
37 traceEvSTWStart = 9 // STW start [timestamp, kind]
38 traceEvSTWDone = 10 // STW done [timestamp]
39 traceEvGCSweepStart = 11 // GC sweep start [timestamp, stack id]
40 traceEvGCSweepDone = 12 // GC sweep done [timestamp, swept, reclaimed]
41 traceEvGoCreate = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
42 traceEvGoStart = 14 // goroutine starts running [timestamp, goroutine id, seq]
43 traceEvGoEnd = 15 // goroutine ends [timestamp]
44 traceEvGoStop = 16 // goroutine stops (like in select{}) [timestamp, stack]
45 traceEvGoSched = 17 // goroutine calls Gosched [timestamp, stack]
46 traceEvGoPreempt = 18 // goroutine is preempted [timestamp, stack]
47 traceEvGoSleep = 19 // goroutine calls Sleep [timestamp, stack]
48 traceEvGoBlock = 20 // goroutine blocks [timestamp, stack]
49 traceEvGoUnblock = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
50 traceEvGoBlockSend = 22 // goroutine blocks on chan send [timestamp, stack]
51 traceEvGoBlockRecv = 23 // goroutine blocks on chan recv [timestamp, stack]
52 traceEvGoBlockSelect = 24 // goroutine blocks on select [timestamp, stack]
53 traceEvGoBlockSync = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
54 traceEvGoBlockCond = 26 // goroutine blocks on Cond [timestamp, stack]
55 traceEvGoBlockNet = 27 // goroutine blocks on network [timestamp, stack]
56 traceEvGoSysCall = 28 // syscall enter [timestamp, stack]
57 traceEvGoSysExit = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
58 traceEvGoSysBlock = 30 // syscall blocks [timestamp]
59 traceEvGoWaiting = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
60 traceEvGoInSyscall = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
61 traceEvHeapAlloc = 33 // gcController.heapLive change [timestamp, heap_alloc]
62 traceEvHeapGoal = 34 // gcController.heapGoal() (formerly next_gc) change [timestamp, heap goal in bytes]
63 traceEvTimerGoroutine = 35 // not currently used; previously denoted timer goroutine [timer goroutine id]
64 traceEvFutileWakeup = 36 // not currently used; denotes that the previous wakeup of this goroutine was futile [timestamp]
65 traceEvString = 37 // string dictionary entry [ID, length, string]
66 traceEvGoStartLocal = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
67 traceEvGoUnblockLocal = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
68 traceEvGoSysExitLocal = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
69 traceEvGoStartLabel = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
70 traceEvGoBlockGC = 42 // goroutine blocks on GC assist [timestamp, stack]
71 traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
72 traceEvGCMarkAssistDone = 44 // GC mark assist done [timestamp]
73 traceEvUserTaskCreate = 45 // trace.NewTask [timestamp, internal task id, internal parent task id, name string, stack]
74 traceEvUserTaskEnd = 46 // end of a task [timestamp, internal task id, stack]
75 traceEvUserRegion = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), name string, stack]
76 traceEvUserLog = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
77 traceEvCPUSample = 49 // CPU profiling sample [timestamp, real timestamp, real P id (-1 when absent), goroutine id, stack]
79 // A byte is used, but only 6 bits are available for the event type.
80 // The remaining 2 bits are used to specify the number of arguments.
81 // That means the maximum event type value is 63.
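// For illustration, the first byte of every event is ev | narg<<traceArgCountShift
// (see traceEventLocked below); e.g. an event type value of 13 with two encoded
// arguments yields 13 | 2<<6 = 0x8d.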
84 // traceBlockReason is an enumeration of reasons a goroutine might block.
85 // This is the interface the rest of the runtime uses to tell the
86 // tracer why a goroutine blocked. The tracer then propagates this information
87 // into the trace however it sees fit.
89 // Note that traceBlockReasons should not be compared, since reasons that are
90 // distinct by name may *not* be distinct by value.
91 type traceBlockReason uint8
93 // For maximal efficiency, just map the trace block reason directly to a trace event.
96 traceBlockGeneric traceBlockReason = traceEvGoBlock
97 traceBlockForever = traceEvGoStop
98 traceBlockNet = traceEvGoBlockNet
99 traceBlockSelect = traceEvGoBlockSelect
100 traceBlockCondWait = traceEvGoBlockCond
101 traceBlockSync = traceEvGoBlockSync
102 traceBlockChanSend = traceEvGoBlockSend
103 traceBlockChanRecv = traceEvGoBlockRecv
104 traceBlockGCMarkAssist = traceEvGoBlockGC
105 traceBlockGCSweep = traceEvGoBlock
106 traceBlockSystemGoroutine = traceEvGoBlock
107 traceBlockPreempted = traceEvGoBlock
108 traceBlockDebugCall = traceEvGoBlock
109 traceBlockUntilGCEnds = traceEvGoBlock
110 traceBlockSleep = traceEvGoSleep
114 // Timestamps in trace are cputicks/traceTimeDiv.
115 // This makes absolute values of timestamp diffs smaller,
116 // and so they are encoded in fewer bytes.
117 // 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
118 // The suggested increment frequency for PowerPC's time base register is
119 // 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64 and ppc64le.
121 traceTimeDiv = 16 + 48*(goarch.Is386|goarch.IsAmd64)
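// For example, on 386 and amd64 exactly one of goarch.Is386 and goarch.IsAmd64
// is 1, so traceTimeDiv = 16 + 48*1 = 64; on every other architecture both are
// 0 and traceTimeDiv = 16.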
122 // Maximum number of PCs in a single stack trace.
123 // Since events contain only stack id rather than whole stack trace,
124 // we can allow quite large values here.
126 // Identifier of a fake P that is used when we trace without a real P.
128 // Maximum number of bytes to encode uint64 in base-128.
129 traceBytesPerNumber = 10
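// A uint64 has 64 significant bits and base-128 encoding stores 7 bits per
// byte, so the worst case is ceil(64/7) = 10 bytes.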
130 // Shift of the number of arguments in the first event byte.
131 traceArgCountShift = 6
134 // trace is the global tracing context.
136 // trace.lock must only be acquired on the system stack where
137 // stack splits cannot happen while it is held.
138 lock mutex // protects the following members
139 enabled bool // when set runtime traces events
140 shutdown bool // set when we are waiting for trace reader to finish after setting enabled to false
141 headerWritten bool // whether ReadTrace has emitted trace header
142 footerWritten bool // whether ReadTrace has emitted trace footer
143 shutdownSema uint32 // used to wait for ReadTrace completion
144 seqStart uint64 // sequence number when tracing was started
145 startTicks int64 // cputicks when tracing was started
146 endTicks int64 // cputicks when tracing was stopped
147 startNanotime int64 // nanotime when tracing was started
148 endNanotime int64 // nanotime when tracing was stopped
149 startTime traceTime // traceClockNow when tracing started
150 endTime traceTime // traceClockNow when tracing stopped
151 seqGC uint64 // GC start/done sequencer
152 reading traceBufPtr // buffer currently handed off to user
153 empty traceBufPtr // stack of empty buffers
154 fullHead traceBufPtr // queue of full buffers
156 stackTab traceStackTable // maps stack traces to unique ids
157 // cpuLogRead accepts CPU profile samples from the signal handler where
158 // they're generated. It uses a two-word header to hold the IDs of the P and
159 // G (respectively) that were active at the time of the sample. Because
160 // profBuf uses a record with all zeros in its header to indicate overflow,
161 // we make sure to make the P field always non-zero: The ID of a real P will
162 // start at bit 1, and bit 0 will be set. Samples that arrive while no P is
163 // running (such as near syscalls) will set the first header field to 0b10.
164 // This careful handling of the first header field allows us to store the ID of
165 // the active G directly in the second field, even though that will be 0 when sampling g0.
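// For illustration: a sample taken while P 3 was running is stored with
// hdr[0] = 3<<1 | 1 = 0b111, a sample with no P with hdr[0] = 0b10, and in
// both cases hdr[1] holds the goroutine ID (which may be 0).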
168 // cpuLogBuf is a trace buffer to hold events corresponding to CPU profile
169 // samples, which arrive out of band and not directly connected to a specific P.
171 cpuLogBuf traceBufPtr
173 reader atomic.Pointer[g] // goroutine that called ReadTrace, or nil
175 signalLock atomic.Uint32 // protects use of the following member, only usable in signal handlers
176 cpuLogWrite *profBuf // copy of cpuLogRead for use in signal handlers, set without signalLock
178 // Dictionary for traceEvString.
180 // TODO: central lock to access the map is not ideal.
181 // option: pre-assign ids to all user annotation region names and tags
182 // option: per-P cache
183 // option: sync.Map like data structure
185 strings map[string]uint64
188 // markWorkerLabels maps gcMarkWorkerMode to string ID.
189 markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64
191 bufLock mutex // protects buf
192 buf traceBufPtr // global trace buffer, used when running without a p
195 // gTraceState is per-G state for the tracer.
196 type gTraceState struct {
197 sysExitTime traceTime // timestamp when syscall has returned
198 tracedSyscallEnter bool // syscall or cgo was entered while trace was enabled or StartTrace has emitted EvGoInSyscall about this goroutine
199 seq uint64 // trace event sequencer
200 lastP puintptr // last P that emitted an event for this goroutine
203 // Unused; for compatibility with the new tracer.
204 func (s *gTraceState) reset() {}
206 // mTraceState is per-M state for the tracer.
207 type mTraceState struct {
208 startingTrace bool // this M is in StartTrace, potentially before traceEnabled is true
209 tracedSTWStart bool // this M traced a STW start, so it should trace an end
212 // pTraceState is per-P state for the tracer.
213 type pTraceState struct {
216 // inSweep indicates the sweep events should be traced.
217 // This is used to defer the sweep start event until a span
218 // has actually been swept.
221 // swept and reclaimed track the number of bytes swept and reclaimed
222 // by sweeping in the current sweep loop (while inSweep was true).
223 swept, reclaimed uintptr
226 // traceLockInit initializes global trace locks.
227 func traceLockInit() {
228 lockInit(&trace.bufLock, lockRankTraceBuf)
229 lockInit(&trace.stringsLock, lockRankTraceStrings)
230 lockInit(&trace.lock, lockRankTrace)
231 lockInit(&trace.stackTab.lock, lockRankTraceStackTab)
234 // traceBufHeader is per-P tracing buffer.
235 type traceBufHeader struct {
236 link traceBufPtr // in trace.empty/full
237 lastTime traceTime // when we wrote the last event
238 pos int // next write offset in arr
239 stk [traceStackSize]uintptr // scratch buffer for traceback
242 // traceBuf is per-P tracing buffer.
243 type traceBuf struct {
246 arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
249 // traceBufPtr is a *traceBuf that is not traced by the garbage
250 // collector and doesn't have write barriers. traceBufs are not
251 // allocated from the GC'd heap, so this is safe, and are often
252 // manipulated in contexts where write barriers are not allowed, so
253 // this is necessary.
255 // TODO: Since traceBuf is now embedded runtime/internal/sys.NotInHeap, this isn't necessary.
256 type traceBufPtr uintptr
258 func (tp traceBufPtr) ptr() *traceBuf { return (*traceBuf)(unsafe.Pointer(tp)) }
259 func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
260 func traceBufPtrOf(b *traceBuf) traceBufPtr {
261 return traceBufPtr(unsafe.Pointer(b))
264 // traceEnabled returns true if the trace is currently enabled.
266 // nosplit because it's called on the syscall path when stack movement is forbidden.
269 func traceEnabled() bool {
273 // traceShuttingDown returns true if the trace is currently shutting down.
276 func traceShuttingDown() bool {
277 return trace.shutdown
280 // traceLocker represents an M writing trace events. While a traceLocker value
281 // is valid, the tracer observes all operations on the G/M/P or trace events being
282 // written as happening atomically.
284 // This doesn't do much for the current tracer, because the current tracer doesn't
285 // need atomicity around non-trace runtime operations. All the state it needs it
286 // collects carefully during a STW.
287 type traceLocker struct {
291 // traceAcquire prepares this M for writing one or more trace events.
293 // This exists for compatibility with the upcoming new tracer; it doesn't do much
294 // in the current tracer.
296 // nosplit because it's called on the syscall path when stack movement is forbidden.
299 func traceAcquire() traceLocker {
301 return traceLocker{false}
303 return traceLocker{true}
306 // ok returns true if the traceLocker is valid (i.e. tracing is enabled).
308 // nosplit because it's called on the syscall path when stack movement is forbidden.
311 func (tl traceLocker) ok() bool {
315 // traceRelease indicates that this M is done writing trace events.
317 // This exists for compatibility with the upcoming new tracer; it doesn't do anything
318 // in the current tracer.
320 // nosplit because it's called on the syscall path when stack movement is forbidden.
323 func traceRelease(tl traceLocker) {
326 // StartTrace enables tracing for the current process.
327 // While tracing, the data will be buffered and available via ReadTrace.
328 // StartTrace returns an error if tracing is already enabled.
329 // Most clients should use the runtime/trace package or the testing package's
330 // -test.trace flag instead of calling StartTrace directly.
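//
// For illustration only (most clients should use runtime/trace), a minimal
// consumer loop might look like the following, where w stands in for some
// destination io.Writer:
//
//	if err := StartTrace(); err != nil {
//		return err // tracing was already enabled
//	}
//	go func() {
//		for {
//			data := ReadTrace()
//			if data == nil {
//				return // tracing stopped and all buffered data was read
//			}
//			w.Write(data) // data must be consumed before the next ReadTrace call
//		}
//	}()
//	// ... run the workload to be traced ...
//	StopTrace() // returns only after the reader has drained all buffers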
331 func StartTrace() error {
332 // Stop the world so that we can take a consistent snapshot
333 // of all goroutines at the beginning of the trace.
334 // Do not stop the world during GC so we ensure we always see
335 // a consistent view of GC-related events (e.g. a start is always
336 // paired with an end).
337 stopTheWorldGC(stwStartTrace)
339 // Prevent sysmon from running any code that could generate events.
340 lock(&sched.sysmonlock)
342 // We are in stop-the-world, but syscalls can finish and write to trace concurrently.
343 // Exitsyscall could check trace.enabled long before and then suddenly wake up
344 // and decide to write to trace at a random point in time.
345 // However, such a syscall will use the global trace.buf buffer, because we've
346 // acquired all p's by doing stop-the-world. So this protects us from such races.
349 if trace.enabled || trace.shutdown {
350 unlock(&trace.bufLock)
351 unlock(&sched.sysmonlock)
353 return errorString("tracing is already enabled")
356 // Can't set trace.enabled yet. While the world is stopped, exitsyscall could
357 // already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
358 // That would lead to an inconsistent trace:
359 // - either GoSysExit appears before EvGoInSyscall,
360 // - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
361 // To instruct traceEvent that it must not ignore events below, we set trace.startingTrace.
362 // trace.enabled is set afterwards once we have emitted all preliminary events.
364 mp.trace.startingTrace = true
366 // Obtain current stack ID to use in all traceEvGoCreate events below.
367 stkBuf := make([]uintptr, traceStackSize)
368 stackID := traceStackID(mp, stkBuf, 2)
370 profBuf := newProfBuf(2, profBufWordCount, profBufTagCount) // after the timestamp, header is [pp.id, gp.goid]
371 trace.cpuLogRead = profBuf
373 // We must not acquire trace.signalLock outside of a signal handler: a
374 // profiling signal may arrive at any time and try to acquire it, leading to
375 // deadlock. Because we can't use that lock to protect updates to
376 // trace.cpuLogWrite (only use of the structure it references), reads and
377 // writes of the pointer must be atomic. (And although this field is never
378 // the sole pointer to the profBuf value, it's best to allow a write barrier here.)
380 atomicstorep(unsafe.Pointer(&trace.cpuLogWrite), unsafe.Pointer(profBuf))
382 // World is stopped, no need to lock.
383 forEachGRace(func(gp *g) {
384 status := readgstatus(gp)
385 if status != _Gdead {
387 gp.trace.lastP = getg().m.p
388 // +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
389 id := trace.stackTab.put([]uintptr{logicalStackSentinel, startPCforTrace(gp.startpc) + sys.PCQuantum})
390 traceEvent(traceEvGoCreate, -1, gp.goid, uint64(id), stackID)
392 if status == _Gwaiting {
393 // traceEvGoWaiting is implied to have seq=1.
395 traceEvent(traceEvGoWaiting, -1, gp.goid)
397 if status == _Gsyscall {
399 gp.trace.tracedSyscallEnter = true
400 traceEvent(traceEvGoInSyscall, -1, gp.goid)
401 } else if status == _Gdead && gp.m != nil && gp.m.isextra {
402 // Trigger two trace events for the dead g in the extra m,
403 // since the next event of the g will be traceEvGoSysExit in exitsyscall,
404 // while calling from a C thread into Go.
406 gp.trace.lastP = getg().m.p
407 // +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
408 id := trace.stackTab.put([]uintptr{logicalStackSentinel, startPCforTrace(0) + sys.PCQuantum}) // no start pc
409 traceEvent(traceEvGoCreate, -1, gp.goid, uint64(id), stackID)
411 gp.trace.tracedSyscallEnter = true
412 traceEvent(traceEvGoInSyscall, -1, gp.goid)
414 // We need to explicitly clear the flag. A previous trace might have ended with a goroutine
415 // not emitting a GoSysExit and clearing the flag, leaving it in a stale state. Clearing
416 // it here makes it unambiguous to any goroutine exiting a syscall racing with us that
417 // no EvGoInSyscall event was emitted for it. (It's not racy to set this flag here, because
418 // it'll only get checked when the goroutine runs again, which will be after the world starts again.)
420 gp.trace.tracedSyscallEnter = false
423 // Use a dummy traceLocker. The trace isn't enabled yet, but we can still write events.
427 // Note: startTicks needs to be set after we emit traceEvGoInSyscall events.
428 // If we do it the other way around, it is possible that exitsyscall will
429 // query sysExitTime after startTicks but before traceEvGoInSyscall timestamp.
430 // It will lead to a false conclusion that cputicks is broken.
431 trace.startTime = traceClockNow()
432 trace.startTicks = cputicks()
433 trace.startNanotime = nanotime()
434 trace.headerWritten = false
435 trace.footerWritten = false
437 // string to id mapping
438 // 0 : reserved for an empty string
439 // remaining: other strings registered by traceString
441 trace.strings = make(map[string]uint64)
444 mp.trace.startingTrace = false
447 // Register runtime goroutine labels.
448 _, pid, bufp := traceAcquireBuffer()
449 for i, label := range gcMarkWorkerModeStrings[:] {
450 trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
452 traceReleaseBuffer(mp, pid)
454 unlock(&trace.bufLock)
456 unlock(&sched.sysmonlock)
458 // Record the current state of HeapGoal to avoid information loss in trace.
460 // Use the same dummy trace locker. The trace can't end until after we start
461 // the world, and we can safely trace from here.
468 // StopTrace stops tracing, if it was previously enabled.
469 // StopTrace only returns after all the reads for the trace have completed.
471 // Stop the world so that we can collect the trace buffers from all p's below,
472 // and also to avoid races with traceEvent.
473 stopTheWorldGC(stwStopTrace)
475 // See the comment in StartTrace.
476 lock(&sched.sysmonlock)
478 // See the comment in StartTrace.
482 unlock(&trace.bufLock)
483 unlock(&sched.sysmonlock)
488 // Trace GoSched for us, and use a dummy locker. The world is stopped
489 // and we control whether the trace is enabled, so this is safe.
493 atomicstorep(unsafe.Pointer(&trace.cpuLogWrite), nil)
494 trace.cpuLogRead.close()
497 // Loop over all allocated Ps because dead Ps may still have trace buffers.
499 for _, p := range allp[:cap(allp)] {
509 if buf.ptr().pos != 0 {
513 if trace.cpuLogBuf != 0 {
514 buf := trace.cpuLogBuf
516 if buf.ptr().pos != 0 {
521 // Wait for startNanotime != endNanotime. On Windows the default interval between
522 // system clock ticks is typically between 1 and 15 milliseconds, which may not
523 // have passed since the trace started. Without nanotime moving forward, trace
524 // tooling has no way of identifying how much real time each cputicks time delta represents.
527 trace.endTime = traceClockNow()
528 trace.endTicks = cputicks()
529 trace.endNanotime = nanotime()
531 if trace.endNanotime != trace.startNanotime || faketime != 0 {
537 trace.enabled = false
538 trace.shutdown = true
539 unlock(&trace.bufLock)
541 unlock(&sched.sysmonlock)
545 // The world is started but we've set trace.shutdown, so new tracing can't start.
546 // Wait for the trace reader to flush pending buffers and stop.
547 semacquire(&trace.shutdownSema)
549 raceacquire(unsafe.Pointer(&trace.shutdownSema))
553 // The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
555 for _, p := range allp[:cap(allp)] {
556 if p.trace.buf != 0 {
557 throw("trace: non-empty trace buffer in proc")
561 throw("trace: non-empty global trace buffer")
563 if trace.fullHead != 0 || trace.fullTail != 0 {
564 throw("trace: non-empty full trace buffer")
566 if trace.reading != 0 || trace.reader.Load() != nil {
567 throw("trace: reading after shutdown")
569 for trace.empty != 0 {
571 trace.empty = buf.ptr().link
572 sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
575 trace.shutdown = false
576 trace.cpuLogRead = nil
581 // ReadTrace returns the next chunk of binary tracing data, blocking until data
582 // is available. If tracing is turned off and all the data accumulated while it
583 // was on has been returned, ReadTrace returns nil. The caller must copy the
584 // returned data before calling ReadTrace again.
585 // ReadTrace must be called from one goroutine at a time.
586 func ReadTrace() []byte {
591 buf, park = readTrace0()
594 gopark(func(gp *g, _ unsafe.Pointer) bool {
595 if !trace.reader.CompareAndSwapNoWB(nil, gp) {
596 // We're racing with another reader.
597 // Wake up and handle this case.
601 if g2 := traceReader(); gp == g2 {
602 // New data arrived between unlocking
603 // and the CAS and we won the wake-up
604 // race, so wake up directly.
606 } else if g2 != nil {
608 println("runtime: got trace reader", g2, g2.goid)
609 throw("unexpected trace reader")
613 }, nil, waitReasonTraceReaderBlocked, traceBlockSystemGoroutine, 2)
620 // readTrace0 is ReadTrace's continuation on g0. This must run on the
621 // system stack because it acquires trace.lock.
624 func readTrace0() (buf []byte, park bool) {
626 // g0 doesn't have a race context. Borrow the user G's.
627 if getg().racectx != 0 {
628 throw("expected racectx == 0")
630 getg().racectx = getg().m.curg.racectx
631 // (This defer should get open-coded, which is safe on
632 // the system stack.)
633 defer func() { getg().racectx = 0 }()
636 // Optimistically look for CPU profile samples. This may write new stack
637 // records, and may write new tracing buffers. This must be done with the
638 // trace lock not held. footerWritten and shutdown are safe to access
639 // here. They are only mutated by this goroutine or during a STW.
640 if !trace.footerWritten && !trace.shutdown {
644 // This function must not allocate while holding trace.lock:
645 // allocation can call into the heap allocator, which will try to emit a trace
646 // event while holding the heap lock.
649 if trace.reader.Load() != nil {
650 // More than one goroutine reads trace. This is bad.
651 // But we would rather not crash the program because of tracing,
652 // because tracing can be enabled at runtime on prod servers.
654 println("runtime: ReadTrace called from multiple goroutines simultaneously")
657 // Recycle the old buffer.
658 if buf := trace.reading; buf != 0 {
659 buf.ptr().link = trace.empty
663 // Write trace header.
664 if !trace.headerWritten {
665 trace.headerWritten = true
667 return []byte("go 1.21 trace\x00\x00\x00"), false
669 // Wait for new data.
670 if trace.fullHead == 0 && !trace.shutdown {
671 // We don't simply use a note because the scheduler
672 // executes this goroutine directly when it wakes up
673 // (also a note would consume an M).
678 assertLockHeld(&trace.lock)
680 if trace.fullHead != 0 {
681 buf := traceFullDequeue()
684 return buf.ptr().arr[:buf.ptr().pos], false
687 // Write footer with timer frequency.
688 if !trace.footerWritten {
689 trace.footerWritten = true
690 freq := (float64(trace.endTicks-trace.startTicks) / traceTimeDiv) / (float64(trace.endNanotime-trace.startNanotime) / 1e9)
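// freq is the trace clock's tick rate in ticks per second. As a rough example,
// with a 3GHz cputicks source on amd64 (traceTimeDiv = 64) this works out to
// roughly 3e9/64, i.e. about 47 million ticks per second.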
692 throw("trace: ReadTrace got invalid frequency")
696 // Write frequency event.
697 bufp := traceFlush(0, 0)
699 buf.byte(traceEvFrequency | 0<<traceArgCountShift)
700 buf.varint(uint64(freq))
703 // This will emit a bunch of full buffers, we will pick them up
704 // on the next iteration.
705 bufp = trace.stackTab.dump(bufp)
707 // Flush final buffer.
710 goto newFull // trace.lock should be held at newFull
716 // Model synchronization on trace.shutdownSema, which the race
717 // detector does not see. This is required to avoid false
718 // race reports on the writer passed to trace.Start.
719 racerelease(unsafe.Pointer(&trace.shutdownSema))
721 // trace.enabled is already reset, so we can call traceable functions.
722 semrelease(&trace.shutdownSema)
725 // Also bad, but see the comment above.
727 println("runtime: spurious wakeup of trace reader")
731 // traceReader returns the trace reader that should be woken up, if any.
732 // Callers should first check that trace.enabled or trace.shutdown is set.
734 // This must run on the system stack because it acquires trace.lock.
737 func traceReader() *g {
738 // Optimistic check first
739 if traceReaderAvailable() == nil {
743 gp := traceReaderAvailable()
744 if gp == nil || !trace.reader.CompareAndSwapNoWB(gp, nil) {
752 // traceReaderAvailable returns the trace reader if it is not currently
753 // scheduled and should be. Callers should first check that trace.enabled
754 // or trace.shutdown is set.
755 func traceReaderAvailable() *g {
756 if trace.fullHead != 0 || trace.shutdown {
757 return trace.reader.Load()
762 // traceProcFree frees trace buffer associated with pp.
764 // This must run on the system stack because it acquires trace.lock.
767 func traceProcFree(pp *p) {
778 // traceThreadDestroy is a no-op. It exists as a stub to support the new tracer.
780 // This must run on the system stack, just to match the new tracer.
781 func traceThreadDestroy(_ *m) {
782 // No-op in old tracer.
785 // traceFullQueue queues buf into queue of full buffers.
786 func traceFullQueue(buf traceBufPtr) {
788 if trace.fullHead == 0 {
791 trace.fullTail.ptr().link = buf
796 // traceFullDequeue dequeues from queue of full buffers.
797 func traceFullDequeue() traceBufPtr {
798 buf := trace.fullHead
802 trace.fullHead = buf.ptr().link
803 if trace.fullHead == 0 {
810 // traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
812 // If skip > 0, write current stack id as the last argument (skipping skip top frames).
813 // If skip = 0, this event type should contain a stack, but we don't want
814 // to collect and remember it for this particular call.
815 func traceEvent(ev byte, skip int, args ...uint64) {
816 mp, pid, bufp := traceAcquireBuffer()
817 // Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
818 // This protects from races between traceEvent and StartTrace/StopTrace.
820 // The caller checked that trace.enabled == true, but trace.enabled might have been
821 // turned off between the check and now. Check again. traceLockBuffer did mp.locks++,
822 // StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
823 // so if we see trace.enabled == true now, we know it's true for the rest of the function.
824 // Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
825 // during tracing in exitsyscall is resolved by locking trace.bufLock in traceLockBuffer.
827 // Note trace_userTaskCreate runs the same check.
828 if !trace.enabled && !mp.trace.startingTrace {
829 traceReleaseBuffer(mp, pid)
834 if getg() == mp.curg {
835 skip++ // +1 because stack is captured in traceEventLocked.
838 traceEventLocked(0, mp, pid, bufp, ev, 0, skip, args...)
839 traceReleaseBuffer(mp, pid)
842 // traceEventLocked writes a single event of type ev to the trace buffer bufp,
843 // flushing the buffer if necessary. pid is the id of the current P, or
844 // traceGlobProc if we're tracing without a real P.
846 // Preemption is disabled, and if running without a real P the global tracing buffer is locked.
849 // Event types that do not include a stack set skip to -1. Event types that
850 // include a stack may explicitly reference a stackID from the trace.stackTab
851 // (obtained by an earlier call to traceStackID). Without an explicit stackID,
852 // this function will automatically capture the stack of the goroutine currently
853 // running on mp, skipping skip top frames or, if skip is 0, writing out an
854 // empty stack record.
856 // It records the event's args to the traceBuf, and also makes an effort to
857 // reserve extraBytes bytes of additional space immediately following the event,
858 // in the same traceBuf.
859 func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, stackID uint32, skip int, args ...uint64) {
861 // TODO: test on non-zero extraBytes param.
862 maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two add params
863 if buf == nil || len(buf.arr)-buf.pos < maxSize {
865 buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
870 ts := traceClockNow()
871 if ts <= buf.lastTime {
872 ts = buf.lastTime + 1
874 tsDiff := uint64(ts - buf.lastTime)
876 narg := byte(len(args))
877 if stackID != 0 || skip >= 0 {
880 // We have only 2 bits for number of arguments.
881 // If number is >= 3, then the event type is followed by event length in bytes.
886 buf.byte(ev | narg<<traceArgCountShift)
889 // Reserve the byte for length assuming that length < 128.
891 lenp = &buf.arr[buf.pos-1]
894 for _, a := range args {
898 buf.varint(uint64(stackID))
899 } else if skip == 0 {
902 buf.varint(traceStackID(mp, buf.stk[:], skip))
904 evSize := buf.pos - startPos
905 if evSize > maxSize {
906 throw("invalid length of trace event")
909 // Fill in actual length.
910 *lenp = byte(evSize - 2)
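// To illustrate the encoding above: an event with two inline arguments and no
// stack (narg == 2) is laid out as
//
//	[ev | 2<<traceArgCountShift] [tsDiff varint] [arg0 varint] [arg1 varint]
//
// Only events whose encoded narg is 3 (three or more values) also carry the
// extra length byte filled in through lenp above.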
914 // traceCPUSample writes a CPU profile sample stack to the execution tracer's
915 // profiling buffer. It is called from a signal handler, so is limited in what it can do.
917 func traceCPUSample(gp *g, _ *m, pp *p, stk []uintptr) {
919 // Tracing is usually turned off; don't spend time acquiring the signal
920 // lock unless it's active.
924 // Match the clock used in traceEventLocked
925 now := traceClockNow()
926 // The "header" here is the ID of the P that was running the profiled code,
927 // followed by the ID of the goroutine. (For normal CPU profiling, it's
928 // usually the number of samples with the given stack.) Near syscalls, pp
929 // may be nil. Reporting goid of 0 is fine for either g0 or a nil gp.
932 // Overflow records in profBuf have all header values set to zero. Make
933 // sure that real headers have at least one bit set.
934 hdr[0] = uint64(pp.id)<<1 | 0b1
942 // Allow only one writer at a time
943 for !trace.signalLock.CompareAndSwap(0, 1) {
944 // TODO: Is it safe to osyield here? https://go.dev/issue/52672
948 if log := (*profBuf)(atomic.Loadp(unsafe.Pointer(&trace.cpuLogWrite))); log != nil {
949 // Note: we don't pass a tag pointer here (how should profiling tags
950 // interact with the execution tracer?), but if we did we'd need to be
951 // careful about write barriers. See the long comment in profBuf.write.
952 log.write(nil, int64(now), hdr[:], stk)
955 trace.signalLock.Store(0)
958 func traceReadCPU() {
959 bufp := &trace.cpuLogBuf
962 data, tags, _ := trace.cpuLogRead.read(profBufNonBlocking)
967 if len(data) < 4 || data[0] > uint64(len(data)) {
968 break // truncated profile
970 if data[0] < 4 || tags != nil && len(tags) < 1 {
971 break // malformed profile
974 break // mismatched profile records and tags
978 if hasP := (data[2] & 0b1) != 0; !hasP {
982 stk := data[4:data[0]]
983 empty := len(stk) == 1 && data[2] == 0 && data[3] == 0
984 data = data[data[0]:]
985 // No support here for reporting goroutine tags at the moment; if
986 // that information is to be part of the execution trace, we'd
987 // probably want to see when the tags are applied and when they
988 // change, instead of only seeing them when we get a CPU sample.
992 // Looks like an overflow record from the profBuf. Not much to
993 // do here, we only want to report full records.
995 // TODO: should we start a goroutine to drain the profBuf,
996 // rather than relying on a high-enough volume of tracing events
997 // to keep ReadTrace busy? https://go.dev/issue/52674
1003 systemstack(func() {
1004 *bufp = traceFlush(*bufp, 0)
1009 buf.stk[0] = logicalStackSentinel
1010 for ; nstk < len(buf.stk) && nstk-1 < len(stk); nstk++ {
1011 buf.stk[nstk] = uintptr(stk[nstk-1])
1013 stackID := trace.stackTab.put(buf.stk[:nstk])
1015 traceEventLocked(0, nil, 0, bufp, traceEvCPUSample, stackID, 1, timestamp, ppid, goid)
1020 // logicalStackSentinel is a sentinel value at pcBuf[0] signifying that
1021 // pcBuf[1:] holds a logical stack requiring no further processing. Any other
1022 // value at pcBuf[0] represents a skip value to apply to the physical stack in
1023 // pcBuf[1:] after inline expansion.
1024 const logicalStackSentinel = ^uintptr(0)
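// For illustration, the two pcBuf layouts that fpunwindExpand below has to
// handle are roughly:
//
//	[logicalStackSentinel, pc1, pc2, ...] // logical frames, returned as-is (minus the sentinel)
//	[skip, retPC1, retPC2, ...]           // physical frames, inline-expanded with skip applied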
1026 // traceStackID captures a stack trace into pcBuf, registers it in the trace
1027 // stack table, and returns its unique ID. pcBuf should have a length equal to
1028 // traceStackSize. skip controls the number of leaf frames to omit in order to
1029 // hide tracer internals from stack traces, see CL 5523.
1030 func traceStackID(mp *m, pcBuf []uintptr, skip int) uint64 {
1034 if tracefpunwindoff() || mp.hasCgoOnStack() {
1035 // Slow path: Unwind using default unwinder. Used when frame pointer
1036 // unwinding is unavailable or disabled (tracefpunwindoff), or might
1037 // produce incomplete results or crashes (hasCgoOnStack). Note that no
1038 // cgo callback related crashes have been observed yet. The main
1039 // motivation is to take advantage of a potentially registered cgo symbolizer.
1041 pcBuf[0] = logicalStackSentinel
1043 nstk += callers(skip+1, pcBuf[1:])
1044 } else if curgp != nil {
1045 nstk += gcallers(curgp, skip, pcBuf[1:])
1048 // Fast path: Unwind using frame pointers.
1049 pcBuf[0] = uintptr(skip)
1051 nstk += fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf[1:])
1052 } else if curgp != nil {
1053 // We're called on the g0 stack through mcall(fn) or systemstack(fn). To
1054 // behave like gcallers above, we start unwinding from sched.bp, which
1055 // points to the caller frame of the leaf frame on g's stack. The return
1056 // address of the leaf frame is stored in sched.pc, which we manually capture here.
1058 pcBuf[1] = curgp.sched.pc
1059 nstk += 1 + fpTracebackPCs(unsafe.Pointer(curgp.sched.bp), pcBuf[2:])
1063 nstk-- // skip runtime.goexit
1065 if nstk > 0 && curgp.goid == 1 {
1066 nstk-- // skip runtime.main
1068 id := trace.stackTab.put(pcBuf[:nstk])
1072 // tracefpunwindoff returns true if frame pointer unwinding for the tracer is
1073 // disabled via GODEBUG or not supported by the architecture.
1074 // TODO(#60254): support frame pointer unwinding on plan9/amd64.
1075 func tracefpunwindoff() bool {
1076 return debug.tracefpunwindoff != 0 || (goarch.ArchFamily != goarch.AMD64 && goarch.ArchFamily != goarch.ARM64) || goos.IsPlan9 == 1
1079 // fpTracebackPCs populates pcBuf with the return addresses for each frame and
1080 // returns the number of PCs written to pcBuf. The returned PCs correspond to
1081 // "physical frames" rather than "logical frames"; that is if A is inlined into
1082 // B, this will return a PC for only B.
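//
// As a sketch (assuming the standard frame layout on amd64/arm64, where
// goarch.PtrSize is 8), each iteration of the loop below reads:
//
//	fp+8: the return address, appended to pcBuf
//	fp+0: the caller's saved frame pointer, followed to the next frame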
1083 func fpTracebackPCs(fp unsafe.Pointer, pcBuf []uintptr) (i int) {
1084 for i = 0; i < len(pcBuf) && fp != nil; i++ {
1085 // return addr sits one word above the frame pointer
1086 pcBuf[i] = *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
1087 // follow the frame pointer to the next one
1088 fp = unsafe.Pointer(*(*uintptr)(fp))
1093 // traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
1094 func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
1095 // Any time we acquire a buffer, we may end up flushing it,
1096 // but flushes are rare. Record the lock edge even if it
1097 // doesn't happen this time.
1098 lockRankMayTraceFlush()
1101 if p := mp.p.ptr(); p != nil {
1102 return mp, p.id, &p.trace.buf
1104 lock(&trace.bufLock)
1105 return mp, traceGlobProc, &trace.buf
1108 // traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
1109 func traceReleaseBuffer(mp *m, pid int32) {
1110 if pid == traceGlobProc {
1111 unlock(&trace.bufLock)
1116 // lockRankMayTraceFlush records the lock ranking effects of a
1117 // potential call to traceFlush.
1118 func lockRankMayTraceFlush() {
1119 lockWithRankMayAcquire(&trace.lock, getLockRank(&trace.lock))
1122 // traceFlush puts buf onto stack of full buffers and returns an empty buffer.
1124 // This must run on the system stack because it acquires trace.lock.
1127 func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
1132 if trace.empty != 0 {
1134 trace.empty = buf.ptr().link
1136 buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
1138 throw("trace: out of memory")
1145 // initialize the buffer for a new batch
1146 ts := traceClockNow()
1147 if ts <= bufp.lastTime {
1148 ts = bufp.lastTime + 1
1151 bufp.byte(traceEvBatch | 1<<traceArgCountShift)
1152 bufp.varint(uint64(pid))
1153 bufp.varint(uint64(ts))
1159 // traceString adds a string to the trace.strings and returns the id.
1160 func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
1165 lock(&trace.stringsLock)
1167 // raceacquire is necessary because the map access
1168 // below is race annotated.
1169 raceacquire(unsafe.Pointer(&trace.stringsLock))
1172 if id, ok := trace.strings[s]; ok {
1174 racerelease(unsafe.Pointer(&trace.stringsLock))
1176 unlock(&trace.stringsLock)
1182 id := trace.stringSeq
1183 trace.strings[s] = id
1186 racerelease(unsafe.Pointer(&trace.stringsLock))
1188 unlock(&trace.stringsLock)
1190 // Memory allocation above may trigger tracing and
1191 // cause *bufp to change. The following code works with *bufp,
1192 // so there must be no memory allocation or other activities
1193 // that cause tracing after this point.
1196 size := 1 + 2*traceBytesPerNumber + len(s)
1197 if buf == nil || len(buf.arr)-buf.pos < size {
1198 systemstack(func() {
1199 buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
1203 buf.byte(traceEvString)
1206 // double-check the string and the length can fit.
1207 // Otherwise, truncate the string.
1209 if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
1213 buf.varint(uint64(slen))
1214 buf.pos += copy(buf.arr[buf.pos:], s[:slen])
1220 // varint appends v to buf in little-endian-base-128 encoding.
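//
// For example, v = 300 (binary 1_0010_1100) is appended as two bytes:
// 0xAC (the low 7 bits with the 0x80 continuation bit set) followed by
// 0x02 (the remaining bits).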
1221 func (buf *traceBuf) varint(v uint64) {
1223 for ; v >= 0x80; v >>= 7 {
1224 buf.arr[pos] = 0x80 | byte(v)
1227 buf.arr[pos] = byte(v)
1232 // varintAt writes varint v at byte position pos in buf. This always
1233 // consumes traceBytesPerNumber bytes. This is intended for when the
1234 // caller needs to reserve space for a varint but can't populate it until later.
1236 func (buf *traceBuf) varintAt(pos int, v uint64) {
1237 for i := 0; i < traceBytesPerNumber; i++ {
1238 if i < traceBytesPerNumber-1 {
1239 buf.arr[pos] = 0x80 | byte(v)
1241 buf.arr[pos] = byte(v)
1248 // byte appends v to buf.
1249 func (buf *traceBuf) byte(v byte) {
1250 buf.arr[buf.pos] = v
1254 // traceStackTable maps stack traces (arrays of PCs) to unique uint32 ids.
1255 // It is lock-free for reading.
1256 type traceStackTable struct {
1257 lock mutex // Must be acquired on the system stack
1260 tab [1 << 13]traceStackPtr
1263 // traceStack is a single stack in traceStackTable.
1264 type traceStack struct {
1269 stk [0]uintptr // real type [n]uintptr
1272 type traceStackPtr uintptr
1274 func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }
1276 // stack returns slice of PCs.
1277 func (ts *traceStack) stack() []uintptr {
1278 return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
1281 // put returns a unique id for the stack trace pcs and caches it in the table,
1282 // if it sees the trace for the first time.
1283 func (tab *traceStackTable) put(pcs []uintptr) uint32 {
1287 hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
1288 // First, search the hashtable w/o the mutex.
1289 if id := tab.find(pcs, hash); id != 0 {
1292 // Now, double check under the mutex.
1293 // Switch to the system stack so we can acquire tab.lock
1295 systemstack(func() {
1297 if id = tab.find(pcs, hash); id != 0 {
1301 // Create new record.
1303 stk := tab.newStack(len(pcs))
1308 stkpc := stk.stack()
1310 part := int(hash % uintptr(len(tab.tab)))
1311 stk.link = tab.tab[part]
1312 atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
1318 // find checks if the stack trace pcs is already present in the table.
1319 func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
1320 part := int(hash % uintptr(len(tab.tab)))
1322 for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
1323 if stk.hash == hash && stk.n == len(pcs) {
1324 for i, stkpc := range stk.stack() {
1325 if stkpc != pcs[i] {
1335 // newStack allocates a new stack of size n.
1336 func (tab *traceStackTable) newStack(n int) *traceStack {
1337 return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*goarch.PtrSize))
1340 // traceFrames returns the frames corresponding to pcs. It may
1341 // allocate and may emit trace events.
1342 func traceFrames(bufp traceBufPtr, pcs []uintptr) ([]traceFrame, traceBufPtr) {
1343 frames := make([]traceFrame, 0, len(pcs))
1344 ci := CallersFrames(pcs)
1346 var frame traceFrame
1347 f, more := ci.Next()
1348 frame, bufp = traceFrameForPC(bufp, 0, f)
1349 frames = append(frames, frame)
1356 // dump writes all previously cached stacks to trace buffers,
1357 // releases all memory and resets state.
1359 // This must run on the system stack because it calls traceFlush.
1362 func (tab *traceStackTable) dump(bufp traceBufPtr) traceBufPtr {
1363 for i := range tab.tab {
1364 stk := tab.tab[i].ptr()
1365 for ; stk != nil; stk = stk.link.ptr() {
1366 var frames []traceFrame
1367 frames, bufp = traceFrames(bufp, fpunwindExpand(stk.stack()))
1369 // Estimate the size of this record. This
1370 // bound is pretty loose, but avoids counting
1371 // lots of varint sizes.
1372 maxSize := 1 + traceBytesPerNumber + (2+4*len(frames))*traceBytesPerNumber
1373 // Make sure we have enough buffer space.
1374 if buf := bufp.ptr(); len(buf.arr)-buf.pos < maxSize {
1375 bufp = traceFlush(bufp, 0)
1378 // Emit header, with space reserved for length.
1380 buf.byte(traceEvStack | 3<<traceArgCountShift)
1382 buf.pos += traceBytesPerNumber
1386 buf.varint(uint64(stk.id))
1387 buf.varint(uint64(len(frames)))
1388 for _, frame := range frames {
1389 buf.varint(uint64(frame.PC))
1390 buf.varint(frame.funcID)
1391 buf.varint(frame.fileID)
1392 buf.varint(frame.line)
1395 // Fill in size header.
1396 buf.varintAt(lenPos, uint64(buf.pos-recPos))
1401 *tab = traceStackTable{}
1402 lockInit(&((*tab).lock), lockRankTraceStackTab)
1407 // fpunwindExpand checks if pcBuf contains logical frames (which include inlined
1408 // frames) or physical frames (produced by frame pointer unwinding) using a
1409 // sentinel value in pcBuf[0]. Logical frames are simply returned without the
1410 // sentinel. Physical frames are turned into logical frames via inline unwinding
1411 // and by applying the skip value that's stored in pcBuf[0].
1412 func fpunwindExpand(pcBuf []uintptr) []uintptr {
1413 if len(pcBuf) > 0 && pcBuf[0] == logicalStackSentinel {
1414 // pcBuf contains logical rather than inlined frames, skip has already been
1415 // applied, just return it without the sentinel value in pcBuf[0].
1420 lastFuncID = abi.FuncIDNormal
1421 newPCBuf = make([]uintptr, 0, traceStackSize)
1423 // skipOrAdd skips or appends retPC to newPCBuf and returns true if more
1424 // pcs can be added.
1425 skipOrAdd = func(retPC uintptr) bool {
1429 newPCBuf = append(newPCBuf, retPC)
1431 return len(newPCBuf) < cap(newPCBuf)
1436 for _, retPC := range pcBuf[1:] {
1438 fi := findfunc(callPC)
1440 // There is no funcInfo if callPC belongs to a C function. In this case
1441 // we still keep the pc, but don't attempt to expand inlined frames.
1442 if more := skipOrAdd(retPC); !more {
1448 u, uf := newInlineUnwinder(fi, callPC)
1449 for ; uf.valid(); uf = u.next(uf) {
1451 if sf.funcID == abi.FuncIDWrapper && elideWrapperCalling(lastFuncID) {
1453 } else if more := skipOrAdd(uf.pc + 1); !more {
1456 lastFuncID = sf.funcID
1462 type traceFrame struct {
1469 // traceFrameForPC records the frame information.
1470 // It may allocate memory.
1471 func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
1473 var frame traceFrame
1477 const maxLen = 1 << 10
1478 if len(fn) > maxLen {
1479 fn = fn[len(fn)-maxLen:]
1481 frame.funcID, bufp = traceString(bufp, pid, fn)
1482 frame.line = uint64(f.Line)
1484 if len(file) > maxLen {
1485 file = file[len(file)-maxLen:]
1487 frame.fileID, bufp = traceString(bufp, pid, file)
1488 return frame, (*bufp)
1491 // traceAlloc is a non-thread-safe region allocator.
1492 // It holds a linked list of traceAllocBlock.
1493 type traceAlloc struct {
1494 head traceAllocBlockPtr
1498 // traceAllocBlock is a block in traceAlloc.
1500 // traceAllocBlock is allocated from non-GC'd memory, so it must not
1501 // contain heap pointers. Writes to pointers to traceAllocBlocks do
1502 // not need write barriers.
1503 type traceAllocBlock struct {
1505 next traceAllocBlockPtr
1506 data [64<<10 - goarch.PtrSize]byte
1509 // TODO: Since traceAllocBlock is now embedded runtime/internal/sys.NotInHeap, this isn't necessary.
1510 type traceAllocBlockPtr uintptr
1512 func (p traceAllocBlockPtr) ptr() *traceAllocBlock { return (*traceAllocBlock)(unsafe.Pointer(p)) }
1513 func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }
1515 // alloc allocates n-byte block.
1516 func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
1517 n = alignUp(n, goarch.PtrSize)
1518 if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
1519 if n > uintptr(len(a.head.ptr().data)) {
1520 throw("trace: alloc too large")
1522 block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
1524 throw("trace: out of memory")
1526 block.next.set(a.head.ptr())
1530 p := &a.head.ptr().data[a.off]
1532 return unsafe.Pointer(p)
1535 // drop frees all previously allocated memory and resets the allocator.
1536 func (a *traceAlloc) drop() {
1538 block := a.head.ptr()
1539 a.head.set(block.next.ptr())
1540 sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
1544 // The following functions write specific events to trace.
1546 func (_ traceLocker) Gomaxprocs(procs int32) {
1547 traceEvent(traceEvGomaxprocs, 1, uint64(procs))
1550 func (_ traceLocker) ProcStart() {
1551 traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
1554 func (_ traceLocker) ProcStop(pp *p) {
1555 // Sysmon and stopTheWorld can stop Ps blocked in syscalls;
1556 // to handle this we temporarily employ the P.
1560 traceEvent(traceEvProcStop, -1)
1565 func (_ traceLocker) GCStart() {
1566 traceEvent(traceEvGCStart, 3, trace.seqGC)
1570 func (_ traceLocker) GCDone() {
1571 traceEvent(traceEvGCDone, -1)
1574 func (_ traceLocker) STWStart(reason stwReason) {
1575 // Don't trace if this STW is for trace start/stop, since traceEnabled
1576 // switches during a STW.
1577 if reason == stwStartTrace || reason == stwStopTrace {
1580 getg().m.trace.tracedSTWStart = true
1581 traceEvent(traceEvSTWStart, -1, uint64(reason))
1584 func (_ traceLocker) STWDone() {
1586 if !mp.trace.tracedSTWStart {
1589 mp.trace.tracedSTWStart = false
1590 traceEvent(traceEvSTWDone, -1)
1593 // traceGCSweepStart prepares to trace a sweep loop. This does not
1594 // emit any events until traceGCSweepSpan is called.
1596 // traceGCSweepStart must be paired with traceGCSweepDone and there
1597 // must be no preemption points between these two calls.
1598 func (_ traceLocker) GCSweepStart() {
1599 // Delay the actual GCSweepStart event until the first span
1600 // sweep. If we don't sweep anything, don't emit any events.
1601 pp := getg().m.p.ptr()
1602 if pp.trace.inSweep {
1603 throw("double traceGCSweepStart")
1605 pp.trace.inSweep, pp.trace.swept, pp.trace.reclaimed = true, 0, 0
1608 // traceGCSweepSpan traces the sweep of a single page.
1610 // This may be called outside a traceGCSweepStart/traceGCSweepDone
1611 // pair; however, it will not emit any trace events in this case.
1612 func (_ traceLocker) GCSweepSpan(bytesSwept uintptr) {
1613 pp := getg().m.p.ptr()
1614 if pp.trace.inSweep {
1615 if pp.trace.swept == 0 {
1616 traceEvent(traceEvGCSweepStart, 1)
1618 pp.trace.swept += bytesSwept
1622 func (_ traceLocker) GCSweepDone() {
1623 pp := getg().m.p.ptr()
1624 if !pp.trace.inSweep {
1625 throw("missing traceGCSweepStart")
1627 if pp.trace.swept != 0 {
1628 traceEvent(traceEvGCSweepDone, -1, uint64(pp.trace.swept), uint64(pp.trace.reclaimed))
1630 pp.trace.inSweep = false
1633 func (_ traceLocker) GCMarkAssistStart() {
1634 traceEvent(traceEvGCMarkAssistStart, 1)
1637 func (_ traceLocker) GCMarkAssistDone() {
1638 traceEvent(traceEvGCMarkAssistDone, -1)
1641 func (_ traceLocker) GoCreate(newg *g, pc uintptr) {
1643 newg.trace.lastP = getg().m.p
1644 // +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
1645 id := trace.stackTab.put([]uintptr{logicalStackSentinel, startPCforTrace(pc) + sys.PCQuantum})
1646 traceEvent(traceEvGoCreate, 2, newg.goid, uint64(id))
1649 func (_ traceLocker) GoStart() {
1653 if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
1654 traceEvent(traceEvGoStartLabel, -1, gp.goid, gp.trace.seq, trace.markWorkerLabels[pp.ptr().gcMarkWorkerMode])
1655 } else if gp.trace.lastP == pp {
1656 traceEvent(traceEvGoStartLocal, -1, gp.goid)
1659 traceEvent(traceEvGoStart, -1, gp.goid, gp.trace.seq)
1663 func (_ traceLocker) GoEnd() {
1664 traceEvent(traceEvGoEnd, -1)
1667 func (_ traceLocker) GoSched() {
1669 gp.trace.lastP = gp.m.p
1670 traceEvent(traceEvGoSched, 1)
1673 func (_ traceLocker) GoPreempt() {
1675 gp.trace.lastP = gp.m.p
1676 traceEvent(traceEvGoPreempt, 1)
1679 func (_ traceLocker) GoPark(reason traceBlockReason, skip int) {
1680 // Convert the block reason directly to a trace event type.
1681 // See traceBlockReason for more information.
1682 traceEvent(byte(reason), skip)
1685 func (_ traceLocker) GoUnpark(gp *g, skip int) {
1688 if gp.trace.lastP == pp {
1689 traceEvent(traceEvGoUnblockLocal, skip, gp.goid)
1692 traceEvent(traceEvGoUnblock, skip, gp.goid, gp.trace.seq)
1696 func (_ traceLocker) GoSysCall() {
1699 case tracefpunwindoff():
1700 // Unwind by skipping 1 frame relative to gp.syscallsp which is captured 3
1701 // frames above this frame. For frame pointer unwinding we produce the same
1702 // results by hard coding the number of frames in between our caller and the
1703 // actual syscall, see cases below.
1704 // TODO(felixge): Implement gp.syscallbp to avoid this workaround?
1706 case GOOS == "solaris" || GOOS == "illumos":
1707 // These platforms don't use a libc_read_trampoline.
1710 // Skip the extra trampoline frame used on most systems.
1713 getg().m.curg.trace.tracedSyscallEnter = true
1714 traceEvent(traceEvGoSysCall, skip)
1717 func (_ traceLocker) GoSysExit(lostP bool) {
1719 throw("lostP must always be true in the old tracer for GoSysExit")
1722 if !gp.trace.tracedSyscallEnter {
1723 // There was no syscall entry traced for us at all, so there's definitely
1724 // no EvGoSysBlock or EvGoInSyscall before us, which EvGoSysExit requires.
1727 gp.trace.tracedSyscallEnter = false
1728 ts := gp.trace.sysExitTime
1729 if ts != 0 && ts < trace.startTime {
1730 // There is a race between the code that initializes sysExitTimes
1731 // (in exitsyscall, which runs without a P, and therefore is not
1732 // stopped with the rest of the world) and the code that initializes
1733 // a new trace. The recorded sysExitTime must therefore be treated
1734 // as "best effort". If they are valid for this trace, then great,
1735 // use them for greater accuracy. But if they're not valid for this
1736 // trace, assume that the trace was started after the actual syscall
1737 // exit (but before we actually managed to start the goroutine,
1738 // aka right now), and assign a fresh time stamp to keep the log consistent.
1741 gp.trace.sysExitTime = 0
1743 gp.trace.lastP = gp.m.p
1744 traceEvent(traceEvGoSysExit, -1, gp.goid, gp.trace.seq, uint64(ts))
1747 // nosplit because it's called from exitsyscall without a P.
1750 func (_ traceLocker) RecordSyscallExitedTime(gp *g, oldp *p) {
1751 // Wait till traceGoSysBlock event is emitted.
1752 // This ensures consistency of the trace (the goroutine is started after it is blocked).
1753 for oldp != nil && oldp.syscalltick == gp.m.syscalltick {
1756 // We can't trace syscall exit right now because we don't have a P.
1757 // Tracing code can invoke write barriers that cannot run without a P.
1758 // So instead we remember the syscall exit time and emit the event
1759 // in execute when we have a P.
1760 gp.trace.sysExitTime = traceClockNow()
1763 func (_ traceLocker) GoSysBlock(pp *p) {
1764 // Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked;
1765 // to handle this we temporarily employ the P.
1769 traceEvent(traceEvGoSysBlock, -1)
1774 func (t traceLocker) ProcSteal(pp *p, forMe bool) {
1778 func (_ traceLocker) HeapAlloc(live uint64) {
1779 traceEvent(traceEvHeapAlloc, -1, live)
1782 func (_ traceLocker) HeapGoal() {
1783 heapGoal := gcController.heapGoal()
1784 if heapGoal == ^uint64(0) {
1785 // Heap-based triggering is disabled.
1786 traceEvent(traceEvHeapGoal, -1, 0)
1788 traceEvent(traceEvHeapGoal, -1, heapGoal)
1792 // To access runtime functions from runtime/trace.
1793 // See runtime/trace/annotation.go
1795 //go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
1796 func trace_userTaskCreate(id, parentID uint64, taskType string) {
1801 // Same as in traceEvent.
1802 mp, pid, bufp := traceAcquireBuffer()
1803 if !trace.enabled && !mp.trace.startingTrace {
1804 traceReleaseBuffer(mp, pid)
1808 typeStringID, bufp := traceString(bufp, pid, taskType)
1809 traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 0, 3, id, parentID, typeStringID)
1810 traceReleaseBuffer(mp, pid)
1813 //go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
1814 func trace_userTaskEnd(id uint64) {
1815 traceEvent(traceEvUserTaskEnd, 2, id)
1818 //go:linkname trace_userRegion runtime/trace.userRegion
1819 func trace_userRegion(id, mode uint64, name string) {
1824 mp, pid, bufp := traceAcquireBuffer()
1825 if !trace.enabled && !mp.trace.startingTrace {
1826 traceReleaseBuffer(mp, pid)
1830 nameStringID, bufp := traceString(bufp, pid, name)
1831 traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 0, 3, id, mode, nameStringID)
1832 traceReleaseBuffer(mp, pid)
1835 //go:linkname trace_userLog runtime/trace.userLog
1836 func trace_userLog(id uint64, category, message string) {
1841 mp, pid, bufp := traceAcquireBuffer()
1842 if !trace.enabled && !mp.trace.startingTrace {
1843 traceReleaseBuffer(mp, pid)
1847 categoryID, bufp := traceString(bufp, pid, category)
1849 // The log message is recorded after all of the normal trace event
1850 // arguments, including the task, category, and stack IDs. We must ask
1851 // traceEventLocked to reserve extra space for the length of the message
1852 // and the message itself.
1853 extraSpace := traceBytesPerNumber + len(message)
1854 traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 0, 3, id, categoryID)
1857 // double-check the message and its length can fit.
1858 // Otherwise, truncate the message.
1859 slen := len(message)
1860 if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
1863 buf.varint(uint64(slen))
1864 buf.pos += copy(buf.arr[buf.pos:], message[:slen])
1866 traceReleaseBuffer(mp, pid)
1869 // startPCforTrace returns the start PC of a goroutine for tracing purposes. If pc is a wrapper,
1870 // it returns the PC of the wrapped function. Otherwise it returns pc.
1871 func startPCforTrace(pc uintptr) uintptr {
1874 return pc // may happen for locked g in extra M since its pc is 0.
1876 w := funcdata(f, abi.FUNCDATA_WrapInfo)
1878 return pc // not a wrapper
1880 return f.datap.textAddr(*(*uint32)(w))
1883 // OneNewExtraM registers the fact that a new extra M was created with
1884 // the tracer. This matters if the M (which has an attached G) is used while
1885 // the trace is still active because if it is, we need the fact that it exists
1886 // to show up in the final trace.
1887 func (tl traceLocker) OneNewExtraM(gp *g) {
1888 // Trigger two trace events for the locked g in the extra m,
1889 // since the next event of the g will be traceEvGoSysExit in exitsyscall,
1890 // while calling from a C thread into Go.
1891 tl.GoCreate(gp, 0) // no start pc
1893 traceEvent(traceEvGoInSyscall, -1, gp.goid)
1896 // Used only in the new tracer.
1897 func (tl traceLocker) GoCreateSyscall(gp *g) {
1900 // Used only in the new tracer.
1901 func (tl traceLocker) GoDestroySyscall() {
1904 // traceTime represents a timestamp for the trace.
1905 type traceTime uint64
1907 // traceClockNow returns a monotonic timestamp. The clock this function gets
1908 // the timestamp from is specific to tracing, and shouldn't be mixed with other clock sources.
1911 // nosplit because it's called from exitsyscall, which is nosplit.
1914 func traceClockNow() traceTime {
1915 return traceTime(cputicks() / traceTimeDiv)
1918 func traceExitingSyscall() {
1921 func traceExitedSyscall() {
1924 // Not used in the old tracer. Defined for compatibility.
1925 const defaultTraceAdvancePeriod = 0