1 // Copyright 2023 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 //go:build goexperiment.exectracer2
7 // Trace buffer management.
12 "runtime/internal/sys"
// Maximum number of bytes required to encode uint64 in base-128.
// Each byte carries 7 payload bits, so a full 64-bit value needs
// ceil(64/7) = 10 bytes in the worst case.
const traceBytesPerNumber = 10
// traceWriter is the interface for writing all trace data.
//
// This type is passed around as a value, and all of its methods return
// a new traceWriter. This allows for chaining together calls in a fluent-style
// API. This is partly stylistic, and very slightly for performance, since
// the compiler can destructure this value and pass it between calls as
// just regular arguments. However, this style is not load-bearing, and
// we can change it if it's deemed too error-prone.
//
// It carries a traceLocker (providing gen and mp) and the traceBuf
// currently being written to (see the composite literal in writer).
type traceWriter struct {
// writer returns a traceWriter that writes into the current M's stream.
func (tl traceLocker) writer() traceWriter {
	// gen%2 selects the buffer slot for the generation this locker holds;
	// the two slots ping-pong between adjacent generations.
	return traceWriter{traceLocker: tl, traceBuf: tl.mp.trace.buf[tl.gen%2]}
// unsafeTraceWriter produces a traceWriter that doesn't lock the trace.
//
// It should only be used in contexts where either:
// - Another traceLocker is held.
// - trace.gen is prevented from advancing.
//
// buf may be nil (ensure/refill tolerate a nil traceBuf).
func unsafeTraceWriter(gen uintptr, buf *traceBuf) traceWriter {
	// Only gen is set on the locker; there is no M attached.
	return traceWriter{traceLocker: traceLocker{gen: gen}, traceBuf: buf}
// end writes the buffer back into the m.
func (w traceWriter) end() {
	// Tolerate a nil mp. It makes code that creates traceWriters directly
	// less error-prone. (NOTE(review): the nil check itself is on lines
	// elided from this view — confirm against the full source.)
	// Store the buffer back into the M's slot for this generation.
	w.mp.trace.buf[w.gen%2] = w.traceBuf
// ensure makes sure that at least maxSize bytes are available to write.
//
// Returns whether the buffer was flushed.
func (w traceWriter) ensure(maxSize int) (traceWriter, bool) {
	// A refill is needed if there's no buffer yet, or the current one
	// doesn't have maxSize bytes of free space remaining.
	refill := w.traceBuf == nil || !w.available(maxSize)
// flush puts w.traceBuf on the queue of full buffers.
func (w traceWriter) flush() traceWriter {
	// Nothing to hand off if no buffer was ever acquired.
	// NOTE(review): traceBufFlush requires trace.lock to be held (see its
	// doc below); the locking around this call is on lines elided from
	// this view — confirm.
	if w.traceBuf != nil {
		traceBufFlush(w.traceBuf, w.gen)
// refill puts w.traceBuf on the queue of full buffers and refreshes w's buffer.
func (w traceWriter) refill() traceWriter {
	// Hand off the old buffer, if any, to the queue of full buffers.
	if w.traceBuf != nil {
		traceBufFlush(w.traceBuf, w.gen)
	// Reuse a buffer from the free list if one is available...
	if trace.empty != nil {
		w.traceBuf = trace.empty
		trace.empty = w.traceBuf.link
	// ...otherwise allocate a fresh one, accounted against other_sys.
	w.traceBuf = (*traceBuf)(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
	if w.traceBuf == nil {
		throw("trace: out of memory")
	// Initialize the buffer.
	ts := traceClockNow()
	// Clamp so timestamps never move backwards relative to the last event
	// written through this buffer: keep them strictly increasing.
	if ts <= w.traceBuf.lastTime {
		ts = w.traceBuf.lastTime + 1
	w.traceBuf.lastTime = ts
	w.traceBuf.link = nil
	// Tolerate a nil mp. (mID's declaration and the nil check are on lines
	// elided from this view; with a real M it is the thread's procid.)
	mID = uint64(w.mp.procid)
	// Write the buffer's header: batch event type, generation, then M ID.
	w.byte(byte(traceEvEventBatch))
	w.varint(uint64(w.gen))
	w.varint(uint64(mID))
	// Reserve space for the batch length; traceBufFlush fills it in later
	// via varintAt once the batch is complete.
	w.traceBuf.lenPos = w.varintReserve()
// traceBufQueue is a FIFO of traceBufs, linked via traceBuf.link
// (see traceBufHeader).
type traceBufQueue struct {
// push queues buf onto the tail of the queue of buffers.
func (q *traceBufQueue) push(buf *traceBuf) {
// pop dequeues a buffer from the head of the queue of buffers.
func (q *traceBufQueue) pop() *traceBuf {
155 func (q *traceBufQueue) empty() bool {
// traceBufHeader is the header of the per-M tracing buffer (see traceBuf,
// which this header fronts).
type traceBufHeader struct {
	link     *traceBuf // in trace.empty/full
	lastTime traceTime // when we wrote the last event
	pos      int       // next write offset in arr
	lenPos   int       // position of batch length value
// traceBuf is per-M tracing buffer.
//
// TODO(mknyszek): Rename traceBuf to traceBatch, since they map 1:1 with event batches.
type traceBuf struct {
	// arr is sized so that header plus array together occupy exactly 64 KiB.
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
// byte appends v to buf.
// NOTE(review): presumably the caller must first guarantee free space
// (see traceWriter.ensure) — confirm against the full source.
func (buf *traceBuf) byte(v byte) {
// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	// Scratch window big enough for the worst-case encoding of a uint64.
	// (pos is established on lines elided from this view.)
	arr := buf.arr[pos : pos+traceBytesPerNumber]
	// Non-final bytes carry 7 payload bits plus the 0x80 continuation bit.
	arr[i] = 0x80 | byte(v)
// varintReserve reserves enough space in buf to hold any varint.
//
// Space reserved this way can be filled in with the varintAt method.
func (buf *traceBuf) varintReserve() int {
	// Advance pos past a maximum-size varint; the reserved bytes are
	// populated later via varintAt.
	buf.pos += traceBytesPerNumber
// stringData appends s's data directly to buf.
func (buf *traceBuf) stringData(s string) {
	// copy returns the number of bytes written, advancing pos by that much.
	buf.pos += copy(buf.arr[buf.pos:], s)
// available reports whether buf has at least size bytes of free space left.
func (buf *traceBuf) available(size int) bool {
	return len(buf.arr)-buf.pos >= size
// varintAt writes varint v at byte position pos in buf. This always
// consumes traceBytesPerNumber bytes. This is intended for when the caller
// needs to reserve space for a varint but can't populate it until later.
// Use varintReserve to reserve this space.
func (buf *traceBuf) varintAt(pos int, v uint64) {
	for i := 0; i < traceBytesPerNumber; i++ {
		if i < traceBytesPerNumber-1 {
			// Not the last reserved byte: set the continuation bit so
			// the fixed-width encoding stays a valid varint.
			buf.arr[pos] = 0x80 | byte(v)
			// Last reserved byte: no continuation bit.
			buf.arr[pos] = byte(v)
	// v still has bits left after all reserved bytes were consumed.
	throw("v could not fit in traceBytesPerNumber")
// traceBufFlush flushes a trace buffer.
//
// Must run on the system stack because trace.lock must be held.
func traceBufFlush(buf *traceBuf, gen uintptr) {
	assertLockHeld(&trace.lock)

	// Write out the non-header length of the batch in the header.
	//
	// Note: the length of the header is not included to make it easier
	// to calculate this value when deserializing and reserializing the
	// trace. Varints can have additional padding of zero bits that is
	// quite difficult to preserve, and if we include the header we
	// force serializers to do more work. Nothing else actually needs
	// the header.
	// The length is everything written after the reserved length field.
	buf.varintAt(buf.lenPos, uint64(buf.pos-(buf.lenPos+traceBytesPerNumber)))
	trace.full[gen%2].push(buf)

	// Notify the scheduler that there's work available and that the trace
	// reader should be scheduled.
	// Check first so we only store when the flag actually changes.
	if !trace.workAvailable.Load() {
		trace.workAvailable.Store(true)