1 // Copyright 2023 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 //go:build goexperiment.exectracer2
7 // Runtime -> tracer API.
12 "runtime/internal/atomic"
13 _ "unsafe" // for go:linkname
16 // gTraceState is per-G state for the tracer.
17 type gTraceState struct {
// Embedded traceSchedResourceState (declared elsewhere in the runtime) carries
// the per-generation sequence numbers and status-traced bookkeeping shared by
// Gs, Ms, and Ps — see nextSeq/statusWasTraced/acquireStatus uses below.
18 traceSchedResourceState
21 // reset resets the gTraceState for a new goroutine.
22 func (s *gTraceState) reset() {
// NOTE(review): body partially elided in this listing; only the sequence state
// appears to be reset here — confirm against the full source.
24 // N.B. s.statusTraced is managed and cleared separately.
27 // mTraceState is per-M state for the tracer.
28 type mTraceState struct {
// An odd seqlock value means the M is inside traceAcquire/traceRelease and may
// be actively writing events; traceAdvance/traceThreadDestroy key off this.
29 seqlock atomic.Uintptr // seqlock indicating that this M is writing to a trace buffer.
// Two buffers, one per trace generation, so flushing one generation does not
// block writers of the other.
30 buf [2]*traceBuf // Per-M traceBuf for writing. Indexed by trace.gen%2.
31 link *m // Snapshot of alllink or freelink.
34 // pTraceState is per-P state for the tracer.
35 type pTraceState struct {
36 traceSchedResourceState
// Set on syscall entry, cleared to -1 when the association ends (see GoSysExit
// and ProcSteal below). Used so a ProcSteal event can record whom it stole from.
38 // mSyscallID is the ID of the M this was bound to before entering a syscall.
41 // maySweep indicates the sweep events should be traced.
42 // This is used to defer the sweep start event until a span
43 // has actually been swept.
46 // inSweep indicates that at least one sweep event has been traced.
49 // swept and reclaimed track the number of bytes swept and reclaimed
50 // by sweeping in the current sweep loop (while maySweep was true).
51 swept, reclaimed uintptr
54 // traceLockInit initializes global trace locks.
55 func traceLockInit() {
56 // Sharing a lock rank here is fine because they should never be accessed
57 // together. If they are, we want to find out immediately.
// Indices [0] and [1] correspond to the two trace generations (trace.gen%2),
// matching the double-buffering used throughout the tracer.
58 lockInit(&trace.stringTab[0].lock, lockRankTraceStrings)
59 lockInit(&trace.stringTab[0].tab.lock, lockRankTraceStrings)
60 lockInit(&trace.stringTab[1].lock, lockRankTraceStrings)
61 lockInit(&trace.stringTab[1].tab.lock, lockRankTraceStrings)
62 lockInit(&trace.stackTab[0].tab.lock, lockRankTraceStackTab)
63 lockInit(&trace.stackTab[1].tab.lock, lockRankTraceStackTab)
64 lockInit(&trace.lock, lockRankTrace)
67 // lockRankMayTraceFlush records the lock ranking effects of a
68 // potential call to traceFlush.
70 // nosplit because traceAcquire is nosplit.
73 func lockRankMayTraceFlush() {
// Only records the rank edge for the lock-rank checker; it does not actually
// acquire trace.lock.
74 lockWithRankMayAcquire(&trace.lock, getLockRank(&trace.lock))
77 // traceBlockReason is an enumeration of reasons a goroutine might block.
78 // This is the interface the rest of the runtime uses to tell the
79 // tracer why a goroutine blocked. The tracer then propagates this information
80 // into the trace however it sees fit.
82 // Note that traceBlockReasons should not be compared, since reasons that are
83 // distinct by name may *not* be distinct by value.
84 type traceBlockReason uint8
// NOTE(review): the const block is partially elided in this listing; the string
// table below shows the full set of reasons.
87 traceBlockGeneric traceBlockReason = iota
95 traceBlockGCMarkAssist
97 traceBlockSystemGoroutine
100 traceBlockUntilGCEnds
// Human-readable strings emitted into the trace, indexed by reason value.
104 var traceBlockReasonStrings = [...]string{
105 traceBlockGeneric: "unspecified",
106 traceBlockForever: "forever",
107 traceBlockNet: "network",
108 traceBlockSelect: "select",
109 traceBlockCondWait: "sync.(*Cond).Wait",
110 traceBlockSync: "sync",
111 traceBlockChanSend: "chan send",
112 traceBlockChanRecv: "chan receive",
113 traceBlockGCMarkAssist: "GC mark assist wait for work",
114 traceBlockGCSweep: "GC background sweeper wait",
115 traceBlockSystemGoroutine: "system goroutine wait",
116 traceBlockPreempted: "preempted",
117 traceBlockDebugCall: "wait for debug call",
118 traceBlockUntilGCEnds: "wait until GC ends",
119 traceBlockSleep: "sleep",
122 // traceGoStopReason is an enumeration of reasons a goroutine might yield.
124 // Note that traceGoStopReasons should not be compared, since reasons that are
125 // distinct by name may *not* be distinct by value.
126 type traceGoStopReason uint8
129 traceGoStopGeneric traceGoStopReason = iota
// Human-readable strings emitted into the trace, indexed by reason value.
134 var traceGoStopReasonStrings = [...]string{
135 traceGoStopGeneric: "unspecified",
136 traceGoStopGoSched: "runtime.GoSched",
137 traceGoStopPreempted: "preempted",
140 // traceEnabled returns true if the trace is currently enabled.
143 func traceEnabled() bool {
// gen == 0 is the sentinel for "tracing disabled"; any nonzero generation
// means a trace is in progress.
144 return trace.gen.Load() != 0
147 // traceShuttingDown returns true if the trace is currently shutting down.
148 func traceShuttingDown() bool {
149 return trace.shutdown.Load()
152 // traceLocker represents an M writing trace events. While a traceLocker value
153 // is valid, the tracer observes all operations on the G/M/P or trace events being
154 // written as happening atomically.
155 type traceLocker struct {
// NOTE(review): fields elided in this listing; usage below shows it carries
// the M (mp) and the trace generation (gen) captured at acquire time.
160 // debugTraceReentrancy checks if the trace is reentrant.
162 // This is optional because throwing in a function makes it instantly
163 // not inlineable, and we want traceAcquire to be inlineable for
164 // low overhead when the trace is disabled.
165 const debugTraceReentrancy = false
167 // traceAcquire prepares this M for writing one or more trace events.
169 // nosplit because it's called on the syscall path when stack movement is forbidden.
172 func traceAcquire() traceLocker {
// Fast path (elided above) bails when tracing is disabled; the slow path is
// kept out-of-line so this function stays inlineable.
176 return traceAcquireEnabled()
179 // traceAcquireEnabled is the traceEnabled path for traceAcquire. It's explicitly
180 // broken out to make traceAcquire inlineable to keep the overhead of the tracer
181 // when it's disabled low.
183 // nosplit because it's called by traceAcquire, which is nosplit.
186 func traceAcquireEnabled() traceLocker {
187 // Any time we acquire a traceLocker, we may flush a trace buffer. But
188 // buffer flushes are rare. Record the lock edge even if it doesn't happen
190 lockRankMayTraceFlush()
192 // Prevent preemption.
195 // Acquire the trace seqlock.
// An odd seqlock value marks this M as actively writing trace events.
196 seq := mp.trace.seqlock.Add(1)
197 if debugTraceReentrancy && seq%2 != 1 {
198 throw("bad use of trace.seqlock or tracer is reentrant")
201 // N.B. This load of gen appears redundant with the one in traceEnabled.
202 // However, it's very important that the gen we use for writing to the trace
203 // is acquired under a traceLocker so traceAdvance can make sure no stale
204 // gen values are being used.
206 // Because we're doing this load again, it also means that the trace
207 // might end up being disabled when we load it. In that case we need to undo
208 // what we did and bail.
209 gen := trace.gen.Load()
// Disabled-concurrently path: release the seqlock (back to an even value)
// before returning an invalid locker.
211 mp.trace.seqlock.Add(1)
215 return traceLocker{mp, gen}
218 // ok returns true if the traceLocker is valid (i.e. tracing is enabled).
220 // nosplit because it's called on the syscall path when stack movement is forbidden.
223 func (tl traceLocker) ok() bool {
227 // traceRelease indicates that this M is done writing trace events.
229 // nosplit because it's called on the syscall path when stack movement is forbidden.
232 func traceRelease(tl traceLocker) {
// Second increment returns the seqlock to an even value, signaling "not
// writing" to any observer (e.g. traceAdvance).
233 seq := tl.mp.trace.seqlock.Add(1)
234 if debugTraceReentrancy && seq%2 != 0 {
235 print("runtime: seq=", seq, "\n")
236 throw("bad use of trace.seqlock")
241 // traceExitingSyscall marks a goroutine as exiting the syscall slow path.
243 // Must be paired with a traceExitedSyscall call.
244 func traceExitingSyscall() {
// Global count of goroutines currently in the syscall-exit slow path;
// incremented here, decremented in traceExitedSyscall.
245 trace.exitingSyscall.Add(1)
248 // traceExitedSyscall marks a goroutine as having exited the syscall slow path.
249 func traceExitedSyscall() {
250 trace.exitingSyscall.Add(-1)
253 // Gomaxprocs emits a ProcsChange event.
254 func (tl traceLocker) Gomaxprocs(procs int32) {
// stack(1) attributes the event to our caller's frame.
255 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvProcsChange, traceArg(procs), tl.stack(1))
258 // ProcStart traces a ProcStart event.
260 // Must be called with a valid P.
261 func (tl traceLocker) ProcStart() {
263 // Procs are typically started within the scheduler when there is no user goroutine. If there is a user goroutine,
264 // it must be in _Gsyscall because the only time a goroutine is allowed to have its Proc moved around from under it
265 // is during a syscall.
// nextSeq orders ProcStart against other events on this P within the generation.
266 tl.eventWriter(traceGoSyscall, traceProcIdle).commit(traceEvProcStart, traceArg(pp.id), pp.trace.nextSeq(tl.gen))
269 // ProcStop traces a ProcStop event.
270 func (tl traceLocker) ProcStop(pp *p) {
271 // The only time a goroutine is allowed to have its Proc moved around
272 // from under it is during a syscall.
273 tl.eventWriter(traceGoSyscall, traceProcRunning).commit(traceEvProcStop)
276 // GCActive traces a GCActive event.
278 // Must be emitted by an actively running goroutine on an active P. This restriction can be changed
279 // easily and only depends on where it's currently called.
280 func (tl traceLocker) GCActive() {
// trace.seqGC is the global GC sequence counter shared by GCActive/GCStart/GCDone.
281 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCActive, traceArg(trace.seqGC))
282 // N.B. Only one GC can be running at a time, so this is naturally
283 // serialized by the caller.
287 // GCStart traces a GCBegin event.
289 // Must be emitted by an actively running goroutine on an active P. This restriction can be changed
290 // easily and only depends on where it's currently called.
291 func (tl traceLocker) GCStart() {
292 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCBegin, traceArg(trace.seqGC), tl.stack(3))
293 // N.B. Only one GC can be running at a time, so this is naturally
294 // serialized by the caller.
298 // GCDone traces a GCEnd event.
300 // Must be emitted by an actively running goroutine on an active P. This restriction can be changed
301 // easily and only depends on where it's currently called.
302 func (tl traceLocker) GCDone() {
303 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCEnd, traceArg(trace.seqGC))
304 // N.B. Only one GC can be running at a time, so this is naturally
305 // serialized by the caller.
309 // STWStart traces a STWBegin event.
310 func (tl traceLocker) STWStart(reason stwReason) {
311 // Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
312 // runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
// The reason is recorded as an interned trace string, not an enum value.
313 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSTWBegin, tl.string(reason.String()), tl.stack(2))
316 // STWDone traces a STWEnd event.
317 func (tl traceLocker) STWDone() {
318 // Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
319 // runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
320 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSTWEnd)
323 // GCSweepStart prepares to trace a sweep loop. This does not
324 // emit any events until traceGCSweepSpan is called.
326 // GCSweepStart must be paired with traceGCSweepDone and there
327 // must be no preemption points between these two calls.
329 // Must be called with a valid P.
330 func (tl traceLocker) GCSweepStart() {
331 // Delay the actual GCSweepBegin event until the first span
332 // sweep. If we don't sweep anything, don't emit any events.
334 if pp.trace.maySweep {
335 throw("double traceGCSweepStart")
// Arm sweep tracing and reset the per-loop byte counters.
337 pp.trace.maySweep, pp.trace.swept, pp.trace.reclaimed = true, 0, 0
340 // GCSweepSpan traces the sweep of a single span. If this is
341 // the first span swept since traceGCSweepStart was called, this
342 // will emit a GCSweepBegin event.
344 // This may be called outside a traceGCSweepStart/traceGCSweepDone
345 // pair; however, it will not emit any trace events in this case.
347 // Must be called with a valid P.
348 func (tl traceLocker) GCSweepSpan(bytesSwept uintptr) {
350 if pp.trace.maySweep {
// swept == 0 means this is the first span of the loop: emit the deferred
// GCSweepBegin now and remember that an End event will be owed.
351 if pp.trace.swept == 0 {
352 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCSweepBegin, tl.stack(1))
353 pp.trace.inSweep = true
355 pp.trace.swept += bytesSwept
359 // GCSweepDone finishes tracing a sweep loop. If any memory was
360 // swept (i.e. traceGCSweepSpan emitted an event) then this will emit
361 // a GCSweepEnd event.
363 // Must be called with a valid P.
364 func (tl traceLocker) GCSweepDone() {
366 if !pp.trace.maySweep {
367 throw("missing traceGCSweepStart")
369 if pp.trace.inSweep {
370 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCSweepEnd, traceArg(pp.trace.swept), traceArg(pp.trace.reclaimed))
371 pp.trace.inSweep = false
373 pp.trace.maySweep = false
376 // GCMarkAssistStart emits a MarkAssistBegin event.
377 func (tl traceLocker) GCMarkAssistStart() {
378 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCMarkAssistBegin, tl.stack(1))
381 // GCMarkAssistDone emits a MarkAssistEnd event.
382 func (tl traceLocker) GCMarkAssistDone() {
383 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCMarkAssistEnd)
386 // GoCreate emits a GoCreate event.
387 func (tl traceLocker) GoCreate(newg *g, pc uintptr) {
// Mark the new G's status as traced for this generation so no separate
// GoStatus event is emitted for it: GoCreate fully describes its state.
388 newg.trace.setStatusTraced(tl.gen)
389 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoCreate, traceArg(newg.goid), tl.startPC(pc), tl.stack(2))
392 // GoStart emits a GoStart event.
394 // Must be called with a valid P.
395 func (tl traceLocker) GoStart() {
398 w := tl.eventWriter(traceGoRunnable, traceProcRunning)
399 w = w.write(traceEvGoStart, traceArg(gp.goid), gp.trace.nextSeq(tl.gen))
// If this P is running a GC mark worker, attach a GoLabel naming the worker
// mode so the trace viewer can distinguish worker goroutines.
400 if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
401 w = w.write(traceEvGoLabel, trace.markWorkerLabels[tl.gen%2][pp.ptr().gcMarkWorkerMode])
406 // GoEnd emits a GoDestroy event.
408 // TODO(mknyszek): Rename this to GoDestroy.
409 func (tl traceLocker) GoEnd() {
410 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoDestroy)
413 // GoSched emits a GoStop event with a GoSched reason.
414 func (tl traceLocker) GoSched() {
415 tl.GoStop(traceGoStopGoSched)
418 // GoPreempt emits a GoStop event with a GoPreempted reason.
419 func (tl traceLocker) GoPreempt() {
420 tl.GoStop(traceGoStopPreempted)
423 // GoStop emits a GoStop event with the provided reason.
424 func (tl traceLocker) GoStop(reason traceGoStopReason) {
// The reason is translated through the per-generation table into an interned
// trace string ID, since reason values themselves are not stable.
425 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoStop, traceArg(trace.goStopReasons[tl.gen%2][reason]), tl.stack(1))
428 // GoPark emits a GoBlock event with the provided reason.
430 // TODO(mknyszek): Replace traceBlockReason with waitReason. It's silly
431 // that we have both, and waitReason is way more descriptive.
432 func (tl traceLocker) GoPark(reason traceBlockReason, skip int) {
433 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoBlock, traceArg(trace.goBlockReasons[tl.gen%2][reason]), tl.stack(skip))
436 // GoUnpark emits a GoUnblock event.
437 func (tl traceLocker) GoUnpark(gp *g, skip int) {
438 // Emit a GoWaiting status if necessary for the unblocked goroutine.
439 w := tl.eventWriter(traceGoRunning, traceProcRunning)
// acquireStatus wins the race to be the one M that emits gp's status for
// this generation; only then is the raw status event written.
440 if !gp.trace.statusWasTraced(tl.gen) && gp.trace.acquireStatus(tl.gen) {
441 // Careful: don't use the event writer. We never want status or in-progress events
442 // to trigger more in-progress events.
443 w.w = w.w.writeGoStatus(gp.goid, -1, traceGoWaiting, gp.inMarkAssist)
445 w.commit(traceEvGoUnblock, traceArg(gp.goid), gp.trace.nextSeq(tl.gen), tl.stack(skip))
448 // GoSysCall emits a GoSyscallBegin event.
450 // Must be called with a valid P.
451 func (tl traceLocker) GoSysCall() {
// Frame-pointer unwinding needs a hand-tuned skip count per platform;
// the switch below picks it (some cases elided in this listing).
454 case tracefpunwindoff():
455 // Unwind by skipping 1 frame relative to gp.syscallsp which is captured 3
456 // results by hard coding the number of frames in between our caller and the
457 // actual syscall, see cases below.
458 // TODO(felixge): Implement gp.syscallbp to avoid this workaround?
460 case GOOS == "solaris" || GOOS == "illumos":
461 // These platforms don't use a libc_read_trampoline.
464 // Skip the extra trampoline frame used on most systems.
467 // Scribble down the M that the P is currently attached to.
// Recorded so that, if another M later steals this P mid-syscall, ProcSteal
// can report which M it was stolen from (see pTraceState.mSyscallID).
468 tl.mp.p.ptr().trace.mSyscallID = int64(tl.mp.procid)
469 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoSyscallBegin, tl.stack(skip))
472 // GoSysExit emits a GoSyscallEnd event, possibly along with a GoSyscallBlocked event
475 // lostP must be true in all cases that a goroutine loses its P during a syscall.
476 // This means it's not sufficient to check if it has no P. In particular, it needs to be
477 // true in the following cases:
478 // - The goroutine lost its P, it ran some other code, and then got it back. It's now running with that P.
479 // - The goroutine lost its P and was unable to reacquire it, and is now running without a P.
480 // - The goroutine lost its P and acquired a different one, and is now running with that P.
481 func (tl traceLocker) GoSysExit(lostP bool) {
482 ev := traceEvGoSyscallEnd
483 procStatus := traceProcSyscall // Procs implicitly enter traceProcSyscall on GoSyscallBegin.
// lostP branch (condition elided in listing): use the "blocked" variant.
485 ev = traceEvGoSyscallEndBlocked
486 procStatus = traceProcRunning // If a G has a P when emitting this event, it reacquired a P and is indeed running.
// Clear the syscall M binding recorded in GoSysCall; -1 means "none".
488 tl.mp.p.ptr().trace.mSyscallID = -1
490 tl.eventWriter(traceGoSyscall, procStatus).commit(ev)
493 // ProcSteal indicates that our current M stole a P from another M.
495 // forMe indicates that the caller is stealing pp to wire it up to itself.
497 // The caller must have ownership of pp.
498 func (tl traceLocker) ProcSteal(pp *p, forMe bool) {
499 // Grab the M ID we stole from.
// Read-then-clear: the stolen-from M ID goes into the event; -1 marks the
// binding as gone (matching GoSysExit).
500 mStolenFrom := pp.trace.mSyscallID
501 pp.trace.mSyscallID = -1
503 // The status of the proc and goroutine, if we need to emit one here, is not evident from the
504 // context of just emitting this event alone. There are two cases. Either we're trying to steal
505 // the P just to get its attention (e.g. STW or sysmon retake) or we're trying to steal a P for
506 // ourselves specifically to keep running. The two contexts look different, but can be summarized
507 // fairly succinctly. In the former, we're a regular running goroutine and proc, if we have either.
508 // In the latter, we're a goroutine in a syscall,
509 goStatus := traceGoRunning
510 procStatus := traceProcRunning
// forMe branch (condition elided in listing): we're stealing for ourselves
// out of a syscall, so report syscall-flavored statuses instead.
512 goStatus = traceGoSyscall
513 procStatus = traceProcSyscallAbandoned
515 w := tl.eventWriter(goStatus, procStatus)
517 // Emit the status of the P we're stealing. We may have *just* done this, but we may not have,
518 // even if forMe is true, depending on whether we wired the P to ourselves already.
519 if !pp.trace.statusWasTraced(tl.gen) && pp.trace.acquireStatus(tl.gen) {
520 // Careful: don't use the event writer. We never want status or in-progress events
521 // to trigger more in-progress events.
522 w.w = w.w.writeProcStatus(uint64(pp.id), traceProcSyscallAbandoned, pp.trace.inSweep)
524 w.commit(traceEvProcSteal, traceArg(pp.id), pp.trace.nextSeq(tl.gen), traceArg(mStolenFrom))
527 // GoSysBlock is a no-op in the new tracer.
528 func (tl traceLocker) GoSysBlock(pp *p) {
531 // HeapAlloc emits a HeapAlloc event.
532 func (tl traceLocker) HeapAlloc(live uint64) {
533 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapAlloc, traceArg(live))
536 // HeapGoal reads the current heap goal and emits a HeapGoal event.
537 func (tl traceLocker) HeapGoal() {
538 heapGoal := gcController.heapGoal()
// ^uint64(0) (MaxUint64) is gcController's sentinel for "no heap goal";
// report it as a zero goal in the trace (assignment elided in listing).
539 if heapGoal == ^uint64(0) {
540 // Heap-based triggering is disabled.
543 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapGoal, traceArg(heapGoal))
546 // OneNewExtraM is a no-op in the new tracer. This is worth keeping around though because
547 // it's a good place to insert a thread-level event about the new extra M.
548 func (tl traceLocker) OneNewExtraM(_ *g) {
551 // GoCreateSyscall indicates that a goroutine has transitioned from dead to GoSyscall.
553 // Unlike GoCreate, the caller must be running on gp.
555 // This occurs when C code calls into Go. On pthread platforms it occurs only when
556 // a C thread calls into Go code for the first time.
557 func (tl traceLocker) GoCreateSyscall(gp *g) {
558 // N.B. We should never trace a status for this goroutine (which we're currently running on),
559 // since we want this to appear like goroutine creation.
560 gp.trace.setStatusTraced(tl.gen)
// traceGoBad/traceProcBad assert that no prior G/P status is expected here.
561 tl.eventWriter(traceGoBad, traceProcBad).commit(traceEvGoCreateSyscall, traceArg(gp.goid))
564 // GoDestroySyscall indicates that a goroutine has transitioned from GoSyscall to dead.
566 // Must not have a P.
568 // This occurs when Go code returns back to C. On pthread platforms it occurs only when
569 // the C thread is destroyed.
570 func (tl traceLocker) GoDestroySyscall() {
571 // N.B. If we trace a status here, we must never have a P, and we must be on a goroutine
572 // that is in the syscall state.
573 tl.eventWriter(traceGoSyscall, traceProcBad).commit(traceEvGoDestroySyscall)
576 // To access runtime functions from runtime/trace.
577 // See runtime/trace/annotation.go
579 // trace_userTaskCreate emits a UserTaskCreate event.
581 //go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
582 func trace_userTaskCreate(id, parentID uint64, taskType string) {
585 // Need to do this check because the caller won't have it.
// taskType is interned via tl.string; stack(3) skips the linkname shim frames.
588 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserTaskBegin, traceArg(id), traceArg(parentID), tl.string(taskType), tl.stack(3))
592 // trace_userTaskEnd emits a UserTaskEnd event.
594 //go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
595 func trace_userTaskEnd(id uint64) {
598 // Need to do this check because the caller won't have it.
601 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserTaskEnd, traceArg(id), tl.stack(2))
605 // trace_userTaskEnd emits a UserRegionBegin or UserRegionEnd event,
606 // depending on mode (0 == Begin, 1 == End).
608 // TODO(mknyszek): Just make this two functions.
610 //go:linkname trace_userRegion runtime/trace.userRegion
611 func trace_userRegion(id, mode uint64, name string) {
614 // Need to do this check because the caller won't have it.
// mode selects the event type (switch cases elided in this listing).
620 ev = traceEvUserRegionBegin
622 ev = traceEvUserRegionEnd
626 tl.eventWriter(traceGoRunning, traceProcRunning).commit(ev, traceArg(id), tl.string(name), tl.stack(3))
630 // trace_userTaskEnd emits a UserRegionBegin or UserRegionEnd event.
632 //go:linkname trace_userLog runtime/trace.userLog
633 func trace_userLog(id uint64, category, message string) {
636 // Need to do this check because the caller won't have it.
// category is interned; message uses uniqueString since log payloads rarely repeat.
639 tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserLog, traceArg(id), tl.string(category), tl.uniqueString(message), tl.stack(3))
643 // traceProcFree is called when a P is destroyed.
645 // This must run on the system stack to match the old tracer.
648 func traceProcFree(_ *p) {
651 // traceThreadDestroy is called when a thread is removed from
654 // mp must not be able to emit trace events anymore.
656 // sched.lock must be held to synchronize with traceAdvance.
657 func traceThreadDestroy(mp *m) {
658 assertLockHeld(&sched.lock)
660 // Flush all outstanding buffers to maintain the invariant
661 // that an M only has active buffers while on sched.freem
664 // Perform a traceAcquire/traceRelease on behalf of mp to
665 // synchronize with the tracer trying to flush our buffer
// Manual seqlock acquire (odd value = writing), mirroring traceAcquireEnabled,
// since mp is no longer running and cannot acquire for itself.
667 seq := mp.trace.seqlock.Add(1)
668 if debugTraceReentrancy && seq%2 != 1 {
669 throw("bad use of trace.seqlock or tracer is reentrant")
673 for i := range mp.trace.buf {
674 if mp.trace.buf[i] != nil {
675 // N.B. traceBufFlush accepts a generation, but it
676 // really just cares about gen%2.
677 traceBufFlush(mp.trace.buf[i], uintptr(i))
678 mp.trace.buf[i] = nil
// Manual seqlock release (back to even), mirroring traceRelease.
683 seq1 := mp.trace.seqlock.Add(1)
685 print("runtime: seq1=", seq1, "\n")
686 throw("bad use of trace.seqlock")
690 // Not used in the new tracer; solely for compatibility with the old tracer.
691 // nosplit because it's called from exitsyscall without a P.
694 func (_ traceLocker) RecordSyscallExitedTime(_ *g, _ *p) {