pp := gp.m.oldp.ptr()
lock(&sched.lock)
+ trace := traceAcquire()
if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
- trace := traceAcquire()
if trace.ok() {
if goexperiment.ExecTracer2 {
// This is a steal in the new tracer. While it's very likely
if sched.stopwait--; sched.stopwait == 0 {
notewakeup(&sched.stopnote)
}
+ } else if trace.ok() {
+ traceRelease(trace)
}
unlock(&sched.lock)
}
}
// Try to re-acquire the last P.
+ trace := traceAcquire()
if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
// There's a cpu for us, so we can run.
wirep(oldp)
- exitsyscallfast_reacquired()
+ exitsyscallfast_reacquired(trace)
+ if trace.ok() {
+ traceRelease(trace)
+ }
return true
}
+ if trace.ok() {
+ traceRelease(trace)
+ }
// Try to get any other idle P.
if sched.pidle != 0 {
// syscall.
//
//go:nosplit
-func exitsyscallfast_reacquired() {
+func exitsyscallfast_reacquired(trace traceLocker) {
gp := getg()
if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
- trace := traceAcquire()
if trace.ok() {
// The p was retaken and then entered a syscall again (since gp.m.syscalltick has changed).
// traceGoSysBlock for this syscall was already emitted,
// Denote completion of the current syscall.
trace.GoSysExit(true)
}
- traceRelease(trace)
})
}
gp.m.p.ptr().syscalltick++
// Otherwise the M from which we retake can exit the syscall,
// increment nmidle and report deadlock.
incidlelocked(-1)
+ trace := traceAcquire()
if atomic.Cas(&pp.status, s, _Pidle) {
- trace := traceAcquire()
if trace.ok() {
trace.GoSysBlock(pp)
trace.ProcSteal(pp, false)
n++
pp.syscalltick++
handoffp(pp)
+ } else if trace.ok() {
+ traceRelease(trace)
}
incidlelocked(1)
lock(&allpLock)