From: Michael Anthony Knyszek
Date: Mon, 23 Oct 2023 19:30:35 +0000 (+0000)
Subject: runtime: make all GC mark workers yield for forEachP
X-Git-Tag: go1.22rc1~358
X-Git-Url: http://www.git.cypherpunks.ru/?a=commitdiff_plain;ds=sidebyside;h=25895d1c995d71ca505e1e7a3c79daa49620db74;p=gostls13.git

runtime: make all GC mark workers yield for forEachP

Currently dedicated GC mark workers really try to avoid getting
preempted. The one exception is for a pending STW, indicated by
sched.gcwaiting. This is currently fine because other kinds of
preemptions don't matter to the mark workers: they're intentionally
bound to their P.

With the new execution tracer we're going to want to use forEachP to
get the attention of all Ps. We may want to do this during a GC cycle.
forEachP doesn't set sched.gcwaiting, so it may end up waiting the full
GC mark phase, burning a thread and a P in the meantime. This can mean
basically seconds of waiting and trying to preempt GC mark workers.

This change makes all mark workers yield if (*p).runSafePointFn != 0 so
that the workers actually yield somewhat promptly in response to a
forEachP attempt.

Change-Id: I7430baf326886b9f7a868704482a224dae7c9bba
Reviewed-on: https://go-review.googlesource.com/c/go/+/537235
Reviewed-by: Michael Pratt
LUCI-TryBot-Result: Go LUCI
Reviewed-by: Mauri de Souza Meneguzzo
Auto-Submit: Michael Knyszek
---

diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index 17412bf723..004dc88828 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -1068,7 +1068,7 @@ func gcDrainMarkWorkerFractional(gcw *gcWork) {
 // credit to gcController.bgScanCredit every gcCreditSlack units of
 // scan work.
 //
-// gcDrain will always return if there is a pending STW.
+// gcDrain will always return if there is a pending STW or forEachP.
 //
 // Disabling write barriers is necessary to ensure that after we've
 // confirmed that we've drained gcw, that we don't accidentally end
@@ -1084,7 +1084,10 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
 		throw("gcDrain phase incorrect")
 	}
 
+	// N.B. We must be running in a non-preemptible context, so it's
+	// safe to hold a reference to our P here.
 	gp := getg().m.curg
+	pp := gp.m.p.ptr()
 	preemptible := flags&gcDrainUntilPreempt != 0
 	flushBgCredit := flags&gcDrainFlushBgCredit != 0
 	idle := flags&gcDrainIdle != 0
@@ -1106,8 +1109,9 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
 
 	// Drain root marking jobs.
 	if work.markrootNext < work.markrootJobs {
-		// Stop if we're preemptible or if someone wants to STW.
-		for !(gp.preempt && (preemptible || sched.gcwaiting.Load())) {
+		// Stop if we're preemptible, if someone wants to STW, or if
+		// someone is calling forEachP.
+		for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
 			job := atomic.Xadd(&work.markrootNext, +1) - 1
 			if job >= work.markrootJobs {
 				break
@@ -1120,8 +1124,16 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
 	}
 
 	// Drain heap marking jobs.
-	// Stop if we're preemptible or if someone wants to STW.
-	for !(gp.preempt && (preemptible || sched.gcwaiting.Load())) {
+	//
+	// Stop if we're preemptible, if someone wants to STW, or if
+	// someone is calling forEachP.
+	//
+	// TODO(mknyszek): Consider always checking gp.preempt instead
+	// of having the preempt flag, and making an exception for certain
+	// mark workers in retake. That might be simpler than trying to
+	// enumerate all the reasons why we might want to preempt, even
+	// if we're supposed to be mostly non-preemptible.
+	for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
 		// Try to keep work available on the global queue. We used to
 		// check if there were waiting workers, but it's better to
 		// just keep work available than to make workers wait. In the
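The stop condition added above is easiest to see in isolation. Below is a
minimal, standalone Go sketch -- not runtime code; the names worker,
scheduler, drain, stopWorld, and safePointFn are invented for illustration --
showing a worker loop that is normally reluctant to stop but bails out as
soon as a safe-point request is pending, which is the behavior this change
gives dedicated mark workers via the pp.runSafePointFn != 0 check.

// Standalone illustration only: this is not the runtime's code. It mimics
// the shape of the gcDrain stop condition with invented types and names.
package main

import (
	"fmt"
	"sync/atomic"
)

// worker stands in for a dedicated mark worker's goroutine state.
type worker struct {
	preempt     atomic.Bool // cooperative preemption request, like gp.preempt
	preemptible bool        // like the gcDrainUntilPreempt flag
}

// scheduler stands in for the global and per-P state the condition consults.
type scheduler struct {
	stopWorld   atomic.Bool  // like sched.gcwaiting: someone wants to STW
	safePointFn atomic.Int32 // like pp.runSafePointFn: nonzero when a forEachP call is pending
}

// drain processes jobs until it runs out or a stop condition applies,
// mirroring the loop condition added to gcDrain.
func drain(w *worker, s *scheduler, jobs []int) int {
	done := 0
	for range jobs {
		// Stop if we're preemptible, if someone wants to stop the world,
		// or if a safe-point function is pending on our P.
		if w.preempt.Load() && (w.preemptible || s.stopWorld.Load() || s.safePointFn.Load() != 0) {
			break
		}
		done++ // do the actual work here
	}
	return done
}

func main() {
	w := &worker{preemptible: false} // dedicated workers are not generally preemptible
	s := &scheduler{}
	jobs := make([]int, 10)

	// With only a preemption request and no STW or safe-point function,
	// a non-preemptible worker keeps draining.
	w.preempt.Store(true)
	fmt.Println("no safe point pending:", drain(w, s, jobs), "jobs done")

	// A pending forEachP-style safe-point request now interrupts the drain
	// even though the worker is otherwise non-preemptible.
	s.safePointFn.Store(1)
	fmt.Println("safe point pending:  ", drain(w, s, jobs), "jobs done")
}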