runtime: make all GC mark workers yield for forEachP
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index 96f88f782f436aae30725b491cdbe6005fa2942c..004dc88828a0a4efe7fc3b122583631bcad39a73 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -420,7 +420,11 @@ retry:
                // If the CPU limiter is enabled, intentionally don't
                // assist to reduce the amount of CPU time spent in the GC.
                if traced {
-                       traceGCMarkAssistDone()
+                       trace := traceAcquire()
+                       if trace.ok() {
+                               trace.GCMarkAssistDone()
+                               traceRelease(trace)
+                       }
                }
                return
        }
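Note: this first hunk introduces the pattern repeated throughout the change: the old one-shot traceGCMarkAssistDone call becomes the new tracer's acquire/check/release discipline. A minimal sketch of that discipline, with the surrounding context illustrative only (traceAcquire, trace.ok, and traceRelease are the runtime's real identifiers):

	// Sketch: emitting an event under the new tracer. traceAcquire
	// pins the tracer state; trace.ok() is the authoritative check
	// that tracing is still enabled, since tracing may stop after
	// any earlier traceEnabled() fast-path check.
	trace := traceAcquire()
	if trace.ok() {
		trace.GCMarkAssistDone() // the event is written here
		traceRelease(trace)      // unpin; no events may follow
	}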
@@ -461,15 +465,22 @@ retry:
                        // We were able to steal all of the credit we
                        // needed.
                        if traced {
-                               traceGCMarkAssistDone()
+                               trace := traceAcquire()
+                               if trace.ok() {
+                                       trace.GCMarkAssistDone()
+                                       traceRelease(trace)
+                               }
                        }
                        return
                }
        }
-
        if traceEnabled() && !traced {
-               traced = true
-               traceGCMarkAssistStart()
+               trace := traceAcquire()
+               if trace.ok() {
+                       traced = true
+                       trace.GCMarkAssistStart()
+                       traceRelease(trace)
+               }
        }
 
        // Perform assist work
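Note: the traced flag keeps the start/done events paired. It is set only once trace.ok() succeeds, so GCMarkAssistDone is emitted exactly on the paths where GCMarkAssistStart was actually written, even if tracing starts or stops mid-assist. A condensed, hedged sketch of that invariant across the function (not a literal excerpt):

	traced := false
	if traceEnabled() && !traced { // cheap, unsynchronized hint
		trace := traceAcquire()
		if trace.ok() { // authoritative re-check
			traced = true // the Start event really was emitted
			trace.GCMarkAssistStart()
			traceRelease(trace)
		}
	}
	// ... perform assist work ...
	if traced {
		// emit GCMarkAssistDone under traceAcquire, as above
	}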
@@ -515,7 +526,11 @@ retry:
                // this G's assist debt, or the GC cycle is over.
        }
        if traced {
-               traceGCMarkAssistDone()
+               trace := traceAcquire()
+               if trace.ok() {
+                       trace.GCMarkAssistDone()
+                       traceRelease(trace)
+               }
        }
 }
 
@@ -1053,7 +1068,7 @@ func gcDrainMarkWorkerFractional(gcw *gcWork) {
 // credit to gcController.bgScanCredit every gcCreditSlack units of
 // scan work.
 //
-// gcDrain will always return if there is a pending STW.
+// gcDrain will always return if there is a pending STW or forEachP.
 //
 // Disabling write barriers is necessary to ensure that after we've
 // confirmed that we've drained gcw, that we don't accidentally end
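Note: the doc-comment change mirrors the new yield condition in the drain loops below. For context, forEachP works by publishing a callback and flagging every P, so any long-running non-preemptible loop like gcDrain must poll that flag or forEachP can stall behind it. A heavily simplified, hedged sketch of that mechanism (sched.safePointFn and p.runSafePointFn are the real fields; the real forEachP also handles idle and syscall-blocked Ps):

	// Simplified sketch of the scheduler side; gcDrain now polls
	// pp.runSafePointFn so this request is serviced promptly.
	func forEachP(fn func(*p)) {
		sched.safePointFn = fn
		for _, pp := range allp {
			atomic.Store(&pp.runSafePointFn, 1)
		}
		// ... preempt running Ps and wait until every P has
		// reached a safe point and run fn ...
	}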
@@ -1069,7 +1084,10 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
                throw("gcDrain phase incorrect")
        }
 
+       // N.B. We must be running in a non-preemptible context, so it's
+       // safe to hold a reference to our P here.
        gp := getg().m.curg
+       pp := gp.m.p.ptr()
        preemptible := flags&gcDrainUntilPreempt != 0
        flushBgCredit := flags&gcDrainFlushBgCredit != 0
        idle := flags&gcDrainIdle != 0
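Note: caching pp is sound only because this code runs non-preemptibly, as the added comment says; if the goroutine could be rescheduled, its M might later run on a different P and pp would be stale. A minimal sketch of the general invariant (acquirem and releasem are the runtime's real primitives; the context is illustrative):

	mp := acquirem()  // disable preemption: pins us to this M and its P
	pp := mp.p.ptr()  // safe to cache: we cannot migrate off this P
	_ = pp            // ... use pp ...
	releasem(mp)      // pp must not be trusted past this point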
@@ -1091,8 +1109,9 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
 
        // Drain root marking jobs.
        if work.markrootNext < work.markrootJobs {
-               // Stop if we're preemptible or if someone wants to STW.
-               for !(gp.preempt && (preemptible || sched.gcwaiting.Load())) {
+               // Stop if we're preemptible, if someone wants to STW, or if
+               // someone is calling forEachP.
+               for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
                        job := atomic.Xadd(&work.markrootNext, +1) - 1
                        if job >= work.markrootJobs {
                                break
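Note: the same three-clause yield condition now guards both the root-marking loop above and the heap-marking loop below. Purely for illustration, a hypothetical helper naming that predicate (the CL itself keeps the condition inline):

	// Hypothetical helper, not part of this change: reports whether
	// gcDrain should stop draining and yield to the scheduler.
	func gcDrainShouldYield(gp *g, pp *p, preemptible bool) bool {
		return gp.preempt && // preemption was requested, and ...
			(preemptible || // ... we may stop for any reason,
				sched.gcwaiting.Load() || // ... or a STW is pending,
				pp.runSafePointFn != 0) // ... or a forEachP is pending
	}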
@@ -1105,8 +1124,16 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
        }
 
        // Drain heap marking jobs.
-       // Stop if we're preemptible or if someone wants to STW.
-       for !(gp.preempt && (preemptible || sched.gcwaiting.Load())) {
+       //
+       // Stop if we're preemptible, if someone wants to STW, or if
+       // someone is calling forEachP.
+       //
+       // TODO(mknyszek): Consider always checking gp.preempt instead
+       // of having the preempt flag, and making an exception for certain
+       // mark workers in retake. That might be simpler than trying to
+       // enumerate all the reasons why we might want to preempt, even
+       // if we're supposed to be mostly non-preemptible.
+       for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
                // Try to keep work available on the global queue. We used to
                // check if there were waiting workers, but it's better to
                // just keep work available than to make workers wait. In the