runtime: make it harder to introduce deadlocks with forEachP
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 44ff5fb08b2d854299dc7099f9b30d090637c813..d015d6dbabaa8035f28b5debad991c9da3840195 100644
@@ -218,7 +218,6 @@ var gcphase uint32
 var writeBarrier struct {
        enabled bool    // compiler emits a check of this before calling write barrier
        pad     [3]byte // compiler uses 32-bit load for "enabled" field
-       needed  bool    // identical to enabled, for now (TODO: dedup)
        alignme uint64  // guarantee alignment so that compiler can use a 32 or 64-bit load
 }
 
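This hunk drops the needed field, which was documented as identical to
enabled. The layout comments explain why a single flag suffices: enabled
plus pad fill one 32-bit word the compiler can test with a single load,
and alignme keeps the struct aligned for that load. A standalone sketch
(wbFlag is a hypothetical mirror of the struct, not the runtime's type)
that checks the layout the comments promise:

package main

import (
	"fmt"
	"unsafe"
)

// wbFlag mirrors the slimmed-down writeBarrier struct for illustration.
// enabled plus pad occupy the first 32-bit word; alignme forces the
// alignment that makes a single-instruction load of that word safe.
type wbFlag struct {
	enabled bool
	pad     [3]byte
	alignme uint64
}

func main() {
	var wb wbFlag
	fmt.Println(unsafe.Offsetof(wb.alignme)) // 8 on 64-bit: flag word, then alignment padding
	fmt.Println(unsafe.Sizeof(wb))           // 16 on 64-bit targets
}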
@@ -236,8 +235,7 @@ const (
 //go:nosplit
 func setGCPhase(x uint32) {
        atomic.Store(&gcphase, x)
-       writeBarrier.needed = gcphase == _GCmark || gcphase == _GCmarktermination
-       writeBarrier.enabled = writeBarrier.needed
+       writeBarrier.enabled = gcphase == _GCmark || gcphase == _GCmarktermination
 }
 
 // gcMarkWorkerMode represents the mode that a concurrent mark worker
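With needed gone, setGCPhase derives the write barrier flag from the
phase in exactly one place. A runnable sketch of the same pattern, with
invented phase constants standing in for _GCmark and _GCmarktermination:

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	gcOff uint32 = iota
	gcMark
	gcMarkTermination
)

var (
	phase     atomic.Uint32
	wbEnabled atomic.Bool // stands in for writeBarrier.enabled
)

// setPhase stores the phase and derives the barrier flag from it,
// rather than maintaining two always-equal fields.
func setPhase(x uint32) {
	phase.Store(x)
	wbEnabled.Store(x == gcMark || x == gcMarkTermination)
}

func main() {
	setPhase(gcMark)
	fmt.Println(wbEnabled.Load()) // true: barriers on during marking
	setPhase(gcOff)
	fmt.Println(wbEnabled.Load()) // false: barriers off between cycles
}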
@@ -476,7 +474,6 @@ func GC() {
        // as part of tests and benchmarks to get the system into a
        // relatively stable and isolated state.
        for work.cycles.Load() == n+1 && sweepone() != ^uintptr(0) {
-               sweep.nbgsweep++
                Gosched()
        }
 
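The deleted line was only a sweep counter; the loop itself is the
familiar drain pattern, using ^uintptr(0) as the "nothing left to
sweep" sentinel. A minimal runnable sketch of that pattern (sweepOne
and spans are invented stand-ins for the runtime's sweepone and its
work queue):

package main

import (
	"fmt"
	"runtime"
)

var spans = []string{"span-a", "span-b", "span-c"}

// sweepOne sweeps one unit of work and returns ^uintptr(0) once the
// queue is empty, mimicking sweepone's sentinel return value.
func sweepOne() uintptr {
	if len(spans) == 0 {
		return ^uintptr(0)
	}
	spans = spans[1:]
	return 1 // the real sweepone returns pages freed
}

func main() {
	for sweepOne() != ^uintptr(0) {
		runtime.Gosched() // yield between units, as the loop above does
	}
	fmt.Println("fully swept:", len(spans) == 0)
}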
@@ -575,10 +572,6 @@ func (t gcTrigger) test() bool {
        }
        switch t.kind {
        case gcTriggerHeap:
-               // Non-atomic access to gcController.heapLive for performance. If
-               // we are going to trigger on this, this thread just
-               // atomically wrote gcController.heapLive anyway and we'll see our
-               // own write.
                trigger, _ := gcController.trigger()
                return gcController.heapLive.Load() >= trigger
        case gcTriggerTime:
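The removed comment referred to a non-atomic read, but the code as
shown performs an atomic Load compared against the controller's
trigger. A toy, runnable version of that predicate (the heapLive
variable and the 4 MiB trigger are invented for the example):

package main

import (
	"fmt"
	"sync/atomic"
)

var heapLive atomic.Uint64 // stands in for gcController.heapLive

func trigger() uint64 { return 4 << 20 } // assumed fixed 4 MiB trigger

// shouldStartGC mirrors the gcTriggerHeap arm of gcTrigger.test.
func shouldStartGC() bool {
	return heapLive.Load() >= trigger()
}

func main() {
	heapLive.Store(3 << 20)
	fmt.Println(shouldStartGC()) // false: 3 MiB is under the trigger
	heapLive.Add(2 << 20)
	fmt.Println(shouldStartGC()) // true: 5 MiB crosses it
}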
@@ -624,7 +617,6 @@ func gcStart(trigger gcTrigger) {
        // We check the transition condition continuously here in case
        // this G gets delayed into the next GC cycle.
        for trigger.test() && sweepone() != ^uintptr(0) {
-               sweep.nbgsweep++
        }
 
        // Perform GC initialization and the sweep termination
@@ -655,8 +647,10 @@ func gcStart(trigger gcTrigger) {
        // Update it under gcsema to avoid gctrace getting wrong values.
        work.userForced = trigger.kind == gcTriggerCycle
 
-       if traceEnabled() {
-               traceGCStart()
+       trace := traceAcquire()
+       if trace.ok() {
+               trace.GCStart()
+               traceRelease(trace)
        }
 
        // Check that all Ps have finished deferred mcache flushes.
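Here and in gcMarkTermination below, a bare traceEnabled() check
becomes an acquire/ok/release sequence, presumably so the tracer's
state cannot change between the check and the event write. A
self-contained sketch of that call shape (traceLocker and friends are
stub types, not the runtime tracer):

package main

import "fmt"

type traceLocker struct{ enabled bool }

// traceAcquire returns a locked view of the tracer; ok reports whether
// tracing is active for this view.
func traceAcquire() traceLocker  { return traceLocker{enabled: true} }
func (t traceLocker) ok() bool   { return t.enabled }
func (t traceLocker) GCStart()   { fmt.Println("trace event: GC start") }
func traceRelease(t traceLocker) {}

func main() {
	trace := traceAcquire()
	if trace.ok() {
		trace.GCStart()
		traceRelease(trace)
	}
}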
@@ -690,7 +684,7 @@ func gcStart(trigger gcTrigger) {
                finishsweep_m()
        })
 
-       // clearpools before we start the GC. If we wait they memory will not be
+       // clearpools before we start the GC. If we wait the memory will not be
        // reclaimed until the next GC cycle.
        clearpools()
 
@@ -722,11 +716,11 @@ func gcStart(trigger gcTrigger) {
        // enabled because they must be enabled before
        // any non-leaf heap objects are marked. Since
        // allocations are blocked until assists can
-       // happen, we want enable assists as early as
+       // happen, we want to enable assists as early as
        // possible.
        setGCPhase(_GCmark)
 
-       gcBgMarkPrepare() // Must happen before assist enable.
+       gcBgMarkPrepare() // Must happen before assists are enabled.
        gcMarkRootPrepare()
 
        // Mark all active tinyalloc blocks. Since we're
@@ -830,31 +824,22 @@ top:
 
        // Flush all local buffers and collect flushedWork flags.
        gcMarkDoneFlushed = 0
-       systemstack(func() {
-               gp := getg().m.curg
-               // Mark the user stack as preemptible so that it may be scanned.
-               // Otherwise, our attempt to force all P's to a safepoint could
-               // result in a deadlock as we attempt to preempt a worker that's
-               // trying to preempt us (e.g. for a stack scan).
-               casGToWaiting(gp, _Grunning, waitReasonGCMarkTermination)
-               forEachP(func(pp *p) {
-                       // Flush the write barrier buffer, since this may add
-                       // work to the gcWork.
-                       wbBufFlush1(pp)
-
-                       // Flush the gcWork, since this may create global work
-                       // and set the flushedWork flag.
-                       //
-                       // TODO(austin): Break up these workbufs to
-                       // better distribute work.
-                       pp.gcw.dispose()
-                       // Collect the flushedWork flag.
-                       if pp.gcw.flushedWork {
-                               atomic.Xadd(&gcMarkDoneFlushed, 1)
-                               pp.gcw.flushedWork = false
-                       }
-               })
-               casgstatus(gp, _Gwaiting, _Grunning)
+       forEachP(waitReasonGCMarkTermination, func(pp *p) {
+               // Flush the write barrier buffer, since this may add
+               // work to the gcWork.
+               wbBufFlush1(pp)
+
+               // Flush the gcWork, since this may create global work
+               // and set the flushedWork flag.
+               //
+               // TODO(austin): Break up these workbufs to
+               // better distribute work.
+               pp.gcw.dispose()
+               // Collect the flushedWork flag.
+               if pp.gcw.flushedWork {
+                       atomic.Xadd(&gcMarkDoneFlushed, 1)
+                       pp.gcw.flushedWork = false
+               }
        })
 
        if gcMarkDoneFlushed != 0 {
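This is the commit's headline change: the systemstack, casGToWaiting,
and casgstatus dance that every caller had to remember moves into
forEachP itself, keyed by the new waitReason argument, so a call site
can no longer skip the transition and deadlock against a preemption
request. A standalone analogue of that API shape (goroutine,
waitReason, and the four simulated Ps are all invented):

package main

import "fmt"

type waitReason string

type goroutine struct{ status string }

// forEachP parks the caller under the given reason, runs fn on every
// simulated P, then restores the running status; callers cannot forget
// the bookkeeping because the helper owns it.
func forEachP(gp *goroutine, reason waitReason, fn func(pp int)) {
	gp.status = "waiting (" + string(reason) + ")"
	defer func() { gp.status = "running" }()
	for pp := 0; pp < 4; pp++ {
		fn(pp)
	}
}

func main() {
	gp := &goroutine{status: "running"}
	forEachP(gp, "GC mark termination", func(pp int) {
		fmt.Printf("flush write barrier buffer and gcWork on P%d\n", pp)
	})
	fmt.Println("caller restored to:", gp.status)
}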
@@ -997,8 +982,10 @@ func gcMarkTermination() {
        mp.traceback = 0
        casgstatus(curgp, _Gwaiting, _Grunning)
 
-       if traceEnabled() {
-               traceGCDone()
+       trace := traceAcquire()
+       if trace.ok() {
+               trace.GCDone()
+               traceRelease(trace)
        }
 
        // all done
@@ -1051,10 +1038,6 @@ func gcMarkTermination() {
        // Reset idle time stat.
        sched.idleTime.Store(0)
 
-       // Reset sweep state.
-       sweep.nbgsweep = 0
-       sweep.npausesweep = 0
-
        if work.userForced {
                memstats.numforcedgc++
        }
@@ -1124,18 +1107,16 @@ func gcMarkTermination() {
        //
        // Also, flush the pinner cache, to avoid leaking that memory
        // indefinitely.
-       systemstack(func() {
-               forEachP(func(pp *p) {
-                       pp.mcache.prepareForSweep()
-                       if pp.status == _Pidle {
-                               systemstack(func() {
-                                       lock(&mheap_.lock)
-                                       pp.pcache.flush(&mheap_.pages)
-                                       unlock(&mheap_.lock)
-                               })
-                       }
-                       pp.pinnerCache = nil
-               })
+       forEachP(waitReasonFlushProcCaches, func(pp *p) {
+               pp.mcache.prepareForSweep()
+               if pp.status == _Pidle {
+                       systemstack(func() {
+                               lock(&mheap_.lock)
+                               pp.pcache.flush(&mheap_.pages)
+                               unlock(&mheap_.lock)
+                       })
+               }
+               pp.pinnerCache = nil
        })
        if sl.valid {
                // Now that we've swept stale spans in mcaches, they don't
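The same forEachP conversion applies here, now with
waitReasonFlushProcCaches. Note the inner systemstack call survives:
only idle Ps hand their page caches back to the heap, and that handoff
happens under the heap lock. A toy version of that conditional flush
(all structure invented for the sketch):

package main

import (
	"fmt"
	"sync"
)

type pageCache struct{ pages int }

type heap struct {
	mu    sync.Mutex
	pages int
}

// flush returns the cache's pages to the shared heap; callers must
// hold the heap lock, mirroring the lock/flush/unlock above.
func (c *pageCache) flush(h *heap) {
	h.pages += c.pages
	c.pages = 0
}

func main() {
	h := &heap{}
	ps := []struct {
		idle  bool
		cache pageCache
	}{{true, pageCache{3}}, {false, pageCache{5}}}

	for i := range ps {
		if ps[i].idle { // mirrors the pp.status == _Pidle check
			h.mu.Lock()
			ps[i].cache.flush(h)
			h.mu.Unlock()
		}
	}
	fmt.Println("pages returned to heap:", h.pages) // 3: only the idle P flushed
}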
@@ -1593,7 +1574,6 @@ func gcSweep(mode gcMode) bool {
                }
                // Sweep all spans eagerly.
                for sweepone() != ^uintptr(0) {
-                       sweep.npausesweep++
                }
                // Free workbufs eagerly.
                prepareFreeWorkbufs()