var writeBarrier struct {
enabled bool // compiler emits a check of this before calling write barrier
pad [3]byte // compiler uses 32-bit load for "enabled" field
- needed bool // identical to enabled, for now (TODO: dedup)
alignme uint64 // guarantee alignment so that compiler can use a 32 or 64-bit load
}
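// setGCPhase publishes the new GC phase and derives the write barrier
// flag from it: the barrier must be on exactly during _GCmark and
// _GCmarktermination. With the redundant "needed" field gone,
// "enabled" is the single flag that compiled code checks.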
//go:nosplit
func setGCPhase(x uint32) {
atomic.Store(&gcphase, x)
- writeBarrier.needed = gcphase == _GCmark || gcphase == _GCmarktermination
- writeBarrier.enabled = writeBarrier.needed
+ writeBarrier.enabled = gcphase == _GCmark || gcphase == _GCmarktermination
}
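
// A rough sketch of the check the compiler emits around a pointer
// store (illustrative only; the real sequence is generated inline and
// batches pointers through a per-P write barrier buffer before the
// store happens):
//
//	if writeBarrier.enabled {
//		// record the old and new pointer values for the GC
//	}
//	*slot = ptr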
// gcMarkWorkerMode represents the mode that a concurrent mark worker
// should operate in.
// Finish sweep N+1 before returning. We do this both to
// complete the cycle and because runtime.GC() is often used
// as part of tests and benchmarks to get the system into a
// relatively stable and isolated state.
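//
// sweepone returns ^uintptr(0) once there are no unswept spans left,
// and Gosched yields between spans so this goroutine does not
// monopolize its P while it helps the sweeper.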
for work.cycles.Load() == n+1 && sweepone() != ^uintptr(0) {
- sweep.nbgsweep++
Gosched()
}
}
switch t.kind {
case gcTriggerHeap:
- // Non-atomic access to gcController.heapLive for performance. If
- // we are going to trigger on this, this thread just
- // atomically wrote gcController.heapLive anyway and we'll see our
- // own write.
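// heapLive is an atomic now, so the test can simply load it; if this
// thread just updated heapLive, the load will observe its own write.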
trigger, _ := gcController.trigger()
return gcController.heapLive.Load() >= trigger
case gcTriggerTime:
// We check the transition condition continuously here in case
// this G gets delayed into the next GC cycle.
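// sweepone will do the right thing even if the GC isn't in the
// sweep phase.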
for trigger.test() && sweepone() != ^uintptr(0) {
- sweep.nbgsweep++
}
// Perform GC initialization and the sweep termination
// transition.
// For stats, check if this GC was forced by the user.
// Update it under gcsema to avoid gctrace getting wrong values.
work.userForced = trigger.kind == gcTriggerCycle
- if traceEnabled() {
- traceGCStart()
+ trace := traceAcquire()
+ if trace.ok() {
+ trace.GCStart()
+ traceRelease(trace)
}
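
// Tracing now uses an acquire/release pair rather than a bare enabled
// check: traceAcquire returns a handle, trace.ok reports whether
// tracing is active, events are emitted through the handle, and
// traceRelease ends the critical section, so the check and the event
// stay consistent even if tracing is stopped concurrently.
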
// Check that all Ps have finished deferred mcache flushes.
finishsweep_m()
})
- // clearpools before we start the GC. If we wait they memory will not be
+ // clearpools before we start the GC. If we wait the memory will not be
// reclaimed until the next GC cycle.
clearpools()
// Write barriers must be enabled before assists are
// enabled because they must be enabled before
// any non-leaf heap objects are marked. Since
// allocations are blocked until assists can
- // happen, we want enable assists as early as
+ // happen, we want to enable assists as early as
// possible.
setGCPhase(_GCmark)
- gcBgMarkPrepare() // Must happen before assist enable.
+ gcBgMarkPrepare() // Must happen before assists are enabled.
gcMarkRootPrepare()
// Mark all active tinyalloc blocks. Since we're
// allocating from these, they need to be black like
// other allocations.
// Flush all local buffers and collect flushedWork flags.
gcMarkDoneFlushed = 0
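// forEachP takes the wait reason itself: it marks this goroutine
// preemptible while it forces every P to a safepoint, so a worker
// trying to preempt us (e.g. for a stack scan) cannot deadlock
// against our attempt to preempt it.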
- systemstack(func() {
- gp := getg().m.curg
- // Mark the user stack as preemptible so that it may be scanned.
- // Otherwise, our attempt to force all P's to a safepoint could
- // result in a deadlock as we attempt to preempt a worker that's
- // trying to preempt us (e.g. for a stack scan).
- casGToWaiting(gp, _Grunning, waitReasonGCMarkTermination)
- forEachP(func(pp *p) {
- // Flush the write barrier buffer, since this may add
- // work to the gcWork.
- wbBufFlush1(pp)
-
- // Flush the gcWork, since this may create global work
- // and set the flushedWork flag.
- //
- // TODO(austin): Break up these workbufs to
- // better distribute work.
- pp.gcw.dispose()
- // Collect the flushedWork flag.
- if pp.gcw.flushedWork {
- atomic.Xadd(&gcMarkDoneFlushed, 1)
- pp.gcw.flushedWork = false
- }
- })
- casgstatus(gp, _Gwaiting, _Grunning)
+ forEachP(waitReasonGCMarkTermination, func(pp *p) {
+ // Flush the write barrier buffer, since this may add
+ // work to the gcWork.
+ wbBufFlush1(pp)
+
+ // Flush the gcWork, since this may create global work
+ // and set the flushedWork flag.
+ //
+ // TODO(austin): Break up these workbufs to
+ // better distribute work.
+ pp.gcw.dispose()
+ // Collect the flushedWork flag.
+ if pp.gcw.flushedWork {
+ atomic.Xadd(&gcMarkDoneFlushed, 1)
+ pp.gcw.flushedWork = false
+ }
})
if gcMarkDoneFlushed != 0 {
mp.traceback = 0
casgstatus(curgp, _Gwaiting, _Grunning)
- if traceEnabled() {
- traceGCDone()
+ trace := traceAcquire()
+ if trace.ok() {
+ trace.GCDone()
+ traceRelease(trace)
}
// all done
// Reset idle time stat.
sched.idleTime.Store(0)
- // Reset sweep state.
- sweep.nbgsweep = 0
- sweep.npausesweep = 0
-
if work.userForced {
memstats.numforcedgc++
}
//
// Also, flush the pinner cache, to avoid leaking that memory
// indefinitely.
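// mheap_.lock may only be acquired on the system stack, hence the
// nested systemstack call around the page cache flush for idle Ps.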
- systemstack(func() {
- forEachP(func(pp *p) {
- pp.mcache.prepareForSweep()
- if pp.status == _Pidle {
- systemstack(func() {
- lock(&mheap_.lock)
- pp.pcache.flush(&mheap_.pages)
- unlock(&mheap_.lock)
- })
- }
- pp.pinnerCache = nil
- })
+ forEachP(waitReasonFlushProcCaches, func(pp *p) {
+ pp.mcache.prepareForSweep()
+ if pp.status == _Pidle {
+ systemstack(func() {
+ lock(&mheap_.lock)
+ pp.pcache.flush(&mheap_.pages)
+ unlock(&mheap_.lock)
+ })
+ }
+ pp.pinnerCache = nil
})
if sl.valid {
// Now that we've swept stale spans in mcaches, they don't
// count against unswept spans.
}
// Sweep all spans eagerly.
for sweepone() != ^uintptr(0) {
- sweep.npausesweep++
}
// Free workbufs eagerly.
prepareFreeWorkbufs()