)
const (
- _DebugGC = 0
- _ConcurrentSweep = true
- _FinBlockSize = 4 * 1024
+ _DebugGC = 0
+ _FinBlockSize = 4 * 1024
+
+ // concurrentSweep is a debug flag. Disabling this flag
+ // ensures all spans are swept while the world is stopped.
+ concurrentSweep = true
// sweepMinHeapDistance is a lower bound on the heap distance
// (in bytes) reserved for concurrent sweeping between GC cycles.
sweepMinHeapDistance = 1024 * 1024
)
-// heapObjectsCanMove is always false in the current garbage collector.
+// heapObjectsCanMove always returns false in the current garbage collector.
// It exists for go4.org/unsafe/assume-no-moving-gc, which is an
// unfortunate idea that had an even more unfortunate implementation.
// Every time a new Go release happened, the package stopped building,
// and the authors had to add a new file with a new //go:build line, and
// then the entire ecosystem of packages with that as a dependency had
// to explicitly update to the new version. Many packages depend on
// assume-no-moving-gc transitively, through paths like
// inet.af/netaddr -> go4.org/intern -> assume-no-moving-gc.
// This was causing a significant amount of friction around each new
// release, so we added this bool for the package to use instead.
//
// If the Go garbage collector ever does move heap objects, we can set
// this to true to break all the programs using assume-no-moving-gc.
-var heapObjectsCanMove = false
+//
+//go:linkname heapObjectsCanMove
+func heapObjectsCanMove() bool {
+ return false
+}
func gcinit() {
if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
var writeBarrier struct {
enabled bool // compiler emits a check of this before calling write barrier
pad [3]byte // compiler uses 32-bit load for "enabled" field
- needed bool // identical to enabled, for now (TODO: dedup)
alignme uint64 // guarantee alignment so that compiler can use a 32 or 64-bit load
}
//go:nosplit
func setGCPhase(x uint32) {
atomic.Store(&gcphase, x)
- writeBarrier.needed = gcphase == _GCmark || gcphase == _GCmarktermination
- writeBarrier.enabled = writeBarrier.needed
+ writeBarrier.enabled = gcphase == _GCmark || gcphase == _GCmarktermination
}
// gcMarkWorkerMode represents the mode that a concurrent mark worker
// as part of tests and benchmarks to get the system into a
// relatively stable and isolated state.
for work.cycles.Load() == n+1 && sweepone() != ^uintptr(0) {
- sweep.nbgsweep++
Gosched()
}
}
switch t.kind {
case gcTriggerHeap:
- // Non-atomic access to gcController.heapLive for performance. If
- // we are going to trigger on this, this thread just
- // atomically wrote gcController.heapLive anyway and we'll see our
- // own write.
trigger, _ := gcController.trigger()
return gcController.heapLive.Load() >= trigger
case gcTriggerTime:
// We check the transition condition continuously here in case
// this G gets delayed in to the next GC cycle.
for trigger.test() && sweepone() != ^uintptr(0) {
- sweep.nbgsweep++
}
// Perform GC initialization and the sweep termination
// Update it under gcsema to avoid gctrace getting wrong values.
work.userForced = trigger.kind == gcTriggerCycle
- if traceEnabled() {
- traceGCStart()
+ trace := traceAcquire()
+ if trace.ok() {
+ trace.GCStart()
+ traceRelease(trace)
}
// Check that all Ps have finished deferred mcache flushes.
finishsweep_m()
})
- // clearpools before we start the GC. If we wait they memory will not be
+ // clearpools before we start the GC. If we wait the memory will not be
// reclaimed until the next GC cycle.
clearpools()
// enabled because they must be enabled before
// any non-leaf heap objects are marked. Since
// allocations are blocked until assists can
- // happen, we want enable assists as early as
+ // happen, we want to enable assists as early as
// possible.
setGCPhase(_GCmark)
- gcBgMarkPrepare() // Must happen before assist enable.
+ gcBgMarkPrepare() // Must happen before assists are enabled.
gcMarkRootPrepare()
// Mark all active tinyalloc blocks. Since we're
// before continuing.
})
+ var stwSwept bool
systemstack(func() {
work.heap2 = work.bytesMarked
if debug.gccheckmark > 0 {
// marking is complete so we can turn the write barrier off
setGCPhase(_GCoff)
- gcSweep(work.mode)
+ stwSwept = gcSweep(work.mode)
})
mp.traceback = 0
casgstatus(curgp, _Gwaiting, _Grunning)
- if traceEnabled() {
- traceGCDone()
+ trace := traceAcquire()
+ if trace.ok() {
+ trace.GCDone()
+ traceRelease(trace)
}
// all done
// Reset idle time stat.
sched.idleTime.Store(0)
- // Reset sweep state.
- sweep.nbgsweep = 0
- sweep.npausesweep = 0
-
if work.userForced {
memstats.numforcedgc++
}
// Those aren't tracked in any sweep lists, so we need to
// count them against sweep completion until we ensure all
// those spans have been forced out.
+ //
+ // If gcSweep fully swept the heap (for example if the sweep
+ // is not concurrent due to a GODEBUG setting), then we expect
+ // the sweepLocker to be invalid, since sweeping is done.
+ //
+ // N.B. Below we might duplicate some work from gcSweep; this is
+ // fine as all that work is idempotent within a GC cycle, and
+ // we're still holding worldsema so a new cycle can't start.
sl := sweep.active.begin()
- if !sl.valid {
+ if !stwSwept && !sl.valid {
throw("failed to set sweep barrier")
+ } else if stwSwept && sl.valid {
+ throw("non-concurrent sweep failed to drain all sweep queues")
}
systemstack(func() { startTheWorldWithSema() })
pp.pinnerCache = nil
})
})
- // Now that we've swept stale spans in mcaches, they don't
- // count against unswept spans.
- sweep.active.end(sl)
+ if sl.valid {
+ // Now that we've swept stale spans in mcaches, they don't
+ // count against unswept spans.
+ //
+ // Note: this sweepLocker may not be valid if sweeping had
+ // already completed during the STW. See the corresponding
+ // begin() call that produced sl.
+ sweep.active.end(sl)
+ }
// Print gctrace before dropping worldsema. As soon as we drop
// worldsema another cycle could start and smash the stats
default:
throw("gcBgMarkWorker: unexpected gcMarkWorkerMode")
case gcMarkWorkerDedicatedMode:
- gcDrain(&pp.gcw, gcDrainUntilPreempt|gcDrainFlushBgCredit)
+ gcDrainMarkWorkerDedicated(&pp.gcw, true)
if gp.preempt {
// We were preempted. This is
// a useful signal to kick
}
// Go back to draining, this time
// without preemption.
- gcDrain(&pp.gcw, gcDrainFlushBgCredit)
+ gcDrainMarkWorkerDedicated(&pp.gcw, false)
case gcMarkWorkerFractionalMode:
- gcDrain(&pp.gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit)
+ gcDrainMarkWorkerFractional(&pp.gcw)
case gcMarkWorkerIdleMode:
- gcDrain(&pp.gcw, gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit)
+ gcDrainMarkWorkerIdle(&pp.gcw)
}
casgstatus(gp, _Gwaiting, _Grunning)
})
// gcSweep must be called on the system stack because it acquires the heap
// lock. See mheap for details.
//
+// Returns true if the heap was fully swept by this function.
+//
// The world must be stopped.
//
//go:systemstack
-func gcSweep(mode gcMode) {
+func gcSweep(mode gcMode) bool {
assertWorldStopped()
if gcphase != _GCoff {
sweep.centralIndex.clear()
- if !_ConcurrentSweep || mode == gcForceBlockMode {
+ if !concurrentSweep || mode == gcForceBlockMode {
// Special case synchronous sweep.
// Record that no proportional sweeping has to happen.
lock(&mheap_.lock)
mheap_.sweepPagesPerByte = 0
unlock(&mheap_.lock)
+ // Flush all mcaches.
+ for _, pp := range allp {
+ pp.mcache.prepareForSweep()
+ }
// Sweep all spans eagerly.
for sweepone() != ^uintptr(0) {
- sweep.npausesweep++
}
// Free workbufs eagerly.
prepareFreeWorkbufs()
// available immediately.
mProf_NextCycle()
mProf_Flush()
- return
+ return true
}
// Background sweep.
ready(sweep.g, 0, true)
}
unlock(&sweep.lock)
+ return false
}
// gcResetMarkState resets global state prior to marking (concurrent