"internal/cpu"
"internal/goexperiment"
"runtime/internal/atomic"
- "unsafe"
+ _ "unsafe" // for go:linkname
)
-// go119MemoryLimitSupport is a feature flag for a number of changes
-// related to the memory limit feature (#48409). Disabling this flag
-// disables those features, as well as the memory limit mechanism,
-// which becomes a no-op.
-const go119MemoryLimitSupport = true
-
const (
// gcGoalUtilization is the goal CPU utilization for
// marking as a fraction of GOMAXPROCS.
defaultHeapMinimum = (goexperiment.HeapMinimum512KiBInt)*(512<<10) +
(1-goexperiment.HeapMinimum512KiBInt)*(4<<20)
- // scannableStackSizeSlack is the bytes of stack space allocated or freed
+ // maxStackScanSlack is the bytes of stack space allocated or freed
// that can accumulate on a P before updating gcController.stackSize.
- scannableStackSizeSlack = 8 << 10
+ maxStackScanSlack = 8 << 10
+
+ // memoryLimitMinHeapGoalHeadroom is the minimum amount of headroom the
+ // pacer gives to the heap goal when operating in the memory-limited regime.
+ // That is, it'll reduce the heap goal by this many extra bytes off of the
+ // base calculation, at minimum.
+ memoryLimitMinHeapGoalHeadroom = 1 << 20
+
+ // memoryLimitHeapGoalHeadroomPercent is how much headroom the memory-limit-based
+ // heap goal should have as a percent of the maximum possible heap goal allowed
+ // to maintain the memory limit.
+ memoryLimitHeapGoalHeadroomPercent = 3
)
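// To make the interaction of these two constants concrete, here is a minimal
// sketch (illustrative only; hypothetical helper, not part of this change) of
// how a headroom value would be derived from a goal:
//
//	func headroomFor(goal uint64) uint64 {
//		headroom := goal / 100 * memoryLimitHeapGoalHeadroomPercent
//		if headroom < memoryLimitMinHeapGoalHeadroom {
//			headroom = memoryLimitMinHeapGoalHeadroom
//		}
//		// e.g. a 64 MiB goal yields ~1.9 MiB; a 16 MiB goal hits the 1 MiB floor.
//		return headroom
//	}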
-func init() {
- if offset := unsafe.Offsetof(gcController.heapLive); offset%8 != 0 {
- println(offset)
- throw("gcController.heapLive not aligned to 8 bytes")
- }
-}
-
// gcController implements the GC pacing controller that determines
// when to trigger concurrent garbage collection and how much marking
// work to do in mutator assists and background marking.
// Initialized from GOGC. GOGC=off means no GC.
gcPercent atomic.Int32
- _ uint32 // padding so following 64-bit values are 8-byte aligned
-
// memoryLimit is the soft memory limit in bytes.
//
// Initialized from GOMEMLIMIT. GOMEMLIMIT=off is equivalent to MaxInt64
// Updated at the end of each GC cycle, in endCycle.
consMark float64
- // consMarkController holds the state for the mark-cons ratio
- // estimation over time.
- //
- // Its purpose is to smooth out noisiness in the computation of
- // consMark; see consMark for details.
- consMarkController piController
-
- _ uint32 // Padding for atomics on 32-bit platforms.
+ // lastConsMark is the computed cons/mark value for the previous 4 GC
+ // cycles. Note that this is *not* the last value of consMark, but the
+ // measured cons/mark value in endCycle.
+ lastConsMark [4]float64
// gcPercentHeapGoal is the goal heapLive for when next GC ends derived
// from gcPercent.
// hence goes up as we allocate and down as we sweep) while heapLive
// excludes these objects (and hence only goes up between GCs).
//
- // This is updated atomically without locking. To reduce
- // contention, this is updated only when obtaining a span from
- // an mcentral and at this point it counts all of the
- // unallocated slots in that span (which will be allocated
- // before that mcache obtains another span from that
- // mcentral). Hence, it slightly overestimates the "true" live
- // heap size. It's better to overestimate than to
- // underestimate because 1) this triggers the GC earlier than
- // necessary rather than potentially too late and 2) this
- // leads to a conservative GC rate rather than a GC rate that
- // is potentially too low.
- //
- // Reads should likewise be atomic (or during STW).
+ // To reduce contention, this is updated only when obtaining a span
+ // from an mcentral and at this point it counts all of the unallocated
+ // slots in that span (which will be allocated before that mcache
+ // obtains another span from that mcentral). Hence, it slightly
+ // overestimates the "true" live heap size. It's better to overestimate
+ // than to underestimate because 1) this triggers the GC earlier than
+ // necessary rather than potentially too late and 2) this leads to a
+ // conservative GC rate rather than a GC rate that is potentially too
+ // low.
//
// Whenever this is updated, call traceHeapAlloc() and
// this gcControllerState's revise() method.
- heapLive uint64
+ heapLive atomic.Uint64
- // heapScan is the number of bytes of "scannable" heap. This
- // is the live heap (as counted by heapLive), but omitting
- // no-scan objects and no-scan tails of objects.
+ // heapScan is the number of bytes of "scannable" heap. This is the
+ // live heap (as counted by heapLive), but omitting no-scan objects and
+ // no-scan tails of objects.
//
- // This value is fixed at the start of a GC cycle, so during a
- // GC cycle it is safe to read without atomics, and it represents
- // the maximum scannable heap.
- heapScan uint64
+ // This value is fixed at the start of a GC cycle. It represents the
+ // maximum scannable heap.
+ heapScan atomic.Uint64
// lastHeapScan is the number of bytes of heap that were scanned
// last GC cycle. It is the same as heapMarked, but only
// includes the "scannable" parts of objects.
//
// Updated when the world is stopped.
lastHeapScan uint64
- // stackScan is a snapshot of scannableStackSize taken at each GC
- // STW pause and is used in pacing decisions.
- //
- // Updated only while the world is stopped.
- stackScan uint64
+ // lastStackScan is the number of bytes of stack that were scanned
+ // last GC cycle.
+ lastStackScan atomic.Uint64
- // scannableStackSize is the amount of allocated goroutine stack space in
+ // maxStackScan is the amount of allocated goroutine stack space in
// use by goroutines.
//
// This number tracks allocated goroutine stack space rather than used
// goroutine stack space because used goroutine stack space is much
// harder to measure cheaply. By using
// allocated space, we make an overestimate; this is OK, it's better
// to conservatively overcount than undercount.
- //
- // Read and updated atomically.
- scannableStackSize uint64
+ maxStackScan atomic.Uint64
// globalsScan is the total amount of global variable space
// that is scannable.
- //
- // Read and updated atomically.
- globalsScan uint64
+ globalsScan atomic.Uint64
// heapMarked is the number of bytes marked by the previous
// GC. After mark termination, heapLive == heapMarked, but
// Currently these are measured in bytes. For most uses, this is an
// opaque unit of work, but for estimation the definition is important.
//
- // Note that stackScanWork includes all allocated space, not just the
- // size of the stack itself, mirroring stackSize.
+ // Note that stackScanWork includes only stack space scanned, not all
+ // of the allocated stack.
heapScanWork atomic.Int64
stackScanWork atomic.Int64
globalsScanWork atomic.Int64
- // bgScanCredit is the scan work credit accumulated by the
- // concurrent background scan. This credit is accumulated by
- // the background scan and stolen by mutator assists. This is
- // updated atomically. Updates occur in bounded batches, since
- // it is both written and read throughout the cycle.
- bgScanCredit int64
+ // bgScanCredit is the scan work credit accumulated by the concurrent
+ // background scan. This credit is accumulated by the background scan
+ // and stolen by mutator assists. Updates occur in bounded batches,
+ // since it is both written and read throughout the cycle.
+ bgScanCredit atomic.Int64
// assistTime is the nanoseconds spent in mutator assists
// during this cycle. This is updated atomically; updates occur in
// bounded batches, since it is both written and read throughout the cycle.
assistTime atomic.Int64
- // dedicatedMarkTime is the nanoseconds spent in dedicated
- // mark workers during this cycle. This is updated atomically
- // at the end of the concurrent mark phase.
- dedicatedMarkTime int64
+ // dedicatedMarkTime is the nanoseconds spent in dedicated mark workers
+ // during this cycle. This is updated at the end of the concurrent mark
+ // phase.
+ dedicatedMarkTime atomic.Int64
- // fractionalMarkTime is the nanoseconds spent in the
- // fractional mark worker during this cycle. This is updated
- // atomically throughout the cycle and will be up-to-date if
- // the fractional mark worker is not currently running.
- fractionalMarkTime int64
+ // fractionalMarkTime is the nanoseconds spent in the fractional mark
+ // worker during this cycle. This is updated throughout the cycle and
+ // will be up-to-date if the fractional mark worker is not currently
+ // running.
+ fractionalMarkTime atomic.Int64
- // idleMarkTime is the nanoseconds spent in idle marking
- // during this cycle. This is updated atomically throughout
- // the cycle.
- idleMarkTime int64
+ // idleMarkTime is the nanoseconds spent in idle marking during this
+ // cycle. This is updated throughout the cycle.
+ idleMarkTime atomic.Int64
// markStartTime is the absolute start time in nanoseconds
// that assists and background mark workers started.
markStartTime int64
- // dedicatedMarkWorkersNeeded is the number of dedicated mark
- // workers that need to be started. This is computed at the
- // beginning of each cycle and decremented atomically as
- // dedicated mark workers get started.
- dedicatedMarkWorkersNeeded int64
+ // dedicatedMarkWorkersNeeded is the number of dedicated mark workers
+ // that need to be started. This is computed at the beginning of each
+ // cycle and decremented as dedicated mark workers get started.
+ dedicatedMarkWorkersNeeded atomic.Int64
// idleMarkWorkers is two packed int32 values in a single uint64.
// These two values are always updated simultaneously.
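// As a sketch of the packed layout (assumed here: the bottom half holds the
// current worker count, the top half the maximum), packing and unpacking
// would look something like:
//
//	func packIdle(count, max int32) uint64 {
//		return uint64(uint32(max))<<32 | uint64(uint32(count))
//	}
//
//	func unpackIdle(v uint64) (count, max int32) {
//		return int32(uint32(v)), int32(uint32(v >> 32))
//	}
//
// Packing both halves into one word lets them be read and compare-and-swapped
// atomically as a unit.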
func (c *gcControllerState) init(gcPercent int32, memoryLimit int64) {
c.heapMinimum = defaultHeapMinimum
c.triggered = ^uint64(0)
-
- c.consMarkController = piController{
- // Tuned first via the Ziegler-Nichols process in simulation,
- // then the integral time was manually tuned against real-world
- // applications to deal with noisiness in the measured cons/mark
- // ratio.
- kp: 0.9,
- ti: 4.0,
-
- // Set a high reset time in GC cycles.
- // This is inversely proportional to the rate at which we
- // accumulate error from clipping. By making this very high
- // we make the accumulation slow. In general, clipping is
- // OK in our situation, hence the choice.
- //
- // Tune this if we get unintended effects from clipping for
- // a long time.
- tt: 1000,
- min: -1000,
- max: 1000,
- }
-
c.setGCPercent(gcPercent)
c.setMemoryLimit(memoryLimit)
c.commit(true) // No sweep phase in the first GC cycle.
-
// N.B. Don't bother calling traceHeapGoal. Tracing is never enabled at
// initialization time.
+ // N.B. No need to call revise; there's no GC enabled during
+ // initialization.
}
// startCycle resets the GC controller's state and computes estimates
c.heapScanWork.Store(0)
c.stackScanWork.Store(0)
c.globalsScanWork.Store(0)
- c.bgScanCredit = 0
+ c.bgScanCredit.Store(0)
c.assistTime.Store(0)
- c.dedicatedMarkTime = 0
- c.fractionalMarkTime = 0
- c.idleMarkTime = 0
+ c.dedicatedMarkTime.Store(0)
+ c.fractionalMarkTime.Store(0)
+ c.idleMarkTime.Store(0)
c.markStartTime = markStartTime
- c.stackScan = atomic.Load64(&c.scannableStackSize)
- c.triggered = c.heapLive
+ c.triggered = c.heapLive.Load()
// Compute the background mark utilization goal. In general,
// this may not come out exactly. We round the number of
// 25%. For small GOMAXPROCS, this would introduce too much
// error, so we add fractional workers in that case.
totalUtilizationGoal := float64(procs) * gcBackgroundUtilization
- c.dedicatedMarkWorkersNeeded = int64(totalUtilizationGoal + 0.5)
- utilError := float64(c.dedicatedMarkWorkersNeeded)/totalUtilizationGoal - 1
+ dedicatedMarkWorkersNeeded := int64(totalUtilizationGoal + 0.5)
+ utilError := float64(dedicatedMarkWorkersNeeded)/totalUtilizationGoal - 1
const maxUtilError = 0.3
if utilError < -maxUtilError || utilError > maxUtilError {
// Rounding put us more than 30% off our goal. With
// gcBackgroundUtilization of 25%, this happens for
// GOMAXPROCS<=3 or GOMAXPROCS=6. Enable fractional
// workers to compensate.
- if float64(c.dedicatedMarkWorkersNeeded) > totalUtilizationGoal {
+ if float64(dedicatedMarkWorkersNeeded) > totalUtilizationGoal {
// Too many dedicated workers.
- c.dedicatedMarkWorkersNeeded--
+ dedicatedMarkWorkersNeeded--
}
- c.fractionalUtilizationGoal = (totalUtilizationGoal - float64(c.dedicatedMarkWorkersNeeded)) / float64(procs)
+ c.fractionalUtilizationGoal = (totalUtilizationGoal - float64(dedicatedMarkWorkersNeeded)) / float64(procs)
} else {
c.fractionalUtilizationGoal = 0
}
// In STW mode, we just want dedicated workers.
if debug.gcstoptheworld > 0 {
- c.dedicatedMarkWorkersNeeded = int64(procs)
+ dedicatedMarkWorkersNeeded = int64(procs)
c.fractionalUtilizationGoal = 0
}
// required. However, we need at least one dedicated mark worker or
// idle GC worker to ensure GC progress in some scenarios (see comment
// on maxIdleMarkWorkers).
- if c.dedicatedMarkWorkersNeeded > 0 {
+ if dedicatedMarkWorkersNeeded > 0 {
c.setMaxIdleMarkWorkers(0)
} else {
// TODO(mknyszek): The fundamental reason why we need this is because
c.setMaxIdleMarkWorkers(1)
}
} else {
- // N.B. gomaxprocs and dedicatedMarkWorkersNeeded is guaranteed not to
+ // N.B. gomaxprocs and dedicatedMarkWorkersNeeded are guaranteed not to
// change during a GC cycle.
- c.setMaxIdleMarkWorkers(int32(procs) - int32(c.dedicatedMarkWorkersNeeded))
+ c.setMaxIdleMarkWorkers(int32(procs) - int32(dedicatedMarkWorkersNeeded))
}
// Compute initial values for controls that are updated
// throughout the cycle.
+ c.dedicatedMarkWorkersNeeded.Store(dedicatedMarkWorkersNeeded)
c.revise()
if debug.gcpacertrace > 0 {
heapGoal := c.heapGoal()
assistRatio := c.assistWorkPerByte.Load()
print("pacer: assist ratio=", assistRatio,
- " (scan ", gcController.heapScan>>20, " MB in ",
+ " (scan ", gcController.heapScan.Load()>>20, " MB in ",
work.initialHeapLive>>20, "->",
heapGoal>>20, " MB)",
- " workers=", c.dedicatedMarkWorkersNeeded,
+ " workers=", dedicatedMarkWorkersNeeded,
"+", c.fractionalUtilizationGoal, "\n")
}
}
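// A worked example of the worker split above, assuming a
// gcBackgroundUtilization of 0.25: for procs = 6 the utilization goal is 1.5
// workers, which rounds to 2 dedicated workers and overshoots by ~33% (more
// than the 30% tolerance), so one dedicated worker is dropped and the
// remaining half worker becomes fractionalUtilizationGoal = (1.5-1)/6 ≈ 0.083
// on every P. For procs = 8 the goal is exactly 2 workers, so no fractional
// worker is needed:
//
//	totalUtilizationGoal := float64(procs) * 0.25            // 1.5 for procs = 6
//	dedicated := int64(totalUtilizationGoal + 0.5)           // rounds to 2
//	utilError := float64(dedicated)/totalUtilizationGoal - 1 // +0.33 > 0.3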
// act like GOGC is huge for the below calculations.
gcPercent = 100000
}
- live := atomic.Load64(&c.heapLive)
- scan := atomic.Load64(&c.heapScan)
+ live := c.heapLive.Load()
+ scan := c.heapScan.Load()
work := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()
// Assume we're under the soft goal. Pace GC to complete at
heapGoal := int64(c.heapGoal())
// The expected scan work is computed as the amount of bytes scanned last
- // GC cycle, plus our estimate of stacks and globals work for this cycle.
- scanWorkExpected := int64(c.lastHeapScan + c.stackScan + c.globalsScan)
+ // GC cycle (both heap and stack), plus our estimate of globals work for this cycle.
+ scanWorkExpected := int64(c.lastHeapScan + c.lastStackScan.Load() + c.globalsScan.Load())
// maxScanWork is a worst-case estimate of the amount of scan work that
// needs to be performed in this GC cycle. Specifically, it represents
- // the case where *all* scannable memory turns out to be live.
- maxScanWork := int64(scan + c.stackScan + c.globalsScan)
+ // the case where *all* scannable memory turns out to be live, and
+ // *all* allocated stack space is scannable.
+ maxStackScan := c.maxStackScan.Load()
+ maxScanWork := int64(scan + maxStackScan + c.globalsScan.Load())
if work > scanWorkExpected {
// We've already done more scan work than expected. Because our expectation
// is based on a steady-state scannable heap size, we assume this means our
utilization += float64(c.assistTime.Load()) / float64(assistDuration*int64(procs))
}
- if c.heapLive <= c.triggered {
+ if c.heapLive.Load() <= c.triggered {
// Shouldn't happen, but let's be very safe about this in case the
// GC is somehow extremely short.
//
}
idleUtilization := 0.0
if assistDuration > 0 {
- idleUtilization = float64(c.idleMarkTime) / float64(assistDuration*int64(procs))
+ idleUtilization = float64(c.idleMarkTime.Load()) / float64(assistDuration*int64(procs))
}
// Determine the cons/mark ratio.
//
//
// assistDuration * procs * (utilization + idleUtilization)
//
- // In this case, we *include* idle utilization, because that is additional CPU time that the
+ // In this case, we *include* idle utilization, because that is additional CPU time that
// the GC had available to it.
//
// In effect, idle GC time is sort of double-counted here, but it's very weird compared
//
// So this calculation is really:
// (heapLive-trigger) / (assistDuration * procs * (1-utilization)) /
- // (scanWork) / (assistDuration * procs * (utilization+idleUtilization)
+ // (scanWork) / (assistDuration * procs * (utilization+idleUtilization))
//
// Note that because we only care about the ratio, assistDuration and procs cancel out.
scanWork := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()
- currentConsMark := (float64(c.heapLive-c.triggered) * (utilization + idleUtilization)) /
+ currentConsMark := (float64(c.heapLive.Load()-c.triggered) * (utilization + idleUtilization)) /
(float64(scanWork) * (1 - utilization))
- // Update cons/mark controller. The time period for this is 1 GC cycle.
- //
- // This use of a PI controller might seem strange. So, here's an explanation:
- //
- // currentConsMark represents the consMark we *should've* had to be perfectly
- // on-target for this cycle. Given that we assume the next GC will be like this
- // one in the steady-state, it stands to reason that we should just pick that
- // as our next consMark. In practice, however, currentConsMark is too noisy:
- // we're going to be wildly off-target in each GC cycle if we do that.
- //
- // What we do instead is make a long-term assumption: there is some steady-state
- // consMark value, but it's obscured by noise. By constantly shooting for this
- // noisy-but-perfect consMark value, the controller will bounce around a bit,
- // but its average behavior, in aggregate, should be less noisy and closer to
- // the true long-term consMark value, provided its tuned to be slightly overdamped.
- var ok bool
+ // Update our cons/mark estimate. This is the maximum of the value we just computed and the last
+ // 4 cons/mark values we measured. The reason we take the maximum here is to bias a noisy
+ // cons/mark measurement toward fewer assists at the expense of additional GC cycles (starting
+ // earlier).
oldConsMark := c.consMark
- c.consMark, ok = c.consMarkController.next(c.consMark, currentConsMark, 1.0)
- if !ok {
- // The error spiraled out of control. This is incredibly unlikely seeing
- // as this controller is essentially just a smoothing function, but it might
- // mean that something went very wrong with how currentConsMark was calculated.
- // Just reset consMark and keep going.
- c.consMark = 0
+ c.consMark = currentConsMark
+ for i := range c.lastConsMark {
+ if c.lastConsMark[i] > c.consMark {
+ c.consMark = c.lastConsMark[i]
+ }
}
+ copy(c.lastConsMark[:], c.lastConsMark[1:])
+ c.lastConsMark[len(c.lastConsMark)-1] = currentConsMark
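// For example, with lastConsMark = [2.0, 1.1, 1.5, 1.3] and a currentConsMark
// of 1.2, the pacer adopts consMark = 2.0 (the window maximum) and the window
// shifts to [1.1, 1.5, 1.3, 1.2]; a single quiet cycle therefore cannot drag
// the estimate down, while a single noisy spike decays out after four cycles.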
if debug.gcpacertrace > 0 {
- heapGoal := c.heapGoal()
printlock()
goal := gcGoalUtilization * 100
print("pacer: ", int(utilization*100), "% CPU (", int(goal), " exp.) for ")
- print(c.heapScanWork.Load(), "+", c.stackScanWork.Load(), "+", c.globalsScanWork.Load(), " B work (", c.lastHeapScan+c.stackScan+c.globalsScan, " B exp.) ")
- print("in ", c.triggered, " B -> ", c.heapLive, " B (∆goal ", int64(c.heapLive)-int64(heapGoal), ", cons/mark ", oldConsMark, ")")
- if !ok {
- print("[controller reset]")
- }
+ print(c.heapScanWork.Load(), "+", c.stackScanWork.Load(), "+", c.globalsScanWork.Load(), " B work (", c.lastHeapScan+c.lastStackScan.Load()+c.globalsScan.Load(), " B exp.) ")
+ live := c.heapLive.Load()
+ print("in ", c.triggered, " B -> ", live, " B (∆goal ", int64(live)-int64(c.lastHeapGoal), ", cons/mark ", oldConsMark, ")")
println()
printunlock()
}
// If there are idle Ps, wake one so it will run an idle worker.
// NOTE: This is suspected of causing deadlocks. See golang.org/issue/19112.
//
- // if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
+ // if sched.npidle.Load() != 0 && sched.nmspinning.Load() == 0 {
// wakep()
// return
// }
// There are no idle Ps. If we need more dedicated workers,
// try to preempt a running P so it will switch to a worker.
- if c.dedicatedMarkWorkersNeeded <= 0 {
+ if c.dedicatedMarkWorkersNeeded.Load() <= 0 {
return
}
// Pick a random other P to preempt.
}
}
-// findRunnableGCWorker returns a background mark worker for _p_ if it
+// findRunnableGCWorker returns a background mark worker for pp if it
// should be run. This must only be called when gcBlackenEnabled != 0.
-func (c *gcControllerState) findRunnableGCWorker(_p_ *p, now int64) *g {
+func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) {
if gcBlackenEnabled == 0 {
throw("gcControllerState.findRunnable: blackening not enabled")
}
// hasn't had an update in a while. This check is necessary in
// case the limiter is on but hasn't been checked in a while and
// so may have left sufficient headroom to turn off again.
+ if now == 0 {
+ now = nanotime()
+ }
if gcCPULimiter.needUpdate(now) {
- gcCPULimiter.update(gcController.assistTime.Load(), now)
+ gcCPULimiter.update(now)
}
- if !gcMarkWorkAvailable(_p_) {
+ if !gcMarkWorkAvailable(pp) {
// No work to be done right now. This can happen at
// the end of the mark phase when there are still
// assists tapering off. Don't bother running a worker
// now because it'll just return immediately.
- return nil
+ return nil, now
}
// Grab a worker before we commit to running below.
// it will always do so with queued global work. Thus, that P
// will be immediately eligible to re-run the worker G it was
// just using, ensuring work can complete.
- return nil
+ return nil, now
}
- decIfPositive := func(ptr *int64) bool {
+ decIfPositive := func(val *atomic.Int64) bool {
for {
- v := atomic.Loadint64(ptr)
+ v := val.Load()
if v <= 0 {
return false
}
- if atomic.Casint64(ptr, v, v-1) {
+ if val.CompareAndSwap(v, v-1) {
return true
}
}
if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
// This P is now dedicated to marking until the end of
// the concurrent mark phase.
- _p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
+ pp.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
} else if c.fractionalUtilizationGoal == 0 {
// No need for fractional workers.
gcBgMarkWorkerPool.push(&node.node)
- return nil
+ return nil, now
} else {
// Is this P behind on the fractional utilization
// goal?
//
// This should be kept in sync with pollFractionalWorkerExit.
delta := now - c.markStartTime
- if delta > 0 && float64(_p_.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal {
+ if delta > 0 && float64(pp.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal {
// Nope. No need to run a fractional worker.
gcBgMarkWorkerPool.push(&node.node)
- return nil
+ return nil, now
}
// Run a fractional worker.
- _p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode
+ pp.gcMarkWorkerMode = gcMarkWorkerFractionalMode
}
// Run the background mark worker.
gp := node.gp.ptr()
+ trace := traceAcquire()
casgstatus(gp, _Gwaiting, _Grunnable)
- if trace.enabled {
- traceGoUnpark(gp, 0)
+ if trace.ok() {
+ trace.GoUnpark(gp, 0)
+ traceRelease(trace)
}
- return gp
+ return gp, now
}
// resetLive sets up the controller state for the next mark phase after the end
// The world must be stopped.
func (c *gcControllerState) resetLive(bytesMarked uint64) {
c.heapMarked = bytesMarked
- c.heapLive = bytesMarked
- c.heapScan = uint64(c.heapScanWork.Load())
+ c.heapLive.Store(bytesMarked)
+ c.heapScan.Store(uint64(c.heapScanWork.Load()))
c.lastHeapScan = uint64(c.heapScanWork.Load())
+ c.lastStackScan.Store(uint64(c.stackScanWork.Load()))
c.triggered = ^uint64(0) // Reset triggered.
// heapLive was updated, so emit a trace event.
- if trace.enabled {
- traceHeapAlloc()
+ trace := traceAcquire()
+ if trace.ok() {
+ trace.HeapAlloc(bytesMarked)
+ traceRelease(trace)
}
}
func (c *gcControllerState) markWorkerStop(mode gcMarkWorkerMode, duration int64) {
switch mode {
case gcMarkWorkerDedicatedMode:
- atomic.Xaddint64(&c.dedicatedMarkTime, duration)
- atomic.Xaddint64(&c.dedicatedMarkWorkersNeeded, 1)
+ c.dedicatedMarkTime.Add(duration)
+ c.dedicatedMarkWorkersNeeded.Add(1)
case gcMarkWorkerFractionalMode:
- atomic.Xaddint64(&c.fractionalMarkTime, duration)
+ c.fractionalMarkTime.Add(duration)
case gcMarkWorkerIdleMode:
- atomic.Xaddint64(&c.idleMarkTime, duration)
+ c.idleMarkTime.Add(duration)
c.removeIdleMarkWorker()
default:
throw("markWorkerStop: unknown mark worker mode")
func (c *gcControllerState) update(dHeapLive, dHeapScan int64) {
if dHeapLive != 0 {
- atomic.Xadd64(&gcController.heapLive, dHeapLive)
- if trace.enabled {
+ trace := traceAcquire()
+ live := gcController.heapLive.Add(dHeapLive)
+ if trace.ok() {
// gcController.heapLive changed.
- traceHeapAlloc()
+ trace.HeapAlloc(live)
+ traceRelease(trace)
}
}
if gcBlackenEnabled == 0 {
// Update heapScan when we're not in a current GC. It is fixed
// at the beginning of a cycle.
if dHeapScan != 0 {
- atomic.Xadd64(&gcController.heapScan, dHeapScan)
+ gcController.heapScan.Add(dHeapScan)
}
} else {
// gcController.heapLive changed.
func (c *gcControllerState) addScannableStack(pp *p, amount int64) {
if pp == nil {
- atomic.Xadd64(&c.scannableStackSize, amount)
+ c.maxStackScan.Add(amount)
return
}
- pp.scannableStackSizeDelta += amount
- if pp.scannableStackSizeDelta >= scannableStackSizeSlack || pp.scannableStackSizeDelta <= -scannableStackSizeSlack {
- atomic.Xadd64(&c.scannableStackSize, pp.scannableStackSizeDelta)
- pp.scannableStackSizeDelta = 0
+ pp.maxStackScanDelta += amount
+ if pp.maxStackScanDelta >= maxStackScanSlack || pp.maxStackScanDelta <= -maxStackScanSlack {
+ c.maxStackScan.Add(pp.maxStackScanDelta)
+ pp.maxStackScanDelta = 0
}
}
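// The per-P slack above is an instance of delta batching: mutate a cheap
// P-local counter and publish to the contended global only once the batch
// exceeds a slack threshold. A self-contained sketch of the same pattern
// (hypothetical types, not runtime API):
//
//	import "sync/atomic"
//
//	type counter struct{ global atomic.Int64 }
//
//	type shard struct{ delta int64 } // one per worker, no locking needed
//
//	const slack = 8 << 10
//
//	func (s *shard) add(c *counter, n int64) {
//		s.delta += n
//		if s.delta >= slack || s.delta <= -slack {
//			c.global.Add(s.delta) // one atomic op per ~8 KiB of updates
//			s.delta = 0
//		}
//	}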
func (c *gcControllerState) addGlobals(amount int64) {
- atomic.Xadd64(&c.globalsScan, amount)
+ c.globalsScan.Add(amount)
}
// heapGoal returns the current heap goal.
func (c *gcControllerState) heapGoalInternal() (goal, minTrigger uint64) {
// Start with the goal calculated for gcPercent.
goal = c.gcPercentHeapGoal.Load()
- sweepDistTrigger := c.sweepDistMinTrigger.Load()
-
- // Don't set a goal below the minimum heap size or the minimum
- // trigger determined at commit time.
- minGoal := c.heapMinimum
- if sweepDistTrigger > minGoal {
- minGoal = sweepDistTrigger
- }
- if goal < minGoal {
- goal = minGoal
- }
-
- // Ensure that the heap goal is at least a little larger than
- // the point at which we triggered. This may not be the case if GC
- // start is delayed or if the allocation that pushed gcController.heapLive
- // over trigger is large or if the trigger is really close to
- // GOGC. Assist is proportional to this distance, so enforce a
- // minimum distance, even if it means going over the GOGC goal
- // by a tiny bit.
- const minRunway = 64 << 10
- if c.triggered != ^uint64(0) && goal < c.triggered+minRunway {
- goal = c.triggered + minRunway
- }
- return goal, sweepDistTrigger
+
+ // Check if the memory-limit-based goal is smaller, and if so, pick that.
+ if newGoal := c.memoryLimitHeapGoal(); newGoal < goal {
+ goal = newGoal
+ } else {
+ // We're not limited by the memory limit goal, so perform a series of
+ // adjustments that might move the goal forward in a variety of circumstances.
+
+ sweepDistTrigger := c.sweepDistMinTrigger.Load()
+ if sweepDistTrigger > goal {
+ // Set the goal to maintain a minimum sweep distance since
+ // the last call to commit. Note that we never want to do this
+ // if we're in the memory limit regime, because it could push
+ // the goal up.
+ goal = sweepDistTrigger
+ }
+ // Since we ignore the sweep distance trigger in the memory
+ // limit regime, we need to ensure we don't propagate it to
+ // the trigger, because it could cause a violation of the
+ // invariant that the trigger < goal.
+ minTrigger = sweepDistTrigger
+
+ // Ensure that the heap goal is at least a little larger than
+ // the point at which we triggered. This may not be the case if GC
+ // start is delayed or if the allocation that pushed gcController.heapLive
+ // over trigger is large or if the trigger is really close to
+ // GOGC. Assist is proportional to this distance, so enforce a
+ // minimum distance, even if it means going over the GOGC goal
+ // by a tiny bit.
+ //
+ // Ignore this if we're in the memory limit regime: we'd prefer to
+ // have the GC respond hard about how close we are to the goal than to
+ // push the goal back in such a manner that it could cause us to exceed
+ // the memory limit.
+ const minRunway = 64 << 10
+ if c.triggered != ^uint64(0) && goal < c.triggered+minRunway {
+ goal = c.triggered + minRunway
+ }
+ }
+ return
+}
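// For instance (assumed numbers): if the memory-limit goal comes out at
// 77 MiB while the gcPercent goal is 85 MiB, the memory-limit goal wins and
// the sweep-distance and minimum-runway adjustments above are deliberately
// skipped, since either could push the goal back above what the limit allows.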
+
+// memoryLimitHeapGoal returns a heap goal derived from memoryLimit.
+func (c *gcControllerState) memoryLimitHeapGoal() uint64 {
+ // Start by pulling out some values we'll need. Be careful about overflow.
+ var heapFree, heapAlloc, mappedReady uint64
+ for {
+ heapFree = c.heapFree.load() // Free and unscavenged memory.
+ heapAlloc = c.totalAlloc.Load() - c.totalFree.Load() // Heap object bytes in use.
+ mappedReady = c.mappedReady.Load() // Total unreleased mapped memory.
+ if heapFree+heapAlloc <= mappedReady {
+ break
+ }
+ // It is impossible for heap memory (free and in-use) to exceed total unreleased
+ // mapped memory, but
+ // because these stats are updated independently, we may observe a partial update
+ // including only some values. Thus, we appear to break the invariant. However,
+ // this condition is necessarily transient, so just try again. In the case of a
+ // persistent accounting error, we'll deadlock here.
+ }
+
+ // Below we compute a goal from memoryLimit. There are a few things to be aware of.
+ // Firstly, the memoryLimit does not easily compare to the heap goal: the former
+ // is total mapped memory by the runtime that hasn't been released, while the latter is
+ // only heap object memory. Intuitively, the way we convert from one to the other is to
+ // subtract everything from memoryLimit that both contributes to the memory limit (so,
+ // ignore scavenged memory) and doesn't contain heap objects. This isn't quite what
+ // lines up with reality, but it's a good starting point.
+ //
+ // In practice this computation looks like the following:
+ //
+ // goal := memoryLimit - ((mappedReady - heapFree - heapAlloc) + max(mappedReady - memoryLimit, 0))
+ // ^1 ^2
+ // goal -= goal / 100 * memoryLimitHeapGoalHeadroomPercent
+ // ^3
+ //
+ // Let's break this down.
+ //
+ // The first term (marker 1) is everything that contributes to the memory limit and isn't
+ // or couldn't become heap objects. It represents, broadly speaking, non-heap overheads.
+ // One oddity you may have noticed is that we also subtract out heapFree, i.e. unscavenged
+ // memory that may contain heap objects in the future.
+ //
+ // Let's take a step back. In an ideal world, this term would look something like just
+ // the heap goal. That is, we "reserve" enough space for the heap to grow to the heap
+ // goal, and subtract out everything else. This is of course impossible; the definition
+ // is circular! However, this impossible definition contains a key insight: the amount
+ // we're *going* to use matters just as much as whatever we're currently using.
+ //
+ // Consider if the heap shrinks to 1/10th its size, leaving behind lots of free and
+ // unscavenged memory. mappedReady - heapAlloc will be quite large, because of that free
+ // and unscavenged memory, pushing the goal down significantly.
+ //
+ // heapFree is also safe to exclude from the memory limit because in the steady-state, it's
+ // just a pool of memory for future heap allocations, and making new allocations from heapFree
+ // memory doesn't increase overall memory use. In transient states, the scavenger and the
+ // allocator actively manage the pool of heapFree memory to maintain the memory limit.
+ //
+ // The second term (marker 2) is the amount of memory we've exceeded the limit by, and is
+ // intended to help recover from such a situation. By pushing the heap goal down, we also
+ // push the trigger down, triggering and finishing a GC sooner in order to make room for
+ // other memory sources. Note that since we're effectively reducing the heap goal by X bytes,
+ // we're actually giving more than X bytes of headroom back, because the heap goal is in
+ // terms of heap objects, but it takes more than X bytes (e.g. due to fragmentation) to store
+ // X bytes worth of objects.
+ //
+ // The final adjustment (marker 3) reduces the maximum possible memory limit heap goal by
+ // memoryLimitHeapGoalHeadroomPercent. As the name implies, this is to provide additional headroom in
+ // the face of pacing inaccuracies, and also to leave a buffer of unscavenged memory so the
+ // allocator isn't constantly scavenging. The reduction amount also has a fixed minimum
+ // (memoryLimitMinHeapGoalHeadroom, not pictured) because the aforementioned pacing inaccuracies
+ // disproportionately affect small heaps: as heaps get smaller, the pacer's inputs get fuzzier.
+ // Shorter GC cycles and less GC work means noisy external factors like the OS scheduler have a
+ // greater impact.
+
+ memoryLimit := uint64(c.memoryLimit.Load())
+
+ // Compute term 1.
+ nonHeapMemory := mappedReady - heapFree - heapAlloc
+
+ // Compute term 2.
+ var overage uint64
+ if mappedReady > memoryLimit {
+ overage = mappedReady - memoryLimit
+ }
+
+ if nonHeapMemory+overage >= memoryLimit {
+ // We're at a point where non-heap memory exceeds the memory limit on its own.
+ // There's honestly not much we can do here but just trigger GCs continuously
+ // and let the CPU limiter rein that in. Something has to give at this point.
+ // Set it to heapMarked, the lowest possible goal.
+ return c.heapMarked
+ }
+
+ // Compute the goal.
+ goal := memoryLimit - (nonHeapMemory + overage)
+
+ // Apply some headroom to the goal to account for pacing inaccuracies and to reduce
+ // the impact of scavenging at allocation time in response to a high allocation rate
+ // when GOGC=off. See issue #57069. Also, be careful about small limits.
+ headroom := goal / 100 * memoryLimitHeapGoalHeadroomPercent
+ if headroom < memoryLimitMinHeapGoalHeadroom {
+ // Set a fixed minimum to deal with the particularly large effect pacing inaccuracies
+ // have for smaller heaps.
+ headroom = memoryLimitMinHeapGoalHeadroom
+ }
+ if goal < headroom || goal-headroom < headroom {
+ goal = headroom
+ } else {
+ goal = goal - headroom
+ }
+ // Don't let us go below the live heap. A heap goal below the live heap doesn't make sense.
+ if goal < c.heapMarked {
+ goal = c.heapMarked
+ }
+ return goal
}
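// A worked example of the full computation (all values assumed): with
// memoryLimit = 100 MiB, mappedReady = 90 MiB, heapFree = 10 MiB, and
// heapAlloc = 60 MiB:
//
//	nonHeapMemory := uint64(90-10-60) << 20 // 20 MiB of non-heap overheads
//	overage := uint64(0)                    // mappedReady < memoryLimit
//	goal := uint64(100-20) << 20            // 80 MiB base goal
//	headroom := goal / 100 * 3              // 2.4 MiB, above the 1 MiB floor
//	goal -= headroom                        // final goal ≈ 77.6 MiB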
+const (
+ // These constants determine the bounds on the GC trigger as a fraction
+ // of heap bytes allocated between the start of a GC (heapLive == heapMarked)
+ // and the end of a GC (heapLive == heapGoal).
+ //
+ // The constants are obscured in this way for efficiency. The denominator
+ // of the fraction is always a power-of-two for a quick division, so that
+ // the numerator is a single constant integer multiplication.
+ triggerRatioDen = 64
+
+ // The minimum trigger constant was chosen empirically: given a sufficiently
+ // fast/scalable allocator with 48 Ps that could drive the trigger ratio
+ // to <0.05, this constant causes applications to retain the same peak
+ // RSS compared to not having this allocator.
+ minTriggerRatioNum = 45 // ~0.7
+
+ // The maximum trigger constant is chosen somewhat arbitrarily, but the
+ // current constant has served us well over the years.
+ maxTriggerRatioNum = 61 // ~0.95
+)
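// Concretely (illustrative arithmetic, ignoring the additional cap at
// goal-defaultHeapMinimum applied in trigger below): with heapMarked = 64 MiB
// and goal = 128 MiB, the allocatable range is 64 MiB, so
//
//	unit := (goal - heapMarked) / triggerRatioDen      // 1 MiB
//	minTrigger := unit*minTriggerRatioNum + heapMarked // 109 MiB, ~70% of the range
//	maxTrigger := unit*maxTriggerRatioNum + heapMarked // 125 MiB, ~95% of the range
//
// That is, the GC may start no earlier than ~70% and no later than ~95% of
// the way from the live heap to the goal.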
+
// trigger returns the current point at which a GC should trigger along with
// the heap goal.
//
goal, minTrigger := c.heapGoalInternal()
// Invariant: the trigger must always be less than the heap goal.
+ //
+ // Note that the memory limit sets a hard maximum on our heap goal,
+ // but the live heap may grow beyond it.
+
+ if c.heapMarked >= goal {
+ // The goal should never be smaller than heapMarked, but let's be
+ // defensive about it. The only reasonable trigger here is one that
+ // causes a continuous GC cycle at heapMarked, but respect the goal
+ // if it came out as smaller than that.
+ return goal, goal
+ }
+
+ // Below this point, c.heapMarked < goal.
- // heapMarked is our absolute minumum, and it's possible the trigger
+ // heapMarked is our absolute minimum, and it's possible the trigger
// bound we get from heapGoalInternal is less than that.
if minTrigger < c.heapMarked {
minTrigger = c.heapMarked
// increase in RSS. By capping us at a point >0, we're essentially
// saying that we're OK using more CPU during the GC to prevent
// this growth in RSS.
- //
- // The current constant was chosen empirically: given a sufficiently
- // fast/scalable allocator with 48 Ps that could drive the trigger ratio
- // to <0.05, this constant causes applications to retain the same peak
- // RSS compared to not having this allocator.
- if triggerBound := uint64(0.7*float64(goal-c.heapMarked)) + c.heapMarked; minTrigger < triggerBound {
- minTrigger = triggerBound
+ triggerLowerBound := ((goal-c.heapMarked)/triggerRatioDen)*minTriggerRatioNum + c.heapMarked
+ if minTrigger < triggerLowerBound {
+ minTrigger = triggerLowerBound
}
- // For small heaps, set the max trigger point at 95% of the way from the
- // live heap to the heap goal. This ensures we always have *some* headroom
- // when the GC actually starts. For larger heaps, set the max trigger point
- // at the goal, minus the minimum heap size.
+ // For small heaps, set the max trigger point at maxTriggerRatio of the way
+ // from the live heap to the heap goal. This ensures we always have *some*
+ // headroom when the GC actually starts. For larger heaps, set the max trigger
+ // point at the goal, minus the minimum heap size.
//
// This choice follows from the fact that the minimum heap size is chosen
// to reflect the costs of a GC with no work to do. With a large heap but
// very little scan work to perform, this gives us exactly as much runway
// as we would need, in the worst case.
- maxTrigger := uint64(0.95*float64(goal-c.heapMarked)) + c.heapMarked
+ maxTrigger := ((goal-c.heapMarked)/triggerRatioDen)*maxTriggerRatioNum + c.heapMarked
if goal > defaultHeapMinimum && goal-defaultHeapMinimum > maxTrigger {
maxTrigger = goal - defaultHeapMinimum
}
- if maxTrigger < minTrigger {
- maxTrigger = minTrigger
- }
+ maxTrigger = max(maxTrigger, minTrigger)
// Compute the trigger from our bounds and the runway stored by commit.
var trigger uint64
} else {
trigger = goal - runway
}
- if trigger < minTrigger {
- trigger = minTrigger
- }
- if trigger > maxTrigger {
- trigger = maxTrigger
- }
+ trigger = max(trigger, minTrigger)
+ trigger = min(trigger, maxTrigger)
if trigger > goal {
print("trigger=", trigger, " heapGoal=", goal, "\n")
print("minTrigger=", minTrigger, " maxTrigger=", maxTrigger, "\n")
// This depends on gcPercent, gcController.heapMarked, and
// gcController.heapLive. These must be up to date.
//
+// Callers must call gcControllerState.revise after calling this
+// function if the GC is enabled.
+//
// mheap_.lock must be held or the world must be stopped.
func (c *gcControllerState) commit(isSweepDone bool) {
if !c.test {
// Concurrent sweep happens in the heap growth
// from gcController.heapLive to trigger. Make sure we
// give the sweeper some runway if it doesn't have enough.
- c.sweepDistMinTrigger.Store(atomic.Load64(&c.heapLive) + sweepMinHeapDistance)
+ c.sweepDistMinTrigger.Store(c.heapLive.Load() + sweepMinHeapDistance)
}
// Compute the next GC goal, which is when the allocated heap
// plus additional runway for non-heap sources of GC work.
gcPercentHeapGoal := ^uint64(0)
if gcPercent := c.gcPercent.Load(); gcPercent >= 0 {
- gcPercentHeapGoal = c.heapMarked + (c.heapMarked+atomic.Load64(&c.stackScan)+atomic.Load64(&c.globalsScan))*uint64(gcPercent)/100
+ gcPercentHeapGoal = c.heapMarked + (c.heapMarked+c.lastStackScan.Load()+c.globalsScan.Load())*uint64(gcPercent)/100
+ }
+ // Apply the minimum heap size here. It's defined in terms of gcPercent
+ // and is only updated by functions that call commit.
+ if gcPercentHeapGoal < c.heapMinimum {
+ gcPercentHeapGoal = c.heapMinimum
}
c.gcPercentHeapGoal.Store(gcPercentHeapGoal)
// Furthermore, by setting the runway so that CPU resources are divided
// this way, assuming that the cons/mark ratio is correct, we make that
// division a reality.
- c.runway.Store(uint64((c.consMark * (1 - gcGoalUtilization) / (gcGoalUtilization)) * float64(c.lastHeapScan+c.stackScan+c.globalsScan)))
-
- // Update mark pacing.
- if gcphase != _GCoff {
- c.revise()
- }
-}
-
-// effectiveGrowthRatio returns the current effective heap growth
-// ratio (GOGC/100) based on heapMarked from the previous GC and
-// heapGoal for the current GC.
-//
-// This may differ from gcPercent/100 because of various upper and
-// lower bounds on gcPercent. For example, if the heap is smaller than
-// heapMinimum, this can be higher than gcPercent/100.
-//
-// mheap_.lock must be held or the world must be stopped.
-func (c *gcControllerState) effectiveGrowthRatio() float64 {
- if !c.test {
- assertWorldStoppedOrLockHeld(&mheap_.lock)
- }
- heapGoal := c.heapGoal()
- egogc := float64(heapGoal-c.heapMarked) / float64(c.heapMarked)
- if egogc < 0 {
- // Shouldn't happen, but just in case.
- egogc = 0
- }
- return egogc
+ c.runway.Store(uint64((c.consMark * (1 - gcGoalUtilization) / (gcGoalUtilization)) * float64(c.lastHeapScan+c.lastStackScan.Load()+c.globalsScan.Load())))
}
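// A worked example of the two values computed above (assumed inputs): with
// GOGC=100, heapMarked = 40 MiB, lastStackScan = 4 MiB, and globalsScan = 1 MiB,
//
//	gcPercentHeapGoal := 40 + (40+4+1)*100/100 // 85 MiB
//
// and, assuming consMark = 2 and gcGoalUtilization = 0.25, the stored runway
// is 2 * (0.75/0.25) = 6 times the expected scan work
// (lastHeapScan + lastStackScan + globalsScan).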
// setGCPercent updates gcPercent. commit must be called after.
// If we just disabled GC, wait for any concurrent GC mark to
// finish so we always return with no GC running.
if in < 0 {
- gcWaitOnMark(atomic.Load(&work.cycles))
+ gcWaitOnMark(work.cycles.Load())
}
return out
return n
}
-type piController struct {
- kp float64 // Proportional constant.
- ti float64 // Integral time constant.
- tt float64 // Reset time.
-
- min, max float64 // Output boundaries.
-
- // PI controller state.
-
- errIntegral float64 // Integral of the error from t=0 to now.
-
- // Error flags.
- errOverflow bool // Set if errIntegral ever overflowed.
- inputOverflow bool // Set if an operation with the input overflowed.
-}
-
-// next provides a new sample to the controller.
-//
-// input is the sample, setpoint is the desired point, and period is how much
-// time (in whatever unit makes the most sense) has passed since the last sample.
-//
-// Returns a new value for the variable it's controlling, and whether the operation
-// completed successfully. One reason this might fail is if error has been growing
-// in an unbounded manner, to the point of overflow.
-//
-// In the specific case of an error overflow occurs, the errOverflow field will be
-// set and the rest of the controller's internal state will be fully reset.
-func (c *piController) next(input, setpoint, period float64) (float64, bool) {
- // Compute the raw output value.
- prop := c.kp * (setpoint - input)
- rawOutput := prop + c.errIntegral
-
- // Clamp rawOutput into output.
- output := rawOutput
- if isInf(output) || isNaN(output) {
- // The input had a large enough magnitude that either it was already
- // overflowed, or some operation with it overflowed.
- // Set a flag and reset. That's the safest thing to do.
- c.reset()
- c.inputOverflow = true
- return c.min, false
- }
- if output < c.min {
- output = c.min
- } else if output > c.max {
- output = c.max
- }
-
- // Update the controller's state.
- if c.ti != 0 && c.tt != 0 {
- c.errIntegral += (c.kp*period/c.ti)*(setpoint-input) + (period/c.tt)*(output-rawOutput)
- if isInf(c.errIntegral) || isNaN(c.errIntegral) {
- // So much error has accumulated that we managed to overflow.
- // The assumptions around the controller have likely broken down.
- // Set a flag and reset. That's the safest thing to do.
- c.reset()
- c.errOverflow = true
- return c.min, false
- }
- }
- return output, true
-}
-
-// reset resets the controller state, except for controller error flags.
-func (c *piController) reset() {
- c.errIntegral = 0
-}
-
// addIdleMarkWorker attempts to add a new idle mark worker.
//
// If this returns true, the caller must become an idle mark worker unless
// If this returns false, the caller must NOT become an idle mark worker.
//
// nosplit because it may be called without a P.
+//
//go:nosplit
func (c *gcControllerState) addIdleMarkWorker() bool {
for {
// useful for a quick check before an expensive operation.
//
// nosplit because it may be called without a P.
+//
//go:nosplit
func (c *gcControllerState) needIdleMarkWorker() bool {
p := c.idleMarkWorkers.Load()
gcController.commit(isSweepDone())
+ // Update mark pacing.
+ if gcphase != _GCoff {
+ gcController.revise()
+ }
+
// TODO(mknyszek): This isn't really accurate any longer because the heap
// goal is computed dynamically. Still useful to snapshot, but not as useful.
- if trace.enabled {
- traceHeapGoal()
+ trace := traceAcquire()
+ if trace.ok() {
+ trace.HeapGoal()
+ traceRelease(trace)
}
trigger, heapGoal := gcController.trigger()
gcPaceSweeper(trigger)
- gcPaceScavenger(heapGoal, gcController.lastHeapGoal)
+ gcPaceScavenger(gcController.memoryLimit.Load(), heapGoal, gcController.lastHeapGoal)
}