// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
	"internal/goexperiment"
	"runtime/internal/atomic"
	// gcGoalUtilization is the goal CPU utilization for
	// marking as a fraction of GOMAXPROCS.
	gcGoalUtilization = goexperiment.PacerRedesignInt*gcBackgroundUtilization +
		(1-goexperiment.PacerRedesignInt)*(gcBackgroundUtilization+0.05)
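
	// Concretely, the selection above follows from the constants in this
	// file: with goexperiment.PacerRedesignInt == 1 it evaluates to
	// gcBackgroundUtilization = 0.25; with the experiment off it
	// evaluates to gcBackgroundUtilization + 0.05 = 0.30. The *Int form
	// of the experiment flag lets the choice be written as a constant
	// expression.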
	// gcBackgroundUtilization is the fixed CPU utilization for background
	// marking. It must be <= gcGoalUtilization. The difference between
	// gcGoalUtilization and gcBackgroundUtilization will be made up by
	// mark assists. The scheduler will aim to use within 50% of this
	// goal.
	//
	// Setting this to < gcGoalUtilization avoids saturating the trigger
	// feedback controller when there are no assists, which allows it to
	// better control CPU and heap growth. However, the larger the gap,
	// the more mutator assists are expected to happen, which impacts
	// mutator latency.
	//
	// If goexperiment.PacerRedesign, the trigger feedback controller
	// is replaced with an estimate of the mark/cons ratio that doesn't
	// have the same saturation issues, so this is set equal to
	// gcGoalUtilization.
	gcBackgroundUtilization = 0.25
	// gcCreditSlack is the amount of scan work credit that can
	// accumulate locally before updating gcController.heapScanWork and,
	// optionally, gcController.bgScanCredit. Lower values give a more
	// accurate assist ratio and make it more likely that assists will
	// successfully steal background credit. Higher values reduce memory
	// contention.
	gcCreditSlack = 2000
	// gcAssistTimeSlack is the nanoseconds of mutator assist time that
	// can accumulate on a P before updating gcController.assistTime.
	gcAssistTimeSlack = 5000
	// gcOverAssistWork determines how many extra units of scan work a GC
	// assist does when an assist happens. This amortizes the cost of an
	// assist by pre-paying for this many bytes of future allocations.
	gcOverAssistWork = 64 << 10
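
	// For example: an assist that owes 512 B of scan work actually
	// performs 512 B + gcOverAssistWork ≈ 64.5 KiB of work and banks the
	// surplus as credit, so roughly the next 64 KiB of allocations by
	// that goroutine proceed without assisting at all.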
	// defaultHeapMinimum is the value of heapMinimum for GOGC==100.
	defaultHeapMinimum = (goexperiment.HeapMinimum512KiBInt)*(512<<10) +
		(1-goexperiment.HeapMinimum512KiBInt)*(4<<20)
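
	// As with gcGoalUtilization above, the *Int form of the experiment
	// flag selects between two constants: 512 KiB when
	// goexperiment.HeapMinimum512KiB is set, 4 MiB otherwise.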
	// scannableStackSizeSlack is the bytes of stack space allocated or freed
	// that can accumulate on a P before updating gcController.stackSize.
	scannableStackSizeSlack = 8 << 10
	if offset := unsafe.Offsetof(gcController.heapLive); offset%8 != 0 {
		throw("gcController.heapLive not aligned to 8 bytes")
	}
// gcController implements the GC pacing controller that determines
// when to trigger concurrent garbage collection and how much marking
// work to do in mutator assists and background marking.
//
// It uses a feedback control algorithm to adjust the gcController.trigger
// based on the heap growth and GC CPU utilization each cycle.
// This algorithm optimizes for heap growth to match GOGC and for CPU
// utilization between assist and background marking to be 25% of
// GOMAXPROCS. The high-level design of this algorithm is documented
// at https://golang.org/s/go15gcpacing.
//
// All fields of gcController are used only during a single mark
// cycle.
var gcController gcControllerState
type gcControllerState struct {
	// Initialized from GOGC. GOGC=off means no GC.
	gcPercent atomic.Int32

	_ uint32 // padding so following 64-bit values are 8-byte aligned
	// heapMinimum is the minimum heap size at which to trigger GC.
	// For small heaps, this overrides the usual GOGC*live set rule.
	//
	// When there is a very small live set but a lot of allocation, simply
	// collecting when the heap reaches GOGC*live results in many GC
	// cycles and high total per-GC overhead. This minimum amortizes this
	// per-GC overhead while keeping the heap reasonably small.
	//
	// During initialization this is set to 4MB*GOGC/100. In the case of
	// GOGC==0, this will set heapMinimum to 0, resulting in constant
	// collection even when the heap size is small, which is useful for
	// microbenchmarks.
	heapMinimum uint64
	// triggerRatio is the heap growth ratio that triggers marking.
	//
	// E.g., if this is 0.6, then GC should start when the live
	// heap has reached 1.6 times the heap size marked by the
	// previous cycle. This should be ≤ GOGC/100 so the trigger
	// heap size is less than the goal heap size. This is set
	// during mark termination for the next cycle's trigger.
	//
	// Protected by mheap_.lock or a STW.
	//
	// Used if !goexperiment.PacerRedesign.
	triggerRatio float64
	// trigger is the heap size that triggers marking.
	//
	// When heapLive ≥ trigger, the mark phase will start.
	// This is also the heap size by which proportional sweeping
	// must be complete.
	//
	// This is computed from triggerRatio during mark termination
	// for the next cycle's trigger.
	//
	// Protected by mheap_.lock or a STW.
	trigger uint64
	// consMark is the estimated per-CPU cons/mark ratio for the application.
	//
	// It represents the ratio between the application's allocation
	// rate, as bytes allocated per CPU-time, and the GC's scan rate,
	// as bytes scanned per CPU-time.
	// The units of this ratio are (B / cpu-ns) / (B / cpu-ns).
	//
	// At a high level, this value is computed as the bytes of memory
	// allocated (cons) per unit of scan work completed (mark) in a GC
	// cycle, divided by the CPU time spent on each activity.
	//
	// Updated at the end of each GC cycle, in endCycle.
	//
	// For goexperiment.PacerRedesign.
	consMark float64
	// consMarkController holds the state for the cons/mark ratio
	// estimation over time.
	//
	// Its purpose is to smooth out noisiness in the computation of
	// consMark; see consMark for details.
	//
	// For goexperiment.PacerRedesign.
	consMarkController piController

	_ uint32 // Padding for atomics on 32-bit platforms.
	// heapGoal is the goal heapLive for when next GC ends.
	// Set to ^uint64(0) if disabled.
	//
	// Read and written atomically, unless the world is stopped.
	heapGoal uint64
	// lastHeapGoal is the value of heapGoal for the previous GC.
	// Note that this is distinct from the last value heapGoal had,
	// because it could change if e.g. gcPercent changes.
	//
	// Read and written with the world stopped or with mheap_.lock held.
	lastHeapGoal uint64
	// heapLive is the number of bytes considered live by the GC.
	// That is: retained by the most recent GC plus allocated
	// since then. heapLive ≤ memstats.heapAlloc, since heapAlloc includes
	// unmarked objects that have not yet been swept (and hence goes up as we
	// allocate and down as we sweep) while heapLive excludes these
	// objects (and hence only goes up between GCs).
	//
	// This is updated atomically without locking. To reduce
	// contention, this is updated only when obtaining a span from
	// an mcentral and at this point it counts all of the
	// unallocated slots in that span (which will be allocated
	// before that mcache obtains another span from that
	// mcentral). Hence, it slightly overestimates the "true" live
	// heap size. It's better to overestimate than to
	// underestimate because 1) this triggers the GC earlier than
	// necessary rather than potentially too late and 2) this
	// leads to a conservative GC rate rather than a GC rate that
	// is potentially too low.
	//
	// Reads should likewise be atomic (or during STW).
	//
	// Whenever this is updated, call traceHeapAlloc() and
	// this gcControllerState's revise() method.
	heapLive uint64
	// heapScan is the number of bytes of "scannable" heap. This
	// is the live heap (as counted by heapLive), but omitting
	// no-scan objects and no-scan tails of objects.
	//
	// For !goexperiment.PacerRedesign: Whenever this is updated,
	// call this gcControllerState's revise() method. It is read
	// and written atomically or with the world stopped.
	//
	// For goexperiment.PacerRedesign: This value is fixed at the
	// start of a GC cycle, so during a GC cycle it is safe to
	// read without atomics, and it represents the maximum scannable
	// heap.
	heapScan uint64
	// lastHeapScan is the number of bytes of heap that were scanned
	// last GC cycle. It is the same as heapMarked, but only
	// includes the "scannable" parts of objects.
	//
	// Updated when the world is stopped.
	lastHeapScan uint64
	// stackScan is a snapshot of scannableStackSize taken at each GC
	// STW pause and is used in pacing decisions.
	//
	// Updated only while the world is stopped.
	stackScan uint64
	// scannableStackSize is the amount of allocated goroutine stack space in
	// use by goroutines.
	//
	// This number tracks allocated goroutine stack space rather than used
	// goroutine stack space (i.e. what is actually scanned) because used
	// goroutine stack space is much harder to measure cheaply. By using
	// allocated space, we make an overestimate; this is OK, it's better
	// to conservatively overcount than undercount.
	//
	// Read and updated atomically.
	scannableStackSize uint64
	// globalsScan is the total amount of global variable space
	// that is scannable.
	//
	// Read and updated atomically.
	globalsScan uint64
	// heapMarked is the number of bytes marked by the previous
	// GC. After mark termination, heapLive == heapMarked, but
	// unlike heapLive, heapMarked does not change until the
	// next mark termination.
	heapMarked uint64
	// heapScanWork is the total heap scan work performed this cycle.
	// stackScanWork is the total stack scan work performed this cycle.
	// globalsScanWork is the total globals scan work performed this cycle.
	//
	// These are updated atomically during the cycle. Updates occur in
	// bounded batches, since they are both written and read
	// throughout the cycle. At the end of the cycle, heapScanWork is how
	// much of the retained heap is scannable.
	//
	// Currently these are measured in bytes. For most uses, this is an
	// opaque unit of work, but for estimation the definition is important.
	//
	// Note that stackScanWork includes all allocated space, not just the
	// size of the stack itself, mirroring stackSize.
	//
	// For !goexperiment.PacerRedesign, stackScanWork and globalsScanWork
	// are always zero.
	heapScanWork    atomic.Int64
	stackScanWork   atomic.Int64
	globalsScanWork atomic.Int64
	// bgScanCredit is the scan work credit accumulated by the
	// concurrent background scan. This credit is accumulated by
	// the background scan and stolen by mutator assists. This is
	// updated atomically. Updates occur in bounded batches, since
	// it is both written and read throughout the cycle.
	bgScanCredit int64
	// assistTime is the nanoseconds spent in mutator assists
	// during this cycle. This is updated atomically. Updates
	// occur in bounded batches, since it is both written and read
	// throughout the cycle.
	assistTime int64
	// dedicatedMarkTime is the nanoseconds spent in dedicated
	// mark workers during this cycle. This is updated atomically
	// at the end of the concurrent mark phase.
	dedicatedMarkTime int64

	// fractionalMarkTime is the nanoseconds spent in the
	// fractional mark worker during this cycle. This is updated
	// atomically throughout the cycle and will be up-to-date if
	// the fractional mark worker is not currently running.
	fractionalMarkTime int64
	// idleMarkTime is the nanoseconds spent in idle marking
	// during this cycle. This is updated atomically throughout
	// the cycle.
	idleMarkTime int64

	// markStartTime is the absolute start time in nanoseconds
	// at which assists and background mark workers started.
	markStartTime int64
	// dedicatedMarkWorkersNeeded is the number of dedicated mark
	// workers that need to be started. This is computed at the
	// beginning of each cycle and decremented atomically as
	// dedicated mark workers get started.
	dedicatedMarkWorkersNeeded int64
	// assistWorkPerByte is the ratio of scan work to allocated
	// bytes that should be performed by mutator assists. This is
	// computed at the beginning of each cycle and updated every
	// time heapScan is updated.
	assistWorkPerByte atomic.Float64

	// assistBytesPerWork is 1/assistWorkPerByte.
	//
	// Note that because this is read and written independently
	// from assistWorkPerByte, users may notice a skew between
	// the two values; such a state should be safe.
	assistBytesPerWork atomic.Float64
	// fractionalUtilizationGoal is the fraction of wall clock
	// time that should be spent in the fractional mark worker on
	// each P that isn't running a dedicated worker.
	//
	// For example, if the utilization goal is 25% and there are
	// no dedicated workers, this will be 0.25. If the goal is
	// 25%, there is one dedicated worker, and GOMAXPROCS is 5,
	// this will be 0.05 to make up the missing 5%.
	//
	// If this is zero, no fractional workers are needed.
	fractionalUtilizationGoal float64
	// test indicates that this is a test-only copy of gcControllerState.
	test bool
}
func (c *gcControllerState) init(gcPercent int32) {
	c.heapMinimum = defaultHeapMinimum

	if goexperiment.PacerRedesign {
		c.consMarkController = piController{
			// Tuned first via the Ziegler-Nichols process in simulation,
			// then the integral time was manually tuned against real-world
			// applications to deal with noisiness in the measured cons/mark
			// ratio.
			kp: 0.9,
			ti: 4.0,

			// Set a high reset time in GC cycles.
			// This is inversely proportional to the rate at which we
			// accumulate error from clipping. By making this very high
			// we make the accumulation slow. In general, clipping is
			// OK in our situation, hence the choice.
			//
			// Tune this if we get unintended effects from clipping for
			// a long time.
			tt:  1000,
			min: -1000,
			max: 1000,
		}
	} else {
		// Set a reasonable initial GC trigger.
		c.triggerRatio = 7 / 8.0

		// Fake a heapMarked value so it looks like a trigger at
		// heapMinimum is the appropriate growth from heapMarked.
		// This will go into computing the initial GC goal.
		c.heapMarked = uint64(float64(c.heapMinimum) / (1 + c.triggerRatio))
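
		// Worked example, assuming the default GOGC==100 heapMinimum of
		// 4 MiB: heapMarked = 4 MiB / 1.875 ≈ 2.13 MiB, so the first
		// trigger, heapMarked * (1 + 7/8) = heapMarked * 1.875, lands
		// exactly back on heapMinimum = 4 MiB, as intended.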
	}

	// This will also compute and set the GC trigger and goal.
	c.setGCPercent(gcPercent)
}
// startCycle resets the GC controller's state and computes estimates
// for a new GC cycle. The caller must hold worldsema and the world
// must be stopped.
func (c *gcControllerState) startCycle(markStartTime int64, procs int) {
	c.heapScanWork.Store(0)
	c.stackScanWork.Store(0)
	c.globalsScanWork.Store(0)
	c.bgScanCredit = 0
	c.assistTime = 0
	c.dedicatedMarkTime = 0
	c.fractionalMarkTime = 0
	c.idleMarkTime = 0
	c.markStartTime = markStartTime
	c.stackScan = atomic.Load64(&c.scannableStackSize)
	// Ensure that the heap goal is at least a little larger than
	// the current live heap size. This may not be the case if GC
	// start is delayed or if the allocation that pushed gcController.heapLive
	// over trigger is large or if the trigger is really close to
	// GOGC. Assist is proportional to this distance, so enforce a
	// minimum distance, even if it means going over the GOGC goal
	// by this much.
	if goexperiment.PacerRedesign {
		if c.heapGoal < c.heapLive+64<<10 {
			c.heapGoal = c.heapLive + 64<<10
		}
	} else {
		if c.heapGoal < c.heapLive+1<<20 {
			c.heapGoal = c.heapLive + 1<<20
		}
	}
	// Compute the background mark utilization goal. In general,
	// this may not come out exactly. We round the number of
	// dedicated workers so that the utilization is closest to
	// 25%. For small GOMAXPROCS, this would introduce too much
	// error, so we add fractional workers in that case.
	totalUtilizationGoal := float64(procs) * gcBackgroundUtilization
	c.dedicatedMarkWorkersNeeded = int64(totalUtilizationGoal + 0.5)
	utilError := float64(c.dedicatedMarkWorkersNeeded)/totalUtilizationGoal - 1
	const maxUtilError = 0.3
	if utilError < -maxUtilError || utilError > maxUtilError {
		// Rounding put us more than 30% off our goal. With
		// gcBackgroundUtilization of 25%, this happens for
		// GOMAXPROCS<=3 or GOMAXPROCS=6. Enable fractional
		// workers to compensate.
		if float64(c.dedicatedMarkWorkersNeeded) > totalUtilizationGoal {
			// Too many dedicated workers.
			c.dedicatedMarkWorkersNeeded--
		}
		c.fractionalUtilizationGoal = (totalUtilizationGoal - float64(c.dedicatedMarkWorkersNeeded)) / float64(procs)
	} else {
		c.fractionalUtilizationGoal = 0
	}
	// In STW mode, we just want dedicated workers.
	if debug.gcstoptheworld > 0 {
		c.dedicatedMarkWorkersNeeded = int64(procs)
		c.fractionalUtilizationGoal = 0
	}

	// Clear per-P state.
	for _, p := range allp {
		p.gcAssistTime = 0
		p.gcFractionalMarkTime = 0
	}
	// Compute initial values for controls that are updated
	// throughout the cycle.
	c.revise()

	if debug.gcpacertrace > 0 {
		assistRatio := c.assistWorkPerByte.Load()
		print("pacer: assist ratio=", assistRatio,
			" (scan ", gcController.heapScan>>20, " MB in ",
			work.initialHeapLive>>20, "->",
			c.heapGoal>>20, " MB)",
			" workers=", c.dedicatedMarkWorkersNeeded,
			"+", c.fractionalUtilizationGoal, "\n")
	}
}
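
// A worked example of the worker accounting in startCycle, assuming
// procs = 6 and gcBackgroundUtilization = 0.25: totalUtilizationGoal is
// 1.5, which rounds to dedicatedMarkWorkersNeeded = 2. The rounding
// error is 2/1.5 - 1 ≈ 0.33 > maxUtilError, so one dedicated worker is
// dropped and fractionalUtilizationGoal = (1.5 - 1) / 6 ≈ 0.083: each P
// runs the fractional worker about 8.3% of the time to recover the
// missing half worker of utilization.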
// revise updates the assist ratio during the GC cycle to account for
// improved estimates. This should be called whenever gcController.heapScan,
// gcController.heapLive, or gcController.heapGoal is updated. It is safe to
// call concurrently, but it may race with other calls to revise.
//
// The result of this race is that the two assist ratio values may not line
// up or may be stale. In practice this is OK because the assist ratio
// moves slowly throughout a GC cycle, and the assist ratio is a best-effort
// heuristic anyway. Furthermore, no part of the heuristic depends on
// the two assist ratio values being exact reciprocals of one another, since
// the two values are used to convert values from different sources.
//
// The worst case result of this raciness is that we may miss a larger shift
// in the ratio (say, if we decide to pace more aggressively against the
// hard heap goal) but even this "hard goal" is best-effort (see #40460).
// The dedicated GC should ensure we don't exceed the hard goal by too much
// in the rare case we do exceed it.
//
// It should only be called when gcBlackenEnabled != 0 (because this
// is when assists are enabled and the necessary statistics are
// available).
func (c *gcControllerState) revise() {
	gcPercent := c.gcPercent.Load()
	if gcPercent < 0 {
		// If GC is disabled but we're running a forced GC,
		// act like GOGC is huge for the below calculations.
		gcPercent = 100000
	}
	live := atomic.Load64(&c.heapLive)
	scan := atomic.Load64(&c.heapScan)
	work := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()

	// Assume we're under the soft goal. Pace GC to complete at
	// heapGoal assuming the heap is in steady-state.
	heapGoal := int64(atomic.Load64(&c.heapGoal))
	var scanWorkExpected int64
	if goexperiment.PacerRedesign {
		// The expected scan work is computed as the amount of bytes scanned last
		// GC cycle, plus our estimate of stacks and globals work for this cycle.
		scanWorkExpected = int64(c.lastHeapScan + c.stackScan + c.globalsScan)

		// maxScanWork is a worst-case estimate of the amount of scan work that
		// needs to be performed in this GC cycle. Specifically, it represents
		// the case where *all* scannable memory turns out to be live.
		maxScanWork := int64(scan + c.stackScan + c.globalsScan)
		if work > scanWorkExpected {
			// We've already done more scan work than expected. Because our expectation
			// is based on a steady-state scannable heap size, we assume this means our
			// heap is growing. Compute a new heap goal that takes our existing runway
			// computed for scanWorkExpected and extrapolates it to maxScanWork, the worst-case
			// scan work. This keeps our assist ratio stable if the heap continues to grow.
			//
			// The effect of this mechanism is that assists stay flat in the face of heap
			// growths. It's OK to use more memory this cycle to scan all the live heap,
			// because the next GC cycle is inevitably going to use *at least* that much
			// memory anyway.
			extHeapGoal := int64(float64(heapGoal-int64(c.trigger))/float64(scanWorkExpected)*float64(maxScanWork)) + int64(c.trigger)
			scanWorkExpected = maxScanWork
			// hardGoal is a hard limit on the amount that we're willing to push back the
			// heap goal, and that's twice the heap goal (i.e. if GOGC=100 and the heap and/or
			// stacks and/or globals grow to twice their size, this limits the current GC cycle's
			// growth to 4x the original live heap's size).
			//
			// This maintains the invariant that we use no more memory than the next GC cycle
			// is allowed to.
			hardGoal := int64((1.0 + float64(gcPercent)/100.0) * float64(heapGoal))
			if extHeapGoal > hardGoal {
				extHeapGoal = hardGoal
			}
			heapGoal = extHeapGoal
		}
		if int64(live) > heapGoal {
			// We're already past our heap goal, even the extrapolated one.
			// Leave ourselves some extra runway, so in the worst case we
			// finish by that point.
			const maxOvershoot = 1.1
			heapGoal = int64(float64(heapGoal) * maxOvershoot)

			// Compute the upper bound on the scan work remaining.
			scanWorkExpected = maxScanWork
		}
	} else {
		// Compute the expected scan work remaining.
		//
		// This is estimated based on the expected
		// steady-state scannable heap. For example, with
		// GOGC=100, only half of the scannable heap is
		// expected to be live, so that's what we target.
		//
		// (This is a float calculation to avoid overflowing on
		// 100*heapScan.)
		scanWorkExpected = int64(float64(scan) * 100 / float64(100+gcPercent))
		if int64(live) > heapGoal || work > scanWorkExpected {
			// We're past the soft goal, or we've already done more scan
			// work than we expected. Pace GC so that in the worst case it
			// will complete by the hard goal.
			const maxOvershoot = 1.1
			heapGoal = int64(float64(heapGoal) * maxOvershoot)

			// Compute the upper bound on the scan work remaining.
			scanWorkExpected = int64(scan)
		}
	}
	// Compute the remaining scan work estimate.
	//
	// Note that we currently count allocations during GC as both
	// scannable heap (heapScan) and scan work completed
	// (scanWork), so allocation will change this difference
	// slowly in the soft regime and not at all in the hard
	// regime.
	scanWorkRemaining := scanWorkExpected - work
	if scanWorkRemaining < 1000 {
		// We set a somewhat arbitrary lower bound on
		// remaining scan work since if we aim a little high,
		// we can miss by a little.
		//
		// We *do* need to enforce that this is at least 1,
		// since marking is racy and double-scanning objects
		// may legitimately make the remaining scan work
		// negative, even in the hard goal regime.
		scanWorkRemaining = 1000
	}
	// Compute the heap distance remaining.
	heapRemaining := heapGoal - int64(live)
	if heapRemaining <= 0 {
		// This shouldn't happen, but if it does, avoid
		// dividing by zero or setting the assist negative.
		heapRemaining = 1
	}
	// Compute the mutator assist ratio so by the time the mutator
	// allocates the remaining heap bytes up to heapGoal, it will
	// have done (or stolen) the remaining amount of scan work.
	// Note that the assist ratio values are updated atomically
	// but not together. This means there may be some degree of
	// skew between the two values. This is generally OK as the
	// values shift relatively slowly over the course of a GC
	// cycle.
	assistWorkPerByte := float64(scanWorkRemaining) / float64(heapRemaining)
	assistBytesPerWork := float64(heapRemaining) / float64(scanWorkRemaining)
	c.assistWorkPerByte.Store(assistWorkPerByte)
	c.assistBytesPerWork.Store(assistBytesPerWork)
}
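
// To illustrate how the two ratios stored above are consumed (a minimal
// sketch; the real consumer is the assist path in mgcmark.go, and the
// variable names here are simplified):
//
//	// Convert a goroutine's allocation debt in bytes to scan work owed:
//	scanWork := int64(c.assistWorkPerByte.Load() * float64(debtBytes))
//	// Convert stolen background scan credit back to allocation bytes paid off:
//	payoffBytes := int64(c.assistBytesPerWork.Load() * float64(stolenScanWork))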
// endCycle computes the trigger ratio (!goexperiment.PacerRedesign)
// or the consMark estimate (goexperiment.PacerRedesign) for the next cycle.
// Returns the trigger ratio if applicable, or 0 (goexperiment.PacerRedesign).
// userForced indicates whether the current GC cycle was forced
// by the application.
func (c *gcControllerState) endCycle(now int64, procs int, userForced bool) float64 {
	// Record last heap goal for the scavenger.
	// We'll be updating the heap goal soon.
	gcController.lastHeapGoal = gcController.heapGoal
	// Compute the duration of time for which assists were turned on.
	assistDuration := now - c.markStartTime

	// Assume background mark hit its utilization goal.
	utilization := gcBackgroundUtilization
	// Add assist utilization; avoid divide by zero.
	if assistDuration > 0 {
		utilization += float64(c.assistTime) / float64(assistDuration*int64(procs))
	}
	if goexperiment.PacerRedesign {
		if c.heapLive <= c.trigger {
			// Shouldn't happen, but let's be very safe about this in case the
			// GC is somehow extremely short.
			//
			// In this case though, the only reasonable value for c.heapLive-c.trigger
			// would be 0, which isn't really all that useful, i.e. the GC was so short
			// that it didn't matter.
			//
			// Ignore this case and don't update anything.
			return 0
		}
		idleUtilization := 0.0
		if assistDuration > 0 {
			idleUtilization = float64(c.idleMarkTime) / float64(assistDuration*int64(procs))
		}
		// Determine the cons/mark ratio.
		//
		// The units we want for the numerator and denominator are both B / cpu-ns.
		// We get this by taking the bytes allocated or scanned, and divide by the amount of
		// CPU time it took for those operations. For allocations, that CPU time is
		//
		//    assistDuration * procs * (1 - utilization)
		//
		// Where utilization includes just background GC workers and assists. It does *not*
		// include idle GC work time, because in theory the mutator is free to take that at
		// any point.
		//
		// For scanning, that CPU time is
		//
		//    assistDuration * procs * (utilization + idleUtilization)
		//
		// In this case, we *include* idle utilization, because that is additional CPU time
		// that the GC had available to it.
		//
		// In effect, idle GC time is sort of double-counted here, but it's very weird compared
		// to other kinds of GC work, because of how fluid it is. Namely, because the mutator is
		// *always* free to take it.
		//
		// So this calculation is really:
		//     (heapLive-trigger) / (assistDuration * procs * (1-utilization)) /
		//         ((scanWork) / (assistDuration * procs * (utilization+idleUtilization)))
		//
		// Note that because we only care about the ratio, assistDuration and procs cancel out.
		scanWork := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()
		currentConsMark := (float64(c.heapLive-c.trigger) * (utilization + idleUtilization)) /
			(float64(scanWork) * (1 - utilization))
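
		// For example: if the heap grew 64 MiB past the trigger during a
		// cycle in which GC work (background + assist + idle) had 30% of
		// the CPU and scanned 32 MiB, then
		// currentConsMark = (64 MiB * 0.3) / (32 MiB * 0.7) ≈ 0.86: the
		// application allocates slightly more slowly, per unit of CPU,
		// than the GC scans.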
		// Update cons/mark controller. The time period for this is 1 GC cycle.
		//
		// This use of a PI controller might seem strange. So, here's an explanation:
		//
		// currentConsMark represents the consMark we *should've* had to be perfectly
		// on-target for this cycle. Given that we assume the next GC will be like this
		// one in the steady-state, it stands to reason that we should just pick that
		// as our next consMark. In practice, however, currentConsMark is too noisy:
		// we're going to be wildly off-target in each GC cycle if we do that.
		//
		// What we do instead is make a long-term assumption: there is some steady-state
		// consMark value, but it's obscured by noise. By constantly shooting for this
		// noisy-but-perfect consMark value, the controller will bounce around a bit,
		// but its average behavior, in aggregate, should be less noisy and closer to
		// the true long-term consMark value, provided it's tuned to be slightly overdamped.
		var ok bool
		oldConsMark := c.consMark
		c.consMark, ok = c.consMarkController.next(c.consMark, currentConsMark, 1.0)
		if !ok {
			// The error spiraled out of control. This is incredibly unlikely seeing
			// as this controller is essentially just a smoothing function, but it might
			// mean that something went very wrong with how currentConsMark was calculated.
			// Just reset consMark and keep going.
			c.consMark = 0
		}

		if debug.gcpacertrace > 0 {
			printlock()
			goal := gcGoalUtilization * 100
			print("pacer: ", int(utilization*100), "% CPU (", int(goal), " exp.) for ")
			print(c.heapScanWork.Load(), "+", c.stackScanWork.Load(), "+", c.globalsScanWork.Load(), " B work (", c.lastHeapScan+c.stackScan+c.globalsScan, " B exp.) ")
			print("in ", c.trigger, " B -> ", c.heapLive, " B (∆goal ", int64(c.heapLive)-int64(c.heapGoal), ", cons/mark ", oldConsMark, ")")
			if !ok {
				print("[controller reset]")
			}
			print("\n")
			printunlock()
		}
		return 0
	}
	// The legacy trigger feedback controller, used if
	// !goexperiment.PacerRedesign, follows below.

	if userForced {
		// Forced GC means this cycle didn't start at the
		// trigger, so where it finished isn't good
		// information about how to adjust the trigger.
		// Just leave it where it is.
		return c.triggerRatio
	}
	// Proportional response gain for the trigger controller. Must
	// be in [0, 1]. Lower values smooth out transient effects but
	// take longer to respond to phase changes. Higher values
	// react to phase changes quickly, but are more affected by
	// transient changes. Values near 1 may be unstable.
	const triggerGain = 0.5

	// Compute next cycle trigger ratio. First, this computes the
	// "error" for this cycle; that is, how far off the trigger
	// was from what it should have been, accounting for both heap
	// growth and GC CPU utilization. We compute the actual heap
	// growth during this cycle and scale that by how far off from
	// the goal CPU utilization we were (to estimate the heap
	// growth if we had the desired CPU utilization). The
	// difference between this estimate and the GOGC-based goal
	// heap growth is the error.
	goalGrowthRatio := c.effectiveGrowthRatio()
	actualGrowthRatio := float64(c.heapLive)/float64(c.heapMarked) - 1
	triggerError := goalGrowthRatio - c.triggerRatio - utilization/gcGoalUtilization*(actualGrowthRatio-c.triggerRatio)

	// Finally, we adjust the trigger for next time by this error,
	// damped by the proportional gain.
	triggerRatio := c.triggerRatio + triggerGain*triggerError
	if debug.gcpacertrace > 0 {
		// Print controller state in terms of the design
		// document.
		H_m_prev := c.heapMarked
		h_t := c.triggerRatio
		H_T := c.trigger
		h_a := actualGrowthRatio
		H_a := c.heapLive
		h_g := goalGrowthRatio
		H_g := int64(float64(H_m_prev) * (1 + h_g))
		u_a := utilization
		u_g := gcGoalUtilization
		W_a := c.heapScanWork.Load()
		print("pacer: H_m_prev=", H_m_prev,
			" h_t=", h_t, " H_T=", H_T,
			" h_a=", h_a, " H_a=", H_a,
			" h_g=", h_g, " H_g=", H_g,
			" u_a=", u_a, " u_g=", u_g,
			" W_a=", W_a,
			" goalΔ=", goalGrowthRatio-h_t,
			" actualΔ=", h_a-h_t,
			" u_a/u_g=", u_a/u_g,
			"\n")
	}

	return triggerRatio
}
// enlistWorker encourages another dedicated mark worker to start on
// another P if there are spare worker slots. It is used by putfull
// when more work is made available.
//
//go:nowritebarrier
func (c *gcControllerState) enlistWorker() {
	// If there are idle Ps, wake one so it will run an idle worker.
	// NOTE: This is suspected of causing deadlocks. See golang.org/issue/19112.
	//
	// if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
	// 	wakep()
	// 	return
	// }
	// There are no idle Ps. If we need more dedicated workers,
	// try to preempt a running P so it will switch to a worker.
	if c.dedicatedMarkWorkersNeeded <= 0 {
		return
	}
	// Pick a random other P to preempt.
	if gomaxprocs <= 1 {
		return
	}
	gp := getg()
	if gp == nil || gp.m == nil || gp.m.p == 0 {
		return
	}
	myID := gp.m.p.ptr().id
	for tries := 0; tries < 5; tries++ {
		id := int32(fastrandn(uint32(gomaxprocs - 1)))
		if id >= myID {
			id++
		}
		p := allp[id]
		if p.status != _Prunning {
			continue
		}
		if preemptone(p) {
			return
		}
	}
}
// findRunnableGCWorker returns a background mark worker for _p_ if it
// should be run. This must only be called when gcBlackenEnabled != 0.
func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g {
	if gcBlackenEnabled == 0 {
		throw("gcControllerState.findRunnable: blackening not enabled")
	}

	if !gcMarkWorkAvailable(_p_) {
		// No work to be done right now. This can happen at
		// the end of the mark phase when there are still
		// assists tapering off. Don't bother running a worker
		// now because it'll just return immediately.
		return nil
	}

	// Grab a worker before we commit to running below.
	node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
	if node == nil {
		// There is at least one worker per P, so normally there are
		// enough workers to run on all Ps, if necessary. However, once
		// a worker enters gcMarkDone it may park without rejoining the
		// pool, thus freeing a P with no corresponding worker.
		// gcMarkDone never depends on another worker doing work, so it
		// is safe to simply do nothing here.
		//
		// If gcMarkDone bails out without completing the mark phase,
		// it will always do so with queued global work. Thus, that P
		// will be immediately eligible to re-run the worker G it was
		// just using, ensuring work can complete.
		return nil
	}
	decIfPositive := func(ptr *int64) bool {
		for {
			v := atomic.Loadint64(ptr)
			if v <= 0 {
				return false
			}

			if atomic.Casint64(ptr, v, v-1) {
				return true
			}
		}
	}
	if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
		// This P is now dedicated to marking until the end of
		// the concurrent mark phase.
		_p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
	} else if c.fractionalUtilizationGoal == 0 {
		// No need for fractional workers.
		gcBgMarkWorkerPool.push(&node.node)
		return nil
	} else {
		// Is this P behind on the fractional utilization
		// goal?
		//
		// This should be kept in sync with pollFractionalWorkerExit.
		delta := nanotime() - c.markStartTime
		if delta > 0 && float64(_p_.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal {
			// Nope. No need to run a fractional worker.
			gcBgMarkWorkerPool.push(&node.node)
			return nil
		}
		// Run a fractional worker.
		_p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode
	}

	// Run the background mark worker.
	gp := node.gp.ptr()
	casgstatus(gp, _Gwaiting, _Grunnable)
	return gp
}
// resetLive sets up the controller state for the next mark phase after the end
// of the previous one. Must be called after endCycle and before commit, before
// the world is started.
//
// The world must be stopped.
func (c *gcControllerState) resetLive(bytesMarked uint64) {
	c.heapMarked = bytesMarked
	c.heapLive = bytesMarked
	c.heapScan = uint64(c.heapScanWork.Load())
	c.lastHeapScan = uint64(c.heapScanWork.Load())

	// heapLive was updated, so emit a trace event.
	if trace.enabled {
		traceHeapAlloc()
	}
}
// logWorkTime updates mark work accounting in the controller by a duration of
// work in nanoseconds.
//
// Safe to execute at any time.
func (c *gcControllerState) logWorkTime(mode gcMarkWorkerMode, duration int64) {
	switch mode {
	case gcMarkWorkerDedicatedMode:
		atomic.Xaddint64(&c.dedicatedMarkTime, duration)
		atomic.Xaddint64(&c.dedicatedMarkWorkersNeeded, 1)
	case gcMarkWorkerFractionalMode:
		atomic.Xaddint64(&c.fractionalMarkTime, duration)
	case gcMarkWorkerIdleMode:
		atomic.Xaddint64(&c.idleMarkTime, duration)
	default:
		throw("logWorkTime: unknown mark worker mode")
	}
}
func (c *gcControllerState) update(dHeapLive, dHeapScan int64) {
	if dHeapLive != 0 {
		atomic.Xadd64(&gcController.heapLive, dHeapLive)
		if trace.enabled {
			// gcController.heapLive changed.
			traceHeapAlloc()
		}
	}

	// Only update heapScan in the new pacer redesign if we're not
	// currently in a GC.
	if !goexperiment.PacerRedesign || gcBlackenEnabled == 0 {
		if dHeapScan != 0 {
			atomic.Xadd64(&gcController.heapScan, dHeapScan)
		}
	}
	if gcBlackenEnabled != 0 {
		// gcController.heapLive and heapScan changed.
		c.revise()
	}
}
func (c *gcControllerState) addScannableStack(pp *p, amount int64) {
	if pp == nil {
		atomic.Xadd64(&c.scannableStackSize, amount)
		return
	}
	pp.scannableStackSizeDelta += amount
	if pp.scannableStackSizeDelta >= scannableStackSizeSlack || pp.scannableStackSizeDelta <= -scannableStackSizeSlack {
		atomic.Xadd64(&c.scannableStackSize, pp.scannableStackSizeDelta)
		pp.scannableStackSizeDelta = 0
	}
}
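
// The delta-batching pattern used by addScannableStack, shown standalone
// (a minimal sketch with hypothetical names, assuming one worker struct
// per P and a shared sync/atomic counter; the runtime's version uses
// runtime/internal/atomic instead):
//
//	var total atomic.Int64 // shared; a contended cache line if updated per change
//
//	type worker struct {
//		delta int64 // accumulated locally; no synchronization needed
//	}
//
//	func (w *worker) add(n, slack int64) {
//		w.delta += n
//		if w.delta >= slack || w.delta <= -slack {
//			total.Add(w.delta) // one contended update per ~slack bytes
//			w.delta = 0
//		}
//	}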
func (c *gcControllerState) addGlobals(amount int64) {
	atomic.Xadd64(&c.globalsScan, amount)
}
// commit recomputes all pacing parameters from scratch, namely
// absolute trigger, the heap goal, mark pacing, and sweep pacing.
//
// If goexperiment.PacerRedesign is true, triggerRatio is ignored.
//
// This can be called any time. If GC is in the middle of a
// concurrent phase, it will adjust the pacing of that phase.
//
// This depends on gcPercent, gcController.heapMarked, and
// gcController.heapLive. These must be up to date.
//
// mheap_.lock must be held or the world must be stopped.
func (c *gcControllerState) commit(triggerRatio float64) {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	if !goexperiment.PacerRedesign {
		c.oldCommit(triggerRatio)
		return
	}
	// Compute the next GC goal, which is when the allocated heap
	// has grown by GOGC/100 over where it started the last cycle,
	// plus additional runway for non-heap sources of GC work.
	goal := ^uint64(0)
	if gcPercent := c.gcPercent.Load(); gcPercent >= 0 {
		goal = c.heapMarked + (c.heapMarked+atomic.Load64(&c.stackScan)+atomic.Load64(&c.globalsScan))*uint64(gcPercent)/100
	}
	// Don't trigger below the minimum heap size.
	minTrigger := c.heapMinimum
	if !isSweepDone() {
		// Concurrent sweep happens in the heap growth
		// from gcController.heapLive to trigger, so ensure
		// that concurrent sweep has some heap growth
		// in which to perform sweeping before we
		// start the next GC cycle.
		sweepMin := atomic.Load64(&c.heapLive) + sweepMinHeapDistance
		if sweepMin > minTrigger {
			minTrigger = sweepMin
		}
	}
	// If we let the trigger go too low, then if the application
	// is allocating very rapidly we might end up in a situation
	// where we're allocating black during a nearly always-on GC.
	// The result of this is a growing heap and ultimately an
	// increase in RSS. By capping us at a point >0, we're essentially
	// saying that we're OK using more CPU during the GC to prevent
	// this growth in RSS.
	//
	// The current constant was chosen empirically: given a sufficiently
	// fast/scalable allocator with 48 Ps that could drive the trigger ratio
	// to <0.05, this constant causes applications to retain the same peak
	// RSS compared to not having this allocator.
	if triggerBound := uint64(0.7*float64(goal-c.heapMarked)) + c.heapMarked; minTrigger < triggerBound {
		minTrigger = triggerBound
	}
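
	// For example: with heapMarked = 100 MiB and goal = 200 MiB, the
	// trigger cannot fall below 0.7*(200-100) + 100 = 170 MiB, i.e. at
	// least 30% of the runway to the goal is always reserved for the GC.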
	// For small heaps, set the max trigger point at 95% of the heap goal.
	// This ensures we always have *some* headroom when the GC actually starts.
	// For larger heaps, set the max trigger point at the goal, minus the
	// minimum heap size.
	//
	// This choice follows from the fact that the minimum heap size is chosen
	// to reflect the costs of a GC with no work to do. With a large heap but
	// very little scan work to perform, this gives us exactly as much runway
	// as we would need, in the worst case.
	maxRunway := uint64(0.95 * float64(goal-c.heapMarked))
	if largeHeapMaxRunway := goal - c.heapMinimum; goal > c.heapMinimum && maxRunway < largeHeapMaxRunway {
		maxRunway = largeHeapMaxRunway
	}
	maxTrigger := maxRunway + c.heapMarked
	if maxTrigger < minTrigger {
		maxTrigger = minTrigger
	}
	// Compute the trigger by using our estimate of the cons/mark ratio.
	//
	// The idea is to take our expected scan work, and multiply it by
	// the cons/mark ratio to determine how long it'll take to complete
	// that scan work in terms of bytes allocated. This gives us our GC's
	// "runway".
	//
	// However, the cons/mark ratio is a ratio of rates per CPU-second, but
	// here we care about the relative rates for some division of CPU
	// resources among the mutator and the GC.
	//
	// To summarize, we have B / cpu-ns, and we want B / ns. We get that
	// by multiplying by our desired division of CPU resources. We choose
	// to express CPU resources as GOMAXPROCS*fraction. Note that because
	// we're working with a ratio here, we can omit the number of CPU cores,
	// because they'll appear in the numerator and denominator and cancel out.
	// As a result, this is basically just "weighing" the cons/mark ratio by
	// our desired division of resources.
	//
	// Furthermore, by setting the trigger so that CPU resources are divided
	// this way, assuming that the cons/mark ratio is correct, we make that
	// division a reality.
	runway := uint64((c.consMark * (1 - gcGoalUtilization) / (gcGoalUtilization)) * float64(c.lastHeapScan+c.stackScan+c.globalsScan))
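
	// For example: with consMark = 2 (the application allocates twice as
	// fast, per unit of CPU, as the GC scans), gcGoalUtilization = 0.25,
	// and 100 MiB of expected scan work, runway = 2 * (0.75/0.25) * 100
	// MiB = 600 MiB: marking must begin while at least that much
	// allocation headroom remains below the goal.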
	var trigger uint64
	if runway > goal {
		trigger = minTrigger
	} else {
		trigger = goal - runway
	}
	if trigger < minTrigger {
		trigger = minTrigger
	}
	if trigger > maxTrigger {
		trigger = maxTrigger
	}
	if trigger > goal {
		goal = trigger
	}

	// Commit to the trigger and goal.
	c.trigger = trigger
	atomic.Store64(&c.heapGoal, goal)
	if trace.enabled {
		traceHeapGoal()
	}

	// Update mark pacing.
	if gcphase != _GCoff {
		c.revise()
	}
}
// oldCommit sets the trigger ratio and updates everything
// derived from it: the absolute trigger, the heap goal, mark pacing,
// and sweep pacing.
//
// This can be called any time. If GC is in the middle of a
// concurrent phase, it will adjust the pacing of that phase.
//
// This depends on gcPercent, gcController.heapMarked, and
// gcController.heapLive. These must be up to date.
//
// For !goexperiment.PacerRedesign.
func (c *gcControllerState) oldCommit(triggerRatio float64) {
	gcPercent := c.gcPercent.Load()
	// Compute the next GC goal, which is when the allocated heap
	// has grown by GOGC/100 over the heap marked by the last
	// cycle.
	goal := ^uint64(0)
	if gcPercent >= 0 {
		goal = c.heapMarked + c.heapMarked*uint64(gcPercent)/100
	}
	// Set the trigger ratio, capped to reasonable bounds.
	if gcPercent >= 0 {
		scalingFactor := float64(gcPercent) / 100
		// Ensure there's always a little margin so that the
		// mutator assist ratio isn't infinity.
		maxTriggerRatio := 0.95 * scalingFactor
		if triggerRatio > maxTriggerRatio {
			triggerRatio = maxTriggerRatio
		}

		// If we let triggerRatio go too low, then if the application
		// is allocating very rapidly we might end up in a situation
		// where we're allocating black during a nearly always-on GC.
		// The result of this is a growing heap and ultimately an
		// increase in RSS. By capping us at a point >0, we're essentially
		// saying that we're OK using more CPU during the GC to prevent
		// this growth in RSS.
		//
		// The current constant was chosen empirically: given a sufficiently
		// fast/scalable allocator with 48 Ps that could drive the trigger ratio
		// to <0.05, this constant causes applications to retain the same peak
		// RSS compared to not having this allocator.
		minTriggerRatio := 0.6 * scalingFactor
		if triggerRatio < minTriggerRatio {
			triggerRatio = minTriggerRatio
		}
	} else if triggerRatio < 0 {
		// gcPercent < 0, so just make sure we're not getting a negative
		// triggerRatio. This case isn't expected to happen in practice,
		// and doesn't really matter because if gcPercent < 0 then we won't
		// ever consume triggerRatio further on in this function, but let's
		// just be defensive here; the triggerRatio being negative is almost
		// certainly undesirable.
		triggerRatio = 0
	}
	c.triggerRatio = triggerRatio
	// Compute the absolute GC trigger from the trigger ratio.
	//
	// We trigger the next GC cycle when the allocated heap has
	// grown by the trigger ratio over the marked heap size.
	trigger := ^uint64(0)
	if gcPercent >= 0 {
		trigger = uint64(float64(c.heapMarked) * (1 + triggerRatio))
		// Don't trigger below the minimum heap size.
		minTrigger := c.heapMinimum
		if !isSweepDone() {
			// Concurrent sweep happens in the heap growth
			// from gcController.heapLive to trigger, so ensure
			// that concurrent sweep has some heap growth
			// in which to perform sweeping before we
			// start the next GC cycle.
			sweepMin := atomic.Load64(&c.heapLive) + sweepMinHeapDistance
			if sweepMin > minTrigger {
				minTrigger = sweepMin
			}
		}
		if trigger < minTrigger {
			trigger = minTrigger
		}
		if int64(trigger) < 0 {
			print("runtime: heapGoal=", c.heapGoal, " heapMarked=", c.heapMarked, " gcController.heapLive=", c.heapLive, " initialHeapLive=", work.initialHeapLive, " triggerRatio=", triggerRatio, " minTrigger=", minTrigger, "\n")
			throw("trigger underflow")
		}
		if trigger > goal {
			// The trigger ratio is always less than GOGC/100, but
			// other bounds on the trigger may have raised it.
			// Push up the goal, too.
			goal = trigger
		}
	}

	// Commit to the trigger and goal.
	c.trigger = trigger
	atomic.Store64(&c.heapGoal, goal)
	if trace.enabled {
		traceHeapGoal()
	}

	// Update mark pacing.
	if gcphase != _GCoff {
		c.revise()
	}
}
// effectiveGrowthRatio returns the current effective heap growth
// ratio (GOGC/100) based on heapMarked from the previous GC and
// heapGoal for the current GC.
//
// This may differ from gcPercent/100 because of various upper and
// lower bounds on gcPercent. For example, if the heap is smaller than
// heapMinimum, this can be higher than gcPercent/100.
//
// mheap_.lock must be held or the world must be stopped.
func (c *gcControllerState) effectiveGrowthRatio() float64 {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	egogc := float64(atomic.Load64(&c.heapGoal)-c.heapMarked) / float64(c.heapMarked)
	if egogc < 0 {
		// Shouldn't happen, but just in case.
		egogc = 0
	}
	return egogc
}
// setGCPercent updates gcPercent and all related pacer state.
// Returns the old value of gcPercent.
//
// Calls gcControllerState.commit.
//
// The world must be stopped, or mheap_.lock must be held.
func (c *gcControllerState) setGCPercent(in int32) int32 {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	out := c.gcPercent.Load()
	if in < 0 {
		in = -1
	}
	c.heapMinimum = defaultHeapMinimum * uint64(in) / 100
	c.gcPercent.Store(in)
	// Update pacing in response to gcPercent change.
	c.commit(c.triggerRatio)

	return out
}
//go:linkname setGCPercent runtime/debug.setGCPercent
func setGCPercent(in int32) (out int32) {
	// Run on the system stack since we grab the heap lock.
	systemstack(func() {
		lock(&mheap_.lock)
		out = gcController.setGCPercent(in)
		gcPaceSweeper(gcController.trigger)
		gcPaceScavenger(gcController.heapGoal, gcController.lastHeapGoal)
		unlock(&mheap_.lock)
	})

	// If we just disabled GC, wait for any concurrent GC mark to
	// finish so we always return with no GC running.
	if in < 0 {
		gcWaitOnMark(atomic.Load(&work.cycles))
	}

	return out
}
func readGOGC() int32 {
	p := gogetenv("GOGC")
	if p == "off" {
		return -1
	}
	if n, ok := atoi32(p); ok {
		return n
	}
	return 100
}
type piController struct {
	kp float64 // Proportional constant.
	ti float64 // Integral time constant.
	tt float64 // Reset time.

	min, max float64 // Output boundaries.

	// PI controller state.

	errIntegral float64 // Integral of the error from t=0 to now.

	// Error flags.
	errOverflow   bool // Set if errIntegral ever overflowed.
	inputOverflow bool // Set if an operation with the input overflowed.
}
// next provides a new sample to the controller.
//
// input is the sample, setpoint is the desired point, and period is how much
// time (in whatever unit makes the most sense) has passed since the last sample.
//
// Returns a new value for the variable it's controlling, and whether the operation
// completed successfully. One reason this might fail is if error has been growing
// in an unbounded manner, to the point of overflow.
//
// In the specific case where an error overflow occurs, the errOverflow field will be
// set and the rest of the controller's internal state will be fully reset.
func (c *piController) next(input, setpoint, period float64) (float64, bool) {
	// Compute the raw output value.
	prop := c.kp * (setpoint - input)
	rawOutput := prop + c.errIntegral

	// Clamp rawOutput into output.
	output := rawOutput
	if isInf(output) || isNaN(output) {
		// The input had a large enough magnitude that either it was already
		// overflowed, or some operation with it overflowed.
		// Set a flag and reset. That's the safest thing to do.
		c.reset()
		c.inputOverflow = true
		return c.min, false
	}
	if output < c.min {
		output = c.min
	} else if output > c.max {
		output = c.max
	}
1331 if c.ti != 0 && c.tt != 0 {
1332 c.errIntegral += (c.kp*period/c.ti)*(setpoint-input) + (period/c.tt)*(output-rawOutput)
1333 if isInf(c.errIntegral) || isNaN(c.errIntegral) {
1334 // So much error has accumulated that we managed to overflow.
1335 // The assumptions around the controller have likely broken down.
1336 // Set a flag and reset. That's the safest thing to do.
1338 c.errOverflow = true
// reset resets the controller state, except for controller error flags.
func (c *piController) reset() {
	c.errIntegral = 0
}
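
// A usage sketch for piController (hypothetical gains and inputs; the
// only in-tree user is consMarkController above, which steps the
// controller once per GC cycle with period = 1.0):
//
//	pi := piController{kp: 0.9, ti: 4.0, tt: 1000, min: -1000, max: 1000}
//	smoothed := 0.0
//	for _, sample := range samples {
//		// Drive the output toward each noisy sample; the integral
//		// term smooths transients across iterations.
//		out, ok := pi.next(smoothed, sample, 1.0)
//		if !ok {
//			// The controller overflowed and reset itself.
//			out = 0
//		}
//		smoothed = out
//	}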