// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/goexperiment"
	"runtime/internal/atomic"
	"unsafe"
)

// go119MemoryLimitSupport is a feature flag for a number of changes
// related to the memory limit feature (#48409). Disabling this flag
// disables those features, as well as the memory limit mechanism,
// which becomes a no-op.
const go119MemoryLimitSupport = true

const (
	// gcGoalUtilization is the goal CPU utilization for
	// marking as a fraction of GOMAXPROCS.
	//
	// Increasing the goal utilization will shorten GC cycles as the GC
	// has more resources behind it, lessening costs from the write barrier,
	// but comes at the cost of increasing mutator latency.
	gcGoalUtilization = gcBackgroundUtilization

	// gcBackgroundUtilization is the fixed CPU utilization for background
	// marking. It must be <= gcGoalUtilization. The difference between
	// gcGoalUtilization and gcBackgroundUtilization will be made up by
	// mark assists. The scheduler will aim to use within 50% of this
	// goal.
	//
	// As a general rule, there's little reason to set gcBackgroundUtilization
	// < gcGoalUtilization. One reason might be in mostly idle applications,
	// where goroutines are unlikely to assist at all, so the actual
	// utilization will be lower than the goal. But this is a moot point
	// because the idle mark workers already soak up idle CPU resources.
	// These two values are still kept separate, however, because they are
	// distinct conceptually, and in previous iterations of the pacer the
	// distinction was more important.
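	//
	// For a concrete sense of scale (illustrative numbers, not from this
	// file): with GOMAXPROCS=8, the pacer aims to spend 8*0.25 = 2 CPUs'
	// worth of time on background marking, split between dedicated and
	// fractional workers by startCycle below.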
	gcBackgroundUtilization = 0.25

	// gcCreditSlack is the amount of scan work credit that can
	// accumulate locally before updating gcController.heapScanWork and,
	// optionally, gcController.bgScanCredit. Lower values give a more
	// accurate assist ratio and make it more likely that assists will
	// successfully steal background credit. Higher values reduce memory
	// contention.
	gcCreditSlack = 2000

	// gcAssistTimeSlack is the nanoseconds of mutator assist time that
	// can accumulate on a P before updating gcController.assistTime.
	gcAssistTimeSlack = 5000

	// gcOverAssistWork determines how many extra units of scan work a GC
	// assist does when an assist happens. This amortizes the cost of an
	// assist by pre-paying for this many bytes of future allocations.
	gcOverAssistWork = 64 << 10

	// defaultHeapMinimum is the value of heapMinimum for GOGC==100.
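	//
	// Since goexperiment.HeapMinimum512KiBInt is 1 when the experiment is
	// enabled and 0 otherwise, the expression below works out to (worked
	// arithmetic): 512 KiB with the experiment enabled, and 4 MiB without.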
	defaultHeapMinimum = (goexperiment.HeapMinimum512KiBInt)*(512<<10) +
		(1-goexperiment.HeapMinimum512KiBInt)*(4<<20)

	// scannableStackSizeSlack is the bytes of stack space allocated or freed
	// that can accumulate on a P before updating gcController.scannableStackSize.
	scannableStackSizeSlack = 8 << 10
)

func init() {
	if offset := unsafe.Offsetof(gcController.heapLive); offset%8 != 0 {
		throw("gcController.heapLive not aligned to 8 bytes")
	}
}

// gcController implements the GC pacing controller that determines
// when to trigger concurrent garbage collection and how much marking
// work to do in mutator assists and background marking.
//
// It calculates the ratio between the allocation rate (in terms of CPU
// time) and the GC scan throughput to determine the heap size at which to
// trigger a GC cycle such that no GC assists are required to finish on time.
// This algorithm thus optimizes GC CPU utilization to the dedicated background
// mark utilization of 25% of GOMAXPROCS by minimizing GC assists.
// The high-level design of this algorithm is documented
// at https://github.com/golang/proposal/blob/master/design/44167-gc-pacer-redesign.md.
// See https://golang.org/s/go15gcpacing for additional historical context.
var gcController gcControllerState

type gcControllerState struct {
	// Initialized from GOGC. GOGC=off means no GC.
	gcPercent atomic.Int32

	_ uint32 // padding so following 64-bit values are 8-byte aligned

	// memoryLimit is the soft memory limit in bytes.
	//
	// Initialized from GOMEMLIMIT. GOMEMLIMIT=off is equivalent to MaxInt64
	// which means no soft memory limit in practice.
	//
	// This is an int64 instead of a uint64 to more easily maintain parity with
	// the SetMemoryLimit API, which sets a maximum at MaxInt64. This value
	// should never be negative.
	memoryLimit atomic.Int64

	// heapMinimum is the minimum heap size at which to trigger GC.
	// For small heaps, this overrides the usual GOGC*live set rule.
	//
	// When there is a very small live set but a lot of allocation, simply
	// collecting when the heap reaches GOGC*live results in many GC
	// cycles and high total per-GC overhead. This minimum amortizes this
	// per-GC overhead while keeping the heap reasonably small.
	//
	// During initialization this is set to 4MB*GOGC/100. In the case of
	// GOGC==0, this will set heapMinimum to 0, resulting in constant
	// collection even when the heap size is small, which is useful for
	// benchmarking.
	heapMinimum uint64

	// trigger is the heap size that triggers marking.
	//
	// When heapLive ≥ trigger, the mark phase will start.
	// This is also the heap size by which proportional sweeping
	// must be complete.
	//
	// This is computed from consMark during mark termination for
	// the next cycle's trigger.
	//
	// Protected by mheap_.lock or a STW.
	trigger uint64

	// consMark is the estimated per-CPU consMark ratio for the application.
	//
	// It represents the ratio between the application's allocation
	// rate, as bytes allocated per CPU-time, and the GC's scan rate,
	// as bytes scanned per CPU-time.
	// The units of this ratio are (B / cpu-ns) / (B / cpu-ns).
	//
	// At a high level, this value is computed as the bytes of memory
	// allocated (cons) per unit of scan work completed (mark) in a GC
	// cycle, divided by the CPU time spent on each activity.
	//
	// Updated at the end of each GC cycle, in endCycle.
	consMark float64

	// consMarkController holds the state for the mark-cons ratio
	// estimation over time.
	//
	// Its purpose is to smooth out noisiness in the computation of
	// consMark; see consMark for details.
	consMarkController piController

	_ uint32 // Padding for atomics on 32-bit platforms.

	// heapGoal is the goal heapLive for when next GC ends.
	// Set to ^uint64(0) if disabled.
	//
	// Read and written atomically, unless the world is stopped.
	heapGoal uint64

	// lastHeapGoal is the value of heapGoal for the previous GC.
	// Note that this is distinct from the last value heapGoal had,
	// because it could change if e.g. gcPercent changes.
	//
	// Read and written with the world stopped or with mheap_.lock held.
	lastHeapGoal uint64

	// heapLive is the number of bytes considered live by the GC.
	// That is: retained by the most recent GC plus allocated
	// since then. heapLive ≤ memstats.heapAlloc, since heapAlloc includes
	// unmarked objects that have not yet been swept (and hence goes up as we
	// allocate and down as we sweep) while heapLive excludes these
	// objects (and hence only goes up between GCs).
	//
	// This is updated atomically without locking. To reduce
	// contention, this is updated only when obtaining a span from
	// an mcentral and at this point it counts all of the
	// unallocated slots in that span (which will be allocated
	// before that mcache obtains another span from that
	// mcentral). Hence, it slightly overestimates the "true" live
	// heap size. It's better to overestimate than to
	// underestimate because 1) this triggers the GC earlier than
	// necessary rather than potentially too late and 2) this
	// leads to a conservative GC rate rather than a GC rate that
	// is potentially too low.
	//
	// Reads should likewise be atomic (or during STW).
	//
	// Whenever this is updated, call traceHeapAlloc() and
	// this gcControllerState's revise() method.
	heapLive uint64

	// heapScan is the number of bytes of "scannable" heap. This
	// is the live heap (as counted by heapLive), but omitting
	// no-scan objects and no-scan tails of objects.
	//
	// This value is fixed at the start of a GC cycle, so during a
	// GC cycle it is safe to read without atomics, and it represents
	// the maximum scannable heap.
	heapScan uint64

	// lastHeapScan is the number of bytes of heap that were scanned
	// last GC cycle. It is the same as heapMarked, but only
	// includes the "scannable" parts of objects.
	//
	// Updated when the world is stopped.
	lastHeapScan uint64

	// stackScan is a snapshot of scannableStackSize taken at each GC
	// STW pause and is used in pacing decisions.
	//
	// Updated only while the world is stopped.
	stackScan uint64

	// scannableStackSize is the amount of allocated goroutine stack space in
	// use by goroutines.
	//
	// This number tracks allocated goroutine stack space rather than used
	// goroutine stack space (i.e. what is actually scanned) because used
	// goroutine stack space is much harder to measure cheaply. By using
	// allocated space, we make an overestimate; this is OK, it's better
	// to conservatively overcount than undercount.
	//
	// Read and updated atomically.
	scannableStackSize uint64

	// globalsScan is the total amount of global variable space
	// that is scannable.
	//
	// Read and updated atomically.
	globalsScan uint64

	// heapMarked is the number of bytes marked by the previous
	// GC. After mark termination, heapLive == heapMarked, but
	// unlike heapLive, heapMarked does not change until the
	// next mark termination.
	heapMarked uint64

	// heapScanWork is the total heap scan work performed this cycle.
	// stackScanWork is the total stack scan work performed this cycle.
	// globalsScanWork is the total globals scan work performed this cycle.
	//
	// These are updated atomically during the cycle. Updates occur in
	// bounded batches, since they are both written and read
	// throughout the cycle. At the end of the cycle, heapScanWork is how
	// much of the retained heap is scannable.
	//
	// Currently these are measured in bytes. For most uses, this is an
	// opaque unit of work, but for estimation the definition is important.
	//
	// Note that stackScanWork includes all allocated space, not just the
	// size of the stack itself, mirroring scannableStackSize.
	heapScanWork    atomic.Int64
	stackScanWork   atomic.Int64
	globalsScanWork atomic.Int64

	// bgScanCredit is the scan work credit accumulated by the
	// concurrent background scan. This credit is accumulated by
	// the background scan and stolen by mutator assists. This is
	// updated atomically. Updates occur in bounded batches, since
	// it is both written and read throughout the cycle.
	bgScanCredit int64

	// assistTime is the nanoseconds spent in mutator assists
	// during this cycle. This is updated atomically, and must also
	// be updated atomically even during a STW, because it is read
	// by sysmon. Updates occur in bounded batches, since it is both
	// written and read throughout the cycle.
	assistTime atomic.Int64

	// dedicatedMarkTime is the nanoseconds spent in dedicated
	// mark workers during this cycle. This is updated atomically
	// at the end of the concurrent mark phase.
	dedicatedMarkTime int64

	// fractionalMarkTime is the nanoseconds spent in the
	// fractional mark worker during this cycle. This is updated
	// atomically throughout the cycle and will be up-to-date if
	// the fractional mark worker is not currently running.
	fractionalMarkTime int64

	// idleMarkTime is the nanoseconds spent in idle marking
	// during this cycle. This is updated atomically throughout
	// the cycle.
	idleMarkTime int64

	// markStartTime is the absolute start time in nanoseconds
	// that assists and background mark workers started.
	markStartTime int64

	// dedicatedMarkWorkersNeeded is the number of dedicated mark
	// workers that need to be started. This is computed at the
	// beginning of each cycle and decremented atomically as
	// dedicated mark workers get started.
	dedicatedMarkWorkersNeeded int64

	// idleMarkWorkers is two packed int32 values in a single uint64.
	// These two values are always updated simultaneously.
	//
	// The bottom int32 is the current number of idle mark workers executing.
	//
	// The top int32 is the maximum number of idle mark workers allowed to
	// execute concurrently. Normally, this number is just gomaxprocs. However,
	// during periodic GC cycles it is set to 0 because the system is idle
	// anyway; there's no need to go full blast on all of GOMAXPROCS.
	//
	// The maximum number of idle mark workers is used to prevent new workers
	// from starting, but it is not a hard maximum. It is possible (but
	// exceedingly rare) for the current number of idle mark workers to
	// transiently exceed the maximum. This could happen if the maximum changes
	// just after a GC ends, while an M with no P is still winding down as an
	// idle mark worker.
	//
	// Note that if we have no dedicated mark workers, we set this value to
	// 1, because in that case we only have fractional GC workers, which aren't
	// scheduled strictly enough to ensure GC progress. As a result, idle-priority
	// mark workers are vital to GC progress in these situations.
	//
	// For example, consider a situation in which goroutines block on the GC
	// (such as via runtime.GOMAXPROCS) and only fractional mark workers are
	// scheduled (e.g. GOMAXPROCS=1). Without idle-priority mark workers, the
	// last running M might skip scheduling a fractional mark worker if its
	// utilization goal is met, such that once it goes to sleep (because there's
	// nothing to do), there will be nothing else to spin up a new M for the
	// fractional worker in the future, stalling GC progress and causing a
	// deadlock. However, idle-priority workers will *always* run when there is
	// nothing left to do, ensuring the GC makes progress.
	//
	// See github.com/golang/go/issues/44163 for more details.
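	//
	// To make the packing concrete (illustrative values): with max=4 and
	// n=2, this field holds uint64(4)<<32 | uint64(uint32(2)), i.e.
	// 0x0000000400000002. The helpers below decode it as
	// n, max := int32(v&uint64(^uint32(0))), int32(v>>32).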
	idleMarkWorkers atomic.Uint64

	// assistWorkPerByte is the ratio of scan work to allocated
	// bytes that should be performed by mutator assists. This is
	// computed at the beginning of each cycle and updated every
	// time heapScan is updated.
	assistWorkPerByte atomic.Float64

	// assistBytesPerWork is 1/assistWorkPerByte.
	//
	// Note that because this is read and written independently
	// from assistWorkPerByte users may notice a skew between
	// the two values, and such a state should be safe.
	assistBytesPerWork atomic.Float64

	// fractionalUtilizationGoal is the fraction of wall clock
	// time that should be spent in the fractional mark worker on
	// each P that isn't running a dedicated worker.
	//
	// For example, if the utilization goal is 25% and there are
	// no dedicated workers, this will be 0.25. If the goal is
	// 25%, there is one dedicated worker, and GOMAXPROCS is 5,
	// this will be 0.05 to make up the missing 5%.
	//
	// If this is zero, no fractional workers are needed.
	fractionalUtilizationGoal float64

	// These memory stats are effectively duplicates of fields from
	// memstats.heapStats but are updated atomically or with the world
	// stopped and don't provide the same consistency guarantees.
	//
	// Because the runtime is responsible for managing a memory limit, it's
	// useful to couple these stats more tightly to the gcController, which
	// is intimately connected to how that memory limit is maintained.
	heapInUse    sysMemStat    // bytes in mSpanInUse spans
	heapReleased sysMemStat    // bytes released to the OS
	heapFree     sysMemStat    // bytes not in any span, but not released to the OS
	totalAlloc   atomic.Uint64 // total bytes allocated
	totalFree    atomic.Uint64 // total bytes freed
	mappedReady  atomic.Uint64 // total virtual memory in the Ready state (see mem.go).

	// test indicates that this is a test-only copy of gcControllerState.
	test bool
}

func (c *gcControllerState) init(gcPercent int32, memoryLimit int64) {
	c.heapMinimum = defaultHeapMinimum

	c.consMarkController = piController{
		// Tuned first via the Ziegler-Nichols process in simulation,
		// then the integral time was manually tuned against real-world
		// applications to deal with noisiness in the measured cons/mark
		// ratio.
		kp: 0.9,
		ti: 4.0,

		// Set a high reset time in GC cycles.
		// This is inversely proportional to the rate at which we
		// accumulate error from clipping. By making this very high
		// we make the accumulation slow. In general, clipping is
		// OK in our situation, hence the choice.
		//
		// Tune this if we get unintended effects from clipping for
		// too long.
		tt:  1000,
		min: -1000,
		max: 1000,
	}

	c.setGCPercent(gcPercent)
	c.setMemoryLimit(memoryLimit)
}

// startCycle resets the GC controller's state and computes estimates
// for a new GC cycle. The caller must hold worldsema and the world
// must be stopped.
func (c *gcControllerState) startCycle(markStartTime int64, procs int, trigger gcTrigger) {
	c.heapScanWork.Store(0)
	c.stackScanWork.Store(0)
	c.globalsScanWork.Store(0)
	c.bgScanCredit = 0
	c.assistTime.Store(0)
	c.dedicatedMarkTime = 0
	c.fractionalMarkTime = 0
	c.idleMarkTime = 0
	c.markStartTime = markStartTime
	c.stackScan = atomic.Load64(&c.scannableStackSize)

	// Ensure that the heap goal is at least a little larger than
	// the current live heap size. This may not be the case if GC
	// start is delayed or if the allocation that pushed gcController.heapLive
	// over trigger is large or if the trigger is really close to
	// GOGC. Assist is proportional to this distance, so enforce a
	// minimum distance, even if it means going over the GOGC goal
	// by a tiny bit.
	if c.heapGoal < c.heapLive+64<<10 {
		c.heapGoal = c.heapLive + 64<<10
	}

	// Compute the background mark utilization goal. In general,
	// this may not come out exactly. We round the number of
	// dedicated workers so that the utilization is closest to
	// 25%. For small GOMAXPROCS, this would introduce too much
	// error, so we add fractional workers in that case.
	totalUtilizationGoal := float64(procs) * gcBackgroundUtilization
	c.dedicatedMarkWorkersNeeded = int64(totalUtilizationGoal + 0.5)
	utilError := float64(c.dedicatedMarkWorkersNeeded)/totalUtilizationGoal - 1
	const maxUtilError = 0.3
	if utilError < -maxUtilError || utilError > maxUtilError {
		// Rounding put us more than 30% off our goal. With
		// gcBackgroundUtilization of 25%, this happens for
		// GOMAXPROCS<=3 or GOMAXPROCS=6. Enable fractional
		// workers to compensate.
		if float64(c.dedicatedMarkWorkersNeeded) > totalUtilizationGoal {
			// Too many dedicated workers.
			c.dedicatedMarkWorkersNeeded--
		}
		c.fractionalUtilizationGoal = (totalUtilizationGoal - float64(c.dedicatedMarkWorkersNeeded)) / float64(procs)
	} else {
		c.fractionalUtilizationGoal = 0
	}
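
	// Worked example (illustrative): with GOMAXPROCS=2,
	// totalUtilizationGoal = 2*0.25 = 0.5, so the rounded worker count is
	// int64(0.5+0.5) = 1 and utilError = 1/0.5 - 1 = 1.0 > 0.3. One
	// dedicated worker would be 50% utilization, so it is dropped and
	// fractionalUtilizationGoal = (0.5-0)/2 = 0.25: each P instead runs a
	// fractional worker about 25% of the time.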

	// In STW mode, we just want dedicated workers.
	if debug.gcstoptheworld > 0 {
		c.dedicatedMarkWorkersNeeded = int64(procs)
		c.fractionalUtilizationGoal = 0
	}

	for _, p := range allp {
		p.gcAssistTime = 0
		p.gcFractionalMarkTime = 0
	}

	if trigger.kind == gcTriggerTime {
		// During a periodic GC cycle, reduce the number of idle mark workers
		// required. However, we need at least one dedicated mark worker or
		// idle GC worker to ensure GC progress in some scenarios (see comment
		// on maxIdleMarkWorkers).
		if c.dedicatedMarkWorkersNeeded > 0 {
			c.setMaxIdleMarkWorkers(0)
		} else {
			// TODO(mknyszek): The fundamental reason why we need this is because
			// we can't count on the fractional mark worker to get scheduled.
			// Fix that by ensuring it gets scheduled according to its quota even
			// if the rest of the application is idle.
			c.setMaxIdleMarkWorkers(1)
		}
	} else {
		// N.B. gomaxprocs and dedicatedMarkWorkersNeeded are guaranteed not to
		// change during a GC cycle.
		c.setMaxIdleMarkWorkers(int32(procs) - int32(c.dedicatedMarkWorkersNeeded))
	}

	// Compute initial values for controls that are updated
	// throughout the cycle.
	c.revise()

	if debug.gcpacertrace > 0 {
		assistRatio := c.assistWorkPerByte.Load()
		print("pacer: assist ratio=", assistRatio,
			" (scan ", gcController.heapScan>>20, " MB in ",
			work.initialHeapLive>>20, "->",
			c.heapGoal>>20, " MB)",
			" workers=", c.dedicatedMarkWorkersNeeded,
			"+", c.fractionalUtilizationGoal, "\n")
	}
}

// revise updates the assist ratio during the GC cycle to account for
// improved estimates. This should be called whenever gcController.heapScan,
// gcController.heapLive, or gcController.heapGoal is updated. It is safe to
// call concurrently, but it may race with other calls to revise.
//
// The result of this race is that the two assist ratio values may not line
// up or may be stale. In practice this is OK because the assist ratio
// moves slowly throughout a GC cycle, and the assist ratio is a best-effort
// heuristic anyway. Furthermore, no part of the heuristic depends on
// the two assist ratio values being exact reciprocals of one another, since
// the two values are used to convert values from different sources.
//
// The worst case result of this raciness is that we may miss a larger shift
// in the ratio (say, if we decide to pace more aggressively against the
// hard heap goal) but even this "hard goal" is best-effort (see #40460).
// The dedicated GC should ensure we don't exceed the hard goal by too much
// in the rare case we do exceed it.
//
// It should only be called when gcBlackenEnabled != 0 (because this
// is when assists are enabled and the necessary statistics are
// available).
func (c *gcControllerState) revise() {
	gcPercent := c.gcPercent.Load()
	if gcPercent < 0 {
		// If GC is disabled but we're running a forced GC,
		// act like GOGC is huge for the below calculations.
		gcPercent = 100000
	}

	live := atomic.Load64(&c.heapLive)
	scan := atomic.Load64(&c.heapScan)
	work := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()

	// Assume we're under the soft goal. Pace GC to complete at
	// heapGoal assuming the heap is in steady-state.
	heapGoal := int64(atomic.Load64(&c.heapGoal))

	// The expected scan work is computed as the amount of bytes scanned last
	// GC cycle, plus our estimate of stacks and globals work for this cycle.
	scanWorkExpected := int64(c.lastHeapScan + c.stackScan + c.globalsScan)

	// maxScanWork is a worst-case estimate of the amount of scan work that
	// needs to be performed in this GC cycle. Specifically, it represents
	// the case where *all* scannable memory turns out to be live.
	maxScanWork := int64(scan + c.stackScan + c.globalsScan)

	if work > scanWorkExpected {
		// We've already done more scan work than expected. Because our expectation
		// is based on a steady-state scannable heap size, we assume this means our
		// heap is growing. Compute a new heap goal that takes our existing runway
		// computed for scanWorkExpected and extrapolates it to maxScanWork, the worst-case
		// scan work. This keeps our assist ratio stable if the heap continues to grow.
		//
		// The effect of this mechanism is that assists stay flat in the face of heap
		// growths. It's OK to use more memory this cycle to scan all the live heap,
		// because the next GC cycle is inevitably going to use *at least* that much
		// memory anyway.
		extHeapGoal := int64(float64(heapGoal-int64(c.trigger))/float64(scanWorkExpected)*float64(maxScanWork)) + int64(c.trigger)
		scanWorkExpected = maxScanWork

		// hardGoal is a hard limit on the amount that we're willing to push back the
		// heap goal, and that's twice the heap goal (i.e. if GOGC=100 and the heap and/or
		// stacks and/or globals grow to twice their size, this limits the current GC cycle's
		// growth to 4x the original live heap's size).
		//
		// This maintains the invariant that we use no more memory than the next GC cycle
		// will use anyway.
		hardGoal := int64((1.0 + float64(gcPercent)/100.0) * float64(heapGoal))
		if extHeapGoal > hardGoal {
			extHeapGoal = hardGoal
		}
		heapGoal = extHeapGoal
	}
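
	// To see the extrapolation with numbers (illustrative): suppose
	// trigger=30MB, heapGoal=40MB (10MB of runway), scanWorkExpected=20MB,
	// and maxScanWork=30MB. Then extHeapGoal = 10*30/20 + 30 = 45MB, i.e.
	// the 10MB of runway is scaled up in proportion to the extra scan work.
	// With gcPercent=100, hardGoal = 2*40 = 80MB, so the extrapolated goal
	// is allowed to stand.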

	if int64(live) > heapGoal {
		// We're already past our heap goal, even the extrapolated one.
		// Leave ourselves some extra runway, so in the worst case we
		// finish by that point.
		const maxOvershoot = 1.1
		heapGoal = int64(float64(heapGoal) * maxOvershoot)

		// Compute the upper bound on the scan work remaining.
		scanWorkExpected = maxScanWork
	}

	// Compute the remaining scan work estimate.
	//
	// Note that we currently count allocations during GC as both
	// scannable heap (heapScan) and scan work completed
	// (scanWork), so allocation will change this difference
	// slowly in the soft regime and not at all in the hard
	// regime.
	scanWorkRemaining := scanWorkExpected - work
	if scanWorkRemaining < 1000 {
		// We set a somewhat arbitrary lower bound on
		// remaining scan work since if we aim a little high,
		// we can miss by a little.
		//
		// We *do* need to enforce that this is at least 1,
		// since marking is racy and double-scanning objects
		// may legitimately make the remaining scan work
		// negative, even in the hard goal regime.
		scanWorkRemaining = 1000
	}

	// Compute the heap distance remaining.
	heapRemaining := heapGoal - int64(live)
	if heapRemaining <= 0 {
		// This shouldn't happen, but if it does, avoid
		// dividing by zero or setting the assist negative.
		heapRemaining = 1
	}

	// Compute the mutator assist ratio so by the time the mutator
	// allocates the remaining heap bytes up to heapGoal, it will
	// have done (or stolen) the remaining amount of scan work.
	// Note that the assist ratio values are updated atomically
	// but not together. This means there may be some degree of
	// skew between the two values. This is generally OK as the
	// values shift relatively slowly over the course of a GC
	// cycle.
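	//
	// For instance (illustrative numbers): with 8MB of heap runway left
	// (heapRemaining) and 2MB of scan work remaining, the ratios come out
	// to assistWorkPerByte = 2/8 = 0.25 and assistBytesPerWork = 4; an
	// assisting goroutine must do a quarter byte of scan work for each
	// byte it allocates.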
	assistWorkPerByte := float64(scanWorkRemaining) / float64(heapRemaining)
	assistBytesPerWork := float64(heapRemaining) / float64(scanWorkRemaining)
	c.assistWorkPerByte.Store(assistWorkPerByte)
	c.assistBytesPerWork.Store(assistBytesPerWork)
}

// endCycle computes the consMark estimate for the next cycle.
// userForced indicates whether the current GC cycle was forced
// by the application.
func (c *gcControllerState) endCycle(now int64, procs int, userForced bool) {
	// Record last heap goal for the scavenger.
	// We'll be updating the heap goal soon.
	gcController.lastHeapGoal = gcController.heapGoal

	// Compute the duration of time for which assists were turned on.
	assistDuration := now - c.markStartTime

	// Assume background mark hit its utilization goal.
	utilization := gcBackgroundUtilization
	// Add assist utilization; avoid divide by zero.
	if assistDuration > 0 {
		utilization += float64(c.assistTime.Load()) / float64(assistDuration*int64(procs))
	}
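
	// Illustrative numbers: if marking ran for assistDuration=100ms on
	// procs=4 (400ms of CPU time) and assists consumed 20ms of CPU time,
	// then utilization = 0.25 + 20/400 = 0.30.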

	if c.heapLive <= c.trigger {
		// Shouldn't happen, but let's be very safe about this in case the
		// GC is somehow extremely short.
		//
		// In this case though, the only reasonable value for c.heapLive-c.trigger
		// would be 0, which isn't really all that useful, i.e. the GC was so short
		// that it didn't matter.
		//
		// Ignore this case and don't update anything.
		return
	}
	idleUtilization := 0.0
	if assistDuration > 0 {
		idleUtilization = float64(c.idleMarkTime) / float64(assistDuration*int64(procs))
	}
	// Determine the cons/mark ratio.
	//
	// The units we want for the numerator and denominator are both B / cpu-ns.
	// We get this by taking the bytes allocated or scanned, and divide by the amount of
	// CPU time it took for those operations. For allocations, that CPU time is
	//
	//	assistDuration * procs * (1 - utilization)
	//
	// Where utilization includes just background GC workers and assists. It does *not*
	// include idle GC work time, because in theory the mutator is free to take that at
	// any point.
	//
	// For scanning, that CPU time is
	//
	//	assistDuration * procs * (utilization + idleUtilization)
	//
	// In this case, we *include* idle utilization, because that is additional CPU time
	// that the GC had available to it.
	//
	// In effect, idle GC time is sort of double-counted here, but it's very weird compared
	// to other kinds of GC work, because of how fluid it is. Namely, because the mutator is
	// *always* free to take it.
	//
	// So this calculation is really:
	//
	//	(heapLive-trigger) / (assistDuration * procs * (1-utilization)) /
	//		(scanWork) / (assistDuration * procs * (utilization+idleUtilization))
	//
	// Note that because we only care about the ratio, assistDuration and procs cancel out.
	scanWork := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()
	currentConsMark := (float64(c.heapLive-c.trigger) * (utilization + idleUtilization)) /
		(float64(scanWork) * (1 - utilization))
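
	// Plugging in numbers (illustrative): if 12MB were allocated during the
	// cycle (heapLive-trigger), scanWork is 16MB, utilization is 0.30, and
	// idleUtilization is 0.05, then
	// currentConsMark = (12 * 0.35) / (16 * 0.70) = 0.375.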

	// Update cons/mark controller. The time period for this is 1 GC cycle.
	//
	// This use of a PI controller might seem strange. So, here's an explanation:
	//
	// currentConsMark represents the consMark we *should've* had to be perfectly
	// on-target for this cycle. Given that we assume the next GC will be like this
	// one in the steady-state, it stands to reason that we should just pick that
	// as our next consMark. In practice, however, currentConsMark is too noisy:
	// we're going to be wildly off-target in each GC cycle if we do that.
	//
	// What we do instead is make a long-term assumption: there is some steady-state
	// consMark value, but it's obscured by noise. By constantly shooting for this
	// noisy-but-perfect consMark value, the controller will bounce around a bit,
	// but its average behavior, in aggregate, should be less noisy and closer to
	// the true long-term consMark value, provided it's tuned to be slightly overdamped.
	var ok bool
	oldConsMark := c.consMark
	c.consMark, ok = c.consMarkController.next(c.consMark, currentConsMark, 1.0)
	if !ok {
		// The error spiraled out of control. This is incredibly unlikely seeing
		// as this controller is essentially just a smoothing function, but it might
		// mean that something went very wrong with how currentConsMark was calculated.
		// Just reset consMark and keep going.
		c.consMark = 0
	}

	if debug.gcpacertrace > 0 {
		printlock()
		goal := gcGoalUtilization * 100
		print("pacer: ", int(utilization*100), "% CPU (", int(goal), " exp.) for ")
		print(c.heapScanWork.Load(), "+", c.stackScanWork.Load(), "+", c.globalsScanWork.Load(), " B work (", c.lastHeapScan+c.stackScan+c.globalsScan, " B exp.) ")
		print("in ", c.trigger, " B -> ", c.heapLive, " B (∆goal ", int64(c.heapLive)-int64(c.heapGoal), ", cons/mark ", oldConsMark, ")")
		if !ok {
			print("[controller reset]")
		}
		print("\n")
		printunlock()
	}
}

// enlistWorker encourages another dedicated mark worker to start on
// another P if there are spare worker slots. It is used by putfull
// when more work is made available.
//
//go:nowritebarrier
func (c *gcControllerState) enlistWorker() {
	// If there are idle Ps, wake one so it will run an idle worker.
	// NOTE: This is suspected of causing deadlocks. See golang.org/issue/19112.
	//
	// if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
	// 	wakep()
	// 	return
	// }

	// There are no idle Ps. If we need more dedicated workers,
	// try to preempt a running P so it will switch to a worker.
	if c.dedicatedMarkWorkersNeeded <= 0 {
		return
	}
	// Pick a random other P to preempt.
	if gomaxprocs <= 1 {
		return
	}
	gp := getg()
	if gp == nil || gp.m == nil || gp.m.p == 0 {
		return
	}
	myID := gp.m.p.ptr().id
	for tries := 0; tries < 5; tries++ {
		id := int32(fastrandn(uint32(gomaxprocs - 1)))
		if id >= myID {
			id++
		}
		p := allp[id]
		if p.status != _Prunning {
			continue
		}
		if preemptone(p) {
			return
		}
	}
}

// findRunnableGCWorker returns a background mark worker for _p_ if it
// should be run. This must only be called when gcBlackenEnabled != 0.
func (c *gcControllerState) findRunnableGCWorker(_p_ *p, now int64) *g {
	if gcBlackenEnabled == 0 {
		throw("gcControllerState.findRunnable: blackening not enabled")
	}

	// Since we have the current time, check if the GC CPU limiter
	// hasn't had an update in a while. This check is necessary in
	// case the limiter is on but hasn't been checked in a while and
	// so may have left sufficient headroom to turn off again.
	if gcCPULimiter.needUpdate(now) {
		gcCPULimiter.update(gcController.assistTime.Load(), now)
	}

	if !gcMarkWorkAvailable(_p_) {
		// No work to be done right now. This can happen at
		// the end of the mark phase when there are still
		// assists tapering off. Don't bother running a worker
		// now because it'll just return immediately.
		return nil
	}

	// Grab a worker before we commit to running below.
	node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
	if node == nil {
		// There is at least one worker per P, so normally there are
		// enough workers to run on all Ps, if necessary. However, once
		// a worker enters gcMarkDone it may park without rejoining the
		// pool, thus freeing a P with no corresponding worker.
		// gcMarkDone never depends on another worker doing work, so it
		// is safe to simply do nothing here.
		//
		// If gcMarkDone bails out without completing the mark phase,
		// it will always do so with queued global work. Thus, that P
		// will be immediately eligible to re-run the worker G it was
		// just using, ensuring work can complete.
		return nil
	}

	decIfPositive := func(ptr *int64) bool {
		for {
			v := atomic.Loadint64(ptr)
			if v <= 0 {
				return false
			}

			if atomic.Casint64(ptr, v, v-1) {
				return true
			}
		}
	}

	if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
		// This P is now dedicated to marking until the end of
		// the concurrent mark phase.
		_p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
	} else if c.fractionalUtilizationGoal == 0 {
		// No need for fractional workers.
		gcBgMarkWorkerPool.push(&node.node)
		return nil
	} else {
		// Is this P behind on the fractional utilization
		// goal?
		//
		// This should be kept in sync with pollFractionalWorkerExit.
		delta := now - c.markStartTime
		if delta > 0 && float64(_p_.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal {
			// Nope. No need to run a fractional worker.
			gcBgMarkWorkerPool.push(&node.node)
			return nil
		}
		// Run a fractional worker.
		_p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode
	}

	// Run the background mark worker.
	gp := node.gp.ptr()
	casgstatus(gp, _Gwaiting, _Grunnable)
	if trace.enabled {
		traceGoUnpark(gp, 0)
	}
	return gp
}

// resetLive sets up the controller state for the next mark phase after the end
// of the previous one. Must be called after endCycle and before commit, before
// the world is started.
//
// The world must be stopped.
func (c *gcControllerState) resetLive(bytesMarked uint64) {
	c.heapMarked = bytesMarked
	c.heapLive = bytesMarked
	c.heapScan = uint64(c.heapScanWork.Load())
	c.lastHeapScan = uint64(c.heapScanWork.Load())

	// heapLive was updated, so emit a trace event.
	if trace.enabled {
		traceHeapAlloc()
	}
}

// markWorkerStop must be called whenever a mark worker stops executing.
//
// It updates mark work accounting in the controller by a duration of
// work in nanoseconds and other bookkeeping.
//
// Safe to execute at any time.
func (c *gcControllerState) markWorkerStop(mode gcMarkWorkerMode, duration int64) {
	switch mode {
	case gcMarkWorkerDedicatedMode:
		atomic.Xaddint64(&c.dedicatedMarkTime, duration)
		atomic.Xaddint64(&c.dedicatedMarkWorkersNeeded, 1)
	case gcMarkWorkerFractionalMode:
		atomic.Xaddint64(&c.fractionalMarkTime, duration)
	case gcMarkWorkerIdleMode:
		atomic.Xaddint64(&c.idleMarkTime, duration)
		c.removeIdleMarkWorker()
	default:
		throw("markWorkerStop: unknown mark worker mode")
	}
}

func (c *gcControllerState) update(dHeapLive, dHeapScan int64) {
	if dHeapLive != 0 {
		atomic.Xadd64(&gcController.heapLive, dHeapLive)
		if trace.enabled {
			// gcController.heapLive changed.
			traceHeapAlloc()
		}
	}
	if gcBlackenEnabled == 0 {
		// Update heapScan when we're not in a current GC. It is fixed
		// at the beginning of a cycle.
		if dHeapScan != 0 {
			atomic.Xadd64(&gcController.heapScan, dHeapScan)
		}
	} else {
		// gcController.heapLive changed.
		c.revise()
	}
}

func (c *gcControllerState) addScannableStack(pp *p, amount int64) {
	if pp == nil {
		atomic.Xadd64(&c.scannableStackSize, amount)
		return
	}
	pp.scannableStackSizeDelta += amount
	if pp.scannableStackSizeDelta >= scannableStackSizeSlack || pp.scannableStackSizeDelta <= -scannableStackSizeSlack {
		atomic.Xadd64(&c.scannableStackSize, pp.scannableStackSizeDelta)
		pp.scannableStackSizeDelta = 0
	}
}

func (c *gcControllerState) addGlobals(amount int64) {
	atomic.Xadd64(&c.globalsScan, amount)
}

// commit recomputes all pacing parameters from scratch, namely
// absolute trigger, the heap goal, mark pacing, and sweep pacing.
//
// This can be called any time. If GC is in the middle of a
// concurrent phase, it will adjust the pacing of that phase.
//
// This depends on gcPercent, gcController.heapMarked, and
// gcController.heapLive. These must be up to date.
//
// mheap_.lock must be held or the world must be stopped.
func (c *gcControllerState) commit() {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	// Compute the next GC goal, which is when the allocated heap
	// has grown by GOGC/100 over where it started the last cycle,
	// plus additional runway for non-heap sources of GC work.
	goal := ^uint64(0)
	if gcPercent := c.gcPercent.Load(); gcPercent >= 0 {
		goal = c.heapMarked + (c.heapMarked+atomic.Load64(&c.stackScan)+atomic.Load64(&c.globalsScan))*uint64(gcPercent)/100
	}
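
	// Worked example (illustrative): with heapMarked=20MB, stackScan=2MB,
	// globalsScan=1MB, and gcPercent=100, the goal is
	// 20 + (20+2+1)*100/100 = 43MB.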

	// Don't trigger below the minimum heap size.
	minTrigger := c.heapMinimum
	if !isSweepDone() {
		// Concurrent sweep happens in the heap growth
		// from gcController.heapLive to trigger, so ensure
		// that concurrent sweep has some heap growth
		// in which to perform sweeping before we
		// start the next GC cycle.
		sweepMin := atomic.Load64(&c.heapLive) + sweepMinHeapDistance
		if sweepMin > minTrigger {
			minTrigger = sweepMin
		}
	}

	// If we let the trigger go too low, then if the application
	// is allocating very rapidly we might end up in a situation
	// where we're allocating black during a nearly always-on GC.
	// The result of this is a growing heap and ultimately an
	// increase in RSS. By capping us at a point >0, we're essentially
	// saying that we're OK using more CPU during the GC to prevent
	// this growth in RSS.
	//
	// The current constant was chosen empirically: given a sufficiently
	// fast/scalable allocator with 48 Ps that could drive the trigger ratio
	// to <0.05, this constant causes applications to retain the same peak
	// RSS compared to not having this allocator.
	if triggerBound := uint64(0.7*float64(goal-c.heapMarked)) + c.heapMarked; minTrigger < triggerBound {
		minTrigger = triggerBound
	}

	// For small heaps, set the max trigger point at 95% of the heap goal.
	// This ensures we always have *some* headroom when the GC actually starts.
	// For larger heaps, set the max trigger point at the goal, minus the
	// minimum heap size.
	// This choice follows from the fact that the minimum heap size is chosen
	// to reflect the costs of a GC with no work to do. With a large heap but
	// very little scan work to perform, this gives us exactly as much runway
	// as we would need, in the worst case.
	maxRunway := uint64(0.95 * float64(goal-c.heapMarked))
	if largeHeapMaxRunway := goal - c.heapMinimum; goal > c.heapMinimum && maxRunway < largeHeapMaxRunway {
		maxRunway = largeHeapMaxRunway
	}
	maxTrigger := maxRunway + c.heapMarked
	if maxTrigger < minTrigger {
		maxTrigger = minTrigger
	}

	// Compute the trigger by using our estimate of the cons/mark ratio.
	//
	// The idea is to take our expected scan work, and multiply it by
	// the cons/mark ratio to determine how long it'll take to complete
	// that scan work in terms of bytes allocated. This gives us our GC's
	// runway.
	//
	// However, the cons/mark ratio is a ratio of rates per CPU-second, but
	// here we care about the relative rates for some division of CPU
	// resources among the mutator and the GC.
	//
	// To summarize, we have B / cpu-ns, and we want B / ns. We get that
	// by multiplying by our desired division of CPU resources. We choose
	// to express CPU resources as GOMAXPROCS*fraction. Note that because
	// we're working with a ratio here, we can omit the number of CPU cores,
	// because they'll appear in the numerator and denominator and cancel out.
	// As a result, this is basically just "weighing" the cons/mark ratio by
	// our desired division of resources.
	//
	// Furthermore, by setting the trigger so that CPU resources are divided
	// this way, assuming that the cons/mark ratio is correct, we make that
	// division a reality.
	var trigger uint64
	runway := uint64((c.consMark * (1 - gcGoalUtilization) / (gcGoalUtilization)) * float64(c.lastHeapScan+c.stackScan+c.globalsScan))
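	// Worked example (illustrative): with consMark=0.5 and
	// gcGoalUtilization=0.25, the weight is 0.5*(0.75/0.25) = 1.5, so with
	// 23MB of expected scan work the runway is about 34.5MB and the trigger
	// lands 34.5MB short of the goal (subject to the clamps below).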
	if runway > goal {
		trigger = minTrigger
	} else {
		trigger = goal - runway
	}
	if trigger < minTrigger {
		trigger = minTrigger
	}
	if trigger > maxTrigger {
		trigger = maxTrigger
	}

	// Commit to the trigger and goal.
	c.trigger = trigger
	atomic.Store64(&c.heapGoal, goal)
	if trace.enabled {
		traceHeapGoal()
	}

	// Update mark pacing.
	if gcphase != _GCoff {
		c.revise()
	}
}

// effectiveGrowthRatio returns the current effective heap growth
// ratio (GOGC/100) based on heapMarked from the previous GC and
// heapGoal for the current GC.
//
// This may differ from gcPercent/100 because of various upper and
// lower bounds on gcPercent. For example, if the heap is smaller than
// heapMinimum, this can be higher than gcPercent/100.
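//
// For example (illustrative numbers): with heapGoal=43MB and heapMarked=20MB,
// the effective ratio is (43-20)/20 = 1.15, i.e. an effective GOGC of about 115.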
//
// mheap_.lock must be held or the world must be stopped.
func (c *gcControllerState) effectiveGrowthRatio() float64 {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	egogc := float64(atomic.Load64(&c.heapGoal)-c.heapMarked) / float64(c.heapMarked)
	if egogc < 0 {
		// Shouldn't happen, but just in case.
		egogc = 0
	}
	return egogc
}

// setGCPercent updates gcPercent. commit must be called after.
// Returns the old value of gcPercent.
//
// The world must be stopped, or mheap_.lock must be held.
func (c *gcControllerState) setGCPercent(in int32) int32 {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	out := c.gcPercent.Load()
	if in < 0 {
		in = -1
	}
	c.heapMinimum = defaultHeapMinimum * uint64(in) / 100
	c.gcPercent.Store(in)

	return out
}

//go:linkname setGCPercent runtime/debug.setGCPercent
func setGCPercent(in int32) (out int32) {
	// Run on the system stack since we grab the heap lock.
	systemstack(func() {
		lock(&mheap_.lock)
		out = gcController.setGCPercent(in)
		gcController.commit()
		gcPaceSweeper(gcController.trigger)
		gcPaceScavenger(gcController.heapGoal, gcController.lastHeapGoal)
		unlock(&mheap_.lock)
	})

	// If we just disabled GC, wait for any concurrent GC mark to
	// finish so we always return with no GC running.
	if in < 0 {
		gcWaitOnMark(atomic.Load(&work.cycles))
	}

	return out
}

func readGOGC() int32 {
	p := gogetenv("GOGC")
	if p == "off" {
		return -1
	}
	if n, ok := atoi32(p); ok {
		return n
	}
	return 100
}

// setMemoryLimit updates memoryLimit. commit must be called after.
// Returns the old value of memoryLimit.
//
// The world must be stopped, or mheap_.lock must be held.
func (c *gcControllerState) setMemoryLimit(in int64) int64 {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	out := c.memoryLimit.Load()
	if in >= 0 {
		c.memoryLimit.Store(in)
	}

	return out
}

//go:linkname setMemoryLimit runtime/debug.setMemoryLimit
func setMemoryLimit(in int64) (out int64) {
	// Run on the system stack since we grab the heap lock.
	systemstack(func() {
		lock(&mheap_.lock)
		out = gcController.setMemoryLimit(in)
		if in < 0 || out == in {
			// If we're just checking the value or not changing
			// it, there's no point in doing the rest.
			unlock(&mheap_.lock)
			return
		}
		gcController.commit()
		gcPaceSweeper(gcController.trigger)
		gcPaceScavenger(gcController.heapGoal, gcController.lastHeapGoal)
		unlock(&mheap_.lock)
	})

	return out
}

func readGOMEMLIMIT() int64 {
	p := gogetenv("GOMEMLIMIT")
	if p == "" || p == "off" {
		return maxInt64
	}
	n, ok := parseByteCount(p)
	if !ok {
		print("GOMEMLIMIT=", p, "\n")
		throw("malformed GOMEMLIMIT; see `go doc runtime/debug.SetMemoryLimit`")
	}
	return n
}

type piController struct {
	kp float64 // Proportional constant.
	ti float64 // Integral time constant.
	tt float64 // Reset time.

	min, max float64 // Output boundaries.

	// PI controller state.

	errIntegral float64 // Integral of the error from t=0 to now.

	errOverflow   bool // Set if errIntegral ever overflowed.
	inputOverflow bool // Set if an operation with the input overflowed.
}

// next provides a new sample to the controller.
//
// input is the sample, setpoint is the desired point, and period is how much
// time (in whatever unit makes the most sense) has passed since the last sample.
//
// Returns a new value for the variable it's controlling, and whether the operation
// completed successfully. One reason this might fail is if error has been growing
// in an unbounded manner, to the point of overflow.
//
// In the specific case where an error overflow occurs, the errOverflow field will be
// set and the rest of the controller's internal state will be fully reset.
func (c *piController) next(input, setpoint, period float64) (float64, bool) {
	// Compute the raw output value.
	prop := c.kp * (setpoint - input)
	rawOutput := prop + c.errIntegral

	// Clamp rawOutput into output.
	output := rawOutput
	if isInf(output) || isNaN(output) {
		// The input had a large enough magnitude that either it was already
		// overflowed, or some operation with it overflowed.
		// Set a flag and reset. That's the safest thing to do.
		c.reset()
		c.inputOverflow = true
		return c.min, false
	}
	if output < c.min {
		output = c.min
	} else if output > c.max {
		output = c.max
	}

	// Update the controller's state.
	if c.ti != 0 && c.tt != 0 {
		c.errIntegral += (c.kp*period/c.ti)*(setpoint-input) + (period/c.tt)*(output-rawOutput)
		if isInf(c.errIntegral) || isNaN(c.errIntegral) {
			// So much error has accumulated that we managed to overflow.
			// The assumptions around the controller have likely broken down.
			// Set a flag and reset. That's the safest thing to do.
			c.reset()
			c.errOverflow = true
			return c.min, false
		}
	}
	return output, true
}

// reset resets the controller state, except for controller error flags.
func (c *piController) reset() {
	c.errIntegral = 0
}
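
// For intuition (an illustrative sketch, not part of the runtime): feeding a
// constant setpoint back through a controller configured like the one in
// gcControllerState.init,
//
//	c := piController{kp: 0.9, ti: 4.0, tt: 1000, min: -1000, max: 1000}
//	out := 0.0
//	for i := 0; i < 10; i++ {
//		out, _ = c.next(out, 1.0, 1.0)
//	}
//
// drives out toward 1.0 over several periods, bouncing a bit along the way
// rather than jumping there immediately. This is the smoothing behavior that
// endCycle relies on for the cons/mark estimate.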

// addIdleMarkWorker attempts to add a new idle mark worker.
//
// If this returns true, the caller must become an idle mark worker unless
// there are no background mark worker goroutines in the pool. This case is
// harmless because there are already background mark workers running.
// If this returns false, the caller must NOT become an idle mark worker.
//
// nosplit because it may be called without a P.
//
//go:nosplit
func (c *gcControllerState) addIdleMarkWorker() bool {
	for {
		old := c.idleMarkWorkers.Load()
		n, max := int32(old&uint64(^uint32(0))), int32(old>>32)
		if n >= max {
			// See the comment on idleMarkWorkers for why
			// n > max is tolerated.
			return false
		}
		if n < 0 {
			print("n=", n, " max=", max, "\n")
			throw("negative idle mark workers")
		}
		new := uint64(uint32(n+1)) | (uint64(max) << 32)
		if c.idleMarkWorkers.CompareAndSwap(old, new) {
			return true
		}
	}
}

// needIdleMarkWorker is a hint as to whether another idle mark worker is needed.
//
// The caller must still call addIdleMarkWorker to become one. This is mainly
// useful for a quick check before an expensive operation.
//
// nosplit because it may be called without a P.
//
//go:nosplit
func (c *gcControllerState) needIdleMarkWorker() bool {
	p := c.idleMarkWorkers.Load()
	n, max := int32(p&uint64(^uint32(0))), int32(p>>32)
	return n < max
}

// removeIdleMarkWorker must be called when an idle mark worker stops executing.
func (c *gcControllerState) removeIdleMarkWorker() {
	for {
		old := c.idleMarkWorkers.Load()
		n, max := int32(old&uint64(^uint32(0))), int32(old>>32)
		if n-1 < 0 {
			print("n=", n, " max=", max, "\n")
			throw("negative idle mark workers")
		}
		new := uint64(uint32(n-1)) | (uint64(max) << 32)
		if c.idleMarkWorkers.CompareAndSwap(old, new) {
			return
		}
	}
}

// setMaxIdleMarkWorkers sets the maximum number of idle mark workers allowed.
//
// This method is optimistic in that it does not wait for the number of
// idle mark workers to reduce to max before returning; it assumes the workers
// will deschedule themselves.
func (c *gcControllerState) setMaxIdleMarkWorkers(max int32) {
	for {
		old := c.idleMarkWorkers.Load()
		n := int32(old & uint64(^uint32(0)))
		if n < 0 {
			print("n=", n, " max=", max, "\n")
			throw("negative idle mark workers")
		}
		new := uint64(uint32(n)) | (uint64(max) << 32)
		if c.idleMarkWorkers.CompareAndSwap(old, new) {
			return
		}
	}
}