// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/goexperiment"
	"runtime/internal/atomic"
	_ "unsafe" // for go:linkname
)

const (
	// gcGoalUtilization is the goal CPU utilization for
	// marking as a fraction of GOMAXPROCS.
	//
	// Increasing the goal utilization will shorten GC cycles as the GC
	// has more resources behind it, lessening costs from the write barrier,
	// but comes at the cost of increasing mutator latency.
	gcGoalUtilization = gcBackgroundUtilization

	// gcBackgroundUtilization is the fixed CPU utilization for background
	// marking. It must be <= gcGoalUtilization. The difference between
	// gcGoalUtilization and gcBackgroundUtilization will be made up by
	// mark assists. The scheduler will aim to use within 50% of this
	// goal based on gcBackgroundUtilization.
	//
	// As a general rule, there's little reason to set gcBackgroundUtilization
	// < gcGoalUtilization. One reason might be in mostly idle applications,
	// where goroutines are unlikely to assist at all, so the actual
	// utilization will be lower than the goal. But this is a moot point
	// because the idle mark workers already soak up idle CPU resources.
	// These two values are still kept separate however because they are
	// distinct conceptually, and in previous iterations of the pacer the
	// distinction was more important.
	gcBackgroundUtilization = 0.25

	// gcCreditSlack is the amount of scan work credit that can
	// accumulate locally before updating gcController.heapScanWork and,
	// optionally, gcController.bgScanCredit. Lower values give a more
	// accurate assist ratio and make it more likely that assists will
	// successfully steal background credit. Higher values reduce memory
	// contention.
	gcCreditSlack = 2000
	// gcAssistTimeSlack is the nanoseconds of mutator assist time that
	// can accumulate on a P before updating gcController.assistTime.
	gcAssistTimeSlack = 5000

	// gcOverAssistWork determines how many extra units of scan work a GC
	// assist does when an assist happens. This amortizes the cost of an
	// assist by pre-paying for this many bytes of future allocations.
	gcOverAssistWork = 64 << 10
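	//
	// For example (an illustrative sketch of the mechanism): an assist that
	// owes only a few hundred bytes of scan work still performs roughly
	// 64 KiB of work and banks the surplus as assist credit, so subsequent
	// allocations by the same goroutine can skip assisting until that
	// credit is spent.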
	// defaultHeapMinimum is the value of heapMinimum for GOGC==100.
	defaultHeapMinimum = (goexperiment.HeapMinimum512KiBInt)*(512<<10) +
		(1-goexperiment.HeapMinimum512KiBInt)*(4<<20)
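	//
	// For example: with the experiment on, HeapMinimum512KiBInt == 1 and
	// the expression evaluates to 1*(512<<10) + 0*(4<<20) = 512 KiB; with
	// it off, it evaluates to 0 + 1*(4<<20) = 4 MiB.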
	// maxStackScanSlack is the bytes of stack space allocated or freed
	// that can accumulate on a P before updating gcController.maxStackScan.
	maxStackScanSlack = 8 << 10

	// memoryLimitHeapGoalHeadroom is the amount of headroom the pacer gives to
	// the heap goal when operating in the memory-limited regime. That is,
	// it'll reduce the heap goal by this many extra bytes off of the base
	// calculation.
	memoryLimitHeapGoalHeadroom = 1 << 20
)
// gcController implements the GC pacing controller that determines
// when to trigger concurrent garbage collection and how much marking
// work to do in mutator assists and background marking.
//
// It calculates the ratio between the allocation rate (in terms of CPU
// time) and the GC scan throughput to determine the heap size at which to
// trigger a GC cycle such that no GC assists are required to finish on time.
// This algorithm thus optimizes GC CPU utilization to the dedicated background
// mark utilization of 25% of GOMAXPROCS by minimizing GC assists.
// The high-level design of this algorithm is documented
// at https://github.com/golang/proposal/blob/master/design/44167-gc-pacer-redesign.md.
// See https://golang.org/s/go15gcpacing for additional historical context.
var gcController gcControllerState
type gcControllerState struct {
	// Initialized from GOGC. GOGC=off means no GC.
	gcPercent atomic.Int32

	// memoryLimit is the soft memory limit in bytes.
	//
	// Initialized from GOMEMLIMIT. GOMEMLIMIT=off is equivalent to MaxInt64
	// which means no soft memory limit in practice.
	//
	// This is an int64 instead of a uint64 to more easily maintain parity with
	// the SetMemoryLimit API, which sets a maximum at MaxInt64. This value
	// should never be negative.
	memoryLimit atomic.Int64
	// heapMinimum is the minimum heap size at which to trigger GC.
	// For small heaps, this overrides the usual GOGC*live set rule.
	//
	// When there is a very small live set but a lot of allocation, simply
	// collecting when the heap reaches GOGC*live results in many GC
	// cycles and high total per-GC overhead. This minimum amortizes this
	// per-GC overhead while keeping the heap reasonably small.
	//
	// During initialization this is set to 4MB*GOGC/100. In the case of
	// GOGC==0, this will set heapMinimum to 0, resulting in constant
	// collection even when the heap size is small, which is useful for
	// debugging.
	heapMinimum uint64

	// runway is the amount of runway in heap bytes allocated by the
	// application that we want to give the GC once it starts.
	//
	// This is computed from consMark during mark termination.
	runway atomic.Uint64

	// consMark is the estimated per-CPU consMark ratio for the application.
	//
	// It represents the ratio between the application's allocation
	// rate, as bytes allocated per CPU-time, and the GC's scan rate,
	// as bytes scanned per CPU-time.
	// The units of this ratio are (B / cpu-ns) / (B / cpu-ns).
	//
	// At a high level, this value is computed as the bytes of memory
	// allocated (cons) per unit of scan work completed (mark) in a GC
	// cycle, divided by the CPU time spent on each activity.
	//
	// Updated at the end of each GC cycle, in endCycle.
	consMark float64

	// lastConsMark is the computed cons/mark value for the previous GC
	// cycle. Note that this is *not* the last value of cons/mark, but the
	// actual computed value. See endCycle for details.
	lastConsMark float64

	// gcPercentHeapGoal is the goal heapLive for when the next GC ends,
	// derived from gcPercent.
	//
	// Set to ^uint64(0) if gcPercent is disabled.
	gcPercentHeapGoal atomic.Uint64
	// sweepDistMinTrigger is the minimum trigger to ensure a minimum
	// sweep distance.
	//
	// This bound is also special because it applies to both the trigger
	// *and* the goal (all other trigger bounds must be based *on* the goal).
	//
	// It is computed ahead of time, at commit time. The theory is that,
	// absent a sudden change to a parameter like gcPercent, the trigger
	// will be chosen to always give the sweeper enough headroom. However,
	// such a change might dramatically and suddenly move up the trigger,
	// in which case we need to ensure the sweeper still has enough headroom.
	sweepDistMinTrigger atomic.Uint64

	// triggered is the point at which the current GC cycle actually triggered.
	// Only valid during the mark phase of a GC cycle, otherwise set to ^uint64(0).
	//
	// Updated while the world is stopped.
	triggered uint64

	// lastHeapGoal is the value of heapGoal at the moment the last GC
	// ended. Note that this is distinct from the last value heapGoal had,
	// because it could change if e.g. gcPercent changes.
	//
	// Read and written with the world stopped or with mheap_.lock held.
	lastHeapGoal uint64
	// heapLive is the number of bytes considered live by the GC.
	// That is: retained by the most recent GC plus allocated
	// since then. heapLive ≤ memstats.totalAlloc-memstats.totalFree, since
	// heapAlloc includes unmarked objects that have not yet been swept (and
	// hence goes up as we allocate and down as we sweep) while heapLive
	// excludes these objects (and hence only goes up between GCs).
	//
	// To reduce contention, this is updated only when obtaining a span
	// from an mcentral and at this point it counts all of the unallocated
	// slots in that span (which will be allocated before that mcache
	// obtains another span from that mcentral). Hence, it slightly
	// overestimates the "true" live heap size. It's better to overestimate
	// than to underestimate because 1) this triggers the GC earlier than
	// necessary rather than potentially too late and 2) this leads to a
	// conservative GC rate rather than a GC rate that is potentially too
	// low.
	//
	// Whenever this is updated, call traceHeapAlloc() and
	// this gcControllerState's revise() method.
	heapLive atomic.Uint64
	// heapScan is the number of bytes of "scannable" heap. This is the
	// live heap (as counted by heapLive), but omitting no-scan objects and
	// no-scan tails of objects.
	//
	// This value is fixed at the start of a GC cycle. It represents the
	// maximum scannable heap.
	heapScan atomic.Uint64

	// lastHeapScan is the number of bytes of heap that were scanned
	// last GC cycle. It is the same as heapMarked, but only
	// includes the "scannable" parts of objects.
	//
	// Updated when the world is stopped.
	lastHeapScan uint64

	// lastStackScan is the number of bytes of stack that were scanned
	// last GC cycle.
	lastStackScan atomic.Uint64
	// maxStackScan is the amount of allocated goroutine stack space in
	// use by goroutines.
	//
	// This number tracks allocated goroutine stack space rather than used
	// goroutine stack space (i.e. what is actually scanned) because used
	// goroutine stack space is much harder to measure cheaply. By using
	// allocated space, we make an overestimate; this is OK, it's better
	// to conservatively overcount than undercount.
	maxStackScan atomic.Uint64

	// globalsScan is the total amount of global variable space
	// that is scannable.
	globalsScan atomic.Uint64

	// heapMarked is the number of bytes marked by the previous
	// GC. After mark termination, heapLive == heapMarked, but
	// unlike heapLive, heapMarked does not change until the
	// next mark termination.
	heapMarked uint64
	// heapScanWork is the total heap scan work performed this cycle.
	// stackScanWork is the total stack scan work performed this cycle.
	// globalsScanWork is the total globals scan work performed this cycle.
	//
	// These are updated atomically during the cycle. Updates occur in
	// bounded batches, since they are both written and read
	// throughout the cycle. At the end of the cycle, heapScanWork is how
	// much of the retained heap is scannable.
	//
	// Currently these are measured in bytes. For most uses, this is an
	// opaque unit of work, but for estimation the definition is important.
	//
	// Note that stackScanWork includes only stack space scanned, not all
	// of the allocated stack.
	heapScanWork    atomic.Int64
	stackScanWork   atomic.Int64
	globalsScanWork atomic.Int64

	// bgScanCredit is the scan work credit accumulated by the concurrent
	// background scan. This credit is accumulated by the background scan
	// and stolen by mutator assists. Updates occur in bounded batches,
	// since it is both written and read throughout the cycle.
	bgScanCredit atomic.Int64

	// assistTime is the nanoseconds spent in mutator assists
	// during this cycle. This is updated atomically, and must also
	// be updated atomically even during a STW, because it is read
	// by sysmon. Updates occur in bounded batches, since it is both
	// written and read throughout the cycle.
	assistTime atomic.Int64
	// dedicatedMarkTime is the nanoseconds spent in dedicated mark workers
	// during this cycle. This is updated at the end of the concurrent mark
	// phase.
	dedicatedMarkTime atomic.Int64

	// fractionalMarkTime is the nanoseconds spent in the fractional mark
	// worker during this cycle. This is updated throughout the cycle and
	// will be up-to-date if the fractional mark worker is not currently
	// running.
	fractionalMarkTime atomic.Int64

	// idleMarkTime is the nanoseconds spent in idle marking during this
	// cycle. This is updated throughout the cycle.
	idleMarkTime atomic.Int64

	// markStartTime is the absolute start time in nanoseconds
	// that assists and background mark workers started.
	markStartTime int64
	// dedicatedMarkWorkersNeeded is the number of dedicated mark workers
	// that need to be started. This is computed at the beginning of each
	// cycle and decremented as dedicated mark workers get started.
	dedicatedMarkWorkersNeeded atomic.Int64
	// idleMarkWorkers is two packed int32 values in a single uint64.
	// These two values are always updated simultaneously.
	//
	// The bottom int32 is the current number of idle mark workers executing.
	//
	// The top int32 is the maximum number of idle mark workers allowed to
	// execute concurrently. Normally, this number is just gomaxprocs. However,
	// during periodic GC cycles it is set to 0 because the system is idle
	// anyway; there's no need to go full blast on all of GOMAXPROCS.
	//
	// The maximum number of idle mark workers is used to prevent new workers
	// from starting, but it is not a hard maximum. It is possible (but
	// exceedingly rare) for the current number of idle mark workers to
	// transiently exceed the maximum. This could happen if the maximum changes
	// just after a GC ends, while an M with no P is still finishing its run
	// as an idle mark worker.
	//
	// Note that if we have no dedicated mark workers, we set this value to
	// 1, because in that case we only have fractional GC workers, which aren't
	// scheduled strictly enough to ensure GC progress. As a result, idle-priority
	// mark workers are vital to GC progress in these situations.
	//
	// For example, consider a situation in which goroutines block on the GC
	// (such as via runtime.GOMAXPROCS) and only fractional mark workers are
	// scheduled (e.g. GOMAXPROCS=1). Without idle-priority mark workers, the
	// last running M might skip scheduling a fractional mark worker if its
	// utilization goal is met, such that once it goes to sleep (because there's
	// nothing to do), there will be nothing else to spin up a new M for the
	// fractional worker in the future, stalling GC progress and causing a
	// deadlock. However, idle-priority workers will *always* run when there is
	// nothing left to do, ensuring the GC makes progress.
	//
	// See github.com/golang/go/issues/44163 for more details.
	idleMarkWorkers atomic.Uint64
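	//
	// As an illustrative sketch of the packing (values invented): with 2
	// idle workers running and a maximum of 8, this field holds
	// (8 << 32) | 2. Decoding mirrors the accessors below:
	//
	//	n := int32(v & uint64(^uint32(0))) // current workers, low 32 bits
	//	max := int32(v >> 32)              // maximum workers, high 32 bits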
	// assistWorkPerByte is the ratio of scan work to allocated
	// bytes that should be performed by mutator assists. This is
	// computed at the beginning of each cycle and updated every
	// time heapScan is updated.
	assistWorkPerByte atomic.Float64

	// assistBytesPerWork is 1/assistWorkPerByte.
	//
	// Note that because this is read and written independently
	// from assistWorkPerByte, users may notice a skew between
	// the two values, and such a state should be safe.
	assistBytesPerWork atomic.Float64

	// fractionalUtilizationGoal is the fraction of wall clock
	// time that should be spent in the fractional mark worker on
	// each P that isn't running a dedicated worker.
	//
	// For example, if the utilization goal is 25% and there are
	// no dedicated workers, this will be 0.25. If the goal is
	// 25%, there is one dedicated worker, and GOMAXPROCS is 5,
	// this will be 0.05 to make up the missing 5%.
	//
	// If this is zero, no fractional workers are needed.
	fractionalUtilizationGoal float64
	// These memory stats are effectively duplicates of fields from
	// memstats.heapStats but are updated atomically or with the world
	// stopped and don't provide the same consistency guarantees.
	//
	// Because the runtime is responsible for managing a memory limit, it's
	// useful to couple these stats more tightly to the gcController, which
	// is intimately connected to how that memory limit is maintained.
	heapInUse    sysMemStat    // bytes in mSpanInUse spans
	heapReleased sysMemStat    // bytes released to the OS
	heapFree     sysMemStat    // bytes not in any span, but not released to the OS
	totalAlloc   atomic.Uint64 // total bytes allocated
	totalFree    atomic.Uint64 // total bytes freed
	mappedReady  atomic.Uint64 // total virtual memory in the Ready state (see mem.go).

	// test indicates that this is a test-only copy of gcControllerState.
	test bool
}
func (c *gcControllerState) init(gcPercent int32, memoryLimit int64) {
	c.heapMinimum = defaultHeapMinimum
	c.triggered = ^uint64(0)
	c.setGCPercent(gcPercent)
	c.setMemoryLimit(memoryLimit)
	c.commit(true) // No sweep phase in the first GC cycle.
	// N.B. Don't bother calling traceHeapGoal. Tracing is never enabled at
	// initialization time.
	// N.B. No need to call revise; there's no GC enabled during
	// initialization.
}
// startCycle resets the GC controller's state and computes estimates
// for a new GC cycle. The caller must hold worldsema and the world
// must be stopped.
func (c *gcControllerState) startCycle(markStartTime int64, procs int, trigger gcTrigger) {
	c.heapScanWork.Store(0)
	c.stackScanWork.Store(0)
	c.globalsScanWork.Store(0)
	c.bgScanCredit.Store(0)
	c.assistTime.Store(0)
	c.dedicatedMarkTime.Store(0)
	c.fractionalMarkTime.Store(0)
	c.idleMarkTime.Store(0)
	c.markStartTime = markStartTime
	c.triggered = c.heapLive.Load()
	// Compute the background mark utilization goal. In general,
	// this may not come out exactly. We round the number of
	// dedicated workers so that the utilization is closest to
	// 25%. For small GOMAXPROCS, this would introduce too much
	// error, so we add fractional workers in that case.
	totalUtilizationGoal := float64(procs) * gcBackgroundUtilization
	dedicatedMarkWorkersNeeded := int64(totalUtilizationGoal + 0.5)
	utilError := float64(dedicatedMarkWorkersNeeded)/totalUtilizationGoal - 1
	const maxUtilError = 0.3
	if utilError < -maxUtilError || utilError > maxUtilError {
		// Rounding put us more than 30% off our goal. With
		// gcBackgroundUtilization of 25%, this happens for
		// GOMAXPROCS<=3 or GOMAXPROCS=6. Enable fractional
		// workers to compensate.
		if float64(dedicatedMarkWorkersNeeded) > totalUtilizationGoal {
			// Too many dedicated workers.
			dedicatedMarkWorkersNeeded--
		}
		c.fractionalUtilizationGoal = (totalUtilizationGoal - float64(dedicatedMarkWorkersNeeded)) / float64(procs)
	} else {
		c.fractionalUtilizationGoal = 0
	}
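	// As a worked example of the rounding above (illustrative numbers):
	// with procs=6 the goal is 6*0.25 = 1.5, which rounds to 2 dedicated
	// workers, a utilization error of 2/1.5-1 ≈ 0.33, exceeding maxUtilError.
	// We drop back to 1 dedicated worker and set a fractional goal of
	// (1.5-1)/6 ≈ 0.083 per P. With procs=8 the goal is exactly 2 dedicated
	// workers and no fractional workers are needed.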
	// In STW mode, we just want dedicated workers.
	if debug.gcstoptheworld > 0 {
		dedicatedMarkWorkersNeeded = int64(procs)
		c.fractionalUtilizationGoal = 0
	}

	// Clear per-P state.
	for _, p := range allp {
		p.gcAssistTime = 0
		p.gcFractionalMarkTime = 0
	}
	if trigger.kind == gcTriggerTime {
		// During a periodic GC cycle, reduce the number of idle mark workers
		// required. However, we need at least one dedicated mark worker or
		// idle GC worker to ensure GC progress in some scenarios (see comment
		// on maxIdleMarkWorkers).
		if dedicatedMarkWorkersNeeded > 0 {
			c.setMaxIdleMarkWorkers(0)
		} else {
			// TODO(mknyszek): The fundamental reason why we need this is because
			// we can't count on the fractional mark worker to get scheduled.
			// Fix that by ensuring it gets scheduled according to its quota even
			// if the rest of the application is idle.
			c.setMaxIdleMarkWorkers(1)
		}
	} else {
		// N.B. gomaxprocs and dedicatedMarkWorkersNeeded are guaranteed not to
		// change during a GC cycle.
		c.setMaxIdleMarkWorkers(int32(procs) - int32(dedicatedMarkWorkersNeeded))
	}

	// Compute initial values for controls that are updated
	// throughout the cycle.
	c.dedicatedMarkWorkersNeeded.Store(dedicatedMarkWorkersNeeded)
	c.revise()
	if debug.gcpacertrace > 0 {
		heapGoal := c.heapGoal()
		assistRatio := c.assistWorkPerByte.Load()
		print("pacer: assist ratio=", assistRatio,
			" (scan ", gcController.heapScan.Load()>>20, " MB in ",
			work.initialHeapLive>>20, "->",
			heapGoal>>20, " MB)",
			" workers=", dedicatedMarkWorkersNeeded,
			"+", c.fractionalUtilizationGoal, "\n")
	}
}
// revise updates the assist ratio during the GC cycle to account for
// improved estimates. This should be called whenever gcController.heapScan,
// gcController.heapLive, or any inputs to gcController.heapGoal are
// updated. It is safe to call concurrently, but it may race with other
// calls to revise.
//
// The result of this race is that the two assist ratio values may not line
// up or may be stale. In practice this is OK because the assist ratio
// moves slowly throughout a GC cycle, and the assist ratio is a best-effort
// heuristic anyway. Furthermore, no part of the heuristic depends on
// the two assist ratio values being exact reciprocals of one another, since
// the two values are used to convert values from different sources.
//
// The worst case result of this raciness is that we may miss a larger shift
// in the ratio (say, if we decide to pace more aggressively against the
// hard heap goal) but even this "hard goal" is best-effort (see #40460).
// The dedicated GC should ensure we don't exceed the hard goal by too much
// in the rare case we do exceed it.
//
// It should only be called when gcBlackenEnabled != 0 (because this
// is when assists are enabled and the necessary statistics are
// available).
func (c *gcControllerState) revise() {
	gcPercent := c.gcPercent.Load()
	if gcPercent < 0 {
		// If GC is disabled but we're running a forced GC,
		// act like GOGC is huge for the below calculations.
		gcPercent = 100000
	}
	live := c.heapLive.Load()
	scan := c.heapScan.Load()
	work := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()

	// Assume we're under the soft goal. Pace GC to complete at
	// heapGoal assuming the heap is in steady-state.
	heapGoal := int64(c.heapGoal())

	// The expected scan work is computed as the amount of bytes scanned last
	// GC cycle (both heap and stack), plus our estimate of globals work for this cycle.
	scanWorkExpected := int64(c.lastHeapScan + c.lastStackScan.Load() + c.globalsScan.Load())

	// maxScanWork is a worst-case estimate of the amount of scan work that
	// needs to be performed in this GC cycle. Specifically, it represents
	// the case where *all* scannable memory turns out to be live, and
	// *all* allocated stack space is scannable.
	maxStackScan := c.maxStackScan.Load()
	maxScanWork := int64(scan + maxStackScan + c.globalsScan.Load())
	if work > scanWorkExpected {
		// We've already done more scan work than expected. Because our expectation
		// is based on a steady-state scannable heap size, we assume this means our
		// heap is growing. Compute a new heap goal that takes our existing runway
		// computed for scanWorkExpected and extrapolates it to maxScanWork, the worst-case
		// scan work. This keeps our assist ratio stable if the heap continues to grow.
		//
		// The effect of this mechanism is that assists stay flat in the face of heap
		// growths. It's OK to use more memory this cycle to scan all the live heap,
		// because the next GC cycle is inevitably going to use *at least* that much
		// memory anyway.
		extHeapGoal := int64(float64(heapGoal-int64(c.triggered))/float64(scanWorkExpected)*float64(maxScanWork)) + int64(c.triggered)
		scanWorkExpected = maxScanWork

		// hardGoal is a hard limit on the amount that we're willing to push back the
		// heap goal, and that's twice the heap goal (i.e. if GOGC=100 and the heap and/or
		// stacks and/or globals grow to twice their size, this limits the current GC cycle's
		// growth to 4x the original live heap's size).
		//
		// This maintains the invariant that we use no more memory than the next GC cycle
		// will anyway.
		hardGoal := int64((1.0 + float64(gcPercent)/100.0) * float64(heapGoal))
		if extHeapGoal > hardGoal {
			extHeapGoal = hardGoal
		}
		heapGoal = extHeapGoal
	}
	if int64(live) > heapGoal {
		// We're already past our heap goal, even the extrapolated one.
		// Leave ourselves some extra runway, so in the worst case we
		// finish by that point.
		const maxOvershoot = 1.1
		heapGoal = int64(float64(heapGoal) * maxOvershoot)

		// Compute the upper bound on the scan work remaining.
		scanWorkExpected = maxScanWork
	}
	// Compute the remaining scan work estimate.
	//
	// Note that we currently count allocations during GC as both
	// scannable heap (heapScan) and scan work completed
	// (scanWork), so allocation will change this difference
	// slowly in the soft regime and not at all in the hard
	// regime.
	scanWorkRemaining := scanWorkExpected - work
	if scanWorkRemaining < 1000 {
		// We set a somewhat arbitrary lower bound on
		// remaining scan work since if we aim a little high,
		// we can miss by a little.
		//
		// We *do* need to enforce that this is at least 1,
		// since marking is racy and double-scanning objects
		// may legitimately make the remaining scan work
		// negative, even in the hard goal regime.
		scanWorkRemaining = 1000
	}

	// Compute the heap distance remaining.
	heapRemaining := heapGoal - int64(live)
	if heapRemaining <= 0 {
		// This shouldn't happen, but if it does, avoid
		// dividing by zero or setting the assist negative.
		heapRemaining = 1
	}
	// Compute the mutator assist ratio so by the time the mutator
	// allocates the remaining heap bytes up to heapGoal, it will
	// have done (or stolen) the remaining amount of scan work.
	// Note that the assist ratio values are updated atomically
	// but not together. This means there may be some degree of
	// skew between the two values. This is generally OK as the
	// values shift relatively slowly over the course of a GC
	// cycle.
	assistWorkPerByte := float64(scanWorkRemaining) / float64(heapRemaining)
	assistBytesPerWork := float64(heapRemaining) / float64(scanWorkRemaining)
	c.assistWorkPerByte.Store(assistWorkPerByte)
	c.assistBytesPerWork.Store(assistBytesPerWork)
}
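// An illustrative instance of the ratios above (numbers invented): if 8 MiB
// of scan work remains and the mutator may allocate another 32 MiB before
// reaching the heap goal, then assistWorkPerByte = 8/32 = 0.25, i.e. each
// allocated byte must pay for a quarter byte of scan work, and
// assistBytesPerWork = 4 converts stolen background credit back into
// allocation bytes.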
// endCycle computes the consMark estimate for the next cycle.
// userForced indicates whether the current GC cycle was forced
// by the application.
func (c *gcControllerState) endCycle(now int64, procs int, userForced bool) {
	// Record last heap goal for the scavenger.
	// We'll be updating the heap goal soon.
	gcController.lastHeapGoal = c.heapGoal()

	// Compute the duration of time for which assists were turned on.
	assistDuration := now - c.markStartTime

	// Assume background mark hit its utilization goal.
	utilization := gcBackgroundUtilization
	// Add assist utilization; avoid divide by zero.
	if assistDuration > 0 {
		utilization += float64(c.assistTime.Load()) / float64(assistDuration*int64(procs))
	}

	if c.heapLive.Load() <= c.triggered {
		// Shouldn't happen, but let's be very safe about this in case the
		// GC is somehow extremely short.
		//
		// In this case though, the only reasonable value for c.heapLive-c.triggered
		// would be 0, which isn't really all that useful, i.e. the GC was so short
		// that it didn't matter.
		//
		// Ignore this case and don't update anything.
		return
	}

	idleUtilization := 0.0
	if assistDuration > 0 {
		idleUtilization = float64(c.idleMarkTime.Load()) / float64(assistDuration*int64(procs))
	}
	// Determine the cons/mark ratio.
	//
	// The units we want for the numerator and denominator are both B / cpu-ns.
	// We get this by taking the bytes allocated or scanned, and divide by the amount of
	// CPU time it took for those operations. For allocations, that CPU time is
	//
	//	assistDuration * procs * (1 - utilization)
	//
	// Where utilization includes just background GC workers and assists. It does *not*
	// include idle GC work time, because in theory the mutator is free to take that at
	// any point.
	//
	// For scanning, that CPU time is
	//
	//	assistDuration * procs * (utilization + idleUtilization)
	//
	// In this case, we *include* idle utilization, because that is additional CPU time that
	// the GC had available to it.
	//
	// In effect, idle GC time is sort of double-counted here, but it's very weird compared
	// to other kinds of GC work, because of how fluid it is. Namely, because the mutator is
	// *always* free to take it.
	//
	// So this calculation is really:
	//
	//	(heapLive-trigger) / (assistDuration * procs * (1-utilization)) /
	//		(scanWork) / (assistDuration * procs * (utilization+idleUtilization))
	//
	// Note that because we only care about the ratio, assistDuration and procs cancel out.
	scanWork := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()
	currentConsMark := (float64(c.heapLive.Load()-c.triggered) * (utilization + idleUtilization)) /
		(float64(scanWork) * (1 - utilization))
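	// Illustrative numbers (invented): if 64 MiB were allocated between the
	// trigger and the end of the mark phase, 32 MiB of scan work were done,
	// utilization was 0.25, and idle utilization was 0, then
	// cons/mark = (64 * 0.25) / (32 * 0.75) ≈ 0.67: per unit of CPU, the
	// application allocates at roughly two-thirds of the GC's scan rate.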
	// Update our cons/mark estimate. This is the raw value above, but averaged over 2 GC cycles
	// because it tends to be jittery, even in the steady-state. The smoothing helps the GC to
	// maintain much more stable cycle-by-cycle behavior.
	oldConsMark := c.consMark
	c.consMark = (currentConsMark + c.lastConsMark) / 2
	c.lastConsMark = currentConsMark
	if debug.gcpacertrace > 0 {
		printlock()
		goal := gcGoalUtilization * 100
		print("pacer: ", int(utilization*100), "% CPU (", int(goal), " exp.) for ")
		print(c.heapScanWork.Load(), "+", c.stackScanWork.Load(), "+", c.globalsScanWork.Load(), " B work (", c.lastHeapScan+c.lastStackScan.Load()+c.globalsScan.Load(), " B exp.) ")
		live := c.heapLive.Load()
		print("in ", c.triggered, " B -> ", live, " B (∆goal ", int64(live)-int64(c.lastHeapGoal), ", cons/mark ", oldConsMark, ")")
		println()
		printunlock()
	}
}
// enlistWorker encourages another dedicated mark worker to start on
// another P if there are spare worker slots. It is used by putfull
// when more work is made available.
//
//go:nowritebarrier
func (c *gcControllerState) enlistWorker() {
	// If there are idle Ps, wake one so it will run an idle worker.
	// NOTE: This is suspected of causing deadlocks. See golang.org/issue/19112.
	//
	// if sched.npidle.Load() != 0 && sched.nmspinning.Load() == 0 {
	// 	wakep()
	// 	return
	// }

	// There are no idle Ps. If we need more dedicated workers,
	// try to preempt a running P so it will switch to a worker.
	if c.dedicatedMarkWorkersNeeded.Load() <= 0 {
		return
	}
	// Pick a random other P to preempt.
	if gomaxprocs <= 1 {
		return
	}
	gp := getg()
	if gp == nil || gp.m == nil || gp.m.p == 0 {
		return
	}
	myID := gp.m.p.ptr().id
	for tries := 0; tries < 5; tries++ {
		id := int32(fastrandn(uint32(gomaxprocs - 1)))
		if id >= myID {
			id++
		}
		p := allp[id]
		if p.status != _Prunning {
			continue
		}
		if preemptone(p) {
			return
		}
	}
}
// findRunnableGCWorker returns a background mark worker for pp if it
// should be run. This must only be called when gcBlackenEnabled != 0.
func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) {
	if gcBlackenEnabled == 0 {
		throw("gcControllerState.findRunnable: blackening not enabled")
	}

	// Since we have the current time, check if the GC CPU limiter
	// hasn't had an update in a while. This check is necessary in
	// case the limiter is on but hasn't been checked in a while and
	// so may have left sufficient headroom to turn off again.
	if now == 0 {
		now = nanotime()
	}
	if gcCPULimiter.needUpdate(now) {
		gcCPULimiter.update(now)
	}

	if !gcMarkWorkAvailable(pp) {
		// No work to be done right now. This can happen at
		// the end of the mark phase when there are still
		// assists tapering off. Don't bother running a worker
		// now because it'll just return immediately.
		return nil, now
	}
	// Grab a worker before we commit to running below.
	node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
	if node == nil {
		// There is at least one worker per P, so normally there are
		// enough workers to run on all Ps, if necessary. However, once
		// a worker enters gcMarkDone it may park without rejoining the
		// pool, thus freeing a P with no corresponding worker.
		// gcMarkDone never depends on another worker doing work, so it
		// is safe to simply do nothing here.
		//
		// If gcMarkDone bails out without completing the mark phase,
		// it will always do so with queued global work. Thus, that P
		// will be immediately eligible to re-run the worker G it was
		// just using, ensuring work can complete.
		return nil, now
	}

	decIfPositive := func(val *atomic.Int64) bool {
		for {
			v := val.Load()
			if v <= 0 {
				return false
			}

			if val.CompareAndSwap(v, v-1) {
				return true
			}
		}
	}
	if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
		// This P is now dedicated to marking until the end of
		// the concurrent mark phase.
		pp.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
	} else if c.fractionalUtilizationGoal == 0 {
		// No need for fractional workers.
		gcBgMarkWorkerPool.push(&node.node)
		return nil, now
	} else {
		// Is this P behind on the fractional utilization
		// goal?
		//
		// This should be kept in sync with pollFractionalWorkerExit.
		delta := now - c.markStartTime
		if delta > 0 && float64(pp.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal {
			// Nope. No need to run a fractional worker.
			gcBgMarkWorkerPool.push(&node.node)
			return nil, now
		}
		// Run a fractional worker.
		pp.gcMarkWorkerMode = gcMarkWorkerFractionalMode
	}

	// Run the background mark worker.
	gp := node.gp.ptr()
	casgstatus(gp, _Gwaiting, _Grunnable)
	if traceEnabled() {
		traceGoUnpark(gp, 0)
	}
	return gp, now
}
// resetLive sets up the controller state for the next mark phase after the end
// of the previous one. Must be called after endCycle and before commit, before
// the world is started.
//
// The world must be stopped.
func (c *gcControllerState) resetLive(bytesMarked uint64) {
	c.heapMarked = bytesMarked
	c.heapLive.Store(bytesMarked)
	c.heapScan.Store(uint64(c.heapScanWork.Load()))
	c.lastHeapScan = uint64(c.heapScanWork.Load())
	c.lastStackScan.Store(uint64(c.stackScanWork.Load()))
	c.triggered = ^uint64(0) // Reset triggered.

	// heapLive was updated, so emit a trace event.
	if traceEnabled() {
		traceHeapAlloc(bytesMarked)
	}
}
// markWorkerStop must be called whenever a mark worker stops executing.
//
// It updates mark work accounting in the controller by a duration of
// work in nanoseconds and other bookkeeping.
//
// Safe to execute at any time.
func (c *gcControllerState) markWorkerStop(mode gcMarkWorkerMode, duration int64) {
	switch mode {
	case gcMarkWorkerDedicatedMode:
		c.dedicatedMarkTime.Add(duration)
		c.dedicatedMarkWorkersNeeded.Add(1)
	case gcMarkWorkerFractionalMode:
		c.fractionalMarkTime.Add(duration)
	case gcMarkWorkerIdleMode:
		c.idleMarkTime.Add(duration)
		c.removeIdleMarkWorker()
	default:
		throw("markWorkerStop: unknown mark worker mode")
	}
}
func (c *gcControllerState) update(dHeapLive, dHeapScan int64) {
	if dHeapLive != 0 {
		live := gcController.heapLive.Add(dHeapLive)
		if traceEnabled() {
			// gcController.heapLive changed.
			traceHeapAlloc(live)
		}
	}

	if gcBlackenEnabled == 0 {
		// Update heapScan when we're not in a current GC. It is fixed
		// at the beginning of a cycle.
		if dHeapScan != 0 {
			gcController.heapScan.Add(dHeapScan)
		}
	} else {
		// gcController.heapLive changed.
		c.revise()
	}
}

func (c *gcControllerState) addScannableStack(pp *p, amount int64) {
	if pp == nil {
		c.maxStackScan.Add(amount)
		return
	}
	pp.maxStackScanDelta += amount
	if pp.maxStackScanDelta >= maxStackScanSlack || pp.maxStackScanDelta <= -maxStackScanSlack {
		c.maxStackScan.Add(pp.maxStackScanDelta)
		pp.maxStackScanDelta = 0
	}
}

func (c *gcControllerState) addGlobals(amount int64) {
	c.globalsScan.Add(amount)
}
// heapGoal returns the current heap goal.
func (c *gcControllerState) heapGoal() uint64 {
	goal, _ := c.heapGoalInternal()
	return goal
}

// heapGoalInternal is the implementation of heapGoal which returns additional
// information that is necessary for computing the trigger.
//
// The returned minTrigger is always <= goal.
func (c *gcControllerState) heapGoalInternal() (goal, minTrigger uint64) {
	// Start with the goal calculated for gcPercent.
	goal = c.gcPercentHeapGoal.Load()
	// Check if the memory-limit-based goal is smaller, and if so, pick that.
	if newGoal := c.memoryLimitHeapGoal(); newGoal < goal {
		goal = newGoal
	} else {
		// We're not limited by the memory limit goal, so perform a series of
		// adjustments that might move the goal forward in a variety of circumstances.

		sweepDistTrigger := c.sweepDistMinTrigger.Load()
		if sweepDistTrigger > goal {
			// Set the goal to maintain a minimum sweep distance since
			// the last call to commit. Note that we never want to do this
			// if we're in the memory limit regime, because it could push
			// the goal up.
			goal = sweepDistTrigger
		}
		// Since we ignore the sweep distance trigger in the memory
		// limit regime, we need to ensure we don't propagate it to
		// the trigger, because it could cause a violation of the
		// invariant that the trigger < goal.
		minTrigger = sweepDistTrigger

		// Ensure that the heap goal is at least a little larger than
		// the point at which we triggered. This may not be the case if GC
		// start is delayed or if the allocation that pushed gcController.heapLive
		// over trigger is large or if the trigger is really close to
		// GOGC. Assist is proportional to this distance, so enforce a
		// minimum distance, even if it means going over the GOGC goal
		// by a tiny bit.
		//
		// Ignore this if we're in the memory limit regime: we'd prefer to
		// have the GC respond hard about how close we are to the goal than to
		// push the goal back in such a manner that it could cause us to exceed
		// the memory limit.
		const minRunway = 64 << 10
		if c.triggered != ^uint64(0) && goal < c.triggered+minRunway {
			goal = c.triggered + minRunway
		}
	}
	return
}
// memoryLimitHeapGoal returns a heap goal derived from memoryLimit.
func (c *gcControllerState) memoryLimitHeapGoal() uint64 {
	// Start by pulling out some values we'll need. Be careful about overflow.
	var heapFree, heapAlloc, mappedReady uint64
	for {
		heapFree = c.heapFree.load()                         // Free and unscavenged memory.
		heapAlloc = c.totalAlloc.Load() - c.totalFree.Load() // Heap object bytes in use.
		mappedReady = c.mappedReady.Load()                   // Total unreleased mapped memory.
		if heapFree+heapAlloc <= mappedReady {
			break
		}
		// It is impossible for heap memory (free plus in use) to exceed total
		// unreleased mapped memory, but because these stats are updated
		// independently, we may observe a partial update including only some
		// values. Thus, we appear to break the invariant. However, this
		// condition is necessarily transient, so just try again. In the case
		// of a persistent accounting error, we'll deadlock here.
	}
	// Below we compute a goal from memoryLimit. There are a few things to be aware of.
	// Firstly, the memoryLimit does not easily compare to the heap goal: the former
	// is total mapped memory by the runtime that hasn't been released, while the latter is
	// only heap object memory. Intuitively, the way we convert from one to the other is to
	// subtract everything from memoryLimit that both contributes to the memory limit (so,
	// ignore scavenged memory) and doesn't contain heap objects. This doesn't quite line
	// up with reality, but it's a good starting point.
	//
	// In practice this computation looks like the following:
	//
	//	memoryLimit - ((mappedReady - heapFree - heapAlloc) + max(mappedReady - memoryLimit, 0)) - memoryLimitHeapGoalHeadroom
	//	                    ^1                                    ^2                                      ^3
	//
	// Let's break this down.
	//
	// The first term (marker 1) is everything that contributes to the memory limit and isn't
	// or couldn't become heap objects. It represents, broadly speaking, non-heap overheads.
	// One oddity you may have noticed is that we also subtract out heapFree, i.e. unscavenged
	// memory that may contain heap objects in the future.
	//
	// Let's take a step back. In an ideal world, this term would look something like just
	// the heap goal. That is, we "reserve" enough space for the heap to grow to the heap
	// goal, and subtract out everything else. This is of course impossible; the definition
	// is circular! However, this impossible definition contains a key insight: the amount
	// we're *going* to use matters just as much as whatever we're currently using.
	//
	// Consider if the heap shrinks to 1/10th its size, leaving behind lots of free and
	// unscavenged memory. mappedReady - heapAlloc will be quite large, because of that free
	// and unscavenged memory, pushing the goal down significantly.
	//
	// heapFree is also safe to exclude from the memory limit because in the steady-state, it's
	// just a pool of memory for future heap allocations, and making new allocations from heapFree
	// memory doesn't increase overall memory use. In transient states, the scavenger and the
	// allocator actively manage the pool of heapFree memory to maintain the memory limit.
	//
	// The second term (marker 2) is the amount of memory we've exceeded the limit by, and is
	// intended to help recover from such a situation. By pushing the heap goal down, we also
	// push the trigger down, triggering and finishing a GC sooner in order to make room for
	// other memory sources. Note that since we're effectively reducing the heap goal by X bytes,
	// we're actually giving more than X bytes of headroom back, because the heap goal is in
	// terms of heap objects, but it takes more than X bytes (e.g. due to fragmentation) to store
	// X bytes worth of objects.
	//
	// The third term (marker 3) subtracts an additional memoryLimitHeapGoalHeadroom bytes from the
	// heap goal. As the name implies, this is to provide additional headroom in the face of pacing
	// inaccuracies. This is a fixed number of bytes because these inaccuracies disproportionately
	// affect small heaps: as heaps get smaller, the pacer's inputs get fuzzier. Shorter GC cycles
	// and less GC work mean that noisy external factors like the OS scheduler have a greater impact.
	memoryLimit := uint64(c.memoryLimit.Load())

	// Compute term 1.
	nonHeapMemory := mappedReady - heapFree - heapAlloc

	// Compute term 2.
	var overage uint64
	if mappedReady > memoryLimit {
		overage = mappedReady - memoryLimit
	}

	if nonHeapMemory+overage >= memoryLimit {
		// We're at a point where non-heap memory exceeds the memory limit on its own.
		// There's honestly not much we can do here but just trigger GCs continuously
		// and let the CPU limiter rein that in. Something has to give at this point.
		// Set it to heapMarked, the lowest possible goal.
		return c.heapMarked
	}

	// Compute the goal.
	goal := memoryLimit - (nonHeapMemory + overage)
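	// As an illustrative example (invented numbers): with memoryLimit =
	// 1024 MiB, mappedReady = 800 MiB, heapFree = 100 MiB, and heapAlloc =
	// 500 MiB, nonHeapMemory = 800-100-500 = 200 MiB and overage = 0, so
	// the goal before headroom is 1024-200 = 824 MiB of heap objects.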
	// Apply some headroom to the goal to account for pacing inaccuracies.
	// Be careful about small limits.
	if goal < memoryLimitHeapGoalHeadroom || goal-memoryLimitHeapGoalHeadroom < memoryLimitHeapGoalHeadroom {
		goal = memoryLimitHeapGoalHeadroom
	} else {
		goal = goal - memoryLimitHeapGoalHeadroom
	}

	// Don't let us go below the live heap. A heap goal below the live heap doesn't make sense.
	if goal < c.heapMarked {
		goal = c.heapMarked
	}
	return goal
}
const (
	// These constants determine the bounds on the GC trigger as a fraction
	// of heap bytes allocated between the start of a GC (heapLive == heapMarked)
	// and the end of a GC (heapLive == heapGoal).
	//
	// The constants are obscured in this way for efficiency. The denominator
	// of the fraction is always a power-of-two for a quick division, so that
	// the numerator is a single constant integer multiplication.
	triggerRatioDen = 64

	// The minimum trigger constant was chosen empirically: given a sufficiently
	// fast/scalable allocator with 48 Ps that could drive the trigger ratio
	// to <0.05, this constant causes applications to retain the same peak
	// RSS compared to not having this allocator.
	minTriggerRatioNum = 45 // ~0.7

	// The maximum trigger constant is chosen somewhat arbitrarily, but the
	// current constant has served us well over the years.
	maxTriggerRatioNum = 61 // ~0.95
)
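// As a worked example of these bounds (illustrative numbers): with
// heapMarked = 64 MiB and goal = 128 MiB, the trigger's lower bound is
// ((128-64)/64)*45 + 64 = 109 MiB and its upper bound is
// ((128-64)/64)*61 + 64 = 125 MiB, i.e. roughly 0.70 and 0.95 of the way
// from the live heap to the goal.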
// trigger returns the current point at which a GC should trigger along with
// the heap goal.
//
// The returned value may be compared against heapLive to determine whether
// the GC should trigger. Thus, the GC trigger condition should be (but may
// not be, in the case of small movements for efficiency) checked whenever
// the heap goal may change.
func (c *gcControllerState) trigger() (uint64, uint64) {
	goal, minTrigger := c.heapGoalInternal()

	// Invariant: the trigger must always be less than the heap goal.
	//
	// Note that the memory limit sets a hard maximum on our heap goal,
	// but the live heap may grow beyond it.

	if c.heapMarked >= goal {
		// The goal should never be smaller than heapMarked, but let's be
		// defensive about it. The only reasonable trigger here is one that
		// causes a continuous GC cycle at heapMarked, but respect the goal
		// if it came out as smaller than that.
		return goal, goal
	}
	// Below this point, c.heapMarked < goal.

	// heapMarked is our absolute minimum, and it's possible the trigger
	// bound we get from heapGoalInternal is less than that.
	if minTrigger < c.heapMarked {
		minTrigger = c.heapMarked
	}

	// If we let the trigger go too low, then if the application
	// is allocating very rapidly we might end up in a situation
	// where we're allocating black during a nearly always-on GC.
	// The result of this is a growing heap and ultimately an
	// increase in RSS. By capping us at a point >0, we're essentially
	// saying that we're OK using more CPU during the GC to prevent
	// this growth in RSS.
	triggerLowerBound := uint64(((goal-c.heapMarked)/triggerRatioDen)*minTriggerRatioNum) + c.heapMarked
	if minTrigger < triggerLowerBound {
		minTrigger = triggerLowerBound
	}
	// For small heaps, set the max trigger point at maxTriggerRatio of the way
	// from the live heap to the heap goal. This ensures we always have *some*
	// headroom when the GC actually starts. For larger heaps, set the max trigger
	// point at the goal, minus the minimum heap size.
	//
	// This choice follows from the fact that the minimum heap size is chosen
	// to reflect the costs of a GC with no work to do. With a large heap but
	// very little scan work to perform, this gives us exactly as much runway
	// as we would need, in the worst case.
	maxTrigger := uint64(((goal-c.heapMarked)/triggerRatioDen)*maxTriggerRatioNum) + c.heapMarked
	if goal > defaultHeapMinimum && goal-defaultHeapMinimum > maxTrigger {
		maxTrigger = goal - defaultHeapMinimum
	}
	if maxTrigger < minTrigger {
		maxTrigger = minTrigger
	}
	// Compute the trigger from our bounds and the runway stored by commit.
	var trigger uint64
	runway := c.runway.Load()
	if runway > goal {
		trigger = minTrigger
	} else {
		trigger = goal - runway
	}
	if trigger < minTrigger {
		trigger = minTrigger
	}
	if trigger > maxTrigger {
		trigger = maxTrigger
	}
	if trigger > goal {
		print("trigger=", trigger, " heapGoal=", goal, "\n")
		print("minTrigger=", minTrigger, " maxTrigger=", maxTrigger, "\n")
		throw("produced a trigger greater than the heap goal")
	}
	return trigger, goal
}
// commit recomputes all pacing parameters needed to derive the
// trigger and the heap goal. Namely, the gcPercent-based heap goal,
// and the amount of runway we want to give the GC this cycle.
//
// This can be called any time. If the GC is in the middle of a
// concurrent phase, it will adjust the pacing of that phase.
//
// isSweepDone should be the result of calling isSweepDone(),
// unless we're testing or we know we're executing during a GC cycle.
//
// This depends on gcPercent, gcController.heapMarked, and
// gcController.heapLive. These must be up to date.
//
// Callers must call gcControllerState.revise after calling this
// function if the GC is enabled.
//
// mheap_.lock must be held or the world must be stopped.
func (c *gcControllerState) commit(isSweepDone bool) {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	if isSweepDone {
		// The sweep is done, so there aren't any restrictions on the trigger
		// we need to think about.
		c.sweepDistMinTrigger.Store(0)
	} else {
		// Concurrent sweep happens in the heap growth
		// from gcController.heapLive to trigger. Make sure we
		// give the sweeper some runway if it doesn't have enough.
		c.sweepDistMinTrigger.Store(c.heapLive.Load() + sweepMinHeapDistance)
	}
	// Compute the next GC goal, which is when the allocated heap
	// has grown by GOGC/100 over where it started the last cycle,
	// plus additional runway for non-heap sources of GC work.
	gcPercentHeapGoal := ^uint64(0)
	if gcPercent := c.gcPercent.Load(); gcPercent >= 0 {
		gcPercentHeapGoal = c.heapMarked + (c.heapMarked+c.lastStackScan.Load()+c.globalsScan.Load())*uint64(gcPercent)/100
	}
	// Apply the minimum heap size here. It's defined in terms of gcPercent
	// and is only updated by functions that call commit.
	if gcPercentHeapGoal < c.heapMinimum {
		gcPercentHeapGoal = c.heapMinimum
	}
	c.gcPercentHeapGoal.Store(gcPercentHeapGoal)
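	// For instance (illustrative numbers): with GOGC=100, 100 MiB marked
	// live, 4 MiB of scanned stacks, and 1 MiB of scannable globals, the
	// goal is 100 + (100+4+1)*100/100 = 205 MiB.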
	// Compute the amount of runway we want the GC to have by using our
	// estimate of the cons/mark ratio.
	//
	// The idea is to take our expected scan work, and multiply it by
	// the cons/mark ratio to determine how long it'll take to complete
	// that scan work in terms of bytes allocated. This gives us our GC's
	// runway.
	//
	// However, the cons/mark ratio is a ratio of rates per CPU-second, but
	// here we care about the relative rates for some division of CPU
	// resources among the mutator and the GC.
	//
	// To summarize, we have B / cpu-ns, and we want B / ns. We get that
	// by multiplying by our desired division of CPU resources. We choose
	// to express CPU resources as GOMAXPROCS*fraction. Note that because
	// we're working with a ratio here, we can omit the number of CPU cores,
	// because they'll appear in the numerator and denominator and cancel out.
	// As a result, this is basically just "weighing" the cons/mark ratio by
	// our desired division of resources.
	//
	// Furthermore, by setting the runway so that CPU resources are divided
	// this way, assuming that the cons/mark ratio is correct, we make that
	// division a reality.
	c.runway.Store(uint64((c.consMark * (1 - gcGoalUtilization) / (gcGoalUtilization)) * float64(c.lastHeapScan+c.lastStackScan.Load()+c.globalsScan.Load())))
}
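// A quick numeric sketch of the runway computation above (invented values):
// with cons/mark = 1.0, gcGoalUtilization = 0.25, and 40 MiB of expected
// scan work, the runway is 1.0 * (0.75/0.25) * 40 MiB = 120 MiB of
// allocation headroom, so the trigger lands 120 MiB before the heap goal
// (subject to the trigger bounds above).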
// setGCPercent updates gcPercent. commit must be called after.
// Returns the old value of gcPercent.
//
// The world must be stopped, or mheap_.lock must be held.
func (c *gcControllerState) setGCPercent(in int32) int32 {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	out := c.gcPercent.Load()
	if in < 0 {
		in = -1
	}
	c.heapMinimum = defaultHeapMinimum * uint64(in) / 100
	c.gcPercent.Store(in)

	return out
}

//go:linkname setGCPercent runtime/debug.setGCPercent
func setGCPercent(in int32) (out int32) {
	// Run on the system stack since we grab the heap lock.
	systemstack(func() {
		lock(&mheap_.lock)
		out = gcController.setGCPercent(in)
		gcControllerCommit()
		unlock(&mheap_.lock)
	})

	// If we just disabled GC, wait for any concurrent GC mark to
	// finish so we always return with no GC running.
	if in < 0 {
		gcWaitOnMark(work.cycles.Load())
	}

	return out
}
func readGOGC() int32 {
	p := gogetenv("GOGC")
	if p == "off" {
		return -1
	}
	if n, ok := atoi32(p); ok {
		return n
	}
	return 100
}

// setMemoryLimit updates memoryLimit. commit must be called after.
// Returns the old value of memoryLimit.
//
// The world must be stopped, or mheap_.lock must be held.
func (c *gcControllerState) setMemoryLimit(in int64) int64 {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	out := c.memoryLimit.Load()
	if in >= 0 {
		c.memoryLimit.Store(in)
	}

	return out
}
//go:linkname setMemoryLimit runtime/debug.setMemoryLimit
func setMemoryLimit(in int64) (out int64) {
	// Run on the system stack since we grab the heap lock.
	systemstack(func() {
		lock(&mheap_.lock)
		out = gcController.setMemoryLimit(in)
		if in < 0 || out == in {
			// If we're just checking the value or not changing
			// it, there's no point in doing the rest.
			unlock(&mheap_.lock)
			return
		}
		gcControllerCommit()
		unlock(&mheap_.lock)
	})

	return out
}

func readGOMEMLIMIT() int64 {
	p := gogetenv("GOMEMLIMIT")
	if p == "" || p == "off" {
		return maxInt64
	}
	n, ok := parseByteCount(p)
	if !ok {
		print("GOMEMLIMIT=", p, "\n")
		throw("malformed GOMEMLIMIT; see `go doc runtime/debug.SetMemoryLimit`")
	}
	return n
}
// addIdleMarkWorker attempts to add a new idle mark worker.
//
// If this returns true, the caller must become an idle mark worker unless
// there are no background mark worker goroutines in the pool. This case is
// harmless because there are already background mark workers running.
// If this returns false, the caller must NOT become an idle mark worker.
//
// nosplit because it may be called without a P.
//
//go:nosplit
func (c *gcControllerState) addIdleMarkWorker() bool {
	for {
		old := c.idleMarkWorkers.Load()
		n, max := int32(old&uint64(^uint32(0))), int32(old>>32)
		if n >= max {
			// See the comment on idleMarkWorkers for why
			// n > max is tolerated.
			return false
		}
		if n < 0 {
			print("n=", n, " max=", max, "\n")
			throw("negative idle mark workers")
		}
		new := uint64(uint32(n+1)) | (uint64(max) << 32)
		if c.idleMarkWorkers.CompareAndSwap(old, new) {
			return true
		}
	}
}

// needIdleMarkWorker is a hint as to whether another idle mark worker is needed.
//
// The caller must still call addIdleMarkWorker to become one. This is mainly
// useful for a quick check before an expensive operation.
//
// nosplit because it may be called without a P.
//
//go:nosplit
func (c *gcControllerState) needIdleMarkWorker() bool {
	p := c.idleMarkWorkers.Load()
	n, max := int32(p&uint64(^uint32(0))), int32(p>>32)
	return n < max
}
// removeIdleMarkWorker must be called when an idle mark worker stops executing.
func (c *gcControllerState) removeIdleMarkWorker() {
	for {
		old := c.idleMarkWorkers.Load()
		n, max := int32(old&uint64(^uint32(0))), int32(old>>32)
		if n-1 < 0 {
			print("n=", n, " max=", max, "\n")
			throw("negative idle mark workers")
		}
		new := uint64(uint32(n-1)) | (uint64(max) << 32)
		if c.idleMarkWorkers.CompareAndSwap(old, new) {
			return
		}
	}
}

// setMaxIdleMarkWorkers sets the maximum number of idle mark workers allowed.
//
// This method is optimistic in that it does not wait for the number of
// idle mark workers to reduce to max before returning; it assumes the workers
// will deschedule themselves.
func (c *gcControllerState) setMaxIdleMarkWorkers(max int32) {
	for {
		old := c.idleMarkWorkers.Load()
		n := int32(old & uint64(^uint32(0)))
		if n < 0 {
			print("n=", n, " max=", max, "\n")
			throw("negative idle mark workers")
		}
		new := uint64(uint32(n)) | (uint64(max) << 32)
		if c.idleMarkWorkers.CompareAndSwap(old, new) {
			return
		}
	}
}
// gcControllerCommit is gcController.commit, but passes arguments from live
// (non-test) data. It also updates any consumers of the GC pacing, such as
// sweep pacing and the background scavenger.
//
// Calls gcController.commit.
//
// The heap lock must be held, so this must be executed on the system stack.
//
//go:systemstack
func gcControllerCommit() {
	assertWorldStoppedOrLockHeld(&mheap_.lock)

	gcController.commit(isSweepDone())

	// Update mark pacing.
	if gcphase != _GCoff {
		gcController.revise()
	}

	// TODO(mknyszek): This isn't really accurate any longer because the heap
	// goal is computed dynamically. Still useful to snapshot, but not as useful.
	if traceEnabled() {
		traceHeapGoal()
	}

	trigger, heapGoal := gcController.trigger()
	gcPaceSweeper(trigger)
	gcPaceScavenger(gcController.memoryLimit.Load(), heapGoal, gcController.lastHeapGoal)
}