1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Garbage collector: marking and scanning
11 "runtime/internal/atomic"
12 "runtime/internal/sys"
17 fixedRootFinalizers = iota
21 // rootBlockBytes is the number of bytes to scan per data or BSS root.
23 rootBlockBytes = 256 << 10
25 // maxObletBytes is the maximum bytes of an object to scan at
26 // once. Larger objects will be split up into "oblets" of at
27 // most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
28 // scan preemption at ~100 µs.
30 // This must be > _MaxSmallSize so that the object base is the span base.
32 maxObletBytes = 128 << 10
34 // drainCheckThreshold specifies how many units of work to do
35 // between self-preemption checks in gcDrain. Assuming a scan
36 // rate of 1 MB/ms, this is ~100 µs. Lower values have higher
37 // overhead in the scan loop (the scheduler check may perform
38 // a syscall, so its overhead is nontrivial). Higher values
39 // make the system less responsive to incoming work.
40 drainCheckThreshold = 100000
42 // pagesPerSpanRoot indicates how many pages to scan from a span root
43 // at a time. Used by special root marking.
45 // Higher values improve throughput by increasing locality, but
46 // increase the minimum latency of a marking operation.
48 // Must be a multiple of the pageInUse bitmap element size and
49 // must also evenly divide pagesPerArena.
50 pagesPerSpanRoot = 512
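// Illustrative sketch (not part of the runtime): how the sizing constants
// above translate into per-shard work counts. rootBlocksFor mirrors the
// divRoundUp-based nBlocks helper in gcMarkRootPrepare, and obletsFor shows
// how a large object's size maps to oblets of at most maxObletBytes. The
// names rootBlocksFor and obletsFor are hypothetical.
func rootBlocksFor(segmentBytes uintptr) int {
	// Round up so a partial trailing block still gets its own root job.
	return int((segmentBytes + rootBlockBytes - 1) / rootBlockBytes)
}

func obletsFor(elemsize uintptr) int {
	if elemsize <= maxObletBytes {
		return 1
	}
	// e.g. a 1 MiB object is scanned as 8 oblets of 128 KiB each.
	return int((elemsize + maxObletBytes - 1) / maxObletBytes)
}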
53 // gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
54 // some miscellany) and initializes scanning-related state.
56 // The world must be stopped.
57 func gcMarkRootPrepare() {
60 // Compute how many data and BSS root blocks there are.
61 nBlocks := func(bytes uintptr) int {
62 return int(divRoundUp(bytes, rootBlockBytes))
69 for _, datap := range activeModules() {
70 nDataRoots := nBlocks(datap.edata - datap.data)
71 if nDataRoots > work.nDataRoots {
72 work.nDataRoots = nDataRoots
76 for _, datap := range activeModules() {
77 nBSSRoots := nBlocks(datap.ebss - datap.bss)
78 if nBSSRoots > work.nBSSRoots {
79 work.nBSSRoots = nBSSRoots
83 // Scan span roots for finalizer specials.
85 // We depend on addfinalizer to mark objects that get
86 // finalizers after root marking.
88 // We're going to scan the whole heap (that was available at the time the
89 // mark phase started, i.e. markArenas) for in-use spans which have specials.
91 // Break up the work into arenas, and further into chunks.
93 // Snapshot allArenas as markArenas. This snapshot is safe because allArenas is append-only.
95 mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)]
96 work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)
100 // Gs may be created after this point, but it's okay that we
101 // ignore them because they begin life without any roots, so
102 // there's nothing to scan, and any roots they create during
103 // the concurrent phase will be caught by the write barrier.
104 work.nStackRoots = int(atomic.Loaduintptr(&allglen))
106 work.markrootNext = 0
107 work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
109 // Calculate base indexes of each root type
110 work.baseData = uint32(fixedRootCount)
111 work.baseBSS = work.baseData + uint32(work.nDataRoots)
112 work.baseSpans = work.baseBSS + uint32(work.nBSSRoots)
113 work.baseStacks = work.baseSpans + uint32(work.nSpanRoots)
114 work.baseEnd = work.baseStacks + uint32(work.nStackRoots)
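// Illustrative sketch (not part of the runtime): given the base indexes
// computed above, a markroot job index falls into one of the contiguous
// ranges below. markroot (further down in this file) performs the same
// classification with explicit range checks; the rootKindOf name and the
// string results here are hypothetical.
func rootKindOf(i uint32) string {
	switch {
	case i < work.baseData:
		return "fixed root (finalizers, free G stacks, ...)"
	case i < work.baseBSS:
		return "data segment block"
	case i < work.baseSpans:
		return "BSS segment block"
	case i < work.baseStacks:
		return "span (specials) shard"
	case i < work.baseEnd:
		return "goroutine stack"
	default:
		return "out of range"
	}
}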
117 // gcMarkRootCheck checks that all roots have been scanned. It is
118 // purely for debugging.
119 func gcMarkRootCheck() {
120 if work.markrootNext < work.markrootJobs {
121 print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
122 throw("left over markroot jobs")
125 // Check that stacks have been scanned.
127 // We only check the first nStackRoots Gs that we should have scanned.
128 // Since we don't care about newer Gs (see comment in
129 // gcMarkRootPrepare), no locking is required.
131 forEachGRace(func(gp *g) {
132 if i >= work.nStackRoots {
137 println("gp", gp, "goid", gp.goid,
138 "status", readgstatus(gp),
139 "gcscandone", gp.gcscandone)
140 throw("scan missed a g")
147 // ptrmask for an allocation containing a single pointer.
148 var oneptrmask = [...]uint8{1}
150 // markroot scans the i'th root.
152 // Preemption must be disabled (because this uses a gcWork).
154 // nowritebarrier is only advisory here.
157 func markroot(gcw *gcWork, i uint32) {
158 // Note: if you add a case here, please also update heapdump.go:dumproots.
160 case work.baseData <= i && i < work.baseBSS:
161 for _, datap := range activeModules() {
162 markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-work.baseData))
165 case work.baseBSS <= i && i < work.baseSpans:
166 for _, datap := range activeModules() {
167 markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-work.baseBSS))
170 case i == fixedRootFinalizers:
171 for fb := allfin; fb != nil; fb = fb.alllink {
172 cnt := uintptr(atomic.Load(&fb.cnt))
173 scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
176 case i == fixedRootFreeGStacks:
177 // Switch to the system stack so we can call stackfree.
179 systemstack(markrootFreeGStacks)
181 case work.baseSpans <= i && i < work.baseStacks:
182 // mark mspan.specials
183 markrootSpans(gcw, int(i-work.baseSpans))
186 // the rest is scanning goroutine stacks
188 if work.baseStacks <= i && i < work.baseEnd {
189 // N.B. Atomic read of allglen in gcMarkRootPrepare
190 // acts as a barrier to ensure that allgs must be large
191 // enough to contain all relevant Gs.
192 gp = allgs[i-work.baseStacks]
194 throw("markroot: bad index")
197 // Remember when we first observed the G blocked. This is
198 // needed only to output in traceback.
199 status := readgstatus(gp) // We are not in a scan state
200 if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
201 gp.waitsince = work.tstart
204 // scanstack must be done on the system stack in case
205 // we're trying to scan our own stack.
207 // If this is a self-scan, put the user G in
208 // _Gwaiting to prevent self-deadlock. It may
209 // already be in _Gwaiting if this is a mark
210 // worker or we're in mark termination.
211 userG := getg().m.curg
212 selfScan := gp == userG && readgstatus(userG) == _Grunning
214 casgstatus(userG, _Grunning, _Gwaiting)
215 userG.waitreason = waitReasonGarbageCollectionScan
218 // TODO: suspendG blocks (and spins) until gp
219 // stops, which may take a while for
220 // running goroutines. Consider doing this in
221 // two phases where the first is non-blocking:
222 // we scan the stacks we can and ask running
223 // goroutines to scan themselves; and the second blocks.
225 stopped := suspendG(gp)
231 throw("g already scanned")
238 casgstatus(userG, _Gwaiting, _Grunning)
244 // markrootBlock scans the shard'th shard of the block of memory [b0,
245 // b0+n0), with the given pointer mask.
248 func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
249 if rootBlockBytes%(8*goarch.PtrSize) != 0 {
250 // This is necessary to pick byte offsets in ptrmask0.
251 throw("rootBlockBytes must be a multiple of 8*ptrSize")
254 // Note that if b0 is toward the end of the address space,
255 // then b0 + rootBlockBytes might wrap around.
256 // These tests are written to avoid any possible overflow.
257 off := uintptr(shard) * rootBlockBytes
262 ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*goarch.PtrSize))))
263 n := uintptr(rootBlockBytes)
269 scanblock(b, n, ptrmask, gcw, nil)
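// Illustrative sketch (not part of the runtime): the arithmetic markrootBlock
// uses to pick a shard's base address, pointer-mask byte offset, and length.
// The clamp of the final, partial block is elided in the excerpt above; this
// sketch assumes that is all it does. shardExtent is a hypothetical name.
func shardExtent(b0, n0 uintptr, shard int) (b, n, maskByteOff uintptr) {
	off := uintptr(shard) * rootBlockBytes
	b = b0 + off
	n = rootBlockBytes
	if off+n > n0 {
		// Last shard: only scan what remains of the block.
		n = n0 - off
	}
	// One mask bit per pointer-sized word, eight bits per mask byte.
	maskByteOff = off / (8 * goarch.PtrSize)
	return
}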
272 // markrootFreeGStacks frees stacks of dead Gs.
274 // This does not free stacks of dead Gs cached on Ps, but having a few
275 // cached stacks around isn't a problem.
276 func markrootFreeGStacks() {
277 // Take list of dead Gs with stacks.
278 lock(&sched.gFree.lock)
279 list := sched.gFree.stack
280 sched.gFree.stack = gList{}
281 unlock(&sched.gFree.lock)
287 q := gQueue{list.head, list.head}
288 for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
292 // Manipulate the queue directly since the Gs are
293 // already all linked the right way.
297 // Put Gs back on the free list.
298 lock(&sched.gFree.lock)
299 sched.gFree.noStack.pushAll(q)
300 unlock(&sched.gFree.lock)
303 // markrootSpans marks roots for one shard of markArenas.
306 func markrootSpans(gcw *gcWork, shard int) {
307 // Objects with finalizers have two GC-related invariants:
309 // 1) Everything reachable from the object must be marked.
310 // This ensures that when we pass the object to its finalizer,
311 // everything the finalizer can reach will be retained.
313 // 2) Finalizer specials (which are not in the garbage
314 // collected heap) are roots. In practice, this means the fn
315 // field must be scanned.
316 sg := mheap_.sweepgen
318 // Find the arena and page index into that arena for this shard.
319 ai := mheap_.markArenas[shard/(pagesPerArena/pagesPerSpanRoot)]
320 ha := mheap_.arenas[ai.l1()][ai.l2()]
321 arenaPage := uint(uintptr(shard) * pagesPerSpanRoot % pagesPerArena)
323 // Construct slice of bitmap which we'll iterate over.
324 specialsbits := ha.pageSpecials[arenaPage/8:]
325 specialsbits = specialsbits[:pagesPerSpanRoot/8]
326 for i := range specialsbits {
327 // Find set bits, which correspond to spans with specials.
328 specials := atomic.Load8(&specialsbits[i])
332 for j := uint(0); j < 8; j++ {
333 if specials&(1<<j) == 0 {
336 // Find the span for this bit.
338 // This value is guaranteed to be non-nil because having
339 // specials implies that the span is in-use, and since we're
340 // currently marking we can be sure that we don't have to worry
341 // about the span being freed and re-used.
342 s := ha.spans[arenaPage+uint(i)*8+j]
344 // The state must be mSpanInUse if the specials bit is set, so
345 // sanity check that.
346 if state := s.state.get(); state != mSpanInUse {
347 print("s.state = ", state, "\n")
348 throw("non in-use span found with specials bit set")
350 // Check that this span was swept (it may be cached or uncached).
351 if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
352 // sweepgen was updated (+2) during non-checkmark GC pass
353 print("sweep ", s.sweepgen, " ", sg, "\n")
354 throw("gc: unswept span")
357 // Lock the specials to prevent a special from being
358 // removed from the list while we're traversing it.
360 for sp := s.specials; sp != nil; sp = sp.next {
361 if sp.kind != _KindSpecialFinalizer {
364 // don't mark the finalized object, but scan it so we
365 // retain everything it points to.
366 spf := (*specialfinalizer)(unsafe.Pointer(sp))
367 // A finalizer can be set for an inner byte of an object; find the object's beginning.
368 p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize
370 // Mark everything that can be reached from
371 // the object (but *not* the object itself or
372 // we'll never collect it).
375 // The special itself is a root.
376 scanblock(uintptr(unsafe.Pointer(&spf.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
378 unlock(&s.speciallock)
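// Illustrative sketch (not part of the runtime): how an interior offset
// recorded in a finalizer special is rounded down to the base of the object
// that contains it, as done for spf above. For a span with elemsize 48,
// offsets 0-47 map to the first object, 48-95 to the second, and so on.
// objectBaseFor is a hypothetical name.
func objectBaseFor(spanBase, offset, elemsize uintptr) uintptr {
	return spanBase + offset/elemsize*elemsize
}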
383 // gcAssistAlloc performs GC work to make gp's assist debt positive.
384 // gp must be the calling user goroutine.
386 // This must be called with preemption enabled.
387 func gcAssistAlloc(gp *g) {
388 // Don't assist in non-preemptible contexts. These are
389 // generally fragile and won't allow the assist to block.
390 if getg() == gp.m.g0 {
393 if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
399 // Compute the amount of scan work we need to do to make the
400 // balance positive. When the required amount of work is low,
401 // we over-assist to build up credit for future allocations
402 // and amortize the cost of assisting.
403 assistWorkPerByte := float64frombits(atomic.Load64(&gcController.assistWorkPerByte))
404 assistBytesPerWork := float64frombits(atomic.Load64(&gcController.assistBytesPerWork))
405 debtBytes := -gp.gcAssistBytes
406 scanWork := int64(assistWorkPerByte * float64(debtBytes))
407 if scanWork < gcOverAssistWork {
408 scanWork = gcOverAssistWork
409 debtBytes = int64(assistBytesPerWork * float64(scanWork))
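// Illustrative sketch (not part of the runtime): the debt-to-work conversion
// performed above. A goroutine's gcAssistBytes goes negative as it allocates;
// the controller's assistWorkPerByte ratio converts that byte debt into scan
// work, and small debts are bumped up to gcOverAssistWork so the goroutine
// banks credit for future allocations instead of assisting on every small
// allocation. assistWorkFor and its parameters are hypothetical names.
func assistWorkFor(debtBytes int64, assistWorkPerByte float64, overAssist int64) int64 {
	scanWork := int64(assistWorkPerByte * float64(debtBytes))
	if scanWork < overAssist {
		// e.g. with assistWorkPerByte = 0.5 and a 1 KiB debt, the raw
		// requirement is 512 units; over-assist raises it to overAssist.
		scanWork = overAssist
	}
	return scanWork
}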
412 // Steal as much credit as we can from the background GC's
413 // scan credit. This is racy and may drop the background
414 // credit below 0 if two mutators steal at the same time. This
415 // will just cause steals to fail until credit is accumulated
416 // again, so in the long run it doesn't really matter, but we
417 // do have to handle the negative credit case.
418 bgScanCredit := atomic.Loadint64(&gcController.bgScanCredit)
420 if bgScanCredit > 0 {
421 if bgScanCredit < scanWork {
422 stolen = bgScanCredit
423 gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(stolen))
426 gp.gcAssistBytes += debtBytes
428 atomic.Xaddint64(&gcController.bgScanCredit, -stolen)
433 // We were able to steal all of the credit we needed.
436 traceGCMarkAssistDone()
442 if trace.enabled && !traced {
444 traceGCMarkAssistStart()
447 // Perform assist work
449 gcAssistAlloc1(gp, scanWork)
450 // The user stack may have moved, so this can't touch
451 // anything on it until it returns from systemstack.
454 completed := gp.param != nil
460 if gp.gcAssistBytes < 0 {
461 // We were unable to steal enough credit or perform
462 // enough work to pay off the assist debt. We need to
463 // do one of these before letting the mutator allocate
464 // more to prevent over-allocation.
466 // If this is because we were preempted, reschedule
467 // and try some more.
473 // Add this G to an assist queue and park. When the GC
474 // has more background credit, it will satisfy queued
475 // assists before flushing to the global credit pool.
477 // Note that this does *not* get woken up when more
478 // work is added to the work list. The theory is that
479 // there wasn't enough work to do anyway, so we might
480 // as well let background marking take care of the
481 // work that is available.
486 // At this point either background GC has satisfied
487 // this G's assist debt, or the GC cycle is over.
490 traceGCMarkAssistDone()
494 // gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
495 // stack. This is a separate function to make it easier to see that
496 // we're not capturing anything from the user stack, since the user
497 // stack may move while we're in this function.
499 // gcAssistAlloc1 indicates whether this assist completed the mark
500 // phase by setting gp.param to non-nil. This can't be communicated on
501 // the stack since it may move.
504 func gcAssistAlloc1(gp *g, scanWork int64) {
505 // Clear the flag indicating that this assist completed the mark phase.
509 if atomic.Load(&gcBlackenEnabled) == 0 {
510 // The gcBlackenEnabled check in malloc races with the
511 // store that clears it, but an atomic check in every malloc
512 // would be a performance hit.
513 // Instead we recheck it here on the non-preemptible system
514 // stack to determine if we should perform an assist.
516 // GC is done, so ignore any remaining debt.
520 // Track time spent in this assist. Since we're on the
521 // system stack, this is non-preemptible, so we can
522 // just measure start and end time.
523 startTime := nanotime()
525 decnwait := atomic.Xadd(&work.nwait, -1)
526 if decnwait == work.nproc {
527 println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
528 throw("nwait > work.nprocs")
531 // gcDrainN requires the caller to be preemptible.
532 casgstatus(gp, _Grunning, _Gwaiting)
533 gp.waitreason = waitReasonGCAssistMarking
535 // drain own cached work first in the hopes that it
536 // will be more cache friendly.
537 gcw := &getg().m.p.ptr().gcw
538 workDone := gcDrainN(gcw, scanWork)
540 casgstatus(gp, _Gwaiting, _Grunning)
542 // Record that we did this much scan work.
544 // Back out the number of bytes of assist credit that
545 // this scan work counts for. The "1+" is a poor man's
546 // round-up, to ensure this adds credit even if
547 // assistBytesPerWork is very low.
548 assistBytesPerWork := float64frombits(atomic.Load64(&gcController.assistBytesPerWork))
549 gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(workDone))
551 // If this is the last worker and we ran out of work,
552 // signal a completion point.
553 incnwait := atomic.Xadd(&work.nwait, +1)
554 if incnwait > work.nproc {
555 println("runtime: work.nwait=", incnwait,
556 "work.nproc=", work.nproc)
557 throw("work.nwait > work.nproc")
560 if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
561 // This has reached a background completion point. Set
562 // gp.param to a non-nil value to indicate this. It
563 // doesn't matter what we set it to (it just has to be a valid pointer).
565 gp.param = unsafe.Pointer(gp)
567 duration := nanotime() - startTime
569 _p_.gcAssistTime += duration
570 if _p_.gcAssistTime > gcAssistTimeSlack {
571 atomic.Xaddint64(&gcController.assistTime, _p_.gcAssistTime)
576 // gcWakeAllAssists wakes all currently blocked assists. This is used
577 // at the end of a GC cycle. gcBlackenEnabled must be false to prevent
578 // new assists from going to sleep after this point.
579 func gcWakeAllAssists() {
580 lock(&work.assistQueue.lock)
581 list := work.assistQueue.q.popList()
583 unlock(&work.assistQueue.lock)
586 // gcParkAssist puts the current goroutine on the assist queue and parks.
588 // gcParkAssist reports whether the assist is now satisfied. If it
589 // returns false, the caller must retry the assist.
590 func gcParkAssist() bool {
591 lock(&work.assistQueue.lock)
592 // If the GC cycle finished while we were getting the lock,
593 // exit the assist. The cycle can't finish while we hold the lock.
595 if atomic.Load(&gcBlackenEnabled) == 0 {
596 unlock(&work.assistQueue.lock)
601 oldList := work.assistQueue.q
602 work.assistQueue.q.pushBack(gp)
604 // Recheck for background credit now that this G is in
605 // the queue, but can still back out. This avoids a
606 // race in case background marking has flushed more
607 // credit since we checked above.
608 if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
609 work.assistQueue.q = oldList
610 if oldList.tail != 0 {
611 oldList.tail.ptr().schedlink.set(nil)
613 unlock(&work.assistQueue.lock)
617 goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceEvGoBlockGC, 2)
621 // gcFlushBgCredit flushes scanWork units of background scan work
622 // credit. This first satisfies blocked assists on the
623 // work.assistQueue and then flushes any remaining credit to
624 // gcController.bgScanCredit.
626 // Write barriers are disallowed because this is used by gcDrain after
627 // it has ensured that all work is drained and this must preserve that condition.
630 //go:nowritebarrierrec
631 func gcFlushBgCredit(scanWork int64) {
632 if work.assistQueue.q.empty() {
633 // Fast path; there are no blocked assists. There's a
634 // small window here where an assist may add itself to
635 // the blocked queue and park. If that happens, we'll
636 // just get it on the next flush.
637 atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
641 assistBytesPerWork := float64frombits(atomic.Load64(&gcController.assistBytesPerWork))
642 scanBytes := int64(float64(scanWork) * assistBytesPerWork)
644 lock(&work.assistQueue.lock)
645 for !work.assistQueue.q.empty() && scanBytes > 0 {
646 gp := work.assistQueue.q.pop()
647 // Note that gp.gcAssistBytes is negative because gp
648 // is in debt. Think carefully about the signs below.
649 if scanBytes+gp.gcAssistBytes >= 0 {
650 // Satisfy this entire assist debt.
651 scanBytes += gp.gcAssistBytes
653 // It's important that we *not* put gp in
654 // runnext. Otherwise, it's possible for user
655 // code to exploit the GC worker's high
656 // scheduler priority to get itself always run
657 // before other goroutines and always in the
658 // fresh quantum started by GC.
661 // Partially satisfy this assist.
662 gp.gcAssistBytes += scanBytes
664 // As a heuristic, we move this assist to the
665 // back of the queue so that large assists
666 // can't clog up the assist queue and
667 // substantially delay small assists.
668 work.assistQueue.q.pushBack(gp)
674 // Convert from scan bytes back to work.
675 assistWorkPerByte := float64frombits(atomic.Load64(&gcController.assistWorkPerByte))
676 scanWork = int64(float64(scanBytes) * assistWorkPerByte)
677 atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
679 unlock(&work.assistQueue.lock)
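// Illustrative sketch (not part of the runtime): the sign convention
// gcFlushBgCredit relies on. A parked assist's gcAssistBytes is negative (its
// debt); adding flushed scan bytes to it either clears the debt (the assist
// can be made ready) or leaves a smaller debt. The names below are
// hypothetical; only the arithmetic mirrors the loop above.
func applyCredit(scanBytes, assistDebtBytes int64) (remainingCredit, remainingDebt int64, satisfied bool) {
	if scanBytes+assistDebtBytes >= 0 {
		// Debt fully paid; leftover credit goes to the next queued
		// assist or to the global bgScanCredit pool.
		return scanBytes + assistDebtBytes, 0, true
	}
	// Credit exhausted; the assist keeps a (smaller) negative balance.
	return 0, scanBytes + assistDebtBytes, false
}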
682 // scanstack scans gp's stack, greying all pointers found on the stack.
684 // scanstack will also shrink the stack if it is safe to do so. If it
685 // is not, it schedules a stack shrink for the next synchronous safe point.
688 // scanstack is marked go:systemstack because it must not be preempted
689 // while using a workbuf.
693 func scanstack(gp *g, gcw *gcWork) {
694 if readgstatus(gp)&_Gscan == 0 {
695 print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
696 throw("scanstack - bad status")
699 switch readgstatus(gp) &^ _Gscan {
701 print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
702 throw("mark - bad status")
706 print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
707 throw("scanstack: goroutine not stopped")
708 case _Grunnable, _Gsyscall, _Gwaiting:
713 throw("can't scan our own stack")
716 if isShrinkStackSafe(gp) {
717 // Shrink the stack if not much of it is being used.
720 // Otherwise, shrink the stack at the next sync safe point.
721 gp.preemptShrink = true
724 var state stackScanState
725 state.stack = gp.stack
728 println("stack trace goroutine", gp.goid)
731 if debugScanConservative && gp.asyncSafePoint {
732 print("scanning async preempted goroutine ", gp.goid, " stack [", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
735 // Scan the saved context register. This is effectively a live
736 // register that gets moved back and forth between the
737 // register and sched.ctxt without a write barrier.
738 if gp.sched.ctxt != nil {
739 scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
742 // Scan the stack. Accumulate a list of stack objects.
743 scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
744 scanframeworker(frame, &state, gcw)
747 gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
749 // Find additional pointers that point into the stack from the heap.
750 // Currently this includes defers and panics. See also function copystack.
752 // Find and trace other pointers in defer records.
753 for d := gp._defer; d != nil; d = d.link {
755 // Scan the func value, which could be a stack allocated closure.
757 scanblock(uintptr(unsafe.Pointer(&d.fn)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
760 // The link field of a stack-allocated defer record might point
761 // to a heap-allocated defer record. Keep that heap record live.
762 scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
764 // Retain the defer records themselves.
765 // Defer records might not be reachable from the G through regular heap
766 // tracing because the defer linked list might weave between the stack and the heap.
768 scanblock(uintptr(unsafe.Pointer(&d)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
771 if gp._panic != nil {
772 // Panics are always stack allocated.
773 state.putPtr(uintptr(unsafe.Pointer(gp._panic)), false)
776 // Find and scan all reachable stack objects.
778 // The state's pointer queue prioritizes precise pointers over
779 // conservative pointers so that we'll prefer scanning stack
780 // objects precisely.
783 p, conservative := state.getPtr()
787 obj := state.findObject(p)
793 // We've already scanned this object.
796 obj.setRecord(nil) // Don't scan it again.
799 print(" live stkobj at", hex(state.stack.lo+uintptr(obj.off)), "of size", obj.size)
801 print(" (conservative)")
809 // This path is pretty unlikely: an object large enough
810 // to have a GC program allocated on the stack.
811 // We need some space to unpack the program into a straight
812 // bitmask, which we allocate/free here.
813 // TODO: it would be nice if there were a way to run a GC
814 // program without having to store all its bits. We'd have
815 // to change from a Lempel-Ziv style program to something else.
816 // Or we can forbid putting objects on stacks if they require
817 // a gc program (see issue 27447).
818 s = materializeGCProg(r.ptrdata(), gcdata)
819 gcdata = (*byte)(unsafe.Pointer(s.startAddr))
822 b := state.stack.lo + uintptr(obj.off)
824 scanConservative(b, r.ptrdata(), gcdata, gcw, &state)
826 scanblock(b, r.ptrdata(), gcdata, gcw, &state)
830 dematerializeGCProg(s)
834 // Deallocate object buffers.
835 // (Pointer buffers were all deallocated in the loop above.)
836 for state.head != nil {
840 for i := 0; i < x.nobj; i++ {
842 if obj.r == nil { // reachable
845 println(" dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of size", obj.r.size)
846 // Note: not necessarily really dead - only reachable-from-ptr dead.
850 putempty((*workbuf)(unsafe.Pointer(x)))
852 if state.buf != nil || state.cbuf != nil || state.freeBuf != nil {
853 throw("remaining pointer buffers")
857 // Scan a stack frame: local variables and function arguments/results.
859 func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
860 if _DebugGC > 1 && frame.continpc != 0 {
861 print("scanframe ", funcname(frame.fn), "\n")
864 isAsyncPreempt := frame.fn.valid() && frame.fn.funcID == funcID_asyncPreempt
865 isDebugCall := frame.fn.valid() && frame.fn.funcID == funcID_debugCallV2
866 if state.conservative || isAsyncPreempt || isDebugCall {
867 if debugScanConservative {
868 println("conservatively scanning function", funcname(frame.fn), "at PC", hex(frame.continpc))
871 // Conservatively scan the frame. Unlike the precise
872 // case, this includes the outgoing argument space
873 // since we may have stopped while this function was
874 // setting up a call.
876 // TODO: We could narrow this down if the compiler
877 // produced a single map per function of stack slots
878 // and registers that ever contain a pointer.
880 size := frame.varp - frame.sp
882 scanConservative(frame.sp, size, nil, gcw, state)
886 // Scan arguments to this frame.
887 if frame.arglen != 0 {
888 // TODO: We could pass the entry argument map
889 // to narrow this down further.
890 scanConservative(frame.argp, frame.arglen, nil, gcw, state)
893 if isAsyncPreempt || isDebugCall {
894 // This function's frame contained the
895 // registers for the asynchronously stopped
896 // parent frame. Scan the parent conservatively.
898 state.conservative = true
900 // We only wanted to scan those two frames
901 // conservatively. Clear the flag for future frames.
903 state.conservative = false
908 locals, args, objs := getStackMap(frame, &state.cache, false)
910 // Scan local variables if stack frame has been allocated.
912 size := uintptr(locals.n) * goarch.PtrSize
913 scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
918 scanblock(frame.argp, uintptr(args.n)*goarch.PtrSize, args.bytedata, gcw, state)
921 // Add all stack objects to the stack object list.
923 // varp is 0 for defers, where there are no locals.
924 // In that case, there can't be a pointer to its args, either.
925 // (And all args would be scanned above anyway.)
926 for i, obj := range objs {
928 base := frame.varp // locals base pointer
930 base = frame.argp // arguments and return values base pointer
932 ptr := base + uintptr(off)
934 // object hasn't been allocated in the frame yet.
938 println("stkobj at", hex(ptr), "of size", obj.size)
940 state.addObject(ptr, &objs[i])
945 type gcDrainFlags int
948 gcDrainUntilPreempt gcDrainFlags = 1 << iota
954 // gcDrain scans roots and objects in work buffers, blackening grey
955 // objects until it is unable to get more work. It may return before
956 // GC is done; it's the caller's responsibility to balance work from other Ps.
959 // If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt is set.
962 // If flags&gcDrainIdle != 0, gcDrain returns when there is other work to do.
965 // If flags&gcDrainFractional != 0, gcDrain self-preempts when
966 // pollFractionalWorkerExit() returns true. This implies gcDrainNoBlock.
969 // If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
970 // credit to gcController.bgScanCredit every gcCreditSlack units of scan work.
973 // gcDrain will always return if there is a pending STW.
976 func gcDrain(gcw *gcWork, flags gcDrainFlags) {
977 if !writeBarrier.needed {
978 throw("gcDrain phase incorrect")
982 preemptible := flags&gcDrainUntilPreempt != 0
983 flushBgCredit := flags&gcDrainFlushBgCredit != 0
984 idle := flags&gcDrainIdle != 0
986 initScanWork := gcw.scanWork
988 // checkWork is the scan work before performing the next
989 // self-preempt check.
990 checkWork := int64(1<<63 - 1)
991 var check func() bool
992 if flags&(gcDrainIdle|gcDrainFractional) != 0 {
993 checkWork = initScanWork + drainCheckThreshold
996 } else if flags&gcDrainFractional != 0 {
997 check = pollFractionalWorkerExit
1001 // Drain root marking jobs.
1002 if work.markrootNext < work.markrootJobs {
1003 // Stop if we're preemptible or if someone wants to STW.
1004 for !(gp.preempt && (preemptible || atomic.Load(&sched.gcwaiting) != 0)) {
1005 job := atomic.Xadd(&work.markrootNext, +1) - 1
1006 if job >= work.markrootJobs {
1010 if check != nil && check() {
1016 // Drain heap marking jobs.
1017 // Stop if we're preemptible or if someone wants to STW.
1018 for !(gp.preempt && (preemptible || atomic.Load(&sched.gcwaiting) != 0)) {
1019 // Try to keep work available on the global queue. We used to
1020 // check if there were waiting workers, but it's better to
1021 // just keep work available than to make workers wait. In the
1022 // worst case, we'll do O(log(_WorkbufSize)) unnecessary balances.
1028 b := gcw.tryGetFast()
1032 // Flush the write barrier
1033 // buffer; this may create more work.
1040 // Unable to get work.
1045 // Flush background scan work credit to the global
1046 // account if we've accumulated enough locally so
1047 // mutator assists can draw on it.
1048 if gcw.scanWork >= gcCreditSlack {
1049 atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
1051 gcFlushBgCredit(gcw.scanWork - initScanWork)
1054 checkWork -= gcw.scanWork
1058 checkWork += drainCheckThreshold
1059 if check != nil && check() {
1067 // Flush remaining scan work credit.
1068 if gcw.scanWork > 0 {
1069 atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
1071 gcFlushBgCredit(gcw.scanWork - initScanWork)
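// Illustrative sketch (not part of the runtime): the credit batching gcDrain
// uses. Scan work accumulates in the per-P gcWork and is only pushed to
// gcController.scanWork once it reaches gcCreditSlack; when background credit
// is flushed, the work already on the gcWork at entry (initScanWork) is
// excluded so it is not credited twice. The flushScanWork name and its
// return values are hypothetical.
func flushScanWork(localWork, initScanWork, creditSlack int64) (toController, toBgCredit int64) {
	if localWork < creditSlack {
		// Not enough yet; keep accumulating locally.
		return 0, 0
	}
	// Flush everything; only work done during this drain counts as
	// background credit that mutator assists can draw on.
	return localWork, localWork - initScanWork
}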
1077 // gcDrainN blackens grey objects until it has performed roughly
1078 // scanWork units of scan work or the G is preempted. This is
1079 // best-effort, so it may perform less work if it fails to get a work
1080 // buffer. Otherwise, it will perform at least n units of work, but
1081 // may perform more because scanning is always done in whole object
1082 // increments. It returns the amount of scan work performed.
1084 // The caller goroutine must be in a preemptible state (e.g.,
1085 // _Gwaiting) to prevent deadlocks during stack scanning. As a
1086 // consequence, this must be called on the system stack.
1090 func gcDrainN(gcw *gcWork, scanWork int64) int64 {
1091 if !writeBarrier.needed {
1092 throw("gcDrainN phase incorrect")
1095 // There may already be scan work on the gcw, which we don't
1096 // want to claim was done by this call.
1097 workFlushed := -gcw.scanWork
1100 for !gp.preempt && workFlushed+gcw.scanWork < scanWork {
1101 // See gcDrain comment.
1106 b := gcw.tryGetFast()
1110 // Flush the write barrier buffer;
1111 // this may create more work.
1118 // Try to do a root job.
1120 // TODO: Assists should get credit for this work.
1122 if work.markrootNext < work.markrootJobs {
1123 job := atomic.Xadd(&work.markrootNext, +1) - 1
1124 if job < work.markrootJobs {
1129 // No heap or root jobs.
1135 // Flush background scan work credit.
1136 if gcw.scanWork >= gcCreditSlack {
1137 atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
1138 workFlushed += gcw.scanWork
1143 // Unlike gcDrain, there's no need to flush remaining work
1144 // here because this never flushes to bgScanCredit and
1145 // gcw.dispose will flush any remaining work to scanWork.
1147 return workFlushed + gcw.scanWork
1150 // scanblock scans b as scanobject would, but using an explicit
1151 // pointer bitmap instead of the heap bitmap.
1153 // This is used to scan non-heap roots, so it does not update
1154 // gcw.bytesMarked or gcw.scanWork.
1156 // If stk != nil, possible stack pointers are also reported to stk.putPtr.
1158 func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
1159 // Use local copies of original parameters, so that a stack trace
1160 // due to one of the throws below shows the original block base and extent.
1165 for i := uintptr(0); i < n; {
1166 // Find bits for the next word.
1167 bits := uint32(*addb(ptrmask, i/(goarch.PtrSize*8)))
1169 i += goarch.PtrSize * 8
1172 for j := 0; j < 8 && i < n; j++ {
1174 // Same work as in scanobject; see comments there.
1175 p := *(*uintptr)(unsafe.Pointer(b + i))
1177 if obj, span, objIndex := findObject(p, b, i); obj != 0 {
1178 greyobject(obj, b, i, span, gcw, objIndex)
1179 } else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
1180 stk.putPtr(p, false)
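// Illustrative sketch (not part of the runtime): how a packed pointer mask
// like the one scanblock consumes is laid out. Each mask byte covers eight
// pointer-sized words; bit j of byte i describes word i*8+j. wordIsPointer is
// a hypothetical helper operating on an ordinary byte slice rather than the
// runtime's raw pointers.
func wordIsPointer(ptrmask []byte, word int) bool {
	return (ptrmask[word/8]>>(word%8))&1 != 0
}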
1190 // scanobject scans the object starting at b, adding pointers to gcw.
1191 // b must point to the beginning of a heap object or an oblet.
1192 // scanobject consults the GC bitmap for the pointer mask and the
1193 // spans for the size of the object.
1196 func scanobject(b uintptr, gcw *gcWork) {
1197 // Prefetch object before we scan it.
1199 // This will overlap fetching the beginning of the object with initial
1200 // setup before we start scanning the object.
1203 // Find the bits for b and the size of the object at b.
1205 // b is either the beginning of an object, in which case this
1206 // is the size of the object to scan, or it points to an
1207 // oblet, in which case we compute the size to scan below.
1208 hbits := heapBitsForAddr(b)
1209 s := spanOfUnchecked(b)
1212 throw("scanobject n == 0")
1215 if n > maxObletBytes {
1216 // Large object. Break into oblets for better
1217 // parallelism and lower latency.
1219 // It's possible this is a noscan object (not
1220 // from greyobject, but from other code
1221 // paths), in which case we must *not* enqueue
1222 // oblets since their bitmaps will be uninitialized.
1224 if s.spanclass.noscan() {
1225 // Bypass the whole scan.
1226 gcw.bytesMarked += uint64(n)
1230 // Enqueue the other oblets to scan later.
1231 // Some oblets may be in b's scalar tail, but
1232 // these will be marked as "no more pointers",
1233 // so we'll drop out immediately when we go to scan those.
1235 for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
1236 if !gcw.putFast(oblet) {
1242 // Compute the size of the oblet. Since this object
1243 // must be a large object, s.base() is the beginning of the object.
1245 n = s.base() + s.elemsize - b
1246 if n > maxObletBytes {
1252 for i = 0; i < n; i, hbits = i+goarch.PtrSize, hbits.next() {
1253 // Load bits once. See CL 22712 and issue 16973 for discussion.
1254 bits := hbits.bits()
1255 if bits&bitScan == 0 {
1256 break // no more pointers in this object
1258 if bits&bitPointer == 0 {
1259 continue // not a pointer
1262 // Work here is duplicated in scanblock and above.
1263 // If you make changes here, make changes there too.
1264 obj := *(*uintptr)(unsafe.Pointer(b + i))
1266 // At this point we have extracted the next potential pointer.
1267 // Quickly filter out nil and pointers back to the current object.
1268 if obj != 0 && obj-b >= n {
1269 // Test if obj points into the Go heap and, if so, mark the object.
1272 // Note that it's possible for findObject to
1273 // fail if obj points to a just-allocated heap
1274 // object because of a race with growing the
1275 // heap. In this case, we know the object was
1276 // just allocated and hence will be marked by
1277 // allocation itself.
1278 if obj, span, objIndex := findObject(obj, b, i); obj != 0 {
1279 greyobject(obj, b, i, span, gcw, objIndex)
1283 gcw.bytesMarked += uint64(n)
1284 gcw.scanWork += int64(i)
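// Illustrative sketch (not part of the runtime): how a large object is carved
// into oblets as in scanobject above. The first oblet starts at the object
// base; each subsequent oblet starts maxObletBytes later, and every oblet's
// scan length is clamped to the end of the object. obletBounds is a
// hypothetical name; oblet must be a valid oblet index for the object.
func obletBounds(objBase, elemsize, oblet uintptr) (start, length uintptr) {
	start = objBase + oblet*maxObletBytes
	length = objBase + elemsize - start
	if length > maxObletBytes {
		length = maxObletBytes
	}
	return
}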
1287 // scanConservative scans block [b, b+n) conservatively, treating any
1288 // pointer-like value in the block as a pointer.
1290 // If ptrmask != nil, only words that are marked in ptrmask are
1291 // considered as potential pointers.
1293 // If state != nil, it's assumed that [b, b+n) is a block in the stack
1294 // and may contain pointers to stack objects.
1295 func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState) {
1296 if debugScanConservative {
1298 print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
1299 hexdumpWords(b, b+n, func(p uintptr) byte {
1301 word := (p - b) / goarch.PtrSize
1302 bits := *addb(ptrmask, word/8)
1303 if (bits>>(word%8))&1 == 0 {
1308 val := *(*uintptr)(unsafe.Pointer(p))
1309 if state != nil && state.stack.lo <= val && val < state.stack.hi {
1313 span := spanOfHeap(val)
1317 idx := span.objIndex(val)
1318 if span.isFree(idx) {
1326 for i := uintptr(0); i < n; i += goarch.PtrSize {
1328 word := i / goarch.PtrSize
1329 bits := *addb(ptrmask, word/8)
1331 // Skip 8 words (the loop increment will do the 8th)
1333 // This must be the first time we've
1334 // seen this word of ptrmask, so i
1335 // must be 8-word-aligned, but check
1336 // our reasoning just in case.
1337 if i%(goarch.PtrSize*8) != 0 {
1338 throw("misaligned mask")
1340 i += goarch.PtrSize*8 - goarch.PtrSize
1343 if (bits>>(word%8))&1 == 0 {
1348 val := *(*uintptr)(unsafe.Pointer(b + i))
1350 // Check if val points into the stack.
1351 if state != nil && state.stack.lo <= val && val < state.stack.hi {
1352 // val may point to a stack object. This
1353 // object may be dead from last cycle and
1354 // hence may contain pointers to unallocated
1355 // objects, but unlike heap objects we can't
1356 // tell if it's already dead. Hence, if all
1357 // pointers to this object are from
1358 // conservative scanning, we have to scan it
1359 // defensively, too.
1360 state.putPtr(val, true)
1364 // Check if val points to a heap span.
1365 span := spanOfHeap(val)
1370 // Check if val points to an allocated object.
1371 idx := span.objIndex(val)
1372 if span.isFree(idx) {
1376 // val points to an allocated object. Mark it.
1377 obj := span.base() + idx*span.elemsize
1378 greyobject(obj, b, i, span, gcw, idx)
1382 // Shade the object if it isn't already.
1383 // The object is not nil and known to be in the heap.
1384 // Preemption must be disabled.
1386 func shade(b uintptr) {
1387 if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {
1388 gcw := &getg().m.p.ptr().gcw
1389 greyobject(obj, 0, 0, span, gcw, objIndex)
1393 // obj is the start of an object with mark mbits.
1394 // If it isn't already marked, mark it and enqueue into gcw.
1395 // base and off are for debugging only and could be removed.
1397 // See also wbBufFlush1, which partially duplicates this logic.
1399 //go:nowritebarrierrec
1400 func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
1401 // obj should be start of allocation, and so must be at least pointer-aligned.
1402 if obj&(goarch.PtrSize-1) != 0 {
1403 throw("greyobject: obj not pointer-aligned")
1405 mbits := span.markBitsForIndex(objIndex)
1408 if setCheckmark(obj, base, off, mbits) {
1413 if debug.gccheckmark > 0 && span.isFree(objIndex) {
1414 print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
1415 gcDumpObject("base", base, off)
1416 gcDumpObject("obj", obj, ^uintptr(0))
1417 getg().m.traceback = 2
1418 throw("marking free object")
1421 // If marked we have nothing to do.
1422 if mbits.isMarked() {
1428 arena, pageIdx, pageMask := pageIndexOf(span.base())
1429 if arena.pageMarks[pageIdx]&pageMask == 0 {
1430 atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
1433 // If this is a noscan object, fast-track it to black
1434 // instead of greying it.
1435 if span.spanclass.noscan() {
1436 gcw.bytesMarked += uint64(span.elemsize)
1441 // We're adding obj to P's local workbuf, so it's likely
1442 // this object will be processed soon by the same P.
1443 // Even if the workbuf gets flushed, there will likely still be
1444 // some benefit on platforms with inclusive shared caches.
1446 // Queue the obj for scanning.
1447 if !gcw.putFast(obj) {
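// Illustrative sketch (not part of the runtime): the byte/bit addressing used
// for per-page bitmaps such as arena.pageMarks above. Page k within an arena
// lives in byte k/8 of the bitmap, at bit k%8, which is why greyobject tests
// pageMarks[pageIdx]&pageMask before issuing the atomic Or8. pageBit is a
// hypothetical helper.
func pageBit(pageInArena uintptr) (byteIdx uintptr, mask uint8) {
	return pageInArena / 8, uint8(1) << (pageInArena % 8)
}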
1452 // gcDumpObject dumps the contents of obj for debugging and marks the
1453 // field at byte offset off in obj.
1454 func gcDumpObject(label string, obj, off uintptr) {
1456 print(label, "=", hex(obj))
1461 print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
1462 if state := s.state.get(); 0 <= state && int(state) < len(mSpanStateNames) {
1463 print(mSpanStateNames[state], "\n")
1465 print("unknown(", state, ")\n")
1470 if s.state.get() == mSpanManual && size == 0 {
1471 // We're printing something from a stack frame. We
1472 // don't know how big it is, so just show up to and including off.
1474 size = off + goarch.PtrSize
1476 for i := uintptr(0); i < size; i += goarch.PtrSize {
1477 // For big objects, just print the beginning (because
1478 // that usually hints at the object's type) and the
1479 // fields around off.
1480 if !(i < 128*goarch.PtrSize || off-16*goarch.PtrSize < i && i < off+16*goarch.PtrSize) {
1488 print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
1499 // gcmarknewobject marks a newly allocated object black. obj must
1500 // not contain any non-nil pointers.
1502 // This is nosplit so it can manipulate a gcWork without preemption.
1506 func gcmarknewobject(span *mspan, obj, size, scanSize uintptr) {
1507 if useCheckmark { // The world should be stopped so this should not happen.
1508 throw("gcmarknewobject called while doing checkmark")
1512 objIndex := span.objIndex(obj)
1513 span.markBitsForIndex(objIndex).setMarked()
1516 arena, pageIdx, pageMask := pageIndexOf(span.base())
1517 if arena.pageMarks[pageIdx]&pageMask == 0 {
1518 atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
1521 gcw := &getg().m.p.ptr().gcw
1522 gcw.bytesMarked += uint64(size)
1523 gcw.scanWork += int64(scanSize)
1526 // gcMarkTinyAllocs greys all active tiny alloc blocks.
1528 // The world must be stopped.
1529 func gcMarkTinyAllocs() {
1530 assertWorldStopped()
1532 for _, p := range allp {
1534 if c == nil || c.tiny == 0 {
1537 _, span, objIndex := findObject(c.tiny, 0, 0)
1539 greyobject(c.tiny, 0, 0, span, gcw, objIndex)