1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Garbage collector: marking and scanning
12 "internal/goexperiment"
13 "runtime/internal/atomic"
14 "runtime/internal/sys"
19 fixedRootFinalizers = iota
23 // rootBlockBytes is the number of bytes to scan per data or
25 rootBlockBytes = 256 << 10
27 // maxObletBytes is the maximum bytes of an object to scan at
28 // once. Larger objects will be split up into "oblets" of at
29 // most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
30 // scan preemption at ~100 µs.
32 // This must be > _MaxSmallSize so that the object base is the span base.
34 maxObletBytes = 128 << 10
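// Since maxObletBytes exceeds _MaxSmallSize, only objects in large
// (single-object) spans are ever split into oblets, which is why
// scanobject can treat s.base() as the object's own base when slicing
// an oblet out of a large object.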
36 // drainCheckThreshold specifies how many units of work to do
37 // between self-preemption checks in gcDrain. Assuming a scan
38 // rate of 1 MB/ms, this is ~100 µs. Lower values have higher
39 // overhead in the scan loop (the scheduler check may perform
40 // a syscall, so its overhead is nontrivial). Higher values
41 // make the system less responsive to incoming work.
42 drainCheckThreshold = 100000
44 // pagesPerSpanRoot indicates how many pages to scan from a span root
45 // at a time. Used by special root marking.
47 // Higher values improve throughput by increasing locality, but
48 // increase the minimum latency of a marking operation.
50 // Must be a multiple of the pageInUse bitmap element size and
51 // must also evenly divide pagesPerArena.
52 pagesPerSpanRoot = 512
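// With the runtime's 8 KiB pages, one span-root job covers
// 512 * 8 KiB = 4 MiB of pages; on 64-bit platforms with 64 MiB heap
// arenas that works out to pagesPerArena/pagesPerSpanRoot = 16 jobs
// per arena.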
55 // gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
56 // some miscellany) and initializes scanning-related state.
58 // The world must be stopped.
59 func gcMarkRootPrepare() {
62 // Compute how many data and BSS root blocks there are.
63 nBlocks := func(bytes uintptr) int {
64 return int(divRoundUp(bytes, rootBlockBytes))
71 for _, datap := range activeModules() {
72 nDataRoots := nBlocks(datap.edata - datap.data)
73 if nDataRoots > work.nDataRoots {
74 work.nDataRoots = nDataRoots
78 for _, datap := range activeModules() {
79 nBSSRoots := nBlocks(datap.ebss - datap.bss)
80 if nBSSRoots > work.nBSSRoots {
81 work.nBSSRoots = nBSSRoots
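// For example, a module with 600 KiB of initialized data contributes
// divRoundUp(600<<10, rootBlockBytes) = 3 data root blocks. Because each
// data/BSS markroot job walks every module at the same shard index, only
// the maximum block count across modules (computed above) is needed.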
85 // Scan span roots for finalizer specials.
87 // We depend on addfinalizer to mark objects that get
88 // finalizers after root marking.
90 // We're going to scan the whole heap (that was available at the time the
91 // mark phase started, i.e. markArenas) for in-use spans which have specials.
93 // Break up the work into arenas, and further into chunks.
95 // Snapshot allArenas as markArenas. This snapshot is safe because allArenas is append-only.
97 mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)]
98 work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)
102 // Gs may be created after this point, but it's okay that we
103 // ignore them because they begin life without any roots, so
104 // there's nothing to scan, and any roots they create during
105 // the concurrent phase will be caught by the write barrier.
106 work.stackRoots = allGsSnapshot()
107 work.nStackRoots = len(work.stackRoots)
109 work.markrootNext = 0
110 work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
112 // Calculate base indexes of each root type
113 work.baseData = uint32(fixedRootCount)
114 work.baseBSS = work.baseData + uint32(work.nDataRoots)
115 work.baseSpans = work.baseBSS + uint32(work.nBSSRoots)
116 work.baseStacks = work.baseSpans + uint32(work.nSpanRoots)
117 work.baseEnd = work.baseStacks + uint32(work.nStackRoots)
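// The resulting markroot job index space is laid out as:
//	[0, fixedRootCount)        fixed roots (finalizers, free G stacks)
//	[baseData, baseBSS)        data segment blocks
//	[baseBSS, baseSpans)       BSS blocks
//	[baseSpans, baseStacks)    span (specials) roots
//	[baseStacks, baseEnd)      goroutine stack roots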
120 // gcMarkRootCheck checks that all roots have been scanned. It is
121 // purely for debugging.
122 func gcMarkRootCheck() {
123 if work.markrootNext < work.markrootJobs {
124 print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
125 throw("left over markroot jobs")
128 // Check that stacks have been scanned.
130 // We only check the first nStackRoots Gs that we should have scanned.
131 // Since we don't care about newer Gs (see comment in
132 // gcMarkRootPrepare), no locking is required.
134 forEachGRace(func(gp *g) {
135 if i >= work.nStackRoots {
140 println("gp", gp, "goid", gp.goid,
141 "status", readgstatus(gp),
142 "gcscandone", gp.gcscandone)
143 throw("scan missed a g")
150 // ptrmask for an allocation containing a single pointer.
151 var oneptrmask = [...]uint8{1}
153 // markroot scans the i'th root.
155 // Preemption must be disabled (because this uses a gcWork).
157 // Returns the amount of GC work credit produced by the operation.
158 // If flushBgCredit is true, then that credit is also flushed
159 // to the background credit pool.
161 // nowritebarrier is only advisory here.
164 func markroot(gcw *gcWork, i uint32, flushBgCredit bool) int64 {
165 // Note: if you add a case here, please also update heapdump.go:dumproots.
167 var workCounter *atomic.Int64
169 case work.baseData <= i && i < work.baseBSS:
170 workCounter = &gcController.globalsScanWork
171 for _, datap := range activeModules() {
172 workDone += markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-work.baseData))
175 case work.baseBSS <= i && i < work.baseSpans:
176 workCounter = &gcController.globalsScanWork
177 for _, datap := range activeModules() {
178 workDone += markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-work.baseBSS))
181 case i == fixedRootFinalizers:
182 for fb := allfin; fb != nil; fb = fb.alllink {
183 cnt := uintptr(atomic.Load(&fb.cnt))
184 scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
187 case i == fixedRootFreeGStacks:
188 // Switch to the system stack so we can call stackfree.
190 systemstack(markrootFreeGStacks)
192 case work.baseSpans <= i && i < work.baseStacks:
193 // mark mspan.specials
194 markrootSpans(gcw, int(i-work.baseSpans))
197 // the rest is scanning goroutine stacks
198 workCounter = &gcController.stackScanWork
199 if i < work.baseStacks || work.baseEnd <= i {
201 print("runtime: markroot index ", i, " not in stack roots range [", work.baseStacks, ", ", work.baseEnd, ")\n")
202 throw("markroot: bad index")
204 gp := work.stackRoots[i-work.baseStacks]
206 // Remember when we first observed the G blocked;
207 // this is needed only for traceback output.
208 status := readgstatus(gp) // We are not in a scan state
209 if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
210 gp.waitsince = work.tstart
213 // scanstack must be done on the system stack in case
214 // we're trying to scan our own stack.
216 // If this is a self-scan, put the user G in
217 // _Gwaiting to prevent self-deadlock. It may
218 // already be in _Gwaiting if this is a mark
219 // worker or we're in mark termination.
220 userG := getg().m.curg
221 selfScan := gp == userG && readgstatus(userG) == _Grunning
223 casGToWaiting(userG, _Grunning, waitReasonGarbageCollectionScan)
226 // TODO: suspendG blocks (and spins) until gp
227 // stops, which may take a while for
228 // running goroutines. Consider doing this in
229 // two phases where the first is non-blocking:
230 // we scan the stacks we can and ask running
231 // goroutines to scan themselves; and the
233 stopped := suspendG(gp)
239 throw("g already scanned")
241 workDone += scanstack(gp, gcw)
246 casgstatus(userG, _Gwaiting, _Grunning)
250 if workCounter != nil && workDone != 0 {
251 workCounter.Add(workDone)
253 gcFlushBgCredit(workDone)
259 // markrootBlock scans the shard'th shard of the block of memory [b0,
260 // b0+n0), with the given pointer mask.
262 // Returns the amount of work done.
265 func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) int64 {
266 if rootBlockBytes%(8*goarch.PtrSize) != 0 {
267 // This is necessary to pick byte offsets in ptrmask0.
268 throw("rootBlockBytes must be a multiple of 8*ptrSize")
271 // Note that if b0 is toward the end of the address space,
272 // then b0 + rootBlockBytes might wrap around.
273 // These tests are written to avoid any possible overflow.
274 off := uintptr(shard) * rootBlockBytes
279 ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*goarch.PtrSize))))
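// Each ptrmask byte describes 8 words, so each rootBlockBytes shard
// advances the mask by rootBlockBytes/(8*PtrSize) bytes (4 KiB on 64-bit).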
280 n := uintptr(rootBlockBytes)
286 scanblock(b, n, ptrmask, gcw, nil)
290 // markrootFreeGStacks frees stacks of dead Gs.
292 // This does not free stacks of dead Gs cached on Ps, but having a few
293 // cached stacks around isn't a problem.
294 func markrootFreeGStacks() {
295 // Take list of dead Gs with stacks.
296 lock(&sched.gFree.lock)
297 list := sched.gFree.stack
298 sched.gFree.stack = gList{}
299 unlock(&sched.gFree.lock)
305 q := gQueue{list.head, list.head}
306 for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
310 // Manipulate the queue directly since the Gs are
311 // already all linked the right way.
315 // Put Gs back on the free list.
316 lock(&sched.gFree.lock)
317 sched.gFree.noStack.pushAll(q)
318 unlock(&sched.gFree.lock)
321 // markrootSpans marks roots for one shard of markArenas.
324 func markrootSpans(gcw *gcWork, shard int) {
325 // Objects with finalizers have two GC-related invariants:
327 // 1) Everything reachable from the object must be marked.
328 // This ensures that when we pass the object to its finalizer,
329 // everything the finalizer can reach will be retained.
331 // 2) Finalizer specials (which are not in the garbage
332 // collected heap) are roots. In practice, this means the fn
333 // field must be scanned.
334 sg := mheap_.sweepgen
336 // Find the arena and page index into that arena for this shard.
337 ai := mheap_.markArenas[shard/(pagesPerArena/pagesPerSpanRoot)]
338 ha := mheap_.arenas[ai.l1()][ai.l2()]
339 arenaPage := uint(uintptr(shard) * pagesPerSpanRoot % pagesPerArena)
341 // Construct slice of bitmap which we'll iterate over.
342 specialsbits := ha.pageSpecials[arenaPage/8:]
343 specialsbits = specialsbits[:pagesPerSpanRoot/8]
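// pagesPerSpanRoot/8 = 64 bytes of the pageSpecials bitmap, one bit per
// page in this shard.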
344 for i := range specialsbits {
345 // Find set bits, which correspond to spans with specials.
346 specials := atomic.Load8(&specialsbits[i])
350 for j := uint(0); j < 8; j++ {
351 if specials&(1<<j) == 0 {
354 // Find the span for this bit.
356 // This value is guaranteed to be non-nil because having
357 // specials implies that the span is in-use, and since we're
358 // currently marking we can be sure that we don't have to worry
359 // about the span being freed and re-used.
360 s := ha.spans[arenaPage+uint(i)*8+j]
362 // The state must be mSpanInUse if the specials bit is set, so
363 // sanity check that.
364 if state := s.state.get(); state != mSpanInUse {
365 print("s.state = ", state, "\n")
366 throw("non in-use span found with specials bit set")
368 // Check that this span was swept (it may be cached or uncached).
369 if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
370 // sweepgen was updated (+2) during non-checkmark GC pass
371 print("sweep ", s.sweepgen, " ", sg, "\n")
372 throw("gc: unswept span")
375 // Lock the specials to prevent a special from being
376 // removed from the list while we're traversing it.
378 for sp := s.specials; sp != nil; sp = sp.next {
379 if sp.kind != _KindSpecialFinalizer {
382 // don't mark finalized object, but scan it so we
383 // retain everything it points to.
384 spf := (*specialfinalizer)(unsafe.Pointer(sp))
385 // A finalizer can be set for an inner byte of an object; find the object's beginning.
386 p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize
388 // Mark everything that can be reached from
389 // the object (but *not* the object itself or
390 // we'll never collect it).
391 if !s.spanclass.noscan() {
395 // The special itself is a root.
396 scanblock(uintptr(unsafe.Pointer(&spf.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
398 unlock(&s.speciallock)
403 // gcAssistAlloc performs GC work to make gp's assist debt positive.
404 // gp must be the calling user goroutine.
406 // This must be called with preemption enabled.
407 func gcAssistAlloc(gp *g) {
408 // Don't assist in non-preemptible contexts. These are
409 // generally fragile and won't allow the assist to block.
410 if getg() == gp.m.g0 {
413 if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
417 // This extremely verbose boolean indicates whether we've
418 // entered mark assist from the perspective of the tracer.
420 // In the old tracer, this is just before we call gcAssistAlloc1
421 // *and* tracing is enabled. Because the old tracer doesn't
422 // do any extra tracking, we need to be careful to not emit an
423 // "end" event if there was no corresponding "begin" for the
426 // In the new tracer, this is just before we call gcAssistAlloc1
427 // *regardless* of whether tracing is enabled. This is because
428 // the new tracer allows for tracing to begin (and advance
429 // generations) in the middle of a GC mark phase, so we need to
430 // record some state so that the tracer can pick it up to ensure
431 // a consistent trace result.
433 // TODO(mknyszek): Hide the details of inMarkAssist in tracer
434 // functions and simplify all the state tracking. This is a lot.
435 enteredMarkAssistForTracing := false
437 if gcCPULimiter.limiting() {
438 // If the CPU limiter is enabled, intentionally don't
439 // assist to reduce the amount of CPU time spent in the GC.
440 if enteredMarkAssistForTracing {
441 trace := traceAcquire()
443 trace.GCMarkAssistDone()
444 // Set this *after* we trace the end to make sure
445 // that we emit an in-progress event if this is
446 // the first event for the goroutine in the trace
447 // or trace generation. Also, do this between
448 // acquire/release because this is part of the
449 // goroutine's trace state, and it must be atomic
450 // with respect to the tracer.
451 gp.inMarkAssist = false
454 // This state is tracked even if tracing isn't enabled.
455 // It's only used by the new tracer.
456 // See the comment on enteredMarkAssistForTracing.
457 gp.inMarkAssist = false
462 // Compute the amount of scan work we need to do to make the
463 // balance positive. When the required amount of work is low,
464 // we over-assist to build up credit for future allocations
465 // and amortize the cost of assisting.
466 assistWorkPerByte := gcController.assistWorkPerByte.Load()
467 assistBytesPerWork := gcController.assistBytesPerWork.Load()
468 debtBytes := -gp.gcAssistBytes
469 scanWork := int64(assistWorkPerByte * float64(debtBytes))
470 if scanWork < gcOverAssistWork {
471 scanWork = gcOverAssistWork
472 debtBytes = int64(assistBytesPerWork * float64(scanWork))
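// For example (hypothetical numbers), with assistWorkPerByte = 0.5 and a
// 64 KiB debt, scanWork starts at 32 Ki units; if that is below
// gcOverAssistWork it is rounded up and debtBytes recomputed, so the
// surplus work becomes allocation credit for this goroutine.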
475 // Steal as much credit as we can from the background GC's
476 // scan credit. This is racy and may drop the background
477 // credit below 0 if two mutators steal at the same time. This
478 // will just cause steals to fail until credit is accumulated
479 // again, so in the long run it doesn't really matter, but we
480 // do have to handle the negative credit case.
481 bgScanCredit := gcController.bgScanCredit.Load()
483 if bgScanCredit > 0 {
484 if bgScanCredit < scanWork {
485 stolen = bgScanCredit
486 gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(stolen))
489 gp.gcAssistBytes += debtBytes
491 gcController.bgScanCredit.Add(-stolen)
496 // We were able to steal all of the credit we needed.
498 if enteredMarkAssistForTracing {
499 trace := traceAcquire()
501 trace.GCMarkAssistDone()
502 // Set this *after* we trace the end to make sure
503 // that we emit an in-progress event if this is
504 // the first event for the goroutine in the trace
505 // or trace generation. Also, do this between
506 // acquire/release because this is part of the
507 // goroutine's trace state, and it must be atomic
508 // with respect to the tracer.
509 gp.inMarkAssist = false
512 // This state is tracked even if tracing isn't enabled.
513 // It's only used by the new tracer.
514 // See the comment on enteredMarkAssistForTracing.
515 gp.inMarkAssist = false
521 if !enteredMarkAssistForTracing {
522 trace := traceAcquire()
524 if !goexperiment.ExecTracer2 {
525 // In the old tracer, enter mark assist tracing only
526 // if we actually traced an event. Otherwise a goroutine
527 // waking up from mark assist post-GC might end up
528 // writing a stray "end" event.
530 // This means inMarkAssist will not be meaningful
531 // in the old tracer; that's OK, it's unused.
533 // See the comment on enteredMarkAssistForTracing.
534 enteredMarkAssistForTracing = true
536 trace.GCMarkAssistStart()
537 // Set this *after* we trace the start, otherwise we may
538 // emit an in-progress event for an assist we're about to start.
539 gp.inMarkAssist = true
542 gp.inMarkAssist = true
544 if goexperiment.ExecTracer2 {
545 // In the new tracer, set enter mark assist tracing if we
546 // ever pass this point, because we must manage inMarkAssist
549 // See the comment on enteredMarkAssistForTracing.
550 enteredMarkAssistForTracing = true
554 // Perform assist work
556 gcAssistAlloc1(gp, scanWork)
557 // The user stack may have moved, so this can't touch
558 // anything on it until it returns from systemstack.
561 completed := gp.param != nil
567 if gp.gcAssistBytes < 0 {
568 // We were unable to steal enough credit or perform
569 // enough work to pay off the assist debt. We need to
570 // do one of these before letting the mutator allocate
571 // more to prevent over-allocation.
573 // If this is because we were preempted, reschedule
574 // and try some more.
580 // Add this G to an assist queue and park. When the GC
581 // has more background credit, it will satisfy queued
582 // assists before flushing to the global credit pool.
584 // Note that this does *not* get woken up when more
585 // work is added to the work list. The theory is that
586 // there wasn't enough work to do anyway, so we might
587 // as well let background marking take care of the
588 // work that is available.
593 // At this point either background GC has satisfied
594 // this G's assist debt, or the GC cycle is over.
596 if enteredMarkAssistForTracing {
597 trace := traceAcquire()
599 trace.GCMarkAssistDone()
600 // Set this *after* we trace the end to make sure
601 // that we emit an in-progress event if this is
602 // the first event for the goroutine in the trace
603 // or trace generation. Also, do this between
604 // acquire/release because this is part of the
605 // goroutine's trace state, and it must be atomic
606 // with respect to the tracer.
607 gp.inMarkAssist = false
610 // This state is tracked even if tracing isn't enabled.
611 // It's only used by the new tracer.
612 // See the comment on enteredMarkAssistForTracing.
613 gp.inMarkAssist = false
618 // gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
619 // stack. This is a separate function to make it easier to see that
620 // we're not capturing anything from the user stack, since the user
621 // stack may move while we're in this function.
623 // gcAssistAlloc1 indicates whether this assist completed the mark
624 // phase by setting gp.param to non-nil. This can't be communicated on
625 // the stack since it may move.
628 func gcAssistAlloc1(gp *g, scanWork int64) {
629 // Clear the flag indicating that this assist completed the mark phase.
633 if atomic.Load(&gcBlackenEnabled) == 0 {
634 // The gcBlackenEnabled check in malloc races with the
635 // store that clears it but an atomic check in every malloc
636 // would be a performance hit.
637 // Instead we recheck it here on the non-preemptible system
638 // stack to determine if we should perform an assist.
640 // GC is done, so ignore any remaining debt.
644 // Track time spent in this assist. Since we're on the
645 // system stack, this is non-preemptible, so we can
646 // just measure start and end time.
648 // Limiter event tracking might be disabled if we end up here
649 // while on a mark worker.
650 startTime := nanotime()
651 trackLimiterEvent := gp.m.p.ptr().limiterEvent.start(limiterEventMarkAssist, startTime)
653 decnwait := atomic.Xadd(&work.nwait, -1)
654 if decnwait == work.nproc {
655 println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
656 throw("nwait > work.nprocs")
659 // gcDrainN requires the caller to be preemptible.
660 casGToWaiting(gp, _Grunning, waitReasonGCAssistMarking)
662 // drain own cached work first in the hopes that it
663 // will be more cache friendly.
664 gcw := &getg().m.p.ptr().gcw
665 workDone := gcDrainN(gcw, scanWork)
667 casgstatus(gp, _Gwaiting, _Grunning)
669 // Record that we did this much scan work.
671 // Back out the number of bytes of assist credit that
672 // this scan work counts for. The "1+" is a poor man's
673 // round-up, to ensure this adds credit even if
674 // assistBytesPerWork is very low.
675 assistBytesPerWork := gcController.assistBytesPerWork.Load()
676 gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(workDone))
678 // If this is the last worker and we ran out of work,
679 // signal a completion point.
680 incnwait := atomic.Xadd(&work.nwait, +1)
681 if incnwait > work.nproc {
682 println("runtime: work.nwait=", incnwait,
683 "work.nproc=", work.nproc)
684 throw("work.nwait > work.nproc")
687 if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
688 // This has reached a background completion point. Set
689 // gp.param to a non-nil value to indicate this. It
690 // doesn't matter what we set it to (it just has to be a valid pointer).
692 gp.param = unsafe.Pointer(gp)
695 duration := now - startTime
697 pp.gcAssistTime += duration
698 if trackLimiterEvent {
699 pp.limiterEvent.stop(limiterEventMarkAssist, now)
701 if pp.gcAssistTime > gcAssistTimeSlack {
702 gcController.assistTime.Add(pp.gcAssistTime)
703 gcCPULimiter.update(now)
708 // gcWakeAllAssists wakes all currently blocked assists. This is used
709 // at the end of a GC cycle. gcBlackenEnabled must be false to prevent
710 // new assists from going to sleep after this point.
711 func gcWakeAllAssists() {
712 lock(&work.assistQueue.lock)
713 list := work.assistQueue.q.popList()
715 unlock(&work.assistQueue.lock)
718 // gcParkAssist puts the current goroutine on the assist queue and parks.
720 // gcParkAssist reports whether the assist is now satisfied. If it
721 // returns false, the caller must retry the assist.
722 func gcParkAssist() bool {
723 lock(&work.assistQueue.lock)
724 // If the GC cycle finished while we were getting the lock,
725 // exit the assist. The cycle can't finish while we hold the lock.
727 if atomic.Load(&gcBlackenEnabled) == 0 {
728 unlock(&work.assistQueue.lock)
733 oldList := work.assistQueue.q
734 work.assistQueue.q.pushBack(gp)
736 // Recheck for background credit now that this G is in
737 // the queue, but can still back out. This avoids a
738 // race in case background marking has flushed more
739 // credit since we checked above.
740 if gcController.bgScanCredit.Load() > 0 {
741 work.assistQueue.q = oldList
742 if oldList.tail != 0 {
743 oldList.tail.ptr().schedlink.set(nil)
745 unlock(&work.assistQueue.lock)
749 goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceBlockGCMarkAssist, 2)
753 // gcFlushBgCredit flushes scanWork units of background scan work
754 // credit. This first satisfies blocked assists on the
755 // work.assistQueue and then flushes any remaining credit to
756 // gcController.bgScanCredit.
758 // Write barriers are disallowed because this is used by gcDrain after
759 // it has ensured that all work is drained and this must preserve that condition.
762 //go:nowritebarrierrec
763 func gcFlushBgCredit(scanWork int64) {
764 if work.assistQueue.q.empty() {
765 // Fast path; there are no blocked assists. There's a
766 // small window here where an assist may add itself to
767 // the blocked queue and park. If that happens, we'll
768 // just get it on the next flush.
769 gcController.bgScanCredit.Add(scanWork)
773 assistBytesPerWork := gcController.assistBytesPerWork.Load()
774 scanBytes := int64(float64(scanWork) * assistBytesPerWork)
776 lock(&work.assistQueue.lock)
777 for !work.assistQueue.q.empty() && scanBytes > 0 {
778 gp := work.assistQueue.q.pop()
779 // Note that gp.gcAssistBytes is negative because gp
780 // is in debt. Think carefully about the signs below.
781 if scanBytes+gp.gcAssistBytes >= 0 {
782 // Satisfy this entire assist debt.
783 scanBytes += gp.gcAssistBytes
785 // It's important that we *not* put gp in
786 // runnext. Otherwise, it's possible for user
787 // code to exploit the GC worker's high
788 // scheduler priority to get itself always run
789 // before other goroutines and always in the
790 // fresh quantum started by GC.
793 // Partially satisfy this assist.
794 gp.gcAssistBytes += scanBytes
796 // As a heuristic, we move this assist to the
797 // back of the queue so that large assists
798 // can't clog up the assist queue and
799 // substantially delay small assists.
800 work.assistQueue.q.pushBack(gp)
806 // Convert from scan bytes back to work.
807 assistWorkPerByte := gcController.assistWorkPerByte.Load()
808 scanWork = int64(float64(scanBytes) * assistWorkPerByte)
809 gcController.bgScanCredit.Add(scanWork)
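// For example, if satisfying the queued assists left 4 KiB of scanBytes
// unspent and assistWorkPerByte is 0.5, then 2 Ki units of scan work are
// returned to the global credit pool here.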
811 unlock(&work.assistQueue.lock)
814 // scanstack scans gp's stack, greying all pointers found on the stack.
816 // Returns the amount of scan work performed, but doesn't update
817 // gcController.stackScanWork or flush any credit. Any background credit produced
818 // by this function should be flushed by its caller. scanstack itself can't
819 // safely flush because it may result in trying to wake up a goroutine that
820 // was just scanned, resulting in a self-deadlock.
822 // scanstack will also shrink the stack if it is safe to do so. If it
823 // is not, it schedules a stack shrink for the next synchronous safe point.
826 // scanstack is marked go:systemstack because it must not be preempted
827 // while using a workbuf.
831 func scanstack(gp *g, gcw *gcWork) int64 {
832 if readgstatus(gp)&_Gscan == 0 {
833 print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
834 throw("scanstack - bad status")
837 switch readgstatus(gp) &^ _Gscan {
839 print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
840 throw("mark - bad status")
844 print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
845 throw("scanstack: goroutine not stopped")
846 case _Grunnable, _Gsyscall, _Gwaiting:
851 throw("can't scan our own stack")
854 // scannedSize is the amount of work we'll be reporting.
856 // It is less than the allocated size (which is hi-lo).
858 if gp.syscallsp != 0 {
859 sp = gp.syscallsp // If in a system call this is the stack pointer (gp.sched.sp can be 0 in this case on Windows).
863 scannedSize := gp.stack.hi - sp
865 // Keep statistics for initial stack size calculation.
866 // Note that this accumulates the scanned size, not the allocated size.
867 p := getg().m.p.ptr()
868 p.scannedStackSize += uint64(scannedSize)
871 if isShrinkStackSafe(gp) {
872 // Shrink the stack if not much of it is being used.
875 // Otherwise, shrink the stack at the next sync safe point.
876 gp.preemptShrink = true
879 var state stackScanState
880 state.stack = gp.stack
883 println("stack trace goroutine", gp.goid)
886 if debugScanConservative && gp.asyncSafePoint {
887 print("scanning async preempted goroutine ", gp.goid, " stack [", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
890 // Scan the saved context register. This is effectively a live
891 // register that gets moved back and forth between the
892 // register and sched.ctxt without a write barrier.
893 if gp.sched.ctxt != nil {
894 scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
897 // Scan the stack. Accumulate a list of stack objects.
899 for u.init(gp, 0); u.valid(); u.next() {
900 scanframeworker(&u.frame, &state, gcw)
903 // Find additional pointers that point into the stack from the heap.
904 // Currently this includes defers and panics. See also function copystack.
906 // Find and trace other pointers in defer records.
907 for d := gp._defer; d != nil; d = d.link {
909 // Scan the func value, which could be a stack allocated closure.
911 scanblock(uintptr(unsafe.Pointer(&d.fn)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
914 // The link field of a stack-allocated defer record might point
915 // to a heap-allocated defer record. Keep that heap record live.
916 scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
918 // Retain defer records themselves.
919 // Defer records might not be reachable from the G through regular heap
920 // tracing because the defer linked list might weave between the stack and the heap.
922 scanblock(uintptr(unsafe.Pointer(&d)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
925 if gp._panic != nil {
926 // Panics are always stack allocated.
927 state.putPtr(uintptr(unsafe.Pointer(gp._panic)), false)
930 // Find and scan all reachable stack objects.
932 // The state's pointer queue prioritizes precise pointers over
933 // conservative pointers so that we'll prefer scanning stack
934 // objects precisely.
937 p, conservative := state.getPtr()
941 obj := state.findObject(p)
947 // We've already scanned this object.
950 obj.setRecord(nil) // Don't scan it again.
953 print(" live stkobj at", hex(state.stack.lo+uintptr(obj.off)), "of size", obj.size)
955 print(" (conservative)")
963 // This path is pretty unlikely: an object large enough
964 // to have a GC program, allocated on the stack.
965 // We need some space to unpack the program into a straight
966 // bitmask, which we allocate/free here.
967 // TODO: it would be nice if there were a way to run a GC
968 // program without having to store all its bits. We'd have
969 // to change from a Lempel-Ziv style program to something else.
970 // Or we can forbid putting objects on stacks if they require
971 // a gc program (see issue 27447).
972 s = materializeGCProg(r.ptrdata(), gcdata)
973 gcdata = (*byte)(unsafe.Pointer(s.startAddr))
976 b := state.stack.lo + uintptr(obj.off)
978 scanConservative(b, r.ptrdata(), gcdata, gcw, &state)
980 scanblock(b, r.ptrdata(), gcdata, gcw, &state)
984 dematerializeGCProg(s)
988 // Deallocate object buffers.
989 // (Pointer buffers were all deallocated in the loop above.)
990 for state.head != nil {
994 for i := 0; i < x.nobj; i++ {
996 if obj.r == nil { // reachable
999 println(" dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of size", obj.r.size)
1000 // Note: not necessarily really dead - only reachable-from-ptr dead.
1004 putempty((*workbuf)(unsafe.Pointer(x)))
1006 if state.buf != nil || state.cbuf != nil || state.freeBuf != nil {
1007 throw("remaining pointer buffers")
1009 return int64(scannedSize)
1012 // Scan a stack frame: local variables and function arguments/results.
1015 func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
1016 if _DebugGC > 1 && frame.continpc != 0 {
1017 print("scanframe ", funcname(frame.fn), "\n")
1020 isAsyncPreempt := frame.fn.valid() && frame.fn.funcID == abi.FuncID_asyncPreempt
1021 isDebugCall := frame.fn.valid() && frame.fn.funcID == abi.FuncID_debugCallV2
1022 if state.conservative || isAsyncPreempt || isDebugCall {
1023 if debugScanConservative {
1024 println("conservatively scanning function", funcname(frame.fn), "at PC", hex(frame.continpc))
1027 // Conservatively scan the frame. Unlike the precise
1028 // case, this includes the outgoing argument space
1029 // since we may have stopped while this function was
1030 // setting up a call.
1032 // TODO: We could narrow this down if the compiler
1033 // produced a single map per function of stack slots
1034 // and registers that ever contain a pointer.
1035 if frame.varp != 0 {
1036 size := frame.varp - frame.sp
1038 scanConservative(frame.sp, size, nil, gcw, state)
1042 // Scan arguments to this frame.
1043 if n := frame.argBytes(); n != 0 {
1044 // TODO: We could pass the entry argument map
1045 // to narrow this down further.
1046 scanConservative(frame.argp, n, nil, gcw, state)
1049 if isAsyncPreempt || isDebugCall {
1050 // This function's frame contained the
1051 // registers for the asynchronously stopped
1052 // parent frame. Scan the parent conservatively.
1054 state.conservative = true
1056 // We only wanted to scan those two frames
1057 // conservatively. Clear the flag for future frames.
1059 state.conservative = false
1064 locals, args, objs := frame.getStackMap(false)
1066 // Scan local variables if stack frame has been allocated.
1068 size := uintptr(locals.n) * goarch.PtrSize
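// Locals occupy the words immediately below varp, so the live-pointer
// bitmap covers [varp-size, varp).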
1069 scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
1074 scanblock(frame.argp, uintptr(args.n)*goarch.PtrSize, args.bytedata, gcw, state)
1077 // Add all stack objects to the stack object list.
1078 if frame.varp != 0 {
1079 // varp is 0 for defers, where there are no locals.
1080 // In that case, there can't be a pointer to its args, either.
1081 // (And all args would be scanned above anyway.)
1082 for i := range objs {
1085 base := frame.varp // locals base pointer
1087 base = frame.argp // arguments and return values base pointer
1089 ptr := base + uintptr(off)
1091 // object hasn't been allocated in the frame yet.
1094 if stackTraceDebug {
1095 println("stkobj at", hex(ptr), "of size", obj.size)
1097 state.addObject(ptr, obj)
1102 type gcDrainFlags int
1105 gcDrainUntilPreempt gcDrainFlags = 1 << iota
1106 gcDrainFlushBgCredit
1111 // gcDrainMarkWorkerIdle is a wrapper for gcDrain that exists to better account
1112 // mark time in profiles.
1113 func gcDrainMarkWorkerIdle(gcw *gcWork) {
1114 gcDrain(gcw, gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit)
1117 // gcDrainMarkWorkerDedicated is a wrapper for gcDrain that exists to better account
1118 // mark time in profiles.
1119 func gcDrainMarkWorkerDedicated(gcw *gcWork, untilPreempt bool) {
1120 flags := gcDrainFlushBgCredit
1122 flags |= gcDrainUntilPreempt
1127 // gcDrainMarkWorkerFractional is a wrapper for gcDrain that exists to better account
1128 // mark time in profiles.
1129 func gcDrainMarkWorkerFractional(gcw *gcWork) {
1130 gcDrain(gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit)
1133 // gcDrain scans roots and objects in work buffers, blackening grey
1134 // objects until it is unable to get more work. It may return before
1135 // GC is done; it's the caller's responsibility to balance work from other Ps.
1138 // If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt is set.
1141 // If flags&gcDrainIdle != 0, gcDrain returns when there is other work to do.
1144 // If flags&gcDrainFractional != 0, gcDrain self-preempts when
1145 // pollFractionalWorkerExit() returns true. This implies
1148 // If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
1149 // credit to gcController.bgScanCredit every gcCreditSlack units of scan work.
1152 // gcDrain will always return if there is a pending STW or forEachP.
1154 // Disabling write barriers is necessary to ensure that after we've
1155 // confirmed that we've drained gcw, we don't accidentally end
1156 // up flipping that condition by immediately adding work in the form
1157 // of a write barrier buffer flush.
1159 // Don't set nowritebarrierrec because it's safe for some callees to
1160 // have write barriers enabled.
1163 func gcDrain(gcw *gcWork, flags gcDrainFlags) {
1164 if !writeBarrier.enabled {
1165 throw("gcDrain phase incorrect")
1168 // N.B. We must be running in a non-preemptible context, so it's
1169 // safe to hold a reference to our P here.
1172 preemptible := flags&gcDrainUntilPreempt != 0
1173 flushBgCredit := flags&gcDrainFlushBgCredit != 0
1174 idle := flags&gcDrainIdle != 0
1176 initScanWork := gcw.heapScanWork
1178 // checkWork is the scan work before performing the next
1179 // self-preempt check.
1180 checkWork := int64(1<<63 - 1)
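// Default to MaxInt64, i.e. never self-preempt; only idle and fractional
// workers install a check function and a real threshold below.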
1181 var check func() bool
1182 if flags&(gcDrainIdle|gcDrainFractional) != 0 {
1183 checkWork = initScanWork + drainCheckThreshold
1186 } else if flags&gcDrainFractional != 0 {
1187 check = pollFractionalWorkerExit
1191 // Drain root marking jobs.
1192 if work.markrootNext < work.markrootJobs {
1193 // Stop if we're preemptible, if someone wants to STW, or if
1194 // someone is calling forEachP.
1195 for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
1196 job := atomic.Xadd(&work.markrootNext, +1) - 1
1197 if job >= work.markrootJobs {
1200 markroot(gcw, job, flushBgCredit)
1201 if check != nil && check() {
1207 // Drain heap marking jobs.
1209 // Stop if we're preemptible, if someone wants to STW, or if
1210 // someone is calling forEachP.
1212 // TODO(mknyszek): Consider always checking gp.preempt instead
1213 // of having the preempt flag, and making an exception for certain
1214 // mark workers in retake. That might be simpler than trying to
1215 // enumerate all the reasons why we might want to preempt, even
1216 // if we're supposed to be mostly non-preemptible.
1217 for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
1218 // Try to keep work available on the global queue. We used to
1219 // check if there were waiting workers, but it's better to
1220 // just keep work available than to make workers wait. In the
1221 // worst case, we'll do O(log(_WorkbufSize)) unnecessary balances.
1227 b := gcw.tryGetFast()
1231 // Flush the write barrier
1232 // buffer; this may create more work.
1239 // Unable to get work.
1244 // Flush background scan work credit to the global
1245 // account if we've accumulated enough locally so
1246 // mutator assists can draw on it.
1247 if gcw.heapScanWork >= gcCreditSlack {
1248 gcController.heapScanWork.Add(gcw.heapScanWork)
1250 gcFlushBgCredit(gcw.heapScanWork - initScanWork)
1253 checkWork -= gcw.heapScanWork
1254 gcw.heapScanWork = 0
1257 checkWork += drainCheckThreshold
1258 if check != nil && check() {
1266 // Flush remaining scan work credit.
1267 if gcw.heapScanWork > 0 {
1268 gcController.heapScanWork.Add(gcw.heapScanWork)
1270 gcFlushBgCredit(gcw.heapScanWork - initScanWork)
1272 gcw.heapScanWork = 0
1276 // gcDrainN blackens grey objects until it has performed roughly
1277 // scanWork units of scan work or the G is preempted. This is
1278 // best-effort, so it may perform less work if it fails to get a work
1279 // buffer. Otherwise, it will perform at least scanWork units of work, but
1280 // may perform more because scanning is always done in whole object
1281 // increments. It returns the amount of scan work performed.
1283 // The caller goroutine must be in a preemptible state (e.g.,
1284 // _Gwaiting) to prevent deadlocks during stack scanning. As a
1285 // consequence, this must be called on the system stack.
1289 func gcDrainN(gcw *gcWork, scanWork int64) int64 {
1290 if !writeBarrier.enabled {
1291 throw("gcDrainN phase incorrect")
1294 // There may already be scan work on the gcw, which we don't
1295 // want to claim was done by this call.
1296 workFlushed := -gcw.heapScanWork
1298 // In addition to backing out because of a preemption, back out
1299 // if the GC CPU limiter is enabled.
1301 for !gp.preempt && !gcCPULimiter.limiting() && workFlushed+gcw.heapScanWork < scanWork {
1302 // See gcDrain comment.
1307 b := gcw.tryGetFast()
1311 // Flush the write barrier buffer;
1312 // this may create more work.
1319 // Try to do a root job.
1320 if work.markrootNext < work.markrootJobs {
1321 job := atomic.Xadd(&work.markrootNext, +1) - 1
1322 if job < work.markrootJobs {
1323 workFlushed += markroot(gcw, job, false)
1327 // No heap or root jobs.
1333 // Flush background scan work credit.
1334 if gcw.heapScanWork >= gcCreditSlack {
1335 gcController.heapScanWork.Add(gcw.heapScanWork)
1336 workFlushed += gcw.heapScanWork
1337 gcw.heapScanWork = 0
1341 // Unlike gcDrain, there's no need to flush remaining work
1342 // here because this never flushes to bgScanCredit and
1343 // gcw.dispose will flush any remaining work to scanWork.
1345 return workFlushed + gcw.heapScanWork
1348 // scanblock scans b as scanobject would, but using an explicit
1349 // pointer bitmap instead of the heap bitmap.
1351 // This is used to scan non-heap roots, so it does not update
1352 // gcw.bytesMarked or gcw.heapScanWork.
1354 // If stk != nil, possible stack pointers are also reported to stk.putPtr.
1357 func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
1358 // Use local copies of original parameters, so that a stack trace
1359 // due to one of the throws below shows the original block base and extent.
1364 for i := uintptr(0); i < n; {
1365 // Find bits for the next word.
1366 bits := uint32(*addb(ptrmask, i/(goarch.PtrSize*8)))
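// Each ptrmask byte covers 8 words; a zero byte lets us skip all 8 at once.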
1368 i += goarch.PtrSize * 8
1371 for j := 0; j < 8 && i < n; j++ {
1373 // Same work as in scanobject; see comments there.
1374 p := *(*uintptr)(unsafe.Pointer(b + i))
1376 if obj, span, objIndex := findObject(p, b, i); obj != 0 {
1377 greyobject(obj, b, i, span, gcw, objIndex)
1378 } else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
1379 stk.putPtr(p, false)
1389 // scanobject scans the object starting at b, adding pointers to gcw.
1390 // b must point to the beginning of a heap object or an oblet.
1391 // scanobject consults the GC bitmap for the pointer mask and the
1392 // spans for the size of the object.
1395 func scanobject(b uintptr, gcw *gcWork) {
1396 // Prefetch object before we scan it.
1398 // This will overlap fetching the beginning of the object with initial
1399 // setup before we start scanning the object.
1402 // Find the bits for b and the size of the object at b.
1404 // b is either the beginning of an object, in which case this
1405 // is the size of the object to scan, or it points to an
1406 // oblet, in which case we compute the size to scan below.
1407 s := spanOfUnchecked(b)
1410 throw("scanobject n == 0")
1412 if s.spanclass.noscan() {
1413 // Correctness-wise this is ok, but it's inefficient
1414 // if noscan objects reach here.
1415 throw("scanobject of a noscan object")
1419 if n > maxObletBytes {
1420 // Large object. Break into oblets for better
1421 // parallelism and lower latency.
1423 // Enqueue the other oblets to scan later.
1424 // Some oblets may be in b's scalar tail, but
1425 // these will be marked as "no more pointers",
1426 // so we'll drop out immediately when we go to scan those.
1428 for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
1429 if !gcw.putFast(oblet) {
1435 // Compute the size of the oblet. Since this object
1436 // must be a large object, s.base() is the beginning of the object.
1438 n = s.base() + s.elemsize - b
1439 n = min(n, maxObletBytes)
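// n is now the length of this oblet: at most maxObletBytes, and shorter
// for the final oblet of the object.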
1440 if goexperiment.AllocHeaders {
1441 tp = s.typePointersOfUnchecked(s.base())
1442 tp = tp.fastForward(b-tp.addr, b+n)
1445 if goexperiment.AllocHeaders {
1446 tp = s.typePointersOfUnchecked(b)
1451 if !goexperiment.AllocHeaders {
1452 hbits = heapBitsForAddr(b, n)
1454 var scanSize uintptr
1457 if goexperiment.AllocHeaders {
1458 if tp, addr = tp.nextFast(); addr == 0 {
1459 if tp, addr = tp.next(b + n); addr == 0 {
1464 if hbits, addr = hbits.nextFast(); addr == 0 {
1465 if hbits, addr = hbits.next(); addr == 0 {
1471 // Keep track of farthest pointer we found, so we can
1472 // update heapScanWork. TODO: is there a better metric,
1473 // now that we can skip scalar portions pretty efficiently?
1474 scanSize = addr - b + goarch.PtrSize
1476 // Work here is duplicated in scanblock and above.
1477 // If you make changes here, make changes there too.
1478 obj := *(*uintptr)(unsafe.Pointer(addr))
1480 // At this point we have extracted the next potential pointer.
1481 // Quickly filter out nil and pointers back to the current object.
1482 if obj != 0 && obj-b >= n {
1483 // Test if obj points into the Go heap and, if so, mark the object.
1486 // Note that it's possible for findObject to
1487 // fail if obj points to a just-allocated heap
1488 // object because of a race with growing the
1489 // heap. In this case, we know the object was
1490 // just allocated and hence will be marked by
1491 // allocation itself.
1492 if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 {
1493 greyobject(obj, b, addr-b, span, gcw, objIndex)
1497 gcw.bytesMarked += uint64(n)
1498 gcw.heapScanWork += int64(scanSize)
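// Note that bytesMarked is credited with the full oblet length, while
// heapScanWork only counts bytes up to the last pointer word found.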
1501 // scanConservative scans block [b, b+n) conservatively, treating any
1502 // pointer-like value in the block as a pointer.
1504 // If ptrmask != nil, only words that are marked in ptrmask are
1505 // considered as potential pointers.
1507 // If state != nil, it's assumed that [b, b+n) is a block in the stack
1508 // and may contain pointers to stack objects.
1509 func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState) {
1510 if debugScanConservative {
1512 print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
1513 hexdumpWords(b, b+n, func(p uintptr) byte {
1515 word := (p - b) / goarch.PtrSize
1516 bits := *addb(ptrmask, word/8)
1517 if (bits>>(word%8))&1 == 0 {
1522 val := *(*uintptr)(unsafe.Pointer(p))
1523 if state != nil && state.stack.lo <= val && val < state.stack.hi {
1527 span := spanOfHeap(val)
1531 idx := span.objIndex(val)
1532 if span.isFree(idx) {
1540 for i := uintptr(0); i < n; i += goarch.PtrSize {
1542 word := i / goarch.PtrSize
1543 bits := *addb(ptrmask, word/8)
1545 // Skip 8 words (the loop increment will do the 8th)
1547 // This must be the first time we've
1548 // seen this word of ptrmask, so i
1549 // must be 8-word-aligned, but check
1550 // our reasoning just in case.
1551 if i%(goarch.PtrSize*8) != 0 {
1552 throw("misaligned mask")
1554 i += goarch.PtrSize*8 - goarch.PtrSize
1557 if (bits>>(word%8))&1 == 0 {
1562 val := *(*uintptr)(unsafe.Pointer(b + i))
1564 // Check if val points into the stack.
1565 if state != nil && state.stack.lo <= val && val < state.stack.hi {
1566 // val may point to a stack object. This
1567 // object may be dead from last cycle and
1568 // hence may contain pointers to unallocated
1569 // objects, but unlike heap objects we can't
1570 // tell if it's already dead. Hence, if all
1571 // pointers to this object are from
1572 // conservative scanning, we have to scan it
1573 // defensively, too.
1574 state.putPtr(val, true)
1578 // Check if val points to a heap span.
1579 span := spanOfHeap(val)
1584 // Check if val points to an allocated object.
1585 idx := span.objIndex(val)
1586 if span.isFree(idx) {
1590 // val points to an allocated object. Mark it.
1591 obj := span.base() + idx*span.elemsize
1592 greyobject(obj, b, i, span, gcw, idx)
1596 // Shade the object if it isn't already.
1597 // The object is not nil and known to be in the heap.
1598 // Preemption must be disabled.
1601 func shade(b uintptr) {
1602 if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {
1603 gcw := &getg().m.p.ptr().gcw
1604 greyobject(obj, 0, 0, span, gcw, objIndex)
1608 // obj is the start of an object with mark mbits.
1609 // If it isn't already marked, mark it and enqueue into gcw.
1610 // base and off are for debugging only and could be removed.
1612 // See also wbBufFlush1, which partially duplicates this logic.
1614 //go:nowritebarrierrec
1615 func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
1616 // obj should be start of allocation, and so must be at least pointer-aligned.
1617 if obj&(goarch.PtrSize-1) != 0 {
1618 throw("greyobject: obj not pointer-aligned")
1620 mbits := span.markBitsForIndex(objIndex)
1623 if setCheckmark(obj, base, off, mbits) {
1628 if debug.gccheckmark > 0 && span.isFree(objIndex) {
1629 print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
1630 gcDumpObject("base", base, off)
1631 gcDumpObject("obj", obj, ^uintptr(0))
1632 getg().m.traceback = 2
1633 throw("marking free object")
1636 // If marked we have nothing to do.
1637 if mbits.isMarked() {
1643 arena, pageIdx, pageMask := pageIndexOf(span.base())
1644 if arena.pageMarks[pageIdx]&pageMask == 0 {
1645 atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
1648 // If this is a noscan object, fast-track it to black
1649 // instead of greying it.
1650 if span.spanclass.noscan() {
1651 gcw.bytesMarked += uint64(span.elemsize)
1656 // We're adding obj to P's local workbuf, so it's likely
1657 // this object will be processed soon by the same P.
1658 // Even if the workbuf gets flushed, there will likely still be
1659 // some benefit on platforms with inclusive shared caches.
1661 // Queue the obj for scanning.
1662 if !gcw.putFast(obj) {
1667 // gcDumpObject dumps the contents of obj for debugging and marks the
1668 // field at byte offset off in obj.
1669 func gcDumpObject(label string, obj, off uintptr) {
1671 print(label, "=", hex(obj))
1676 print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
1677 if state := s.state.get(); 0 <= state && int(state) < len(mSpanStateNames) {
1678 print(mSpanStateNames[state], "\n")
1680 print("unknown(", state, ")\n")
1685 if s.state.get() == mSpanManual && size == 0 {
1686 // We're printing something from a stack frame. We
1687 // don't know how big it is, so just show up to and including off.
1689 size = off + goarch.PtrSize
1691 for i := uintptr(0); i < size; i += goarch.PtrSize {
1692 // For big objects, just print the beginning (because
1693 // that usually hints at the object's type) and the
1694 // fields around off.
1695 if !(i < 128*goarch.PtrSize || off-16*goarch.PtrSize < i && i < off+16*goarch.PtrSize) {
1703 print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
1714 // gcmarknewobject marks a newly allocated object black. obj must
1715 // not contain any non-nil pointers.
1717 // This is nosplit so it can manipulate a gcWork without preemption.
1721 func gcmarknewobject(span *mspan, obj, size uintptr) {
1722 if useCheckmark { // The world should be stopped so this should not happen.
1723 throw("gcmarknewobject called while doing checkmark")
1727 objIndex := span.objIndex(obj)
1728 span.markBitsForIndex(objIndex).setMarked()
1731 arena, pageIdx, pageMask := pageIndexOf(span.base())
1732 if arena.pageMarks[pageIdx]&pageMask == 0 {
1733 atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
1736 gcw := &getg().m.p.ptr().gcw
1737 gcw.bytesMarked += uint64(size)
1740 // gcMarkTinyAllocs greys all active tiny alloc blocks.
1742 // The world must be stopped.
1743 func gcMarkTinyAllocs() {
1744 assertWorldStopped()
1746 for _, p := range allp {
1748 if c == nil || c.tiny == 0 {
1751 _, span, objIndex := findObject(c.tiny, 0, 0)
1753 greyobject(c.tiny, 0, 0, span, gcw, objIndex)
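// The tiny block may pack several live tiny (noscan) allocations;
// greying the block keeps all of them from being reclaimed.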