// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: marking and scanning

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	fixedRootFinalizers = iota
	fixedRootFreeGStacks
	fixedRootCount

	// rootBlockBytes is the number of bytes to scan per data or
	// BSS root.
	rootBlockBytes = 256 << 10

	// maxObletBytes is the maximum bytes of an object to scan at
	// once. Larger objects will be split up into "oblets" of at
	// most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
	// scan preemption at ~100 µs.
	//
	// This must be > _MaxSmallSize so that the object base is the
	// span base.
	maxObletBytes = 128 << 10
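
	// Illustrative oblet arithmetic (a sketch with assumed numbers, not
	// runtime code): a 1 MiB object is scanned as ceil(size/maxObletBytes)
	// oblets, so no single scan step exceeds maxObletBytes:
	//
	//	size := uintptr(1 << 20)                             // hypothetical object size
	//	oblets := (size + maxObletBytes - 1) / maxObletBytes // == 8 scan units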

	// drainCheckThreshold specifies how many units of work to do
	// between self-preemption checks in gcDrain. Assuming a scan
	// rate of 1 MB/ms, this is ~100 µs. Lower values have higher
	// overhead in the scan loop (the scheduler check may perform
	// a syscall, so its overhead is nontrivial). Higher values
	// make the system less responsive to incoming work.
	drainCheckThreshold = 100000

	// pagesPerSpanRoot indicates how many pages to scan from a span root
	// at a time. Used by special root marking.
	//
	// Higher values improve throughput by increasing locality, but
	// increase the minimum latency of a marking operation.
	//
	// Must be a multiple of the pageInUse bitmap element size and
	// must also evenly divide pagesPerArena.
	pagesPerSpanRoot = 512
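
	// For example (illustrative, platform-dependent numbers): with 64 MiB
	// heap arenas and 8 KiB pages, pagesPerArena = 8192, so each arena
	// contributes 8192/512 = 16 span root jobs.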

	// go115NewMarkrootSpans is a feature flag that indicates whether
	// to use the new bitmap-based markrootSpans implementation.
	go115NewMarkrootSpans = true
)

// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
// some miscellany) and initializes scanning-related state.
//
// The world must be stopped.
func gcMarkRootPrepare() {
	work.nFlushCacheRoots = 0

	// Compute how many data and BSS root blocks there are.
	nBlocks := func(bytes uintptr) int {
		return int(divRoundUp(bytes, rootBlockBytes))
	}

	work.nDataRoots = 0
	work.nBSSRoots = 0

	// Scan globals.
	for _, datap := range activeModules() {
		nDataRoots := nBlocks(datap.edata - datap.data)
		if nDataRoots > work.nDataRoots {
			work.nDataRoots = nDataRoots
		}
	}

	for _, datap := range activeModules() {
		nBSSRoots := nBlocks(datap.ebss - datap.bss)
		if nBSSRoots > work.nBSSRoots {
			work.nBSSRoots = nBSSRoots
		}
	}
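
	// Shard arithmetic, for illustration (assumed segment sizes, not
	// runtime code): with rootBlockBytes = 256 KiB, a 1 MiB data segment
	// yields divRoundUp(1<<20, 256<<10) = 4 root blocks, and even a
	// 1-byte segment still yields 1, since divRoundUp(n, d) = (n+d-1)/d.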

	// Scan span roots for finalizer specials.
	//
	// We depend on addfinalizer to mark objects that get
	// finalizers after root marking.
	if go115NewMarkrootSpans {
		// We're going to scan the whole heap (that was available at the time the
		// mark phase started, i.e. markArenas) for in-use spans which have specials.
		//
		// Break up the work into arenas, and further into chunks.
		//
		// Snapshot allArenas as markArenas. This snapshot is safe because allArenas
		// is append-only.
		mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)]
		work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)
	} else {
		// We're only interested in scanning the in-use spans,
		// which will all be swept at this point. More spans
		// may be added to this list during concurrent GC, but
		// we only care about spans that were allocated before
		// mark termination started.
		work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks()
	}

	// Scan stacks.
	//
	// Gs may be created after this point, but it's okay that we
	// ignore them because they begin life without any roots, so
	// there's nothing to scan, and any roots they create during
	// the concurrent phase will be scanned during mark
	// termination.
	work.nStackRoots = int(atomic.Loaduintptr(&allglen))

	work.markrootNext = 0
	work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
}
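
// The markroot job space is a set of contiguous index ranges, one per root
// class. A hedged sketch of the layout (the authoritative mapping is the
// base* arithmetic in markroot below):
//
//	[0, fixedRootCount)       fixed roots (finalizers, free G stacks)
//	[..., +nFlushCacheRoots)  mcache flushes
//	[..., +nDataRoots)        data segment blocks
//	[..., +nBSSRoots)         BSS segment blocks
//	[..., +nSpanRoots)        span specials
//	[..., +nStackRoots)       goroutine stacks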

// gcMarkRootCheck checks that all roots have been scanned. It is
// purely for debugging.
func gcMarkRootCheck() {
	if work.markrootNext < work.markrootJobs {
		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
		throw("left over markroot jobs")
	}

	// Check that stacks have been scanned.
	lock(&allglock)
	for i := 0; i < work.nStackRoots; i++ {
		gp := allgs[i]
		if !gp.gcscandone {
			println("gp", gp, "goid", gp.goid,
				"status", readgstatus(gp),
				"gcscandone", gp.gcscandone)
			unlock(&allglock) // Avoid self-deadlock with traceback.
			throw("scan missed a g")
		}
	}
	unlock(&allglock)
}

// ptrmask for an allocation containing a single pointer.
var oneptrmask = [...]uint8{1}

// markroot scans the i'th root.
//
// Preemption must be disabled (because this uses a gcWork).
//
// nowritebarrier is only advisory here.
//
//go:nowritebarrier
func markroot(gcw *gcWork, i uint32) {
	// TODO(austin): This is a bit ridiculous. Compute and store
	// the bases in gcMarkRootPrepare instead of the counts.
	baseFlushCache := uint32(fixedRootCount)
	baseData := baseFlushCache + uint32(work.nFlushCacheRoots)
	baseBSS := baseData + uint32(work.nDataRoots)
	baseSpans := baseBSS + uint32(work.nBSSRoots)
	baseStacks := baseSpans + uint32(work.nSpanRoots)
	end := baseStacks + uint32(work.nStackRoots)

	// Note: if you add a case here, please also update heapdump.go:dumproots.
	switch {
	case baseFlushCache <= i && i < baseData:
		flushmcache(int(i - baseFlushCache))

	case baseData <= i && i < baseBSS:
		for _, datap := range activeModules() {
			markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-baseData))
		}

	case baseBSS <= i && i < baseSpans:
		for _, datap := range activeModules() {
			markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-baseBSS))
		}

	case i == fixedRootFinalizers:
		for fb := allfin; fb != nil; fb = fb.alllink {
			cnt := uintptr(atomic.Load(&fb.cnt))
			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
		}

	case i == fixedRootFreeGStacks:
		// Switch to the system stack so we can call
		// stackfree.
		systemstack(markrootFreeGStacks)

	case baseSpans <= i && i < baseStacks:
		// mark mspan.specials
		markrootSpans(gcw, int(i-baseSpans))

	default:
		// the rest is scanning goroutine stacks
		var gp *g
		if baseStacks <= i && i < end {
			gp = allgs[i-baseStacks]
		} else {
			throw("markroot: bad index")
		}

		// remember when we've first observed the G blocked
		// needed only to output in traceback
		status := readgstatus(gp) // We are not in a scan state
		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
			gp.waitsince = work.tstart
		}

		// scanstack must be done on the system stack in case
		// we're trying to scan our own stack.
		systemstack(func() {
			// If this is a self-scan, put the user G in
			// _Gwaiting to prevent self-deadlock. It may
			// already be in _Gwaiting if this is a mark
			// worker or we're in mark termination.
			userG := getg().m.curg
			selfScan := gp == userG && readgstatus(userG) == _Grunning
			if selfScan {
				casgstatus(userG, _Grunning, _Gwaiting)
				userG.waitreason = waitReasonGarbageCollectionScan
			}

			// TODO: suspendG blocks (and spins) until gp
			// stops, which may take a while for
			// running goroutines. Consider doing this in
			// two phases where the first is non-blocking:
			// we scan the stacks we can and ask running
			// goroutines to scan themselves; and the
			// second blocks.
			stopped := suspendG(gp)
			if stopped.dead {
				gp.gcscandone = true
				return
			}
			if gp.gcscandone {
				throw("g already scanned")
			}
			scanstack(gp, gcw)
			gp.gcscandone = true
			resumeG(stopped)

			if selfScan {
				casgstatus(userG, _Gwaiting, _Grunning)
			}
		})
	}
}

// markrootBlock scans the shard'th shard of the block of memory [b0,
// b0+n0), with the given pointer mask.
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
	if rootBlockBytes%(8*sys.PtrSize) != 0 {
		// This is necessary to pick byte offsets in ptrmask0.
		throw("rootBlockBytes must be a multiple of 8*ptrSize")
	}

	// Note that if b0 is toward the end of the address space,
	// then b0 + rootBlockBytes might wrap around.
	// These tests are written to avoid any possible overflow.
	off := uintptr(shard) * rootBlockBytes
	if off >= n0 {
		return
	}
	b := b0 + off
	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
	n := uintptr(rootBlockBytes)
	if off+n > n0 {
		n = n0 - off
	}

	// Scan this shard.
	scanblock(b, n, ptrmask, gcw, nil)
}

// markrootFreeGStacks frees stacks of dead Gs.
//
// This does not free stacks of dead Gs cached on Ps, but having a few
// cached stacks around isn't a problem.
func markrootFreeGStacks() {
	// Take list of dead Gs with stacks.
	lock(&sched.gFree.lock)
	list := sched.gFree.stack
	sched.gFree.stack = gList{}
	unlock(&sched.gFree.lock)
	if list.empty() {
		return
	}

	// Free stacks.
	q := gQueue{list.head, list.head}
	for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
		stackfree(gp.stack)
		gp.stack.lo = 0
		gp.stack.hi = 0
		// Manipulate the queue directly since the Gs are
		// already all linked the right way.
		q.tail.set(gp)
	}

	// Put Gs back on the free list.
	lock(&sched.gFree.lock)
	sched.gFree.noStack.pushAll(q)
	unlock(&sched.gFree.lock)
}

// markrootSpans marks roots for one shard of markArenas.
//
//go:nowritebarrier
func markrootSpans(gcw *gcWork, shard int) {
	if !go115NewMarkrootSpans {
		oldMarkrootSpans(gcw, shard)
		return
	}
	// Objects with finalizers have two GC-related invariants:
	//
	// 1) Everything reachable from the object must be marked.
	// This ensures that when we pass the object to its finalizer,
	// everything the finalizer can reach will be retained.
	//
	// 2) Finalizer specials (which are not in the garbage
	// collected heap) are roots. In practice, this means the fn
	// field must be scanned.
	sg := mheap_.sweepgen

	// Find the arena and page index into that arena for this shard.
	ai := mheap_.markArenas[shard/(pagesPerArena/pagesPerSpanRoot)]
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	arenaPage := uint(uintptr(shard) * pagesPerSpanRoot % pagesPerArena)

	// Construct slice of bitmap which we'll iterate over.
	specialsbits := ha.pageSpecials[arenaPage/8:]
	specialsbits = specialsbits[:pagesPerSpanRoot/8]
	for i := range specialsbits {
		// Find set bits, which correspond to spans with specials.
		specials := atomic.Load8(&specialsbits[i])
		if specials == 0 {
			continue
		}
		for j := uint(0); j < 8; j++ {
			if specials&(1<<j) == 0 {
				continue
			}
			// Find the span for this bit.
			//
			// This value is guaranteed to be non-nil because having
			// specials implies that the span is in-use, and since we're
			// currently marking we can be sure that we don't have to worry
			// about the span being freed and re-used.
			s := ha.spans[arenaPage+uint(i)*8+j]

			// The state must be mSpanInUse if the specials bit is set, so
			// sanity check that.
			if state := s.state.get(); state != mSpanInUse {
				print("s.state = ", state, "\n")
				throw("non in-use span found with specials bit set")
			}
			// Check that this span was swept (it may be cached or uncached).
			if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
				// sweepgen was updated (+2) during non-checkmark GC pass
				print("sweep ", s.sweepgen, " ", sg, "\n")
				throw("gc: unswept span")
			}

			// Lock the specials to prevent a special from being
			// removed from the list while we're traversing it.
			lock(&s.speciallock)
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				// don't mark finalized object, but scan it so we
				// retain everything it points to.
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				// A finalizer can be set for an inner byte of an object, find object beginning.
				p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize
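
				// Worked example with made-up numbers: for s.base() = 0x1000,
				// elemsize = 64, and offset = 100, integer division gives
				// 100/64*64 = 64, so p = 0x1040, the base of the object that
				// contains the inner byte.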

				// Mark everything that can be reached from
				// the object (but *not* the object itself or
				// we'll never collect it).
				scanobject(p, gcw)

				// The special itself is a root.
				scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
			}
			unlock(&s.speciallock)
		}
	}
}

// oldMarkrootSpans marks roots for one shard of work.spans.
//
// For go115NewMarkrootSpans = false.
//
//go:nowritebarrier
func oldMarkrootSpans(gcw *gcWork, shard int) {
	// Objects with finalizers have two GC-related invariants:
	//
	// 1) Everything reachable from the object must be marked.
	// This ensures that when we pass the object to its finalizer,
	// everything the finalizer can reach will be retained.
	//
	// 2) Finalizer specials (which are not in the garbage
	// collected heap) are roots. In practice, this means the fn
	// field must be scanned.
	//
	// TODO(austin): There are several ideas for making this more
	// efficient in issue #11485.

	sg := mheap_.sweepgen
	spans := mheap_.sweepSpans[mheap_.sweepgen/2%2].block(shard)
	// Note that work.spans may not include spans that were
	// allocated between entering the scan phase and now. We may
	// also race with spans being added into sweepSpans when they're
	// just created, and as a result we may see nil pointers in the
	// spans slice. This is okay because any objects with finalizers
	// in those spans must have been allocated and given finalizers
	// after we entered the scan phase, so addfinalizer will have
	// ensured the above invariants for them.
	for i := 0; i < len(spans); i++ {
		// sweepBuf.block requires that we read pointers from the block atomically.
		// It also requires that we ignore nil pointers.
		s := (*mspan)(atomic.Loadp(unsafe.Pointer(&spans[i])))

		// This is racing with spans being initialized, so
		// check the state carefully.
		if s == nil || s.state.get() != mSpanInUse {
			continue
		}
		// Check that this span was swept (it may be cached or uncached).
		if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
			// sweepgen was updated (+2) during non-checkmark GC pass
			print("sweep ", s.sweepgen, " ", sg, "\n")
			throw("gc: unswept span")
		}

		// Speculatively check if there are any specials
		// without acquiring the span lock. This may race with
		// adding the first special to a span, but in that
		// case addfinalizer will observe that the GC is
		// active (which is globally synchronized) and ensure
		// the above invariants. We may also ensure the
		// invariants, but it's okay to scan an object twice.
		if s.specials == nil {
			continue
		}

		// Lock the specials to prevent a special from being
		// removed from the list while we're traversing it.
		lock(&s.speciallock)
		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialFinalizer {
				continue
			}
			// don't mark finalized object, but scan it so we
			// retain everything it points to.
			spf := (*specialfinalizer)(unsafe.Pointer(sp))
			// A finalizer can be set for an inner byte of an object, find object beginning.
			p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize

			// Mark everything that can be reached from
			// the object (but *not* the object itself or
			// we'll never collect it).
			scanobject(p, gcw)

			// The special itself is a root.
			scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
		}
		unlock(&s.speciallock)
	}
}

// gcAssistAlloc performs GC work to make gp's assist debt positive.
// gp must be the calling user goroutine.
//
// This must be called with preemption enabled.
func gcAssistAlloc(gp *g) {
	// Don't assist in non-preemptible contexts. These are
	// generally fragile and won't allow the assist to block.
	if getg() == gp.m.g0 {
		return
	}
	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
		return
	}

	traced := false
retry:
	// Compute the amount of scan work we need to do to make the
	// balance positive. When the required amount of work is low,
	// we over-assist to build up credit for future allocations
	// and amortize the cost of assisting.
	debtBytes := -gp.gcAssistBytes
	scanWork := int64(gcController.assistWorkPerByte * float64(debtBytes))
	if scanWork < gcOverAssistWork {
		scanWork = gcOverAssistWork
		debtBytes = int64(gcController.assistBytesPerWork * float64(scanWork))
	}
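
	// Worked example with illustrative controller values: if
	// assistWorkPerByte = 0.5 and debtBytes = 100, then scanWork = 50.
	// When gcOverAssistWork is larger than that, scanWork is rounded up
	// to it and debtBytes is recomputed, so the credit granted below
	// stays consistent with the work actually performed.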

	// Steal as much credit as we can from the background GC's
	// scan credit. This is racy and may drop the background
	// credit below 0 if two mutators steal at the same time. This
	// will just cause steals to fail until credit is accumulated
	// again, so in the long run it doesn't really matter, but we
	// do have to handle the negative credit case.
	bgScanCredit := atomic.Loadint64(&gcController.bgScanCredit)
	stolen := int64(0)
	if bgScanCredit > 0 {
		if bgScanCredit < scanWork {
			stolen = bgScanCredit
			gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(stolen))
		} else {
			stolen = scanWork
			gp.gcAssistBytes += debtBytes
		}
		atomic.Xaddint64(&gcController.bgScanCredit, -stolen)

		scanWork -= stolen

		if scanWork == 0 {
			// We were able to steal all of the credit we
			// needed.
			if traced {
				traceGCMarkAssistDone()
			}
			return
		}
	}

	if trace.enabled && !traced {
		traced = true
		traceGCMarkAssistStart()
	}

	// Perform assist work
	systemstack(func() {
		gcAssistAlloc1(gp, scanWork)
		// The user stack may have moved, so this can't touch
		// anything on it until it returns from systemstack.
	})

	completed := gp.param != nil
	gp.param = nil
	if completed {
		gcMarkDone()
	}

	if gp.gcAssistBytes < 0 {
		// We were unable to steal enough credit or perform
		// enough work to pay off the assist debt. We need to
		// do one of these before letting the mutator allocate
		// more to prevent over-allocation.
		//
		// If this is because we were preempted, reschedule
		// and try some more.
		if gp.preempt {
			Gosched()
			goto retry
		}

		// Add this G to an assist queue and park. When the GC
		// has more background credit, it will satisfy queued
		// assists before flushing to the global credit pool.
		//
		// Note that this does *not* get woken up when more
		// work is added to the work list. The theory is that
		// there wasn't enough work to do anyway, so we might
		// as well let background marking take care of the
		// work that is available.
		if !gcParkAssist() {
			goto retry
		}

		// At this point either background GC has satisfied
		// this G's assist debt, or the GC cycle is over.
	}
	if traced {
		traceGCMarkAssistDone()
	}
}

// gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
// stack. This is a separate function to make it easier to see that
// we're not capturing anything from the user stack, since the user
// stack may move while we're in this function.
//
// gcAssistAlloc1 indicates whether this assist completed the mark
// phase by setting gp.param to non-nil. This can't be communicated on
// the stack since it may move.
//
//go:systemstack
func gcAssistAlloc1(gp *g, scanWork int64) {
	// Clear the flag indicating that this assist completed the
	// mark phase.
	gp.param = nil

	if atomic.Load(&gcBlackenEnabled) == 0 {
		// The gcBlackenEnabled check in malloc races with the
		// store that clears it but an atomic check in every malloc
		// would be a performance hit.
		// Instead we recheck it here on the non-preemptible system
		// stack to determine if we should perform an assist.

		// GC is done, so ignore any remaining debt.
		gp.gcAssistBytes = 0
		return
	}
	// Track time spent in this assist. Since we're on the
	// system stack, this is non-preemptible, so we can
	// just measure start and end time.
	startTime := nanotime()

	decnwait := atomic.Xadd(&work.nwait, -1)
	if decnwait == work.nproc {
		println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
		throw("nwait > work.nprocs")
	}

	// gcDrainN requires the caller to be preemptible.
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = waitReasonGCAssistMarking

	// drain own cached work first in the hopes that it
	// will be more cache friendly.
	gcw := &getg().m.p.ptr().gcw
	workDone := gcDrainN(gcw, scanWork)

	casgstatus(gp, _Gwaiting, _Grunning)

	// Record that we did this much scan work.
	//
	// Back out the number of bytes of assist credit that
	// this scan work counts for. The "1+" is a poor man's
	// round-up, to ensure this adds credit even if
	// assistBytesPerWork is very low.
	gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(workDone))

	// If this is the last worker and we ran out of work,
	// signal a completion point.
	incnwait := atomic.Xadd(&work.nwait, +1)
	if incnwait > work.nproc {
		println("runtime: work.nwait=", incnwait,
			"work.nproc=", work.nproc)
		throw("work.nwait > work.nproc")
	}

	if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
		// This has reached a background completion point. Set
		// gp.param to a non-nil value to indicate this. It
		// doesn't matter what we set it to (it just has to be
		// a valid pointer).
		gp.param = unsafe.Pointer(gp)
	}
	duration := nanotime() - startTime
	_p_ := gp.m.p.ptr()
	_p_.gcAssistTime += duration
	if _p_.gcAssistTime > gcAssistTimeSlack {
		atomic.Xaddint64(&gcController.assistTime, _p_.gcAssistTime)
		_p_.gcAssistTime = 0
	}
}

// gcWakeAllAssists wakes all currently blocked assists. This is used
// at the end of a GC cycle. gcBlackenEnabled must be false to prevent
// new assists from going to sleep after this point.
func gcWakeAllAssists() {
	lock(&work.assistQueue.lock)
	list := work.assistQueue.q.popList()
	injectglist(&list)
	unlock(&work.assistQueue.lock)
}

// gcParkAssist puts the current goroutine on the assist queue and parks.
//
// gcParkAssist reports whether the assist is now satisfied. If it
// returns false, the caller must retry the assist.
func gcParkAssist() bool {
	lock(&work.assistQueue.lock)
	// If the GC cycle finished while we were getting the lock,
	// exit the assist. The cycle can't finish while we hold the
	// lock.
	if atomic.Load(&gcBlackenEnabled) == 0 {
		unlock(&work.assistQueue.lock)
		return true
	}

	gp := getg()
	oldList := work.assistQueue.q
	work.assistQueue.q.pushBack(gp)

	// Recheck for background credit now that this G is in
	// the queue, but can still back out. This avoids a
	// race in case background marking has flushed more
	// credit since we checked above.
	if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
		work.assistQueue.q = oldList
		if oldList.tail != 0 {
			oldList.tail.ptr().schedlink.set(nil)
		}
		unlock(&work.assistQueue.lock)
		return false
	}
	// Park.
	goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceEvGoBlockGC, 2)
	return true
}

// gcFlushBgCredit flushes scanWork units of background scan work
// credit. This first satisfies blocked assists on the
// work.assistQueue and then flushes any remaining credit to
// gcController.bgScanCredit.
//
// Write barriers are disallowed because this is used by gcDrain after
// it has ensured that all work is drained and this must preserve that
// condition.
//
//go:nowritebarrierrec
func gcFlushBgCredit(scanWork int64) {
	if work.assistQueue.q.empty() {
		// Fast path; there are no blocked assists. There's a
		// small window here where an assist may add itself to
		// the blocked queue and park. If that happens, we'll
		// just get it on the next flush.
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
		return
	}

	scanBytes := int64(float64(scanWork) * gcController.assistBytesPerWork)
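
	// Unit sketch with assumed values: if assistBytesPerWork = 2.0, then
	// scanWork = 1000 units becomes scanBytes = 2000. The loop below pays
	// those bytes to queued assists; any leftover is converted back to
	// work units and deposited in bgScanCredit.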

	lock(&work.assistQueue.lock)
	for !work.assistQueue.q.empty() && scanBytes > 0 {
		gp := work.assistQueue.q.pop()
		// Note that gp.gcAssistBytes is negative because gp
		// is in debt. Think carefully about the signs below.
		if scanBytes+gp.gcAssistBytes >= 0 {
			// Satisfy this entire assist debt.
			scanBytes += gp.gcAssistBytes
			gp.gcAssistBytes = 0
			// It's important that we *not* put gp in
			// runnext. Otherwise, it's possible for user
			// code to exploit the GC worker's high
			// scheduler priority to get itself always run
			// before other goroutines and always in the
			// fresh quantum started by GC.
			ready(gp, 0, false)
		} else {
			// Partially satisfy this assist.
			gp.gcAssistBytes += scanBytes
			scanBytes = 0
			// As a heuristic, we move this assist to the
			// back of the queue so that large assists
			// can't clog up the assist queue and
			// substantially delay small assists.
			work.assistQueue.q.pushBack(gp)
			break
		}
	}

	if scanBytes > 0 {
		// Convert from scan bytes back to work.
		scanWork = int64(float64(scanBytes) * gcController.assistWorkPerByte)
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
	}
	unlock(&work.assistQueue.lock)
}

// scanstack scans gp's stack, greying all pointers found on the stack.
//
// scanstack will also shrink the stack if it is safe to do so. If it
// is not, it schedules a stack shrink for the next synchronous safe
// point.
//
// scanstack is marked go:systemstack because it must not be preempted
// while using a workbuf.
//
//go:nowritebarrier
//go:systemstack
func scanstack(gp *g, gcw *gcWork) {
	if readgstatus(gp)&_Gscan == 0 {
		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
		throw("scanstack - bad status")
	}

	switch readgstatus(gp) &^ _Gscan {
	default:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("mark - bad status")
	case _Gdead:
		return
	case _Grunning:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("scanstack: goroutine not stopped")
	case _Grunnable, _Gsyscall, _Gwaiting:
		// ok
	}

	if gp == getg() {
		throw("can't scan our own stack")
	}

	if isShrinkStackSafe(gp) {
		// Shrink the stack if not much of it is being used.
		shrinkstack(gp)
	} else {
		// Otherwise, shrink the stack at the next sync safe point.
		gp.preemptShrink = true
	}

	var state stackScanState
	state.stack = gp.stack

	if stackTraceDebug {
		println("stack trace goroutine", gp.goid)
	}

	if debugScanConservative && gp.asyncSafePoint {
		print("scanning async preempted goroutine ", gp.goid, " stack [", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
	}

	// Scan the saved context register. This is effectively a live
	// register that gets moved back and forth between the
	// register and sched.ctxt without a write barrier.
	if gp.sched.ctxt != nil {
		scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), sys.PtrSize, &oneptrmask[0], gcw, &state)
	}

	// Scan the stack. Accumulate a list of stack objects.
	scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
		scanframeworker(frame, &state, gcw)
		return true
	}
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)

	// Find additional pointers that point into the stack from the heap.
	// Currently this includes defers and panics. See also function copystack.

	// Find and trace all defer arguments.
	tracebackdefers(gp, scanframe, nil)

	// Find and trace other pointers in defer records.
	for d := gp._defer; d != nil; d = d.link {
		if d.fn != nil {
			// tracebackdefers above does not scan the func value, which could
			// be a stack allocated closure. See issue 30453.
			scanblock(uintptr(unsafe.Pointer(&d.fn)), sys.PtrSize, &oneptrmask[0], gcw, &state)
		}
		if d.link != nil {
			// The link field of a stack-allocated defer record might point
			// to a heap-allocated defer record. Keep that heap record live.
			scanblock(uintptr(unsafe.Pointer(&d.link)), sys.PtrSize, &oneptrmask[0], gcw, &state)
		}
		// Retain defers records themselves.
		// Defer records might not be reachable from the G through regular heap
		// tracing because the defer linked list might weave between the stack and the heap.
		if d.heap {
			scanblock(uintptr(unsafe.Pointer(&d)), sys.PtrSize, &oneptrmask[0], gcw, &state)
		}
	}
	if gp._panic != nil {
		// Panics are always stack allocated.
		state.putPtr(uintptr(unsafe.Pointer(gp._panic)), false)
	}

	// Find and scan all reachable stack objects.
	//
	// The state's pointer queue prioritizes precise pointers over
	// conservative pointers so that we'll prefer scanning stack
	// objects precisely.
	state.buildIndex()
	for {
		p, conservative := state.getPtr()
		if p == 0 {
			break
		}
		obj := state.findObject(p)
		if obj == nil {
			continue
		}
		t := obj.typ
		if t == nil {
			// We've already scanned this object.
			continue
		}
		obj.setType(nil) // Don't scan it again.
		if stackTraceDebug {
			printlock()
			print(" live stkobj at", hex(state.stack.lo+uintptr(obj.off)), "of type", t.string())
			if conservative {
				print(" (conservative)")
			}
			println()
			printunlock()
		}
		gcdata := t.gcdata
		var s *mspan
		if t.kind&kindGCProg != 0 {
			// This path is pretty unlikely, an object large enough
			// to have a GC program allocated on the stack.
			// We need some space to unpack the program into a straight
			// bitmask, which we allocate/free here.
			// TODO: it would be nice if there were a way to run a GC
			// program without having to store all its bits. We'd have
			// to change from a Lempel-Ziv style program to something else.
			// Or we can forbid putting objects on stacks if they require
			// a gc program (see issue 27447).
			s = materializeGCProg(t.ptrdata, gcdata)
			gcdata = (*byte)(unsafe.Pointer(s.startAddr))
		}

		b := state.stack.lo + uintptr(obj.off)
		if conservative {
			scanConservative(b, t.ptrdata, gcdata, gcw, &state)
		} else {
			scanblock(b, t.ptrdata, gcdata, gcw, &state)
		}

		if s != nil {
			dematerializeGCProg(s)
		}
	}

	// Deallocate object buffers.
	// (Pointer buffers were all deallocated in the loop above.)
	for state.head != nil {
		x := state.head
		state.head = x.next
		if stackTraceDebug {
			for _, obj := range x.obj[:x.nobj] {
				if obj.typ == nil { // reachable
					continue
				}
				println(" dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of type", obj.typ.string())
				// Note: not necessarily really dead - only reachable-from-ptr dead.
			}
		}
		x.nobj = 0
		putempty((*workbuf)(unsafe.Pointer(x)))
	}
	if state.buf != nil || state.cbuf != nil || state.freeBuf != nil {
		throw("remaining pointer buffers")
	}
}

// Scan a stack frame: local variables and function arguments/results.
func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
	if _DebugGC > 1 && frame.continpc != 0 {
		print("scanframe ", funcname(frame.fn), "\n")
	}

	isAsyncPreempt := frame.fn.valid() && frame.fn.funcID == funcID_asyncPreempt
	isDebugCall := frame.fn.valid() && frame.fn.funcID == funcID_debugCallV1
	if state.conservative || isAsyncPreempt || isDebugCall {
		if debugScanConservative {
			println("conservatively scanning function", funcname(frame.fn), "at PC", hex(frame.continpc))
		}

		// Conservatively scan the frame. Unlike the precise
		// case, this includes the outgoing argument space
		// since we may have stopped while this function was
		// setting up a call.
		//
		// TODO: We could narrow this down if the compiler
		// produced a single map per function of stack slots
		// and registers that ever contain a pointer.
		if frame.varp != 0 {
			size := frame.varp - frame.sp
			if size > 0 {
				scanConservative(frame.sp, size, nil, gcw, state)
			}
		}

		// Scan arguments to this frame.
		if frame.arglen != 0 {
			// TODO: We could pass the entry argument map
			// to narrow this down further.
			scanConservative(frame.argp, frame.arglen, nil, gcw, state)
		}

		if isAsyncPreempt || isDebugCall {
			// This function's frame contained the
			// registers for the asynchronously stopped
			// parent frame. Scan the parent
			// conservatively.
			state.conservative = true
		} else {
			// We only wanted to scan those two frames
			// conservatively. Clear the flag for future
			// frames.
			state.conservative = false
		}
		return
	}

	locals, args, objs := getStackMap(frame, &state.cache, false)

	// Scan local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * sys.PtrSize
		scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
	}

	// Scan arguments.
	if args.n > 0 {
		scanblock(frame.argp, uintptr(args.n)*sys.PtrSize, args.bytedata, gcw, state)
	}

	// Add all stack objects to the stack object list.
	if frame.varp != 0 {
		// varp is 0 for defers, where there are no locals.
		// In that case, there can't be a pointer to its args, either.
		// (And all args would be scanned above anyway.)
		for _, obj := range objs {
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			ptr := base + uintptr(off)
			if ptr < frame.sp {
				// object hasn't been allocated in the frame yet.
				continue
			}
			if stackTraceDebug {
				println("stkobj at", hex(ptr), "of type", obj.typ.string())
			}
			state.addObject(ptr, obj.typ)
		}
	}
}

type gcDrainFlags int

const (
	gcDrainUntilPreempt gcDrainFlags = 1 << iota
	gcDrainFlushBgCredit
	gcDrainIdle
	gcDrainFractional
)

// gcDrain scans roots and objects in work buffers, blackening grey
// objects until it is unable to get more work. It may return before
// GC is done; it's the caller's responsibility to balance work from
// other Ps.
//
// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
// is set.
//
// If flags&gcDrainIdle != 0, gcDrain returns when there is other work
// to do.
//
// If flags&gcDrainFractional != 0, gcDrain self-preempts when
// pollFractionalWorkerExit() returns true. This implies
// gcDrainNoBlock.
//
// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
// credit to gcController.bgScanCredit every gcCreditSlack units of
// scan work.
//
// gcDrain will always return if there is a pending STW.
//
//go:nowritebarrier
func gcDrain(gcw *gcWork, flags gcDrainFlags) {
	if !writeBarrier.needed {
		throw("gcDrain phase incorrect")
	}

	gp := getg().m.curg
	preemptible := flags&gcDrainUntilPreempt != 0
	flushBgCredit := flags&gcDrainFlushBgCredit != 0
	idle := flags&gcDrainIdle != 0

	initScanWork := gcw.scanWork

	// checkWork is the scan work before performing the next
	// self-preempt check.
	checkWork := int64(1<<63 - 1)
	var check func() bool
	if flags&(gcDrainIdle|gcDrainFractional) != 0 {
		checkWork = initScanWork + drainCheckThreshold
		if idle {
			check = pollWork
		} else if flags&gcDrainFractional != 0 {
			check = pollFractionalWorkerExit
		}
	}

	// Drain root marking jobs.
	if work.markrootNext < work.markrootJobs {
		// Stop if we're preemptible or if someone wants to STW.
		for !(gp.preempt && (preemptible || atomic.Load(&sched.gcwaiting) != 0)) {
			job := atomic.Xadd(&work.markrootNext, +1) - 1
			if job >= work.markrootJobs {
				break
			}
			markroot(gcw, job)
			if check != nil && check() {
				goto done
			}
		}
	}

	// Drain heap marking jobs.
	// Stop if we're preemptible or if someone wants to STW.
	for !(gp.preempt && (preemptible || atomic.Load(&sched.gcwaiting) != 0)) {
		// Try to keep work available on the global queue. We used to
		// check if there were waiting workers, but it's better to
		// just keep work available than to make workers wait. In the
		// worst case, we'll do O(log(_WorkbufSize)) unnecessary
		// balances.
		if work.full == 0 {
			gcw.balance()
		}

		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
			if b == 0 {
				// Flush the write barrier
				// buffer; this may create
				// more work.
				wbBufFlush(nil, 0)
				b = gcw.tryGet()
			}
		}
		if b == 0 {
			// Unable to get work.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit to the global
		// account if we've accumulated enough locally so
		// mutator assists can draw on it.
		if gcw.scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
			if flushBgCredit {
				gcFlushBgCredit(gcw.scanWork - initScanWork)
				initScanWork = 0
			}
			checkWork -= gcw.scanWork
			gcw.scanWork = 0

			if checkWork <= 0 {
				checkWork += drainCheckThreshold
				if check != nil && check() {
					break
				}
			}
		}
	}

done:
	// Flush remaining scan work credit.
	if gcw.scanWork > 0 {
		atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
		if flushBgCredit {
			gcFlushBgCredit(gcw.scanWork - initScanWork)
		}
		gcw.scanWork = 0
	}
}

// gcDrainN blackens grey objects until it has performed roughly
// scanWork units of scan work or the G is preempted. This is
// best-effort, so it may perform less work if it fails to get a work
// buffer. Otherwise, it will perform at least n units of work, but
// may perform more because scanning is always done in whole object
// increments. It returns the amount of scan work performed.
//
// The caller goroutine must be in a preemptible state (e.g.,
// _Gwaiting) to prevent deadlocks during stack scanning. As a
// consequence, this must be called on the system stack.
//
//go:nowritebarrier
//go:systemstack
func gcDrainN(gcw *gcWork, scanWork int64) int64 {
	if !writeBarrier.needed {
		throw("gcDrainN phase incorrect")
	}

	// There may already be scan work on the gcw, which we don't
	// want to claim was done by this call.
	workFlushed := -gcw.scanWork

	gp := getg().m.curg
	for !gp.preempt && workFlushed+gcw.scanWork < scanWork {
		// See gcDrain comment.
		if work.full == 0 {
			gcw.balance()
		}

		// This might be a good place to add prefetch code...
		// if(wbuf.nobj > 4) {
		//         PREFETCH(wbuf->obj[wbuf.nobj - 3];
		// }
		//
		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
			if b == 0 {
				// Flush the write barrier buffer;
				// this may create more work.
				wbBufFlush(nil, 0)
				b = gcw.tryGet()
			}
		}

		if b == 0 {
			// Try to do a root job.
			//
			// TODO: Assists should get credit for this
			// work.
			if work.markrootNext < work.markrootJobs {
				job := atomic.Xadd(&work.markrootNext, +1) - 1
				if job < work.markrootJobs {
					markroot(gcw, job)
					continue
				}
			}
			// No heap or root jobs.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit.
		if gcw.scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
			workFlushed += gcw.scanWork
			gcw.scanWork = 0
		}
	}

	// Unlike gcDrain, there's no need to flush remaining work
	// here because this never flushes to bgScanCredit and
	// gcw.dispose will flush any remaining work to scanWork.

	return workFlushed + gcw.scanWork
}

// scanblock scans b as scanobject would, but using an explicit
// pointer bitmap instead of the heap bitmap.
//
// This is used to scan non-heap roots, so it does not update
// gcw.bytesMarked or gcw.scanWork.
//
// If stk != nil, possible stack pointers are also reported to stk.putPtr.
//
//go:nowritebarrier
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
	// Use local copies of original parameters, so that a stack trace
	// due to one of the throws below shows the original block
	// base and extent.
	b := b0
	n := n0

	for i := uintptr(0); i < n; {
		// Find bits for the next word.
		bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
		if bits == 0 {
			i += sys.PtrSize * 8
			continue
		}
		for j := 0; j < 8 && i < n; j++ {
			if bits&1 != 0 {
				// Same work as in scanobject; see comments there.
				p := *(*uintptr)(unsafe.Pointer(b + i))
				if p != 0 {
					if obj, span, objIndex := findObject(p, b, i); obj != 0 {
						greyobject(obj, b, i, span, gcw, objIndex)
					} else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
						stk.putPtr(p, false)
					}
				}
			}
			bits >>= 1
			i += sys.PtrSize
		}
	}
}
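
// ptrmask encoding, for illustration (an assumed layout sketch: one bit per
// pointer-sized word, least-significant bit first). For a hypothetical
//
//	type T struct {
//		p *int // word 0: pointer
//		x int  // word 1: scalar
//		q *int // word 2: pointer
//	}
//
// the mask byte would be 0b101, so scanblock loads words 0 and 2 as
// candidate pointers and skips word 1.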

// scanobject scans the object starting at b, adding pointers to gcw.
// b must point to the beginning of a heap object or an oblet.
// scanobject consults the GC bitmap for the pointer mask and the
// spans for the size of the object.
//
//go:nowritebarrier
func scanobject(b uintptr, gcw *gcWork) {
	// Find the bits for b and the size of the object at b.
	//
	// b is either the beginning of an object, in which case this
	// is the size of the object to scan, or it points to an
	// oblet, in which case we compute the size to scan below.
	hbits := heapBitsForAddr(b)
	s := spanOfUnchecked(b)
	n := s.elemsize
	if n == 0 {
		throw("scanobject n == 0")
	}

	if n > maxObletBytes {
		// Large object. Break into oblets for better
		// parallelism and lower latency.
		if b == s.base() {
			// It's possible this is a noscan object (not
			// from greyobject, but from other code
			// paths), in which case we must *not* enqueue
			// oblets since their bitmaps will be
			// uninitialized.
			if s.spanclass.noscan() {
				// Bypass the whole scan.
				gcw.bytesMarked += uint64(n)
				return
			}

			// Enqueue the other oblets to scan later.
			// Some oblets may be in b's scalar tail, but
			// these will be marked as "no more pointers",
			// so we'll drop out immediately when we go to
			// scan those.
			for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
				if !gcw.putFast(oblet) {
					gcw.put(oblet)
				}
			}
		}

		// Compute the size of the oblet. Since this object
		// must be a large object, s.base() is the beginning
		// of the object.
		n = s.base() + s.elemsize - b
		if n > maxObletBytes {
			n = maxObletBytes
		}
	}

	var i uintptr
	for i = 0; i < n; i += sys.PtrSize {
		// Find bits for this word.
		if i != 0 {
			// Avoid needless hbits.next() on last iteration.
			hbits = hbits.next()
		}
		// Load bits once. See CL 22712 and issue 16973 for discussion.
		bits := hbits.bits()
		// During checkmarking, 1-word objects store the checkmark
		// in the type bit for the one word. The only one-word objects
		// are pointers, or else they'd be merged with other non-pointer
		// data into larger allocations.
		if i != 1*sys.PtrSize && bits&bitScan == 0 {
			break // no more pointers in this object
		}
		if bits&bitPointer == 0 {
			continue // not a pointer
		}

		// Work here is duplicated in scanblock and above.
		// If you make changes here, make changes there too.
		obj := *(*uintptr)(unsafe.Pointer(b + i))

		// At this point we have extracted the next potential pointer.
		// Quickly filter out nil and pointers back to the current object.
		if obj != 0 && obj-b >= n {
			// Test if obj points into the Go heap and, if so,
			// mark the object.
			//
			// Note that it's possible for findObject to
			// fail if obj points to a just-allocated heap
			// object because of a race with growing the
			// heap. In this case, we know the object was
			// just allocated and hence will be marked by
			// allocation itself.
			if obj, span, objIndex := findObject(obj, b, i); obj != 0 {
				greyobject(obj, b, i, span, gcw, objIndex)
			}
		}
	}
	gcw.bytesMarked += uint64(n)
	gcw.scanWork += int64(i)
}
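
// The nil/self-pointer filter above leans on unsigned wraparound: when
// obj < b, obj-b underflows to a huge value, so the single comparison
// obj-b >= n rejects nil, pointers back into [b, b+n), and everything
// before b at once. A worked example with made-up addresses:
//
//	b, n := uintptr(0x4000), uintptr(0x100)
//	obj := uintptr(0x3ff8) // points before b
//	// obj-b wraps to 1<<64 - 8 on 64-bit, which is >= n, so it's skipped.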

// scanConservative scans block [b, b+n) conservatively, treating any
// pointer-like value in the block as a pointer.
//
// If ptrmask != nil, only words that are marked in ptrmask are
// considered as potential pointers.
//
// If state != nil, it's assumed that [b, b+n) is a block in the stack
// and may contain pointers to stack objects.
func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState) {
	if debugScanConservative {
		printlock()
		print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
		hexdumpWords(b, b+n, func(p uintptr) byte {
			if ptrmask != nil {
				word := (p - b) / sys.PtrSize
				bits := *addb(ptrmask, word/8)
				if (bits>>(word%8))&1 == 0 {
					return '$'
				}
			}

			val := *(*uintptr)(unsafe.Pointer(p))
			if state != nil && state.stack.lo <= val && val < state.stack.hi {
				return '@'
			}

			span := spanOfHeap(val)
			if span == nil {
				return ' '
			}
			idx := span.objIndex(val)
			if span.isFree(idx) {
				return ' '
			}
			return '*'
		})
		printunlock()
	}

	for i := uintptr(0); i < n; i += sys.PtrSize {
		if ptrmask != nil {
			word := i / sys.PtrSize
			bits := *addb(ptrmask, word/8)
			if bits == 0 {
				// Skip 8 words (the loop increment will do the 8th)
				//
				// This must be the first time we've
				// seen this word of ptrmask, so i
				// must be 8-word-aligned, but check
				// our reasoning just in case.
				if i%(sys.PtrSize*8) != 0 {
					throw("misaligned mask")
				}
				i += sys.PtrSize*8 - sys.PtrSize
				continue
			}
			if (bits>>(word%8))&1 == 0 {
				continue
			}
		}

		val := *(*uintptr)(unsafe.Pointer(b + i))

		// Check if val points into the stack.
		if state != nil && state.stack.lo <= val && val < state.stack.hi {
			// val may point to a stack object. This
			// object may be dead from last cycle and
			// hence may contain pointers to unallocated
			// objects, but unlike heap objects we can't
			// tell if it's already dead. Hence, if all
			// pointers to this object are from
			// conservative scanning, we have to scan it
			// defensively, too.
			state.putPtr(val, true)
			continue
		}

		// Check if val points to a heap span.
		span := spanOfHeap(val)
		if span == nil {
			continue
		}

		// Check if val points to an allocated object.
		idx := span.objIndex(val)
		if span.isFree(idx) {
			continue
		}

		// val points to an allocated object. Mark it.
		obj := span.base() + idx*span.elemsize
		greyobject(obj, b, i, span, gcw, idx)
	}
}
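
// Object-base arithmetic used above, with illustrative numbers: for
// span.base() = 0x2000 and elemsize = 48, a candidate val = 0x2075 gives
// idx = (0x2075-0x2000)/48 = 2 and obj = 0x2000 + 2*48 = 0x2060, the base
// of the object conservatively kept alive.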

// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
// Preemption must be disabled.
//
//go:nowritebarrier
func shade(b uintptr) {
	if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {
		gcw := &getg().m.p.ptr().gcw
		greyobject(obj, 0, 0, span, gcw, objIndex)
	}
}

// obj is the start of an object with mark mbits.
// If it isn't already marked, mark it and enqueue into gcw.
// base and off are for debugging only and could be removed.
//
// See also wbBufFlush1, which partially duplicates this logic.
//
//go:nowritebarrierrec
func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
	// obj should be start of allocation, and so must be at least pointer-aligned.
	if obj&(sys.PtrSize-1) != 0 {
		throw("greyobject: obj not pointer-aligned")
	}
	mbits := span.markBitsForIndex(objIndex)

	if useCheckmark {
		if !mbits.isMarked() {
			printlock()
			print("runtime:greyobject: checkmarks finds unexpected unmarked object obj=", hex(obj), "\n")
			print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")

			// Dump the source (base) object
			gcDumpObject("base", base, off)

			// Dump the object
			gcDumpObject("obj", obj, ^uintptr(0))

			getg().m.traceback = 2
			throw("checkmark found unmarked object")
		}
		hbits := heapBitsForAddr(obj)
		if hbits.isCheckmarked(span.elemsize) {
			return
		}
		hbits.setCheckmarked(span.elemsize)
		if !hbits.isCheckmarked(span.elemsize) {
			throw("setCheckmarked and isCheckmarked disagree")
		}
	} else {
		if debug.gccheckmark > 0 && span.isFree(objIndex) {
			print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
			gcDumpObject("base", base, off)
			gcDumpObject("obj", obj, ^uintptr(0))
			getg().m.traceback = 2
			throw("marking free object")
		}

		// If marked we have nothing to do.
		if mbits.isMarked() {
			return
		}
		mbits.setMarked()

		// Mark span.
		arena, pageIdx, pageMask := pageIndexOf(span.base())
		if arena.pageMarks[pageIdx]&pageMask == 0 {
			atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
		}

		// If this is a noscan object, fast-track it to black
		// instead of greying it.
		if span.spanclass.noscan() {
			gcw.bytesMarked += uint64(span.elemsize)
			return
		}
	}

	// Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
	// seems like a nice optimization that can be added back in.
	// There needs to be time between the PREFETCH and the use.
	// Previously we put the obj in an 8 element buffer that is drained at a rate
	// to give the PREFETCH time to do its work.
	// Use of PREFETCHNTA might be more appropriate than PREFETCH
	if !gcw.putFast(obj) {
		gcw.put(obj)
	}
}

// gcDumpObject dumps the contents of obj for debugging and marks the
// field at byte offset off in obj.
func gcDumpObject(label string, obj, off uintptr) {
	s := spanOf(obj)
	print(label, "=", hex(obj))
	if s == nil {
		print(" s=nil\n")
		return
	}
	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
	if state := s.state.get(); 0 <= state && int(state) < len(mSpanStateNames) {
		print(mSpanStateNames[state], "\n")
	} else {
		print("unknown(", state, ")\n")
	}

	skipped := false
	size := s.elemsize
	if s.state.get() == mSpanManual && size == 0 {
		// We're printing something from a stack frame. We
		// don't know how big it is, so just show up to and
		// including off.
		size = off + sys.PtrSize
	}
	for i := uintptr(0); i < size; i += sys.PtrSize {
		// For big objects, just print the beginning (because
		// that usually hints at the object's type) and the
		// fields around off.
		if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
			skipped = true
			continue
		}
		if skipped {
			print(" ...\n")
			skipped = false
		}
		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
		if i == off {
			print(" <==")
		}
		print("\n")
	}
	if skipped {
		print(" ...\n")
	}
}

// gcmarknewobject marks a newly allocated object black. obj must
// not contain any non-nil pointers.
//
// This is nosplit so it can manipulate a gcWork without preemption.
//
//go:nowritebarrier
//go:nosplit
func gcmarknewobject(span *mspan, obj, size, scanSize uintptr) {
	if useCheckmark { // The world should be stopped so this should not happen.
		throw("gcmarknewobject called while doing checkmark")
	}

	// Mark object.
	objIndex := span.objIndex(obj)
	span.markBitsForIndex(objIndex).setMarked()

	// Mark span.
	arena, pageIdx, pageMask := pageIndexOf(span.base())
	if arena.pageMarks[pageIdx]&pageMask == 0 {
		atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
	}

	gcw := &getg().m.p.ptr().gcw
	gcw.bytesMarked += uint64(size)
	gcw.scanWork += int64(scanSize)
}

// gcMarkTinyAllocs greys all active tiny alloc blocks.
//
// The world must be stopped.
func gcMarkTinyAllocs() {
	for _, p := range allp {
		c := p.mcache
		if c == nil || c.tiny == 0 {
			continue
		}
		_, span, objIndex := findObject(c.tiny, 0, 0)
		gcw := &p.gcw
		greyobject(c.tiny, 0, 0, span, gcw, objIndex)
	}
}

// To help debug the concurrent GC we remark with the world
// stopped ensuring that any object encountered has its normal
// mark bit set. To do this we use an orthogonal bit
// pattern to indicate the object is marked. The following pattern
// uses the upper two bits in the object's boundary nibble.
// 01: scalar  not marked
// 10: pointer not marked
// 11: pointer marked
// 00: scalar  marked
// XORing with 01 will flip the pattern from marked to unmarked and vice versa.
// The higher bit is 1 for pointers and 0 for scalars, whether the object
// is marked or not.
// The first nibble no longer holds the typeDead pattern indicating that
// there are no more pointers in the object. This information is held
// in the second nibble.

// If useCheckmark is true, marking of an object uses the
// checkmark bits (encoding above) instead of the standard
// mark bits.
var useCheckmark = false

func initCheckmarks() {
	useCheckmark = true
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			heapBitsForAddr(s.base()).initCheckmarkSpan(s.layout())
		}
	}
}

func clearCheckmarks() {
	useCheckmark = false
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			heapBitsForAddr(s.base()).clearCheckmarkSpan(s.layout())
		}
	}
}