// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: sweeping

// The sweeper consists of two different algorithms:
//
// * The object reclaimer finds and frees unmarked slots in spans. It
//   can free a whole span if none of the objects are marked, but that
//   isn't its goal. This can be driven either synchronously by
//   mcentral.cacheSpan for mcentral spans, or asynchronously by
//   sweepone, which looks at all the mcentral lists.
//
// * The span reclaimer looks for spans that contain no marked objects
//   and frees whole spans. This is a separate algorithm because
//   freeing whole spans is the hardest task for the object reclaimer,
//   but is critical when allocating new spans. The entry point for
//   this is mheap_.reclaim and it's driven by a sequential scan of
//   the page marks bitmap in the heap arenas.
//
// Both algorithms ultimately call mspan.sweep, which sweeps a single
// heap span.
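//
// For example, finishsweep_m below drives the object reclaimer to
// completion with a simple loop (see that function for context):
//
//	for sweepone() != ^uintptr(0) {
//	}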
28 "internal/goexperiment"
29 "runtime/internal/atomic"

var sweep sweepdata

// State of background sweep.
type sweepdata struct {
	lock   mutex
	g      *g
	parked bool

	// active tracks outstanding sweepers and the sweep
	// termination condition.
	active activeSweep

	// centralIndex is the current unswept span class.
	// It represents an index into the mcentral span
	// sets. Accessed and updated via its load and
	// update methods. Not protected by a lock.
	//
	// Reset at mark termination.
	// Used by mheap.nextSpanForSweep.
	centralIndex sweepClass
}

// sweepClass is a spanClass and one bit to represent whether we're currently
// sweeping partial or full spans.
type sweepClass uint32

const (
	numSweepClasses            = numSpanClasses * 2
	sweepClassDone  sweepClass = sweepClass(^uint32(0))
)

func (s *sweepClass) load() sweepClass {
	return sweepClass(atomic.Load((*uint32)(s)))
}

func (s *sweepClass) update(sNew sweepClass) {
	// Only update *s if its current value is less than sNew,
	// since *s increases monotonically.
	sOld := s.load()
	for sOld < sNew && !atomic.Cas((*uint32)(s), uint32(sOld), uint32(sNew)) {
		sOld = s.load()
	}
	// TODO(mknyszek): This isn't the only place we have
	// an atomic monotonically increasing counter. It would
	// be nice to have an "atomic max" which is just implemented
	// as the above on most architectures. Some architectures
	// like RISC-V however have native support for an atomic max.
}
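
// A minimal sketch of the generic "atomic max" the TODO above refers to,
// built from the same load/CAS loop (hypothetical helper, not part of the
// runtime):
func atomicMaxUint32(addr *uint32, v uint32) {
	for {
		old := atomic.Load(addr)
		if old >= v || atomic.Cas(addr, old, v) {
			return
		}
	}
}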

func (s *sweepClass) clear() {
	atomic.Store((*uint32)(s), 0)
}

// split returns the underlying span class as well as
// whether we're interested in the full or partial
// unswept lists for that class, indicated as a boolean
// (true means "full").
func (s sweepClass) split() (spc spanClass, full bool) {
	return spanClass(s >> 1), s&1 == 0
}
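
// A sketch of the inverse of split, shown only to illustrate the encoding
// (hypothetical helper, not part of the runtime): the span class occupies
// the upper bits and the low bit selects the full (0) or partial (1)
// unswept list.
func joinSweepClass(spc spanClass, full bool) sweepClass {
	sc := sweepClass(spc) << 1
	if !full {
		sc |= 1
	}
	return sc
}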

// nextSpanForSweep finds and pops the next span for sweeping from the
// central sweep buffers. It returns ownership of the span to the caller.
// Returns nil if no such span exists.
func (h *mheap) nextSpanForSweep() *mspan {
	sg := h.sweepgen
	for sc := sweep.centralIndex.load(); sc < numSweepClasses; sc++ {
		spc, full := sc.split()
		c := &h.central[spc].mcentral
		var s *mspan
		if full {
			s = c.fullUnswept(sg).pop()
		} else {
			s = c.partialUnswept(sg).pop()
		}
		if s != nil {
			// Write down that we found something so future sweepers
			// can start from here.
			sweep.centralIndex.update(sc)
			return s
		}
	}
	// Write down that we found nothing.
	sweep.centralIndex.update(sweepClassDone)
	return nil
}

const sweepDrainedMask = 1 << 31

// activeSweep is a type that captures whether sweeping
// is done, and whether there are any outstanding sweepers.
//
// Every potential sweeper must call begin() before they look
// for work, and end() after they've finished sweeping.
type activeSweep struct {
	// state is divided into two parts.
	//
	// The top bit (masked by sweepDrainedMask) is a boolean
	// value indicating whether all the sweep work has been
	// drained from the queue.
	//
	// The rest of the bits are a counter, indicating the
	// number of outstanding concurrent sweepers.
	//
	// Operated on with atomics.
	state atomic.Uint32
}
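
// A minimal sketch of how the packed state word decodes, assuming the layout
// described above (hypothetical helpers for illustration; the methods below
// operate on the raw word directly):
func sweepStateDrained(state uint32) bool    { return state&sweepDrainedMask != 0 }
func sweepStateSweepers(state uint32) uint32 { return state &^ sweepDrainedMask }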

// begin registers a new sweeper. Returns a sweepLocker
// for acquiring spans for sweeping. Any outstanding sweeper blocks
// sweep termination.
//
// If the sweepLocker is invalid, the caller can be sure that all
// outstanding sweep work has been drained, so there is nothing left
// to sweep. Note that there may be sweepers currently running, so
// this does not indicate that all sweeping has completed.
//
// Even if the sweepLocker is invalid, its sweepGen is always valid.
func (a *activeSweep) begin() sweepLocker {
	for {
		state := a.state.Load()
		if state&sweepDrainedMask != 0 {
			return sweepLocker{mheap_.sweepgen, false}
		}
		if a.state.CompareAndSwap(state, state+1) {
			return sweepLocker{mheap_.sweepgen, true}
		}
	}
}

// end deregisters a sweeper. Must be called once for each time
// begin is called if the sweepLocker is valid.
func (a *activeSweep) end(sl sweepLocker) {
	if sl.sweepGen != mheap_.sweepgen {
		throw("sweeper left outstanding across sweep generations")
	}
	for {
		state := a.state.Load()
		if (state&^sweepDrainedMask)-1 >= sweepDrainedMask {
			throw("mismatched begin/end of activeSweep")
		}
		if a.state.CompareAndSwap(state, state-1) {
			if state != sweepDrainedMask {
				return
			}
			if debug.gcpacertrace > 0 {
				live := gcController.heapLive.Load()
				print("pacer: sweep done at heap size ", live>>20, "MB; allocated ", (live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept.Load(), " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
			}
			return
		}
	}
}

// markDrained marks the active sweep cycle as having drained
// all remaining work. This is safe to be called concurrently
// with all other methods of activeSweep, though may race.
//
// Returns true if this call was the one that actually performed
// the transition.
func (a *activeSweep) markDrained() bool {
	for {
		state := a.state.Load()
		if state&sweepDrainedMask != 0 {
			return false
		}
		if a.state.CompareAndSwap(state, state|sweepDrainedMask) {
			return true
		}
	}
}

// sweepers returns the current number of active sweepers.
func (a *activeSweep) sweepers() uint32 {
	return a.state.Load() &^ sweepDrainedMask
}

// isDone returns true if all sweep work has been drained and no more
// outstanding sweepers exist. That is, when the sweep phase is
// completely done.
func (a *activeSweep) isDone() bool {
	return a.state.Load() == sweepDrainedMask
}

// reset sets up the activeSweep for the next sweep cycle.
//
// The world must be stopped.
func (a *activeSweep) reset() {
	assertWorldStopped()
	a.state.Store(0)
}

// finishsweep_m ensures that all spans are swept.
//
// The world must be stopped. This ensures there are no sweeps in
// progress.
//
//go:nowritebarrier
func finishsweep_m() {
	assertWorldStopped()

	// Sweeping must be complete before marking commences, so
	// sweep any unswept spans. If this is a concurrent GC, there
	// shouldn't be any spans left to sweep, so this should finish
	// instantly. If GC was forced before the concurrent sweep
	// finished, there may be spans to sweep.
	for sweepone() != ^uintptr(0) {
	}

	// Make sure there aren't any outstanding sweepers left.
	// At this point, with the world stopped, it means one of two
	// things. Either we were able to preempt a sweeper, or that
	// a sweeper didn't call sweep.active.end when it should have.
	// Both cases indicate a bug, so throw.
	if sweep.active.sweepers() != 0 {
		throw("active sweepers found at start of mark phase")
	}

	// Reset all the unswept buffers, which should be empty.
	// Do this in sweep termination as opposed to mark termination
	// so that we can catch unswept spans and reclaim blocks as
	// soon as possible.
	sg := mheap_.sweepgen
	for i := range mheap_.central {
		c := &mheap_.central[i].mcentral
		c.partialUnswept(sg).reset()
		c.fullUnswept(sg).reset()
	}

	// Sweeping is done, so there won't be any new memory to
	// scavenge for a bit.
	//
	// If the scavenger isn't already awake, wake it up. There's
	// definitely work for it to do at this point.
	scavenger.wake()

	nextMarkBitArenaEpoch()
}

func bgsweep(c chan int) {
	sweep.g = getg()

	lockInit(&sweep.lock, lockRankSweep)
	lock(&sweep.lock)
	sweep.parked = true
	c <- 1
	goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceBlockGCSweep, 1)

	for {
		// bgsweep attempts to be a "low priority" goroutine by intentionally
		// yielding time. It's OK if it doesn't run, because goroutines allocating
		// memory will sweep and ensure that all spans are swept before the next
		// GC cycle. We really only want to run when we're idle.
		//
		// However, calling Gosched after each span swept produces a tremendous
		// amount of tracing events, sometimes up to 50% of events in a trace. It's
		// also inefficient to call into the scheduler so much because sweeping a
		// single span is in general a very fast operation, taking as little as 30 ns
		// on modern hardware. (See #54767.)
		//
		// As a result, bgsweep sweeps in batches, and only calls into the scheduler
		// at the end of every batch. Furthermore, it only yields its time if there
		// isn't spare idle time available on other cores. If there's available idle
		// time, helping to sweep can reduce allocation latencies by getting ahead of
		// the proportional sweeper and having spans ready to go for allocation.
		const sweepBatchSize = 10
		nSwept := 0
		for sweepone() != ^uintptr(0) {
			nSwept++
			if nSwept%sweepBatchSize == 0 {
				goschedIfBusy()
			}
		}
		for freeSomeWbufs(true) {
			// N.B. freeSomeWbufs is already batched internally.
			goschedIfBusy()
		}
		lock(&sweep.lock)
		if !isSweepDone() {
			// This can happen if a GC runs between
			// sweepone returning ^0 above
			// and the lock being acquired.
			unlock(&sweep.lock)
			continue
		}
		sweep.parked = true
		goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceBlockGCSweep, 1)
	}
}
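
// A sketch of how bgsweep is typically started during runtime start-up,
// assuming the usual pattern of handing it a channel so the caller can wait
// for it to initialize (illustrative only; the real setup lives in gcenable):
//
//	c := make(chan int, 1)
//	go bgsweep(c)
//	<-c // bgsweep has initialized and is about to park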

// sweepLocker acquires sweep ownership of spans.
type sweepLocker struct {
	// sweepGen is the sweep generation of the heap.
	sweepGen uint32
	valid    bool
}

// sweepLocked represents sweep ownership of a span.
type sweepLocked struct {
	*mspan
}

// tryAcquire attempts to acquire sweep ownership of span s. If it
// successfully acquires ownership, it blocks sweep completion.
func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool) {
	if !l.valid {
		throw("use of invalid sweepLocker")
	}
	// Check before attempting to CAS.
	if atomic.Load(&s.sweepgen) != l.sweepGen-2 {
		return sweepLocked{}, false
	}
	// Attempt to acquire sweep ownership of s.
	if !atomic.Cas(&s.sweepgen, l.sweepGen-2, l.sweepGen-1) {
		return sweepLocked{}, false
	}
	return sweepLocked{s}, true
}
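
// For reference, the sweepgen values tryAcquire distinguishes, relative to
// the heap's current sweepgen g (this is the span sweep-state convention
// used throughout the heap; the helper below is only an illustrative
// sketch, not runtime code):
//
//	g-2: the span needs sweeping
//	g-1: the span is currently being swept
//	g:   the span has been swept and is ready to use
//	g+1: the span was cached before sweeping began and still needs sweeping
//	g+3: the span was swept and then cached, and is still cached
func spanNeedsSweeping(s *mspan, sweepGen uint32) bool {
	return atomic.Load(&s.sweepgen) == sweepGen-2
}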

// sweepone sweeps some unswept heap span and returns the number of pages returned
// to the heap, or ^uintptr(0) if there was nothing to sweep.
func sweepone() uintptr {
	gp := getg()

	// Increment locks to ensure that the goroutine is not preempted in the
	// middle of sweep, thus leaving the span in an inconsistent state for
	// the next GC.
	gp.m.locks++

	// TODO(austin): sweepone is almost always called in a loop;
	// lift the sweepLocker into its callers.
	sl := sweep.active.begin()
	if !sl.valid {
		gp.m.locks--
		return ^uintptr(0)
	}

	// Find a span to sweep.
	npages := ^uintptr(0)
	var noMoreWork bool
	for {
		s := mheap_.nextSpanForSweep()
		if s == nil {
			noMoreWork = sweep.active.markDrained()
			break
		}
		if state := s.state.get(); state != mSpanInUse {
			// This can happen if direct sweeping already
			// swept this span, but in that case the sweep
			// generation should always be up-to-date.
			if !(s.sweepgen == sl.sweepGen || s.sweepgen == sl.sweepGen+3) {
				print("runtime: bad span s.state=", state, " s.sweepgen=", s.sweepgen, " sweepgen=", sl.sweepGen, "\n")
				throw("non in-use span in unswept list")
			}
			continue
		}
		if s, ok := sl.tryAcquire(s); ok {
			// Sweep the span we found.
			npages = s.npages
			if s.sweep(false) {
				// Whole span was freed. Count it toward the
				// page reclaimer credit since these pages can
				// now be used for span allocation.
				mheap_.reclaimCredit.Add(npages)
			} else {
				// Span is still in-use, so this returned no
				// pages to the heap and the span needs to
				// move to the swept in-use list.
				npages = 0
			}
			break
		}
	}
	sweep.active.end(sl)

	if noMoreWork {
		// The sweep list is empty. There may still be
		// concurrent sweeps running, but we're at least very
		// close to done sweeping.
		//
		// Move the scavenge gen forward (signaling
		// that there's new work to do) and wake the scavenger.
		//
		// The scavenger is signaled by the last sweeper because once
		// sweeping is done, we will definitely have useful work for
		// the scavenger to do, since the scavenger only runs over the
		// heap once per GC cycle. This update is not done during sweep
		// termination because in some cases there may be a long delay
		// between sweep done and sweep termination (e.g. not enough
		// allocations to trigger a GC) which would be nice to fill in
		// with scavenging work.
		if debug.scavtrace > 0 {
			systemstack(func() {
				lock(&mheap_.lock)

				// Get released stats.
				releasedBg := mheap_.pages.scav.releasedBg.Load()
				releasedEager := mheap_.pages.scav.releasedEager.Load()

				// Print the line.
				printScavTrace(releasedBg, releasedEager, false)

				// Update the stats.
				mheap_.pages.scav.releasedBg.Add(-releasedBg)
				mheap_.pages.scav.releasedEager.Add(-releasedEager)
				unlock(&mheap_.lock)
			})
		}
		scavenger.ready()
	}

	gp.m.locks--
	return npages
}

// isSweepDone reports whether all spans are swept.
//
// Note that this condition may transition from false to true at any
// time as the sweeper runs. It may transition from true to false if a
// GC runs; to prevent that the caller must be non-preemptible or must
// somehow block GC progress.
func isSweepDone() bool {
	return sweep.active.isDone()
}

// Returns only when span s has been swept.
//
//go:nowritebarrier
func (s *mspan) ensureSwept() {
	// Caller must disable preemption.
	// Otherwise when this function returns the span can become unswept again
	// (if GC is triggered on another goroutine).
	gp := getg()
	if gp.m.locks == 0 && gp.m.mallocing == 0 && gp != gp.m.g0 {
		throw("mspan.ensureSwept: m is not locked")
	}

	// If this operation fails, then that means that there are
	// no more spans to be swept. In this case, either s has already
	// been swept, or is about to be acquired for sweeping and swept.
	sl := sweep.active.begin()
	if sl.valid {
		// The caller must be sure that the span is a mSpanInUse span.
		if s, ok := sl.tryAcquire(s); ok {
			s.sweep(false)
			sweep.active.end(sl)
			return
		}
		sweep.active.end(sl)
	}

	// Unfortunately we can't sweep the span ourselves. Somebody else
	// got to it first. We don't have efficient means to wait, but that's
	// OK, it will be swept fairly soon.
	for {
		spangen := atomic.Load(&s.sweepgen)
		if spangen == sl.sweepGen || spangen == sl.sweepGen+3 {
			break
		}
		osyield()
	}
}

// sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to heap.
// If preserve=true, don't return it to heap nor relink in mcentral lists;
// caller takes care of it.
func (sl *sweepLocked) sweep(preserve bool) bool {
	// It's critical that we enter this function with preemption disabled,
	// GC must not start while we are in the middle of this function.
	gp := getg()
	if gp.m.locks == 0 && gp.m.mallocing == 0 && gp != gp.m.g0 {
		throw("mspan.sweep: m is not locked")
	}

	s := sl.mspan
	if !preserve {
		// We'll release ownership of this span. Nil it out to
		// prevent the caller from accidentally using it.
		sl.mspan = nil
	}

	sweepgen := mheap_.sweepgen
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state")
	}

	if traceEnabled() {
		traceGCSweepSpan(s.npages * _PageSize)
	}

	mheap_.pagesSwept.Add(int64(s.npages))

	spc := s.spanclass
	size := s.elemsize

	// The allocBits indicate which unmarked objects don't need to be
	// processed since they were free at the end of the last GC cycle
	// and were not allocated since then.
	// If the allocBits index is >= s.freeindex and the bit
	// is not marked then the object remains unallocated
	// since the last GC.
	// This situation is analogous to being on a freelist.

	// Unlink & free special records for any objects we're about to free.
	// Two complications here:
	// 1. An object can have both finalizer and profile special records.
	//    In such a case we need to queue the finalizer for execution,
	//    mark the object as live and preserve the profile special.
	// 2. A tiny object can have several finalizers set up for different offsets.
	//    If such an object is not marked, we need to queue all finalizers at once.
	// Both 1 and 2 are possible at the same time.
	hadSpecials := s.specials != nil
	siter := newSpecialsIter(s)
	for siter.valid() {
		// A finalizer can be set for an inner byte of an object, find object beginning.
		objIndex := uintptr(siter.s.offset) / size
		p := s.base() + objIndex*size
		mbits := s.markBitsForIndex(objIndex)
		if !mbits.isMarked() {
			// This object is not marked and has at least one special record.
			// Pass 1: see if it has at least one finalizer.
			hasFin := false
			endOffset := p - s.base() + size
			for tmp := siter.s; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
				if tmp.kind == _KindSpecialFinalizer {
					// Stop freeing of object if it has a finalizer.
					mbits.setMarkedNonAtomic()
					hasFin = true
					break
				}
			}
			// Pass 2: queue all finalizers _or_ handle profile record.
			for siter.valid() && uintptr(siter.s.offset) < endOffset {
				// Find the exact byte for which the special was setup
				// (as opposed to object beginning).
				special := siter.s
				p := s.base() + uintptr(special.offset)
				if special.kind == _KindSpecialFinalizer || !hasFin {
					siter.unlinkAndNext()
					freeSpecial(special, unsafe.Pointer(p), size)
				} else {
					// The object has finalizers, so we're keeping it alive.
					// All other specials only apply when an object is freed,
					// so just keep the special record.
					siter.next()
				}
			}
		} else {
			// object is still live
			if siter.s.kind == _KindSpecialReachable {
				special := siter.unlinkAndNext()
				(*specialReachable)(unsafe.Pointer(special)).reachable = true
				freeSpecial(special, unsafe.Pointer(p), size)
			} else {
				// keep special record
				siter.next()
			}
		}
	}
	if hadSpecials && s.specials == nil {
		spanHasNoSpecials(s)
	}

	if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled || asanenabled {
		// Find all newly freed objects. This doesn't have to be
		// efficient; allocfreetrace has massive overhead.
		mbits := s.markBitsForBase()
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < uintptr(s.nelems); i++ {
			if !mbits.isMarked() && (abits.index < uintptr(s.freeindex) || abits.isMarked()) {
				x := s.base() + i*s.elemsize
				if debug.allocfreetrace != 0 {
					tracefree(unsafe.Pointer(x), size)
				}
				if debug.clobberfree != 0 {
					clobberfree(unsafe.Pointer(x), size)
				}
				// User arenas are handled on explicit free.
				if raceenabled && !s.isUserArenaChunk {
					racefree(unsafe.Pointer(x), size)
				}
				if msanenabled && !s.isUserArenaChunk {
					msanfree(unsafe.Pointer(x), size)
				}
				if asanenabled && !s.isUserArenaChunk {
					asanpoison(unsafe.Pointer(x), size)
				}
			}
			mbits.advance()
			abits.advance()
		}
	}

	// Check for zombie objects.
	if s.freeindex < s.nelems {
		// Everything < freeindex is allocated and hence
		// cannot be zombies.
		//
		// Check the first bitmap byte, where we have to be
		// careful with freeindex.
		obj := uintptr(s.freeindex)
		if (*s.gcmarkBits.bytep(obj / 8)&^*s.allocBits.bytep(obj / 8))>>(obj%8) != 0 {
			s.reportZombies()
		}
		// Check remaining bytes.
		for i := obj/8 + 1; i < divRoundUp(uintptr(s.nelems), 8); i++ {
			if *s.gcmarkBits.bytep(i)&^*s.allocBits.bytep(i) != 0 {
				s.reportZombies()
			}
		}
	}
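
	// Worked example of the check above (illustrative): with 8 objects per
	// bitmap byte, a byte of gcmarkBits equal to 0b00100100 and a byte of
	// allocBits equal to 0b00000100 give gcmarkBits &^ allocBits ==
	// 0b00100000, i.e. the object at bit 5 is marked but was never
	// allocated, so it is reported as a zombie. In the first byte, bits
	// below freeindex%8 are shifted out first, because everything below
	// freeindex is allocated by definition.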

	// Count the number of free objects in this span.
	nalloc := uint16(s.countAlloc())
	nfreed := s.allocCount - nalloc
	if nalloc > s.allocCount {
		// The zombie check above should have caught this in
		// most cases.
		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
		throw("sweep increased allocation count")
	}

	s.allocCount = nalloc
	s.freeindex = 0 // reset allocation index to start of span.
	s.freeIndexForScan = 0
	if traceEnabled() {
		getg().m.p.ptr().trace.reclaimed += uintptr(nfreed) * s.elemsize
	}

	// gcmarkBits becomes the allocBits.
	// get a fresh cleared gcmarkBits in preparation for next GC
	s.allocBits = s.gcmarkBits
	s.gcmarkBits = newMarkBits(uintptr(s.nelems))

	// refresh pinnerBits if they exist
	if s.pinnerBits != nil {
		s.refreshPinnerBits()
	}

	// Initialize alloc bits cache.
	s.refillAllocCache(0)

	// The span must be in our exclusive ownership until we update sweepgen,
	// check for potential races.
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state after sweep")
	}
	if s.sweepgen == sweepgen+1 || s.sweepgen == sweepgen+3 {
		throw("swept cached span")
	}

	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
	// because of the potential for a concurrent free/SetFinalizer.
	//
	// But we need to set it before we make the span available for allocation
	// (return it to heap or mcentral), because allocation code assumes that a
	// span is already swept if available for allocation.
	//
	// Serialization point.
	// At this point the mark bits are cleared and allocation ready
	// to go so release the span.
	atomic.Store(&s.sweepgen, sweepgen)

	if s.isUserArenaChunk {
		if preserve {
			// This is a case that should never be handled by a sweeper that
			// preserves the span for reuse.
			throw("sweep: tried to preserve a user arena span")
		}
		if nalloc > 0 {
			// There still exist pointers into the span or the span hasn't been
			// freed yet. It's not ready to be reused. Put it back on the
			// full swept list for the next cycle.
			mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
			return false
		}

		// It's only at this point that the sweeper doesn't actually need to look
		// at this arena anymore, so subtract from pagesInUse now.
		mheap_.pagesInUse.Add(-s.npages)
		s.state.set(mSpanDead)

		// The arena is ready to be recycled. Remove it from the quarantine list
		// and place it on the ready list. Don't add it back to any sweep lists.
		systemstack(func() {
			// It's the arena code's responsibility to get the chunk on the quarantine
			// list by the time all references to the chunk are gone.
			if s.list != &mheap_.userArena.quarantineList {
				throw("user arena span is on the wrong list")
			}
			lock(&mheap_.lock)
			mheap_.userArena.quarantineList.remove(s)
			mheap_.userArena.readyList.insert(s)
			unlock(&mheap_.lock)
		})
		return false
	}

	if spc.sizeclass() != 0 {
		// Handle spans for small objects.
		if nfreed > 0 {
			// Only mark the span as needing zeroing if we've freed any
			// objects, because a fresh span that had been allocated into,
			// wasn't totally filled, but then swept, still has all of its
			// free slots zeroed.
			s.needzero = 1
			stats := memstats.heapStats.acquire()
			atomic.Xadd64(&stats.smallFreeCount[spc.sizeclass()], int64(nfreed))
			memstats.heapStats.release()

			// Count the frees in the inconsistent, internal stats.
			gcController.totalFree.Add(int64(nfreed) * int64(s.elemsize))
		}
		if !preserve {
			// The caller may not have removed this span from whatever
			// unswept set it's on but taken ownership of the span for
			// sweeping by updating sweepgen. If this span still is in
			// an unswept set, then the mcentral will pop it off the
			// set, check its sweepgen, and ignore it.
			if nalloc == 0 {
				// Free totally free span directly back to the heap.
				mheap_.freeSpan(s)
				return true
			}
			// Return span back to the right mcentral list.
			if nalloc == s.nelems {
				mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
			} else {
				mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s)
			}
		}
	} else if !preserve {
		// Handle spans for large objects.
		if nfreed != 0 {
			// Free large object span to heap.

			// NOTE(rsc,dvyukov): The original implementation of efence
			// in CL 22060046 used sysFree instead of sysFault, so that
			// the operating system would eventually give the memory
			// back to us again, so that an efence program could run
			// longer without running out of memory. Unfortunately,
			// calling sysFree here without any kind of adjustment of the
			// heap data structures means that when the memory does
			// come back to us, we have the wrong metadata for it, either in
			// the mspan structures or in the garbage collection bitmap.
			// Using sysFault here means that the program will run out of
			// memory fairly quickly in efence mode, but at least it won't
			// have mysterious crashes due to confused memory reuse.
			// It should be possible to switch back to sysFree if we also
			// implement and then call some kind of mheap.deleteSpan.
			if debug.efence > 0 {
				s.limit = 0 // prevent mlookup from finding this span
				sysFault(unsafe.Pointer(s.base()), size)
			} else {
				mheap_.freeSpan(s)
			}
			if goexperiment.AllocHeaders && s.largeType != nil && s.largeType.Kind_&kindGCProg != 0 {
				// In the allocheaders experiment, the unrolled GCProg bitmap is allocated separately.
				// Free the space for the unrolled bitmap.
				systemstack(func() {
					s := spanOf(uintptr(unsafe.Pointer(s.largeType)))
					mheap_.freeManual(s, spanAllocPtrScalarBits)
				})
			}

			// Count the free in the consistent, external stats.
			stats := memstats.heapStats.acquire()
			atomic.Xadd64(&stats.largeFreeCount, 1)
			atomic.Xadd64(&stats.largeFree, int64(size))
			memstats.heapStats.release()

			// Count the free in the inconsistent, internal stats.
			gcController.totalFree.Add(int64(size))

			return true
		}

		// Add a large span directly onto the full+swept list.
		mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
	}
	return false
}

// reportZombies reports any marked but free objects in s and throws.
//
// This generally means one of the following:
//
// 1. User code converted a pointer to a uintptr and then back
// unsafely, and a GC ran while the uintptr was the only reference to
// an object.
//
// 2. User code (or a compiler bug) constructed a bad pointer that
// points to a free slot, often a past-the-end pointer.
//
// 3. The GC two cycles ago missed a pointer and freed a live object,
// but it was still live in the last cycle, so this GC cycle found a
// pointer to that object and marked it.
func (s *mspan) reportZombies() {
	printlock()
	print("runtime: marked free object in span ", s, ", elemsize=", s.elemsize, " freeindex=", s.freeindex, " (bad use of unsafe.Pointer? try -d=checkptr)\n")
	mbits := s.markBitsForBase()
	abits := s.allocBitsForIndex(0)
	for i := uintptr(0); i < uintptr(s.nelems); i++ {
		addr := s.base() + i*s.elemsize
		print(hex(addr))
		alloc := i < uintptr(s.freeindex) || abits.isMarked()
		if alloc {
			print(" alloc")
		} else {
			print(" free ")
		}
		if mbits.isMarked() {
			print(" marked  ")
		} else {
			print(" unmarked")
		}
		zombie := mbits.isMarked() && !alloc
		if zombie {
			print(" zombie")
		}
		print("\n")
		if zombie {
			length := s.elemsize
			if length > 1024 {
				length = 1024
			}
			hexdumpWords(addr, addr+length, nil)
		}
		mbits.advance()
		abits.advance()
	}
	throw("found pointer to free object")
}

// deductSweepCredit deducts sweep credit for allocating a span of
// size spanBytes. This must be performed *before* the span is
// allocated to ensure the system has enough credit. If necessary, it
// performs sweeping to prevent going into debt. If the caller will
// also sweep pages (e.g., for a large allocation), it can pass a
// non-zero callerSweepPages to leave that many pages unswept.
//
// deductSweepCredit makes a worst-case assumption that all spanBytes
// bytes of the ultimately allocated span will be available for object
// allocation.
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
// enough sweeping so that all pages are swept during the concurrent
// sweep phase between GC cycles.
//
// mheap_ must NOT be locked.
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
		return
	}

	// Fix debt if necessary.
retry:
	sweptBasis := mheap_.pagesSweptBasis.Load()
	live := gcController.heapLive.Load()
	liveBasis := mheap_.sweepHeapLiveBasis
	newHeapLive := spanBytes
	if liveBasis < live {
		// Only do this subtraction when we don't overflow. Otherwise, pagesTarget
		// might be computed as something really huge, causing us to get stuck
		// sweeping here until the next mark phase.
		//
		// Overflow can happen here if gcPaceSweeper is called concurrently with
		// sweeping (i.e. not during a STW, like it usually is) because this code
		// is intentionally racy. A concurrent call to gcPaceSweeper can happen
		// if a GC tuning parameter is modified and we read an older value of
		// heapLive than what was used to set the basis.
		//
		// This state should be transient, so it's fine to just let newHeapLive
		// be a relatively small number. We'll probably just skip this attempt to
		// sweep.
		newHeapLive += uintptr(live - liveBasis)
	}
	pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
	for pagesTarget > int64(mheap_.pagesSwept.Load()-sweptBasis) {
		if sweepone() == ^uintptr(0) {
			mheap_.sweepPagesPerByte = 0
			break
		}
		if mheap_.pagesSweptBasis.Load() != sweptBasis {
			// Sweep pacing changed. Recompute debt.
			goto retry
		}
	}
}
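
// Worked example of the debt computation above (illustrative numbers): with
// sweepPagesPerByte = 0.001 and newHeapLive = 2<<20 bytes, the target is
// int64(0.001*float64(2<<20)) = 2097 pages minus callerSweepPages. The loop
// then calls sweepone until pagesSwept-pagesSweptBasis covers the target,
// the sweep queue drains, or the basis changes (in which case the debt is
// recomputed from scratch).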

// clobberfree sets the memory content at x to bad content, for debugging
// purposes.
func clobberfree(x unsafe.Pointer, size uintptr) {
	// size (span.elemsize) is always a multiple of 4.
	for i := uintptr(0); i < size; i += 4 {
		*(*uint32)(add(x, i)) = 0xdeadbeef
	}
}

// gcPaceSweeper updates the sweeper's pacing parameters.
//
// Must be called whenever the GC's pacing is updated.
//
// The world must be stopped, or mheap_.lock must be held.
func gcPaceSweeper(trigger uint64) {
	assertWorldStoppedOrLockHeld(&mheap_.lock)

	// Update sweep pacing.
	if isSweepDone() {
		mheap_.sweepPagesPerByte = 0
	} else {
		// Concurrent sweep needs to sweep all of the in-use
		// pages by the time the allocated heap reaches the GC
		// trigger. Compute the ratio of in-use pages to sweep
		// per byte allocated, accounting for the fact that
		// some might already be swept.
		heapLiveBasis := gcController.heapLive.Load()
		heapDistance := int64(trigger) - int64(heapLiveBasis)
		// Add a little margin so rounding errors and
		// concurrent sweep are less likely to leave pages
		// unswept when GC starts.
		heapDistance -= 1024 * 1024
		if heapDistance < _PageSize {
			// Avoid setting the sweep ratio extremely high
			heapDistance = _PageSize
		}
		pagesSwept := mheap_.pagesSwept.Load()
		pagesInUse := mheap_.pagesInUse.Load()
		sweepDistancePages := int64(pagesInUse) - int64(pagesSwept)
		if sweepDistancePages <= 0 {
			mheap_.sweepPagesPerByte = 0
		} else {
			mheap_.sweepPagesPerByte = float64(sweepDistancePages) / float64(heapDistance)
			mheap_.sweepHeapLiveBasis = heapLiveBasis
			// Write pagesSweptBasis last, since this
			// signals concurrent sweeps to recompute
			// their debt.
			mheap_.pagesSweptBasis.Store(pagesSwept)
		}
	}
}
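
// Worked example of the pacing above (illustrative numbers): with
// trigger = 64<<20 and heapLive = 32<<20, the heap distance after the
// margin is 31<<20 bytes. If 4000 in-use pages remain unswept,
// sweepPagesPerByte = 4000/float64(31<<20) ≈ 1.2e-4 pages per byte, so in
// this example roughly one 8 KiB page must be swept for every 8 KiB
// allocated before the trigger is reached.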