package runtime
import (
+ "internal/goexperiment"
"runtime/internal/atomic"
"unsafe"
)
// State of background sweep.
type sweepdata struct {
- lock mutex
- g *g
- parked bool
- started bool
+ lock mutex
+ g *g
+ parked bool
- nbgsweep uint32
- npausesweep uint32
+ // active tracks outstanding sweepers and the sweep
+ // termination condition.
+ active activeSweep
// centralIndex is the current unswept span class.
// It represents an index into the mcentral span
return nil
}
+const sweepDrainedMask = 1 << 31
+
+// activeSweep is a type that captures whether sweeping
+// is done, and whether there are any outstanding sweepers.
+//
+// Every potential sweeper must call begin() before looking
+// for work, and end() after it has finished sweeping.
+type activeSweep struct {
+ // state is divided into two parts.
+ //
+ // The top bit (masked by sweepDrainedMask) is a boolean
+ // value indicating whether all the sweep work has been
+ // drained from the queue.
+ //
+ // The rest of the bits are a counter, indicating the
+ // number of outstanding concurrent sweepers.
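+ //
+ // For example (purely illustrative): a state of sweepDrainedMask|2
+ // means the sweep queue has been drained but two sweepers are still
+ // finishing their spans, while a state of exactly sweepDrainedMask
+ // means sweeping is completely done.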
+ state atomic.Uint32
+}
+
+// begin registers a new sweeper. Returns a sweepLocker
+// for acquiring spans for sweeping. Any outstanding sweeper blocks
+// sweep termination.
+//
+// If the sweepLocker is invalid, the caller can be sure that all
+// outstanding sweep work has been drained, so there is nothing left
+// to sweep. Note that there may be sweepers currently running, so
+// this does not indicate that all sweeping has completed.
+//
+// Even if the sweepLocker is invalid, its sweepGen is always valid.
+func (a *activeSweep) begin() sweepLocker {
+ for {
+ state := a.state.Load()
+ if state&sweepDrainedMask != 0 {
+ return sweepLocker{mheap_.sweepgen, false}
+ }
+ if a.state.CompareAndSwap(state, state+1) {
+ return sweepLocker{mheap_.sweepgen, true}
+ }
+ }
+}
+
+// end deregisters a sweeper. Must be called once for each time
+// begin is called if the sweepLocker is valid.
+func (a *activeSweep) end(sl sweepLocker) {
+ if sl.sweepGen != mheap_.sweepgen {
+ throw("sweeper left outstanding across sweep generations")
+ }
+ for {
+ state := a.state.Load()
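+ // If the sweeper count (state with the drained bit masked off) is
+ // already zero, subtracting one wraps around to a value with the top
+ // bit set, so this check catches an end without a matching begin.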
+ if (state&^sweepDrainedMask)-1 >= sweepDrainedMask {
+ throw("mismatched begin/end of activeSweep")
+ }
+ if a.state.CompareAndSwap(state, state-1) {
+ if state != sweepDrainedMask {
+ return
+ }
+ if debug.gcpacertrace > 0 {
+ live := gcController.heapLive.Load()
+ print("pacer: sweep done at heap size ", live>>20, "MB; allocated ", (live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept.Load(), " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
+ }
+ return
+ }
+ }
+}
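+
+// A sketch of how begin/end are intended to be used; sweepone and
+// ensureSwept below follow this pattern (preemption must be disabled
+// across it, as in those callers):
+//
+//	sl := sweep.active.begin()
+//	if !sl.valid {
+//		return // all sweep work has already been drained
+//	}
+//	if s, ok := sl.tryAcquire(span); ok {
+//		s.sweep(false)
+//	}
+//	sweep.active.end(sl)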
+
+// markDrained marks the active sweep cycle as having drained
+// all remaining work. It is safe to call this concurrently
+// with all other methods of activeSweep, though it may race.
+//
+// Returns true if this call was the one that actually performed
+// the mark.
+func (a *activeSweep) markDrained() bool {
+ for {
+ state := a.state.Load()
+ if state&sweepDrainedMask != 0 {
+ return false
+ }
+ if a.state.CompareAndSwap(state, state|sweepDrainedMask) {
+ return true
+ }
+ }
+}
+
+// sweepers returns the current number of active sweepers.
+func (a *activeSweep) sweepers() uint32 {
+ return a.state.Load() &^ sweepDrainedMask
+}
+
+// isDone returns true if all sweep work has been drained and no more
+// outstanding sweepers exist. That is, when the sweep phase is
+// completely done.
+func (a *activeSweep) isDone() bool {
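+ // The only state with the drained bit set and a sweeper count of
+ // zero is exactly sweepDrainedMask.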
+ return a.state.Load() == sweepDrainedMask
+}
+
+// reset sets up the activeSweep for the next sweep cycle.
+//
+// The world must be stopped.
+func (a *activeSweep) reset() {
+ assertWorldStopped()
+ a.state.Store(0)
+}
+
// finishsweep_m ensures that all spans are swept.
//
// The world must be stopped. This ensures there are no sweeps in
// instantly. If GC was forced before the concurrent sweep
// finished, there may be spans to sweep.
for sweepone() != ^uintptr(0) {
- sweep.npausesweep++
}
+
+ // Make sure there aren't any outstanding sweepers left.
+ // At this point, with the world stopped, it means one of two
+ // things: either we were able to preempt a sweeper, or a
+ // sweeper didn't call sweep.active.end when it should have.
+ // Both cases indicate a bug, so throw.
+ if sweep.active.sweepers() != 0 {
+ throw("active sweepers found at start of mark phase")
+ }
// Reset all the unswept buffers, which should be empty.
c.fullUnswept(sg).reset()
}
- // Sweeping is done, so if the scavenger isn't already awake,
- // wake it up. There's definitely work for it to do at this
- // point.
- wakeScavenger()
+ // Sweeping is done, so there won't be any new memory to
+ // scavenge for a bit.
+ //
+ // If the scavenger isn't already awake, wake it up. There's
+ // definitely work for it to do at this point.
+ scavenger.wake()
nextMarkBitArenaEpoch()
}
lock(&sweep.lock)
sweep.parked = true
c <- 1
- goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
+ goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceBlockGCSweep, 1)
for {
+ // bgsweep attempts to be a "low priority" goroutine by intentionally
+ // yielding time. It's OK if it doesn't run, because goroutines allocating
+ // memory will sweep and ensure that all spans are swept before the next
+ // GC cycle. We really only want to run when we're idle.
+ //
+ // However, calling Gosched after each span swept produces a tremendous
+ // amount of tracing events, sometimes up to 50% of events in a trace. It's
+ // also inefficient to call into the scheduler so much because sweeping a
+ // single span is in general a very fast operation, taking as little as 30 ns
+ // on modern hardware. (See #54767.)
+ //
+ // As a result, bgsweep sweeps in batches, and only calls into the scheduler
+ // at the end of every batch. Furthermore, it only yields its time if there
+ // isn't spare idle time available on other cores. If there's available idle
+ // time, helping to sweep can reduce allocation latencies by getting ahead of
+ // the proportional sweeper and having spans ready to go for allocation.
+ const sweepBatchSize = 10
+ nSwept := 0
for sweepone() != ^uintptr(0) {
- sweep.nbgsweep++
- Gosched()
+ nSwept++
+ if nSwept%sweepBatchSize == 0 {
+ goschedIfBusy()
+ }
}
for freeSomeWbufs(true) {
- Gosched()
+ // N.B. freeSomeWbufs is already batched internally.
+ goschedIfBusy()
}
lock(&sweep.lock)
if !isSweepDone() {
continue
}
sweep.parked = true
- goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
+ goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceBlockGCSweep, 1)
}
}
-// sweepLocker acquires sweep ownership of spans and blocks sweep
-// completion.
+// sweepLocker acquires sweep ownership of spans.
type sweepLocker struct {
// sweepGen is the sweep generation of the heap.
sweepGen uint32
- // blocking indicates that this tracker is blocking sweep
- // completion, usually as a result of acquiring sweep
- // ownership of at least one span.
- blocking bool
+ valid bool
}
// sweepLocked represents sweep ownership of a span.
*mspan
}
-func newSweepLocker() sweepLocker {
- return sweepLocker{
- sweepGen: mheap_.sweepgen,
- }
-}
-
// tryAcquire attempts to acquire sweep ownership of span s. If it
// successfully acquires ownership, it blocks sweep completion.
func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool) {
+ if !l.valid {
+ throw("use of invalid sweepLocker")
+ }
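+ // A span's sweepgen is interpreted relative to the heap's sweepgen:
+ // sweepGen-2 means the span needs sweeping, sweepGen-1 means it is
+ // currently being swept, and sweepGen means it has been swept.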
// Check before attempting to CAS.
if atomic.Load(&s.sweepgen) != l.sweepGen-2 {
return sweepLocked{}, false
}
- // Add ourselves to sweepers before potentially taking
- // ownership.
- l.blockCompletion()
// Attempt to acquire sweep ownership of s.
if !atomic.Cas(&s.sweepgen, l.sweepGen-2, l.sweepGen-1) {
return sweepLocked{}, false
return sweepLocked{s}, true
}
-// blockCompletion blocks sweep completion without acquiring any
-// specific spans.
-func (l *sweepLocker) blockCompletion() {
- if !l.blocking {
- atomic.Xadd(&mheap_.sweepers, +1)
- l.blocking = true
- }
-}
-
-func (l *sweepLocker) dispose() {
- if !l.blocking {
- return
- }
- // Decrement the number of active sweepers and if this is the
- // last one, mark sweep as complete.
- l.blocking = false
- if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepDrained) != 0 {
- l.sweepIsDone()
- }
-}
-
-func (l *sweepLocker) sweepIsDone() {
- if debug.gcpacertrace > 0 {
- print("pacer: sweep done at heap size ", gcController.heapLive>>20, "MB; allocated ", (gcController.heapLive-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept.Load(), " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
- }
-}
-
// sweepone sweeps some unswept heap span and returns the number of pages returned
// to the heap, or ^uintptr(0) if there was nothing to sweep.
func sweepone() uintptr {
- _g_ := getg()
+ gp := getg()
- // increment locks to ensure that the goroutine is not preempted
+ // Increment locks to ensure that the goroutine is not preempted
// in the middle of sweep thus leaving the span in an inconsistent state for next GC
- _g_.m.locks++
- if atomic.Load(&mheap_.sweepDrained) != 0 {
- _g_.m.locks--
- return ^uintptr(0)
- }
+ gp.m.locks++
+
// TODO(austin): sweepone is almost always called in a loop;
// lift the sweepLocker into its callers.
- sl := newSweepLocker()
+ sl := sweep.active.begin()
+ if !sl.valid {
+ gp.m.locks--
+ return ^uintptr(0)
+ }
// Find a span to sweep.
npages := ^uintptr(0)
for {
s := mheap_.nextSpanForSweep()
if s == nil {
- noMoreWork = atomic.Cas(&mheap_.sweepDrained, 0, 1)
+ noMoreWork = sweep.active.markDrained()
break
}
if state := s.state.get(); state != mSpanInUse {
break
}
}
-
- sl.dispose()
+ sweep.active.end(sl)
if noMoreWork {
// The sweep list is empty. There may still be
// concurrent sweeps running, but we're at least very
// close to done sweeping.
- // Move the scavenge gen forward (signalling
+ // Move the scavenge gen forward (signaling
// that there's new work to do) and wake the scavenger.
//
// The scavenger is signaled by the last sweeper because once
// sweeping is done, we will definitely have useful work for
// the scavenger to do, since the scavenger only runs over the
- // heap once per GC cyle. This update is not done during sweep
+ // heap once per GC cycle. This update is not done during sweep
// termination because in some cases there may be a long delay
// between sweep done and sweep termination (e.g. not enough
// allocations to trigger a GC) which would be nice to fill in
// with scavenging work.
- systemstack(func() {
- lock(&mheap_.lock)
- mheap_.pages.scavengeStartGen()
- unlock(&mheap_.lock)
- })
- // Since we might sweep in an allocation path, it's not possible
- // for us to wake the scavenger directly via wakeScavenger, since
- // it could allocate. Ask sysmon to do it for us instead.
- readyForScavenger()
+ if debug.scavtrace > 0 {
+ systemstack(func() {
+ lock(&mheap_.lock)
+
+ // Get released stats.
+ releasedBg := mheap_.pages.scav.releasedBg.Load()
+ releasedEager := mheap_.pages.scav.releasedEager.Load()
+
+ // Print the line.
+ printScavTrace(releasedBg, releasedEager, false)
+
+ // Update the stats.
+ mheap_.pages.scav.releasedBg.Add(-releasedBg)
+ mheap_.pages.scav.releasedEager.Add(-releasedEager)
+ unlock(&mheap_.lock)
+ })
+ }
+ scavenger.ready()
}
- _g_.m.locks--
+ gp.m.locks--
return npages
}
// GC runs; to prevent that the caller must be non-preemptible or must
// somehow block GC progress.
func isSweepDone() bool {
- // Check that all spans have at least begun sweeping and there
- // are no active sweepers. If both are true, then all spans
- // have finished sweeping.
- return atomic.Load(&mheap_.sweepDrained) != 0 && atomic.Load(&mheap_.sweepers) == 0
+ return sweep.active.isDone()
}
// Returns only when span s has been swept.
+//
//go:nowritebarrier
func (s *mspan) ensureSwept() {
// Caller must disable preemption.
// Otherwise when this function returns the span can become unswept again
// (if GC is triggered on another goroutine).
- _g_ := getg()
- if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
+ gp := getg()
+ if gp.m.locks == 0 && gp.m.mallocing == 0 && gp != gp.m.g0 {
throw("mspan.ensureSwept: m is not locked")
}
- sl := newSweepLocker()
- // The caller must be sure that the span is a mSpanInUse span.
- if s, ok := sl.tryAcquire(s); ok {
- s.sweep(false)
- sl.dispose()
- return
+ // If this operation fails, it means there are no more spans to be
+ // swept. In this case, either s has already been swept, or it is
+ // about to be acquired for sweeping and swept.
+ sl := sweep.active.begin()
+ if sl.valid {
+ // The caller must be sure that the span is a mSpanInUse span.
+ if s, ok := sl.tryAcquire(s); ok {
+ s.sweep(false)
+ sweep.active.end(sl)
+ return
+ }
+ sweep.active.end(sl)
}
- sl.dispose()
- // unfortunate condition, and we don't have efficient means to wait
+ // Unfortunately we can't sweep the span ourselves. Somebody else
+ // got to it first. We don't have efficient means to wait, but that's
+ // OK, it will be swept fairly soon.
for {
spangen := atomic.Load(&s.sweepgen)
if spangen == sl.sweepGen || spangen == sl.sweepGen+3 {
}
}
-// Sweep frees or collects finalizers for blocks not marked in the mark phase.
+// sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to heap.
// If preserve=true, don't return it to heap nor relink in mcentral lists;
func (sl *sweepLocked) sweep(preserve bool) bool {
// It's critical that we enter this function with preemption disabled,
// GC must not start while we are in the middle of this function.
- _g_ := getg()
- if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
+ gp := getg()
+ if gp.m.locks == 0 && gp.m.mallocing == 0 && gp != gp.m.g0 {
throw("mspan.sweep: m is not locked")
}
throw("mspan.sweep: bad span state")
}
- if trace.enabled {
- traceGCSweepSpan(s.npages * _PageSize)
+ trace := traceAcquire()
+ if trace.ok() {
+ trace.GCSweepSpan(s.npages * _PageSize)
+ traceRelease(trace)
}
mheap_.pagesSwept.Add(int64(s.npages))
spanHasNoSpecials(s)
}
- if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled {
+ if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled || asanenabled {
// Find all newly freed objects. This doesn't have to
// be efficient; allocfreetrace has massive overhead.
mbits := s.markBitsForBase()
abits := s.allocBitsForIndex(0)
- for i := uintptr(0); i < s.nelems; i++ {
- if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
+ for i := uintptr(0); i < uintptr(s.nelems); i++ {
+ if !mbits.isMarked() && (abits.index < uintptr(s.freeindex) || abits.isMarked()) {
x := s.base() + i*s.elemsize
if debug.allocfreetrace != 0 {
tracefree(unsafe.Pointer(x), size)
if debug.clobberfree != 0 {
clobberfree(unsafe.Pointer(x), size)
}
- if raceenabled {
+ // User arenas are handled on explicit free.
+ if raceenabled && !s.isUserArenaChunk {
racefree(unsafe.Pointer(x), size)
}
- if msanenabled {
+ if msanenabled && !s.isUserArenaChunk {
msanfree(unsafe.Pointer(x), size)
}
+ if asanenabled && !s.isUserArenaChunk {
+ asanpoison(unsafe.Pointer(x), size)
+ }
}
mbits.advance()
abits.advance()
//
// Check the first bitmap byte, where we have to be
// careful with freeindex.
- obj := s.freeindex
+ obj := uintptr(s.freeindex)
if (*s.gcmarkBits.bytep(obj / 8)&^*s.allocBits.bytep(obj / 8))>>(obj%8) != 0 {
s.reportZombies()
}
// Check remaining bytes.
- for i := obj/8 + 1; i < divRoundUp(s.nelems, 8); i++ {
+ for i := obj/8 + 1; i < divRoundUp(uintptr(s.nelems), 8); i++ {
if *s.gcmarkBits.bytep(i)&^*s.allocBits.bytep(i) != 0 {
s.reportZombies()
}
s.allocCount = nalloc
s.freeindex = 0 // reset allocation index to start of span.
- if trace.enabled {
- getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
+ s.freeIndexForScan = 0
+ if traceEnabled() {
+ getg().m.p.ptr().trace.reclaimed += uintptr(nfreed) * s.elemsize
}
// gcmarkBits becomes the allocBits.
// get a fresh cleared gcmarkBits in preparation for next GC
s.allocBits = s.gcmarkBits
- s.gcmarkBits = newMarkBits(s.nelems)
+ s.gcmarkBits = newMarkBits(uintptr(s.nelems))
+
+ // refresh pinnerBits if they exist
+ if s.pinnerBits != nil {
+ s.refreshPinnerBits()
+ }
// Initialize alloc bits cache.
s.refillAllocCache(0)
// to go so release the span.
atomic.Store(&s.sweepgen, sweepgen)
+ if s.isUserArenaChunk {
+ if preserve {
+ // This is a case that should never be handled by a sweeper that
+ // preserves the span for reuse.
+ throw("sweep: tried to preserve a user arena span")
+ }
+ if nalloc > 0 {
+ // There still exist pointers into the span or the span hasn't been
+ // freed yet. It's not ready to be reused. Put it back on the
+ // full swept list for the next cycle.
+ mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
+ return false
+ }
+
+ // It's only at this point that the sweeper doesn't actually need to look
+ // at this arena anymore, so subtract from pagesInUse now.
+ mheap_.pagesInUse.Add(-s.npages)
+ s.state.set(mSpanDead)
+
+ // The arena is ready to be recycled. Remove it from the quarantine list
+ // and place it on the ready list. Don't add it back to any sweep lists.
+ systemstack(func() {
+ // It's the arena code's responsibility to get the chunk on the quarantine
+ // list by the time all references to the chunk are gone.
+ if s.list != &mheap_.userArena.quarantineList {
+ throw("user arena span is on the wrong list")
+ }
+ lock(&mheap_.lock)
+ mheap_.userArena.quarantineList.remove(s)
+ mheap_.userArena.readyList.insert(s)
+ unlock(&mheap_.lock)
+ })
+ return false
+ }
+
if spc.sizeclass() != 0 {
// Handle spans for small objects.
if nfreed > 0 {
// free slots zeroed.
s.needzero = 1
stats := memstats.heapStats.acquire()
- atomic.Xadduintptr(&stats.smallFreeCount[spc.sizeclass()], uintptr(nfreed))
+ atomic.Xadd64(&stats.smallFreeCount[spc.sizeclass()], int64(nfreed))
memstats.heapStats.release()
+
+ // Count the frees in the inconsistent, internal stats.
+ gcController.totalFree.Add(int64(nfreed) * int64(s.elemsize))
}
if !preserve {
// The caller may not have removed this span from whatever
return true
}
// Return span back to the right mcentral list.
- if uintptr(nalloc) == s.nelems {
+ if nalloc == s.nelems {
mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
} else {
mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s)
} else {
mheap_.freeSpan(s)
}
+ if goexperiment.AllocHeaders && s.largeType != nil && s.largeType.Kind_&kindGCProg != 0 {
+ // In the allocheaders experiment, the unrolled GCProg bitmap is allocated separately.
+ // Free the space for the unrolled bitmap.
+ systemstack(func() {
+ s := spanOf(uintptr(unsafe.Pointer(s.largeType)))
+ mheap_.freeManual(s, spanAllocPtrScalarBits)
+ })
+ s.largeType = nil
+ }
+
+ // Count the free in the consistent, external stats.
stats := memstats.heapStats.acquire()
- atomic.Xadduintptr(&stats.largeFreeCount, 1)
- atomic.Xadduintptr(&stats.largeFree, size)
+ atomic.Xadd64(&stats.largeFreeCount, 1)
+ atomic.Xadd64(&stats.largeFree, int64(size))
memstats.heapStats.release()
+
+ // Count the free in the inconsistent, internal stats.
+ gcController.totalFree.Add(int64(size))
+
return true
}
print("runtime: marked free object in span ", s, ", elemsize=", s.elemsize, " freeindex=", s.freeindex, " (bad use of unsafe.Pointer? try -d=checkptr)\n")
mbits := s.markBitsForBase()
abits := s.allocBitsForIndex(0)
- for i := uintptr(0); i < s.nelems; i++ {
+ for i := uintptr(0); i < uintptr(s.nelems); i++ {
addr := s.base() + i*s.elemsize
print(hex(addr))
- alloc := i < s.freeindex || abits.isMarked()
+ alloc := i < uintptr(s.freeindex) || abits.isMarked()
if alloc {
print(" alloc")
} else {
return
}
- if trace.enabled {
- traceGCSweepStart()
+ trace := traceAcquire()
+ if trace.ok() {
+ trace.GCSweepStart()
+ traceRelease(trace)
}
+ // Fix debt if necessary.
retry:
sweptBasis := mheap_.pagesSweptBasis.Load()
-
- // Fix debt if necessary.
- newHeapLive := uintptr(atomic.Load64(&gcController.heapLive)-mheap_.sweepHeapLiveBasis) + spanBytes
+ live := gcController.heapLive.Load()
+ liveBasis := mheap_.sweepHeapLiveBasis
+ newHeapLive := spanBytes
+ if liveBasis < live {
+ // Only do this subtraction when we don't overflow. Otherwise, pagesTarget
+ // might be computed as something really huge, causing us to get stuck
+ // sweeping here until the next mark phase.
+ //
+ // Overflow can happen here if gcPaceSweeper is called concurrently with
+ // sweeping (i.e. not during a STW, like it usually is) because this code
+ // is intentionally racy. A concurrent call to gcPaceSweeper can happen
+ // if a GC tuning parameter is modified and we read an older value of
+ // heapLive than what was used to set the basis.
+ //
+ // This state should be transient, so it's fine to just let newHeapLive
+ // be a relatively small number. We'll probably just skip this attempt to
+ // sweep.
+ //
+ // See issue #57523.
+ newHeapLive += uintptr(live - liveBasis)
+ }
pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
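+ // For example (numbers purely illustrative): with
+ // sweepPagesPerByte = 0.002, newHeapLive = 1<<20, and
+ // callerSweepPages = 2, pagesTarget is int64(0.002*1048576)-2 = 2095
+ // pages that must be swept beyond pagesSweptBasis before returning.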
for pagesTarget > int64(mheap_.pagesSwept.Load()-sweptBasis) {
if sweepone() == ^uintptr(0) {
}
}
- if trace.enabled {
- traceGCSweepDone()
+ trace = traceAcquire()
+ if trace.ok() {
+ trace.GCSweepDone()
+ traceRelease(trace)
}
}
*(*uint32)(add(x, i)) = 0xdeadbeef
}
}
+
+// gcPaceSweeper updates the sweeper's pacing parameters.
+//
+// Must be called whenever the GC's pacing is updated.
+//
+// The world must be stopped, or mheap_.lock must be held.
+func gcPaceSweeper(trigger uint64) {
+ assertWorldStoppedOrLockHeld(&mheap_.lock)
+
+ // Update sweep pacing.
+ if isSweepDone() {
+ mheap_.sweepPagesPerByte = 0
+ } else {
+ // Concurrent sweep needs to sweep all of the in-use
+ // pages by the time the allocated heap reaches the GC
+ // trigger. Compute the ratio of in-use pages to sweep
+ // per byte allocated, accounting for the fact that
+ // some might already be swept.
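+ //
+ // For example (numbers purely illustrative): with a trigger of
+ // 64 MiB, heapLive at 32 MiB, and 4096 in-use pages still unswept,
+ // heapDistance is roughly 31 MiB after the margin below, giving
+ // sweepPagesPerByte ≈ 4096/(31<<20) ≈ 1.3e-4 pages per allocated byte.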
+ heapLiveBasis := gcController.heapLive.Load()
+ heapDistance := int64(trigger) - int64(heapLiveBasis)
+ // Add a little margin so rounding errors and
+ // concurrent sweep are less likely to leave pages
+ // unswept when GC starts.
+ heapDistance -= 1024 * 1024
+ if heapDistance < _PageSize {
+ // Avoid setting the sweep ratio extremely high
+ heapDistance = _PageSize
+ }
+ pagesSwept := mheap_.pagesSwept.Load()
+ pagesInUse := mheap_.pagesInUse.Load()
+ sweepDistancePages := int64(pagesInUse) - int64(pagesSwept)
+ if sweepDistancePages <= 0 {
+ mheap_.sweepPagesPerByte = 0
+ } else {
+ mheap_.sweepPagesPerByte = float64(sweepDistancePages) / float64(heapDistance)
+ mheap_.sweepHeapLiveBasis = heapLiveBasis
+ // Write pagesSweptBasis last, since this
+ // signals concurrent sweeps to recompute
+ // their debt.
+ mheap_.pagesSweptBasis.Store(pagesSwept)
+ }
+ }
+}