// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: sweeping

// The sweeper consists of two different algorithms:
//
// * The object reclaimer finds and frees unmarked slots in spans. It
//   can free a whole span if none of the objects are marked, but that
//   isn't its goal. This can be driven either synchronously by
//   mcentral.cacheSpan for mcentral spans, or asynchronously by
//   sweepone, which looks at all the mcentral lists.
//
// * The span reclaimer looks for spans that contain no marked objects
//   and frees whole spans. This is a separate algorithm because
//   freeing whole spans is the hardest task for the object reclaimer,
//   but is critical when allocating new spans. The entry point for
//   this is mheap_.reclaim and it's driven by a sequential scan of
//   the page marks bitmap in the heap arenas.
//
// Both algorithms ultimately call mspan.sweep, which sweeps a single heap span.
28 "runtime/internal/atomic"
// State of background sweep.
type sweepdata struct {
	// centralIndex is the current unswept span class.
	// It represents an index into the mcentral span
	// sets. Accessed and updated via its load and
	// update methods. Not protected by a lock.
	//
	// Reset at mark termination.
	// Used by mheap.nextSpanForSweep.
	centralIndex sweepClass

// sweepClass is a spanClass and one bit to represent whether we're currently
// sweeping partial or full spans.
type sweepClass uint32

	numSweepClasses = numSpanClasses * 2
	sweepClassDone  sweepClass = sweepClass(^uint32(0))
func (s *sweepClass) load() sweepClass {
	return sweepClass(atomic.Load((*uint32)(s)))
func (s *sweepClass) update(sNew sweepClass) {
	// Only update *s if its current value is less than sNew,
	// since *s increases monotonically.
	sOld := s.load()
	for sOld < sNew && !atomic.Cas((*uint32)(s), uint32(sOld), uint32(sNew)) {
		sOld = s.load()
	}
	// TODO(mknyszek): This isn't the only place we have
	// an atomic monotonically increasing counter. It would
	// be nice to have an "atomic max" which is just implemented
	// as the above on most architectures. Some architectures
	// like RISC-V however have native support for an atomic max.
func (s *sweepClass) clear() {
	atomic.Store((*uint32)(s), 0)

// split returns the underlying span class as well as
// whether we're interested in the full or partial
// unswept lists for that class, indicated as a boolean
// (true means "full").
func (s sweepClass) split() (spc spanClass, full bool) {
	return spanClass(s >> 1), s&1 == 0
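
// Illustrative sketch (not part of the runtime): the inverse of split, shown
// to make the encoding explicit. A sweepClass packs the spanClass in its
// upper bits and the full/partial flag in its low bit (0 means "full"), so
// iterating sweep classes from 0 visits the full and then the partial unswept
// set of each span class in order. The helper name is hypothetical; the
// runtime only produces these values by counting from 0 to numSweepClasses.
func makeSweepClass(spc spanClass, full bool) sweepClass {
	sc := sweepClass(spc) << 1
	if !full {
		sc |= 1 // low bit set selects the partial unswept set
	}
	return sc
}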
// nextSpanForSweep finds and pops the next span for sweeping from the
// central sweep buffers. It returns ownership of the span to the caller.
// Returns nil if no such span exists.
func (h *mheap) nextSpanForSweep() *mspan {
	for sc := sweep.centralIndex.load(); sc < numSweepClasses; sc++ {
		spc, full := sc.split()
		c := &h.central[spc].mcentral
			s = c.fullUnswept(sg).pop()
			s = c.partialUnswept(sg).pop()
			// Write down that we found something so future sweepers
			// can start from here.
			sweep.centralIndex.update(sc)
	// Write down that we found nothing.
	sweep.centralIndex.update(sweepClassDone)

// finishsweep_m ensures that all spans are swept.
// The world must be stopped. This ensures there are no sweeps in progress.
func finishsweep_m() {
	// Sweeping must be complete before marking commences, so
	// sweep any unswept spans. If this is a concurrent GC, there
	// shouldn't be any spans left to sweep, so this should finish
	// instantly. If GC was forced before the concurrent sweep
	// finished, there may be spans to sweep.
	for sweepone() != ^uintptr(0) {
	if go115NewMCentralImpl {
		// Reset all the unswept buffers, which should be empty.
		// Do this in sweep termination as opposed to mark termination
		// so that we can catch unswept spans and reclaim blocks as soon as possible.
		sg := mheap_.sweepgen
		for i := range mheap_.central {
			c := &mheap_.central[i].mcentral
			c.partialUnswept(sg).reset()
			c.fullUnswept(sg).reset()
	nextMarkBitArenaEpoch()

func bgsweep(c chan int) {
	lockInit(&sweep.lock, lockRankSweep)
	goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
		for sweepone() != ^uintptr(0) {
		for freeSomeWbufs(true) {
			// This can happen if a GC runs between
			// sweepone returning ^0 above
			// and the lock being acquired.
		goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)

// sweepone sweeps some unswept heap span and returns the number of pages returned
// to the heap, or ^uintptr(0) if there was nothing to sweep.
func sweepone() uintptr {
	sweepRatio := mheap_.sweepPagesPerByte // For debugging
	// Increment locks to ensure that the goroutine is not preempted
	// in the middle of sweep, thus leaving the span in an inconsistent state for the next GC.
190 if atomic.Load(&mheap_.sweepdone) != 0 {
194 atomic.Xadd(&mheap_.sweepers, +1)
196 // Find a span to sweep.
198 sg := mheap_.sweepgen
200 if go115NewMCentralImpl {
201 s = mheap_.nextSpanForSweep()
203 s = mheap_.sweepSpans[1-sg/2%2].pop()
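			// (Old mcentral implementation.) mheap_.sweepSpans holds two
			// stacks of in-use spans that swap roles each GC cycle: since
			// sweepgen increases by 2 per cycle, sweepSpans[sweepgen/2%2]
			// holds swept in-use spans and sweepSpans[1-sweepgen/2%2] holds
			// unswept in-use spans (see the field's documentation in
			// mheap.go), so the pop above takes a span from the unswept stack.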
206 atomic.Store(&mheap_.sweepdone, 1)
209 if state := s.state.get(); state != mSpanInUse {
210 // This can happen if direct sweeping already
211 // swept this span, but in that case the sweep
212 // generation should always be up-to-date.
213 if !(s.sweepgen == sg || s.sweepgen == sg+3) {
214 print("runtime: bad span s.state=", state, " s.sweepgen=", s.sweepgen, " sweepgen=", sg, "\n")
215 throw("non in-use span in unswept list")
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {

	// Sweep the span we found.
	npages := ^uintptr(0)
			// Whole span was freed. Count it toward the
			// page reclaimer credit since these pages can
			// now be used for span allocation.
			atomic.Xadduintptr(&mheap_.reclaimCredit, npages)
			// Span is still in-use, so this returned no
			// pages to the heap and the span needs to
			// move to the swept in-use list.

	// Decrement the number of active sweepers and if this is the
	// last one print trace information.
	if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepdone) != 0 {
		if debug.gcpacertrace > 0 {
			print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", (memstats.heap_live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept, " pages at ", sweepRatio, " pages/byte\n")

// isSweepDone reports whether all spans are swept or currently being swept.
//
// Note that this condition may transition from false to true at any
// time as the sweeper runs. It may transition from true to false if a
// GC runs; to prevent that the caller must be non-preemptible or must
// somehow block GC progress.
func isSweepDone() bool {
	return mheap_.sweepdone != 0
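
// Illustrative sketch (not part of the runtime): per the note above, a caller
// that needs the sweep-done answer to stay valid while it acts on it must
// block GC progress, for example by staying non-preemptible for the duration
// of the check and the dependent work. The helper name is hypothetical.
func withStableSweepState(f func(sweepDone bool)) {
	mp := acquirem() // non-preemptible: a new GC cycle cannot start
	f(isSweepDone())
	releasem(mp)
}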
// Returns only when span s has been swept.
func (s *mspan) ensureSwept() {
	// Caller must disable preemption.
	// Otherwise when this function returns the span can become unswept again
	// (if GC is triggered on another goroutine).
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("mspan.ensureSwept: m is not locked")

	sg := mheap_.sweepgen
	spangen := atomic.Load(&s.sweepgen)
	if spangen == sg || spangen == sg+3 {
	// The caller must be sure that the span is a mSpanInUse span.
	if atomic.Cas(&s.sweepgen, sg-2, sg-1) {
	// unfortunate condition, and we don't have efficient means to wait
		spangen := atomic.Load(&s.sweepgen)
		if spangen == sg || spangen == sg+3 {

// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to heap.
// If preserve=true, don't return it to heap nor relink in mcentral lists;
// caller takes care of it.
func (s *mspan) sweep(preserve bool) bool {
	if !go115NewMCentralImpl {
		return s.oldSweep(preserve)
	// It's critical that we enter this function with preemption disabled;
	// GC must not start while we are in the middle of this function.
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("mspan.sweep: m is not locked")
	sweepgen := mheap_.sweepgen
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state")
		traceGCSweepSpan(s.npages * _PageSize)

	atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))
	c := _g_.m.p.ptr().mcache

	// The allocBits indicate which unmarked objects don't need to be
	// processed since they were free at the end of the last GC cycle
	// and were not allocated since then.
	// If the allocBits index is >= s.freeindex and the bit
	// is not marked then the object remains unallocated
	// since the last GC.
	// This situation is analogous to being on a freelist.
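	// In other words, an object is newly freed by this sweep only if its mark
	// bit is clear and it was allocated before this cycle began, i.e. its
	// index is below s.freeindex or its alloc bit is set. This is exactly the
	// per-slot test used by the debug loop further down:
	//  !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked())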
333 // Unlink & free special records for any objects we're about to free.
334 // Two complications here:
335 // 1. An object can have both finalizer and profile special records.
336 // In such case we need to queue finalizer for execution,
337 // mark the object as live and preserve the profile special.
338 // 2. A tiny object can have several finalizers setup for different offsets.
339 // If such object is not marked, we need to queue all finalizers at once.
340 // Both 1 and 2 are possible at the same time.
341 hadSpecials := s.specials != nil
342 specialp := &s.specials
345 // A finalizer can be set for an inner byte of an object, find object beginning.
		objIndex := uintptr(special.offset) / size
		p := s.base() + objIndex*size
		mbits := s.markBitsForIndex(objIndex)
		if !mbits.isMarked() {
			// This object is not marked and has at least one special record.
			// Pass 1: see if it has at least one finalizer.
			endOffset := p - s.base() + size
			for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
				if tmp.kind == _KindSpecialFinalizer {
					// Stop freeing of object if it has a finalizer.
					mbits.setMarkedNonAtomic()
			// Pass 2: queue all finalizers _or_ handle profile record.
			for special != nil && uintptr(special.offset) < endOffset {
				// Find the exact byte for which the special was set up
				// (as opposed to the object beginning).
				p := s.base() + uintptr(special.offset)
				if special.kind == _KindSpecialFinalizer || !hasFin {
					// Splice out special record.
					special = special.next
					freespecial(y, unsafe.Pointer(p), size)
					// This is a profile record, but the object has finalizers (so kept alive).
					// Keep special record.
					specialp = &special.next
			// object is still live: keep special record
			specialp = &special.next
	if hadSpecials && s.specials == nil {
	if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled {
		// Find all newly freed objects. This doesn't have to be
		// efficient; allocfreetrace has massive overhead.
		mbits := s.markBitsForBase()
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < s.nelems; i++ {
			if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
				x := s.base() + i*s.elemsize
				if debug.allocfreetrace != 0 {
					tracefree(unsafe.Pointer(x), size)
				if debug.clobberfree != 0 {
					clobberfree(unsafe.Pointer(x), size)
					racefree(unsafe.Pointer(x), size)
					msanfree(unsafe.Pointer(x), size)

	// Count the number of free objects in this span.
	nalloc := uint16(s.countAlloc())
	nfreed := s.allocCount - nalloc
	if nalloc > s.allocCount {
		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
		throw("sweep increased allocation count")

	s.allocCount = nalloc
	s.freeindex = 0 // reset allocation index to start of span.
		getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize

	// gcmarkBits becomes the allocBits.
	// get a fresh cleared gcmarkBits in preparation for next GC
	s.allocBits = s.gcmarkBits
	s.gcmarkBits = newMarkBits(s.nelems)

	// Initialize alloc bits cache.
	s.refillAllocCache(0)

	// The span must be in our exclusive ownership until we update sweepgen,
	// check for potential races.
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state after sweep")
	if s.sweepgen == sweepgen+1 || s.sweepgen == sweepgen+3 {
		throw("swept cached span")

	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
	// because of the potential for a concurrent free/SetFinalizer.
	//
	// But we need to set it before we make the span available for allocation
	// (return it to heap or mcentral), because allocation code assumes that a
	// span is already swept if available for allocation.
	//
	// Serialization point.
	// At this point the mark bits are cleared and allocation ready
	// to go so release the span.
	atomic.Store(&s.sweepgen, sweepgen)

	if spc.sizeclass() != 0 {
		// Handle spans for small objects.
			// Only mark the span as needing zeroing if we've freed any
			// objects, because a fresh span that had been allocated into,
			// wasn't totally filled, but then swept, still has all of its
			// free slots zeroed.
			c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
			// The caller may not have removed this span from whatever
			// unswept set it's on but taken ownership of the span for
			// sweeping by updating sweepgen. If this span still is in
			// an unswept set, then the mcentral will pop it off the
			// set, check its sweepgen, and ignore it.
				// Free totally free span directly back to the heap.
			// Return span back to the right mcentral list.
			if uintptr(nalloc) == s.nelems {
				mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
				mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s)
	} else if !preserve {
		// Handle spans for large objects.
			// Free large object span to heap.

			// NOTE(rsc,dvyukov): The original implementation of efence
			// in CL 22060046 used sysFree instead of sysFault, so that
			// the operating system would eventually give the memory
			// back to us again, so that an efence program could run
			// longer without running out of memory. Unfortunately,
			// calling sysFree here without any kind of adjustment of the
			// heap data structures means that when the memory does
			// come back to us, we have the wrong metadata for it, either in
			// the mspan structures or in the garbage collection bitmap.
			// Using sysFault here means that the program will run out of
			// memory fairly quickly in efence mode, but at least it won't
			// have mysterious crashes due to confused memory reuse.
			// It should be possible to switch back to sysFree if we also
			// implement and then call some kind of mheap.deleteSpan.
			if debug.efence > 0 {
				s.limit = 0 // prevent mlookup from finding this span
				sysFault(unsafe.Pointer(s.base()), size)
			c.local_largefree += size

		// Add a large span directly onto the full+swept list.
		mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to heap.
// If preserve=true, don't return it to heap nor relink in mcentral lists;
// caller takes care of it.
//
// For !go115NewMCentralImpl.
func (s *mspan) oldSweep(preserve bool) bool {
	// It's critical that we enter this function with preemption disabled;
	// GC must not start while we are in the middle of this function.
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("mspan.sweep: m is not locked")
	sweepgen := mheap_.sweepgen
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state")
		traceGCSweepSpan(s.npages * _PageSize)

	atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))
	c := _g_.m.p.ptr().mcache

	// The allocBits indicate which unmarked objects don't need to be
	// processed since they were free at the end of the last GC cycle
	// and were not allocated since then.
	// If the allocBits index is >= s.freeindex and the bit
	// is not marked then the object remains unallocated
	// since the last GC.
	// This situation is analogous to being on a freelist.
565 // Unlink & free special records for any objects we're about to free.
566 // Two complications here:
567 // 1. An object can have both finalizer and profile special records.
568 // In such case we need to queue finalizer for execution,
569 // mark the object as live and preserve the profile special.
570 // 2. A tiny object can have several finalizers setup for different offsets.
571 // If such object is not marked, we need to queue all finalizers at once.
572 // Both 1 and 2 are possible at the same time.
573 hadSpecials := s.specials != nil
574 specialp := &s.specials
577 // A finalizer can be set for an inner byte of an object, find object beginning.
		objIndex := uintptr(special.offset) / size
		p := s.base() + objIndex*size
		mbits := s.markBitsForIndex(objIndex)
		if !mbits.isMarked() {
			// This object is not marked and has at least one special record.
			// Pass 1: see if it has at least one finalizer.
			endOffset := p - s.base() + size
			for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
				if tmp.kind == _KindSpecialFinalizer {
					// Stop freeing of object if it has a finalizer.
					mbits.setMarkedNonAtomic()
			// Pass 2: queue all finalizers _or_ handle profile record.
			for special != nil && uintptr(special.offset) < endOffset {
				// Find the exact byte for which the special was set up
				// (as opposed to the object beginning).
				p := s.base() + uintptr(special.offset)
				if special.kind == _KindSpecialFinalizer || !hasFin {
					// Splice out special record.
					special = special.next
					freespecial(y, unsafe.Pointer(p), size)
					// This is a profile record, but the object has finalizers (so kept alive).
					// Keep special record.
					specialp = &special.next
			// object is still live: keep special record
			specialp = &special.next
	if go115NewMarkrootSpans && hadSpecials && s.specials == nil {
	if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled {
		// Find all newly freed objects. This doesn't have to be
		// efficient; allocfreetrace has massive overhead.
		mbits := s.markBitsForBase()
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < s.nelems; i++ {
			if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
				x := s.base() + i*s.elemsize
				if debug.allocfreetrace != 0 {
					tracefree(unsafe.Pointer(x), size)
				if debug.clobberfree != 0 {
					clobberfree(unsafe.Pointer(x), size)
					racefree(unsafe.Pointer(x), size)
					msanfree(unsafe.Pointer(x), size)

	// Count the number of free objects in this span.
	nalloc := uint16(s.countAlloc())
	if spc.sizeclass() == 0 && nalloc == 0 {
	nfreed := s.allocCount - nalloc
	if nalloc > s.allocCount {
		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
		throw("sweep increased allocation count")

	s.allocCount = nalloc
	wasempty := s.nextFreeIndex() == s.nelems
	s.freeindex = 0 // reset allocation index to start of span.
		getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize

	// gcmarkBits becomes the allocBits.
	// get a fresh cleared gcmarkBits in preparation for next GC
	s.allocBits = s.gcmarkBits
	s.gcmarkBits = newMarkBits(s.nelems)

	// Initialize alloc bits cache.
	s.refillAllocCache(0)

	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
	// because of the potential for a concurrent free/SetFinalizer.
	// But we need to set it before we make the span available for allocation
	// (return it to heap or mcentral), because allocation code assumes that a
	// span is already swept if available for allocation.
	if freeToHeap || nfreed == 0 {
		// The span must be in our exclusive ownership until we update sweepgen,
		// check for potential races.
		if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
			print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
			throw("mspan.sweep: bad span state after sweep")
		// Serialization point.
		// At this point the mark bits are cleared and allocation ready
		// to go so release the span.
		atomic.Store(&s.sweepgen, sweepgen)

	if nfreed > 0 && spc.sizeclass() != 0 {
		c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
		res = mheap_.central[spc].mcentral.freeSpan(s, preserve, wasempty)
		// mcentral.freeSpan updates sweepgen
	} else if freeToHeap {
		// Free large span to heap

		// NOTE(rsc,dvyukov): The original implementation of efence
		// in CL 22060046 used sysFree instead of sysFault, so that
		// the operating system would eventually give the memory
		// back to us again, so that an efence program could run
		// longer without running out of memory. Unfortunately,
		// calling sysFree here without any kind of adjustment of the
		// heap data structures means that when the memory does
		// come back to us, we have the wrong metadata for it, either in
		// the mspan structures or in the garbage collection bitmap.
		// Using sysFault here means that the program will run out of
		// memory fairly quickly in efence mode, but at least it won't
		// have mysterious crashes due to confused memory reuse.
		// It should be possible to switch back to sysFree if we also
		// implement and then call some kind of mheap.deleteSpan.
		if debug.efence > 0 {
			s.limit = 0 // prevent mlookup from finding this span
			sysFault(unsafe.Pointer(s.base()), size)
		c.local_largefree += size
		// The span has been swept and is still in-use, so put
		// it on the swept in-use list.
		mheap_.sweepSpans[sweepgen/2%2].push(s)
// deductSweepCredit deducts sweep credit for allocating a span of
// size spanBytes. This must be performed *before* the span is
// allocated to ensure the system has enough credit. If necessary, it
// performs sweeping to prevent going into debt. If the caller will
// also sweep pages (e.g., for a large allocation), it can pass a
// non-zero callerSweepPages to leave that many pages unswept.
//
// deductSweepCredit makes a worst-case assumption that all spanBytes
// bytes of the ultimately allocated span will be available for object
// allocation.
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
// enough sweeping so that all pages are swept during the concurrent
// sweep phase between GC cycles.
//
// mheap_ must NOT be locked.
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
	sweptBasis := atomic.Load64(&mheap_.pagesSweptBasis)

	// Fix debt if necessary.
	newHeapLive := uintptr(atomic.Load64(&memstats.heap_live)-mheap_.sweepHeapLiveBasis) + spanBytes
	pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
	for pagesTarget > int64(atomic.Load64(&mheap_.pagesSwept)-sweptBasis) {
		if sweepone() == ^uintptr(0) {
			mheap_.sweepPagesPerByte = 0
		if atomic.Load64(&mheap_.pagesSweptBasis) != sweptBasis {
			// Sweep pacing changed. Recompute debt.
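
// Illustrative sketch (not part of the runtime): the pacing arithmetic used by
// deductSweepCredit above, written as a standalone helper. Given the bytes
// allocated since the sweep began (including the span about to be allocated),
// the sweep ratio, the pages the caller will sweep itself, and the pages swept
// so far in this cycle, it reports how many more pages must be swept before
// the allocation is paid for. All names are hypothetical.
func sweepDebtPages(pagesPerByte float64, allocatedBytes, callerSweepPages, pagesSweptThisCycle uintptr) int64 {
	// Target pages = ratio * bytes allocated, minus pages the caller will
	// sweep itself; the debt is whatever has not been swept yet.
	target := int64(pagesPerByte*float64(allocatedBytes)) - int64(callerSweepPages)
	return target - int64(pagesSweptThisCycle)
}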
// clobberfree sets the memory content at x to bad content, for debugging purposes.
func clobberfree(x unsafe.Pointer, size uintptr) {
	// size (span.elemsize) is always a multiple of 4.
	for i := uintptr(0); i < size; i += 4 {
		*(*uint32)(add(x, i)) = 0xdeadbeef