// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: sweeping

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

var sweep sweepdata

// State of background sweep.
type sweepdata struct {
	lock mutex
	// ...
	spanidx uint32 // background sweeper position
	// ...
}
func finishsweep_m(stw bool) {
	// Sweeping must be complete before marking commences, so
	// sweep any unswept spans. If this is a concurrent GC, there
	// shouldn't be any spans left to sweep, so this should finish
	// instantly. If GC was forced before the concurrent sweep
	// finished, there may be spans to sweep.
	for sweepone() != ^uintptr(0) {
		sweep.npausesweep++
	}

	// There may be some other spans being swept concurrently that
	// we need to wait for. If finishsweep_m is done with the world stopped,
	// this is not required because the STW must have waited for sweeps.
	//
	// TODO(austin): As of this writing, we always pass true for stw.
	// Consider removing this code.
	if !stw {
		sg := mheap_.sweepgen
		for _, s := range work.spans {
			if s.sweepgen != sg && s.state == _MSpanInUse {
				s.ensureSwept()
			}
		}
	}
}
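
// A span's sweepgen tells where it stands in the sweep cycle relative to
// mheap_.sweepgen, which is incremented by 2 after every GC:
//
//	s.sweepgen == h.sweepgen - 2: the span needs sweeping
//	s.sweepgen == h.sweepgen - 1: the span is currently being swept
//	s.sweepgen == h.sweepgen:     the span is swept and ready to use
//
// A sweeper claims a span by CASing its sweepgen from sg-2 to sg-1, which
// is the protocol the check above (and sweepone below) relies on.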
func bgsweep(c chan int) {
	// ...
	goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock, 1)

	for {
		for gosweepone() != ^uintptr(0) {
			Gosched()
		}
		lock(&sweep.lock)
		if !gosweepdone() {
			// This can happen if a GC runs between
			// gosweepone returning ^0 above
			// and the lock being acquired.
			unlock(&sweep.lock)
			continue
		}
		// ...
		goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock, 1)
	}
}
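
// The locked recheck before parking follows the usual pattern for
// condition-based parking; a general sketch (not the exact runtime code):
//
//	lock(&l)
//	if !done() {
//		unlock(&l) // state changed since the unlocked check; retry
//		continue
//	}
//	goparkunlock(&l, ...) // atomically unlocks l and parks
//
// Without rechecking gosweepdone under sweep.lock, a GC starting between
// the last gosweepone returning ^0 and the park could leave the sweeper
// asleep while spans remain unswept.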
// sweepone sweeps one span and returns the number of pages returned
// to the heap, or ^uintptr(0) if there was nothing to sweep.
func sweepone() uintptr {
	_g_ := getg()

	// Increment locks to ensure that the goroutine is not preempted
	// in the middle of the sweep, which would leave the span in an
	// inconsistent state for the next GC.
	_g_.m.locks++
	sg := mheap_.sweepgen
	for {
		idx := atomic.Xadd(&sweep.spanidx, 1) - 1
		if idx >= uint32(len(work.spans)) {
			mheap_.sweepdone = 1
			_g_.m.locks--
			return ^uintptr(0)
		}
		s := work.spans[idx]
		if s.state != _MSpanInUse {
			s.sweepgen = sg
			continue
		}
		if s.sweepgen != sg-2 || !atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// Already swept, or another sweeper claimed it first.
			continue
		}
		npages := s.npages
		if !s.sweep(false) {
			npages = 0
		}
		_g_.m.locks--
		return npages
	}
}
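
// work.spans acts as a lock-free work list in sweepone: each sweeper
// claims the next slot with an atomic increment, so no two sweepers
// examine the same index. A minimal sketch of the idiom (illustrative
// names, not runtime code):
//
//	for {
//		i := atomic.Xadd(&next, 1) - 1 // claim slot i exclusively
//		if i >= uint32(len(items)) {
//			return // all work has been claimed
//		}
//		process(items[i])
//	}
//
// The per-span sweepgen CAS is still needed because mutators can also
// claim a span directly (see ensureSwept and deductSweepCredit).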
// gosweepone sweeps one span, switching to the system stack to do so.
func gosweepone() uintptr {
	var ret uintptr
	systemstack(func() {
		ret = sweepone()
	})
	return ret
}

// gosweepdone reports whether all spans have been swept.
func gosweepdone() bool {
	return mheap_.sweepdone != 0
}
// Returns only when span s has been swept.
func (s *mspan) ensureSwept() {
	// Caller must disable preemption.
	// Otherwise when this function returns the span can become unswept again
	// (if GC is triggered on another goroutine).
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("MSpan_EnsureSwept: m is not locked")
	}

	sg := mheap_.sweepgen
	if atomic.Load(&s.sweepgen) == sg {
		return
	}
	// The caller must ensure that the span is an _MSpanInUse span.
	if atomic.Cas(&s.sweepgen, sg-2, sg-1) {
		s.sweep(false)
		return
	}
	// Someone else is sweeping the span. This is an unfortunate
	// condition, and we don't have efficient means to wait, so spin
	// until the sweep completes.
	for atomic.Load(&s.sweepgen) != sg {
		osyield()
	}
}
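
// Callers satisfy the preemption requirement with acquirem/releasem.
// A sketch of the pattern (compare addspecial in mheap.go):
//
//	mp := acquirem() // disable preemption so the span cannot become unswept
//	span.ensureSwept()
//	// ... read or update per-span GC state ...
//	releasem(mp)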
// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to the heap.
// If preserve=true, don't return it to the heap or relink it in MCentral lists;
// the caller takes care of it.
//TODO go:nowritebarrier
func (s *mspan) sweep(preserve bool) bool {
	// It's critical that we enter this function with preemption disabled;
	// GC must not start while we are in the middle of this function.
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("MSpan_Sweep: m is not locked")
	}
	sweepgen := mheap_.sweepgen
	if s.state != _MSpanInUse || s.sweepgen != sweepgen-1 {
		print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("MSpan_Sweep: bad span state")
	}

	// ...
	atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))

	cl := s.sizeclass
	size := s.elemsize
	res := false
	nfree := 0

	var head, end gclinkptr

	c := _g_.m.mcache
	freeToHeap := false

	// Mark any free objects in this span so we don't collect them.
	sstart := uintptr(s.start << _PageShift)
	for link := s.freelist; link.ptr() != nil; link = link.ptr().next {
		if uintptr(link) < sstart || s.limit <= uintptr(link) {
			// Free list is corrupted.
			dumpFreeList(s)
			throw("free list corrupted")
		}
		heapBitsForAddr(uintptr(link)).setMarkedNonAtomic()
	}
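
	// Note: marking the free-list entries above is what keeps the sweep
	// loop below from collecting them: heapBitsSweepSpan frees every
	// unmarked object it visits, and an already-free slot must not be
	// freed twice. The non-atomic mark is safe because this goroutine
	// owns the span (sweepgen == h.sweepgen-1) until the sweep finishes.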
	// Unlink & free special records for any objects we're about to free.
	// Two complications here:
	// 1. An object can have both finalizer and profile special records.
	//    In such a case we need to queue the finalizer for execution,
	//    mark the object as live, and preserve the profile special.
	// 2. A tiny object can have several finalizers set up for different offsets.
	//    If such an object is not marked, we need to queue all finalizers at once.
	// Both 1 and 2 are possible at the same time.
	specialp := &s.specials
	special := *specialp
	for special != nil {
		// A finalizer can be set for an inner byte of an object; find the object's beginning.
		p := uintptr(s.start<<_PageShift) + uintptr(special.offset)/size*size
		hbits := heapBitsForAddr(p)
		if !hbits.isMarked() {
			// This object is not marked and has at least one special record.
			// Pass 1: see if it has at least one finalizer.
			hasFin := false
			endOffset := p - uintptr(s.start<<_PageShift) + size
			for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
				if tmp.kind == _KindSpecialFinalizer {
					// Stop freeing of object if it has a finalizer.
					hbits.setMarkedNonAtomic()
					hasFin = true
					break
				}
			}
			// Pass 2: queue all finalizers _or_ handle profile record.
			for special != nil && uintptr(special.offset) < endOffset {
				// Find the exact byte for which the special was set up
				// (as opposed to the object's beginning).
				p := uintptr(s.start<<_PageShift) + uintptr(special.offset)
				if special.kind == _KindSpecialFinalizer || !hasFin {
					// Splice out special record.
					y := special
					special = special.next
					*specialp = special
					freespecial(y, unsafe.Pointer(p), size)
				} else {
					// This is a profile record, but the object has finalizers (so it is kept alive).
					// Keep the special record.
					specialp = &special.next
					special = *specialp
				}
			}
		} else {
			// The object is still live: keep the special record.
			specialp = &special.next
			special = *specialp
		}
	}
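
	// For illustration, both special kinds can attach to one object. A
	// hypothetical user-level sequence that produces both records:
	//
	//	p := new(T)                // if sampled, p gets a profile special
	//	runtime.SetFinalizer(p, f) // adds a finalizer special
	//
	// If p is unreachable at mark time, pass 1 above resurrects it for
	// the finalizer, and pass 2 queues the finalizer while preserving
	// the profile special until p is eventually freed.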
	// Sweep through n objects of given size starting at p.
	// This thread owns the span now, so it can manipulate
	// the block bitmap without atomic operations.
	size, n, _ := s.layout()
	heapBitsSweepSpan(s.base(), size, n, func(p uintptr) {
		// At this point we know that we are looking at a garbage object
		// that needs to be collected.
		if debug.allocfreetrace != 0 {
			tracefree(unsafe.Pointer(p), size)
		}
		if msanenabled {
			msanfree(unsafe.Pointer(p), size)
		}

		// Reset to allocated+noscan.
		if cl == 0 {
			// Free large span.
			if preserve {
				throw("can't preserve large span")
			}
			heapBitsForSpan(p).initSpan(s.layout())
			// ...

			// Free the span after heapBitsSweepSpan
			// returns, since it's not done with the span.
			freeToHeap = true
		} else {
			// Free small object.
			if size > 2*ptrSize {
				*(*uintptr)(unsafe.Pointer(p + ptrSize)) = uintptrMask & 0xdeaddeaddeaddead // mark as "needs to be zeroed"
			} else if size > ptrSize {
				*(*uintptr)(unsafe.Pointer(p + ptrSize)) = 0
			}
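			// The writes above start at p+ptrSize because the first
			// word of a free object holds the free-list link, which is
			// written just below.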
			if head.ptr() == nil {
				head = gclinkptr(p)
			} else {
				end.ptr().next = gclinkptr(p)
			}
			end = gclinkptr(p)
			end.ptr().next = gclinkptr(0x0bade5) // deliberately invalid pointer: poison the tail's next link
			nfree++
		}
	})
	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
	// because of the potential for a concurrent free/SetFinalizer.
	// But we need to set it before we make the span available for allocation
	// (return it to heap or mcentral), because allocation code assumes that a
	// span is already swept if available for allocation.
	if freeToHeap || nfree == 0 {
		// The span must be in our exclusive ownership until we update
		// sweepgen; check for potential races.
		if s.state != _MSpanInUse || s.sweepgen != sweepgen-1 {
			print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
			throw("MSpan_Sweep: bad span state after sweep")
		}
		atomic.Store(&s.sweepgen, sweepgen)
	}
	if nfree > 0 {
		c.local_nsmallfree[cl] += uintptr(nfree)
		res = mheap_.central[cl].mcentral.freeSpan(s, int32(nfree), head, end, preserve)
		// MCentral_FreeSpan updates sweepgen
	} else if freeToHeap {
		// Free large span to heap.

		// NOTE(rsc,dvyukov): The original implementation of efence
		// in CL 22060046 used SysFree instead of SysFault, so that
		// the operating system would eventually give the memory
		// back to us again, so that an efence program could run
		// longer without running out of memory. Unfortunately,
		// calling SysFree here without any kind of adjustment of the
		// heap data structures means that when the memory does
		// come back to us, we have the wrong metadata for it, either in
		// the MSpan structures or in the garbage collection bitmap.
		// Using SysFault here means that the program will run out of
		// memory fairly quickly in efence mode, but at least it won't
		// have mysterious crashes due to confused memory reuse.
		// It should be possible to switch back to SysFree if we also
		// implement and then call some kind of MHeap_DeleteSpan.
		if debug.efence > 0 {
			s.limit = 0 // prevent mlookup from finding this span
			sysFault(unsafe.Pointer(uintptr(s.start<<_PageShift)), size)
		} else {
			mheap_.freeSpan(s, 1)
		}
		c.local_nlargefree++
		c.local_largefree += size
		res = true
	}
	// ...
	return res
}
// deductSweepCredit deducts sweep credit for allocating a span of
// size spanBytes. This must be performed *before* the span is
// allocated to ensure the system has enough credit. If necessary, it
// performs sweeping to prevent going into debt. If the caller will
// also sweep pages (e.g., for a large allocation), it can pass a
// non-zero callerSweepPages to leave that many pages unswept.
//
// deductSweepCredit makes a worst-case assumption that all spanBytes
// bytes of the ultimately allocated span will be available for object
// allocation. The caller should call reimburseSweepCredit if that
// turns out not to be the case once the span is allocated.
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
// enough sweeping so that all pages are swept during the concurrent
// sweep phase between GC cycles.
//
// mheap_ must NOT be locked.
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
		return
	}

	// Account for this span allocation.
	spanBytesAlloc := atomic.Xadd64(&mheap_.spanBytesAlloc, int64(spanBytes))

	// Fix debt if necessary.
	pagesOwed := int64(mheap_.sweepPagesPerByte * float64(spanBytesAlloc))
	for pagesOwed-int64(atomic.Load64(&mheap_.pagesSwept)) > int64(callerSweepPages) {
		if gosweepone() == ^uintptr(0) {
			mheap_.sweepPagesPerByte = 0
			break
		}
	}
}
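
// A worked example with illustrative numbers: if the GC estimates 512
// pages left to sweep against 4 MiB of expected span allocation, then
//
//	sweepPagesPerByte = 512 / float64(4<<20) // ≈ 0.000122
//
// and after 1 MiB of span allocation, pagesOwed = int64(0.000122*(1<<20))
// = 128, so the allocating goroutine sweeps until pagesSwept catches up
// (less any callerSweepPages it will sweep itself). Sweep debt is thus
// paid off in proportion to allocation.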
// reimburseSweepCredit records that unusableBytes bytes of a
// just-allocated span are not available for object allocation. This
// offsets the worst-case charge performed by deductSweepCredit.
func reimburseSweepCredit(unusableBytes uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Nobody cares about the credit. Avoid the atomic.
		return
	}
	atomic.Xadd64(&mheap_.spanBytesAlloc, -int64(unusableBytes))
}
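
// A hypothetical caller pairs the two functions around span allocation
// (illustrative only; the real call sites live elsewhere in the runtime):
//
//	deductSweepCredit(spanBytes, 0) // charge the worst case up front
//	s := allocSpan()                // hypothetical allocation step
//	reimburseSweepCredit(spanBytes - usableBytes(s)) // refund rounding loss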
func dumpFreeList(s *mspan) {
	print("runtime: free list of span ", s, ":\n")
	sstart := uintptr(s.start << _PageShift)
	link := s.freelist
	for i := 0; i < int(s.npages*_PageSize/s.elemsize); i++ {
		// ...
		if link.ptr() == nil {
			break
		}
		if uintptr(link) < sstart || s.limit <= uintptr(link) {
			// Bad link. Stop walking before we crash.
			break
		}
		link = link.ptr().next
	}
}