// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: sweeping

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var sweep sweepdata

// State of background sweep.
type sweepdata struct {
	lock    mutex
	g       *g
	parked  bool
	started bool

	spanidx uint32 // background sweeper position

	nbgsweep    uint32
	npausesweep uint32
}
//go:nowritebarrier
func finishsweep_m(stw bool) {
	// Sweeping must be complete before marking commences, so
	// sweep any unswept spans. If this is a concurrent GC, there
	// shouldn't be any spans left to sweep, so this should finish
	// instantly. If GC was forced before the concurrent sweep
	// finished, there may be spans to sweep.
	for sweepone() != ^uintptr(0) {
		sweep.npausesweep++
	}

	// There may be some other spans being swept concurrently that
	// we need to wait for. If finishsweep_m is done with the world stopped,
	// this is not required because the STW must have waited for sweeps.
	//
	// TODO(austin): As of this writing, we always pass true for stw.
	// Consider removing this code.
	if !stw {
		sg := mheap_.sweepgen
		for _, s := range work.spans {
			if s.sweepgen != sg && s.state == _MSpanInUse {
				s.ensureSwept()
			}
		}
	}
}
func bgsweep(c chan int) {
	sweep.g = getg()

	lock(&sweep.lock)
	sweep.parked = true
	c <- 1
	goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock, 1)

	for {
		for gosweepone() != ^uintptr(0) {
			sweep.nbgsweep++
			Gosched()
		}
		lock(&sweep.lock)
		if !gosweepdone() {
			// This can happen if a GC runs between
			// gosweepone returning ^0 above
			// and the lock being acquired.
			unlock(&sweep.lock)
			continue
		}
		sweep.parked = true
		goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock, 1)
	}
}
// sweepone sweeps one span and returns the number of pages returned to the
// heap, or ^uintptr(0) if there is nothing to sweep.
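//
// Sweep state is tracked in s.sweepgen relative to mheap_.sweepgen, which
// advances by two at each GC: sweepgen == h.sweepgen-2 means the span needs
// sweeping, h.sweepgen-1 means it is currently being swept, and h.sweepgen
// means it is swept and ready for use. The CAS from sg-2 to sg-1 below is
// what claims a span for this sweeper.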
//go:nowritebarrier
func sweepone() uintptr {
	_g_ := getg()

	// Increment locks to ensure that the goroutine is not preempted
	// in the middle of sweep, which would leave the span in an
	// inconsistent state for the next GC.
	_g_.m.locks++
	sg := mheap_.sweepgen
	for {
		idx := atomic.Xadd(&sweep.spanidx, 1) - 1
		if idx >= uint32(len(work.spans)) {
			mheap_.sweepdone = 1
			_g_.m.locks--
			return ^uintptr(0)
		}
		s := work.spans[idx]
		if s.state != mSpanInUse {
			s.sweepgen = sg
			continue
		}
		if s.sweepgen != sg-2 || !atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			continue
		}
		npages := s.npages
		if !s.sweep(false) {
			npages = 0
		}
		_g_.m.locks--
		return npages
	}
}
//go:nowritebarrier
func gosweepone() uintptr {
	var ret uintptr
	systemstack(func() {
		ret = sweepone()
	})
	return ret
}

//go:nowritebarrier
func gosweepdone() bool {
	return mheap_.sweepdone != 0
}
// ensureSwept returns only when span s has been swept.
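//
// A caller typically pins its M around the call (see the preemption
// requirement below), for example (sketch):
//
//	mp := acquirem()
//	s.ensureSwept()
//	// ... use the swept span ...
//	releasem(mp)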
//go:nowritebarrier
func (s *mspan) ensureSwept() {
	// The caller must disable preemption.
	// Otherwise, when this function returns, the span can become unswept
	// again (if GC is triggered on another goroutine).
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("MSpan_EnsureSwept: m is not locked")
	}

	sg := mheap_.sweepgen
	if atomic.Load(&s.sweepgen) == sg {
		return
	}
	// The caller must be sure that the span is an MSpanInUse span.
	if atomic.Cas(&s.sweepgen, sg-2, sg-1) {
		s.sweep(false)
		return
	}
	// Unfortunate condition: someone else started sweeping the span, and we
	// don't have an efficient means to wait, so spin until the sweep is done.
	for atomic.Load(&s.sweepgen) != sg {
		osyield()
	}
}
// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to the heap.
// If preserve=true, don't return it to the heap nor relink it in the MCentral
// lists; the caller takes care of it.
//TODO go:nowritebarrier
func (s *mspan) sweep(preserve bool) bool {
	// It's critical that we enter this function with preemption disabled;
	// GC must not start while we are in the middle of this function.
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("MSpan_Sweep: m is not locked")
	}
	sweepgen := mheap_.sweepgen
	if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("MSpan_Sweep: bad span state")
	}

	if trace.enabled {
		traceGCSweepStart()
	}

	atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))

	cl := s.sizeclass
	size := s.elemsize
	res := false
	nfree := 0
	var head, end gclinkptr

	c := _g_.m.mcache
	freeToHeap := false

	// Mark any free objects in this span so we don't collect them.
	sstart := uintptr(s.start << _PageShift)
	for link := s.freelist; link.ptr() != nil; link = link.ptr().next {
		if uintptr(link) < sstart || s.limit <= uintptr(link) {
			// Free list is corrupted.
			dumpFreeList(s)
			throw("free list corrupted")
		}
		heapBitsForAddr(uintptr(link)).setMarkedNonAtomic()
	}
	// Unlink & free special records for any objects we're about to free.
	// Two complications here:
	// 1. An object can have both finalizer and profile special records.
	//    In such a case we need to queue the finalizer for execution,
	//    mark the object as live, and preserve the profile special.
	// 2. A tiny object can have several finalizers set up for different offsets.
	//    If such an object is not marked, we need to queue all finalizers at once.
	// Both 1 and 2 are possible at the same time.
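	// For example (illustrative): an unmarked tiny block carrying finalizer
	// specials at offsets 0 and 8 plus a profile special is marked live in
	// pass 1 below (it has a finalizer); pass 2 then queues both finalizers
	// while keeping the profile special, since the object now survives.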
	specialp := &s.specials
	special := *specialp
	for special != nil {
		// A finalizer can be set for an inner byte of an object; find the
		// object's beginning.
		p := uintptr(s.start<<_PageShift) + uintptr(special.offset)/size*size
		hbits := heapBitsForAddr(p)
		if !hbits.isMarked() {
			// This object is not marked and has at least one special record.
			// Pass 1: see if it has at least one finalizer.
			hasFin := false
			endOffset := p - uintptr(s.start<<_PageShift) + size
			for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
				if tmp.kind == _KindSpecialFinalizer {
					// Stop freeing of the object if it has a finalizer.
					hbits.setMarkedNonAtomic()
					hasFin = true
					break
				}
			}
			// Pass 2: queue all finalizers _or_ handle profile record.
			for special != nil && uintptr(special.offset) < endOffset {
				// Find the exact byte for which the special was set up
				// (as opposed to the object's beginning).
				p := uintptr(s.start<<_PageShift) + uintptr(special.offset)
				if special.kind == _KindSpecialFinalizer || !hasFin {
					// Splice out special record.
					y := special
					special = special.next
					*specialp = special
					freespecial(y, unsafe.Pointer(p), size)
				} else {
					// This is a profile record, but the object has finalizers
					// (so it is kept alive). Keep the special record.
					specialp = &special.next
					special = *specialp
				}
			}
		} else {
			// The object is still live: keep the special record.
			specialp = &special.next
			special = *specialp
		}
	}
	// Sweep through n objects of given size starting at p.
	// This thread owns the span now, so it can manipulate
	// the block bitmap without atomic operations.

	size, n, _ := s.layout()
	heapBitsSweepSpan(s.base(), size, n, func(p uintptr) {
		// At this point we know that we are looking at a garbage object
		// that needs to be collected.
		if debug.allocfreetrace != 0 {
			tracefree(unsafe.Pointer(p), size)
		}
		if msanenabled {
			msanfree(unsafe.Pointer(p), size)
		}

		// Reset to allocated+noscan.
		if cl == 0 {
			// Free large span.
			if preserve {
				throw("can't preserve large span")
			}
			heapBitsForSpan(p).initSpan(s.layout())
			s.needzero = 1

			// Free the span after heapBitsSweepSpan
			// returns, since it's not done with the span.
			freeToHeap = true
		} else {
			// Free small object.
			if size > 2*sys.PtrSize {
				*(*uintptr)(unsafe.Pointer(p + sys.PtrSize)) = uintptrMask & 0xdeaddeaddeaddead // mark as "needs to be zeroed"
			} else if size > sys.PtrSize {
				*(*uintptr)(unsafe.Pointer(p + sys.PtrSize)) = 0
			}
			if head.ptr() == nil {
				head = gclinkptr(p)
			} else {
				end.ptr().next = gclinkptr(p)
			}
			end = gclinkptr(p)
			end.ptr().next = gclinkptr(0x0bade5)
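			// The tail's next pointer is left as an obviously bad sentinel;
			// mcentral.freeSpan relinks it when the list is spliced back in,
			// and any premature walk of this list should presumably fail
			// loudly rather than silently reading garbage.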
			nfree++
		}
	})

	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
	// because of the potential for a concurrent free/SetFinalizer.
	// But we need to set it before we make the span available for allocation
	// (return it to heap or mcentral), because allocation code assumes that a
	// span is already swept if available for allocation.
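	// The atomic store of s.sweepgen below pairs with the atomic loads in
	// ensureSwept, which may be polling on another goroutine for this span's
	// sweep to finish.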
	if freeToHeap || nfree == 0 {
		// The span must be in our exclusive ownership until we update
		// sweepgen; check for potential races.
		if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
			print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
			throw("MSpan_Sweep: bad span state after sweep")
		}
		atomic.Store(&s.sweepgen, sweepgen)
	}
	if nfree > 0 {
		c.local_nsmallfree[cl] += uintptr(nfree)
		res = mheap_.central[cl].mcentral.freeSpan(s, int32(nfree), head, end, preserve)
		// MCentral_FreeSpan updates sweepgen.
	} else if freeToHeap {
		// Free large span to heap.

		// NOTE(rsc,dvyukov): The original implementation of efence
		// in CL 22060046 used SysFree instead of SysFault, so that
		// the operating system would eventually give the memory
		// back to us again, so that an efence program could run
		// longer without running out of memory. Unfortunately,
		// calling SysFree here without any kind of adjustment of the
		// heap data structures means that when the memory does
		// come back to us, we have the wrong metadata for it, either in
		// the MSpan structures or in the garbage collection bitmap.
		// Using SysFault here means that the program will run out of
		// memory fairly quickly in efence mode, but at least it won't
		// have mysterious crashes due to confused memory reuse.
		// It should be possible to switch back to SysFree if we also
		// implement and then call some kind of MHeap_DeleteSpan.
		if debug.efence > 0 {
			s.limit = 0 // prevent mlookup from finding this span
			sysFault(unsafe.Pointer(uintptr(s.start<<_PageShift)), size)
		} else {
			mheap_.freeSpan(s, 1)
		}
		c.local_nlargefree++
		c.local_largefree += size
		res = true
	}
	if trace.enabled {
		traceGCSweepDone()
	}
	return res
}
// deductSweepCredit deducts sweep credit for allocating a span of
// size spanBytes. This must be performed *before* the span is
// allocated to ensure the system has enough credit. If necessary, it
// performs sweeping to prevent going into debt. If the caller will
// also sweep pages (e.g., for a large allocation), it can pass a
// non-zero callerSweepPages to leave that many pages unswept.
//
// deductSweepCredit makes a worst-case assumption that all spanBytes
// bytes of the ultimately allocated span will be available for object
// allocation. The caller should call reimburseSweepCredit if that
// turns out not to be the case once the span is allocated.
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
// enough sweeping so that all pages are swept during the concurrent
// sweep phase between GC cycles.
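//
// For example (illustrative numbers only): with sweepPagesPerByte = 0.001
// and spanBytesAlloc reaching 2<<20 after this allocation, the allocator
// owes int64(0.001*float64(2<<20)) = 2097 swept pages; if pagesSwept is
// only 2000 and callerSweepPages is 0, the loop below sweeps until at
// least 97 more pages have been swept (or there is nothing left to sweep).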
//
// mheap_ must NOT be locked.
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
		return
	}

	// Account for this span allocation.
	spanBytesAlloc := atomic.Xadd64(&mheap_.spanBytesAlloc, int64(spanBytes))

	// Fix debt if necessary.
	pagesOwed := int64(mheap_.sweepPagesPerByte * float64(spanBytesAlloc))
	for pagesOwed-int64(atomic.Load64(&mheap_.pagesSwept)) > int64(callerSweepPages) {
		if gosweepone() == ^uintptr(0) {
			mheap_.sweepPagesPerByte = 0
			break
		}
	}
}
// reimburseSweepCredit records that unusableBytes bytes of a
// just-allocated span are not available for object allocation. This
// offsets the worst-case charge performed by deductSweepCredit.
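//
// For instance (illustrative; the exact accounting is up to the callers),
// a span handed out by mcentral may already contain live objects, and the
// bytes they occupy are not available for new allocation, so they can be
// reimbursed here.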
func reimburseSweepCredit(unusableBytes uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Nobody cares about the credit. Avoid the atomic.
		return
	}
	atomic.Xadd64(&mheap_.spanBytesAlloc, -int64(unusableBytes))
}
func dumpFreeList(s *mspan) {
	printlock()
	print("runtime: free list of span ", s, ":\n")
	sstart := uintptr(s.start << _PageShift)
	link := s.freelist
	for i := 0; i < int(s.npages*_PageSize/s.elemsize); i++ {
		if i != 0 {
			print(" -> ")
		}
		print(hex(link))
		if link.ptr() == nil {
			break
		}
		if uintptr(link) < sstart || s.limit <= uintptr(link) {
			// Bad link. Stop walking before we crash.
			print(" (BAD)")
			break
		}
		link = link.ptr().next
	}
	print("\n")
	printunlock()
}