// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector (GC).
//
// The GC runs concurrently with mutator threads, is type accurate (aka precise), and allows multiple
// GC threads to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
// non-generational and non-compacting. Allocation is done using size segregated per P allocation
// areas to minimize fragmentation while eliminating locks in the common case.
//
// The algorithm decomposes into several steps.
// This is a high level description of the algorithm being used. For an overview of GC a good
// place to start is Richard Jones' gchandbook.org.
//
// The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
// Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
// On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978), 966-975.
// For journal quality proofs that these steps are complete, correct, and terminate see
// Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
// Concurrency and Computation: Practice and Experience 15(3-5), 2003.
//
// 0.  Set phase = GCscan from GCoff.
// 1.  Wait for all P's to acknowledge phase change.
//         At this point all goroutines have passed through a GC safepoint and
//         know we are in the GCscan phase.
// 2.  GC scans all goroutine stacks, marks and enqueues all encountered pointers
//         (marking avoids most duplicate enqueuing but races may produce duplication which is benign).
//         Preempted goroutines are scanned before P schedules next goroutine.
// 3.  Set phase = GCmark.
// 4.  Wait for all P's to acknowledge phase change.
// 5.  Now write barrier marks and enqueues black, grey, or white to white pointers.
//         Malloc still allocates white (non-marked) objects.
// 6.  Meanwhile GC transitively walks the heap marking reachable objects.
// 7.  When GC finishes marking heap, it preempts P's one-by-one and
//         retakes partial wbufs (filled by write barrier or during a stack scan of the goroutine
//         currently scheduled on the P).
// 8.  Once the GC has exhausted all available marking work it sets phase = marktermination.
// 9.  Wait for all P's to acknowledge phase change.
// 10. Malloc now allocates black objects, so number of unmarked reachable objects
//         monotonically decreases.
// 11. GC preempts P's one-by-one taking partial wbufs and marks all unmarked yet reachable objects.
// 12. When GC completes a full cycle over P's and discovers no new grey
//         objects (which means all reachable objects are marked), set phase = GCsweep.
// 13. Wait for all P's to acknowledge phase change.
// 14. Now malloc allocates white (but sweeps spans before use).
//         Write barrier becomes nop.
// 15. GC does background sweeping, see description below.
// 16. When sweeping is complete set phase to GCoff.
// 17. When sufficient allocation has taken place replay the sequence starting at 0 above,
//         see discussion of GC rate below.
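//
// In outline (an editorial summary of steps 0-17 above, not a separate
// mechanism in the code), each cycle walks gcphase through
//
//	GCoff -> GCscan -> GCmark -> GCmarktermination -> GCsweep -> GCoff
//
// with a "wait for all P's to acknowledge" handshake after every transition.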
//
// Changing phases.
// Phases are changed by setting the gcphase to the next phase and possibly calling ackgcphase.
// All phase action must be benign in the presence of a change.
// Starting with GCoff
// GCoff to GCscan
//     GCscan scans stacks and globals greying them and never marks an object black.
//     Once all the P's are aware of the new phase they will scan gs on preemption.
//     This means that the scanning of preempted gs can't start until all the Ps
//     have acknowledged.
// GCscan to GCmark
//     GCmark turns on the write barrier which also only greys objects. No scanning
//     of objects (making them black) can happen until all the Ps have acknowledged
//     the phase change.
// GCmark to GCmarktermination
//     The only change here is that we start allocating black so the Ps must acknowledge
//     the change before we begin the termination algorithm.
// GCmarktermination to GCsweep
//     Objects currently on the freelist must be marked black for this to work.
//     Are things on the free lists black or white? How does the sweep phase work?
//
// Concurrent sweep.
// The sweep phase proceeds concurrently with normal program execution.
// The heap is swept span-by-span both lazily (when a goroutine needs another span)
// and concurrently in a background goroutine (this helps programs that are not CPU bound).
// However, at the end of the stop-the-world GC phase we don't know the size of the live heap,
// and so the next_gc calculation is tricky and happens as follows.
// At the end of the stop-the-world phase next_gc is conservatively set based on total
// heap size; all spans are marked as "needs sweeping".
// Whenever a span is swept, next_gc is decremented by GOGC*newly_freed_memory.
// The background sweeper goroutine simply sweeps spans one-by-one bringing next_gc
// closer to the target value. However, this is not enough to avoid over-allocating memory.
// Consider that a goroutine wants to allocate a new span for a large object and
// there are no free swept spans, but there are small-object unswept spans.
// If the goroutine naively allocates a new span, it can surpass the yet-unknown
// target next_gc value. In order to prevent such cases (1) when a goroutine needs
// to allocate a new small-object span, it sweeps small-object spans for the same
// object size until it frees at least one object; (2) when a goroutine needs to
// allocate a large-object span from the heap, it sweeps spans until it frees at least
// that many pages into the heap. Together these two measures ensure that we don't surpass
// the target next_gc value by a large margin. There is an exception: if a goroutine sweeps
// and frees two nonadjacent one-page spans to the heap, it will allocate a new two-page span,
// but there can still be other one-page unswept spans which could be combined into a two-page span.
// It's critical to ensure that no operations proceed on unswept spans (that would corrupt
// mark bits in the GC bitmap). During GC all mcaches are flushed into the central cache,
// so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
// When a goroutine explicitly frees an object or sets a finalizer, it ensures that
// the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
// The finalizer goroutine is kicked off only when all spans are swept.
// When the next GC starts, it sweeps all not-yet-swept spans (if any).
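//
// As a sketch (editorial, not the actual allocator code; the real logic
// lives in the span allocation paths and uses runtime·sweepone below,
// which returns pages freed or -1 when there is nothing left to sweep),
// measure (2) amounts to:
//
//	reclaimed = 0;
//	while(reclaimed < npage) {
//		n = runtime·sweepone();
//		if(n == -1)
//			break; // nothing left to sweep
//		reclaimed += n;
//	}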
//
// GC rate.
// Next GC is after we've allocated an extra amount of memory proportional to
// the amount already in use. The proportion is controlled by the GOGC environment variable
// (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
// (this mark is tracked in the next_gc variable). This keeps the GC cost in linear
// proportion to the allocation cost. Adjusting GOGC just changes the linear constant
// (and also the amount of extra memory used).
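//
// In other words (editorial restatement): next_gc = heap_in_use * (1 + GOGC/100).
// With GOGC=100 and 4M in use that gives 4M*2 = 8M as above; with GOGC=200 the
// same 4M heap would grow to 12M before the next collection.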

#include "arch_GOARCH.h"
#include "typekind.h"
#include "funcdata.h"
#include "textflag.h"

enum {
	Debug = 0,
	ConcurrentSweep = 1,
	WorkbufSize = 4*1024,
	FinBlockSize = 4*1024,
	RootData = 0,
	RootBss = 1,
	RootFinalizers = 2,
	RootSpans = 3,
	RootFlushCaches = 4,
	RootCount = 5,
};

// ptrmask for an allocation containing a single pointer.
static byte oneptr[] = {BitsPointer};

// Initialized from $GOGC. GOGC=off means no GC.
extern int32 runtime·gcpercent;

// Holding worldsema grants an M the right to try to stop the world.
// The procedure for stopping the world is:
//
//	runtime·semacquire(&runtime·worldsema);
//	m->gcing = 1;
//	runtime·stoptheworld();
//
//	... do stuff ...
//
//	m->gcing = 0;
//	runtime·semrelease(&runtime·worldsema);
//	runtime·starttheworld();
//
uint32 runtime·worldsema = 1;

// It is a bug if bits does not have bitBoundary set but
// there are still some cases where this happens related
// to stack spans.
typedef struct Markbits Markbits;
struct Markbits {
	byte *bitp;  // pointer to the byte holding xbits
	byte shift;  // bits xbits needs to be shifted to get bits
	byte xbits;  // byte holding all the bits from *bitp
	byte bits;   // mark and boundary bits relevant to corresponding slot.
	byte tbits;  // pointer||scalar bits relevant to corresponding slot.
};

extern byte runtime·data[];
extern byte runtime·edata[];
extern byte runtime·bss[];
extern byte runtime·ebss[];

extern byte runtime·gcdata[];
extern byte runtime·gcbss[];

Mutex runtime·finlock;       // protects the following variables
G* runtime·fing;             // goroutine that runs finalizers
FinBlock* runtime·finq;      // list of finalizers that are to be executed
FinBlock* runtime·finc;      // cache of free blocks
static byte finptrmask[FinBlockSize/PtrSize/PointersPerByte];
bool runtime·fingwait;
bool runtime·fingwake;
FinBlock *runtime·allfin;    // list of all blocks

BitVector runtime·gcdatamask;
BitVector runtime·gcbssmask;

Mutex runtime·gclock;

static Workbuf* getpartialorempty(void);
static void putpartial(Workbuf*);
static Workbuf* getempty(Workbuf*);
static Workbuf* getfull(Workbuf*);
static void putempty(Workbuf*);
static void putfull(Workbuf*);
static Workbuf* handoff(Workbuf*);
static void gchelperstart(void);
static void flushallmcaches(void);
static bool scanframe(Stkframe*, void*);
static void scanstack(G*);
static BitVector unrollglobgcprog(byte*, uintptr);
static void scanblock(byte*, uintptr, byte*);
static byte* objectstart(byte*, Markbits*);
static Workbuf* greyobject(byte*, Markbits*, Workbuf*);
static bool inheap(byte*);
static bool shaded(byte*);
static void shade(byte*);
static void slottombits(byte*, Markbits*);
static void atomicxor8(byte*, byte);
static bool ischeckmarked(Markbits*);
static bool ismarked(Markbits*);
static void clearcheckmarkbits(void);
static void clearcheckmarkbitsspan(MSpan*);

void runtime·bgsweep(void);
void runtime·finishsweep_m(void);
static FuncVal bgsweepv = {runtime·bgsweep};

typedef struct WorkData WorkData;
struct WorkData {
	uint64 full;    // lock-free list of full blocks
	uint64 empty;   // lock-free list of empty blocks
	uint64 partial; // lock-free list of partially filled blocks
	byte pad0[CacheLineSize]; // prevents false-sharing between full/empty and nproc/nwait
	uint32 nproc;
	int64 tstart;
	volatile uint32 nwait;
	volatile uint32 ndone;
	Note alldone;
	ParFor* markfor;

	// Copy of mheap.allspans for marker or sweeper.
	MSpan** spans;
	uint32 nspan;
};
WorkData runtime·work;

// To help debug the concurrent GC we remark with the world
// stopped ensuring that any object encountered has its normal
// mark bit set. To do this we use an orthogonal bit
// pattern to indicate the object is marked. The following pattern
// uses the upper two bits in the object's boundary nibble.
// 01: scalar  not marked
// 10: pointer not marked
// 11: pointer     marked
// 00: scalar      marked
// Xoring with 01 will flip the pattern from marked to unmarked and vice versa.
// The higher bit is 1 for pointers and 0 for scalars, whether the object
// is marked or not.
// The first nibble no longer holds the BitsDead pattern indicating that
// there are no more pointers in the object. This information is held
// in the second nibble.

// When marking an object if the bool checkmark is true one uses the above
// encoding, otherwise one uses the bitMarked bit in the lower two bits
// of the nibble.
static bool checkmark = false;
static bool gccheckmarkenable = true;
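
// An editorial sketch of the xor trick described above (not used by the
// collector itself; the real flips happen in docheckmark and
// clearcheckmarkbitsspan below): xoring the two type bits with
// BitsCheckMarkXor (01) toggles 01 (scalar, unmarked) <-> 00 (scalar,
// marked) and 10 (pointer, unmarked) <-> 11 (pointer, marked).
static byte
togglecheckmark(byte tbits)
{
	return tbits ^ BitsCheckMarkXor;
}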

// Is address b in the known heap. If it doesn't have a valid gcmap
// returns false. For example pointers into stacks will return false.
static bool
inheap(byte *b)
{
	MSpan *s;
	pageID k;
	uintptr x;

	if(b == nil || b < runtime·mheap.arena_start || b >= runtime·mheap.arena_used)
		return false;
	// Not a beginning of a block, consult span table to find the block beginning.
	k = (uintptr)b>>PageShift;
	x = k;
	x -= (uintptr)runtime·mheap.arena_start>>PageShift;
	s = runtime·mheap.spans[x];
	if(s == nil || k < s->start || b >= s->limit || s->state != MSpanInUse)
		return false;
	return true;
}

// Given an address in the heap return the relevant byte from the gcmap. This routine
// can be used on addresses to the start of an object or to the interior of an object.
static void
slottombits(byte *obj, Markbits *mbits)
{
	uintptr off;

	off = (uintptr*)((uintptr)obj&~(PtrSize-1)) - (uintptr*)runtime·mheap.arena_start;
	mbits->bitp = runtime·mheap.arena_start - off/wordsPerBitmapByte - 1;
	mbits->shift = (off % wordsPerBitmapByte) * gcBits;
	mbits->xbits = *mbits->bitp;
	mbits->bits = (mbits->xbits >> mbits->shift) & bitMask;
	mbits->tbits = ((mbits->xbits >> mbits->shift) & bitPtrMask) >> 2;
}
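
// Worked example for slottombits (editorial; assumes a 64-bit system so
// PtrSize = 8, with wordsPerBitmapByte = 2 and gcBits = 4): the words at
// arena_start+16 and arena_start+24 have word offsets 2 and 3, so both map
// to the bitmap byte at arena_start-2; offset 2 lands in the low nibble
// (shift 0) and offset 3 in the high nibble (shift 4).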

// b is a pointer into the heap.
// Find the start of the object referred to by b.
// Set mbits to the associated bits from the bit map.
// If b is not a valid heap object return nil and
// undefined values in mbits.
static byte*
objectstart(byte *b, Markbits *mbits)
{
	byte *obj, *p;
	MSpan *s;
	pageID k;
	uintptr x, size, idx;

	obj = (byte*)((uintptr)b&~(PtrSize-1));
	for(;;) {
		slottombits(obj, mbits);
		if((mbits->bits&bitBoundary) == bitBoundary)
			break;

		// Not a beginning of a block, consult span table to find the block beginning.
		k = (uintptr)obj>>PageShift;
		x = k;
		x -= (uintptr)runtime·mheap.arena_start>>PageShift;
		s = runtime·mheap.spans[x];
		if(s == nil || k < s->start || obj >= s->limit || s->state != MSpanInUse){
			if(s != nil && s->state == MSpanStack) {
				return nil; // This is legit.
			}

			// The following ensures that we are rigorous about what data
			// structures hold valid pointers
			if(0) {
				// Still happens sometimes. We don't know why.
				runtime·printf("runtime:objectstart Span weird: obj=%p, k=%p", obj, k);
				if(s == nil)
					runtime·printf(" s=nil\n");
				else
					runtime·printf(" s->start=%p s->limit=%p, s->state=%d\n", s->start*PageSize, s->limit, s->state);
				runtime·throw("objectstart: bad pointer in unexpected span");
			}
			return nil;
		}
		p = (byte*)((uintptr)s->start<<PageShift);
		if(s->sizeclass != 0) {
			size = s->elemsize;
			idx = ((byte*)obj - p)/size;
			p = p+idx*size;
		}
		if(p == obj) {
			runtime·printf("runtime: failed to find block beginning for %p s=%p s->limit=%p\n",
				p, s->start*PageSize, s->limit);
			runtime·throw("failed to find block beginning");
		}
		obj = p;
	}
	// if size(obj.firstfield) < PtrSize, the &obj.secondfield could map to the boundary bit
	// Clear any low bits to get to the start of the object.
	// greyobject depends on this.
	return obj;
}

// Slow for now as we serialize this, since this is on a debug path
// speed is not critical at this point.
static Mutex andlock;
static void
atomicand8(byte *src, byte val)
{
	runtime·lock(&andlock);
	*src = *src&val;
	runtime·unlock(&andlock);
}

// Mark using the checkmark scheme.
static void
docheckmark(Markbits *mbits)
{
	// xor 01 moves 01(scalar unmarked) to 00(scalar marked)
	// and 10(pointer unmarked) to 11(pointer marked)
	if(mbits->tbits == BitsScalar)
		atomicand8(mbits->bitp, ~(byte)(BitsCheckMarkXor<<mbits->shift<<2));
	else if(mbits->tbits == BitsPointer)
		runtime·atomicor8(mbits->bitp, BitsCheckMarkXor<<mbits->shift<<2);

	// reload bits for ischeckmarked
	mbits->xbits = *mbits->bitp;
	mbits->bits = (mbits->xbits >> mbits->shift) & bitMask;
	mbits->tbits = ((mbits->xbits >> mbits->shift) & bitPtrMask) >> 2;
}

// In the default scheme does mbits refer to a marked object.
static bool
ismarked(Markbits *mbits)
{
	if((mbits->bits&bitBoundary) != bitBoundary)
		runtime·throw("ismarked: bits should have boundary bit set");
	return (mbits->bits&bitMarked) == bitMarked;
}

// In the checkmark scheme does mbits refer to a marked object.
static bool
ischeckmarked(Markbits *mbits)
{
	if((mbits->bits&bitBoundary) != bitBoundary)
		runtime·printf("runtime:ischeckmarked: bits should have boundary bit set\n");
	return mbits->tbits == BitsScalarMarked || mbits->tbits == BitsPointerMarked;
}

// When in GCmarktermination phase we allocate black.
void
runtime·gcmarknewobject_m(void)
{
	Markbits mbits;
	byte *obj;

	if(runtime·gcphase != GCmarktermination)
		runtime·throw("marking new object while not in mark termination phase");
	if(checkmark) // The world should be stopped so this should not happen.
		runtime·throw("gcmarknewobject called while doing checkmark");

	obj = g->m->ptrarg[0];
	slottombits((byte*)((uintptr)obj & ~(PtrSize-1)), &mbits);

	if((mbits.bits&bitMarked) != 0)
		return;

	// Each byte of GC bitmap holds info for two words.
	// If the current object is larger than two words, or if the object is one word
	// but the object it shares the byte with is already marked,
	// then all the possible concurrent updates are trying to set the same bit,
	// so we can use a non-atomic update.
	if((mbits.xbits&(bitMask|(bitMask<<gcBits))) != (bitBoundary|(bitBoundary<<gcBits)) || runtime·work.nproc == 1)
		*mbits.bitp = mbits.xbits | (bitMarked<<mbits.shift);
	else
		runtime·atomicor8(mbits.bitp, bitMarked<<mbits.shift);
}
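
// (Editorial note on the non-atomic fast path above: a bitmap byte covers
// two words, so only when both nibbles are exactly bitBoundary - two
// unmarked one-word objects sharing the byte - can a concurrent marker be
// racing to set a different bit in the same byte; that is the one case
// that still requires the atomic or.)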

// obj is the start of an object with mark mbits.
// If it isn't already marked, mark it and enqueue into workbuf.
// Return possibly new workbuf to use.
static Workbuf*
greyobject(byte *obj, Markbits *mbits, Workbuf *wbuf)
{
	// obj should be start of allocation, and so must be at least pointer-aligned.
	if(((uintptr)obj & (PtrSize-1)) != 0)
		runtime·throw("greyobject: obj not pointer-aligned");

	if(checkmark) {
		if(!ismarked(mbits)) {
			MSpan *s;
			pageID k;
			uintptr x, i;

			runtime·printf("runtime:greyobject: checkmarks finds unexpected unmarked object obj=%p, mbits->bits=%x, *mbits->bitp=%x\n", obj, mbits->bits, *mbits->bitp);

			k = (uintptr)obj>>PageShift;
			x = k;
			x -= (uintptr)runtime·mheap.arena_start>>PageShift;
			s = runtime·mheap.spans[x];
			runtime·printf("runtime:greyobject Span: obj=%p, k=%p", obj, k);
			if(s == nil)
				runtime·printf(" s=nil\n");
			else {
				runtime·printf(" s->start=%p s->limit=%p, s->state=%d, s->sizeclass=%d, s->elemsize=%D \n", s->start*PageSize, s->limit, s->state, s->sizeclass, s->elemsize);
				for(i=0; i<s->sizeclass; i++) {
					runtime·printf(" ((uintptr*)obj)[%D]=%p\n", i, ((uintptr*)obj)[i]);
				}
			}
			runtime·throw("checkmark found unmarked object");
		}
		if(ischeckmarked(mbits))
			return wbuf;
		docheckmark(mbits);
		if(!ischeckmarked(mbits)) {
			runtime·printf("mbits xbits=%x bits=%x tbits=%x shift=%d\n", mbits->xbits, mbits->bits, mbits->tbits, mbits->shift);
			runtime·throw("docheckmark and ischeckmarked disagree");
		}
	} else {
		// If marked we have nothing to do.
		if((mbits->bits&bitMarked) != 0)
			return wbuf;

		// Each byte of GC bitmap holds info for two words.
		// If the current object is larger than two words, or if the object is one word
		// but the object it shares the byte with is already marked,
		// then all the possible concurrent updates are trying to set the same bit,
		// so we can use a non-atomic update.
		if((mbits->xbits&(bitMask|(bitMask<<gcBits))) != (bitBoundary|(bitBoundary<<gcBits)) || runtime·work.nproc == 1)
			*mbits->bitp = mbits->xbits | (bitMarked<<mbits->shift);
		else
			runtime·atomicor8(mbits->bitp, bitMarked<<mbits->shift);
	}

	if(!checkmark && (((mbits->xbits>>(mbits->shift+2))&BitsMask) == BitsDead))
		return wbuf; // noscan object

	// Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
	// seems like a nice optimization that can be added back in.
	// There needs to be time between the PREFETCH and the use.
	// Previously we put the obj in an 8 element buffer that is drained at a rate
	// to give the PREFETCH time to do its work.
	// Use of PREFETCHNTA might be more appropriate than PREFETCH

	// If workbuf is full, obtain an empty one.
	if(wbuf->nobj >= nelem(wbuf->obj)) {
		wbuf = getempty(wbuf);
	}

	wbuf->obj[wbuf->nobj] = obj;
	wbuf->nobj++;
	return wbuf;
}

// Scan the object b of size n, adding pointers to wbuf.
// Return possibly new wbuf to use.
// If ptrmask != nil, it specifies where pointers are in b.
// If ptrmask == nil, the GC bitmap should be consulted.
// In this case, n may be an overestimate of the size; the GC bitmap
// must also be used to make sure the scan stops at the end of b.
static Workbuf*
scanobject(byte *b, uintptr n, byte *ptrmask, Workbuf *wbuf)
{
	byte *obj, *arena_start, *arena_used, *ptrbitp;
	uintptr i, j;
	int32 bits;
	Markbits mbits;

	arena_start = (byte*)runtime·mheap.arena_start;
	arena_used = runtime·mheap.arena_used;
	ptrbitp = nil;

	// Find bits of the beginning of the object.
	if(ptrmask == nil) {
		b = objectstart(b, &mbits);
		if(b == nil)
			return wbuf;
		ptrbitp = mbits.bitp; //arena_start - off/wordsPerBitmapByte - 1;
	}
	for(i = 0; i < n; i += PtrSize) {
		// Find bits for this word.
		if(ptrmask != nil) {
			// dense mask (stack or data)
			bits = (ptrmask[(i/PtrSize)/4]>>(((i/PtrSize)%4)*BitsPerPointer))&BitsMask;
		} else {
			// Check if we have reached end of span.
			// n is an overestimate of the size of the object.
			if((((uintptr)b+i)%PageSize) == 0 &&
				runtime·mheap.spans[(b-arena_start)>>PageShift] != runtime·mheap.spans[(b+i-arena_start)>>PageShift])
				break;
			// Consult GC bitmap.
			bits = *ptrbitp;
			if(wordsPerBitmapByte != 2)
				runtime·throw("alg doesn't work for wordsPerBitmapByte != 2");
			j = ((uintptr)b+i)/PtrSize & 1; // j indicates upper nibble or lower nibble
			bits >>= gcBits*j;
			if(i == 0)
				bits &= ~bitBoundary;
			ptrbitp -= j;

			if((bits&bitBoundary) != 0 && i != 0)
				break; // reached beginning of the next object
			bits = (bits&bitPtrMask)>>2; // bits refer to the type bits.

			if(i != 0 && bits == BitsDead) // BitsDead in first nibble not valid during checkmark
				break; // reached no-scan part of the object
		}

		if(bits <= BitsScalar) // BitsScalar ||
				       // BitsDead || // default encoding
				       // BitsScalarMarked // checkmark encoding
			continue;

		if((bits&BitsPointer) != BitsPointer) {
			runtime·printf("gc checkmark=%d, b=%p ptrmask=%p, mbits.bitp=%p, mbits.xbits=%x, bits=%x\n", checkmark, b, ptrmask, mbits.bitp, mbits.xbits, bits);
			runtime·throw("unexpected garbage collection bits");
		}

		obj = *(byte**)(b+i);
		// At this point we have extracted the next potential pointer.
		// Check if it points into heap.
		if(obj == nil || obj < arena_start || obj >= arena_used)
			continue;
		// Mark the object. return some important bits.
		// If we combine the following two routines we don't have to pass mbits or obj around.
		obj = objectstart(obj, &mbits);
		// In the case of the span being MSpan_Stack mbits is useless and will not have
		// the boundary bit set. It does not need to be greyed since it will be
		// scanned using the scan stack mechanism.
		if(obj == nil)
			continue;
		wbuf = greyobject(obj, &mbits, wbuf);
	}
	return wbuf;
}

// scanblock starts by scanning b as scanobject would.
// If the gcphase is GCscan, that's all scanblock does.
// Otherwise it traverses some fraction of the pointers it found in b, recursively.
// As a special case, scanblock(nil, 0, nil) means to scan previously queued work,
// stopping only when no work is left in the system.
static void
scanblock(byte *b, uintptr n, byte *ptrmask)
{
	Workbuf *wbuf;
	bool keepworking;

	wbuf = getpartialorempty();
	if(b != nil) {
		wbuf = scanobject(b, n, ptrmask, wbuf);
		if(runtime·gcphase == GCscan) {
			if(inheap(b) && !ptrmask)
				// b is in heap, we are in GCscan so there should be a ptrmask.
				runtime·throw("scanblock: In GCscan phase and inheap is true.");
			// GCscan only goes one level deep since mark wb not turned on.
			putpartial(wbuf);
			return;
		}
	} else {
		if(runtime·gcphase == GCscan) {
			runtime·throw("scanblock: In GCscan phase but no b passed in.");
		}
	}

	keepworking = b == nil;

	// ptrmask can have 2 possible values:
	// 1. nil - obtain pointer mask from GC bitmap.
	// 2. pointer to a compact mask (for stacks and data).
	for(;;) {
		if(wbuf->nobj == 0) {
			if(!keepworking) {
				putempty(wbuf);
				return;
			}
			// Refill workbuf from global queue.
			wbuf = getfull(wbuf);
			if(wbuf == nil) // nil means out of work barrier reached
				return;

			if(wbuf->nobj <= 0)
				runtime·throw("runtime:scanblock getfull returns empty buffer");
		}

		// If another proc wants a pointer, give it some.
		if(runtime·work.nwait > 0 && wbuf->nobj > 4 && runtime·work.full == 0) {
			wbuf = handoff(wbuf);
		}

		// This might be a good place to add prefetch code...
		// if(wbuf->nobj > 4) {
		//         PREFETCH(wbuf->obj[wbuf->nobj - 3]);
		// }
		--wbuf->nobj;
		b = wbuf->obj[wbuf->nobj];
		wbuf = scanobject(b, runtime·mheap.arena_used - b, nil, wbuf);
	}
}

static void
markroot(ParFor *desc, uint32 i)
{
	FinBlock *fb;
	MSpan *s;
	uint32 spanidx, sg;
	G *gp;
	void *p;
	uint32 status;
	bool restart;

	USED(&desc);
	// Note: if you add a case here, please also update heapdump.c:dumproots.
	switch(i) {
	case RootData:
		scanblock(runtime·data, runtime·edata - runtime·data, runtime·gcdatamask.bytedata);
		break;

	case RootBss:
		scanblock(runtime·bss, runtime·ebss - runtime·bss, runtime·gcbssmask.bytedata);
		break;

	case RootFinalizers:
		for(fb=runtime·allfin; fb; fb=fb->alllink)
			scanblock((byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]), finptrmask);
		break;

	case RootSpans:
		// mark MSpan.specials
		sg = runtime·mheap.sweepgen;
		for(spanidx=0; spanidx<runtime·work.nspan; spanidx++) {
			Special *sp;
			SpecialFinalizer *spf;

			s = runtime·work.spans[spanidx];
			if(s->state != MSpanInUse)
				continue;
			if(!checkmark && s->sweepgen != sg) {
				// sweepgen was updated (+2) during non-checkmark GC pass
				runtime·printf("sweep %d %d\n", s->sweepgen, sg);
				runtime·throw("gc: unswept span");
			}
			for(sp = s->specials; sp != nil; sp = sp->next) {
				if(sp->kind != KindSpecialFinalizer)
					continue;
				// don't mark finalized object, but scan it so we
				// retain everything it points to.
				spf = (SpecialFinalizer*)sp;
				// A finalizer can be set for an inner byte of an object, find object beginning.
				p = (void*)((s->start << PageShift) + spf->special.offset/s->elemsize*s->elemsize);
				if(runtime·gcphase != GCscan)
					scanblock(p, s->elemsize, nil); // Scanned during mark phase
				scanblock((void*)&spf->fn, PtrSize, oneptr);
			}
		}
		break;

	case RootFlushCaches:
		if(runtime·gcphase != GCscan) // Do not flush mcaches during GCscan phase.
			flushallmcaches();
		break;

	default:
		// the rest is scanning goroutine stacks
		if(i - RootCount >= runtime·allglen)
			runtime·throw("markroot: bad index");
		gp = runtime·allg[i - RootCount];
		// remember when we've first observed the G blocked
		// needed only to output in traceback
		status = runtime·readgstatus(gp); // We are not in a scan state
		if((status == Gwaiting || status == Gsyscall) && gp->waitsince == 0)
			gp->waitsince = runtime·work.tstart;
		// Shrink a stack if not much of it is being used but not in the scan phase.
		if(runtime·gcphase != GCscan) // Do not shrink during GCscan phase.
			runtime·shrinkstack(gp);
		if(runtime·readgstatus(gp) == Gdead)
			gp->gcworkdone = true;
		else
			gp->gcworkdone = false;
		restart = runtime·stopg(gp);

		// goroutine will scan its own stack when it stops running.
		// Wait until it has.
		while(runtime·readgstatus(gp) == Grunning && !gp->gcworkdone) {
		}

		// scanstack(gp) is done as part of gcphasework
		// But to make sure we finished we need to make sure that
		// the stack traps have all responded so drop into
		// this while loop until they respond.
		while(!gp->gcworkdone) {
			status = runtime·readgstatus(gp);
			if(status == Gdead) {
				gp->gcworkdone = true; // scan is a noop
				break;
				//do nothing, scan not needed.
			}
			if(status == Gwaiting || status == Grunnable)
				restart = runtime·stopg(gp);
		}
		if(restart)
			runtime·restartg(gp);
		break;
	}
}

// Get an empty work buffer off the work.empty list,
// allocating new buffers as needed.
static Workbuf*
getempty(Workbuf *b)
{
	if(b != nil)
		putfull(b);
	b = nil;

	if(runtime·work.empty)
		b = (Workbuf*)runtime·lfstackpop(&runtime·work.empty);

	if(b && b->nobj != 0) {
		runtime·printf("m%d: getempty: popped b=%p with non-zero b->nobj=%d\n", g->m->id, b, (uint32)b->nobj);
		runtime·throw("getempty: workbuffer not empty, b->nobj not 0");
	}
	if(b == nil)
		b = (Workbuf*)runtime·persistentalloc(sizeof(*b), CacheLineSize, &mstats.gc_sys);
	return b;
}

static void
putempty(Workbuf *b)
{
	if(b->nobj != 0)
		runtime·throw("putempty: b->nobj not 0\n");
	runtime·lfstackpush(&runtime·work.empty, &b->node);
}

// Put a full or partially full workbuf on the full list.
static void
putfull(Workbuf *b)
{
	if(b->nobj <= 0)
		runtime·throw("putfull: b->nobj <= 0\n");
	runtime·lfstackpush(&runtime·work.full, &b->node);
}

// Get a partially empty work buffer;
// if none are available get an empty one.
static Workbuf*
getpartialorempty(void)
{
	Workbuf *b;

	b = (Workbuf*)runtime·lfstackpop(&runtime·work.partial);
	if(b == nil)
		b = getempty(nil);
	return b;
}

static void
putpartial(Workbuf *b)
{
	if(b->nobj == 0)
		runtime·lfstackpush(&runtime·work.empty, &b->node);
	else if(b->nobj < nelem(b->obj))
		runtime·lfstackpush(&runtime·work.partial, &b->node);
	else if(b->nobj == nelem(b->obj))
		runtime·lfstackpush(&runtime·work.full, &b->node);
	else {
		runtime·printf("b=%p, b->nobj=%d, nelem(b->obj)=%d\n", b, (uint32)b->nobj, (uint32)nelem(b->obj));
		runtime·throw("putpartial: bad Workbuf b->nobj");
	}
}

// Get a full work buffer off the work.full or a partially
// filled one off the work.partial list. If nothing is available
// wait until all the other gc helpers have finished and then
// return nil.
// getfull acts as a barrier for work.nproc helpers. As long as one
// gchelper is actively marking objects it
// may create a workbuffer that the other helpers can work on.
// The for loop either exits when a work buffer is found
// or when _all_ of the work.nproc GC helpers are in the loop
// looking for work and thus not capable of creating new work.
// This is in fact the termination condition for the STW mark
// phase.
static Workbuf*
getfull(Workbuf *b)
{
	int32 i;

	if(b != nil)
		putempty(b);

	b = (Workbuf*)runtime·lfstackpop(&runtime·work.full);
	if(b == nil)
		b = (Workbuf*)runtime·lfstackpop(&runtime·work.partial);
	if(b != nil || runtime·work.nproc == 1)
		return b;

	runtime·xadd(&runtime·work.nwait, +1);
	for(i=0;; i++) {
		if(runtime·work.full != 0) {
			runtime·xadd(&runtime·work.nwait, -1);
			b = (Workbuf*)runtime·lfstackpop(&runtime·work.full);
			if(b == nil)
				b = (Workbuf*)runtime·lfstackpop(&runtime·work.partial);
			if(b != nil)
				return b;
			runtime·xadd(&runtime·work.nwait, +1);
		}
		if(runtime·work.nwait == runtime·work.nproc)
			return nil;
		if(i < 10) {
			g->m->gcstats.nprocyield++;
			runtime·procyield(20);
		} else if(i < 20) {
			g->m->gcstats.nosyield++;
			runtime·osyield();
		} else {
			g->m->gcstats.nsleep++;
			runtime·usleep(100);
		}
	}
}
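
// (Editorial note: the barrier above is sound because nwait counts helpers
// that currently hold no work; a helper increments nwait before spinning
// and decrements it while it holds a buffer, so nwait == work.nproc can
// only be observed when no helper can produce new work.)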

static Workbuf*
handoff(Workbuf *b)
{
	int32 n;
	Workbuf *b1;

	// Make new buffer with half of b's pointers.
	b1 = getempty(nil);
	n = b->nobj/2;
	b->nobj -= n;
	b1->nobj = n;
	runtime·memmove(b1->obj, b->obj+b->nobj, n*sizeof b1->obj[0]);
	g->m->gcstats.nhandoff++;
	g->m->gcstats.nhandoffcnt += n;

	// Put b on full list - let first half of b get stolen.
	runtime·lfstackpush(&runtime·work.full, &b->node);
	return b1;
}

BitVector
runtime·stackmapdata(StackMap *stackmap, int32 n)
{
	if(n < 0 || n >= stackmap->n)
		runtime·throw("stackmapdata: index out of range");
	return (BitVector){stackmap->nbit, stackmap->bytedata + n*((stackmap->nbit+31)/32*4)};
}
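
// Worked example (editorial): each stack map entry is padded to a whole
// number of 32-bit words, so for nbit = 33 an entry occupies
// (33+31)/32*4 = 8 bytes and entry n starts at bytedata + n*8.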

// Scan a stack frame: local variables and function arguments/results.
static bool
scanframe(Stkframe *frame, void *unused)
{
	Func *f;
	StackMap *stackmap;
	BitVector bv;
	uintptr size, minsize;
	uintptr targetpc;
	int32 pcdata;

	USED(unused);
	f = frame->fn;
	targetpc = frame->continpc;
	if(targetpc == 0) {
		// Frame is dead.
		return true;
	}
	if(Debug > 1)
		runtime·printf("scanframe %s\n", runtime·funcname(f));
	if(targetpc != f->entry)
		targetpc--;
	pcdata = runtime·pcdatavalue(f, PCDATA_StackMapIndex, targetpc);
	if(pcdata == -1) {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0;
	}

	// Scan local variables if stack frame has been allocated.
	size = frame->varp - frame->sp;
	if(thechar != '6' && thechar != '8')
		minsize = sizeof(uintptr);
	else
		minsize = 0;
	if(size > minsize) {
		stackmap = runtime·funcdata(f, FUNCDATA_LocalsPointerMaps);
		if(stackmap == nil || stackmap->n <= 0) {
			runtime·printf("runtime: frame %s untyped locals %p+%p\n", runtime·funcname(f), (byte*)(frame->varp-size), size);
			runtime·throw("missing stackmap");
		}

		// Locals bitmap information, scan just the pointers in locals.
		if(pcdata < 0 || pcdata >= stackmap->n) {
			// don't know where we are
			runtime·printf("runtime: pcdata is %d and %d locals stack map entries for %s (targetpc=%p)\n",
				pcdata, stackmap->n, runtime·funcname(f), targetpc);
			runtime·throw("scanframe: bad symbol table");
		}
		bv = runtime·stackmapdata(stackmap, pcdata);
		size = (bv.n * PtrSize) / BitsPerPointer;
		scanblock((byte*)(frame->varp - size), bv.n/BitsPerPointer*PtrSize, bv.bytedata);
	}

	// Scan arguments.
	if(frame->arglen > 0) {
		if(frame->argmap != nil)
			bv = *frame->argmap;
		else {
			stackmap = runtime·funcdata(f, FUNCDATA_ArgsPointerMaps);
			if(stackmap == nil || stackmap->n <= 0) {
				runtime·printf("runtime: frame %s untyped args %p+%p\n", runtime·funcname(f), frame->argp, (uintptr)frame->arglen);
				runtime·throw("missing stackmap");
			}
			if(pcdata < 0 || pcdata >= stackmap->n) {
				// don't know where we are
				runtime·printf("runtime: pcdata is %d and %d args stack map entries for %s (targetpc=%p)\n",
					pcdata, stackmap->n, runtime·funcname(f), targetpc);
				runtime·throw("scanframe: bad symbol table");
			}
			bv = runtime·stackmapdata(stackmap, pcdata);
		}
		scanblock((byte*)frame->argp, bv.n/BitsPerPointer*PtrSize, bv.bytedata);
	}
	return true;
}

static void
scanstack(G *gp)
{
	M *mp;
	bool (*fn)(Stkframe*, void*);

	if((runtime·readgstatus(gp)&Gscan) == 0) {
		runtime·printf("runtime: gp=%p, goid=%D, gp->atomicstatus=%d\n", gp, gp->goid, runtime·readgstatus(gp));
		runtime·throw("mark - bad status");
	}

	switch(runtime·readgstatus(gp)&~Gscan) {
	default:
		runtime·printf("runtime: gp=%p, goid=%D, gp->atomicstatus=%d\n", gp, gp->goid, runtime·readgstatus(gp));
		runtime·throw("mark - bad status");
	case Gdead:
		return;
	case Grunning:
		runtime·throw("scanstack: - goroutine not stopped");
	case Grunnable:
	case Gsyscall:
	case Gwaiting:
		break;
	}

	if(gp == g)
		runtime·throw("can't scan our own stack");
	if((mp = gp->m) != nil && mp->helpgc)
		runtime·throw("can't scan gchelper stack");

	fn = scanframe;
	runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, 0x7fffffff, &fn, nil, 0);
	runtime·tracebackdefers(gp, &fn, nil);
}

// If the slot is grey or black return true, if white return false.
// If the slot is not in the known heap and thus does not have a valid GC bitmap then
// it is considered grey. Globals and stacks can hold such slots.
// The slot is grey if its mark bit is set and it is enqueued to be scanned.
// The slot is black if it has already been scanned.
// It is white if it has a valid mark bit and the bit is not set.
static bool
shaded(byte *slot)
{
	Markbits mbits;
	byte *valid;

	if(!inheap(slot)) // non-heap slots considered grey
		return true;

	valid = objectstart(slot, &mbits);
	if(valid == nil)
		return true;

	if(checkmark)
		return ischeckmarked(&mbits);

	return (mbits.bits&bitMarked) != 0;
}

// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
static void
shade(byte *b)
{
	byte *obj;
	Workbuf *wbuf;
	Markbits mbits;

	if(!inheap(b))
		runtime·throw("shade: passed an address not in the heap");

	wbuf = getpartialorempty();
	// Mark the object, return some important bits.
	// If we combine the following two routines we don't have to pass mbits or obj around.
	obj = objectstart(b, &mbits);
	if(obj != nil)
		wbuf = greyobject(obj, &mbits, wbuf); // augments the wbuf
	putpartial(wbuf);
}

// This is the Dijkstra barrier coarsened to always shade the ptr (dst) object.
// The original Dijkstra barrier only shaded ptrs being placed in black slots.
//
// Shade indicates that it has seen a white pointer by adding the referent
// to wbuf as well as marking it.
//
// slot is the destination (dst) in go code
// ptr is the value that goes into the slot (src) in the go code
//
// Dijkstra pointed out that maintaining the no black to white
// pointers means that white to white pointers need not
// be noted by the write barrier. Furthermore if either
// white object dies before it is reached by the
// GC then the object can be collected during this GC cycle
// instead of waiting for the next cycle. Unfortunately the cost of
// ensuring that the object holding the slot doesn't concurrently
// change to black without the mutator noticing seems prohibitive.
//
// Consider the following example where the mutator writes into
// a slot and then loads the slot's mark bit while the GC thread
// writes to the slot's mark bit and then as part of scanning reads
// the slot.
//
// Initially both [slot] and [slotmark] are 0 (nil)
// Mutator thread          GC thread
// st [slot], ptr          st [slotmark], 1
//
// ld r1, [slotmark]       ld r2, [slot]
//
// This is a classic example of independent reads of independent writes,
// aka IRIW. The question is if r1==r2==0 is allowed and for most HW the
// answer is yes without inserting memory barriers between the st and the ld.
// These barriers are expensive so we have decided that we will
// always grey the ptr object regardless of the slot's color.
void
runtime·gcmarkwb_m()
{
	byte *ptr;

	ptr = (byte*)g->m->scalararg[1];

	switch(runtime·gcphase) {
	default:
		runtime·throw("gcphasework in bad gcphase");
	case GCoff:
	case GCquiesce:
	case GCstw:
	case GCsweep:
	case GCscan:
		break;
	case GCmark:
		if(ptr != nil && inheap(ptr))
			shade(ptr);
		break;
	case GCmarktermination:
		if(ptr != nil && inheap(ptr))
			shade(ptr);
		break;
	}
}
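
// An editorial sketch of how a write-barrier entry point uses the pieces
// above (illustrative only; the real mutator path reaches
// runtime·gcmarkwb_m with ptr in g->m->scalararg, and this helper is not
// part of the runtime):
static void
writebarrier_sketch(byte **slot, byte *ptr)
{
	*slot = ptr;        // the mutator's store always happens
	if(ptr != nil && inheap(ptr))
		shade(ptr); // grey the referent regardless of the slot's color
}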

// The gp has been moved to a GC safepoint. GC phase specific
// work is done here.
void
runtime·gcphasework(G *gp)
{
	switch(runtime·gcphase) {
	default:
		runtime·throw("gcphasework in bad gcphase");
	case GCoff:
	case GCquiesce:
	case GCstw:
	case GCsweep:
		// No work.
		break;
	case GCscan:
		// scan the stack, mark the objects, put pointers in work buffers
		// hanging off the P where this is being run.
		scanstack(gp);
		break;
	case GCmark:
		break;
	case GCmarktermination:
		scanstack(gp);
		// All available mark work will be emptied before returning.
		break;
	}
	gp->gcworkdone = true;
}

#pragma dataflag NOPTR
static byte finalizer1[] = {
	// Each Finalizer is 5 words, ptr ptr uintptr ptr ptr.
	// Each byte describes 4 words.
	// Need 4 Finalizers described by 5 bytes before pattern repeats:
	//	ptr ptr uintptr ptr ptr
	//	ptr ptr uintptr ptr ptr
	//	ptr ptr uintptr ptr ptr
	//	ptr ptr uintptr ptr ptr
	// aka
	//	ptr ptr uintptr ptr
	//	ptr ptr ptr uintptr
	//	ptr ptr ptr ptr
	//	uintptr ptr ptr ptr
	//	ptr uintptr ptr ptr
	// Assumptions about Finalizer layout checked below.
	BitsPointer | BitsPointer<<2 | BitsScalar<<4 | BitsPointer<<6,
	BitsPointer | BitsPointer<<2 | BitsPointer<<4 | BitsScalar<<6,
	BitsPointer | BitsPointer<<2 | BitsPointer<<4 | BitsPointer<<6,
	BitsScalar | BitsPointer<<2 | BitsPointer<<4 | BitsPointer<<6,
	BitsPointer | BitsScalar<<2 | BitsPointer<<4 | BitsPointer<<6,
};
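
// (Editorial note: 4 Finalizers of 5 words each is 20 words; at 4 words per
// mask byte that is exactly 5 bytes, which is why the pattern above repeats
// with period 5.)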

void
runtime·queuefinalizer(byte *p, FuncVal *fn, uintptr nret, Type *fint, PtrType *ot)
{
	FinBlock *block;
	Finalizer *f;
	int32 i;

	runtime·lock(&runtime·finlock);
	if(runtime·finq == nil || runtime·finq->cnt == runtime·finq->cap) {
		if(runtime·finc == nil) {
			runtime·finc = runtime·persistentalloc(FinBlockSize, 0, &mstats.gc_sys);
			runtime·finc->cap = (FinBlockSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
			runtime·finc->alllink = runtime·allfin;
			runtime·allfin = runtime·finc;
			if(finptrmask[0] == 0) {
				// Build pointer mask for Finalizer array in block.
				// Check assumptions made in finalizer1 array above.
				if(sizeof(Finalizer) != 5*PtrSize ||
					offsetof(Finalizer, fn) != 0 ||
					offsetof(Finalizer, arg) != PtrSize ||
					offsetof(Finalizer, nret) != 2*PtrSize ||
					offsetof(Finalizer, fint) != 3*PtrSize ||
					offsetof(Finalizer, ot) != 4*PtrSize ||
					BitsPerPointer != 2) {
					runtime·throw("finalizer out of sync");
				}
				for(i=0; i<nelem(finptrmask); i++)
					finptrmask[i] = finalizer1[i%nelem(finalizer1)];
			}
		}
		block = runtime·finc;
		runtime·finc = block->next;
		block->next = runtime·finq;
		runtime·finq = block;
	}
	f = &runtime·finq->fin[runtime·finq->cnt];
	runtime·finq->cnt++;
	f->fn = fn;
	f->nret = nret;
	f->fint = fint;
	f->ot = ot;
	f->arg = p;
	runtime·fingwake = true;
	runtime·unlock(&runtime·finlock);
}

void
runtime·iterate_finq(void (*callback)(FuncVal*, byte*, uintptr, Type*, PtrType*))
{
	FinBlock *fb;
	Finalizer *f;
	uintptr i;

	for(fb = runtime·allfin; fb; fb = fb->alllink) {
		for(i = 0; i < fb->cnt; i++) {
			f = &fb->fin[i];
			callback(f->fn, f->arg, f->nret, f->fint, f->ot);
		}
	}
}

// Returns only when span s has been swept.
void
runtime·MSpan_EnsureSwept(MSpan *s)
{
	uint32 sg;

	// Caller must disable preemption.
	// Otherwise when this function returns the span can become unswept again
	// (if GC is triggered on another goroutine).
	if(g->m->locks == 0 && g->m->mallocing == 0 && g != g->m->g0)
		runtime·throw("MSpan_EnsureSwept: m is not locked");

	sg = runtime·mheap.sweepgen;
	if(runtime·atomicload(&s->sweepgen) == sg)
		return;
	// The caller must be sure that the span is a MSpanInUse span.
	if(runtime·cas(&s->sweepgen, sg-2, sg-1)) {
		runtime·MSpan_Sweep(s, false);
		return;
	}
	// unfortunate condition, and we don't have efficient means to wait
	while(runtime·atomicload(&s->sweepgen) != sg)
		runtime·osyield();
}

// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to heap.
// If preserve=true, don't return it to heap nor relink in MCentral lists;
// caller takes care of it.
bool
runtime·MSpan_Sweep(MSpan *s, bool preserve)
{
	int32 cl, n, npages, nfree;
	uintptr size, off, step;
	uint32 sweepgen;
	byte *p, *bitp, shift, xbits, bits;
	MCache *c;
	byte *arena_start;
	MLink head, *end, *link;
	Special *special, **specialp, *y;
	bool res, sweepgenset;

	if(checkmark)
		runtime·throw("MSpan_Sweep: checkmark only runs in STW and after the sweep.");

	// It's critical that we enter this function with preemption disabled,
	// GC must not start while we are in the middle of this function.
	if(g->m->locks == 0 && g->m->mallocing == 0 && g != g->m->g0)
		runtime·throw("MSpan_Sweep: m is not locked");
	sweepgen = runtime·mheap.sweepgen;
	if(s->state != MSpanInUse || s->sweepgen != sweepgen-1) {
		runtime·printf("MSpan_Sweep: state=%d sweepgen=%d mheap.sweepgen=%d\n",
			s->state, s->sweepgen, sweepgen);
		runtime·throw("MSpan_Sweep: bad span state");
	}
	arena_start = runtime·mheap.arena_start;
	cl = s->sizeclass;
	size = s->elemsize;
	if(cl == 0)
		n = 1;
	else {
		// Chunk full of small blocks.
		npages = runtime·class_to_allocnpages[cl];
		n = (npages << PageShift) / size;
	}
	res = false;
	nfree = 0;
	end = &head;
	c = g->m->mcache;
	sweepgenset = false;

	// Mark any free objects in this span so we don't collect them.
	for(link = s->freelist; link != nil; link = link->next) {
		off = (uintptr*)link - (uintptr*)arena_start;
		bitp = arena_start - off/wordsPerBitmapByte - 1;
		shift = (off % wordsPerBitmapByte) * gcBits;
		*bitp |= bitMarked<<shift;
	}

	// Unlink & free special records for any objects we're about to free.
	specialp = &s->specials;
	special = *specialp;
	while(special != nil) {
		// A finalizer can be set for an inner byte of an object, find object beginning.
		p = (byte*)(s->start << PageShift) + special->offset/size*size;
		off = (uintptr*)p - (uintptr*)arena_start;
		bitp = arena_start - off/wordsPerBitmapByte - 1;
		shift = (off % wordsPerBitmapByte) * gcBits;
		bits = (*bitp>>shift) & bitMask;
		if((bits&bitMarked) == 0) {
			// Find the exact byte for which the special was setup
			// (as opposed to object beginning).
			p = (byte*)(s->start << PageShift) + special->offset;
			// about to free object: splice out special record
			y = special;
			special = special->next;
			*specialp = special;
			if(!runtime·freespecial(y, p, size, false)) {
				// stop freeing of object if it has a finalizer
				*bitp |= bitMarked << shift;
			}
		} else {
			// object is still live: keep special record
			specialp = &special->next;
			special = *specialp;
		}
	}

	// Sweep through n objects of given size starting at p.
	// This thread owns the span now, so it can manipulate
	// the block bitmap without atomic operations.
	p = (byte*)(s->start << PageShift);
	// Find bits for the beginning of the span.
	off = (uintptr*)p - (uintptr*)arena_start;
	bitp = arena_start - off/wordsPerBitmapByte - 1;
	shift = 0;
	step = size/(PtrSize*wordsPerBitmapByte);
	// Rewind to the previous quadruple as we move to the next
	// in the beginning of the loop.
	bitp += step;
	if(step == 0) {
		// 8-byte objects.
		bitp++;
		shift = gcBits;
	}
	for(; n > 0; n--, p += size) {
		bitp -= step;
		if(step == 0) {
			if(shift != 0)
				bitp--;
			shift = gcBits - shift;
		}

		xbits = *bitp;
		bits = (xbits>>shift) & bitMask;

		// Allocated and marked object, reset bits to allocated.
		if((bits&bitMarked) != 0) {
			*bitp &= ~(bitMarked<<shift);
			continue;
		}
		// At this point we know that we are looking at garbage object
		// that needs to be collected.
		if(runtime·debug.allocfreetrace)
			runtime·tracefree(p, size);
		// Reset to allocated+noscan.
		*bitp = (xbits & ~((bitMarked|(BitsMask<<2))<<shift)) | ((uintptr)BitsDead<<(shift+2));
		if(cl == 0) {
			// Free large span.
			if(preserve)
				runtime·throw("can't preserve large span");
			runtime·unmarkspan(p, s->npages<<PageShift);
			s->needzero = 1;
			// important to set sweepgen before returning it to heap
			runtime·atomicstore(&s->sweepgen, sweepgen);
			sweepgenset = true;
			// NOTE(rsc,dvyukov): The original implementation of efence
			// in CL 22060046 used SysFree instead of SysFault, so that
			// the operating system would eventually give the memory
			// back to us again, so that an efence program could run
			// longer without running out of memory. Unfortunately,
			// calling SysFree here without any kind of adjustment of the
			// heap data structures means that when the memory does
			// come back to us, we have the wrong metadata for it, either in
			// the MSpan structures or in the garbage collection bitmap.
			// Using SysFault here means that the program will run out of
			// memory fairly quickly in efence mode, but at least it won't
			// have mysterious crashes due to confused memory reuse.
			// It should be possible to switch back to SysFree if we also
			// implement and then call some kind of MHeap_DeleteSpan.
			if(runtime·debug.efence) {
				s->limit = nil; // prevent mlookup from finding this span
				runtime·SysFault(p, size);
			} else
				runtime·MHeap_Free(&runtime·mheap, s, 1);
			c->local_nlargefree++;
			c->local_largefree += size;
			runtime·xadd64(&mstats.next_gc, -(uint64)(size * (runtime·gcpercent + 100)/100));
			res = true;
		} else {
			// Free small object.
			if(size > 2*sizeof(uintptr))
				((uintptr*)p)[1] = (uintptr)0xdeaddeaddeaddeadll; // mark as "needs to be zeroed"
			else if(size > sizeof(uintptr))
				((uintptr*)p)[1] = 0;

			end->next = (MLink*)p;
			end = (MLink*)p;
			nfree++;
		}
	}

	// We need to set s->sweepgen = h->sweepgen only when all blocks are swept,
	// because of the potential for a concurrent free/SetFinalizer.
	// But we need to set it before we make the span available for allocation
	// (return it to heap or mcentral), because allocation code assumes that a
	// span is already swept if available for allocation.
	if(!sweepgenset && nfree == 0) {
		// The span must be in our exclusive ownership until we update sweepgen,
		// check for potential races.
		if(s->state != MSpanInUse || s->sweepgen != sweepgen-1) {
			runtime·printf("MSpan_Sweep: state=%d sweepgen=%d mheap.sweepgen=%d\n",
				s->state, s->sweepgen, sweepgen);
			runtime·throw("MSpan_Sweep: bad span state after sweep");
		}
		runtime·atomicstore(&s->sweepgen, sweepgen);
	}
	if(nfree > 0) {
		c->local_nsmallfree[cl] += nfree;
		c->local_cachealloc -= nfree * size;
		runtime·xadd64(&mstats.next_gc, -(uint64)(nfree * size * (runtime·gcpercent + 100)/100));
		res = runtime·MCentral_FreeSpan(&runtime·mheap.central[cl].mcentral, s, nfree, head.next, end, preserve);
		// MCentral_FreeSpan updates sweepgen
	}
	return res;
}

// State of background runtime·sweep.
// Protected by runtime·gclock.
typedef struct SweepData SweepData;
struct SweepData
{
	G*	g;
	bool	parked;

	uint32	spanidx;	// background sweeper position

	uint32	nbgsweep;
	uint32	npausesweep;
};
SweepData runtime·sweep;

// sweeps one span
// returns number of pages returned to heap, or -1 if there is nothing to sweep
uintptr
runtime·sweepone(void)
{
	MSpan *s;
	uint32 idx, sg;
	uintptr npages;

	// increment locks to ensure that the goroutine is not preempted
	// in the middle of sweep thus leaving the span in an inconsistent state for next GC
	g->m->locks++;
	sg = runtime·mheap.sweepgen;
	for(;;) {
		idx = runtime·xadd(&runtime·sweep.spanidx, 1) - 1;
		if(idx >= runtime·work.nspan) {
			runtime·mheap.sweepdone = true;
			g->m->locks--;
			return -1;
		}
		s = runtime·work.spans[idx];
		if(s->state != MSpanInUse) {
			s->sweepgen = sg;
			continue;
		}
		if(s->sweepgen != sg-2 || !runtime·cas(&s->sweepgen, sg-2, sg-1))
			continue;
		npages = s->npages;
		if(!runtime·MSpan_Sweep(s, false))
			npages = 0;
		g->m->locks--;
		return npages;
	}
}

static void
sweepone_m(void)
{
	g->m->scalararg[0] = runtime·sweepone();
}

#pragma textflag NOSPLIT
uintptr
runtime·gosweepone(void)
{
	void (*fn)(void);

	fn = sweepone_m;
	runtime·onM(&fn);
	return g->m->scalararg[0];
}

#pragma textflag NOSPLIT
bool
runtime·gosweepdone(void)
{
	return runtime·mheap.sweepdone;
}

void
runtime·gchelper(void)
{
	uint32 nproc;

	g->m->traceback = 2;
	gchelperstart();

	// parallel mark for over GC roots
	runtime·parfordo(runtime·work.markfor);
	if(runtime·gcphase != GCscan)
		scanblock(nil, 0, nil); // blocks in getfull
	nproc = runtime·work.nproc; // work.nproc can change right after we increment work.ndone
	if(runtime·xadd(&runtime·work.ndone, +1) == nproc-1)
		runtime·notewakeup(&runtime·work.alldone);
	g->m->traceback = 0;
}

static void
cachestats(void)
{
	MCache *c;
	P *p, **pp;

	for(pp=runtime·allp; p=*pp; pp++) {
		c = p->mcache;
		if(c==nil)
			continue;
		runtime·purgecachedstats(c);
	}
}

static void
flushallmcaches(void)
{
	P *p, **pp;
	MCache *c;

	// Flush MCache's to MCentral.
	for(pp=runtime·allp; p=*pp; pp++) {
		c = p->mcache;
		if(c==nil)
			continue;
		runtime·MCache_ReleaseAll(c);
		runtime·stackcache_clear(c);
	}
}

static void
flushallmcaches_m(G *gp)
{
	flushallmcaches();
	runtime·gogo(&gp->sched);
}

void
runtime·updatememstats(GCStats *stats)
{
	M *mp;
	MSpan *s;
	int32 i;
	uint64 smallfree, *src, *dst;
	void (*fn)(G*);

	if(stats)
		runtime·memclr((byte*)stats, sizeof(*stats));
	for(mp=runtime·allm; mp; mp=mp->alllink) {
		if(stats) {
			src = (uint64*)&mp->gcstats;
			dst = (uint64*)stats;
			for(i=0; i<sizeof(*stats)/sizeof(uint64); i++)
				dst[i] += src[i];
			runtime·memclr((byte*)&mp->gcstats, sizeof(mp->gcstats));
		}
	}
	mstats.mcache_inuse = runtime·mheap.cachealloc.inuse;
	mstats.mspan_inuse = runtime·mheap.spanalloc.inuse;
	mstats.sys = mstats.heap_sys + mstats.stacks_sys + mstats.mspan_sys +
		mstats.mcache_sys + mstats.buckhash_sys + mstats.gc_sys + mstats.other_sys;

	// Calculate memory allocator stats.
	// During program execution we only count number of frees and amount of freed memory.
	// Current number of alive objects in the heap and amount of alive heap memory
	// are calculated by scanning all spans.
	// Total number of mallocs is calculated as number of frees plus number of alive objects.
	// Similarly, total amount of allocated memory is calculated as amount of freed memory
	// plus amount of alive heap memory.
	mstats.alloc = 0;
	mstats.total_alloc = 0;
	mstats.nmalloc = 0;
	mstats.nfree = 0;
	for(i = 0; i < nelem(mstats.by_size); i++) {
		mstats.by_size[i].nmalloc = 0;
		mstats.by_size[i].nfree = 0;
	}

	// Flush MCache's to MCentral.
	if(g == g->m->g0)
		flushallmcaches();
	else {
		fn = flushallmcaches_m;
		runtime·mcall(&fn);
	}

	// Aggregate local stats.
	cachestats();

	// Scan all spans and count number of alive objects.
	runtime·lock(&runtime·mheap.lock);
	for(i = 0; i < runtime·mheap.nspan; i++) {
		s = runtime·mheap.allspans[i];
		if(s->state != MSpanInUse)
			continue;
		if(s->sizeclass == 0) {
			mstats.nmalloc++;
			mstats.alloc += s->elemsize;
		} else {
			mstats.nmalloc += s->ref;
			mstats.by_size[s->sizeclass].nmalloc += s->ref;
			mstats.alloc += s->ref*s->elemsize;
		}
	}
	runtime·unlock(&runtime·mheap.lock);

	// Aggregate by size class.
	smallfree = 0;
	mstats.nfree = runtime·mheap.nlargefree;
	for(i = 0; i < nelem(mstats.by_size); i++) {
		mstats.nfree += runtime·mheap.nsmallfree[i];
		mstats.by_size[i].nfree = runtime·mheap.nsmallfree[i];
		mstats.by_size[i].nmalloc += runtime·mheap.nsmallfree[i];
		smallfree += runtime·mheap.nsmallfree[i] * runtime·class_to_size[i];
	}
	mstats.nfree += mstats.tinyallocs;
	mstats.nmalloc += mstats.nfree;

	// Calculate derived stats.
	mstats.total_alloc = mstats.alloc + runtime·mheap.largefree + smallfree;
	mstats.heap_alloc = mstats.alloc;
	mstats.heap_objects = mstats.nmalloc - mstats.nfree;
}

// Structure of arguments passed to function gc().
// This allows the arguments to be passed via runtime·mcall.
struct gc_args
{
	int64 start_time; // start time of GC in ns (just before stoptheworld)
	bool  eagersweep;
};

static void gc(struct gc_args *args);

int32
runtime·readgogc(void)
{
	byte *p;

	p = runtime·getenv("GOGC");
	if(p == nil || p[0] == '\0')
		return 100;
	if(runtime·strcmp(p, (byte*)"off") == 0)
		return -1;
	return runtime·atoi(p);
}

void
runtime·gcinit(void)
{
	if(sizeof(Workbuf) != WorkbufSize)
		runtime·throw("runtime: size of Workbuf is suboptimal");

	runtime·work.markfor = runtime·parforalloc(MaxGcproc);
	runtime·gcpercent = runtime·readgogc();
	runtime·gcdatamask = unrollglobgcprog(runtime·gcdata, runtime·edata - runtime·data);
	runtime·gcbssmask = unrollglobgcprog(runtime·gcbss, runtime·ebss - runtime·bss);
}

// Called from malloc.go using onM, stopping and starting the world handled in caller.
void
runtime·gc_m(void)
{
	struct gc_args a;
	G *gp;

	gp = g->m->curg;
	runtime·casgstatus(gp, Grunning, Gwaiting);
	gp->waitreason = runtime·gostringnocopy((byte*)"garbage collection");

	a.start_time = (uint64)(g->m->scalararg[0]) | ((uint64)(g->m->scalararg[1]) << 32);
	a.eagersweep = g->m->scalararg[2];
	gc(&a);

	runtime·casgstatus(gp, Gwaiting, Grunning);
}

// Similar to clearcheckmarkbits but works on a single span.
// It performs two tasks.
// 1. When used before the checkmark phase it converts BitsDead (00) to BitsScalar (01)
//    for nibbles with the BoundaryBit set.
// 2. When used after the checkmark phase it converts BitsPointerMarked (11) to BitsPointer (10) and
//    BitsScalarMarked (00) to BitsScalar (01), thus clearing the checkmark mark encoding.
// For the second case it is possible to restore the BitsDead pattern but since
// clearmark is a debug tool performance has a lower priority than simplicity.
// The span is MSpanInUse and the world is stopped.
static void
clearcheckmarkbitsspan(MSpan *s)
{
	int32 cl, n, npages, i;
	uintptr size, off, step;
	byte *p, *bitp, *arena_start, b;

	if(s->state != MSpanInUse) {
		runtime·printf("runtime:clearcheckmarkbitsspan: state=%d\n",
			s->state);
		runtime·throw("clearcheckmarkbitsspan: bad span state");
	}
	arena_start = runtime·mheap.arena_start;
	cl = s->sizeclass;
	size = s->elemsize;
	if(cl == 0)
		n = 1;
	else {
		// Chunk full of small blocks.
		npages = runtime·class_to_allocnpages[cl];
		n = (npages << PageShift) / size;
	}

	// MSpan_Sweep has similar code but instead of overloading and
	// complicating that routine we do a simpler walk here.
	// Sweep through n objects of given size starting at p.
	// This thread owns the span now, so it can manipulate
	// the block bitmap without atomic operations.
	p = (byte*)(s->start << PageShift);
	// Find bits for the beginning of the span.
	off = (uintptr*)p - (uintptr*)arena_start;
	bitp = arena_start - off/wordsPerBitmapByte - 1;
	step = size/(PtrSize*wordsPerBitmapByte);

	// The type bit values are:
	//	00 - BitsDead, for us BitsScalarMarked
	//	01 - BitsScalar
	//	10 - BitsPointer
	//	11 - unused, for us BitsPointerMarked
	//
	// When called to prepare for the checkmark phase (checkmark==1),
	// we change BitsDead to BitsScalar, so that there are no BitsScalarMarked
	// type bits anywhere.
	//
	// The checkmark phase marks by changing BitsScalar to BitsScalarMarked
	// and BitsPointer to BitsPointerMarked.
	//
	// When called to clean up after the checkmark phase (checkmark==0),
	// we unmark by changing BitsScalarMarked back to BitsScalar and
	// BitsPointerMarked back to BitsPointer.
	//
	// There are two problems with the scheme as just described.
	// First, the setup rewrites BitsDead to BitsScalar, but the type bits
	// following a BitsDead are uninitialized and must not be used.
	// Second, objects that are free are expected to have their type
	// bits zeroed (BitsDead), so in the cleanup we need to restore
	// any BitsDeads that were there originally.
	//
	// In a one-word object (8-byte allocation on 64-bit system),
	// there is no difference between BitsScalar and BitsDead, because
	// neither is a pointer and there are no more words in the object,
	// so using BitsScalar during the checkmark is safe and mapping
	// both back to BitsDead during cleanup is also safe.
	//
	// In a larger object, we need to be more careful. During setup,
	// if the type of the first word is BitsDead, we change it to BitsScalar
	// (as we must) but also initialize the type of the second
	// word to BitsDead, so that a scan during the checkmark phase
	// will still stop before seeing the uninitialized type bits in the
	// rest of the object. The sequence 'BitsScalar BitsDead' never
	// happens in real type bitmaps - BitsDead is always as early
	// as possible, so immediately after the last BitsPointer.
	// During cleanup, if we see a BitsScalar, we can check to see if it
	// is followed by BitsDead. If so, it was originally BitsDead and
	// we can change it back.
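	//
	// In short (editorial summary of the transitions above): setup turns
	// 00 (BitsDead) into 01 (BitsScalar); the checkmark phase xors with 01,
	// turning 01 into 00 (scalar marked) and 10 into 11 (pointer marked);
	// cleanup xors marked values back and restores 00 (BitsDead) where the
	// neighboring nibbles prove the word was dead.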

	if(size == PtrSize) {
		// updating top and bottom nibbles, all boundaries
		for(i=0; i<n/2; i++, bitp--) {
			if((*bitp & bitBoundary) != bitBoundary)
				runtime·throw("missing bitBoundary");
			b = (*bitp & bitPtrMask)>>2;
			if(!checkmark && (b == BitsScalar || b == BitsScalarMarked))
				*bitp &= ~0x0c; // convert to BitsDead
			else if(b == BitsScalarMarked || b == BitsPointerMarked)
				*bitp ^= BitsCheckMarkXor<<2;

			if(((*bitp>>gcBits) & bitBoundary) != bitBoundary)
				runtime·throw("missing bitBoundary");
			b = ((*bitp>>gcBits) & bitPtrMask)>>2;
			if(!checkmark && (b == BitsScalar || b == BitsScalarMarked))
				*bitp &= ~0xc0; // convert to BitsDead
			else if(b == BitsScalarMarked || b == BitsPointerMarked)
				*bitp ^= BitsCheckMarkXor<<(2+gcBits);
		}
	} else {
		// updating bottom nibble for first word of each object
		for(i=0; i<n; i++, bitp -= step) {
			if((*bitp & bitBoundary) != bitBoundary)
				runtime·throw("missing bitBoundary");
			b = (*bitp & bitPtrMask)>>2;

			if(checkmark && b == BitsDead) {
				// move BitsDead into second word.
				// set bits to BitsScalar in preparation for checkmark phase.
				*bitp &= ~0xc0;
				*bitp |= BitsScalar<<2;
			} else if(!checkmark && (b == BitsScalar || b == BitsScalarMarked) && (*bitp & 0xc0) == 0) {
				// Cleaning up after checkmark phase.
				// First word is scalar or dead (we forgot)
				// and second word is dead.
				// First word might as well be dead too.
				*bitp &= ~0x0c;
			} else if(b == BitsScalarMarked || b == BitsPointerMarked)
				*bitp ^= BitsCheckMarkXor<<2;
		}
	}
}

// clearcheckmarkbits performs two tasks.
// 1. When used before the checkmark phase it converts BitsDead (00) to BitsScalar (01)
//    for nibbles with the BoundaryBit set.
// 2. When used after the checkmark phase it converts BitsPointerMarked (11) to BitsPointer (10) and
//    BitsScalarMarked (00) to BitsScalar (01), thus clearing the checkmark mark encoding.
// This is a bit expensive but preserves the BitsDead encoding during the normal marking.
// BitsDead remains valid for every nibble except the ones with BitsBoundary set.
static void
clearcheckmarkbits(void)
{
	uint32 idx;
	MSpan *s;

	for(idx=0; idx<runtime·work.nspan; idx++) {
		s = runtime·work.spans[idx];
		if(s->state == MSpanInUse) {
			clearcheckmarkbitsspan(s);
		}
	}
}
// Called from malloc.go using onM.
// The world is stopped. Rerun the scan and mark phases
// using the bitMarkedCheck bit instead of the
// bitMarked bit. If marking encounters an object whose
// bitMarked bit is not set, we throw.
void
runtime·gccheckmark_m(void)
{
	if(!gccheckmarkenable)
		return;

	if(checkmark)
		runtime·throw("gccheckmark_m, entered with checkmark already true.");

	checkmark = true;
	clearcheckmarkbits(); // Converts BitsDead to BitsScalar.
	runtime·gc_m(); // turns off checkmark
	// Work done; fix up the GC bitmap to remove the checkmark bits.
	clearcheckmarkbits();
}
// gccheckmarkenable is initially false.
void
runtime·gccheckmarkenable_m(void)
{
	gccheckmarkenable = true;
}

void
runtime·gccheckmarkdisable_m(void)
{
	gccheckmarkenable = false;
}
void
runtime·finishsweep_m(void)
{
	uint32 i, sg;
	MSpan *s;

	// The world is stopped so we should be able to complete the sweeps
	// quickly.
	while(runtime·sweepone() != -1)
		runtime·sweep.npausesweep++;

	// There may be some other spans being swept concurrently that
	// we need to wait for. If finishsweep_m is done with the world stopped
	// this code is not required.
	sg = runtime·mheap.sweepgen;
	for(i=0; i<runtime·work.nspan; i++) {
		s = runtime·work.spans[i];
		if(s->sweepgen == sg) {
			continue; // span is already swept
		}
		if(s->state != MSpanInUse) // Span is not part of the GCed heap so no need to ensure it is swept.
			continue;
		runtime·MSpan_EnsureSwept(s);
	}
}
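// A usage note on the loops above (our reading of sweepone, not a new
// guarantee): runtime·sweepone sweeps at most one span, returning the number
// of pages it recovered, or -1 once there is nothing left to sweep, so the
// while loop drains the entire sweep queue before the per-span wait begins.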
// Scan all of the stacks, greying (or graying if in America) the referents
// but not blackening them since the mark write barrier isn't installed.
void
runtime·gcscan_m(void)
{
	uint32 i, allglen, oldphase;
	G *gp, *mastergp, **allg;

	// Grab the g that called us and potentially allow rescheduling.
	// This allows it to be scanned like other goroutines.
	mastergp = g->m->curg;

	runtime·casgstatus(mastergp, Grunning, Gwaiting);
	mastergp->waitreason = runtime·gostringnocopy((byte*)"garbage collection scan");

	// Span sweeping has been done by finishsweep_m.
	// Long term we will want to make this goroutine runnable
	// by placing it onto a scanenqueue state and then calling
	// runtime·restartg(mastergp) to make it Grunnable.
	// At the bottom we will want to return this p back to the scheduler.

	oldphase = runtime·gcphase;

	runtime·lock(&runtime·allglock);
	allglen = runtime·allglen;
	allg = runtime·allg;
	// Prepare flag indicating that the scan has not been completed.
	for(i = 0; i < allglen; i++) {
		gp = allg[i];
		gp->gcworkdone = false; // set to true in gcphasework
	}
	runtime·unlock(&runtime·allglock);

	runtime·work.nwait = 0;
	runtime·work.ndone = 0;
	runtime·work.nproc = 1; // For now do not do this in parallel.
	runtime·gcphase = GCscan;
	// ackgcphase is not needed since we are not scanning running goroutines.
	runtime·parforsetup(runtime·work.markfor, runtime·work.nproc, RootCount + allglen, nil, false, markroot);
	runtime·parfordo(runtime·work.markfor);

	runtime·lock(&runtime·allglock);
	allglen = runtime·allglen;
	allg = runtime·allg;
	// Check that gc work is done.
	for(i = 0; i < allglen; i++) {
		gp = allg[i];
		if(!gp->gcworkdone) {
			runtime·throw("scan missed a g");
		}
	}
	runtime·unlock(&runtime·allglock);

	runtime·gcphase = oldphase;
	runtime·casgstatus(mastergp, Gwaiting, Grunning);
	// Let the g that called us continue to run.
}
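// A note on the parfor above (a reading of markroot's indexing, not new
// behavior): the work items are numbered 0..RootCount+allglen-1; markroot
// treats indices below RootCount as global roots (data, bss, finalizers,
// spans, ...) and index RootCount+i as the stack of goroutine i.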
// Mark all objects that are known about.
void
runtime·gcmark_m(void)
{
	scanblock(nil, 0, nil);
}
// For now this must be bracketed with a stoptheworld and a starttheworld to ensure
// all goroutines see the new barrier.
void
runtime·gcinstallmarkwb_m(void)
{
	runtime·gcphase = GCmark;
}
// For now this must be bracketed with a stoptheworld and a starttheworld to ensure
// all goroutines see the new barrier.
void
runtime·gcinstalloffwb_m(void)
{
	runtime·gcphase = GCoff;
}
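// A sketch of that bracketing (an illustrative caller, not code from this
// file; the exact sequence around stoptheworld is an assumption):
//
//	runtime·semacquire(&runtime·worldsema, false);
//	g->m->gcing = 1;
//	runtime·stoptheworld();
//	runtime·gcinstallmarkwb_m(); // every P now observes gcphase == GCmark
//	g->m->gcing = 0;
//	runtime·semrelease(&runtime·worldsema);
//	runtime·starttheworld();
//
// The same bracketing applies to gcinstalloffwb_m when turning the write
// barrier back off.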
static void
gc(struct gc_args *args)
{
	int64 t0, t1, t2, t3, t4;
	uint64 heap0, heap1, obj;
	GCStats stats;
	uint32 oldphase;
	uintptr i;
	G *gp;

	if(runtime·debug.allocfreetrace)
		runtime·tracegc();

	g->m->traceback = 2;
	t0 = args->start_time;
	runtime·work.tstart = args->start_time;

	t1 = 0;
	if(runtime·debug.gctrace)
		t1 = runtime·nanotime();

	if(!checkmark)
		runtime·finishsweep_m(); // skip during checkmark debug phase.

	// Cache runtime·mheap.allspans in work.spans to avoid conflicts with
	// resizing/freeing allspans.
	// New spans can be created while GC progresses, but they are not garbage for
	// this GC:
	// - new stack spans can be created even while the world is stopped.
	// - new malloc spans can be created during the concurrent sweep.
	// Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
	runtime·lock(&runtime·mheap.lock);
	// Free the old cached sweep array if necessary.
	if(runtime·work.spans != nil && runtime·work.spans != runtime·mheap.allspans)
		runtime·SysFree(runtime·work.spans, runtime·work.nspan*sizeof(runtime·work.spans[0]), &mstats.other_sys);
	// Cache the current array for marking.
	runtime·mheap.gcspans = runtime·mheap.allspans;
	runtime·work.spans = runtime·mheap.allspans;
	runtime·work.nspan = runtime·mheap.nspan;
	runtime·unlock(&runtime·mheap.lock);

	oldphase = runtime·gcphase;

	runtime·work.nwait = 0;
	runtime·work.ndone = 0;
	runtime·work.nproc = runtime·gcprocs();
	runtime·gcphase = GCmarktermination;

	// World is stopped so allglen will not change.
	for(i = 0; i < runtime·allglen; i++) {
		gp = runtime·allg[i];
		gp->gcworkdone = false; // set to true in gcphasework
	}

	runtime·parforsetup(runtime·work.markfor, runtime·work.nproc, RootCount + runtime·allglen, nil, false, markroot);
	if(runtime·work.nproc > 1) {
		runtime·noteclear(&runtime·work.alldone);
		runtime·helpgc(runtime·work.nproc);
	}

	t2 = 0;
	if(runtime·debug.gctrace)
		t2 = runtime·nanotime();

	gchelperstart();
	runtime·parfordo(runtime·work.markfor);

	scanblock(nil, 0, nil);

	if(runtime·work.full)
		runtime·throw("runtime·work.full != nil");
	if(runtime·work.partial)
		runtime·throw("runtime·work.partial != nil");

	runtime·gcphase = oldphase;

	t3 = 0;
	if(runtime·debug.gctrace)
		t3 = runtime·nanotime();

	if(runtime·work.nproc > 1)
		runtime·notesleep(&runtime·work.alldone);

	runtime·shrinkfinish();
	cachestats();
	// next_gc calculation is tricky with concurrent sweep since we don't know size of live heap
	// estimate what was live heap size after previous GC (for tracing only)
	heap0 = mstats.next_gc*100/(runtime·gcpercent+100);
	// conservatively set next_gc to high value assuming that everything is live
	// concurrent/lazy sweep will reduce this number while discovering new garbage
	mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*runtime·gcpercent/100;
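	// Worked example (illustrative numbers only): with gcpercent=100 and
	// heap_alloc=4MB live after this collection, next_gc becomes
	// 4MB + 4MB*100/100 = 8MB, and at the start of the next cycle the
	// heap0 estimate above inverts that: 8MB*100/(100+100) = 4MB.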

	t4 = runtime·nanotime();
	runtime·atomicstore64(&mstats.last_gc, runtime·unixnanotime()); // must be Unix time to make sense to user
	mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t4 - t0;
	mstats.pause_end[mstats.numgc%nelem(mstats.pause_end)] = t4;
	mstats.pause_total_ns += t4 - t0;
	mstats.numgc++;
	if(mstats.debuggc)
		runtime·printf("pause %D\n", t4-t0);

	if(runtime·debug.gctrace) {
		heap1 = mstats.heap_alloc;
		runtime·updatememstats(&stats);
		if(heap1 != mstats.heap_alloc) {
			runtime·printf("runtime: mstats skew: heap=%D/%D\n", heap1, mstats.heap_alloc);
			runtime·throw("mstats skew");
		}
		obj = mstats.nmalloc - mstats.nfree;

		stats.nprocyield += runtime·work.markfor->nprocyield;
		stats.nosyield += runtime·work.markfor->nosyield;
		stats.nsleep += runtime·work.markfor->nsleep;

		runtime·printf("gc%d(%d): %D+%D+%D+%D us, %D -> %D MB, %D (%D-%D) objects,"
			" %d/%d/%d sweeps,"
			" %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
			mstats.numgc, runtime·work.nproc, (t1-t0)/1000, (t2-t1)/1000, (t3-t2)/1000, (t4-t3)/1000,
			heap0>>20, heap1>>20, obj,
			mstats.nmalloc, mstats.nfree,
			runtime·work.nspan, runtime·sweep.nbgsweep, runtime·sweep.npausesweep,
			stats.nhandoff, stats.nhandoffcnt,
			runtime·work.markfor->nsteal, runtime·work.markfor->nstealcnt,
			stats.nprocyield, stats.nosyield, stats.nsleep);
		runtime·sweep.nbgsweep = runtime·sweep.npausesweep = 0;
	}

	// See the comment in the beginning of this function as to why we need the following.
	// Even if this is still stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
	runtime·lock(&runtime·mheap.lock);
	// Free the old cached mark array if necessary.
	if(runtime·work.spans != nil && runtime·work.spans != runtime·mheap.allspans)
		runtime·SysFree(runtime·work.spans, runtime·work.nspan*sizeof(runtime·work.spans[0]), &mstats.other_sys);

	if(gccheckmarkenable) {
		if(!checkmark) {
			// first half of two-pass; don't set up sweep
			runtime·unlock(&runtime·mheap.lock);
			return;
		}
		checkmark = false; // done checking marks
	}

	// Cache the current array for sweeping.
	runtime·mheap.gcspans = runtime·mheap.allspans;
	runtime·mheap.sweepgen += 2;
	runtime·mheap.sweepdone = false;
	runtime·work.spans = runtime·mheap.allspans;
	runtime·work.nspan = runtime·mheap.nspan;
	runtime·sweep.spanidx = 0;
	runtime·unlock(&runtime·mheap.lock);

	if(ConcurrentSweep && !args->eagersweep) {
		runtime·lock(&runtime·gclock);
		if(runtime·sweep.g == nil)
			runtime·sweep.g = runtime·newproc1(&bgsweepv, nil, 0, 0, gc);
		else if(runtime·sweep.parked) {
			runtime·sweep.parked = false;
			runtime·ready(runtime·sweep.g);
		}
		runtime·unlock(&runtime·gclock);
	} else {
		// Sweep all spans eagerly.
		while(runtime·sweepone() != -1)
			runtime·sweep.npausesweep++;
		// Do an additional mProf_GC, because all 'free' events are now real as well.
		runtime·mProf_GC();
	}

	runtime·mProf_GC();
	g->m->traceback = 0;
}
extern uintptr runtime·sizeof_C_MStats;

static void readmemstats_m(void);

void
runtime·readmemstats_m(void)
{
	MStats *stats;

	stats = g->m->ptrarg[0];
	g->m->ptrarg[0] = nil;

	runtime·updatememstats(nil);
	// Size of the trailing by_size array differs between Go and C,
	// NumSizeClasses was changed, but we cannot change the Go struct because of backward compatibility.
	runtime·memmove(stats, &mstats, runtime·sizeof_C_MStats);

	// Stack numbers are part of the heap numbers; separate those out for user consumption.
	stats->stacks_sys = stats->stacks_inuse;
	stats->heap_inuse -= stats->stacks_inuse;
	stats->heap_sys -= stats->stacks_inuse;
}
static void readgcstats_m(void);

#pragma textflag NOSPLIT
void
runtime∕debug·readGCStats(Slice *pauses)
{
	void (*fn)(void);

	g->m->ptrarg[0] = pauses;
	fn = readgcstats_m;
	runtime·onM(&fn);
}

static void
readgcstats_m(void)
{
	Slice *pauses;
	uint64 *p;
	uint32 i, j, n;

	pauses = g->m->ptrarg[0];
	g->m->ptrarg[0] = nil;

	// Calling code in runtime/debug should make the slice large enough.
	if(pauses->cap < nelem(mstats.pause_ns)+3)
		runtime·throw("runtime: short slice passed to readGCStats");

	// Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
	p = (uint64*)pauses->array;
	runtime·lock(&runtime·mheap.lock);

	n = mstats.numgc;
	if(n > nelem(mstats.pause_ns))
		n = nelem(mstats.pause_ns);

	// The pause buffer is circular. The most recent pause is at
	// pause_ns[(numgc-1)%nelem(pause_ns)], and then backward
	// from there to go back farther in time. We deliver the times
	// most recent first (in p[0]).
	for(i = 0; i < n; i++) {
		j = (mstats.numgc-1-i)%nelem(mstats.pause_ns);
		p[i] = mstats.pause_ns[j];
		p[n+i] = mstats.pause_end[j];
	}

	p[n+n] = mstats.last_gc;
	p[n+n+1] = mstats.numgc;
	p[n+n+2] = mstats.pause_total_ns;
	runtime·unlock(&runtime·mheap.lock);
	pauses->len = n+n+3;
}
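// Worked example of the circular indexing above (illustrative numbers):
// with numgc=260 and nelem(pause_ns)=256, the most recent pause is
// pause_ns[(260-1)%256] = pause_ns[3], so the loop fills p[0]=pause_ns[3],
// p[1]=pause_ns[2], p[2]=pause_ns[1], p[3]=pause_ns[0], p[4]=pause_ns[255],
// and so on back through the buffer.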
void
runtime·setgcpercent_m(void)
{
	int32 in;
	int32 out;

	in = (int32)(intptr)g->m->scalararg[0];

	runtime·lock(&runtime·mheap.lock);
	out = runtime·gcpercent;
	if(in < 0)
		in = -1;
	runtime·gcpercent = in;
	runtime·unlock(&runtime·mheap.lock);

	g->m->scalararg[0] = (uintptr)(intptr)out;
}
static void
gchelperstart(void)
{
	if(g->m->helpgc < 0 || g->m->helpgc >= MaxGcproc)
		runtime·throw("gchelperstart: bad m->helpgc");
	if(g != g->m->g0)
		runtime·throw("gchelper not running on g0 stack");
}
G*
runtime·wakefing(void)
{
	G *res;

	res = nil;
	runtime·lock(&runtime·finlock);
	if(runtime·fingwait && runtime·fingwake) {
		runtime·fingwait = false;
		runtime·fingwake = false;
		res = runtime·fing;
	}
	runtime·unlock(&runtime·finlock);
	return res;
}
// Recursively unrolls GC program in prog.
// mask is where to store the result.
// ppos is a pointer to position in mask, in bits.
// sparse says to generate 4-bits per word mask for heap (2-bits for data/bss otherwise).
static byte*
unrollgcprog1(byte *mask, byte *prog, uintptr *ppos, bool inplace, bool sparse)
{
	uintptr pos, siz, i, off;
	byte *arena_start, *prog1, v, *bitp, shift;

	arena_start = runtime·mheap.arena_start;
	pos = *ppos;
	for(;;) {
		switch(prog[0]) {
		case insData:
			prog++;
			siz = prog[0];
			prog++;
			for(i = 0; i < siz; i++) {
				v = prog[i/PointersPerByte];
				v >>= (i%PointersPerByte)*BitsPerPointer;
				v &= BitsMask;
				if(inplace) {
					// Store directly into GC bitmap.
					off = (uintptr*)(mask+pos) - (uintptr*)arena_start;
					bitp = arena_start - off/wordsPerBitmapByte - 1;
					shift = (off % wordsPerBitmapByte) * gcBits;
					if(shift == 0)
						*bitp = 0;
					*bitp |= v<<(shift+2);
					pos += PtrSize;
				} else if(sparse) {
					// 4-bits per word
					v <<= (pos%8)+2;
					mask[pos/8] |= v;
					pos += gcBits;
				} else {
					// 2-bits per word
					v <<= pos%8;
					mask[pos/8] |= v;
					pos += BitsPerPointer;
				}
			}
			prog += ROUND(siz*BitsPerPointer, 8)/8;
			break;
		case insArray:
			prog++;
			siz = 0;
			for(i = 0; i < PtrSize; i++)
				siz = (siz<<8) + prog[PtrSize-i-1];
			prog += PtrSize;
			prog1 = nil;
			for(i = 0; i < siz; i++)
				prog1 = unrollgcprog1(mask, prog, &pos, inplace, sparse);
			if(prog1[0] != insArrayEnd)
				runtime·throw("unrollgcprog: array does not end with insArrayEnd");
			prog = prog1+1;
			break;
		case insArrayEnd:
		case insEnd:
			*ppos = pos;
			return prog;
		default:
			runtime·throw("unrollgcprog: unknown instruction");
		}
	}
}
// Unrolls GC program prog for data/bss, returns dense GC mask.
static BitVector
unrollglobgcprog(byte *prog, uintptr size)
{
	byte *mask;
	uintptr pos, masksize;

	masksize = ROUND(ROUND(size, PtrSize)/PtrSize*BitsPerPointer, 8)/8;
	mask = runtime·persistentalloc(masksize+1, 0, &mstats.gc_sys);
	mask[masksize] = 0xa1;
	pos = 0;
	prog = unrollgcprog1(mask, prog, &pos, false, false);
	if(pos != size/PtrSize*BitsPerPointer) {
		runtime·printf("unrollglobgcprog: bad program size, got %D, expect %D\n",
			(uint64)pos, (uint64)size/PtrSize*BitsPerPointer);
		runtime·throw("unrollglobgcprog: bad program size");
	}
	if(prog[0] != insEnd)
		runtime·throw("unrollglobgcprog: program does not end with insEnd");
	if(mask[masksize] != 0xa1)
		runtime·throw("unrollglobgcprog: overflow");
	return (BitVector){masksize*8, mask};
}
void
runtime·unrollgcproginplace_m(void)
{
	uintptr size, size0, pos, off;
	byte *arena_start, *prog, *bitp, shift;
	Type *typ;
	void *v;

	v = g->m->ptrarg[0];
	typ = g->m->ptrarg[1];
	size = g->m->scalararg[0];
	size0 = g->m->scalararg[1];
	g->m->ptrarg[0] = nil;
	g->m->ptrarg[1] = nil;

	pos = 0;
	prog = (byte*)typ->gc[1];
	while(pos != size0)
		unrollgcprog1(v, prog, &pos, true, true);
	// Mark first word as bitAllocated.
	arena_start = runtime·mheap.arena_start;
	off = (uintptr*)v - (uintptr*)arena_start;
	bitp = arena_start - off/wordsPerBitmapByte - 1;
	shift = (off % wordsPerBitmapByte) * gcBits;
	*bitp |= bitBoundary<<shift;
	// Mark word after last as BitsDead.
	if(size0 < size) {
		off = (uintptr*)((byte*)v + size0) - (uintptr*)arena_start;
		bitp = arena_start - off/wordsPerBitmapByte - 1;
		shift = (off % wordsPerBitmapByte) * gcBits;
		*bitp &= ~(bitPtrMask<<shift) | ((uintptr)BitsDead<<(shift+2));
	}
}
// Unrolls GC program in typ->gc[1] into typ->gc[0]
void
runtime·unrollgcprog_m(void)
{
	static Mutex lock;
	Type *typ;
	byte *mask, *prog;
	uintptr pos;
	uintptr x;

	typ = g->m->ptrarg[0];
	g->m->ptrarg[0] = nil;

	runtime·lock(&lock);
	mask = (byte*)typ->gc[0];
	if(mask[0] == 0) {
		pos = 8; // skip the unroll flag
		prog = (byte*)typ->gc[1];
		prog = unrollgcprog1(mask, prog, &pos, false, true);
		if(prog[0] != insEnd)
			runtime·throw("unrollgcprog: program does not end with insEnd");
		if(((typ->size/PtrSize)%2) != 0) {
			// repeat the program twice
			prog = (byte*)typ->gc[1];
			unrollgcprog1(mask, prog, &pos, false, true);
		}
		// atomic way to say mask[0] = 1
		x = *(uintptr*)mask;
		((byte*)&x)[0] = 1;
		runtime·atomicstorep((void**)mask, (void*)x);
	}
	runtime·unlock(&lock);
}
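// Why the program is repeated for an odd word count (reasoning from the
// 4-bit packing, not a comment in the original): the sparse mask stores
// gcBits (4) bits per word, i.e. two words per mask byte. A 3-word type
// unrolled once would end in the middle of a byte; unrolled twice it covers
// 6 words, so repeated copies of the mask stay byte-aligned.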
// mark the span of memory at v as having n blocks of the given size.
// if leftover is true, there is left over space at the end of the span.
void
runtime·markspan(void *v, uintptr size, uintptr n, bool leftover)
{
	uintptr i, off, step;
	byte *b;

	if((byte*)v+size*n > (byte*)runtime·mheap.arena_used || (byte*)v < runtime·mheap.arena_start)
		runtime·throw("markspan: bad pointer");

	// Find bits of the beginning of the span.
	off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset
	b = runtime·mheap.arena_start - off/wordsPerBitmapByte - 1;
	if((off%wordsPerBitmapByte) != 0)
		runtime·throw("markspan: unaligned length");

	// Okay to use non-atomic ops here, because we control
	// the entire span, and each bitmap byte has bits for only
	// one span, so no other goroutines are changing these bitmap words.

	if(size == PtrSize) {
		// Possible only on 64-bits (minimal size class is 8 bytes).
		// Poor man's memset(0x11).
		if(0x11 != ((bitBoundary+BitsDead)<<gcBits) + (bitBoundary+BitsDead))
			runtime·throw("markspan: bad bits");
		if((n%(wordsPerBitmapByte*PtrSize)) != 0)
			runtime·throw("markspan: unaligned length");
		b = b - n/wordsPerBitmapByte + 1; // find first byte
		if(((uintptr)b%PtrSize) != 0)
			runtime·throw("markspan: unaligned pointer");
		for(i = 0; i != n; i += wordsPerBitmapByte*PtrSize, b += PtrSize)
			*(uintptr*)b = (uintptr)0x1111111111111111ULL; // bitBoundary+BitsDead
		return;
	}

	if(leftover)
		n++; // mark a boundary just past end of last block too
	step = size/(PtrSize*wordsPerBitmapByte);
	for(i = 0; i != n; i++, b -= step)
		*b = bitBoundary|(BitsDead<<2);
}
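// Worked example for the step computation above (illustrative numbers):
// size=48 and PtrSize=8 give blocks of 6 words; with wordsPerBitmapByte=2
// that is step = 48/(8*2) = 3 bitmap bytes between consecutive block
// boundaries, which is why b moves by step as i advances block by block.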
// unmark the span of memory at v of length n bytes.
void
runtime·unmarkspan(void *v, uintptr n)
{
	uintptr off;
	byte *b;

	if((byte*)v+n > (byte*)runtime·mheap.arena_used || (byte*)v < runtime·mheap.arena_start)
		runtime·throw("markspan: bad pointer");

	off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset
	if((off % (PtrSize*wordsPerBitmapByte)) != 0)
		runtime·throw("markspan: unaligned pointer");
	b = runtime·mheap.arena_start - off/wordsPerBitmapByte - 1;

	n /= PtrSize;
	if(n%(PtrSize*wordsPerBitmapByte) != 0)
		runtime·throw("unmarkspan: unaligned length");
	// Okay to use non-atomic ops here, because we control
	// the entire span, and each bitmap word has bits for only
	// one span, so no other goroutines are changing these
	// bitmap words.
	n /= wordsPerBitmapByte;
	runtime·memclr(b - n + 1, n);
}
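// Recap of the bitmap addressing used above (restating the arithmetic, not
// adding rules): the bitmap grows downward from arena_start and describes
// wordsPerBitmapByte (2) heap words per byte, so heap word off is described
// by the byte at arena_start - off/2 - 1, at nibble shift (off%2)*gcBits;
// words 0 and 1 of the arena share the byte just below arena_start.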
void
runtime·MHeap_MapBits(MHeap *h)
{
	// Caller has added extra mappings to the arena.
	// Add extra mappings of bitmap words as needed.
	// We allocate extra bitmap pieces in chunks of bitmapChunk.
	enum {
		bitmapChunk = 8192
	};
	uintptr n;

	n = (h->arena_used - h->arena_start) / (PtrSize*wordsPerBitmapByte);
	n = ROUND(n, bitmapChunk);
	n = ROUND(n, PhysPageSize);
	if(h->bitmap_mapped >= n)
		return;

	runtime·SysMap(h->arena_start - n, n - h->bitmap_mapped, h->arena_reserved, &mstats.gc_sys);
	h->bitmap_mapped = n;
}
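// Sizing example for the code above (illustrative numbers): a 64MB arena
// with PtrSize*wordsPerBitmapByte = 16 needs 64MB/16 = 4MB of bitmap; n is
// then rounded up to a multiple of bitmapChunk and of PhysPageSize so that
// SysMap always extends the mapping in whole chunks ending at arena_start.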
static bool
getgcmaskcb(Stkframe *frame, void *ctxt)
{
	Stkframe *frame0;

	frame0 = ctxt;
	if(frame->sp <= frame0->sp && frame0->sp < frame->varp) {
		*frame0 = *frame;
		return false;
	}
	return true;
}
// Returns GC type info for object p for testing.
void
runtime·getgcmask(byte *p, Type *t, byte **mask, uintptr *len)
{
	Stkframe frame;
	uintptr i, n, off;
	byte *base, bits, shift, *b;
	bool (*cb)(Stkframe*, void*);

	*mask = nil;
	*len = 0;

	// data
	if(p >= runtime·data && p < runtime·edata) {
		n = ((PtrType*)t)->elem->size;
		*len = n/PtrSize;
		*mask = runtime·mallocgc(*len, nil, FlagNoScan);
		for(i = 0; i < n; i += PtrSize) {
			off = (p+i-runtime·data)/PtrSize;
			bits = (runtime·gcdatamask.bytedata[off/PointersPerByte] >> ((off%PointersPerByte)*BitsPerPointer))&BitsMask;
			(*mask)[i/PtrSize] = bits;
		}
		return;
	}
	// bss
	if(p >= runtime·bss && p < runtime·ebss) {
		n = ((PtrType*)t)->elem->size;
		*len = n/PtrSize;
		*mask = runtime·mallocgc(*len, nil, FlagNoScan);
		for(i = 0; i < n; i += PtrSize) {
			off = (p+i-runtime·bss)/PtrSize;
			bits = (runtime·gcbssmask.bytedata[off/PointersPerByte] >> ((off%PointersPerByte)*BitsPerPointer))&BitsMask;
			(*mask)[i/PtrSize] = bits;
		}
		return;
	}
	// heap
	if(runtime·mlookup(p, &base, &n, nil)) {
		*len = n/PtrSize;
		*mask = runtime·mallocgc(*len, nil, FlagNoScan);
		for(i = 0; i < n; i += PtrSize) {
			off = (uintptr*)(base+i) - (uintptr*)runtime·mheap.arena_start;
			b = runtime·mheap.arena_start - off/wordsPerBitmapByte - 1;
			shift = (off % wordsPerBitmapByte) * gcBits;
			bits = (*b >> (shift+2))&BitsMask;
			(*mask)[i/PtrSize] = bits;
		}
		return;
	}
	// stack
	frame.fn = nil;
	frame.sp = (uintptr)p;
	cb = getgcmaskcb;
	runtime·gentraceback(g->m->curg->sched.pc, g->m->curg->sched.sp, 0, g->m->curg, 0, nil, 1000, &cb, &frame, 0);
	if(frame.fn != nil) {
		Func *f;
		StackMap *stackmap;
		BitVector bv;
		uintptr size;
		uintptr targetpc;
		int32 pcdata;

		f = frame.fn;
		targetpc = frame.continpc;
		if(targetpc == 0)
			return;
		if(targetpc != f->entry)
			targetpc--;
		pcdata = runtime·pcdatavalue(f, PCDATA_StackMapIndex, targetpc);
		if(pcdata == -1)
			return;
		stackmap = runtime·funcdata(f, FUNCDATA_LocalsPointerMaps);
		if(stackmap == nil || stackmap->n <= 0)
			return;
		bv = runtime·stackmapdata(stackmap, pcdata);
		size = bv.n/BitsPerPointer*PtrSize;
		n = ((PtrType*)t)->elem->size;
		*len = n/PtrSize;
		*mask = runtime·mallocgc(*len, nil, FlagNoScan);
		for(i = 0; i < n; i += PtrSize) {
			off = (p+i-(byte*)frame.varp+size)/PtrSize;
			bits = (bv.bytedata[off*BitsPerPointer/8] >> ((off*BitsPerPointer)%8))&BitsMask;
			(*mask)[i/PtrSize] = bits;
		}
	}
}
void runtime·gc_unixnanotime(int64 *now);

int64
runtime·unixnanotime(void)
{
	int64 now;

	runtime·gc_unixnanotime(&now);
	return now;
}