1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Garbage collector (GC).
7 // The GC runs concurrently with mutator threads, is type accurate (aka precise), and allows multiple
8 // GC threads to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
9 // non-generational and non-compacting. Allocation is done using size segregated per P allocation
10 // areas to minimize fragmentation while eliminating locks in the common case.
12 // The algorithm decomposes into several steps.
13 // This is a high level description of the algorithm being used. For an overview of GC a good
14 // place to start is Richard Jones' gchandbook.org.
16 // The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
17 // Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
18 // On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978), 966-975.
19 // For journal quality proofs that these steps are complete, correct, and terminate see
20 // Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
21 // Concurrency and Computation: Practice and Experience 15(3-5), 2003.
23 // 0. Set phase = GCscan from GCoff.
24 // 1. Wait for all P's to acknowledge phase change.
25 // At this point all goroutines have passed through a GC safepoint and
26 // know we are in the GCscan phase.
27 // 2. GC scans all goroutine stacks, marks and enqueues all encountered pointers
28 // (marking avoids most duplicate enqueuing but races may produce duplication which is benign).
29 // Preempted goroutines are scanned before the P schedules the next goroutine.
30 // 3. Set phase = GCmark.
31 // 4. Wait for all P's to acknowledge phase change.
32 // 5. Now the write barrier marks and enqueues white pointers stored into black or grey slots. If a pointer is
33 // stored into a white slot, that pointer is not marked.
34 // Malloc still allocates white (non-marked) objects.
35 // 6. Meanwhile GC transitively walks the heap marking reachable objects.
36 // 7. When GC finishes marking heap, it preempts P's one-by-one and
37 // retakes partial wbufs (filled by write barrier or during a stack scan of the goroutine
38 // currently scheduled on the P).
39 // 8. Once the GC has exhausted all available marking work it sets phase = marktermination.
40 // 9. Wait for all P's to acknowledge phase change.
41 // 10. Malloc now allocates black objects, so the number of unmarked reachable objects
42 // monotonically decreases.
43 // 11. GC preempts P's one-by-one taking partial wbufs and marks all unmarked yet reachable objects.
44 // 12. When GC completes a full cycle over P's and discovers no new grey
45 // objects (which means all reachable objects are marked), set phase = GCsweep.
46 // 13. Wait for all P's to acknowledge phase change.
47 // 14. Now malloc allocates white (but sweeps spans before use).
48 // Write barrier becomes nop.
49 // 15. GC does background sweeping, see description below.
50 // 16. When sweeping is complete set phase to GCoff.
51 // 17. When sufficient allocation has taken place, replay the sequence starting at 0 above;
52 // see discussion of GC rate below.
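//
// As a sketch (using the phase names in this file), the sequence above walks gcphase through
//
//	GCoff -> GCscan -> GCmark -> GCmarktermination -> GCsweep -> GCoff
//
// with each transition acknowledged by all P's before the next step proceeds.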
55 // Phases are changed by setting the gcphase to the next phase and possibly calling ackgcphase.
56 // All phase actions must be benign in the presence of a phase change.
57 // Starting with GCoff
59 // GCscan scans stacks and globals, greying them, and never marks an object black.
60 // Once all the P's are aware of the new phase they will scan gs on preemption.
61 // This means that the scanning of preempted gs can't start until all the Ps have acknowledged the phase change.
64 // GCmark turns on the write barrier, which also only greys objects. No scanning
65 // of objects (making them black) can happen until all the Ps have acknowledged the phase change.
67 // GCmark to GCmarktermination
68 // The only change here is that we start allocating black so the Ps must acknowledge
69 // the change before we begin the termination algorithm.
70 // GCmarktermination to GCsweep
71 // Objects currently on the freelists must be marked black for this to work.
72 // Are things on the free lists black or white? How does the sweep phase work?
75 // The sweep phase proceeds concurrently with normal program execution.
76 // The heap is swept span-by-span both lazily (when a goroutine needs another span)
77 // and concurrently in a background goroutine (this helps programs that are not CPU bound).
78 // However, at the end of the stop-the-world GC phase we don't know the size of the live heap,
79 // and so next_gc calculation is tricky and happens as follows.
80 // At the end of the stop-the-world phase next_gc is conservatively set based on total
81 // heap size; all spans are marked as "needs sweeping".
82 // Whenever a span is swept, next_gc is decremented by GOGC*newly_freed_memory.
83 // The background sweeper goroutine simply sweeps spans one-by-one bringing next_gc
84 // closer to the target value. However, this is not enough to avoid over-allocating memory.
85 // Consider that a goroutine wants to allocate a new span for a large object and
86 // there are no free swept spans, but there are small-object unswept spans.
87 // If the goroutine naively allocates a new span, it can surpass the yet-unknown
88 // target next_gc value. In order to prevent such cases (1) when a goroutine needs
89 // to allocate a new small-object span, it sweeps small-object spans for the same
90 // object size until it frees at least one object; (2) when a goroutine needs to
91 // allocate a large-object span from the heap, it sweeps spans until it frees at least
92 // that many pages into the heap. Together these two measures ensure that we don't surpass
93 // target next_gc value by a large margin. There is an exception: if a goroutine sweeps
94 // and frees two nonadjacent one-page spans to the heap, it will allocate a new two-page span,
95 // but there can still be other one-page unswept spans which could be combined into a two-page span.
96 // It's critical to ensure that no operations proceed on unswept spans (that would corrupt
97 // mark bits in GC bitmap). During GC all mcaches are flushed into the central cache,
98 // so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
99 // When a goroutine explicitly frees an object or sets a finalizer, it ensures that
100 // the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
101 // The finalizer goroutine is kicked off only when all spans are swept.
102 // When the next GC starts, it sweeps all not-yet-swept spans (if any).
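//
// As a simplified sketch (not the actual allocation path), the sweep-before-allocate
// discipline described above amounts to:
//
//	// Small object of size class cl: sweep spans of the same class
//	// until at least one object has been freed, then allocate.
//	while(!freed_at_least_one(cl) && runtime·sweepone() != -1)
//		;
//	// Large object needing npages: sweep until that many pages come back.
//	while(pages_freed < npages && runtime·sweepone() != -1)
//		;
//
// where freed_at_least_one and pages_freed are hypothetical stand-ins for the
// bookkeeping actually done by mcentral and mheap.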
105 // Next GC is after we've allocated an extra amount of memory proportional to
106 // the amount already in use. The proportion is controlled by GOGC environment variable
107 // (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
108 // (this mark is tracked in next_gc variable). This keeps the GC cost in linear
109 // proportion to the allocation cost. Adjusting GOGC just changes the linear constant
110 // (and also the amount of extra memory used).
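//
// For illustration, the trigger computed at the end of a GC cycle is (see the
// assignment to mstats.next_gc in gc below):
//
//	next_gc = heap_alloc + heap_alloc*GOGC/100
//
// so with GOGC=100 and 4M live, next_gc is 8M; with GOGC=200 it would be 12M.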
113 #include "arch_GOARCH.h"
120 #include "typekind.h"
121 #include "funcdata.h"
122 #include "textflag.h"
128 FinBlockSize = 4*1024,
137 // ptrmask for an allocation containing a single pointer.
138 static byte oneptr[] = {BitsPointer};
140 // Initialized from $GOGC. GOGC=off means no GC.
141 extern int32 runtime·gcpercent;
143 // Holding worldsema grants an M the right to try to stop the world.
146 // runtime·semacquire(&runtime·worldsema);
148 // runtime·stoptheworld();
153 // runtime·semrelease(&runtime·worldsema);
154 // runtime·starttheworld();
156 uint32 runtime·worldsema = 1;
158 typedef struct Markbits Markbits;
160 byte *bitp; // pointer to the byte holding xbits
161 byte shift; // bits xbits needs to be shifted to get bits
162 byte xbits; // byte holding all the bits from *bitp
163 byte bits; // bits relevant to corresponding slot.
166 extern byte runtime·data[];
167 extern byte runtime·edata[];
168 extern byte runtime·bss[];
169 extern byte runtime·ebss[];
171 extern byte runtime·gcdata[];
172 extern byte runtime·gcbss[];
174 Mutex runtime·finlock; // protects the following variables
175 G* runtime·fing; // goroutine that runs finalizers
176 FinBlock* runtime·finq; // list of finalizers that are to be executed
177 FinBlock* runtime·finc; // cache of free blocks
178 static byte finptrmask[FinBlockSize/PtrSize/PointersPerByte];
179 bool runtime·fingwait;
180 bool runtime·fingwake;
181 FinBlock *runtime·allfin; // list of all blocks
183 BitVector runtime·gcdatamask;
184 BitVector runtime·gcbssmask;
186 Mutex runtime·gclock;
188 static Workbuf* getpartialorempty(void);
189 static void putpartial(Workbuf*);
190 static Workbuf* getempty(Workbuf*);
191 static Workbuf* getfull(Workbuf*);
192 static void putempty(Workbuf*);
193 static void putfull(Workbuf*);
194 static Workbuf* handoff(Workbuf*);
195 static void gchelperstart(void);
196 static void flushallmcaches(void);
197 static bool scanframe(Stkframe*, void*);
198 static void scanstack(G*);
199 static BitVector unrollglobgcprog(byte*, uintptr);
200 static void scanblock(byte*, uintptr, byte*);
201 static byte* objectstart(byte*, Markbits*);
202 static Workbuf* greyobject(byte*, Markbits*, Workbuf*);
203 static bool inheap(byte*);
204 static bool shaded(byte*);
205 static void shade(byte*);
206 static void slottombits(byte*, Markbits*);
208 void runtime·bgsweep(void);
209 void runtime·finishsweep_m(void);
210 static FuncVal bgsweepv = {runtime·bgsweep};
212 typedef struct WorkData WorkData;
214 uint64 full; // lock-free list of full blocks
215 uint64 empty; // lock-free list of empty blocks
216 uint64 partial; // lock-free list of partially filled blocks
217 byte pad0[CacheLineSize]; // prevents false-sharing between full/empty and nproc/nwait
220 volatile uint32 nwait;
221 volatile uint32 ndone;
225 // Copy of mheap.allspans for marker or sweeper.
229 WorkData runtime·work;
231 // Reports whether address b is in the known heap. If it doesn't have a valid gcmap
232 // it returns false. For example, pointers into stacks will return false.
240 if(b == nil || b < runtime·mheap.arena_start || b >= runtime·mheap.arena_used)
242 // Not a beginning of a block, consult span table to find the block beginning.
243 k = (uintptr)b>>PageShift;
245 x -= (uintptr)runtime·mheap.arena_start>>PageShift;
246 s = runtime·mheap.spans[x];
247 if(s == nil || k < s->start || b >= s->limit || s->state != MSpanInUse)
252 // Given an address in the heap, return the relevant byte from the gcmap. This routine
253 // can be used on addresses to the start of an object or to the interior of an object.
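// As a sketch of the layout implied by the code below: the bitmap grows down from
// arena_start, each bitmap byte describes wordsPerBitmapByte (two) heap words, and
// for a word at offset off words from arena_start:
//
//	bitp  = arena_start - off/wordsPerBitmapByte - 1
//	shift = (off % wordsPerBitmapByte) * gcBits
//	bits  = (*bitp >> shift) & bitMask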
255 slottombits(byte *obj, Markbits *mbits)
259 off = (uintptr*)((uintptr)obj&~(PtrSize-1)) - (uintptr*)runtime·mheap.arena_start;
260 mbits->bitp = runtime·mheap.arena_start - off/wordsPerBitmapByte - 1;
261 mbits->shift = (off % wordsPerBitmapByte) * gcBits;
262 mbits->xbits = *mbits->bitp;
263 mbits->bits = (mbits->xbits >> mbits->shift) & bitMask;
266 // b is a pointer into the heap.
267 // Find the start of the object referred to by b.
268 // Set mbits to the associated bits from the bit map.
270 objectstart(byte *b, Markbits *mbits)
275 uintptr x, size, idx;
277 obj = (byte*)((uintptr)b&~(PtrSize-1));
279 slottombits(obj, mbits);
280 if((mbits->bits&bitBoundary) == bitBoundary)
283 // Not a beginning of a block, consult span table to find the block beginning.
284 k = (uintptr)obj>>PageShift;
286 x -= (uintptr)runtime·mheap.arena_start>>PageShift;
287 s = runtime·mheap.spans[x];
288 if(s == nil || k < s->start || obj >= s->limit || s->state != MSpanInUse){
289 if(s->state == MSpanStack)
290 break; // This is legit.
292 // The following catches some bugs left over from
293 // us not being rigorous about which data structures
294 // hold valid pointers, with different parts of the system
295 // considering different structures as roots. For example,
296 // a pointer into a stack may be left in
297 // a global data structure because that part of the runtime knows
298 // those structures will be reinitialized before they are
299 // reused. Unfortunately the GC believes these roots are valid.
300 // Typically a stack gets moved and only the structures that part of
301 // the system knows are alive are updated. The span is freed
302 // after the stack copy and the pointer is still alive. This
303 // check catches that bug, but for now we do not throw;
304 // instead we simply break out of this routine and depend
305 // on the caller to recognize that this pointer is not a valid
306 // heap pointer. The code that catches the bug is left in so that once
307 // the bug is resolved we can turn this check back on and throw.
309 //runtime·printf("Runtime: Span weird: obj=%p, k=%p", obj, k);
311 // runtime·printf(" s=nil\n");
313 // runtime·printf(" s->start=%p s->limit=%p, s->state=%d\n", s->start*PageSize, s->limit, s->state);
314 //runtime·throw("Blowup on weird span");
315 break; // Not in a real block; see the comment above for why we do not throw.
317 p = (byte*)((uintptr)s->start<<PageShift);
318 if(s->sizeclass != 0) {
320 idx = ((byte*)obj - p)/size;
324 runtime·printf("runtime: failed to find block beginning for %p s=%p s->limit=%p\n",
325 p, s->start*PageSize, s->limit);
326 runtime·throw("failed to find block beginning");
330 // if size(obj.firstfield) < PtrSize, the &obj.secondfield could map to the boundary bit
331 // Clear any low bits to get to the start of the object.
332 // greyobject depends on this.
336 // obj is the start of an object with mark mbits.
337 // If it isn't already marked, mark it and enqueue into workbuf.
338 // Return possibly new workbuf to use.
340 greyobject(byte *obj, Markbits *mbits, Workbuf *wbuf)
342 // obj should be start of allocation, and so must be at least pointer-aligned.
343 if(((uintptr)obj & (PtrSize-1)) != 0)
344 runtime·throw("greyobject: obj not pointer-aligned");
346 // If marked we have nothing to do.
347 if((mbits->bits&bitMarked) != 0)
350 // Each byte of GC bitmap holds info for two words.
351 // If the current object is larger than two words, or if the object is one word
352 // but the object it shares the byte with is already marked,
353 // then all the possible concurrent updates are trying to set the same bit,
354 // so we can use a non-atomic update.
355 if((mbits->xbits&(bitMask|(bitMask<<gcBits))) != (bitBoundary|(bitBoundary<<gcBits)) || runtime·work.nproc == 1)
356 *mbits->bitp = mbits->xbits | (bitMarked<<mbits->shift);
358 runtime·atomicor8(mbits->bitp, bitMarked<<mbits->shift);
360 if(((mbits->xbits>>(mbits->shift+2))&BitsMask) == BitsDead)
361 return wbuf; // noscan object
363 // Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
364 // seems like a nice optimization that can be added back in.
365 // There needs to be time between the PREFETCH and the use.
366 // Previously we put the obj in an 8 element buffer that is drained at a rate
367 // to give the PREFETCH time to do its work.
368 // Use of PREFETCHNTA might be more appropriate than PREFETCH
370 // If workbuf is full, obtain an empty one.
371 if(wbuf->nobj >= nelem(wbuf->obj)) {
372 wbuf = getempty(wbuf);
375 wbuf->obj[wbuf->nobj] = obj;
380 // Scan the object b of size n, adding pointers to wbuf.
381 // Return possibly new wbuf to use.
382 // If ptrmask != nil, it specifies where pointers are in b.
383 // If ptrmask == nil, the GC bitmap should be consulted.
384 // In this case, n may be an overestimate of the size; the GC bitmap
385 // must also be used to make sure the scan stops at the end of b.
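// As a sketch of the compact ptrmask layout used below: each mask byte describes
// four words, BitsPerPointer bits per word, so the bits for word i of b are
//
//	bits = (ptrmask[(i/PtrSize)/4] >> (((i/PtrSize)%4)*BitsPerPointer)) & BitsMask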
387 scanobject(byte *b, uintptr n, byte *ptrmask, Workbuf *wbuf)
389 byte *obj, *arena_start, *arena_used, *ptrbitp;
394 arena_start = (byte*)runtime·mheap.arena_start;
395 arena_used = runtime·mheap.arena_used;
398 // Find bits of the beginning of the object.
400 b = objectstart(b, &mbits);
401 ptrbitp = mbits.bitp; //arena_start - off/wordsPerBitmapByte - 1;
403 for(i = 0; i < n; i += PtrSize) {
404 // Find bits for this word.
406 // dense mask (stack or data)
407 bits = (ptrmask[(i/PtrSize)/4]>>(((i/PtrSize)%4)*BitsPerPointer))&BitsMask;
409 // Check if we have reached end of span.
410 if((((uintptr)b+i)%PageSize) == 0 &&
411 runtime·mheap.spans[(b-arena_start)>>PageShift] != runtime·mheap.spans[(b+i-arena_start)>>PageShift])
413 // Consult GC bitmap.
415 if(wordsPerBitmapByte != 2)
416 runtime·throw("alg doesn't work for wordsPerBitmapByte != 2");
417 j = ((uintptr)b+i)/PtrSize & 1;
420 bits &= ~bitBoundary;
423 if((bits&bitBoundary) != 0 && i != 0)
424 break; // reached beginning of the next object
425 bits = (bits>>2)&BitsMask;
427 break; // reached no-scan part of the object
430 if(bits <= BitsScalar) // BitsScalar || BitsDead
432 if(bits != BitsPointer) {
433 runtime·printf("gc bits=%x\n", bits);
434 runtime·throw("unexpected garbage collection bits");
437 obj = *(byte**)(b+i);
438 // At this point we have extracted the next potential pointer.
439 // Check if it points into heap.
440 if(obj == nil || obj < arena_start || obj >= arena_used)
442 // Mark the object, return some important bits.
443 // If we combine the following two routines we don't have to pass mbits or obj around.
444 obj = objectstart(obj, &mbits);
445 wbuf = greyobject(obj, &mbits, wbuf);
450 // scanblock starts by scanning b as scanobject would.
451 // If the gcphase is GCscan, that's all scanblock does.
452 // Otherwise it traverses some fraction of the pointers it found in b, recursively.
453 // As a special case, scanblock(nil, 0, nil) means to scan previously queued work,
454 // stopping only when no work is left in the system.
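// For example, roots are scanned with an explicit range and mask, while queued heap
// work is drained with the nil form (both forms appear in markroot and gc below):
//
//	scanblock(runtime·data, runtime·edata - runtime·data, runtime·gcdatamask.bytedata);
//	scanblock(nil, 0, nil);	// drain work buffers until the work barrier is reached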
456 scanblock(byte *b, uintptr n, byte *ptrmask)
461 wbuf = getpartialorempty();
463 wbuf = scanobject(b, n, ptrmask, wbuf);
464 if(runtime·gcphase == GCscan) {
465 if(inheap(b) && !ptrmask)
466 // b is in heap, we are in GCscan so there should be a ptrmask.
467 runtime·throw("scanblock: In GCscan phase and inheap is true.");
468 // GCscan only goes one level deep since mark wb not turned on.
473 if(runtime·gcphase == GCscan) {
474 runtime·throw("scanblock: In GCscan phase but no b passed in.");
477 keepworking = b == nil;
479 // ptrmask can have 2 possible values:
480 // 1. nil - obtain pointer mask from GC bitmap.
481 // 2. pointer to a compact mask (for stacks and data).
483 if(wbuf->nobj == 0) {
488 // Refill workbuf from global queue.
489 wbuf = getfull(wbuf);
490 if(wbuf == nil) // nil means out of work barrier reached
494 runtime·throw("runtime:scanblock getfull returns empty buffer");
499 // If another proc wants a pointer, give it some.
500 if(runtime·work.nwait > 0 && wbuf->nobj > 4 && runtime·work.full == 0) {
501 wbuf = handoff(wbuf);
504 // This might be a good place to add prefetch code...
505 // if(wbuf->nobj > 4) {
506 // PREFETCH(wbuf->obj[wbuf->nobj - 3]);
509 b = wbuf->obj[wbuf->nobj];
510 wbuf = scanobject(b, runtime·mheap.arena_used - b, nil, wbuf);
515 markroot(ParFor *desc, uint32 i)
526 // Note: if you add a case here, please also update heapdump.c:dumproots.
529 scanblock(runtime·data, runtime·edata - runtime·data, runtime·gcdatamask.bytedata);
533 scanblock(runtime·bss, runtime·ebss - runtime·bss, runtime·gcbssmask.bytedata);
537 for(fb=runtime·allfin; fb; fb=fb->alllink)
538 scanblock((byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]), finptrmask);
542 // mark MSpan.specials
543 sg = runtime·mheap.sweepgen;
544 for(spanidx=0; spanidx<runtime·work.nspan; spanidx++) {
546 SpecialFinalizer *spf;
548 s = runtime·work.spans[spanidx];
549 if(s->state != MSpanInUse)
551 if(s->sweepgen != sg) {
552 runtime·printf("sweep %d %d\n", s->sweepgen, sg);
553 runtime·throw("gc: unswept span");
555 for(sp = s->specials; sp != nil; sp = sp->next) {
556 if(sp->kind != KindSpecialFinalizer)
558 // don't mark finalized object, but scan it so we
559 // retain everything it points to.
560 spf = (SpecialFinalizer*)sp;
561 // A finalizer can be set for an inner byte of an object, find object beginning.
562 p = (void*)((s->start << PageShift) + spf->special.offset/s->elemsize*s->elemsize);
563 if(runtime·gcphase != GCscan)
564 scanblock(p, s->elemsize, nil); // Scanned during mark phase
565 scanblock((void*)&spf->fn, PtrSize, oneptr);
570 case RootFlushCaches:
571 if (runtime·gcphase != GCscan) // Do not flush mcaches during GCscan phase.
576 // the rest is scanning goroutine stacks
577 if(i - RootCount >= runtime·allglen)
578 runtime·throw("markroot: bad index");
579 gp = runtime·allg[i - RootCount];
580 // remember when we've first observed the G blocked
581 // needed only to output in traceback
582 status = runtime·readgstatus(gp); // We are not in a scan state
583 if((status == Gwaiting || status == Gsyscall) && gp->waitsince == 0)
584 gp->waitsince = runtime·work.tstart;
585 // Shrink a stack if not much of it is being used, but not during the scan phase.
586 if (runtime·gcphase != GCscan) // Do not shrink during GCscan phase.
587 runtime·shrinkstack(gp);
588 if(runtime·readgstatus(gp) == Gdead)
589 gp->gcworkdone = true;
591 gp->gcworkdone = false;
592 restart = runtime·stopg(gp);
594 // goroutine will scan its own stack when it stops running.
595 // Wait until it has.
596 while(runtime·readgstatus(gp) == Grunning && !gp->gcworkdone) {
599 // scanstack(gp) is done as part of gcphasework
600 // But to make sure it has finished we need to make sure that
601 // the stack traps have all responded, so drop into
602 // this while loop until they respond.
603 while(!gp->gcworkdone){
604 status = runtime·readgstatus(gp);
605 if(status == Gdead) {
606 gp->gcworkdone = true; // scan is a noop
608 //do nothing, scan not needed.
610 if(status == Gwaiting || status == Grunnable)
611 restart = runtime·stopg(gp);
614 runtime·restartg(gp);
619 // wblock is used for creating new empty work buffer blocks.
622 // Get an empty work buffer off the work.empty list,
623 // allocating new buffers as needed.
631 if(runtime·work.empty)
632 b = (Workbuf*)runtime·lfstackpop(&runtime·work.empty);
634 if(b && b->nobj != 0) {
635 runtime·printf("m%d: getempty: popped b=%p with non-zero b->nobj=%d\n", g->m->id, b, (uint32)b->nobj);
636 runtime·throw("getempty: workbuffer not empty, b->nobj not 0");
639 runtime·lock(&wblock);
640 b = runtime·persistentalloc(sizeof(*b), CacheLineSize, &mstats.gc_sys);
642 runtime·unlock(&wblock);
651 runtime·throw("putempty: b->nobj not 0\n");
653 runtime·lfstackpush(&runtime·work.empty, &b->node);
656 // Put a full or partially full workbuf on the full list.
661 runtime·throw("putfull: b->nobj <= 0\n");
663 runtime·lfstackpush(&runtime·work.full, &b->node);
666 // Get a partially filled work buffer;
667 // if none are available, get an empty one.
669 getpartialorempty(void)
673 b = (Workbuf*)runtime·lfstackpop(&runtime·work.partial);
680 putpartial(Workbuf *b)
684 runtime·lfstackpush(&runtime·work.empty, &b->node);
685 else if (b->nobj < nelem(b->obj))
686 runtime·lfstackpush(&runtime·work.partial, &b->node);
687 else if (b->nobj == nelem(b->obj))
688 runtime·lfstackpush(&runtime·work.full, &b->node);
690 runtime·printf("b=%p, b->nobj=%d, nelem(b->obj)=%d\n", b, (uint32)b->nobj, (uint32)nelem(b->obj));
691 runtime·throw("putpartial: bad Workbuf b->nobj");
696 runtime·gcworkbuffree(Workbuf *b)
706 // Get a full work buffer off the work.full or a partially
707 // filled one off the work.partial list. If nothing is available
708 // wait until all the other gc helpers have finished and then return nil.
710 // getfull acts as a barrier for work.nproc helpers. As long as one
711 // gchelper is actively marking objects it
712 // may create a workbuffer that the other helpers can work on.
713 // The for loop either exits when a work buffer is found
714 // or when _all_ of the work.nproc GC helpers are in the loop
715 // looking for work and thus not capable of creating new work.
716 // This is in fact the termination condition for the STW mark phase.
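// In rough sketch form, the check that ends the loop below is
//
//	if(runtime·work.nwait == runtime·work.nproc && runtime·work.full == 0)
//		return nil;	// no helper can produce new work: marking is done
//
// i.e. every helper is waiting and no full buffers remain.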
726 b = (Workbuf*)runtime·lfstackpop(&runtime·work.full);
728 b = (Workbuf*)runtime·lfstackpop(&runtime·work.partial);
729 if(b != nil || runtime·work.nproc == 1)
732 runtime·xadd(&runtime·work.nwait, +1);
734 if(runtime·work.full != 0) {
735 runtime·xadd(&runtime·work.nwait, -1);
736 b = (Workbuf*)runtime·lfstackpop(&runtime·work.full);
738 b = (Workbuf*)runtime·lfstackpop(&runtime·work.partial);
741 runtime·xadd(&runtime·work.nwait, +1);
743 if(runtime·work.nwait == runtime·work.nproc)
746 g->m->gcstats.nprocyield++;
747 runtime·procyield(20);
749 g->m->gcstats.nosyield++;
752 g->m->gcstats.nsleep++;
764 // Make new buffer with half of b's pointers.
769 runtime·memmove(b1->obj, b->obj+b->nobj, n*sizeof b1->obj[0]);
770 g->m->gcstats.nhandoff++;
771 g->m->gcstats.nhandoffcnt += n;
773 // Put b on full list - let first half of b get stolen.
774 runtime·lfstackpush(&runtime·work.full, &b->node);
779 runtime·stackmapdata(StackMap *stackmap, int32 n)
781 if(n < 0 || n >= stackmap->n)
782 runtime·throw("stackmapdata: index out of range");
783 return (BitVector){stackmap->nbit, stackmap->bytedata + n*((stackmap->nbit+31)/32*4)};
786 // Scan a stack frame: local variables and function arguments/results.
788 scanframe(Stkframe *frame, void *unused)
793 uintptr size, minsize;
799 targetpc = frame->continpc;
805 runtime·printf("scanframe %s\n", runtime·funcname(f));
806 if(targetpc != f->entry)
808 pcdata = runtime·pcdatavalue(f, PCDATA_StackMapIndex, targetpc);
810 // We do not have a valid pcdata value but there might be a
811 // stackmap for this function. It is likely that we are looking
812 // at the function prologue, assume so and hope for the best.
816 // Scan local variables if stack frame has been allocated.
817 size = frame->varp - frame->sp;
818 if(thechar != '6' && thechar != '8')
819 minsize = sizeof(uintptr);
823 stackmap = runtime·funcdata(f, FUNCDATA_LocalsPointerMaps);
824 if(stackmap == nil || stackmap->n <= 0) {
825 runtime·printf("runtime: frame %s untyped locals %p+%p\n", runtime·funcname(f), (byte*)(frame->varp-size), size);
826 runtime·throw("missing stackmap");
829 // Locals bitmap information, scan just the pointers in locals.
830 if(pcdata < 0 || pcdata >= stackmap->n) {
831 // don't know where we are
832 runtime·printf("runtime: pcdata is %d and %d locals stack map entries for %s (targetpc=%p)\n",
833 pcdata, stackmap->n, runtime·funcname(f), targetpc);
834 runtime·throw("scanframe: bad symbol table");
836 bv = runtime·stackmapdata(stackmap, pcdata);
837 size = (bv.n * PtrSize) / BitsPerPointer;
838 scanblock((byte*)(frame->varp - size), bv.n/BitsPerPointer*PtrSize, bv.bytedata);
842 if(frame->arglen > 0) {
843 if(frame->argmap != nil)
846 stackmap = runtime·funcdata(f, FUNCDATA_ArgsPointerMaps);
847 if(stackmap == nil || stackmap->n <= 0) {
848 runtime·printf("runtime: frame %s untyped args %p+%p\n", runtime·funcname(f), frame->argp, (uintptr)frame->arglen);
849 runtime·throw("missing stackmap");
851 if(pcdata < 0 || pcdata >= stackmap->n) {
852 // don't know where we are
853 runtime·printf("runtime: pcdata is %d and %d args stack map entries for %s (targetpc=%p)\n",
854 pcdata, stackmap->n, runtime·funcname(f), targetpc);
855 runtime·throw("scanframe: bad symbol table");
857 bv = runtime·stackmapdata(stackmap, pcdata);
859 scanblock((byte*)frame->argp, bv.n/BitsPerPointer*PtrSize, bv.bytedata);
868 bool (*fn)(Stkframe*, void*);
870 if((runtime·readgstatus(gp)&Gscan) == 0) {
871 runtime·printf("runtime: gp=%p, goid=%D, gp->atomicstatus=%d\n", gp, gp->goid, runtime·readgstatus(gp));
872 runtime·throw("mark - bad status");
875 switch(runtime·readgstatus(gp)&~Gscan) {
877 runtime·printf("runtime: gp=%p, goid=%D, gp->atomicstatus=%d\n", gp, gp->goid, runtime·readgstatus(gp));
878 runtime·throw("mark - bad status");
882 runtime·throw("scanstack: - goroutine not stopped");
890 runtime·throw("can't scan our own stack");
891 if((mp = gp->m) != nil && mp->helpgc)
892 runtime·throw("can't scan gchelper stack");
895 runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, 0x7fffffff, &fn, nil, false);
896 runtime·tracebackdefers(gp, &fn, nil);
899 // If the slot is grey or black, return true; if white, return false.
900 // If the slot is not in the known heap and thus does not have a valid GC bitmap then
901 // it is considered grey. Globals and stacks can hold such slots.
902 // The slot is grey if its mark bit is set and it is enqueued to be scanned.
903 // The slot is black if it has already been scanned.
904 // It is white if it has a valid mark bit and the bit is not set.
910 if(!inheap(slot)) // non-heap slots considered grey
913 objectstart(slot, &mbits);
914 return (mbits.bits&bitMarked) != 0;
917 // Shade the object if it isn't already.
918 // The object is not nil and known to be in the heap.
927 runtime·throw("shade: passed an address not in the heap");
929 wbuf = getpartialorempty();
930 // Mark the object, return some important bits.
931 // If we combine the following two routines we don't have to pass mbits or obj around.
932 obj = objectstart(b, &mbits);
933 wbuf = greyobject(obj, &mbits, wbuf); // augments the wbuf
938 // This is the Dijkstra barrier coarsened to shade grey to white whereas
939 // the original Dijkstra barrier only shaded black to white.
941 // Shade indicates that it has seen a white pointer by adding the referent to a wbuf as well as marking it.
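//
// As a sketch of the barrier's effect (not the compiler-emitted sequence), a pointer
// store *slot = ptr performed through the barrier behaves as:
//
//	if(ptr != nil && inheap(ptr) && shaded((void*)slot))
//		shade(ptr);	// queue the referent so it cannot be hidden from the GC
//	*slot = ptr;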
944 runtime·markwb(void **slot, void *ptr)
946 // initial nil check avoids some needless loads
947 if(ptr != nil && inheap(ptr) && shaded((void*)slot))
952 // The gp has been moved to a GC safepoint. GC phase specific
953 // work is done here.
955 runtime·gcphasework(G *gp)
957 switch(runtime·gcphase) {
959 runtime·throw("gcphasework in bad gcphase");
967 // scan the stack, mark the objects, put pointers in work buffers
968 // hanging off the P where this is being run.
972 case GCmarktermination:
974 // All available mark work will be emptied before returning.
977 gp->gcworkdone = true;
980 #pragma dataflag NOPTR
981 static byte finalizer1[] = {
982 // Each Finalizer is 5 words, ptr ptr uintptr ptr ptr.
983 // Each byte describes 4 words.
984 // Need 4 Finalizers described by 5 bytes before pattern repeats:
985 // ptr ptr uintptr ptr ptr
986 // ptr ptr uintptr ptr ptr
987 // ptr ptr uintptr ptr ptr
988 // ptr ptr uintptr ptr ptr
990 // ptr ptr uintptr ptr
991 // ptr ptr ptr uintptr
993 // uintptr ptr ptr ptr
994 // ptr uintptr ptr ptr
995 // Assumptions about Finalizer layout checked below.
996 BitsPointer | BitsPointer<<2 | BitsScalar<<4 | BitsPointer<<6,
997 BitsPointer | BitsPointer<<2 | BitsPointer<<4 | BitsScalar<<6,
998 BitsPointer | BitsPointer<<2 | BitsPointer<<4 | BitsPointer<<6,
999 BitsScalar | BitsPointer<<2 | BitsPointer<<4 | BitsPointer<<6,
1000 BitsPointer | BitsScalar<<2 | BitsPointer<<4 | BitsPointer<<6,
1004 runtime·queuefinalizer(byte *p, FuncVal *fn, uintptr nret, Type *fint, PtrType *ot)
1010 runtime·lock(&runtime·finlock);
1011 if(runtime·finq == nil || runtime·finq->cnt == runtime·finq->cap) {
1012 if(runtime·finc == nil) {
1013 runtime·finc = runtime·persistentalloc(FinBlockSize, 0, &mstats.gc_sys);
1014 runtime·finc->cap = (FinBlockSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
1015 runtime·finc->alllink = runtime·allfin;
1016 runtime·allfin = runtime·finc;
1017 if(finptrmask[0] == 0) {
1018 // Build pointer mask for Finalizer array in block.
1019 // Check assumptions made in finalizer1 array above.
1020 if(sizeof(Finalizer) != 5*PtrSize ||
1021 offsetof(Finalizer, fn) != 0 ||
1022 offsetof(Finalizer, arg) != PtrSize ||
1023 offsetof(Finalizer, nret) != 2*PtrSize ||
1024 offsetof(Finalizer, fint) != 3*PtrSize ||
1025 offsetof(Finalizer, ot) != 4*PtrSize ||
1026 BitsPerPointer != 2) {
1027 runtime·throw("finalizer out of sync");
1029 for(i=0; i<nelem(finptrmask); i++)
1030 finptrmask[i] = finalizer1[i%nelem(finalizer1)];
1033 block = runtime·finc;
1034 runtime·finc = block->next;
1035 block->next = runtime·finq;
1036 runtime·finq = block;
1038 f = &runtime·finq->fin[runtime·finq->cnt];
1039 runtime·finq->cnt++;
1045 runtime·fingwake = true;
1046 runtime·unlock(&runtime·finlock);
1050 runtime·iterate_finq(void (*callback)(FuncVal*, byte*, uintptr, Type*, PtrType*))
1056 for(fb = runtime·allfin; fb; fb = fb->alllink) {
1057 for(i = 0; i < fb->cnt; i++) {
1059 callback(f->fn, f->arg, f->nret, f->fint, f->ot);
1064 // Returns only when span s has been swept.
1066 runtime·MSpan_EnsureSwept(MSpan *s)
1070 // Caller must disable preemption.
1071 // Otherwise when this function returns the span can become unswept again
1072 // (if GC is triggered on another goroutine).
1073 if(g->m->locks == 0 && g->m->mallocing == 0 && g != g->m->g0)
1074 runtime·throw("MSpan_EnsureSwept: m is not locked");
1076 sg = runtime·mheap.sweepgen;
1077 if(runtime·atomicload(&s->sweepgen) == sg)
1079 // The caller must be sure that the span is a MSpanInUse span.
1080 if(runtime·cas(&s->sweepgen, sg-2, sg-1)) {
1081 runtime·MSpan_Sweep(s, false);
1084 // unfortunate condition, and we don't have efficient means to wait
1085 while(runtime·atomicload(&s->sweepgen) != sg)
1089 // Sweep frees or collects finalizers for blocks not marked in the mark phase.
1090 // It clears the mark bits in preparation for the next GC round.
1091 // Returns true if the span was returned to heap.
1092 // If preserve=true, don't return it to heap nor relink in MCentral lists;
1093 // caller takes care of it.
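// The sweepgen protocol used here and by MSpan_EnsureSwept/sweepone is, in sketch form:
//
//	s->sweepgen == mheap.sweepgen-2	// span needs sweeping
//	s->sweepgen == mheap.sweepgen-1	// span is being swept
//	s->sweepgen == mheap.sweepgen	// span has been swept and is ready to use
//
// mheap.sweepgen is incremented by 2 when a new sweep cycle begins (see gc below).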
1095 runtime·MSpan_Sweep(MSpan *s, bool preserve)
1097 int32 cl, n, npages, nfree;
1098 uintptr size, off, step;
1100 byte *p, *bitp, shift, xbits, bits;
1103 MLink head, *end, *link;
1104 Special *special, **specialp, *y;
1105 bool res, sweepgenset;
1107 // It's critical that we enter this function with preemption disabled,
1108 // GC must not start while we are in the middle of this function.
1109 if(g->m->locks == 0 && g->m->mallocing == 0 && g != g->m->g0)
1110 runtime·throw("MSpan_Sweep: m is not locked");
1111 sweepgen = runtime·mheap.sweepgen;
1112 if(s->state != MSpanInUse || s->sweepgen != sweepgen-1) {
1113 runtime·printf("MSpan_Sweep: state=%d sweepgen=%d mheap.sweepgen=%d\n",
1114 s->state, s->sweepgen, sweepgen);
1115 runtime·throw("MSpan_Sweep: bad span state");
1117 arena_start = runtime·mheap.arena_start;
1123 // Chunk full of small blocks.
1124 npages = runtime·class_to_allocnpages[cl];
1125 n = (npages << PageShift) / size;
1131 sweepgenset = false;
1133 // Mark any free objects in this span so we don't collect them.
1134 for(link = s->freelist; link != nil; link = link->next) {
1135 off = (uintptr*)link - (uintptr*)arena_start;
1136 bitp = arena_start - off/wordsPerBitmapByte - 1;
1137 shift = (off % wordsPerBitmapByte) * gcBits;
1138 *bitp |= bitMarked<<shift;
1141 // Unlink & free special records for any objects we're about to free.
1142 specialp = &s->specials;
1143 special = *specialp;
1144 while(special != nil) {
1145 // A finalizer can be set for an inner byte of an object, find object beginning.
1146 p = (byte*)(s->start << PageShift) + special->offset/size*size;
1147 off = (uintptr*)p - (uintptr*)arena_start;
1148 bitp = arena_start - off/wordsPerBitmapByte - 1;
1149 shift = (off % wordsPerBitmapByte) * gcBits;
1150 bits = (*bitp>>shift) & bitMask;
1151 if((bits&bitMarked) == 0) {
1152 // Find the exact byte for which the special was setup
1153 // (as opposed to object beginning).
1154 p = (byte*)(s->start << PageShift) + special->offset;
1155 // about to free object: splice out special record
1157 special = special->next;
1158 *specialp = special;
1159 if(!runtime·freespecial(y, p, size, false)) {
1160 // stop freeing of object if it has a finalizer
1161 *bitp |= bitMarked << shift;
1164 // object is still live: keep special record
1165 specialp = &special->next;
1166 special = *specialp;
1170 // Sweep through n objects of given size starting at p.
1171 // This thread owns the span now, so it can manipulate
1172 // the block bitmap without atomic operations.
1173 p = (byte*)(s->start << PageShift);
1174 // Find bits for the beginning of the span.
1175 off = (uintptr*)p - (uintptr*)arena_start;
1176 bitp = arena_start - off/wordsPerBitmapByte - 1;
1178 step = size/(PtrSize*wordsPerBitmapByte);
1179 // Rewind to the previous quadruple as we move to the next
1180 // at the beginning of the loop.
1187 for(; n > 0; n--, p += size) {
1192 shift = gcBits - shift;
1196 bits = (xbits>>shift) & bitMask;
1198 // Allocated and marked object, reset bits to allocated.
1199 if((bits&bitMarked) != 0) {
1200 *bitp &= ~(bitMarked<<shift);
1203 // At this point we know that we are looking at a garbage object
1204 // that needs to be collected.
1205 if(runtime·debug.allocfreetrace)
1206 runtime·tracefree(p, size);
1207 // Reset to allocated+noscan.
1208 *bitp = (xbits & ~((bitMarked|(BitsMask<<2))<<shift)) | ((uintptr)BitsDead<<(shift+2));
1212 runtime·throw("can't preserve large span");
1213 runtime·unmarkspan(p, s->npages<<PageShift);
1215 // important to set sweepgen before returning it to heap
1216 runtime·atomicstore(&s->sweepgen, sweepgen);
1218 // NOTE(rsc,dvyukov): The original implementation of efence
1219 // in CL 22060046 used SysFree instead of SysFault, so that
1220 // the operating system would eventually give the memory
1221 // back to us again, so that an efence program could run
1222 // longer without running out of memory. Unfortunately,
1223 // calling SysFree here without any kind of adjustment of the
1224 // heap data structures means that when the memory does
1225 // come back to us, we have the wrong metadata for it, either in
1226 // the MSpan structures or in the garbage collection bitmap.
1227 // Using SysFault here means that the program will run out of
1228 // memory fairly quickly in efence mode, but at least it won't
1229 // have mysterious crashes due to confused memory reuse.
1230 // It should be possible to switch back to SysFree if we also
1231 // implement and then call some kind of MHeap_DeleteSpan.
1232 if(runtime·debug.efence) {
1233 s->limit = nil; // prevent mlookup from finding this span
1234 runtime·SysFault(p, size);
1236 runtime·MHeap_Free(&runtime·mheap, s, 1);
1237 c->local_nlargefree++;
1238 c->local_largefree += size;
1239 runtime·xadd64(&mstats.next_gc, -(uint64)(size * (runtime·gcpercent + 100)/100));
1242 // Free small object.
1243 if(size > 2*sizeof(uintptr))
1244 ((uintptr*)p)[1] = (uintptr)0xdeaddeaddeaddeadll; // mark as "needs to be zeroed"
1245 else if(size > sizeof(uintptr))
1246 ((uintptr*)p)[1] = 0;
1248 end->next = (MLink*)p;
1254 // We need to set s->sweepgen = h->sweepgen only when all blocks are swept,
1255 // because of the potential for a concurrent free/SetFinalizer.
1256 // But we need to set it before we make the span available for allocation
1257 // (return it to heap or mcentral), because allocation code assumes that a
1258 // span is already swept if available for allocation.
1260 if(!sweepgenset && nfree == 0) {
1261 // The span must be in our exclusive ownership until we update sweepgen,
1262 // check for potential races.
1263 if(s->state != MSpanInUse || s->sweepgen != sweepgen-1) {
1264 runtime·printf("MSpan_Sweep: state=%d sweepgen=%d mheap.sweepgen=%d\n",
1265 s->state, s->sweepgen, sweepgen);
1266 runtime·throw("MSpan_Sweep: bad span state after sweep");
1268 runtime·atomicstore(&s->sweepgen, sweepgen);
1271 c->local_nsmallfree[cl] += nfree;
1272 c->local_cachealloc -= nfree * size;
1273 runtime·xadd64(&mstats.next_gc, -(uint64)(nfree * size * (runtime·gcpercent + 100)/100));
1274 res = runtime·MCentral_FreeSpan(&runtime·mheap.central[cl].mcentral, s, nfree, head.next, end, preserve);
1275 // MCentral_FreeSpan updates sweepgen
1280 // State of background runtime·sweep.
1281 // Protected by runtime·gclock.
1282 typedef struct SweepData SweepData;
1288 uint32 spanidx; // background sweeper position
1293 SweepData runtime·sweep;
1296 // returns number of pages returned to heap, or -1 if there is nothing to sweep
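// For example, the stop-the-world paths in finishsweep_m and gc drain it like this:
//
//	while(runtime·sweepone() != -1)
//		runtime·sweep.npausesweep++;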
1298 runtime·sweepone(void)
1304 // increment locks to ensure that the goroutine is not preempted
1305 // in the middle of the sweep, thus leaving the span in an inconsistent state for the next GC
1307 sg = runtime·mheap.sweepgen;
1309 idx = runtime·xadd(&runtime·sweep.spanidx, 1) - 1;
1310 if(idx >= runtime·work.nspan) {
1311 runtime·mheap.sweepdone = true;
1315 s = runtime·work.spans[idx];
1316 if(s->state != MSpanInUse) {
1320 if(s->sweepgen != sg-2 || !runtime·cas(&s->sweepgen, sg-2, sg-1))
1323 if(!runtime·MSpan_Sweep(s, false))
1333 g->m->scalararg[0] = runtime·sweepone();
1336 #pragma textflag NOSPLIT
1338 runtime·gosweepone(void)
1344 return g->m->scalararg[0];
1347 #pragma textflag NOSPLIT
1349 runtime·gosweepdone(void)
1351 return runtime·mheap.sweepdone;
1356 runtime·gchelper(void)
1360 g->m->traceback = 2;
1363 // parallel mark over GC roots
1364 runtime·parfordo(runtime·work.markfor);
1365 if(runtime·gcphase != GCscan)
1366 scanblock(nil, 0, nil); // blocks in getfull
1367 nproc = runtime·work.nproc; // work.nproc can change right after we increment work.ndone
1368 if(runtime·xadd(&runtime·work.ndone, +1) == nproc-1)
1369 runtime·notewakeup(&runtime·work.alldone);
1370 g->m->traceback = 0;
1379 for(pp=runtime·allp; p=*pp; pp++) {
1383 runtime·purgecachedstats(c);
1388 flushallmcaches(void)
1393 // Flush MCache's to MCentral.
1394 for(pp=runtime·allp; p=*pp; pp++) {
1398 runtime·MCache_ReleaseAll(c);
1399 runtime·stackcache_clear(c);
1404 flushallmcaches_m(G *gp)
1407 runtime·gogo(&gp->sched);
1411 runtime·updatememstats(GCStats *stats)
1421 runtime·memclr((byte*)stats, sizeof(*stats));
1422 for(mp=runtime·allm; mp; mp=mp->alllink) {
1424 src = (uint64*)&mp->gcstats;
1425 dst = (uint64*)stats;
1426 for(i=0; i<sizeof(*stats)/sizeof(uint64); i++)
1428 runtime·memclr((byte*)&mp->gcstats, sizeof(mp->gcstats));
1431 mstats.mcache_inuse = runtime·mheap.cachealloc.inuse;
1432 mstats.mspan_inuse = runtime·mheap.spanalloc.inuse;
1433 mstats.sys = mstats.heap_sys + mstats.stacks_sys + mstats.mspan_sys +
1434 mstats.mcache_sys + mstats.buckhash_sys + mstats.gc_sys + mstats.other_sys;
1436 // Calculate memory allocator stats.
1437 // During program execution we only count number of frees and amount of freed memory.
1438 // Current number of alive objects in the heap and amount of alive heap memory
1439 // are calculated by scanning all spans.
1440 // Total number of mallocs is calculated as number of frees plus number of alive objects.
1441 // Similarly, total amount of allocated memory is calculated as amount of freed memory
1442 // plus amount of alive heap memory.
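//
// In other words, after the span scan below the derived stats satisfy
//
//	total_alloc = alloc + largefree + smallfree	// live heap plus freed memory
//	nmalloc     = (live objects) + nfree
//
// matching the assignments at the end of this function.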
1444 mstats.total_alloc = 0;
1447 for(i = 0; i < nelem(mstats.by_size); i++) {
1448 mstats.by_size[i].nmalloc = 0;
1449 mstats.by_size[i].nfree = 0;
1452 // Flush MCache's to MCentral.
1456 fn = flushallmcaches_m;
1460 // Aggregate local stats.
1463 // Scan all spans and count number of alive objects.
1464 runtime·lock(&runtime·mheap.lock);
1465 for(i = 0; i < runtime·mheap.nspan; i++) {
1466 s = runtime·mheap.allspans[i];
1467 if(s->state != MSpanInUse)
1469 if(s->sizeclass == 0) {
1471 mstats.alloc += s->elemsize;
1473 mstats.nmalloc += s->ref;
1474 mstats.by_size[s->sizeclass].nmalloc += s->ref;
1475 mstats.alloc += s->ref*s->elemsize;
1478 runtime·unlock(&runtime·mheap.lock);
1480 // Aggregate by size class.
1482 mstats.nfree = runtime·mheap.nlargefree;
1483 for(i = 0; i < nelem(mstats.by_size); i++) {
1484 mstats.nfree += runtime·mheap.nsmallfree[i];
1485 mstats.by_size[i].nfree = runtime·mheap.nsmallfree[i];
1486 mstats.by_size[i].nmalloc += runtime·mheap.nsmallfree[i];
1487 smallfree += runtime·mheap.nsmallfree[i] * runtime·class_to_size[i];
1489 mstats.nfree += mstats.tinyallocs;
1490 mstats.nmalloc += mstats.nfree;
1492 // Calculate derived stats.
1493 mstats.total_alloc = mstats.alloc + runtime·mheap.largefree + smallfree;
1494 mstats.heap_alloc = mstats.alloc;
1495 mstats.heap_objects = mstats.nmalloc - mstats.nfree;
1498 // Structure of arguments passed to function gc().
1499 // This allows the arguments to be passed via runtime·mcall.
1502 int64 start_time; // start time of GC in ns (just before stoptheworld)
1506 static void gc(struct gc_args *args);
1509 runtime·readgogc(void)
1513 p = runtime·getenv("GOGC");
1514 if(p == nil || p[0] == '\0')
1516 if(runtime·strcmp(p, (byte*)"off") == 0)
1518 return runtime·atoi(p);
1522 runtime·gcinit(void)
1524 if(sizeof(Workbuf) != WorkbufSize)
1525 runtime·throw("runtime: size of Workbuf is suboptimal");
1527 runtime·work.markfor = runtime·parforalloc(MaxGcproc);
1528 runtime·gcpercent = runtime·readgogc();
1529 runtime·gcdatamask = unrollglobgcprog(runtime·gcdata, runtime·edata - runtime·data);
1530 runtime·gcbssmask = unrollglobgcprog(runtime·gcbss, runtime·ebss - runtime·bss);
1533 // Called from malloc.go using onM, stopping and starting the world handled in caller.
1541 runtime·casgstatus(gp, Grunning, Gwaiting);
1542 gp->waitreason = runtime·gostringnocopy((byte*)"garbage collection");
1544 a.start_time = (uint64)(g->m->scalararg[0]) | ((uint64)(g->m->scalararg[1]) << 32);
1545 a.eagersweep = g->m->scalararg[2];
1547 runtime·casgstatus(gp, Gwaiting, Grunning);
1551 runtime·finishsweep_m(void)
1556 // The world is stopped so we should be able to complete the sweeps
1558 while(runtime·sweepone() != -1)
1559 runtime·sweep.npausesweep++;
1561 // There may be some other spans being swept concurrently that
1562 // we need to wait for. If finishsweep_m is done with the world stopped
1563 // this code is not required.
1564 sg = runtime·mheap.sweepgen;
1565 for(i=0; i<runtime·work.nspan; i++) {
1566 s = runtime·work.spans[i];
1567 if(s->sweepgen == sg) {
1570 if(s->state != MSpanInUse) // Span is not part of the GCed heap so no need to ensure it is swept.
1572 runtime·MSpan_EnsureSwept(s);
1576 // Scan all of the stacks, greying (or graying if in America) the referents
1577 // but not blackening them since the mark write barrier isn't installed.
1579 runtime·gcscan_m(void)
1581 uint32 i, allglen, oldphase;
1582 G *gp, *mastergp, **allg;
1584 // Grab the g that called us and potentially allow rescheduling.
1585 // This allows it to be scanned like other goroutines.
1586 mastergp = g->m->curg;
1588 runtime·casgstatus(mastergp, Grunning, Gwaiting);
1589 mastergp->waitreason = runtime·gostringnocopy((byte*)"garbage collection scan");
1591 // Span sweeping has been done by finishsweep_m.
1592 // Long term we will want to make this goroutine runnable
1593 // by placing it onto a scanenqueue state and then calling
1594 // runtime·restartg(mastergp) to make it Grunnable.
1595 // At the bottom we will want to return this p back to the scheduler.
1597 oldphase = runtime·gcphase;
1599 runtime·lock(&runtime·allglock);
1600 allglen = runtime·allglen;
1601 allg = runtime·allg;
1602 // Prepare flag indicating that the scan has not been completed.
1603 for(i = 0; i < allglen; i++) {
1605 gp->gcworkdone = false; // set to true in gcphasework
1607 runtime·unlock(&runtime·allglock);
1609 runtime·work.nwait = 0;
1610 runtime·work.ndone = 0;
1611 runtime·work.nproc = 1; // For now do not do this in parallel.
1612 runtime·gcphase = GCscan;
1613 // ackgcphase is not needed since we are not scanning running goroutines.
1614 runtime·parforsetup(runtime·work.markfor, runtime·work.nproc, RootCount + allglen, nil, false, markroot);
1615 runtime·parfordo(runtime·work.markfor);
1617 runtime·lock(&runtime·allglock);
1619 allg = runtime·allg;
1620 // Check that gc work is done.
1621 for(i = 0; i < allglen; i++) {
1623 if(!gp->gcworkdone) {
1624 runtime·throw("scan missed a g");
1627 runtime·unlock(&runtime·allglock);
1629 runtime·gcphase = oldphase;
1630 runtime·casgstatus(mastergp, Gwaiting, Grunning);
1631 // Let the g that called us continue to run.
1635 gc(struct gc_args *args)
1637 int64 t0, t1, t2, t3, t4;
1638 uint64 heap0, heap1, obj;
1644 if(runtime·debug.allocfreetrace)
1647 g->m->traceback = 2;
1648 t0 = args->start_time;
1649 runtime·work.tstart = args->start_time;
1652 if(runtime·debug.gctrace)
1653 t1 = runtime·nanotime();
1655 runtime·finishsweep_m();
1657 // Cache runtime·mheap.allspans in work.spans to avoid conflicts with
1658 // resizing/freeing allspans.
1659 // New spans can be created while GC progresses, but they are not garbage for
1661 // - new stack spans can be created even while the world is stopped.
1662 // - new malloc spans can be created during the concurrent sweep
1664 // Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
1665 runtime·lock(&runtime·mheap.lock);
1666 // Free the old cached sweep array if necessary.
1667 if(runtime·work.spans != nil && runtime·work.spans != runtime·mheap.allspans)
1668 runtime·SysFree(runtime·work.spans, runtime·work.nspan*sizeof(runtime·work.spans[0]), &mstats.other_sys);
1669 // Cache the current array for marking.
1670 runtime·mheap.gcspans = runtime·mheap.allspans;
1671 runtime·work.spans = runtime·mheap.allspans;
1672 runtime·work.nspan = runtime·mheap.nspan;
1673 runtime·unlock(&runtime·mheap.lock);
1674 oldphase = runtime·gcphase;
1676 runtime·work.nwait = 0;
1677 runtime·work.ndone = 0;
1678 runtime·work.nproc = runtime·gcprocs();
1679 runtime·gcphase = GCmark;
1681 // World is stopped so allglen will not change.
1682 for(i = 0; i < runtime·allglen; i++) {
1683 gp = runtime·allg[i];
1684 gp->gcworkdone = false; // set to true in gcphasework
1687 runtime·parforsetup(runtime·work.markfor, runtime·work.nproc, RootCount + runtime·allglen, nil, false, markroot);
1688 if(runtime·work.nproc > 1) {
1689 runtime·noteclear(&runtime·work.alldone);
1690 runtime·helpgc(runtime·work.nproc);
1694 if(runtime·debug.gctrace)
1695 t2 = runtime·nanotime();
1698 runtime·parfordo(runtime·work.markfor);
1700 scanblock(nil, 0, nil);
1702 if(runtime·work.full)
1703 runtime·throw("runtime·work.full != nil");
1704 if(runtime·work.partial)
1705 runtime·throw("runtime·work.partial != nil");
1707 runtime·gcphase = oldphase;
1709 if(runtime·debug.gctrace)
1710 t3 = runtime·nanotime();
1712 if(runtime·work.nproc > 1)
1713 runtime·notesleep(&runtime·work.alldone);
1715 runtime·shrinkfinish();
1718 // next_gc calculation is tricky with concurrent sweep since we don't know size of live heap
1719 // estimate what was live heap size after previous GC (for tracing only)
1720 heap0 = mstats.next_gc*100/(runtime·gcpercent+100);
1721 // conservatively set next_gc to high value assuming that everything is live
1722 // concurrent/lazy sweep will reduce this number while discovering new garbage
1723 mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*runtime·gcpercent/100;
1725 t4 = runtime·nanotime();
1726 runtime·atomicstore64(&mstats.last_gc, runtime·unixnanotime()); // must be Unix time to make sense to user
1727 mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t4 - t0;
1728 mstats.pause_end[mstats.numgc%nelem(mstats.pause_end)] = t4;
1729 mstats.pause_total_ns += t4 - t0;
1732 runtime·printf("pause %D\n", t4-t0);
1734 if(runtime·debug.gctrace) {
1735 heap1 = mstats.heap_alloc;
1736 runtime·updatememstats(&stats);
1737 if(heap1 != mstats.heap_alloc) {
1738 runtime·printf("runtime: mstats skew: heap=%D/%D\n", heap1, mstats.heap_alloc);
1739 runtime·throw("mstats skew");
1741 obj = mstats.nmalloc - mstats.nfree;
1743 stats.nprocyield += runtime·work.markfor->nprocyield;
1744 stats.nosyield += runtime·work.markfor->nosyield;
1745 stats.nsleep += runtime·work.markfor->nsleep;
1747 runtime·printf("gc%d(%d): %D+%D+%D+%D us, %D -> %D MB, %D (%D-%D) objects,"
1750 " %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
1751 mstats.numgc, runtime·work.nproc, (t1-t0)/1000, (t2-t1)/1000, (t3-t2)/1000, (t4-t3)/1000,
1752 heap0>>20, heap1>>20, obj,
1753 mstats.nmalloc, mstats.nfree,
1755 runtime·work.nspan, runtime·sweep.nbgsweep, runtime·sweep.npausesweep,
1756 stats.nhandoff, stats.nhandoffcnt,
1757 runtime·work.markfor->nsteal, runtime·work.markfor->nstealcnt,
1758 stats.nprocyield, stats.nosyield, stats.nsleep);
1759 runtime·sweep.nbgsweep = runtime·sweep.npausesweep = 0;
1762 // See the comment in the beginning of this function as to why we need the following.
1763 // Even if this is still stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
1764 runtime·lock(&runtime·mheap.lock);
1765 // Free the old cached mark array if necessary.
1766 if(runtime·work.spans != nil && runtime·work.spans != runtime·mheap.allspans)
1767 runtime·SysFree(runtime·work.spans, runtime·work.nspan*sizeof(runtime·work.spans[0]), &mstats.other_sys);
1768 // Cache the current array for sweeping.
1769 runtime·mheap.gcspans = runtime·mheap.allspans;
1770 runtime·mheap.sweepgen += 2;
1771 runtime·mheap.sweepdone = false;
1772 runtime·work.spans = runtime·mheap.allspans;
1773 runtime·work.nspan = runtime·mheap.nspan;
1774 runtime·sweep.spanidx = 0;
1775 runtime·unlock(&runtime·mheap.lock);
1777 if(ConcurrentSweep && !args->eagersweep) {
1778 runtime·lock(&runtime·gclock);
1779 if(runtime·sweep.g == nil)
1780 runtime·sweep.g = runtime·newproc1(&bgsweepv, nil, 0, 0, gc);
1781 else if(runtime·sweep.parked) {
1782 runtime·sweep.parked = false;
1783 runtime·ready(runtime·sweep.g);
1785 runtime·unlock(&runtime·gclock);
1787 // Sweep all spans eagerly.
1788 while(runtime·sweepone() != -1)
1789 runtime·sweep.npausesweep++;
1790 // Do an additional mProf_GC, because all 'free' events are now real as well.
1795 g->m->traceback = 0;
1798 extern uintptr runtime·sizeof_C_MStats;
1800 static void readmemstats_m(void);
1803 runtime·readmemstats_m(void)
1807 stats = g->m->ptrarg[0];
1808 g->m->ptrarg[0] = nil;
1810 runtime·updatememstats(nil);
1811 // Size of the trailing by_size array differs between Go and C,
1812 // NumSizeClasses was changed, but we cannot change the Go struct because of backward compatibility.
1813 runtime·memmove(stats, &mstats, runtime·sizeof_C_MStats);
1815 // Stack numbers are part of the heap numbers, separate those out for user consumption
1816 stats->stacks_sys = stats->stacks_inuse;
1817 stats->heap_inuse -= stats->stacks_inuse;
1818 stats->heap_sys -= stats->stacks_inuse;
1821 static void readgcstats_m(void);
1823 #pragma textflag NOSPLIT
1825 runtime∕debug·readGCStats(Slice *pauses)
1829 g->m->ptrarg[0] = pauses;
1841 pauses = g->m->ptrarg[0];
1842 g->m->ptrarg[0] = nil;
1844 // Calling code in runtime/debug should make the slice large enough.
1845 if(pauses->cap < nelem(mstats.pause_ns)+3)
1846 runtime·throw("runtime: short slice passed to readGCStats");
1848 // Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
1849 p = (uint64*)pauses->array;
1850 runtime·lock(&runtime·mheap.lock);
1853 if(n > nelem(mstats.pause_ns))
1854 n = nelem(mstats.pause_ns);
1856 // The pause buffer is circular. The most recent pause is at
1857 // pause_ns[(numgc-1)%nelem(pause_ns)], and then backward
1858 // from there to go back farther in time. We deliver the times
1859 // most recent first (in p[0]).
1860 for(i=0; i<n; i++) {
1861 j = (mstats.numgc-1-i)%nelem(mstats.pause_ns);
1862 p[i] = mstats.pause_ns[j];
1863 p[n+i] = mstats.pause_end[j];
1866 p[n+n] = mstats.last_gc;
1867 p[n+n+1] = mstats.numgc;
1868 p[n+n+2] = mstats.pause_total_ns;
1869 runtime·unlock(&runtime·mheap.lock);
1870 pauses->len = n+n+3;
1874 runtime·setgcpercent_m(void)
1879 in = (int32)(intptr)g->m->scalararg[0];
1881 runtime·lock(&runtime·mheap.lock);
1882 out = runtime·gcpercent;
1885 runtime·gcpercent = in;
1886 runtime·unlock(&runtime·mheap.lock);
1888 g->m->scalararg[0] = (uintptr)(intptr)out;
1894 if(g->m->helpgc < 0 || g->m->helpgc >= MaxGcproc)
1895 runtime·throw("gchelperstart: bad m->helpgc");
1897 runtime·throw("gchelper not running on g0 stack");
1901 runtime·wakefing(void)
1906 runtime·lock(&runtime·finlock);
1907 if(runtime·fingwait && runtime·fingwake) {
1908 runtime·fingwait = false;
1909 runtime·fingwake = false;
1912 runtime·unlock(&runtime·finlock);
1916 // Recursively unrolls GC program in prog.
1917 // mask is where to store the result.
1918 // ppos is a pointer to position in mask, in bits.
1919 // sparse says to generate a 4-bit-per-word mask for the heap (a 2-bit mask for data/bss otherwise).
unrollgcprog1(byte *mask, byte *prog, uintptr *ppos, bool inplace, bool sparse)
	uintptr pos, siz, i, off;
	byte *arena_start, *prog1, v, *bitp, shift;

	arena_start = runtime·mheap.arena_start;

	for(i = 0; i < siz; i++) {
		v = prog[i/PointersPerByte];
		v >>= (i%PointersPerByte)*BitsPerPointer;

		// Store directly into GC bitmap.
		off = (uintptr*)(mask+pos) - (uintptr*)arena_start;
		bitp = arena_start - off/wordsPerBitmapByte - 1;
		shift = (off % wordsPerBitmapByte) * gcBits;
		*bitp |= v<<(shift+2);

		pos += BitsPerPointer;
	}
	prog += ROUND(siz*BitsPerPointer, 8)/8;

	for(i = 0; i < PtrSize; i++)
		siz = (siz<<8) + prog[PtrSize-i-1];

	for(i = 0; i < siz; i++)
		prog1 = unrollgcprog1(mask, prog, &pos, inplace, sparse);
	if(prog1[0] != insArrayEnd)
		runtime·throw("unrollgcprog: array does not end with insArrayEnd");

	runtime·throw("unrollgcprog: unknown instruction");
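// Illustrative sketch (not part of the runtime): packing and unpacking the dense
// 2-bit-per-pointer mask used for data/bss above. The assumed constants
// (BitsPerPointer=2, PointersPerByte=4) match the indexing in unrollgcprog1, but
// the helper names are hypothetical.
#include <stdint.h>

enum { SketchBitsPerPointer = 2, SketchPointersPerByte = 4, SketchBitsMask = 3 };

static void
mask_set(uint8_t *mask, uintptr_t i, uint8_t bits)
{
	uintptr_t shift = (i % SketchPointersPerByte) * SketchBitsPerPointer;
	mask[i/SketchPointersPerByte] &= ~(SketchBitsMask << shift);        // clear the 2-bit slot
	mask[i/SketchPointersPerByte] |= (bits & SketchBitsMask) << shift;  // write the new value
}

static uint8_t
mask_get(uint8_t *mask, uintptr_t i)
{
	uintptr_t shift = (i % SketchPointersPerByte) * SketchBitsPerPointer;
	return (mask[i/SketchPointersPerByte] >> shift) & SketchBitsMask;
}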
// Unrolls GC program prog for data/bss, returns a dense GC mask.
unrollglobgcprog(byte *prog, uintptr size)
	uintptr pos, masksize;

	masksize = ROUND(ROUND(size, PtrSize)/PtrSize*BitsPerPointer, 8)/8;
	mask = runtime·persistentalloc(masksize+1, 0, &mstats.gc_sys);
	mask[masksize] = 0xa1;

	prog = unrollgcprog1(mask, prog, &pos, false, false);
	if(pos != size/PtrSize*BitsPerPointer) {
		runtime·printf("unrollglobgcprog: bad program size, got %D, expect %D\n",
			(uint64)pos, (uint64)size/PtrSize*BitsPerPointer);
		runtime·throw("unrollglobgcprog: bad program size");
	}
	if(prog[0] != insEnd)
		runtime·throw("unrollglobgcprog: program does not end with insEnd");
	if(mask[masksize] != 0xa1)
		runtime·throw("unrollglobgcprog: overflow");
	return (BitVector){masksize*8, mask};
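// Illustrative sketch (not part of the runtime): the masksize arithmetic used
// above, with ROUND written out explicitly. For example, on a 64-bit system
// (PtrSize=8, BitsPerPointer=2) a 100-byte data symbol needs ROUND(100,8)/8 = 13
// words, 13*2 = 26 bits, rounded up to 32 bits = 4 bytes of mask. The extra byte
// set to 0xa1 acts as a canary that the unroll did not write past the end. The
// macro and function names here are hypothetical.
#include <stdint.h>

#define SKETCH_ROUND(x, n) (((x) + (n) - 1) & ~((uintptr_t)(n) - 1))  // n must be a power of two

static uintptr_t
dense_mask_size(uintptr_t size, uintptr_t ptrsize, uintptr_t bitsperptr)
{
	uintptr_t words = SKETCH_ROUND(size, ptrsize) / ptrsize;  // pointer-sized words covered
	return SKETCH_ROUND(words * bitsperptr, 8) / 8;           // bits rounded up to whole bytes
}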
runtime·unrollgcproginplace_m(void)
	uintptr size, size0, pos, off;
	byte *arena_start, *prog, *bitp, shift;

	v = g->m->ptrarg[0];
	typ = g->m->ptrarg[1];
	size = g->m->scalararg[0];
	size0 = g->m->scalararg[1];
	g->m->ptrarg[0] = nil;
	g->m->ptrarg[1] = nil;

	prog = (byte*)typ->gc[1];
	unrollgcprog1(v, prog, &pos, true, true);

	// Mark the first word as a boundary (bitBoundary).
	arena_start = runtime·mheap.arena_start;
	off = (uintptr*)v - (uintptr*)arena_start;
	bitp = arena_start - off/wordsPerBitmapByte - 1;
	shift = (off % wordsPerBitmapByte) * gcBits;
	*bitp |= bitBoundary<<shift;

	// Mark the word after the last one as BitsDead.
	off = (uintptr*)((byte*)v + size0) - (uintptr*)arena_start;
	bitp = arena_start - off/wordsPerBitmapByte - 1;
	shift = (off % wordsPerBitmapByte) * gcBits;
	*bitp &= ~(bitPtrMask<<shift) | ((uintptr)BitsDead<<(shift+2));
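// Illustrative sketch (not part of the runtime): the heap-bitmap addressing
// pattern repeated above and in markspan/getgcmask below. Each arena word owns
// gcBits bits of bitmap, packed wordsPerBitmapByte words to a byte, and the
// bitmap grows downward from just below arena_start. Assumed values: gcBits=4,
// wordsPerBitmapByte=2; the helper name is hypothetical.
#include <stdint.h>

enum { SketchGCBits = 4, SketchWordsPerBitmapByte = 2 };

static void
bitmap_addr(uint8_t *arena_start, uintptr_t *word, uint8_t **bitp, uint8_t *shift)
{
	// Word index of 'word' within the arena.
	uintptr_t off = word - (uintptr_t*)arena_start;
	// Bitmap byte holding this word's bits, located below arena_start.
	*bitp = arena_start - off/SketchWordsPerBitmapByte - 1;
	// Bit offset of this word's nibble within that byte.
	*shift = (off % SketchWordsPerBitmapByte) * SketchGCBits;
}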
// Unrolls the GC program in typ->gc[1] into typ->gc[0].
runtime·unrollgcprog_m(void)
	typ = g->m->ptrarg[0];
	g->m->ptrarg[0] = nil;

	runtime·lock(&lock);
	mask = (byte*)typ->gc[0];

	pos = 8; // skip the unroll flag
	prog = (byte*)typ->gc[1];
	prog = unrollgcprog1(mask, prog, &pos, false, true);
	if(prog[0] != insEnd)
		runtime·throw("unrollgcprog: program does not end with insEnd");
	if(((typ->size/PtrSize)%2) != 0) {
		// repeat the program twice
		prog = (byte*)typ->gc[1];
		unrollgcprog1(mask, prog, &pos, false, true);
	}

	// atomic way to say mask[0] = 1
	x = *(uintptr*)mask;
	((byte*)&x)[0] = 1;
	runtime·atomicstorep((void**)mask, (void*)x);

	runtime·unlock(&lock);
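// Illustrative sketch (not part of the runtime): publishing a byte-sized
// "unrolled" flag with a word-wide atomic store, as done above. A reader that
// checks the flag byte with a plain load either sees 0 (and unrolls itself under
// the lock) or sees 1 with the fully written mask visible, because the store is
// a release-style publication. C11 atomics stand in for runtime·atomicstorep;
// the function name is hypothetical.
#include <stdatomic.h>
#include <stdint.h>

static void
publish_unrolled(_Atomic uintptr_t *maskword)
{
	uintptr_t x = atomic_load_explicit(maskword, memory_order_relaxed);
	((uint8_t*)&x)[0] = 1;   // set the unroll flag in the mask's first byte
	atomic_store_explicit(maskword, x, memory_order_release);
}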
// Mark the span of memory at v as having n blocks of the given size.
// If leftover is true, there is leftover space at the end of the span.
runtime·markspan(void *v, uintptr size, uintptr n, bool leftover)
	uintptr i, off, step;

	if((byte*)v+size*n > (byte*)runtime·mheap.arena_used || (byte*)v < runtime·mheap.arena_start)
		runtime·throw("markspan: bad pointer");

	// Find bits of the beginning of the span.
	off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset
	b = runtime·mheap.arena_start - off/wordsPerBitmapByte - 1;
	if((off%wordsPerBitmapByte) != 0)
		runtime·throw("markspan: unaligned length");

	// Okay to use non-atomic ops here, because we control
	// the entire span, and each bitmap byte has bits for only
	// one span, so no other goroutines are changing these bitmap words.

	if(size == PtrSize) {
		// Possible only on 64-bit systems (minimal size class is 8 bytes).
		// Poor man's memset(0x11).
		if(0x11 != ((bitBoundary+BitsDead)<<gcBits) + (bitBoundary+BitsDead))
			runtime·throw("markspan: bad bits");
		if((n%(wordsPerBitmapByte*PtrSize)) != 0)
			runtime·throw("markspan: unaligned length");
		b = b - n/wordsPerBitmapByte + 1; // find first byte
		if(((uintptr)b%PtrSize) != 0)
			runtime·throw("markspan: unaligned pointer");
		for(i = 0; i != n; i += wordsPerBitmapByte*PtrSize, b += PtrSize)
			*(uintptr*)b = (uintptr)0x1111111111111111ULL; // bitBoundary+BitsDead
		return;
	}

	if(leftover)
		n++; // mark a boundary just past end of last block too
	step = size/(PtrSize*wordsPerBitmapByte);
	for(i = 0; i != n; i++, b -= step)
		*b = bitBoundary|(BitsDead<<2);
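// Illustrative sketch (not part of the runtime): why the 0x11 pattern checked
// above is the right byte to splat. It packs two identical per-word nibbles,
// bitBoundary|(BitsDead<<2), into one bitmap byte so a whole byte (two words)
// can be initialized at once. Assumed encoding: bitBoundary=1, BitsDead=0,
// gcBits=4; names are hypothetical.
#include <stdint.h>

enum { SketchBitBoundary = 1, SketchBitsDeadVal = 0, SketchGCBitsPerWord = 4 };

static uint8_t
span_init_byte(void)
{
	uint8_t nib = SketchBitBoundary | (SketchBitsDeadVal << 2);  // one word's bitmap nibble
	return (uint8_t)((nib << SketchGCBitsPerWord) | nib);        // two words per bitmap byte
}
// Under these assumptions span_init_byte() == 0x11, matching the throw check above.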
// Unmark the span of memory at v of length n bytes.
runtime·unmarkspan(void *v, uintptr n)
	if((byte*)v+n > (byte*)runtime·mheap.arena_used || (byte*)v < runtime·mheap.arena_start)
		runtime·throw("unmarkspan: bad pointer");

	off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset
	if((off % (PtrSize*wordsPerBitmapByte)) != 0)
		runtime·throw("unmarkspan: unaligned pointer");
	b = runtime·mheap.arena_start - off/wordsPerBitmapByte - 1;

	if(n%(PtrSize*wordsPerBitmapByte) != 0)
		runtime·throw("unmarkspan: unaligned length");
	// Okay to use non-atomic ops here, because we control
	// the entire span, and each bitmap word has bits for only
	// one span, so no other goroutines are changing these bitmap words.
	n /= wordsPerBitmapByte;
	runtime·memclr(b - n + 1, n);
runtime·MHeap_MapBits(MHeap *h)
	// Caller has added extra mappings to the arena.
	// Add extra mappings of bitmap words as needed.
	// We allocate extra bitmap pieces in chunks of bitmapChunk.

	n = (h->arena_used - h->arena_start) / (PtrSize*wordsPerBitmapByte);
	n = ROUND(n, bitmapChunk);
	n = ROUND(n, PhysPageSize);
	if(h->bitmap_mapped >= n)
		return;

	runtime·SysMap(h->arena_start - n, n - h->bitmap_mapped, h->arena_reserved, &mstats.gc_sys);
	h->bitmap_mapped = n;
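// Illustrative sketch (not part of the runtime): the bitmap sizing above. Every
// PtrSize*wordsPerBitmapByte bytes of arena need one byte of bitmap, so on a
// 64-bit system (PtrSize=8, wordsPerBitmapByte=2) the bitmap is 1/16 of the used
// arena; the result is then rounded up to bitmapChunk and PhysPageSize before
// comparing with bitmap_mapped. The helper name is hypothetical.
#include <stdint.h>

static uintptr_t
bitmap_bytes_needed(uintptr_t arena_bytes, uintptr_t ptrsize, uintptr_t words_per_byte)
{
	// One bitmap byte covers words_per_byte heap words,
	// i.e. ptrsize*words_per_byte bytes of arena.
	return arena_bytes / (ptrsize * words_per_byte);
}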
getgcmaskcb(Stkframe *frame, void *ctxt)
	if(frame->sp <= frame0->sp && frame0->sp < frame->varp) {
// Returns GC type info for object p for testing.
runtime·getgcmask(byte *p, Type *t, byte **mask, uintptr *len)
	byte *base, bits, shift, *b;
	bool (*cb)(Stkframe*, void*);

	// data
	if(p >= runtime·data && p < runtime·edata) {
		n = ((PtrType*)t)->elem->size;
		*len = n/PtrSize;
		*mask = runtime·mallocgc(*len, nil, FlagNoScan);
		for(i = 0; i < n; i += PtrSize) {
			off = (p+i-runtime·data)/PtrSize;
			bits = (runtime·gcdatamask.bytedata[off/PointersPerByte] >> ((off%PointersPerByte)*BitsPerPointer))&BitsMask;
			(*mask)[i/PtrSize] = bits;
		}
		return;
	}

	// bss
	if(p >= runtime·bss && p < runtime·ebss) {
		n = ((PtrType*)t)->elem->size;
		*len = n/PtrSize;
		*mask = runtime·mallocgc(*len, nil, FlagNoScan);
		for(i = 0; i < n; i += PtrSize) {
			off = (p+i-runtime·bss)/PtrSize;
			bits = (runtime·gcbssmask.bytedata[off/PointersPerByte] >> ((off%PointersPerByte)*BitsPerPointer))&BitsMask;
			(*mask)[i/PtrSize] = bits;
		}
		return;
	}
	// heap
	if(runtime·mlookup(p, &base, &n, nil)) {
		*len = n/PtrSize;
		*mask = runtime·mallocgc(*len, nil, FlagNoScan);
		for(i = 0; i < n; i += PtrSize) {
			off = (uintptr*)(base+i) - (uintptr*)runtime·mheap.arena_start;
			b = runtime·mheap.arena_start - off/wordsPerBitmapByte - 1;
			shift = (off % wordsPerBitmapByte) * gcBits;
			bits = (*b >> (shift+2))&BitsMask;
			(*mask)[i/PtrSize] = bits;
		}
		return;
	}
	// stack
	frame.sp = (uintptr)p;
	runtime·gentraceback(g->m->curg->sched.pc, g->m->curg->sched.sp, 0, g->m->curg, 0, nil, 1000, &cb, &frame, false);
	if(frame.fn != nil) {
		f = frame.fn;
		targetpc = frame.continpc;
		if(targetpc != f->entry)
			targetpc--;
		pcdata = runtime·pcdatavalue(f, PCDATA_StackMapIndex, targetpc);
		stackmap = runtime·funcdata(f, FUNCDATA_LocalsPointerMaps);
		if(stackmap == nil || stackmap->n <= 0)
			return;
		bv = runtime·stackmapdata(stackmap, pcdata);
		size = bv.n/BitsPerPointer*PtrSize;
		n = ((PtrType*)t)->elem->size;
		*len = n/PtrSize;
		*mask = runtime·mallocgc(*len, nil, FlagNoScan);
		for(i = 0; i < n; i += PtrSize) {
			off = (p+i-(byte*)frame.varp+size)/PtrSize;
			bits = (bv.bytedata[off*BitsPerPointer/8] >> ((off*BitsPerPointer)%8))&BitsMask;
			(*mask)[i/PtrSize] = bits;
		}
	}
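// Illustrative sketch (not part of the runtime): interpreting the per-word values
// written into *mask above, for instance in a test. The numeric encoding assumed
// here (0=dead, 1=scalar, 2=pointer, 3=multiword) follows the 2-bit-per-pointer
// scheme used throughout this file, but the exact values and the helper name are
// assumptions of this sketch.
static const char*
gcbits_name(unsigned bits)
{
	switch(bits) {
	case 0: return "dead";
	case 1: return "scalar";
	case 2: return "pointer";
	default: return "multiword";
	}
}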
void runtime·gc_unixnanotime(int64 *now);

runtime·unixnanotime(void)
	runtime·gc_unixnanotime(&now);
	return now;