1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Garbage collector: type and heap bitmaps.
7 // Stack, data, and bss bitmaps
9 // Stack frames and global variables in the data and bss sections are described
10 // by 1-bit bitmaps in which 0 means uninteresting and 1 means live pointer
11 // to be visited during GC. The bits in each byte are consumed starting with
12 // the low bit: 1<<0, 1<<1, and so on.
16 // The heap bitmap comprises 2 bits for each pointer-sized word in the heap,
17 // stored in the heapArena metadata backing each heap arena.
// That is, if ha is the heapArena for the arena starting at start,
19 // then ha.bitmap[0] holds the 2-bit entries for the four words start
20 // through start+3*ptrSize, ha.bitmap[1] holds the entries for
21 // start+4*ptrSize through start+7*ptrSize, and so on.
23 // In each 2-bit entry, the lower bit holds the same information as in the 1-bit
24 // bitmaps: 0 means uninteresting and 1 means live pointer to be visited during GC.
25 // The meaning of the high bit depends on the position of the word being described
26 // in its allocated object. In all words *except* the second word, the
27 // high bit indicates that the object is still being described. In
28 // these words, if a bit pair with a high bit 0 is encountered, the
29 // low bit can also be assumed to be 0, and the object description is
// over. This 00 is called the ``dead'' encoding: it signals that the
// rest of the words in the object are uninteresting to the garbage
// collector.
34 // In the second word, the high bit is the GC ``checkmarked'' bit (see below).
36 // The 2-bit entries are split when written into the byte, so that the top half
// of the byte contains 4 high bits and the bottom half contains 4 low (pointer)
// bits.
39 // This form allows a copy from the 1-bit to the 4-bit form to keep the
40 // pointer bits contiguous, instead of having to space them out.
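//
// For illustration (a sketch, not code from this file): on a 64-bit system
// the two bits for the word at address a in the arena described by ha are
//
//	i := (a / (8 * 4)) % heapArenaBitmapBytes    // bitmap byte: 4 words each
//	shift := (a / 8) & 3                         // which of the 4 entries
//	pointerBit := (ha.bitmap[i] >> shift) & 1    // low nibble
//	scanBit := (ha.bitmap[i] >> (shift + 4)) & 1 // high nibble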
42 // The code makes use of the fact that the zero value for a heap bitmap
// has no live pointer bit set and is (depending on position) not used,
44 // not checkmarked, and is the dead encoding.
45 // These properties must be preserved when modifying the encoding.
// The bitmap for noscan spans is not maintained. Code must ensure
// that an object is scannable before consulting its bitmap, by
// checking either the noscan bit in the span or the object's
// type information.
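//
// For illustration, a typical guard (a sketch; callers in this file do the
// equivalent inline):
//
//	if s.spanclass.noscan() {
//		// no bitmap is maintained; do not read heap bits for this object
//	}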
// Checkmarks
//
// In a concurrent garbage collector, one worries about failing to mark
55 // a live object due to mutations without write barriers or bugs in the
56 // collector implementation. As a sanity check, the GC has a 'checkmark'
57 // mode that retraverses the object graph with the world stopped, to make
58 // sure that everything that should be marked is marked.
59 // In checkmark mode, in the heap bitmap, the high bit of the 2-bit entry
60 // for the second word of the object holds the checkmark bit.
61 // When not in checkmark mode, this bit is set to 1.
63 // The smallest possible allocation is 8 bytes. On a 32-bit machine, that
64 // means every allocated object has two words, so there is room for the
65 // checkmark bit. On a 64-bit machine, however, the 8-byte allocation is
66 // just one word, so the second bit pair is not available for encoding the
67 // checkmark. However, because non-pointer allocations are combined
68 // into larger 16-byte (maxTinySize) allocations, a plain 8-byte allocation
69 // must be a pointer, so the type bit in the first word is not actually needed.
// It is still used in general, except in checkmark the type bit is repurposed
// as the checkmark bit and then reinitialized (to 1) as the type bit when
// checkmarking is finished.
78 "runtime/internal/atomic"
79 "runtime/internal/sys"
const (
	bitPointer = 1 << 0
	bitScan    = 1 << 4

	heapBitsShift      = 1     // shift offset between successive bitPointer or bitScan entries
	wordsPerBitmapByte = 8 / 2 // heap words described by one bitmap byte

	// all scan/pointer bits in a byte
	bitScanAll    = bitScan | bitScan<<heapBitsShift | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
	bitPointerAll = bitPointer | bitPointer<<heapBitsShift | bitPointer<<(2*heapBitsShift) | bitPointer<<(3*heapBitsShift)
)
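// With bitPointer == 1<<0 and bitScan == 1<<4, these masks work out to
// bitPointerAll == 0x0f (the low nibble) and bitScanAll == 0xf0 (the high
// nibble), matching the split encoding described at the top of this file.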
95 // addb returns the byte pointer p+n.
98 func addb(p *byte, n uintptr) *byte {
99 // Note: wrote out full expression instead of calling add(p, n)
100 // to reduce the number of temporaries generated by the
101 // compiler for this trivial expression during inlining.
102 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
105 // subtractb returns the byte pointer p-n.
108 func subtractb(p *byte, n uintptr) *byte {
109 // Note: wrote out full expression instead of calling add(p, -n)
110 // to reduce the number of temporaries generated by the
111 // compiler for this trivial expression during inlining.
112 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
115 // add1 returns the byte pointer p+1.
118 func add1(p *byte) *byte {
119 // Note: wrote out full expression instead of calling addb(p, 1)
120 // to reduce the number of temporaries generated by the
121 // compiler for this trivial expression during inlining.
122 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
125 // subtract1 returns the byte pointer p-1.
128 // nosplit because it is used during write barriers and must not be preempted.
130 func subtract1(p *byte) *byte {
131 // Note: wrote out full expression instead of calling subtractb(p, 1)
132 // to reduce the number of temporaries generated by the
133 // compiler for this trivial expression during inlining.
134 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
137 // heapBits provides access to the bitmap bits for a single heap word.
138 // The methods on heapBits take value receivers so that the compiler
139 // can more easily inline calls to those methods and registerize the
140 // struct fields independently.
type heapBits struct {
	bitp  *uint8
	shift uint32
	arena uint32 // Index of heap arena containing bitp
	last  *uint8 // Last byte of the arena's bitmap
}
148 // Make the compiler check that heapBits.arena is large enough to hold
149 // the maximum arena frame number.
150 var _ = heapBits{arena: (1<<heapAddrBits)/heapArenaBytes - 1}
152 // markBits provides access to the mark bit for an object in the heap.
153 // bytep points to the byte holding the mark bit.
154 // mask is a byte with a single bit set that can be &ed with *bytep
155 // to see if the bit has been set.
// *m.bytep&m.mask != 0 indicates the mark bit is set.
157 // index can be used along with span information to generate
158 // the address of the object in the heap.
// We maintain one set of mark bits for allocation and one for
// marking purposes.
type markBits struct {
	bytep *uint8
	mask  uint8
	index uintptr
}
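// For illustration (sketch): to test whether object index i in span s is
// allocated, form its markBits and test the masked byte:
//
//	mb := s.allocBitsForIndex(i)
//	allocated := *mb.bytep&mb.mask != 0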
168 func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
169 bytep, mask := s.allocBits.bitp(allocBitIndex)
170 return markBits{bytep, mask, allocBitIndex}
// refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
// and negates them so that ctz (count trailing zeros) instructions
// can be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
177 func (s *mspan) refillAllocCache(whichByte uintptr) {
bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
aCache := uint64(0)
aCache |= uint64(bytes[0])
181 aCache |= uint64(bytes[1]) << (1 * 8)
182 aCache |= uint64(bytes[2]) << (2 * 8)
183 aCache |= uint64(bytes[3]) << (3 * 8)
184 aCache |= uint64(bytes[4]) << (4 * 8)
185 aCache |= uint64(bytes[5]) << (5 * 8)
186 aCache |= uint64(bytes[6]) << (6 * 8)
187 aCache |= uint64(bytes[7]) << (7 * 8)
188 s.allocCache = ^aCache
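// The negation is what makes Ctz64 useful here: in s.allocCache a 1 bit now
// marks a free object, so
//
//	bitIndex := sys.Ctz64(s.allocCache)
//
// skips the allocated objects and yields the first free index, with 64
// meaning "no free object in these 64 bits".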
191 // nextFreeIndex returns the index of the next free object in s at
192 // or after s.freeindex.
193 // There are hardware instructions that can be used to make this
194 // faster if profiling warrants it.
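//
// Illustrative use (a sketch of the allocator's fast path, not verbatim
// runtime code):
//
//	i := s.nextFreeIndex()
//	if i < s.nelems {
//		p := s.base() + i*s.elemsize // address of the free slot
//	}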
195 func (s *mspan) nextFreeIndex() uintptr {
sfreeindex := s.freeindex
snelems := s.nelems
if sfreeindex == snelems {
	return sfreeindex
}
201 if sfreeindex > snelems {
202 throw("s.freeindex > s.nelems")
205 aCache := s.allocCache
bitIndex := sys.Ctz64(aCache)
for bitIndex == 64 {
	// Move index to start of next cached bits.
210 sfreeindex = (sfreeindex + 64) &^ (64 - 1)
211 if sfreeindex >= snelems {
s.freeindex = snelems
return snelems
}
215 whichByte := sfreeindex / 8
216 // Refill s.allocCache with the next 64 alloc bits.
217 s.refillAllocCache(whichByte)
218 aCache = s.allocCache
219 bitIndex = sys.Ctz64(aCache)
220 // nothing available in cached bits
221 // grab the next 8 bytes and try again.
223 result := sfreeindex + uintptr(bitIndex)
224 if result >= snelems {
s.freeindex = snelems
return snelems
}
229 s.allocCache >>= uint(bitIndex + 1)
230 sfreeindex = result + 1
232 if sfreeindex%64 == 0 && sfreeindex != snelems {
233 // We just incremented s.freeindex so it isn't 0.
234 // As each 1 in s.allocCache was encountered and used for allocation
235 // it was shifted away. At this point s.allocCache contains all 0s.
236 // Refill s.allocCache so that it corresponds
237 // to the bits at s.allocBits starting at s.freeindex.
238 whichByte := sfreeindex / 8
239 s.refillAllocCache(whichByte)
s.freeindex = sfreeindex
return result
245 // isFree reports whether the index'th object in s is unallocated.
247 // The caller must ensure s.state is mSpanInUse, and there must have
248 // been no preemption points since ensuring this (which could allow a
249 // GC transition, which would allow the state to change).
250 func (s *mspan) isFree(index uintptr) bool {
if index < s.freeindex {
	return false
}
254 bytep, mask := s.allocBits.bitp(index)
255 return *bytep&mask == 0
258 func (s *mspan) objIndex(p uintptr) uintptr {
259 byteOffset := p - s.base()
if s.baseMask != 0 {
	// s.baseMask is non-0, elemsize is a power of two, so shift by s.divShift
	return byteOffset >> s.divShift
}
267 return uintptr(((uint64(byteOffset) >> s.divShift) * uint64(s.divMul)) >> s.divShift2)
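// For illustration: divMul and divShift2 encode division by elemsize as a
// multiply and a shift, precomputed per size class, so that
//
//	((byteOffset >> divShift) * divMul) >> divShift2 == byteOffset/elemsize
//
// for every offset that can occur in the span, avoiding a hardware divide.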
func markBitsForAddr(p uintptr) markBits {
	s := spanOf(p)
	objIndex := s.objIndex(p)
273 return s.markBitsForIndex(objIndex)
276 func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
277 bytep, mask := s.gcmarkBits.bitp(objIndex)
278 return markBits{bytep, mask, objIndex}
281 func (s *mspan) markBitsForBase() markBits {
282 return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
285 // isMarked reports whether mark bit m is set.
286 func (m markBits) isMarked() bool {
287 return *m.bytep&m.mask != 0
290 // setMarked sets the marked bit in the markbits, atomically.
291 func (m markBits) setMarked() {
292 // Might be racing with other updates, so use atomic update always.
293 // We used to be clever here and use a non-atomic update in certain
294 // cases, but it's not worth the risk.
295 atomic.Or8(m.bytep, m.mask)
298 // setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
func (m markBits) setMarkedNonAtomic() {
	*m.bytep |= m.mask
}
303 // clearMarked clears the marked bit in the markbits, atomically.
304 func (m markBits) clearMarked() {
305 // Might be racing with other updates, so use atomic update always.
306 // We used to be clever here and use a non-atomic update in certain
307 // cases, but it's not worth the risk.
308 atomic.And8(m.bytep, ^m.mask)
311 // markBitsForSpan returns the markBits for the span base address base.
312 func markBitsForSpan(base uintptr) (mbits markBits) {
mbits = markBitsForAddr(base)
if mbits.mask != 1 {
	throw("markBitsForSpan: unaligned start")
}
320 // advance advances the markBits to the next object in the span.
func (m *markBits) advance() {
	if m.mask == 1<<7 {
		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
		m.mask = 1
	} else {
		m.mask <<= 1
	}
	m.index++
}
331 // heapBitsForAddr returns the heapBits for the address addr.
332 // The caller must ensure addr is in an allocated span.
333 // In particular, be careful not to point past the end of an object.
335 // nosplit because it is used during write barriers and must not be preempted.
337 func heapBitsForAddr(addr uintptr) (h heapBits) {
338 // 2 bits per word, 4 pairs per byte, and a mask is hard coded.
339 arena := arenaIndex(addr)
340 ha := mheap_.arenas[arena.l1()][arena.l2()]
341 // The compiler uses a load for nil checking ha, but in this
342 // case we'll almost never hit that cache line again, so it
343 // makes more sense to do a value check.
if ha == nil {
	// addr is not in the heap. Return nil heapBits, which
	// we expect to crash in the caller.
	return
}
349 h.bitp = &ha.bitmap[(addr/(sys.PtrSize*4))%heapArenaBitmapBytes]
350 h.shift = uint32((addr / sys.PtrSize) & 3)
351 h.arena = uint32(arena)
352 h.last = &ha.bitmap[len(ha.bitmap)-1]
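// For illustration: on a 64-bit system each bitmap byte covers 4 words
// (32 bytes) of heap, so the assignments above reduce to picking bitmap
// byte (addr/32) % heapArenaBitmapBytes and entry (addr/8) & 3 within it.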
// badPointer throws a fatal error reporting a bad pointer found in the Go heap.
357 func badPointer(s *mspan, p, refBase, refOff uintptr) {
358 // Typically this indicates an incorrect use
359 // of unsafe or cgo to store a bad pointer in
// the Go heap. It may also indicate a runtime
// bug.
363 // TODO(austin): We could be more aggressive
364 // and detect pointers to unallocated objects
365 // in allocated spans.
367 print("runtime: pointer ", hex(p))
368 state := s.state.get()
if state != mSpanInUse {
	print(" to unallocated span")
} else {
	print(" to unused region of span")
}
374 print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state, "\n")
376 print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
377 gcDumpObject("object", refBase, refOff)
379 getg().m.traceback = 2
380 throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
383 // findObject returns the base address for the heap object containing
384 // the address p, the object's span, and the index of the object in s.
385 // If p does not point into a heap object, it returns base == 0.
// If p is an invalid heap pointer and debug.invalidptr != 0,
388 // findObject panics.
390 // refBase and refOff optionally give the base address of the object
391 // in which the pointer p was found and the byte offset at which it
392 // was found. These are used for error reporting.
394 // It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
395 // Since p is a uintptr, it would not be adjusted if the stack were to move.
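//
// Illustrative use (sketch): a scanner holding a candidate pointer p can
// locate and mark the enclosing object with
//
//	if base, span, idx := findObject(p, 0, 0); base != 0 {
//		mbits := span.markBitsForIndex(idx)
//		if !mbits.isMarked() {
//			mbits.setMarked()
//		}
//	}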
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
	s = spanOf(p)
	// If s is nil, the virtual address has never been part of the heap.
	// This pointer may be to some mmap'd region, so we allow it.
	if s == nil {
		return
	}
404 // If p is a bad pointer, it may not be in s's bounds.
406 // Check s.state to synchronize with span initialization
407 // before checking other fields. See also spanOfHeap.
408 if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
409 // Pointers into stacks are also ok, the runtime manages these explicitly.
410 if state == mSpanManual {
413 // The following ensures that we are rigorous about what data
414 // structures hold valid pointers.
415 if debug.invalidptr != 0 {
416 badPointer(s, p, refBase, refOff)
// If this span holds objects of a power-of-2 size, just mask off the bits to
// the interior of the object. Otherwise use the size to get the base.
// optimize for power of 2 sized objects.
base = s.base()
base = base + (p-base)&uintptr(s.baseMask)
426 objIndex = (base - s.base()) >> s.divShift
427 // base = p & s.baseMask is faster for small spans,
428 // but doesn't work for large spans.
429 // Overall, it's faster to use the more general computation above.
432 if p-base >= s.elemsize {
433 // n := (p - base) / s.elemsize, using division by multiplication
434 objIndex = uintptr(p-base) >> s.divShift * uintptr(s.divMul) >> s.divShift2
435 base += objIndex * s.elemsize
441 // next returns the heapBits describing the next pointer-sized word in memory.
442 // That is, if h describes address p, h.next() describes p+ptrSize.
443 // Note that next does not modify h. The caller must record the result.
445 // nosplit because it is used during write barriers and must not be preempted.
447 func (h heapBits) next() heapBits {
448 if h.shift < 3*heapBitsShift {
449 h.shift += heapBitsShift
450 } else if h.bitp != h.last {
451 h.bitp, h.shift = add1(h.bitp), 0
453 // Move to the next arena.
459 // nextArena advances h to the beginning of the next heap arena.
461 // This is a slow-path helper to next. gc's inliner knows that
462 // heapBits.next can be inlined even though it calls this. This is
463 // marked noinline so it doesn't get inlined into next and cause next
464 // to be too big to inline.
468 func (h heapBits) nextArena() heapBits {
470 ai := arenaIdx(h.arena)
471 l2 := mheap_.arenas[ai.l1()]
473 // We just passed the end of the object, which
474 // was also the end of the heap. Poison h. It
475 // should never be dereferenced at this point.
482 h.bitp, h.shift = &ha.bitmap[0], 0
483 h.last = &ha.bitmap[len(ha.bitmap)-1]
487 // forward returns the heapBits describing n pointer-sized words ahead of h in memory.
488 // That is, if h describes address p, h.forward(n) describes p+n*ptrSize.
489 // h.forward(1) is equivalent to h.next(), just slower.
490 // Note that forward does not modify h. The caller must record the result.
493 func (h heapBits) forward(n uintptr) heapBits {
494 n += uintptr(h.shift) / heapBitsShift
495 nbitp := uintptr(unsafe.Pointer(h.bitp)) + n/4
496 h.shift = uint32(n%4) * heapBitsShift
497 if nbitp <= uintptr(unsafe.Pointer(h.last)) {
498 h.bitp = (*uint8)(unsafe.Pointer(nbitp))
502 // We're in a new heap arena.
503 past := nbitp - (uintptr(unsafe.Pointer(h.last)) + 1)
504 h.arena += 1 + uint32(past/heapArenaBitmapBytes)
505 ai := arenaIdx(h.arena)
506 if l2 := mheap_.arenas[ai.l1()]; l2 != nil && l2[ai.l2()] != nil {
508 h.bitp = &a.bitmap[past%heapArenaBitmapBytes]
509 h.last = &a.bitmap[len(a.bitmap)-1]
511 h.bitp, h.last = nil, nil
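// For illustration: if h describes a word with h.shift == 2, then h.forward(3)
// computes n = 3+2 = 5 above, advances h.bitp by 5/4 = 1 byte, and sets
// h.shift = (5%4)*heapBitsShift = 1, the second entry of the next bitmap byte.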
516 // forwardOrBoundary is like forward, but stops at boundaries between
517 // contiguous sections of the bitmap. It returns the number of words
518 // advanced over, which will be <= n.
519 func (h heapBits) forwardOrBoundary(n uintptr) (heapBits, uintptr) {
maxn := 4 * ((uintptr(unsafe.Pointer(h.last)) + 1) - uintptr(unsafe.Pointer(h.bitp)))
if n > maxn {
	n = maxn
}
return h.forward(n), n
// bits returns the heap bits for the current word.
// The caller can test morePointers and isPointer by &-ing with bitScan and bitPointer.
528 // The result includes in its higher bits the bits for subsequent words
529 // described by the same bitmap byte.
531 // nosplit because it is used during write barriers and must not be preempted.
533 func (h heapBits) bits() uint32 {
534 // The (shift & 31) eliminates a test and conditional branch
535 // from the generated code.
536 return uint32(*h.bitp) >> (h.shift & 31)
// morePointers reports whether this word and all remaining words in this object
// might contain pointers.
541 // h must not describe the second word of the object.
542 func (h heapBits) morePointers() bool {
543 return h.bits()&bitScan != 0
546 // isPointer reports whether the heap bits describe a pointer word.
548 // nosplit because it is used during write barriers and must not be preempted.
550 func (h heapBits) isPointer() bool {
551 return h.bits()&bitPointer != 0
554 // isCheckmarked reports whether the heap bits have the checkmarked bit set.
555 // It must be told how large the object at h is, because the encoding of the
556 // checkmark bit varies by size.
557 // h must describe the initial word of the object.
558 func (h heapBits) isCheckmarked(size uintptr) bool {
559 if size == sys.PtrSize {
560 return (*h.bitp>>h.shift)&bitPointer != 0
562 // All multiword objects are 2-word aligned,
563 // so we know that the initial word's 2-bit pair
564 // and the second word's 2-bit pair are in the
565 // same heap bitmap byte, *h.bitp.
566 return (*h.bitp>>(heapBitsShift+h.shift))&bitScan != 0
569 // setCheckmarked sets the checkmarked bit.
570 // It must be told how large the object at h is, because the encoding of the
571 // checkmark bit varies by size.
572 // h must describe the initial word of the object.
573 func (h heapBits) setCheckmarked(size uintptr) {
574 if size == sys.PtrSize {
575 atomic.Or8(h.bitp, bitPointer<<h.shift)
578 atomic.Or8(h.bitp, bitScan<<(heapBitsShift+h.shift))
581 // bulkBarrierPreWrite executes a write barrier
582 // for every pointer slot in the memory range [src, src+size),
583 // using pointer/scalar information from [dst, dst+size).
584 // This executes the write barriers necessary before a memmove.
585 // src, dst, and size must be pointer-aligned.
586 // The range [dst, dst+size) must lie within a single object.
587 // It does not perform the actual writes.
589 // As a special case, src == 0 indicates that this is being used for a
// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
// barrier.
593 // Callers should call bulkBarrierPreWrite immediately before
594 // calling memmove(dst, src, size). This function is marked nosplit
595 // to avoid being preempted; the GC must not stop the goroutine
596 // between the memmove and the execution of the barriers.
597 // The caller is also responsible for cgo pointer checks if this
598 // may be writing Go pointers into non-Go memory.
600 // The pointer bitmap is not maintained for allocations containing
601 // no pointers at all; any caller of bulkBarrierPreWrite must first
602 // make sure the underlying allocation contains pointers, usually
603 // by checking typ.ptrdata.
605 // Callers must perform cgo checks if writeBarrier.cgo.
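//
// Illustrative use (a sketch of what typedmemmove does, assuming
// typ.ptrdata != 0 has already been checked):
//
//	bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
//	memmove(dst, src, typ.size)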
608 func bulkBarrierPreWrite(dst, src, size uintptr) {
609 if (dst|src|size)&(sys.PtrSize-1) != 0 {
610 throw("bulkBarrierPreWrite: unaligned arguments")
if !writeBarrier.needed {
	return
}
615 if s := spanOf(dst); s == nil {
616 // If dst is a global, use the data or BSS bitmaps to
617 // execute write barriers.
618 for _, datap := range activeModules() {
619 if datap.data <= dst && dst < datap.edata {
620 bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
624 for _, datap := range activeModules() {
625 if datap.bss <= dst && dst < datap.ebss {
626 bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
631 } else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
632 // dst was heap memory at some point, but isn't now.
633 // It can't be a global. It must be either our stack,
634 // or in the case of direct channel sends, it could be
635 // another stack. Either way, no need for barriers.
636 // This will also catch if dst is in a freed span,
// though that should never happen.
641 buf := &getg().m.p.ptr().wbBuf
642 h := heapBitsForAddr(dst)
644 for i := uintptr(0); i < size; i += sys.PtrSize {
646 dstx := (*uintptr)(unsafe.Pointer(dst + i))
647 if !buf.putFast(*dstx, 0) {
654 for i := uintptr(0); i < size; i += sys.PtrSize {
656 dstx := (*uintptr)(unsafe.Pointer(dst + i))
657 srcx := (*uintptr)(unsafe.Pointer(src + i))
658 if !buf.putFast(*dstx, *srcx) {
667 // bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
668 // does not execute write barriers for [dst, dst+size).
// In addition to the requirements of bulkBarrierPreWrite,
// callers need to ensure [dst, dst+size) is zeroed.
673 // This is used for special cases where e.g. dst was just
674 // created and zeroed with malloc.
676 func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
677 if (dst|src|size)&(sys.PtrSize-1) != 0 {
678 throw("bulkBarrierPreWrite: unaligned arguments")
if !writeBarrier.needed {
	return
}
683 buf := &getg().m.p.ptr().wbBuf
684 h := heapBitsForAddr(dst)
685 for i := uintptr(0); i < size; i += sys.PtrSize {
687 srcx := (*uintptr)(unsafe.Pointer(src + i))
688 if !buf.putFast(0, *srcx) {
696 // bulkBarrierBitmap executes write barriers for copying from [src,
697 // src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
698 // assumed to start maskOffset bytes into the data covered by the
699 // bitmap in bits (which may not be a multiple of 8).
701 // This is used by bulkBarrierPreWrite for writes to data and BSS.
704 func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
705 word := maskOffset / sys.PtrSize
706 bits = addb(bits, word/8)
707 mask := uint8(1) << (word % 8)
709 buf := &getg().m.p.ptr().wbBuf
710 for i := uintptr(0); i < size; i += sys.PtrSize {
721 dstx := (*uintptr)(unsafe.Pointer(dst + i))
723 if !buf.putFast(*dstx, 0) {
727 srcx := (*uintptr)(unsafe.Pointer(src + i))
728 if !buf.putFast(*dstx, *srcx) {
737 // typeBitsBulkBarrier executes a write barrier for every
738 // pointer that would be copied from [src, src+size) to [dst,
739 // dst+size) by a memmove using the type bitmap to locate those
742 // The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
743 // dst, src, and size must be pointer-aligned.
744 // The type typ must have a plain bitmap, not a GC program.
745 // The only use of this function is in channel sends, and the
746 // 64 kB channel element limit takes care of this for us.
748 // Must not be preempted because it typically runs right before memmove,
749 // and the GC must observe them as an atomic action.
751 // Callers must perform cgo checks if writeBarrier.cgo.
754 func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
756 throw("runtime: typeBitsBulkBarrier without type")
758 if typ.size != size {
759 println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size)
760 throw("runtime: invalid typeBitsBulkBarrier")
762 if typ.kind&kindGCProg != 0 {
763 println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog")
764 throw("runtime: invalid typeBitsBulkBarrier")
if !writeBarrier.needed {
	return
}
769 ptrmask := typ.gcdata
770 buf := &getg().m.p.ptr().wbBuf
772 for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
773 if i&(sys.PtrSize*8-1) == 0 {
774 bits = uint32(*ptrmask)
	ptrmask = addb(ptrmask, 1)
} else {
	bits >>= 1
}
if bits&1 != 0 {
	dstx := (*uintptr)(unsafe.Pointer(dst + i))
781 srcx := (*uintptr)(unsafe.Pointer(src + i))
782 if !buf.putFast(*dstx, *srcx) {
789 // The methods operating on spans all require that h has been returned
790 // by heapBitsForSpan and that size, n, total are the span layout description
791 // returned by the mspan's layout method.
792 // If total > size*n, it means that there is extra leftover memory in the span,
793 // usually due to rounding.
795 // TODO(rsc): Perhaps introduce a different heapBitsSpan type.
797 // initSpan initializes the heap bitmap for a span.
798 // It clears all checkmark bits.
799 // If this is a span of pointer-sized objects, it initializes all
800 // words to pointer/scan.
801 // Otherwise, it initializes all words to scalar/dead.
802 func (h heapBits) initSpan(s *mspan) {
803 // Clear bits corresponding to objects.
804 nw := (s.npages << _PageShift) / sys.PtrSize
805 if nw%wordsPerBitmapByte != 0 {
806 throw("initSpan: unaligned length")
809 throw("initSpan: unaligned base")
811 isPtrs := sys.PtrSize == 8 && s.elemsize == sys.PtrSize
813 hNext, anw := h.forwardOrBoundary(nw)
814 nbyte := anw / wordsPerBitmapByte
817 for i := uintptr(0); i < nbyte; i++ {
818 *bitp = bitPointerAll | bitScanAll
822 memclrNoHeapPointers(unsafe.Pointer(h.bitp), nbyte)
829 // initCheckmarkSpan initializes a span for being checkmarked.
830 // It clears the checkmark bits, which are set to 1 in normal operation.
831 func (h heapBits) initCheckmarkSpan(size, n, total uintptr) {
832 // The ptrSize == 8 is a compile-time constant false on 32-bit and eliminates this code entirely.
833 if sys.PtrSize == 8 && size == sys.PtrSize {
834 // Checkmark bit is type bit, bottom bit of every 2-bit entry.
835 // Only possible on 64-bit system, since minimum size is 8.
836 // Must clear type bit (checkmark bit) of every word.
837 // The type bit is the lower of every two-bit pair.
838 for i := uintptr(0); i < n; i += wordsPerBitmapByte {
839 *h.bitp &^= bitPointerAll
840 h = h.forward(wordsPerBitmapByte)
844 for i := uintptr(0); i < n; i++ {
845 *h.bitp &^= bitScan << (heapBitsShift + h.shift)
846 h = h.forward(size / sys.PtrSize)
850 // clearCheckmarkSpan undoes all the checkmarking in a span.
851 // The actual checkmark bits are ignored, so the only work to do
852 // is to fix the pointer bits. (Pointer bits are ignored by scanobject
853 // but consulted by typedmemmove.)
854 func (h heapBits) clearCheckmarkSpan(size, n, total uintptr) {
855 // The ptrSize == 8 is a compile-time constant false on 32-bit and eliminates this code entirely.
856 if sys.PtrSize == 8 && size == sys.PtrSize {
857 // Checkmark bit is type bit, bottom bit of every 2-bit entry.
858 // Only possible on 64-bit system, since minimum size is 8.
859 // Must clear type bit (checkmark bit) of every word.
860 // The type bit is the lower of every two-bit pair.
861 for i := uintptr(0); i < n; i += wordsPerBitmapByte {
862 *h.bitp |= bitPointerAll
863 h = h.forward(wordsPerBitmapByte)
868 // oneBitCount is indexed by byte and produces the
// number of 1 bits in that byte. For example, 128 has 1 bit set
// and oneBitCount[128] holds 1.
871 var oneBitCount = [256]uint8{
872 0, 1, 1, 2, 1, 2, 2, 3,
873 1, 2, 2, 3, 2, 3, 3, 4,
874 1, 2, 2, 3, 2, 3, 3, 4,
875 2, 3, 3, 4, 3, 4, 4, 5,
876 1, 2, 2, 3, 2, 3, 3, 4,
877 2, 3, 3, 4, 3, 4, 4, 5,
878 2, 3, 3, 4, 3, 4, 4, 5,
879 3, 4, 4, 5, 4, 5, 5, 6,
880 1, 2, 2, 3, 2, 3, 3, 4,
881 2, 3, 3, 4, 3, 4, 4, 5,
882 2, 3, 3, 4, 3, 4, 4, 5,
883 3, 4, 4, 5, 4, 5, 5, 6,
884 2, 3, 3, 4, 3, 4, 4, 5,
885 3, 4, 4, 5, 4, 5, 5, 6,
886 3, 4, 4, 5, 4, 5, 5, 6,
887 4, 5, 5, 6, 5, 6, 6, 7,
888 1, 2, 2, 3, 2, 3, 3, 4,
889 2, 3, 3, 4, 3, 4, 4, 5,
890 2, 3, 3, 4, 3, 4, 4, 5,
891 3, 4, 4, 5, 4, 5, 5, 6,
892 2, 3, 3, 4, 3, 4, 4, 5,
893 3, 4, 4, 5, 4, 5, 5, 6,
894 3, 4, 4, 5, 4, 5, 5, 6,
895 4, 5, 5, 6, 5, 6, 6, 7,
896 2, 3, 3, 4, 3, 4, 4, 5,
897 3, 4, 4, 5, 4, 5, 5, 6,
898 3, 4, 4, 5, 4, 5, 5, 6,
899 4, 5, 5, 6, 5, 6, 6, 7,
900 3, 4, 4, 5, 4, 5, 5, 6,
901 4, 5, 5, 6, 5, 6, 6, 7,
902 4, 5, 5, 6, 5, 6, 6, 7,
903 5, 6, 6, 7, 6, 7, 7, 8}
905 // countAlloc returns the number of objects allocated in span s by
906 // scanning the allocation bitmap.
// TODO(rlh): Use popcount intrinsic.
func (s *mspan) countAlloc() int {
	count := 0
	maxIndex := s.nelems / 8
911 for i := uintptr(0); i < maxIndex; i++ {
912 mrkBits := *s.gcmarkBits.bytep(i)
913 count += int(oneBitCount[mrkBits])
915 if bitsInLastByte := s.nelems % 8; bitsInLastByte != 0 {
916 mrkBits := *s.gcmarkBits.bytep(maxIndex)
917 mask := uint8((1 << bitsInLastByte) - 1)
918 bits := mrkBits & mask
919 count += int(oneBitCount[bits])
924 // heapBitsSetType records that the new allocation [x, x+size)
925 // holds in [x, x+dataSize) one or more values of type typ.
926 // (The number of values is given by dataSize / typ.size.)
927 // If dataSize < size, the fragment [x+dataSize, x+size) is
928 // recorded as non-pointer data.
929 // It is known that the type has pointers somewhere;
930 // malloc does not call heapBitsSetType when there are no pointers,
931 // because all free objects are marked as noscan during
932 // heapBitsSweepSpan.
934 // There can only be one allocation from a given span active at a time,
935 // and the bitmap for a span always falls on byte boundaries,
936 // so there are no write-write races for access to the heap bitmap.
937 // Hence, heapBitsSetType can access the bitmap without atomics.
939 // There can be read-write races between heapBitsSetType and things
940 // that read the heap bitmap like scanobject. However, since
941 // heapBitsSetType is only used for objects that have not yet been
942 // made reachable, readers will ignore bits being modified by this
943 // function. This does mean this function cannot transiently modify
944 // bits that belong to neighboring objects. Also, on weakly-ordered
945 // machines, callers must execute a store/store (publication) barrier
946 // between calling this function and making the object reachable.
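//
// Illustrative call site (a sketch of mallocgc's behavior for scannable
// allocations, not verbatim runtime code):
//
//	if !noscan {
//		heapBitsSetType(uintptr(x), size, dataSize, typ)
//	}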
947 func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
948 const doubleCheck = false // slow but helpful; enable to test modifications to this code
// size is always dataSize rounded up to the next malloc size class,
951 // except in the case of allocating a defer block, in which case
952 // size is sizeof(_defer{}) (at least 6 words) and dataSize may be
953 // arbitrarily larger.
955 // The checks for size == sys.PtrSize and size == 2*sys.PtrSize can therefore
956 // assume that dataSize == size without checking it explicitly.
958 if sys.PtrSize == 8 && size == sys.PtrSize {
// It's one word and it has pointers, so it must be a pointer.
960 // Since all allocated one-word objects are pointers
961 // (non-pointers are aggregated into tinySize allocations),
962 // initSpan sets the pointer bits for us. Nothing to do here.
h := heapBitsForAddr(x)
if !h.isPointer() {
	throw("heapBitsSetType: pointer bit missing")
}
968 if !h.morePointers() {
969 throw("heapBitsSetType: scan bit missing")
975 h := heapBitsForAddr(x)
976 ptrmask := typ.gcdata // start of 1-bit pointer mask (or GC program, handled below)
978 // Heap bitmap bits for 2-word object are only 4 bits,
979 // so also shared with objects next to it.
980 // This is called out as a special case primarily for 32-bit systems,
981 // so that on 32-bit systems the code below can assume all objects
982 // are 4-word aligned (because they're all 16-byte aligned).
983 if size == 2*sys.PtrSize {
984 if typ.size == sys.PtrSize {
985 // We're allocating a block big enough to hold two pointers.
986 // On 64-bit, that means the actual object must be two pointers,
987 // or else we'd have used the one-pointer-sized block.
988 // On 32-bit, however, this is the 8-byte block, the smallest one.
989 // So it could be that we're allocating one pointer and this was
990 // just the smallest block available. Distinguish by checking dataSize.
991 // (In general the number of instances of typ being allocated is
992 // dataSize/typ.size.)
993 if sys.PtrSize == 4 && dataSize == sys.PtrSize {
994 // 1 pointer object. On 32-bit machines clear the bit for the
995 // unused second word.
996 *h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
997 *h.bitp |= (bitPointer | bitScan) << h.shift
999 // 2-element slice of pointer.
1000 *h.bitp |= (bitPointer | bitScan | bitPointer<<heapBitsShift) << h.shift
1004 // Otherwise typ.size must be 2*sys.PtrSize,
1005 // and typ.kind&kindGCProg == 0.
1007 if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 {
1008 print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
1009 throw("heapBitsSetType")
1012 b := uint32(*ptrmask)
1013 hb := (b & 3) | bitScan
1014 // bitPointer == 1, bitScan is 1 << 4, heapBitsShift is 1.
1015 // 110011 is shifted h.shift and complemented.
1016 // This clears out the bits that are about to be
// ored into *h.bitp in the next instructions.
1018 *h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
1019 *h.bitp |= uint8(hb << h.shift)
1023 // Copy from 1-bit ptrmask into 2-bit bitmap.
1024 // The basic approach is to use a single uintptr as a bit buffer,
1025 // alternating between reloading the buffer and writing bitmap bytes.
1026 // In general, one load can supply two bitmap byte writes.
1027 // This is a lot of lines of code, but it compiles into relatively few
1028 // machine instructions.
1031 if arenaIndex(x+size-1) != arenaIdx(h.arena) || (doubleCheck && fastrand()%2 == 0) {
1032 // This object spans heap arenas, so the bitmap may be
1033 // discontiguous. Unroll it into the object instead
1034 // and then copy it out.
1036 // In doubleCheck mode, we randomly do this anyway to
1037 // stress test the bitmap copying path.
1039 h.bitp = (*uint8)(unsafe.Pointer(x))
var (
	// Ptrmask input.
	p     *byte   // last ptrmask byte read
	b     uintptr // ptrmask bits already loaded
	nb    uintptr // number of bits in b at next read
	endp  *byte   // final ptrmask byte to read (then repeat)
	endnb uintptr // number of valid bits in *endp
	pbits uintptr // alternate source of bits

	// Heap bitmap output.
	w     uintptr // words processed
	nw    uintptr // number of words to process
	hbitp *byte   // next heap bitmap byte to write
	hb    uintptr // bits being prepared for *hbitp
)
1061 // Handle GC program. Delayed until this part of the code
1062 // so that we can use the same double-checking mechanism
1063 // as the 1-bit case. Nothing above could have encountered
1064 // GC programs: the cases were all too small.
1065 if typ.kind&kindGCProg != 0 {
1066 heapBitsSetTypeGCProg(h, typ.ptrdata, typ.size, dataSize, size, addb(typ.gcdata, 4))
1068 // Double-check the heap bits written by GC program
1069 // by running the GC program to create a 1-bit pointer mask
1070 // and then jumping to the double-check code below.
1071 // This doesn't catch bugs shared between the 1-bit and 4-bit
1072 // GC program execution, but it does catch mistakes specific
1073 // to just one of those and bugs in heapBitsSetTypeGCProg's
1074 // implementation of arrays.
1075 lock(&debugPtrmask.lock)
1076 if debugPtrmask.data == nil {
1077 debugPtrmask.data = (*byte)(persistentalloc(1<<20, 1, &memstats.other_sys))
1079 ptrmask = debugPtrmask.data
1080 runGCProg(addb(typ.gcdata, 4), nil, ptrmask, 1)
1085 // Note about sizes:
// typ.size is the number of bytes in the object,
// and typ.ptrdata is the number of bytes in the prefix
// of the object that contains pointers. That is, the final
// typ.size - typ.ptrdata bytes contain no pointers.
1091 // This allows optimization of a common pattern where
1092 // an object has a small header followed by a large scalar
1093 // buffer. If we know the pointers are over, we don't have
1094 // to scan the buffer's heap bitmap at all.
1095 // The 1-bit ptrmasks are sized to contain only bits for
1096 // the typ.ptrdata prefix, zero padded out to a full byte
1097 // of bitmap. This code sets nw (below) so that heap bitmap
1098 // bits are only written for the typ.ptrdata prefix; if there is
1099 // more room in the allocated object, the next heap bitmap
1100 // entry is a 00, indicating that there are no more pointers
1101 // to scan. So only the ptrmask for the ptrdata bytes is needed.
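//
// For example (an illustrative type, not one from the runtime): for
//
//	type T struct {
//		p   *byte
//		buf [100]uintptr
//	}
//
// typ.ptrdata covers only the first word, so a single pointer bit is
// written, followed by a 00 (``dead'') entry; the 100-word tail is
// never scanned.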
1103 // Replicated copies are not as nice: if there is an array of
1104 // objects with scalar tails, all but the last tail does have to
1105 // be initialized, because there is no way to say "skip forward".
1106 // However, because of the possibility of a repeated type with
1107 // size not a multiple of 4 pointers (one heap bitmap byte),
1108 // the code already must handle the last ptrmask byte specially
1109 // by treating it as containing only the bits for endnb pointers,
1110 // where endnb <= 4. We represent large scalar tails that must
1111 // be expanded in the replication by setting endnb larger than 4.
1112 // This will have the effect of reading many bits out of b,
1113 // but once the real bits are shifted out, b will supply as many
1114 // zero bits as we try to read, which is exactly what we need.
1117 if typ.size < dataSize {
1118 // Filling in bits for an array of typ.
1119 // Set up for repetition of ptrmask during main loop.
// Note that ptrmask describes only the typ.ptrdata prefix of
// the type; the remaining words are implicitly zero.
1121 const maxBits = sys.PtrSize*8 - 7
1122 if typ.ptrdata/sys.PtrSize <= maxBits {
1123 // Entire ptrmask fits in uintptr with room for a byte fragment.
1124 // Load into pbits and never read from ptrmask again.
1125 // This is especially important when the ptrmask has
1126 // fewer than 8 bits in it; otherwise the reload in the middle
// of the Phase 2 loop would itself need to loop to gather
// more bits.
1130 // Accumulate ptrmask into b.
1131 // ptrmask is sized to describe only typ.ptrdata, but we record
1132 // it as describing typ.size bytes, since all the high bits are zero.
1133 nb = typ.ptrdata / sys.PtrSize
for i := uintptr(0); i < nb; i += 8 {
	b |= uintptr(*p) << i
	p = add1(p)
}
1138 nb = typ.size / sys.PtrSize
1140 // Replicate ptrmask to fill entire pbits uintptr.
1141 // Doubling and truncating is fewer steps than
1142 // iterating by nb each time. (nb could be 1.)
1143 // Since we loaded typ.ptrdata/sys.PtrSize bits
1144 // but are pretending to have typ.size/sys.PtrSize,
1145 // there might be no replication necessary/possible.
pbits = b
endnb = nb
if nb+nb <= maxBits {
1149 for endnb <= sys.PtrSize*8 {
pbits |= pbits << endnb
endnb += endnb
}
1153 // Truncate to a multiple of original ptrmask.
1154 // Because nb+nb <= maxBits, nb fits in a byte.
1155 // Byte division is cheaper than uintptr division.
1156 endnb = uintptr(maxBits/byte(nb)) * nb
1157 pbits &= 1<<endnb - 1
b = pbits
nb = endnb
}
// Clear p and endp as sentinel for using pbits.
// Checked during Phase 2 loop.
p = nil
endp = nil
1167 // Ptrmask is larger. Read it multiple times.
1168 n := (typ.ptrdata/sys.PtrSize+7)/8 - 1
1169 endp = addb(ptrmask, n)
1170 endnb = typ.size/sys.PtrSize - n*8
1179 if typ.size == dataSize {
1180 // Single entry: can stop once we reach the non-pointer data.
1181 nw = typ.ptrdata / sys.PtrSize
1183 // Repeated instances of typ in an array.
1184 // Have to process first N-1 entries in full, but can stop
1185 // once we reach the non-pointer data in the final entry.
1186 nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize
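// For example: on a 64-bit system, three 48-byte elements with a 16-byte
// pointer prefix give nw = ((3-1)*48 + 16) / 8 = 14 words of bitmap to write.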
1189 // No pointers! Caller was supposed to check.
1190 println("runtime: invalid type ", typ.string())
1191 throw("heapBitsSetType: called with non-pointer type")
1195 // Must write at least 2 words, because the "no scan"
1196 // encoding doesn't take effect until the third word.
1200 // Phase 1: Special case for leading byte (shift==0) or half-byte (shift==2).
1201 // The leading byte is special because it contains the bits for word 1,
1202 // which does not have the scan bit set.
1203 // The leading half-byte is special because it's a half a byte,
1204 // so we have to be careful with the bits already there.
1207 throw("heapBitsSetType: unexpected shift")
1210 // Ptrmask and heap bitmap are aligned.
1211 // Handle first byte of bitmap specially.
1213 // The first byte we write out covers the first four
1214 // words of the object. The scan/dead bit on the first
1215 // word must be set to scan since there are pointers
1216 // somewhere in the object. The scan/dead bit on the
1217 // second word is the checkmark, so we don't set it.
1218 // In all following words, we set the scan/dead
// appropriately to indicate that the object continues
// to the next 2-bit entry in the bitmap.
1222 // TODO: It doesn't matter if we set the checkmark, so
1223 // maybe this case isn't needed any more.
1224 hb = b & bitPointerAll
1225 hb |= bitScan | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
1226 if w += 4; w >= nw {
1234 case sys.PtrSize == 8 && h.shift == 2:
1235 // Ptrmask and heap bitmap are misaligned.
1236 // The bits for the first two words are in a byte shared
// with another object, so we must be careful with the bits
// already there.
1239 // We took care of 1-word and 2-word objects above,
1240 // so this is at least a 6-word object.
1241 hb = (b & (bitPointer | bitPointer<<heapBitsShift)) << (2 * heapBitsShift)
// This is not noscan, so set the scan bit in the
// first word.
1244 hb |= bitScan << (2 * heapBitsShift)
// Note: no bitScan for second word because that's
// the checkmark.
1249 *hbitp &^= uint8((bitPointer | bitScan | (bitPointer << heapBitsShift)) << (2 * heapBitsShift))
1252 if w += 2; w >= nw {
1253 // We know that there is more data, because we handled 2-word objects above.
1254 // This must be at least a 6-word object. If we're out of pointer words,
1255 // mark no scan in next bitmap byte and finish.
1262 // Phase 2: Full bytes in bitmap, up to but not including write to last byte (full or partial) in bitmap.
1263 // The loop computes the bits for that last write but does not execute the write;
1264 // it leaves the bits in hb for processing by phase 3.
1265 // To avoid repeated adjustment of nb, we subtract out the 4 bits we're going to
1266 // use in the first half of the loop right now, and then we only adjust nb explicitly
1267 // if the 8 bits used by each iteration isn't balanced by 8 bits loaded mid-loop.
1270 // Emit bitmap byte.
1271 // b has at least nb+4 bits, with one exception:
1272 // if w+4 >= nw, then b has only nw-w bits,
1273 // but we'll stop at the break and then truncate
1274 // appropriately in Phase 3.
1275 hb = b & bitPointerAll
1277 if w += 4; w >= nw {
1284 // Load more bits. b has nb right now.
1286 // Fast path: keep reading from ptrmask.
1287 // nb unmodified: we just loaded 8 bits,
1288 // and the next iteration will consume 8 bits,
1289 // leaving us with the same nb the next time we're here.
1291 b |= uintptr(*p) << nb
1294 // Reduce the number of bits in b.
1295 // This is important if we skipped
1296 // over a scalar tail, since nb could
1297 // be larger than the bit width of b.
1300 } else if p == nil {
1301 // Almost as fast path: track bit count and refill from pbits.
1302 // For short repetitions.
1307 nb -= 8 // for next iteration
1309 // Slow path: reached end of ptrmask.
1310 // Process final partial byte and rewind to start.
1311 b |= uintptr(*p) << nb
1314 b |= uintptr(*ptrmask) << nb
1322 // Emit bitmap byte.
1323 hb = b & bitPointerAll
1325 if w += 4; w >= nw {
1334 // Phase 3: Write last byte or partial byte and zero the rest of the bitmap entries.
1336 // Counting the 4 entries in hb not yet written to memory,
1337 // there are more entries than possible pointer slots.
1338 // Discard the excess entries (can't be more than 3).
1339 mask := uintptr(1)<<(4-(w-nw)) - 1
1340 hb &= mask | mask<<4 // apply mask to both pointer bits and scan bits
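// For example, if w-nw == 2, mask is 1<<2 - 1 = 0b11: only the two
// remaining pointer bits and their two scan bits survive.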
1343 // Change nw from counting possibly-pointer words to total words in allocation.
1344 nw = size / sys.PtrSize
1346 // Write whole bitmap bytes.
1347 // The first is hb, the rest are zero.
1351 hb = 0 // for possible final half-byte below
1352 for w += 4; w <= nw; w += 4 {
1358 // Write final partial bitmap byte if any.
1359 // We know w > nw, or else we'd still be in the loop above.
1360 // It can be bigger only due to the 4 entries in hb that it counts.
1361 // If w == nw+4 then there's nothing left to do: we wrote all nw entries
1362 // and can discard the 4 sitting in hb.
1363 // But if w == nw+2, we need to write first two in hb.
// The byte is shared with the next object, so be careful with
// those bits.
1367 *hbitp = *hbitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | uint8(hb)
1371 // Phase 4: Copy unrolled bitmap to per-arena bitmaps, if necessary.
1373 // TODO: We could probably make this faster by
1374 // handling [x+dataSize, x+size) specially.
1375 h := heapBitsForAddr(x)
1376 // cnw is the number of heap words, or bit pairs
1377 // remaining (like nw above).
1378 cnw := size / sys.PtrSize
1379 src := (*uint8)(unsafe.Pointer(x))
1380 // We know the first and last byte of the bitmap are
// not the same, but it's still possible for small
// objects to span arenas, so it may share bitmap bytes
1383 // with neighboring objects.
1385 // Handle the first byte specially if it's shared. See
1386 // Phase 1 for why this is the only special case we need.
1388 if !(h.shift == 0 || (sys.PtrSize == 8 && h.shift == 2)) {
1389 print("x=", x, " size=", size, " cnw=", h.shift, "\n")
1390 throw("bad start shift")
1393 if sys.PtrSize == 8 && h.shift == 2 {
1394 *h.bitp = *h.bitp&^((bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift)<<(2*heapBitsShift)) | *src
1399 // We're now byte aligned. Copy out to per-arena
// bitmaps until the last byte (which may again be
// partial).
1403 // This loop processes four words at a time,
1404 // so round cnw down accordingly.
1405 hNext, words := h.forwardOrBoundary(cnw / 4 * 4)
// n is the number of bitmap bytes to copy.
n := words / 4
memmove(unsafe.Pointer(h.bitp), unsafe.Pointer(src), n)
1414 if doubleCheck && h.shift != 0 {
1415 print("cnw=", cnw, " h.shift=", h.shift, "\n")
1416 throw("bad shift after block copy")
1418 // Handle the last byte if it's shared.
1420 *h.bitp = *h.bitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | *src
1425 if uintptr(unsafe.Pointer(src)) > x+size {
1426 throw("copy exceeded object size")
1428 if !(cnw == 0 || cnw == 2) {
1429 print("x=", x, " size=", size, " cnw=", cnw, "\n")
1430 throw("bad number of remaining words")
1432 // Set up hbitp so doubleCheck code below can check it.
1435 // Zero the object where we wrote the bitmap.
1436 memclrNoHeapPointers(unsafe.Pointer(x), uintptr(unsafe.Pointer(src))-x)
1439 // Double check the whole bitmap.
1441 // x+size may not point to the heap, so back up one
1442 // word and then call next().
1443 end := heapBitsForAddr(x + size - sys.PtrSize).next()
1444 endAI := arenaIdx(end.arena)
1445 if !outOfPlace && (end.bitp == nil || (end.shift == 0 && end.bitp == &mheap_.arenas[endAI.l1()][endAI.l2()].bitmap[0])) {
1446 // The unrolling code above walks hbitp just
1447 // past the bitmap without moving to the next
1448 // arena. Synthesize this for end.bitp.
1450 endAI = arenaIdx(end.arena)
1451 end.bitp = addb(&mheap_.arenas[endAI.l1()][endAI.l2()].bitmap[0], heapArenaBitmapBytes)
1454 if typ.kind&kindGCProg == 0 && (hbitp != end.bitp || (w == nw+2) != (end.shift == 2)) {
1455 println("ended at wrong bitmap byte for", typ.string(), "x", dataSize/typ.size)
1456 print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
1457 print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
1458 h0 := heapBitsForAddr(x)
1459 print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
1460 print("ended at hbitp=", hbitp, " but next starts at bitp=", end.bitp, " shift=", end.shift, "\n")
1461 throw("bad heapBitsSetType")
1464 // Double-check that bits to be written were written correctly.
1465 // Does not check that other bits were not written, unfortunately.
1466 h := heapBitsForAddr(x)
1467 nptr := typ.ptrdata / sys.PtrSize
1468 ndata := typ.size / sys.PtrSize
1469 count := dataSize / typ.size
1470 totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize
1471 for i := uintptr(0); i < size/sys.PtrSize; i++ {
1473 var have, want uint8
1474 have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
want = 0 // dead marker
1477 if typ.kind&kindGCProg != 0 && i < (totalptr+3)/4*4 {
1481 if j < nptr && (*addb(ptrmask, j/8)>>(j%8))&1 != 0 {
1491 println("mismatch writing bits for", typ.string(), "x", dataSize/typ.size)
1492 print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
1493 print("kindGCProg=", typ.kind&kindGCProg != 0, " outOfPlace=", outOfPlace, "\n")
1494 print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
1495 h0 := heapBitsForAddr(x)
1496 print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
1497 print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
1498 print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
1499 println("at word", i, "offset", i*sys.PtrSize, "have", hex(have), "want", hex(want))
1500 if typ.kind&kindGCProg != 0 {
1501 println("GC program:")
1502 dumpGCProg(addb(typ.gcdata, 4))
1504 throw("bad heapBitsSetType")
1508 if ptrmask == debugPtrmask.data {
1509 unlock(&debugPtrmask.lock)
var debugPtrmask struct {
	lock mutex
	data *byte
}
1519 // heapBitsSetTypeGCProg implements heapBitsSetType using a GC program.
1520 // progSize is the size of the memory described by the program.
1521 // elemSize is the size of the element that the GC program describes (a prefix of).
1522 // dataSize is the total size of the intended data, a multiple of elemSize.
1523 // allocSize is the total size of the allocated memory.
1525 // GC programs are only used for large allocations.
1526 // heapBitsSetType requires that allocSize is a multiple of 4 words,
// so that the relevant bitmap bytes are not shared with surrounding
// objects.
1529 func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
1530 if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 {
1531 // Alignment will be wrong.
1532 throw("heapBitsSetTypeGCProg: small allocation")
1534 var totalBits uintptr
1535 if elemSize == dataSize {
1536 totalBits = runGCProg(prog, nil, h.bitp, 2)
1537 if totalBits*sys.PtrSize != progSize {
1538 println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
1539 throw("heapBitsSetTypeGCProg: unexpected bit count")
1542 count := dataSize / elemSize
// Piece together program trailer to run after prog that does:
//	literal(0)
//	repeat(1, elemSize-progSize-1) // zeros to fill element size
//	repeat(elemSize, count-1) // repeat that element for count
1548 // This zero-pads the data remaining in the first element and then
1549 // repeats that first element to fill the array.
var trailer [40]byte // 3 varints (max 10 each) + some bytes
i := 0
if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 {
for ; n >= 0x80; n >>= 7 {
	trailer[i] = byte(n | 0x80)
	i++
}
trailer[i] = byte(n)
i++
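// (Illustrative varint encoding: n == 300 would be written as the two
// bytes 0xac 0x02, low 7 bits first, with the high bit set on every
// byte but the last.)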
1571 // repeat(elemSize/ptrSize, count-1)
1574 n := elemSize / sys.PtrSize
for ; n >= 0x80; n >>= 7 {
	trailer[i] = byte(n | 0x80)
	i++
}
trailer[i] = byte(n)
i++
n = count - 1
for ; n >= 0x80; n >>= 7 {
	trailer[i] = byte(n | 0x80)
	i++
}
trailer[i] = byte(n)
i++
1591 runGCProg(prog, &trailer[0], h.bitp, 2)
1593 // Even though we filled in the full array just now,
1594 // record that we only filled in up to the ptrdata of the
1595 // last element. This will cause the code below to
1596 // memclr the dead section of the final array element,
1597 // so that scanobject can stop early in the final element.
1598 totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize
1600 endProg := unsafe.Pointer(addb(h.bitp, (totalBits+3)/4))
1601 endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/sys.PtrSize/wordsPerBitmapByte))
1602 memclrNoHeapPointers(endProg, uintptr(endAlloc)-uintptr(endProg))
1605 // progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
// size is the size of the region described by prog, in bytes.
1607 // The resulting bitvector will have no more than size/sys.PtrSize bits.
1608 func progToPointerMask(prog *byte, size uintptr) bitvector {
1609 n := (size/sys.PtrSize + 7) / 8
1610 x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
1611 x[len(x)-1] = 0xa1 // overflow check sentinel
1612 n = runGCProg(prog, nil, &x[0], 1)
1613 if x[len(x)-1] != 0xa1 {
1614 throw("progToPointerMask: overflow")
1616 return bitvector{int32(n), &x[0]}
1619 // Packed GC pointer bitmaps, aka GC programs.
1621 // For large types containing arrays, the type information has a
1622 // natural repetition that can be encoded to save space in the
1623 // binary and in the memory representation of the type information.
1625 // The encoding is a simple Lempel-Ziv style bytecode machine
// with the following instructions:
//
//	00000000: stop
//	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
1630 // 10000000 n c: repeat the previous n bits c times; n, c are varints
1631 // 1nnnnnnn c: repeat the previous n bits c times; c is a varint
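//
// For example (illustrative, not a real type's program): the 1-bit mask
// 10101010 (low bit first) could be encoded as
//
//	0x02 0x01  // emit 2 bits: 1, 0
//	0x82 0x03  // repeat the previous 2 bits 3 more times
//	0x00       // stop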
1633 // runGCProg executes the GC program prog, and then trailer if non-nil,
1634 // writing to dst with entries of the given size.
1635 // If size == 1, dst is a 1-bit pointer mask laid out moving forward from dst.
1636 // If size == 2, dst is the 2-bit heap bitmap, and writes move backward
1637 // starting at dst (because the heap bitmap does). In this case, the caller guarantees
1638 // that only whole bytes in dst need to be written.
1640 // runGCProg returns the number of 1- or 2-bit entries written to memory.
func runGCProg(prog, trailer, dst *byte, size int) uintptr {
	dstStart := dst

	// Bits waiting to be written to memory.
	var bits uintptr
	var nbits uintptr
1651 // Flush accumulated full bytes.
1652 // The rest of the loop assumes that nbits <= 7.
1653 for ; nbits >= 8; nbits -= 8 {
1659 v := bits&bitPointerAll | bitScanAll
1663 v = bits&bitPointerAll | bitScanAll
1670 // Process one instruction.
1675 // Literal bits; n == 0 means end of program.
1677 // Program is over; continue in trailer if present.
1686 for i := uintptr(0); i < nbyte; i++ {
1687 bits |= uintptr(*p) << nbits
1694 v := bits&0xf | bitScanAll
1698 v = bits&0xf | bitScanAll
1705 bits |= uintptr(*p) << nbits
1712 // Repeat. If n == 0, it is encoded in a varint in the next bytes.
1714 for off := uint(0); ; off += 7 {
1717 n |= (x & 0x7F) << off
1724 // Count is encoded in a varint in the next bytes.
1726 for off := uint(0); ; off += 7 {
1729 c |= (x & 0x7F) << off
1734 c *= n // now total number of bits to copy
1736 // If the number of bits being repeated is small, load them
1737 // into a register and use that register for the entire loop
1738 // instead of repeatedly reading from memory.
1739 // Handling fewer than 8 bits here makes the general loop simpler.
1740 // The cutoff is sys.PtrSize*8 - 7 to guarantee that when we add
1741 // the pattern to a bit buffer holding at most 7 bits (a partial byte)
1742 // it will not overflow.
1744 const maxBits = sys.PtrSize*8 - 7
1746 // Start with bits in output buffer.
1750 // If we need more bits, fetch them from memory.
1752 src = subtract1(src)
1755 pattern |= uintptr(*src)
1756 src = subtract1(src)
1760 src = subtract1(src)
1763 pattern |= uintptr(*src) & 0xf
1764 src = subtract1(src)
1769 // We started with the whole bit output buffer,
1770 // and then we loaded bits from whole bytes.
1771 // Either way, we might now have too many instead of too few.
1772 // Discard the extra.
1774 pattern >>= npattern - n
1778 // Replicate pattern to at most maxBits.
1780 // One bit being repeated.
1781 // If the bit is 1, make the pattern all 1s.
1782 // If the bit is 0, the pattern is already all 0s,
1783 // but we can claim that the number of bits
1784 // in the word is equal to the number we need (c),
1785 // because right shift of bits will zero fill.
1787 pattern = 1<<maxBits - 1
1795 if nb+nb <= maxBits {
1796 // Double pattern until the whole uintptr is filled.
for nb <= sys.PtrSize*8 {
	pattern |= pattern << nb
	nb += nb
}
1801 // Trim away incomplete copy of original pattern in high bits.
1802 // TODO(rsc): Replace with table lookup or loop on systems without divide?
1803 nb = maxBits / npattern * npattern
1810 // Add pattern to bit buffer and flush bit buffer, c/npattern times.
1811 // Since pattern contains >8 bits, there will be full bytes to flush
1812 // on each iteration.
1813 for ; c >= npattern; c -= npattern {
1814 bits |= pattern << nbits
1825 *dst = uint8(bits&0xf | bitScanAll)
1833 // Add final fragment to bit buffer.
1836 bits |= pattern << nbits
1842 // Repeat; n too large to fit in a register.
1843 // Since nbits <= 7, we know the first few bytes of repeated data
1844 // are already written to memory.
1845 off := n - nbits // n > nbits because n > maxBits and nbits <= 7
1847 // Leading src fragment.
1848 src = subtractb(src, (off+7)/8)
1849 if frag := off & 7; frag != 0 {
1850 bits |= uintptr(*src) >> (8 - frag) << nbits
1855 // Main loop: load one byte, write another.
1856 // The bits are rotating through the bit buffer.
1857 for i := c / 8; i > 0; i-- {
1858 bits |= uintptr(*src) << nbits
1864 // Final src fragment.
1866 bits |= (uintptr(*src) & (1<<c - 1)) << nbits
1870 // Leading src fragment.
1871 src = subtractb(src, (off+3)/4)
1872 if frag := off & 3; frag != 0 {
1873 bits |= (uintptr(*src) & 0xf) >> (4 - frag) << nbits
1878 // Main loop: load one byte, write another.
1879 // The bits are rotating through the bit buffer.
1880 for i := c / 4; i > 0; i-- {
1881 bits |= (uintptr(*src) & 0xf) << nbits
1883 *dst = uint8(bits&0xf | bitScanAll)
1887 // Final src fragment.
1889 bits |= (uintptr(*src) & (1<<c - 1)) << nbits
1895 // Write any final bits out, using full-byte writes, even for the final byte.
1896 var totalBits uintptr
1898 totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
1900 for ; nbits > 0; nbits -= 8 {
1906 totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*4 + nbits
1908 for ; nbits > 0; nbits -= 4 {
1909 v := bits&0xf | bitScanAll
1918 // materializeGCProg allocates space for the (1-bit) pointer bitmask
1919 // for an object of size ptrdata. Then it fills that space with the
1920 // pointer bitmask specified by the program prog.
1921 // The bitmask starts at s.startAddr.
1922 // The result must be deallocated with dematerializeGCProg.
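// For illustration: the mask needs one bit per word, i.e.
// ptrdata/(8*sys.PtrSize) bytes, so a 1 MB ptrdata prefix on a 64-bit
// system needs 16 KB of mask, which rounds up to two 8 KB pages here.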
1923 func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
1924 s := mheap_.allocManual((ptrdata/(8*sys.PtrSize)+pageSize-1)/pageSize, &memstats.gc_sys)
runGCProg(addb(prog, 4), nil, (*byte)(unsafe.Pointer(s.startAddr)), 1)
return s
1928 func dematerializeGCProg(s *mspan) {
1929 mheap_.freeManual(s, &memstats.gc_sys)
1932 func dumpGCProg(p *byte) {
1938 print("\t", nptr, " end\n")
1942 print("\t", nptr, " lit ", x, ":")
1944 for i := 0; i < n; i++ {
1951 nbit := int(x &^ 0x80)
1953 for nb := uint(0); ; nb += 7 {
1956 nbit |= int(x&0x7f) << nb
1963 for nb := uint(0); ; nb += 7 {
1966 count |= int(x&0x7f) << nb
1971 print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
1972 nptr += nbit * count
1979 func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
1980 target := (*stkframe)(ctxt)
1981 if frame.sp <= target.sp && target.sp < frame.varp {
1988 // gcbits returns the GC type info for x, for testing.
1989 // The result is the bitmap entries (0 or 1), one entry per byte.
1990 //go:linkname reflect_gcbits reflect.gcbits
func reflect_gcbits(x interface{}) []byte {
	ret := getgcmask(x)
	typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
1994 nptr := typ.ptrdata / sys.PtrSize
1995 for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
1996 ret = ret[:len(ret)-1]
2001 // Returns GC type info for the pointer stored in ep for testing.
2002 // If ep points to the stack, only static live information will be returned
// (i.e., not for objects that are only dynamically live stack objects).
func getgcmask(ep interface{}) (mask []byte) {
	e := *efaceOf(&ep)
	p := e.data
	t := e._type
	// data or bss
	for _, datap := range activeModules() {
2011 if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
2012 bitmap := datap.gcdatamask.bytedata
2013 n := (*ptrtype)(unsafe.Pointer(t)).elem.size
2014 mask = make([]byte, n/sys.PtrSize)
2015 for i := uintptr(0); i < n; i += sys.PtrSize {
2016 off := (uintptr(p) + i - datap.data) / sys.PtrSize
2017 mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
2023 if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
2024 bitmap := datap.gcbssmask.bytedata
2025 n := (*ptrtype)(unsafe.Pointer(t)).elem.size
2026 mask = make([]byte, n/sys.PtrSize)
2027 for i := uintptr(0); i < n; i += sys.PtrSize {
2028 off := (uintptr(p) + i - datap.bss) / sys.PtrSize
2029 mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
2036 if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
hbits := heapBitsForAddr(base)
n := s.elemsize
mask = make([]byte, n/sys.PtrSize)
2040 for i := uintptr(0); i < n; i += sys.PtrSize {
2041 if hbits.isPointer() {
2042 mask[i/sys.PtrSize] = 1
2044 if i != 1*sys.PtrSize && !hbits.morePointers() {
2045 mask = mask[:i/sys.PtrSize]
2048 hbits = hbits.next()
2054 if _g_ := getg(); _g_.m.curg.stack.lo <= uintptr(p) && uintptr(p) < _g_.m.curg.stack.hi {
var frame stkframe
frame.sp = uintptr(p)
2058 gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
2059 if frame.fn.valid() {
2060 locals, _, _ := getStackMap(&frame, nil, false)
2064 size := uintptr(locals.n) * sys.PtrSize
2065 n := (*ptrtype)(unsafe.Pointer(t)).elem.size
2066 mask = make([]byte, n/sys.PtrSize)
2067 for i := uintptr(0); i < n; i += sys.PtrSize {
2068 off := (uintptr(p) + i - frame.varp + size) / sys.PtrSize
2069 mask[i/sys.PtrSize] = locals.ptrbit(off)
2075 // otherwise, not something the GC knows about.
2076 // possibly read-only data, like malloc(0).
2077 // must not have pointers