// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: type and heap bitmaps.
//
// Stack, data, and bss bitmaps
//
// Stack frames and global variables in the data and bss sections are
// described by bitmaps with 1 bit per pointer-sized word. A "1" bit
// means the word is a live pointer to be visited by the GC (referred to
// as "pointer"). A "0" bit means the word should be ignored by GC
// (referred to as "scalar", though it could be a dead pointer value).
//
// Heap bitmap
//
// The heap bitmap comprises 2 bits for each pointer-sized word in the heap,
// stored in the heapArena metadata backing each heap arena.
// That is, if ha is the heapArena for the arena starting at start,
// then ha.bitmap[0] holds the 2-bit entries for the four words start
// through start+3*ptrSize, ha.bitmap[1] holds the entries for
// start+4*ptrSize through start+7*ptrSize, and so on.
//
// In each 2-bit entry, the lower bit is a pointer/scalar bit, just
// like in the stack/data bitmaps described above. The upper bit
// indicates scan/dead: a "1" value ("scan") indicates that there may
// be pointers in later words of the allocation, and a "0" value
// ("dead") indicates there are no more pointers in the allocation. If
// the upper bit is 0, the lower bit must also be 0, and this
// indicates scanning can ignore the rest of the allocation.
//
// The 2-bit entries are split when written into the byte, so that the top half
// of the byte contains 4 high (scan) bits and the bottom half contains 4 low
// (pointer) bits. This form allows a copy from the 1-bit to the 4-bit form to
// keep the pointer bits contiguous, instead of having to space them out.
//
// The code makes use of the fact that the zero value for a heap
// bitmap means scalar/dead. This property must be preserved when
// modifying the encoding.
//
// The bitmap for noscan spans is not maintained. Code must ensure
// that an object is scannable before consulting its bitmap, either by
// checking the noscan bit in the span or by consulting the object's
// type information.

50 "runtime/internal/atomic"
51 "runtime/internal/sys"
const (
	bitPointer = 1 << 0
	bitScan    = 1 << 4

	heapBitsShift      = 1     // shift offset between successive bitPointer or bitScan entries
	wordsPerBitmapByte = 8 / 2 // heap words described by one bitmap byte

	// all scan/pointer bits in a byte
	bitScanAll    = bitScan | bitScan<<heapBitsShift | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
	bitPointerAll = bitPointer | bitPointer<<heapBitsShift | bitPointer<<(2*heapBitsShift) | bitPointer<<(3*heapBitsShift)
)
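
// As an illustration (not part of the runtime): one bitmap byte describes
// four heap words, and entry i (0 <= i <= 3) is pointer bit i plus scan bit
// i+4. Decoding mirrors heapBits.bits, morePointers, and isPointer below:
//
//	func decode(b uint8, i uint32) (isPointer, morePointers bool) {
//		v := uint32(b) >> i // i plays the role of h.shift
//		return v&bitPointer != 0, v&bitScan != 0
//	}
//
// Setting entry i to pointer/scan is the mirror image:
//
//	b |= (bitPointer | bitScan) << i
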
// addb returns the byte pointer p+n.
func addb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
}

// subtractb returns the byte pointer p-n.
func subtractb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, -n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
}

// add1 returns the byte pointer p+1.
func add1(p *byte) *byte {
	// Note: wrote out full expression instead of calling addb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
}

// subtract1 returns the byte pointer p-1.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func subtract1(p *byte) *byte {
	// Note: wrote out full expression instead of calling subtractb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
}

// heapBits provides access to the bitmap bits for a single heap word.
// The methods on heapBits take value receivers so that the compiler
// can more easily inline calls to those methods and registerize the
// struct fields independently.
type heapBits struct {
	bitp  *uint8
	shift uint32
	arena uint32 // Index of heap arena containing bitp
	last  *uint8 // Last byte of arena's bitmap
}

// Make the compiler check that heapBits.arena is large enough to hold
// the maximum arena frame number.
var _ = heapBits{arena: (1<<heapAddrBits)/heapArenaBytes - 1}

// markBits provides access to the mark bit for an object in the heap.
// bytep points to the byte holding the mark bit.
// mask is a byte with a single bit set that can be &ed with *bytep
// to see if the bit has been set.
// *m.bytep&m.mask != 0 indicates the mark bit is set.
// index can be used along with span information to generate
// the address of the object in the heap.
// We maintain one set of mark bits for allocation and one for
// marking purposes.
type markBits struct {
	bytep *uint8
	mask  uint8
	index uintptr
}

func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
	bytep, mask := s.allocBits.bitp(allocBitIndex)
	return markBits{bytep, mask, allocBitIndex}
}

// refillAllocCache takes 8 bytes from s.allocBits starting at whichByte
// and negates them so that ctz (count trailing zeros) instructions
// can be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
func (s *mspan) refillAllocCache(whichByte uintptr) {
	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
	aCache := uint64(0)
	aCache |= uint64(bytes[0])
	aCache |= uint64(bytes[1]) << (1 * 8)
	aCache |= uint64(bytes[2]) << (2 * 8)
	aCache |= uint64(bytes[3]) << (3 * 8)
	aCache |= uint64(bytes[4]) << (4 * 8)
	aCache |= uint64(bytes[5]) << (5 * 8)
	aCache |= uint64(bytes[6]) << (6 * 8)
	aCache |= uint64(bytes[7]) << (7 * 8)
	s.allocCache = ^aCache
}
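
// A standalone sketch of the same trick (not runtime code, using the standard
// library): assemble the bytes little-endian, complement them so free slots
// become 1 bits, and count trailing zeros to find the first free slot.
//
//	import (
//		"encoding/binary"
//		"math/bits"
//	)
//
//	func firstFree(allocBits []byte) int {
//		cache := ^binary.LittleEndian.Uint64(allocBits[:8])
//		return bits.TrailingZeros64(cache) // 64 means no free slot in these 8 bytes
//	}
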
// nextFreeIndex returns the index of the next free object in s at
// or after s.freeindex.
// There are hardware instructions that can be used to make this
// faster if profiling warrants it.
func (s *mspan) nextFreeIndex() uintptr {
	sfreeindex := s.freeindex
	snelems := s.nelems
	if sfreeindex == snelems {
		return sfreeindex
	}
	if sfreeindex > snelems {
		throw("s.freeindex > s.nelems")
	}

	aCache := s.allocCache

	bitIndex := sys.Ctz64(aCache)
	for bitIndex == 64 {
		// Move index to start of next cached bits.
		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
		if sfreeindex >= snelems {
			s.freeindex = snelems
			return snelems
		}
		whichByte := sfreeindex / 8
		// Refill s.allocCache with the next 64 alloc bits.
		s.refillAllocCache(whichByte)
		aCache = s.allocCache
		bitIndex = sys.Ctz64(aCache)
		// nothing available in cached bits
		// grab the next 8 bytes and try again.
	}
	result := sfreeindex + uintptr(bitIndex)
	if result >= snelems {
		s.freeindex = snelems
		return snelems
	}

	s.allocCache >>= uint(bitIndex + 1)
	sfreeindex = result + 1

	if sfreeindex%64 == 0 && sfreeindex != snelems {
		// We just incremented s.freeindex so it isn't 0.
		// As each 1 in s.allocCache was encountered and used for allocation
		// it was shifted away. At this point s.allocCache contains all 0s.
		// Refill s.allocCache so that it corresponds
		// to the bits at s.allocBits starting at s.freeindex.
		whichByte := sfreeindex / 8
		s.refillAllocCache(whichByte)
	}
	s.freeindex = sfreeindex
	return result
}
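
// For illustration (not runtime code), the same scan written against a plain
// []uint64 cache with math/bits, assuming a set bit means "free" and that
// bits below freeindex in the first word have already been cleared, as
// s.allocCache guarantees:
//
//	func nextFree(cache []uint64, freeindex, nelems int) int {
//		for w := freeindex / 64; w < len(cache); w++ {
//			if i := bits.TrailingZeros64(cache[w]); i < 64 {
//				if idx := w*64 + i; idx < nelems {
//					return idx
//				}
//				break
//			}
//		}
//		return nelems // span is full
//	}
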
// isFree reports whether the index'th object in s is unallocated.
//
// The caller must ensure s.state is mSpanInUse, and there must have
// been no preemption points since ensuring this (which could allow a
// GC transition, which would allow the state to change).
func (s *mspan) isFree(index uintptr) bool {
	if index < s.freeindex {
		return false
	}
	bytep, mask := s.allocBits.bitp(index)
	return *bytep&mask == 0
}

// divideByElemSize returns n/s.elemsize.
// n must be within [0, s.npages*_PageSize),
// or may be exactly s.npages*_PageSize
// if s.elemsize is from sizeclasses.go.
func (s *mspan) divideByElemSize(n uintptr) uintptr {
	const doubleCheck = false

	// See explanation in mksizeclasses.go's computeDivMagic.
	q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)

	if doubleCheck && q != n/s.elemsize {
		println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
		throw("bad magic division")
	}
	return q
}

func (s *mspan) objIndex(p uintptr) uintptr {
	return s.divideByElemSize(p - s.base())
}
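
// A worked example (not from the runtime) of the magic division above,
// assuming divMul = ceil(2^32/elemsize), one valid choice of magic constant:
//
//	const elemsize = 48
//	divMul := (uint64(1)<<32 + elemsize - 1) / elemsize // 89478486
//	q := (4800 * divMul) >> 32                          // == 100 == 4800/48
//
// Precomputing divMul this way keeps q == n/elemsize exact for every offset n
// that can occur inside the span, replacing a hardware divide in the hot
// object-index path with a multiply and a shift.
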
func markBitsForAddr(p uintptr) markBits {
	s := spanOf(p)
	objIndex := s.objIndex(p)
	return s.markBitsForIndex(objIndex)
}

func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
	bytep, mask := s.gcmarkBits.bitp(objIndex)
	return markBits{bytep, mask, objIndex}
}

func (s *mspan) markBitsForBase() markBits {
	return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
}

// isMarked reports whether mark bit m is set.
func (m markBits) isMarked() bool {
	return *m.bytep&m.mask != 0
}

// setMarked sets the marked bit in the markbits, atomically.
func (m markBits) setMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.Or8(m.bytep, m.mask)
}

// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
func (m markBits) setMarkedNonAtomic() {
	*m.bytep |= m.mask
}

// clearMarked clears the marked bit in the markbits, atomically.
func (m markBits) clearMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.And8(m.bytep, ^m.mask)
}
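
// For illustration (not runtime code): the bytep/mask pair for object index i
// in a packed bitmap is simply
//
//	bytep := &bits[i/8]       // byte holding the bit
//	mask := uint8(1) << (i%8) // single-bit mask within that byte
//
// so isMarked is *bytep&mask != 0, and the race-safe set is
// atomic.Or8(bytep, mask) as in setMarked above.
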
// markBitsForSpan returns the markBits for the span base address base.
func markBitsForSpan(base uintptr) (mbits markBits) {
	mbits = markBitsForAddr(base)
	if mbits.mask != 1 {
		throw("markBitsForSpan: unaligned start")
	}
	return mbits
}

// advance advances the markBits to the next object in the span.
func (m *markBits) advance() {
	if m.mask == 1<<7 {
		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
		m.mask = 1
	} else {
		m.mask = m.mask << 1
	}
	m.index++
}

// heapBitsForAddr returns the heapBits for the address addr.
// The caller must ensure addr is in an allocated span.
// In particular, be careful not to point past the end of an object.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func heapBitsForAddr(addr uintptr) (h heapBits) {
	// 2 bits per word, 4 pairs per byte, and a mask is hard coded.
	arena := arenaIndex(addr)
	ha := mheap_.arenas[arena.l1()][arena.l2()]
	// The compiler uses a load for nil checking ha, but in this
	// case we'll almost never hit that cache line again, so it
	// makes more sense to do a value check.
	if ha == nil {
		// addr is not in the heap. Return nil heapBits, which
		// we expect to crash in the caller.
		return
	}
	h.bitp = &ha.bitmap[(addr/(goarch.PtrSize*4))%heapArenaBitmapBytes]
	h.shift = uint32((addr / goarch.PtrSize) & 3)
	h.arena = uint32(arena)
	h.last = &ha.bitmap[len(ha.bitmap)-1]
	return
}
// clobberdeadPtr is a special value that is used by the compiler to
// clobber dead stack slots, when -clobberdead flag is set.
const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))
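
// The shift expression selects the word size: ^uintptr(0)>>63 is 1 on 64-bit
// systems and 0 on 32-bit systems, so clobberdeadPtr evaluates to
// 0xdeaddeaddeaddead on 64-bit and 0xdeaddead on 32-bit.
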
// badPointer throws bad pointer in heap panic.
func badPointer(s *mspan, p, refBase, refOff uintptr) {
	// Typically this indicates an incorrect use
	// of unsafe or cgo to store a bad pointer in
	// the Go heap. It may also indicate a runtime
	// bug.
	//
	// TODO(austin): We could be more aggressive
	// and detect pointers to unallocated objects
	// in allocated spans.
	printlock()
	print("runtime: pointer ", hex(p))
	if s != nil {
		state := s.state.get()
		if state != mSpanInUse {
			print(" to unallocated span")
		} else {
			print(" to unused region of span")
		}
		print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
	}
	print("\n")
	if refBase != 0 {
		print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
		gcDumpObject("object", refBase, refOff)
	}
	getg().m.traceback = 2
	throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
}

// findObject returns the base address for the heap object containing
// the address p, the object's span, and the index of the object in s.
// If p does not point into a heap object, it returns base == 0.
//
// If p is an invalid heap pointer and debug.invalidptr != 0,
// findObject panics.
//
// refBase and refOff optionally give the base address of the object
// in which the pointer p was found and the byte offset at which it
// was found. These are used for error reporting.
//
// It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
// Since p is a uintptr, it would not be adjusted if the stack were to move.
//
//go:nosplit
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
	s = spanOf(p)
	// If s is nil, the virtual address has never been part of the heap.
	// This pointer may be to some mmap'd region, so we allow it.
	if s == nil {
		if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
			// Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
			// as they are the only platforms where the compiler's clobberdead
			// mode is implemented. On these platforms clobberdeadPtr cannot
			// be a valid address.
			badPointer(s, p, refBase, refOff)
		}
		return
	}
	// If p is a bad pointer, it may not be in s's bounds.
	//
	// Check s.state to synchronize with span initialization
	// before checking other fields. See also spanOfHeap.
	if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
		// Pointers into stacks are also ok, the runtime manages these explicitly.
		if state == mSpanManual {
			return
		}
		// The following ensures that we are rigorous about what data
		// structures hold valid pointers.
		if debug.invalidptr != 0 {
			badPointer(s, p, refBase, refOff)
		}
		return
	}

	objIndex = s.objIndex(p)
	base = s.base() + objIndex*s.elemsize
	return
}

// verifyNotInHeapPtr reports whether converting the not-in-heap pointer into an unsafe.Pointer is ok.
//go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr
func reflect_verifyNotInHeapPtr(p uintptr) bool {
	// Conversion to a pointer is ok as long as findObject above does not call badPointer.
	// Since we're already promised that p doesn't point into the heap, just disallow heap
	// pointers and the special clobbered pointer.
	return spanOf(p) == nil && p != clobberdeadPtr
}

// next returns the heapBits describing the next pointer-sized word in memory.
// That is, if h describes address p, h.next() describes p+ptrSize.
// Note that next does not modify h. The caller must record the result.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) next() heapBits {
	if h.shift < 3*heapBitsShift {
		h.shift += heapBitsShift
	} else if h.bitp != h.last {
		h.bitp, h.shift = add1(h.bitp), 0
	} else {
		// Move to the next arena.
		return h.nextArena()
	}
	return h
}

// nextArena advances h to the beginning of the next heap arena.
//
// This is a slow-path helper to next. gc's inliner knows that
// heapBits.next can be inlined even though it calls this. This is
// marked noinline so it doesn't get inlined into next and cause next
// to be too big to inline.
//
//go:noinline
func (h heapBits) nextArena() heapBits {
	h.arena++
	ai := arenaIdx(h.arena)
	l2 := mheap_.arenas[ai.l1()]
	if l2 == nil {
		// We just passed the end of the object, which
		// was also the end of the heap. Poison h. It
		// should never be dereferenced at this point.
		return heapBits{}
	}
	ha := l2[ai.l2()]
	if ha == nil {
		return heapBits{}
	}
	h.bitp, h.shift = &ha.bitmap[0], 0
	h.last = &ha.bitmap[len(ha.bitmap)-1]
	return h
}

// forward returns the heapBits describing n pointer-sized words ahead of h in memory.
// That is, if h describes address p, h.forward(n) describes p+n*ptrSize.
// h.forward(1) is equivalent to h.next(), just slower.
// Note that forward does not modify h. The caller must record the result.
func (h heapBits) forward(n uintptr) heapBits {
	n += uintptr(h.shift) / heapBitsShift
	nbitp := uintptr(unsafe.Pointer(h.bitp)) + n/4
	h.shift = uint32(n%4) * heapBitsShift
	if nbitp <= uintptr(unsafe.Pointer(h.last)) {
		h.bitp = (*uint8)(unsafe.Pointer(nbitp))
		return h
	}

	// We're in a new heap arena.
	past := nbitp - (uintptr(unsafe.Pointer(h.last)) + 1)
	h.arena += 1 + uint32(past/heapArenaBitmapBytes)
	ai := arenaIdx(h.arena)
	if l2 := mheap_.arenas[ai.l1()]; l2 != nil && l2[ai.l2()] != nil {
		a := l2[ai.l2()]
		h.bitp = &a.bitmap[past%heapArenaBitmapBytes]
		h.last = &a.bitmap[len(a.bitmap)-1]
	} else {
		h.bitp, h.last = nil, nil
	}
	return h
}

// forwardOrBoundary is like forward, but stops at boundaries between
// contiguous sections of the bitmap. It returns the number of words
// advanced over, which will be <= n.
func (h heapBits) forwardOrBoundary(n uintptr) (heapBits, uintptr) {
	maxn := 4 * ((uintptr(unsafe.Pointer(h.last)) + 1) - uintptr(unsafe.Pointer(h.bitp)))
	if n > maxn {
		n = maxn
	}
	return h.forward(n), n
}

// bits returns the heap bits for the current word.
// The caller can test morePointers and isPointer by &-ing with bitScan and bitPointer.
// The result includes in its higher bits the bits for subsequent words
// described by the same bitmap byte.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) bits() uint32 {
	// The (shift & 31) eliminates a test and conditional branch
	// from the generated code.
	return uint32(*h.bitp) >> (h.shift & 31)
}

// morePointers reports whether this word and all remaining words in this object
// are scannable.
// h must not describe the second word of the object.
func (h heapBits) morePointers() bool {
	return h.bits()&bitScan != 0
}

// isPointer reports whether the heap bits describe a pointer word.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) isPointer() bool {
	return h.bits()&bitPointer != 0
}
// bulkBarrierPreWrite executes a write barrier
// for every pointer slot in the memory range [src, src+size),
// using pointer/scalar information from [dst, dst+size).
// This executes the write barriers necessary before a memmove.
// src, dst, and size must be pointer-aligned.
// The range [dst, dst+size) must lie within a single object.
// It does not perform the actual writes.
//
// As a special case, src == 0 indicates that this is being used for a
// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
// barrier.
//
// Callers should call bulkBarrierPreWrite immediately before
// calling memmove(dst, src, size). This function is marked nosplit
// to avoid being preempted; the GC must not stop the goroutine
// between the memmove and the execution of the barriers.
// The caller is also responsible for cgo pointer checks if this
// may be writing Go pointers into non-Go memory.
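//
// A sketch of that calling pattern (illustrative; typedmemmove is shaped
// roughly like this):
//
//	if writeBarrier.needed && typ.ptrdata != 0 {
//		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
//	}
//	memmove(dst, src, typ.size)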
//
// The pointer bitmap is not maintained for allocations containing
// no pointers at all; any caller of bulkBarrierPreWrite must first
// make sure the underlying allocation contains pointers, usually
// by checking typ.ptrdata.
//
// Callers must perform cgo checks if writeBarrier.cgo.
//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.needed {
		return
	}
	if s := spanOf(dst); s == nil {
		// If dst is a global, use the data or BSS bitmaps to
		// execute write barriers.
		for _, datap := range activeModules() {
			if datap.data <= dst && dst < datap.edata {
				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
				return
			}
		}
		for _, datap := range activeModules() {
			if datap.bss <= dst && dst < datap.ebss {
				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
				return
			}
		}
		return
	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
		// dst was heap memory at some point, but isn't now.
		// It can't be a global. It must be either our stack,
		// or in the case of direct channel sends, it could be
		// another stack. Either way, no need for barriers.
		// This will also catch if dst is in a freed span,
		// though that should never happen.
		return
	}

	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst)
	if src == 0 {
		for i := uintptr(0); i < size; i += goarch.PtrSize {
			if h.isPointer() {
				dstx := (*uintptr)(unsafe.Pointer(dst + i))
				if !buf.putFast(*dstx, 0) {
					wbBufFlush(nil, 0)
				}
			}
			h = h.next()
		}
	} else {
		for i := uintptr(0); i < size; i += goarch.PtrSize {
			if h.isPointer() {
				dstx := (*uintptr)(unsafe.Pointer(dst + i))
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				if !buf.putFast(*dstx, *srcx) {
					wbBufFlush(nil, 0)
				}
			}
			h = h.next()
		}
	}
}

// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
// does not execute write barriers for [dst, dst+size).
//
// In addition to the requirements of bulkBarrierPreWrite
// callers need to ensure [dst, dst+size) is zeroed.
//
// This is used for special cases where e.g. dst was just
// created and zeroed with malloc.
//
//go:nosplit
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.needed {
		return
	}
	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst)
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		if h.isPointer() {
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			if !buf.putFast(0, *srcx) {
				wbBufFlush(nil, 0)
			}
		}
		h = h.next()
	}
}

// bulkBarrierBitmap executes write barriers for copying from [src,
// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
// assumed to start maskOffset bytes into the data covered by the
// bitmap in bits (which may not be a multiple of 8).
//
// This is used by bulkBarrierPreWrite for writes to data and BSS.
//
//go:nosplit
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
	word := maskOffset / goarch.PtrSize
	bits = addb(bits, word/8)
	mask := uint8(1) << (word % 8)

	buf := &getg().m.p.ptr().wbBuf
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		if mask == 0 {
			bits = addb(bits, 1)
			if *bits == 0 {
				// Skip 8 words.
				i += 7 * goarch.PtrSize
				continue
			}
			mask = 1
		}
		if *bits&mask != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			if src == 0 {
				if !buf.putFast(*dstx, 0) {
					wbBufFlush(nil, 0)
				}
			} else {
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				if !buf.putFast(*dstx, *srcx) {
					wbBufFlush(nil, 0)
				}
			}
		}
		mask <<= 1
	}
}

// typeBitsBulkBarrier executes a write barrier for every
// pointer that would be copied from [src, src+size) to [dst,
// dst+size) by a memmove using the type bitmap to locate those
// pointer slots.
//
// The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
// dst, src, and size must be pointer-aligned.
// The type typ must have a plain bitmap, not a GC program.
// The only use of this function is in channel sends, and the
// 64 kB channel element limit takes care of this for us.
//
// Must not be preempted because it typically runs right before memmove,
// and the GC must observe them as an atomic action.
//
// Callers must perform cgo checks if writeBarrier.cgo.
//
//go:nosplit
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
717 throw("runtime: typeBitsBulkBarrier without type")
719 if typ.size != size {
720 println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size)
721 throw("runtime: invalid typeBitsBulkBarrier")
723 if typ.kind&kindGCProg != 0 {
724 println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog")
725 throw("runtime: invalid typeBitsBulkBarrier")
727 if !writeBarrier.needed {
730 ptrmask := typ.gcdata
731 buf := &getg().m.p.ptr().wbBuf
733 for i := uintptr(0); i < typ.ptrdata; i += goarch.PtrSize {
734 if i&(goarch.PtrSize*8-1) == 0 {
735 bits = uint32(*ptrmask)
736 ptrmask = addb(ptrmask, 1)
741 dstx := (*uintptr)(unsafe.Pointer(dst + i))
742 srcx := (*uintptr)(unsafe.Pointer(src + i))
743 if !buf.putFast(*dstx, *srcx) {
// The methods operating on spans all require that h has been returned
// by heapBitsForSpan and that size, n, total are the span layout description
// returned by the mspan's layout method.
// If total > size*n, it means that there is extra leftover memory in the span,
// usually due to rounding.
//
// TODO(rsc): Perhaps introduce a different heapBitsSpan type.

// initSpan initializes the heap bitmap for a span.
// If this is a span of pointer-sized objects, it initializes all
// words to pointer/scan.
// Otherwise, it initializes all words to scalar/dead.
func (h heapBits) initSpan(s *mspan) {
	// Clear bits corresponding to objects.
	nw := (s.npages << _PageShift) / goarch.PtrSize
	if nw%wordsPerBitmapByte != 0 {
		throw("initSpan: unaligned length")
	}
	if h.shift != 0 {
		throw("initSpan: unaligned base")
	}
	isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
	for nw > 0 {
		hNext, anw := h.forwardOrBoundary(nw)
		nbyte := anw / wordsPerBitmapByte
		if isPtrs {
			bitp := h.bitp
			for i := uintptr(0); i < nbyte; i++ {
				*bitp = bitPointerAll | bitScanAll
				bitp = add1(bitp)
			}
		} else {
			memclrNoHeapPointers(unsafe.Pointer(h.bitp), nbyte)
		}
		h = hNext
		nw -= anw
	}
}

// countAlloc returns the number of objects allocated in span s by
// scanning the allocation bitmap.
func (s *mspan) countAlloc() int {
	count := 0
	bytes := divRoundUp(s.nelems, 8)
	// Iterate over each 8-byte chunk and count allocations
	// with an intrinsic. Note that newMarkBits guarantees that
	// gcmarkBits will be 8-byte aligned, so we don't have to
	// worry about edge cases, irrelevant bits will simply be zero.
	for i := uintptr(0); i < bytes; i += 8 {
		// Extract 64 bits from the byte pointer and get an OnesCount.
		// Note that the unsafe cast here doesn't preserve endianness,
		// but that's OK. We only care about how many bits are 1, not
		// about the order we discover them in.
		mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
		count += sys.OnesCount64(mrkBits)
	}
	return count
}
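
// A standalone sketch of the same popcount loop (not runtime code), assuming
// the bitmap is padded to a multiple of 8 bytes the way newMarkBits pads
// gcmarkBits:
//
//	import (
//		"encoding/binary"
//		"math/bits"
//	)
//
//	func countSet(bitmap []byte) int {
//		n := 0
//		for i := 0; i < len(bitmap); i += 8 {
//			n += bits.OnesCount64(binary.LittleEndian.Uint64(bitmap[i:]))
//		}
//		return n
//	}
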
// heapBitsSetType records that the new allocation [x, x+size)
// holds in [x, x+dataSize) one or more values of type typ.
// (The number of values is given by dataSize / typ.size.)
// If dataSize < size, the fragment [x+dataSize, x+size) is
// recorded as non-pointer data.
// It is known that the type has pointers somewhere;
// malloc does not call heapBitsSetType when there are no pointers,
// because all free objects are marked as noscan during
// heapBitsSweepSpan.
//
// There can only be one allocation from a given span active at a time,
// and the bitmap for a span always falls on byte boundaries,
// so there are no write-write races for access to the heap bitmap.
// Hence, heapBitsSetType can access the bitmap without atomics.
//
// There can be read-write races between heapBitsSetType and things
// that read the heap bitmap like scanobject. However, since
// heapBitsSetType is only used for objects that have not yet been
// made reachable, readers will ignore bits being modified by this
// function. This does mean this function cannot transiently modify
// bits that belong to neighboring objects. Also, on weakly-ordered
// machines, callers must execute a store/store (publication) barrier
// between calling this function and making the object reachable.
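//
// For a concrete example (illustrative, not from the runtime): on 64-bit,
// allocating a 4-word object whose only pointer is in word 0 (typ.ptrdata == 8)
// and whose bitmap starts a fresh byte produces pointer nibble 0001 and scan
// nibble 0001, i.e. the byte 0x11: word 0 is pointer+scan, and word 1's 00
// ("dead") entry tells the scanner there are no pointers in the rest of the
// object.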
func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
	const doubleCheck = false // slow but helpful; enable to test modifications to this code

	const (
		mask1 = bitPointer | bitScan                        // 00010001
		mask2 = bitPointer | bitScan | mask1<<heapBitsShift // 00110011
		mask3 = bitPointer | bitScan | mask2<<heapBitsShift // 01110111
	)

	// size is always dataSize rounded up to the next malloc size class,
	// except in the case of allocating a defer block, in which case
	// size is sizeof(_defer{}) (at least 6 words) and dataSize may be
	// arbitrarily larger.
	//
	// The checks for size == goarch.PtrSize and size == 2*goarch.PtrSize can therefore
	// assume that dataSize == size without checking it explicitly.

	if goarch.PtrSize == 8 && size == goarch.PtrSize {
		// It's one word and it has pointers, it must be a pointer.
		// Since all allocated one-word objects are pointers
		// (non-pointers are aggregated into tinySize allocations),
		// initSpan sets the pointer bits for us. Nothing to do here.
		if doubleCheck {
			h := heapBitsForAddr(x)
			if !h.isPointer() {
				throw("heapBitsSetType: pointer bit missing")
			}
			if !h.morePointers() {
				throw("heapBitsSetType: scan bit missing")
			}
		}
		return
	}

	h := heapBitsForAddr(x)
	ptrmask := typ.gcdata // start of 1-bit pointer mask (or GC program, handled below)

	// 2-word objects only have 4 bitmap bits and 3-word objects only have 6 bitmap bits.
	// Therefore, these objects share a heap bitmap byte with the objects next to them.
	// These are called out as a special case primarily so the code below can assume all
	// objects are at least 4 words long and that their bitmaps start either at the beginning
	// of a bitmap byte, or half-way in (h.shift of 0 and 2 respectively).

	if size == 2*goarch.PtrSize {
		if typ.size == goarch.PtrSize {
			// We're allocating a block big enough to hold two pointers.
			// On 64-bit, that means the actual object must be two pointers,
			// or else we'd have used the one-pointer-sized block.
			// On 32-bit, however, this is the 8-byte block, the smallest one.
			// So it could be that we're allocating one pointer and this was
			// just the smallest block available. Distinguish by checking dataSize.
			// (In general the number of instances of typ being allocated is
			// dataSize/typ.size.)
			if goarch.PtrSize == 4 && dataSize == goarch.PtrSize {
				// 1 pointer object. On 32-bit machines clear the bit for the
				// unused second word.
				*h.bitp &^= (bitPointer | bitScan | (bitPointer|bitScan)<<heapBitsShift) << h.shift
				*h.bitp |= (bitPointer | bitScan) << h.shift
			} else {
				// 2-element array of pointer.
				*h.bitp |= (bitPointer | bitScan | (bitPointer|bitScan)<<heapBitsShift) << h.shift
			}
			return
		}
		// Otherwise typ.size must be 2*goarch.PtrSize,
		// and typ.kind&kindGCProg == 0.
		if doubleCheck {
			if typ.size != 2*goarch.PtrSize || typ.kind&kindGCProg != 0 {
				print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
				throw("heapBitsSetType")
			}
		}
		b := uint32(*ptrmask)
		hb := b & 3
		hb |= bitScanAll & ((bitScan << (typ.ptrdata / goarch.PtrSize)) - 1)
		// Clear the bits for this object so we can set the
		// appropriate ones.
		*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
		*h.bitp |= uint8(hb << h.shift)
		return
	} else if size == 3*goarch.PtrSize {
		b := uint8(*ptrmask)
		if doubleCheck {
			if b == 0 {
				println("runtime: invalid type ", typ.string())
				throw("heapBitsSetType: called with non-pointer type")
			}
			if goarch.PtrSize != 8 {
				throw("heapBitsSetType: unexpected 3 pointer wide size class on 32 bit")
			}
			if typ.kind&kindGCProg != 0 {
				throw("heapBitsSetType: unexpected GC prog for 3 pointer wide size class")
			}
			if typ.size == 2*goarch.PtrSize {
				print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, "\n")
				throw("heapBitsSetType: inconsistent object sizes")
			}
		}
		if typ.size == goarch.PtrSize {
			// The type contains a pointer otherwise heapBitsSetType wouldn't have been called.
			// Since the type is only 1 pointer wide and contains a pointer, its gcdata must be exactly 1.
			if doubleCheck && *typ.gcdata != 1 {
				print("runtime: heapBitsSetType size=", size, " typ.size=", typ.size, " but *typ.gcdata=", *typ.gcdata, "\n")
				throw("heapBitsSetType: unexpected gcdata for 1 pointer wide type size in 3 pointer wide size class")
			}
			// 3 element array of pointers. Unrolling ptrmask 3 times into b yields 00000111.
			b = 7
		}

		hb := b & 7
		// Set bitScan bits for all pointers.
		hb |= hb << wordsPerBitmapByte
		// First bitScan bit is always set since the type contains pointers.
		hb |= bitScan
		// Second bitScan bit needs to also be set if the third bitScan bit is set.
		hb |= hb & (bitScan << (2 * heapBitsShift)) >> 1

		// For h.shift > 1 heap bits cross a byte boundary and need to be written part
		// to h.bitp and part to the next h.bitp.
		switch h.shift {
		case 0:
			*h.bitp &^= mask3 << 0
			*h.bitp |= hb << 0
		case 1:
			*h.bitp &^= mask3 << 1
			*h.bitp |= hb << 1
		case 2:
			*h.bitp &^= mask2 << 2
			*h.bitp |= (hb & mask2) << 2
			// Two words written to the first byte.
			// Advance two words to get to the next byte.
			h = h.next().next()
			*h.bitp &^= mask1
			*h.bitp |= (hb >> 2) & mask1
		case 3:
			*h.bitp &^= mask1 << 3
			*h.bitp |= (hb & mask1) << 3
			// One word written to the first byte.
			// Advance one word to get to the next byte.
			h = h.next()
			*h.bitp &^= mask2
			*h.bitp |= (hb >> 1) & mask2
		}
		return
	}

	// Copy from 1-bit ptrmask into 2-bit bitmap.
	// The basic approach is to use a single uintptr as a bit buffer,
	// alternating between reloading the buffer and writing bitmap bytes.
	// In general, one load can supply two bitmap byte writes.
	// This is a lot of lines of code, but it compiles into relatively few
	// machine instructions.

	outOfPlace := false
	if arenaIndex(x+size-1) != arenaIdx(h.arena) || (doubleCheck && fastrandn(2) == 0) {
		// This object spans heap arenas, so the bitmap may be
		// discontiguous. Unroll it into the object instead
		// and then copy it out.
		//
		// In doubleCheck mode, we randomly do this anyway to
		// stress test the bitmap copying path.
		outOfPlace = true
		h.bitp = (*uint8)(unsafe.Pointer(x))
		h.last = nil
	}

	var (
		// Ptrmask input.
		p     *byte   // last ptrmask byte read
		b     uintptr // ptrmask bits already loaded
		nb    uintptr // number of bits in b at next read
		endp  *byte   // final ptrmask byte to read (then repeat)
		endnb uintptr // number of valid bits in *endp
		pbits uintptr // alternate source of bits

		// Heap bitmap output.
		w     uintptr // words processed
		nw    uintptr // number of words to process
		hbitp *byte   // next heap bitmap byte to write
		hb    uintptr // bits being prepared for *hbitp
	)

	hbitp = h.bitp

	// Handle GC program. Delayed until this part of the code
	// so that we can use the same double-checking mechanism
	// as the 1-bit case. Nothing above could have encountered
	// GC programs: the cases were all too small.
	if typ.kind&kindGCProg != 0 {
		heapBitsSetTypeGCProg(h, typ.ptrdata, typ.size, dataSize, size, addb(typ.gcdata, 4))
		if doubleCheck {
			// Double-check the heap bits written by GC program
			// by running the GC program to create a 1-bit pointer mask
			// and then jumping to the double-check code below.
			// This doesn't catch bugs shared between the 1-bit and 4-bit
			// GC program execution, but it does catch mistakes specific
			// to just one of those and bugs in heapBitsSetTypeGCProg's
			// implementation of arrays.
			lock(&debugPtrmask.lock)
			if debugPtrmask.data == nil {
				debugPtrmask.data = (*byte)(persistentalloc(1<<20, 1, &memstats.other_sys))
			}
			ptrmask = debugPtrmask.data
			runGCProg(addb(typ.gcdata, 4), nil, ptrmask, 1)
		}
		goto Phase4
	}

	// Note about sizes:
	//
	// typ.size is the number of bytes in the object,
	// and typ.ptrdata is the number of bytes in the prefix
	// of the object that contains pointers. That is, the final
	// typ.size - typ.ptrdata bytes contain no pointers.
	// This allows optimization of a common pattern where
	// an object has a small header followed by a large scalar
	// buffer. If we know the pointers are over, we don't have
	// to scan the buffer's heap bitmap at all.
	// The 1-bit ptrmasks are sized to contain only bits for
	// the typ.ptrdata prefix, zero padded out to a full byte
	// of bitmap. This code sets nw (below) so that heap bitmap
	// bits are only written for the typ.ptrdata prefix; if there is
	// more room in the allocated object, the next heap bitmap
	// entry is a 00, indicating that there are no more pointers
	// to scan. So only the ptrmask for the ptrdata bytes is needed.
	//
	// Replicated copies are not as nice: if there is an array of
	// objects with scalar tails, all but the last tail does have to
	// be initialized, because there is no way to say "skip forward".
	// However, because of the possibility of a repeated type with
	// size not a multiple of 4 pointers (one heap bitmap byte),
	// the code already must handle the last ptrmask byte specially
	// by treating it as containing only the bits for endnb pointers,
	// where endnb <= 4. We represent large scalar tails that must
	// be expanded in the replication by setting endnb larger than 4.
	// This will have the effect of reading many bits out of b,
	// but once the real bits are shifted out, b will supply as many
	// zero bits as we try to read, which is exactly what we need.

	p = ptrmask
	if typ.size < dataSize {
		// Filling in bits for an array of typ.
		// Set up for repetition of ptrmask during main loop.
		// Note that ptrmask describes only a prefix of the array's
		// memory: the bits for one element's typ.ptrdata.
		const maxBits = goarch.PtrSize*8 - 7
		if typ.ptrdata/goarch.PtrSize <= maxBits {
			// Entire ptrmask fits in uintptr with room for a byte fragment.
			// Load into pbits and never read from ptrmask again.
			// This is especially important when the ptrmask has
			// fewer than 8 bits in it; otherwise the reload in the middle
			// of the Phase 2 loop would itself need to loop to gather
			// at least 8 bits.

			// Accumulate ptrmask into b.
			// ptrmask is sized to describe only typ.ptrdata, but we record
			// it as describing typ.size bytes, since all the high bits are zero.
			nb = typ.ptrdata / goarch.PtrSize
			for i := uintptr(0); i < nb; i += 8 {
				b |= uintptr(*p) << i
				p = add1(p)
			}
			nb = typ.size / goarch.PtrSize

			// Replicate ptrmask to fill entire pbits uintptr.
			// Doubling and truncating is fewer steps than
			// iterating by nb each time. (nb could be 1.)
			// Since we loaded typ.ptrdata/goarch.PtrSize bits
			// but are pretending to have typ.size/goarch.PtrSize,
			// there might be no replication necessary/possible.
			pbits = b
			endnb = nb
			if nb+nb <= maxBits {
				for endnb <= goarch.PtrSize*8 {
					pbits |= pbits << endnb
					endnb += endnb
				}
				// Truncate to a multiple of original ptrmask.
				// Because nb+nb <= maxBits, nb fits in a byte.
				// Byte division is cheaper than uintptr division.
				endnb = uintptr(maxBits/byte(nb)) * nb
				pbits &= 1<<endnb - 1
				b = pbits
				nb = endnb
			}

			// Clear p and endp as sentinel for using pbits.
			// Checked during Phase 2 loop.
			p = nil
			endp = nil
		} else {
			// Ptrmask is larger. Read it multiple times.
			n := (typ.ptrdata/goarch.PtrSize+7)/8 - 1
			endp = addb(ptrmask, n)
			endnb = typ.size/goarch.PtrSize - n*8
		}
	}
	if p != nil {
		b = uintptr(*p)
		p = add1(p)
		nb = 8
	}

	if typ.size == dataSize {
		// Single entry: can stop once we reach the non-pointer data.
		nw = typ.ptrdata / goarch.PtrSize
	} else {
		// Repeated instances of typ in an array.
		// Have to process first N-1 entries in full, but can stop
		// once we reach the non-pointer data in the final entry.
		nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / goarch.PtrSize
	}
	if nw == 0 {
		// No pointers! Caller was supposed to check.
		println("runtime: invalid type ", typ.string())
		throw("heapBitsSetType: called with non-pointer type")
		return
	}

	// Phase 1: Special case for leading byte (shift==0) or half-byte (shift==2).
	// The leading byte is special because it contains the bits for word 1,
	// which does not have the scan bit set.
	// The leading half-byte is special because it's half a byte,
	// so we have to be careful with the bits already there.
	switch {
	default:
		throw("heapBitsSetType: unexpected shift")

	case h.shift == 0:
		// Ptrmask and heap bitmap are aligned.
		//
		// This is a fast path for small objects.
		//
		// The first byte we write out covers the first four
		// words of the object. The scan/dead bit on the first
		// word must be set to scan since there are pointers
		// somewhere in the object.
		// In all following words, we set the scan/dead
		// appropriately to indicate that the object continues
		// to the next 2-bit entry in the bitmap.
		//
		// We set four bits at a time here, but if the object
		// is fewer than four words, phase 3 will clear
		// unnecessary bits.
		hb = b & bitPointerAll
		hb |= bitScanAll
		if w += 4; w >= nw {
			goto Phase3
		}
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		b >>= 4
		nb -= 4

	case h.shift == 2:
		// Ptrmask and heap bitmap are misaligned.
		//
		// On 32 bit architectures only the 6-word object that corresponds
		// to a 24 bytes size class can start with h.shift of 2 here since
		// all other non 16 byte aligned size classes have been handled by
		// special code paths at the beginning of heapBitsSetType on 32 bit.
		//
		// Many size classes are only 16 byte aligned. On 64 bit architectures
		// this results in a heap bitmap position starting with a h.shift of 2.
		//
		// The bits for the first two words are in a byte shared
		// with another object, so we must be careful with the bits
		// already there.
		//
		// We took care of 1-word, 2-word, and 3-word objects above,
		// so this is at least a 6-word object.
		hb = (b & (bitPointer | bitPointer<<heapBitsShift)) << (2 * heapBitsShift)
		hb |= bitScan << (2 * heapBitsShift)
		if nw > 1 {
			hb |= bitScan << (3 * heapBitsShift)
		}
		b >>= 2
		nb -= 2
		*hbitp &^= uint8((bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << (2 * heapBitsShift))
		*hbitp |= uint8(hb)
		hbitp = add1(hbitp)
		if w += 2; w >= nw {
			// We know that there is more data, because we handled 2-word and 3-word objects above.
			// This must be at least a 6-word object. If we're out of pointer words,
			// mark no scan in next bitmap byte and finish.
			hb = 0
			*hbitp = uint8(hb)
			goto Phase3
		}
	}

	// Phase 2: Full bytes in bitmap, up to but not including write to last byte (full or partial) in bitmap.
	// The loop computes the bits for that last write but does not execute the write;
	// it leaves the bits in hb for processing by phase 3.
	// To avoid repeated adjustment of nb, we subtract out the 4 bits we're going to
	// use in the first half of the loop right now, and then we only adjust nb explicitly
	// if the 8 bits used by each iteration isn't balanced by 8 bits loaded mid-loop.
	nb -= 4
	for {
		// Emit bitmap byte.
		// b has at least nb+4 bits, with one exception:
		// if w+4 >= nw, then b has only nw-w bits,
		// but we'll stop at the break and then truncate
		// appropriately in Phase 3.
		hb = b & bitPointerAll
		hb |= bitScanAll
		if w += 4; w >= nw {
			break
		}
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		b >>= 4

		// Load more bits. b has nb right now.
		if p != endp {
			// Fast path: keep reading from ptrmask.
			// nb unmodified: we just loaded 8 bits,
			// and the next iteration will consume 8 bits,
			// leaving us with the same nb the next time we're here.
			if nb < 8 {
				b |= uintptr(*p) << nb
				p = add1(p)
			} else {
				// Reduce the number of bits in b.
				// This is important if we skipped
				// over a scalar tail, since nb could
				// be larger than the bit width of b.
				nb -= 8
			}
		} else if p == nil {
			// Almost as fast path: track bit count and refill from pbits.
			// For short repetitions.
			if nb < 8 {
				b |= pbits << nb
				nb += endnb
			}
			nb -= 8 // for next iteration
		} else {
			// Slow path: reached end of ptrmask.
			// Process final partial byte and rewind to start.
			b |= uintptr(*p) << nb
			nb += endnb
			if nb < 8 {
				b |= uintptr(*ptrmask) << nb
				p = add1(ptrmask)
			} else {
				nb -= 8
				p = ptrmask
			}
		}

		// Emit bitmap byte.
		hb = b & bitPointerAll
		hb |= bitScanAll
		if w += 4; w >= nw {
			break
		}
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		b >>= 4
	}

Phase3:
	// Phase 3: Write last byte or partial byte and zero the rest of the bitmap entries.
	if w > nw {
		// Counting the 4 entries in hb not yet written to memory,
		// there are more entries than possible pointer slots.
		// Discard the excess entries (can't be more than 3).
		mask := uintptr(1)<<(4-(w-nw)) - 1
		hb &= mask | mask<<4 // apply mask to both pointer bits and scan bits
	}

	// Change nw from counting possibly-pointer words to total words in allocation.
	nw = size / goarch.PtrSize

	// Write whole bitmap bytes.
	// The first is hb, the rest are zero.
	if w <= nw {
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		hb = 0 // for possible final half-byte below
		for w += 4; w <= nw; w += 4 {
			*hbitp = 0
			hbitp = add1(hbitp)
		}
	}

	// Write final partial bitmap byte if any.
	// We know w > nw, or else we'd still be in the loop above.
	// It can be bigger only due to the 4 entries in hb that it counts.
	// If w == nw+4 then there's nothing left to do: we wrote all nw entries
	// and can discard the 4 sitting in hb.
	// But if w == nw+2, we need to write first two in hb.
	// The byte is shared with the next object, so be careful with
	// existing bits.
	if w == nw+2 {
		*hbitp = *hbitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | uint8(hb)
	}

Phase4:
	// Phase 4: Copy unrolled bitmap to per-arena bitmaps, if necessary.
	if outOfPlace {
		// TODO: We could probably make this faster by
		// handling [x+dataSize, x+size) specially.
		h := heapBitsForAddr(x)
		// cnw is the number of heap words, or bit pairs
		// remaining (like nw above).
		cnw := size / goarch.PtrSize
		src := (*uint8)(unsafe.Pointer(x))
		// We know the first and last byte of the bitmap are
		// not the same, but it's still possible for small
		// objects to span arenas, so they may share bitmap bytes
		// with neighboring objects.
		//
		// Handle the first byte specially if it's shared. See
		// Phase 1 for why this is the only special case we need.
		if doubleCheck {
			if !(h.shift == 0 || h.shift == 2) {
				print("x=", x, " size=", size, " h.shift=", h.shift, "\n")
				throw("bad start shift")
			}
		}
		if h.shift == 2 {
			*h.bitp = *h.bitp&^((bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift)<<(2*heapBitsShift)) | *src
			h = h.next().next()
			cnw -= 2
			src = addb(src, 1)
		}
		// We're now byte aligned. Copy out to per-arena
		// bitmaps until the last byte (which may again be
		// partial).
		for cnw >= 4 {
			// This loop processes four words at a time,
			// so round cnw down accordingly.
			hNext, words := h.forwardOrBoundary(cnw / 4 * 4)

			// n is the number of bitmap bytes to copy.
			n := words / 4
			memmove(unsafe.Pointer(h.bitp), unsafe.Pointer(src), n)
			cnw -= words
			h = hNext
			src = addb(src, n)
		}
		if doubleCheck && h.shift != 0 {
			print("cnw=", cnw, " h.shift=", h.shift, "\n")
			throw("bad shift after block copy")
		}
		// Handle the last byte if it's shared.
		if cnw == 2 {
			*h.bitp = *h.bitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | *src
			src = addb(src, 1)
		}
		if doubleCheck {
			if uintptr(unsafe.Pointer(src)) > x+size {
				throw("copy exceeded object size")
			}
			if !(cnw == 0 || cnw == 2) {
				print("x=", x, " size=", size, " cnw=", cnw, "\n")
				throw("bad number of remaining words")
			}
			// Set up hbitp so doubleCheck code below can check it.
			hbitp = h.bitp
		}
		// Zero the object where we wrote the bitmap.
		memclrNoHeapPointers(unsafe.Pointer(x), uintptr(unsafe.Pointer(src))-x)
	}

	if doubleCheck {
		// Double check the whole bitmap.

		// x+size may not point to the heap, so back up one
		// word and then advance it the way we do above.
		end := heapBitsForAddr(x + size - goarch.PtrSize)
		if outOfPlace {
			// In out-of-place copying, we just advance
			// using next.
			end = end.next()
		} else {
			// Don't use next because that may advance to
			// the next arena and the in-place logic
			// doesn't work there.
			end.shift += heapBitsShift
			if end.shift == 4*heapBitsShift {
				end.bitp, end.shift = add1(end.bitp), 0
			}
		}
		if typ.kind&kindGCProg == 0 && (hbitp != end.bitp || (w == nw+2) != (end.shift == 2)) {
			println("ended at wrong bitmap byte for", typ.string(), "x", dataSize/typ.size)
			print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
			print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
			h0 := heapBitsForAddr(x)
			print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
			print("ended at hbitp=", hbitp, " but next starts at bitp=", end.bitp, " shift=", end.shift, "\n")
			throw("bad heapBitsSetType")
		}

		// Double-check that bits to be written were written correctly.
		// Does not check that other bits were not written, unfortunately.
		h := heapBitsForAddr(x)
		nptr := typ.ptrdata / goarch.PtrSize
		ndata := typ.size / goarch.PtrSize
		count := dataSize / typ.size
		totalptr := ((count-1)*typ.size + typ.ptrdata) / goarch.PtrSize
		for i := uintptr(0); i < size/goarch.PtrSize; i++ {
			j := i % ndata
			var have, want uint8
			have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
			if i >= totalptr {
				if typ.kind&kindGCProg != 0 && i < (totalptr+3)/4*4 {
					// heapBitsSetTypeGCProg always fills
					// in full nibbles of bitScan.
					want = bitScan
				}
			} else {
				if j < nptr && (*addb(ptrmask, j/8)>>(j%8))&1 != 0 {
					want |= bitPointer
				}
				want |= bitScan
			}
			if have != want {
				println("mismatch writing bits for", typ.string(), "x", dataSize/typ.size)
				print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
				print("kindGCProg=", typ.kind&kindGCProg != 0, " outOfPlace=", outOfPlace, "\n")
				print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
				h0 := heapBitsForAddr(x)
				print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
				print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
				print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
				println("at word", i, "offset", i*goarch.PtrSize, "have", hex(have), "want", hex(want))
				if typ.kind&kindGCProg != 0 {
					println("GC program:")
					dumpGCProg(addb(typ.gcdata, 4))
				}
				throw("bad heapBitsSetType")
			}
			h = h.next()
		}
		if ptrmask == debugPtrmask.data {
			unlock(&debugPtrmask.lock)
		}
	}
}

var debugPtrmask struct {
	lock mutex
	data *byte
}

// heapBitsSetTypeGCProg implements heapBitsSetType using a GC program.
// progSize is the size of the memory described by the program.
// elemSize is the size of the element that the GC program describes (a prefix of).
// dataSize is the total size of the intended data, a multiple of elemSize.
// allocSize is the total size of the allocated memory.
//
// GC programs are only used for large allocations.
// heapBitsSetType requires that allocSize is a multiple of 4 words,
// so that the relevant bitmap bytes are not shared with surrounding
// objects.
func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
	if goarch.PtrSize == 8 && allocSize%(4*goarch.PtrSize) != 0 {
		// Alignment will be wrong.
		throw("heapBitsSetTypeGCProg: small allocation")
	}
	var totalBits uintptr
	if elemSize == dataSize {
		totalBits = runGCProg(prog, nil, h.bitp, 2)
		if totalBits*goarch.PtrSize != progSize {
			println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
			throw("heapBitsSetTypeGCProg: unexpected bit count")
		}
	} else {
		count := dataSize / elemSize

		// Piece together program trailer to run after prog that does:
		//	literal(0)
		//	repeat(1, elemSize-progSize-1) // zeros to fill element size
		//	repeat(elemSize, count-1) // repeat that element for count
		// This zero-pads the data remaining in the first element and then
		// repeats that first element to fill the array.
		var trailer [40]byte // 3 varints (max 10 each) + some bytes
		i := 0
		if n := elemSize/goarch.PtrSize - progSize/goarch.PtrSize; n > 0 {
			// literal(0)
			trailer[i] = 0x01
			i++
			trailer[i] = 0
			i++
			if n > 1 {
				// repeat(1, n-1)
				trailer[i] = 0x81
				i++
				n--
				for ; n >= 0x80; n >>= 7 {
					trailer[i] = byte(n | 0x80)
					i++
				}
				trailer[i] = byte(n)
				i++
			}
		}
		// repeat(elemSize/ptrSize, count-1)
		trailer[i] = 0x80
		i++
		n := elemSize / goarch.PtrSize
		for ; n >= 0x80; n >>= 7 {
			trailer[i] = byte(n | 0x80)
			i++
		}
		trailer[i] = byte(n)
		i++
		n = count - 1
		for ; n >= 0x80; n >>= 7 {
			trailer[i] = byte(n | 0x80)
			i++
		}
		trailer[i] = byte(n)
		i++
		trailer[i] = 0
		i++

		runGCProg(prog, &trailer[0], h.bitp, 2)

		// Even though we filled in the full array just now,
		// record that we only filled in up to the ptrdata of the
		// last element. This will cause the code below to
		// memclr the dead section of the final array element,
		// so that scanobject can stop early in the final element.
		totalBits = (elemSize*(count-1) + progSize) / goarch.PtrSize
	}
	endProg := unsafe.Pointer(addb(h.bitp, (totalBits+3)/4))
	endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/goarch.PtrSize/wordsPerBitmapByte))
	memclrNoHeapPointers(endProg, uintptr(endAlloc)-uintptr(endProg))
}
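
// For illustration (not runtime code): the varint layout used by the trailer
// above stores 7 bits per byte, least-significant group first, with the high
// bit meaning "more bytes follow":
//
//	func putVarint(buf []byte, n uintptr) int {
//		i := 0
//		for ; n >= 0x80; n >>= 7 {
//			buf[i] = byte(n | 0x80)
//			i++
//		}
//		buf[i] = byte(n)
//		return i + 1
//	}
//
// For example, 300 (0b100101100) encodes as 0xac 0x02.
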
// progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
// size is the size of the region described by prog, in bytes.
// The resulting bitvector will have no more than size/goarch.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector {
	n := (size/goarch.PtrSize + 7) / 8
	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
	x[len(x)-1] = 0xa1 // overflow check sentinel
	n = runGCProg(prog, nil, &x[0], 1)
	if x[len(x)-1] != 0xa1 {
		throw("progToPointerMask: overflow")
	}
	return bitvector{int32(n), &x[0]}
}

// Packed GC pointer bitmaps, aka GC programs.
//
// For large types containing arrays, the type information has a
// natural repetition that can be encoded to save space in the
// binary and in the memory representation of the type information.
//
// The encoding is a simple Lempel-Ziv style bytecode machine
// with the following instructions:
//
//	00000000: stop
//	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
//	10000000 n c: repeat the previous n bits c times; n, c are varints
//	1nnnnnnn c: repeat the previous n bits c times; c is a varint
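//
// For example (illustrative, not from a real type): an element whose first
// three words are pointer, scalar, pointer, repeated ten times in total,
// could be encoded as
//
//	0x03 0x05 // emit 3 literal bits, 101 (bit 0 = word 0 = pointer)
//	0x83 0x09 // 1nnnnnnn c: repeat the previous 3 bits 9 more times
//	0x00      // stop
//
// which expands to the 30-bit mask 101 101 ... 101.
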
// runGCProg executes the GC program prog, and then trailer if non-nil,
// writing to dst with entries of the given size.
// If size == 1, dst is a 1-bit pointer mask laid out moving forward from dst.
// If size == 2, dst is the 2-bit heap bitmap, and writes move forward
// starting at dst (because the heap bitmap does). In this case, the caller guarantees
// that only whole bytes in dst need to be written.
//
// runGCProg returns the number of 1- or 2-bit entries written to memory.
func runGCProg(prog, trailer, dst *byte, size int) uintptr {
	dstStart := dst

	// Bits waiting to be written to memory.
	var bits uintptr
	var nbits uintptr

	p := prog
Run:
	for {
		// Flush accumulated full bytes.
		// The rest of the loop assumes that nbits <= 7.
		for ; nbits >= 8; nbits -= 8 {
			if size == 1 {
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			} else {
				v := bits&bitPointerAll | bitScanAll
				*dst = uint8(v)
				dst = add1(dst)
				bits >>= 4
				v = bits&bitPointerAll | bitScanAll
				*dst = uint8(v)
				dst = add1(dst)
				bits >>= 4
			}
		}

		// Process one instruction.
		inst := uintptr(*p)
		p = add1(p)
		n := inst & 0x7F
		if inst&0x80 == 0 {
			// Literal bits; n == 0 means end of program.
			if n == 0 {
				// Program is over; continue in trailer if present.
				if trailer != nil {
					p = trailer
					trailer = nil
					continue
				}
				break Run
			}
			nbyte := n / 8
			for i := uintptr(0); i < nbyte; i++ {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				if size == 1 {
					*dst = uint8(bits)
					dst = add1(dst)
					bits >>= 8
				} else {
					v := bits&0xf | bitScanAll
					*dst = uint8(v)
					dst = add1(dst)
					bits >>= 4
					v = bits&0xf | bitScanAll
					*dst = uint8(v)
					dst = add1(dst)
					bits >>= 4
				}
			}
			if n %= 8; n > 0 {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				nbits += n
			}
			continue Run
		}

		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
		if n == 0 {
			for off := uint(0); ; off += 7 {
				x := uintptr(*p)
				p = add1(p)
				n |= (x & 0x7F) << off
				if x&0x80 == 0 {
					break
				}
			}
		}

		// Count is encoded in a varint in the next bytes.
		c := uintptr(0)
		for off := uint(0); ; off += 7 {
			x := uintptr(*p)
			p = add1(p)
			c |= (x & 0x7F) << off
			if x&0x80 == 0 {
				break
			}
		}
		c *= n // now total number of bits to copy

		// If the number of bits being repeated is small, load them
		// into a register and use that register for the entire loop
		// instead of repeatedly reading from memory.
		// Handling fewer than 8 bits here makes the general loop simpler.
		// The cutoff is goarch.PtrSize*8 - 7 to guarantee that when we add
		// the pattern to a bit buffer holding at most 7 bits (a partial byte)
		// it will not overflow.
		src := dst
		const maxBits = goarch.PtrSize*8 - 7
		if n <= maxBits {
			// Start with bits in output buffer.
			pattern := bits
			npattern := nbits

			// If we need more bits, fetch them from memory.
			if size == 1 {
				src = subtract1(src)
				for npattern < n {
					pattern <<= 8
					pattern |= uintptr(*src)
					src = subtract1(src)
					npattern += 8
				}
			} else {
				src = subtract1(src)
				for npattern < n {
					pattern <<= 4
					pattern |= uintptr(*src) & 0xf
					src = subtract1(src)
					npattern += 4
				}
			}

			// We started with the whole bit output buffer,
			// and then we loaded bits from whole bytes.
			// Either way, we might now have too many instead of too few.
			// Discard the extra.
			if npattern > n {
				pattern >>= npattern - n
				npattern = n
			}

			// Replicate pattern to at most maxBits.
			if npattern == 1 {
				// One bit being repeated.
				// If the bit is 1, make the pattern all 1s.
				// If the bit is 0, the pattern is already all 0s,
				// but we can claim that the number of bits
				// in the word is equal to the number we need (c),
				// because right shift of bits will zero fill.
				if pattern == 1 {
					pattern = 1<<maxBits - 1
					npattern = maxBits
				} else {
					npattern = c
				}
			} else {
				b := pattern
				nb := npattern
				if nb+nb <= maxBits {
					// Double pattern until the whole uintptr is filled.
					for nb <= goarch.PtrSize*8 {
						b |= b << nb
						nb += nb
					}
					// Trim away incomplete copy of original pattern in high bits.
					// TODO(rsc): Replace with table lookup or loop on systems without divide?
					nb = maxBits / npattern * npattern
					b &= 1<<nb - 1
					pattern = b
					npattern = nb
				}
			}

			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
			// Since pattern contains >8 bits, there will be full bytes to flush
			// on each iteration.
			for ; c >= npattern; c -= npattern {
				bits |= pattern << nbits
				nbits += npattern
				if size == 1 {
					for nbits >= 8 {
						*dst = uint8(bits)
						dst = add1(dst)
						bits >>= 8
						nbits -= 8
					}
				} else {
					for nbits >= 4 {
						*dst = uint8(bits&0xf | bitScanAll)
						dst = add1(dst)
						bits >>= 4
						nbits -= 4
					}
				}
			}

			// Add final fragment to bit buffer.
			if c > 0 {
				pattern &= 1<<c - 1
				bits |= pattern << nbits
				nbits += c
			}
			continue Run
		}

		// Repeat; n too large to fit in a register.
		// Since nbits <= 7, we know the first few bytes of repeated data
		// are already written to memory.
		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
		if size == 1 {
			// Leading src fragment.
			src = subtractb(src, (off+7)/8)
			if frag := off & 7; frag != 0 {
				bits |= uintptr(*src) >> (8 - frag) << nbits
				src = add1(src)
				nbits += frag
				c -= frag
			}
			// Main loop: load one byte, write another.
			// The bits are rotating through the bit buffer.
			for i := c / 8; i > 0; i-- {
				bits |= uintptr(*src) << nbits
				src = add1(src)
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			}
			// Final src fragment.
			if c %= 8; c > 0 {
				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
				nbits += c
			}
		} else {
			// Leading src fragment.
			src = subtractb(src, (off+3)/4)
			if frag := off & 3; frag != 0 {
				bits |= (uintptr(*src) & 0xf) >> (4 - frag) << nbits
				src = add1(src)
				nbits += frag
				c -= frag
			}
			// Main loop: load one byte, write another.
			// The bits are rotating through the bit buffer.
			for i := c / 4; i > 0; i-- {
				bits |= (uintptr(*src) & 0xf) << nbits
				src = add1(src)
				*dst = uint8(bits&0xf | bitScanAll)
				dst = add1(dst)
				bits >>= 4
			}
			// Final src fragment.
			if c %= 4; c > 0 {
				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
				nbits += c
			}
		}
	}

	// Write any final bits out, using full-byte writes, even for the final byte.
	var totalBits uintptr
	if size == 1 {
		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
		nbits += -nbits & 7
		for ; nbits > 0; nbits -= 8 {
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}
	} else {
		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*4 + nbits
		nbits += -nbits & 3
		for ; nbits > 0; nbits -= 4 {
			v := bits&0xf | bitScanAll
			*dst = uint8(v)
			dst = add1(dst)
			bits >>= 4
		}
	}
	return totalBits
}

// materializeGCProg allocates space for the (1-bit) pointer bitmask
// for an object of size ptrdata. Then it fills that space with the
// pointer bitmask specified by the program prog.
// The bitmask starts at s.startAddr.
// The result must be deallocated with dematerializeGCProg.
func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
	// Each word of ptrdata needs one bit in the bitmap.
	bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
	// Compute the number of pages needed for bitmapBytes.
	pages := divRoundUp(bitmapBytes, pageSize)
	s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
	runGCProg(addb(prog, 4), nil, (*byte)(unsafe.Pointer(s.startAddr)), 1)
	return s
}

func dematerializeGCProg(s *mspan) {
	mheap_.freeManual(s, spanAllocPtrScalarBits)
}

func dumpGCProg(p *byte) {
	nptr := 0
	for {
		x := *p
		p = add1(p)
		if x == 0 {
			print("\t", nptr, " end\n")
			break
		}
		if x&0x80 == 0 {
			print("\t", nptr, " lit ", x, ":")
			n := int(x+7) / 8
			for i := 0; i < n; i++ {
				print(" ", hex(*p))
				p = add1(p)
			}
			print("\n")
			nptr += int(x)
		} else {
			nbit := int(x &^ 0x80)
			if nbit == 0 {
				for nb := uint(0); ; nb += 7 {
					x := *p
					p = add1(p)
					nbit |= int(x&0x7f) << nb
					if x&0x80 == 0 {
						break
					}
				}
			}
			count := 0
			for nb := uint(0); ; nb += 7 {
				x := *p
				p = add1(p)
				count |= int(x&0x7f) << nb
				if x&0x80 == 0 {
					break
				}
			}
			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
			nptr += nbit * count
		}
	}
}

// Testing.

func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
	target := (*stkframe)(ctxt)
	if frame.sp <= target.sp && target.sp < frame.varp {
		*target = *frame
		return false
	}
	return true
}

// gcbits returns the GC type info for x, for testing.
// The result is the bitmap entries (0 or 1), one entry per byte.
//go:linkname reflect_gcbits reflect.gcbits
func reflect_gcbits(x any) []byte {
	ret := getgcmask(x)
	typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
	nptr := typ.ptrdata / goarch.PtrSize
	for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
		ret = ret[:len(ret)-1]
	}
	return ret
}

// Returns GC type info for the pointer stored in ep for testing.
// If ep points to the stack, only static live information will be returned
// (i.e. not for objects which are only dynamically live stack objects).
func getgcmask(ep any) (mask []byte) {
	e := *efaceOf(&ep)
	p := e.data
	t := e._type
	// data or bss
	for _, datap := range activeModules() {
		// data
		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
			bitmap := datap.gcdatamask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}

		// bss
		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
			bitmap := datap.gcbssmask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}
	}

	// heap
	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
		hbits := heapBitsForAddr(base)
		n := s.elemsize
		mask = make([]byte, n/goarch.PtrSize)
		for i := uintptr(0); i < n; i += goarch.PtrSize {
			if hbits.isPointer() {
				mask[i/goarch.PtrSize] = 1
			}
			if !hbits.morePointers() {
				mask = mask[:i/goarch.PtrSize]
				break
			}
			hbits = hbits.next()
		}
		return
	}

	// stack
	if _g_ := getg(); _g_.m.curg.stack.lo <= uintptr(p) && uintptr(p) < _g_.m.curg.stack.hi {
		var frame stkframe
		frame.sp = uintptr(p)
		_g_ := getg()
		gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
		if frame.fn.valid() {
			locals, _, _ := getStackMap(&frame, nil, false)
			if locals.n == 0 {
				return
			}
			size := uintptr(locals.n) * goarch.PtrSize
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - frame.varp + size) / goarch.PtrSize
				mask[i/goarch.PtrSize] = locals.ptrbit(off)
			}
		}
		return
	}

	// otherwise, not something the GC knows about.
	// possibly read-only data, like malloc(0).
	// must not have pointers
	return
}