// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: type and heap bitmaps.
//
// Stack, data, and bss bitmaps
//
// Stack frames and global variables in the data and bss sections are
// described by bitmaps with 1 bit per pointer-sized word. A "1" bit
// means the word is a live pointer to be visited by the GC (referred to
// as "pointer"). A "0" bit means the word should be ignored by GC
// (referred to as "scalar", though it could be a dead pointer value).
//
// Heap bitmap
//
// The heap bitmap comprises 1 bit for each pointer-sized word in the heap,
// recording whether a pointer is stored in that word or not. This bitmap
// is stored in the heapArena metadata backing each heap arena.
// That is, if ha is the heapArena for the arena starting at "start",
// then ha.bitmap[0] holds the 64 bits for the 64 words "start"
// through start+63*ptrSize, ha.bitmap[1] holds the entries for
// start+64*ptrSize through start+127*ptrSize, and so on.
// Bits correspond to words in little-endian order. ha.bitmap[0]&1 represents
// the word at "start", ha.bitmap[0]>>1&1 represents the word at start+8, etc.
// (For 32-bit platforms, s/64/32/.)
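//
// For example (an illustrative sketch mirroring heapBitsForAddr below):
// assuming addr lies in an allocated heap arena, its pointer bit can be
// read as
//
//	ai := arenaIndex(addr)
//	ha := mheap_.arenas[ai.l1()][ai.l2()]
//	word := addr / goarch.PtrSize % heapArenaWords
//	isPtr := ha.bitmap[word/ptrBits]>>(word%ptrBits)&1 != 0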
//
// We also keep a noMorePtrs bitmap which allows us to stop scanning
// the heap bitmap early in certain situations. If ha.noMorePtrs[i]>>j&1
// is 1, then the object containing the last word described by ha.bitmap[8*i+j]
// has no more pointers beyond those described by ha.bitmap[8*i+j].
// If ha.noMorePtrs[i]>>j&1 is set, the entries in ha.bitmap[8*i+j+1] and
// beyond must all be zero until the start of the next object.
//
// The bitmap for noscan spans is set to all zero at span allocation time.
//
// The bitmap for unallocated objects in scannable spans is not maintained
// (can be junk).
44 "runtime/internal/atomic"
45 "runtime/internal/sys"

// heapArenaPtrScalar contains the per-heapArena pointer/scalar metadata for the GC.
type heapArenaPtrScalar struct {
	// bitmap stores the pointer/scalar bitmap for the words in
	// this arena. See mbitmap.go for a description.
	// This array uses 1 bit per word of heap, or 1.6% of the heap size (for 64-bit).
	bitmap [heapArenaBitmapWords]uintptr

	// If the ith bit of noMorePtrs is true, then there are no more
	// pointers for the object containing the word described by the
	// high bit of bitmap[i].
	// In that case, bitmap[i+1], ... must be zero until the start
	// of the next object.
	// We never operate on these entries using bit-parallel techniques,
	// so it is ok if they are small. Also, they can't be bigger than
	// uint16 because at that size a single noMorePtrs entry
	// represents 8K of memory, the minimum size of a span. Any larger
	// and we'd have to worry about concurrent updates.
	// This array uses 1 bit per word of bitmap, or .024% of the heap size (for 64-bit).
	noMorePtrs [heapArenaBitmapWords / 8]uint8
}

// addb returns the byte pointer p+n.
func addb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
}

// subtractb returns the byte pointer p-n.
func subtractb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, -n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
}

// add1 returns the byte pointer p+1.
func add1(p *byte) *byte {
	// Note: wrote out full expression instead of calling addb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
}

// subtract1 returns the byte pointer p-1.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func subtract1(p *byte) *byte {
	// Note: wrote out full expression instead of calling subtractb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
}

// markBits provides access to the mark bit for an object in the heap.
// bytep points to the byte holding the mark bit.
// mask is a byte with a single bit set that can be &ed with *bytep
// to see if the bit has been set.
// *m.bytep&m.mask != 0 indicates the mark bit is set.
// index can be used along with span information to generate
// the address of the object in the heap.
// We maintain one set of mark bits for allocation and one for
// marking purposes.
type markBits struct {
	bytep *uint8
	mask  uint8
	index uintptr
}

func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
	bytep, mask := s.allocBits.bitp(allocBitIndex)
	return markBits{bytep, mask, allocBitIndex}
}

// refillAllocCache takes 8 bytes from s.allocBits starting at whichByte
// and negates them so that ctz (count trailing zeros) instructions
// can be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
func (s *mspan) refillAllocCache(whichByte uint16) {
	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(uintptr(whichByte))))
	aCache := uint64(0)
	aCache |= uint64(bytes[0])
	aCache |= uint64(bytes[1]) << (1 * 8)
	aCache |= uint64(bytes[2]) << (2 * 8)
	aCache |= uint64(bytes[3]) << (3 * 8)
	aCache |= uint64(bytes[4]) << (4 * 8)
	aCache |= uint64(bytes[5]) << (5 * 8)
	aCache |= uint64(bytes[6]) << (6 * 8)
	aCache |= uint64(bytes[7]) << (7 * 8)
	s.allocCache = ^aCache
}
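
// For example (illustrative): if the first byte of s.allocBits is
// 0b00000111, objects 0 through 2 are allocated. After negation the
// cache holds ...11111000, so a count of trailing zeros yields the
// first free index:
//
//	var allocBits uint64 = 0b00000111
//	free := sys.TrailingZeros64(^allocBits) // == 3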

// nextFreeIndex returns the index of the next free object in s at
// or after s.freeindex.
// There are hardware instructions that can be used to make this
// faster if profiling warrants it.
func (s *mspan) nextFreeIndex() uint16 {
	sfreeindex := s.freeindex
	snelems := s.nelems
	if sfreeindex == snelems {
		return sfreeindex
	}
	if sfreeindex > snelems {
		throw("s.freeindex > s.nelems")
	}

	aCache := s.allocCache

	bitIndex := sys.TrailingZeros64(aCache)
	for bitIndex == 64 {
		// Move index to start of next cached bits.
		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
		if sfreeindex >= snelems {
			s.freeindex = snelems
			return snelems
		}
		whichByte := sfreeindex / 8
		// Refill s.allocCache with the next 64 alloc bits.
		s.refillAllocCache(whichByte)
		aCache = s.allocCache
		bitIndex = sys.TrailingZeros64(aCache)
		// nothing available in cached bits
		// grab the next 8 bytes and try again.
	}
	result := sfreeindex + uint16(bitIndex)
	if result >= snelems {
		s.freeindex = snelems
		return snelems
	}

	s.allocCache >>= uint(bitIndex + 1)
	sfreeindex = result + 1

	if sfreeindex%64 == 0 && sfreeindex != snelems {
		// We just incremented s.freeindex so it isn't 0.
		// As each 1 in s.allocCache was encountered and used for allocation
		// it was shifted away. At this point s.allocCache contains all 0s.
		// Refill s.allocCache so that it corresponds
		// to the bits at s.allocBits starting at s.freeindex.
		whichByte := sfreeindex / 8
		s.refillAllocCache(whichByte)
	}
	s.freeindex = sfreeindex
	return result
}
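
// For example (an illustrative sketch of how a span allocation path can
// use the returned index; s.nelems means "span is full"):
//
//	idx := s.nextFreeIndex()
//	if idx != s.nelems {
//		v := s.base() + uintptr(idx)*s.elemsize // address of the free object
//	}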

// isFree reports whether the index'th object in s is unallocated.
//
// The caller must ensure s.state is mSpanInUse, and there must have
// been no preemption points since ensuring this (which could allow a
// GC transition, which would allow the state to change).
func (s *mspan) isFree(index uintptr) bool {
	if index < uintptr(s.freeIndexForScan) {
		return false
	}
	bytep, mask := s.allocBits.bitp(index)
	return *bytep&mask == 0
}

// divideByElemSize returns n/s.elemsize.
// n must be within [0, s.npages*_PageSize),
// or may be exactly s.npages*_PageSize
// if s.elemsize is from sizeclasses.go.
//
// nosplit, because it is called by objIndex, which is nosplit.
//
//go:nosplit
func (s *mspan) divideByElemSize(n uintptr) uintptr {
	const doubleCheck = false

	// See explanation in mksizeclasses.go's computeDivMagic.
	q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)

	if doubleCheck && q != n/s.elemsize {
		println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
		throw("bad magic division")
	}
	return q
}

// nosplit, because it is called by other nosplit code like findObject.
//
//go:nosplit
func (s *mspan) objIndex(p uintptr) uintptr {
	return s.divideByElemSize(p - s.base())
}

func markBitsForAddr(p uintptr) markBits {
	s := spanOf(p)
	objIndex := s.objIndex(p)
	return s.markBitsForIndex(objIndex)
}

func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
	bytep, mask := s.gcmarkBits.bitp(objIndex)
	return markBits{bytep, mask, objIndex}
}

func (s *mspan) markBitsForBase() markBits {
	return markBits{&s.gcmarkBits.x, uint8(1), 0}
}

// isMarked reports whether mark bit m is set.
func (m markBits) isMarked() bool {
	return *m.bytep&m.mask != 0
}

// setMarked sets the marked bit in the markbits, atomically.
func (m markBits) setMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.Or8(m.bytep, m.mask)
}

// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
func (m markBits) setMarkedNonAtomic() {
	*m.bytep |= m.mask
}

// clearMarked clears the marked bit in the markbits, atomically.
func (m markBits) clearMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.And8(m.bytep, ^m.mask)
}

// markBitsForSpan returns the markBits for the span base address base.
func markBitsForSpan(base uintptr) (mbits markBits) {
	mbits = markBitsForAddr(base)
	if mbits.mask != 1 {
		throw("markBitsForSpan: unaligned start")
	}
	return mbits
}

// advance advances the markBits to the next object in the span.
func (m *markBits) advance() {
	if m.mask == 1<<7 {
		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
		m.mask = 1
	} else {
		m.mask = m.mask << 1
	}
	m.index++
}

// clobberdeadPtr is a special value that is used by the compiler to
// clobber dead stack slots, when -clobberdead flag is set.
const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))
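// (On 64-bit platforms ^uintptr(0)>>63 is 1, so the shift is 32 and the
// constant evaluates to 0xdeaddeaddeaddead; on 32-bit platforms the
// shift is 0 and the value stays 0xdeaddead.)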

// badPointer throws bad pointer in heap panic.
func badPointer(s *mspan, p, refBase, refOff uintptr) {
	// Typically this indicates an incorrect use
	// of unsafe or cgo to store a bad pointer in
	// the Go heap. It may also indicate a runtime
	// bug.
	//
	// TODO(austin): We could be more aggressive
	// and detect pointers to unallocated objects
	// in allocated spans.
	printlock()
	print("runtime: pointer ", hex(p))
	if s != nil {
		state := s.state.get()
		if state != mSpanInUse {
			print(" to unallocated span")
		} else {
			print(" to unused region of span")
		}
		print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
	}
	print("\n")
	if refBase != 0 {
		print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
		gcDumpObject("object", refBase, refOff)
	}
	getg().m.traceback = 2
	throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
}

// findObject returns the base address for the heap object containing
// the address p, the object's span, and the index of the object in s.
// If p does not point into a heap object, it returns base == 0.
//
// If p points to an invalid heap pointer and debug.invalidptr != 0,
// findObject panics.
//
// refBase and refOff optionally give the base address of the object
// in which the pointer p was found and the byte offset at which it
// was found. These are used for error reporting.
//
// It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
// Since p is a uintptr, it would not be adjusted if the stack were to move.
//
//go:nosplit
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
	s = spanOf(p)
	// If s is nil, the virtual address has never been part of the heap.
	// This pointer may be to some mmap'd region, so we allow it.
	if s == nil {
		if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
			// Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
			// as they are the only platforms where the compiler's clobberdead
			// mode is implemented. On these platforms clobberdeadPtr cannot
			// be a valid address.
			badPointer(s, p, refBase, refOff)
		}
		return
	}
	// If p is a bad pointer, it may not be in s's bounds.
	//
	// Check s.state to synchronize with span initialization
	// before checking other fields. See also spanOfHeap.
	if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
		// Pointers into stacks are also ok, the runtime manages these explicitly.
		if state == mSpanManual {
			return
		}
		// The following ensures that we are rigorous about what data
		// structures hold valid pointers.
		if debug.invalidptr != 0 {
			badPointer(s, p, refBase, refOff)
		}
		return
	}

	objIndex = s.objIndex(p)
	base = s.base() + objIndex*s.elemsize
	return
}

// reflect_verifyNotInHeapPtr reports whether converting the not-in-heap pointer into an unsafe.Pointer is ok.
//
//go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr
func reflect_verifyNotInHeapPtr(p uintptr) bool {
	// Conversion to a pointer is ok as long as findObject above does not call badPointer.
	// Since we're already promised that p doesn't point into the heap, just disallow heap
	// pointers and the special clobbered pointer.
	return spanOf(p) == nil && p != clobberdeadPtr
}

const ptrBits = 8 * goarch.PtrSize

// heapBits provides access to the bitmap bits for a single heap word.
// The methods on heapBits take value receivers so that the compiler
// can more easily inline calls to those methods and registerize the
// struct fields independently.
type heapBits struct {
	// heapBits will report on pointers in the range [addr,addr+size).
	// The low bit of mask contains the pointerness of the word at addr
	// (assuming valid>0).
	addr, size uintptr

	// The next few bits represent the pointerness of the words
	// following addr. Those bits already returned by next() are zeroed.
	mask uintptr

	// Number of bits in mask that are valid. mask is always less than 1<<valid.
	valid uintptr
}

// heapBitsForAddr returns the heapBits for the address addr.
// The caller must ensure [addr,addr+size) is in an allocated span.
// In particular, be careful not to point past the end of an object.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func heapBitsForAddr(addr, size uintptr) heapBits {
	// Find arena
	ai := arenaIndex(addr)
	ha := mheap_.arenas[ai.l1()][ai.l2()]

	// Word index in arena.
	word := addr / goarch.PtrSize % heapArenaWords

	// Word index and bit offset in bitmap array.
	idx := word / ptrBits
	off := word % ptrBits

	// Grab relevant bits of bitmap.
	mask := ha.bitmap[idx] >> off
	valid := ptrBits - off

	// Process depending on where the object ends.
	nptr := size / goarch.PtrSize
	if nptr < valid {
		// Bits for this object end before the end of this bitmap word.
		// Squash bits for the following objects.
		mask &= 1<<(nptr&(ptrBits-1)) - 1
		valid = nptr
	} else if nptr == valid {
		// Bits for this object end at exactly the end of this bitmap word.
		// All good.
	} else {
		// Bits for this object extend into the next bitmap word. See if there
		// may be any pointers recorded there.
		if uintptr(ha.noMorePtrs[idx/8])>>(idx%8)&1 != 0 {
			// No more pointers in this object after this bitmap word.
			// Update size so we know not to look there.
			size = valid * goarch.PtrSize
		}
	}

	return heapBits{addr: addr, size: size, mask: mask, valid: valid}
}

// next returns the (absolute) address of the next known pointer and
// a heapBits iterator representing any remaining pointers.
// If there are no more pointers, returns address 0.
// Note that next does not modify h. The caller must record the result.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func (h heapBits) next() (heapBits, uintptr) {
	for {
		if h.mask != 0 {
			var i int
			if goarch.PtrSize == 8 {
				i = sys.TrailingZeros64(uint64(h.mask))
			} else {
				i = sys.TrailingZeros32(uint32(h.mask))
			}
			h.mask ^= uintptr(1) << (i & (ptrBits - 1))
			return h, h.addr + uintptr(i)*goarch.PtrSize
		}

		// Skip words that we've already processed.
		h.addr += h.valid * goarch.PtrSize
		h.size -= h.valid * goarch.PtrSize
		if h.size == 0 {
			return h, 0 // no more pointers
		}

		// Grab more bits and try again.
		h = heapBitsForAddr(h.addr, h.size)
	}
}
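
// For example (illustrative sketch): visiting every pointer slot
// recorded for an object at base spanning size bytes:
//
//	h := heapBitsForAddr(base, size)
//	for {
//		var addr uintptr
//		if h, addr = h.next(); addr == 0 {
//			break // no more pointers
//		}
//		// ... the heap word at addr is marked as a pointer ...
//	}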

// nextFast is like next, but can return 0 even when there are more pointers
// to be found. Callers should call next if nextFast returns 0 as its second
// return value.
//
//	if addr, h = h.nextFast(); addr == 0 {
//		if addr, h = h.next(); addr == 0 {
//			break // no more pointers
//		}
//	}
//	// ... process pointer at addr ...
//
// nextFast is designed to be inlineable.
//
//go:nosplit
func (h heapBits) nextFast() (heapBits, uintptr) {
	if h.mask == 0 {
		return h, 0
	}
	var i int
	if goarch.PtrSize == 8 {
		i = sys.TrailingZeros64(uint64(h.mask))
	} else {
		i = sys.TrailingZeros32(uint32(h.mask))
	}
	h.mask ^= uintptr(1) << (i & (ptrBits - 1))
	return h, h.addr + uintptr(i)*goarch.PtrSize
}

// bulkBarrierPreWrite executes a write barrier
// for every pointer slot in the memory range [src, src+size),
// using pointer/scalar information from [dst, dst+size).
// This executes the write barriers necessary before a memmove.
// src, dst, and size must be pointer-aligned.
// The range [dst, dst+size) must lie within a single object.
// It does not perform the actual writes.
//
// As a special case, src == 0 indicates that this is being used for a
// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
// barrier.
//
// Callers should call bulkBarrierPreWrite immediately before
// calling memmove(dst, src, size). This function is marked nosplit
// to avoid being preempted; the GC must not stop the goroutine
// between the memmove and the execution of the barriers.
// The caller is also responsible for cgo pointer checks if this
// may be writing Go pointers into non-Go memory.
//
// The pointer bitmap is not maintained for allocations containing
// no pointers at all; any caller of bulkBarrierPreWrite must first
// make sure the underlying allocation contains pointers, usually
// by checking typ.PtrBytes.
//
// Callers must perform cgo checks if goexperiment.CgoCheck2.
//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.needed {
		return
	}
	if s := spanOf(dst); s == nil {
		// If dst is a global, use the data or BSS bitmaps to
		// execute write barriers.
		for _, datap := range activeModules() {
			if datap.data <= dst && dst < datap.edata {
				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
				return
			}
		}
		for _, datap := range activeModules() {
			if datap.bss <= dst && dst < datap.ebss {
				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
				return
			}
		}
		return
	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
		// dst was heap memory at some point, but isn't now.
		// It can't be a global. It must be either our stack,
		// or in the case of direct channel sends, it could be
		// another stack. Either way, no need for barriers.
		// This will also catch if dst is in a freed span,
		// though that should never happen.
		return
	}

	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst, size)
	if src == 0 {
		for {
			var addr uintptr
			if h, addr = h.next(); addr == 0 {
				break
			}
			dstx := (*uintptr)(unsafe.Pointer(addr))
			p := buf.get1()
			p[0] = *dstx
		}
	} else {
		for {
			var addr uintptr
			if h, addr = h.next(); addr == 0 {
				break
			}
			dstx := (*uintptr)(unsafe.Pointer(addr))
			srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
			p := buf.get2()
			p[0] = *dstx
			p[1] = *srcx
		}
	}
}
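
// For example (an illustrative sketch of the intended calling pattern,
// as in typedmemmove-style code): the barriers run immediately before
// the copy, and only when the type actually contains pointers.
//
//	if writeBarrier.needed && typ.PtrBytes != 0 {
//		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
//	}
//	memmove(dst, src, typ.Size_)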

// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
// does not execute write barriers for [dst, dst+size).
//
// In addition to the requirements of bulkBarrierPreWrite
// callers need to ensure [dst, dst+size) is zeroed.
//
// This is used for special cases where e.g. dst was just
// created and zeroed with malloc.
//
//go:nosplit
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.needed {
		return
	}
	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst, size)
	for {
		var addr uintptr
		if h, addr = h.next(); addr == 0 {
			break
		}
		srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
		p := buf.get1()
		p[0] = *srcx
	}
}

// bulkBarrierBitmap executes write barriers for copying from [src,
// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
// assumed to start maskOffset bytes into the data covered by the
// bitmap in bits (which may not be a multiple of 8).
//
// This is used by bulkBarrierPreWrite for writes to data and BSS.
//
//go:nosplit
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
	word := maskOffset / goarch.PtrSize
	bits = addb(bits, word/8)
	mask := uint8(1) << (word % 8)

	buf := &getg().m.p.ptr().wbBuf
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		if mask == 0 {
			bits = addb(bits, 1)
			if *bits == 0 {
				// Skip 8 words.
				i += 7 * goarch.PtrSize
				continue
			}
			mask = 1
		}
		if *bits&mask != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			if src == 0 {
				p := buf.get1()
				p[0] = *dstx
			} else {
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				p := buf.get2()
				p[0] = *dstx
				p[1] = *srcx
			}
		}
		mask <<= 1
	}
}

// typeBitsBulkBarrier executes a write barrier for every
// pointer that would be copied from [src, src+size) to [dst,
// dst+size) by a memmove using the type bitmap to locate those
// pointer slots.
//
// The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
// dst, src, and size must be pointer-aligned.
// The type typ must have a plain bitmap, not a GC program.
// The only use of this function is in channel sends, and the
// 64 kB channel element limit takes care of this for us.
//
// Must not be preempted because it typically runs right before memmove,
// and the GC must observe the barriers and the memmove as a single
// atomic action.
//
// Callers must perform cgo checks if goexperiment.CgoCheck2.
//
//go:nosplit
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
	if typ == nil {
		throw("runtime: typeBitsBulkBarrier without type")
	}
	if typ.Size_ != size {
		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size)
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if typ.Kind_&kindGCProg != 0 {
		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " with GC prog")
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if !writeBarrier.needed {
		return
	}
	ptrmask := typ.GCData
	buf := &getg().m.p.ptr().wbBuf
	var bits uint32
	for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
		if i&(goarch.PtrSize*8-1) == 0 {
			bits = uint32(*ptrmask)
			ptrmask = addb(ptrmask, 1)
		} else {
			bits = bits >> 1
		}
		if bits&1 != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			p := buf.get2()
			p[0] = *dstx
			p[1] = *srcx
		}
	}
}

// initHeapBits initializes the heap bitmap for a span.
// If this is a span of single pointer allocations, it initializes all
// words to pointer. If forceClear is true, clears all bits.
func (s *mspan) initHeapBits(forceClear bool) {
	if forceClear || s.spanclass.noscan() {
		// Set all the pointer bits to zero. We do this once
		// when the span is allocated so we don't have to do it
		// for each object allocation.
		base := s.base()
		size := s.npages * pageSize
		h := writeHeapBitsForAddr(base)
		h.flush(base, size)
		return
	}
	isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
	if !isPtrs {
		return // nothing to do
	}
	h := writeHeapBitsForAddr(s.base())
	size := s.npages * pageSize
	nptrs := size / goarch.PtrSize
	for i := uintptr(0); i < nptrs; i += ptrBits {
		h = h.write(^uintptr(0), ptrBits)
	}
	h.flush(s.base(), size)
}

// countAlloc returns the number of objects allocated in span s by
// scanning the allocation bitmap.
func (s *mspan) countAlloc() int {
	count := 0
	bytes := divRoundUp(uintptr(s.nelems), 8)
	// Iterate over each 8-byte chunk and count allocations
	// with an intrinsic. Note that newMarkBits guarantees that
	// gcmarkBits will be 8-byte aligned, so we don't have to
	// worry about edge cases; irrelevant bits will simply be zero.
	for i := uintptr(0); i < bytes; i += 8 {
		// Extract 64 bits from the byte pointer and get a OnesCount.
		// Note that the unsafe cast here doesn't preserve endianness,
		// but that's OK. We only care about how many bits are 1, not
		// about the order we discover them in.
		mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
		count += sys.OnesCount64(mrkBits)
	}
	return count
}

// writeHeapBits accumulates heap bitmap bits for an object and writes
// them out to the heap bitmap in bulk.
type writeHeapBits struct {
	addr  uintptr // address that the low bit of mask represents the pointer state of.
	mask  uintptr // some pointer bits starting at the address addr.
	valid uintptr // number of bits in mask that are valid (including low)
	low   uintptr // number of low-order bits to not overwrite
}

// writeHeapBitsForAddr starts a heap bitmap write at addr.
func writeHeapBitsForAddr(addr uintptr) (h writeHeapBits) {
	// We start writing bits maybe in the middle of a heap bitmap word.
	// Remember how many bits into the word we started, so we can be sure
	// not to overwrite the previous bits.
	h.low = addr / goarch.PtrSize % ptrBits

	// round down to heap word that starts the bitmap word.
	h.addr = addr - h.low*goarch.PtrSize

	// We don't have any bits yet.
	h.mask = 0
	h.valid = h.low

	return
}

// write appends the pointerness of the next valid words,
// using the low valid bits of bits. 1=pointer, 0=scalar.
func (h writeHeapBits) write(bits, valid uintptr) writeHeapBits {
	if h.valid+valid <= ptrBits {
		// Fast path - just accumulate the bits.
		h.mask |= bits << h.valid
		h.valid += valid
		return h
	}
	// Too many bits to fit in this word. Write the current word
	// out and move on to the next word.

	data := h.mask | bits<<h.valid       // mask for this word
	h.mask = bits >> (ptrBits - h.valid) // leftover for next word
	h.valid += valid - ptrBits           // have h.valid+valid bits, writing ptrBits of them

	// Flush mask to the memory bitmap.
	// TODO: figure out how to cache arena lookup.
	ai := arenaIndex(h.addr)
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
	m := uintptr(1)<<h.low - 1
	ha.bitmap[idx] = ha.bitmap[idx]&m | data
	// Note: no synchronization required for this write because
	// the allocator has exclusive access to the page, and the bitmap
	// entries are all for a single page. Also, visibility of these
	// writes is guaranteed by the publication barrier in mallocgc.

	// Clear noMorePtrs bit, since we're going to be writing bits
	// into the following word.
	ha.noMorePtrs[idx/8] &^= uint8(1) << (idx % 8)
	// Note: same as above

	// Move to next word of bitmap.
	h.addr += ptrBits * goarch.PtrSize
	h.low = 0
	return h
}

// pad adds padding of size bytes (zero/scalar bits).
func (h writeHeapBits) pad(size uintptr) writeHeapBits {
	if size == 0 {
		return h
	}
	words := size / goarch.PtrSize
	for words > ptrBits {
		h = h.write(0, ptrBits)
		words -= ptrBits
	}
	return h.write(0, words)
}

// flush writes out the bits that have been written, and adds zeros as
// needed to cover the full object [addr, addr+size).
func (h writeHeapBits) flush(addr, size uintptr) {
	// zeros counts the number of bits needed to represent the object minus the
	// number of bits we've already written. This is the number of 0 bits
	// that need to be added.
	zeros := (addr+size-h.addr)/goarch.PtrSize - h.valid

	// Add zero bits up to the bitmap word boundary
	if zeros > 0 {
		z := ptrBits - h.valid
		if z > zeros {
			z = zeros
		}
		h.valid += z
		zeros -= z
	}

	// Find word in bitmap that we're going to write.
	ai := arenaIndex(h.addr)
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords

	// Write remaining bits.
	if h.valid != h.low {
		m := uintptr(1)<<h.low - 1      // don't clear existing bits below "low"
		m |= ^(uintptr(1)<<h.valid - 1) // don't clear existing bits above "valid"
		ha.bitmap[idx] = ha.bitmap[idx]&m | h.mask
	}
	if zeros == 0 {
		return
	}

	// Record in the noMorePtrs map that there won't be any more 1 bits,
	// so readers can stop early.
	ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)

	// Advance to next bitmap word.
	h.addr += ptrBits * goarch.PtrSize

	// Continue on writing zeros for the rest of the object.
	// For standard use of the ptr bits this is not required, as
	// the bits are read from the beginning of the object. Some uses,
	// like noscan spans, oblets, bulk write barriers, and cgocheck, might
	// start mid-object, so these writes are still required.
	for {
		ai := arenaIndex(h.addr)
		ha := mheap_.arenas[ai.l1()][ai.l2()]
		idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
		if zeros < ptrBits {
			ha.bitmap[idx] &^= uintptr(1)<<zeros - 1
			break
		} else if zeros == ptrBits {
			ha.bitmap[idx] = 0
			break
		} else {
			ha.bitmap[idx] = 0
			zeros -= ptrBits
		}
		ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
		h.addr += ptrBits * goarch.PtrSize
	}
}
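
// For example (an illustrative sketch of the write/flush protocol used
// by initHeapBits and heapBitsSetType): recording an object at addr
// whose first two words are pointers, within an allocation of size bytes:
//
//	h := writeHeapBitsForAddr(addr)
//	h = h.write(0b11, 2) // two pointer words
//	h.flush(addr, size)  // zero-fill the scalar tail, set noMorePtrs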

// readUintptr reads the bytes starting at the aligned pointer p into a uintptr.
// The read is little-endian.
func readUintptr(p *byte) uintptr {
	x := *(*uintptr)(unsafe.Pointer(p))
	if goarch.BigEndian {
		if goarch.PtrSize == 8 {
			return uintptr(sys.Bswap64(uint64(x)))
		}
		return uintptr(sys.Bswap32(uint32(x)))
	}
	return x
}

// heapBitsSetType records that the new allocation [x, x+size)
// holds in [x, x+dataSize) one or more values of type typ.
// (The number of values is given by dataSize / typ.Size.)
// If dataSize < size, the fragment [x+dataSize, x+size) is
// recorded as non-pointer data.
// It is known that the type has pointers somewhere;
// malloc does not call heapBitsSetType when there are no pointers,
// because all free objects are marked as noscan during
// heapBitsSweepSpan.
//
// There can only be one allocation from a given span active at a time,
// and the bitmap for a span always falls on word boundaries,
// so there are no write-write races for access to the heap bitmap.
// Hence, heapBitsSetType can access the bitmap without atomics.
//
// There can be read-write races between heapBitsSetType and things
// that read the heap bitmap like scanobject. However, since
// heapBitsSetType is only used for objects that have not yet been
// made reachable, readers will ignore bits being modified by this
// function. This does mean this function cannot transiently modify
// bits that belong to neighboring objects. Also, on weakly-ordered
// machines, callers must execute a store/store (publication) barrier
// between calling this function and making the object reachable.
func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
	const doubleCheck = false // slow but helpful; enable to test modifications to this code

	if doubleCheck && dataSize%typ.Size_ != 0 {
		throw("heapBitsSetType: dataSize not a multiple of typ.Size")
	}

	if goarch.PtrSize == 8 && size == goarch.PtrSize {
		// It's one word and it has pointers, it must be a pointer.
		// Since all allocated one-word objects are pointers
		// (non-pointers are aggregated into tinySize allocations),
		// (*mspan).initHeapBits sets the pointer bits for us.
		// Nothing to do here.
		if doubleCheck {
			h, addr := heapBitsForAddr(x, size).next()
			if addr != x {
				throw("heapBitsSetType: pointer bit missing")
			}
			_, addr = h.next()
			if addr != 0 {
				throw("heapBitsSetType: second pointer bit found")
			}
		}
		return
	}

	h := writeHeapBitsForAddr(x)

	// Handle GC program.
	if typ.Kind_&kindGCProg != 0 {
		// Expand the gc program into the storage we're going to use for the actual object.
		obj := (*uint8)(unsafe.Pointer(x))
		n := runGCProg(addb(typ.GCData, 4), obj)

		// Use the expanded program to set the heap bits.
		for i := uintptr(0); true; i += typ.Size_ {
			// Copy expanded program to heap bitmap.
			p := obj
			j := n
			for j > 8 {
				h = h.write(uintptr(*p), 8)
				p = add1(p)
				j -= 8
			}
			h = h.write(uintptr(*p), j)

			if i+typ.Size_ == dataSize {
				break // no padding after last element
			}

			// Pad with zeros to the start of the next element.
			h = h.pad(typ.Size_ - n*goarch.PtrSize)
		}

		h.flush(x, size)

		// Erase the expanded GC program.
		memclrNoHeapPointers(unsafe.Pointer(obj), (n+7)/8)
		return
	}

	// Note about sizes:
	//
	// typ.Size is the number of bytes in the object,
	// and typ.PtrBytes is the number of bytes in the prefix
	// of the object that contains pointers. That is, the final
	// typ.Size - typ.PtrBytes bytes contain no pointers.
	// This allows optimization of a common pattern where
	// an object has a small header followed by a large scalar
	// buffer. If we know the pointers are over, we don't have
	// to scan the buffer's heap bitmap at all.
	// The 1-bit ptrmasks are sized to contain only bits for
	// the typ.PtrBytes prefix, zero padded out to a full byte
	// of bitmap. If there is more room in the allocated object,
	// that space is pointerless. The noMorePtrs bitmap will prevent
	// scanning large pointerless tails of an object.
	//
	// Replicated copies are not as nice: if there is an array of
	// objects with scalar tails, all but the last tail does have to
	// be initialized, because there is no way to say "skip forward".
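
	// For example (illustrative): for a type such as
	//
	//	type T struct {
	//		p   *int       // one pointer word
	//		buf [1024]byte // large scalar tail
	//	}
	//
	// typ.PtrBytes is goarch.PtrSize, so only a single pointer bit is
	// written, and the noMorePtrs mark lets the GC skip the tail.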

	ptrs := typ.PtrBytes / goarch.PtrSize
	if typ.Size_ == dataSize { // Single element
		if ptrs <= ptrBits { // Single small element
			m := readUintptr(typ.GCData)
			h = h.write(m, ptrs)
		} else { // Single large element
			p := typ.GCData
			for {
				h = h.write(readUintptr(p), ptrBits)
				p = addb(p, ptrBits/8)
				ptrs -= ptrBits
				if ptrs <= ptrBits {
					break
				}
			}
			m := readUintptr(p)
			h = h.write(m, ptrs)
		}
	} else { // Repeated element
		words := typ.Size_ / goarch.PtrSize // total words, including scalar tail
		if words <= ptrBits { // Repeated small element
			n := dataSize / typ.Size_
			m := readUintptr(typ.GCData)
			// Make larger unit to repeat
			for words <= ptrBits/2 {
				if n&1 != 0 {
					h = h.write(m, words)
				}
				n /= 2
				m |= m << words
				ptrs += words
				words *= 2
				if n == 1 {
					break
				}
			}
			for n > 1 {
				h = h.write(m, words)
				n--
			}
			h = h.write(m, ptrs)
		} else { // Repeated large element
			for i := uintptr(0); true; i += typ.Size_ {
				p := typ.GCData
				j := ptrs
				for j > ptrBits {
					h = h.write(readUintptr(p), ptrBits)
					p = addb(p, ptrBits/8)
					j -= ptrBits
				}
				m := readUintptr(p)
				h = h.write(m, j)
				if i+typ.Size_ == dataSize {
					break // don't need the trailing nonptr bits on the last element.
				}
				// Pad with zeros to the start of the next element.
				h = h.pad(typ.Size_ - typ.PtrBytes)
			}
		}
	}
	h.flush(x, size)

	if doubleCheck {
		h := heapBitsForAddr(x, size)
		for i := uintptr(0); i < size; i += goarch.PtrSize {
			// Compute the pointer bit we want at offset i.
			want := false
			if i < dataSize {
				off := i % typ.Size_
				if off < typ.PtrBytes {
					j := off / goarch.PtrSize
					want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
				}
			}
			if want {
				var addr uintptr
				h, addr = h.next()
				if addr != x+i {
					throw("heapBitsSetType: pointer entry not correct")
				}
			}
		}
		if _, addr := h.next(); addr != 0 {
			throw("heapBitsSetType: extra pointer")
		}
	}
}

var debugPtrmask struct {
	lock mutex
	data *byte
}

// progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
// size is the size of the region described by prog, in bytes.
// The resulting bitvector will have no more than size/goarch.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector {
	n := (size/goarch.PtrSize + 7) / 8
	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
	x[len(x)-1] = 0xa1 // overflow check sentinel
	n = runGCProg(prog, &x[0])
	if x[len(x)-1] != 0xa1 {
		throw("progToPointerMask: overflow")
	}
	return bitvector{int32(n), &x[0]}
}

// Packed GC pointer bitmaps, aka GC programs.
//
// For large types containing arrays, the type information has a
// natural repetition that can be encoded to save space in the
// binary and in the memory representation of the type information.
//
// The encoding is a simple Lempel-Ziv style bytecode machine
// with the following instructions:
//
//	00000000: stop
//	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
//	10000000 n c: repeat the previous n bits c times; n, c are varints
//	1nnnnnnn c: repeat the previous n bits c times; c is a varint
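//
// For example (illustrative): the program 02 03 82 02 00 emits two 1
// bits (from the literal byte 0x03), then repeats the previous 2 bits
// twice more (n=2, c=2), yielding a mask of six pointer words, then stops.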

// runGCProg returns the number of 1-bit entries written to memory.
func runGCProg(prog, dst *byte) uintptr {
	dstStart := dst

	// Bits waiting to be written to memory.
	var bits uintptr
	var nbits uintptr

	p := prog
Run:
	for {
		// Flush accumulated full bytes.
		// The rest of the loop assumes that nbits <= 7.
		for ; nbits >= 8; nbits -= 8 {
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}

		// Process one instruction.
		inst := uintptr(*p)
		p = add1(p)
		n := inst & 0x7F
		if inst&0x80 == 0 {
			// Literal bits; n == 0 means end of program.
			if n == 0 {
				// Program is over.
				break Run
			}
			nbyte := n / 8
			for i := uintptr(0); i < nbyte; i++ {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			}
			if n %= 8; n > 0 {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				nbits += n
			}
			continue Run
		}

		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
		if n == 0 {
			for off := uint(0); ; off += 7 {
				x := uintptr(*p)
				p = add1(p)
				n |= (x & 0x7F) << off
				if x&0x80 == 0 {
					break
				}
			}
		}

		// Count is encoded in a varint in the next bytes.
		c := uintptr(0)
		for off := uint(0); ; off += 7 {
			x := uintptr(*p)
			p = add1(p)
			c |= (x & 0x7F) << off
			if x&0x80 == 0 {
				break
			}
		}
		c *= n // now total number of bits to copy

		// If the number of bits being repeated is small, load them
		// into a register and use that register for the entire loop
		// instead of repeatedly reading from memory.
		// Handling fewer than 8 bits here makes the general loop simpler.
		// The cutoff is goarch.PtrSize*8 - 7 to guarantee that when we add
		// the pattern to a bit buffer holding at most 7 bits (a partial byte)
		// it will not overflow.
		src := dst
		const maxBits = goarch.PtrSize*8 - 7
		if n <= maxBits {
			// Start with bits in output buffer.
			pattern := bits
			npattern := nbits

			// If we need more bits, fetch them from memory.
			src = subtract1(src)
			for npattern < n {
				pattern <<= 8
				pattern |= uintptr(*src)
				src = subtract1(src)
				npattern += 8
			}

			// We started with the whole bit output buffer,
			// and then we loaded bits from whole bytes.
			// Either way, we might now have too many instead of too few.
			// Discard the extra.
			if npattern > n {
				pattern >>= npattern - n
				npattern = n
			}

			// Replicate pattern to at most maxBits.
			if npattern == 1 {
				// One bit being repeated.
				// If the bit is 1, make the pattern all 1s.
				// If the bit is 0, the pattern is already all 0s,
				// but we can claim that the number of bits
				// in the word is equal to the number we need (c),
				// because right shift of bits will zero fill.
				if pattern == 1 {
					pattern = 1<<maxBits - 1
					npattern = maxBits
				} else {
					npattern = c
				}
			} else {
				b := pattern
				nb := npattern
				if nb+nb <= maxBits {
					// Double pattern until the whole uintptr is filled.
					for nb <= goarch.PtrSize*8 {
						b |= b << nb
						nb += nb
					}
					// Trim away incomplete copy of original pattern in high bits.
					// TODO(rsc): Replace with table lookup or loop on systems without divide?
					nb = maxBits / npattern * npattern
					b &= 1<<nb - 1
					pattern = b
					npattern = nb
				}
			}

			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
			// Since pattern contains >8 bits, there will be full bytes to flush
			// on each iteration.
			for ; c >= npattern; c -= npattern {
				bits |= pattern << nbits
				nbits += npattern
				for nbits >= 8 {
					*dst = uint8(bits)
					dst = add1(dst)
					bits >>= 8
					nbits -= 8
				}
			}

			// Add final fragment to bit buffer.
			if c > 0 {
				pattern &= 1<<c - 1
				bits |= pattern << nbits
				nbits += c
			}
			continue Run
		}

		// Repeat; n too large to fit in a register.
		// Since nbits <= 7, we know the first few bytes of repeated data
		// are already written to memory.
		off := n - nbits // n > nbits because n > maxBits and nbits <= 7

		// Leading src fragment.
		src = subtractb(src, (off+7)/8)
		if frag := off & 7; frag != 0 {
			bits |= uintptr(*src) >> (8 - frag) << nbits
			src = add1(src)
			nbits += frag
			c -= frag
		}
		// Main loop: load one byte, write another.
		// The bits are rotating through the bit buffer.
		for i := c / 8; i > 0; i-- {
			bits |= uintptr(*src) << nbits
			src = add1(src)
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}
		// Final src fragment.
		if c %= 8; c > 0 {
			bits |= (uintptr(*src) & (1<<c - 1)) << nbits
			nbits += c
		}
	}

	// Write any final bits out, using full-byte writes, even for the final byte.
	totalBits := (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
	nbits += -nbits & 7
	for ; nbits > 0; nbits -= 8 {
		*dst = uint8(bits)
		dst = add1(dst)
		bits >>= 8
	}
	return totalBits
}

// materializeGCProg allocates space for the (1-bit) pointer bitmask
// for an object of size ptrdata. Then it fills that space with the
// pointer bitmask specified by the program prog.
// The bitmask starts at s.startAddr.
// The result must be deallocated with dematerializeGCProg.
func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
	// Each word of ptrdata needs one bit in the bitmap.
	bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
	// Compute the number of pages needed for bitmapBytes.
	pages := divRoundUp(bitmapBytes, pageSize)
	s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
	runGCProg(addb(prog, 4), (*byte)(unsafe.Pointer(s.startAddr)))
	return s
}

func dematerializeGCProg(s *mspan) {
	mheap_.freeManual(s, spanAllocPtrScalarBits)
}

func dumpGCProg(p *byte) {
	nptr := 0
	for {
		x := *p
		p = add1(p)
		if x == 0 {
			print("\t", nptr, " end\n")
			break
		}
		if x&0x80 == 0 {
			print("\t", nptr, " lit ", x, ":")
			n := int(x+7) / 8
			for i := 0; i < n; i++ {
				print(" ", hex(*p))
				p = add1(p)
			}
			print("\n")
			nptr += int(x)
		} else {
			nbit := int(x &^ 0x80)
			if nbit == 0 {
				for nb := uint(0); ; nb += 7 {
					x := *p
					p = add1(p)
					nbit |= int(x&0x7f) << nb
					if x&0x80 == 0 {
						break
					}
				}
			}
			count := 0
			for nb := uint(0); ; nb += 7 {
				x := *p
				p = add1(p)
				count |= int(x&0x7f) << nb
				if x&0x80 == 0 {
					break
				}
			}
			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
			nptr += nbit * count
		}
	}
}

// reflect_gcbits returns the GC type info for x, for testing.
// The result is the bitmap entries (0 or 1), one entry per byte.
//
//go:linkname reflect_gcbits reflect.gcbits
func reflect_gcbits(x any) []byte {
	return getgcmask(x)
}

// getgcmask returns the GC type info for the pointer stored in ep, for testing.
// If ep points to the stack, only static live information will be returned
// (i.e. not for objects which are only dynamically live stack objects).
func getgcmask(ep any) (mask []byte) {
	e := *efaceOf(&ep)
	p := e.data
	t := e._type
	// data or bss
	for _, datap := range activeModules() {
		// data
		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
			bitmap := datap.gcdatamask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}

		// bss
		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
			bitmap := datap.gcbssmask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}
	}

	// heap
	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
		if s.spanclass.noscan() {
			return nil
		}
		n := s.elemsize
		hbits := heapBitsForAddr(base, n)
		mask = make([]byte, n/goarch.PtrSize)
		for {
			var addr uintptr
			if hbits, addr = hbits.next(); addr == 0 {
				break
			}
			mask[(addr-base)/goarch.PtrSize] = 1
		}
		// Callers expect this mask to end at the last pointer.
		for len(mask) > 0 && mask[len(mask)-1] == 0 {
			mask = mask[:len(mask)-1]
		}
		return
	}

	// stack
	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
		found := false
		var u unwinder
		for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
			if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
				found = true
				break
			}
		}
		if found {
			locals, _, _ := u.frame.getStackMap(false)
			if locals.n == 0 {
				return
			}
			size := uintptr(locals.n) * goarch.PtrSize
			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
				mask[i/goarch.PtrSize] = locals.ptrbit(off)
			}
		}
		return
	}

	// Otherwise, not something the GC knows about.
	// Possibly read-only data, like malloc(0).
	// Must not have pointers.
	return
}

// userArenaHeapBitsSetType is the equivalent of heapBitsSetType but for
// non-slice-backing-store Go values allocated in a user arena chunk. It
// sets up the heap bitmap for the value with type typ allocated at address ptr.
// base is the base address of the arena chunk.
func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, base uintptr) {
	h := writeHeapBitsForAddr(uintptr(ptr))

	// Our last allocation might have ended right at a noMorePtrs mark,
	// which we would not have erased. We need to erase that mark here,
	// because we're going to start adding new heap bitmap bits.
	// We only need to clear one mark, because below we make sure to
	// pad out the bits with zeroes and only write one noMorePtrs bit
	// for each new object.
	// (This is only necessary at noMorePtrs boundaries, as noMorePtrs
	// marks within an object allocated with newAt will be erased by
	// the normal writeHeapBitsForAddr mechanism.)
	//
	// Note that we skip this if this is the first allocation in the
	// arena because there's definitely no previous noMorePtrs mark
	// (in fact, we *must* do this, because we're going to try to back
	// up a pointer to fix this up).
	if uintptr(ptr)%(8*goarch.PtrSize*goarch.PtrSize) == 0 && uintptr(ptr) != base {
		// Back up one pointer and rewrite that pointer. That will
		// cause the writeHeapBits implementation to clear the
		// noMorePtrs bit we need to clear.
		r := heapBitsForAddr(uintptr(ptr)-goarch.PtrSize, goarch.PtrSize)
		_, p := r.next()
		b := uintptr(0)
		if p == uintptr(ptr)-goarch.PtrSize {
			b = 1
		}
		h = writeHeapBitsForAddr(uintptr(ptr) - goarch.PtrSize)
		h = h.write(b, 1)
	}

	p := typ.GCData // start of 1-bit pointer mask (or GC program)
	var gcProgBits uintptr
	if typ.Kind_&kindGCProg != 0 {
		// Expand gc program, using the object itself for storage.
		gcProgBits = runGCProg(addb(p, 4), (*byte)(ptr))
		p = (*byte)(ptr)
	}
	nb := typ.PtrBytes / goarch.PtrSize

	for i := uintptr(0); i < nb; i += ptrBits {
		k := nb - i
		if k > ptrBits {
			k = ptrBits
		}
		h = h.write(readUintptr(addb(p, i/8)), k)
	}
	// Note: we call pad here to ensure we emit explicit 0 bits
	// for the pointerless tail of the object. This ensures that
	// there's only a single noMorePtrs mark for the next object
	// to clear. We don't need to do this to clear stale noMorePtrs
	// markers from previous uses because arena chunk pointer bitmaps
	// are always fully cleared when reused.
	h = h.pad(typ.Size_ - typ.PtrBytes)
	h.flush(uintptr(ptr), typ.Size_)

	if typ.Kind_&kindGCProg != 0 {
		// Zero out temporary ptrmask buffer inside object.
		memclrNoHeapPointers(ptr, (gcProgBits+7)/8)
	}

	// Double-check that the bitmap was written out correctly.
	//
	// Derived from heapBitsSetType.
	const doubleCheck = false
	if doubleCheck {
		size := typ.Size_
		x := uintptr(ptr)
		h := heapBitsForAddr(x, size)
		for i := uintptr(0); i < size; i += goarch.PtrSize {
			// Compute the pointer bit we want at offset i.
			want := false
			off := i % typ.Size_
			if off < typ.PtrBytes {
				j := off / goarch.PtrSize
				want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
			}
			if want {
				var addr uintptr
				h, addr = h.next()
				if addr != x+i {
					throw("userArenaHeapBitsSetType: pointer entry not correct")
				}
			}
		}
		if _, addr := h.next(); addr != 0 {
			throw("userArenaHeapBitsSetType: extra pointer")
		}
	}
}