// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !goexperiment.allocheaders

// Garbage collector: type and heap bitmaps.
//
// Stack, data, and bss bitmaps
//
// Stack frames and global variables in the data and bss sections are
// described by bitmaps with 1 bit per pointer-sized word. A "1" bit
// means the word is a live pointer to be visited by the GC (referred to
// as "pointer"). A "0" bit means the word should be ignored by GC
// (referred to as "scalar", though it could be a dead pointer value).
//
// Heap bitmap
//
// The heap bitmap comprises 1 bit for each pointer-sized word in the heap,
// recording whether a pointer is stored in that word or not. This bitmap
// is stored in the heapArena metadata backing each heap arena.
// That is, if ha is the heapArena for the arena starting at "start",
// then ha.bitmap[0] holds the 64 bits for the 64 words "start"
// through start+63*ptrSize, ha.bitmap[1] holds the entries for
// start+64*ptrSize through start+127*ptrSize, and so on.
// Bits correspond to words in little-endian order. ha.bitmap[0]&1 represents
// the word at "start", ha.bitmap[0]>>1&1 represents the word at start+8, etc.
// (For 32-bit platforms, s/64/32/.)
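//
// For example, the pointer bit for an arbitrary heap address is found with
// the following arithmetic (an illustrative sketch of the lookup that
// heapBitsForAddr performs below, not a separate API):
//
//	word := addr / ptrSize % heapArenaWords // word index within the arena
//	idx := word / 64                        // which bitmap element (32 on 32-bit)
//	off := word % 64                        // bit within that element
//	isPtr := ha.bitmap[idx]>>off&1 != 0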
//
// We also keep a noMorePtrs bitmap which allows us to stop scanning
// the heap bitmap early in certain situations. If ha.noMorePtrs[i]>>j&1
// is 1, then the object containing the last word described by ha.bitmap[8*i+j]
// has no more pointers beyond those described by ha.bitmap[8*i+j].
// If ha.noMorePtrs[i]>>j&1 is set, the entries in ha.bitmap[8*i+j+1] and
// beyond must all be zero until the start of the next object.
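//
// For example (a sketch, not runtime code): a reader holding bitmap word
// ha.bitmap[8*i+j] can stop right there when the mark is set:
//
//	if ha.noMorePtrs[i]>>j&1 != 0 {
//		// ha.bitmap[8*i+j] holds the object's last pointer bits;
//		// later entries are zero until the next object starts.
//	}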
//
// The bitmap for noscan spans is set to all zero at span allocation time.
//
// The bitmap for unallocated objects in scannable spans is not maintained
// (can be stale).

package runtime

import (
	"internal/goarch"
	"runtime/internal/sys"
	"unsafe"
)
// heapArenaPtrScalar contains the per-heapArena pointer/scalar metadata for the GC.
type heapArenaPtrScalar struct {
	// bitmap stores the pointer/scalar bitmap for the words in
	// this arena. See mbitmap.go for a description.
	// This array uses 1 bit per word of heap, or 1.6% of the heap size (for 64-bit).
	bitmap [heapArenaBitmapWords]uintptr

	// If the ith bit of noMorePtrs is true, then there are no more
	// pointers for the object containing the word described by the
	// high bit of bitmap[i].
	// In that case, bitmap[i+1], ... must be zero until the start
	// of the next object.
	// We never operate on these entries using bit-parallel techniques,
	// so it is ok if they are small. Also, they can't be bigger than
	// uint16 because at that size a single noMorePtrs entry
	// represents 8K of memory, the minimum size of a span. Any larger
	// and we'd have to worry about concurrent updates.
	// This array uses 1 bit per word of bitmap, or .024% of the heap size (for 64-bit).
	noMorePtrs [heapArenaBitmapWords / 8]uint8
}
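// The two overhead figures above follow directly from this layout (64-bit
// arithmetic): bitmap costs 1 bit per 8-byte heap word, i.e. 1/64 of the
// heap (~1.6%); noMorePtrs costs 1 bit per 64-bit bitmap word, i.e. 1/64 of
// the bitmap and therefore 1/4096 of the heap (~0.024%).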
// heapBits provides access to the bitmap bits for a single heap word.
// The methods on heapBits take value receivers so that the compiler
// can more easily inline calls to those methods and registerize the
// struct fields independently.
type heapBits struct {
	// heapBits will report on pointers in the range [addr,addr+size).
	// The low bit of mask contains the pointerness of the word at addr
	// (assuming valid>0).
	addr, size uintptr

	// The next few pointer bits representing words starting at addr.
	// Those bits already returned by next() are zeroed.
	mask uintptr
	// Number of bits in mask that are valid. mask is always less than 1<<valid.
	valid uintptr
}
// heapBitsForAddr returns the heapBits for the address addr.
// The caller must ensure [addr,addr+size) is in an allocated span.
// In particular, be careful not to point past the end of an object.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func heapBitsForAddr(addr, size uintptr) heapBits {
	// Find arena
	ai := arenaIndex(addr)
	ha := mheap_.arenas[ai.l1()][ai.l2()]

	// Word index in arena.
	word := addr / goarch.PtrSize % heapArenaWords

	// Word index and bit offset in bitmap array.
	idx := word / ptrBits
	off := word % ptrBits

	// Grab relevant bits of bitmap.
	mask := ha.bitmap[idx] >> off
	valid := ptrBits - off

	// Process depending on where the object ends.
	nptr := size / goarch.PtrSize
	if nptr < valid {
		// Bits for this object end before the end of this bitmap word.
		// Squash bits for the following objects.
		mask &= 1<<(nptr&(ptrBits-1)) - 1
		valid = nptr
	} else if nptr == valid {
		// Bits for this object end at exactly the end of this bitmap word.
		// All good. No need to mask.
	} else { // nptr > valid
		// Bits for this object extend into the next bitmap word. See if there
		// may be any pointers recorded there.
		if uintptr(ha.noMorePtrs[idx/8])>>(idx%8)&1 != 0 {
			// No more pointers in this object after this bitmap word.
			// Update size so we know not to look there.
			size = valid * goarch.PtrSize
		}
	}

	return heapBits{addr: addr, size: size, mask: mask, valid: valid}
}
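// A typical consumer walks an object's pointer words with next (an
// illustrative sketch of the scan loop pattern, not an additional API):
//
//	h := heapBitsForAddr(base, objSize)
//	for {
//		var addr uintptr
//		if h, addr = h.next(); addr == 0 {
//			break // no more pointers in [base, base+objSize)
//		}
//		// ... addr is the address of a pointer word; process it ...
//	}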
// Returns the (absolute) address of the next known pointer and
// a heapBits iterator representing any remaining pointers.
// If there are no more pointers, returns address 0.
// Note that next does not modify h. The caller must record the result.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func (h heapBits) next() (heapBits, uintptr) {
	for {
		if h.mask != 0 {
			var i int
			if goarch.PtrSize == 8 {
				i = sys.TrailingZeros64(uint64(h.mask))
			} else {
				i = sys.TrailingZeros32(uint32(h.mask))
			}
			h.mask ^= uintptr(1) << (i & (ptrBits - 1))
			return h, h.addr + uintptr(i)*goarch.PtrSize
		}

		// Skip words that we've already processed.
		h.addr += h.valid * goarch.PtrSize
		h.size -= h.valid * goarch.PtrSize
		if h.size == 0 {
			return h, 0 // no more pointers
		}

		// Grab more bits and try again.
		h = heapBitsForAddr(h.addr, h.size)
	}
}
// nextFast is like next, but can return 0 even when there are more pointers
// to be found. Callers should call next if nextFast returns 0 as its second
// return value, i.e.
//
//	if addr, h = h.nextFast(); addr == 0 {
//		if addr, h = h.next(); addr == 0 {
//			... no more pointers ...
//		}
//	}
//	... process pointer at addr ...
//
// nextFast is designed to be inlineable.
//
//go:nosplit
func (h heapBits) nextFast() (heapBits, uintptr) {
	// TESTQ/JEQ
	if h.mask == 0 {
		return h, 0
	}
	// BSFQ
	var i int
	if goarch.PtrSize == 8 {
		i = sys.TrailingZeros64(uint64(h.mask))
	} else {
		i = sys.TrailingZeros32(uint32(h.mask))
	}
	// BTCQ
	h.mask ^= uintptr(1) << (i & (ptrBits - 1))
	// LEAQ (XX)(XX*8)
	return h, h.addr + uintptr(i)*goarch.PtrSize
}
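// In hot loops the two are combined (sketch; compare the scan loop in
// scanobject, mgcmark.go): nextFast is tried first so the common case
// inlines, and next is only called to refill or finish:
//
//	for {
//		var addr uintptr
//		if hbits, addr = hbits.nextFast(); addr == 0 {
//			if hbits, addr = hbits.next(); addr == 0 {
//				break // genuinely no more pointers
//			}
//		}
//		// ... process the pointer word at addr ...
//	}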
// bulkBarrierPreWrite executes a write barrier
// for every pointer slot in the memory range [src, src+size),
// using pointer/scalar information from [dst, dst+size).
// This executes the write barriers necessary before a memmove.
// src, dst, and size must be pointer-aligned.
// The range [dst, dst+size) must lie within a single object.
// It does not perform the actual writes.
//
// As a special case, src == 0 indicates that this is being used for a
// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
// barrier.
//
// Callers should call bulkBarrierPreWrite immediately before
// calling memmove(dst, src, size). This function is marked nosplit
// to avoid being preempted; the GC must not stop the goroutine
// between the memmove and the execution of the barriers.
// The caller is also responsible for cgo pointer checks if this
// may be writing Go pointers into non-Go memory.
//
// The pointer bitmap is not maintained for allocations containing
// no pointers at all; any caller of bulkBarrierPreWrite must first
// make sure the underlying allocation contains pointers, usually
// by checking typ.PtrBytes.
//
// Callers must perform cgo checks if goexperiment.CgoCheck2.
//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.enabled {
		return
	}
	if s := spanOf(dst); s == nil {
		// If dst is a global, use the data or BSS bitmaps to
		// execute write barriers.
		for _, datap := range activeModules() {
			if datap.data <= dst && dst < datap.edata {
				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
				return
			}
		}
		for _, datap := range activeModules() {
			if datap.bss <= dst && dst < datap.ebss {
				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
				return
			}
		}
		return
	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
		// dst was heap memory at some point, but isn't now.
		// It can't be a global. It must be either our stack,
		// or in the case of direct channel sends, it could be
		// another stack. Either way, no need for barriers.
		// This will also catch if dst is in a freed span,
		// though that should never happen.
		return
	}

	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst, size)
	if src == 0 {
		for {
			var addr uintptr
			if h, addr = h.next(); addr == 0 {
				break
			}
			dstx := (*uintptr)(unsafe.Pointer(addr))
			p := buf.get1()
			p[0] = *dstx
		}
	} else {
		for {
			var addr uintptr
			if h, addr = h.next(); addr == 0 {
				break
			}
			dstx := (*uintptr)(unsafe.Pointer(addr))
			srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
			p := buf.get2()
			p[0] = *dstx
			p[1] = *srcx
		}
	}
}
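// A typical call site pairs the barrier with the following memmove
// (an illustrative, simplified sketch of the pattern used by typedmemmove
// in this package; see mbarrier.go for the real sequence):
//
//	if writeBarrier.enabled && typ.PtrBytes != 0 {
//		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
//	}
//	memmove(dst, src, typ.Size_)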
// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
// does not execute write barriers for [dst, dst+size).
//
// In addition to the requirements of bulkBarrierPreWrite
// callers need to ensure [dst, dst+size) is zeroed.
//
// This is used for special cases where e.g. dst was just
// created and zeroed with malloc.
//
//go:nosplit
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.enabled {
		return
	}
	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst, size)
	for {
		var addr uintptr
		if h, addr = h.next(); addr == 0 {
			break
		}
		srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
		p := buf.get1()
		p[0] = *srcx
	}
}
// initHeapBits initializes the heap bitmap for a span.
// If this is a span of single pointer allocations, it initializes all
// words to pointer. If forceClear is true, clears all bits.
func (s *mspan) initHeapBits(forceClear bool) {
	if forceClear || s.spanclass.noscan() {
		// Set all the pointer bits to zero. We do this once
		// when the span is allocated so we don't have to do it
		// for each object allocation.
		base := s.base()
		size := s.npages * pageSize
		h := writeHeapBitsForAddr(base)
		h.flush(base, size)
		return
	}
	isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
	if !isPtrs {
		return // nothing to do
	}
	h := writeHeapBitsForAddr(s.base())
	size := s.npages * pageSize
	nptrs := size / goarch.PtrSize
	for i := uintptr(0); i < nptrs; i += ptrBits {
		h = h.write(^uintptr(0), ptrBits)
	}
	h.flush(s.base(), size)
}
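// For example, on 64-bit a span of single-pointer objects (elemsize == 8,
// say a span full of *T values) gets an all-ones bitmap here, which is why
// heapBitsSetType has nothing to do for one-word allocations below.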
// writeHeapBits handles buffering and writing bits to the heap bitmap
// for a new object.
type writeHeapBits struct {
	addr  uintptr // address that the low bit of mask represents the pointer state of.
	mask  uintptr // some pointer bits starting at the address addr.
	valid uintptr // number of bits in mask that are valid (including low)
	low   uintptr // number of low-order bits to not overwrite
}
func writeHeapBitsForAddr(addr uintptr) (h writeHeapBits) {
	// We start writing bits maybe in the middle of a heap bitmap word.
	// Remember how many bits into the word we started, so we can be sure
	// not to overwrite the previous bits.
	h.low = addr / goarch.PtrSize % ptrBits

	// round down to heap word that starts the bitmap word.
	h.addr = addr - h.low*goarch.PtrSize

	// We don't have any bits yet.
	h.mask = 0
	h.valid = h.low

	return
}
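// Worked example (illustrative, 64-bit, ptrBits = 64): for an addr 18 words
// past a bitmap-word boundary, h.low = 18, h.addr = addr - 18*8, and
// h.valid starts at 18, so the 18 bits already written for earlier objects
// in this bitmap word survive the eventual flush.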
// write appends the pointerness of the next valid pointer slots
// using the low valid bits of bits. 1=pointer, 0=scalar.
func (h writeHeapBits) write(bits, valid uintptr) writeHeapBits {
	if h.valid+valid <= ptrBits {
		// Fast path - just accumulate the bits.
		h.mask |= bits << h.valid
		h.valid += valid
		return h
	}
	// Too many bits to fit in this word. Write the current word
	// out and move on to the next word.

	data := h.mask | bits<<h.valid       // mask for this word
	h.mask = bits >> (ptrBits - h.valid) // leftover for next word
	h.valid += valid - ptrBits           // have h.valid+valid bits, writing ptrBits of them

	// Flush mask to the memory bitmap.
	// TODO: figure out how to cache arena lookup.
	ai := arenaIndex(h.addr)
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
	m := uintptr(1)<<h.low - 1
	ha.bitmap[idx] = ha.bitmap[idx]&m | data
	// Note: no synchronization required for this write because
	// the allocator has exclusive access to the page, and the bitmap
	// entries are all for a single page. Also, visibility of these
	// writes is guaranteed by the publication barrier in mallocgc.

	// Clear noMorePtrs bit, since we're going to be writing bits
	// into the following word.
	ha.noMorePtrs[idx/8] &^= uint8(1) << (idx % 8)
	// Note: same as above

	// Move to next word of bitmap.
	h.addr += ptrBits * goarch.PtrSize
	h.low = 0
	return h
}
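// Spill arithmetic, worked through (illustrative, 64-bit): with h.valid = 60
// bits buffered, write(bits, 10) stores the low 4 bits of bits into the
// flushed word via data, keeps the high 6 bits in h.mask for the next
// bitmap word, and leaves h.valid = 60 + 10 - 64 = 6.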
// Add padding of size bytes.
func (h writeHeapBits) pad(size uintptr) writeHeapBits {
	if size == 0 {
		return h
	}
	words := size / goarch.PtrSize
	for words > ptrBits {
		h = h.write(0, ptrBits)
		words -= ptrBits
	}
	return h.write(0, words)
}
// Flush the bits that have been written, and add zeros as needed
// to cover the full object [addr, addr+size).
func (h writeHeapBits) flush(addr, size uintptr) {
	// zeros counts the number of bits needed to represent the object minus the
	// number of bits we've already written. This is the number of 0 bits
	// that need to be added.
	zeros := (addr+size-h.addr)/goarch.PtrSize - h.valid

	// Add zero bits up to the bitmap word boundary
	if zeros > 0 {
		z := ptrBits - h.valid
		if z > zeros {
			z = zeros
		}
		h.valid += z
		zeros -= z
	}

	// Find word in bitmap that we're going to write.
	ai := arenaIndex(h.addr)
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords

	// Write remaining bits.
	if h.valid != h.low {
		m := uintptr(1)<<h.low - 1      // don't clear existing bits below "low"
		m |= ^(uintptr(1)<<h.valid - 1) // don't clear existing bits above "valid"
		ha.bitmap[idx] = ha.bitmap[idx]&m | h.mask
	}
	if zeros == 0 {
		return
	}

	// Record in the noMorePtrs map that there won't be any more 1 bits,
	// so readers can stop early.
	ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)

	// Advance to next bitmap word.
	h.addr += ptrBits * goarch.PtrSize

	// Continue on writing zeros for the rest of the object.
	// For standard use of the ptr bits this is not required, as
	// the bits are read from the beginning of the object. Some uses,
	// like noscan spans, oblets, bulk write barriers, and cgocheck, might
	// start mid-object, so these writes are still required.
	for {
		// Write zero bits.
		ai := arenaIndex(h.addr)
		ha := mheap_.arenas[ai.l1()][ai.l2()]
		idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
		if zeros < ptrBits {
			ha.bitmap[idx] &^= uintptr(1)<<zeros - 1
			break
		} else if zeros == ptrBits {
			ha.bitmap[idx] = 0
			break
		} else {
			ha.bitmap[idx] = 0
			zeros -= ptrBits
		}
		ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
		h.addr += ptrBits * goarch.PtrSize
	}
}
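// For example (illustrative, 64-bit): flushing a 100-word object after
// buffering only its first 5 pointer bits leaves zeros = 95. The current
// bitmap word is topped up with 59 zero bits and written out, its
// noMorePtrs bit is set so readers can stop early, and the remaining 36
// zero bits are cleared in the following bitmap word.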
// heapBitsSetType records that the new allocation [x, x+size)
// holds in [x, x+dataSize) one or more values of type typ.
// (The number of values is given by dataSize / typ.Size.)
// If dataSize < size, the fragment [x+dataSize, x+size) is
// recorded as non-pointer data.
// It is known that the type has pointers somewhere;
// malloc does not call heapBitsSetType when there are no pointers,
// because all free objects are marked as noscan during
// heapBitsSweepSpan.
//
// There can only be one allocation from a given span active at a time,
// and the bitmap for a span always falls on word boundaries,
// so there are no write-write races for access to the heap bitmap.
// Hence, heapBitsSetType can access the bitmap without atomics.
//
// There can be read-write races between heapBitsSetType and things
// that read the heap bitmap like scanobject. However, since
// heapBitsSetType is only used for objects that have not yet been
// made reachable, readers will ignore bits being modified by this
// function. This does mean this function cannot transiently modify
// bits that belong to neighboring objects. Also, on weakly-ordered
// machines, callers must execute a store/store (publication) barrier
// between calling this function and making the object reachable.
func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
	const doubleCheck = false // slow but helpful; enable to test modifications to this code

	if doubleCheck && dataSize%typ.Size_ != 0 {
		throw("heapBitsSetType: dataSize not a multiple of typ.Size")
	}

	if goarch.PtrSize == 8 && size == goarch.PtrSize {
		// It's one word and it has pointers, it must be a pointer.
		// Since all allocated one-word objects are pointers
		// (non-pointers are aggregated into tinySize allocations),
		// (*mspan).initHeapBits sets the pointer bits for us.
		// Nothing to do here.
		if doubleCheck {
			h, addr := heapBitsForAddr(x, size).next()
			if addr != x {
				throw("heapBitsSetType: pointer bit missing")
			}
			_, addr = h.next()
			if addr != 0 {
				throw("heapBitsSetType: second pointer bit found")
			}
		}
		return
	}

	h := writeHeapBitsForAddr(x)

	// Handle GC program.
	if typ.Kind_&kindGCProg != 0 {
		// Expand the gc program into the storage we're going to use for the actual object.
		obj := (*uint8)(unsafe.Pointer(x))
		n := runGCProg(addb(typ.GCData, 4), obj)
		// Use the expanded program to set the heap bits.
		for i := uintptr(0); true; i += typ.Size_ {
			// Copy expanded program to heap bitmap.
			p := obj
			j := n
			for j > 8 {
				h = h.write(uintptr(*p), 8)
				p = add1(p)
				j -= 8
			}
			h = h.write(uintptr(*p), j)

			if i+typ.Size_ == dataSize {
				break // no padding after last element
			}

			// Pad with zeros to the start of the next element.
			h = h.pad(typ.Size_ - n*goarch.PtrSize)
		}

		h.flush(x, size)

		// Erase the expanded GC program.
		memclrNoHeapPointers(unsafe.Pointer(obj), (n+7)/8)
		return
	}
	// Note about sizes:
	//
	// typ.Size is the number of bytes in the object,
	// and typ.PtrBytes is the number of bytes in the prefix
	// of the object that contains pointers. That is, the final
	// typ.Size - typ.PtrBytes bytes contain no pointers.
	// This allows optimization of a common pattern where
	// an object has a small header followed by a large scalar
	// buffer. If we know the pointers are over, we don't have
	// to scan the buffer's heap bitmap at all.
	// The 1-bit ptrmasks are sized to contain only bits for
	// the typ.PtrBytes prefix, zero padded out to a full byte
	// of bitmap. If there is more room in the allocated object,
	// that space is pointerless. The noMorePtrs bitmap will prevent
	// scanning large pointerless tails of an object.
	//
	// Replicated copies are not as nice: if there is an array of
	// objects with scalar tails, all but the last tail does have to
	// be initialized, because there is no way to say "skip forward".
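	//
	// To make that concrete (an illustrative type, not from the runtime):
	//
	//	type T struct {
	//		p   *int      // one pointer word
	//		buf [64]int64 // scalar tail
	//	}
	//
	// A single T writes just one pointer bit and lets noMorePtrs cover the
	// tail, while a [4]T must explicitly write the zero bits for the first
	// three tails.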
	ptrs := typ.PtrBytes / goarch.PtrSize
	if typ.Size_ == dataSize { // Single element
		if ptrs <= ptrBits { // Single small element
			m := readUintptr(typ.GCData)
			h = h.write(m, ptrs)
		} else { // Single large element
			p := typ.GCData
			for {
				h = h.write(readUintptr(p), ptrBits)
				p = addb(p, ptrBits/8)
				ptrs -= ptrBits
				if ptrs <= ptrBits {
					break
				}
			}
			m := readUintptr(p)
			h = h.write(m, ptrs)
		}
	} else { // Repeated element
		words := typ.Size_ / goarch.PtrSize // total words, including scalar tail
		if words <= ptrBits { // Repeated small element
			n := dataSize / typ.Size_
			m := readUintptr(typ.GCData)
			// Make larger unit to repeat
			for words <= ptrBits/2 {
				if n&1 != 0 {
					h = h.write(m, words)
				}
				n /= 2
				m |= m << words
				ptrs += words
				words *= 2
				if n == 1 {
					break
				}
			}
			for n > 1 {
				h = h.write(m, words)
				n--
			}
			h = h.write(m, ptrs)
		} else { // Repeated large element
			for i := uintptr(0); true; i += typ.Size_ {
				p := typ.GCData
				j := ptrs
				for j > ptrBits {
					h = h.write(readUintptr(p), ptrBits)
					p = addb(p, ptrBits/8)
					j -= ptrBits
				}
				m := readUintptr(p)
				h = h.write(m, j)
				if i+typ.Size_ == dataSize {
					break // don't need the trailing nonptr bits on the last element.
				}
				// Pad with zeros to the start of the next element.
				h = h.pad(typ.Size_ - typ.PtrBytes)
			}
		}
	}
	h.flush(x, dataSize)

	if doubleCheck {
		h := heapBitsForAddr(x, size)
		for i := uintptr(0); i < size; i += goarch.PtrSize {
			// Compute the pointer bit we want at offset i.
			want := false
			if i < dataSize {
				off := i % typ.Size_
				if off < typ.PtrBytes {
					j := off / goarch.PtrSize
					want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
				}
			}
			if want {
				var addr uintptr
				h, addr = h.next()
				if addr != x+i {
					throw("heapBitsSetType: pointer entry not correct")
				}
			}
		}
		if _, addr := h.next(); addr != 0 {
			throw("heapBitsSetType: extra pointer")
		}
	}
}
// Returns GC type info for the pointer stored in ep for testing.
// If ep points to the stack, only static live information will be returned
// (i.e. not for objects which are only dynamically live stack objects).
func getgcmask(ep any) (mask []byte) {
	e := *efaceOf(&ep)
	p := e.data
	t := e._type
	// data or bss
	for _, datap := range activeModules() {
		// data
		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
			bitmap := datap.gcdatamask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}

		// bss
		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
			bitmap := datap.gcbssmask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}
	}

	// heap
	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
		if s.spanclass.noscan() {
			return nil
		}
		n := s.elemsize
		hbits := heapBitsForAddr(base, n)
		mask = make([]byte, n/goarch.PtrSize)
		for {
			var addr uintptr
			if hbits, addr = hbits.next(); addr == 0 {
				break
			}
			mask[(addr-base)/goarch.PtrSize] = 1
		}
		// Callers expect this mask to end at the last pointer.
		for len(mask) > 0 && mask[len(mask)-1] == 0 {
			mask = mask[:len(mask)-1]
		}
		return
	}

	// stack
	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
		found := false
		var u unwinder
		for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
			if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
				found = true
				break
			}
		}
		if found {
			locals, _, _ := u.frame.getStackMap(false)
			if locals.n == 0 {
				return
			}
			size := uintptr(locals.n) * goarch.PtrSize
			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
				mask[i/goarch.PtrSize] = locals.ptrbit(off)
			}
		}
		return
	}

	// otherwise, not something the GC knows about.
	// possibly read-only data, like malloc(0).
	// must not have pointers
	return
}
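// Illustrative use (getgcmask is reached from runtime tests through an
// exported wrapper; the snippet below calls it directly, and the expected
// mask is an assumption based on the layout rules above):
//
//	var g struct {
//		p *int // pointer word -> 1
//		n int  // scalar word  -> 0
//	}
//	// For a global like g, mask would be [1 0]: one byte per word.
//	mask := getgcmask(&g)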
// userArenaHeapBitsSetType is the equivalent of heapBitsSetType but for
// non-slice-backing-store Go values allocated in a user arena chunk. It
// sets up the heap bitmap for the value with type typ allocated at address ptr.
// base is the base address of the arena chunk.
func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, base uintptr) {
	h := writeHeapBitsForAddr(uintptr(ptr))

	// Our last allocation might have ended right at a noMorePtrs mark,
	// which we would not have erased. We need to erase that mark here,
	// because we're going to start adding new heap bitmap bits.
	// We only need to clear one mark, because below we make sure to
	// pad out the bits with zeroes and only write one noMorePtrs bit
	// for each new object.
	// (This is only necessary at noMorePtrs boundaries, as noMorePtrs
	// marks within an object allocated with newAt will be erased by
	// the normal writeHeapBitsForAddr mechanism.)
	//
	// Note that we skip this if this is the first allocation in the
	// arena because there's definitely no previous noMorePtrs mark
	// (in fact, we *must* do this, because we're going to try to back
	// up a pointer to fix this up).
	if uintptr(ptr)%(8*goarch.PtrSize*goarch.PtrSize) == 0 && uintptr(ptr) != base {
		// Back up one pointer and rewrite that pointer. That will
		// cause the writeHeapBits implementation to clear the
		// noMorePtrs bit we need to clear.
		r := heapBitsForAddr(uintptr(ptr)-goarch.PtrSize, goarch.PtrSize)
		_, p := r.next()
		b := uintptr(0)
		if p == uintptr(ptr)-goarch.PtrSize {
			b = 1
		}
		h = writeHeapBitsForAddr(uintptr(ptr) - goarch.PtrSize)
		h = h.write(b, 1)
	}

	p := typ.GCData // start of 1-bit pointer mask (or GC program)
	var gcProgBits uintptr
	if typ.Kind_&kindGCProg != 0 {
		// Expand gc program, using the object itself for storage.
		gcProgBits = runGCProg(addb(p, 4), (*byte)(ptr))
		p = (*byte)(ptr)
	}
	nb := typ.PtrBytes / goarch.PtrSize

	for i := uintptr(0); i < nb; i += ptrBits {
		k := nb - i
		if k > ptrBits {
			k = ptrBits
		}
		h = h.write(readUintptr(addb(p, i/8)), k)
	}
	// Note: we call pad here to ensure we emit explicit 0 bits
	// for the pointerless tail of the object. This ensures that
	// there's only a single noMorePtrs mark for the next object
	// to clear. We don't need to do this to clear stale noMorePtrs
	// markers from previous uses because arena chunk pointer bitmaps
	// are always fully cleared when reused.
	h = h.pad(typ.Size_ - typ.PtrBytes)
	h.flush(uintptr(ptr), typ.Size_)

	if typ.Kind_&kindGCProg != 0 {
		// Zero out temporary ptrmask buffer inside object.
		memclrNoHeapPointers(ptr, (gcProgBits+7)/8)
	}
	// Double-check that the bitmap was written out correctly.
	//
	// Derived from heapBitsSetType.
	const doubleCheck = false
	if doubleCheck {
		size := typ.Size_
		x := uintptr(ptr)
		h := heapBitsForAddr(x, size)
		for i := uintptr(0); i < size; i += goarch.PtrSize {
			// Compute the pointer bit we want at offset i.
			want := false
			off := i % typ.Size_
			if off < typ.PtrBytes {
				j := off / goarch.PtrSize
				want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
			}
			if want {
				var addr uintptr
				h, addr = h.next()
				if addr != x+i {
					throw("userArenaHeapBitsSetType: pointer entry not correct")
				}
			}
		}
		if _, addr := h.next(); addr != 0 {
			throw("userArenaHeapBitsSetType: extra pointer")
		}
	}
}