// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !goexperiment.allocheaders

// Garbage collector: type and heap bitmaps.
//
// Stack, data, and bss bitmaps
//
// Stack frames and global variables in the data and bss sections are
// described by bitmaps with 1 bit per pointer-sized word. A "1" bit
// means the word is a live pointer to be visited by the GC (referred to
// as "pointer"). A "0" bit means the word should be ignored by GC
// (referred to as "scalar", though it could be a dead pointer value).
//
// Heap bitmap
//
// The heap bitmap comprises 1 bit for each pointer-sized word in the heap,
// recording whether a pointer is stored in that word or not. This bitmap
// is stored in the heapArena metadata backing each heap arena.
// That is, if ha is the heapArena for the arena starting at "start",
// then ha.bitmap[0] holds the 64 bits for the 64 words "start"
// through start+63*ptrSize, ha.bitmap[1] holds the entries for
// start+64*ptrSize through start+127*ptrSize, and so on.
// Bits correspond to words in little-endian order. ha.bitmap[0]&1 represents
// the word at "start", ha.bitmap[0]>>1&1 represents the word at start+8, etc.
// (For 32-bit platforms, s/64/32/.)
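//
// For example, on 64-bit (an illustrative sketch of the index arithmetic;
// heapBitsForAddr below performs the real lookup):
//
//	word := (addr - start) / goarch.PtrSize // word index within the arena
//	idx := word / ptrBits                   // which ha.bitmap element
//	off := word % ptrBits                   // bit position within that element
//	isPtr := ha.bitmap[idx]>>off&1 != 0     // pointer/scalar bit for addr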
//
// We also keep a noMorePtrs bitmap which allows us to stop scanning
// the heap bitmap early in certain situations. If ha.noMorePtrs[i]>>j&1
// is 1, then the object containing the last word described by ha.bitmap[8*i+j]
// has no more pointers beyond those described by ha.bitmap[8*i+j].
// If ha.noMorePtrs[i]>>j&1 is set, the entries in ha.bitmap[8*i+j+1] and
// beyond must all be zero until the start of the next object.
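//
// For example (a sketch; heapBitsForAddr below performs this check):
//
//	if ha.noMorePtrs[idx/8]>>(idx%8)&1 != 0 {
//		// No pointer bits are set past ha.bitmap[idx] for this object,
//		// so scanning can stop at the end of that bitmap word.
//	}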
//
// The bitmap for noscan spans is set to all zero at span allocation time.
//
// The bitmap for unallocated objects in scannable spans is not maintained.

package runtime

import (
	"internal/goarch"
	"runtime/internal/sys"
	"unsafe"
)

const (
	// For compatibility with the allocheaders GOEXPERIMENT.
	mallocHeaderSize       = 0
	minSizeForMallocHeader = ^uintptr(0)
)

// For compatibility with the allocheaders GOEXPERIMENT.
//
//go:nosplit
func heapBitsInSpan(_ uintptr) bool {
	return false
}

// heapArenaPtrScalar contains the per-heapArena pointer/scalar metadata for the GC.
type heapArenaPtrScalar struct {
	// bitmap stores the pointer/scalar bitmap for the words in
	// this arena. See mbitmap.go for a description.
	// This array uses 1 bit per word of heap, or 1.6% of the heap size (for 64-bit).
	bitmap [heapArenaBitmapWords]uintptr

	// If the ith bit of noMorePtrs is true, then there are no more
	// pointers for the object containing the word described by the
	// high bit of bitmap[i].
	// In that case, bitmap[i+1], ... must be zero until the start
	// of the next object.
	// We never operate on these entries using bit-parallel techniques,
	// so it is ok if they are small. Also, they can't be bigger than
	// uint16 because at that size a single noMorePtrs entry
	// represents 8K of memory, the minimum size of a span. Any larger
	// and we'd have to worry about concurrent updates.
	// This array uses 1 bit per word of bitmap, or .024% of the heap size (for 64-bit).
	noMorePtrs [heapArenaBitmapWords / 8]uint8
}

// heapBits provides access to the bitmap bits for a single heap word.
// The methods on heapBits take value receivers so that the compiler
// can more easily inline calls to those methods and registerize the
// struct fields independently.
type heapBits struct {
	// heapBits will report on pointers in the range [addr,addr+size).
	// The low bit of mask contains the pointerness of the word at addr
	// (assuming valid>0).
	addr, size uintptr

	// The next few pointer bits representing words starting at addr.
	// Those bits already returned by next() are zeroed.
	mask uintptr
	// Number of bits in mask that are valid. mask is always less than 1<<valid.
	valid uintptr
}

// heapBitsForAddr returns the heapBits for the address addr.
// The caller must ensure [addr,addr+size) is in an allocated span.
// In particular, be careful not to point past the end of an object.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func heapBitsForAddr(addr, size uintptr) heapBits {
	ai := arenaIndex(addr)
	ha := mheap_.arenas[ai.l1()][ai.l2()]

	// Word index in arena.
	word := addr / goarch.PtrSize % heapArenaWords

	// Word index and bit offset in bitmap array.
	idx := word / ptrBits
	off := word % ptrBits

	// Grab relevant bits of bitmap.
	mask := ha.bitmap[idx] >> off
	valid := ptrBits - off

	// Process depending on where the object ends.
	nptr := size / goarch.PtrSize
	if nptr < valid {
		// Bits for this object end before the end of this bitmap word.
		// Squash bits for the following objects.
		mask &= 1<<(nptr&(ptrBits-1)) - 1
		valid = nptr
	} else if nptr == valid {
		// Bits for this object end at exactly the end of this bitmap word.
		// All good.
	} else {
		// Bits for this object extend into the next bitmap word. See if there
		// may be any pointers recorded there.
		if uintptr(ha.noMorePtrs[idx/8])>>(idx%8)&1 != 0 {
			// No more pointers in this object after this bitmap word.
			// Update size so we know not to look there.
			size = valid * goarch.PtrSize
		}
	}

	return heapBits{addr: addr, size: size, mask: mask, valid: valid}
}

// Returns the (absolute) address of the next known pointer and
// a heapBits iterator representing any remaining pointers.
// If there are no more pointers, returns address 0.
// Note that next does not modify h. The caller must record the result.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func (h heapBits) next() (heapBits, uintptr) {
	for {
		if h.mask != 0 {
			var i int
			if goarch.PtrSize == 8 {
				i = sys.TrailingZeros64(uint64(h.mask))
			} else {
				i = sys.TrailingZeros32(uint32(h.mask))
			}
			h.mask ^= uintptr(1) << (i & (ptrBits - 1))
			return h, h.addr + uintptr(i)*goarch.PtrSize
		}

		// Skip words that we've already processed.
		h.addr += h.valid * goarch.PtrSize
		h.size -= h.valid * goarch.PtrSize
		if h.size == 0 {
			return h, 0 // no more pointers
		}

		// Grab more bits and try again.
		h = heapBitsForAddr(h.addr, h.size)
	}
}

// nextFast is like next, but can return 0 even when there are more pointers
// to be found. Callers should call next if nextFast returns 0 as its second
// return value, i.e.
//
//	if addr, h = h.nextFast(); addr == 0 {
//		if addr, h = h.next(); addr == 0 {
//			... no more pointers ...
//		}
//	}
//	... process pointer at addr ...
//
// nextFast is designed to be inlineable.
//
//go:nosplit
func (h heapBits) nextFast() (heapBits, uintptr) {
	if h.mask == 0 {
		return h, 0
	}
	var i int
	if goarch.PtrSize == 8 {
		i = sys.TrailingZeros64(uint64(h.mask))
	} else {
		i = sys.TrailingZeros32(uint32(h.mask))
	}
	h.mask ^= uintptr(1) << (i & (ptrBits - 1))
	return h, h.addr + uintptr(i)*goarch.PtrSize
}

// bulkBarrierPreWrite executes a write barrier
// for every pointer slot in the memory range [src, src+size),
// using pointer/scalar information from [dst, dst+size).
// This executes the write barriers necessary before a memmove.
// src, dst, and size must be pointer-aligned.
// The range [dst, dst+size) must lie within a single object.
// It does not perform the actual writes.
//
// As a special case, src == 0 indicates that this is being used for a
// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
// barrier.
//
// Callers should call bulkBarrierPreWrite immediately before
// calling memmove(dst, src, size). This function is marked nosplit
// to avoid being preempted; the GC must not stop the goroutine
// between the memmove and the execution of the barriers.
// The caller is also responsible for cgo pointer checks if this
// may be writing Go pointers into non-Go memory.
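//
// For example, a typical caller (a sketch; compare typedmemmove in this
// package) looks like:
//
//	if writeBarrier.enabled && typ.PtrBytes != 0 {
//		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
//	}
//	memmove(dst, src, typ.Size_)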
//
// The pointer bitmap is not maintained for allocations containing
// no pointers at all; any caller of bulkBarrierPreWrite must first
// make sure the underlying allocation contains pointers, usually
// by checking typ.PtrBytes.
//
// Callers must perform cgo checks if goexperiment.CgoCheck2.
//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.enabled {
		return
	}
	if s := spanOf(dst); s == nil {
		// If dst is a global, use the data or BSS bitmaps to
		// execute write barriers.
		for _, datap := range activeModules() {
			if datap.data <= dst && dst < datap.edata {
				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
				return
			}
		}
		for _, datap := range activeModules() {
			if datap.bss <= dst && dst < datap.ebss {
				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
				return
			}
		}
		return
	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
		// dst was heap memory at some point, but isn't now.
		// It can't be a global. It must be either our stack,
		// or in the case of direct channel sends, it could be
		// another stack. Either way, no need for barriers.
		// This will also catch if dst is in a freed span,
		// though that should never happen.
		return
	}

	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst, size)
	if src == 0 {
		for {
			var addr uintptr
			if h, addr = h.next(); addr == 0 {
				break
			}
			dstx := (*uintptr)(unsafe.Pointer(addr))
			p := buf.get1()
			p[0] = *dstx
		}
	} else {
		for {
			var addr uintptr
			if h, addr = h.next(); addr == 0 {
				break
			}
			dstx := (*uintptr)(unsafe.Pointer(addr))
			srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
			p := buf.get2()
			p[0] = *dstx
			p[1] = *srcx
		}
	}
}

// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
// does not execute write barriers for [dst, dst+size).
//
// In addition to the requirements of bulkBarrierPreWrite
// callers need to ensure [dst, dst+size) is zeroed.
//
// This is used for special cases where e.g. dst was just
// created and zeroed with malloc.
//
//go:nosplit
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.enabled {
		return
	}
	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst, size)
	for {
		var addr uintptr
		if h, addr = h.next(); addr == 0 {
			break
		}
		srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
		p := buf.get1()
		p[0] = *srcx
	}
}

// initHeapBits initializes the heap bitmap for a span.
// If this is a span of single pointer allocations, it initializes all
// words to pointer. If forceClear is true, clears all bits.
func (s *mspan) initHeapBits(forceClear bool) {
	if forceClear || s.spanclass.noscan() {
		// Set all the pointer bits to zero. We do this once
		// when the span is allocated so we don't have to do it
		// for each object allocation.
		base := s.base()
		size := s.npages * pageSize
		h := writeHeapBitsForAddr(base)
		h.flush(base, size)
		return
	}
	isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
	if !isPtrs {
		return // nothing to do
	}
	h := writeHeapBitsForAddr(s.base())
	size := s.npages * pageSize
	nptrs := size / goarch.PtrSize
	for i := uintptr(0); i < nptrs; i += ptrBits {
		h = h.write(^uintptr(0), ptrBits)
	}
	h.flush(s.base(), size)
}

// writeHeapBits is a buffer of pointer/scalar bits being assembled for the
// heap bitmap; write accumulates bits and flush commits them.
type writeHeapBits struct {
	addr  uintptr // address that the low bit of mask represents the pointer state of.
	mask  uintptr // some pointer bits starting at the address addr.
	valid uintptr // number of bits in mask that are valid (including low)
	low   uintptr // number of low-order bits to not overwrite
}
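
// A typical use of writeHeapBits (an illustrative sketch; heapBitsSetType
// and initHeapBits in this file follow this pattern):
//
//	h := writeHeapBitsForAddr(x) // x is the start of a new object
//	h = h.write(0b101, 3)        // first three words: ptr, scalar, ptr
//	h.flush(x, size)             // remaining words of [x, x+size) are scalar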

func writeHeapBitsForAddr(addr uintptr) (h writeHeapBits) {
	// We start writing bits maybe in the middle of a heap bitmap word.
	// Remember how many bits into the word we started, so we can be sure
	// not to overwrite the previous bits.
	h.low = addr / goarch.PtrSize % ptrBits

	// round down to heap word that starts the bitmap word.
	h.addr = addr - h.low*goarch.PtrSize

	// We don't have any bits yet.
	h.mask = 0
	h.valid = h.low

	return
}

// write appends the pointerness of the next valid pointer slots
// using the low valid bits of bits. 1=pointer, 0=scalar.
func (h writeHeapBits) write(bits, valid uintptr) writeHeapBits {
	if h.valid+valid <= ptrBits {
		// Fast path - just accumulate the bits.
		h.mask |= bits << h.valid
		h.valid += valid
		return h
	}
	// Too many bits to fit in this word. Write the current word
	// out and move on to the next word.

	data := h.mask | bits<<h.valid       // mask for this word
	h.mask = bits >> (ptrBits - h.valid) // leftover for next word
	h.valid += valid - ptrBits           // have h.valid+valid bits, writing ptrBits of them

	// Flush mask to the memory bitmap.
	// TODO: figure out how to cache arena lookup.
	ai := arenaIndex(h.addr)
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
	m := uintptr(1)<<h.low - 1
	ha.bitmap[idx] = ha.bitmap[idx]&m | data
	// Note: no synchronization required for this write because
	// the allocator has exclusive access to the page, and the bitmap
	// entries are all for a single page. Also, visibility of these
	// writes is guaranteed by the publication barrier in mallocgc.

	// Clear noMorePtrs bit, since we're going to be writing bits
	// into the following word.
	ha.noMorePtrs[idx/8] &^= uint8(1) << (idx % 8)
	// Note: same as above

	// Move to next word of bitmap.
	h.addr += ptrBits * goarch.PtrSize
	h.low = 0
	return h
}

// Add padding of size bytes.
func (h writeHeapBits) pad(size uintptr) writeHeapBits {
	if size == 0 {
		return h
	}
	words := size / goarch.PtrSize
	for words > ptrBits {
		h = h.write(0, ptrBits)
		words -= ptrBits
	}
	return h.write(0, words)
}

// Flush the bits that have been written, and add zeros as needed
// to cover the full object [addr, addr+size).
func (h writeHeapBits) flush(addr, size uintptr) {
	// zeros counts the number of bits needed to represent the object minus the
	// number of bits we've already written. This is the number of 0 bits
	// that need to be added.
	zeros := (addr+size-h.addr)/goarch.PtrSize - h.valid

	// Add zero bits up to the bitmap word boundary
	if zeros > 0 {
		z := ptrBits - h.valid
		if z > zeros {
			z = zeros
		}
		h.valid += z
		zeros -= z
	}

	// Find word in bitmap that we're going to write.
	ai := arenaIndex(h.addr)
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords

	// Write remaining bits.
	if h.valid != h.low {
		m := uintptr(1)<<h.low - 1      // don't clear existing bits below "low"
		m |= ^(uintptr(1)<<h.valid - 1) // don't clear existing bits above "valid"
		ha.bitmap[idx] = ha.bitmap[idx]&m | h.mask
	}
	if zeros == 0 {
		return
	}

	// Record in the noMorePtrs map that there won't be any more 1 bits,
	// so readers can stop early.
	ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)

	// Advance to next bitmap word.
	h.addr += ptrBits * goarch.PtrSize

	// Continue on writing zeros for the rest of the object.
	// For standard use of the ptr bits this is not required, as
	// the bits are read from the beginning of the object. Some uses,
	// like noscan spans, oblets, bulk write barriers, and cgocheck, might
	// start mid-object, so these writes are still required.
	for {
		// Write zero bits.
		ai := arenaIndex(h.addr)
		ha := mheap_.arenas[ai.l1()][ai.l2()]
		idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
		if zeros < ptrBits {
			ha.bitmap[idx] &^= uintptr(1)<<zeros - 1
			break
		} else if zeros == ptrBits {
			ha.bitmap[idx] = 0
			break
		} else {
			ha.bitmap[idx] = 0
			zeros -= ptrBits
		}
		ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
		h.addr += ptrBits * goarch.PtrSize
	}
}

// heapBitsSetType records that the new allocation [x, x+size)
// holds in [x, x+dataSize) one or more values of type typ.
// (The number of values is given by dataSize / typ.Size.)
// If dataSize < size, the fragment [x+dataSize, x+size) is
// recorded as non-pointer data.
// It is known that the type has pointers somewhere;
// malloc does not call heapBitsSetType when there are no pointers,
// because all free objects are marked as noscan during
// heapBitsSweepSpan.
//
// There can only be one allocation from a given span active at a time,
// and the bitmap for a span always falls on word boundaries,
// so there are no write-write races for access to the heap bitmap.
// Hence, heapBitsSetType can access the bitmap without atomics.
//
// There can be read-write races between heapBitsSetType and things
// that read the heap bitmap like scanobject. However, since
// heapBitsSetType is only used for objects that have not yet been
// made reachable, readers will ignore bits being modified by this
// function. This does mean this function cannot transiently modify
// bits that belong to neighboring objects. Also, on weakly-ordered
// machines, callers must execute a store/store (publication) barrier
// between calling this function and making the object reachable.
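//
// For example (a sketch of the required ordering; mallocgc is the real
// caller in this package):
//
//	heapBitsSetType(x, size, dataSize, typ)
//	publicationBarrier()
//	// only now may a pointer to x be published where the GC can see it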
func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
	const doubleCheck = false // slow but helpful; enable to test modifications to this code

	if doubleCheck && dataSize%typ.Size_ != 0 {
		throw("heapBitsSetType: dataSize not a multiple of typ.Size")
	}

	if goarch.PtrSize == 8 && size == goarch.PtrSize {
		// It's one word and it has pointers, it must be a pointer.
		// Since all allocated one-word objects are pointers
		// (non-pointers are aggregated into tinySize allocations),
		// (*mspan).initHeapBits sets the pointer bits for us.
		// Nothing to do here.
		if doubleCheck {
			h, addr := heapBitsForAddr(x, size).next()
			if addr != x {
				throw("heapBitsSetType: pointer bit missing")
			}
			_, addr = h.next()
			if addr != 0 {
				throw("heapBitsSetType: second pointer bit found")
			}
		}
		return
	}

	h := writeHeapBitsForAddr(x)

	// Handle GC program.
	if typ.Kind_&kindGCProg != 0 {
		// Expand the gc program into the storage we're going to use for the actual object.
		obj := (*uint8)(unsafe.Pointer(x))
		n := runGCProg(addb(typ.GCData, 4), obj)

		// Use the expanded program to set the heap bits.
		for i := uintptr(0); true; i += typ.Size_ {
			// Copy expanded program to heap bitmap.
			p := obj
			j := n
			for j > 8 {
				h = h.write(uintptr(*p), 8)
				p = add1(p)
				j -= 8
			}
			h = h.write(uintptr(*p), j)

			if i+typ.Size_ == dataSize {
				break // no padding after last element
			}

			// Pad with zeros to the start of the next element.
			h = h.pad(typ.Size_ - n*goarch.PtrSize)
		}

		h.flush(x, size)

		// Erase the expanded GC program.
		memclrNoHeapPointers(unsafe.Pointer(obj), (n+7)/8)
		return
	}

	// Note about sizes:
	//
	// typ.Size is the number of bytes in the object,
	// and typ.PtrBytes is the number of bytes in the prefix
	// of the object that contains pointers. That is, the final
	// typ.Size - typ.PtrBytes bytes contain no pointers.
	// This allows optimization of a common pattern where
	// an object has a small header followed by a large scalar
	// buffer. If we know the pointers are over, we don't have
	// to scan the buffer's heap bitmap at all.
	// The 1-bit ptrmasks are sized to contain only bits for
	// the typ.PtrBytes prefix, zero padded out to a full byte
	// of bitmap. If there is more room in the allocated object,
	// that space is pointerless. The noMorePtrs bitmap will prevent
	// scanning large pointerless tails of an object.
	//
	// Replicated copies are not as nice: if there is an array of
	// objects with scalar tails, all but the last tail does have to
	// be initialized, because there is no way to say "skip forward".
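	//
	// For example (illustrative only), for a single value of type
	//
	//	type T struct {
	//		p   *byte          // one pointer word
	//		buf [1024]uintptr  // pointer-free tail
	//	}
	//
	// typ.PtrBytes covers only p, so a single 1 bit is written below and
	// flush sets noMorePtrs so the GC never scans the 1024-word tail.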

	ptrs := typ.PtrBytes / goarch.PtrSize
	if typ.Size_ == dataSize { // Single element
		if ptrs <= ptrBits { // Single small element
			m := readUintptr(typ.GCData)
			h = h.write(m, ptrs)
		} else { // Single large element
			p := typ.GCData
			for {
				h = h.write(readUintptr(p), ptrBits)
				p = addb(p, ptrBits/8)
				ptrs -= ptrBits
				if ptrs <= ptrBits {
					break
				}
			}
			m := readUintptr(p)
			h = h.write(m, ptrs)
		}
	} else { // Repeated element
		words := typ.Size_ / goarch.PtrSize // total words, including scalar tail
		if words <= ptrBits { // Repeated small element
			n := dataSize / typ.Size_
			m := readUintptr(typ.GCData)
			// Make larger unit to repeat
			for words <= ptrBits/2 {
				if n&1 != 0 {
					h = h.write(m, words)
				}
				n /= 2
				m |= m << words
				ptrs += words
				words *= 2
				if n == 1 {
					break
				}
			}
			for n > 1 {
				h = h.write(m, words)
				n--
			}
			h = h.write(m, ptrs)
		} else { // Repeated large element
			for i := uintptr(0); true; i += typ.Size_ {
				p := typ.GCData
				j := ptrs
				for j > ptrBits {
					h = h.write(readUintptr(p), ptrBits)
					p = addb(p, ptrBits/8)
					j -= ptrBits
				}
				m := readUintptr(p)
				h = h.write(m, j)
				if i+typ.Size_ == dataSize {
					break // don't need the trailing nonptr bits on the last element.
				}
				// Pad with zeros to the start of the next element.
				h = h.pad(typ.Size_ - typ.PtrBytes)
			}
		}
	}

	h.flush(x, size)

	if doubleCheck {
		h := heapBitsForAddr(x, size)
		for i := uintptr(0); i < size; i += goarch.PtrSize {
			// Compute the pointer bit we want at offset i.
			want := false
			if i < dataSize {
				off := i % typ.Size_
				if off < typ.PtrBytes {
					j := off / goarch.PtrSize
					want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
				}
			}
			if want {
				var addr uintptr
				h, addr = h.next()
				if addr != x+i {
					throw("heapBitsSetType: pointer entry not correct")
				}
			}
		}
		if _, addr := h.next(); addr != 0 {
			throw("heapBitsSetType: extra pointer")
		}
	}
}

// For goexperiment.AllocHeaders
func heapSetType(x, dataSize uintptr, typ *_type, header **_type, span *mspan) (scanSize uintptr) {

// Returns GC type info for the pointer stored in ep for testing.
// If ep points to the stack, only static live information will be returned
// (i.e. not for objects which are only dynamically live stack objects).
func getgcmask(ep any) (mask []byte) {
	e := *efaceOf(&ep)
	p := e.data
	t := e._type
	// data or bss
	for _, datap := range activeModules() {
		// data
		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
			bitmap := datap.gcdatamask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}

		// bss
		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
			bitmap := datap.gcbssmask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}
	}

	// heap
	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
		if s.spanclass.noscan() {
			return nil
		}
		n := s.elemsize
		hbits := heapBitsForAddr(base, n)
		mask = make([]byte, n/goarch.PtrSize)
		for {
			var addr uintptr
			if hbits, addr = hbits.next(); addr == 0 {
				break
			}
			mask[(addr-base)/goarch.PtrSize] = 1
		}
		// Callers expect this mask to end at the last pointer.
		for len(mask) > 0 && mask[len(mask)-1] == 0 {
			mask = mask[:len(mask)-1]
		}
		return
	}

	// stack
	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
		found := false
		var u unwinder
		for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
			if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
				found = true
				break
			}
		}
		if found {
			locals, _, _ := u.frame.getStackMap(false)
			if locals.n == 0 {
				return
			}
			size := uintptr(locals.n) * goarch.PtrSize
			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
				mask[i/goarch.PtrSize] = locals.ptrbit(off)
			}
		}
		return
	}

	// otherwise, not something the GC knows about.
	// possibly read-only data, like malloc(0).
	// must not have pointers
	return
}

// userArenaHeapBitsSetType is the equivalent of heapBitsSetType but for
// non-slice-backing-store Go values allocated in a user arena chunk. It
// sets up the heap bitmap for the value with type typ allocated at address ptr.
// base is the base address of the arena chunk.
func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, s *mspan) {
	base := s.base()
	h := writeHeapBitsForAddr(uintptr(ptr))

	// Our last allocation might have ended right at a noMorePtrs mark,
	// which we would not have erased. We need to erase that mark here,
	// because we're going to start adding new heap bitmap bits.
	// We only need to clear one mark, because below we make sure to
	// pad out the bits with zeroes and only write one noMorePtrs bit
	// for each new object.
	// (This is only necessary at noMorePtrs boundaries, as noMorePtrs
	// marks within an object allocated with newAt will be erased by
	// the normal writeHeapBitsForAddr mechanism.)
	//
	// Note that we skip this if this is the first allocation in the
	// arena because there's definitely no previous noMorePtrs mark
	// (in fact, we *must* do this, because we're going to try to back
	// up a pointer to fix this up).
	if uintptr(ptr)%(8*goarch.PtrSize*goarch.PtrSize) == 0 && uintptr(ptr) != base {
		// Back up one pointer and rewrite that pointer. That will
		// cause the writeHeapBits implementation to clear the
		// noMorePtrs bit we need to clear.
		r := heapBitsForAddr(uintptr(ptr)-goarch.PtrSize, goarch.PtrSize)
		_, p := r.next()
		b := uintptr(0)
		if p == uintptr(ptr)-goarch.PtrSize {
			b = 1
		}
		h = writeHeapBitsForAddr(uintptr(ptr) - goarch.PtrSize)
		h = h.write(b, 1)
	}

	p := typ.GCData // start of 1-bit pointer mask (or GC program)
	var gcProgBits uintptr
	if typ.Kind_&kindGCProg != 0 {
		// Expand gc program, using the object itself for storage.
		gcProgBits = runGCProg(addb(p, 4), (*byte)(ptr))
		p = (*byte)(ptr)
	}
	nb := typ.PtrBytes / goarch.PtrSize

	for i := uintptr(0); i < nb; i += ptrBits {
		k := nb - i
		if k > ptrBits {
			k = ptrBits
		}
		h = h.write(readUintptr(addb(p, i/8)), k)
	}
	// Note: we call pad here to ensure we emit explicit 0 bits
	// for the pointerless tail of the object. This ensures that
	// there's only a single noMorePtrs mark for the next object
	// to clear. We don't need to do this to clear stale noMorePtrs
	// markers from previous uses because arena chunk pointer bitmaps
	// are always fully cleared when reused.
	h = h.pad(typ.Size_ - typ.PtrBytes)
	h.flush(uintptr(ptr), typ.Size_)

	if typ.Kind_&kindGCProg != 0 {
		// Zero out temporary ptrmask buffer inside object.
		memclrNoHeapPointers(ptr, (gcProgBits+7)/8)
	}

	// Double-check that the bitmap was written out correctly.
	//
	// Derived from heapBitsSetType.
	const doubleCheck = false
	if doubleCheck {
		size := s.elemsize
		x := uintptr(ptr)
		h := heapBitsForAddr(x, size)
		for i := uintptr(0); i < size; i += goarch.PtrSize {
			// Compute the pointer bit we want at offset i.
			want := false
			off := i % typ.Size_
			if off < typ.PtrBytes {
				j := off / goarch.PtrSize
				want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
			}
			if want {
				var addr uintptr
				h, addr = h.next()
				if addr != x+i {
					throw("userArenaHeapBitsSetType: pointer entry not correct")
				}
			}
		}
		if _, addr := h.next(); addr != 0 {
			throw("userArenaHeapBitsSetType: extra pointer")
		}
	}
}

// For goexperiment.AllocHeaders.
type typePointers struct {
	_ uintptr
}

// For goexperiment.AllocHeaders.
//
//go:nosplit
func (span *mspan) typePointersOf(addr, size uintptr) typePointers {
	panic("not implemented")
}

// For goexperiment.AllocHeaders.
//
//go:nosplit
func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers {
	panic("not implemented")
}

// For goexperiment.AllocHeaders.
//
//go:nosplit
func (tp typePointers) nextFast() (typePointers, uintptr) {
	panic("not implemented")
}

// For goexperiment.AllocHeaders.
//
//go:nosplit
func (tp typePointers) next(limit uintptr) (typePointers, uintptr) {
	panic("not implemented")
}

// For goexperiment.AllocHeaders.
//
//go:nosplit
func (tp typePointers) fastForward(n, limit uintptr) typePointers {
	panic("not implemented")
}

// For goexperiment.AllocHeaders, to pass TestIntendedInlining.
func (s *mspan) writeUserArenaHeapBits() {
	panic("not implemented")
}

// For goexperiment.AllocHeaders, to pass TestIntendedInlining.
func heapBitsSlice() {
	panic("not implemented")
}