1 // Copyright 2023 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 //go:build goexperiment.allocheaders
7 // Garbage collector: type and heap bitmaps.
9 // Stack, data, and bss bitmaps
11 // Stack frames and global variables in the data and bss sections are
12 // described by bitmaps with 1 bit per pointer-sized word. A "1" bit
13 // means the word is a live pointer to be visited by the GC (referred to
14 // as "pointer"). A "0" bit means the word should be ignored by GC
15 // (referred to as "scalar", though it could be a dead pointer value).
19 // The heap bitmap comprises 1 bit for each pointer-sized word in the heap,
20 // recording whether a pointer is stored in that word or not. This bitmap
21 // is stored at the end of a span for small objects and is unrolled at
22 // runtime from type metadata for all larger objects. Objects without
23 // pointers have neither a bitmap nor associated type metadata.
25 // Bits in all cases correspond to words in little-endian order.
27 // For small objects, if s is the mspan for the span starting at "start",
28 // then s.heapBits() returns a slice containing the bitmap for the whole span.
29 // That is, s.heapBits()[0] holds the goarch.PtrSize*8 bits for the first
30 // goarch.PtrSize*8 words from "start" through "start+63*ptrSize" in the span.
31 // On a related note, small objects are always small enough that their bitmap
32 // fits in goarch.PtrSize*8 bits, so writing out bitmap data takes two bitmap
33 // writes at most (because object boundaries don't generally lie on
34 // s.heapBits()[i] boundaries).
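//
// For illustration (a sketch, not code that appears in this file), the bit for the
// pointer-sized word at a word-aligned address addr within such a span can be read as:
//
//	k := (addr - span.base()) / goarch.PtrSize // word index within the span
//	isPtr := span.heapBits()[k/ptrBits]>>(k%ptrBits)&1 != 0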
36 // For larger objects, if t is the type for the object starting at "start",
37 // within some span whose mspan is s, then the bitmap at t.GCData is "tiled"
38 // from "start" through "start+s.elemsize".
39 // Specifically, the first bit of t.GCData corresponds to the word at "start",
40 // the second to the word after "start", and so on up to t.PtrBytes. At t.PtrBytes,
41 // we skip to "start+t.Size_" and begin again from there. This process is
42 // repeated until we hit "start+s.elemsize".
43 // This tiling algorithm supports array data, since the type always refers to
44 // the element type of the array. Single objects are considered the same as
45 // single-element arrays.
46 // The tiling algorithm may scan data past the end of the compiler-recognized
47 // object, but any unused data within the allocation slot (i.e. within s.elemsize)
48 // is zeroed, so the GC just observes nil pointers.
49 // Note that this "tiled" bitmap isn't stored anywhere; it is generated on-the-fly.
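//
// As an illustrative sketch (again, not code that exists in the runtime), the pointer
// bit for the word at byte offset i from "start" can be recovered from the type alone:
//
//	off := i % typ.Size_      // offset within the current array element
//	j := off / goarch.PtrSize // pointer-word index within the element
//	isPtr := off < typ.PtrBytes && *addb(typ.GCData, j/8)>>(j%8)&1 != 0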
51 // For objects without their own span, the type metadata is stored in the last
52 // word of the allocation slot. For objects with their own span, the type metadata
53 // is stored in the mspan.
55 // The bitmap for small unallocated objects in scannable spans is not maintained
62 "runtime/internal/sys"
const (
	// A malloc header is functionally a single type pointer, but
	// we need to use 8 here to ensure 8-byte alignment of allocations
	// on 32-bit platforms. It's wasteful, but a lot of code relies on
	// 8-byte alignment for 8-byte atomics.
	mallocHeaderSize = 8
73 // The minimum object size that has a malloc header, exclusive.
75 // The size of this value controls overheads from the malloc header.
76 // The minimum size is bound by writeHeapBitsSmall, which assumes that the
77 // pointer bitmap for objects of a size smaller than this doesn't cross
78 // more than one pointer-word boundary. This sets an upper-bound on this
	// value at the number of bits in a uintptr, multiplied by the pointer size in bytes.
	// We choose a value here that has a natural cutover point in terms of memory
	// overheads. This value just happens to be the maximum value allowed by the
	// writeHeapBitsSmall bound described above.
86 // A span with heap bits in it will have 128 bytes of heap bits on 64-bit
87 // platforms, and 256 bytes of heap bits on 32-bit platforms. The first size
88 // class where malloc headers match this overhead for 64-bit platforms is
89 // 512 bytes (8 KiB / 512 bytes * 8 bytes-per-header = 128 bytes of overhead).
90 // On 32-bit platforms, this same point is the 256 byte size class
91 // (8 KiB / 256 bytes * 8 bytes-per-header = 256 bytes of overhead).
93 // Guaranteed to be exactly at a size class boundary. The reason this value is
94 // an exclusive minimum is subtle. Suppose we're allocating a 504-byte object
	// and it's rounded up to 512 bytes for the size class. If minSizeForMallocHeader
	// were 512 and an inclusive minimum, then comparing those two sizes (504 and 512)
	// against minSizeForMallocHeader would produce different results. In other words, the comparison
98 // would not be invariant to size-class rounding. Eschewing this property means a
99 // more complex check or possibly storing additional state to determine whether a
100 // span has malloc headers.
	minSizeForMallocHeader = goarch.PtrSize * ptrBits
)
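
// For concreteness, on 64-bit platforms the bound above works out to
//
//	minSizeForMallocHeader = 8 * 64 = 512 bytes
//
// and on 32-bit platforms to 4 * 32 = 128 bytes. So, for example, a 512-byte
// allocation on a 64-bit platform still stores its bitmap at the end of the span,
// while anything in the next size class up carries a malloc header.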
104 // heapBitsInSpan returns true if the size of an object implies its ptr/scalar
105 // data is stored at the end of the span, and is accessible via span.heapBits.
107 // Note: this works for both rounded-up sizes (span.elemsize) and unrounded
// type sizes because minSizeForMallocHeader is guaranteed to be at a size class boundary.
112 func heapBitsInSpan(userSize uintptr) bool {
113 // N.B. minSizeForMallocHeader is an exclusive minimum so that this function is
114 // invariant under size-class rounding on its input.
	return userSize <= minSizeForMallocHeader
}
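
// To illustrate the rounding invariance on a 64-bit platform (where
// minSizeForMallocHeader is 512 bytes):
//
//	heapBitsInSpan(504) == true
//	heapBitsInSpan(512) == true // rounding 504 up to its size class doesn't change the answer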
118 // heapArenaPtrScalar contains the per-heapArena pointer/scalar metadata for the GC.
119 type heapArenaPtrScalar struct {
120 // N.B. This is no longer necessary with allocation headers.
123 // typePointers is an iterator over the pointers in a heap object.
125 // Iteration through this type implements the tiling algorithm described at the
127 type typePointers struct {
128 // elem is the address of the current array element of type typ being iterated over.
129 // Objects that are not arrays are treated as single-element arrays, in which case
130 // this value does not change.
133 // addr is the address the iterator is currently working from and describes
134 // the address of the first word referenced by mask.
137 // mask is a bitmask where each bit corresponds to pointer-words after addr.
138 // Bit 0 is the pointer-word at addr, Bit 1 is the next word, and so on.
139 // If a bit is 1, then there is a pointer at that word.
140 // nextFast and next mask out bits in this mask as their pointers are processed.
143 // typ is a pointer to the type information for the heap object's type.
144 // This may be nil if the object is in a span where heapBitsInSpan(span.elemsize) is true.
148 // typePointersOf returns an iterator over all heap pointers in the range [addr, addr+size).
150 // addr and addr+size must be in the range [span.base(), span.limit).
152 // Note: addr+size must be passed as the limit argument to the iterator's next method on
153 // each iteration. This slightly awkward API is to allow typePointers to be destructured
156 // nosplit because it is used during write barriers and must not be preempted.
159 func (span *mspan) typePointersOf(addr, size uintptr) typePointers {
160 base := span.objBase(addr)
161 tp := span.typePointersOfUnchecked(base)
	if base == addr && size == span.elemsize {
		return tp
	}
	return tp.fastForward(addr-tp.addr, addr+size)
168 // typePointersOfUnchecked is like typePointersOf, but assumes addr is the base
169 // pointer of an object in span. It returns an iterator that generates all pointers
170 // in the range [addr, addr+span.elemsize).
172 // nosplit because it is used during write barriers and must not be preempted.
175 func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers {
176 const doubleCheck = false
177 if doubleCheck && span.objBase(addr) != addr {
178 print("runtime: addr=", addr, " base=", span.objBase(addr), "\n")
179 throw("typePointersOfUnchecked consisting of non-base-address for object")
	spc := span.spanclass
	if spc.noscan() {
		return typePointers{}
	}
186 if heapBitsInSpan(span.elemsize) {
187 // Handle header-less objects.
188 return typePointers{elem: addr, addr: addr, mask: span.heapBitsSmallForAddr(addr)}
	// All of these objects have a header.
	var typ *_type
	if spc.sizeclass() != 0 {
		// Pull the allocation header from the last word of the object.
		typ = *(**_type)(unsafe.Pointer(addr + span.elemsize - mallocHeaderSize))
	} else {
		typ = span.largeType
	}
	gcdata := typ.GCData
	return typePointers{elem: addr, addr: addr, mask: readUintptr(gcdata), typ: typ}
203 // nextFast is the fast path of next. nextFast is written to be inlineable and,
204 // as the name implies, fast.
// Callers that are performance-critical should iterate using the following
// pattern:
//
//	for {
//		var addr uintptr
//		if tp, addr = tp.nextFast(); addr == 0 {
//			if tp, addr = tp.next(limit); addr == 0 {
//				break
//			}
//		}
//		// Use addr.
//	}
220 // nosplit because it is used during write barriers and must not be preempted.
223 func (tp typePointers) nextFast() (typePointers, uintptr) {
	if tp.mask == 0 {
		return tp, 0
	}
	var i int
	if goarch.PtrSize == 8 {
		i = sys.TrailingZeros64(uint64(tp.mask))
	} else {
		i = sys.TrailingZeros32(uint32(tp.mask))
	}
	tp.mask ^= uintptr(1) << (i & (ptrBits - 1))
	return tp, tp.addr + uintptr(i)*goarch.PtrSize
}
241 // next advances the pointers iterator, returning the updated iterator and
242 // the address of the next pointer.
244 // limit must be the same each time it is passed to next.
246 // nosplit because it is used during write barriers and must not be preempted.
249 func (tp typePointers) next(limit uintptr) (typePointers, uintptr) {
		// Stop if we don't actually have type information.
		if tp.typ == nil {
			return typePointers{}, 0
		}
260 // Advance to the next element if necessary.
		if tp.addr+goarch.PtrSize*ptrBits >= tp.elem+tp.typ.PtrBytes {
			tp.elem += tp.typ.Size_
			tp.addr = tp.elem
		} else {
			tp.addr += ptrBits * goarch.PtrSize
		}
268 // Check if we've exceeded the limit with the last update.
269 if tp.addr >= limit {
270 return typePointers{}, 0
273 // Grab more bits and try again.
274 tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8))
275 if tp.addr+goarch.PtrSize*ptrBits > limit {
276 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
277 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
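
// As an illustrative sketch (assuming s is an object's span and base is the object's
// base address; this mirrors how getgcmask below walks an object), a full scan of an
// object's pointer words looks like:
//
//	tp := s.typePointersOfUnchecked(base)
//	limit := base + s.elemsize
//	for {
//		var addr uintptr
//		if tp, addr = tp.next(limit); addr == 0 {
//			break
//		}
//		// addr is the address of a word that holds a pointer.
//	}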
282 // fastForward moves the iterator forward by n bytes. n must be a multiple
// of goarch.PtrSize. limit must be the same limit passed to next for this iterator.
286 // nosplit because it is used during write barriers and must not be preempted.
289 func (tp typePointers) fastForward(n, limit uintptr) typePointers {
	// Basic bounds check.
	target := tp.addr + n
	if target >= limit {
		return typePointers{}
	}
	if tp.typ == nil {
		// Handle small objects.
		// Clear any bits before the target address.
		tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
299 // Clear any bits past the limit.
		if tp.addr+goarch.PtrSize*ptrBits > limit {
			bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
			tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
		}
		return tp
	}
307 // Move up elem and addr.
308 // Offsets within an element are always at a ptrBits*goarch.PtrSize boundary.
	if n >= tp.typ.Size_ {
		// elem needs to be moved to the element containing
		// tp.addr + n.
		oldelem := tp.elem
		tp.elem += (tp.addr - tp.elem + n) / tp.typ.Size_ * tp.typ.Size_
		tp.addr = tp.elem + alignDown(n-(tp.elem-oldelem), ptrBits*goarch.PtrSize)
	} else {
		tp.addr += alignDown(n, ptrBits*goarch.PtrSize)
	}
319 if tp.addr-tp.elem >= tp.typ.PtrBytes {
320 // We're starting in the non-pointer area of an array.
321 // Move up to the next element.
		tp.elem += tp.typ.Size_
		tp.addr = tp.elem
		tp.mask = readUintptr(tp.typ.GCData)
326 // We may have exceeded the limit after this. Bail just like next does.
		if tp.addr >= limit {
			return typePointers{}
		}
	} else {
		// Grab the mask, but then clear any bits before the target address and any
332 // bits over the limit.
333 tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8))
		tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
	}
	if tp.addr+goarch.PtrSize*ptrBits > limit {
		bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
		tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
	}
	return tp
}
343 // objBase returns the base pointer for the object containing addr in span.
345 // Assumes that addr points into a valid part of span (span.base() <= addr < span.limit).
348 func (span *mspan) objBase(addr uintptr) uintptr {
	return span.base() + span.objIndex(addr)*span.elemsize
}
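
// For example (illustrative numbers only), in a span with elemsize 48:
//
//	objIndex(span.base()+100) = 100/48 = 2
//	objBase(span.base()+100)  = span.base() + 2*48 = span.base() + 96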
352 // bulkBarrierPreWrite executes a write barrier
353 // for every pointer slot in the memory range [src, src+size),
354 // using pointer/scalar information from [dst, dst+size).
355 // This executes the write barriers necessary before a memmove.
356 // src, dst, and size must be pointer-aligned.
357 // The range [dst, dst+size) must lie within a single object.
358 // It does not perform the actual writes.
360 // As a special case, src == 0 indicates that this is being used for a
// memclr. bulkBarrierPreWrite will pass 0 for the src of each write barrier.
364 // Callers should call bulkBarrierPreWrite immediately before
365 // calling memmove(dst, src, size). This function is marked nosplit
366 // to avoid being preempted; the GC must not stop the goroutine
367 // between the memmove and the execution of the barriers.
368 // The caller is also responsible for cgo pointer checks if this
369 // may be writing Go pointers into non-Go memory.
371 // The pointer bitmap is not maintained for allocations containing
372 // no pointers at all; any caller of bulkBarrierPreWrite must first
373 // make sure the underlying allocation contains pointers, usually
374 // by checking typ.PtrBytes.
376 // Callers must perform cgo checks if goexperiment.CgoCheck2.
379 func bulkBarrierPreWrite(dst, src, size uintptr) {
380 if (dst|src|size)&(goarch.PtrSize-1) != 0 {
381 throw("bulkBarrierPreWrite: unaligned arguments")
383 if !writeBarrier.enabled {
388 // If dst is a global, use the data or BSS bitmaps to
389 // execute write barriers.
390 for _, datap := range activeModules() {
391 if datap.data <= dst && dst < datap.edata {
392 bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
396 for _, datap := range activeModules() {
397 if datap.bss <= dst && dst < datap.ebss {
398 bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
403 } else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
404 // dst was heap memory at some point, but isn't now.
405 // It can't be a global. It must be either our stack,
406 // or in the case of direct channel sends, it could be
407 // another stack. Either way, no need for barriers.
408 // This will also catch if dst is in a freed span,
		// though that should never happen.
412 buf := &getg().m.p.ptr().wbBuf
414 tp := s.typePointersOf(dst, size)
			if tp, addr = tp.next(dst + size); addr == 0 {
				break
			}
			dstx := (*uintptr)(unsafe.Pointer(addr))
			if tp, addr = tp.next(dst + size); addr == 0 {
				break
			}
			dstx := (*uintptr)(unsafe.Pointer(addr))
			srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
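
// As an illustrative sketch of the intended call pattern (this mirrors what a caller
// like typedmemmove does; dst, src, and typ here are the caller's hypothetical
// variables, not locals of this function):
//
//	if writeBarrier.enabled && typ.PtrBytes != 0 {
//		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
//	}
//	memmove(dst, src, typ.Size_)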
440 // bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
441 // does not execute write barriers for [dst, dst+size).
443 // In addition to the requirements of bulkBarrierPreWrite
444 // callers need to ensure [dst, dst+size) is zeroed.
446 // This is used for special cases where e.g. dst was just
447 // created and zeroed with malloc.
450 func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
451 if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWriteSrcOnly: unaligned arguments")
454 if !writeBarrier.enabled {
457 buf := &getg().m.p.ptr().wbBuf
458 tp := spanOf(dst).typePointersOf(dst, size)
		if tp, addr = tp.next(dst + size); addr == 0 {
			break
		}
		srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
470 // initHeapBits initializes the heap bitmap for a span.
472 // TODO(mknyszek): This should set the heap bits for single pointer
473 // allocations eagerly to avoid calling heapSetType at allocation time,
474 // just to write one bit.
475 func (s *mspan) initHeapBits(forceClear bool) {
476 if (!s.spanclass.noscan() && heapBitsInSpan(s.elemsize)) || s.isUserArenaChunk {
484 // bswapIfBigEndian swaps the byte order of the uintptr on goarch.BigEndian platforms,
485 // and leaves it alone elsewhere.
func bswapIfBigEndian(x uintptr) uintptr {
	if goarch.BigEndian {
		if goarch.PtrSize == 8 {
			return uintptr(sys.Bswap64(uint64(x)))
		}
		return uintptr(sys.Bswap32(uint32(x)))
	}
	return x
}
496 type writeUserArenaHeapBits struct {
	offset uintptr // offset in span that the low bit of mask represents the pointer state of.
	mask   uintptr // some pointer bits starting at the offset above.
	valid  uintptr // number of bits in mask that are valid (including low)
	low    uintptr // number of low-order bits to not overwrite
503 func (s *mspan) writeUserArenaHeapBits(addr uintptr) (h writeUserArenaHeapBits) {
504 offset := addr - s.base()
506 // We start writing bits maybe in the middle of a heap bitmap word.
507 // Remember how many bits into the word we started, so we can be sure
508 // not to overwrite the previous bits.
509 h.low = offset / goarch.PtrSize % ptrBits
511 // round down to heap word that starts the bitmap word.
512 h.offset = offset - h.low*goarch.PtrSize
514 // We don't have any bits yet.
521 // write appends the pointerness of the next valid pointer slots
522 // using the low valid bits of bits. 1=pointer, 0=scalar.
523 func (h writeUserArenaHeapBits) write(s *mspan, bits, valid uintptr) writeUserArenaHeapBits {
	if h.valid+valid <= ptrBits {
		// Fast path - just accumulate the bits.
		h.mask |= bits << h.valid
		h.valid += valid
		return h
	}
530 // Too many bits to fit in this word. Write the current word
531 // out and move on to the next word.
533 data := h.mask | bits<<h.valid // mask for this word
534 h.mask = bits >> (ptrBits - h.valid) // leftover for next word
535 h.valid += valid - ptrBits // have h.valid+valid bits, writing ptrBits of them
537 // Flush mask to the memory bitmap.
538 idx := h.offset / (ptrBits * goarch.PtrSize)
539 m := uintptr(1)<<h.low - 1
540 bitmap := s.heapBits()
541 bitmap[idx] = bswapIfBigEndian(bswapIfBigEndian(bitmap[idx])&m | data)
542 // Note: no synchronization required for this write because
543 // the allocator has exclusive access to the page, and the bitmap
544 // entries are all for a single page. Also, visibility of these
545 // writes is guaranteed by the publication barrier in mallocgc.
547 // Move to next word of bitmap.
548 h.offset += ptrBits * goarch.PtrSize
553 // Add padding of size bytes.
554 func (h writeUserArenaHeapBits) pad(s *mspan, size uintptr) writeUserArenaHeapBits {
	words := size / goarch.PtrSize
	for words > ptrBits {
		h = h.write(s, 0, ptrBits)
		words -= ptrBits
	}
	return h.write(s, 0, words)
}
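
// As an illustrative sketch of how these helpers compose (this mirrors
// userArenaHeapBitsSetType below, assuming ptr points to a value of type typ in the
// user arena span s and typ's pointer bitmap fits in a single uintptr):
//
//	h := s.writeUserArenaHeapBits(uintptr(ptr))
//	h = h.write(s, readUintptr(typ.GCData), typ.PtrBytes/goarch.PtrSize)
//	h = h.pad(s, typ.Size_-typ.PtrBytes)
//	h.flush(s, uintptr(ptr), typ.Size_)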
566 // Flush the bits that have been written, and add zeros as needed
567 // to cover the full object [addr, addr+size).
568 func (h writeUserArenaHeapBits) flush(s *mspan, addr, size uintptr) {
569 offset := addr - s.base()
571 // zeros counts the number of bits needed to represent the object minus the
572 // number of bits we've already written. This is the number of 0 bits
573 // that need to be added.
574 zeros := (offset+size-h.offset)/goarch.PtrSize - h.valid
576 // Add zero bits up to the bitmap word boundary
578 z := ptrBits - h.valid
586 // Find word in bitmap that we're going to write.
587 bitmap := s.heapBits()
588 idx := h.offset / (ptrBits * goarch.PtrSize)
590 // Write remaining bits.
591 if h.valid != h.low {
592 m := uintptr(1)<<h.low - 1 // don't clear existing bits below "low"
593 m |= ^(uintptr(1)<<h.valid - 1) // don't clear existing bits above "valid"
594 bitmap[idx] = bswapIfBigEndian(bswapIfBigEndian(bitmap[idx])&m | h.mask)
600 // Advance to next bitmap word.
601 h.offset += ptrBits * goarch.PtrSize
603 // Continue on writing zeros for the rest of the object.
604 // For standard use of the ptr bits this is not required, as
605 // the bits are read from the beginning of the object. Some uses,
606 // like noscan spans, oblets, bulk write barriers, and cgocheck, might
607 // start mid-object, so these writes are still required.
610 idx := h.offset / (ptrBits * goarch.PtrSize)
612 bitmap[idx] = bswapIfBigEndian(bswapIfBigEndian(bitmap[idx]) &^ (uintptr(1)<<zeros - 1))
614 } else if zeros == ptrBits {
621 h.offset += ptrBits * goarch.PtrSize
625 // heapBits returns the heap ptr/scalar bits stored at the end of the span for
// small object spans and user arena spans.
628 // Note that the uintptr of each element means something different for small object
// spans and for user arena spans. Small object spans are easy: they're never interpreted
630 // as anything but uintptr, so they're immune to differences in endianness. However, the
631 // heapBits for user arena spans is exposed through a dummy type descriptor, so the byte
632 // ordering needs to match the same byte ordering the compiler would emit. The compiler always
633 // emits the bitmap data in little endian byte ordering, so on big endian platforms these
634 // uintptrs will have their byte orders swapped from what they normally would be.
636 // heapBitsInSpan(span.elemsize) or span.isUserArenaChunk must be true.
639 func (span *mspan) heapBits() []uintptr {
640 const doubleCheck = false
642 if doubleCheck && !span.isUserArenaChunk {
643 if span.spanclass.noscan() {
644 throw("heapBits called for noscan")
646 if span.elemsize > minSizeForMallocHeader {
647 throw("heapBits called for span class that should have a malloc header")
650 // Find the bitmap at the end of the span.
652 // Nearly every span with heap bits is exactly one page in size. Arenas are the only exception.
	if span.npages == 1 {
		// This will be inlined and constant-folded down.
		return heapBitsSlice(span.base(), pageSize)
	}
	return heapBitsSlice(span.base(), span.npages*pageSize)
}
660 // Helper for constructing a slice for the span's heap bits.
663 func heapBitsSlice(spanBase, spanSize uintptr) []uintptr {
664 bitmapSize := spanSize / goarch.PtrSize / 8
665 elems := int(bitmapSize / goarch.PtrSize)
666 var sl notInHeapSlice
667 sl = notInHeapSlice{(*notInHeap)(unsafe.Pointer(spanBase + spanSize - bitmapSize)), elems, elems}
	return *(*[]uintptr)(unsafe.Pointer(&sl))
}
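
// For example, for a one-page (8 KiB) small-object span on a 64-bit platform
// (illustrative arithmetic only):
//
//	bitmapSize = 8192 / 8 / 8 = 128 bytes
//	elems      = 128 / 8     = 16 uintptr words
//
// so the bitmap occupies the last 128 bytes of the span, matching the overhead
// figure quoted above minSizeForMallocHeader.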
671 // heapBitsSmallForAddr loads the heap bits for the object stored at addr from span.heapBits.
673 // addr must be the base pointer of an object in the span. heapBitsInSpan(span.elemsize)
677 func (span *mspan) heapBitsSmallForAddr(addr uintptr) uintptr {
678 spanSize := span.npages * pageSize
679 bitmapSize := spanSize / goarch.PtrSize / 8
680 hbits := (*byte)(unsafe.Pointer(span.base() + spanSize - bitmapSize))
682 // These objects are always small enough that their bitmaps
683 // fit in a single word, so just load the word or two we need.
685 // Mirrors mspan.writeHeapBitsSmall.
687 // We should be using heapBits(), but unfortunately it introduces
// both bounds-check panics and a throw, which causes us to exceed
689 // the nosplit limit in quite a few cases.
690 i := (addr - span.base()) / goarch.PtrSize / ptrBits
691 j := (addr - span.base()) / goarch.PtrSize % ptrBits
	bits := span.elemsize / goarch.PtrSize
	word0 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+0))))
	word1 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+1))))
	var read uintptr
	if j+bits > ptrBits {
		bits0 := ptrBits - j
		bits1 := bits - bits0
		read = *word0 >> j
		read |= (*word1 & ((1 << bits1) - 1)) << bits0
	} else {
		read = (*word0 >> j) & ((1 << bits) - 1)
	}
	return read
}
710 // writeHeapBitsSmall writes the heap bits for small objects whose ptr/scalar data is
711 // stored as a bitmap at the end of the span.
713 // Assumes dataSize is <= ptrBits*goarch.PtrSize. x must be a pointer into the span.
714 // heapBitsInSpan(dataSize) must be true. dataSize must be >= typ.Size_.
717 func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize uintptr) {
718 // The objects here are always really small, so a single load is sufficient.
719 src0 := readUintptr(typ.GCData)
721 // Create repetitions of the bitmap if we have a small array.
	bits := span.elemsize / goarch.PtrSize
	scanSize = typ.PtrBytes
	src := src0
	switch typ.Size_ {
	case goarch.PtrSize:
		src = (1 << (dataSize / goarch.PtrSize)) - 1
	default:
		for i := typ.Size_; i < dataSize; i += typ.Size_ {
			src |= src0 << (i / goarch.PtrSize)
			scanSize += typ.Size_
		}
	}
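
	// As a worked example of the repetitions above (illustrative values, not code):
	// a 16-byte type on a 64-bit platform whose only pointer is in its first word has
	// src0 = 0b01; with dataSize = 48 (three elements) the loop yields
	//
	//	src = 0b010101
	//
	// i.e. one pointer bit per element, shifted by each element's word offset.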
735 // Since we're never writing more than one uintptr's worth of bits, we're either going
736 // to do one or two writes.
	dst := span.heapBits()
	o := (x - span.base()) / goarch.PtrSize
	i := o / ptrBits
	j := o % ptrBits
	if j+bits > ptrBits {
		// Two writes.
		bits0 := ptrBits - j
		bits1 := bits - bits0
		dst[i+0] = dst[i+0]&(^uintptr(0)>>bits0) | (src << j)
		dst[i+1] = dst[i+1]&^((1<<bits1)-1) | (src >> bits0)
	} else {
		// One write.
		dst[i] = (dst[i] &^ (((1 << bits) - 1) << j)) | (src << j)
	}
752 const doubleCheck = false
754 srcRead := span.heapBitsSmallForAddr(x)
756 print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n")
757 print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n")
758 print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n")
759 throw("bad pointer bits written for small object")
765 // For !goexperiment.AllocHeaders.
766 func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
769 // heapSetType records that the new allocation [x, x+size)
770 // holds in [x, x+dataSize) one or more values of type typ.
771 // (The number of values is given by dataSize / typ.Size.)
772 // If dataSize < size, the fragment [x+dataSize, x+size) is
773 // recorded as non-pointer data.
774 // It is known that the type has pointers somewhere;
775 // malloc does not call heapSetType when there are no pointers.
777 // There can be read-write races between heapSetType and things
778 // that read the heap metadata like scanobject. However, since
779 // heapSetType is only used for objects that have not yet been
780 // made reachable, readers will ignore bits being modified by this
781 // function. This does mean this function cannot transiently modify
782 // shared memory that belongs to neighboring objects. Also, on weakly-ordered
783 // machines, callers must execute a store/store (publication) barrier
784 // between calling this function and making the object reachable.
785 func heapSetType(x, dataSize uintptr, typ *_type, header **_type, span *mspan) (scanSize uintptr) {
	const doubleCheck = false

	gctyp := typ
	if header == nil {
		if doubleCheck && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(span.elemsize)) {
			throw("tried to write heap bits, but no heap bits in span")
		}
		// Handle the case where we have no malloc header.
		scanSize = span.writeHeapBitsSmall(x, dataSize, typ)
	} else {
796 if typ.Kind_&kindGCProg != 0 {
797 // Allocate space to unroll the gcprog. This space will consist of
798 // a dummy _type value and the unrolled gcprog. The dummy _type will
799 // refer to the bitmap, and the mspan will refer to the dummy _type.
800 if span.spanclass.sizeclass() != 0 {
801 throw("GCProg for type that isn't large")
803 spaceNeeded := alignUp(unsafe.Sizeof(_type{}), goarch.PtrSize)
804 heapBitsOff := spaceNeeded
805 spaceNeeded += alignUp(typ.PtrBytes/goarch.PtrSize/8, goarch.PtrSize)
806 npages := alignUp(spaceNeeded, pageSize) / pageSize
809 progSpan = mheap_.allocManual(npages, spanAllocPtrScalarBits)
810 memclrNoHeapPointers(unsafe.Pointer(progSpan.base()), progSpan.npages*pageSize)
812 // Write a dummy _type in the new space.
814 // We only need to write size, PtrBytes, and GCData, since that's all
815 // the GC cares about.
816 gctyp = (*_type)(unsafe.Pointer(progSpan.base()))
817 gctyp.Kind_ |= kindGCProg
818 gctyp.Size_ = typ.Size_
819 gctyp.PtrBytes = typ.PtrBytes
820 gctyp.GCData = (*byte)(add(unsafe.Pointer(progSpan.base()), heapBitsOff))
822 // Expand the GC program into space reserved at the end of the object.
823 runGCProg(addb(typ.GCData, 4), gctyp.GCData)
		// Write out the header.
		*header = gctyp
		scanSize = span.elemsize
832 doubleCheckHeapPointers(x, dataSize, gctyp, header, span)
834 // To exercise the less common path more often, generate
835 // a random interior pointer and make sure iterating from
836 // that point works correctly too.
837 maxIterBytes := span.elemsize
839 maxIterBytes = dataSize
841 off := alignUp(uintptr(fastrand())%dataSize, goarch.PtrSize)
842 size := dataSize - off
844 off -= goarch.PtrSize
845 size += goarch.PtrSize
848 size -= alignDown(uintptr(fastrand())%size, goarch.PtrSize)
850 size = goarch.PtrSize
		// Round the size up to a multiple of the type's size.
853 size = (size + gctyp.Size_ - 1) / gctyp.Size_ * gctyp.Size_
854 if interior+size > x+maxIterBytes {
855 size = x + maxIterBytes - interior
857 doubleCheckHeapPointersInterior(x, interior, size, dataSize, gctyp, header, span)
862 func doubleCheckHeapPointers(x, dataSize uintptr, typ *_type, header **_type, span *mspan) {
863 // Check that scanning the full object works.
864 tp := span.typePointersOfUnchecked(span.objBase(x))
865 maxIterBytes := span.elemsize
867 maxIterBytes = dataSize
870 for i := uintptr(0); i < maxIterBytes; i += goarch.PtrSize {
		// Compute the pointer bit we want at offset i.
		want := false
		if i < span.elemsize {
			off := i % typ.Size_
			if off < typ.PtrBytes {
876 j := off / goarch.PtrSize
877 want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
882 tp, addr = tp.next(x + span.elemsize)
884 println("runtime: found bad iterator")
887 print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
894 tp, addr = tp.next(x + span.elemsize)
898 println("runtime: extra pointer:", hex(addr))
900 print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, " hasGCProg=", typ.Kind_&kindGCProg != 0, "\n")
901 print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, "\n")
902 print("runtime: typ=", unsafe.Pointer(typ), " typ.PtrBytes=", typ.PtrBytes, "\n")
903 print("runtime: limit=", hex(x+span.elemsize), "\n")
904 tp = span.typePointersOfUnchecked(x)
908 if tp, addr = tp.next(x + span.elemsize); addr == 0 {
909 println("runtime: would've stopped here")
913 print("runtime: addr=", hex(addr), "\n")
916 throw("heapSetType: pointer entry not correct")
919 func doubleCheckHeapPointersInterior(x, interior, size, dataSize uintptr, typ *_type, header **_type, span *mspan) {
922 print("runtime: interior=", hex(interior), " x=", hex(x), "\n")
923 throw("found bad interior pointer")
926 tp := span.typePointersOf(interior, size)
927 for i := off; i < off+size; i += goarch.PtrSize {
		// Compute the pointer bit we want at offset i.
		want := false
		if i < span.elemsize {
			off := i % typ.Size_
			if off < typ.PtrBytes {
933 j := off / goarch.PtrSize
934 want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
939 tp, addr = tp.next(interior + size)
941 println("runtime: found bad iterator")
945 print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
952 tp, addr = tp.next(interior + size)
956 println("runtime: extra pointer:", hex(addr))
958 print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, "\n")
959 print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, " interior=", hex(interior), " size=", size, "\n")
960 print("runtime: limit=", hex(interior+size), "\n")
961 tp = span.typePointersOf(interior, size)
965 if tp, addr = tp.next(interior + size); addr == 0 {
966 println("runtime: would've stopped here")
970 print("runtime: addr=", hex(addr), "\n")
974 print("runtime: want: ")
975 for i := off; i < off+size; i += goarch.PtrSize {
976 // Compute the pointer bit we want at offset i.
980 if off < typ.PtrBytes {
981 j := off / goarch.PtrSize
982 want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
993 throw("heapSetType: pointer entry not correct")
996 func dumpTypePointers(tp typePointers) {
997 print("runtime: tp.elem=", hex(tp.elem), " tp.typ=", unsafe.Pointer(tp.typ), "\n")
998 print("runtime: tp.addr=", hex(tp.addr), " tp.mask=")
999 for i := uintptr(0); i < ptrBits; i++ {
1000 if tp.mask&(uintptr(1)<<i) != 0 {
1011 // Returns GC type info for the pointer stored in ep for testing.
1012 // If ep points to the stack, only static live information will be returned
1013 // (i.e. not for objects which are only dynamically live stack objects).
1014 func getgcmask(ep any) (mask []byte) {
1019 for _, datap := range activeModules() {
1021 if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
1022 bitmap := datap.gcdatamask.bytedata
1023 n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
1024 mask = make([]byte, n/goarch.PtrSize)
1025 for i := uintptr(0); i < n; i += goarch.PtrSize {
1026 off := (uintptr(p) + i - datap.data) / goarch.PtrSize
1027 mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
1033 if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
1034 bitmap := datap.gcbssmask.bytedata
1035 n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
1036 mask = make([]byte, n/goarch.PtrSize)
1037 for i := uintptr(0); i < n; i += goarch.PtrSize {
1038 off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
1039 mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
1046 if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
1047 if s.spanclass.noscan() {
1050 limit := base + s.elemsize
1052 // Move the base up to the iterator's start, because
1053 // we want to hide evidence of a malloc header from the
1055 tp := s.typePointersOfUnchecked(base)
1058 // Unroll the full bitmap the GC would actually observe.
1059 mask = make([]byte, (limit-base)/goarch.PtrSize)
			if tp, addr = tp.next(limit); addr == 0 {
				break
			}
			mask[(addr-base)/goarch.PtrSize] = 1
1068 // Double-check that every part of the ptr/scalar we're not
1069 // showing the caller is zeroed. This keeps us honest that
1070 // that information is actually irrelevant.
1071 for i := limit; i < s.elemsize; i++ {
1072 if *(*byte)(unsafe.Pointer(i)) != 0 {
1073 throw("found non-zeroed tail of allocation")
1077 // Callers expect this mask to end at the last pointer.
1078 for len(mask) > 0 && mask[len(mask)-1] == 0 {
1079 mask = mask[:len(mask)-1]
1085 if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
1088 for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
1089 if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
1095 locals, _, _ := u.frame.getStackMap(false)
1099 size := uintptr(locals.n) * goarch.PtrSize
1100 n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
1101 mask = make([]byte, n/goarch.PtrSize)
1102 for i := uintptr(0); i < n; i += goarch.PtrSize {
1103 off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
1104 mask[i/goarch.PtrSize] = locals.ptrbit(off)
1110 // otherwise, not something the GC knows about.
1111 // possibly read-only data, like malloc(0).
1112 // must not have pointers
1116 // userArenaHeapBitsSetType is the equivalent of heapSetType but for
1117 // non-slice-backing-store Go values allocated in a user arena chunk. It
1118 // sets up the type metadata for the value with type typ allocated at address ptr.
1119 // base is the base address of the arena chunk.
func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, s *mspan) {
	base := s.base()
	h := s.writeUserArenaHeapBits(uintptr(ptr))
1124 p := typ.GCData // start of 1-bit pointer mask (or GC program)
1125 var gcProgBits uintptr
1126 if typ.Kind_&kindGCProg != 0 {
1127 // Expand gc program, using the object itself for storage.
1128 gcProgBits = runGCProg(addb(p, 4), (*byte)(ptr))
	nb := typ.PtrBytes / goarch.PtrSize
	for i := uintptr(0); i < nb; i += ptrBits {
		k := nb - i
		if k > ptrBits {
			k = ptrBits
		}
		// N.B. On big endian platforms we byte swap the data that we
		// read from GCData, which is always stored in little-endian order
		// by the compiler. writeUserArenaHeapBits handles data in
		// a platform-ordered way for efficiency, but stores back the
		// data in little endian order, since we expose the bitmap through
		// a dummy type.
		h = h.write(s, readUintptr(addb(p, i/8)), k)
	}
1146 // Note: we call pad here to ensure we emit explicit 0 bits
1147 // for the pointerless tail of the object. This ensures that
1148 // there's only a single noMorePtrs mark for the next object
1149 // to clear. We don't need to do this to clear stale noMorePtrs
1150 // markers from previous uses because arena chunk pointer bitmaps
1151 // are always fully cleared when reused.
1152 h = h.pad(s, typ.Size_-typ.PtrBytes)
1153 h.flush(s, uintptr(ptr), typ.Size_)
1155 if typ.Kind_&kindGCProg != 0 {
1156 // Zero out temporary ptrmask buffer inside object.
1157 memclrNoHeapPointers(ptr, (gcProgBits+7)/8)
1160 // Update the PtrBytes value in the type information. After this
1161 // point, the GC will observe the new bitmap.
1162 s.largeType.PtrBytes = uintptr(ptr) - base + typ.PtrBytes
1164 // Double-check that the bitmap was written out correctly.
1165 const doubleCheck = false
1167 doubleCheckHeapPointersInterior(uintptr(ptr), uintptr(ptr), typ.Size_, typ.Size_, typ, &s.largeType, s)
1171 // For !goexperiment.AllocHeaders, to pass TestIntendedInlining.
1172 func writeHeapBitsForAddr() {
1173 panic("not implemented")
1176 // For !goexperiment.AllocHeaders.
1177 type heapBits struct {
1180 // For !goexperiment.AllocHeaders.
1183 func heapBitsForAddr(addr, size uintptr) heapBits {
1184 panic("not implemented")
1187 // For !goexperiment.AllocHeaders.
1190 func (h heapBits) next() (heapBits, uintptr) {
1191 panic("not implemented")
1194 // For !goexperiment.AllocHeaders.
1197 func (h heapBits) nextFast() (heapBits, uintptr) {
1198 panic("not implemented")