1 // Copyright 2023 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 //go:build goexperiment.allocheaders
7 // Garbage collector: type and heap bitmaps.
9 // Stack, data, and bss bitmaps
11 // Stack frames and global variables in the data and bss sections are
12 // described by bitmaps with 1 bit per pointer-sized word. A "1" bit
13 // means the word is a live pointer to be visited by the GC (referred to
14 // as "pointer"). A "0" bit means the word should be ignored by GC
15 // (referred to as "scalar", though it could be a dead pointer value).
19 // The heap bitmap comprises 1 bit for each pointer-sized word in the heap,
20 // recording whether a pointer is stored in that word or not. This bitmap
21 // is stored at the end of a span for small objects and is unrolled at
22 // runtime from type metadata for all larger objects. Objects without
23 // pointers have neither a bitmap nor associated type metadata.
25 // Bits in all cases correspond to words in little-endian order.
27 // For small objects, if s is the mspan for the span starting at "start",
28 // then s.heapBits() returns a slice containing the bitmap for the whole span.
29 // That is, s.heapBits()[0] holds the goarch.PtrSize*8 bits for the first
30 // goarch.PtrSize*8 words from "start" through "start+63*ptrSize" in the span.
31 // On a related note, small objects are always small enough that their bitmap
32 // fits in goarch.PtrSize*8 bits, so writing out bitmap data takes two bitmap
33 // writes at most (because object boundaries don't generally lie on
34 // s.heapBits()[i] boundaries).
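//
// As a hedged illustration (not code from this file), finding the pointer bit
// for the word at address "addr" in such a small-object span s amounts to:
//
//	word := (addr - s.base()) / goarch.PtrSize
//	isPtr := s.heapBits()[word/ptrBits]>>(word%ptrBits)&1 != 0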
36 // For larger objects, if t is the type for the object starting at "start",
37 // within some span whose mspan is s, then the bitmap at t.GCData is "tiled"
38 // from "start" through "start+s.elemsize".
39 // Specifically, the first bit of t.GCData corresponds to the word at "start",
40 // the second to the word after "start", and so on up to t.PtrBytes. At t.PtrBytes,
41 // we skip to "start+t.Size_" and begin again from there. This process is
42 // repeated until we hit "start+s.elemsize".
43 // This tiling algorithm supports array data, since the type always refers to
44 // the element type of the array. Single objects are considered the same as
45 // single-element arrays.
46 // The tiling algorithm may scan data past the end of the compiler-recognized
47 // object, but any unused data within the allocation slot (i.e. within s.elemsize)
48 // is zeroed, so the GC just observes nil pointers.
49 // Note that this "tiled" bitmap isn't stored anywhere; it is generated on-the-fly.
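//
// As an illustrative sketch (assuming the object's element type is t and
// ignoring any malloc header), the pointer bit for the word at byte offset
// "off" from "start" under this tiling is:
//
//	i := off % t.Size_ // offset within the current array element
//	isPtr := false
//	if i < t.PtrBytes {
//		j := i / goarch.PtrSize
//		isPtr = *addb(t.GCData, j/8)>>(j%8)&1 != 0
//	}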
51 // For objects without their own span, the type metadata is stored in the first
52 // word before the object at the beginning of the allocation slot. For objects
53 // with their own span, the type metadata is stored in the mspan.
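//
// A hedged sketch of how that metadata is recovered for an object whose
// allocation slot starts at "addr" (mirroring typePointersOfUnchecked below;
// span.largeType is the mspan field holding the metadata for objects with
// their own span):
//
//	if !heapBitsInSpan(span.elemsize) {
//		var typ *_type
//		if span.spanclass.sizeclass() != 0 {
//			typ = *(**_type)(unsafe.Pointer(addr)) // header in the slot's first word
//			// the object's data then starts at addr+mallocHeaderSize
//		} else {
//			typ = span.largeType
//		}
//	}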
55 // The bitmap for small unallocated objects in scannable spans is not maintained
62 "runtime/internal/sys"
67 // A malloc header is functionally a single type pointer, but
68 // we need to use 8 here to ensure 8-byte alignment of allocations
69 // on 32-bit platforms. It's wasteful, but a lot of code relies on
70 // 8-byte alignment for 8-byte atomics.
73 // The minimum object size that has a malloc header, exclusive.
75 // The size of this value controls overheads from the malloc header.
76 // The minimum size is bound by writeHeapBitsSmall, which assumes that the
77 // pointer bitmap for objects of a size smaller than this doesn't cross
78 // more than one pointer-word boundary. This sets an upper-bound on this
79 // value at the number of bits in a uintptr, multiplied by the pointer
82 // We choose a value here that has a natural cutover point in terms of memory
83 // overheads. This value just happens to be the maximum possible value this
86 // A span with heap bits in it will have 128 bytes of heap bits on 64-bit
87 // platforms, and 256 bytes of heap bits on 32-bit platforms. The first size
88 // class where malloc headers match this overhead for 64-bit platforms is
89 // 512 bytes (8 KiB / 512 bytes * 8 bytes-per-header = 128 bytes of overhead).
90 // On 32-bit platforms, this same point is the 256 byte size class
91 // (8 KiB / 256 bytes * 8 bytes-per-header = 256 bytes of overhead).
93 // Guaranteed to be exactly at a size class boundary. The reason this value is
94 // an exclusive minimum is subtle. Suppose we're allocating a 504-byte object
95 // and it's rounded up to 512 bytes for its size class. If minSizeForMallocHeader
96 // were 512 and an inclusive minimum, then comparing these two sizes against
97 // minSizeForMallocHeader would produce different results. In other words, the comparison
98 // would not be invariant to size-class rounding. Eschewing this property means a
99 // more complex check or possibly storing additional state to determine whether a
100 // span has malloc headers.
101 minSizeForMallocHeader = goarch.PtrSize * ptrBits
104 // heapBitsInSpan returns true if the size of an object implies its ptr/scalar
105 // data is stored at the end of the span, and is accessible via span.heapBits.
107 // Note: this works for both rounded-up sizes (span.elemsize) and unrounded
108 // type sizes because minSizeForMallocHeader is guaranteed to be at a size
112 func heapBitsInSpan(userSize uintptr) bool {
113 // N.B. minSizeForMallocHeader is an exclusive minimum so that this function is
114 // invariant under size-class rounding on its input.
115 return userSize <= minSizeForMallocHeader
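// For example, on 64-bit platforms minSizeForMallocHeader = 8*64 = 512 bytes,
// so heapBitsInSpan(504) == heapBitsInSpan(512) == true: a 504-byte request and
// its rounded-up 512-byte size class agree, while any larger size (before or
// after rounding) yields false and the object gets a malloc header.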
118 // heapArenaPtrScalar contains the per-heapArena pointer/scalar metadata for the GC.
119 type heapArenaPtrScalar struct {
120 // N.B. This is no longer necessary with allocation headers.
123 // typePointers is an iterator over the pointers in a heap object.
125 // Iteration through this type implements the tiling algorithm described at the
127 type typePointers struct {
128 // elem is the address of the current array element of type typ being iterated over.
129 // Objects that are not arrays are treated as single-element arrays, in which case
130 // this value does not change.
133 // addr is the address the iterator is currently working from and describes
134 // the address of the first word referenced by mask.
137 // mask is a bitmask where each bit corresponds to pointer-words after addr.
138 // Bit 0 is the pointer-word at addr, Bit 1 is the next word, and so on.
139 // If a bit is 1, then there is a pointer at that word.
140 // nextFast and next mask out bits in this mask as their pointers are processed.
143 // typ is a pointer to the type information for the heap object's type.
144 // This may be nil if the object is in a span where heapBitsInSpan(span.elemsize) is true.
148 // typePointersOf returns an iterator over all heap pointers in the range [addr, addr+size).
150 // addr and addr+size must be in the range [span.base(), span.limit).
152 // Note: addr+size must be passed as the limit argument to the iterator's next method on
153 // each iteration. This slightly awkward API is to allow typePointers to be destructured
156 // nosplit because it is used during write barriers and must not be preempted.
159 func (span *mspan) typePointersOf(addr, size uintptr) typePointers {
160 base := span.objBase(addr)
161 tp := span.typePointersOfUnchecked(base)
162 if base == addr && size == span.elemsize {
165 return tp.fastForward(addr-tp.addr, addr+size)
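// A hedged usage sketch (following the fast/slow pattern documented on nextFast
// below and the loops in bulkBarrierPreWrite) for visiting every pointer word
// in [addr, addr+size):
//
//	tp := span.typePointersOf(addr, size)
//	for {
//		var p uintptr
//		if tp, p = tp.nextFast(); p == 0 {
//			if tp, p = tp.next(addr + size); p == 0 {
//				break
//			}
//		}
//		// p is the address of a word the GC treats as a pointer slot.
//	}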
168 // typePointersOfUnchecked is like typePointersOf, but assumes addr is the base
169 // pointer of an object in span. It returns an iterator that generates all pointers
170 // in the range [addr, addr+span.elemsize).
172 // nosplit because it is used during write barriers and must not be preempted.
175 func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers {
176 const doubleCheck = false
177 if doubleCheck && span.objBase(addr) != addr {
178 print("runtime: addr=", addr, " base=", span.objBase(addr), "\n")
179 throw("typePointersOfUnchecked called with non-base address for object")
182 spc := span.spanclass
184 return typePointers{}
186 if heapBitsInSpan(span.elemsize) {
187 // Handle header-less objects.
188 return typePointers{elem: addr, addr: addr, mask: span.heapBitsSmallForAddr(addr)}
191 // All of these objects have a header.
193 if spc.sizeclass() != 0 {
194 // Pull the allocation header from the first word of the object.
195 typ = *(**_type)(unsafe.Pointer(addr))
196 addr += mallocHeaderSize
201 return typePointers{elem: addr, addr: addr, mask: readUintptr(gcdata), typ: typ}
204 // nextFast is the fast path of next. nextFast is written to be inlineable and,
205 // as the name implies, fast.
207 // Callers that are performance-critical should iterate using the following
212 // if tp, addr = tp.nextFast(); addr == 0 {
213 // if tp, addr = tp.next(limit); addr == 0 {
221 // nosplit because it is used during write barriers and must not be preempted.
224 func (tp typePointers) nextFast() (typePointers, uintptr) {
231 if goarch.PtrSize == 8 {
232 i = sys.TrailingZeros64(uint64(tp.mask))
234 i = sys.TrailingZeros32(uint32(tp.mask))
237 tp.mask ^= uintptr(1) << (i & (ptrBits - 1))
239 return tp, tp.addr + uintptr(i)*goarch.PtrSize
242 // next advances the pointers iterator, returning the updated iterator and
243 // the address of the next pointer.
245 // limit must be the same each time it is passed to next.
247 // nosplit because it is used during write barriers and must not be preempted.
250 func (tp typePointers) next(limit uintptr) (typePointers, uintptr) {
256 // Stop if we don't actually have type information.
258 return typePointers{}, 0
261 // Advance to the next element if necessary.
262 if tp.addr+goarch.PtrSize*ptrBits >= tp.elem+tp.typ.PtrBytes {
263 tp.elem += tp.typ.Size_
266 tp.addr += ptrBits * goarch.PtrSize
269 // Check if we've exceeded the limit with the last update.
270 if tp.addr >= limit {
271 return typePointers{}, 0
274 // Grab more bits and try again.
275 tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8))
276 if tp.addr+goarch.PtrSize*ptrBits > limit {
277 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
278 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
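// Illustration of the trim above (64-bit): if limit falls three words short of
// tp.addr+ptrBits*goarch.PtrSize, then bits = 3 and the top three bits of the
// freshly loaded mask are cleared, i.e. tp.mask &^= 0b111 << 61.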
283 // fastForward moves the iterator forward by n bytes. n must be a multiple
284 // of goarch.PtrSize. limit must be the same limit passed to next for this
287 // nosplit because it is used during write barriers and must not be preempted.
290 func (tp typePointers) fastForward(n, limit uintptr) typePointers {
291 // Basic bounds check.
292 target := tp.addr + n
294 return typePointers{}
297 // Handle small objects.
298 // Clear any bits before the target address.
299 tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
300 // Clear any bits past the limit.
301 if tp.addr+goarch.PtrSize*ptrBits > limit {
302 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
303 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
308 // Move up elem and addr.
309 // Offsets within an element are always at a ptrBits*goarch.PtrSize boundary.
310 if n >= tp.typ.Size_ {
311 // elem needs to be moved to the element containing
314 tp.elem += (tp.addr - tp.elem + n) / tp.typ.Size_ * tp.typ.Size_
315 tp.addr = tp.elem + alignDown(n-(tp.elem-oldelem), ptrBits*goarch.PtrSize)
317 tp.addr += alignDown(n, ptrBits*goarch.PtrSize)
320 if tp.addr-tp.elem >= tp.typ.PtrBytes {
321 // We're starting in the non-pointer area of an array.
322 // Move up to the next element.
323 tp.elem += tp.typ.Size_
325 tp.mask = readUintptr(tp.typ.GCData)
327 // We may have exceeded the limit after this. Bail just like next does.
328 if tp.addr >= limit {
329 return typePointers{}
332 // Grab the mask, but then clear any bits before the target address and any
333 // bits over the limit.
334 tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8))
335 tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
337 if tp.addr+goarch.PtrSize*ptrBits > limit {
338 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
339 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
344 // objBase returns the base pointer for the object containing addr in span.
346 // Assumes that addr points into a valid part of span (span.base() <= addr < span.limit).
349 func (span *mspan) objBase(addr uintptr) uintptr {
350 return span.base() + span.objIndex(addr)*span.elemsize
353 // bulkBarrierPreWrite executes a write barrier
354 // for every pointer slot in the memory range [src, src+size),
355 // using pointer/scalar information from [dst, dst+size).
356 // This executes the write barriers necessary before a memmove.
357 // src, dst, and size must be pointer-aligned.
358 // The range [dst, dst+size) must lie within a single object.
359 // It does not perform the actual writes.
361 // As a special case, src == 0 indicates that this is being used for a
362 // memclr. bulkBarrierPreWrite will pass 0 for the src of each write
365 // Callers should call bulkBarrierPreWrite immediately before
366 // calling memmove(dst, src, size). This function is marked nosplit
367 // to avoid being preempted; the GC must not stop the goroutine
368 // between the memmove and the execution of the barriers.
369 // The caller is also responsible for cgo pointer checks if this
370 // may be writing Go pointers into non-Go memory.
372 // The pointer bitmap is not maintained for allocations containing
373 // no pointers at all; any caller of bulkBarrierPreWrite must first
374 // make sure the underlying allocation contains pointers, usually
375 // by checking typ.PtrBytes.
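//
// A rough calling-pattern sketch (illustrative only; typedmemmove is the usual
// caller, and the exact size it passes is an assumption here):
//
//	if writeBarrier.enabled && typ.PtrBytes != 0 {
//		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes) // barriers first
//	}
//	memmove(dst, src, typ.Size_) // then the copy, with no preemption in between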
377 // Callers must perform cgo checks if goexperiment.CgoCheck2.
380 func bulkBarrierPreWrite(dst, src, size uintptr) {
381 if (dst|src|size)&(goarch.PtrSize-1) != 0 {
382 throw("bulkBarrierPreWrite: unaligned arguments")
384 if !writeBarrier.enabled {
389 // If dst is a global, use the data or BSS bitmaps to
390 // execute write barriers.
391 for _, datap := range activeModules() {
392 if datap.data <= dst && dst < datap.edata {
393 bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
397 for _, datap := range activeModules() {
398 if datap.bss <= dst && dst < datap.ebss {
399 bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
404 } else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
405 // dst was heap memory at some point, but isn't now.
406 // It can't be a global. It must be either our stack,
407 // or in the case of direct channel sends, it could be
408 // another stack. Either way, no need for barriers.
409 // This will also catch if dst is in a freed span,
410 // though that should never happen.
413 buf := &getg().m.p.ptr().wbBuf
415 tp := s.typePointersOf(dst, size)
419 if tp, addr = tp.next(dst + size); addr == 0 {
422 dstx := (*uintptr)(unsafe.Pointer(addr))
429 if tp, addr = tp.next(dst + size); addr == 0 {
432 dstx := (*uintptr)(unsafe.Pointer(addr))
433 srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
441 // bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
442 // does not execute write barriers for [dst, dst+size).
444 // In addition to the requirements of bulkBarrierPreWrite
445 // callers need to ensure [dst, dst+size) is zeroed.
447 // This is used for special cases where e.g. dst was just
448 // created and zeroed with malloc.
451 func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
452 if (dst|src|size)&(goarch.PtrSize-1) != 0 {
453 throw("bulkBarrierPreWrite: unaligned arguments")
455 if !writeBarrier.enabled {
458 buf := &getg().m.p.ptr().wbBuf
459 tp := spanOf(dst).typePointersOf(dst, size)
462 if tp, addr = tp.next(dst + size); addr == 0 {
465 srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
471 // initHeapBits initializes the heap bitmap for a span.
473 // TODO(mknyszek): This should set the heap bits for single pointer
474 // allocations eagerly to avoid calling heapSetType at allocation time,
475 // just to write one bit.
476 func (s *mspan) initHeapBits(forceClear bool) {
477 if (!s.spanclass.noscan() && heapBitsInSpan(s.elemsize)) || s.isUserArenaChunk {
485 type writeHeapBits struct {
486 offset uintptr // offset in span that the low bit of mask represents the pointer state of.
487 mask uintptr // some pointer bits starting at the span offset above.
488 valid uintptr // number of bits in mask that are valid (including low)
489 low uintptr // number of low-order bits to not overwrite
492 func (s *mspan) writeHeapBits(addr uintptr) (h writeHeapBits) {
493 offset := addr - s.base()
495 // We start writing bits maybe in the middle of a heap bitmap word.
496 // Remember how many bits into the word we started, so we can be sure
497 // not to overwrite the previous bits.
498 h.low = offset / goarch.PtrSize % ptrBits
500 // round down to heap word that starts the bitmap word.
501 h.offset = offset - h.low*goarch.PtrSize
503 // We don't have any bits yet.
510 // write appends the pointerness of the next valid pointer slots
511 // using the low valid bits of bits. 1=pointer, 0=scalar.
512 func (h writeHeapBits) write(s *mspan, bits, valid uintptr) writeHeapBits {
513 if h.valid+valid <= ptrBits {
514 // Fast path - just accumulate the bits.
515 h.mask |= bits << h.valid
519 // Too many bits to fit in this word. Write the current word
520 // out and move on to the next word.
522 data := h.mask | bits<<h.valid // mask for this word
523 h.mask = bits >> (ptrBits - h.valid) // leftover for next word
524 h.valid += valid - ptrBits // have h.valid+valid bits, writing ptrBits of them
526 // Flush mask to the memory bitmap.
527 idx := h.offset / (ptrBits * goarch.PtrSize)
528 m := uintptr(1)<<h.low - 1
529 bitmap := s.heapBits()
530 bitmap[idx] = bitmap[idx]&m | data
531 // Note: no synchronization required for this write because
532 // the allocator has exclusive access to the page, and the bitmap
533 // entries are all for a single page. Also, visibility of these
534 // writes is guaranteed by the publication barrier in mallocgc.
536 // Move to next word of bitmap.
537 h.offset += ptrBits * goarch.PtrSize
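// Worked example of the spill path above (illustrative, 64-bit): with
// h.valid = 60, writing 10 more bits stores data = h.mask | bits<<60 (the low
// 4 new bits complete the current bitmap word), carries the rest over as
// h.mask = bits>>4, and leaves h.valid = 60 + 10 - 64 = 6.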
542 // Add padding of size bytes.
543 func (h writeHeapBits) pad(s *mspan, size uintptr) writeHeapBits {
547 words := size / goarch.PtrSize
548 for words > ptrBits {
549 h = h.write(s, 0, ptrBits)
552 return h.write(s, 0, words)
555 // Flush the bits that have been written, and add zeros as needed
556 // to cover the full object [addr, addr+size).
557 func (h writeHeapBits) flush(s *mspan, addr, size uintptr) {
558 offset := addr - s.base()
560 // zeros counts the number of bits needed to represent the object minus the
561 // number of bits we've already written. This is the number of 0 bits
562 // that need to be added.
563 zeros := (offset+size-h.offset)/goarch.PtrSize - h.valid
565 // Add zero bits up to the bitmap word boundary
567 z := ptrBits - h.valid
575 // Find word in bitmap that we're going to write.
576 bitmap := s.heapBits()
577 idx := h.offset / (ptrBits * goarch.PtrSize)
579 // Write remaining bits.
580 if h.valid != h.low {
581 m := uintptr(1)<<h.low - 1 // don't clear existing bits below "low"
582 m |= ^(uintptr(1)<<h.valid - 1) // don't clear existing bits above "valid"
583 bitmap[idx] = bitmap[idx]&m | h.mask
589 // Advance to next bitmap word.
590 h.offset += ptrBits * goarch.PtrSize
592 // Continue on writing zeros for the rest of the object.
593 // For standard use of the ptr bits this is not required, as
594 // the bits are read from the beginning of the object. Some uses,
595 // like noscan spans, oblets, bulk write barriers, and cgocheck, might
596 // start mid-object, so these writes are still required.
599 idx := h.offset / (ptrBits * goarch.PtrSize)
601 bitmap[idx] &^= uintptr(1)<<zeros - 1
603 } else if zeros == ptrBits {
610 h.offset += ptrBits * goarch.PtrSize
614 // heapBits returns the heap ptr/scalar bits stored at the end of the span for
615 // small object spans.
617 // heapBitsInSpan(span.elemsize) or span.isUserArenaChunk must be true.
620 func (span *mspan) heapBits() []uintptr {
621 const doubleCheck = false
623 if doubleCheck && !span.isUserArenaChunk {
624 if span.spanclass.noscan() {
625 throw("heapBits called for noscan")
627 if span.elemsize > minSizeForMallocHeader {
628 throw("heapBits called for span class that should have a malloc header")
631 // Find the bitmap at the end of the span.
633 // Nearly every span with heap bits is exactly one page in size. Arenas are the only exception.
634 if span.npages == 1 {
635 // This will be inlined and constant-folded down.
636 return heapBitsSlice(span.base(), pageSize)
638 return heapBitsSlice(span.base(), span.npages*pageSize)
641 // Helper for constructing a slice for the span's heap bits.
644 func heapBitsSlice(spanBase, spanSize uintptr) []uintptr {
645 bitmapSize := spanSize / goarch.PtrSize / 8
646 elems := int(bitmapSize / goarch.PtrSize)
647 var sl notInHeapSlice
648 sl = notInHeapSlice{(*notInHeap)(unsafe.Pointer(spanBase + spanSize - bitmapSize)), elems, elems}
649 return *(*[]uintptr)(unsafe.Pointer(&sl))
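// Worked numbers for heapBitsSlice (64-bit, one 8 KiB page): bitmapSize =
// 8192/8/8 = 128 bytes and elems = 128/8 = 16 uintptr words, occupying the last
// 128 bytes of the span, matching the 128-byte overhead quoted at
// minSizeForMallocHeader.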
652 // heapBitsSmallForAddr loads the heap bits for the object stored at addr from span.heapBits.
654 // addr must be the base pointer of an object in the span. heapBitsInSpan(span.elemsize)
658 func (span *mspan) heapBitsSmallForAddr(addr uintptr) uintptr {
659 spanSize := span.npages * pageSize
660 bitmapSize := spanSize / goarch.PtrSize / 8
661 hbits := (*byte)(unsafe.Pointer(span.base() + spanSize - bitmapSize))
663 // These objects are always small enough that their bitmaps
664 // fit in a single word, so just load the word or two we need.
666 // Mirrors mspan.writeHeapBitsSmall.
668 // We should be using heapBits(), but unfortunately it introduces
669 // both bounds-check panics and a throw, which causes us to exceed
670 // the nosplit limit in quite a few cases.
671 i := (addr - span.base()) / goarch.PtrSize / ptrBits
672 j := (addr - span.base()) / goarch.PtrSize % ptrBits
673 bits := span.elemsize / goarch.PtrSize
674 word0 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+0))))
675 word1 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+1))))
678 if j+bits > ptrBits {
681 bits1 := bits - bits0
683 read |= (*word1 & ((1 << bits1) - 1)) << bits0
686 read = (*word0 >> j) & ((1 << bits) - 1)
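// Worked example of the split read above (illustrative, 64-bit): with
// elemsize = 48, bits = 6. An object at byte offset 480 into the span has
// j = 60, so j+bits = 66 straddles a bitmap word: the low 4 bits come from
// *word0 >> 60 and the remaining 2 bits from word1 are placed above them.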
691 // writeHeapBitsSmall writes the heap bits for small objects whose ptr/scalar data is
692 // stored as a bitmap at the end of the span.
694 // Assumes dataSize is <= ptrBits*goarch.PtrSize. x must be a pointer into the span.
695 // heapBitsInSpan(dataSize) must be true. dataSize must be >= typ.Size_.
698 func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize uintptr) {
699 // The objects here are always really small, so a single load is sufficient.
700 src0 := readUintptr(typ.GCData)
702 // Create repetitions of the bitmap if we have a small array.
703 bits := span.elemsize / goarch.PtrSize
704 scanSize = typ.PtrBytes
708 src = (1 << (dataSize / goarch.PtrSize)) - 1
710 for i := typ.Size_; i < dataSize; i += typ.Size_ {
711 src |= src0 << (i / goarch.PtrSize)
712 scanSize += typ.Size_
716 // Since we're never writing more than one uintptr's worth of bits, we're either going
717 // to do one or two writes.
718 dst := span.heapBits()
719 o := (x - span.base()) / goarch.PtrSize
722 if j+bits > ptrBits {
725 bits1 := bits - bits0
726 dst[i+0] = dst[i+0]&(^uintptr(0)>>bits0) | (src << j)
727 dst[i+1] = dst[i+1]&^((1<<bits1)-1) | (src >> bits0)
730 dst[i] = (dst[i] &^ (((1 << bits) - 1) << j)) | (src << j)
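// Worked example of the repetition loop above (illustrative, 64-bit): for
// typ.Size_ = 16 and typ.PtrBytes = 8 (a pointer word followed by a scalar
// word) with dataSize = 48, src0 = 0b01 and the loop yields src = 0b010101,
// one two-bit pattern per array element.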
733 const doubleCheck = false
735 srcRead := span.heapBitsSmallForAddr(x)
737 print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n")
738 print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n")
739 print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n")
740 throw("bad pointer bits written for small object")
746 // For !goexperiment.AllocHeaders.
747 func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
750 // heapSetType records that the new allocation [x, x+size)
751 // holds in [x, x+dataSize) one or more values of type typ.
752 // (The number of values is given by dataSize / typ.Size_.)
753 // If dataSize < size, the fragment [x+dataSize, x+size) is
754 // recorded as non-pointer data.
755 // It is known that the type has pointers somewhere;
756 // malloc does not call heapSetType when there are no pointers.
758 // There can be read-write races between heapSetType and things
759 // that read the heap metadata like scanobject. However, since
760 // heapSetType is only used for objects that have not yet been
761 // made reachable, readers will ignore bits being modified by this
762 // function. This does mean this function cannot transiently modify
763 // shared memory that belongs to neighboring objects. Also, on weakly-ordered
764 // machines, callers must execute a store/store (publication) barrier
765 // between calling this function and making the object reachable.
766 func heapSetType(x, dataSize uintptr, typ *_type, header **_type, span *mspan) (scanSize uintptr) {
767 const doubleCheck = false
771 if doubleCheck && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(span.elemsize)) {
772 throw("tried to write heap bits, but no heap bits in span")
774 // Handle the case where we have no malloc header.
775 scanSize = span.writeHeapBitsSmall(x, dataSize, typ)
777 if typ.Kind_&kindGCProg != 0 {
778 // Allocate space to unroll the gcprog. This space will consist of
779 // a dummy _type value and the unrolled gcprog. The dummy _type will
780 // refer to the bitmap, and the mspan will refer to the dummy _type.
781 if span.spanclass.sizeclass() != 0 {
782 throw("GCProg for type that isn't large")
784 spaceNeeded := alignUp(unsafe.Sizeof(_type{}), goarch.PtrSize)
785 heapBitsOff := spaceNeeded
786 spaceNeeded += alignUp(typ.PtrBytes/goarch.PtrSize/8, goarch.PtrSize)
787 npages := alignUp(spaceNeeded, pageSize) / pageSize
790 progSpan = mheap_.allocManual(npages, spanAllocPtrScalarBits)
791 memclrNoHeapPointers(unsafe.Pointer(progSpan.base()), progSpan.npages*pageSize)
793 // Write a dummy _type in the new space.
795 // We only need to write size, PtrBytes, and GCData, since that's all
796 // the GC cares about.
797 gctyp = (*_type)(unsafe.Pointer(progSpan.base()))
798 gctyp.Kind_ |= kindGCProg
799 gctyp.Size_ = typ.Size_
800 gctyp.PtrBytes = typ.PtrBytes
801 gctyp.GCData = (*byte)(add(unsafe.Pointer(progSpan.base()), heapBitsOff))
803 // Expand the GC program into the space reserved after the dummy _type.
804 runGCProg(addb(typ.GCData, 4), gctyp.GCData)
807 // Write out the header.
809 scanSize = span.elemsize
813 doubleCheckHeapPointers(x, dataSize, gctyp, header, span)
815 // To exercise the less common path more often, generate
816 // a random interior pointer and make sure iterating from
817 // that point works correctly too.
818 maxIterBytes := span.elemsize
820 maxIterBytes = dataSize
822 off := alignUp(uintptr(fastrand())%dataSize, goarch.PtrSize)
823 size := dataSize - off
825 off -= goarch.PtrSize
826 size += goarch.PtrSize
829 size -= alignDown(uintptr(fastrand())%size, goarch.PtrSize)
831 size = goarch.PtrSize
833 // Round the size up to a multiple of the type's size.
834 size = (size + gctyp.Size_ - 1) / gctyp.Size_ * gctyp.Size_
835 if interior+size > x+maxIterBytes {
836 size = x + maxIterBytes - interior
838 doubleCheckHeapPointersInterior(x, interior, size, dataSize, gctyp, header, span)
843 func doubleCheckHeapPointers(x, dataSize uintptr, typ *_type, header **_type, span *mspan) {
844 // Check that scanning the full object works.
845 tp := span.typePointersOfUnchecked(span.objBase(x))
846 maxIterBytes := span.elemsize
848 maxIterBytes = dataSize
851 for i := uintptr(0); i < maxIterBytes; i += goarch.PtrSize {
852 // Compute the pointer bit we want at offset i.
854 if i < span.elemsize {
856 if off < typ.PtrBytes {
857 j := off / goarch.PtrSize
858 want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
863 tp, addr = tp.next(x + span.elemsize)
865 println("runtime: found bad iterator")
868 print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
875 tp, addr = tp.next(x + span.elemsize)
879 println("runtime: extra pointer:", hex(addr))
881 print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, " hasGCProg=", typ.Kind_&kindGCProg != 0, "\n")
882 print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, "\n")
883 print("runtime: typ=", unsafe.Pointer(typ), " typ.PtrBytes=", typ.PtrBytes, "\n")
884 print("runtime: limit=", hex(x+span.elemsize), "\n")
885 tp = span.typePointersOfUnchecked(x)
889 if tp, addr = tp.next(x + span.elemsize); addr == 0 {
890 println("runtime: would've stopped here")
894 print("runtime: addr=", hex(addr), "\n")
897 throw("heapSetType: pointer entry not correct")
900 func doubleCheckHeapPointersInterior(x, interior, size, dataSize uintptr, typ *_type, header **_type, span *mspan) {
903 print("runtime: interior=", hex(interior), " x=", hex(x), "\n")
904 throw("found bad interior pointer")
907 tp := span.typePointersOf(interior, size)
908 for i := off; i < off+size; i += goarch.PtrSize {
909 // Compute the pointer bit we want at offset i.
911 if i < span.elemsize {
913 if off < typ.PtrBytes {
914 j := off / goarch.PtrSize
915 want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
920 tp, addr = tp.next(interior + size)
922 println("runtime: found bad iterator")
926 print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
933 tp, addr = tp.next(interior + size)
937 println("runtime: extra pointer:", hex(addr))
939 print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, "\n")
940 print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, " interior=", hex(interior), " size=", size, "\n")
941 print("runtime: limit=", hex(interior+size), "\n")
942 tp = span.typePointersOf(interior, size)
946 if tp, addr = tp.next(interior + size); addr == 0 {
947 println("runtime: would've stopped here")
951 print("runtime: addr=", hex(addr), "\n")
955 print("runtime: want: ")
956 for i := off; i < off+size; i += goarch.PtrSize {
957 // Compute the pointer bit we want at offset i.
961 if off < typ.PtrBytes {
962 j := off / goarch.PtrSize
963 want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
974 throw("heapSetType: pointer entry not correct")
977 func dumpTypePointers(tp typePointers) {
978 print("runtime: tp.elem=", hex(tp.elem), " tp.typ=", unsafe.Pointer(tp.typ), "\n")
979 print("runtime: tp.addr=", hex(tp.addr), " tp.mask=")
980 for i := uintptr(0); i < ptrBits; i++ {
981 if tp.mask&(uintptr(1)<<i) != 0 {
992 // Returns GC type info for the pointer stored in ep for testing.
993 // If ep points to the stack, only static live information will be returned
994 // (i.e. not for objects which are only dynamically live stack objects).
995 func getgcmask(ep any) (mask []byte) {
1000 for _, datap := range activeModules() {
1002 if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
1003 bitmap := datap.gcdatamask.bytedata
1004 n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
1005 mask = make([]byte, n/goarch.PtrSize)
1006 for i := uintptr(0); i < n; i += goarch.PtrSize {
1007 off := (uintptr(p) + i - datap.data) / goarch.PtrSize
1008 mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
1014 if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
1015 bitmap := datap.gcbssmask.bytedata
1016 n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
1017 mask = make([]byte, n/goarch.PtrSize)
1018 for i := uintptr(0); i < n; i += goarch.PtrSize {
1019 off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
1020 mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
1027 if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
1028 if s.spanclass.noscan() {
1031 limit := base + s.elemsize
1033 // Move the base up to the iterator's start, because
1034 // we want to hide evidence of a malloc header from the
1036 tp := s.typePointersOfUnchecked(base)
1039 // Unroll the full bitmap the GC would actually observe.
1040 mask = make([]byte, (limit-base)/goarch.PtrSize)
1043 if tp, addr = tp.next(limit); addr == 0 {
1046 mask[(addr-base)/goarch.PtrSize] = 1
1049 // Double-check that every part of the ptr/scalar we're not
1050 // showing the caller is zeroed. This keeps us honest that
1051 // that information is actually irrelevant.
1052 for i := limit; i < s.elemsize; i++ {
1053 if *(*byte)(unsafe.Pointer(i)) != 0 {
1054 throw("found non-zeroed tail of allocation")
1058 // Callers expect this mask to end at the last pointer.
1059 for len(mask) > 0 && mask[len(mask)-1] == 0 {
1060 mask = mask[:len(mask)-1]
1066 if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
1069 for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
1070 if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
1076 locals, _, _ := u.frame.getStackMap(false)
1080 size := uintptr(locals.n) * goarch.PtrSize
1081 n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
1082 mask = make([]byte, n/goarch.PtrSize)
1083 for i := uintptr(0); i < n; i += goarch.PtrSize {
1084 off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
1085 mask[i/goarch.PtrSize] = locals.ptrbit(off)
1091 // otherwise, not something the GC knows about.
1092 // possibly read-only data, like malloc(0).
1093 // must not have pointers
1097 // userArenaHeapBitsSetType is the equivalent of heapSetType but for
1098 // non-slice-backing-store Go values allocated in a user arena chunk. It
1099 // sets up the type metadata for the value with type typ allocated at address ptr.
1100 // base is the base address of the arena chunk.
1101 func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, s *mspan) {
1103 h := s.writeHeapBits(uintptr(ptr))
1105 p := typ.GCData // start of 1-bit pointer mask (or GC program)
1106 var gcProgBits uintptr
1107 if typ.Kind_&kindGCProg != 0 {
1108 // Expand gc program, using the object itself for storage.
1109 gcProgBits = runGCProg(addb(p, 4), (*byte)(ptr))
1112 nb := typ.PtrBytes / goarch.PtrSize
1114 for i := uintptr(0); i < nb; i += ptrBits {
1119 h = h.write(s, readUintptr(addb(p, i/8)), k)
1121 // Note: we call pad here to ensure we emit explicit 0 bits
1122 // for the pointerless tail of the object. This ensures that
1123 // there's only a single noMorePtrs mark for the next object
1124 // to clear. We don't need to do this to clear stale noMorePtrs
1125 // markers from previous uses because arena chunk pointer bitmaps
1126 // are always fully cleared when reused.
1127 h = h.pad(s, typ.Size_-typ.PtrBytes)
1128 h.flush(s, uintptr(ptr), typ.Size_)
1130 if typ.Kind_&kindGCProg != 0 {
1131 // Zero out temporary ptrmask buffer inside object.
1132 memclrNoHeapPointers(ptr, (gcProgBits+7)/8)
1135 // Update the PtrBytes value in the type information. After this
1136 // point, the GC will observe the new bitmap.
1137 s.largeType.PtrBytes = uintptr(ptr) - base + typ.PtrBytes
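// For example (illustrative numbers): if a value with typ.PtrBytes = 24 is
// placed 4 KiB into the chunk, largeType.PtrBytes becomes 4096+24 = 4120, so
// the chunk's tiled bitmap covers every pointer written into it so far.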
1139 // Double-check that the bitmap was written out correctly.
1140 const doubleCheck = false
1142 doubleCheckHeapPointersInterior(uintptr(ptr), uintptr(ptr), typ.Size_, typ.Size_, typ, &s.largeType, s)
1146 // For !goexperiment.AllocHeaders, to pass TestIntendedInlining.
1147 func writeHeapBitsForAddr() {
1148 panic("not implemented")
1151 // For !goexperiment.AllocHeaders.
1152 type heapBits struct {
1155 // For !goexperiment.AllocHeaders.
1158 func heapBitsForAddr(addr, size uintptr) heapBits {
1159 panic("not implemented")
1162 // For !goexperiment.AllocHeaders.
1165 func (h heapBits) next() (heapBits, uintptr) {
1166 panic("not implemented")
1169 // For !goexperiment.AllocHeaders.
1172 func (h heapBits) nextFast() (heapBits, uintptr) {
1173 panic("not implemented")