1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Garbage collector: type and heap bitmaps.
7 // Stack, data, and bss bitmaps
9 // Stack frames and global variables in the data and bss sections are
10 // described by bitmaps with 1 bit per pointer-sized word. A "1" bit
11 // means the word is a live pointer to be visited by the GC (referred to
12 // as "pointer"). A "0" bit means the word should be ignored by GC
13 // (referred to as "scalar", though it could be a dead pointer value).
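//
// For example (an illustrative sketch, not code from this file): with a
// bitvector bv describing a data/bss section or a stack frame, word i holds
// a live pointer iff
//
//	*addb(bv.bytedata, uintptr(i)/8)>>(uintptr(i)%8)&1 != 0
//
// This is the same indexing used by bulkBarrierBitmap and getgcmask below.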
17 // The heap bitmap comprises 1 bit for each pointer-sized word in the heap,
18 // recording whether a pointer is stored in that word or not. This bitmap
19 // is stored in the heapArena metadata backing each heap arena.
20 // That is, if ha is the heapArena for the arena starting at "start",
21 // then ha.bitmap[0] holds the 64 bits for the 64 words "start"
22 // through start+63*ptrSize, ha.bitmap[1] holds the entries for
23 // start+64*ptrSize through start+127*ptrSize, and so on.
24 // Bits correspond to words in little-endian order. ha.bitmap[0]&1 represents
25 // the word at "start", ha.bitmap[0]>>1&1 represents the word at start+8, etc.
26 // (For 32-bit platforms, s/64/32/.)
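//
// For example (an illustrative sketch, assuming a 64-bit platform): whether
// the heap word at address p in the arena starting at "start" currently
// holds a pointer can be read as
//
//	word := (p - start) / goarch.PtrSize
//	isPtr := ha.bitmap[word/64]>>(word%64)&1 != 0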
28 // We also keep a noMorePtrs bitmap which allows us to stop scanning
29 // the heap bitmap early in certain situations. If ha.noMorePtrs[i]>>j&1
30 // is 1, then the object containing the last word described by ha.bitmap[8*i+j]
31 // has no more pointers beyond those described by ha.bitmap[8*i+j].
32 // If ha.noMorePtrs[i]>>j&1 is set, the entries in ha.bitmap[8*i+j+1] and
33 // beyond must all be zero until the start of the next object.
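//
// For example (a sketch, using the same 64-bit layout as above): a scanner
// that has just consumed ha.bitmap[k] for some object can stop early when
//
//	ha.noMorePtrs[k/8]>>(k%8)&1 != 0
//
// because the remaining bitmap words for that object are guaranteed to be zero.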
35 // The bitmap for noscan spans is set to all zero at span allocation time.
37 // The bitmap for unallocated objects in scannable spans is not maintained (it can be junk).
44 "runtime/internal/atomic"
45 "runtime/internal/sys"
49 // addb returns the byte pointer p+n.
53 func addb(p *byte, n uintptr) *byte {
54 // Note: wrote out full expression instead of calling add(p, n)
55 // to reduce the number of temporaries generated by the
56 // compiler for this trivial expression during inlining.
57 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
60 // subtractb returns the byte pointer p-n.
64 func subtractb(p *byte, n uintptr) *byte {
65 // Note: wrote out full expression instead of calling add(p, -n)
66 // to reduce the number of temporaries generated by the
67 // compiler for this trivial expression during inlining.
68 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
71 // add1 returns the byte pointer p+1.
75 func add1(p *byte) *byte {
76 // Note: wrote out full expression instead of calling addb(p, 1)
77 // to reduce the number of temporaries generated by the
78 // compiler for this trivial expression during inlining.
79 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
82 // subtract1 returns the byte pointer p-1.
84 // nosplit because it is used during write barriers and must not be preempted.
88 func subtract1(p *byte) *byte {
89 // Note: wrote out full expression instead of calling subtractb(p, 1)
90 // to reduce the number of temporaries generated by the
91 // compiler for this trivial expression during inlining.
92 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
95 // markBits provides access to the mark bit for an object in the heap.
96 // bytep points to the byte holding the mark bit.
97 // mask is a byte with a single bit set that can be &ed with *bytep
98 // to see if the bit has been set.
99 // *m.bytep&m.mask != 0 indicates the mark bit is set.
100 // index can be used along with span information to generate
101 // the address of the object in the heap.
102 // We maintain one set of mark bits for allocation and one for marking purposes.
104 type markBits struct {
111 func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
112 bytep, mask := s.allocBits.bitp(allocBitIndex)
113 return markBits{bytep, mask, allocBitIndex}
116 // refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
117 // and negates them so that ctz (count trailing zeros) instructions
118 // can be used. It then places these 8 bytes into the cached 64-bit s.allocCache.
120 func (s *mspan) refillAllocCache(whichByte uintptr) {
121 bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
123 aCache |= uint64(bytes[0])
124 aCache |= uint64(bytes[1]) << (1 * 8)
125 aCache |= uint64(bytes[2]) << (2 * 8)
126 aCache |= uint64(bytes[3]) << (3 * 8)
127 aCache |= uint64(bytes[4]) << (4 * 8)
128 aCache |= uint64(bytes[5]) << (5 * 8)
129 aCache |= uint64(bytes[6]) << (6 * 8)
130 aCache |= uint64(bytes[7]) << (7 * 8)
131 s.allocCache = ^aCache
134 // nextFreeIndex returns the index of the next free object in s at
135 // or after s.freeindex.
136 // There are hardware instructions that can be used to make this
137 // faster if profiling warrants it.
138 func (s *mspan) nextFreeIndex() uintptr {
139 sfreeindex := s.freeindex
141 if sfreeindex == snelems {
144 if sfreeindex > snelems {
145 throw("s.freeindex > s.nelems")
148 aCache := s.allocCache
150 bitIndex := sys.TrailingZeros64(aCache)
152 // Move index to start of next cached bits.
153 sfreeindex = (sfreeindex + 64) &^ (64 - 1)
154 if sfreeindex >= snelems {
155 s.freeindex = snelems
158 whichByte := sfreeindex / 8
159 // Refill s.allocCache with the next 64 alloc bits.
160 s.refillAllocCache(whichByte)
161 aCache = s.allocCache
162 bitIndex = sys.TrailingZeros64(aCache)
163 // nothing available in cached bits
164 // grab the next 8 bytes and try again.
166 result := sfreeindex + uintptr(bitIndex)
167 if result >= snelems {
168 s.freeindex = snelems
172 s.allocCache >>= uint(bitIndex + 1)
173 sfreeindex = result + 1
175 if sfreeindex%64 == 0 && sfreeindex != snelems {
176 // We just incremented s.freeindex so it isn't 0.
177 // As each 1 in s.allocCache was encountered and used for allocation
178 // it was shifted away. At this point s.allocCache contains all 0s.
179 // Refill s.allocCache so that it corresponds
180 // to the bits at s.allocBits starting at s.freeindex.
181 whichByte := sfreeindex / 8
182 s.refillAllocCache(whichByte)
184 s.freeindex = sfreeindex
188 // isFree reports whether the index'th object in s is unallocated.
190 // The caller must ensure s.state is mSpanInUse, and there must have
191 // been no preemption points since ensuring this (which could allow a
192 // GC transition, which would allow the state to change).
193 func (s *mspan) isFree(index uintptr) bool {
194 if index < s.freeindex {
197 bytep, mask := s.allocBits.bitp(index)
198 return *bytep&mask == 0
201 // divideByElemSize returns n/s.elemsize.
202 // n must be within [0, s.npages*_PageSize),
203 // or may be exactly s.npages*_PageSize
204 // if s.elemsize is from sizeclasses.go.
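// For example (an illustrative sketch; the actual divMul values come from
// sizeclasses.go): with s.elemsize = 8 and s.divMul = 1<<29, so that
// divMul/2^32 is exactly 1/8, n = 24 gives
//
//	q = (24 * (1 << 29)) >> 32 = 3
//
// which equals 24/8 as required.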
205 func (s *mspan) divideByElemSize(n uintptr) uintptr {
206 const doubleCheck = false
208 // See explanation in mksizeclasses.go's computeDivMagic.
209 q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)
211 if doubleCheck && q != n/s.elemsize {
212 println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
213 throw("bad magic division")
218 func (s *mspan) objIndex(p uintptr) uintptr {
219 return s.divideByElemSize(p - s.base())
222 func markBitsForAddr(p uintptr) markBits {
224 objIndex := s.objIndex(p)
225 return s.markBitsForIndex(objIndex)
228 func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
229 bytep, mask := s.gcmarkBits.bitp(objIndex)
230 return markBits{bytep, mask, objIndex}
233 func (s *mspan) markBitsForBase() markBits {
234 return markBits{&s.gcmarkBits.x, uint8(1), 0}
237 // isMarked reports whether mark bit m is set.
238 func (m markBits) isMarked() bool {
239 return *m.bytep&m.mask != 0
242 // setMarked sets the marked bit in the markbits, atomically.
243 func (m markBits) setMarked() {
244 // Might be racing with other updates, so use atomic update always.
245 // We used to be clever here and use a non-atomic update in certain
246 // cases, but it's not worth the risk.
247 atomic.Or8(m.bytep, m.mask)
250 // setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
251 func (m markBits) setMarkedNonAtomic() {
255 // clearMarked clears the marked bit in the markbits, atomically.
256 func (m markBits) clearMarked() {
257 // Might be racing with other updates, so use atomic update always.
258 // We used to be clever here and use a non-atomic update in certain
259 // cases, but it's not worth the risk.
260 atomic.And8(m.bytep, ^m.mask)
263 // markBitsForSpan returns the markBits for the span base address base.
264 func markBitsForSpan(base uintptr) (mbits markBits) {
265 mbits = markBitsForAddr(base)
267 throw("markBitsForSpan: unaligned start")
272 // advance advances the markBits to the next object in the span.
273 func (m *markBits) advance() {
275 m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
283 // clobberdeadPtr is a special value that is used by the compiler to
284 // clobber dead stack slots when the -clobberdead flag is set.
285 const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))
287 // badPointer throws a fatal error reporting a bad pointer found in the heap.
288 func badPointer(s *mspan, p, refBase, refOff uintptr) {
289 // Typically this indicates an incorrect use
290 // of unsafe or cgo to store a bad pointer in
291 // the Go heap. It may also indicate a runtime bug.
294 // TODO(austin): We could be more aggressive
295 // and detect pointers to unallocated objects
296 // in allocated spans.
298 print("runtime: pointer ", hex(p))
300 state := s.state.get()
301 if state != mSpanInUse {
302 print(" to unallocated span")
304 print(" to unused region of span")
306 print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
310 print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
311 gcDumpObject("object", refBase, refOff)
313 getg().m.traceback = 2
314 throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
317 // findObject returns the base address for the heap object containing
318 // the address p, the object's span, and the index of the object in s.
319 // If p does not point into a heap object, it returns base == 0.
321 // If p is an invalid heap pointer and debug.invalidptr != 0,
322 // findObject panics.
324 // refBase and refOff optionally give the base address of the object
325 // in which the pointer p was found and the byte offset at which it
326 // was found. These are used for error reporting.
328 // It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
329 // Since p is a uintptr, it would not be adjusted if the stack were to move.
332 func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
334 // If s is nil, the virtual address has never been part of the heap.
335 // This pointer may be to some mmap'd region, so we allow it.
337 if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
338 // Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
339 // as they are the only platforms where the compiler's clobberdead mode is
340 // implemented. On these platforms clobberdeadPtr cannot be a valid address.
341 badPointer(s, p, refBase, refOff)
345 // If p is a bad pointer, it may not be in s's bounds.
347 // Check s.state to synchronize with span initialization
348 // before checking other fields. See also spanOfHeap.
349 if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
350 // Pointers into stacks are also ok; the runtime manages these explicitly.
351 if state == mSpanManual {
354 // The following ensures that we are rigorous about what data
355 // structures hold valid pointers.
356 if debug.invalidptr != 0 {
357 badPointer(s, p, refBase, refOff)
362 objIndex = s.objIndex(p)
363 base = s.base() + objIndex*s.elemsize
367 // reflect_verifyNotInHeapPtr reports whether converting the not-in-heap pointer into an unsafe.Pointer is ok.
369 //go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr
370 func reflect_verifyNotInHeapPtr(p uintptr) bool {
371 // Conversion to a pointer is ok as long as findObject above does not call badPointer.
372 // Since we're already promised that p doesn't point into the heap, just disallow heap
373 // pointers and the special clobbered pointer.
374 return spanOf(p) == nil && p != clobberdeadPtr
377 const ptrBits = 8 * goarch.PtrSize
379 // heapBits provides access to the bitmap bits for a single heap word.
380 // The methods on heapBits take value receivers so that the compiler
381 // can more easily inline calls to those methods and registerize the
382 // struct fields independently.
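//
// Typical use (a sketch of the iteration pattern; see getgcmask and
// bulkBarrierPreWrite below for real call sites):
//
//	h := heapBitsForAddr(base, size)
//	for {
//		var addr uintptr
//		if h, addr = h.next(); addr == 0 {
//			break // no more pointers in [base, base+size)
//		}
//		// the word at addr holds a pointer
//	}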
383 type heapBits struct {
384 // heapBits will report on pointers in the range [addr,addr+size).
385 // The low bit of mask contains the pointerness of the word at addr
386 // (assuming valid>0).
389 // The next few pointer bits representing words starting at addr.
390 // Those bits already returned by next() are zeroed.
392 // Number of bits in mask that are valid. mask is always less than 1<<valid.
396 // heapBitsForAddr returns the heapBits for the address addr.
397 // The caller must ensure [addr,addr+size) is in an allocated span.
398 // In particular, be careful not to point past the end of an object.
400 // nosplit because it is used during write barriers and must not be preempted.
403 func heapBitsForAddr(addr, size uintptr) heapBits {
405 ai := arenaIndex(addr)
406 ha := mheap_.arenas[ai.l1()][ai.l2()]
408 // Word index in arena.
409 word := addr / goarch.PtrSize % heapArenaWords
411 // Word index and bit offset in bitmap array.
412 idx := word / ptrBits
413 off := word % ptrBits
415 // Grab relevant bits of bitmap.
416 mask := ha.bitmap[idx] >> off
417 valid := ptrBits - off
419 // Process depending on where the object ends.
420 nptr := size / goarch.PtrSize
422 // Bits for this object end before the end of this bitmap word.
423 // Squash bits for the following objects.
424 mask &= 1<<(nptr&(ptrBits-1)) - 1
426 } else if nptr == valid {
427 // Bits for this object end at exactly the end of this bitmap word.
430 // Bits for this object extend into the next bitmap word. See if there
431 // may be any pointers recorded there.
432 if uintptr(ha.noMorePtrs[idx/8])>>(idx%8)&1 != 0 {
433 // No more pointers in this object after this bitmap word.
434 // Update size so we know not to look there.
435 size = valid * goarch.PtrSize
439 return heapBits{addr: addr, size: size, mask: mask, valid: valid}
442 // Returns the (absolute) address of the next known pointer and
443 // a heapBits iterator representing any remaining pointers.
444 // If there are no more pointers, returns address 0.
445 // Note that next does not modify h. The caller must record the result.
447 // nosplit because it is used during write barriers and must not be preempted.
450 func (h heapBits) next() (heapBits, uintptr) {
454 if goarch.PtrSize == 8 {
455 i = sys.TrailingZeros64(uint64(h.mask))
457 i = sys.TrailingZeros32(uint32(h.mask))
459 h.mask ^= uintptr(1) << (i & (ptrBits - 1))
460 return h, h.addr + uintptr(i)*goarch.PtrSize
463 // Skip words that we've already processed.
464 h.addr += h.valid * goarch.PtrSize
465 h.size -= h.valid * goarch.PtrSize
467 return h, 0 // no more pointers
470 // Grab more bits and try again.
471 h = heapBitsForAddr(h.addr, h.size)
475 // nextFast is like next, but can return 0 even when there are more pointers
476 // to be found. Callers should call next if nextFast returns 0 as its second return value.
479 //	if addr, h = h.nextFast(); addr == 0 {
480 //		if addr, h = h.next(); addr == 0 {
481 //			... no more pointers ...
482 //		}
483 //	}
484 //	... process pointer at addr ...
486 // nextFast is designed to be inlineable.
489 func (h heapBits) nextFast() (heapBits, uintptr) {
496 if goarch.PtrSize == 8 {
497 i = sys.TrailingZeros64(uint64(h.mask))
499 i = sys.TrailingZeros32(uint32(h.mask))
502 h.mask ^= uintptr(1) << (i & (ptrBits - 1))
504 return h, h.addr + uintptr(i)*goarch.PtrSize
507 // bulkBarrierPreWrite executes a write barrier
508 // for every pointer slot in the memory range [src, src+size),
509 // using pointer/scalar information from [dst, dst+size).
510 // This executes the write barriers necessary before a memmove.
511 // src, dst, and size must be pointer-aligned.
512 // The range [dst, dst+size) must lie within a single object.
513 // It does not perform the actual writes.
515 // As a special case, src == 0 indicates that this is being used for a
516 // memclr. bulkBarrierPreWrite will pass 0 for the src of each write barrier.
519 // Callers should call bulkBarrierPreWrite immediately before
520 // calling memmove(dst, src, size). This function is marked nosplit
521 // to avoid being preempted; the GC must not stop the goroutine
522 // between the memmove and the execution of the barriers.
523 // The caller is also responsible for cgo pointer checks if this
524 // may be writing Go pointers into non-Go memory.
526 // The pointer bitmap is not maintained for allocations containing
527 // no pointers at all; any caller of bulkBarrierPreWrite must first
528 // make sure the underlying allocation contains pointers, usually
529 // by checking typ.ptrdata.
531 // Callers must perform cgo checks if writeBarrier.cgo.
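//
// Typical use (a sketch, roughly what typedmemmove in mbarrier.go does):
//
//	if writeBarrier.needed && typ.ptrdata != 0 {
//		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
//	}
//	memmove(dst, src, typ.size)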
534 func bulkBarrierPreWrite(dst, src, size uintptr) {
535 if (dst|src|size)&(goarch.PtrSize-1) != 0 {
536 throw("bulkBarrierPreWrite: unaligned arguments")
538 if !writeBarrier.needed {
541 if s := spanOf(dst); s == nil {
542 // If dst is a global, use the data or BSS bitmaps to
543 // execute write barriers.
544 for _, datap := range activeModules() {
545 if datap.data <= dst && dst < datap.edata {
546 bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
550 for _, datap := range activeModules() {
551 if datap.bss <= dst && dst < datap.ebss {
552 bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
557 } else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
558 // dst was heap memory at some point, but isn't now.
559 // It can't be a global. It must be either our stack,
560 // or in the case of direct channel sends, it could be
561 // another stack. Either way, no need for barriers.
562 // This will also catch if dst is in a freed span,
563 // though that should never happen.
567 buf := &getg().m.p.ptr().wbBuf
568 h := heapBitsForAddr(dst, size)
572 if h, addr = h.next(); addr == 0 {
575 dstx := (*uintptr)(unsafe.Pointer(addr))
576 if !buf.putFast(*dstx, 0) {
583 if h, addr = h.next(); addr == 0 {
586 dstx := (*uintptr)(unsafe.Pointer(addr))
587 srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
588 if !buf.putFast(*dstx, *srcx) {
595 // bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
596 // does not execute write barriers for [dst, dst+size).
598 // In addition to the requirements of bulkBarrierPreWrite,
599 // callers need to ensure [dst, dst+size) is zeroed.
601 // This is used for special cases where e.g. dst was just
602 // created and zeroed with malloc.
605 func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
606 if (dst|src|size)&(goarch.PtrSize-1) != 0 {
607 throw("bulkBarrierPreWrite: unaligned arguments")
609 if !writeBarrier.needed {
612 buf := &getg().m.p.ptr().wbBuf
613 h := heapBitsForAddr(dst, size)
616 if h, addr = h.next(); addr == 0 {
619 srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
620 if !buf.putFast(0, *srcx) {
626 // bulkBarrierBitmap executes write barriers for copying from [src,
627 // src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
628 // assumed to start maskOffset bytes into the data covered by the
629 // bitmap in bits (which may not be a multiple of 8).
631 // This is used by bulkBarrierPreWrite for writes to data and BSS.
634 func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
635 word := maskOffset / goarch.PtrSize
636 bits = addb(bits, word/8)
637 mask := uint8(1) << (word % 8)
639 buf := &getg().m.p.ptr().wbBuf
640 for i := uintptr(0); i < size; i += goarch.PtrSize {
645 i += 7 * goarch.PtrSize
651 dstx := (*uintptr)(unsafe.Pointer(dst + i))
653 if !buf.putFast(*dstx, 0) {
657 srcx := (*uintptr)(unsafe.Pointer(src + i))
658 if !buf.putFast(*dstx, *srcx) {
667 // typeBitsBulkBarrier executes a write barrier for every
668 // pointer that would be copied from [src, src+size) to [dst,
669 // dst+size) by a memmove using the type bitmap to locate those
672 // The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
673 // dst, src, and size must be pointer-aligned.
674 // The type typ must have a plain bitmap, not a GC program.
675 // The only use of this function is in channel sends, and the
676 // 64 kB channel element limit takes care of this for us.
678 // Must not be preempted because it typically runs right before memmove,
679 // and the GC must observe the barriers and the memmove as one atomic action.
681 // Callers must perform cgo checks if writeBarrier.cgo.
684 func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
686 throw("runtime: typeBitsBulkBarrier without type")
688 if typ.size != size {
689 println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size)
690 throw("runtime: invalid typeBitsBulkBarrier")
692 if typ.kind&kindGCProg != 0 {
693 println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog")
694 throw("runtime: invalid typeBitsBulkBarrier")
696 if !writeBarrier.needed {
699 ptrmask := typ.gcdata
700 buf := &getg().m.p.ptr().wbBuf
702 for i := uintptr(0); i < typ.ptrdata; i += goarch.PtrSize {
703 if i&(goarch.PtrSize*8-1) == 0 {
704 bits = uint32(*ptrmask)
705 ptrmask = addb(ptrmask, 1)
710 dstx := (*uintptr)(unsafe.Pointer(dst + i))
711 srcx := (*uintptr)(unsafe.Pointer(src + i))
712 if !buf.putFast(*dstx, *srcx) {
719 // initHeapBits initializes the heap bitmap for a span.
720 // If this is a span of single pointer allocations, it initializes all
721 // words to pointer. If forceClear is true, it clears all bits instead.
722 func (s *mspan) initHeapBits(forceClear bool) {
723 if forceClear || s.spanclass.noscan() {
724 // Set all the pointer bits to zero. We do this once
725 // when the span is allocated so we don't have to do it
726 // for each object allocation.
728 size := s.npages * pageSize
729 h := writeHeapBitsForAddr(base)
733 isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
735 return // nothing to do
737 h := writeHeapBitsForAddr(s.base())
738 size := s.npages * pageSize
739 nptrs := size / goarch.PtrSize
740 for i := uintptr(0); i < nptrs; i += ptrBits {
741 h = h.write(^uintptr(0), ptrBits)
743 h.flush(s.base(), size)
746 // countAlloc returns the number of objects allocated in span s by
747 // scanning the allocation bitmap.
748 func (s *mspan) countAlloc() int {
750 bytes := divRoundUp(s.nelems, 8)
751 // Iterate over each 8-byte chunk and count allocations
752 // with an intrinsic. Note that newMarkBits guarantees that
753 // gcmarkBits will be 8-byte aligned, so we don't have to
754 // worry about edge cases; irrelevant bits will simply be zero.
755 for i := uintptr(0); i < bytes; i += 8 {
756 // Extract 64 bits from the byte pointer and get an OnesCount.
757 // Note that the unsafe cast here doesn't preserve endianness,
758 // but that's OK. We only care about how many bits are 1, not
759 // about the order we discover them in.
760 mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
761 count += sys.OnesCount64(mrkBits)
766 type writeHeapBits struct {
767 addr uintptr // address that the low bit of mask represents the pointer state of.
768 mask uintptr // some pointer bits starting at the address addr.
769 valid uintptr // number of bits in mask that are valid (including low)
770 low uintptr // number of low-order bits to not overwrite
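// Typical use (a sketch of the sequence used by initHeapBits and
// heapBitsSetType below): accumulate pointer bits with write, then flush to
// cover the whole object:
//
//	h := writeHeapBitsForAddr(x)
//	h = h.write(bits, nptrs) // low nptrs bits of bits: 1=pointer, 0=scalar
//	h.flush(x, size)         // zero-fill out to x+size and set noMorePtrs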
773 func writeHeapBitsForAddr(addr uintptr) (h writeHeapBits) {
774 // We start writing bits maybe in the middle of a heap bitmap word.
775 // Remember how many bits into the word we started, so we can be sure
776 // not to overwrite the previous bits.
777 h.low = addr / goarch.PtrSize % ptrBits
779 // round down to heap word that starts the bitmap word.
780 h.addr = addr - h.low*goarch.PtrSize
782 // We don't have any bits yet.
789 // write appends the pointerness of the next valid pointer slots
790 // using the low valid bits of bits. 1=pointer, 0=scalar.
791 func (h writeHeapBits) write(bits, valid uintptr) writeHeapBits {
792 if h.valid+valid <= ptrBits {
793 // Fast path - just accumulate the bits.
794 h.mask |= bits << h.valid
798 // Too many bits to fit in this word. Write the current word
799 // out and move on to the next word.
801 data := h.mask | bits<<h.valid // mask for this word
802 h.mask = bits >> (ptrBits - h.valid) // leftover for next word
803 h.valid += valid - ptrBits // have h.valid+valid bits, writing ptrBits of them
805 // Flush mask to the memory bitmap.
806 // TODO: figure out how to cache arena lookup.
807 ai := arenaIndex(h.addr)
808 ha := mheap_.arenas[ai.l1()][ai.l2()]
809 idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
810 m := uintptr(1)<<h.low - 1
811 ha.bitmap[idx] = ha.bitmap[idx]&m | data
812 // Note: no synchronization required for this write because
813 // the allocator has exclusive access to the page, and the bitmap
814 // entries are all for a single page. Also, visibility of these
815 // writes is guaranteed by the publication barrier in mallocgc.
817 // Clear noMorePtrs bit, since we're going to be writing bits
818 // into the following word.
819 ha.noMorePtrs[idx/8] &^= uint8(1) << (idx % 8)
820 // Note: same as above
822 // Move to next word of bitmap.
823 h.addr += ptrBits * goarch.PtrSize
828 // Add padding of size bytes.
829 func (h writeHeapBits) pad(size uintptr) writeHeapBits {
833 words := size / goarch.PtrSize
834 for words > ptrBits {
835 h = h.write(0, ptrBits)
838 return h.write(0, words)
841 // Flush the bits that have been written, and add zeros as needed
842 // to cover the full object [addr, addr+size).
843 func (h writeHeapBits) flush(addr, size uintptr) {
844 // zeros counts the number of bits needed to represent the object minus the
845 // number of bits we've already written. This is the number of 0 bits
846 // that need to be added.
847 zeros := (addr+size-h.addr)/goarch.PtrSize - h.valid
849 // Add zero bits up to the bitmap word boundary
851 z := ptrBits - h.valid
859 // Find word in bitmap that we're going to write.
860 ai := arenaIndex(h.addr)
861 ha := mheap_.arenas[ai.l1()][ai.l2()]
862 idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
864 // Write remaining bits.
865 if h.valid != h.low {
866 m := uintptr(1)<<h.low - 1 // don't clear existing bits below "low"
867 m |= ^(uintptr(1)<<h.valid - 1) // don't clear existing bits above "valid"
868 ha.bitmap[idx] = ha.bitmap[idx]&m | h.mask
874 // Record in the noMorePtrs map that there won't be any more 1 bits,
875 // so readers can stop early.
876 ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
878 // Advance to next bitmap word.
879 h.addr += ptrBits * goarch.PtrSize
881 // Continue on writing zeros for the rest of the object.
882 // For standard use of the ptr bits this is not required, as
883 // the bits are read from the beginning of the object. Some uses,
884 // like noscan spans, oblets, bulk write barriers, and cgocheck, might
885 // start mid-object, so these writes are still required.
888 ai := arenaIndex(h.addr)
889 ha := mheap_.arenas[ai.l1()][ai.l2()]
890 idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
892 ha.bitmap[idx] &^= uintptr(1)<<zeros - 1
894 } else if zeros == ptrBits {
901 ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
902 h.addr += ptrBits * goarch.PtrSize
906 // Read the bytes starting at the aligned pointer p into a uintptr.
907 // Read is little-endian.
908 func readUintptr(p *byte) uintptr {
909 x := *(*uintptr)(unsafe.Pointer(p))
910 if goarch.BigEndian {
911 if goarch.PtrSize == 8 {
912 return uintptr(sys.Bswap64(uint64(x)))
914 return uintptr(sys.Bswap32(uint32(x)))
919 // heapBitsSetType records that the new allocation [x, x+size)
920 // holds in [x, x+dataSize) one or more values of type typ.
921 // (The number of values is given by dataSize / typ.size.)
922 // If dataSize < size, the fragment [x+dataSize, x+size) is
923 // recorded as non-pointer data.
924 // It is known that the type has pointers somewhere;
925 // malloc does not call heapBitsSetType when there are no pointers,
926 // because all free objects are marked as noscan during
927 // heapBitsSweepSpan.
929 // There can only be one allocation from a given span active at a time,
930 // and the bitmap for a span always falls on word boundaries,
931 // so there are no write-write races for access to the heap bitmap.
932 // Hence, heapBitsSetType can access the bitmap without atomics.
934 // There can be read-write races between heapBitsSetType and things
935 // that read the heap bitmap like scanobject. However, since
936 // heapBitsSetType is only used for objects that have not yet been
937 // made reachable, readers will ignore bits being modified by this
938 // function. This does mean this function cannot transiently modify
939 // bits that belong to neighboring objects. Also, on weakly-ordered
940 // machines, callers must execute a store/store (publication) barrier
941 // between calling this function and making the object reachable.
942 func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
943 const doubleCheck = false // slow but helpful; enable to test modifications to this code
945 if doubleCheck && dataSize%typ.size != 0 {
946 throw("heapBitsSetType: dataSize not a multiple of typ.size")
949 if goarch.PtrSize == 8 && size == goarch.PtrSize {
950 // It's one word and it has pointers, so it must be a pointer.
951 // Since all allocated one-word objects are pointers
952 // (non-pointers are aggregated into tinySize allocations),
953 // (*mspan).initHeapBits sets the pointer bits for us.
954 // Nothing to do here.
956 h, addr := heapBitsForAddr(x, size).next()
958 throw("heapBitsSetType: pointer bit missing")
962 throw("heapBitsSetType: second pointer bit found")
968 h := writeHeapBitsForAddr(x)
970 // Handle GC program.
971 if typ.kind&kindGCProg != 0 {
972 // Expand the gc program into the storage we're going to use for the actual object.
973 obj := (*uint8)(unsafe.Pointer(x))
974 n := runGCProg(addb(typ.gcdata, 4), obj)
975 // Use the expanded program to set the heap bits.
976 for i := uintptr(0); true; i += typ.size {
977 // Copy expanded program to heap bitmap.
981 h = h.write(uintptr(*p), 8)
985 h = h.write(uintptr(*p), j)
987 if i+typ.size == dataSize {
988 break // no padding after last element
991 // Pad with zeros to the start of the next element.
992 h = h.pad(typ.size - n*goarch.PtrSize)
997 // Erase the expanded GC program.
998 memclrNoHeapPointers(unsafe.Pointer(obj), (n+7)/8)
1002 // Note about sizes:
1004 // typ.size is the size of the object in bytes,
1005 // and typ.ptrdata is the number of bytes in the prefix
1006 // of the object that contains pointers. That is, the final
1007 // typ.size - typ.ptrdata bytes contain no pointers.
1008 // This allows optimization of a common pattern where
1009 // an object has a small header followed by a large scalar
1010 // buffer. If we know the pointers are over, we don't have
1011 // to scan the buffer's heap bitmap at all.
1012 // The 1-bit ptrmasks are sized to contain only bits for
1013 // the typ.ptrdata prefix, zero padded out to a full byte
1014 // of bitmap. If there is more room in the allocated object,
1015 // that space is pointerless. The noMorePtrs bitmap will prevent
1016 // scanning large pointerless tails of an object.
1018 // Replicated copies are not as nice: if there is an array of
1019 // objects with scalar tails, all but the last tail do have to
1020 // be initialized, because there is no way to say "skip forward".
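// For example (an illustrative sketch): for a single element laid out as
// {pointer, scalar, pointer}, typ.ptrdata is 3*goarch.PtrSize, the low bits
// of typ.gcdata are 0b101, and the single-small-element case below reduces to
//
//	h = h.write(0b101, 3)
//	h.flush(x, size)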
1022 ptrs := typ.ptrdata / goarch.PtrSize
1023 if typ.size == dataSize { // Single element
1024 if ptrs <= ptrBits { // Single small element
1025 m := readUintptr(typ.gcdata)
1026 h = h.write(m, ptrs)
1027 } else { // Single large element
1030 h = h.write(readUintptr(p), ptrBits)
1031 p = addb(p, ptrBits/8)
1033 if ptrs <= ptrBits {
1038 h = h.write(m, ptrs)
1040 } else { // Repeated element
1041 words := typ.size / goarch.PtrSize // total words, including scalar tail
1042 if words <= ptrBits { // Repeated small element
1043 n := dataSize / typ.size
1044 m := readUintptr(typ.gcdata)
1045 // Make larger unit to repeat
1046 for words <= ptrBits/2 {
1048 h = h.write(m, words)
1059 h = h.write(m, words)
1062 h = h.write(m, ptrs)
1063 } else { // Repeated large element
1064 for i := uintptr(0); true; i += typ.size {
1068 h = h.write(readUintptr(p), ptrBits)
1069 p = addb(p, ptrBits/8)
1074 if i+typ.size == dataSize {
1075 break // don't need the trailing nonptr bits on the last element.
1077 // Pad with zeros to the start of the next element.
1078 h = h.pad(typ.size - typ.ptrdata)
1085 h := heapBitsForAddr(x, size)
1086 for i := uintptr(0); i < size; i += goarch.PtrSize {
1087 // Compute the pointer bit we want at offset i.
1091 if off < typ.ptrdata {
1092 j := off / goarch.PtrSize
1093 want = *addb(typ.gcdata, j/8)>>(j%8)&1 != 0
1100 throw("heapBitsSetType: pointer entry not correct")
1104 if _, addr := h.next(); addr != 0 {
1105 throw("heapBitsSetType: extra pointer")
1110 var debugPtrmask struct {
1115 // progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
1116 // size is the size of the region described by prog, in bytes.
1117 // The resulting bitvector will have no more than size/goarch.PtrSize bits.
1118 func progToPointerMask(prog *byte, size uintptr) bitvector {
1119 n := (size/goarch.PtrSize + 7) / 8
1120 x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
1121 x[len(x)-1] = 0xa1 // overflow check sentinel
1122 n = runGCProg(prog, &x[0])
1123 if x[len(x)-1] != 0xa1 {
1124 throw("progToPointerMask: overflow")
1126 return bitvector{int32(n), &x[0]}
1129 // Packed GC pointer bitmaps, aka GC programs.
1131 // For large types containing arrays, the type information has a
1132 // natural repetition that can be encoded to save space in the
1133 // binary and in the memory representation of the type information.
1135 // The encoding is a simple Lempel-Ziv style bytecode machine
1136 // with the following instructions:
1139 // 0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
1140 // 10000000 n c: repeat the previous n bits c times; n, c are varints
1141 // 1nnnnnnn c: repeat the previous n bits c times; c is a varint
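//
// For example (an illustrative sketch, not a program emitted by the compiler):
// a 10-word element whose first two words are pointers, repeated 3 times in
// total, could be encoded as
//
//	0x0a 0x03 0x00  // emit 10 literal bits (low bit first): 1 1 0 0 0 0 0 0 0 0
//	0x8a 0x02       // 1nnnnnnn c: repeat the previous 10 bits 2 more times
//	0x00            // end of program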
1143 // runGCProg returns the number of 1-bit entries written to memory.
1144 func runGCProg(prog, dst *byte) uintptr {
1147 // Bits waiting to be written to memory.
1154 // Flush accumulated full bytes.
1155 // The rest of the loop assumes that nbits <= 7.
1156 for ; nbits >= 8; nbits -= 8 {
1162 // Process one instruction.
1167 // Literal bits; n == 0 means end of program.
1173 for i := uintptr(0); i < nbyte; i++ {
1174 bits |= uintptr(*p) << nbits
1181 bits |= uintptr(*p) << nbits
1188 // Repeat. If n == 0, it is encoded in a varint in the next bytes.
1190 for off := uint(0); ; off += 7 {
1193 n |= (x & 0x7F) << off
1200 // Count is encoded in a varint in the next bytes.
1202 for off := uint(0); ; off += 7 {
1205 c |= (x & 0x7F) << off
1210 c *= n // now total number of bits to copy
1212 // If the number of bits being repeated is small, load them
1213 // into a register and use that register for the entire loop
1214 // instead of repeatedly reading from memory.
1215 // Handling fewer than 8 bits here makes the general loop simpler.
1216 // The cutoff is goarch.PtrSize*8 - 7 to guarantee that when we add
1217 // the pattern to a bit buffer holding at most 7 bits (a partial byte)
1218 // it will not overflow.
1220 const maxBits = goarch.PtrSize*8 - 7
1222 // Start with bits in output buffer.
1226 // If we need more bits, fetch them from memory.
1227 src = subtract1(src)
1230 pattern |= uintptr(*src)
1231 src = subtract1(src)
1235 // We started with the whole bit output buffer,
1236 // and then we loaded bits from whole bytes.
1237 // Either way, we might now have too many instead of too few.
1238 // Discard the extra.
1240 pattern >>= npattern - n
1244 // Replicate pattern to at most maxBits.
1246 // One bit being repeated.
1247 // If the bit is 1, make the pattern all 1s.
1248 // If the bit is 0, the pattern is already all 0s,
1249 // but we can claim that the number of bits
1250 // in the word is equal to the number we need (c),
1251 // because right shift of bits will zero fill.
1253 pattern = 1<<maxBits - 1
1261 if nb+nb <= maxBits {
1262 // Double pattern until the whole uintptr is filled.
1263 for nb <= goarch.PtrSize*8 {
1267 // Trim away incomplete copy of original pattern in high bits.
1268 // TODO(rsc): Replace with table lookup or loop on systems without divide?
1269 nb = maxBits / npattern * npattern
1276 // Add pattern to bit buffer and flush bit buffer, c/npattern times.
1277 // Since pattern contains >8 bits, there will be full bytes to flush
1278 // on each iteration.
1279 for ; c >= npattern; c -= npattern {
1280 bits |= pattern << nbits
1290 // Add final fragment to bit buffer.
1293 bits |= pattern << nbits
1299 // Repeat; n too large to fit in a register.
1300 // Since nbits <= 7, we know the first few bytes of repeated data
1301 // are already written to memory.
1302 off := n - nbits // n > nbits because n > maxBits and nbits <= 7
1303 // Leading src fragment.
1304 src = subtractb(src, (off+7)/8)
1305 if frag := off & 7; frag != 0 {
1306 bits |= uintptr(*src) >> (8 - frag) << nbits
1311 // Main loop: load one byte, write another.
1312 // The bits are rotating through the bit buffer.
1313 for i := c / 8; i > 0; i-- {
1314 bits |= uintptr(*src) << nbits
1320 // Final src fragment.
1322 bits |= (uintptr(*src) & (1<<c - 1)) << nbits
1327 // Write any final bits out, using full-byte writes, even for the final byte.
1328 totalBits := (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
1330 for ; nbits > 0; nbits -= 8 {
1338 // materializeGCProg allocates space for the (1-bit) pointer bitmask
1339 // for an object of size ptrdata. Then it fills that space with the
1340 // pointer bitmask specified by the program prog.
1341 // The bitmask starts at s.startAddr.
1342 // The result must be deallocated with dematerializeGCProg.
1343 func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
1344 // Each word of ptrdata needs one bit in the bitmap.
1345 bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
1346 // Compute the number of pages needed for bitmapBytes.
1347 pages := divRoundUp(bitmapBytes, pageSize)
1348 s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
1349 runGCProg(addb(prog, 4), (*byte)(unsafe.Pointer(s.startAddr)))
1352 func dematerializeGCProg(s *mspan) {
1353 mheap_.freeManual(s, spanAllocPtrScalarBits)
1356 func dumpGCProg(p *byte) {
1362 print("\t", nptr, " end\n")
1366 print("\t", nptr, " lit ", x, ":")
1368 for i := 0; i < n; i++ {
1375 nbit := int(x &^ 0x80)
1377 for nb := uint(0); ; nb += 7 {
1380 nbit |= int(x&0x7f) << nb
1387 for nb := uint(0); ; nb += 7 {
1390 count |= int(x&0x7f) << nb
1395 print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
1396 nptr += nbit * count
1403 func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
1404 target := (*stkframe)(ctxt)
1405 if frame.sp <= target.sp && target.sp < frame.varp {
1412 // reflect_gcbits returns the GC type info for x, for testing.
1413 // The result is the bitmap entries (0 or 1), one entry per byte.
1415 //go:linkname reflect_gcbits reflect.gcbits
1416 func reflect_gcbits(x any) []byte {
1420 // Returns GC type info for the pointer stored in ep for testing.
1421 // If ep points to the stack, only static live information will be returned
1422 // (i.e. not for objects which are only dynamically live stack objects).
1423 func getgcmask(ep any) (mask []byte) {
1428 for _, datap := range activeModules() {
1430 if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
1431 bitmap := datap.gcdatamask.bytedata
1432 n := (*ptrtype)(unsafe.Pointer(t)).elem.size
1433 mask = make([]byte, n/goarch.PtrSize)
1434 for i := uintptr(0); i < n; i += goarch.PtrSize {
1435 off := (uintptr(p) + i - datap.data) / goarch.PtrSize
1436 mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
1442 if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
1443 bitmap := datap.gcbssmask.bytedata
1444 n := (*ptrtype)(unsafe.Pointer(t)).elem.size
1445 mask = make([]byte, n/goarch.PtrSize)
1446 for i := uintptr(0); i < n; i += goarch.PtrSize {
1447 off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
1448 mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
1455 if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
1456 if s.spanclass.noscan() {
1460 hbits := heapBitsForAddr(base, n)
1461 mask = make([]byte, n/goarch.PtrSize)
1464 if hbits, addr = hbits.next(); addr == 0 {
1467 mask[(addr-base)/goarch.PtrSize] = 1
1469 // Callers expect this mask to end at the last pointer.
1470 for len(mask) > 0 && mask[len(mask)-1] == 0 {
1471 mask = mask[:len(mask)-1]
1477 if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
1479 frame.sp = uintptr(p)
1480 gentraceback(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
1481 if frame.fn.valid() {
1482 locals, _, _ := frame.getStackMap(nil, false)
1486 size := uintptr(locals.n) * goarch.PtrSize
1487 n := (*ptrtype)(unsafe.Pointer(t)).elem.size
1488 mask = make([]byte, n/goarch.PtrSize)
1489 for i := uintptr(0); i < n; i += goarch.PtrSize {
1490 off := (uintptr(p) + i - frame.varp + size) / goarch.PtrSize
1491 mask[i/goarch.PtrSize] = locals.ptrbit(off)
1497 // otherwise, not something the GC knows about.
1498 // possibly read-only data, like malloc(0).
1499 // must not have pointers