1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Garbage collector: type and heap bitmaps.
7 // Stack, data, and bss bitmaps
9 // Stack frames and global variables in the data and bss sections are
10 // described by bitmaps with 1 bit per pointer-sized word. A "1" bit
11 // means the word is a live pointer to be visited by the GC (referred to
12 // as "pointer"). A "0" bit means the word should be ignored by GC
13 // (referred to as "scalar", though it could be a dead pointer value).
17 // The heap bitmap comprises 1 bit for each pointer-sized word in the heap,
18 // recording whether a pointer is stored in that word or not. This bitmap
19 // is stored in the heapArena metadata backing each heap arena.
20 // That is, if ha is the heapArena for the arena starting at "start",
21 // then ha.bitmap[0] holds the 64 bits for the 64 words "start"
22 // through start+63*ptrSize, ha.bitmap[1] holds the entries for
23 // start+64*ptrSize through start+127*ptrSize, and so on.
24 // Bits correspond to words in little-endian order. ha.bitmap[0]&1 represents
25 // the word at "start", ha.bitmap[0]>>1&1 represents the word at start+8, etc.
26 // (For 32-bit platforms, s/64/32/.)
28 // We also keep a noMorePtrs bitmap which allows us to stop scanning
29 // the heap bitmap early in certain situations. If ha.noMorePtrs[i]>>j&1
30 // is 1, then the object containing the last word described by ha.bitmap[8*i+j]
31 // has no more pointers beyond those described by ha.bitmap[8*i+j].
32 // If ha.noMorePtrs[i]>>j&1 is set, the entries in ha.bitmap[8*i+j+1] and
33 // beyond must all be zero until the start of the next object.
35 // The bitmap for noscan spans is set to all zero at span allocation time.
37 // The bitmap for unallocated objects in scannable spans is not maintained (can be junk).
44 "runtime/internal/atomic"
45 "runtime/internal/sys"
49 // addb returns the byte pointer p+n.
53 func addb(p *byte, n uintptr) *byte {
54 // Note: wrote out full expression instead of calling add(p, n)
55 // to reduce the number of temporaries generated by the
56 // compiler for this trivial expression during inlining.
57 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
60 // subtractb returns the byte pointer p-n.
64 func subtractb(p *byte, n uintptr) *byte {
65 // Note: wrote out full expression instead of calling add(p, -n)
66 // to reduce the number of temporaries generated by the
67 // compiler for this trivial expression during inlining.
68 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
71 // add1 returns the byte pointer p+1.
75 func add1(p *byte) *byte {
76 // Note: wrote out full expression instead of calling addb(p, 1)
77 // to reduce the number of temporaries generated by the
78 // compiler for this trivial expression during inlining.
79 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
82 // subtract1 returns the byte pointer p-1.
84 // nosplit because it is used during write barriers and must not be preempted.
88 func subtract1(p *byte) *byte {
89 // Note: wrote out full expression instead of calling subtractb(p, 1)
90 // to reduce the number of temporaries generated by the
91 // compiler for this trivial expression during inlining.
92 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
95 // markBits provides access to the mark bit for an object in the heap.
96 // bytep points to the byte holding the mark bit.
97 // mask is a byte with a single bit set that can be &ed with *bytep
98 // to see if the bit has been set.
99 // *m.bytep&m.mask != 0 indicates the mark bit is set.
100 // index can be used along with span information to generate
101 // the address of the object in the heap.
102 // We maintain one set of mark bits for allocation and one for marking purposes.
104 type markBits struct {
111 func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
112 bytep, mask := s.allocBits.bitp(allocBitIndex)
113 return markBits{bytep, mask, allocBitIndex}
116 // refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
117 // and negates them so that ctz (count trailing zeros) instructions
118 // can be used. It then places these 8 bytes into the cached 64 bit s.allocCache.
120 func (s *mspan) refillAllocCache(whichByte uintptr) {
121 bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
123 aCache |= uint64(bytes[0])
124 aCache |= uint64(bytes[1]) << (1 * 8)
125 aCache |= uint64(bytes[2]) << (2 * 8)
126 aCache |= uint64(bytes[3]) << (3 * 8)
127 aCache |= uint64(bytes[4]) << (4 * 8)
128 aCache |= uint64(bytes[5]) << (5 * 8)
129 aCache |= uint64(bytes[6]) << (6 * 8)
130 aCache |= uint64(bytes[7]) << (7 * 8)
131 s.allocCache = ^aCache
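
// Illustrative sketch (values assumed): if s.allocBits[0] is 0b00001111,
// objects 0-3 are allocated. After s.refillAllocCache(0) the negated cache
// has its low four bits clear, so
//
//	idx := sys.TrailingZeros64(s.allocCache) // idx == 4
//
// yields the index of the first free object, as used by nextFreeIndex below.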
134 // nextFreeIndex returns the index of the next free object in s at
135 // or after s.freeindex.
136 // There are hardware instructions that can be used to make this
137 // faster if profiling warrants it.
138 func (s *mspan) nextFreeIndex() uintptr {
139 sfreeindex := s.freeindex
141 if sfreeindex == snelems {
144 if sfreeindex > snelems {
145 throw("s.freeindex > s.nelems")
148 aCache := s.allocCache
150 bitIndex := sys.TrailingZeros64(aCache)
152 // Move index to start of next cached bits.
153 sfreeindex = (sfreeindex + 64) &^ (64 - 1)
154 if sfreeindex >= snelems {
155 s.freeindex = snelems
158 whichByte := sfreeindex / 8
159 // Refill s.allocCache with the next 64 alloc bits.
160 s.refillAllocCache(whichByte)
161 aCache = s.allocCache
162 bitIndex = sys.TrailingZeros64(aCache)
163 // nothing available in cached bits
164 // grab the next 8 bytes and try again.
166 result := sfreeindex + uintptr(bitIndex)
167 if result >= snelems {
168 s.freeindex = snelems
172 s.allocCache >>= uint(bitIndex + 1)
173 sfreeindex = result + 1
175 if sfreeindex%64 == 0 && sfreeindex != snelems {
176 // We just incremented s.freeindex so it isn't 0.
177 // As each 1 in s.allocCache was encountered and used for allocation
178 // it was shifted away. At this point s.allocCache contains all 0s.
179 // Refill s.allocCache so that it corresponds
180 // to the bits at s.allocBits starting at s.freeindex.
181 whichByte := sfreeindex / 8
182 s.refillAllocCache(whichByte)
184 s.freeindex = sfreeindex
188 // isFree reports whether the index'th object in s is unallocated.
190 // The caller must ensure s.state is mSpanInUse, and there must have
191 // been no preemption points since ensuring this (which could allow a
192 // GC transition, which would allow the state to change).
193 func (s *mspan) isFree(index uintptr) bool {
194 if index < s.freeIndexForScan {
197 bytep, mask := s.allocBits.bitp(index)
198 return *bytep&mask == 0
201 // divideByElemSize returns n/s.elemsize.
202 // n must be within [0, s.npages*_PageSize),
203 // or may be exactly s.npages*_PageSize
204 // if s.elemsize is from sizeclasses.go.
206 // nosplit, because it is called by objIndex, which is nosplit
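//
// Worked example (values illustrative, not taken from sizeclasses.go): for
// elemsize 48, a divMul of ceil(2^32/48) = 89478486 makes the multiply-and-shift
//
//	q := uintptr((uint64(n) * 89478486) >> 32) // == n/48 for all n in such a span
//
// agree with n/48 over the span's range, avoiding a hardware divide.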
209 func (s *mspan) divideByElemSize(n uintptr) uintptr {
210 const doubleCheck = false
212 // See explanation in mksizeclasses.go's computeDivMagic.
213 q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)
215 if doubleCheck && q != n/s.elemsize {
216 println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
217 throw("bad magic division")
222 // nosplit, because it is called by other nosplit code like findObject
225 func (s *mspan) objIndex(p uintptr) uintptr {
226 return s.divideByElemSize(p - s.base())
229 func markBitsForAddr(p uintptr) markBits {
231 objIndex := s.objIndex(p)
232 return s.markBitsForIndex(objIndex)
235 func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
236 bytep, mask := s.gcmarkBits.bitp(objIndex)
237 return markBits{bytep, mask, objIndex}
240 func (s *mspan) markBitsForBase() markBits {
241 return markBits{&s.gcmarkBits.x, uint8(1), 0}
244 // isMarked reports whether mark bit m is set.
245 func (m markBits) isMarked() bool {
246 return *m.bytep&m.mask != 0
249 // setMarked sets the marked bit in the markbits, atomically.
250 func (m markBits) setMarked() {
251 // Might be racing with other updates, so use atomic update always.
252 // We used to be clever here and use a non-atomic update in certain
253 // cases, but it's not worth the risk.
254 atomic.Or8(m.bytep, m.mask)
257 // setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
258 func (m markBits) setMarkedNonAtomic() {
262 // clearMarked clears the marked bit in the markbits, atomically.
263 func (m markBits) clearMarked() {
264 // Might be racing with other updates, so use atomic update always.
265 // We used to be clever here and use a non-atomic update in certain
266 // cases, but it's not worth the risk.
267 atomic.And8(m.bytep, ^m.mask)
270 // markBitsForSpan returns the markBits for the span base address base.
271 func markBitsForSpan(base uintptr) (mbits markBits) {
272 mbits = markBitsForAddr(base)
274 throw("markBitsForSpan: unaligned start")
279 // advance advances the markBits to the next object in the span.
280 func (m *markBits) advance() {
282 m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
290 // clobberdeadPtr is a special value that is used by the compiler to
291 // clobber dead stack slots, when -clobberdead flag is set.
292 const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))
294 // badPointer throws after reporting a bad pointer found in the Go heap.
295 func badPointer(s *mspan, p, refBase, refOff uintptr) {
296 // Typically this indicates an incorrect use
297 // of unsafe or cgo to store a bad pointer in
298 // the Go heap. It may also indicate a runtime bug.
301 // TODO(austin): We could be more aggressive
302 // and detect pointers to unallocated objects
303 // in allocated spans.
305 print("runtime: pointer ", hex(p))
307 state := s.state.get()
308 if state != mSpanInUse {
309 print(" to unallocated span")
311 print(" to unused region of span")
313 print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
317 print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
318 gcDumpObject("object", refBase, refOff)
320 getg().m.traceback = 2
321 throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
324 // findObject returns the base address for the heap object containing
325 // the address p, the object's span, and the index of the object in s.
326 // If p does not point into a heap object, it returns base == 0.
328 // If p is an invalid heap pointer and debug.invalidptr != 0,
329 // findObject panics.
331 // refBase and refOff optionally give the base address of the object
332 // in which the pointer p was found and the byte offset at which it
333 // was found. These are used for error reporting.
335 // It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
336 // Since p is a uintptr, it would not be adjusted if the stack were to move.
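//
// Illustrative use (a sketch patterned after the GC's scan loop; greyobject
// lives in mgcmark.go):
//
//	if obj, span, objIndex := findObject(p, b, i); obj != 0 {
//		greyobject(obj, b, i, span, gcw, objIndex)
//	}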
339 func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
341 // If s is nil, the virtual address has never been part of the heap.
342 // This pointer may be to some mmap'd region, so we allow it.
344 if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
345 // Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
346 // as they are the only platform where compiler's clobberdead mode is
347 // implemented. On these platforms clobberdeadPtr cannot be a valid address.
348 badPointer(s, p, refBase, refOff)
352 // If p is a bad pointer, it may not be in s's bounds.
354 // Check s.state to synchronize with span initialization
355 // before checking other fields. See also spanOfHeap.
356 if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
357 // Pointers into stacks are also ok, the runtime manages these explicitly.
358 if state == mSpanManual {
361 // The following ensures that we are rigorous about what data
362 // structures hold valid pointers.
363 if debug.invalidptr != 0 {
364 badPointer(s, p, refBase, refOff)
369 objIndex = s.objIndex(p)
370 base = s.base() + objIndex*s.elemsize
374 // reflect_verifyNotInHeapPtr reports whether converting the not-in-heap pointer into an unsafe.Pointer is ok.
376 //go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr
377 func reflect_verifyNotInHeapPtr(p uintptr) bool {
378 // Conversion to a pointer is ok as long as findObject above does not call badPointer.
379 // Since we're already promised that p doesn't point into the heap, just disallow heap
380 // pointers and the special clobbered pointer.
381 return spanOf(p) == nil && p != clobberdeadPtr
384 const ptrBits = 8 * goarch.PtrSize
386 // heapBits provides access to the bitmap bits for a single heap word.
387 // The methods on heapBits take value receivers so that the compiler
388 // can more easily inline calls to those methods and registerize the
389 // struct fields independently.
390 type heapBits struct {
391 // heapBits will report on pointers in the range [addr,addr+size).
392 // The low bit of mask contains the pointerness of the word at addr
393 // (assuming valid>0).
396 // The next few pointer bits representing words starting at addr.
397 // Those bits already returned by next() are zeroed.
399 // Number of bits in mask that are valid. mask is always less than 1<<valid.
403 // heapBitsForAddr returns the heapBits for the address addr.
404 // The caller must ensure [addr,addr+size) is in an allocated span.
405 // In particular, be careful not to point past the end of an object.
407 // nosplit because it is used during write barriers and must not be preempted.
410 func heapBitsForAddr(addr, size uintptr) heapBits {
412 ai := arenaIndex(addr)
413 ha := mheap_.arenas[ai.l1()][ai.l2()]
415 // Word index in arena.
416 word := addr / goarch.PtrSize % heapArenaWords
418 // Word index and bit offset in bitmap array.
419 idx := word / ptrBits
420 off := word % ptrBits
422 // Grab relevant bits of bitmap.
423 mask := ha.bitmap[idx] >> off
424 valid := ptrBits - off
426 // Process depending on where the object ends.
427 nptr := size / goarch.PtrSize
429 // Bits for this object end before the end of this bitmap word.
430 // Squash bits for the following objects.
431 mask &= 1<<(nptr&(ptrBits-1)) - 1
433 } else if nptr == valid {
434 // Bits for this object end at exactly the end of this bitmap word.
437 // Bits for this object extend into the next bitmap word. See if there
438 // may be any pointers recorded there.
439 if uintptr(ha.noMorePtrs[idx/8])>>(idx%8)&1 != 0 {
440 // No more pointers in this object after this bitmap word.
441 // Update size so we know not to look there.
442 size = valid * goarch.PtrSize
446 return heapBits{addr: addr, size: size, mask: mask, valid: valid}
449 // Returns the (absolute) address of the next known pointer and
450 // a heapBits iterator representing any remaining pointers.
451 // If there are no more pointers, returns address 0.
452 // Note that next does not modify h. The caller must record the result.
454 // nosplit because it is used during write barriers and must not be preempted.
457 func (h heapBits) next() (heapBits, uintptr) {
461 if goarch.PtrSize == 8 {
462 i = sys.TrailingZeros64(uint64(h.mask))
464 i = sys.TrailingZeros32(uint32(h.mask))
466 h.mask ^= uintptr(1) << (i & (ptrBits - 1))
467 return h, h.addr + uintptr(i)*goarch.PtrSize
470 // Skip words that we've already processed.
471 h.addr += h.valid * goarch.PtrSize
472 h.size -= h.valid * goarch.PtrSize
474 return h, 0 // no more pointers
477 // Grab more bits and try again.
478 h = heapBitsForAddr(h.addr, h.size)
482 // nextFast is like next, but can return 0 even when there are more pointers
483 // to be found. Callers should call next if nextFast returns 0 as its second return value.
486 //	if addr, h = h.nextFast(); addr == 0 {
487 //		if addr, h = h.next(); addr == 0 {
488 //			... no more pointers ...
489 //		}
490 //	}
491 //	... process pointer at addr ...
493 // nextFast is designed to be inlineable.
496 func (h heapBits) nextFast() (heapBits, uintptr) {
503 if goarch.PtrSize == 8 {
504 i = sys.TrailingZeros64(uint64(h.mask))
506 i = sys.TrailingZeros32(uint32(h.mask))
509 h.mask ^= uintptr(1) << (i & (ptrBits - 1))
511 return h, h.addr + uintptr(i)*goarch.PtrSize
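
// An illustrative scan loop over an object's pointer words using next and
// nextFast (a sketch; the GC's real loop is scanobject in mgcmark.go, and
// getgcmask below follows the same pattern):
//
//	hbits := heapBitsForAddr(b, n)
//	for {
//		var addr uintptr
//		if hbits, addr = hbits.nextFast(); addr == 0 {
//			if hbits, addr = hbits.next(); addr == 0 {
//				break // no more pointers in this object
//			}
//		}
//		// addr is the address of a word holding a pointer; load and mark it.
//	}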
514 // bulkBarrierPreWrite executes a write barrier
515 // for every pointer slot in the memory range [src, src+size),
516 // using pointer/scalar information from [dst, dst+size).
517 // This executes the write barriers necessary before a memmove.
518 // src, dst, and size must be pointer-aligned.
519 // The range [dst, dst+size) must lie within a single object.
520 // It does not perform the actual writes.
522 // As a special case, src == 0 indicates that this is being used for a
523 // memclr. bulkBarrierPreWrite will pass 0 for the src of each write barrier.
526 // Callers should call bulkBarrierPreWrite immediately before
527 // calling memmove(dst, src, size). This function is marked nosplit
528 // to avoid being preempted; the GC must not stop the goroutine
529 // between the memmove and the execution of the barriers.
530 // The caller is also responsible for cgo pointer checks if this
531 // may be writing Go pointers into non-Go memory.
533 // The pointer bitmap is not maintained for allocations containing
534 // no pointers at all; any caller of bulkBarrierPreWrite must first
535 // make sure the underlying allocation contains pointers, usually
536 // by checking typ.PtrBytes.
538 // Callers must perform cgo checks if goexperiment.CgoCheck2.
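//
// Illustrative call site (a sketch of the typedmemmove pattern in mbarrier.go;
// details simplified):
//
//	if writeBarrier.needed && typ.PtrBytes != 0 {
//		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
//	}
//	memmove(dst, src, typ.Size_)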
541 func bulkBarrierPreWrite(dst, src, size uintptr) {
542 if (dst|src|size)&(goarch.PtrSize-1) != 0 {
543 throw("bulkBarrierPreWrite: unaligned arguments")
545 if !writeBarrier.needed {
548 if s := spanOf(dst); s == nil {
549 // If dst is a global, use the data or BSS bitmaps to
550 // execute write barriers.
551 for _, datap := range activeModules() {
552 if datap.data <= dst && dst < datap.edata {
553 bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
557 for _, datap := range activeModules() {
558 if datap.bss <= dst && dst < datap.ebss {
559 bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
564 } else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
565 // dst was heap memory at some point, but isn't now.
566 // It can't be a global. It must be either our stack,
567 // or in the case of direct channel sends, it could be
568 // another stack. Either way, no need for barriers.
569 // This will also catch if dst is in a freed span,
570 // though that should never happen.
574 buf := &getg().m.p.ptr().wbBuf
575 h := heapBitsForAddr(dst, size)
579 if h, addr = h.next(); addr == 0 {
582 dstx := (*uintptr)(unsafe.Pointer(addr))
589 if h, addr = h.next(); addr == 0 {
592 dstx := (*uintptr)(unsafe.Pointer(addr))
593 srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
601 // bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
602 // does not execute write barriers for [dst, dst+size).
604 // In addition to the requirements of bulkBarrierPreWrite
605 // callers need to ensure [dst, dst+size) is zeroed.
607 // This is used for special cases where e.g. dst was just
608 // created and zeroed with malloc.
611 func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
612 if (dst|src|size)&(goarch.PtrSize-1) != 0 {
613 throw("bulkBarrierPreWrite: unaligned arguments")
615 if !writeBarrier.needed {
618 buf := &getg().m.p.ptr().wbBuf
619 h := heapBitsForAddr(dst, size)
622 if h, addr = h.next(); addr == 0 {
625 srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
631 // bulkBarrierBitmap executes write barriers for copying from [src,
632 // src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
633 // assumed to start maskOffset bytes into the data covered by the
634 // bitmap in bits (which may not be a multiple of 8).
636 // This is used by bulkBarrierPreWrite for writes to data and BSS.
639 func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
640 word := maskOffset / goarch.PtrSize
641 bits = addb(bits, word/8)
642 mask := uint8(1) << (word % 8)
644 buf := &getg().m.p.ptr().wbBuf
645 for i := uintptr(0); i < size; i += goarch.PtrSize {
650 i += 7 * goarch.PtrSize
656 dstx := (*uintptr)(unsafe.Pointer(dst + i))
661 srcx := (*uintptr)(unsafe.Pointer(src + i))
671 // typeBitsBulkBarrier executes a write barrier for every
672 // pointer that would be copied from [src, src+size) to [dst,
673 // dst+size) by a memmove using the type bitmap to locate those
676 // The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
677 // dst, src, and size must be pointer-aligned.
678 // The type typ must have a plain bitmap, not a GC program.
679 // The only use of this function is in channel sends, and the
680 // 64 kB channel element limit takes care of this for us.
682 // Must not be preempted because it typically runs right before memmove,
683 // and the GC must observe the barriers and the memmove as a single atomic action.
685 // Callers must perform cgo checks if goexperiment.CgoCheck2.
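//
// Illustrative call site (a sketch mirroring sendDirect in chan.go, which
// copies a channel element to another goroutine's stack):
//
//	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_)
//	memmove(dst, src, t.Size_)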
688 func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
690 throw("runtime: typeBitsBulkBarrier without type")
692 if typ.Size_ != size {
693 println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size)
694 throw("runtime: invalid typeBitsBulkBarrier")
696 if typ.Kind_&kindGCProg != 0 {
697 println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " with GC prog")
698 throw("runtime: invalid typeBitsBulkBarrier")
700 if !writeBarrier.needed {
703 ptrmask := typ.GCData
704 buf := &getg().m.p.ptr().wbBuf
706 for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
707 if i&(goarch.PtrSize*8-1) == 0 {
708 bits = uint32(*ptrmask)
709 ptrmask = addb(ptrmask, 1)
714 dstx := (*uintptr)(unsafe.Pointer(dst + i))
715 srcx := (*uintptr)(unsafe.Pointer(src + i))
723 // initHeapBits initializes the heap bitmap for a span.
724 // If this is a span of single pointer allocations, it initializes all
725 // words to pointer. If forceClear is true, it clears all bits instead.
726 func (s *mspan) initHeapBits(forceClear bool) {
727 if forceClear || s.spanclass.noscan() {
728 // Set all the pointer bits to zero. We do this once
729 // when the span is allocated so we don't have to do it
730 // for each object allocation.
732 size := s.npages * pageSize
733 h := writeHeapBitsForAddr(base)
737 isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
739 return // nothing to do
741 h := writeHeapBitsForAddr(s.base())
742 size := s.npages * pageSize
743 nptrs := size / goarch.PtrSize
744 for i := uintptr(0); i < nptrs; i += ptrBits {
745 h = h.write(^uintptr(0), ptrBits)
747 h.flush(s.base(), size)
750 // countAlloc returns the number of objects allocated in span s by
751 // scanning the allocation bitmap.
752 func (s *mspan) countAlloc() int {
754 bytes := divRoundUp(s.nelems, 8)
755 // Iterate over each 8-byte chunk and count allocations
756 // with an intrinsic. Note that newMarkBits guarantees that
757 // gcmarkBits will be 8-byte aligned, so we don't have to
758 // worry about edge cases; irrelevant bits will simply be zero.
759 for i := uintptr(0); i < bytes; i += 8 {
761 // Extract 64 bits from the byte pointer and count the 1 bits with OnesCount64.
761 // Note that the unsafe cast here doesn't preserve endianness,
762 // but that's OK. We only care about how many bits are 1, not
763 // about the order we discover them in.
764 mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
765 count += sys.OnesCount64(mrkBits)
770 type writeHeapBits struct {
771 addr uintptr // address that the low bit of mask represents the pointer state of.
772 mask uintptr // some pointer bits starting at the address addr.
773 valid uintptr // number of bits in buf that are valid (including low)
774 low uintptr // number of low-order bits to not overwrite
777 func writeHeapBitsForAddr(addr uintptr) (h writeHeapBits) {
778 // We start writing bits maybe in the middle of a heap bitmap word.
779 // Remember how many bits into the word we started, so we can be sure
780 // not to overwrite the previous bits.
781 h.low = addr / goarch.PtrSize % ptrBits
783 // round down to heap word that starts the bitmap word.
784 h.addr = addr - h.low*goarch.PtrSize
786 // We don't have any bits yet.
793 // write appends the pointerness of the next valid pointer-sized words,
794 // taken from the low valid bits of bits. 1=pointer, 0=scalar.
795 func (h writeHeapBits) write(bits, valid uintptr) writeHeapBits {
796 if h.valid+valid <= ptrBits {
797 // Fast path - just accumulate the bits.
798 h.mask |= bits << h.valid
802 // Too many bits to fit in this word. Write the current word
803 // out and move on to the next word.
805 data := h.mask | bits<<h.valid // mask for this word
806 h.mask = bits >> (ptrBits - h.valid) // leftover for next word
807 h.valid += valid - ptrBits // have h.valid+valid bits, writing ptrBits of them
809 // Flush mask to the memory bitmap.
810 // TODO: figure out how to cache arena lookup.
811 ai := arenaIndex(h.addr)
812 ha := mheap_.arenas[ai.l1()][ai.l2()]
813 idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
814 m := uintptr(1)<<h.low - 1
815 ha.bitmap[idx] = ha.bitmap[idx]&m | data
816 // Note: no synchronization required for this write because
817 // the allocator has exclusive access to the page, and the bitmap
818 // entries are all for a single page. Also, visibility of these
819 // writes is guaranteed by the publication barrier in mallocgc.
821 // Clear noMorePtrs bit, since we're going to be writing bits
822 // into the following word.
823 ha.noMorePtrs[idx/8] &^= uint8(1) << (idx % 8)
824 // Note: same as above
826 // Move to next word of bitmap.
827 h.addr += ptrBits * goarch.PtrSize
832 // pad adds size bytes of padding (scalar bits) to the bitmap being written.
833 func (h writeHeapBits) pad(size uintptr) writeHeapBits {
837 words := size / goarch.PtrSize
838 for words > ptrBits {
839 h = h.write(0, ptrBits)
842 return h.write(0, words)
845 // Flush the bits that have been written, and add zeros as needed
846 // to cover the full object [addr, addr+size).
847 func (h writeHeapBits) flush(addr, size uintptr) {
848 // zeros counts the number of bits needed to represent the object minus the
849 // number of bits we've already written. This is the number of 0 bits
850 // that need to be added.
851 zeros := (addr+size-h.addr)/goarch.PtrSize - h.valid
853 // Add zero bits up to the bitmap word boundary
855 z := ptrBits - h.valid
863 // Find word in bitmap that we're going to write.
864 ai := arenaIndex(h.addr)
865 ha := mheap_.arenas[ai.l1()][ai.l2()]
866 idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
868 // Write remaining bits.
869 if h.valid != h.low {
870 m := uintptr(1)<<h.low - 1 // don't clear existing bits below "low"
871 m |= ^(uintptr(1)<<h.valid - 1) // don't clear existing bits above "valid"
872 ha.bitmap[idx] = ha.bitmap[idx]&m | h.mask
878 // Record in the noMorePtrs map that there won't be any more 1 bits,
879 // so readers can stop early.
880 ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
882 // Advance to next bitmap word.
883 h.addr += ptrBits * goarch.PtrSize
885 // Continue on writing zeros for the rest of the object.
886 // For standard use of the ptr bits this is not required, as
887 // the bits are read from the beginning of the object. Some uses,
888 // like noscan spans, oblets, bulk write barriers, and cgocheck, might
889 // start mid-object, so these writes are still required.
892 ai := arenaIndex(h.addr)
893 ha := mheap_.arenas[ai.l1()][ai.l2()]
894 idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
896 ha.bitmap[idx] &^= uintptr(1)<<zeros - 1
898 } else if zeros == ptrBits {
905 ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
906 h.addr += ptrBits * goarch.PtrSize
910 // Read the bytes starting at the aligned pointer p into a uintptr.
911 // Read is little-endian.
912 func readUintptr(p *byte) uintptr {
913 x := *(*uintptr)(unsafe.Pointer(p))
914 if goarch.BigEndian {
915 if goarch.PtrSize == 8 {
916 return uintptr(sys.Bswap64(uint64(x)))
918 return uintptr(sys.Bswap32(uint32(x)))
923 // heapBitsSetType records that the new allocation [x, x+size)
924 // holds in [x, x+dataSize) one or more values of type typ.
925 // (The number of values is given by dataSize / typ.Size.)
926 // If dataSize < size, the fragment [x+dataSize, x+size) is
927 // recorded as non-pointer data.
928 // It is known that the type has pointers somewhere;
929 // malloc does not call heapBitsSetType when there are no pointers,
930 // because all free objects are marked as noscan during
931 // heapBitsSweepSpan.
933 // There can only be one allocation from a given span active at a time,
934 // and the bitmap for a span always falls on word boundaries,
935 // so there are no write-write races for access to the heap bitmap.
936 // Hence, heapBitsSetType can access the bitmap without atomics.
938 // There can be read-write races between heapBitsSetType and things
939 // that read the heap bitmap like scanobject. However, since
940 // heapBitsSetType is only used for objects that have not yet been
941 // made reachable, readers will ignore bits being modified by this
942 // function. This does mean this function cannot transiently modify
943 // bits that belong to neighboring objects. Also, on weakly-ordered
944 // machines, callers must execute a store/store (publication) barrier
945 // between calling this function and making the object reachable.
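//
// Illustrative call site (a simplified sketch of mallocgc's scannable
// allocation path):
//
//	// x is freshly allocated memory of length size holding values of type typ
//	if !noscan {
//		heapBitsSetType(uintptr(x), size, dataSize, typ)
//	}
//	publicationBarrier() // make the bitmap visible before x is published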
946 func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
947 const doubleCheck = false // slow but helpful; enable to test modifications to this code
949 if doubleCheck && dataSize%typ.Size_ != 0 {
950 throw("heapBitsSetType: dataSize not a multiple of typ.Size")
953 if goarch.PtrSize == 8 && size == goarch.PtrSize {
954 // It's one word and it has pointers, so it must be a pointer.
955 // Since all allocated one-word objects are pointers
956 // (non-pointers are aggregated into tinySize allocations),
957 // (*mspan).initHeapBits sets the pointer bits for us.
958 // Nothing to do here.
960 h, addr := heapBitsForAddr(x, size).next()
962 throw("heapBitsSetType: pointer bit missing")
966 throw("heapBitsSetType: second pointer bit found")
972 h := writeHeapBitsForAddr(x)
974 // Handle GC program.
975 if typ.Kind_&kindGCProg != 0 {
976 // Expand the gc program into the storage we're going to use for the actual object.
977 obj := (*uint8)(unsafe.Pointer(x))
978 n := runGCProg(addb(typ.GCData, 4), obj)
979 // Use the expanded program to set the heap bits.
980 for i := uintptr(0); true; i += typ.Size_ {
981 // Copy expanded program to heap bitmap.
985 h = h.write(uintptr(*p), 8)
989 h = h.write(uintptr(*p), j)
991 if i+typ.Size_ == dataSize {
992 break // no padding after last element
995 // Pad with zeros to the start of the next element.
996 h = h.pad(typ.Size_ - n*goarch.PtrSize)
1001 // Erase the expanded GC program.
1002 memclrNoHeapPointers(unsafe.Pointer(obj), (n+7)/8)
1006 // Note about sizes:
1008 // typ.Size is the size of the object in bytes,
1009 // and typ.PtrBytes is the number of bytes in the prefix
1010 // of the object that contains pointers. That is, the final
1011 // typ.Size - typ.PtrBytes bytes contain no pointers.
1012 // This allows optimization of a common pattern where
1013 // an object has a small header followed by a large scalar
1014 // buffer. If we know the pointers are over, we don't have
1015 // to scan the buffer's heap bitmap at all.
1016 // The 1-bit ptrmasks are sized to contain only bits for
1017 // the typ.PtrBytes prefix, zero padded out to a full byte
1018 // of bitmap. If there is more room in the allocated object,
1019 // that space is pointerless. The noMorePtrs bitmap will prevent
1020 // scanning large pointerless tails of an object.
1022 // Replicated copies are not as nice: if there is an array of
1023 // objects with scalar tails, all but the last tail do have to
1024 // be initialized, because there is no way to say "skip forward".
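//
// For illustration (an assumed type, 64-bit): an allocation holding three
// values of struct{ p *byte; n uintptr } has typ.Size_ = 16 and
// typ.PtrBytes = 8, so ptrs = 1 and words = 2 below. The repeated-small-element
// path replicates the two-bit pattern "pointer, scalar" across the elements,
// yielding heap bitmap bits 1,0,1,0,1 (the last element's scalar tail is
// covered by the zero-fill in flush).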
1026 ptrs := typ.PtrBytes / goarch.PtrSize
1027 if typ.Size_ == dataSize { // Single element
1028 if ptrs <= ptrBits { // Single small element
1029 m := readUintptr(typ.GCData)
1030 h = h.write(m, ptrs)
1031 } else { // Single large element
1034 h = h.write(readUintptr(p), ptrBits)
1035 p = addb(p, ptrBits/8)
1037 if ptrs <= ptrBits {
1042 h = h.write(m, ptrs)
1044 } else { // Repeated element
1045 words := typ.Size_ / goarch.PtrSize // total words, including scalar tail
1046 if words <= ptrBits { // Repeated small element
1047 n := dataSize / typ.Size_
1048 m := readUintptr(typ.GCData)
1049 // Make larger unit to repeat
1050 for words <= ptrBits/2 {
1052 h = h.write(m, words)
1063 h = h.write(m, words)
1066 h = h.write(m, ptrs)
1067 } else { // Repeated large element
1068 for i := uintptr(0); true; i += typ.Size_ {
1072 h = h.write(readUintptr(p), ptrBits)
1073 p = addb(p, ptrBits/8)
1078 if i+typ.Size_ == dataSize {
1079 break // don't need the trailing nonptr bits on the last element.
1081 // Pad with zeros to the start of the next element.
1082 h = h.pad(typ.Size_ - typ.PtrBytes)
1089 h := heapBitsForAddr(x, size)
1090 for i := uintptr(0); i < size; i += goarch.PtrSize {
1091 // Compute the pointer bit we want at offset i.
1094 off := i % typ.Size_
1095 if off < typ.PtrBytes {
1096 j := off / goarch.PtrSize
1097 want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
1104 throw("heapBitsSetType: pointer entry not correct")
1108 if _, addr := h.next(); addr != 0 {
1109 throw("heapBitsSetType: extra pointer")
1114 var debugPtrmask struct {
1119 // progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
1120 // size is the size of the region described by prog, in bytes.
1121 // The resulting bitvector will have no more than size/goarch.PtrSize bits.
1122 func progToPointerMask(prog *byte, size uintptr) bitvector {
1123 n := (size/goarch.PtrSize + 7) / 8
1124 x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
1125 x[len(x)-1] = 0xa1 // overflow check sentinel
1126 n = runGCProg(prog, &x[0])
1127 if x[len(x)-1] != 0xa1 {
1128 throw("progToPointerMask: overflow")
1130 return bitvector{int32(n), &x[0]}
1133 // Packed GC pointer bitmaps, aka GC programs.
1135 // For large types containing arrays, the type information has a
1136 // natural repetition that can be encoded to save space in the
1137 // binary and in the memory representation of the type information.
1139 // The encoding is a simple Lempel-Ziv style bytecode machine
1140 // with the following instructions:
1143 // 0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
1144 // 10000000 n c: repeat the previous n bits c times; n, c are varints
1145 // 1nnnnnnn c: repeat the previous n bits c times; c is a varint
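//
// For example (an illustrative encoding, not actual compiler output), the
// two-bit pattern "pointer, scalar" repeated 100 times could be encoded as
//
//	0x02 0x01  // emit 2 literal bits: 0b01 (pointer, then scalar)
//	0x82 0x63  // 1nnnnnnn c: repeat the previous 2 bits 99 more times
//	0x00       // end of program
//
// which expands to a 200-bit pointer mask.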
1147 // runGCProg returns the number of 1-bit entries written to memory.
1148 func runGCProg(prog, dst *byte) uintptr {
1151 // Bits waiting to be written to memory.
1158 // Flush accumulated full bytes.
1159 // The rest of the loop assumes that nbits <= 7.
1160 for ; nbits >= 8; nbits -= 8 {
1166 // Process one instruction.
1171 // Literal bits; n == 0 means end of program.
1177 for i := uintptr(0); i < nbyte; i++ {
1178 bits |= uintptr(*p) << nbits
1185 bits |= uintptr(*p) << nbits
1192 // Repeat. If n == 0, it is encoded in a varint in the next bytes.
1194 for off := uint(0); ; off += 7 {
1197 n |= (x & 0x7F) << off
1204 // Count is encoded in a varint in the next bytes.
1206 for off := uint(0); ; off += 7 {
1209 c |= (x & 0x7F) << off
1214 c *= n // now total number of bits to copy
1216 // If the number of bits being repeated is small, load them
1217 // into a register and use that register for the entire loop
1218 // instead of repeatedly reading from memory.
1219 // Handling fewer than 8 bits here makes the general loop simpler.
1220 // The cutoff is goarch.PtrSize*8 - 7 to guarantee that when we add
1221 // the pattern to a bit buffer holding at most 7 bits (a partial byte)
1222 // it will not overflow.
1224 const maxBits = goarch.PtrSize*8 - 7
1226 // Start with bits in output buffer.
1230 // If we need more bits, fetch them from memory.
1231 src = subtract1(src)
1234 pattern |= uintptr(*src)
1235 src = subtract1(src)
1239 // We started with the whole bit output buffer,
1240 // and then we loaded bits from whole bytes.
1241 // Either way, we might now have too many instead of too few.
1242 // Discard the extra.
1244 pattern >>= npattern - n
1248 // Replicate pattern to at most maxBits.
1250 // One bit being repeated.
1251 // If the bit is 1, make the pattern all 1s.
1252 // If the bit is 0, the pattern is already all 0s,
1253 // but we can claim that the number of bits
1254 // in the word is equal to the number we need (c),
1255 // because right shift of bits will zero fill.
1257 pattern = 1<<maxBits - 1
1265 if nb+nb <= maxBits {
1266 // Double pattern until the whole uintptr is filled.
1267 for nb <= goarch.PtrSize*8 {
1271 // Trim away incomplete copy of original pattern in high bits.
1272 // TODO(rsc): Replace with table lookup or loop on systems without divide?
1273 nb = maxBits / npattern * npattern
1280 // Add pattern to bit buffer and flush bit buffer, c/npattern times.
1281 // Since pattern contains >8 bits, there will be full bytes to flush
1282 // on each iteration.
1283 for ; c >= npattern; c -= npattern {
1284 bits |= pattern << nbits
1294 // Add final fragment to bit buffer.
1297 bits |= pattern << nbits
1303 // Repeat; n too large to fit in a register.
1304 // Since nbits <= 7, we know the first few bytes of repeated data
1305 // are already written to memory.
1306 off := n - nbits // n > nbits because n > maxBits and nbits <= 7
1307 // Leading src fragment.
1308 src = subtractb(src, (off+7)/8)
1309 if frag := off & 7; frag != 0 {
1310 bits |= uintptr(*src) >> (8 - frag) << nbits
1315 // Main loop: load one byte, write another.
1316 // The bits are rotating through the bit buffer.
1317 for i := c / 8; i > 0; i-- {
1318 bits |= uintptr(*src) << nbits
1324 // Final src fragment.
1326 bits |= (uintptr(*src) & (1<<c - 1)) << nbits
1331 // Write any final bits out, using full-byte writes, even for the final byte.
1332 totalBits := (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
1334 for ; nbits > 0; nbits -= 8 {
1342 // materializeGCProg allocates space for the (1-bit) pointer bitmask
1343 // for an object of size ptrdata. Then it fills that space with the
1344 // pointer bitmask specified by the program prog.
1345 // The bitmask starts at s.startAddr.
1346 // The result must be deallocated with dematerializeGCProg.
1347 func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
1348 // Each word of ptrdata needs one bit in the bitmap.
1349 bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
1350 // Compute the number of pages needed for bitmapBytes.
1351 pages := divRoundUp(bitmapBytes, pageSize)
1352 s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
1353 runGCProg(addb(prog, 4), (*byte)(unsafe.Pointer(s.startAddr)))
1356 func dematerializeGCProg(s *mspan) {
1357 mheap_.freeManual(s, spanAllocPtrScalarBits)
1360 func dumpGCProg(p *byte) {
1366 print("\t", nptr, " end\n")
1370 print("\t", nptr, " lit ", x, ":")
1372 for i := 0; i < n; i++ {
1379 nbit := int(x &^ 0x80)
1381 for nb := uint(0); ; nb += 7 {
1384 nbit |= int(x&0x7f) << nb
1391 for nb := uint(0); ; nb += 7 {
1394 count |= int(x&0x7f) << nb
1399 print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
1400 nptr += nbit * count
1407 // reflect_gcbits returns the GC type info for x, for testing.
1408 // The result is the bitmap entries (0 or 1), one entry per byte.
1410 //go:linkname reflect_gcbits reflect.gcbits
1411 func reflect_gcbits(x any) []byte {
1415 // Returns GC type info for the pointer stored in ep for testing.
1416 // If ep points to the stack, only static live information will be returned
1417 // (i.e. not for objects which are only dynamically live stack objects).
1418 func getgcmask(ep any) (mask []byte) {
1423 for _, datap := range activeModules() {
1425 if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
1426 bitmap := datap.gcdatamask.bytedata
1427 n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
1428 mask = make([]byte, n/goarch.PtrSize)
1429 for i := uintptr(0); i < n; i += goarch.PtrSize {
1430 off := (uintptr(p) + i - datap.data) / goarch.PtrSize
1431 mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
1437 if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
1438 bitmap := datap.gcbssmask.bytedata
1439 n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
1440 mask = make([]byte, n/goarch.PtrSize)
1441 for i := uintptr(0); i < n; i += goarch.PtrSize {
1442 off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
1443 mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
1450 if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
1451 if s.spanclass.noscan() {
1455 hbits := heapBitsForAddr(base, n)
1456 mask = make([]byte, n/goarch.PtrSize)
1459 if hbits, addr = hbits.next(); addr == 0 {
1462 mask[(addr-base)/goarch.PtrSize] = 1
1464 // Callers expect this mask to end at the last pointer.
1465 for len(mask) > 0 && mask[len(mask)-1] == 0 {
1466 mask = mask[:len(mask)-1]
1472 if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
1475 for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
1476 if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
1482 locals, _, _ := u.frame.getStackMap(false)
1486 size := uintptr(locals.n) * goarch.PtrSize
1487 n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
1488 mask = make([]byte, n/goarch.PtrSize)
1489 for i := uintptr(0); i < n; i += goarch.PtrSize {
1490 off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
1491 mask[i/goarch.PtrSize] = locals.ptrbit(off)
1497 // otherwise, not something the GC knows about.
1498 // possibly read-only data, like malloc(0).
1499 // must not have pointers