// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Garbage collector: type and heap bitmaps.
//
// Stack, data, and bss bitmaps
//
// Stack frames and global variables in the data and bss sections are
// described by bitmaps with 1 bit per pointer-sized word. A "1" bit
// means the word is a live pointer to be visited by the GC (referred to
// as "pointer"). A "0" bit means the word should be ignored by GC
// (referred to as "scalar", though it could be a dead pointer value).
//
// Heap bitmap
//
// The heap bitmap comprises 2 bits for each pointer-sized word in the heap,
// stored in the heapArena metadata backing each heap arena.
// That is, if ha is the heapArena for the arena starting at start,
// then ha.bitmap[0] holds the 2-bit entries for the four words start
// through start+3*ptrSize, ha.bitmap[1] holds the entries for
// start+4*ptrSize through start+7*ptrSize, and so on.
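//
// For illustration, a sketch of the index arithmetic this layout implies
// (addr is a heap address inside the arena starting at start):
//
//	off := (addr - start) / ptrSize // word index within the arena
//	entry := ha.bitmap[off/4]       // byte holding this word's 2-bit entry
//	shift := off % 4                // position of the entry within that byte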
//
// In each 2-bit entry, the lower bit is a pointer/scalar bit, just
// like in the stack/data bitmaps described above. The upper bit
// indicates scan/dead: a "1" value ("scan") indicates that there may
// be pointers in later words of the allocation, and a "0" value
// ("dead") indicates there are no more pointers in the allocation. If
// the upper bit is 0, the lower bit must also be 0, and this
// indicates scanning can ignore the rest of the allocation.
//
// The 2-bit entries are split when written into the byte, so that the top half
// of the byte contains 4 high (scan) bits and the bottom half contains 4 low
// (pointer) bits. This form allows a copy from the 1-bit to the 4-bit form to
// keep the pointer bits contiguous, instead of having to space them out.
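//
// For example, a sketch of how one word's bits are decoded under this
// split layout (matching heapBits.bits below):
//
//	b := *bitp >> shift
//	isPointer := b&bitPointer != 0 // low nibble holds pointer bits
//	morePtrs := b&bitScan != 0     // high nibble holds scan bits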
//
// The code makes use of the fact that the zero value for a heap
// bitmap means scalar/dead. This property must be preserved when
// modifying the encoding.
//
// The bitmap for noscan spans is not maintained. Code must ensure
// that an object is scannable before consulting its bitmap by
// checking either the noscan bit in the span or the object's type
// information.
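//
// For example, a minimal sketch of that check:
//
//	if s.spanclass.noscan() {
//		// No bitmap is maintained; do not consult heapBitsForAddr.
//	}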
49 "runtime/internal/atomic"
50 "runtime/internal/sys"
const (
	bitPointer = 1 << 0
	bitScan    = 1 << 4

	heapBitsShift      = 1     // shift offset between successive bitPointer or bitScan entries
	wordsPerBitmapByte = 8 / 2 // heap words described by one bitmap byte

	// all scan/pointer bits in a byte
	bitScanAll    = bitScan | bitScan<<heapBitsShift | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
	bitPointerAll = bitPointer | bitPointer<<heapBitsShift | bitPointer<<(2*heapBitsShift) | bitPointer<<(3*heapBitsShift)
)
// addb returns the byte pointer p+n.
func addb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
}
// subtractb returns the byte pointer p-n.
func subtractb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, -n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
}
// add1 returns the byte pointer p+1.
func add1(p *byte) *byte {
	// Note: wrote out full expression instead of calling addb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
}
// subtract1 returns the byte pointer p-1.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func subtract1(p *byte) *byte {
	// Note: wrote out full expression instead of calling subtractb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
}
// heapBits provides access to the bitmap bits for a single heap word.
// The methods on heapBits take value receivers so that the compiler
// can more easily inline calls to those methods and registerize the
// struct fields independently.
type heapBits struct {
	bitp  *uint8
	shift uint32
	arena uint32 // Index of heap arena containing bitp
	last  *uint8 // Last byte of the arena's bitmap
}
// Make the compiler check that heapBits.arena is large enough to hold
// the maximum arena frame number.
var _ = heapBits{arena: (1<<heapAddrBits)/heapArenaBytes - 1}
// markBits provides access to the mark bit for an object in the heap.
// bytep points to the byte holding the mark bit.
// mask is a byte with a single bit set that can be &ed with *bytep
// to see if the bit has been set.
// *m.bytep&m.mask != 0 indicates the mark bit is set.
// index can be used along with span information to generate
// the address of the object in the heap.
// We maintain one set of mark bits for allocation and one for
// marking purposes.
type markBits struct {
	bytep *uint8
	mask  uint8
	index uintptr
}
func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
	bytep, mask := s.allocBits.bitp(allocBitIndex)
	return markBits{bytep, mask, allocBitIndex}
}
// refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
// and negates them so that ctz (count trailing zeros) instructions
// can be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
func (s *mspan) refillAllocCache(whichByte uintptr) {
	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
	aCache := uint64(0)
	aCache |= uint64(bytes[0])
	aCache |= uint64(bytes[1]) << (1 * 8)
	aCache |= uint64(bytes[2]) << (2 * 8)
	aCache |= uint64(bytes[3]) << (3 * 8)
	aCache |= uint64(bytes[4]) << (4 * 8)
	aCache |= uint64(bytes[5]) << (5 * 8)
	aCache |= uint64(bytes[6]) << (6 * 8)
	aCache |= uint64(bytes[7]) << (7 * 8)
	s.allocCache = ^aCache
}
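// For example (illustrative): if the first alloc-bits byte is 0b00000111,
// the first three objects are allocated; after negation the cache is
// ...11111000 and sys.Ctz64(s.allocCache) yields 3, the first free index.
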
// nextFreeIndex returns the index of the next free object in s at
// or after s.freeindex.
// There are hardware instructions that can be used to make this
// faster if profiling warrants it.
func (s *mspan) nextFreeIndex() uintptr {
	sfreeindex := s.freeindex
	snelems := s.nelems
	if sfreeindex == snelems {
		return sfreeindex
	}
	if sfreeindex > snelems {
		throw("s.freeindex > s.nelems")
	}

	aCache := s.allocCache

	bitIndex := sys.Ctz64(aCache)
	for bitIndex == 64 {
		// Move index to start of next cached bits.
		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
		if sfreeindex >= snelems {
			s.freeindex = snelems
			return snelems
		}
		whichByte := sfreeindex / 8
		// Refill s.allocCache with the next 64 alloc bits.
		s.refillAllocCache(whichByte)
		aCache = s.allocCache
		bitIndex = sys.Ctz64(aCache)
		// nothing available in cached bits
		// grab the next 8 bytes and try again.
	}
	result := sfreeindex + uintptr(bitIndex)
	if result >= snelems {
		s.freeindex = snelems
		return snelems
	}

	s.allocCache >>= uint(bitIndex + 1)
	sfreeindex = result + 1

	if sfreeindex%64 == 0 && sfreeindex != snelems {
		// We just incremented s.freeindex so it isn't 0.
		// As each 1 in s.allocCache was encountered and used for allocation
		// it was shifted away. At this point s.allocCache contains all 0s.
		// Refill s.allocCache so that it corresponds
		// to the bits at s.allocBits starting at s.freeindex.
		whichByte := sfreeindex / 8
		s.refillAllocCache(whichByte)
	}
	s.freeindex = sfreeindex
	return result
}
// isFree reports whether the index'th object in s is unallocated.
//
// The caller must ensure s.state is mSpanInUse, and there must have
// been no preemption points since ensuring this (which could allow a
// GC transition, which would allow the state to change).
func (s *mspan) isFree(index uintptr) bool {
	if index < s.freeindex {
		return false
	}
	bytep, mask := s.allocBits.bitp(index)
	return *bytep&mask == 0
}
// divideByElemSize returns n/s.elemsize.
// n must be within [0, s.npages*_PageSize),
// or may be exactly s.npages*_PageSize
// if s.elemsize is from sizeclasses.go.
func (s *mspan) divideByElemSize(n uintptr) uintptr {
	const doubleCheck = false

	// See explanation in mksizeclasses.go's computeDivMagic.
	q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)

	if doubleCheck && q != n/s.elemsize {
		println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
		throw("bad magic division")
	}
	return q
}
func (s *mspan) objIndex(p uintptr) uintptr {
	return s.divideByElemSize(p - s.base())
}
func markBitsForAddr(p uintptr) markBits {
	s := spanOf(p)
	objIndex := s.objIndex(p)
	return s.markBitsForIndex(objIndex)
}
func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
	bytep, mask := s.gcmarkBits.bitp(objIndex)
	return markBits{bytep, mask, objIndex}
}
func (s *mspan) markBitsForBase() markBits {
	return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
}
// isMarked reports whether mark bit m is set.
func (m markBits) isMarked() bool {
	return *m.bytep&m.mask != 0
}
// setMarked sets the marked bit in the markbits, atomically.
func (m markBits) setMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.Or8(m.bytep, m.mask)
}
// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
func (m markBits) setMarkedNonAtomic() {
	*m.bytep |= m.mask
}
// clearMarked clears the marked bit in the markbits, atomically.
func (m markBits) clearMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.And8(m.bytep, ^m.mask)
}
// markBitsForSpan returns the markBits for the span base address base.
func markBitsForSpan(base uintptr) (mbits markBits) {
	mbits = markBitsForAddr(base)
	if mbits.mask != 1 {
		throw("markBitsForSpan: unaligned start")
	}
	return mbits
}
// advance advances the markBits to the next object in the span.
func (m *markBits) advance() {
	if m.mask == 1<<7 {
		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
		m.mask = 1
	} else {
		m.mask = m.mask << 1
	}
	m.index++
}
// heapBitsForAddr returns the heapBits for the address addr.
// The caller must ensure addr is in an allocated span.
// In particular, be careful not to point past the end of an object.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func heapBitsForAddr(addr uintptr) (h heapBits) {
	// 2 bits per word, 4 pairs per byte, and a mask is hard coded.
	arena := arenaIndex(addr)
	ha := mheap_.arenas[arena.l1()][arena.l2()]
	// The compiler uses a load for nil checking ha, but in this
	// case we'll almost never hit that cache line again, so it
	// makes more sense to do a value check.
	if ha == nil {
		// addr is not in the heap. Return nil heapBits, which
		// we expect to crash in the caller.
		return
	}
	h.bitp = &ha.bitmap[(addr/(sys.PtrSize*4))%heapArenaBitmapBytes]
	h.shift = uint32((addr / sys.PtrSize) & 3)
	h.arena = uint32(arena)
	h.last = &ha.bitmap[len(ha.bitmap)-1]
	return
}
// badPointer throws a bad-pointer-in-heap panic.
func badPointer(s *mspan, p, refBase, refOff uintptr) {
	// Typically this indicates an incorrect use
	// of unsafe or cgo to store a bad pointer in
	// the Go heap. It may also indicate a runtime
	// bug.
	//
	// TODO(austin): We could be more aggressive
	// and detect pointers to unallocated objects
	// in allocated spans.
	printlock()
	print("runtime: pointer ", hex(p))
	state := s.state.get()
	if state != mSpanInUse {
		print(" to unallocated span")
	} else {
		print(" to unused region of span")
	}
	print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state, "\n")
	if refBase != 0 {
		print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
		gcDumpObject("object", refBase, refOff)
	}
	getg().m.traceback = 2
	throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
}
// findObject returns the base address for the heap object containing
// the address p, the object's span, and the index of the object in s.
// If p does not point into a heap object, it returns base == 0.
//
// If p is an invalid heap pointer and debug.invalidptr != 0,
// findObject panics.
//
// refBase and refOff optionally give the base address of the object
// in which the pointer p was found and the byte offset at which it
// was found. These are used for error reporting.
//
// It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
// Since p is a uintptr, it would not be adjusted if the stack were to move.
//go:nosplit
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
	s = spanOf(p)
	// If s is nil, the virtual address has never been part of the heap.
	// This pointer may be to some mmap'd region, so we allow it.
	if s == nil {
		return
	}
	// If p is a bad pointer, it may not be in s's bounds.
	//
	// Check s.state to synchronize with span initialization
	// before checking other fields. See also spanOfHeap.
	if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
		// Pointers into stacks are also ok, the runtime manages these explicitly.
		if state == mSpanManual {
			return
		}
		// The following ensures that we are rigorous about what data
		// structures hold valid pointers.
		if debug.invalidptr != 0 {
			badPointer(s, p, refBase, refOff)
		}
		return
	}

	objIndex = s.objIndex(p)
	base = s.base() + objIndex*s.elemsize
	return
}
// next returns the heapBits describing the next pointer-sized word in memory.
// That is, if h describes address p, h.next() describes p+ptrSize.
// Note that next does not modify h. The caller must record the result.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) next() heapBits {
	if h.shift < 3*heapBitsShift {
		h.shift += heapBitsShift
	} else if h.bitp != h.last {
		h.bitp, h.shift = add1(h.bitp), 0
	} else {
		// Move to the next arena.
		return h.nextArena()
	}
	return h
}
// nextArena advances h to the beginning of the next heap arena.
//
// This is a slow-path helper to next. gc's inliner knows that
// heapBits.next can be inlined even though it calls this. This is
// marked noinline so it doesn't get inlined into next and cause next
// to be too big to inline.
//go:noinline
func (h heapBits) nextArena() heapBits {
	h.arena++
	ai := arenaIdx(h.arena)
	l2 := mheap_.arenas[ai.l1()]
	if l2 == nil {
		// We just passed the end of the object, which
		// was also the end of the heap. Poison h. It
		// should never be dereferenced at this point.
		return heapBits{}
	}
	ha := l2[ai.l2()]
	if ha == nil {
		return heapBits{}
	}
	h.bitp, h.shift = &ha.bitmap[0], 0
	h.last = &ha.bitmap[len(ha.bitmap)-1]
	return h
}
// forward returns the heapBits describing n pointer-sized words ahead of h in memory.
// That is, if h describes address p, h.forward(n) describes p+n*ptrSize.
// h.forward(1) is equivalent to h.next(), just slower.
// Note that forward does not modify h. The caller must record the result.
//go:nosplit
func (h heapBits) forward(n uintptr) heapBits {
	n += uintptr(h.shift) / heapBitsShift
	nbitp := uintptr(unsafe.Pointer(h.bitp)) + n/4
	h.shift = uint32(n%4) * heapBitsShift
	if nbitp <= uintptr(unsafe.Pointer(h.last)) {
		h.bitp = (*uint8)(unsafe.Pointer(nbitp))
		return h
	}

	// We're in a new heap arena.
	past := nbitp - (uintptr(unsafe.Pointer(h.last)) + 1)
	h.arena += 1 + uint32(past/heapArenaBitmapBytes)
	ai := arenaIdx(h.arena)
	if l2 := mheap_.arenas[ai.l1()]; l2 != nil && l2[ai.l2()] != nil {
		a := l2[ai.l2()]
		h.bitp = &a.bitmap[past%heapArenaBitmapBytes]
		h.last = &a.bitmap[len(a.bitmap)-1]
	} else {
		h.bitp, h.last = nil, nil
	}
	return h
}
// forwardOrBoundary is like forward, but stops at boundaries between
// contiguous sections of the bitmap. It returns the number of words
// advanced over, which will be <= n.
func (h heapBits) forwardOrBoundary(n uintptr) (heapBits, uintptr) {
	maxn := 4 * ((uintptr(unsafe.Pointer(h.last)) + 1) - uintptr(unsafe.Pointer(h.bitp)))
	if n > maxn {
		n = maxn
	}
	return h.forward(n), n
}
// bits returns the heap bits for the current word.
// The caller can test morePointers and isPointer by &-ing with bitScan and bitPointer.
// The result includes in its higher bits the bits for subsequent words
// described by the same bitmap byte.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) bits() uint32 {
	// The (shift & 31) eliminates a test and conditional branch
	// from the generated code.
	return uint32(*h.bitp) >> (h.shift & 31)
}
// morePointers reports whether this word and all remaining words in this object
// could contain pointers (that is, the scan bit is set).
// h must not describe the second word of the object.
func (h heapBits) morePointers() bool {
	return h.bits()&bitScan != 0
}
// isPointer reports whether the heap bits describe a pointer word.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) isPointer() bool {
	return h.bits()&bitPointer != 0
}
// bulkBarrierPreWrite executes a write barrier
// for every pointer slot in the memory range [src, src+size),
// using pointer/scalar information from [dst, dst+size).
// This executes the write barriers necessary before a memmove.
// src, dst, and size must be pointer-aligned.
// The range [dst, dst+size) must lie within a single object.
// It does not perform the actual writes.
//
// As a special case, src == 0 indicates that this is being used for a
// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
// barrier.
//
// Callers should call bulkBarrierPreWrite immediately before
// calling memmove(dst, src, size). This function is marked nosplit
// to avoid being preempted; the GC must not stop the goroutine
// between the memmove and the execution of the barriers.
// The caller is also responsible for cgo pointer checks if this
// may be writing Go pointers into non-Go memory.
//
// The pointer bitmap is not maintained for allocations containing
// no pointers at all; any caller of bulkBarrierPreWrite must first
// make sure the underlying allocation contains pointers, usually
// by checking typ.ptrdata.
//
// Callers must perform cgo checks if writeBarrier.cgo.
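//
// As a sketch of the intended call pattern (mirroring typedmemmove-style
// callers; illustrative, not a prescription):
//
//	if writeBarrier.needed && typ.ptrdata != 0 {
//		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
//	}
//	memmove(dst, src, typ.size)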
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr) {
	if (dst|src|size)&(sys.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.needed {
		return
	}
	if s := spanOf(dst); s == nil {
		// If dst is a global, use the data or BSS bitmaps to
		// execute write barriers.
		for _, datap := range activeModules() {
			if datap.data <= dst && dst < datap.edata {
				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
				return
			}
		}
		for _, datap := range activeModules() {
			if datap.bss <= dst && dst < datap.ebss {
				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
				return
			}
		}
		return
	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
		// dst was heap memory at some point, but isn't now.
		// It can't be a global. It must be either our stack,
		// or in the case of direct channel sends, it could be
		// another stack. Either way, no need for barriers.
		// This will also catch if dst is in a freed span,
		// though that should never happen.
		return
	}

	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst)
	if src == 0 {
		for i := uintptr(0); i < size; i += sys.PtrSize {
			if h.isPointer() {
				dstx := (*uintptr)(unsafe.Pointer(dst + i))
				if !buf.putFast(*dstx, 0) {
					wbBufFlush(nil, 0)
				}
			}
			h = h.next()
		}
	} else {
		for i := uintptr(0); i < size; i += sys.PtrSize {
			if h.isPointer() {
				dstx := (*uintptr)(unsafe.Pointer(dst + i))
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				if !buf.putFast(*dstx, *srcx) {
					wbBufFlush(nil, 0)
				}
			}
			h = h.next()
		}
	}
}
// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
// does not execute write barriers for [dst, dst+size).
//
// In addition to the requirements of bulkBarrierPreWrite,
// callers need to ensure [dst, dst+size) is zeroed.
//
// This is used for special cases where e.g. dst was just
// created and zeroed with malloc.
//go:nosplit
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
	if (dst|src|size)&(sys.PtrSize-1) != 0 {
		throw("bulkBarrierPreWriteSrcOnly: unaligned arguments")
	}
	if !writeBarrier.needed {
		return
	}
	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst)
	for i := uintptr(0); i < size; i += sys.PtrSize {
		if h.isPointer() {
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			if !buf.putFast(0, *srcx) {
				wbBufFlush(nil, 0)
			}
		}
		h = h.next()
	}
}
// bulkBarrierBitmap executes write barriers for copying from [src,
// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
// assumed to start maskOffset bytes into the data covered by the
// bitmap in bits (which may not be a multiple of 8).
//
// This is used by bulkBarrierPreWrite for writes to data and BSS.
//go:nosplit
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
	word := maskOffset / sys.PtrSize
	bits = addb(bits, word/8)
	mask := uint8(1) << (word % 8)

	buf := &getg().m.p.ptr().wbBuf
	for i := uintptr(0); i < size; i += sys.PtrSize {
		if mask == 0 {
			bits = addb(bits, 1)
			mask = 1
		}
		if *bits&mask != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			if src == 0 {
				if !buf.putFast(*dstx, 0) {
					wbBufFlush(nil, 0)
				}
			} else {
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				if !buf.putFast(*dstx, *srcx) {
					wbBufFlush(nil, 0)
				}
			}
		}
		mask <<= 1
	}
}
// typeBitsBulkBarrier executes a write barrier for every
// pointer that would be copied from [src, src+size) to [dst,
// dst+size) by a memmove using the type bitmap to locate those
// pointer slots.
//
// The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
// dst, src, and size must be pointer-aligned.
// The type typ must have a plain bitmap, not a GC program.
// The only use of this function is in channel sends, and the
// 64 kB channel element limit takes care of this for us.
//
// Must not be preempted because it typically runs right before memmove,
// and the GC must observe them as an atomic action.
//
// Callers must perform cgo checks if writeBarrier.cgo.
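//
// For example, a sketch of the pairing in a channel send (illustrative):
//
//	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
//	memmove(dst, src, t.size)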
//go:nosplit
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
	if typ == nil {
		throw("runtime: typeBitsBulkBarrier without type")
	}
	if typ.size != size {
		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size)
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if typ.kind&kindGCProg != 0 {
		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog")
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if !writeBarrier.needed {
		return
	}
	ptrmask := typ.gcdata
	buf := &getg().m.p.ptr().wbBuf
	var bits uint32
	for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
		if i&(sys.PtrSize*8-1) == 0 {
			bits = uint32(*ptrmask)
			ptrmask = addb(ptrmask, 1)
		} else {
			bits = bits >> 1
		}
		if bits&1 != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			if !buf.putFast(*dstx, *srcx) {
				wbBufFlush(nil, 0)
			}
		}
	}
}
// The methods operating on spans all require that h has been returned
// by heapBitsForSpan and that size, n, total are the span layout description
// returned by the mspan's layout method.
// If total > size*n, it means that there is extra leftover memory in the span,
// usually due to rounding.
//
// TODO(rsc): Perhaps introduce a different heapBitsSpan type.
// initSpan initializes the heap bitmap for a span.
// If this is a span of pointer-sized objects, it initializes all
// words to pointer/scan.
// Otherwise, it initializes all words to scalar/dead.
func (h heapBits) initSpan(s *mspan) {
	// Clear bits corresponding to objects.
	nw := (s.npages << _PageShift) / sys.PtrSize
	if nw%wordsPerBitmapByte != 0 {
		throw("initSpan: unaligned length")
	}
	if h.shift != 0 {
		throw("initSpan: unaligned base")
	}
	isPtrs := sys.PtrSize == 8 && s.elemsize == sys.PtrSize
	for nw > 0 {
		hNext, anw := h.forwardOrBoundary(nw)
		nbyte := anw / wordsPerBitmapByte
		if isPtrs {
			bitp := h.bitp
			for i := uintptr(0); i < nbyte; i++ {
				*bitp = bitPointerAll | bitScanAll
				bitp = add1(bitp)
			}
		} else {
			memclrNoHeapPointers(unsafe.Pointer(h.bitp), nbyte)
		}
		h = hNext
		nw -= anw
	}
}
// countAlloc returns the number of objects allocated in span s by
// scanning the allocation bitmap.
func (s *mspan) countAlloc() int {
	count := 0
	bytes := divRoundUp(s.nelems, 8)
	// Iterate over each 8-byte chunk and count allocations
	// with an intrinsic. Note that newMarkBits guarantees that
	// gcmarkBits will be 8-byte aligned, so we don't have to
	// worry about edge cases; irrelevant bits will simply be zero.
	for i := uintptr(0); i < bytes; i += 8 {
		// Extract 64 bits from the byte pointer and count them
		// with OnesCount64.
		// Note that the unsafe cast here doesn't preserve endianness,
		// but that's OK. We only care about how many bits are 1, not
		// about the order we discover them in.
		mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
		count += sys.OnesCount64(mrkBits)
	}
	return count
}
// heapBitsSetType records that the new allocation [x, x+size)
// holds in [x, x+dataSize) one or more values of type typ.
// (The number of values is given by dataSize / typ.size.)
// If dataSize < size, the fragment [x+dataSize, x+size) is
// recorded as non-pointer data.
// It is known that the type has pointers somewhere;
// malloc does not call heapBitsSetType when there are no pointers,
// because all free objects are marked as noscan during
// heapBitsSweepSpan.
//
// There can only be one allocation from a given span active at a time,
// and the bitmap for a span always falls on byte boundaries,
// so there are no write-write races for access to the heap bitmap.
// Hence, heapBitsSetType can access the bitmap without atomics.
//
// There can be read-write races between heapBitsSetType and things
// that read the heap bitmap like scanobject. However, since
// heapBitsSetType is only used for objects that have not yet been
// made reachable, readers will ignore bits being modified by this
// function. This does mean this function cannot transiently modify
// bits that belong to neighboring objects. Also, on weakly-ordered
// machines, callers must execute a store/store (publication) barrier
// between calling this function and making the object reachable.
func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
	const doubleCheck = false // slow but helpful; enable to test modifications to this code

	const (
		mask1 = bitPointer | bitScan                        // 00010001
		mask2 = bitPointer | bitScan | mask1<<heapBitsShift // 00110011
		mask3 = bitPointer | bitScan | mask2<<heapBitsShift // 01110111
	)
	// dataSize is always size rounded up to the next malloc size class,
	// except in the case of allocating a defer block, in which case
	// size is sizeof(_defer{}) (at least 6 words) and dataSize may be
	// arbitrarily larger.
	//
	// The checks for size == sys.PtrSize and size == 2*sys.PtrSize can therefore
	// assume that dataSize == size without checking it explicitly.
	if sys.PtrSize == 8 && size == sys.PtrSize {
		// It's one word and it has pointers, it must be a pointer.
		// Since all allocated one-word objects are pointers
		// (non-pointers are aggregated into tinySize allocations),
		// initSpan sets the pointer bits for us. Nothing to do here.
		if doubleCheck {
			h := heapBitsForAddr(x)
			if !h.isPointer() {
				throw("heapBitsSetType: pointer bit missing")
			}
			if !h.morePointers() {
				throw("heapBitsSetType: scan bit missing")
			}
		}
		return
	}
	h := heapBitsForAddr(x)
	ptrmask := typ.gcdata // start of 1-bit pointer mask (or GC program, handled below)

	// 2-word objects only have 4 bitmap bits and 3-word objects only have 6 bitmap bits.
	// Therefore, these objects share a heap bitmap byte with the objects next to them.
	// These are called out as a special case primarily so the code below can assume all
	// objects are at least 4 words long and that their bitmaps start either at the beginning
	// of a bitmap byte, or half-way in (h.shift of 0 and 2 respectively).
	if size == 2*sys.PtrSize {
		if typ.size == sys.PtrSize {
			// We're allocating a block big enough to hold two pointers.
			// On 64-bit, that means the actual object must be two pointers,
			// or else we'd have used the one-pointer-sized block.
			// On 32-bit, however, this is the 8-byte block, the smallest one.
			// So it could be that we're allocating one pointer and this was
			// just the smallest block available. Distinguish by checking dataSize.
			// (In general the number of instances of typ being allocated is
			// dataSize/typ.size.)
			if sys.PtrSize == 4 && dataSize == sys.PtrSize {
				// 1 pointer object. On 32-bit machines clear the bit for the
				// unused second word.
				*h.bitp &^= (bitPointer | bitScan | (bitPointer|bitScan)<<heapBitsShift) << h.shift
				*h.bitp |= (bitPointer | bitScan) << h.shift
			} else {
				// 2-element array of pointer.
				*h.bitp |= (bitPointer | bitScan | (bitPointer|bitScan)<<heapBitsShift) << h.shift
			}
			return
		}
		// Otherwise typ.size must be 2*sys.PtrSize,
		// and typ.kind&kindGCProg == 0.
		if doubleCheck {
			if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 {
				print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
				throw("heapBitsSetType")
			}
		}
		b := uint32(*ptrmask)
		hb := b & 3
		hb |= bitScanAll & ((bitScan << (typ.ptrdata / sys.PtrSize)) - 1)
		// Clear the bits for this object so we can set the
		// appropriate ones.
		*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
		*h.bitp |= uint8(hb << h.shift)
		return
	} else if size == 3*sys.PtrSize {
		b := uint8(*ptrmask)
		if doubleCheck {
			if b == 0 {
				println("runtime: invalid type ", typ.string())
				throw("heapBitsSetType: called with non-pointer type")
			}
			if sys.PtrSize != 8 {
				throw("heapBitsSetType: unexpected 3 pointer wide size class on 32 bit")
			}
			if typ.kind&kindGCProg != 0 {
				throw("heapBitsSetType: unexpected GC prog for 3 pointer wide size class")
			}
			if typ.size == 2*sys.PtrSize {
				print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, "\n")
				throw("heapBitsSetType: inconsistent object sizes")
			}
		}
		if typ.size == sys.PtrSize {
			// The type contains a pointer otherwise heapBitsSetType wouldn't have been called.
			// Since the type is only 1 pointer wide and contains a pointer, its gcdata must be exactly 1.
			if doubleCheck && *typ.gcdata != 1 {
				print("runtime: heapBitsSetType size=", size, " typ.size=", typ.size, " but *typ.gcdata=", *typ.gcdata, "\n")
				throw("heapBitsSetType: unexpected gcdata for 1 pointer wide type size in 3 pointer wide size class")
			}
			// 3 element array of pointers. Unrolling ptrmask 3 times into p yields 00000111.
			b = 7
		}

		hb := b & 7
		// Set bitScan bits for all pointers.
		hb |= hb << wordsPerBitmapByte
		// First bitScan bit is always set since the type contains pointers.
		hb |= bitScan
		// Second bitScan bit needs to also be set if the third bitScan bit is set.
		hb |= hb & (bitScan << (2 * heapBitsShift)) >> 1
		// For h.shift > 1 heap bits cross a byte boundary and need to be written part
		// to h.bitp and part to the next h.bitp.
		switch h.shift {
		case 0:
			*h.bitp &^= mask3 << 0
			*h.bitp |= hb << 0
		case 1:
			*h.bitp &^= mask3 << 1
			*h.bitp |= hb << 1
		case 2:
			*h.bitp &^= mask2 << 2
			*h.bitp |= (hb & mask2) << 2
			// Two words written to the first byte.
			// Advance two words to get to the next byte.
			h = h.next().next()
			*h.bitp &^= mask1
			*h.bitp |= (hb >> 2) & mask1
		case 3:
			*h.bitp &^= mask1 << 3
			*h.bitp |= (hb & mask1) << 3
			// One word written to the first byte.
			// Advance one word to get to the next byte.
			h = h.next()
			*h.bitp &^= mask2
			*h.bitp |= (hb >> 1) & mask2
		}
		return
	}
	// Copy from 1-bit ptrmask into 2-bit bitmap.
	// The basic approach is to use a single uintptr as a bit buffer,
	// alternating between reloading the buffer and writing bitmap bytes.
	// In general, one load can supply two bitmap byte writes.
	// This is a lot of lines of code, but it compiles into relatively few
	// machine instructions.
	outOfPlace := false
	if arenaIndex(x+size-1) != arenaIdx(h.arena) || (doubleCheck && fastrand()%2 == 0) {
		// This object spans heap arenas, so the bitmap may be
		// discontiguous. Unroll it into the object instead
		// and then copy it out.
		//
		// In doubleCheck mode, we randomly do this anyway to
		// stress test the bitmap copying path.
		outOfPlace = true
		h.bitp = (*uint8)(unsafe.Pointer(x))
		h.last = nil
	}
	var (
		// Ptrmask input.
		p     *byte   // last ptrmask byte read
		b     uintptr // ptrmask bits already loaded
		nb    uintptr // number of bits in b at next read
		endp  *byte   // final ptrmask byte to read (then repeat)
		endnb uintptr // number of valid bits in *endp
		pbits uintptr // alternate source of bits

		// Heap bitmap output.
		w     uintptr // words processed
		nw    uintptr // number of words to process
		hbitp *byte   // next heap bitmap byte to write
		hb    uintptr // bits being prepared for *hbitp
	)

	hbitp = h.bitp
	// Handle GC program. Delayed until this part of the code
	// so that we can use the same double-checking mechanism
	// as the 1-bit case. Nothing above could have encountered
	// GC programs: the cases were all too small.
	if typ.kind&kindGCProg != 0 {
		heapBitsSetTypeGCProg(h, typ.ptrdata, typ.size, dataSize, size, addb(typ.gcdata, 4))
		if doubleCheck {
			// Double-check the heap bits written by GC program
			// by running the GC program to create a 1-bit pointer mask
			// and then jumping to the double-check code below.
			// This doesn't catch bugs shared between the 1-bit and 4-bit
			// GC program execution, but it does catch mistakes specific
			// to just one of those and bugs in heapBitsSetTypeGCProg's
			// implementation of arrays.
			lock(&debugPtrmask.lock)
			if debugPtrmask.data == nil {
				debugPtrmask.data = (*byte)(persistentalloc(1<<20, 1, &memstats.other_sys))
			}
			ptrmask = debugPtrmask.data
			runGCProg(addb(typ.gcdata, 4), nil, ptrmask, 1)
		}
		goto Phase4
	}
	// Note about sizes:
	//
	// typ.size is the number of words in the object,
	// and typ.ptrdata is the number of words in the prefix
	// of the object that contains pointers. That is, the final
	// typ.size - typ.ptrdata words contain no pointers.
	// This allows optimization of a common pattern where
	// an object has a small header followed by a large scalar
	// buffer. If we know the pointers are over, we don't have
	// to scan the buffer's heap bitmap at all.
	// The 1-bit ptrmasks are sized to contain only bits for
	// the typ.ptrdata prefix, zero padded out to a full byte
	// of bitmap. This code sets nw (below) so that heap bitmap
	// bits are only written for the typ.ptrdata prefix; if there is
	// more room in the allocated object, the next heap bitmap
	// entry is a 00, indicating that there are no more pointers
	// to scan. So only the ptrmask for the ptrdata bytes is needed.
	//
	// Replicated copies are not as nice: if there is an array of
	// objects with scalar tails, all but the last tail does have to
	// be initialized, because there is no way to say "skip forward".
	// However, because of the possibility of a repeated type with
	// size not a multiple of 4 pointers (one heap bitmap byte),
	// the code already must handle the last ptrmask byte specially
	// by treating it as containing only the bits for endnb pointers,
	// where endnb <= 4. We represent large scalar tails that must
	// be expanded in the replication by setting endnb larger than 4.
	// This will have the effect of reading many bits out of b,
	// but once the real bits are shifted out, b will supply as many
	// zero bits as we try to read, which is exactly what we need.
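	//
	// For example (illustrative numbers): an array of three 4-word
	// structs, each with a 2-word pointer prefix, has
	// nw = ((3-1)*4 + 2) = 10 words of bitmap to write (see below);
	// the two scalar words of the final element are left as 00 ("dead").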
	p = ptrmask
	if typ.size < dataSize {
		// Filling in bits for an array of typ.
		// Set up for repetition of ptrmask during main loop.
		// Note that ptrmask describes only a prefix of the full
		// array's bitmap (one element's pointer words).
		const maxBits = sys.PtrSize*8 - 7
		if typ.ptrdata/sys.PtrSize <= maxBits {
			// Entire ptrmask fits in uintptr with room for a byte fragment.
			// Load into pbits and never read from ptrmask again.
			// This is especially important when the ptrmask has
			// fewer than 8 bits in it; otherwise the reload in the middle
			// of the Phase 2 loop would itself need to loop to gather
			// the bits necessary to fill a byte.

			// Accumulate ptrmask into b.
			// ptrmask is sized to describe only typ.ptrdata, but we record
			// it as describing typ.size bytes, since all the high bits are zero.
			nb = typ.ptrdata / sys.PtrSize
			for i := uintptr(0); i < nb; i += 8 {
				b |= uintptr(*p) << i
				p = add1(p)
			}
			nb = typ.size / sys.PtrSize

			// Replicate ptrmask to fill entire pbits uintptr.
			// Doubling and truncating is fewer steps than
			// iterating by nb each time. (nb could be 1.)
			// Since we loaded typ.ptrdata/sys.PtrSize bits
			// but are pretending to have typ.size/sys.PtrSize,
			// there might be no replication necessary/possible.
			pbits = b
			endnb = nb
			if nb+nb <= maxBits {
				for endnb <= sys.PtrSize*8 {
					pbits |= pbits << endnb
					endnb += endnb
				}
				// Truncate to a multiple of original ptrmask.
				// Because nb+nb <= maxBits, nb fits in a byte.
				// Byte division is cheaper than uintptr division.
				endnb = uintptr(maxBits/byte(nb)) * nb
				pbits &= 1<<endnb - 1
				b = pbits
				nb = endnb
			}

			// Clear p and endp as sentinel for using pbits.
			// Checked during Phase 2 loop.
			p = nil
			endp = nil
		} else {
			// Ptrmask is larger. Read it multiple times.
			n := (typ.ptrdata/sys.PtrSize+7)/8 - 1
			endp = addb(ptrmask, n)
			endnb = typ.size/sys.PtrSize - n*8
		}
	}
	if p != nil {
		b = uintptr(*p)
		p = add1(p)
		nb = 8
	}
	if typ.size == dataSize {
		// Single entry: can stop once we reach the non-pointer data.
		nw = typ.ptrdata / sys.PtrSize
	} else {
		// Repeated instances of typ in an array.
		// Have to process first N-1 entries in full, but can stop
		// once we reach the non-pointer data in the final entry.
		nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize
	}
	if nw == 0 {
		// No pointers! Caller was supposed to check.
		println("runtime: invalid type ", typ.string())
		throw("heapBitsSetType: called with non-pointer type")
	}
	// Phase 1: Special case for leading byte (shift==0) or half-byte (shift==2).
	// The leading byte is special because it contains the bits for word 1,
	// which does not have the scan bit set.
	// The leading half-byte is special because it's a half a byte,
	// so we have to be careful with the bits already there.
	switch {
	default:
		throw("heapBitsSetType: unexpected shift")

	case h.shift == 0:
		// Ptrmask and heap bitmap are aligned.
		//
		// This is a fast path for small objects.
		//
		// The first byte we write out covers the first four
		// words of the object. The scan/dead bit on the first
		// word must be set to scan since there are pointers
		// somewhere in the object.
		// In all following words, we set the scan/dead
		// appropriately to indicate that the object continues
		// to the next 2-bit entry in the bitmap.
		//
		// We set four bits at a time here, but if the object
		// is fewer than four words, phase 3 will clear
		// unnecessary bits.
		hb = b & bitPointerAll
		hb |= bitScanAll
		if w += 4; w >= nw {
			goto Phase3
		}
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		b >>= 4
		nb -= 4

	case h.shift == 2:
		// Ptrmask and heap bitmap are misaligned.
		//
		// On 32 bit architectures only the 6-word object that corresponds
		// to a 24 byte size class can start with h.shift of 2 here since
		// all other non 16 byte aligned size classes have been handled by
		// special code paths at the beginning of heapBitsSetType on 32 bit.
		//
		// Many size classes are only 16 byte aligned. On 64 bit architectures
		// this results in a heap bitmap position starting with a h.shift of 2.
		//
		// The bits for the first two words are in a byte shared
		// with another object, so we must be careful with the bits
		// already there.
		//
		// We took care of 1-word, 2-word, and 3-word objects above,
		// so this is at least a 6-word object.
		hb = (b & (bitPointer | bitPointer<<heapBitsShift)) << (2 * heapBitsShift)
		hb |= bitScan << (2 * heapBitsShift)
		if nw > 1 {
			hb |= bitScan << (3 * heapBitsShift)
		}
		b >>= 2
		nb -= 2
		*hbitp &^= uint8((bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << (2 * heapBitsShift))
		*hbitp |= uint8(hb)
		hbitp = add1(hbitp)
		if w += 2; w >= nw {
			// We know that there is more data, because we handled 2-word and 3-word objects above.
			// This must be at least a 6-word object. If we're out of pointer words,
			// mark no scan in next bitmap byte and finish.
			hb = 0
			w += 4
			goto Phase3
		}
	}
	// Phase 2: Full bytes in bitmap, up to but not including write to last byte (full or partial) in bitmap.
	// The loop computes the bits for that last write but does not execute the write;
	// it leaves the bits in hb for processing by phase 3.
	// To avoid repeated adjustment of nb, we subtract out the 4 bits we're going to
	// use in the first half of the loop right now, and then we only adjust nb explicitly
	// if the 8 bits used by each iteration isn't balanced by 8 bits loaded mid-loop.
	nb -= 4
	for {
		// Emit bitmap byte.
		// b has at least nb+4 bits, with one exception:
		// if w+4 >= nw, then b has only nw-w bits,
		// but we'll stop at the break and then truncate
		// appropriately in Phase 3.
		hb = b & bitPointerAll
		hb |= bitScanAll
		if w += 4; w >= nw {
			break
		}
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		b >>= 4

		// Load more bits. b has nb right now.
		if p != endp {
			// Fast path: keep reading from ptrmask.
			// nb unmodified: we just loaded 8 bits,
			// and the next iteration will consume 8 bits,
			// leaving us with the same nb the next time we're here.
			if nb < 8 {
				b |= uintptr(*p) << nb
				p = add1(p)
			} else {
				// Reduce the number of bits in b.
				// This is important if we skipped
				// over a scalar tail, since nb could
				// be larger than the bit width of b.
				nb -= 8
			}
		} else if p == nil {
			// Almost as fast path: track bit count and refill from pbits.
			// For short repetitions.
			if nb < 8 {
				b |= pbits << nb
				nb += endnb
			}
			nb -= 8 // for next iteration
		} else {
			// Slow path: reached end of ptrmask.
			// Process final partial byte and rewind to start.
			b |= uintptr(*p) << nb
			nb += endnb
			if nb < 8 {
				b |= uintptr(*ptrmask) << nb
				p = add1(ptrmask)
			} else {
				nb -= 8
				p = ptrmask
			}
		}

		// Emit bitmap byte.
		hb = b & bitPointerAll
		hb |= bitScanAll
		if w += 4; w >= nw {
			break
		}
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		b >>= 4
	}
Phase3:
	// Phase 3: Write last byte or partial byte and zero the rest of the bitmap entries.
	if w > nw {
		// Counting the 4 entries in hb not yet written to memory,
		// there are more entries than possible pointer slots.
		// Discard the excess entries (can't be more than 3).
		mask := uintptr(1)<<(4-(w-nw)) - 1
		hb &= mask | mask<<4 // apply mask to both pointer bits and scan bits
	}

	// Change nw from counting possibly-pointer words to total words in allocation.
	nw = size / sys.PtrSize

	// Write whole bitmap bytes.
	// The first is hb, the rest are zero.
	if w <= nw {
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		hb = 0 // for possible final half-byte below
		for w += 4; w <= nw; w += 4 {
			*hbitp = 0
			hbitp = add1(hbitp)
		}
	}

	// Write final partial bitmap byte if any.
	// We know w > nw, or else we'd still be in the loop above.
	// It can be bigger only due to the 4 entries in hb that it counts.
	// If w == nw+4 then there's nothing left to do: we wrote all nw entries
	// and can discard the 4 sitting in hb.
	// But if w == nw+2, we need to write first two in hb.
	// The byte is shared with the next object, so be careful with
	// existing bits.
	if w == nw+2 {
		*hbitp = *hbitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | uint8(hb)
	}
Phase4:
	// Phase 4: Copy unrolled bitmap to per-arena bitmaps, if necessary.
	if outOfPlace {
		// TODO: We could probably make this faster by
		// handling [x+dataSize, x+size) specially.
		h := heapBitsForAddr(x)

		// cnw is the number of heap words, or bit pairs
		// remaining (like nw above).
		cnw := size / sys.PtrSize
		src := (*uint8)(unsafe.Pointer(x))
		// We know the first and last byte of the bitmap are
		// not the same, but it's still possible for small
		// objects to span arenas, so it may share bitmap bytes
		// with neighboring objects.
		//
		// Handle the first byte specially if it's shared. See
		// Phase 1 for why this is the only special case we need.
		if doubleCheck {
			if !(h.shift == 0 || h.shift == 2) {
				print("x=", x, " size=", size, " h.shift=", h.shift, "\n")
				throw("bad start shift")
			}
		}
		if h.shift == 2 {
			*h.bitp = *h.bitp&^((bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift)<<(2*heapBitsShift)) | *src
			h = h.next().next()
			cnw -= 2
			src = addb(src, 1)
		}
		// We're now byte aligned. Copy out to per-arena
		// bitmaps until the last byte (which may again be
		// partial).
		for cnw >= 4 {
			// This loop processes four words at a time,
			// so round cnw down accordingly.
			hNext, words := h.forwardOrBoundary(cnw / 4 * 4)

			// n is the number of bitmap bytes to copy.
			n := words / 4
			memmove(unsafe.Pointer(h.bitp), unsafe.Pointer(src), n)
			cnw -= words
			h = hNext
			src = addb(src, n)
		}
		if doubleCheck && h.shift != 0 {
			print("cnw=", cnw, " h.shift=", h.shift, "\n")
			throw("bad shift after block copy")
		}
		// Handle the last byte if it's shared.
		if cnw == 2 {
			*h.bitp = *h.bitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | *src
			src = addb(src, 1)
			h = h.next().next()
		}
		if doubleCheck {
			if uintptr(unsafe.Pointer(src)) > x+size {
				throw("copy exceeded object size")
			}
			if !(cnw == 0 || cnw == 2) {
				print("x=", x, " size=", size, " cnw=", cnw, "\n")
				throw("bad number of remaining words")
			}
			// Set up hbitp so doubleCheck code below can check it.
			hbitp = h.bitp
		}
		// Zero the object where we wrote the bitmap.
		memclrNoHeapPointers(unsafe.Pointer(x), uintptr(unsafe.Pointer(src))-x)
	}
	// Double check the whole bitmap.
	if doubleCheck {
		// x+size may not point to the heap, so back up one
		// word and then advance it the way we do above.
		end := heapBitsForAddr(x + size - sys.PtrSize)
		if outOfPlace {
			// In out-of-place copying, we just advance
			// using next.
			end = end.next()
		} else {
			// Don't use next because that may advance to
			// the next arena and the in-place logic
			// doesn't work with arena boundaries.
			end.shift += heapBitsShift
			if end.shift == 4*heapBitsShift {
				end.bitp, end.shift = add1(end.bitp), 0
			}
		}
		if typ.kind&kindGCProg == 0 && (hbitp != end.bitp || (w == nw+2) != (end.shift == 2)) {
			println("ended at wrong bitmap byte for", typ.string(), "x", dataSize/typ.size)
			print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
			print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
			h0 := heapBitsForAddr(x)
			print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
			print("ended at hbitp=", hbitp, " but next starts at bitp=", end.bitp, " shift=", end.shift, "\n")
			throw("bad heapBitsSetType")
		}
		// Double-check that bits to be written were written correctly.
		// Does not check that other bits were not written, unfortunately.
		h := heapBitsForAddr(x)
		nptr := typ.ptrdata / sys.PtrSize
		ndata := typ.size / sys.PtrSize
		count := dataSize / typ.size
		totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize
		for i := uintptr(0); i < size/sys.PtrSize; i++ {
			j := i % ndata
			var have, want uint8
			have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
			if i >= totalptr {
				if typ.kind&kindGCProg != 0 && i < (totalptr+3)/4*4 {
					// heapBitsSetTypeGCProg always fills
					// in full nibbles of bitScan.
					want = bitScan
				}
			} else {
				if j < nptr && (*addb(ptrmask, j/8)>>(j%8))&1 != 0 {
					want |= bitPointer
				}
				want |= bitScan
			}
			if have != want {
				println("mismatch writing bits for", typ.string(), "x", dataSize/typ.size)
				print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
				print("kindGCProg=", typ.kind&kindGCProg != 0, " outOfPlace=", outOfPlace, "\n")
				print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
				h0 := heapBitsForAddr(x)
				print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
				print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
				print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
				println("at word", i, "offset", i*sys.PtrSize, "have", hex(have), "want", hex(want))
				if typ.kind&kindGCProg != 0 {
					println("GC program:")
					dumpGCProg(addb(typ.gcdata, 4))
				}
				throw("bad heapBitsSetType")
			}
			h = h.next()
		}
		if ptrmask == debugPtrmask.data {
			unlock(&debugPtrmask.lock)
		}
	}
}
var debugPtrmask struct {
	lock mutex
	data *byte
}
// heapBitsSetTypeGCProg implements heapBitsSetType using a GC program.
// progSize is the size of the memory described by the program.
// elemSize is the size of the element that the GC program describes (a prefix of).
// dataSize is the total size of the intended data, a multiple of elemSize.
// allocSize is the total size of the allocated memory.
//
// GC programs are only used for large allocations.
// heapBitsSetType requires that allocSize is a multiple of 4 words,
// so that the relevant bitmap bytes are not shared with surrounding
// objects.
func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
	if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 {
		// Alignment will be wrong.
		throw("heapBitsSetTypeGCProg: small allocation")
	}
	var totalBits uintptr
	if elemSize == dataSize {
		totalBits = runGCProg(prog, nil, h.bitp, 2)
		if totalBits*sys.PtrSize != progSize {
			println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
			throw("heapBitsSetTypeGCProg: unexpected bit count")
		}
	} else {
		count := dataSize / elemSize

		// Piece together program trailer to run after prog that does:
		//	literal(0)
		//	repeat(1, elemSize-progSize-1) // zeros to fill element size
		//	repeat(elemSize, count-1) // repeat that element for count
		// This zero-pads the data remaining in the first element and then
		// repeats that first element to fill the array.
		var trailer [40]byte // 3 varints (max 10 each) + some bytes
		i := 0
		if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 {
			// literal(0)
			trailer[i] = 0x01
			i++
			trailer[i] = 0
			i++
			if n > 1 {
				// repeat(1, n-1)
				trailer[i] = 0x81
				i++
				n--
				for ; n >= 0x80; n >>= 7 {
					trailer[i] = byte(n | 0x80)
					i++
				}
				trailer[i] = byte(n)
				i++
			}
		}
		// repeat(elemSize/ptrSize, count-1)
		trailer[i] = 0x80
		i++
		n := elemSize / sys.PtrSize
		for ; n >= 0x80; n >>= 7 {
			trailer[i] = byte(n | 0x80)
			i++
		}
		trailer[i] = byte(n)
		i++
		n = count - 1
		for ; n >= 0x80; n >>= 7 {
			trailer[i] = byte(n | 0x80)
			i++
		}
		trailer[i] = byte(n)
		i++
		trailer[i] = 0
		i++

		runGCProg(prog, &trailer[0], h.bitp, 2)

		// Even though we filled in the full array just now,
		// record that we only filled in up to the ptrdata of the
		// last element. This will cause the code below to
		// memclr the dead section of the final array element,
		// so that scanobject can stop early in the final element.
		totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize
	}
	endProg := unsafe.Pointer(addb(h.bitp, (totalBits+3)/4))
	endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/sys.PtrSize/wordsPerBitmapByte))
	memclrNoHeapPointers(endProg, uintptr(endAlloc)-uintptr(endProg))
}
// progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
// size is the size of the region described by prog, in bytes.
// The resulting bitvector will have no more than size/sys.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector {
	n := (size/sys.PtrSize + 7) / 8
	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
	x[len(x)-1] = 0xa1 // overflow check sentinel
	n = runGCProg(prog, nil, &x[0], 1)
	if x[len(x)-1] != 0xa1 {
		throw("progToPointerMask: overflow")
	}
	return bitvector{int32(n), &x[0]}
}
// Packed GC pointer bitmaps, aka GC programs.
//
// For large types containing arrays, the type information has a
// natural repetition that can be encoded to save space in the
// binary and in the memory representation of the type information.
//
// The encoding is a simple Lempel-Ziv style bytecode machine
// with the following instructions:
//
//	00000000: stop
//	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
//	10000000 n c: repeat the previous n bits c times; n, c are varints
//	1nnnnnnn c: repeat the previous n bits c times; c is a varint
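//
// For example (a sketch): a type whose first four words are all
// pointers could be described by the program
//
//	0x01 0x01  // emit 1 bit (a pointer) from the next byte
//	0x81 0x03  // repeat the previous 1 bit 3 more times
//	0x00       // stop
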
// runGCProg executes the GC program prog, and then trailer if non-nil,
// writing to dst with entries of the given size.
// If size == 1, dst is a 1-bit pointer mask laid out moving forward from dst.
// If size == 2, dst is the 2-bit heap bitmap, and writes move backward
// starting at dst (because the heap bitmap does). In this case, the caller guarantees
// that only whole bytes in dst need to be written.
//
// runGCProg returns the number of 1- or 2-bit entries written to memory.
func runGCProg(prog, trailer, dst *byte, size int) uintptr {
	dstStart := dst
	// Bits waiting to be written to memory.
	var bits uintptr
	var nbits uintptr

	p := prog
Run:
	for {
		// Flush accumulated full bytes.
		// The rest of the loop assumes that nbits <= 7.
		for ; nbits >= 8; nbits -= 8 {
			if size == 1 {
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			} else {
				v := bits&bitPointerAll | bitScanAll
				*dst = uint8(v)
				dst = subtract1(dst)
				bits >>= 4
				v = bits&bitPointerAll | bitScanAll
				*dst = uint8(v)
				dst = subtract1(dst)
				bits >>= 4
			}
		}
		// Process one instruction.
		inst := uintptr(*p)
		p = add1(p)
		n := inst & 0x7F
		if inst&0x80 == 0 {
			// Literal bits; n == 0 means end of program.
			if n == 0 {
				// Program is over; continue in trailer if present.
				if trailer != nil {
					p = trailer
					trailer = nil
					continue
				}
				break Run
			}
			nbyte := n / 8
			for i := uintptr(0); i < nbyte; i++ {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				if size == 1 {
					*dst = uint8(bits)
					dst = add1(dst)
					bits >>= 8
				} else {
					v := bits&0xf | bitScanAll
					*dst = uint8(v)
					dst = subtract1(dst)
					bits >>= 4
					v = bits&0xf | bitScanAll
					*dst = uint8(v)
					dst = subtract1(dst)
					bits >>= 4
				}
			}
			if n %= 8; n > 0 {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				nbits += n
			}
			continue Run
		}
		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
		if n == 0 {
			for off := uint(0); ; off += 7 {
				x := uintptr(*p)
				p = add1(p)
				n |= (x & 0x7F) << off
				if x&0x80 == 0 {
					break
				}
			}
		}

		// Count is encoded in a varint in the next bytes.
		c := uintptr(0)
		for off := uint(0); ; off += 7 {
			x := uintptr(*p)
			p = add1(p)
			c |= (x & 0x7F) << off
			if x&0x80 == 0 {
				break
			}
		}
		c *= n // now total number of bits to copy
		// If the number of bits being repeated is small, load them
		// into a register and use that register for the entire loop
		// instead of repeatedly reading from memory.
		// Handling fewer than 8 bits here makes the general loop simpler.
		// The cutoff is sys.PtrSize*8 - 7 to guarantee that when we add
		// the pattern to a bit buffer holding at most 7 bits (a partial byte)
		// it will not overflow.
		src := dst
		const maxBits = sys.PtrSize*8 - 7
		if n <= maxBits {
			// Start with bits in output buffer.
			pattern := bits
			npattern := nbits

			// If we need more bits, fetch them from memory.
			if size == 1 {
				src = subtract1(src)
				for npattern < n {
					pattern <<= 8
					pattern |= uintptr(*src)
					src = subtract1(src)
					npattern += 8
				}
			} else {
				src = subtract1(src)
				for npattern < n {
					pattern <<= 4
					pattern |= uintptr(*src) & 0xf
					src = subtract1(src)
					npattern += 4
				}
			}

			// We started with the whole bit output buffer,
			// and then we loaded bits from whole bytes.
			// Either way, we might now have too many instead of too few.
			// Discard the extra.
			if npattern > n {
				pattern >>= npattern - n
				npattern = n
			}

			// Replicate pattern to at most maxBits.
			if npattern == 1 {
				// One bit being repeated.
				// If the bit is 1, make the pattern all 1s.
				// If the bit is 0, the pattern is already all 0s,
				// but we can claim that the number of bits
				// in the word is equal to the number we need (c),
				// because right shift of bits will zero fill.
				if pattern == 1 {
					pattern = 1<<maxBits - 1
					npattern = maxBits
				} else {
					npattern = c
				}
			} else {
				b := pattern
				nb := npattern
				if nb+nb <= maxBits {
					// Double pattern until the whole uintptr is filled.
					for nb <= sys.PtrSize*8 {
						b |= b << nb
						nb += nb
					}
					// Trim away incomplete copy of original pattern in high bits.
					// TODO(rsc): Replace with table lookup or loop on systems without divide?
					nb = maxBits / npattern * npattern
					b &= 1<<nb - 1
					pattern = b
					npattern = nb
				}
			}
			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
			// Since pattern contains >8 bits, there will be full bytes to flush
			// on each iteration.
			for ; c >= npattern; c -= npattern {
				bits |= pattern << nbits
				nbits += npattern
				if size == 1 {
					for nbits >= 8 {
						*dst = uint8(bits)
						dst = add1(dst)
						bits >>= 8
						nbits -= 8
					}
				} else {
					for nbits >= 4 {
						*dst = uint8(bits&0xf | bitScanAll)
						dst = subtract1(dst)
						bits >>= 4
						nbits -= 4
					}
				}
			}

			// Add final fragment to bit buffer.
			if c > 0 {
				pattern &= 1<<c - 1
				bits |= pattern << nbits
				nbits += c
			}
			continue Run
		}
		// Repeat; n too large to fit in a register.
		// Since nbits <= 7, we know the first few bytes of repeated data
		// are already written to memory.
		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
		if size == 1 {
			// Leading src fragment.
			src = subtractb(src, (off+7)/8)
			if frag := off & 7; frag != 0 {
				bits |= uintptr(*src) >> (8 - frag) << nbits
				src = add1(src)
				nbits += frag
				c -= frag
			}
			// Main loop: load one byte, write another.
			// The bits are rotating through the bit buffer.
			for i := c / 8; i > 0; i-- {
				bits |= uintptr(*src) << nbits
				src = add1(src)
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			}
			// Final src fragment.
			if c %= 8; c > 0 {
				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
				nbits += c
			}
		} else {
			// Leading src fragment.
			src = subtractb(src, (off+3)/4)
			if frag := off & 3; frag != 0 {
				bits |= (uintptr(*src) & 0xf) >> (4 - frag) << nbits
				src = subtract1(src)
				nbits += frag
				c -= frag
			}
			// Main loop: load one byte, write another.
			// The bits are rotating through the bit buffer.
			for i := c / 4; i > 0; i-- {
				bits |= (uintptr(*src) & 0xf) << nbits
				src = subtract1(src)
				*dst = uint8(bits&0xf | bitScanAll)
				dst = subtract1(dst)
				bits >>= 4
			}
			// Final src fragment.
			if c %= 4; c > 0 {
				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
				nbits += c
			}
		}
	}
	// Write any final bits out, using full-byte writes, even for the final byte.
	var totalBits uintptr
	if size == 1 {
		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
		nbits += -nbits & 7
		for ; nbits > 0; nbits -= 8 {
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}
	} else {
		totalBits = (uintptr(unsafe.Pointer(dstStart))-uintptr(unsafe.Pointer(dst)))*4 + nbits
		nbits += -nbits & 3
		for ; nbits > 0; nbits -= 4 {
			v := bits&0xf | bitScanAll
			*dst = uint8(v)
			dst = subtract1(dst)
			bits >>= 4
		}
	}
	return totalBits
}
// materializeGCProg allocates space for the (1-bit) pointer bitmask
// for an object of size ptrdata. Then it fills that space with the
// pointer bitmask specified by the program prog.
// The bitmask starts at s.startAddr.
// The result must be deallocated with dematerializeGCProg.
func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
	// Each word of ptrdata needs one bit in the bitmap.
	bitmapBytes := divRoundUp(ptrdata, 8*sys.PtrSize)
	// Compute the number of pages needed for bitmapBytes.
	pages := divRoundUp(bitmapBytes, pageSize)
	s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
	runGCProg(addb(prog, 4), nil, (*byte)(unsafe.Pointer(s.startAddr)), 1)
	return s
}
func dematerializeGCProg(s *mspan) {
	mheap_.freeManual(s, spanAllocPtrScalarBits)
}
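// For example, a sketch of the intended pairing (names as used above):
//
//	s := materializeGCProg(typ.ptrdata, typ.gcdata)
//	mask := (*byte)(unsafe.Pointer(s.startAddr)) // 1-bit pointer bitmap
//	// ... consult mask ...
//	dematerializeGCProg(s)
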
func dumpGCProg(p *byte) {
	nptr := 0
	for {
		x := *p
		p = add1(p)
		if x == 0 {
			print("\t", nptr, " end\n")
			break
		}
		if x&0x80 == 0 {
			print("\t", nptr, " lit ", x, ":")
			n := int(x+7) / 8
			for i := 0; i < n; i++ {
				print(" ", hex(*p))
				p = add1(p)
			}
			print("\n")
			nptr += int(x)
		} else {
			nbit := int(x &^ 0x80)
			if nbit == 0 {
				for nb := uint(0); ; nb += 7 {
					x := *p
					p = add1(p)
					nbit |= int(x&0x7f) << nb
					if x&0x80 == 0 {
						break
					}
				}
			}
			count := 0
			for nb := uint(0); ; nb += 7 {
				x := *p
				p = add1(p)
				count |= int(x&0x7f) << nb
				if x&0x80 == 0 {
					break
				}
			}
			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
			nptr += nbit * count
		}
	}
}
func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
	target := (*stkframe)(ctxt)
	if frame.sp <= target.sp && target.sp < frame.varp {
		*target = *frame
		return false
	}
	return true
}
// gcbits returns the GC type info for x, for testing.
// The result is the bitmap entries (0 or 1), one entry per byte.
//go:linkname reflect_gcbits reflect.gcbits
func reflect_gcbits(x interface{}) []byte {
	ret := getgcmask(x)
	typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
	nptr := typ.ptrdata / sys.PtrSize
	for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
		ret = ret[:len(ret)-1]
	}
	return ret
}
// Returns GC type info for the pointer stored in ep for testing.
// If ep points to the stack, only static live information will be returned
// (i.e. not for objects which are only dynamically live stack objects).
func getgcmask(ep interface{}) (mask []byte) {
	e := *efaceOf(&ep)
	p := e.data
	t := e._type
	// data or bss
	for _, datap := range activeModules() {
		// data
		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
			bitmap := datap.gcdatamask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/sys.PtrSize)
			for i := uintptr(0); i < n; i += sys.PtrSize {
				off := (uintptr(p) + i - datap.data) / sys.PtrSize
				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}

		// bss
		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
			bitmap := datap.gcbssmask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/sys.PtrSize)
			for i := uintptr(0); i < n; i += sys.PtrSize {
				off := (uintptr(p) + i - datap.bss) / sys.PtrSize
				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}
	}
	// heap
	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
		hbits := heapBitsForAddr(base)
		n := s.elemsize
		mask = make([]byte, n/sys.PtrSize)
		for i := uintptr(0); i < n; i += sys.PtrSize {
			if hbits.isPointer() {
				mask[i/sys.PtrSize] = 1
			}
			if !hbits.morePointers() {
				mask = mask[:i/sys.PtrSize]
				break
			}
			hbits = hbits.next()
		}
		return
	}
	// stack
	if _g_ := getg(); _g_.m.curg.stack.lo <= uintptr(p) && uintptr(p) < _g_.m.curg.stack.hi {
		var frame stkframe
		frame.sp = uintptr(p)
		gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
		if frame.fn.valid() {
			locals, _, _ := getStackMap(&frame, nil, false)
			if locals.n == 0 {
				return
			}
			size := uintptr(locals.n) * sys.PtrSize
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/sys.PtrSize)
			for i := uintptr(0); i < n; i += sys.PtrSize {
				off := (uintptr(p) + i - frame.varp + size) / sys.PtrSize
				mask[i/sys.PtrSize] = locals.ptrbit(off)
			}
		}
		return
	}
	// otherwise, not something the GC knows about.
	// possibly read-only data, like malloc(0).
	// must not have pointers
	return
}