// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
// addb returns the byte pointer p+n.
func addb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
}

// subtractb returns the byte pointer p-n.
func subtractb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, -n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
}

// add1 returns the byte pointer p+1.
func add1(p *byte) *byte {
	// Note: wrote out full expression instead of calling addb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
}

// subtract1 returns the byte pointer p-1.
//
// nosplit because it is used during write barriers and must not be preempted.
func subtract1(p *byte) *byte {
	// Note: wrote out full expression instead of calling subtractb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
}
// markBits provides access to the mark bit for an object in the heap.
// bytep points to the byte holding the mark bit.
// mask is a byte with a single bit set that can be &ed with *bytep
// to see if the bit has been set.
// *m.bytep&m.mask != 0 indicates the mark bit is set.
// index can be used along with span information to generate
// the address of the object in the heap.
// We maintain one set of mark bits for allocation and one for
// marking purposes.
type markBits struct {
	bytep *uint8
	mask  uint8
	index uintptr
}

func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
	bytep, mask := s.allocBits.bitp(allocBitIndex)
	return markBits{bytep, mask, allocBitIndex}
}
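
// exampleBitForIndex is an illustrative sketch, not part of the runtime:
// it spells out the addressing convention used throughout this file, in
// which bit i of a bitmap lives in byte i/8 at mask 1<<(i%8). This is the
// (bytep, mask) pair that s.allocBits.bitp returns to allocBitsForIndex above.
func exampleBitForIndex(bitmap []uint8, i uintptr) (bytep *uint8, mask uint8) {
	return &bitmap[i/8], uint8(1) << (i % 8)
}
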
// refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
// and negates them so that ctz (count trailing zeros) instructions
// can be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
func (s *mspan) refillAllocCache(whichByte uint16) {
	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(uintptr(whichByte))))
	aCache := uint64(0)
	aCache |= uint64(bytes[0])
	aCache |= uint64(bytes[1]) << (1 * 8)
	aCache |= uint64(bytes[2]) << (2 * 8)
	aCache |= uint64(bytes[3]) << (3 * 8)
	aCache |= uint64(bytes[4]) << (4 * 8)
	aCache |= uint64(bytes[5]) << (5 * 8)
	aCache |= uint64(bytes[6]) << (6 * 8)
	aCache |= uint64(bytes[7]) << (7 * 8)
	s.allocCache = ^aCache
}
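
// exampleNextFreeFromCache is an illustrative sketch, not part of the
// runtime: because refillAllocCache stores the *complement* of the alloc
// bits, a set bit in the cache means "free", and a single ctz instruction
// finds the lowest free slot. A result of 64 means the cached window has
// no free slots and the cache must be refilled further along.
func exampleNextFreeFromCache(allocBits uint64) int {
	cache := ^allocBits // 1 now means free, matching s.allocCache above
	return sys.TrailingZeros64(cache)
}
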
// nextFreeIndex returns the index of the next free object in s at
// or after s.freeindex.
// There are hardware instructions that can be used to make this
// faster if profiling warrants it.
func (s *mspan) nextFreeIndex() uint16 {
	sfreeindex := s.freeindex
	snelems := s.nelems
	if sfreeindex == snelems {
		return sfreeindex
	}
	if sfreeindex > snelems {
		throw("s.freeindex > s.nelems")
	}

	aCache := s.allocCache

	bitIndex := sys.TrailingZeros64(aCache)
	for bitIndex == 64 {
		// Move index to start of next cached bits.
		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
		if sfreeindex >= snelems {
			s.freeindex = snelems
			return snelems
		}
		whichByte := sfreeindex / 8
		// Refill s.allocCache with the next 64 alloc bits.
		s.refillAllocCache(whichByte)
		aCache = s.allocCache
		bitIndex = sys.TrailingZeros64(aCache)
		// nothing available in cached bits
		// grab the next 8 bytes and try again.
	}
	result := sfreeindex + uint16(bitIndex)
	if result >= snelems {
		s.freeindex = snelems
		return snelems
	}

	s.allocCache >>= uint(bitIndex + 1)
	sfreeindex = result + 1

	if sfreeindex%64 == 0 && sfreeindex != snelems {
		// We just incremented s.freeindex so it isn't 0.
		// As each 1 in s.allocCache was encountered and used for allocation
		// it was shifted away. At this point s.allocCache contains all 0s.
		// Refill s.allocCache so that it corresponds
		// to the bits at s.allocBits starting at s.freeindex.
		whichByte := sfreeindex / 8
		s.refillAllocCache(whichByte)
	}
	s.freeindex = sfreeindex
	return result
}
// isFree reports whether the index'th object in s is unallocated.
//
// The caller must ensure s.state is mSpanInUse, and there must have
// been no preemption points since ensuring this (which could allow a
// GC transition, which would allow the state to change).
func (s *mspan) isFree(index uintptr) bool {
	if index < uintptr(s.freeIndexForScan) {
		return false
	}
	bytep, mask := s.allocBits.bitp(index)
	return *bytep&mask == 0
}
// divideByElemSize returns n/s.elemsize.
// n must be within [0, s.npages*_PageSize),
// or may be exactly s.npages*_PageSize
// if s.elemsize is from sizeclasses.go.
//
// nosplit, because it is called by objIndex, which is nosplit
//
//go:nosplit
func (s *mspan) divideByElemSize(n uintptr) uintptr {
	const doubleCheck = false

	// See explanation in mksizeclasses.go's computeDivMagic.
	q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)

	if doubleCheck && q != n/s.elemsize {
		println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
		throw("bad magic division")
	}
	return q
}

// nosplit, because it is called by other nosplit code like findObject
//
//go:nosplit
func (s *mspan) objIndex(p uintptr) uintptr {
	return s.divideByElemSize(p - s.base())
}
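
// exampleDivMagic is an illustrative sketch, not part of the runtime: it
// shows the shape of the magic-number division used by divideByElemSize
// above, assuming the precomputed multiplier has the form ⌈2^32/size⌉.
// The real multiplier s.divMul and its validity bounds come from
// mksizeclasses.go's computeDivMagic.
func exampleDivMagic(n, size uintptr) uintptr {
	divMul := (1<<32 + uint64(size) - 1) / uint64(size) // assumed form of s.divMul
	return uintptr((uint64(n) * divMul) >> 32)
}
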
func markBitsForAddr(p uintptr) markBits {
	s := spanOf(p)
	objIndex := s.objIndex(p)
	return s.markBitsForIndex(objIndex)
}

func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
	bytep, mask := s.gcmarkBits.bitp(objIndex)
	return markBits{bytep, mask, objIndex}
}

func (s *mspan) markBitsForBase() markBits {
	return markBits{&s.gcmarkBits.x, uint8(1), 0}
}
// isMarked reports whether mark bit m is set.
func (m markBits) isMarked() bool {
	return *m.bytep&m.mask != 0
}

// setMarked sets the marked bit in the markbits, atomically.
func (m markBits) setMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.Or8(m.bytep, m.mask)
}

// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
func (m markBits) setMarkedNonAtomic() {
	*m.bytep |= m.mask
}

// clearMarked clears the marked bit in the markbits, atomically.
func (m markBits) clearMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.And8(m.bytep, ^m.mask)
}
// markBitsForSpan returns the markBits for the span base address base.
func markBitsForSpan(base uintptr) (mbits markBits) {
	mbits = markBitsForAddr(base)
	if mbits.mask != 1 {
		throw("markBitsForSpan: unaligned start")
	}
	return mbits
}

// advance advances the markBits to the next object in the span.
func (m *markBits) advance() {
	if m.mask == 1<<7 {
		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
		m.mask = 1
	} else {
		m.mask = m.mask << 1
	}
	m.index++
}
// clobberdeadPtr is a special value that is used by the compiler to
// clobber dead stack slots, when the -clobberdead flag is set.
const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))
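
// The expression above widens 0xdeaddead to the pointer size:
// ^uintptr(0)>>63 is 1 on 64-bit targets and 0 on 32-bit targets, so the
// constant is 0xdeaddeaddeaddead on 64-bit and 0xdeaddead on 32-bit.
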
// badPointer throws bad pointer in heap panic.
func badPointer(s *mspan, p, refBase, refOff uintptr) {
	// Typically this indicates an incorrect use
	// of unsafe or cgo to store a bad pointer in
	// the Go heap. It may also indicate a runtime
	// bug.
	//
	// TODO(austin): We could be more aggressive
	// and detect pointers to unallocated objects
	// in allocated spans.
	print("runtime: pointer ", hex(p))
	if s != nil {
		state := s.state.get()
		if state != mSpanInUse {
			print(" to unallocated span")
		} else {
			print(" to unused region of span")
		}
		print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
	}
	print("\n")
	if refBase != 0 {
		print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
		gcDumpObject("object", refBase, refOff)
	}
	getg().m.traceback = 2
	throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
}
// findObject returns the base address for the heap object containing
// the address p, the object's span, and the index of the object in s.
// If p does not point into a heap object, it returns base == 0.
//
// If p points to an invalid heap pointer and debug.invalidptr != 0,
// findObject panics.
//
// refBase and refOff optionally give the base address of the object
// in which the pointer p was found and the byte offset at which it
// was found. These are used for error reporting.
//
// It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
// Since p is a uintptr, it would not be adjusted if the stack were to move.
//
//go:nosplit
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
	s = spanOf(p)
	// If s is nil, the virtual address has never been part of the heap.
	// This pointer may be to some mmap'd region, so we allow it.
	if s == nil {
		if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
			// Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
			// as they are the only platforms where the compiler's clobberdead mode is
			// implemented. On these platforms clobberdeadPtr cannot be a valid address.
			badPointer(s, p, refBase, refOff)
		}
		return
	}

	// If p is a bad pointer, it may not be in s's bounds.
	//
	// Check s.state to synchronize with span initialization
	// before checking other fields. See also spanOfHeap.
	if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
		// Pointers into stacks are also ok, the runtime manages these explicitly.
		if state == mSpanManual {
			return
		}
		// The following ensures that we are rigorous about what data
		// structures hold valid pointers.
		if debug.invalidptr != 0 {
			badPointer(s, p, refBase, refOff)
		}
		return
	}

	objIndex = s.objIndex(p)
	base = s.base() + objIndex*s.elemsize
	return
}
// reflect_verifyNotInHeapPtr reports whether converting the not-in-heap pointer into an unsafe.Pointer is ok.
//
//go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr
func reflect_verifyNotInHeapPtr(p uintptr) bool {
	// Conversion to a pointer is ok as long as findObject above does not call badPointer.
	// Since we're already promised that p doesn't point into the heap, just disallow heap
	// pointers and the special clobbered pointer.
	return spanOf(p) == nil && p != clobberdeadPtr
}

const ptrBits = 8 * goarch.PtrSize
// bulkBarrierBitmap executes write barriers for copying from [src,
// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
// assumed to start maskOffset bytes into the data covered by the
// bitmap in bits (which may not be a multiple of 8).
//
// This is used by bulkBarrierPreWrite for writes to data and BSS.
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
	word := maskOffset / goarch.PtrSize
	bits = addb(bits, word/8)
	mask := uint8(1) << (word % 8)

	buf := &getg().m.p.ptr().wbBuf
	for i := uintptr(0); i < size; i += goarch.PtrSize {
				i += 7 * goarch.PtrSize
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
				srcx := (*uintptr)(unsafe.Pointer(src + i))
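
// exampleWalkPtrBitmap is an illustrative sketch, not part of the runtime:
// it shows the bitmap walk that bulkBarrierBitmap performs above, minus the
// write-barrier buffering and the skip-8-words fast path. One bit describes
// each pointer-sized word; the mask rotates through the current bitmap byte
// and the byte pointer advances every 8 words. visit stands in for queueing
// the old and new pointer values in the per-P write barrier buffer.
func exampleWalkPtrBitmap(size uintptr, bits *uint8, visit func(off uintptr)) {
	mask := uint8(1)
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		if mask == 0 {
			bits = add1(bits) // move to the next bitmap byte
			mask = 1
		}
		if *bits&mask != 0 {
			visit(i) // the word at offset i holds a pointer
		}
		mask <<= 1
	}
}
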
// typeBitsBulkBarrier executes a write barrier for every
// pointer that would be copied from [src, src+size) to [dst,
// dst+size) by a memmove using the type bitmap to locate those
// pointer slots.
//
// The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
// dst, src, and size must be pointer-aligned.
// The type typ must have a plain bitmap, not a GC program.
// The only use of this function is in channel sends, and the
// 64 kB channel element limit takes care of this for us.
//
// Must not be preempted because it typically runs right before memmove,
// and the GC must observe them as an atomic action.
//
// Callers must perform cgo checks if goexperiment.CgoCheck2.
//
//go:nosplit
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
	if typ == nil {
		throw("runtime: typeBitsBulkBarrier without type")
	}
	if typ.Size_ != size {
		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size)
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if typ.Kind_&kindGCProg != 0 {
		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " with GC prog")
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if !writeBarrier.enabled {
		return
	}
	ptrmask := typ.GCData
	buf := &getg().m.p.ptr().wbBuf
	var bits uint32
	for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
		if i&(goarch.PtrSize*8-1) == 0 {
			bits = uint32(*ptrmask)
			ptrmask = addb(ptrmask, 1)
		} else {
			bits = bits >> 1
		}
		if bits&1 != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			p := buf.get2()
			p[0] = *srcx
			p[1] = *dstx
		}
	}
}
// countAlloc returns the number of objects allocated in span s by
// scanning the allocation bitmap.
func (s *mspan) countAlloc() int {
	count := 0
	bytes := divRoundUp(uintptr(s.nelems), 8)
	// Iterate over each 8-byte chunk and count allocations
	// with an intrinsic. Note that newMarkBits guarantees that
	// gcmarkBits will be 8-byte aligned, so we don't have to
	// worry about edge cases; irrelevant bits will simply be zero.
	for i := uintptr(0); i < bytes; i += 8 {
		// Extract 64 bits from the byte pointer and get a OnesCount.
		// Note that the unsafe cast here doesn't preserve endianness,
		// but that's OK. We only care about how many bits are 1, not
		// about the order we discover them in.
		mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
		count += sys.OnesCount64(mrkBits)
	}
	return count
}
// readUintptr reads the bytes starting at the aligned pointer p into a uintptr.
// The read is little-endian.
func readUintptr(p *byte) uintptr {
	x := *(*uintptr)(unsafe.Pointer(p))
	if goarch.BigEndian {
		if goarch.PtrSize == 8 {
			return uintptr(sys.Bswap64(uint64(x)))
		}
		return uintptr(sys.Bswap32(uint32(x)))
	}
	return x
}
var debugPtrmask struct {
	lock mutex
	data *byte
}

// progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
// size is the size of the region described by prog, in bytes.
// The resulting bitvector will have no more than size/goarch.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector {
	n := (size/goarch.PtrSize + 7) / 8
	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
	x[len(x)-1] = 0xa1 // overflow check sentinel
	n = runGCProg(prog, &x[0])
	if x[len(x)-1] != 0xa1 {
		throw("progToPointerMask: overflow")
	}
	return bitvector{int32(n), &x[0]}
}
// Packed GC pointer bitmaps, aka GC programs.
//
// For large types containing arrays, the type information has a
// natural repetition that can be encoded to save space in the
// binary and in the memory representation of the type information.
//
// The encoding is a simple Lempel-Ziv style bytecode machine
// with the following instructions:
//
//	00000000: stop
//	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
//	10000000 n c: repeat the previous n bits c times; n, c are varints
//	1nnnnnnn c: repeat the previous n bits c times; c is a varint
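
// As a worked example of the encoding (illustrative, not from the source):
// a layout of one pointer word followed by one scalar word, repeated four
// times, has the 1-bit pattern 1,0,1,0,1,0,1,0 and can be encoded as
//
//	0x02 0x01  // 0nnnnnnn: emit 2 literal bits from the next byte (bit 0 = 1, bit 1 = 0)
//	0x82 0x03  // 1nnnnnnn c: repeat those 2 bits 3 more times (c = 3, a one-byte varint)
//	0x00       // n == 0: end of program
//
// for a total of 8 one-bit entries.
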
// runGCProg returns the number of 1-bit entries written to memory.
func runGCProg(prog, dst *byte) uintptr {
	// Bits waiting to be written to memory.
		// Flush accumulated full bytes.
		// The rest of the loop assumes that nbits <= 7.
		for ; nbits >= 8; nbits -= 8 {
		// Process one instruction.
			// Literal bits; n == 0 means end of program.
			for i := uintptr(0); i < nbyte; i++ {
				bits |= uintptr(*p) << nbits
				bits |= uintptr(*p) << nbits
		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
			for off := uint(0); ; off += 7 {
				n |= (x & 0x7F) << off
		// Count is encoded in a varint in the next bytes.
		for off := uint(0); ; off += 7 {
			c |= (x & 0x7F) << off
		c *= n // now total number of bits to copy

		// If the number of bits being repeated is small, load them
		// into a register and use that register for the entire loop
		// instead of repeatedly reading from memory.
		// Handling fewer than 8 bits here makes the general loop simpler.
		// The cutoff is goarch.PtrSize*8 - 7 to guarantee that when we add
		// the pattern to a bit buffer holding at most 7 bits (a partial byte)
		// it will not overflow.
		const maxBits = goarch.PtrSize*8 - 7
			// Start with bits in output buffer.
			// If we need more bits, fetch them from memory.
				pattern |= uintptr(*src)
			// We started with the whole bit output buffer,
			// and then we loaded bits from whole bytes.
			// Either way, we might now have too many instead of too few.
			// Discard the extra.
				pattern >>= npattern - n
			// Replicate pattern to at most maxBits.
				// One bit being repeated.
				// If the bit is 1, make the pattern all 1s.
				// If the bit is 0, the pattern is already all 0s,
				// but we can claim that the number of bits
				// in the word is equal to the number we need (c),
				// because right shift of bits will zero fill.
					pattern = 1<<maxBits - 1
				if nb+nb <= maxBits {
					// Double pattern until the whole uintptr is filled.
					for nb <= goarch.PtrSize*8 {
					// Trim away incomplete copy of original pattern in high bits.
					// TODO(rsc): Replace with table lookup or loop on systems without divide?
					nb = maxBits / npattern * npattern

			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
			// Since pattern contains >8 bits, there will be full bytes to flush
			// on each iteration.
			for ; c >= npattern; c -= npattern {
				bits |= pattern << nbits

			// Add final fragment to bit buffer.
				bits |= pattern << nbits

		// Repeat; n too large to fit in a register.
		// Since nbits <= 7, we know the first few bytes of repeated data
		// are already written to memory.
		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
		// Leading src fragment.
		src = subtractb(src, (off+7)/8)
		if frag := off & 7; frag != 0 {
			bits |= uintptr(*src) >> (8 - frag) << nbits
		// Main loop: load one byte, write another.
		// The bits are rotating through the bit buffer.
		for i := c / 8; i > 0; i-- {
			bits |= uintptr(*src) << nbits
		// Final src fragment.
			bits |= (uintptr(*src) & (1<<c - 1)) << nbits

	// Write any final bits out, using full-byte writes, even for the final byte.
	totalBits := (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
	for ; nbits > 0; nbits -= 8 {
// materializeGCProg allocates space for the (1-bit) pointer bitmask
// for an object of size ptrdata. Then it fills that space with the
// pointer bitmask specified by the program prog.
// The bitmask starts at s.startAddr.
// The result must be deallocated with dematerializeGCProg.
func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
	// Each word of ptrdata needs one bit in the bitmap.
	bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
	// Compute the number of pages needed for bitmapBytes.
	pages := divRoundUp(bitmapBytes, pageSize)
	s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
	runGCProg(addb(prog, 4), (*byte)(unsafe.Pointer(s.startAddr)))
	return s
}

func dematerializeGCProg(s *mspan) {
	mheap_.freeManual(s, spanAllocPtrScalarBits)
}
func dumpGCProg(p *byte) {
			print("\t", nptr, " end\n")
			print("\t", nptr, " lit ", x, ":")
			for i := 0; i < n; i++ {
			nbit := int(x &^ 0x80)
				for nb := uint(0); ; nb += 7 {
					nbit |= int(x&0x7f) << nb
			for nb := uint(0); ; nb += 7 {
				count |= int(x&0x7f) << nb
			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
// reflect_gcbits returns the GC type info for x, for testing.
// The result is the bitmap entries (0 or 1), one entry per byte.
//
//go:linkname reflect_gcbits reflect.gcbits
func reflect_gcbits(x any) []byte {
	return getgcmask(x)
}