// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/goos"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var Fintto64 = fintto64
var F64toint = f64toint

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var Fastlog2 = fastlog2

var ParseByteCount = parseByteCount

var Nanotime = nanotime
var NetpollBreak = netpollBreak

var PhysPageSize = physPageSize
var PhysHugePageSize = physHugePageSize

var NetpollGenericInit = netpollGenericInit

var MemclrNoHeapPointers = memclrNoHeapPointers

var CgoCheckPointer = cgoCheckPointer

const CrashStackImplemented = crashStackImplemented

const TracebackInnerFrames = tracebackInnerFrames
const TracebackOuterFrames = tracebackOuterFrames

var MapValues = values

var LockPartialOrder = lockPartialOrder

type LockRank lockRank

func (l LockRank) String() string {
	return lockRank(l).String()
}

const PreemptMSupported = preemptMSupported

func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)((*lfstack)(head).pop())
}

func LFNodeValidate(node *LFNode) {
	lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
}

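// A rough test-side sketch (not from the runtime sources) of how these
// lock-free stack exports fit together. lfstack nodes must live outside
// the GC'd heap, so this sketch takes its node from PersistentAlloc,
// exported near the end of this file; all names are illustrative and
// assume a runtime_test file that dot-imports runtime:
//
//	node := (*LFNode)(PersistentAlloc(unsafe.Sizeof(LFNode{})))
//	LFNodeValidate(node)
//	var head uint64
//	LFStackPush(&head, node)
//	if LFStackPop(&head) != node {
//		panic("lfstack round-trip failed")
//	}
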
func Netpoll(delta int64) {
	systemstack(func() {
		netpoll(delta)
	})
}

func GCMask(x any) (ret []byte) {
	systemstack(func() {
		ret = getgcmask(x)
	})
	return
}

func RunSchedLocalQueueTest() {
	pp := new(p)
	gs := make([]g, len(pp.runq))
	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
	for i := 0; i < len(pp.runq); i++ {
		if g, _ := runqget(pp); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(pp, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(pp); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(pp); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty.
	// Runq emptiness affects scheduling decisions and spurious emptiness
	// can lead to underutilization (both runnable Gs and idle Ps coexist
	// for arbitrarily long periods).
	done := make(chan bool, 1)
	p := new(p)
	gs := make([]g, 2)
	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}

var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash    = memhash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)

var UseAeshash = &useAeshash

func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

const HashLoad = hashLoad

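// A minimal sketch (illustrative only, assuming a runtime_test file that
// dot-imports runtime) of how tests use these hash exports: a hash must
// be deterministic for a fixed seed, e.g.
//
//	b := []byte("hello")
//	if BytesHash(b, 42) != BytesHash(b, 42) {
//		panic("BytesHash is not deterministic for a fixed seed")
//	}
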
// entry point for testing
func GostringW(w []uint16) (s string) {
	systemstack(func() {
		s = gostringw(&w[0])
	})
	return
}

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

// blockWrapper is a wrapper type that ensures a T is placed within a
// large object. This is necessary for safely benchmarking things
// that manipulate the heap bitmap, like heapBitsSetType.
//
// More specifically, allocating threads assume they're the sole writers
// to their span's heap bits, which allows those writes to be non-atomic.
// The heap bitmap is written byte-wise, so if one tried to call heapBitsSetType
// on an existing object in a small object span, we might corrupt that
// span's bitmap with a concurrent byte write to the heap bitmap. Large
// object spans contain exactly one object, so we can be sure no other P
// is going to be allocating from it concurrently, hence this wrapper type
// which ensures we have a T in a large object span.
type blockWrapper[T any] struct {
	value T
	_     [_MaxSmallSize]byte // Ensure we're a large object.
}

func BenchSetType[T any](n int, resetTimer func()) {
	x := new(blockWrapper[T])

	// Escape x to ensure it is allocated on the heap, as we are
	// working on the heap bits here.
	Escape(x)

	// Grab the type.
	var i any = *new(T)
	e := *efaceOf(&i)
	t := e._type

	// Benchmark setting the type bits for just the internal T of the block.
	benchSetType(n, resetTimer, 1, unsafe.Pointer(&x.value), t)
}

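// A hedged sketch of the intended call pattern from a benchmark in
// runtime_test (the element type Node is hypothetical):
//
//	func BenchmarkSetTypeNode(b *testing.B) {
//		runtime.BenchSetType[Node](b.N, b.ResetTimer)
//	}
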
const maxArrayBlockWrapperLen = 32

// arrayBlockWrapper is like blockWrapper, but the interior value is intended
// to be used as a backing store for a slice.
type arrayBlockWrapper[T any] struct {
	value [maxArrayBlockWrapperLen]T
	_     [_MaxSmallSize]byte // Ensure we're a large object.
}

// arrayLargeBlockWrapper is like arrayBlockWrapper, but the interior array
// accommodates many more elements.
type arrayLargeBlockWrapper[T any] struct {
	value [maxArrayBlockWrapperLen * 32]T // The exact length here is illustrative; it only needs to cover the big-slice cases.
	_     [_MaxSmallSize]byte             // Ensure we're a large object.
}

func BenchSetTypeSlice[T any](n int, resetTimer func(), len int) {
	// We have two separate cases here because we want to avoid
	// benchmarking a big type with a relatively small slice: that
	// combination would generate a really big allocation, which would
	// likely force a GC and skew the test results.
	var y unsafe.Pointer
	if len <= maxArrayBlockWrapperLen {
		x := new(arrayBlockWrapper[T])
		// Escape x to ensure it is allocated on the heap, as we are
		// working on the heap bits here.
		Escape(x)
		y = unsafe.Pointer(&x.value[0])
	} else {
		x := new(arrayLargeBlockWrapper[T])
		Escape(x)
		y = unsafe.Pointer(&x.value[0])
	}

	// Grab the type.
	var i any = *new(T)
	e := *efaceOf(&i)
	t := e._type

	// Benchmark setting the type for a slice created from the array
	// of T within the arrayBlock.
	benchSetType(n, resetTimer, len, y, t)
}

// benchSetType is the implementation of the BenchSetType* functions.
// x must be len consecutive Ts allocated within a large object span (to
// avoid a race on the heap bitmap).
//
// Note: this function cannot be generic. It would get its type from one of
// its callers (BenchSetType or BenchSetTypeSlice) whose type parameters are
// set by a call in the runtime_test package. That means this function and its
// callers will get instantiated in the package that provides the type argument,
// i.e. runtime_test. However, we call a function on the system stack. In race
// mode the runtime package is usually left uninstrumented because e.g. g0 has
// no valid racectx, but if we're instantiated in the runtime_test package,
// we might accidentally cause runtime code to be incorrectly instrumented.
func benchSetType(n int, resetTimer func(), len int, x unsafe.Pointer, t *_type) {
	// Compute the input sizes.
	size := t.Size() * uintptr(len)

	// Validate this function's invariant.
	s := spanOfHeap(uintptr(x))
	if s == nil {
		panic("no heap span for input")
	}
	if s.spanclass.sizeclass() != 0 {
		panic("span is not a large object span")
	}

	// Round up the size to the size class to make the benchmark a little more
	// realistic. However, validate it, to make sure this is safe.
	allocSize := roundupsize(size)
	if s.npages*pageSize < allocSize {
		panic("backing span not large enough for benchmark")
	}

	// Benchmark heapBitsSetType by calling it in a loop. This is safe because
	// x is in a large object span.
	resetTimer()
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(x), allocSize, size, t)
		}
	})

	// Make sure x doesn't get freed, since we're taking a uintptr.
	KeepAlive(x)
}

const PtrSize = goarch.PtrSize

var ForceGCPeriod = &forcegcperiod

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld(stwForTestCountPagesInUse)

	pagesInUse = mheap_.pagesInUse.Load()

	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld()

	return
}

func Fastrand() uint32          { return fastrand() }
func Fastrand64() uint64        { return fastrand64() }
func Fastrandn(n uint32) uint32 { return fastrandn(n) }

type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(mode)
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}

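// An illustrative round-trip sketch for ProfBuf (values arbitrary; the
// real tests in runtime_test drive it much harder): write one record
// with a 2-word header and a small stack, then read it back without
// blocking.
//
//	b := NewProfBuf(2, 1000, 1)
//	b.Write(nil, Nanotime(), []uint64{1, 2}, []uintptr{100, 200})
//	data, tags, eof := b.Read(ProfBufNonBlocking)
//	_, _, _ = data, tags, eof
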
func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
	stopTheWorld(stwForTestReadMetricsSlow)

	// Initialize the metrics beforehand because this could
	// allocate and skew the stats.
	metricsLock()
	initMetrics()

	systemstack(func() {
		// Donate the racectx to g0. readMetricsLocked calls into the race detector
		// via map access.
		getg().racectx = getg().m.curg.racectx

		// Read the metrics once before in case it allocates and skews the metrics.
		// readMetricsLocked is designed to only allocate the first time it is called
		// with a given slice of samples. In effect, this extra read tests that this
		// remains true, since otherwise the second readMetricsLocked below could
		// allocate before it returns.
		readMetricsLocked(samplesp, len, cap)

		// Read memstats first. It's going to flush
		// the mcaches which readMetrics does not do, so
		// going the other way around may result in
		// inconsistent statistics.
		readmemstats_m(memStats)

		// Read metrics again. We need to be sure we're on the
		// system stack with readmemstats_m so that we don't call into
		// the stack allocator and adjust metrics between there and here.
		readMetricsLocked(samplesp, len, cap)

		// Undo the donation.
		getg().racectx = 0
	})
	metricsUnlock()

	startTheWorld()
}

// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap.
func ReadMemStatsSlow() (base, slow MemStats) {
	stopTheWorld(stwForTestReadMemStatsSlow)

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		// Make sure stats don't change.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we're
		// recomputing.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		slow.HeapReleased = 0
		var bySize [_NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state.get() != mSpanInUse {
				continue
			}
			if s.isUnusedUserArenaChunk() {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees by just reading the stats for those directly.
		var m heapStatsDelta
		memstats.heapStats.unsafeRead(&m)

		// Collect per-sizeclass free stats.
		var smallFree uint64
		for i := 0; i < _NumSizeClasses; i++ {
			slow.Frees += m.smallFreeCount[i]
			bySize[i].Frees += m.smallFreeCount[i]
			bySize[i].Mallocs += m.smallFreeCount[i]
			smallFree += m.smallFreeCount[i] * uint64(class_to_size[i])
		}
		slow.Frees += m.tinyAllocCount + m.largeFreeCount
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + m.largeFree + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
			slow.HeapReleased += uint64(pg) * pageSize
		}
		for _, p := range allp {
			pg := sys.OnesCount64(p.pcache.scav)
			slow.HeapReleased += uint64(pg) * pageSize
		}

		getg().m.mallocing--
	})

	startTheWorld()
	return
}

// ShrinkStackAndVerifyFramePointers attempts to shrink the stack of the current goroutine
// and verifies that unwinding the new stack doesn't crash, even if the old
// stack has been freed or reused (simulated via poisoning).
func ShrinkStackAndVerifyFramePointers() {
	before := stackPoisonCopy
	defer func() { stackPoisonCopy = before }()
	stackPoisonCopy = 1

	gp := getg()
	systemstack(func() {
		shrinkstack(gp)
	})
	// If our new stack contains frame pointers into the old stack, this will
	// crash because the old stack has been poisoned.
	FPCallers(make([]uintptr, 1024))
}

// BlockOnSystemStack switches to the system stack, prints "x\n" to
// stderr, and blocks in a stack containing
// "runtime.blockOnSystemStackInternal".
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}

type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}

const RuntimeHmapSize = unsafe.Sizeof(hmap{})

func MapBucketsCount(m map[int]int) int {
	h := *(**hmap)(unsafe.Pointer(&m))
	return 1 << h.B
}

func MapBucketsPointerIsNil(m map[int]int) bool {
	h := *(**hmap)(unsafe.Pointer(&m))
	return h.buckets == nil
}

func OverLoadFactor(count int, B uint8) bool {
	return overLoadFactor(count, B)
}

func LockOSCounts() (external, internal uint32) {
	gp := getg()
	if gp.m.lockedExt+gp.m.lockedInt == 0 {
		if gp.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if gp.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return gp.m.lockedExt, gp.m.lockedInt
}

func TracebackSystemstack(stk []uintptr, i int) int {
	if i == 0 {
		pc, sp := getcallerpc(), getcallersp()
		var u unwinder
		u.initAt(pc, sp, 0, getg(), unwindJumpStack) // Don't ignore errors, for testing
		return tracebackPCs(&u, 0, stk)
	}
	n := 0
	systemstack(func() {
		n = TracebackSystemstack(stk, i-1)
	})
	return n
}

func KeepNArenaHints(n int) {
	hint := mheap_.arenaHints
	for i := 1; i < n; i++ {
		hint = hint.next
		if hint == nil {
			return
		}
	}
	hint.next = nil
}

// MapNextArenaHint reserves a page at the next arena growth hint,
// preventing the arena from growing there, and returns the range of
// addresses that are no longer viable.
//
// This may fail to reserve memory. If it fails, it still returns the
// address range it attempted to reserve.
func MapNextArenaHint() (start, end uintptr, ok bool) {
	hint := mheap_.arenaHints
	addr := hint.addr
	if hint.down {
		start, end = addr-heapArenaBytes, addr
		addr -= physPageSize
	} else {
		start, end = addr, addr+heapArenaBytes
	}
	got := sysReserve(unsafe.Pointer(addr), physPageSize)
	ok = (addr == uintptr(got))
	if !ok {
		// We were unable to get the requested reservation.
		// Release what we did get and fail.
		sysFreeOS(got, physPageSize)
	}
	return
}

func GetNextArenaHint() uintptr {
	return mheap_.arenaHints.addr
}

type G = g

type Sudog = sudog

func Getg() *G {
	return getg()
}

func GIsWaitingOnMutex(gp *G) bool {
	return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
}

var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack

func PanicForTesting(b []byte, i int) byte {
	return unexportedPanicForTesting(b, i)
}

//go:noinline
func unexportedPanicForTesting(b []byte, i int) byte {
	return b[i]
}

func G0StackOverflow() {
	systemstack(func() {
		g0 := getg()
		sp := getcallersp()
		// The stack bounds for the g0 stack are not always precise.
		// Use an artificially small stack to trigger a stack overflow
		// without actually running out of the system stack (which may
		// seg fault).
		g0.stack.lo = sp - 4096 - stackSystem
		g0.stackguard0 = g0.stack.lo + stackGuard
		g0.stackguard1 = g0.stackguard0

		stackOverflow(nil)
	})
}

func stackOverflow(x *byte) {
	var buf [256]byte
	stackOverflow(&buf[0])
}

func MapTombstoneCheck(m map[int]int) {
	// Make sure emptyOne and emptyRest are distributed correctly.
	// We should have a series of filled and emptyOne cells, followed by
	// a series of emptyRest cells.
	h := *(**hmap)(unsafe.Pointer(&m))
	i := any(m)
	t := *(**maptype)(unsafe.Pointer(&i))

	for x := 0; x < 1<<h.B; x++ {
		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
		n := 0
		for b := b0; b != nil; b = b.overflow(t) {
			for i := 0; i < bucketCnt; i++ {
				if b.tophash[i] != emptyRest {
					n++
				}
			}
		}
		k := 0
		for b := b0; b != nil; b = b.overflow(t) {
			for i := 0; i < bucketCnt; i++ {
				if k < n && b.tophash[i] == emptyRest {
					panic("early emptyRest")
				}
				if k >= n && b.tophash[i] != emptyRest {
					panic("late non-emptyRest")
				}
				if k == n-1 && b.tophash[i] == emptyOne {
					panic("last non-emptyRest entry is emptyOne")
				}
				k++
			}
		}
	}
}

func RunGetgThreadSwitchTest() {
	// Test that getg works correctly with thread switch.
	// With gccgo, if we generate getg inlined, the backend
	// may cache the address of the TLS variable, which
	// will become invalid after a thread switch. This test
	// checks that the bad caching doesn't happen.

	ch := make(chan int)
	go func(ch chan int) {
		ch <- 5
		LockOSThread()
	}(ch)

	g1 := getg()

	// Block on a receive. This is likely to get us a thread
	// switch. If we yield to the sender goroutine, it will
	// lock the thread, forcing us to resume on a different
	// thread.
	<-ch

	g2 := getg()
	if g1 != g2 {
		panic("g1 != g2")
	}

	// Also test getg after some control flow, as the
	// backend is sensitive to control flow.
	g3 := getg()
	if g1 != g3 {
		panic("g1 != g3")
	}
}

const (
	PallocChunkPages = pallocChunkPages
	PageAlloc64Bit   = pageAlloc64Bit
	PallocSumBytes   = pallocSumBytes
)

// Expose pallocSum for testing.
type PallocSum pallocSum

func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
func (m PallocSum) End() uint                      { return pallocSum(m).end() }

// Expose pallocBits for testing.
type PallocBits pallocBits

func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
	return (*pallocBits)(b).find(npages, searchIdx)
}
func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }

// SummarizeSlow is a slow but more obviously correct implementation
// of (*pallocBits).summarize. Used for testing.
func SummarizeSlow(b *PallocBits) PallocSum {
	var start, most, end uint

	const N = uint(len(b)) * 64
	for start < N && (*pageBits)(b).get(start) == 0 {
		start++
	}
	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
		end++
	}
	run := uint(0)
	for i := uint(0); i < N; i++ {
		if (*pageBits)(b).get(i) == 0 {
			run++
		} else {
			run = 0
		}
		most = max(most, run)
	}
	return PackPallocSum(start, most, end)
}

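// For example (sketch; assumes a runtime_test file dot-importing runtime),
// the fast and slow summaries must agree on any bitmap:
//
//	var b PallocBits
//	b.AllocRange(0, 64)
//	if b.Summarize() != SummarizeSlow(&b) {
//		panic("summarize mismatch")
//	}
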
// Expose non-trivial helpers for testing.
func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }

// Given two PallocBits, returns a set of bit ranges where
// the two bit maps differ.
func DiffPallocBits(a, b *PallocBits) []BitRange {
	ba := (*pageBits)(a)
	bb := (*pageBits)(b)

	var d []BitRange
	base, size := uint(0), uint(0)
	for i := uint(0); i < uint(len(ba))*64; i++ {
		if ba.get(i) != bb.get(i) {
			if size == 0 {
				base = i
			}
			size++
		} else {
			if size != 0 {
				d = append(d, BitRange{base, size})
			}
			size = 0
		}
	}
	if size != 0 {
		d = append(d, BitRange{base, size})
	}
	return d
}

// StringifyPallocBits gets the bits in the bit range r from b,
// and returns a string containing the bits as ASCII 0 and 1
// characters.
func StringifyPallocBits(b *PallocBits, r BitRange) string {
	str := ""
	for j := r.I; j < r.I+r.N; j++ {
		if (*pageBits)(b).get(j) != 0 {
			str += "1"
		} else {
			str += "0"
		}
	}
	return str
}

// Expose pallocData for testing.
type PallocData pallocData

func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
}
func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
func (d *PallocData) ScavengedSetRange(i, n uint) {
	(*pallocData)(d).scavenged.setRange(i, n)
}
func (d *PallocData) PallocBits() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).pallocBits)
}
func (d *PallocData) Scavenged() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).scavenged)
}

// Expose fillAligned for testing.
func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }

// Expose pageCache for testing.
type PageCache pageCache

const PageCachePages = pageCachePages

func NewPageCache(base uintptr, cache, scav uint64) PageCache {
	return PageCache(pageCache{base: base, cache: cache, scav: scav})
}
func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
	return (*pageCache)(c).alloc(npages)
}

func (c *PageCache) Flush(s *PageAlloc) {
	cp := (*pageCache)(c)
	sp := (*pageAlloc)(s)

	systemstack(func() {
		// None of the tests need any higher-level locking, so we just
		// take the lock internally.
		lock(sp.mheapLock)
		cp.flush(sp)
		unlock(sp.mheapLock)
	})
}

// Expose chunk index type.
type ChunkIdx chunkIdx

// Expose pageAlloc for testing. Note that because pageAlloc is
// not allocated in the heap, neither is PageAlloc.
type PageAlloc pageAlloc

func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
	pp := (*pageAlloc)(p)

	var addr, scav uintptr
	systemstack(func() {
		// None of the tests need any higher-level locking, so we just
		// take the lock internally.
		lock(pp.mheapLock)
		addr, scav = pp.alloc(npages)
		unlock(pp.mheapLock)
	})
	return addr, scav
}

func (p *PageAlloc) AllocToCache() PageCache {
	pp := (*pageAlloc)(p)

	var c PageCache
	systemstack(func() {
		// None of the tests need any higher-level locking, so we just
		// take the lock internally.
		lock(pp.mheapLock)
		c = PageCache(pp.allocToCache())
		unlock(pp.mheapLock)
	})
	return c
}

func (p *PageAlloc) Free(base, npages uintptr) {
	pp := (*pageAlloc)(p)

	systemstack(func() {
		// None of the tests need any higher-level locking, so we just
		// take the lock internally.
		lock(pp.mheapLock)
		pp.free(base, npages)
		unlock(pp.mheapLock)
	})
}

func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}

func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
	pp := (*pageAlloc)(p)
	systemstack(func() {
		r = pp.scavenge(nbytes, nil, true)
	})
	return
}

func (p *PageAlloc) InUse() []AddrRange {
	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
	for _, r := range p.inUse.ranges {
		ranges = append(ranges, AddrRange{r})
	}
	return ranges
}

// Returns nil if the PallocData's L2 is missing.
func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
	ci := chunkIdx(i)
	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
}

// AddrRange is a wrapper around addrRange for testing.
type AddrRange struct {
	addrRange
}

// MakeAddrRange creates a new address range.
func MakeAddrRange(base, limit uintptr) AddrRange {
	return AddrRange{makeAddrRange(base, limit)}
}

// Base returns the virtual base address of the address range.
func (a AddrRange) Base() uintptr {
	return a.addrRange.base.addr()
}

// Limit returns the virtual address of the limit of the address range.
func (a AddrRange) Limit() uintptr {
	return a.addrRange.limit.addr()
}

// Equals returns true if the two address ranges are exactly equal.
func (a AddrRange) Equals(b AddrRange) bool {
	return a == b
}

// Size returns the size in bytes of the address range.
func (a AddrRange) Size() uintptr {
	return a.addrRange.size()
}

// testSysStat is the sysStat passed to test versions of various
// runtime structures. We do actually have to keep track of this
// because otherwise memstats.mappedReady won't actually line up
// with other stats in the runtime during tests.
var testSysStat = &memstats.other_sys

// AddrRanges is a wrapper around addrRanges for testing.
type AddrRanges struct {
	addrRanges
	mutable bool
}

// NewAddrRanges creates a new empty addrRanges.
//
// Note that this initializes addrRanges just like in the
// runtime, so its memory is persistentalloc'd. Call this
// function sparingly since the memory it allocates is
// never released.
//
// This AddrRanges is mutable, so we can test methods like
// Add.
func NewAddrRanges() AddrRanges {
	r := addrRanges{}
	r.init(testSysStat)
	return AddrRanges{r, true}
}

// MakeAddrRanges creates a new addrRanges populated with
// the ranges in a.
//
// The returned AddrRanges is immutable, so methods like
// Add will fail.
func MakeAddrRanges(a ...AddrRange) AddrRanges {
	// Methods that manipulate the backing store of addrRanges.ranges should
	// not be used on the result from this function (e.g. add) since they may
	// trigger reallocation. That would normally be fine, except the new
	// backing store won't come from the heap, but from persistentalloc, so
	// we'll leak some memory implicitly.
	ranges := make([]addrRange, 0, len(a))
	total := uintptr(0)
	for _, r := range a {
		ranges = append(ranges, r.addrRange)
		total += r.Size()
	}
	return AddrRanges{addrRanges{
		ranges:     ranges,
		totalBytes: total,
		sysStat:    testSysStat,
	}, false}
}

// Ranges returns a copy of the ranges described by the
// addrRanges.
func (a *AddrRanges) Ranges() []AddrRange {
	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
	for _, r := range a.addrRanges.ranges {
		result = append(result, AddrRange{r})
	}
	return result
}

// FindSucc returns the successor to base. See addrRanges.findSucc
// for more details.
func (a *AddrRanges) FindSucc(base uintptr) int {
	return a.findSucc(base)
}

// Add adds a new AddrRange to the AddrRanges.
//
// The AddrRange must be mutable (i.e. created by NewAddrRanges),
// otherwise this method will throw.
func (a *AddrRanges) Add(r AddrRange) {
	if !a.mutable {
		throw("attempt to mutate immutable AddrRanges")
	}
	a.add(r.addrRange)
}

// TotalBytes returns the totalBytes field of the addrRanges.
func (a *AddrRanges) TotalBytes() uintptr {
	return a.addrRanges.totalBytes
}

// BitRange represents a range over a bitmap.
type BitRange struct {
	I, N uint // bit index and length in bits
}

// NewPageAlloc creates a new page allocator for testing and
// initializes it with the scav and chunks maps. Each key in these maps
// represents a chunk index and each value is a series of bit ranges to
// set within each bitmap's chunk.
//
// The initialization of the pageAlloc preserves the invariant that if a
// scavenged bit is set the alloc bit is necessarily unset, so some
// of the bits described by scav may be cleared in the final bitmap if
// ranges in chunks overlap with them.
//
// scav is optional, and if nil, the scavenged bitmap will be cleared
// (as opposed to all 1s, which it usually is). Furthermore, every
// chunk index in scav must appear in chunks; ones that do not are
// ignored.
func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
	p := new(pageAlloc)

	// We've got an entry, so initialize the pageAlloc.
	p.init(new(mutex), testSysStat, true)
	lockInit(p.mheapLock, lockRankMheap)
	for i, init := range chunks {
		addr := chunkBase(chunkIdx(i))

		// Mark the chunk's existence in the pageAlloc.
		systemstack(func() {
			lock(p.mheapLock)
			p.grow(addr, pallocChunkBytes)
			unlock(p.mheapLock)
		})

		// Initialize the bitmap and update pageAlloc metadata.
		ci := chunkIndex(addr)
		chunk := p.chunkOf(ci)

		// Clear all the scavenged bits, which p.grow sets.
		chunk.scavenged.clearRange(0, pallocChunkPages)

		// Simulate the allocation and subsequent free of all pages in
		// the chunk for the scavenge index. This sets the state equivalent
		// to all pages within the index being free.
		p.scav.index.alloc(ci, pallocChunkPages)
		p.scav.index.free(ci, 0, pallocChunkPages)

		// Apply scavenge state if applicable.
		if scav != nil {
			if scvg, ok := scav[i]; ok {
				for _, s := range scvg {
					// Ignore the case of s.N == 0. setRange doesn't handle
					// it and it's a no-op anyway.
					if s.N != 0 {
						chunk.scavenged.setRange(s.I, s.N)
					}
				}
			}
		}

		// Apply alloc state.
		for _, s := range init {
			// Ignore the case of s.N == 0. allocRange doesn't handle
			// it and it's a no-op anyway.
			if s.N != 0 {
				chunk.allocRange(s.I, s.N)

				// Make sure the scavenge index is updated.
				p.scav.index.alloc(ci, s.N)
			}
		}

		// Update heap metadata for the allocRange calls above.
		systemstack(func() {
			lock(p.mheapLock)
			p.update(addr, pallocChunkPages, false, false)
			unlock(p.mheapLock)
		})
	}

	return (*PageAlloc)(p)
}

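// An illustrative sketch of how page-allocator tests typically build and
// tear one of these down (mirrors the shape of tests in runtime_test; the
// specific ranges are arbitrary):
//
//	p := NewPageAlloc(map[ChunkIdx][]BitRange{
//		BaseChunkIdx: {{0, 64}}, // first 64 pages allocated
//	}, nil)
//	defer FreePageAlloc(p)
//	addr, scav := p.Alloc(1)
//	_, _ = addr, scav
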
// FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
// is called the pageAlloc may no longer be used. The object itself will be
// collected by the garbage collector once it is no longer live.
func FreePageAlloc(pp *PageAlloc) {
	p := (*pageAlloc)(pp)

	// Free all the mapped space for the summary levels.
	if pageAlloc64Bit != 0 {
		for l := 0; l < summaryLevels; l++ {
			sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
		}
	} else {
		resSize := uintptr(0)
		for _, s := range p.summary {
			resSize += uintptr(cap(s)) * pallocSumBytes
		}
		sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
	}

	// Free extra data structures.
	sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))

	// Subtract back out whatever we mapped for the summaries.
	// sysUsed adds to p.sysStat and memstats.mappedReady no matter what
	// (and in anger should actually be accounted for), and there's no other
	// way to figure out how much we actually mapped.
	gcController.mappedReady.Add(-int64(p.summaryMappedReady))
	testSysStat.add(-int64(p.summaryMappedReady))

	// Free the mapped space for chunks.
	for i := range p.chunks {
		if x := p.chunks[i]; x != nil {
			p.chunks[i] = nil
			// This memory comes from sysAlloc and will always be page-aligned.
			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
		}
	}
}

// BaseChunkIdx is a convenient chunkIdx value which works on both
// 64 bit and 32 bit platforms, allowing the tests to share code
// between the two.
//
// This should not be higher than 0x100*pallocChunkBytes to support
// mips and mipsle, which only have 31-bit address spaces.
var BaseChunkIdx = func() ChunkIdx {
	var prefix uintptr
	if pageAlloc64Bit != 0 {
		prefix = 0xc000
	} else {
		prefix = 0x100
	}
	baseAddr := prefix * pallocChunkBytes
	if goos.IsAix != 0 {
		baseAddr += arenaBaseOffset
	}
	return ChunkIdx(chunkIndex(baseAddr))
}()

// PageBase returns an address given a chunk index and a page index
// relative to that chunk.
func PageBase(c ChunkIdx, pageIdx uint) uintptr {
	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
}

type BitsMismatch struct {
	Base      uintptr
	Got, Want uint64
}

func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
	ok = true

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		getg().m.mallocing++

		// Lock so that we can safely access the bitmap.
		lock(&mheap_.lock)
	chunkLoop:
		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			for j := 0; j < pallocChunkPages/64; j++ {
				// Run over each 64-bit bitmap section and ensure
				// scavenged is being cleared properly on allocation.
				// If a used bit and scavenged bit are both set, that's
				// an error, and could indicate a larger problem, or
				// an accounting problem.
				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
				got := chunk.scavenged[j]
				if want != got {
					ok = false
					if n >= len(mismatches) {
						break chunkLoop
					}
					mismatches[n] = BitsMismatch{
						Base: chunkBase(i) + uintptr(j)*64*pageSize,
						Got:  got,
						Want: want,
					}
					n++
				}
			}
		}
		unlock(&mheap_.lock)

		getg().m.mallocing--
	})
	return
}

func PageCachePagesLeaked() (leaked uintptr) {
	stopTheWorld(stwForTestPageCachePagesLeaked)

	// Walk over destroyed Ps and look for unflushed caches.
	deadp := allp[len(allp):cap(allp)]
	for _, p := range deadp {
		// Since we're going past len(allp) we may see nil Ps.
		// Just ignore them.
		if p != nil {
			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
		}
	}

	startTheWorld()
	return
}

var Semacquire = semacquire
var Semrelease1 = semrelease1

func SemNwait(addr *uint32) uint32 {
	root := semtable.rootFor(addr)
	return root.nwait.Load()
}

const SemTableSize = semTabSize

// SemTable is a wrapper around semTable exported for testing.
type SemTable struct {
	semTable
}

// Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr.
func (t *SemTable) Enqueue(addr *uint32) {
	s := acquireSudog()
	s.releasetime = 0
	s.acquiretime = 0
	s.ticket = 0
	t.semTable.rootFor(addr).queue(addr, s, false)
}

// Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr.
//
// Returns true if there actually was a waiter to be dequeued.
func (t *SemTable) Dequeue(addr *uint32) bool {
	s, _, _ := t.semTable.rootFor(addr).dequeue(addr)
	if s != nil {
		releaseSudog(s)
		return true
	}
	return false
}

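// A short sketch of the intended simulation (illustrative; assumes a
// runtime_test file dot-importing runtime, and that a zero SemTable is
// usable, as runtime globals are): enqueue a waiter on an address, then
// dequeue it again.
//
//	tab := new(SemTable)
//	var sema uint32
//	tab.Enqueue(&sema)
//	if !tab.Dequeue(&sema) {
//		panic("expected a waiter")
//	}
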
// mspan wrapper for testing.
type MSpan mspan

// Allocate an mspan for testing.
func AllocMSpan() *MSpan {
	var s *mspan
	systemstack(func() {
		lock(&mheap_.lock)
		s = (*mspan)(mheap_.spanalloc.alloc())
		unlock(&mheap_.lock)
	})
	return (*MSpan)(s)
}

// Free an allocated mspan.
func FreeMSpan(s *MSpan) {
	systemstack(func() {
		lock(&mheap_.lock)
		mheap_.spanalloc.free(unsafe.Pointer(s))
		unlock(&mheap_.lock)
	})
}

func MSpanCountAlloc(ms *MSpan, bits []byte) int {
	s := (*mspan)(ms)
	s.nelems = uint16(len(bits) * 8)
	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
	result := s.countAlloc()
	s.gcmarkBits = nil
	return result
}

const (
	TimeHistSubBucketBits = timeHistSubBucketBits
	TimeHistNumSubBuckets = timeHistNumSubBuckets
	TimeHistNumBuckets    = timeHistNumBuckets
	TimeHistMinBucketBits = timeHistMinBucketBits
	TimeHistMaxBucketBits = timeHistMaxBucketBits
)

type TimeHistogram timeHistogram

// Count returns the counts for the given bucket, subBucket indices.
// Returns true if the bucket was valid, otherwise returns the counts
// for the overflow bucket if bucket > 0 or the underflow bucket if
// bucket < 0, and false.
func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
	t := (*timeHistogram)(th)
	if bucket < 0 {
		return t.underflow.Load(), false
	}
	i := bucket*TimeHistNumSubBuckets + subBucket
	if i >= len(t.counts) {
		return t.overflow.Load(), false
	}
	return t.counts[i].Load(), true
}

func (th *TimeHistogram) Record(duration int64) {
	(*timeHistogram)(th).record(duration)
}

var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets

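// For example (sketch, illustrative values): a negative duration lands in
// the underflow bucket, which Count reports alongside ok == false:
//
//	var h TimeHistogram
//	h.Record(-1)
//	if c, ok := h.Count(-1, 0); ok || c != 1 {
//		panic("expected one underflow count")
//	}
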
func SetIntArgRegs(a int) int {
	lock(&finlock)
	old := intArgRegs
	if a >= 0 {
		intArgRegs = a
	}
	unlock(&finlock)
	return old
}

func FinalizerGAsleep() bool {
	return fingStatus.Load()&fingWait != 0
}

// For GCTestMoveStackOnNextCall, it's important not to introduce an
// extra layer of call, since then there's a return before the "real"
// frame.
var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall

// For GCTestIsReachable, it's important that we do this as a call so
// escape analysis can see through it.
func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
	return gcTestIsReachable(ptrs...)
}

// For GCTestPointerClass, it's important that we do this as a call so
// escape analysis can see through it.
//
// This is nosplit because gcTestPointerClass is.
//
//go:nosplit
func GCTestPointerClass(p unsafe.Pointer) string {
	return gcTestPointerClass(p)
}

const Raceenabled = raceenabled

const (
	GCBackgroundUtilization            = gcBackgroundUtilization
	GCGoalUtilization                  = gcGoalUtilization
	DefaultHeapMinimum                 = defaultHeapMinimum
	MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
	MemoryLimitMinHeapGoalHeadroom     = memoryLimitMinHeapGoalHeadroom
)

type GCController struct {
	gcControllerState
}

func NewGCController(gcPercent int, memoryLimit int64) *GCController {
	// Force the controller to escape. We're going to
	// do 64-bit atomics on it, and if it gets stack-allocated
	// on a 32-bit architecture, it may get allocated unaligned
	// space.
	g := Escape(new(GCController))
	g.gcControllerState.test = true // Mark it as a test copy.
	g.init(int32(gcPercent), memoryLimit)
	return g
}

func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
	trigger, _ := c.trigger()
	if c.heapMarked > trigger {
		trigger = c.heapMarked
	}
	c.maxStackScan.Store(stackSize)
	c.globalsScan.Store(globalsSize)
	c.heapLive.Store(trigger)
	c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
	c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
}

func (c *GCController) AssistWorkPerByte() float64 {
	return c.assistWorkPerByte.Load()
}

func (c *GCController) HeapGoal() uint64 {
	return c.heapGoal()
}

func (c *GCController) HeapLive() uint64 {
	return c.heapLive.Load()
}

func (c *GCController) HeapMarked() uint64 {
	return c.heapMarked
}

func (c *GCController) Triggered() uint64 {
	return c.triggered
}

type GCControllerReviseDelta struct {
	HeapLive        int64
	HeapScan        int64
	HeapScanWork    int64
	StackScanWork   int64
	GlobalsScanWork int64
}

func (c *GCController) Revise(d GCControllerReviseDelta) {
	c.heapLive.Add(d.HeapLive)
	c.heapScan.Add(d.HeapScan)
	c.heapScanWork.Add(d.HeapScanWork)
	c.stackScanWork.Add(d.StackScanWork)
	c.globalsScanWork.Add(d.GlobalsScanWork)
	c.revise()
}

func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
	c.assistTime.Store(assistTime)
	c.endCycle(elapsed, gomaxprocs, false)
	c.resetLive(bytesMarked)
	c.commit(false)
}

func (c *GCController) AddIdleMarkWorker() bool {
	return c.addIdleMarkWorker()
}

func (c *GCController) NeedIdleMarkWorker() bool {
	return c.needIdleMarkWorker()
}

func (c *GCController) RemoveIdleMarkWorker() {
	c.removeIdleMarkWorker()
}

func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
	c.setMaxIdleMarkWorkers(max)
}

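// A hedged sketch of a simulated GC cycle driven through this wrapper
// (shape borrowed from how runtime_test drives it; all numbers are
// arbitrary, and math.MaxInt64 stands in for "no memory limit"):
//
//	c := NewGCController(100, math.MaxInt64)
//	c.StartCycle(1<<20, 1<<20, 0.5, 4)
//	c.Revise(GCControllerReviseDelta{HeapLive: 1 << 20})
//	c.EndCycle(1<<20, 0, 1000000, 4)
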
var alwaysFalse bool
var escapeSink any

// Escape forces x to escape to the heap by (apparently) publishing it
// to a global; alwaysFalse is never set, so the store never happens,
// but the compiler can't prove that.
func Escape[T any](x T) T {
	if alwaysFalse {
		escapeSink = x
	}
	return x
}

// Acquirem blocks preemption.
func Acquirem() {
	acquirem()
}

func Releasem() {
	releasem(getg().m)
}

var Timediv = timediv

type PIController struct {
	piController
}

func NewPIController(kp, ti, tt, min, max float64) *PIController {
	return &PIController{piController{
		kp:  kp,
		ti:  ti,
		tt:  tt,
		min: min,
		max: max,
	}}
}

func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
	return c.piController.next(input, setpoint, period)
}

const (
	CapacityPerProc          = capacityPerProc
	GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
)

type GCCPULimiter struct {
	limiter gcCPULimiterState
}

func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
	// Force the controller to escape. We're going to
	// do 64-bit atomics on it, and if it gets stack-allocated
	// on a 32-bit architecture, it may get allocated unaligned
	// space.
	l := Escape(new(GCCPULimiter))
	l.limiter.test = true
	l.limiter.resetCapacity(now, gomaxprocs)
	return l
}

func (l *GCCPULimiter) Fill() uint64 {
	return l.limiter.bucket.fill
}

func (l *GCCPULimiter) Capacity() uint64 {
	return l.limiter.bucket.capacity
}

func (l *GCCPULimiter) Overflow() uint64 {
	return l.limiter.overflow
}

func (l *GCCPULimiter) Limiting() bool {
	return l.limiter.limiting()
}

func (l *GCCPULimiter) NeedUpdate(now int64) bool {
	return l.limiter.needUpdate(now)
}

func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
	l.limiter.startGCTransition(enableGC, now)
}

func (l *GCCPULimiter) FinishGCTransition(now int64) {
	l.limiter.finishGCTransition(now)
}

func (l *GCCPULimiter) Update(now int64) {
	l.limiter.update(now)
}

func (l *GCCPULimiter) AddAssistTime(t int64) {
	l.limiter.addAssistTime(t)
}

func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
	l.limiter.resetCapacity(now, nprocs)
}

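// An illustrative sketch of driving the limiter by hand from a test
// (times come from the exported Nanotime above; the deltas are arbitrary
// and the resulting state depends on the limiter's internals):
//
//	now := Nanotime()
//	l := NewGCCPULimiter(now, 4)
//	l.StartGCTransition(true, now)
//	l.FinishGCTransition(now + GCCPULimiterUpdatePeriod)
//	l.Update(now + 2*GCCPULimiterUpdatePeriod)
//	_ = l.Limiting()
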
const ScavengePercent = scavengePercent

type Scavenger struct {
	Sleep      func(int64) int64
	Scavenge   func(uintptr) (uintptr, int64)
	ShouldStop func() bool
	GoMaxProcs func() int32

	released  atomic.Uintptr
	scavenger scavengerState
	stop      chan<- struct{}
	done      <-chan struct{}
}

func (s *Scavenger) Start() {
	if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
		panic("must populate all stubs")
	}

	// Set up the scavenger.
	s.scavenger.sleepStub = s.Sleep
	s.scavenger.scavenge = s.Scavenge
	s.scavenger.shouldStop = s.ShouldStop
	s.scavenger.gomaxprocs = s.GoMaxProcs

	// Start up scavenger goroutine, and wait for it to be ready.
	stop := make(chan struct{})
	s.stop = stop
	done := make(chan struct{})
	s.done = done
	go func() {
		// This should match bgscavenge, loosely.
		s.scavenger.init()
		s.scavenger.park()
		for {
			select {
			case <-stop:
				close(done)
				return
			default:
			}
			released, workTime := s.scavenger.run()
			if released == 0 {
				s.scavenger.park()
				continue
			}
			s.released.Add(released)
			s.scavenger.sleep(workTime)
		}
	}()
	if !s.BlockUntilParked(1e9 /* 1 second */) {
		panic("timed out waiting for scavenger to get ready")
	}
}

// BlockUntilParked blocks until the scavenger parks, or until
// timeout is exceeded. Returns true if the scavenger parked.
//
// Note that in testing, parked means something slightly different.
// In anger, the scavenger parks to sleep, too, but in testing,
// it only parks when it actually has no work to do.
func (s *Scavenger) BlockUntilParked(timeout int64) bool {
	// Just spin, waiting for it to park.
	//
	// The actual parking process is racy with respect to
	// wakeups, which is fine, but for testing we need something
	// a bit more robust.
	start := nanotime()
	for nanotime()-start < timeout {
		lock(&s.scavenger.lock)
		parked := s.scavenger.parked
		unlock(&s.scavenger.lock)
		if parked {
			return true
		}
		Gosched()
	}
	return false
}

// Released returns how many bytes the scavenger released.
func (s *Scavenger) Released() uintptr {
	return s.released.Load()
}

// Wake wakes up a parked scavenger to keep running.
func (s *Scavenger) Wake() {
	s.scavenger.wake()
}

// Stop cleans up the scavenger's resources. The scavenger
// must be parked for this to work.
func (s *Scavenger) Stop() {
	lock(&s.scavenger.lock)
	parked := s.scavenger.parked
	unlock(&s.scavenger.lock)
	if !parked {
		panic("tried to clean up scavenger that is not parked")
	}
	close(s.stop)
	s.Wake()
	<-s.done
}

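// A hedged sketch of a complete Scavenger lifecycle as a test might run
// it (the stub behaviors are arbitrary):
//
//	s := new(Scavenger)
//	s.Sleep = func(ns int64) int64 { return ns }
//	s.Scavenge = func(n uintptr) (uintptr, int64) { return n, 10 }
//	s.ShouldStop = func() bool { return true }
//	s.GoMaxProcs = func() int32 { return 1 }
//	s.Start()
//	s.Wake()
//	if !s.BlockUntilParked(1e9) {
//		panic("scavenger never parked")
//	}
//	s.Stop()
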
type ScavengeIndex struct {
	i scavengeIndex
}

func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
	s := new(ScavengeIndex)
	// This is a bit lazy but we easily guarantee we'll be able
	// to reference all the relevant chunks. The worst-case
	// memory usage here is 512 MiB, but tests generally use
	// small offsets from BaseChunkIdx, which results in ~100s
	// of KiB in memory use.
	//
	// This may still be worth making better, at least by sharing
	// this fairly large array across calls with a sync.Pool or
	// something. Currently, when the tests are run serially,
	// it takes around 0.5s. Not all that much, but if we have
	// a lot of tests like this it could add up.
	s.i.chunks = make([]atomicScavChunkData, max)
	s.i.min.Store(uintptr(min))
	s.i.max.Store(uintptr(max))
	s.i.minHeapIdx.Store(uintptr(min))
	return s
}

func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
	ci, off := s.i.find(force)
	return ChunkIdx(ci), off
}

func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
	sc, ec := chunkIndex(base), chunkIndex(limit-1)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)

	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		s.i.alloc(sc, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		s.i.alloc(sc, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			s.i.alloc(c, pallocChunkPages)
		}
		s.i.alloc(ec, ei+1)
	}
}

func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
	sc, ec := chunkIndex(base), chunkIndex(limit-1)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)

	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		s.i.free(sc, si, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		s.i.free(sc, si, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			s.i.free(c, 0, pallocChunkPages)
		}
		s.i.free(ec, 0, ei+1)
	}
}

func (s *ScavengeIndex) ResetSearchAddrs() {
	for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
		addr, marked := a.Load()
		if marked {
			a.StoreUnmark(addr, addr)
		}
		a.Clear()
	}
	s.i.freeHWM = minOffAddr
}

func (s *ScavengeIndex) NextGen() {
	s.i.nextGen()
}

func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
	s.i.setEmpty(chunkIdx(ci))
}

func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
	sc0 := scavChunkData{
		gen:            gen,
		inUse:          inUse,
		lastInUse:      lastInUse,
		scavChunkFlags: scavChunkFlags(flags),
	}
	scp := sc0.pack()
	sc1 := unpackScavChunkData(scp)
	return sc0 == sc1
}

const GTrackingPeriod = gTrackingPeriod

var ZeroBase = unsafe.Pointer(&zerobase)

const UserArenaChunkBytes = userArenaChunkBytes

type UserArena struct {
	arena *userArena
}

func NewUserArena() *UserArena {
	return &UserArena{newUserArena()}
}

func (a *UserArena) New(out *any) {
	i := efaceOf(out)
	typ := i._type
	if typ.Kind_&kindMask != kindPtr {
		panic("new result of non-ptr type")
	}
	typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
	i.data = a.arena.new(typ)
}

func (a *UserArena) Slice(sl any, cap int) {
	a.arena.slice(sl, cap)
}

func (a *UserArena) Free() {
	a.arena.free()
}

func GlobalWaitingArenaChunks() int {
	n := 0
	systemstack(func() {
		lock(&mheap_.lock)
		for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
			n++
		}
		unlock(&mheap_.lock)
	})
	return n
}

func UserArenaClone[T any](s T) T {
	return arena_heapify(s).(T)
}

var AlignUp = alignUp

// BlockUntilEmptyFinalizerQueue blocks until either the finalizer
// queue is emptied (and the finalizers have executed) or the timeout
// is reached. Returns true if the finalizer queue was emptied.
func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
	start := nanotime()
	for nanotime()-start < timeout {
		lock(&finlock)
		// We know the queue has been drained when both finq is nil
		// and the finalizer g has stopped executing.
		empty := finq == nil
		empty = empty && readgstatus(fing) == _Gwaiting && fing.waitreason == waitReasonFinalizerWait
		unlock(&finlock)
		if empty {
			return true
		}
		Gosched()
	}
	return false
}

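// For example (sketch; mirrors common usage in runtime_test, and assumes
// the standard time package for the timeout constant):
//
//	GC()
//	if !BlockUntilEmptyFinalizerQueue(int64(time.Second)) {
//		panic("finalizers did not run in time")
//	}
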
func FrameStartLine(f *Frame) int {
	return f.startLine
}

// PersistentAlloc allocates some memory that lives outside the Go heap.
// This memory will never be freed; use sparingly.
func PersistentAlloc(n uintptr) unsafe.Pointer {
	return persistentalloc(n, 0, &memstats.other_sys)
}

// FPCallers works like Callers and uses frame pointer unwinding to populate
// pcBuf with the return addresses of the physical frames on the stack.
func FPCallers(pcBuf []uintptr) int {
	return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf)
}

const FramePointerEnabled = framepointer_enabled

var (
	GetPinCounter = pinnerGetPinCounter
)

func SetPinnerLeakPanic(f func()) {
	pinnerLeakPanic = f
}

func GetPinnerLeakPanic() func() {
	return pinnerLeakPanic
}

var testUintptr uintptr

func MyGenericFunc[T any]() {
	systemstack(func() {
		testUintptr = 4
	})
}

func UnsafePoint(pc uintptr) bool {
	fi := findfunc(pc)
	v := pcdatavalue(fi, abi.PCDATA_UnsafePoint, pc)
	switch v {
	case abi.UnsafePointUnsafe:
		return true
	case abi.UnsafePointSafe:
		return false
	case abi.UnsafePointRestart1, abi.UnsafePointRestart2, abi.UnsafePointRestartAtEntry:
		// These are all interruptible, they just encode a nonstandard
		// way of recovering when interrupted.
		return false
	default:
		var buf [20]byte
		panic("invalid unsafe point code " + string(itoa(buf[:], uint64(v))))
	}
}