1 // Copyright 2010 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Export guts for testing.
12 "internal/goexperiment"
14 "runtime/internal/atomic"
15 "runtime/internal/sys"
26 var Fintto64 = fintto64
27 var F64toint = f64toint
29 var Entersyscall = entersyscall
30 var Exitsyscall = exitsyscall
31 var LockedOSThread = lockedOSThread
32 var Xadduintptr = atomic.Xadduintptr
34 var Fastlog2 = fastlog2
38 var ParseByteCount = parseByteCount
40 var Nanotime = nanotime
41 var NetpollBreak = netpollBreak
44 var PhysPageSize = physPageSize
45 var PhysHugePageSize = physHugePageSize
47 var NetpollGenericInit = netpollGenericInit
50 var MemclrNoHeapPointers = memclrNoHeapPointers
52 var CgoCheckPointer = cgoCheckPointer
54 const CrashStackImplemented = crashStackImplemented
56 const TracebackInnerFrames = tracebackInnerFrames
57 const TracebackOuterFrames = tracebackOuterFrames
60 var MapValues = values
62 var LockPartialOrder = lockPartialOrder
64 type LockRank lockRank
66 func (l LockRank) String() string {
67 return lockRank(l).String()
70 const PreemptMSupported = preemptMSupported
77 func LFStackPush(head *uint64, node *LFNode) {
78 (*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
81 func LFStackPop(head *uint64) *LFNode {
82 return (*LFNode)((*lfstack)(head).pop())
84 func LFNodeValidate(node *LFNode) {
85 lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
88 func Netpoll(delta int64) {
94 func GCMask(x any) (ret []byte) {
101 func RunSchedLocalQueueTest() {
103 gs := make([]g, len(pp.runq))
104 Escape(gs) // Ensure gs doesn't move, since we use guintptrs
105 for i := 0; i < len(pp.runq); i++ {
106 if g, _ := runqget(pp); g != nil {
107 throw("runq is not empty initially")
109 for j := 0; j < i; j++ {
110 runqput(pp, &gs[i], false)
112 for j := 0; j < i; j++ {
113 if g, _ := runqget(pp); g != &gs[i] {
114 print("bad element at iter ", i, "/", j, "\n")
118 if g, _ := runqget(pp); g != nil {
119 throw("runq is not empty afterwards")
124 func RunSchedLocalQueueStealTest() {
127 gs := make([]g, len(p1.runq))
128 Escape(gs) // Ensure gs doesn't move, since we use guintptrs
129 for i := 0; i < len(p1.runq); i++ {
130 for j := 0; j < i; j++ {
132 runqput(p1, &gs[j], false)
134 gp := runqsteal(p2, p1, true)
155 for j := 0; j < i; j++ {
157 print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
161 if s != i/2 && s != i/2+1 {
162 print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
168 func RunSchedLocalQueueEmptyTest(iters int) {
169 // Test that runq is not spuriously reported as empty.
170 // Runq emptiness affects scheduling decisions and spurious emptiness
171 // can lead to underutilization (both runnable Gs and idle Ps coexist
172 // for an arbitrarily long time).
173 done := make(chan bool, 1)
176 Escape(gs) // Ensure gs doesn't move, since we use guintptrs
178 for i := 0; i < iters; i++ {
180 next0 := (i & 1) == 0
181 next1 := (i & 2) == 0
182 runqput(p, &gs[0], next0)
184 for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
187 println("next:", next0, next1)
188 throw("queue is empty")
192 for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
194 runqput(p, &gs[1], next1)
202 StringHash = stringHash
203 BytesHash = bytesHash
204 Int32Hash = int32Hash
205 Int64Hash = int64Hash
207 MemHash32 = memhash32
208 MemHash64 = memhash64
209 EfaceHash = efaceHash
210 IfaceHash = ifaceHash
213 var UseAeshash = &useAeshash
215 func MemclrBytes(b []byte) {
216 s := (*slice)(unsafe.Pointer(&b))
217 memclrNoHeapPointers(s.array, uintptr(s.len))
220 const HashLoad = hashLoad
222 // entry point for testing
223 func GostringW(w []uint16) (s string) {
235 func Envs() []string { return envs }
236 func SetEnvs(e []string) { envs = e }
240 // blockWrapper is a wrapper type that ensures a T is placed within a
241 // large object. This is necessary for safely benchmarking things
242 // that manipulate the heap bitmap, like heapBitsSetType.
244 // More specifically, allocating threads assume they're the sole writers
245 // to their span's heap bits, which allows those writes to be non-atomic.
246 // The heap bitmap is written byte-wise, so if one tried to call heapBitsSetType
247 // on an existing object in a small object span, we might corrupt that
248 // span's bitmap with a concurrent byte write to the heap bitmap. Large
249 // object spans contain exactly one object, so we can be sure no other P
250 // is going to be allocating from it concurrently, hence this wrapper type
251 // which ensures we have a T in a large object span.
252 type blockWrapper[T any] struct {
254 _ [_MaxSmallSize]byte // Ensure we're a large object.
257 func BenchSetType[T any](n int, resetTimer func()) {
258 x := new(blockWrapper[T])
260 // Escape x to ensure it is allocated on the heap, as we are
261 // working on the heap bits here.
269 // Benchmark setting the type bits for just the internal T of the block.
270 benchSetType(n, resetTimer, 1, unsafe.Pointer(&x.value), t)
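// An illustrative caller, from the perspective of package runtime_test (a
// sketch; BenchmarkSetTypeNode and the node type are hypothetical, but the
// call shape matches BenchSetType's signature above):
//
//	func BenchmarkSetTypeNode(b *testing.B) {
//		type node struct {
//			next        *node
//			left, right *node
//			v           int64
//		}
//		runtime.BenchSetType[node](b.N, b.ResetTimer)
//	}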
273 const maxArrayBlockWrapperLen = 32
275 // arrayBlockWrapper is like blockWrapper, but the interior value is intended
276 // to be used as a backing store for a slice.
277 type arrayBlockWrapper[T any] struct {
278 value [maxArrayBlockWrapperLen]T
279 _ [_MaxSmallSize]byte // Ensure we're a large object.
282 // arrayLargeBlockWrapper is like arrayBlockWrapper, but the interior array
283 // accommodates many more elements.
284 type arrayLargeBlockWrapper[T any] struct {
286 _ [_MaxSmallSize]byte // Ensure we're a large object.
289 func BenchSetTypeSlice[T any](n int, resetTimer func(), len int) {
290 // We have two separate cases here because we want to avoid
291 // generating a really big allocation when the type is big but the slice
292 // is relatively small. Such an allocation would likely force a GC, which
293 // would skew the test results.
295 if len <= maxArrayBlockWrapperLen {
296 x := new(arrayBlockWrapper[T])
297 // Escape x to ensure it is allocated on the heap, as we are
298 // working on the heap bits here.
300 y = unsafe.Pointer(&x.value[0])
302 x := new(arrayLargeBlockWrapper[T])
304 y = unsafe.Pointer(&x.value[0])
312 // Benchmark setting the type for a slice created from the array
313 // of T within the arrayBlock.
314 benchSetType(n, resetTimer, len, y, t)
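// A corresponding slice benchmark might look like this (a sketch for package
// runtime_test; BenchmarkSetTypeNodeSlice and the element type are hypothetical):
//
//	func BenchmarkSetTypeNodeSlice(b *testing.B) {
//		type node struct {
//			next *node
//			v    int64
//		}
//		runtime.BenchSetTypeSlice[node](b.N, b.ResetTimer, 16)
//	}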
317 // benchSetType is the implementation of the BenchSetType* functions.
318 // x must be len consecutive Ts allocated within a large object span (to
319 // avoid a race on the heap bitmap).
321 // Note: this function cannot be generic. It would get its type from one of
322 // its callers (BenchSetType or BenchSetTypeSlice) whose type parameters are
323 // set by a call in the runtime_test package. That means this function and its
324 // callers will get instantiated in the package that provides the type argument,
325 // i.e. runtime_test. However, we call a function on the system stack. In race
326 // mode the runtime package is usually left uninstrumented because e.g. g0 has
327 // no valid racectx, but if we're instantiated in the runtime_test package,
328 // we might accidentally cause runtime code to be incorrectly instrumented.
329 func benchSetType(n int, resetTimer func(), len int, x unsafe.Pointer, t *_type) {
330 // This benchmark doesn't work with the allocheaders experiment. It sets up
331 // an elaborate scenario to be able to benchmark the function safely, but doing
332 // this work for the allocheaders' version of the function would be complex.
333 // Just fail instead and rely on the test code making sure we never get here.
334 if goexperiment.AllocHeaders {
335 panic("called benchSetType with allocheaders experiment enabled")
338 // Compute the input sizes.
339 size := t.Size() * uintptr(len)
341 // Validate this function's invariant.
342 s := spanOfHeap(uintptr(x))
344 panic("no heap span for input")
346 if s.spanclass.sizeclass() != 0 {
347 panic("span is not a large object span")
350 // Round up the size to the size class to make the benchmark a little more
351 // realistic. However, validate it, to make sure this is safe.
352 allocSize := roundupsize(size, t.PtrBytes == 0)
353 if s.npages*pageSize < allocSize {
354 panic("backing span not large enough for benchmark")
357 // Benchmark heapBitsSetType by calling it in a loop. This is safe because
358 // x is in a large object span.
361 for i := 0; i < n; i++ {
362 heapBitsSetType(uintptr(x), allocSize, size, t)
366 // Make sure x doesn't get freed, since we're taking a uintptr.
370 const PtrSize = goarch.PtrSize
372 var ForceGCPeriod = &forcegcperiod
374 // SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
375 // the "environment" traceback level, so later calls to
376 // debug.SetTraceback (e.g., from testing timeouts) can't lower it.
377 func SetTracebackEnv(level string) {
379 traceback_env = traceback_cache
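// Typical use is from a TestMain that wants crash-style output regardless of
// later debug.SetTraceback calls (a sketch; assumes package runtime_test):
//
//	func TestMain(m *testing.M) {
//		runtime.SetTracebackEnv("system")
//		os.Exit(m.Run())
//	}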
382 var ReadUnaligned32 = readUnaligned32
383 var ReadUnaligned64 = readUnaligned64
385 func CountPagesInUse() (pagesInUse, counted uintptr) {
386 stopTheWorld(stwForTestCountPagesInUse)
388 pagesInUse = mheap_.pagesInUse.Load()
390 for _, s := range mheap_.allspans {
391 if s.state.get() == mSpanInUse {
401 func Fastrand() uint32 { return fastrand() }
402 func Fastrand64() uint64 { return fastrand64() }
403 func Fastrandn(n uint32) uint32 { return fastrandn(n) }
407 func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
408 return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
411 func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
412 (*profBuf)(p).write(tag, now, hdr, stk)
416 ProfBufBlocking = profBufBlocking
417 ProfBufNonBlocking = profBufNonBlocking
420 func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
421 return (*profBuf)(p).read(mode)
424 func (p *ProfBuf) Close() {
425 (*profBuf)(p).close()
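// A minimal round trip through the profiling buffer (a sketch only: the sizes
// and sample values are arbitrary, and the header slice length is assumed to
// match the hdrsize passed to NewProfBuf):
//
//	var tag unsafe.Pointer
//	hdr := []uint64{1, 2}                 // 2 header words, matching hdrsize below
//	stk := []uintptr{0x1234, 0x5678}      // fake PCs for illustration
//	b := runtime.NewProfBuf(2, 1000, 10)  // 2 header words, 1000 data words, 10 tag slots
//	b.Write(&tag, runtime.Nanotime(), hdr, stk)
//	data, tags, eof := b.Read(runtime.ProfBufNonBlocking)
//	_, _, _ = data, tags, eof
//	b.Close()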
428 func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
429 stopTheWorld(stwForTestReadMetricsSlow)
431 // Initialize the metrics beforehand because this could
432 // allocate and skew the stats.
437 // Donate the racectx to g0. readMetricsLocked calls into the race detector
439 getg().racectx = getg().m.curg.racectx
441 // Read the metrics once before in case it allocates and skews the metrics.
442 // readMetricsLocked is designed to only allocate the first time it is called
443 // with a given slice of samples. In effect, this extra read tests that this
444 // remains true, since otherwise the second readMetricsLocked below could
445 // allocate before it returns.
446 readMetricsLocked(samplesp, len, cap)
448 // Read memstats first. It's going to flush
449 // the mcaches which readMetrics does not do, so
450 // going the other way around may result in
451 // inconsistent statistics.
452 readmemstats_m(memStats)
454 // Read metrics again. We need to be sure we're on the
455 // system stack with readmemstats_m so that we don't call into
456 // the stack allocator and adjust metrics between there and here.
457 readMetricsLocked(samplesp, len, cap)
459 // Undo the donation.
467 // ReadMemStatsSlow returns both the runtime-computed MemStats and
468 // MemStats accumulated by scanning the heap.
469 func ReadMemStatsSlow() (base, slow MemStats) {
470 stopTheWorld(stwForTestReadMemStatsSlow)
472 // Run on the system stack to avoid stack growth allocation.
474 // Make sure stats don't change.
477 readmemstats_m(&base)
479 // Initialize slow from base and zero the fields we're
486 slow.HeapReleased = 0
487 var bySize [_NumSizeClasses]struct {
488 Mallocs, Frees uint64
491 // Add up current allocations in spans.
492 for _, s := range mheap_.allspans {
493 if s.state.get() != mSpanInUse {
496 if s.isUnusedUserArenaChunk() {
499 if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
501 slow.Alloc += uint64(s.elemsize)
503 slow.Mallocs += uint64(s.allocCount)
504 slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
505 bySize[sizeclass].Mallocs += uint64(s.allocCount)
509 // Add in frees by just reading the stats for those directly.
511 memstats.heapStats.unsafeRead(&m)
513 // Collect per-sizeclass free stats.
515 for i := 0; i < _NumSizeClasses; i++ {
516 slow.Frees += m.smallFreeCount[i]
517 bySize[i].Frees += m.smallFreeCount[i]
518 bySize[i].Mallocs += m.smallFreeCount[i]
519 smallFree += m.smallFreeCount[i] * uint64(class_to_size[i])
521 slow.Frees += m.tinyAllocCount + m.largeFreeCount
522 slow.Mallocs += slow.Frees
524 slow.TotalAlloc = slow.Alloc + m.largeFree + smallFree
526 for i := range slow.BySize {
527 slow.BySize[i].Mallocs = bySize[i].Mallocs
528 slow.BySize[i].Frees = bySize[i].Frees
531 for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
532 chunk := mheap_.pages.tryChunkOf(i)
536 pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
537 slow.HeapReleased += uint64(pg) * pageSize
539 for _, p := range allp {
540 pg := sys.OnesCount64(p.pcache.scav)
541 slow.HeapReleased += uint64(pg) * pageSize
551 // ShrinkStackAndVerifyFramePointers attempts to shrink the stack of the current goroutine
552 // and verifies that unwinding the new stack doesn't crash, even if the old
553 // stack has been freed or reused (simulated via poisoning).
554 func ShrinkStackAndVerifyFramePointers() {
555 before := stackPoisonCopy
556 defer func() { stackPoisonCopy = before }()
563 // If our new stack contains frame pointers into the old stack, this will
564 // crash because the old stack has been poisoned.
565 FPCallers(make([]uintptr, 1024))
568 // BlockOnSystemStack switches to the system stack, prints "x\n" to
569 // stderr, and blocks in a stack containing
570 // "runtime.blockOnSystemStackInternal".
571 func BlockOnSystemStack() {
572 systemstack(blockOnSystemStackInternal)
575 func blockOnSystemStackInternal() {
581 type RWMutex struct {
585 func (rw *RWMutex) RLock() {
589 func (rw *RWMutex) RUnlock() {
593 func (rw *RWMutex) Lock() {
597 func (rw *RWMutex) Unlock() {
601 const RuntimeHmapSize = unsafe.Sizeof(hmap{})
603 func MapBucketsCount(m map[int]int) int {
604 h := *(**hmap)(unsafe.Pointer(&m))
608 func MapBucketsPointerIsNil(m map[int]int) bool {
609 h := *(**hmap)(unsafe.Pointer(&m))
610 return h.buckets == nil
613 func OverLoadFactor(count int, B uint8) bool {
614 return overLoadFactor(count, B)
617 func LockOSCounts() (external, internal uint32) {
619 if gp.m.lockedExt+gp.m.lockedInt == 0 {
621 panic("lockedm on non-locked goroutine")
625 panic("nil lockedm on locked goroutine")
628 return gp.m.lockedExt, gp.m.lockedInt
632 func TracebackSystemstack(stk []uintptr, i int) int {
634 pc, sp := getcallerpc(), getcallersp()
636 u.initAt(pc, sp, 0, getg(), unwindJumpStack) // Don't ignore errors, for testing
637 return tracebackPCs(&u, 0, stk)
641 n = TracebackSystemstack(stk, i-1)
646 func KeepNArenaHints(n int) {
647 hint := mheap_.arenaHints
648 for i := 1; i < n; i++ {
657 // MapNextArenaHint reserves a page at the next arena growth hint,
658 // preventing the arena from growing there, and returns the range of
659 // addresses that are no longer viable.
661 // This may fail to reserve memory. If it fails, it still returns the
662 // address range it attempted to reserve.
663 func MapNextArenaHint() (start, end uintptr, ok bool) {
664 hint := mheap_.arenaHints
667 start, end = addr-heapArenaBytes, addr
670 start, end = addr, addr+heapArenaBytes
672 got := sysReserve(unsafe.Pointer(addr), physPageSize)
673 ok = (addr == uintptr(got))
675 // We were unable to get the requested reservation.
676 // Release what we did get and fail.
677 sysFreeOS(got, physPageSize)
682 func GetNextArenaHint() uintptr {
683 return mheap_.arenaHints.addr
698 func GIsWaitingOnMutex(gp *G) bool {
699 return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
702 var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
705 func PanicForTesting(b []byte, i int) byte {
706 return unexportedPanicForTesting(b, i)
710 func unexportedPanicForTesting(b []byte, i int) byte {
714 func G0StackOverflow() {
718 // The stack bounds for the g0 stack are not always precise.
719 // Use an artificially small stack to trigger a stack overflow
720 // without actually running out of the system stack (which may segfault).
721 g0.stack.lo = sp - 4096 - stackSystem
722 g0.stackguard0 = g0.stack.lo + stackGuard
723 g0.stackguard1 = g0.stackguard0
729 func stackOverflow(x *byte) {
731 stackOverflow(&buf[0])
734 func MapTombstoneCheck(m map[int]int) {
735 // Make sure emptyOne and emptyRest are distributed correctly.
736 // We should have a series of filled and emptyOne cells, followed by
737 // a series of emptyRest cells.
738 h := *(**hmap)(unsafe.Pointer(&m))
740 t := *(**maptype)(unsafe.Pointer(&i))
742 for x := 0; x < 1<<h.B; x++ {
743 b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
745 for b := b0; b != nil; b = b.overflow(t) {
746 for i := 0; i < bucketCnt; i++ {
747 if b.tophash[i] != emptyRest {
753 for b := b0; b != nil; b = b.overflow(t) {
754 for i := 0; i < bucketCnt; i++ {
755 if k < n && b.tophash[i] == emptyRest {
756 panic("early emptyRest")
758 if k >= n && b.tophash[i] != emptyRest {
759 panic("late non-emptyRest")
761 if k == n-1 && b.tophash[i] == emptyOne {
762 panic("last non-emptyRest entry is emptyOne")
770 func RunGetgThreadSwitchTest() {
771 // Test that getg works correctly with thread switch.
772 // With gccgo, if we generate getg inlined, the backend
773 // may cache the address of the TLS variable, which
774 // will become invalid after a thread switch. This test
775 // checks that the bad caching doesn't happen.
778 go func(ch chan int) {
785 // Block on a receive. This is likely to get us a thread
786 // switch. If we yield to the sender goroutine, it will
787 // lock the thread, forcing us to resume on a different
796 // Also test getg after some control flow, as the
797 // backend is sensitive to control flow.
806 PallocChunkPages = pallocChunkPages
807 PageAlloc64Bit = pageAlloc64Bit
808 PallocSumBytes = pallocSumBytes
811 // Expose pallocSum for testing.
812 type PallocSum pallocSum
814 func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
815 func (m PallocSum) Start() uint { return pallocSum(m).start() }
816 func (m PallocSum) Max() uint { return pallocSum(m).max() }
817 func (m PallocSum) End() uint { return pallocSum(m).end() }
819 // Expose pallocBits for testing.
820 type PallocBits pallocBits
822 func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
823 return (*pallocBits)(b).find(npages, searchIdx)
825 func (b *PallocBits) AllocRange(i, n uint) { (*pallocBits)(b).allocRange(i, n) }
826 func (b *PallocBits) Free(i, n uint) { (*pallocBits)(b).free(i, n) }
827 func (b *PallocBits) Summarize() PallocSum { return PallocSum((*pallocBits)(b).summarize()) }
828 func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
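// For example, a test can allocate a run of pages in a PallocBits and compare
// the summary against SummarizeSlow below (a sketch; the counts are illustrative):
//
//	var b runtime.PallocBits
//	b.AllocRange(0, 32)     // mark the first 32 pages allocated
//	sum := b.Summarize()
//	_ = sum.Start()         // 0: no free run at the start
//	_ = sum.Max()           // longest free run after the allocated prefix
//	idx, _ := b.Find(1, 0)  // first free page, searching from index 0
//	_ = idx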
830 // SummarizeSlow is a slow but more obviously correct implementation
831 // of (*pallocBits).summarize. Used for testing.
832 func SummarizeSlow(b *PallocBits) PallocSum {
833 var start, most, end uint
835 const N = uint(len(b)) * 64
836 for start < N && (*pageBits)(b).get(start) == 0 {
839 for end < N && (*pageBits)(b).get(N-end-1) == 0 {
843 for i := uint(0); i < N; i++ {
844 if (*pageBits)(b).get(i) == 0 {
849 most = max(most, run)
851 return PackPallocSum(start, most, end)
854 // Expose non-trivial helpers for testing.
855 func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
857 // Given two PallocBits, returns a set of bit ranges where
859 func DiffPallocBits(a, b *PallocBits) []BitRange {
864 base, size := uint(0), uint(0)
865 for i := uint(0); i < uint(len(ba))*64; i++ {
866 if ba.get(i) != bb.get(i) {
873 d = append(d, BitRange{base, size})
879 d = append(d, BitRange{base, size})
884 // StringifyPallocBits gets the bits in the bit range r from b,
885 // and returns a string containing the bits as ASCII 0 and 1
887 func StringifyPallocBits(b *PallocBits, r BitRange) string {
889 for j := r.I; j < r.I+r.N; j++ {
890 if (*pageBits)(b).get(j) != 0 {
899 // Expose pallocData for testing.
900 type PallocData pallocData
902 func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
903 return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
905 func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
906 func (d *PallocData) ScavengedSetRange(i, n uint) {
907 (*pallocData)(d).scavenged.setRange(i, n)
909 func (d *PallocData) PallocBits() *PallocBits {
910 return (*PallocBits)(&(*pallocData)(d).pallocBits)
912 func (d *PallocData) Scavenged() *PallocBits {
913 return (*PallocBits)(&(*pallocData)(d).scavenged)
916 // Expose fillAligned for testing.
917 func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
919 // Expose pageCache for testing.
920 type PageCache pageCache
922 const PageCachePages = pageCachePages
924 func NewPageCache(base uintptr, cache, scav uint64) PageCache {
925 return PageCache(pageCache{base: base, cache: cache, scav: scav})
927 func (c *PageCache) Empty() bool { return (*pageCache)(c).empty() }
928 func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
929 func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
930 func (c *PageCache) Scav() uint64 { return (*pageCache)(c).scav }
931 func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
932 return (*pageCache)(c).alloc(npages)
934 func (c *PageCache) Flush(s *PageAlloc) {
935 cp := (*pageCache)(c)
936 sp := (*pageAlloc)(s)
939 // None of the tests need any higher-level locking, so we just
940 // take the lock internally.
947 // Expose chunk index type.
948 type ChunkIdx chunkIdx
950 // Expose pageAlloc for testing. Note that because pageAlloc is
951 // not in the heap, neither is PageAlloc.
952 type PageAlloc pageAlloc
954 func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
955 pp := (*pageAlloc)(p)
957 var addr, scav uintptr
959 // None of the tests need any higher-level locking, so we just
960 // take the lock internally.
962 addr, scav = pp.alloc(npages)
967 func (p *PageAlloc) AllocToCache() PageCache {
968 pp := (*pageAlloc)(p)
972 // None of the tests need any higher-level locking, so we just
973 // take the lock internally.
975 c = PageCache(pp.allocToCache())
980 func (p *PageAlloc) Free(base, npages uintptr) {
981 pp := (*pageAlloc)(p)
984 // None of the tests need any higher-level locking, so we just
985 // take the lock internally.
987 pp.free(base, npages)
991 func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
992 return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
994 func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
995 pp := (*pageAlloc)(p)
997 r = pp.scavenge(nbytes, nil, true)
1001 func (p *PageAlloc) InUse() []AddrRange {
1002 ranges := make([]AddrRange, 0, len(p.inUse.ranges))
1003 for _, r := range p.inUse.ranges {
1004 ranges = append(ranges, AddrRange{r})
1009 // Returns nil if the PallocData's L2 is missing.
1010 func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
1012 return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
1015 // AddrRange is a wrapper around addrRange for testing.
1016 type AddrRange struct {
1020 // MakeAddrRange creates a new address range.
1021 func MakeAddrRange(base, limit uintptr) AddrRange {
1022 return AddrRange{makeAddrRange(base, limit)}
1025 // Base returns the virtual base address of the address range.
1026 func (a AddrRange) Base() uintptr {
1027 return a.addrRange.base.addr()
1030 // Limit returns the virtual address of the limit of the address range.
1031 func (a AddrRange) Limit() uintptr {
1032 return a.addrRange.limit.addr()
1035 // Equals returns true if the two address ranges are exactly equal.
1036 func (a AddrRange) Equals(b AddrRange) bool {
1040 // Size returns the size in bytes of the address range.
1041 func (a AddrRange) Size() uintptr {
1042 return a.addrRange.size()
1045 // testSysStat is the sysStat passed to test versions of various
1046 // runtime structures. We do actually have to keep track of this
1047 // because otherwise memstats.mappedReady won't actually line up
1048 // with other stats in the runtime during tests.
1049 var testSysStat = &memstats.other_sys
1051 // AddrRanges is a wrapper around addrRanges for testing.
1052 type AddrRanges struct {
1057 // NewAddrRanges creates a new empty addrRanges.
1059 // Note that this initializes addrRanges just like in the
1060 // runtime, so its memory is persistentalloc'd. Call this
1061 // function sparingly since the memory it allocates is
1064 // This AddrRanges is mutable, so we can test methods like
1066 func NewAddrRanges() AddrRanges {
1069 return AddrRanges{r, true}
1072 // MakeAddrRanges creates a new addrRanges populated with
1075 // The returned AddrRanges is immutable, so methods like
1077 func MakeAddrRanges(a ...AddrRange) AddrRanges {
1078 // Methods that manipulate the backing store of addrRanges.ranges should
1079 // not be used on the result from this function (e.g. add) since they may
1080 // trigger reallocation. That would normally be fine, except the new
1081 // backing store won't come from the heap, but from persistentalloc, so
1082 // we'll leak some memory implicitly.
1083 ranges := make([]addrRange, 0, len(a))
1085 for _, r := range a {
1086 ranges = append(ranges, r.addrRange)
1089 return AddrRanges{addrRanges{
1092 sysStat: testSysStat,
1096 // Ranges returns a copy of the ranges described by the
1098 func (a *AddrRanges) Ranges() []AddrRange {
1099 result := make([]AddrRange, 0, len(a.addrRanges.ranges))
1100 for _, r := range a.addrRanges.ranges {
1101 result = append(result, AddrRange{r})
1106 // FindSucc returns the successor to base. See addrRanges.findSucc
1107 // for more details.
1108 func (a *AddrRanges) FindSucc(base uintptr) int {
1109 return a.findSucc(base)
1112 // Add adds a new AddrRange to the AddrRanges.
1114 // The AddrRange must be mutable (i.e. created by NewAddrRanges),
1115 // otherwise this method will throw.
1116 func (a *AddrRanges) Add(r AddrRange) {
1118 throw("attempt to mutate immutable AddrRanges")
1123 // TotalBytes returns the totalBytes field of the addrRanges.
1124 func (a *AddrRanges) TotalBytes() uintptr {
1125 return a.addrRanges.totalBytes
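// A small sketch tying these helpers together (the addresses are arbitrary):
//
//	a := runtime.NewAddrRanges()  // mutable, backed by persistentalloc
//	a.Add(runtime.MakeAddrRange(0x1000, 0x2000))
//	a.Add(runtime.MakeAddrRange(0x3000, 0x4000))
//	_ = a.FindSucc(0x2500)   // index of the first range whose base is above 0x2500
//	_ = a.TotalBytes()       // 0x2000 bytes in this example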
1128 // BitRange represents a range over a bitmap.
1129 type BitRange struct {
1130 I, N uint // bit index and length in bits
1133 // NewPageAlloc creates a new page allocator for testing and
1134 // initializes it with the scav and chunks maps. Each key in these maps
1135 // represents a chunk index and each value is a series of bit ranges to
1136 // set within each bitmap's chunk.
1138 // The initialization of the pageAlloc preserves the invariant that if a
1139 // scavenged bit is set the alloc bit is necessarily unset, so some
1140 // of the bits described by scav may be cleared in the final bitmap if
1141 // ranges in chunks overlap with them.
1143 // scav is optional, and if nil, the scavenged bitmap will be cleared
1144 // (as opposed to all 1s, which it usually is). Furthermore, every
1145 // chunk index in scav must appear in chunks; ones that do not are
1147 func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
1150 // We've got an entry, so initialize the pageAlloc.
1151 p.init(new(mutex), testSysStat, true)
1152 lockInit(p.mheapLock, lockRankMheap)
1153 for i, init := range chunks {
1154 addr := chunkBase(chunkIdx(i))
1156 // Mark the chunk's existence in the pageAlloc.
1157 systemstack(func() {
1159 p.grow(addr, pallocChunkBytes)
1163 // Initialize the bitmap and update pageAlloc metadata.
1164 ci := chunkIndex(addr)
1165 chunk := p.chunkOf(ci)
1167 // Clear all the scavenged bits that the grow call above set.
1168 chunk.scavenged.clearRange(0, pallocChunkPages)
1170 // Simulate the allocation and subsequent free of all pages in
1171 // the chunk for the scavenge index. This produces a state equivalent
1172 // to all pages within the chunk being free.
1173 p.scav.index.alloc(ci, pallocChunkPages)
1174 p.scav.index.free(ci, 0, pallocChunkPages)
1176 // Apply scavenge state if applicable.
1178 if scvg, ok := scav[i]; ok {
1179 for _, s := range scvg {
1180 // Ignore the case of s.N == 0. setRange doesn't handle
1181 // it and it's a no-op anyway.
1183 chunk.scavenged.setRange(s.I, s.N)
1189 // Apply alloc state.
1190 for _, s := range init {
1191 // Ignore the case of s.N == 0. allocRange doesn't handle
1192 // it and it's a no-op anyway.
1194 chunk.allocRange(s.I, s.N)
1196 // Make sure the scavenge index is updated.
1197 p.scav.index.alloc(ci, s.N)
1201 // Update heap metadata for the allocRange calls above.
1202 systemstack(func() {
1204 p.update(addr, pallocChunkPages, false, false)
1209 return (*PageAlloc)(p)
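// A typical test setup builds a PageAlloc with one chunk, fully free, and then
// exercises Alloc/Free (a sketch using the exported helpers in this file,
// including BaseChunkIdx and FreePageAlloc defined below):
//
//	b := runtime.NewPageAlloc(map[runtime.ChunkIdx][]runtime.BitRange{
//		runtime.BaseChunkIdx: {}, // one chunk, no pages pre-allocated
//	}, nil)
//	defer runtime.FreePageAlloc(b)
//	addr, scav := b.Alloc(1) // allocate a single page
//	_ = scav
//	b.Free(addr, 1)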
1212 // FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
1213 // is called the pageAlloc may no longer be used. The object itself will be
1214 // collected by the garbage collector once it is no longer live.
1215 func FreePageAlloc(pp *PageAlloc) {
1216 p := (*pageAlloc)(pp)
1218 // Free all the mapped space for the summary levels.
1219 if pageAlloc64Bit != 0 {
1220 for l := 0; l < summaryLevels; l++ {
1221 sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
1224 resSize := uintptr(0)
1225 for _, s := range p.summary {
1226 resSize += uintptr(cap(s)) * pallocSumBytes
1228 sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
1231 // Free extra data structures.
1232 sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))
1234 // Subtract back out whatever we mapped for the summaries.
1235 // sysUsed adds to p.sysStat and memstats.mappedReady no matter what
1236 // (and in anger should actually be accounted for), and there's no other
1237 // way to figure out how much we actually mapped.
1238 gcController.mappedReady.Add(-int64(p.summaryMappedReady))
1239 testSysStat.add(-int64(p.summaryMappedReady))
1241 // Free the mapped space for chunks.
1242 for i := range p.chunks {
1243 if x := p.chunks[i]; x != nil {
1245 // This memory comes from sysAlloc and will always be page-aligned.
1246 sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
1251 // BaseChunkIdx is a convenient chunkIdx value which works on both
1252 // 64-bit and 32-bit platforms, allowing the tests to share code
1255 // This should not be higher than 0x100*pallocChunkBytes to support
1256 // mips and mipsle, which only have 31-bit address spaces.
1257 var BaseChunkIdx = func() ChunkIdx {
1259 if pageAlloc64Bit != 0 {
1264 baseAddr := prefix * pallocChunkBytes
1265 if goos.IsAix != 0 {
1266 baseAddr += arenaBaseOffset
1268 return ChunkIdx(chunkIndex(baseAddr))
1271 // PageBase returns an address given a chunk index and a page index
1272 // relative to that chunk.
1273 func PageBase(c ChunkIdx, pageIdx uint) uintptr {
1274 return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
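// For instance (illustrative only):
//
//	addr := runtime.PageBase(runtime.BaseChunkIdx, 3) // 3 pages past the chunk's base address
//	_ = addr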
1277 type BitsMismatch struct {
1282 func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
1285 // Run on the system stack to avoid stack growth allocation.
1286 systemstack(func() {
1287 getg().m.mallocing++
1289 // Lock so that we can safely access the bitmap.
1292 for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
1293 chunk := mheap_.pages.tryChunkOf(i)
1297 for j := 0; j < pallocChunkPages/64; j++ {
1298 // Run over each 64-bit bitmap section and ensure
1299 // scavenged is being cleared properly on allocation.
1300 // If a used bit and scavenged bit are both set, that's
1301 // an error, and could indicate a larger problem, or
1302 // an accounting problem.
1303 want := chunk.scavenged[j] &^ chunk.pallocBits[j]
1304 got := chunk.scavenged[j]
1307 if n >= len(mismatches) {
1310 mismatches[n] = BitsMismatch{
1311 Base: chunkBase(i) + uintptr(j)*64*pageSize,
1319 unlock(&mheap_.lock)
1321 getg().m.mallocing--
1326 func PageCachePagesLeaked() (leaked uintptr) {
1327 stopTheWorld(stwForTestPageCachePagesLeaked)
1329 // Walk over destroyed Ps and look for unflushed caches.
1330 deadp := allp[len(allp):cap(allp)]
1331 for _, p := range deadp {
1332 // Since we're going past len(allp) we may see nil Ps.
1333 // Just ignore them.
1335 leaked += uintptr(sys.OnesCount64(p.pcache.cache))
1343 var Semacquire = semacquire
1344 var Semrelease1 = semrelease1
1346 func SemNwait(addr *uint32) uint32 {
1347 root := semtable.rootFor(addr)
1348 return root.nwait.Load()
1351 const SemTableSize = semTabSize
1353 // SemTable is a wrapper around semTable exported for testing.
1354 type SemTable struct {
1358 // Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr.
1359 func (t *SemTable) Enqueue(addr *uint32) {
1364 t.semTable.rootFor(addr).queue(addr, s, false)
1367 // Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr.
1369 // Returns true if there actually was a waiter to be dequeued.
1370 func (t *SemTable) Dequeue(addr *uint32) bool {
1371 s, _, _ := t.semTable.rootFor(addr).dequeue(addr)
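// A test can pair these to model contention on a single address (a sketch):
//
//	var t runtime.SemTable
//	var addr uint32
//	t.Enqueue(&addr)       // one waiter queued for addr
//	ok := t.Dequeue(&addr) // true: there was a waiter
//	_ = ok
//	_ = t.Dequeue(&addr)   // false: the queue for addr is now empty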
1379 // mspan wrapper for testing.
1382 // Allocate an mspan for testing.
1383 func AllocMSpan() *MSpan {
1385 systemstack(func() {
1387 s = (*mspan)(mheap_.spanalloc.alloc())
1388 unlock(&mheap_.lock)
1393 // Free an allocated mspan.
1394 func FreeMSpan(s *MSpan) {
1395 systemstack(func() {
1397 mheap_.spanalloc.free(unsafe.Pointer(s))
1398 unlock(&mheap_.lock)
1402 func MSpanCountAlloc(ms *MSpan, bits []byte) int {
1404 s.nelems = uint16(len(bits) * 8)
1405 s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
1406 result := s.countAlloc()
1412 TimeHistSubBucketBits = timeHistSubBucketBits
1413 TimeHistNumSubBuckets = timeHistNumSubBuckets
1414 TimeHistNumBuckets = timeHistNumBuckets
1415 TimeHistMinBucketBits = timeHistMinBucketBits
1416 TimeHistMaxBucketBits = timeHistMaxBucketBits
1419 type TimeHistogram timeHistogram
1421 // Count returns the counts for the given bucket, subBucket indices.
1422 // Returns true if the bucket was valid, otherwise returns the counts
1423 // for the overflow bucket if bucket > 0 or the underflow bucket if
1424 // bucket < 0, and false.
1425 func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
1426 t := (*timeHistogram)(th)
1428 return t.underflow.Load(), false
1430 i := bucket*TimeHistNumSubBuckets + subBucket
1431 if i >= len(t.counts) {
1432 return t.overflow.Load(), false
1434 return t.counts[i].Load(), true
1437 func (th *TimeHistogram) Record(duration int64) {
1438 (*timeHistogram)(th).record(duration)
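// A small sketch of how tests drive the histogram (durations are in
// nanoseconds; the bucket indices here are illustrative, not exact):
//
//	var h runtime.TimeHistogram
//	h.Record(2500)          // record a 2.5µs duration
//	h.Record(-1)            // negative durations land in the underflow bucket
//	c, ok := h.Count(-1, 0) // read back the underflow bucket
//	_ = c
//	_ = ok                  // false: (-1, 0) is not a valid (bucket, subBucket)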
1441 var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
1443 func SetIntArgRegs(a int) int {
1453 func FinalizerGAsleep() bool {
1454 return fingStatus.Load()&fingWait != 0
1457 // For GCTestMoveStackOnNextCall, it's important not to introduce an
1458 // extra layer of call, since then there's a return before the "real"
1460 var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall
1462 // For GCTestIsReachable, it's important that we do this as a call so
1463 // escape analysis can see through it.
1464 func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
1465 return gcTestIsReachable(ptrs...)
1468 // For GCTestPointerClass, it's important that we do this as a call so
1469 // escape analysis can see through it.
1471 // This is nosplit because gcTestPointerClass is.
1474 func GCTestPointerClass(p unsafe.Pointer) string {
1475 return gcTestPointerClass(p)
1478 const Raceenabled = raceenabled
1481 GCBackgroundUtilization = gcBackgroundUtilization
1482 GCGoalUtilization = gcGoalUtilization
1483 DefaultHeapMinimum = defaultHeapMinimum
1484 MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
1485 MemoryLimitMinHeapGoalHeadroom = memoryLimitMinHeapGoalHeadroom
1488 type GCController struct {
1492 func NewGCController(gcPercent int, memoryLimit int64) *GCController {
1493 // Force the controller to escape. We're going to
1494 // do 64-bit atomics on it, and if it gets stack-allocated
1495 // on a 32-bit architecture, it may get allocated unaligned
1497 g := Escape(new(GCController))
1498 g.gcControllerState.test = true // Mark it as a test copy.
1499 g.init(int32(gcPercent), memoryLimit)
1503 func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
1504 trigger, _ := c.trigger()
1505 if c.heapMarked > trigger {
1506 trigger = c.heapMarked
1508 c.maxStackScan.Store(stackSize)
1509 c.globalsScan.Store(globalsSize)
1510 c.heapLive.Store(trigger)
1511 c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
1512 c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
1515 func (c *GCController) AssistWorkPerByte() float64 {
1516 return c.assistWorkPerByte.Load()
1519 func (c *GCController) HeapGoal() uint64 {
1523 func (c *GCController) HeapLive() uint64 {
1524 return c.heapLive.Load()
1527 func (c *GCController) HeapMarked() uint64 {
1531 func (c *GCController) Triggered() uint64 {
1535 type GCControllerReviseDelta struct {
1540 GlobalsScanWork int64
1543 func (c *GCController) Revise(d GCControllerReviseDelta) {
1544 c.heapLive.Add(d.HeapLive)
1545 c.heapScan.Add(d.HeapScan)
1546 c.heapScanWork.Add(d.HeapScanWork)
1547 c.stackScanWork.Add(d.StackScanWork)
1548 c.globalsScanWork.Add(d.GlobalsScanWork)
1552 func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
1553 c.assistTime.Store(assistTime)
1554 c.endCycle(elapsed, gomaxprocs, false)
1555 c.resetLive(bytesMarked)
1559 func (c *GCController) AddIdleMarkWorker() bool {
1560 return c.addIdleMarkWorker()
1563 func (c *GCController) NeedIdleMarkWorker() bool {
1564 return c.needIdleMarkWorker()
1567 func (c *GCController) RemoveIdleMarkWorker() {
1568 c.removeIdleMarkWorker()
1571 func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
1572 c.setMaxIdleMarkWorkers(max)
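// The controller test harness is typically driven like this (a rough sketch;
// the numbers are arbitrary and real tests assert on the resulting state):
//
//	c := runtime.NewGCController(100, math.MaxInt64) // GOGC=100, effectively no memory limit
//	c.StartCycle(1<<20, 1<<20, 0.5, 4)               // stack/globals sizes, scannable fraction, GOMAXPROCS
//	c.Revise(runtime.GCControllerReviseDelta{HeapLive: 1 << 20})
//	c.EndCycle(1<<20, 0, 1000000, 4)                 // bytesMarked, assistTime, elapsed, GOMAXPROCS
//	_ = c.HeapGoal()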
1575 var alwaysFalse bool
1578 func Escape[T any](x T) T {
1585 // Acquirem blocks preemption.
1594 var Timediv = timediv
1596 type PIController struct {
1600 func NewPIController(kp, ti, tt, min, max float64) *PIController {
1601 return &PIController{piController{
1610 func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
1611 return c.piController.next(input, setpoint, period)
1615 CapacityPerProc = capacityPerProc
1616 GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
1619 type GCCPULimiter struct {
1620 limiter gcCPULimiterState
1623 func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
1624 // Force the controller to escape. We're going to
1625 // do 64-bit atomics on it, and if it gets stack-allocated
1626 // on a 32-bit architecture, it may get allocated unaligned
1628 l := Escape(new(GCCPULimiter))
1629 l.limiter.test = true
1630 l.limiter.resetCapacity(now, gomaxprocs)
1634 func (l *GCCPULimiter) Fill() uint64 {
1635 return l.limiter.bucket.fill
1638 func (l *GCCPULimiter) Capacity() uint64 {
1639 return l.limiter.bucket.capacity
1642 func (l *GCCPULimiter) Overflow() uint64 {
1643 return l.limiter.overflow
1646 func (l *GCCPULimiter) Limiting() bool {
1647 return l.limiter.limiting()
1650 func (l *GCCPULimiter) NeedUpdate(now int64) bool {
1651 return l.limiter.needUpdate(now)
1654 func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
1655 l.limiter.startGCTransition(enableGC, now)
1658 func (l *GCCPULimiter) FinishGCTransition(now int64) {
1659 l.limiter.finishGCTransition(now)
1662 func (l *GCCPULimiter) Update(now int64) {
1663 l.limiter.update(now)
1666 func (l *GCCPULimiter) AddAssistTime(t int64) {
1667 l.limiter.addAssistTime(t)
1670 func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
1671 l.limiter.resetCapacity(now, nprocs)
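// A sketch of driving the limiter in a test (times are nanoseconds and the
// values are arbitrary):
//
//	l := runtime.NewGCCPULimiter(0, 4)
//	l.StartGCTransition(true, 0) // pretend a GC starts at t=0
//	l.FinishGCTransition(100)
//	l.AddAssistTime(50)
//	l.Update(200)
//	_ = l.Limiting()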
1674 const ScavengePercent = scavengePercent
1676 type Scavenger struct {
1677 Sleep func(int64) int64
1678 Scavenge func(uintptr) (uintptr, int64)
1679 ShouldStop func() bool
1680 GoMaxProcs func() int32
1682 released atomic.Uintptr
1683 scavenger scavengerState
1684 stop chan<- struct{}
1685 done <-chan struct{}
1688 func (s *Scavenger) Start() {
1689 if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
1690 panic("must populate all stubs")
1694 s.scavenger.sleepStub = s.Sleep
1695 s.scavenger.scavenge = s.Scavenge
1696 s.scavenger.shouldStop = s.ShouldStop
1697 s.scavenger.gomaxprocs = s.GoMaxProcs
1699 // Start up scavenger goroutine, and wait for it to be ready.
1700 stop := make(chan struct{})
1702 done := make(chan struct{})
1705 // This should match bgscavenge, loosely.
1715 released, workTime := s.scavenger.run()
1720 s.released.Add(released)
1721 s.scavenger.sleep(workTime)
1724 if !s.BlockUntilParked(1e9 /* 1 second */) {
1725 panic("timed out waiting for scavenger to get ready")
1729 // BlockUntilParked blocks until the scavenger parks, or until
1730 // timeout is exceeded. Returns true if the scavenger parked.
1732 // Note that in testing, parked means something slightly different.
1733 // In anger, the scavenger parks to sleep, too, but in testing,
1734 // it only parks when it actually has no work to do.
1735 func (s *Scavenger) BlockUntilParked(timeout int64) bool {
1736 // Just spin, waiting for it to park.
1738 // The actual parking process is racy with respect to
1739 // wakeups, which is fine, but for testing we need something
1740 // a bit more robust.
1742 for nanotime()-start < timeout {
1743 lock(&s.scavenger.lock)
1744 parked := s.scavenger.parked
1745 unlock(&s.scavenger.lock)
1754 // Released returns how many bytes the scavenger released.
1755 func (s *Scavenger) Released() uintptr {
1756 return s.released.Load()
1759 // Wake wakes up a parked scavenger to keep running.
1760 func (s *Scavenger) Wake() {
1764 // Stop cleans up the scavenger's resources. The scavenger
1765 // must be parked for this to work.
1766 func (s *Scavenger) Stop() {
1767 lock(&s.scavenger.lock)
1768 parked := s.scavenger.parked
1769 unlock(&s.scavenger.lock)
1771 panic("tried to clean up scavenger that is not parked")
1778 type ScavengeIndex struct {
1782 func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
1783 s := new(ScavengeIndex)
1784 // This is a bit lazy but we easily guarantee we'll be able
1785 // to reference all the relevant chunks. The worst-case
1786 // memory usage here is 512 MiB, but tests generally use
1787 // small offsets from BaseChunkIdx, which results in ~100s
1788 // of KiB in memory use.
1790 // This may still be worth making better, at least by sharing
1791 // this fairly large array across calls with a sync.Pool or
1792 // something. Currently, when the tests are run serially,
1793 // it takes around 0.5s. Not all that much, but if we have
1794 // a lot of tests like this it could add up.
1795 s.i.chunks = make([]atomicScavChunkData, max)
1796 s.i.min.Store(uintptr(min))
1797 s.i.max.Store(uintptr(max))
1798 s.i.minHeapIdx.Store(uintptr(min))
1803 func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
1804 ci, off := s.i.find(force)
1805 return ChunkIdx(ci), off
1808 func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
1809 sc, ec := chunkIndex(base), chunkIndex(limit-1)
1810 si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
1813 // The range doesn't cross any chunk boundaries.
1814 s.i.alloc(sc, ei+1-si)
1816 // The range crosses at least one chunk boundary.
1817 s.i.alloc(sc, pallocChunkPages-si)
1818 for c := sc + 1; c < ec; c++ {
1819 s.i.alloc(c, pallocChunkPages)
1825 func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
1826 sc, ec := chunkIndex(base), chunkIndex(limit-1)
1827 si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
1830 // The range doesn't cross any chunk boundaries.
1831 s.i.free(sc, si, ei+1-si)
1833 // The range crosses at least one chunk boundary.
1834 s.i.free(sc, si, pallocChunkPages-si)
1835 for c := sc + 1; c < ec; c++ {
1836 s.i.free(c, 0, pallocChunkPages)
1838 s.i.free(ec, 0, ei+1)
1842 func (s *ScavengeIndex) ResetSearchAddrs() {
1843 for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
1844 addr, marked := a.Load()
1846 a.StoreUnmark(addr, addr)
1850 s.i.freeHWM = minOffAddr
1853 func (s *ScavengeIndex) NextGen() {
1857 func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
1858 s.i.setEmpty(chunkIdx(ci))
1861 func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
1862 sc0 := scavChunkData{
1865 lastInUse: lastInUse,
1866 scavChunkFlags: scavChunkFlags(flags),
1869 sc1 := unpackScavChunkData(scp)
1873 const GTrackingPeriod = gTrackingPeriod
1875 var ZeroBase = unsafe.Pointer(&zerobase)
1877 const UserArenaChunkBytes = userArenaChunkBytes
1879 type UserArena struct {
1883 func NewUserArena() *UserArena {
1884 return &UserArena{newUserArena()}
1887 func (a *UserArena) New(out *any) {
1890 if typ.Kind_&kindMask != kindPtr {
1891 panic("new result of non-ptr type")
1893 typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
1894 i.data = a.arena.new(typ)
1897 func (a *UserArena) Slice(sl any, cap int) {
1898 a.arena.slice(sl, cap)
1901 func (a *UserArena) Free() {
1905 func GlobalWaitingArenaChunks() int {
1907 systemstack(func() {
1909 for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
1912 unlock(&mheap_.lock)
1917 func UserArenaClone[T any](s T) T {
1918 return arena_heapify(s).(T)
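// Arena tests typically allocate into a UserArena and free it when done (a
// sketch; the concrete type T is illustrative):
//
//	type T struct{ X, Y int }
//	a := runtime.NewUserArena()
//	var x any = (*T)(nil) // the pointer type tells New what to allocate
//	a.New(&x)             // x now holds a *T backed by arena memory
//	var s []T
//	a.Slice(&s, 10)       // s gets arena-backed storage with cap 10
//	a.Free()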
1921 var AlignUp = alignUp
1923 // BlockUntilEmptyFinalizerQueue blocks until either the finalizer
1924 // queue is emptied (and the finalizers have executed) or the timeout
1925 // is reached. Returns true if the finalizer queue was emptied.
1926 func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
1928 for nanotime()-start < timeout {
1930 // We know the queue has been drained when both finq is nil
1931 // and the finalizer g has stopped executing.
1932 empty := finq == nil
1933 empty = empty && readgstatus(fing) == _Gwaiting && fing.waitreason == waitReasonFinalizerWait
1943 func FrameStartLine(f *Frame) int {
1947 // PersistentAlloc allocates some memory that lives outside the Go heap.
1948 // This memory will never be freed; use sparingly.
1949 func PersistentAlloc(n uintptr) unsafe.Pointer {
1950 return persistentalloc(n, 0, &memstats.other_sys)
1953 // FPCallers works like Callers and uses frame pointer unwinding to populate
1954 // pcBuf with the return addresses of the physical frames on the stack.
1955 func FPCallers(pcBuf []uintptr) int {
1956 return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf)
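// For example (a sketch; the buffer size is arbitrary):
//
//	pcs := make([]uintptr, 32)
//	n := runtime.FPCallers(pcs)
//	_ = pcs[:n] // return addresses of the physical frames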
1959 const FramePointerEnabled = framepointer_enabled
1963 GetPinCounter = pinnerGetPinCounter
1966 func SetPinnerLeakPanic(f func()) {
1969 func GetPinnerLeakPanic() func() {
1970 return pinnerLeakPanic
1973 var testUintptr uintptr
1975 func MyGenericFunc[T any]() {
1976 systemstack(func() {
1981 func UnsafePoint(pc uintptr) bool {
1983 v := pcdatavalue(fi, abi.PCDATA_UnsafePoint, pc)
1985 case abi.UnsafePointUnsafe:
1987 case abi.UnsafePointSafe:
1989 case abi.UnsafePointRestart1, abi.UnsafePointRestart2, abi.UnsafePointRestartAtEntry:
1990 // These are all interruptible; they just encode a nonstandard
1991 // way of recovering when interrupted.
1995 panic("invalid unsafe point code " + string(itoa(buf[:], uint64(v))))