1 // Copyright 2010 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Export guts for testing.
12 "runtime/internal/atomic"
13 "runtime/internal/sys"
24 var Fintto64 = fintto64
25 var F64toint = f64toint
27 var Entersyscall = entersyscall
28 var Exitsyscall = exitsyscall
29 var LockedOSThread = lockedOSThread
30 var Xadduintptr = atomic.Xadduintptr
32 var Fastlog2 = fastlog2
36 var ParseByteCount = parseByteCount
38 var Nanotime = nanotime
39 var NetpollBreak = netpollBreak
42 var PhysPageSize = physPageSize
43 var PhysHugePageSize = physHugePageSize
45 var NetpollGenericInit = netpollGenericInit
48 var MemclrNoHeapPointers = memclrNoHeapPointers
50 const TracebackInnerFrames = tracebackInnerFrames
51 const TracebackOuterFrames = tracebackOuterFrames
53 var LockPartialOrder = lockPartialOrder
55 type LockRank lockRank
57 func (l LockRank) String() string {
58 return lockRank(l).String()
61 const PreemptMSupported = preemptMSupported
68 func LFStackPush(head *uint64, node *LFNode) {
69 (*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
72 func LFStackPop(head *uint64) *LFNode {
73 return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
75 func LFNodeValidate(node *LFNode) {
76 lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
79 func Netpoll(delta int64) {
85 func GCMask(x any) (ret []byte) {
92 func RunSchedLocalQueueTest() {
94 gs := make([]g, len(pp.runq))
95 Escape(gs) // Ensure gs doesn't move, since we use guintptrs
96 for i := 0; i < len(pp.runq); i++ {
97 if g, _ := runqget(pp); g != nil {
98 throw("runq is not empty initially")
100 for j := 0; j < i; j++ {
101 runqput(pp, &gs[i], false)
103 for j := 0; j < i; j++ {
104 if g, _ := runqget(pp); g != &gs[i] {
105 print("bad element at iter ", i, "/", j, "\n")
109 if g, _ := runqget(pp); g != nil {
110 throw("runq is not empty afterwards")
115 func RunSchedLocalQueueStealTest() {
118 gs := make([]g, len(p1.runq))
119 Escape(gs) // Ensure gs doesn't move, since we use guintptrs
120 for i := 0; i < len(p1.runq); i++ {
121 for j := 0; j < i; j++ {
123 runqput(p1, &gs[j], false)
125 gp := runqsteal(p2, p1, true)
146 for j := 0; j < i; j++ {
148 print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
152 if s != i/2 && s != i/2+1 {
153 print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
159 func RunSchedLocalQueueEmptyTest(iters int) {
160 // Test that runq is not spuriously reported as empty.
161 // Runq emptiness affects scheduling decisions and spurious emptiness
162 // can lead to underutilization (both runnable Gs and idle Ps coexist
163 // for an arbitrarily long time).
164 done := make(chan bool, 1)
167 Escape(gs) // Ensure gs doesn't move, since we use guintptrs
169 for i := 0; i < iters; i++ {
171 next0 := (i & 1) == 0
172 next1 := (i & 2) == 0
173 runqput(p, &gs[0], next0)
175 for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
178 println("next:", next0, next1)
179 throw("queue is empty")
183 for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
185 runqput(p, &gs[1], next1)
193 StringHash = stringHash
194 BytesHash = bytesHash
195 Int32Hash = int32Hash
196 Int64Hash = int64Hash
198 MemHash32 = memhash32
199 MemHash64 = memhash64
200 EfaceHash = efaceHash
201 IfaceHash = ifaceHash
204 var UseAeshash = &useAeshash
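// An illustrative sketch of how the hash hooks above can be exercised:
// hashing the same input with the same seed must be deterministic within a
// single process run. The function name is hypothetical, not part of this API.
func exampleStringHashDeterminism() {
	h0 := StringHash("gopher", 0)
	h1 := StringHash("gopher", 0)
	if h0 != h1 {
		throw("StringHash must be deterministic for a fixed seed")
	}
}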
206 func MemclrBytes(b []byte) {
207 s := (*slice)(unsafe.Pointer(&b))
208 memclrNoHeapPointers(s.array, uintptr(s.len))
211 const HashLoad = hashLoad
213 // entry point for testing
214 func GostringW(w []uint16) (s string) {
226 func Envs() []string { return envs }
227 func SetEnvs(e []string) { envs = e }
231 func BenchSetType(n int, x any) {
232 // Escape x to ensure it is allocated on the heap, as we are
233 // working on the heap bits here.
239 switch t.Kind_ & kindMask {
241 t = (*ptrtype)(unsafe.Pointer(t)).Elem
249 t = (*slicetype)(unsafe.Pointer(t)).Elem
250 size = t.Size_ * slice.len
253 allocSize := roundupsize(size)
255 for i := 0; i < n; i++ {
256 heapBitsSetType(uintptr(p), allocSize, size, t)
261 const PtrSize = goarch.PtrSize
263 var ForceGCPeriod = &forcegcperiod
265 // SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
266 // the "environment" traceback level, so later calls to
267 // debug.SetTraceback (e.g., from testing timeouts) can't lower it.
268 func SetTracebackEnv(level string) {
270 traceback_env = traceback_cache
273 var ReadUnaligned32 = readUnaligned32
274 var ReadUnaligned64 = readUnaligned64
276 func CountPagesInUse() (pagesInUse, counted uintptr) {
277 stopTheWorld("CountPagesInUse")
279 pagesInUse = uintptr(mheap_.pagesInUse.Load())
281 for _, s := range mheap_.allspans {
282 if s.state.get() == mSpanInUse {
292 func Fastrand() uint32 { return fastrand() }
293 func Fastrand64() uint64 { return fastrand64() }
294 func Fastrandn(n uint32) uint32 { return fastrandn(n) }
298 func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
299 return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
302 func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
303 (*profBuf)(p).write(tag, now, hdr, stk)
307 ProfBufBlocking = profBufBlocking
308 ProfBufNonBlocking = profBufNonBlocking
311 func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
312 return (*profBuf)(p).read(profBufReadMode(mode))
315 func (p *ProfBuf) Close() {
316 (*profBuf)(p).close()
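// An illustrative sketch of driving the ProfBuf wrappers above, assuming a
// 2-word header: write one record, read it back without blocking, then close
// the buffer. The function name and values are hypothetical.
func exampleProfBuf() {
	b := NewProfBuf(2, 1024, 16)
	var tag unsafe.Pointer
	b.Write(&tag, nanotime(), []uint64{1, 2}, []uintptr{0x100, 0x200})
	data, tags, eof := b.Read(ProfBufNonBlocking)
	if eof || len(data) == 0 || len(tags) == 0 {
		throw("expected one readable record")
	}
	b.Close()
}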
319 func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
320 stopTheWorld("ReadMetricsSlow")
322 // Initialize the metrics beforehand because this could
323 // allocate and skew the stats.
329 // Read memstats first. It's going to flush
330 // the mcaches which readMetrics does not do, so
331 // going the other way around may result in
332 // inconsistent statistics.
333 readmemstats_m(memStats)
336 // Read metrics off the system stack.
338 // The only part of readMetrics that could allocate
339 // and skew the stats is initMetrics.
340 readMetrics(samplesp, len, cap)
345 // ReadMemStatsSlow returns both the runtime-computed MemStats and
346 // MemStats accumulated by scanning the heap.
347 func ReadMemStatsSlow() (base, slow MemStats) {
348 stopTheWorld("ReadMemStatsSlow")
350 // Run on the system stack to avoid stack growth allocation.
352 // Make sure stats don't change.
355 readmemstats_m(&base)
357 // Initialize slow from base and zero the fields we're recomputing.
364 slow.HeapReleased = 0
365 var bySize [_NumSizeClasses]struct {
366 Mallocs, Frees uint64
369 // Add up current allocations in spans.
370 for _, s := range mheap_.allspans {
371 if s.state.get() != mSpanInUse {
374 if s.isUnusedUserArenaChunk() {
377 if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
379 slow.Alloc += uint64(s.elemsize)
381 slow.Mallocs += uint64(s.allocCount)
382 slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
383 bySize[sizeclass].Mallocs += uint64(s.allocCount)
387 // Add in frees by just reading the stats for those directly.
389 memstats.heapStats.unsafeRead(&m)
391 // Collect per-sizeclass free stats.
393 for i := 0; i < _NumSizeClasses; i++ {
394 slow.Frees += uint64(m.smallFreeCount[i])
395 bySize[i].Frees += uint64(m.smallFreeCount[i])
396 bySize[i].Mallocs += uint64(m.smallFreeCount[i])
397 smallFree += uint64(m.smallFreeCount[i]) * uint64(class_to_size[i])
399 slow.Frees += uint64(m.tinyAllocCount) + uint64(m.largeFreeCount)
400 slow.Mallocs += slow.Frees
402 slow.TotalAlloc = slow.Alloc + uint64(m.largeFree) + smallFree
404 for i := range slow.BySize {
405 slow.BySize[i].Mallocs = bySize[i].Mallocs
406 slow.BySize[i].Frees = bySize[i].Frees
409 for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
410 chunk := mheap_.pages.tryChunkOf(i)
414 pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
415 slow.HeapReleased += uint64(pg) * pageSize
417 for _, p := range allp {
418 pg := sys.OnesCount64(p.pcache.scav)
419 slow.HeapReleased += uint64(pg) * pageSize
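// An illustrative sketch of the kind of check tests build on top of
// ReadMemStatsSlow: the fields recomputed by the heap scan above should agree
// with the runtime's own bookkeeping. The function name is hypothetical.
func exampleCompareMemStats() {
	base, slow := ReadMemStatsSlow()
	if base.Alloc != slow.Alloc || base.Mallocs != slow.Mallocs || base.Frees != slow.Frees {
		print("Alloc: ", base.Alloc, "/", slow.Alloc,
			" Mallocs: ", base.Mallocs, "/", slow.Mallocs,
			" Frees: ", base.Frees, "/", slow.Frees, "\n")
		throw("runtime and recomputed MemStats disagree")
	}
}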
429 // ShrinkStackAndVerifyFramePointers attempts to shrink the stack of the current goroutine
430 // and verifies that unwinding the new stack doesn't crash, even if the old
431 // stack has been freed or reused (simulated via poisoning).
432 func ShrinkStackAndVerifyFramePointers() {
433 before := stackPoisonCopy
434 defer func() { stackPoisonCopy = before }()
441 // If our new stack contains frame pointers into the old stack, this will
442 // crash because the old stack has been poisoned.
443 FPCallers(make([]uintptr, 1024))
446 // BlockOnSystemStack switches to the system stack, prints "x\n" to
447 // stderr, and blocks in a stack containing
448 // "runtime.blockOnSystemStackInternal".
449 func BlockOnSystemStack() {
450 systemstack(blockOnSystemStackInternal)
453 func blockOnSystemStackInternal() {
459 type RWMutex struct {
463 func (rw *RWMutex) RLock() {
467 func (rw *RWMutex) RUnlock() {
471 func (rw *RWMutex) Lock() {
475 func (rw *RWMutex) Unlock() {
479 const RuntimeHmapSize = unsafe.Sizeof(hmap{})
481 func MapBucketsCount(m map[int]int) int {
482 h := *(**hmap)(unsafe.Pointer(&m))
486 func MapBucketsPointerIsNil(m map[int]int) bool {
487 h := *(**hmap)(unsafe.Pointer(&m))
488 return h.buckets == nil
491 func LockOSCounts() (external, internal uint32) {
493 if gp.m.lockedExt+gp.m.lockedInt == 0 {
495 panic("lockedm on non-locked goroutine")
499 panic("nil lockedm on locked goroutine")
502 return gp.m.lockedExt, gp.m.lockedInt
506 func TracebackSystemstack(stk []uintptr, i int) int {
508 pc, sp := getcallerpc(), getcallersp()
510 u.initAt(pc, sp, 0, getg(), unwindJumpStack) // Don't ignore errors, for testing
511 return tracebackPCs(&u, 0, stk)
515 n = TracebackSystemstack(stk, i-1)
520 func KeepNArenaHints(n int) {
521 hint := mheap_.arenaHints
522 for i := 1; i < n; i++ {
531 // MapNextArenaHint reserves a page at the next arena growth hint,
532 // preventing the arena from growing there, and returns the range of
533 // addresses that are no longer viable.
535 // This may fail to reserve memory. If it fails, it still returns the
536 // address range it attempted to reserve.
537 func MapNextArenaHint() (start, end uintptr, ok bool) {
538 hint := mheap_.arenaHints
541 start, end = addr-heapArenaBytes, addr
544 start, end = addr, addr+heapArenaBytes
546 got := sysReserve(unsafe.Pointer(addr), physPageSize)
547 ok = (addr == uintptr(got))
549 // We were unable to get the requested reservation.
550 // Release what we did get and fail.
551 sysFreeOS(got, physPageSize)
556 func GetNextArenaHint() uintptr {
557 return mheap_.arenaHints.addr
572 func GIsWaitingOnMutex(gp *G) bool {
573 return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
576 var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
579 func PanicForTesting(b []byte, i int) byte {
580 return unexportedPanicForTesting(b, i)
584 func unexportedPanicForTesting(b []byte, i int) byte {
588 func G0StackOverflow() {
594 func stackOverflow(x *byte) {
596 stackOverflow(&buf[0])
599 func MapTombstoneCheck(m map[int]int) {
600 // Make sure emptyOne and emptyRest are distributed correctly.
601 // We should have a series of filled and emptyOne cells, followed by
602 // a series of emptyRest cells.
603 h := *(**hmap)(unsafe.Pointer(&m))
605 t := *(**maptype)(unsafe.Pointer(&i))
607 for x := 0; x < 1<<h.B; x++ {
608 b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
610 for b := b0; b != nil; b = b.overflow(t) {
611 for i := 0; i < bucketCnt; i++ {
612 if b.tophash[i] != emptyRest {
618 for b := b0; b != nil; b = b.overflow(t) {
619 for i := 0; i < bucketCnt; i++ {
620 if k < n && b.tophash[i] == emptyRest {
621 panic("early emptyRest")
623 if k >= n && b.tophash[i] != emptyRest {
624 panic("late non-emptyRest")
626 if k == n-1 && b.tophash[i] == emptyOne {
627 panic("last non-emptyRest entry is emptyOne")
635 func RunGetgThreadSwitchTest() {
636 // Test that getg works correctly with thread switch.
637 // With gccgo, if we generate getg inlined, the backend
638 // may cache the address of the TLS variable, which
639 // will become invalid after a thread switch. This test
640 // checks that the bad caching doesn't happen.
643 go func(ch chan int) {
650 // Block on a receive. This is likely to get us a thread
651 // switch. If we yield to the sender goroutine, it will
652 // lock the thread, forcing us to resume on a different thread.
661 // Also test getg after some control flow, as the
662 // backend is sensitive to control flow.
671 PallocChunkPages = pallocChunkPages
672 PageAlloc64Bit = pageAlloc64Bit
673 PallocSumBytes = pallocSumBytes
676 // Expose pallocSum for testing.
677 type PallocSum pallocSum
679 func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
680 func (m PallocSum) Start() uint { return pallocSum(m).start() }
681 func (m PallocSum) Max() uint { return pallocSum(m).max() }
682 func (m PallocSum) End() uint { return pallocSum(m).end() }
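// An illustrative sketch: a summary packed with PackPallocSum should
// round-trip through the accessors above. The function name and values are
// hypothetical.
func examplePallocSumRoundTrip() {
	sum := PackPallocSum(1, 4, 2)
	if sum.Start() != 1 || sum.Max() != 4 || sum.End() != 2 {
		throw("pallocSum did not round-trip")
	}
}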
684 // Expose pallocBits for testing.
685 type PallocBits pallocBits
687 func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
688 return (*pallocBits)(b).find(npages, searchIdx)
690 func (b *PallocBits) AllocRange(i, n uint) { (*pallocBits)(b).allocRange(i, n) }
691 func (b *PallocBits) Free(i, n uint) { (*pallocBits)(b).free(i, n) }
692 func (b *PallocBits) Summarize() PallocSum { return PallocSum((*pallocBits)(b).summarize()) }
693 func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
695 // SummarizeSlow is a slow but more obviously correct implementation
696 // of (*pallocBits).summarize. Used for testing.
697 func SummarizeSlow(b *PallocBits) PallocSum {
698 var start, max, end uint
700 const N = uint(len(b)) * 64
701 for start < N && (*pageBits)(b).get(start) == 0 {
704 for end < N && (*pageBits)(b).get(N-end-1) == 0 {
708 for i := uint(0); i < N; i++ {
709 if (*pageBits)(b).get(i) == 0 {
718 return PackPallocSum(start, max, end)
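// An illustrative sketch of how SummarizeSlow is intended to be used: as a
// cross-check against the optimized Summarize on the same bitmap. The
// function name and the allocated range are hypothetical.
func examplePallocBitsCrossCheck() {
	var b PallocBits
	b.AllocRange(2, 5) // mark pages [2, 7) as allocated
	got, want := b.Summarize(), SummarizeSlow(&b)
	if got.Start() != want.Start() || got.Max() != want.Max() || got.End() != want.End() {
		throw("Summarize disagrees with SummarizeSlow")
	}
}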
721 // Expose non-trivial helpers for testing.
722 func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
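// Two easy illustrative cases for FindBitRange64: an all-ones word has a
// 64-bit run starting at bit 0, and an all-zeros word has no run of set bits,
// which is reported as an index >= 64. The function name is hypothetical.
func exampleFindBitRange64() {
	if i := FindBitRange64(^uint64(0), 64); i != 0 {
		throw("expected a full run starting at bit 0")
	}
	if i := FindBitRange64(0, 1); i < 64 {
		throw("expected no run of set bits")
	}
}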
724 // Given two PallocBits, returns a set of bit ranges where they differ.
726 func DiffPallocBits(a, b *PallocBits) []BitRange {
731 base, size := uint(0), uint(0)
732 for i := uint(0); i < uint(len(ba))*64; i++ {
733 if ba.get(i) != bb.get(i) {
740 d = append(d, BitRange{base, size})
746 d = append(d, BitRange{base, size})
751 // StringifyPallocBits gets the bits in the bit range r from b,
752 // and returns a string containing the bits as ASCII 0 and 1 characters.
754 func StringifyPallocBits(b *PallocBits, r BitRange) string {
756 for j := r.I; j < r.I+r.N; j++ {
757 if (*pageBits)(b).get(j) != 0 {
766 // Expose pallocData for testing.
767 type PallocData pallocData
769 func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
770 return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
772 func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
773 func (d *PallocData) ScavengedSetRange(i, n uint) {
774 (*pallocData)(d).scavenged.setRange(i, n)
776 func (d *PallocData) PallocBits() *PallocBits {
777 return (*PallocBits)(&(*pallocData)(d).pallocBits)
779 func (d *PallocData) Scavenged() *PallocBits {
780 return (*PallocBits)(&(*pallocData)(d).scavenged)
783 // Expose fillAligned for testing.
784 func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
786 // Expose pageCache for testing.
787 type PageCache pageCache
789 const PageCachePages = pageCachePages
791 func NewPageCache(base uintptr, cache, scav uint64) PageCache {
792 return PageCache(pageCache{base: base, cache: cache, scav: scav})
794 func (c *PageCache) Empty() bool { return (*pageCache)(c).empty() }
795 func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
796 func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
797 func (c *PageCache) Scav() uint64 { return (*pageCache)(c).scav }
798 func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
799 return (*pageCache)(c).alloc(npages)
801 func (c *PageCache) Flush(s *PageAlloc) {
802 cp := (*pageCache)(c)
803 sp := (*pageAlloc)(s)
806 // None of the tests need any higher-level locking, so we just
807 // take the lock internally.
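// An illustrative sketch of the PageCache wrappers, leaning on BaseChunkIdx
// and PageBase defined later in this file: a cache whose 64 pages are all
// free and unscavenged hands out its base address first. The function name
// is hypothetical.
func examplePageCache() {
	base := PageBase(BaseChunkIdx, 0)
	c := NewPageCache(base, ^uint64(0), 0)
	addr, scav := c.Alloc(1)
	if addr != base || scav != 0 {
		throw("unexpected first allocation from a fresh page cache")
	}
	if c.Empty() {
		throw("cache should still hold 63 pages")
	}
}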
814 // Expose chunk index type.
815 type ChunkIdx chunkIdx
817 // Expose pageAlloc for testing. Note that because pageAlloc is
818 // not in the heap, neither is PageAlloc.
819 type PageAlloc pageAlloc
821 func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
822 pp := (*pageAlloc)(p)
824 var addr, scav uintptr
826 // None of the tests need any higher-level locking, so we just
827 // take the lock internally.
829 addr, scav = pp.alloc(npages)
834 func (p *PageAlloc) AllocToCache() PageCache {
835 pp := (*pageAlloc)(p)
839 // None of the tests need any higher-level locking, so we just
840 // take the lock internally.
842 c = PageCache(pp.allocToCache())
847 func (p *PageAlloc) Free(base, npages uintptr) {
848 pp := (*pageAlloc)(p)
851 // None of the tests need any higher-level locking, so we just
852 // take the lock internally.
854 pp.free(base, npages)
858 func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
859 return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
861 func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
862 pp := (*pageAlloc)(p)
864 r = pp.scavenge(nbytes, nil, true)
868 func (p *PageAlloc) InUse() []AddrRange {
869 ranges := make([]AddrRange, 0, len(p.inUse.ranges))
870 for _, r := range p.inUse.ranges {
871 ranges = append(ranges, AddrRange{r})
876 // Returns nil if the PallocData's L2 is missing.
877 func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
879 return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
882 // AddrRange is a wrapper around addrRange for testing.
883 type AddrRange struct {
887 // MakeAddrRange creates a new address range.
888 func MakeAddrRange(base, limit uintptr) AddrRange {
889 return AddrRange{makeAddrRange(base, limit)}
892 // Base returns the virtual base address of the address range.
893 func (a AddrRange) Base() uintptr {
894 return a.addrRange.base.addr()
897 // Limit returns the virtual address of the limit of the address range.
898 func (a AddrRange) Limit() uintptr {
899 return a.addrRange.limit.addr()
902 // Equals returns true if the two address ranges are exactly equal.
903 func (a AddrRange) Equals(b AddrRange) bool {
907 // Size returns the size in bytes of the address range.
908 func (a AddrRange) Size() uintptr {
909 return a.addrRange.size()
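// An illustrative sketch of the AddrRange accessors, using BaseChunkIdx and
// PageBase defined later in this file. The function name is hypothetical.
func exampleAddrRange() {
	base := PageBase(BaseChunkIdx, 0)
	r := MakeAddrRange(base, base+4*pageSize)
	if r.Base() != base || r.Limit() != base+4*pageSize {
		throw("AddrRange endpoints do not round-trip")
	}
	if r.Size() != 4*pageSize {
		throw("AddrRange size mismatch")
	}
}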
912 // testSysStat is the sysStat passed to test versions of various
913 // runtime structures. We do actually have to keep track of this
914 // because otherwise memstats.mappedReady won't actually line up
915 // with other stats in the runtime during tests.
916 var testSysStat = &memstats.other_sys
918 // AddrRanges is a wrapper around addrRanges for testing.
919 type AddrRanges struct {
924 // NewAddrRanges creates a new empty addrRanges.
926 // Note that this initializes addrRanges just like in the
927 // runtime, so its memory is persistentalloc'd. Call this
928 // function sparingly since the memory it allocates is never released.
931 // This AddrRanges is mutable, so we can test methods like Add.
933 func NewAddrRanges() AddrRanges {
936 return AddrRanges{r, true}
939 // MakeAddrRanges creates a new addrRanges populated with the ranges in a.
942 // The returned AddrRanges is immutable, so methods like Add will fail.
944 func MakeAddrRanges(a ...AddrRange) AddrRanges {
945 // Methods that manipulate the backing store of addrRanges.ranges should
946 // not be used on the result from this function (e.g. add) since they may
947 // trigger reallocation. That would normally be fine, except the new
948 // backing store won't come from the heap, but from persistentalloc, so
949 // we'll leak some memory implicitly.
950 ranges := make([]addrRange, 0, len(a))
952 for _, r := range a {
953 ranges = append(ranges, r.addrRange)
956 return AddrRanges{addrRanges{
959 sysStat: testSysStat,
963 // Ranges returns a copy of the ranges described by the addrRanges.
965 func (a *AddrRanges) Ranges() []AddrRange {
966 result := make([]AddrRange, 0, len(a.addrRanges.ranges))
967 for _, r := range a.addrRanges.ranges {
968 result = append(result, AddrRange{r})
973 // FindSucc returns the successor to base. See addrRanges.findSucc for more details.
975 func (a *AddrRanges) FindSucc(base uintptr) int {
976 return a.findSucc(base)
979 // Add adds a new AddrRange to the AddrRanges.
981 // The AddrRange must be mutable (i.e. created by NewAddrRanges),
982 // otherwise this method will throw.
983 func (a *AddrRanges) Add(r AddrRange) {
985 throw("attempt to mutate immutable AddrRanges")
990 // TotalBytes returns the totalBytes field of the addrRanges.
991 func (a *AddrRanges) TotalBytes() uintptr {
992 return a.addrRanges.totalBytes
995 // BitRange represents a range over a bitmap.
996 type BitRange struct {
997 I, N uint // bit index and length in bits
1000 // NewPageAlloc creates a new page allocator for testing and
1001 // initializes it with the scav and chunks maps. Each key in these maps
1002 // represents a chunk index and each value is a series of bit ranges to
1003 // set within each bitmap's chunk.
1005 // The initialization of the pageAlloc preserves the invariant that if a
1006 // scavenged bit is set the alloc bit is necessarily unset, so some
1007 // of the bits described by scav may be cleared in the final bitmap if
1008 // ranges in chunks overlap with them.
1010 // scav is optional, and if nil, the scavenged bitmap will be cleared
1011 // (as opposed to all 1s, which it usually is). Furthermore, every
1012 // chunk index in scav must appear in chunks; ones that do not are ignored.
1014 func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
1017 // We've got an entry, so initialize the pageAlloc.
1018 p.init(new(mutex), testSysStat, true)
1019 lockInit(p.mheapLock, lockRankMheap)
1020 for i, init := range chunks {
1021 addr := chunkBase(chunkIdx(i))
1023 // Mark the chunk's existence in the pageAlloc.
1024 systemstack(func() {
1026 p.grow(addr, pallocChunkBytes)
1030 // Initialize the bitmap and update pageAlloc metadata.
1031 ci := chunkIndex(addr)
1032 chunk := p.chunkOf(ci)
1034 // Clear all the scavenged bits that grow sets.
1035 chunk.scavenged.clearRange(0, pallocChunkPages)
1037 // Simulate the allocation and subsequent free of all pages in
1038 // the chunk for the scavenge index. This sets the state to be equivalent
1039 // to all pages within the index being free.
1040 p.scav.index.alloc(ci, pallocChunkPages)
1041 p.scav.index.free(ci, 0, pallocChunkPages)
1043 // Apply scavenge state if applicable.
1045 if scvg, ok := scav[i]; ok {
1046 for _, s := range scvg {
1047 // Ignore the case of s.N == 0. setRange doesn't handle
1048 // it and it's a no-op anyway.
1050 chunk.scavenged.setRange(s.I, s.N)
1056 // Apply alloc state.
1057 for _, s := range init {
1058 // Ignore the case of s.N == 0. allocRange doesn't handle
1059 // it and it's a no-op anyway.
1061 chunk.allocRange(s.I, s.N)
1063 // Make sure the scavenge index is updated.
1064 p.scav.index.alloc(ci, s.N)
1068 // Update heap metadata for the allocRange calls above.
1069 systemstack(func() {
1071 p.update(addr, pallocChunkPages, false, false)
1076 return (*PageAlloc)(p)
1079 // FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
1080 // is called the pageAlloc may no longer be used. The object itself will be
1081 // collected by the garbage collector once it is no longer live.
1082 func FreePageAlloc(pp *PageAlloc) {
1083 p := (*pageAlloc)(pp)
1085 // Free all the mapped space for the summary levels.
1086 if pageAlloc64Bit != 0 {
1087 for l := 0; l < summaryLevels; l++ {
1088 sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
1091 resSize := uintptr(0)
1092 for _, s := range p.summary {
1093 resSize += uintptr(cap(s)) * pallocSumBytes
1095 sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
1098 // Free extra data structures.
1099 sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))
1101 // Subtract back out whatever we mapped for the summaries.
1102 // sysUsed adds to p.sysStat and memstats.mappedReady no matter what
1103 // (and in anger should actually be accounted for), and there's no other
1104 // way to figure out how much we actually mapped.
1105 gcController.mappedReady.Add(-int64(p.summaryMappedReady))
1106 testSysStat.add(-int64(p.summaryMappedReady))
1108 // Free the mapped space for chunks.
1109 for i := range p.chunks {
1110 if x := p.chunks[i]; x != nil {
1112 // This memory comes from sysAlloc and will always be page-aligned.
1113 sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
1118 // BaseChunkIdx is a convenient chunkIdx value which works on both
1119 // 64-bit and 32-bit platforms, allowing the tests to share code between the two.
1122 // This should not be higher than 0x100*pallocChunkBytes to support
1123 // mips and mipsle, which only have 31-bit address spaces.
1124 var BaseChunkIdx = func() ChunkIdx {
1126 if pageAlloc64Bit != 0 {
1131 baseAddr := prefix * pallocChunkBytes
1132 if goos.IsAix != 0 {
1133 baseAddr += arenaBaseOffset
1135 return ChunkIdx(chunkIndex(baseAddr))
1138 // PageBase returns an address given a chunk index and a page index
1139 // relative to that chunk.
1140 func PageBase(c ChunkIdx, pageIdx uint) uintptr {
1141 return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
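// An illustrative sketch of the intended test workflow for the PageAlloc
// wrappers: build a one-chunk allocator with every page free, allocate and
// free a single page, then release the allocator's off-heap resources with
// FreePageAlloc. The function name is hypothetical.
func examplePageAlloc() {
	p := NewPageAlloc(map[ChunkIdx][]BitRange{
		BaseChunkIdx: {}, // one chunk, no pages marked allocated
	}, nil)
	defer FreePageAlloc(p)
	addr, scav := p.Alloc(1)
	if addr != PageBase(BaseChunkIdx, 0) || scav != 0 {
		throw("unexpected allocation from a fresh chunk")
	}
	p.Free(addr, 1)
}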
1144 type BitsMismatch struct {
1149 func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
1152 // Run on the system stack to avoid stack growth allocation.
1153 systemstack(func() {
1154 getg().m.mallocing++
1156 // Lock so that we can safely access the bitmap.
1159 for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
1160 chunk := mheap_.pages.tryChunkOf(i)
1164 for j := 0; j < pallocChunkPages/64; j++ {
1165 // Run over each 64-bit bitmap section and ensure
1166 // scavenged is being cleared properly on allocation.
1167 // If a used bit and scavenged bit are both set, that's
1168 // an error, and could indicate a larger problem, or
1169 // an accounting problem.
1170 want := chunk.scavenged[j] &^ chunk.pallocBits[j]
1171 got := chunk.scavenged[j]
1174 if n >= len(mismatches) {
1177 mismatches[n] = BitsMismatch{
1178 Base: chunkBase(i) + uintptr(j)*64*pageSize,
1186 unlock(&mheap_.lock)
1188 getg().m.mallocing--
1193 func PageCachePagesLeaked() (leaked uintptr) {
1194 stopTheWorld("PageCachePagesLeaked")
1196 // Walk over destroyed Ps and look for unflushed caches.
1197 deadp := allp[len(allp):cap(allp)]
1198 for _, p := range deadp {
1199 // Since we're going past len(allp) we may see nil Ps.
1200 // Just ignore them.
1202 leaked += uintptr(sys.OnesCount64(p.pcache.cache))
1210 var Semacquire = semacquire
1211 var Semrelease1 = semrelease1
1213 func SemNwait(addr *uint32) uint32 {
1214 root := semtable.rootFor(addr)
1215 return root.nwait.Load()
1218 const SemTableSize = semTabSize
1220 // SemTable is a wrapper around semTable exported for testing.
1221 type SemTable struct {
1225 // Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr.
1226 func (t *SemTable) Enqueue(addr *uint32) {
1231 t.semTable.rootFor(addr).queue(addr, s, false)
1234 // Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr.
1236 // Returns true if there actually was a waiter to be dequeued.
1237 func (t *SemTable) Dequeue(addr *uint32) bool {
1238 s, _ := t.semTable.rootFor(addr).dequeue(addr)
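// An illustrative sketch of the Enqueue/Dequeue wrappers above: a waiter
// queued for an address can be dequeued exactly once. The function and
// variable names are hypothetical.
func exampleSemTable() {
	var sema uint32
	t := &SemTable{}
	t.Enqueue(&sema)
	if !t.Dequeue(&sema) {
		throw("expected to dequeue the waiter just enqueued")
	}
	if t.Dequeue(&sema) {
		throw("expected the semaphore queue to be empty")
	}
}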
1246 // mspan wrapper for testing.
1249 // Allocate an mspan for testing.
1250 func AllocMSpan() *MSpan {
1252 systemstack(func() {
1254 s = (*mspan)(mheap_.spanalloc.alloc())
1255 unlock(&mheap_.lock)
1260 // Free an allocated mspan.
1261 func FreeMSpan(s *MSpan) {
1262 systemstack(func() {
1264 mheap_.spanalloc.free(unsafe.Pointer(s))
1265 unlock(&mheap_.lock)
1269 func MSpanCountAlloc(ms *MSpan, bits []byte) int {
1271 s.nelems = uintptr(len(bits) * 8)
1272 s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
1273 result := s.countAlloc()
1279 TimeHistSubBucketBits = timeHistSubBucketBits
1280 TimeHistNumSubBuckets = timeHistNumSubBuckets
1281 TimeHistNumBuckets = timeHistNumBuckets
1282 TimeHistMinBucketBits = timeHistMinBucketBits
1283 TimeHistMaxBucketBits = timeHistMaxBucketBits
1286 type TimeHistogram timeHistogram
1288 // Count returns the count for the given bucket, subBucket indices.
1289 // Returns true if the bucket was valid, otherwise returns the count
1290 // for the overflow bucket if bucket > 0 or the underflow bucket if
1291 // bucket < 0, and false.
1292 func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
1293 t := (*timeHistogram)(th)
1295 return t.underflow.Load(), false
1297 i := bucket*TimeHistNumSubBuckets + subBucket
1298 if i >= len(t.counts) {
1299 return t.overflow.Load(), false
1301 return t.counts[i].Load(), true
1304 func (th *TimeHistogram) Record(duration int64) {
1305 (*timeHistogram)(th).record(duration)
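// An illustrative sketch of the histogram wrappers above. The histogram is
// forced to the heap via Escape (defined later in this file) because it does
// 64-bit atomic updates. The function name is hypothetical.
func exampleTimeHistogram() {
	h := Escape(new(TimeHistogram))
	h.Record(-1) // negative durations land in the underflow bucket
	if c, ok := h.Count(-1, 0); ok || c != 1 {
		throw("expected a single underflow count")
	}
}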
1308 var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
1310 func SetIntArgRegs(a int) int {
1320 func FinalizerGAsleep() bool {
1321 return fingStatus.Load()&fingWait != 0
1324 // For GCTestMoveStackOnNextCall, it's important not to introduce an
1325 // extra layer of call, since then there's a return before the "real" test.
1327 var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall
1329 // For GCTestIsReachable, it's important that we do this as a call so
1330 // escape analysis can see through it.
1331 func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
1332 return gcTestIsReachable(ptrs...)
1335 // For GCTestPointerClass, it's important that we do this as a call so
1336 // escape analysis can see through it.
1338 // This is nosplit because gcTestPointerClass is.
1341 func GCTestPointerClass(p unsafe.Pointer) string {
1342 return gcTestPointerClass(p)
1345 const Raceenabled = raceenabled
1348 GCBackgroundUtilization = gcBackgroundUtilization
1349 GCGoalUtilization = gcGoalUtilization
1350 DefaultHeapMinimum = defaultHeapMinimum
1351 MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
1352 MemoryLimitMinHeapGoalHeadroom = memoryLimitMinHeapGoalHeadroom
1355 type GCController struct {
1359 func NewGCController(gcPercent int, memoryLimit int64) *GCController {
1360 // Force the controller to escape. We're going to
1361 // do 64-bit atomics on it, and if it gets stack-allocated
1362 // on a 32-bit architecture, it may get allocated in unaligned space.
1364 g := Escape(new(GCController))
1365 g.gcControllerState.test = true // Mark it as a test copy.
1366 g.init(int32(gcPercent), memoryLimit)
1370 func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
1371 trigger, _ := c.trigger()
1372 if c.heapMarked > trigger {
1373 trigger = c.heapMarked
1375 c.maxStackScan.Store(stackSize)
1376 c.globalsScan.Store(globalsSize)
1377 c.heapLive.Store(trigger)
1378 c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
1379 c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
1382 func (c *GCController) AssistWorkPerByte() float64 {
1383 return c.assistWorkPerByte.Load()
1386 func (c *GCController) HeapGoal() uint64 {
1390 func (c *GCController) HeapLive() uint64 {
1391 return c.heapLive.Load()
1394 func (c *GCController) HeapMarked() uint64 {
1398 func (c *GCController) Triggered() uint64 {
1402 type GCControllerReviseDelta struct {
1407 GlobalsScanWork int64
1410 func (c *GCController) Revise(d GCControllerReviseDelta) {
1411 c.heapLive.Add(d.HeapLive)
1412 c.heapScan.Add(d.HeapScan)
1413 c.heapScanWork.Add(d.HeapScanWork)
1414 c.stackScanWork.Add(d.StackScanWork)
1415 c.globalsScanWork.Add(d.GlobalsScanWork)
1419 func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
1420 c.assistTime.Store(assistTime)
1421 c.endCycle(elapsed, gomaxprocs, false)
1422 c.resetLive(bytesMarked)
1426 func (c *GCController) AddIdleMarkWorker() bool {
1427 return c.addIdleMarkWorker()
1430 func (c *GCController) NeedIdleMarkWorker() bool {
1431 return c.needIdleMarkWorker()
1434 func (c *GCController) RemoveIdleMarkWorker() {
1435 c.removeIdleMarkWorker()
1438 func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
1439 c.setMaxIdleMarkWorkers(max)
1442 var alwaysFalse bool
1445 func Escape[T any](x T) T {
1452 // Acquirem blocks preemption.
1461 var Timediv = timediv
1463 type PIController struct {
1467 func NewPIController(kp, ti, tt, min, max float64) *PIController {
1468 return &PIController{piController{
1477 func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
1478 return c.piController.next(input, setpoint, period)
1482 CapacityPerProc = capacityPerProc
1483 GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
1486 type GCCPULimiter struct {
1487 limiter gcCPULimiterState
1490 func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
1491 // Force the limiter to escape. We're going to
1492 // do 64-bit atomics on it, and if it gets stack-allocated
1493 // on a 32-bit architecture, it may get allocated in unaligned space.
1495 l := Escape(new(GCCPULimiter))
1496 l.limiter.test = true
1497 l.limiter.resetCapacity(now, gomaxprocs)
1501 func (l *GCCPULimiter) Fill() uint64 {
1502 return l.limiter.bucket.fill
1505 func (l *GCCPULimiter) Capacity() uint64 {
1506 return l.limiter.bucket.capacity
1509 func (l *GCCPULimiter) Overflow() uint64 {
1510 return l.limiter.overflow
1513 func (l *GCCPULimiter) Limiting() bool {
1514 return l.limiter.limiting()
1517 func (l *GCCPULimiter) NeedUpdate(now int64) bool {
1518 return l.limiter.needUpdate(now)
1521 func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
1522 l.limiter.startGCTransition(enableGC, now)
1525 func (l *GCCPULimiter) FinishGCTransition(now int64) {
1526 l.limiter.finishGCTransition(now)
1529 func (l *GCCPULimiter) Update(now int64) {
1530 l.limiter.update(now)
1533 func (l *GCCPULimiter) AddAssistTime(t int64) {
1534 l.limiter.addAssistTime(t)
1537 func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
1538 l.limiter.resetCapacity(now, nprocs)
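// An illustrative sketch of the limiter wrappers above: a freshly constructed
// limiter sized for 4 procs starts with 4*CapacityPerProc of bucket capacity
// and is not yet limiting. The function name is hypothetical.
func exampleGCCPULimiter() {
	l := NewGCCPULimiter(nanotime(), 4)
	if l.Capacity() != 4*CapacityPerProc {
		throw("unexpected initial limiter capacity")
	}
	if l.Limiting() {
		throw("fresh limiter should not be limiting")
	}
}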
1541 const ScavengePercent = scavengePercent
1543 type Scavenger struct {
1544 Sleep func(int64) int64
1545 Scavenge func(uintptr) (uintptr, int64)
1546 ShouldStop func() bool
1547 GoMaxProcs func() int32
1549 released atomic.Uintptr
1550 scavenger scavengerState
1551 stop chan<- struct{}
1552 done <-chan struct{}
1555 func (s *Scavenger) Start() {
1556 if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
1557 panic("must populate all stubs")
1561 s.scavenger.sleepStub = s.Sleep
1562 s.scavenger.scavenge = s.Scavenge
1563 s.scavenger.shouldStop = s.ShouldStop
1564 s.scavenger.gomaxprocs = s.GoMaxProcs
1566 // Start up scavenger goroutine, and wait for it to be ready.
1567 stop := make(chan struct{})
1569 done := make(chan struct{})
1572 // This should match bgscavenge, loosely.
1582 released, workTime := s.scavenger.run()
1587 s.released.Add(released)
1588 s.scavenger.sleep(workTime)
1591 if !s.BlockUntilParked(1e9 /* 1 second */) {
1592 panic("timed out waiting for scavenger to get ready")
1596 // BlockUntilParked blocks until the scavenger parks, or until
1597 // timeout is exceeded. Returns true if the scavenger parked.
1599 // Note that in testing, parked means something slightly different.
1600 // In anger, the scavenger parks to sleep, too, but in testing,
1601 // it only parks when it actually has no work to do.
1602 func (s *Scavenger) BlockUntilParked(timeout int64) bool {
1603 // Just spin, waiting for it to park.
1605 // The actual parking process is racy with respect to
1606 // wakeups, which is fine, but for testing we need something
1607 // a bit more robust.
1609 for nanotime()-start < timeout {
1610 lock(&s.scavenger.lock)
1611 parked := s.scavenger.parked
1612 unlock(&s.scavenger.lock)
1621 // Released returns how many bytes the scavenger released.
1622 func (s *Scavenger) Released() uintptr {
1623 return s.released.Load()
1626 // Wake wakes up a parked scavenger to keep running.
1627 func (s *Scavenger) Wake() {
1631 // Stop cleans up the scavenger's resources. The scavenger
1632 // must be parked for this to work.
1633 func (s *Scavenger) Stop() {
1634 lock(&s.scavenger.lock)
1635 parked := s.scavenger.parked
1636 unlock(&s.scavenger.lock)
1638 panic("tried to clean up scavenger that is not parked")
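// An illustrative sketch of wiring up the Scavenger harness above: the fake
// Scavenge reports work exactly once and ShouldStop then parks the scavenger,
// so Released becomes nonzero. All names and constants are hypothetical.
func exampleScavenger() {
	worked := false // touched only from the scavenger goroutine via the stubs
	s := &Scavenger{
		Sleep: func(ns int64) int64 { return ns },
		Scavenge: func(n uintptr) (uintptr, int64) {
			if worked {
				return 0, 10
			}
			worked = true
			return n, 10
		},
		ShouldStop: func() bool { return worked },
		GoMaxProcs: func() int32 { return 1 },
	}
	s.Start()
	s.Wake()
	if !s.BlockUntilParked(1e9 /* 1 second */) {
		throw("scavenger never parked")
	}
	if s.Released() == 0 {
		throw("scavenger reported no released memory")
	}
	s.Stop()
}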
1645 type ScavengeIndex struct {
1649 func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
1650 s := new(ScavengeIndex)
1651 // This is a bit lazy but we easily guarantee we'll be able
1652 // to reference all the relevant chunks. The worst-case
1653 // memory usage here is 512 MiB, but tests generally use
1654 // small offsets from BaseChunkIdx, which results in ~100s
1655 // of KiB in memory use.
1657 // This may still be worth making better, at least by sharing
1658 // this fairly large array across calls with a sync.Pool or
1659 // something. Currently, when the tests are run serially,
1660 // it takes around 0.5s. Not all that much, but if we have
1661 // a lot of tests like this it could add up.
1662 s.i.chunks = make([]atomicScavChunkData, max)
1663 s.i.min.Store(uintptr(min))
1664 s.i.max.Store(uintptr(max))
1665 s.i.minHeapIdx.Store(uintptr(min))
1670 func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
1671 ci, off := s.i.find(force)
1672 return ChunkIdx(ci), off
1675 func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
1676 sc, ec := chunkIndex(base), chunkIndex(limit-1)
1677 si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
1680 // The range doesn't cross any chunk boundaries.
1681 s.i.alloc(sc, ei+1-si)
1683 // The range crosses at least one chunk boundary.
1684 s.i.alloc(sc, pallocChunkPages-si)
1685 for c := sc + 1; c < ec; c++ {
1686 s.i.alloc(c, pallocChunkPages)
1692 func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
1693 sc, ec := chunkIndex(base), chunkIndex(limit-1)
1694 si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
1697 // The range doesn't cross any chunk boundaries.
1698 s.i.free(sc, si, ei+1-si)
1700 // The range crosses at least one chunk boundary.
1701 s.i.free(sc, si, pallocChunkPages-si)
1702 for c := sc + 1; c < ec; c++ {
1703 s.i.free(c, 0, pallocChunkPages)
1705 s.i.free(ec, 0, ei+1)
1709 func (s *ScavengeIndex) ResetSearchAddrs() {
1710 for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
1711 addr, marked := a.Load()
1713 a.StoreUnmark(addr, addr)
1717 s.i.freeHWM = minOffAddr
1720 func (s *ScavengeIndex) NextGen() {
1724 func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
1725 s.i.setEmpty(chunkIdx(ci))
1728 func (s *ScavengeIndex) SetNoHugePage(ci ChunkIdx) bool {
1729 return s.i.setNoHugePage(chunkIdx(ci))
1732 func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
1733 sc0 := scavChunkData{
1736 lastInUse: lastInUse,
1737 scavChunkFlags: scavChunkFlags(flags),
1740 sc1 := unpackScavChunkData(scp)
1744 const GTrackingPeriod = gTrackingPeriod
1746 var ZeroBase = unsafe.Pointer(&zerobase)
1748 const UserArenaChunkBytes = userArenaChunkBytes
1750 type UserArena struct {
1754 func NewUserArena() *UserArena {
1755 return &UserArena{newUserArena()}
1758 func (a *UserArena) New(out *any) {
1761 if typ.Kind_&kindMask != kindPtr {
1762 panic("new result of non-ptr type")
1764 typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
1765 i.data = a.arena.new(typ)
1768 func (a *UserArena) Slice(sl any, cap int) {
1769 a.arena.slice(sl, cap)
1772 func (a *UserArena) Free() {
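// An illustrative sketch of the arena wrappers above: allocate a value inside
// the arena, copy it back to the Go heap with UserArenaClone so it outlives
// the arena, then free the arena. The function name and element type are
// hypothetical.
func exampleUserArena() {
	a := NewUserArena()
	var x any = (*[4]uint64)(nil) // request a *[4]uint64 from the arena
	a.New(&x)
	s := x.(*[4]uint64)
	s[0] = 42
	heapCopy := UserArenaClone(s) // heap copy survives the arena's free
	a.Free()
	if heapCopy[0] != 42 {
		throw("arena clone lost its contents")
	}
}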
1776 func GlobalWaitingArenaChunks() int {
1778 systemstack(func() {
1780 for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
1783 unlock(&mheap_.lock)
1788 func UserArenaClone[T any](s T) T {
1789 return arena_heapify(s).(T)
1792 var AlignUp = alignUp
1794 // BlockUntilEmptyFinalizerQueue blocks until either the finalizer
1795 // queue is emptied (and the finalizers have executed) or the timeout
1796 // is reached. Returns true if the finalizer queue was emptied.
1797 func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
1799 for nanotime()-start < timeout {
1801 // We know the queue has been drained when both finq is nil
1802 // and the finalizer g has stopped executing.
1803 empty := finq == nil
1804 empty = empty && readgstatus(fing) == _Gwaiting && fing.waitreason == waitReasonFinalizerWait
1814 func FrameStartLine(f *Frame) int {
1818 // PersistentAlloc allocates some memory that lives outside the Go heap.
1819 // This memory will never be freed; use sparingly.
1820 func PersistentAlloc(n uintptr) unsafe.Pointer {
1821 return persistentalloc(n, 0, &memstats.other_sys)
1824 // FPCallers works like Callers and uses frame pointer unwinding to populate
1825 // pcBuf with the return addresses of the physical frames on the stack.
1826 func FPCallers(pcBuf []uintptr) int {
1827 return fpTracebackPCs(unsafe.Pointer(getcallerfp()), pcBuf)
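// A final illustrative sketch: FPCallers only walks frame pointers, so on
// platforms that don't maintain them it may report zero frames; the result
// is printed rather than asserted. The function name is hypothetical.
func exampleFPCallers() {
	pcs := make([]uintptr, 64)
	n := FPCallers(pcs)
	print("frame pointer frames: ", n, "\n")
}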