// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

	"internal/goexperiment"
func TestGcSys(t *testing.T) {
	t.Skip("skipping known-flaky test; golang.org/issue/37331")
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	got := runTestProg(t, "testprog", "GCSys")
		t.Fatalf("expected %q, but got %q", want, got)
func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int

	// Prevent the compiler from applying escape analysis.
	// This makes sure new(T) is allocated on the heap, not on the stack.
	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
func TestGcMapIndirection(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	for i := 0; i < 2000; i++ {
func TestGcArraySlice(t *testing.T) {
	for i := 0; i < 10; i++ {
		p.nextbuf = head.buf[:]
	for p := head; p != nil; p = p.next {
			t.Fatal("corrupted heap")
func TestGcRescan(t *testing.T) {
	for i := 0; i < 10; i++ {
		p.c = make(chan error)
	for p := head; p != nil; p = p.nexty {
			t.Fatal("corrupted heap")
func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on Windows
	// or in virtualized environments.
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
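
// lastGCPause mirrors the indexing used above. Per the runtime.MemStats
// documentation, PauseNs is a circular buffer of recent GC pause times,
// and the most recent pause is at PauseNs[(NumGC+255)%256]. This helper
// is an illustrative sketch, not part of the original tests.
func lastGCPause(ms *runtime.MemStats) uint64 {
	return ms.PauseNs[(ms.NumGC+255)%256]
}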
func TestHugeGCInfo(t *testing.T) {
	// This test ensures that the compiler can chew these huge types
	// even on the weakest machines. The types are not allocated at
	// runtime.

	// 400MB on 32-bit, 4TB on 64-bit.
	const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
	hugeSink = new([n]*byte)
	hugeSink = new([n]uintptr)
	hugeSink = new(struct {
	hugeSink = new(struct {
func TestPeriodicGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no sysmon on wasm yet")

	// Make sure we're not in the middle of a GC.
	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// it's possible these will be delayed, so this is designed to
	// succeed quickly if things are working, but to give it some
	// slack if things are slow.
	for i := 0; i < 200 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Test that periodic GC actually happened.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	*runtime.ForceGCPeriod = orig

		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
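
// waitForGCs is an illustrative distillation of the poll loop above, not
// part of the original test: wait until at least want GCs have completed
// since base, bounded by attempts so a slow system gets slack without
// hanging the test.
func waitForGCs(base, want uint32, attempts int) uint32 {
	var ms runtime.MemStats
	for i := 0; i < attempts; i++ {
		runtime.ReadMemStats(&ms)
		if ms.NumGC-base >= want {
			break
		}
		time.Sleep(5 * time.Millisecond)
	}
	return ms.NumGC - base
}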
func TestGcZombieReporting(t *testing.T) {
	// This test is somewhat sensitive to how the allocator works.
	// Pointers in the zombies slice may cross spans, so we add
	// invalidptr=0 to avoid the badPointer check.
	// See issue https://golang.org/issues/49613/
	got := runTestProg(t, "testprog", "GCZombie", "GODEBUG=invalidptr=0")
	want := "found pointer to free object"
	if !strings.Contains(got, want) {
		t.Fatalf("expected %q in output, but got %q", want, got)
func TestGCTestMoveStackOnNextCall(t *testing.T) {
	// GCTestMoveStackOnNextCall can fail in rare cases if there's
	// a preemption. This won't happen many times in quick
	// succession, so just retry a few times.
	for retry := 0; retry < 5; retry++ {
		runtime.GCTestMoveStackOnNextCall()
		if moveStackCheck(t, &onStack, uintptr(unsafe.Pointer(&onStack))) {
	t.Fatal("stack did not move")
// This must not be inlined because the point is to force a stack
// growth check and move the stack.
//
//go:noinline
func moveStackCheck(t *testing.T, new *int, old uintptr) bool {
	// new should have been updated by the stack move;
	// old should not have.

	// Capture new's value before doing anything that could
	// further move the stack.
	new2 := uintptr(unsafe.Pointer(new))
	t.Logf("old stack pointer %x, new stack pointer %x", old, new2)

	// Check that we didn't screw up the test's escape analysis.
	if cls := runtime.GCTestPointerClass(unsafe.Pointer(new)); cls != "stack" {
		t.Fatalf("test bug: new (%#x) should be a stack pointer, not %s", new2, cls)

	// This was a real failure.
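
// The trick moveStackCheck relies on: when the runtime moves a stack, it
// rewrites real pointers (new) to point at the object's new address, but
// a uintptr copy of the old address (old) is deliberately left alone. So
// after a stack move, uintptr(unsafe.Pointer(new)) != old.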
func TestGCTestMoveStackRepeatedly(t *testing.T) {
	// Move the stack repeatedly to make sure we're not doubling
	// it each time.
	for i := 0; i < 100; i++ {
		runtime.GCTestMoveStackOnNextCall()
func moveStack1(x bool) {
	// Make sure this function doesn't get auto-nosplit.
func TestGCTestIsReachable(t *testing.T) {
	var all, half []unsafe.Pointer
	for i := 0; i < 16; i++ {
		// The tiny allocator muddies things, so we use a
		// scannable type.
		p := unsafe.Pointer(new(*int))
			half = append(half, p)
	got := runtime.GCTestIsReachable(all...)
		t.Fatalf("did not get expected reachable set; want %b, got %b", want, got)
	runtime.KeepAlive(half)
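
// GCTestIsReachable reports reachability as a bitmask: bit i of the
// result is set iff all[i] was still reachable when the GC ran. Keeping
// only half alive above means the expected mask has bits set exactly for
// the pointers saved in half, which is why want and got print with %b.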
var pointerClassBSS *int
var pointerClassData = 42

func TestGCTestPointerClass(t *testing.T) {
	check := func(p unsafe.Pointer, want string) {
		got := runtime.GCTestPointerClass(p)
			// Convert the pointer to a uintptr to avoid
			// escaping it.
			t.Errorf("for %#x, want class %s, got %s", uintptr(p), want, got)
	check(unsafe.Pointer(&onStack), "stack")
	check(unsafe.Pointer(runtime.Escape(&notOnStack)), "heap")
	check(unsafe.Pointer(&pointerClassBSS), "bss")
	check(unsafe.Pointer(&pointerClassData), "data")
func BenchmarkSetTypePtr(b *testing.B) {
	benchSetType[*byte](b)
}

func BenchmarkSetTypePtr8(b *testing.B) {
	benchSetType[[8]*byte](b)
}

func BenchmarkSetTypePtr16(b *testing.B) {
	benchSetType[[16]*byte](b)
}

func BenchmarkSetTypePtr32(b *testing.B) {
	benchSetType[[32]*byte](b)
}

func BenchmarkSetTypePtr64(b *testing.B) {
	benchSetType[[64]*byte](b)
}

func BenchmarkSetTypePtr126(b *testing.B) {
	benchSetType[[126]*byte](b)
}

func BenchmarkSetTypePtr128(b *testing.B) {
	benchSetType[[128]*byte](b)
}

func BenchmarkSetTypePtrSlice(b *testing.B) {
	benchSetTypeSlice[*byte](b, 1<<10)
}
func BenchmarkSetTypeNode1(b *testing.B) {
	benchSetType[Node1](b)
}

func BenchmarkSetTypeNode1Slice(b *testing.B) {
	benchSetTypeSlice[Node1](b, 32)
}

func BenchmarkSetTypeNode8(b *testing.B) {
	benchSetType[Node8](b)
}

func BenchmarkSetTypeNode8Slice(b *testing.B) {
	benchSetTypeSlice[Node8](b, 32)
}

func BenchmarkSetTypeNode64(b *testing.B) {
	benchSetType[Node64](b)
}

func BenchmarkSetTypeNode64Slice(b *testing.B) {
	benchSetTypeSlice[Node64](b, 32)
}
type Node64Dead struct {

func BenchmarkSetTypeNode64Dead(b *testing.B) {
	benchSetType[Node64Dead](b)
}

func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
	benchSetTypeSlice[Node64Dead](b, 32)
}

type Node124 struct {

func BenchmarkSetTypeNode124(b *testing.B) {
	benchSetType[Node124](b)
}

func BenchmarkSetTypeNode124Slice(b *testing.B) {
	benchSetTypeSlice[Node124](b, 32)
}

type Node126 struct {

func BenchmarkSetTypeNode126(b *testing.B) {
	benchSetType[Node126](b)
}

func BenchmarkSetTypeNode126Slice(b *testing.B) {
	benchSetTypeSlice[Node126](b, 32)
}

type Node128 struct {

func BenchmarkSetTypeNode128(b *testing.B) {
	benchSetType[Node128](b)
}

func BenchmarkSetTypeNode128Slice(b *testing.B) {
	benchSetTypeSlice[Node128](b, 32)
}

type Node130 struct {

func BenchmarkSetTypeNode130(b *testing.B) {
	benchSetType[Node130](b)
}

func BenchmarkSetTypeNode130Slice(b *testing.B) {
	benchSetTypeSlice[Node130](b, 32)
}

type Node1024 struct {

func BenchmarkSetTypeNode1024(b *testing.B) {
	benchSetType[Node1024](b)
}

func BenchmarkSetTypeNode1024Slice(b *testing.B) {
	benchSetTypeSlice[Node1024](b, 32)
}
func benchSetType[T any](b *testing.B) {
	if goexperiment.AllocHeaders {
		b.Skip("not supported with allocation headers experiment")
	}
	b.SetBytes(int64(unsafe.Sizeof(*new(T))))
	runtime.BenchSetType[T](b.N, b.ResetTimer)
}

func benchSetTypeSlice[T any](b *testing.B, len int) {
	if goexperiment.AllocHeaders {
		b.Skip("not supported with allocation headers experiment")
	}
	b.SetBytes(int64(unsafe.Sizeof(*new(T)) * uintptr(len)))
	runtime.BenchSetTypeSlice[T](b.N, b.ResetTimer, len)
}
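
// Both helpers above size their payload with unsafe.Sizeof(*new(T)), the
// usual way to take the size of a type parameter: new(T) yields a *T and
// Sizeof of its dereference is computed at compile time, so nothing is
// actually allocated or dereferenced. A hypothetical standalone version:
func sizeOf[T any]() uintptr {
	return unsafe.Sizeof(*new(T))
}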
func BenchmarkAllocation(b *testing.B) {
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
	for i := 0; i < ngo; i++ {
	for i := 0; i < ngo; i++ {
		for i := 0; i < 1000; i++ {
	for i := 0; i < ngo; i++ {
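
// BenchmarkAllocation above fans work out to ngo worker goroutines: b.N
// true values each request one allocation and ngo false values tell each
// worker to stop, which is why the work channel is sized b.N+ngo. A
// simplified, self-contained sketch of that pattern (hypothetical, with
// *byte standing in for the benchmark's elided type T):
func fanOutAlloc(n, ngo int) {
	work := make(chan bool, n+ngo)
	result := make(chan *byte)
	for i := 0; i < n; i++ {
		work <- true // one allocation request
	}
	for i := 0; i < ngo; i++ {
		work <- false // one stop signal per worker
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *byte
			for <-work {
				x = new(byte) // the allocation being measured
			}
			result <- x // keep the last allocation observable
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}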
func TestPrintGC(t *testing.T) {
		t.Skip("Skipping in short mode")
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	for i := 0; i < 1e4; i++ {
func testTypeSwitch(x any) error {
	switch y := x.(type) {

func testAssert(x any) error {
	if y, ok := x.(error); ok {

func testAssertVar(x any) error {
	var y, ok = x.(error)

func testIfaceEqual(x any) {
func TestPageAccounting(t *testing.T) {
	// Grow the heap in small increments. This used to drop the
	// pages-in-use count below zero because of a rounding
	// mismatch (golang.org/issue/15022).
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Check that the running page count matches reality.
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
func TestReadMemStats(t *testing.T) {
	base, slow := runtime.ReadMemStatsSlow()
		logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("memstats mismatch")
func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	case reflect.Array, reflect.Slice:
		if got.Len() != want.Len() {
			t.Logf("len(%s): got %v, want %v", prefix, got, want)
		for i := 0; i < got.Len(); i++ {
			logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
		for i := 0; i < typ.NumField(); i++ {
			gf, wf := got.Field(i), want.Field(i)
			logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
		t.Fatal("not implemented: logDiff for map")
		if got.Interface() != want.Interface() {
			t.Logf("%s: got %v, want %v", prefix, got, want)
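
// logDiff walks two values of the same type in lockstep using reflection,
// recursing into array, slice, and struct elements and logging each leaf
// that differs. It works for any pair of like-typed values, e.g.:
//
//	logDiff(t, "stats", reflect.ValueOf(got), reflect.ValueOf(want))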
func BenchmarkReadMemStats(b *testing.B) {
	var ms runtime.MemStats
	const heapSize = 100 << 20
	x := make([]*[1024]byte, heapSize/1024)
		x[i] = new([1024]byte)
	for i := 0; i < b.N; i++ {
		runtime.ReadMemStats(&ms)
func applyGCLoad(b *testing.B) func() {
	// We'll apply load to the runtime with maxProcs-1 goroutines
	// and use one more to actually benchmark. It doesn't make sense
	// to try to run this test with only 1 P (that's what
	// BenchmarkReadMemStats is for).
	maxProcs := runtime.GOMAXPROCS(-1)
		b.Skip("This benchmark can only be run with GOMAXPROCS > 1")

	// Code to build a big tree with lots of pointers.
	var buildTree func(depth int) *node
	buildTree = func(depth int) *node {
		for i := range tree.children {
			tree.children[i] = buildTree(depth - 1)

	// Keep the GC busy by continuously generating large trees.
	done := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < maxProcs-1; i++ {
	runtime.KeepAlive(hold)
func BenchmarkReadMemStatsLatency(b *testing.B) {
	stop := applyGCLoad(b)

	// Spend this much time measuring latencies.
	latencies := make([]time.Duration, 0, 1024)

	// Run for timeToBench hitting ReadMemStats continuously
	// and measuring the latency.
	var ms runtime.MemStats
	for i := 0; i < b.N; i++ {
		// Sleep for a bit, otherwise we're just going to keep
		// stopping the world and no one will get to do anything.
		time.Sleep(100 * time.Millisecond)
		runtime.ReadMemStats(&ms)
		latencies = append(latencies, time.Since(start))

	// Make sure to stop the timer before we wait! The load created above
	// is very heavy-weight and not easy to stop, so we could end up
	// confusing the benchmarking framework for small b.N.

	// Disable the default */op metrics.
	// ns/op doesn't mean anything because it's an average, but we
	// have a sleep in our b.N loop above which skews this significantly.
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(0, "B/op")
	b.ReportMetric(0, "allocs/op")

	// Sort latencies then report percentiles.
	sort.Slice(latencies, func(i, j int) bool {
		return latencies[i] < latencies[j]
	})
	b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
	b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
	b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
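
// The three reads above index directly into the sorted slice; a
// hypothetical helper making the computation explicit (assumes ds is
// sorted ascending and non-empty, and 0 <= p < 100):
func percentile(ds []time.Duration, p int) time.Duration {
	return ds[len(ds)*p/100]
}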
func TestUserForcedGC(t *testing.T) {
	// Test that runtime.GC() triggers a GC even if GOGC=off.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)
	runtime.ReadMemStats(&ms2)
	if ms1.NumGC == ms2.NumGC {
		t.Fatalf("runtime.GC() did not trigger GC")
	}
	if ms1.NumForcedGC == ms2.NumForcedGC {
		t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
	}
func writeBarrierBenchmark(b *testing.B, f func()) {
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	//b.Logf("heap size: %d MB", ms.HeapAlloc>>20)

	// Keep GC running continuously during the benchmark, which in
	// turn keeps the write barrier on continuously.
	done := make(chan bool)
		for atomic.LoadUint32(&stop) == 0 {
	atomic.StoreUint32(&stop, 1)
func BenchmarkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large tree both so the GC runs for a while and
	// so we have a data structure to manipulate the pointers of.
	var mkTree func(level int) *node
	mkTree = func(level int) *node {
		n := &node{mkTree(level - 1), mkTree(level - 1)}
		// Seed GC with enough early pointers so it
		// doesn't start termination barriers when it
		// only has the top of the tree.
		wbRoots = append(wbRoots, n)
	const depth = 22 // 64 MB

	writeBarrierBenchmark(b, func() {
		var stack [depth]*node
		// There are two write barriers per iteration, so i+=2.
		for i := 0; i < b.N; i += 2 {
			// Perform one step of reversing the tree.

	// Avoid non-preemptible loops (see issue #10958).
	runtime.KeepAlive(wbRoots)
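
// Each store of a pointer into heap memory executes one write barrier
// while the GC is active, and each reversal step above performs two such
// stores; that is why the loop advances i by 2, so b.N counts individual
// write barriers rather than iterations.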
func BenchmarkBulkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large set of objects we can copy around.
	const heapSize = 64 << 20
	ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
	for i := range ptrs {

	writeBarrierBenchmark(b, func() {
		const blockSize = 1024
		for i := 0; i < b.N; i += blockSize {
			block := ptrs[pos : pos+blockSize]
			copy(block, block[1:])
			block[blockSize-1] = first
			if pos+blockSize > len(ptrs) {

	runtime.KeepAlive(ptrs)
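
// The copy above moves blockSize-1 pointers at once; the runtime covers a
// typed copy of a pointer slice with a single bulk write barrier rather
// than one barrier per element. Rotating the block (copy, then put the
// old first element at the end) keeps the same set of pointers live, so
// the benchmark measures barrier cost, not extra GC work.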
func BenchmarkScanStackNoLocals(b *testing.B) {
	var ready sync.WaitGroup
	teardown := make(chan bool)
	for j := 0; j < 10; j++ {
			countpwg(&x, &ready, teardown)
	for i := 0; i < b.N; i++ {
func BenchmarkMSpanCountAlloc(b *testing.B) {
	// Allocate one dummy mspan for the whole benchmark.
	s := runtime.AllocMSpan()
	defer runtime.FreeMSpan(s)

	// n is the number of bytes to benchmark against.
	// n must always be a multiple of 8, since gcBits is
	// always rounded up to 8 bytes.
	for _, n := range []int{8, 16, 32, 64, 128} {
		b.Run(fmt.Sprintf("bits=%d", n*8), func(b *testing.B) {
			// Initialize a new byte slice with pseudo-random data.
			bits := make([]byte, n)
			for i := 0; i < b.N; i++ {
				runtime.MSpanCountAlloc(s, bits)
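
// Each byte of bits carries one mark bit for each of 8 objects, which is
// why the subtests above are named bits=n*8: a buffer of n bytes
// exercises MSpanCountAlloc over n*8 allocation bits.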
func countpwg(n *int, ready *sync.WaitGroup, teardown chan bool) {
	countpwg(n, ready, teardown)
func TestMemoryLimit(t *testing.T) {
		t.Skip("stress test that takes time to run")
	if runtime.NumCPU() < 4 {
		t.Skip("want at least 4 CPUs for this test")
	got := runTestProg(t, "testprog", "GCMemoryLimit")
		t.Fatalf("expected %q, but got %q", want, got)
func TestMemoryLimitNoGCPercent(t *testing.T) {
		t.Skip("stress test that takes time to run")
	if runtime.NumCPU() < 4 {
		t.Skip("want at least 4 CPUs for this test")
	got := runTestProg(t, "testprog", "GCMemoryLimitNoGCPercent")
		t.Fatalf("expected %q, but got %q", want, got)
func TestMyGenericFunc(t *testing.T) {
	runtime.MyGenericFunc[int]()
}