1 // Copyright 2011 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
23 func TestGcSys(t *testing.T) {
24 t.Skip("skipping known-flaky test; golang.org/issue/37331")
25 if os.Getenv("GOGC") == "off" {
26 t.Skip("skipping test; GOGC=off in environment")
28 got := runTestProg(t, "testprog", "GCSys")
31 t.Fatalf("expected %q, but got %q", want, got)
35 func TestGcDeepNesting(t *testing.T) {
36 type T [2][2][2][2][2][2][2][2][2][2]*int
39 // Keep the compiler's escape analysis from stack-allocating new(T).
40 // This makes sure new(T) is allocated on the heap, not on the stack.
43 a[0][0][0][0][0][0][0][0][0][0] = new(int)
44 *a[0][0][0][0][0][0][0][0][0][0] = 13
46 if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
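// One common way (a sketch only; sink and forceHeap are illustrative names,
// not part of this test) to defeat escape analysis is to store the pointer
// through a package-level sink, which forces new(T) onto the heap:
//
//	var sink any
//
//	func forceHeap() *int {
//		p := new(int)
//		sink = p // p escapes: it is stored in a global the compiler cannot prove dead.
//		return p
//	}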
51 func TestGcMapIndirection(t *testing.T) {
52 defer debug.SetGCPercent(debug.SetGCPercent(1))
58 for i := 0; i < 2000; i++ {
65 func TestGcArraySlice(t *testing.T) {
72 for i := 0; i < 10; i++ {
77 p.nextbuf = head.buf[:]
82 for p := head; p != nil; p = p.next {
84 t.Fatal("corrupted heap")
89 func TestGcRescan(t *testing.T) {
100 for i := 0; i < 10; i++ {
102 p.c = make(chan error)
112 for p := head; p != nil; p = p.nexty {
114 t.Fatal("corrupted heap")
119 func TestGcLastTime(t *testing.T) {
120 ms := new(runtime.MemStats)
121 t0 := time.Now().UnixNano()
123 t1 := time.Now().UnixNano()
124 runtime.ReadMemStats(ms)
125 last := int64(ms.LastGC)
126 if t0 > last || last > t1 {
127 t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
129 pause := ms.PauseNs[(ms.NumGC+255)%256]
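// PauseNs is a 256-entry circular buffer; (NumGC+255)%256 indexes the pause
// of the most recent GC (for example, after 3 GCs, (3+255)%256 == 2).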
130 // Due to timer granularity, pause can actually be 0 on Windows
131 // or in virtualized environments.
133 t.Logf("last GC pause was 0")
134 } else if pause > 10e9 {
135 t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
141 func TestHugeGCInfo(t *testing.T) {
142 // The test ensures that the compiler can chew these huge types even on the weakest machines.
143 // The types are not allocated at runtime.
145 // 400MB on 32-bit, 4TB on 64-bit.
146 const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
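// On 32-bit, unsafe.Sizeof(uintptr(0)) == 4, so the second term is zero and
// n is 400<<20 bytes (400 MB); on 64-bit it adds (8-4)<<40 == 4 TB.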
147 hugeSink = new([n]*byte)
148 hugeSink = new([n]uintptr)
149 hugeSink = new(struct {
154 hugeSink = new(struct {
162 func TestPeriodicGC(t *testing.T) {
163 if runtime.GOARCH == "wasm" {
164 t.Skip("no sysmon on wasm yet")
167 // Make sure we're not in the middle of a GC.
170 var ms1, ms2 runtime.MemStats
171 runtime.ReadMemStats(&ms1)
173 // Make periodic GC run continuously.
174 orig := *runtime.ForceGCPeriod
175 *runtime.ForceGCPeriod = 0
177 // Let some periodic GCs happen. In a heavily loaded system,
178 // it's possible these will be delayed, so this is designed to
179 // succeed quickly if things are working, but to give it some
180 // slack if things are slow.
183 for i := 0; i < 200 && numGCs < want; i++ {
184 time.Sleep(5 * time.Millisecond)
186 // Test that periodic GC actually happened.
187 runtime.ReadMemStats(&ms2)
188 numGCs = ms2.NumGC - ms1.NumGC
190 *runtime.ForceGCPeriod = orig
193 t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
197 func TestGcZombieReporting(t *testing.T) {
198 // This test is somewhat sensitive to how the allocator works.
199 // Pointers in the zombies slice may cross spans, so we
200 // add invalidptr=0 to avoid tripping the badPointer check.
201 // See issue https://golang.org/issues/49613/
202 got := runTestProg(t, "testprog", "GCZombie", "GODEBUG=invalidptr=0")
203 want := "found pointer to free object"
204 if !strings.Contains(got, want) {
205 t.Fatalf("expected %q in output, but got %q", want, got)
209 func TestGCTestMoveStackOnNextCall(t *testing.T) {
212 // GCTestMoveStackOnNextCall can fail in rare cases if there's
213 // a preemption. This won't happen many times in quick
214 // succession, so just retry a few times.
215 for retry := 0; retry < 5; retry++ {
216 runtime.GCTestMoveStackOnNextCall()
217 if moveStackCheck(t, &onStack, uintptr(unsafe.Pointer(&onStack))) {
222 t.Fatal("stack did not move")
225 // This must not be inlined because the point is to force a stack
226 // growth check and move the stack.
229 func moveStackCheck(t *testing.T, new *int, old uintptr) bool {
230 // new should have been updated by the stack move;
231 // old should not have.
233 // Capture new's value before doing anything that could
234 // further move the stack.
235 new2 := uintptr(unsafe.Pointer(new))
237 t.Logf("old stack pointer %x, new stack pointer %x", old, new2)
239 // Check that we didn't screw up the test's escape analysis.
240 if cls := runtime.GCTestPointerClass(unsafe.Pointer(new)); cls != "stack" {
241 t.Fatalf("test bug: new (%#x) should be a stack pointer, not %s", new2, cls)
243 // This was a real failure.
249 func TestGCTestMoveStackRepeatedly(t *testing.T) {
250 // Move the stack repeatedly to make sure we're not doubling
252 for i := 0; i < 100; i++ {
253 runtime.GCTestMoveStackOnNextCall()
259 func moveStack1(x bool) {
260 // Make sure this function doesn't get auto-nosplit.
266 func TestGCTestIsReachable(t *testing.T) {
267 var all, half []unsafe.Pointer
269 for i := 0; i < 16; i++ {
270 // The tiny allocator muddies things, so we use a
272 p := unsafe.Pointer(new(*int))
275 half = append(half, p)
280 got := runtime.GCTestIsReachable(all...)
282 t.Fatalf("did not get expected reachable set; want %b, got %b", want, got)
284 runtime.KeepAlive(half)
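// A rough sketch (not the elided original code) of how the expected
// reachability bitmask could be built: bit i of want is set iff all[i]
// was also kept alive in half.
//
//	var want uint64
//	for i, p := range all {
//		for _, q := range half {
//			if p == q {
//				want |= 1 << i
//			}
//		}
//	}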
287 var pointerClassBSS *int
288 var pointerClassData = 42
290 func TestGCTestPointerClass(t *testing.T) {
292 check := func(p unsafe.Pointer, want string) {
294 got := runtime.GCTestPointerClass(p)
296 // Convert the pointer to a uintptr to avoid
298 t.Errorf("for %#x, want class %s, got %s", uintptr(p), want, got)
303 check(unsafe.Pointer(&onStack), "stack")
304 check(unsafe.Pointer(runtime.Escape(&notOnStack)), "heap")
305 check(unsafe.Pointer(&pointerClassBSS), "bss")
306 check(unsafe.Pointer(&pointerClassData), "data")
310 func BenchmarkSetTypePtr(b *testing.B) {
311 benchSetType[*byte](b)
314 func BenchmarkSetTypePtr8(b *testing.B) {
315 benchSetType[[8]*byte](b)
318 func BenchmarkSetTypePtr16(b *testing.B) {
319 benchSetType[[16]*byte](b)
322 func BenchmarkSetTypePtr32(b *testing.B) {
323 benchSetType[[32]*byte](b)
326 func BenchmarkSetTypePtr64(b *testing.B) {
327 benchSetType[[64]*byte](b)
330 func BenchmarkSetTypePtr126(b *testing.B) {
331 benchSetType[[126]*byte](b)
334 func BenchmarkSetTypePtr128(b *testing.B) {
335 benchSetType[[128]*byte](b)
338 func BenchmarkSetTypePtrSlice(b *testing.B) {
339 benchSetTypeSlice[*byte](b, 1<<10)
347 func BenchmarkSetTypeNode1(b *testing.B) {
348 benchSetType[Node1](b)
351 func BenchmarkSetTypeNode1Slice(b *testing.B) {
352 benchSetTypeSlice[Node1](b, 32)
360 func BenchmarkSetTypeNode8(b *testing.B) {
361 benchSetType[Node8](b)
364 func BenchmarkSetTypeNode8Slice(b *testing.B) {
365 benchSetTypeSlice[Node8](b, 32)
373 func BenchmarkSetTypeNode64(b *testing.B) {
374 benchSetType[Node64](b)
377 func BenchmarkSetTypeNode64Slice(b *testing.B) {
378 benchSetTypeSlice[Node64](b, 32)
381 type Node64Dead struct {
386 func BenchmarkSetTypeNode64Dead(b *testing.B) {
387 benchSetType[Node64Dead](b)
390 func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
391 benchSetTypeSlice[Node64Dead](b, 32)
394 type Node124 struct {
399 func BenchmarkSetTypeNode124(b *testing.B) {
400 benchSetType[Node124](b)
403 func BenchmarkSetTypeNode124Slice(b *testing.B) {
404 benchSetTypeSlice[Node124](b, 32)
407 type Node126 struct {
412 func BenchmarkSetTypeNode126(b *testing.B) {
413 benchSetType[Node126](b)
416 func BenchmarkSetTypeNode126Slice(b *testing.B) {
417 benchSetTypeSlice[Node126](b, 32)
420 type Node128 struct {
425 func BenchmarkSetTypeNode128(b *testing.B) {
426 benchSetType[Node128](b)
429 func BenchmarkSetTypeNode128Slice(b *testing.B) {
430 benchSetTypeSlice[Node128](b, 32)
433 type Node130 struct {
438 func BenchmarkSetTypeNode130(b *testing.B) {
439 benchSetType[Node130](b)
442 func BenchmarkSetTypeNode130Slice(b *testing.B) {
443 benchSetTypeSlice[Node130](b, 32)
446 type Node1024 struct {
451 func BenchmarkSetTypeNode1024(b *testing.B) {
452 benchSetType[Node1024](b)
455 func BenchmarkSetTypeNode1024Slice(b *testing.B) {
456 benchSetTypeSlice[Node1024](b, 32)
459 func benchSetType[T any](b *testing.B) {
460 b.SetBytes(int64(unsafe.Sizeof(*new(T))))
461 runtime.BenchSetType[T](b.N, b.ResetTimer)
464 func benchSetTypeSlice[T any](b *testing.B, len int) {
465 b.SetBytes(int64(unsafe.Sizeof(*new(T)) * uintptr(len)))
466 runtime.BenchSetTypeSlice[T](b.N, b.ResetTimer, len)
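// A sketch (not in the original file) of how the generic helpers above can
// be reused for a new element type; Node2 is hypothetical:
//
//	type Node2 struct {
//		Value       [2]uintptr
//		Left, Right *byte
//	}
//
//	func BenchmarkSetTypeNode2(b *testing.B) {
//		benchSetType[Node2](b)
//	}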
469 func BenchmarkAllocation(b *testing.B) {
473 ngo := runtime.GOMAXPROCS(0)
474 work := make(chan bool, b.N+ngo)
475 result := make(chan *T)
476 for i := 0; i < b.N; i++ {
479 for i := 0; i < ngo; i++ {
482 for i := 0; i < ngo; i++ {
486 for i := 0; i < 1000; i++ {
493 for i := 0; i < ngo; i++ {
498 func TestPrintGC(t *testing.T) {
500 t.Skip("Skipping in short mode")
502 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
503 done := make(chan bool)
514 for i := 0; i < 1e4; i++ {
522 func testTypeSwitch(x any) error {
523 switch y := x.(type) {
532 func testAssert(x any) error {
533 if y, ok := x.(error); ok {
539 func testAssertVar(x any) error {
540 var y, ok = x.(error)
550 func testIfaceEqual(x any) {
556 func TestPageAccounting(t *testing.T) {
557 // Grow the heap in small increments. This used to drop the
558 // pages-in-use count below zero because of a rounding
559 // mismatch (golang.org/issue/15022).
560 const blockSize = 64 << 10
561 blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
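// (64<<20)/blockSize is 1024, so the loop below performs 1024 separate
// 64 KiB allocations, growing the heap by 64 MiB in small steps.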
562 for i := range blocks {
563 blocks[i] = new([blockSize]byte)
566 // Check that the running page count matches reality.
567 pagesInUse, counted := runtime.CountPagesInUse()
568 if pagesInUse != counted {
569 t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
573 func TestReadMemStats(t *testing.T) {
574 base, slow := runtime.ReadMemStatsSlow()
576 logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
577 t.Fatal("memstats mismatch")
581 func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
584 case reflect.Array, reflect.Slice:
585 if got.Len() != want.Len() {
586 t.Logf("len(%s): got %v, want %v", prefix, got, want)
589 for i := 0; i < got.Len(); i++ {
590 logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
593 for i := 0; i < typ.NumField(); i++ {
594 gf, wf := got.Field(i), want.Field(i)
595 logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
598 t.Fatal("not implemented: logDiff for map")
600 if got.Interface() != want.Interface() {
601 t.Logf("%s: got %v, want %v", prefix, got, want)
606 func BenchmarkReadMemStats(b *testing.B) {
607 var ms runtime.MemStats
608 const heapSize = 100 << 20
609 x := make([]*[1024]byte, heapSize/1024)
611 x[i] = new([1024]byte)
615 for i := 0; i < b.N; i++ {
616 runtime.ReadMemStats(&ms)
622 func applyGCLoad(b *testing.B) func() {
623 // We’ll apply load to the runtime with maxProcs-1 goroutines
624 // and use one more to actually benchmark. It doesn't make sense
625 // to try to run this test with only 1 P (that's what
626 // BenchmarkReadMemStats is for).
627 maxProcs := runtime.GOMAXPROCS(-1)
629 b.Skip("This benchmark can only be run with GOMAXPROCS > 1")
632 // Code to build a big tree with lots of pointers.
636 var buildTree func(depth int) *node
637 buildTree = func(depth int) *node {
640 for i := range tree.children {
641 tree.children[i] = buildTree(depth - 1)
647 // Keep the GC busy by continuously generating large trees.
648 done := make(chan struct{})
649 var wg sync.WaitGroup
650 for i := 0; i < maxProcs-1; i++ {
664 runtime.KeepAlive(hold)
673 func BenchmarkReadMemStatsLatency(b *testing.B) {
674 stop := applyGCLoad(b)
676 // Collect the latency of each individual ReadMemStats call.
677 latencies := make([]time.Duration, 0, 1024)
679 // Hit ReadMemStats continuously for b.N iterations
680 // and measure the latency of each call.
682 var ms runtime.MemStats
683 for i := 0; i < b.N; i++ {
684 // Sleep for a bit, otherwise we're just going to keep
685 // stopping the world and no one will get to do anything.
686 time.Sleep(100 * time.Millisecond)
688 runtime.ReadMemStats(&ms)
689 latencies = append(latencies, time.Since(start))
691 // Make sure to stop the timer before we wait! The load created above
692 // is very heavy-weight and not easy to stop, so we could end up
693 // confusing the benchmarking framework for small b.N.
697 // Disable the default */op metrics.
698 // ns/op doesn't mean anything here because it's an average, and the
699 // sleep in the b.N loop above skews it significantly.
700 b.ReportMetric(0, "ns/op")
701 b.ReportMetric(0, "B/op")
702 b.ReportMetric(0, "allocs/op")
704 // Sort latencies then report percentiles.
705 sort.Slice(latencies, func(i, j int) bool {
706 return latencies[i] < latencies[j]
708 b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
709 b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
710 b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
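// A small helper sketch (not part of the original benchmark) equivalent to
// the inline indexing above, assuming latencies is already sorted:
//
//	func percentileNs(sorted []time.Duration, p int) float64 {
//		return float64(sorted[len(sorted)*p/100])
//	}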
713 func TestUserForcedGC(t *testing.T) {
714 // Test that runtime.GC() triggers a GC even if GOGC=off.
715 defer debug.SetGCPercent(debug.SetGCPercent(-1))
717 var ms1, ms2 runtime.MemStats
718 runtime.ReadMemStats(&ms1)
720 runtime.ReadMemStats(&ms2)
721 if ms1.NumGC == ms2.NumGC {
722 t.Fatalf("runtime.GC() did not trigger GC")
724 if ms1.NumForcedGC == ms2.NumForcedGC {
725 t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
729 func writeBarrierBenchmark(b *testing.B, f func()) {
731 var ms runtime.MemStats
732 runtime.ReadMemStats(&ms)
733 //b.Logf("heap size: %d MB", ms.HeapAlloc>>20)
735 // Keep GC running continuously during the benchmark, which in
736 // turn keeps the write barrier on continuously.
738 done := make(chan bool)
740 for atomic.LoadUint32(&stop) == 0 {
746 atomic.StoreUint32(&stop, 1)
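// A minimal sketch (not the elided lines of the helper above) of the
// keep-GC-running pattern it relies on: a goroutine calls runtime.GC in a
// loop until the benchmarked function returns and stop is set:
//
//	go func() {
//		for atomic.LoadUint32(&stop) == 0 {
//			runtime.GC()
//		}
//		close(done)
//	}()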
755 func BenchmarkWriteBarrier(b *testing.B) {
756 if runtime.GOMAXPROCS(-1) < 2 {
757 // We don't want GC to take our time.
758 b.Skip("need GOMAXPROCS >= 2")
761 // Construct a large tree both so the GC runs for a while and
762 // so we have a data structure to manipulate the pointers of.
767 var mkTree func(level int) *node
768 mkTree = func(level int) *node {
772 n := &node{mkTree(level - 1), mkTree(level - 1)}
774 // Seed GC with enough early pointers so it
775 // doesn't start termination barriers when it
776 // only has the top of the tree.
777 wbRoots = append(wbRoots, n)
781 const depth = 22 // 64 MB
784 writeBarrierBenchmark(b, func() {
785 var stack [depth]*node
788 // There are two write barriers per iteration, so i+=2.
789 for i := 0; i < b.N; i += 2 {
795 // Perform one step of reversing the tree.
807 // Avoid non-preemptible loops (see issue #10958).
813 runtime.KeepAlive(wbRoots)
816 func BenchmarkBulkWriteBarrier(b *testing.B) {
817 if runtime.GOMAXPROCS(-1) < 2 {
818 // We don't want GC to take our time.
819 b.Skip("need GOMAXPROCS >= 2")
822 // Construct a large set of objects we can copy around.
823 const heapSize = 64 << 20
825 ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
826 for i := range ptrs {
830 writeBarrierBenchmark(b, func() {
831 const blockSize = 1024
833 for i := 0; i < b.N; i += blockSize {
835 block := ptrs[pos : pos+blockSize]
837 copy(block, block[1:])
838 block[blockSize-1] = first
841 if pos+blockSize > len(ptrs) {
849 runtime.KeepAlive(ptrs)
852 func BenchmarkScanStackNoLocals(b *testing.B) {
853 var ready sync.WaitGroup
854 teardown := make(chan bool)
855 for j := 0; j < 10; j++ {
859 countpwg(&x, &ready, teardown)
864 for i := 0; i < b.N; i++ {
873 func BenchmarkMSpanCountAlloc(b *testing.B) {
874 // Allocate one dummy mspan for the whole benchmark.
875 s := runtime.AllocMSpan()
876 defer runtime.FreeMSpan(s)
878 // n is the number of bytes to benchmark against.
879 // n must always be a multiple of 8, since gcBits is
880 // always rounded up to 8 bytes.
881 for _, n := range []int{8, 16, 32, 64, 128} {
882 b.Run(fmt.Sprintf("bits=%d", n*8), func(b *testing.B) {
883 // Initialize a new byte slice with pseudo-random data.
884 bits := make([]byte, n)
888 for i := 0; i < b.N; i++ {
889 runtime.MSpanCountAlloc(s, bits)
895 func countpwg(n *int, ready *sync.WaitGroup, teardown chan bool) {
902 countpwg(n, ready, teardown)
905 func TestMemoryLimit(t *testing.T) {
907 t.Skip("stress test that takes time to run")
909 if runtime.NumCPU() < 4 {
910 t.Skip("want at least 4 CPUs for this test")
912 got := runTestProg(t, "testprog", "GCMemoryLimit")
915 t.Fatalf("expected %q, but got %q", want, got)
919 func TestMemoryLimitNoGCPercent(t *testing.T) {
921 t.Skip("stress test that takes time to run")
923 if runtime.NumCPU() < 4 {
924 t.Skip("want at least 4 CPUs for this test")
926 got := runTestProg(t, "testprog", "GCMemoryLimitNoGCPercent")
929 t.Fatalf("expected %q, but got %q", want, got)
933 func TestMyGenericFunc(t *testing.T) {
934 runtime.MyGenericFunc[int]()