1 // Copyright 2012 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
18 _ "unsafe" // for go:linkname
21 // TestStackMem measures per-thread stack segment cache behavior.
22 // The test consumed up to 500MB in the past.
23 func TestStackMem(t *testing.T) {
33 defer GOMAXPROCS(GOMAXPROCS(BatchSize))
36 for b := 0; b < BatchCount; b++ {
37 c := make(chan bool, BatchSize)
38 for i := 0; i < BatchSize; i++ {
40 var f func(k int, a [ArraySize]byte)
41 f = func(k int, a [ArraySize]byte) {
43 time.Sleep(time.Millisecond)
48 f(RecursionDepth, [ArraySize]byte{})
52 for i := 0; i < BatchSize; i++ {
56 // The goroutines have signaled via c that they are ready to exit.
57 // Give them a chance to exit by sleeping. If we don't wait, we
58 // might not reuse them on the next batch.
59 time.Sleep(10 * time.Millisecond)
63 consumed := int64(s1.StackSys - s0.StackSys)
64 t.Logf("Consumed %vMB for stack mem", consumed>>20)
65 estimate := int64(8 * BatchSize * ArraySize * RecursionDepth) // 8 is to reduce flakiness.
66 if consumed > estimate {
67 t.Fatalf("Stack mem: want %v, got %v", estimate, consumed)
69 // Due to broken stack memory accounting (https://golang.org/issue/7468),
70 // StackInuse can decrease during function execution, so we cast the values to int64.
71 inuse := int64(s1.StackInuse) - int64(s0.StackInuse)
72 t.Logf("Inuse %vMB for stack mem", inuse>>20)
74 t.Fatalf("Stack inuse: want %v, got %v", 4<<20, inuse)
78 // Test stack growing in different contexts.
79 func TestStackGrowth(t *testing.T) {
86 // in a normal goroutine
87 var growDuration time.Duration // For debugging failures
93 growDuration = time.Since(start)
96 t.Log("first growStack took", growDuration)
98 // in locked goroutine
109 var finalizerStart time.Time
110 var started atomic.Bool
111 var progress atomic.Uint32
113 s := new(string) // Must be of a type that avoids the tiny allocator, or else the finalizer might not run.
114 SetFinalizer(s, func(ss *string) {
116 finalizerStart = time.Now()
120 setFinalizerTime := time.Now()
123 if d, ok := t.Deadline(); ok {
124 // Pad the timeout by an arbitrary 5% to give the AfterFunc time to run.
125 timeout := time.Until(d) * 19 / 20
126 timer := time.AfterFunc(timeout, func() {
127 // Panic — instead of calling t.Error and returning from the test — so
128 // that we get a useful goroutine dump if the test times out, especially
129 // if GOTRACEBACK=system or GOTRACEBACK=crash is set.
131 panic("finalizer did not start")
133 panic(fmt.Sprintf("finalizer started %s ago (%s after registration) and ran %d iterations, but did not return", time.Since(finalizerStart), finalizerStart.Sub(setFinalizerTime), progress.Load()))
141 t.Logf("finalizer started after %s and ran %d iterations in %v", finalizerStart.Sub(setFinalizerTime), progress.Load(), time.Since(finalizerStart))
149 func growStack(progress *atomic.Uint32) {
154 for i := 0; i < n; i++ {
158 panic("stack is corrupted")
161 progress.Store(uint32(i))
167 // This function is not an anonymous func, so that the compiler can do escape
168 // analysis and place x on the stack (and subsequently let stack growth update the pointer).
169 func growStackIter(p *int, n int) {
177 growStackIter(&x, n-1)
179 panic("stack is corrupted")
183 func TestStackGrowthCallback(t *testing.T) {
185 var wg sync.WaitGroup
187 // test stack growth at chan op
191 c := make(chan int, 1)
192 growStackWithCallback(func() {
198 // test stack growth at map op
202 m := make(map[int]int)
203 growStackWithCallback(func() {
209 // test stack growth at goroutine creation
213 growStackWithCallback(func() {
214 done := make(chan bool)
224 func growStackWithCallback(cb func()) {
233 for i := 0; i < 1<<10; i++ {
238 // TestDeferPtrs tests the adjustment of Defer's argument pointers (p aka &y)
239 // during a stack copy.
240 func set(p *int, x int) {
243 func TestDeferPtrs(t *testing.T) {
248 t.Errorf("defer's stack references were not adjusted appropriately")
255 type bigBuf [4 * 1024]byte
257 // TestDeferPtrsGoexit is like TestDeferPtrs but exercises the possibility that the
258 // stack grows as part of starting the deferred function. It calls Goexit at various
259 // stack depths, forcing the deferred function (with >4kB of args) to be run at
260 // the bottom of the stack. The goal is to find a stack depth less than 4kB from
261 // the end of the stack. Each trial runs in a different goroutine so that an earlier
262 // stack growth does not invalidate a later attempt.
263 func TestDeferPtrsGoexit(t *testing.T) {
264 for i := 0; i < 100; i++ {
265 c := make(chan int, 1)
266 go testDeferPtrsGoexit(c, i)
267 if n := <-c; n != 42 {
268 t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
273 func testDeferPtrsGoexit(c chan int, i int) {
278 defer setBig(&y, 42, bigBuf{})
279 useStackAndCall(i, Goexit)
282 func setBig(p *int, x int, b bigBuf) {
286 // TestDeferPtrsPanic is like TestDeferPtrsGoexit, but it's using panic instead
287 // of Goexit to run the Defers. Those two are different execution paths
289 func TestDeferPtrsPanic(t *testing.T) {
290 for i := 0; i < 100; i++ {
291 c := make(chan int, 1)
292 go testDeferPtrsGoexit(c, i)
293 if n := <-c; n != 42 {
294 t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
299 func testDeferPtrsPanic(c chan int, i int) {
302 if recover() == nil {
308 defer setBig(&y, 42, bigBuf{})
309 useStackAndCall(i, func() { panic(1) })
313 func testDeferLeafSigpanic1() {
314 // Cause a sigpanic to be injected in this frame.
316 // This function has to be declared before
317 // TestDeferLeafSigpanic so the runtime will crash if we think
318 // this function's continuation PC is in
319 // TestDeferLeafSigpanic.
323 // TestDeferLeafSigpanic tests defer matching around leaf functions
324 // that sigpanic. This is tricky because on LR machines the outer
325 // function and the inner function have the same SP, but it's critical
326 // that we match up the defer correctly to get the right liveness map.
328 func TestDeferLeafSigpanic(t *testing.T) {
329 // Push a defer that will walk the stack.
331 if err := recover(); err == nil {
332 t.Fatal("expected panic from nil pointer")
336 // Call a leaf function. We must set up the exact call stack:
338 // deferring function -> leaf function -> sigpanic
340 // On LR machines, the leaf function will have the same SP as
341 // the SP pushed for the defer frame.
342 testDeferLeafSigpanic1()
345 // TestPanicUseStack checks that a chain of Panic structs on the stack are
346 // updated correctly if the stack grows during the deferred execution that
347 // happens as a result of the panic.
348 func TestPanicUseStack(t *testing.T) {
349 pc := make([]uintptr, 10000)
352 Callers(0, pc) // force stack walk
353 useStackAndCall(100, func() {
356 Callers(0, pc) // force stack walk
357 useStackAndCall(200, func() {
360 Callers(0, pc) // force stack walk
371 func TestPanicFar(t *testing.T) {
373 pc := make([]uintptr, 10000)
375 // At this point we created a large stack and unwound
376 // it via recovery. Force a stack walk, which will
377 // check the stack's consistency.
383 useStackAndCall(100, func() {
384 // Kick off the GC and make it do something nontrivial.
385 // (This used to force stack barriers to stick around.)
387 // Give the GC time to start scanning stacks.
388 time.Sleep(time.Millisecond)
394 type xtreeNode struct {
398 func makeTree(d int) *xtreeNode {
400 return new(xtreeNode)
402 return &xtreeNode{makeTree(d - 1), makeTree(d - 1)}
405 // use about n KB of stack and call f
406 func useStackAndCall(n int, f func()) {
411 var b [1024]byte // makes frame about 1KB
412 useStackAndCall(n-1+int(b[99]), f)
415 func useStack(n int) {
416 useStackAndCall(n, func() {})
419 func growing(c chan int, done chan struct{}) {
427 func TestStackCache(t *testing.T) {
428 // Allocate a bunch of goroutines and grow their stacks.
429 // Repeat a few times to test the stack cache.
435 for i := 0; i < R; i++ {
436 var reqchans [G]chan int
437 done := make(chan struct{})
438 for j := 0; j < G; j++ {
439 reqchans[j] = make(chan int)
440 go growing(reqchans[j], done)
442 for s := 0; s < S; s++ {
443 for j := 0; j < G; j++ {
444 reqchans[j] <- 1 << uint(s)
446 for j := 0; j < G; j++ {
450 for j := 0; j < G; j++ {
453 for j := 0; j < G; j++ {
459 func TestStackOutput(t *testing.T) {
460 b := make([]byte, 1024)
461 stk := string(b[:Stack(b, false)])
462 if !strings.HasPrefix(stk, "goroutine ") {
463 t.Errorf("Stack (len %d):\n%s", len(stk), stk)
464 t.Errorf("Stack output should begin with \"goroutine \"")
468 func TestStackAllOutput(t *testing.T) {
469 b := make([]byte, 1024)
470 stk := string(b[:Stack(b, true)])
471 if !strings.HasPrefix(stk, "goroutine ") {
472 t.Errorf("Stack (len %d):\n%s", len(stk), stk)
473 t.Errorf("Stack output should begin with \"goroutine \"")
477 func TestStackPanic(t *testing.T) {
478 // Test that stack copying copies panics correctly. This is difficult
479 // to test because it is very unlikely that the stack will be copied
480 // in the middle of gopanic. But it can happen.
481 // To make this test effective, edit panic.go:gopanic and uncomment
482 // the GC() call just before freedefer(d).
484 if x := recover(); x == nil {
485 t.Errorf("recover failed")
492 func BenchmarkStackCopyPtr(b *testing.B) {
494 for i := 0; i < b.N; i++ {
504 func countp(n *int) {
512 func BenchmarkStackCopy(b *testing.B) {
514 for i := 0; i < b.N; i++ {
523 func count(n int) int {
527 return 1 + count(n-1)
530 func BenchmarkStackCopyNoCache(b *testing.B) {
532 for i := 0; i < b.N; i++ {
541 func count1(n int) int {
545 return 1 + count2(n-1)
// count2 through count23 form a mutually recursive chain of one-line
// functions: each call pushes one more distinct stack frame, and count23
// cycles back to count1 (whose base case is defined above, elided here).
// Spreading the recursion over many different functions means a deep
// stack contains many distinct frames — presumably so the stack-copy
// benchmarks exercise frame adjustment across varied PCs rather than one
// repeated frame; confirm against the elided benchmark bodies.
548 func count2(n int) int { return 1 + count3(n-1) }
549 func count3(n int) int { return 1 + count4(n-1) }
550 func count4(n int) int { return 1 + count5(n-1) }
551 func count5(n int) int { return 1 + count6(n-1) }
552 func count6(n int) int { return 1 + count7(n-1) }
553 func count7(n int) int { return 1 + count8(n-1) }
554 func count8(n int) int { return 1 + count9(n-1) }
555 func count9(n int) int { return 1 + count10(n-1) }
556 func count10(n int) int { return 1 + count11(n-1) }
557 func count11(n int) int { return 1 + count12(n-1) }
558 func count12(n int) int { return 1 + count13(n-1) }
559 func count13(n int) int { return 1 + count14(n-1) }
560 func count14(n int) int { return 1 + count15(n-1) }
561 func count15(n int) int { return 1 + count16(n-1) }
562 func count16(n int) int { return 1 + count17(n-1) }
563 func count17(n int) int { return 1 + count18(n-1) }
564 func count18(n int) int { return 1 + count19(n-1) }
565 func count19(n int) int { return 1 + count20(n-1) }
566 func count20(n int) int { return 1 + count21(n-1) }
567 func count21(n int) int { return 1 + count22(n-1) }
568 func count22(n int) int { return 1 + count23(n-1) }
569 func count23(n int) int { return 1 + count1(n-1) }
571 type stkobjT struct {
574 y [20]int // consume some stack
577 // Sum creates a linked list of stkobjTs.
578 func Sum(n int64, p *stkobjT) {
582 s := stkobjT{p: p, x: n}
587 func BenchmarkStackCopyWithStkobj(b *testing.B) {
589 for i := 0; i < b.N; i++ {
599 func BenchmarkIssue18138(b *testing.B) {
600 // Channel with N "can run a goroutine" tokens
602 c := make(chan []byte, N)
603 for i := 0; i < N; i++ {
607 for i := 0; i < b.N; i++ {
610 useStackPtrs(1000, false) // uses ~1MB max
611 m := make([]byte, 8192) // make GC trigger occasionally
612 c <- m // return token
617 func useStackPtrs(n int, b bool) {
619 // This code contributes to the stack frame size, and hence to the
620 // stack copying cost. But since b is always false, it costs no
621 // execution time (not even the zeroing of a).
622 var a [128]*int // 1KB of pointers
// structWithMethod is an empty receiver type whose methods (caller,
// callers, stack, nop, inlinablePanic) are taken as method-value
// expressions like (*structWithMethod).caller in the TestStackWrapper*
// tests below, forcing the compiler to construct autogenerated wrapper
// methods whose effect on stack traces is then checked.
632 type structWithMethod struct{}
634 func (s structWithMethod) caller() string {
635 _, file, line, ok := Caller(1)
637 panic("Caller failed")
639 return fmt.Sprintf("%s:%d", file, line)
642 func (s structWithMethod) callers() []uintptr {
643 pc := make([]uintptr, 16)
644 return pc[:Callers(0, pc)]
647 func (s structWithMethod) stack() string {
648 buf := make([]byte, 4<<10)
649 return string(buf[:Stack(buf, false)])
// nop does nothing; TestStackWrapperStackPanic invokes its autogenerated
// pointer wrapper on a nil receiver so that the wrapper calls panicwrap.
652 func (s structWithMethod) nop() {}
// inlinablePanic is kept trivial so the compiler inlines it.
// TestStackWrapperStackInlinePanic self-checks that the inlining happened
// and uses it to build a wrapper -> inlined function -> panic stack.
654 func (s structWithMethod) inlinablePanic() { panic("panic") }
656 func TestStackWrapperCaller(t *testing.T) {
657 var d structWithMethod
658 // Force the compiler to construct a wrapper method.
659 wrapper := (*structWithMethod).caller
660 // Check that the wrapper doesn't affect the stack trace.
661 if dc, ic := d.caller(), wrapper(&d); dc != ic {
662 t.Fatalf("direct caller %q != indirect caller %q", dc, ic)
666 func TestStackWrapperCallers(t *testing.T) {
667 var d structWithMethod
668 wrapper := (*structWithMethod).callers
669 // Check that <autogenerated> doesn't appear in the stack trace.
671 frames := CallersFrames(pcs)
673 fr, more := frames.Next()
674 if fr.File == "<autogenerated>" {
675 t.Fatalf("<autogenerated> appears in stack trace: %+v", fr)
683 func TestStackWrapperStack(t *testing.T) {
684 var d structWithMethod
685 wrapper := (*structWithMethod).stack
686 // Check that <autogenerated> doesn't appear in the stack trace.
688 if strings.Contains(stk, "<autogenerated>") {
689 t.Fatalf("<autogenerated> appears in stack trace:\n%s", stk)
693 func TestStackWrapperStackInlinePanic(t *testing.T) {
694 // Test that inline unwinding correctly tracks the callee by creating a
695 // stack of the form wrapper -> inlined function -> panic. If we mess up
696 // callee tracking, it will look like the wrapper called panic and we'll see
697 // the wrapper in the stack trace.
698 var d structWithMethod
699 wrapper := (*structWithMethod).inlinablePanic
703 t.Fatalf("expected panic")
705 buf := make([]byte, 4<<10)
706 stk := string(buf[:Stack(buf, false)])
707 if strings.Contains(stk, "<autogenerated>") {
708 t.Fatalf("<autogenerated> appears in stack trace:\n%s", stk)
710 // Self-check: make sure inlinablePanic got inlined.
711 if !testenv.OptimizationOff() {
712 if !strings.Contains(stk, "inlinablePanic(...)") {
713 t.Fatalf("inlinablePanic not inlined")
724 func TestStackWrapperStackPanic(t *testing.T) {
725 t.Run("sigpanic", func(t *testing.T) {
726 // nil calls to interface methods cause a sigpanic.
727 testStackWrapperPanic(t, func() { I.M(nil) }, "runtime_test.I.M")
729 t.Run("panicwrap", func(t *testing.T) {
730 // Nil calls to value method wrappers call panicwrap.
731 wrapper := (*structWithMethod).nop
732 testStackWrapperPanic(t, func() { wrapper(nil) }, "runtime_test.(*structWithMethod).nop")
736 func testStackWrapperPanic(t *testing.T, cb func(), expect string) {
737 // Test that the stack trace from a panicking wrapper includes
738 // the wrapper, even though we elide these when they don't panic.
739 t.Run("CallersFrames", func(t *testing.T) {
743 t.Fatalf("expected panic")
745 pcs := make([]uintptr, 10)
747 frames := CallersFrames(pcs[:n])
749 frame, more := frames.Next()
750 t.Log(frame.Function)
751 if frame.Function == expect {
758 t.Fatalf("panicking wrapper %s missing from stack trace", expect)
762 t.Run("Stack", func(t *testing.T) {
766 t.Fatalf("expected panic")
768 buf := make([]byte, 4<<10)
769 stk := string(buf[:Stack(buf, false)])
770 if !strings.Contains(stk, "\n"+expect) {
771 t.Fatalf("panicking wrapper %s missing from stack trace:\n%s", expect, stk)
778 func TestCallersFromWrapper(t *testing.T) {
779 // Test that invoking CallersFrames on a stack where the first
780 // PC is an autogenerated wrapper keeps the wrapper in the
781 // trace. Normally we elide these, assuming that the wrapper
782 // calls the thing you actually wanted to see, but in this
783 // case we need to keep it.
784 pc := reflect.ValueOf(I.M).Pointer()
785 frames := CallersFrames([]uintptr{pc})
786 frame, more := frames.Next()
787 if frame.Function != "runtime_test.I.M" {
788 t.Fatalf("want function %s, got %s", "runtime_test.I.M", frame.Function)
791 t.Fatalf("want 1 frame, got > 1")
795 func TestTracebackSystemstack(t *testing.T) {
796 if GOARCH == "ppc64" || GOARCH == "ppc64le" {
797 t.Skip("systemstack tail call not implemented on ppc64x")
800 // Test that profiles correctly jump over systemstack,
801 // including nested systemstack calls.
802 pcs := make([]uintptr, 20)
803 pcs = pcs[:TracebackSystemstack(pcs, 5)]
804 // Check that runtime.TracebackSystemstack appears five times
805 // and that we see TestTracebackSystemstack.
806 countIn, countOut := 0, 0
807 frames := CallersFrames(pcs)
808 var tb strings.Builder
810 frame, more := frames.Next()
811 fmt.Fprintf(&tb, "\n%s+0x%x %s:%d", frame.Function, frame.PC-frame.Entry, frame.File, frame.Line)
812 switch frame.Function {
813 case "runtime.TracebackSystemstack":
815 case "runtime_test.TestTracebackSystemstack":
822 if countIn != 5 || countOut != 1 {
823 t.Fatalf("expected 5 calls to TracebackSystemstack and 1 call to TestTracebackSystemstack, got:%s", tb.String())
827 func TestTracebackAncestors(t *testing.T) {
828 goroutineRegex := regexp.MustCompile(`goroutine [0-9]+ \[`)
829 for _, tracebackDepth := range []int{0, 1, 5, 50} {
830 output := runTestProg(t, "testprog", "TracebackAncestors", fmt.Sprintf("GODEBUG=tracebackancestors=%d", tracebackDepth))
834 ancestorsExpected := numGoroutines
835 if numGoroutines > tracebackDepth {
836 ancestorsExpected = tracebackDepth
839 matches := goroutineRegex.FindAllStringSubmatch(output, -1)
840 if len(matches) != 2 {
841 t.Fatalf("want 2 goroutines, got:\n%s", output)
844 // Check functions in the traceback.
845 fns := []string{"main.recurseThenCallGo", "main.main", "main.printStack", "main.TracebackAncestors"}
846 for _, fn := range fns {
847 if !strings.Contains(output, "\n"+fn+"(") {
848 t.Fatalf("expected %q function in traceback:\n%s", fn, output)
852 if want, count := "originating from goroutine", ancestorsExpected; strings.Count(output, want) != count {
853 t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
856 if want, count := "main.recurseThenCallGo(...)", ancestorsExpected*(numFrames+1); strings.Count(output, want) != count {
857 t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
860 if want, count := "main.recurseThenCallGo(0x", 1; strings.Count(output, want) != count {
861 t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
866 // Test that defer closure is correctly scanned when the stack is scanned.
867 func TestDeferLiveness(t *testing.T) {
868 output := runTestProg(t, "testprog", "DeferLiveness", "GODEBUG=clobberfree=1")
870 t.Errorf("output:\n%s\n\nwant no output", output)
874 func TestDeferHeapAndStack(t *testing.T) {
876 N := 10000 //iterations
877 D := 200 // stack depth
885 for p := 0; p < P; p++ {
887 for i := 0; i < N; i++ {
888 if deferHeapAndStack(D) != 2*D {
895 for p := 0; p < P; p++ {
900 // deferHeapAndStack(n) computes 2*n
901 func deferHeapAndStack(n int) (r int) {
906 // heap-allocated defers
907 for i := 0; i < 2; i++ {
913 // stack-allocated defers
921 r = deferHeapAndStack(n - 1)
922 escapeMe(new([1024]byte)) // force some GCs
926 // Pass a value to escapeMe to force it to escape.
927 var escapeMe = func(x any) {}
929 func TestFramePointerAdjust(t *testing.T) {
931 case "amd64", "arm64":
933 t.Skipf("frame pointer is not supported on %s", GOARCH)
935 output := runTestProg(t, "testprog", "FramePointerAdjust")
937 t.Errorf("output:\n%s\n\nwant no output", output)
941 // TestSystemstackFramePointerAdjust is a regression test for issue 59692 that
942 // ensures that the frame pointer of systemstack is correctly adjusted. See CL
943 // 489015 for more details.
944 func TestSystemstackFramePointerAdjust(t *testing.T) {
945 growAndShrinkStack(512, [1024]byte{})
948 // growAndShrinkStack grows the stack of the current goroutine in order to
949 // shrink it again and verify that all frame pointers on the new stack have
950 // been correctly adjusted. stackBallast is used to ensure we're not depending
951 // on the current heuristics of stack shrinking too much.
952 func growAndShrinkStack(n int, stackBallast [1024]byte) {
956 growAndShrinkStack(n-1, stackBallast)
957 ShrinkStackAndVerifyFramePointers()