// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"internal/race"
	"internal/testenv"
	"math"
	"net"
	"runtime"
	"runtime/debug"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"testing"
	"time"
)

var stop = make(chan bool, 1)

func perpetuumMobile() {
	select {
	case <-stop:
	default:
		go perpetuumMobile()
	}
}

func TestStopTheWorldDeadlock(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}
	if testing.Short() {
		t.Skip("skipping during short test")
	}
	maxprocs := runtime.GOMAXPROCS(3)
	compl := make(chan bool, 2)
	go func() {
		for i := 0; i < 1000; i++ {
			runtime.GC()
		}
		compl <- true
	}()
	go func() {
		for i := 0; i < 1000; i++ {
			runtime.GOMAXPROCS(3)
		}
		compl <- true
	}()
	go perpetuumMobile()
	<-compl
	<-compl
	stop <- true
	runtime.GOMAXPROCS(maxprocs)
}

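// Later tests in this file shorten the explicit save/restore of GOMAXPROCS
// above to a single line. The idiom works because runtime.GOMAXPROCS(n) sets
// the new value and returns the previous one, so the inner call runs
// immediately and the deferred outer call restores the old setting on
// return. A minimal sketch (withGOMAXPROCS is illustrative, not part of this
// file):
//
//	func withGOMAXPROCS(n int, f func()) {
//		defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(n)) // set now, restore on return
//		f()
//	}
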
func TestYieldProgress(t *testing.T) {
	testYieldProgress(false)
}

func TestYieldLockedProgress(t *testing.T) {
	testYieldProgress(true)
}

func testYieldProgress(locked bool) {
	c := make(chan bool)
	cack := make(chan bool)
	go func() {
		if locked {
			runtime.LockOSThread()
		}
		for {
			select {
			case <-c:
				cack <- true
				return
			default:
				runtime.Gosched()
			}
		}
	}()
	time.Sleep(10 * time.Millisecond)
	c <- true
	<-cack
}

func TestYieldLocked(t *testing.T) {
	const N = 10
	c := make(chan bool)
	go func() {
		runtime.LockOSThread()
		for i := 0; i < N; i++ {
			runtime.Gosched()
			time.Sleep(time.Millisecond)
		}
		c <- true
		// runtime.UnlockOSThread() is deliberately omitted
	}()
	<-c
}

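// Exiting while still locked is safe here: when a goroutine that is locked
// to its OS thread returns without calling runtime.UnlockOSThread, the
// runtime terminates that thread instead of returning it to the thread pool
// (see the runtime.LockOSThread documentation). The omission is the point of
// the test.
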
func TestGoroutineParallelism(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		P = 3
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	// SetGCPercent waits until the mark phase is over, but the runtime
	// also preempts at the start of the sweep phase, so make sure that's
	// done too. See #45867.
	runtime.GC()
	for try := 0; try < N; try++ {
		done := make(chan bool)
		x := uint32(0)
		for p := 0; p < P; p++ {
			// Test that all P goroutines are scheduled at the same time
			go func(p int) {
				for i := 0; i < 3; i++ {
					expected := uint32(P*i + p)
					for atomic.LoadUint32(&x) != expected {
					}
					atomic.StoreUint32(&x, expected+1)
				}
				done <- true
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}

// Test that all runnable goroutines are scheduled at the same time.
func TestGoroutineParallelism2(t *testing.T) {
	//testGoroutineParallelism2(t, false, false)
	testGoroutineParallelism2(t, true, false)
	testGoroutineParallelism2(t, false, true)
	testGoroutineParallelism2(t, true, true)
}

func testGoroutineParallelism2(t *testing.T, load, netpoll bool) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	// SetGCPercent waits until the mark phase is over, but the runtime
	// also preempts at the start of the sweep phase, so make sure that's
	// done too. See #45867.
	runtime.GC()
	for try := 0; try < N; try++ {
		if load {
			// Create P goroutines and wait until they all run.
			// When we run the actual test below, worker threads
			// running the goroutines will start parking.
			done := make(chan bool)
			x := uint32(0)
			for p := 0; p < P; p++ {
				go func() {
					if atomic.AddUint32(&x, 1) == uint32(P) {
						done <- true
						return
					}
					for atomic.LoadUint32(&x) != uint32(P) {
					}
				}()
			}
			<-done
		}
		if netpoll {
			// Enable netpoller, affects scheduler behavior.
			laddr := "localhost:0"
			if runtime.GOOS == "android" {
				// On some Android devices, there are no records for localhost,
				// see https://golang.org/issues/14486.
				// Don't use 127.0.0.1 for every case, it won't work on IPv6-only systems.
				laddr = "127.0.0.1:0"
			}
			ln, err := net.Listen("tcp", laddr)
			if err == nil {
				defer ln.Close() // yup, defer in a loop
			}
		}
		done := make(chan bool)
		x := uint32(0)
		// Spawn P goroutines in a nested fashion just to differ from TestGoroutineParallelism.
		for p := 0; p < P/2; p++ {
			go func(p int) {
				for p2 := 0; p2 < 2; p2++ {
					go func(p2 int) {
						for i := 0; i < 3; i++ {
							expected := uint32(P*i + p*2 + p2)
							for atomic.LoadUint32(&x) != expected {
							}
							atomic.StoreUint32(&x, expected+1)
						}
						done <- true
					}(p2)
				}
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}

func TestBlockLocked(t *testing.T) {
	const N = 10
	c := make(chan bool, N)
	go func() {
		runtime.LockOSThread()
		for i := 0; i < N; i++ {
			c <- true
		}
		runtime.UnlockOSThread()
	}()
	for i := 0; i < N; i++ {
		<-c
	}
}

func TestTimerFairness(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	done := make(chan bool)
	c := make(chan bool)
	for i := 0; i < 2; i++ {
		go func() {
			for {
				select {
				case c <- true:
				case <-done:
					return
				}
			}
		}()
	}

	timer := time.After(20 * time.Millisecond)
	for {
		select {
		case <-c:
		case <-timer:
			close(done)
			return
		}
	}
}

func TestTimerFairness2(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	done := make(chan bool)
	c := make(chan bool)
	for i := 0; i < 2; i++ {
		go func() {
			timer := time.After(20 * time.Millisecond)
			var buf [1]byte
			for {
				syscall.Read(0, buf[0:0])
				select {
				case c <- true:
				case <-c:
				case <-timer:
					done <- true
					return
				}
			}
		}()
	}
	<-done
	<-done
}

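// The zero-length read in TestTimerFairness2 is presumably just a cheap way
// to enter and leave a real system call on every iteration, so the two
// goroutines contend for the timer through the syscall path as well as the
// channel path.
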
// The function is used to test preemption at split stack checks.
// Declaring a var avoids inlining at the call site.
var preempt = func() int {
	var a [128]int
	sum := 0
	for _, v := range a {
		sum += v
	}
	return sum
}

func TestPreemption(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	// Test that goroutines are preempted at function calls.
	N := 5
	if testing.Short() {
		N = 2
	}
	c := make(chan bool)
	var x uint32
	for g := 0; g < 2; g++ {
		go func(g int) {
			for i := 0; i < N; i++ {
				for atomic.LoadUint32(&x) != uint32(g) {
					preempt()
				}
				atomic.StoreUint32(&x, uint32(1-g))
			}
			c <- true
		}(g)
	}
	<-c
	<-c
}

func TestPreemptionGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	// Test that pending GC preempts running goroutines.
	P := 5
	N := 10
	if testing.Short() {
		P = 3
		N = 2
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P + 1))
	var stop uint32 // local flag; shadows the package-level stop channel
	for i := 0; i < P; i++ {
		go func() {
			for atomic.LoadUint32(&stop) == 0 {
				preempt()
			}
		}()
	}
	for i := 0; i < N; i++ {
		runtime.Gosched()
		runtime.GC()
	}
	atomic.StoreUint32(&stop, 1)
}

func TestAsyncPreempt(t *testing.T) {
	if !runtime.PreemptMSupported {
		t.Skip("asynchronous preemption not supported on this platform")
	}
	output := runTestProg(t, "testprog", "AsyncPreempt")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

func TestGCFairness(t *testing.T) {
	output := runTestProg(t, "testprog", "GCFairness")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

func TestGCFairness2(t *testing.T) {
	output := runTestProg(t, "testprog", "GCFairness2")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

func TestNumGoroutine(t *testing.T) {
	output := runTestProg(t, "testprog", "NumGoroutine")
	want := "1\n"
	if output != want {
		t.Fatalf("want %q, got %q", want, output)
	}

	buf := make([]byte, 1<<20)

	// Try up to 10 times for a match before giving up.
	// This is a fundamentally racy check but it's important
	// to notice if NumGoroutine and Stack are _always_ out of sync.
	for i := 0; ; i++ {
		// Give goroutines about to exit a chance to exit.
		// The NumGoroutine and Stack below need to see
		// the same state of the world, so anything we can do
		// to keep it quiet is good.
		runtime.Gosched()

		n := runtime.NumGoroutine()
		buf = buf[:runtime.Stack(buf, true)]

		// To avoid double-counting "goroutine" in "goroutine $m [running]:"
		// and "created by $func in goroutine $n", remove the latter
		output := strings.ReplaceAll(string(buf), "in goroutine", "")
		nstk := strings.Count(output, "goroutine ")
		if n == nstk {
			break
		}
		if i >= 10 {
			t.Fatalf("NumGoroutine=%d, but found %d goroutines in stack dump: %s", n, nstk, buf)
		}
	}
}

func TestPingPongHog(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	if race.Enabled {
		// The race detector randomizes the scheduler,
		// which causes this test to fail (#38266).
		t.Skip("skipping in -race mode")
	}

	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
	done := make(chan bool)
	hogChan, lightChan := make(chan bool), make(chan bool)
	hogCount, lightCount := 0, 0

	run := func(limit int, counter *int, wake chan bool) {
		for {
			select {
			case <-done:
				return

			case <-wake:
				for i := 0; i < limit; i++ {
					*counter++
				}
				wake <- true
			}
		}
	}

	// Start two co-scheduled hog goroutines.
	for i := 0; i < 2; i++ {
		go run(1e6, &hogCount, hogChan)
	}

	// Start two co-scheduled light goroutines.
	for i := 0; i < 2; i++ {
		go run(1e3, &lightCount, lightChan)
	}

	// Start goroutine pairs and wait for a few preemption rounds.
	hogChan <- true
	lightChan <- true
	time.Sleep(100 * time.Millisecond)
	close(done)
	<-hogChan
	<-lightChan

	// Check that hogCount and lightCount are within a factor of
	// 20, which indicates that both pairs of goroutines handed off
	// the P within a time-slice to their buddy. We can use a
	// fairly large factor here to make this robust: if the
	// scheduler isn't working right, the gap should be ~1000X
	// (was 5, increased to 20, see issue 52207).
	const factor = 20
	if hogCount/factor > lightCount || lightCount/factor > hogCount {
		t.Fatalf("want hogCount/lightCount in [%v, %v]; got %d/%d = %g", 1.0/factor, factor, hogCount, lightCount, float64(hogCount)/float64(lightCount))
	}
}

func BenchmarkPingPongHog(b *testing.B) {
	if b.N == 0 {
		return
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	// Create a CPU hog
	stop, done := make(chan bool), make(chan bool)
	go func() {
		for {
			select {
			case <-stop:
				done <- true
				return
			default:
			}
		}
	}()

	// Ping-pong b.N times
	ping, pong := make(chan bool), make(chan bool)
	go func() {
		for j := 0; j < b.N; j++ {
			pong <- <-ping
		}
		close(stop)
		done <- true
	}()
	go func() {
		for i := 0; i < b.N; i++ {
			ping <- <-pong
		}
		done <- true
	}()
	b.ResetTimer()
	ping <- true // Start ping-pong
	<-stop
	b.StopTimer()
	<-ping // Let last ponger exit
	<-done // Make sure goroutines exit
	<-done
	<-done
}

var padData [128]uint64

func stackGrowthRecursive(i int) {
	var pad [128]uint64
	pad = padData
	for j := range pad {
		if pad[j] != 0 {
			return
		}
	}
	if i != 0 {
		stackGrowthRecursive(i - 1)
	}
}

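// Each call to stackGrowthRecursive has a frame holding pad, i.e. 128*8 =
// 1 KiB, so the recursion depth controls roughly how much stack the call
// chain forces the runtime to grow; copying padData in keeps the compiler
// from eliminating the otherwise-dead array.
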
func TestPreemptSplitBig(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	stop := make(chan int)
	go big(stop)
	for i := 0; i < 3; i++ {
		time.Sleep(10 * time.Microsecond) // let big start running
		runtime.GC()
	}
	close(stop)
}

func big(stop chan int) int {
	n := 0
	for {
		// delay so that gc is sure to have asked for a preemption
		for i := 0; i < 1e9; i++ {
			n++
		}

		// call bigframe, which used to miss the preemption in its prologue.
		n += bigframe(stop)

		// check if we've been asked to stop.
		select {
		case <-stop:
			return n
		default:
		}
	}
}

func bigframe(stop chan int) int {
	// not splitting the stack will overflow.
	// small will notice that it needs a stack split and will
	// catch the overflow.
	var x [8192]byte
	return small(stop, &x)
}

func small(stop chan int, x *[8192]byte) int {
	for i := range x {
		x[i] = byte(i)
	}
	sum := 0
	for i := range x {
		sum += int(x[i])
	}

	// keep small from being a leaf function, which might
	// make it not do any stack check at all.
	nonleaf(stop)

	return sum
}

func nonleaf(stop chan int) bool {
	// do something that won't be inlined:
	select {
	case <-stop:
		return true
	default:
		return false
	}
}

func TestSchedLocalQueue(t *testing.T) {
	runtime.RunSchedLocalQueueTest()
}

func TestSchedLocalQueueSteal(t *testing.T) {
	runtime.RunSchedLocalQueueStealTest()
}

func TestSchedLocalQueueEmpty(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long and does not trigger the race.
		t.Skip("skipping on uniprocessor")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))

	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted during spin wait.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	// SetGCPercent waits until the mark phase is over, but the runtime
	// also preempts at the start of the sweep phase, so make sure that's
	// done too. See #45867.
	runtime.GC()

	iters := int(1e5)
	if testing.Short() {
		iters = 1e2
	}
	runtime.RunSchedLocalQueueEmptyTest(iters)
}

func benchmarkStackGrowth(b *testing.B, rec int) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			stackGrowthRecursive(rec)
		}
	})
}

func BenchmarkStackGrowth(b *testing.B) {
	benchmarkStackGrowth(b, 10)
}

func BenchmarkStackGrowthDeep(b *testing.B) {
	benchmarkStackGrowth(b, 1024)
}

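// With the ~1 KiB frame of stackGrowthRecursive, the Deep variant grows each
// goroutine's stack by roughly a megabyte, so it stresses repeated stack
// growth and copying rather than a single split.
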
func BenchmarkCreateGoroutines(b *testing.B) {
	benchmarkCreateGoroutines(b, 1)
}

func BenchmarkCreateGoroutinesParallel(b *testing.B) {
	benchmarkCreateGoroutines(b, runtime.GOMAXPROCS(-1))
}

func benchmarkCreateGoroutines(b *testing.B, procs int) {
	c := make(chan bool)
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			c <- true
			return
		}
		go f(n - 1)
	}
	for i := 0; i < procs; i++ {
		go f(b.N / procs)
	}
	for i := 0; i < procs; i++ {
		<-c
	}
}

func BenchmarkCreateGoroutinesCapture(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		const N = 4
		var wg sync.WaitGroup
		wg.Add(N)
		for i := 0; i < N; i++ {
			i := i
			go func() {
				if i >= N {
					b.Logf("bad") // just to capture b
				}
				wg.Done()
			}()
		}
		wg.Wait()
	}
}

// warmupScheduler ensures the scheduler has at least targetThreadCount threads
// in its thread pool.
func warmupScheduler(targetThreadCount int) {
	var wg sync.WaitGroup
	var count int32
	for i := 0; i < targetThreadCount; i++ {
		wg.Add(1)
		go func() {
			atomic.AddInt32(&count, 1)
			for atomic.LoadInt32(&count) < int32(targetThreadCount) {
				// spin until all threads started
			}
			// spin a bit more to ensure they are all running on separate CPUs.
			doWork(time.Millisecond)
			wg.Done()
		}()
	}
	wg.Wait()
}

func doWork(dur time.Duration) {
	start := time.Now()
	for time.Since(start) < dur {
	}
}

// BenchmarkCreateGoroutinesSingle creates many goroutines, all from a single
// producer (the main benchmark goroutine).
//
// Compared to BenchmarkCreateGoroutines, this causes different behavior in the
// scheduler because Ms are much more likely to need to steal work from the
// main P rather than having work in the local run queue.
func BenchmarkCreateGoroutinesSingle(b *testing.B) {
	// Since we are interested in stealing behavior, warm the scheduler to
	// get all the Ps running first.
	warmupScheduler(runtime.GOMAXPROCS(0))

	var wg sync.WaitGroup
	wg.Add(b.N)
	for i := 0; i < b.N; i++ {
		go func() {
			wg.Done()
		}()
	}
	wg.Wait()
}

func BenchmarkClosureCall(b *testing.B) {
	sum := 0
	off1, off2 := 100, 200 // illustrative values; the closure below captures them
	for i := 0; i < b.N; i++ {
		// call a closure that captures locals from the enclosing frame
		func() {
			sum += i + off1 + off2
		}()
	}
	_ = sum
}

func benchmarkWakeupParallel(b *testing.B, spin func(time.Duration)) {
	if runtime.GOMAXPROCS(0) == 1 {
		b.Skip("skipping: GOMAXPROCS=1")
	}

	wakeDelay := 5 * time.Microsecond
	for _, delay := range []time.Duration{
		0,
		1 * time.Microsecond,
		2 * time.Microsecond,
		5 * time.Microsecond,
		10 * time.Microsecond,
		20 * time.Microsecond,
		50 * time.Microsecond,
		100 * time.Microsecond,
	} {
		b.Run(delay.String(), func(b *testing.B) {
			// Start two goroutines, which alternate between being
			// sender and receiver in the following protocol:
			//
			// - The receiver spins for `delay` and then does a
			// blocking receive on a channel.
			//
			// - The sender spins for `delay+wakeDelay` and then
			// sends to the same channel. (The addition of
			// `wakeDelay` improves the probability that the
			// receiver will be blocking when the send occurs when
			// the goroutines execute in parallel.)
			//
			// In each iteration of the benchmark, each goroutine
			// acts once as sender and once as receiver, so each
			// goroutine spins for delay twice.
			//
			// BenchmarkWakeupParallel is used to estimate how
			// efficiently the scheduler parallelizes goroutines in
			// the presence of blocking:
			//
			// - If both goroutines are executed on the same core,
			// an increase in delay by N will increase the time per
			// iteration by 4*N, because all 4 delays are
			// serialized.
			//
			// - Otherwise, an increase in delay by N will increase
			// the time per iteration by 2*N, and the time per
			// iteration is 2 * (runtime overhead + chan
			// send/receive pair + delay + wakeDelay). This allows
			// the runtime overhead, including the time it takes
			// for the unblocked goroutine to be scheduled, to be
			// estimated.
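			//
			// For example, at delay = 10µs with wakeDelay = 5µs:
			// serialized on one core an iteration spins for
			// 2*delay + 2*(delay+wakeDelay) = 50µs, while with the
			// goroutines running in parallel the two roles overlap
			// and an iteration spins for roughly
			// 2*(delay+wakeDelay) = 30µs plus scheduling overhead.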
			ping, pong := make(chan struct{}), make(chan struct{})
			start := make(chan struct{})
			done := make(chan struct{})
			go func() {
				<-start
				for i := 0; i < b.N; i++ {
					// sender
					spin(delay + wakeDelay)
					ping <- struct{}{}
					// receiver
					spin(delay)
					<-pong
				}
				done <- struct{}{}
			}()
			go func() {
				<-start
				for i := 0; i < b.N; i++ {
					// receiver
					spin(delay)
					<-ping
					// sender
					spin(delay + wakeDelay)
					pong <- struct{}{}
				}
				done <- struct{}{}
			}()
			b.ResetTimer()
			close(start)
			<-done
			<-done
		})
	}
}

func BenchmarkWakeupParallelSpinning(b *testing.B) {
	benchmarkWakeupParallel(b, func(d time.Duration) {
		end := time.Now().Add(d)
		for time.Now().Before(end) {
			// do nothing
		}
	})
}

// sysNanosleep is defined by OS-specific files (such as runtime_linux_test.go)
// to sleep for the given duration. If nil, dependent tests are skipped.
// The implementation should invoke a blocking system call and not
// call time.Sleep, which would deschedule the goroutine.
var sysNanosleep func(d time.Duration)

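// On Linux, for instance, an OS-specific file could provide roughly the
// following (a sketch of the idea, not necessarily the exact definition in
// runtime_linux_test.go):
//
//	func init() {
//		sysNanosleep = func(d time.Duration) {
//			// Invoke the nanosleep system call directly, bypassing
//			// the runtime's timer machinery, and retry on EINTR.
//			ts := syscall.NsecToTimespec(d.Nanoseconds())
//			for {
//				if err := syscall.Nanosleep(&ts, &ts); err != syscall.EINTR {
//					return
//				}
//			}
//		}
//	}
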
func BenchmarkWakeupParallelSyscall(b *testing.B) {
	if sysNanosleep == nil {
		b.Skipf("skipping on %v; sysNanosleep not defined", runtime.GOOS)
	}
	benchmarkWakeupParallel(b, func(d time.Duration) {
		sysNanosleep(d)
	})
}

type Matrix [][]float64

func BenchmarkMatmult(b *testing.B) {
	b.StopTimer()
	// matmult is O(N**3) but testing expects O(b.N),
	// so we need to take cube root of b.N
	n := int(math.Cbrt(float64(b.N))) + 1
	A := makeMatrix(n)
	B := makeMatrix(n)
	C := makeMatrix(n)
	b.StartTimer()
	matmult(nil, A, B, C, 0, n, 0, n, 0, n, 8)
}

func makeMatrix(n int) Matrix {
	m := make(Matrix, n)
	for i := 0; i < n; i++ {
		m[i] = make([]float64, n)
		for j := 0; j < n; j++ {
			m[i][j] = float64(i*n + j)
		}
	}
	return m
}

func matmult(done chan<- struct{}, A, B, C Matrix, i0, i1, j0, j1, k0, k1, threshold int) {
	di := i1 - i0
	dj := j1 - j0
	dk := k1 - k0
	if di >= dj && di >= dk && di >= threshold {
		// divide in two by y axis
		mi := (i0 + i1) / 2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, mi, j0, j1, k0, k1, threshold)
		matmult(nil, A, B, C, mi, i1, j0, j1, k0, k1, threshold)
		<-done1
	} else if dj >= dk && dj >= threshold {
		// divide in two by x axis
		mj := (j0 + j1) / 2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, i1, j0, mj, k0, k1, threshold)
		matmult(nil, A, B, C, i0, i1, mj, j1, k0, k1, threshold)
		<-done1
	} else if dk >= threshold {
		// divide in two by "k" axis
		// deliberately not parallel because of data races
		mk := (k0 + k1) / 2
		matmult(nil, A, B, C, i0, i1, j0, j1, k0, mk, threshold)
		matmult(nil, A, B, C, i0, i1, j0, j1, mk, k1, threshold)
	} else {
		// the matrices are small enough, compute directly
		for i := i0; i < i1; i++ {
			for j := j0; j < j1; j++ {
				for k := k0; k < k1; k++ {
					C[i][j] += A[i][k] * B[k][j]
				}
			}
		}
	}
	if done != nil {
		done <- struct{}{}
	}
}

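// The i- and j-axis splits may run in parallel because the two halves write
// disjoint blocks of C. The k-axis halves both accumulate into the same
// C[i][j] entries, so running them concurrently would race, which is why
// that branch recurses sequentially.
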
func TestStealOrder(t *testing.T) {
	runtime.RunStealOrderTest()
}

func TestLockOSThreadNesting(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no threads on wasm yet")
	}

	go func() {
		e, i := runtime.LockOSCounts()
		if e != 0 || i != 0 {
			t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
			return
		}
		runtime.LockOSThread()
		runtime.LockOSThread()
		runtime.UnlockOSThread()
		e, i = runtime.LockOSCounts()
		if e != 1 || i != 0 {
			t.Errorf("want locked counts 1, 0; got %d, %d", e, i)
			return
		}
		runtime.UnlockOSThread()
		e, i = runtime.LockOSCounts()
		if e != 0 || i != 0 {
			t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
			return
		}
	}()
}

func TestLockOSThreadExit(t *testing.T) {
	testLockOSThreadExit(t, "testprog")
}

func testLockOSThreadExit(t *testing.T, prog string) {
	output := runTestProg(t, prog, "LockOSThreadMain", "GOMAXPROCS=1")
	want := "OK\n"
	if output != want {
		t.Errorf("want %q, got %q", want, output)
	}

	output = runTestProg(t, prog, "LockOSThreadAlt")
	if output != want {
		t.Errorf("want %q, got %q", want, output)
	}
}

func TestLockOSThreadAvoidsStatePropagation(t *testing.T) {
	want := "OK\n"
	skip := "unshare not permitted\n"
	output := runTestProg(t, "testprog", "LockOSThreadAvoidsStatePropagation", "GOMAXPROCS=1")
	if output == skip {
		t.Skip("unshare syscall not permitted on this system")
	} else if output != want {
		t.Errorf("want %q, got %q", want, output)
	}
}

func TestLockOSThreadTemplateThreadRace(t *testing.T) {
	testenv.MustHaveGoRun(t)

	exe, err := buildTestProg(t, "testprog")
	if err != nil {
		t.Fatal(err)
	}

	iterations := 100
	if testing.Short() {
		// Reduce run time to ~100ms, with much lower probability of
		// catching issues.
		iterations = 10
	}
	for i := 0; i < iterations; i++ {
		want := "OK\n"
		output := runBuiltTestProg(t, exe, "LockOSThreadTemplateThreadRace")
		if output != want {
			t.Fatalf("run %d: want %q, got %q", i, want, output)
		}
	}
}

// fakeSyscall emulates a system call.
//
//go:nosplit
func fakeSyscall(duration time.Duration) {
	runtime.Entersyscall()
	for start := runtime.Nanotime(); runtime.Nanotime()-start < int64(duration); {
	}
	runtime.Exitsyscall()
}

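// runtime.Entersyscall and runtime.Exitsyscall are test-only exports of the
// scheduler's syscall entry/exit hooks: they mark the goroutine as blocked
// in a system call so its P may be retaken and handed to other work, which
// is exactly the behavior testPreemptionAfterSyscall measures below.
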
// Check that a goroutine will be preempted if it is calling short system calls.
func testPreemptionAfterSyscall(t *testing.T, syscallDuration time.Duration) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))

	iterations := 10
	if testing.Short() {
		iterations = 1
	}
	const (
		maxDuration = 5 * time.Second
		nroutines   = 10
	)

	for i := 0; i < iterations; i++ {
		c := make(chan bool, nroutines)
		stop := uint32(0)

		start := time.Now()
		for g := 0; g < nroutines; g++ {
			go func(stop *uint32) {
				c <- true
				for atomic.LoadUint32(stop) == 0 {
					fakeSyscall(syscallDuration)
				}
				c <- true
			}(&stop)
		}
		// wait until all goroutines have started.
		for g := 0; g < nroutines; g++ {
			<-c
		}
		atomic.StoreUint32(&stop, 1)
		// wait until all goroutines have finished.
		for g := 0; g < nroutines; g++ {
			<-c
		}
		duration := time.Since(start)

		if duration > maxDuration {
			t.Errorf("timeout exceeded: %v (%v)", duration, maxDuration)
		}
	}
}

func TestPreemptionAfterSyscall(t *testing.T) {
	if runtime.GOOS == "plan9" {
		testenv.SkipFlaky(t, 41015)
	}

	for _, i := range []time.Duration{10, 100, 1000} {
		d := i * time.Microsecond
		t.Run(fmt.Sprint(d), func(t *testing.T) {
			testPreemptionAfterSyscall(t, d)
		})
	}
}

func TestGetgThreadSwitch(t *testing.T) {
	runtime.RunGetgThreadSwitchTest()
}

// TestNetpollBreak tests that netpollBreak can break a netpoll.
// This test is not particularly safe since the call to netpoll
// will pick up any stray files that are ready, but it should work
// OK as long as it is not run in parallel.
func TestNetpollBreak(t *testing.T) {
	if runtime.GOMAXPROCS(0) == 1 {
		t.Skip("skipping: GOMAXPROCS=1")
	}

	// Make sure that netpoll is initialized.
	runtime.NetpollGenericInit()

	start := time.Now()
	c := make(chan bool, 2)
	go func() {
		c <- true
		runtime.Netpoll(10 * time.Second.Nanoseconds())
		c <- true
	}()
	<-c
	// Loop because the break might get eaten by the scheduler.
	// Break twice to break both the netpoll we started and the
	// scheduler netpoll.
loop:
	for {
		runtime.NetpollBreak()
		runtime.NetpollBreak()
		select {
		case <-c:
			break loop
		default:
		}
	}
	if dur := time.Since(start); dur > 5*time.Second {
		t.Errorf("netpollBreak did not interrupt netpoll: slept for: %v", dur)
	}
}

// TestBigGOMAXPROCS tests that setting GOMAXPROCS to a large value
// doesn't cause a crash at startup. See issue 38474.
func TestBigGOMAXPROCS(t *testing.T) {
	t.Parallel()
	output := runTestProg(t, "testprog", "NonexistentTest", "GOMAXPROCS=1024")
	// Ignore error conditions on small machines.
	for _, errstr := range []string{
		"failed to create new OS thread",
		"cannot allocate memory",
	} {
		if strings.Contains(output, errstr) {
			t.Skipf("failed to create 1024 threads")
		}
	}
	if !strings.Contains(output, "unknown function: NonexistentTest") {
		t.Errorf("output:\n%s\nwanted:\nunknown function: NonexistentTest", output)
	}
}