1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
20 // Go runtime uses different Windows timers for time.Now and sleeping.
21 // These can tick at different frequencies and can arrive out of sync.
22 // The effect can be seen, for example, as time.Sleep(100ms) is actually
23 // shorter than 100ms when measured as difference between time.Now before and
24 // after time.Sleep call. This was observed on Windows XP SP3 (windows/386).
25 // windowsInaccuracy is to ignore such errors.
26 const windowsInaccuracy = 17 * Millisecond
// TestSleep verifies that sleeping for delay takes at least that long as
// measured by Now, allowing windowsInaccuracy of slack on Windows.
// NOTE(review): original lines 30-36 and 39/43-45 (delayadj setup, the Sleep
// call, and closing braces) are elided in this excerpt.
28 func TestSleep(t *testing.T) {
29 const delay = 100 * Millisecond
37 if runtime.GOOS == "windows" {
38 delayadj -= windowsInaccuracy
40 duration := Now().Sub(start)
41 if duration < delayadj {
42 t.Fatalf("Sleep(%s) slept for only %s", delay, duration)
46 // Test the basic function calling behavior. Correct queueing
47 // behavior is tested elsewhere, since After and AfterFunc share
// NOTE(review): the body of TestAfterFunc (original lines 50-65) is entirely
// elided in this excerpt.
49 func TestAfterFunc(t *testing.T) {
// TestAfterStress hammers a 1ns Ticker while another goroutine (elided here)
// runs until a stop flag is set; see the yield rationale below.
// NOTE(review): original lines 68-71, 75-77, and 80-84 are elided.
67 func TestAfterStress(t *testing.T) {
72 // Yield so that the OS can wake up the timer thread,
73 // so that it can generate channel sends for the main goroutine,
74 // which will eventually set stop = 1 for us.
78 ticker := NewTicker(1)
79 for i := 0; i < 100; i++ {
// benchmark runs bench under b.RunParallel after seeding each P with 1<<15
// long-duration (Hour) garbage timers, so timer-heap operations are measured
// against a realistically populated heap. The garbage timers are iterated
// again afterwards — presumably to Stop them (the call is elided; verify
// against the full source).
86 func benchmark(b *testing.B, bench func(n int)) {
88 // Create equal number of garbage timers on each P before starting
91 garbageAll := make([][]*Timer, runtime.GOMAXPROCS(0))
92 for i := range garbageAll {
96 garbage := make([]*Timer, 1<<15)
97 for j := range garbage {
98 garbage[j] = AfterFunc(Hour, nil)
100 garbageAll[i] = garbage
106 b.RunParallel(func(pb *testing.PB) {
113 for _, garbage := range garbageAll {
114 for _, t := range garbage {
// BenchmarkAfterFunc measures AfterFunc scheduling via the shared benchmark
// harness (loop body elided in this excerpt).
120 func BenchmarkAfterFunc(b *testing.B) {
121 benchmark(b, func(n int) {
// BenchmarkAfter measures the cost of After per iteration via the shared
// benchmark harness (loop body elided in this excerpt).
138 func BenchmarkAfter(b *testing.B) {
139 benchmark(b, func(n int) {
140 for i := 0; i < n; i++ {
// BenchmarkStop measures creating a 1-second timer and immediately stopping
// it, via the shared benchmark harness.
146 func BenchmarkStop(b *testing.B) {
147 benchmark(b, func(n int) {
148 for i := 0; i < n; i++ {
149 NewTimer(1 * Second).Stop()
// BenchmarkSimultaneousAfterFunc fires n zero-delay AfterFuncs at once and
// (in elided code) waits for them all via the WaitGroup.
154 func BenchmarkSimultaneousAfterFunc(b *testing.B) {
155 benchmark(b, func(n int) {
156 var wg sync.WaitGroup
158 for i := 0; i < n; i++ {
159 AfterFunc(0, wg.Done)
// BenchmarkStartStop creates n long-duration timers, then stops them in a
// second loop (the Stop call is in elided lines).
165 func BenchmarkStartStop(b *testing.B) {
166 benchmark(b, func(n int) {
167 timers := make([]*Timer, n)
168 for i := 0; i < n; i++ {
169 timers[i] = AfterFunc(Hour, nil)
172 for i := 0; i < n; i++ {
// BenchmarkReset measures Timer.Reset via the shared benchmark harness
// (timer setup and the Reset call are in elided lines).
178 func BenchmarkReset(b *testing.B) {
179 benchmark(b, func(n int) {
181 for i := 0; i < n; i++ {
// BenchmarkSleep measures n concurrent sleeps, synchronized with a WaitGroup
// (the goroutine launch and wait are in elided lines).
188 func BenchmarkSleep(b *testing.B) {
189 benchmark(b, func(n int) {
190 var wg sync.WaitGroup
192 for i := 0; i < n; i++ {
// TestAfter verifies that After(delay) blocks for at least delay (minus
// windowsInaccuracy on Windows) and that the Time it delivers is no earlier
// than start+delayadj.
202 func TestAfter(t *testing.T) {
203 const delay = 100 * Millisecond
205 end := <-After(delay)
207 if runtime.GOOS == "windows" {
208 delayadj -= windowsInaccuracy
210 if duration := Now().Sub(start); duration < delayadj {
211 t.Fatalf("After(%s) slept for only %d ns", delay, duration)
213 if min := start.Add(delayadj); end.Before(min) {
214 t.Fatalf("After(%s) expect >= %s, got %s", delay, min, end)
// TestAfterTick times Count consecutive After(Delta) waits (Count is declared
// in elided lines) and checks the total is neither implausibly fast nor, in
// non-short mode, more than 3x the target Delta*Count.
218 func TestAfterTick(t *testing.T) {
220 Delta := 100 * Millisecond
// Use a smaller interval in -short mode (the testing.Short() guard is in an
// elided line).
222 Delta = 10 * Millisecond
225 for i := 0; i < Count; i++ {
230 target := Delta * Count
232 t.Fatalf("%d ticks of %s too fast: took %s, expected %s", Count, Delta, d, target)
234 if !testing.Short() && d > target*30/10 {
235 t.Fatalf("%d ticks of %s too slow: took %s, expected %s", Count, Delta, d, target)
// TestAfterStop checks that Timer.Stop actually prevents timers from firing;
// see the flakiness rationale below. Errors accumulate in errs across up to
// five attempts, and only persistent failure is reported via t.Errorf.
239 func TestAfterStop(t *testing.T) {
240 // We want to test that we stop a timer before it runs.
241 // We also want to test that it didn't run after a longer timer.
242 // Since we don't want the test to run for too long, we don't
243 // want to use lengthy times. That makes the test inherently flaky.
244 // So only report an error if it fails five times in a row.
248 for _, e := range errs {
253 for i := 0; i < 5; i++ {
254 AfterFunc(100*Millisecond, func() {})
255 t0 := NewTimer(50 * Millisecond)
256 c1 := make(chan bool, 1)
257 t1 := AfterFunc(150*Millisecond, func() { c1 <- true })
258 c2 := After(200 * Millisecond)
// The Stop calls and channel receives that drive the checks below are in
// elided lines; each failed expectation appends to errs.
260 errs = append(errs, "failed to stop event 0")
264 errs = append(errs, "failed to stop event 1")
270 errs = append(errs, "event 0 was not stopped")
273 errs = append(errs, "event 1 was not stopped")
278 errs = append(errs, "Stop returned true twice")
282 // Test passed, so all done.
284 t.Logf("saw %d errors, ignoring to avoid flakiness", len(errs))
291 t.Errorf("saw %d errors", len(errs))
// TestAfterQueuing retries testAfterQueuing with a growing delta until it
// succeeds or the attempt budget (declared in an elided line) is exhausted.
295 func TestAfterQueuing(t *testing.T) {
296 // This test flakes out on some systems,
297 // so we'll try it a few times before declaring it a failure.
// err starts non-nil so the loop runs at least once.
299 err := errors.New("!=nil")
300 for i := 0; i < attempts && err != nil; i++ {
301 delta := Duration(20+i*50) * Millisecond
302 if err = testAfterQueuing(delta); err != nil {
303 t.Logf("attempt %v failed: %v", i, err)
// slots lists, deliberately unsorted and with repeats, the multiples of delta
// at which testAfterQueuing arms its After timers.
311 var slots = []int{5, 3, 6, 6, 6, 1, 1, 2, 7, 9, 4, 8, 0}
// afterResult pairs a slot index with the Time received from its After
// channel (field declarations are in elided lines; see await).
313 type afterResult struct {
// await blocks on ac and forwards the received Time, tagged with slot, to
// result.
318 func await(slot int, result chan<- afterResult, ac <-chan Time) {
319 result <- afterResult{slot, <-ac}
// testAfterQueuing arms one After timer per entry in slots, each firing at
// slot*delta, then verifies the results arrive in nondecreasing slot order
// and within [target-delta/2, target+delta*10] of their expected time. It
// returns an error instead of failing so the caller can retry.
322 func testAfterQueuing(delta Duration) error {
323 // make the result channel buffered because we don't want
324 // to depend on channel queueing semantics that might
325 // possibly change in the future.
326 result := make(chan afterResult, len(slots))
329 for _, slot := range slots {
330 go await(slot, result, After(Duration(slot)*delta))
// Collection of results into order/times happens in elided lines.
336 order = append(order, r.slot)
337 times = append(times, r.t)
339 for i := range order {
340 if i > 0 && order[i] < order[i-1] {
341 return fmt.Errorf("After calls returned out of order: %v", order)
344 for i, t := range times {
346 target := Duration(order[i]) * delta
347 if dt < target-delta/2 || dt > target+delta*10 {
348 return fmt.Errorf("After(%s) arrived at %s, expected [%s,%s]", target, dt, target-delta/2, target+delta*10)
// TestTimerStopStress starts 100 two-second AfterFunc timers whose callbacks
// would fail the test; the elided remainder presumably stops each timer
// before it can fire — verify against the full source.
354 func TestTimerStopStress(t *testing.T) {
358 for i := 0; i < 100; i++ {
360 timer := AfterFunc(2*Second, func() {
361 t.Errorf("timer %d was not stopped", i)
// TestSleepZeroDeadlock is a regression test: Sleep(0) must not deadlock the
// scheduler (see the historical failure sequence below).
370 func TestSleepZeroDeadlock(t *testing.T) {
371 // Sleep(0) used to hang, the sequence of events was as follows.
372 // Sleep(0) sets G's status to Gwaiting, but then immediately returns leaving the status.
373 // Then the goroutine calls e.g. new and falls down into the scheduler due to pending GC.
374 // After the GC nobody wakes up the goroutine from Gwaiting status.
375 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
// The loop bodies (Sleep/GC/channel work) are in elided lines.
378 for i := 0; i < 100; i++ {
383 for i := 0; i < 100; i++ {
385 tmp := make(chan bool, 1)
// testReset exercises Timer.Reset: resetting an unfired timer must return
// true, the timer must not fire early, must fire after the reset duration,
// and resetting an already-expired timer must return false. Violations are
// returned as errors so TestReset can retry with larger durations.
392 func testReset(d Duration) error {
393 t0 := NewTimer(2 * d)
395 if !t0.Reset(3 * d) {
396 return errors.New("resetting unfired timer returned false")
401 return errors.New("timer fired early")
408 return errors.New("reset timer did not fire")
411 if t0.Reset(50 * Millisecond) {
412 return errors.New("resetting expired timer returned true")
// TestReset runs testReset with increasing durations (loop elided) until an
// attempt succeeds or maxDuration is exceeded; see rationale below.
417 func TestReset(t *testing.T) {
418 // We try to run this test with increasingly larger multiples
419 // until one works so slow, loaded hardware isn't as flaky,
420 // but without slowing down fast machines unnecessarily.
422 // (maxDuration is several orders of magnitude longer than we
423 // expect this test to actually take on a fast, unloaded machine.)
425 const maxDuration = 10 * Second
435 t.Logf("%v; trying duration %v", err, d)
439 // Test that sleeping (via Sleep or Timer) for an interval so large it
440 // overflows does not result in a short sleep duration. Nor does it interfere
441 // with execution of other timers. If it does, timers in this or subsequent
442 // tests may not fire.
443 func TestOverflowSleep(t *testing.T) {
// Largest representable Duration (max int64).
444 const big = Duration(int64(1<<63 - 1))
448 // On failure, this may return after the test has completed, so
449 // we need to panic instead.
450 panic("big sleep returned")
// The surrounding select (elided) expects the short After to win the race
// against the overflowing timer.
455 t.Fatalf("big timeout fired")
456 case <-After(25 * Millisecond):
// Most negative Duration (min int64); must behave like a zero sleep.
460 const neg = Duration(-1 << 63)
461 Sleep(neg) // Returns immediately.
465 case <-After(1 * Second):
466 t.Fatalf("negative timeout didn't fire")
470 // Test that a panic while deleting a timer does not leave
471 // the timers mutex held, deadlocking a ticker.Stop in a defer.
472 func TestIssue5745(t *testing.T) {
473 ticker := NewTicker(Hour)
475 // would deadlock here before the fix due to
476 // lock taken before the segfault.
// The deferred recover confirms the expected segfault panic occurred.
479 if r := recover(); r == nil {
480 t.Error("Expected panic, but none happened.")
484 // cause a panic due to a segfault
487 t.Error("Should be unreachable.")
// TestOverflowPeriodRuntimeTimer delegates to a helper in internal_test.go;
// see the hang warning below.
490 func TestOverflowPeriodRuntimeTimer(t *testing.T) {
491 // This may hang forever if timers are broken. See comment near
492 // the end of CheckRuntimeTimerOverflow in internal_test.go.
493 CheckRuntimeTimerPeriodOverflow()
// checkZeroPanicString is deferred by the zero-Timer tests below; it checks
// that the recovered panic message (recovery lines elided in this excerpt)
// mentions the uninitialized Timer.
496 func checkZeroPanicString(t *testing.T) {
499 if want := "called on uninitialized Timer"; !strings.Contains(s, want) {
500 t.Errorf("panic = %v; want substring %q", e, want)
// TestZeroTimerResetPanics: calling Reset on a zero-value Timer must panic
// with the uninitialized-Timer message (the Reset call itself is elided).
504 func TestZeroTimerResetPanics(t *testing.T) {
505 defer checkZeroPanicString(t)
// TestZeroTimerStopPanics: calling Stop on a zero-value Timer must panic
// with the uninitialized-Timer message (the Stop call itself is elided).
510 func TestZeroTimerStopPanics(t *testing.T) {
511 defer checkZeroPanicString(t)
516 // Test that zero duration timers aren't missed by the scheduler. Regression test for issue 44868.
517 func TestZeroTimer(t *testing.T) {
// Each of the 1e6 iterations (body elided) must observe its zero-duration
// timer fire; the whole run is bounded at 2s per iteration's receive.
522 for i := 0; i < 1000000; i++ {
526 if diff := Since(s); diff > 2*Second {
527 t.Errorf("Expected time to get value from Timer channel in less than 2 sec, took %v", diff)
532 // Test that rapidly moving a timer earlier doesn't cause it to get dropped.
534 func TestTimerModifiedEarlier(t *testing.T) {
535 if runtime.GOOS == "plan9" && runtime.GOARCH == "arm" {
536 testenv.SkipFlaky(t, 50470)
// past is a negative duration (deadline already passed); the inner j loop
// presumably Resets each timer toward it — the Reset calls are elided.
539 past := Until(Unix(0, 0))
542 for i := 0; i < count; i++ {
543 timer := NewTimer(Hour)
544 for j := 0; j < 10; j++ {
// A 10s deadline bounds the whole wait; exceeding 8s on any timer counts
// against the (elided) fail counter.
551 deadline := NewTimer(10 * Second)
552 defer deadline.Stop()
556 if since := Since(now); since > 8*Second {
557 t.Errorf("timer took too long (%v)", since)
561 t.Error("deadline expired")
566 t.Errorf("%d failures", fail)
570 // Test that rapidly moving timers earlier and later doesn't cause
571 // some of the sleep times to be lost.
// Each timer is driven through a small per-timer state machine (states[i]);
// indices is consumed in random order until every timer has completed. The
// state dispatch itself (switch/if on state) is in elided lines.
573 func TestAdjustTimers(t *testing.T) {
574 var rnd = rand.New(rand.NewSource(Now().UnixNano()))
576 timers := make([]*Timer, 100)
577 states := make([]int, len(timers))
578 indices := rnd.Perm(len(timers))
580 for len(indices) != 0 {
581 var ii = rnd.Intn(len(indices))
584 var timer = timers[i]
585 var state = states[i]
// State: create a zero-duration timer, then drain it once fired.
590 timers[i] = NewTimer(0)
592 <-timer.C // Timer is now idle.
594 // Reset to various long durations, which we'll cancel.
596 if timer.Reset(1 * Minute) {
597 panic("shouldn't be active (1)")
600 if timer.Reset(3 * Minute) {
601 panic("shouldn't be active (3)")
604 if timer.Reset(2 * Minute) {
605 panic("shouldn't be active (2)")
608 // Stop and drain a long-duration timer.
611 t.Logf("timer %d state %d Stop returned false", i, state)
615 // Start a short-duration timer we expect to select without blocking.
618 t.Fatal("timer.Reset returned true")
// dur measures how long the short timer took to fire (timing code elided).
624 if dur > 750*Millisecond {
625 t.Errorf("timer %d took %v to complete", i, dur)
628 // Timer is done. Swap with tail and remove.
630 indices[ii] = indices[len(indices)-1]
631 indices = indices[:len(indices)-1]
636 // Benchmark timer latency when the thread that creates the timer is busy with
637 // other work and the timers must be serviced by other threads.
638 // https://golang.org/issue/38860
639 func BenchmarkParallelTimerLatency(b *testing.B) {
640 gmp := runtime.GOMAXPROCS(0)
641 if gmp < 2 || runtime.NumCPU() < gmp {
642 b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
645 // allocate memory now to avoid GC interference later.
646 timerCount := gmp - 1
// Per-timer latency accumulators; padded to keep entries on separate cache
// lines (see the padding field below).
647 stats := make([]struct {
651 _ [5]int64 // cache line padding
654 // Ensure the time to start new threads to service timers will not pollute
658 // Note that other than the AfterFunc calls this benchmark is measuring it
659 // avoids using any other timers. In particular, the main goroutine uses
660 // doWork to spin for some durations because up through Go 1.15 if all
661 // threads are idle sysmon could leave deep sleep when we wake.
663 // Ensure sysmon is in deep sleep.
664 doWork(30 * Millisecond)
668 const delay = Millisecond
669 var wg sync.WaitGroup
671 for i := 0; i < b.N; i++ {
673 atomic.StoreInt32(&count, 0)
674 for j := 0; j < timerCount; j++ {
675 expectedWakeup := Now().Add(delay)
677 AfterFunc(delay, func() {
678 late := Since(expectedWakeup)
// Record per-timer sum/max of lateness, then signal completion via count.
683 stats[j].sum += float64(late.Nanoseconds())
684 if late > stats[j].max {
687 atomic.AddInt32(&count, 1)
688 for atomic.LoadInt32(&count) < int32(timerCount) {
689 // spin until all timers fired
695 for atomic.LoadInt32(&count) < int32(timerCount) {
696 // spin until all timers fired
700 // Spin for a bit to let the other scheduler threads go idle before the
// Aggregate across timers and report average/max lateness instead of the
// default ns/op (which is zeroed below).
707 for _, s := range stats {
712 samples += float64(s.count)
714 b.ReportMetric(0, "ns/op")
715 b.ReportMetric(total/samples, "avg-late-ns")
716 b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
719 // Benchmark timer latency with staggered wakeup times and varying CPU bound
720 // workloads. https://golang.org/issue/38860
721 func BenchmarkStaggeredTickerLatency(b *testing.B) {
722 gmp := runtime.GOMAXPROCS(0)
723 if gmp < 2 || runtime.NumCPU() < gmp {
724 b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
727 const delay = 3 * Millisecond
// Sub-benchmarks vary the per-tick CPU work and the number of tickers per P.
729 for _, dur := range []Duration{300 * Microsecond, 2 * Millisecond} {
730 b.Run(fmt.Sprintf("work-dur=%s", dur), func(b *testing.B) {
731 for tickersPerP := 1; tickersPerP < int(delay/dur)+1; tickersPerP++ {
732 tickerCount := gmp * tickersPerP
733 b.Run(fmt.Sprintf("tickers-per-P=%d", tickersPerP), func(b *testing.B) {
734 // allocate memory now to avoid GC interference later.
// Per-ticker latency accumulators, cache-line padded (see field below).
735 stats := make([]struct {
739 _ [5]int64 // cache line padding
742 // Ensure the time to start new threads to service timers
743 // will not pollute the results.
748 var wg sync.WaitGroup
750 for j := 0; j < tickerCount; j++ {
// Stagger the ticker start times by doing a slice of work first.
752 doWork(delay / Duration(gmp))
753 expectedWakeup := Now().Add(delay)
754 ticker := NewTicker(delay)
755 go func(c int, ticker *Ticker, firstWake Time) {
// For each tick, record how late the wakeup was relative to the expected
// schedule, then advance the expectation by one period.
760 late := Since(expectedWakeup)
765 stats[j].sum += float64(late.Nanoseconds())
766 if late > stats[j].max {
769 expectedWakeup = expectedWakeup.Add(delay)
773 }(b.N, ticker, expectedWakeup)
// Aggregate and report average/max lateness instead of the default ns/op.
780 for _, s := range stats {
785 samples += float64(s.count)
787 b.ReportMetric(0, "ns/op")
788 b.ReportMetric(total/samples, "avg-late-ns")
789 b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
796 // warmupScheduler ensures the scheduler has at least targetThreadCount threads
797 // in its thread pool.
798 func warmupScheduler(targetThreadCount int) {
799 var wg sync.WaitGroup
// Launch targetThreadCount goroutines (goroutine bodies elided); each
// increments count, and every one spins until all have started so the
// runtime is forced to back them with distinct threads.
801 for i := 0; i < targetThreadCount; i++ {
804 atomic.AddInt32(&count, 1)
805 for atomic.LoadInt32(&count) < int32(targetThreadCount) {
806 // spin until all threads started
809 // spin a bit more to ensure they are all running on separate CPUs.
// doWork busy-spins until dur has elapsed (loop tail elided in this excerpt),
// burning CPU without using any timers.
817 func doWork(dur Duration) {
819 for Since(start) < dur {