// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package time_test

import (
	"errors"
	"fmt"
	"internal/testenv"
	"math/rand"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	. "time"
)

// Go runtime uses different Windows timers for time.Now and sleeping.
// These can tick at different frequencies and can arrive out of sync.
// The effect can be seen, for example, as time.Sleep(100ms) is actually
// shorter than 100ms when measured as the difference between time.Now
// before and after the time.Sleep call. This was observed on Windows XP
// SP3 (windows/386). windowsInaccuracy is used to ignore such errors.
const windowsInaccuracy = 17 * Millisecond

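// TestSleep verifies that Sleep waits at least the requested duration
// (allowing for Windows timer inaccuracy). A goroutine calls Interrupt
// halfway through to check that the sleep is not cut short.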
func TestSleep(t *testing.T) {
	const delay = 100 * Millisecond
	go func() {
		Sleep(delay / 2)
		Interrupt()
	}()
	start := Now()
	Sleep(delay)
	delayadj := delay
	if runtime.GOOS == "windows" {
		delayadj -= windowsInaccuracy
	}
	duration := Now().Sub(start)
	if duration < delayadj {
		t.Fatalf("Sleep(%s) slept for only %s", delay, duration)
	}
}

// Test the basic function calling behavior. Correct queueing
// behavior is tested elsewhere, since After and AfterFunc share
// the same code.
func TestAfterFunc(t *testing.T) {
	i := 10
	c := make(chan bool)
	var f func()
	f = func() {
		i--
		if i >= 0 {
			AfterFunc(0, f)
			Sleep(1 * Second)
		} else {
			c <- true
		}
	}

	AfterFunc(0, f)
	<-c
}

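// TestAfterStress hammers the ticker path while the garbage collector runs
// concurrently, checking that ticker channel sends keep making progress.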
func TestAfterStress(t *testing.T) {
	stop := uint32(0)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
			// Yield so that the OS can wake up the timer thread,
			// so that it can generate channel sends for the main goroutine,
			// which will eventually set stop = 1 for us.
			Sleep(Nanosecond)
		}
	}()
	ticker := NewTicker(1)
	for i := 0; i < 100; i++ {
		<-ticker.C
	}
	ticker.Stop()
	atomic.StoreUint32(&stop, 1)
}

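// benchmark runs bench in parallel while each P holds a large set of idle
// timers, so timer operations are measured against a realistically
// populated timer heap rather than an empty one.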
func benchmark(b *testing.B, bench func(n int)) {
	// Create equal number of garbage timers on each P before starting
	// the benchmark.
	var wg sync.WaitGroup
	garbageAll := make([][]*Timer, runtime.GOMAXPROCS(0))
	for i := range garbageAll {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			garbage := make([]*Timer, 1<<15)
			for j := range garbage {
				garbage[j] = AfterFunc(Hour, nil)
			}
			garbageAll[i] = garbage
		}(i)
	}
	wg.Wait()

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			bench(1000)
		}
	})
	b.StopTimer()

	for _, garbage := range garbageAll {
		for _, t := range garbage {
			t.Stop()
		}
	}
}

func BenchmarkAfterFunc(b *testing.B) {
	benchmark(b, func(n int) {
		c := make(chan bool)
		var f func()
		f = func() {
			n--
			if n >= 0 {
				AfterFunc(0, f)
			} else {
				c <- true
			}
		}

		AfterFunc(0, f)
		<-c
	})
}

func BenchmarkAfter(b *testing.B) {
	benchmark(b, func(n int) {
		for i := 0; i < n; i++ {
			<-After(1)
		}
	})
}

func BenchmarkStop(b *testing.B) {
	benchmark(b, func(n int) {
		for i := 0; i < n; i++ {
			NewTimer(1 * Second).Stop()
		}
	})
}

func BenchmarkSimultaneousAfterFunc(b *testing.B) {
	benchmark(b, func(n int) {
		var wg sync.WaitGroup
		wg.Add(n)
		for i := 0; i < n; i++ {
			AfterFunc(0, wg.Done)
		}
		wg.Wait()
	})
}

func BenchmarkStartStop(b *testing.B) {
	benchmark(b, func(n int) {
		timers := make([]*Timer, n)
		for i := 0; i < n; i++ {
			timers[i] = AfterFunc(Hour, nil)
		}
		for i := 0; i < n; i++ {
			timers[i].Stop()
		}
	})
}

func BenchmarkReset(b *testing.B) {
	benchmark(b, func(n int) {
		t := NewTimer(Hour)
		for i := 0; i < n; i++ {
			t.Reset(Hour)
		}
		t.Stop()
	})
}

func BenchmarkSleep(b *testing.B) {
	benchmark(b, func(n int) {
		var wg sync.WaitGroup
		wg.Add(n)
		for i := 0; i < n; i++ {
			go func() {
				Sleep(Nanosecond)
				wg.Done()
			}()
		}
		wg.Wait()
	})
}

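// TestAfter checks that After delivers a time value no earlier than the
// requested delay, again allowing for Windows timer inaccuracy.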
func TestAfter(t *testing.T) {
	const delay = 100 * Millisecond
	start := Now()
	end := <-After(delay)
	delayadj := delay
	if runtime.GOOS == "windows" {
		delayadj -= windowsInaccuracy
	}
	if duration := Now().Sub(start); duration < delayadj {
		t.Fatalf("After(%s) slept for only %d ns", delay, duration)
	}
	if min := start.Add(delayadj); end.Before(min) {
		t.Fatalf("After(%s) expect >= %s, got %s", delay, min, end)
	}
}

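// TestAfterTick times Count sequential After waits and checks that the
// total elapsed time is neither far below nor far above Count*Delta.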
func TestAfterTick(t *testing.T) {
	const Count = 10
	Delta := 100 * Millisecond
	if testing.Short() {
		Delta = 10 * Millisecond
	}
	t0 := Now()
	for i := 0; i < Count; i++ {
		<-After(Delta)
	}
	t1 := Now()
	d := t1.Sub(t0)

	target := Delta * Count
	if d < target*9/10 {
		t.Fatalf("%d ticks of %s too fast: took %s, expected %s", Count, Delta, d, target)
	}
	if !testing.Short() && d > target*30/10 {
		t.Fatalf("%d ticks of %s too slow: took %s, expected %s", Count, Delta, d, target)
	}
}

func TestAfterStop(t *testing.T) {
	// We want to test that we stop a timer before it runs.
	// We also want to test that it didn't run after a longer timer.
	// Since we don't want the test to run for too long, we don't
	// want to use lengthy times. That makes the test inherently flaky.
	// So only report an error if it fails five times in a row.

	var errs []string
	logErrs := func() {
		for _, e := range errs {
			t.Log(e)
		}
	}

	for i := 0; i < 5; i++ {
		AfterFunc(100*Millisecond, func() {})
		t0 := NewTimer(50 * Millisecond)
		c1 := make(chan bool, 1)
		t1 := AfterFunc(150*Millisecond, func() { c1 <- true })
		c2 := After(200 * Millisecond)

		if !t0.Stop() {
			errs = append(errs, "failed to stop event 0")
			continue
		}
		if !t1.Stop() {
			errs = append(errs, "failed to stop event 1")
			continue
		}
		<-c2
		select {
		case <-t0.C:
			errs = append(errs, "event 0 was not stopped")
			continue
		case <-c1:
			errs = append(errs, "event 1 was not stopped")
			continue
		default:
		}
		if t1.Stop() {
			errs = append(errs, "Stop returned true twice")
			continue
		}

		// Test passed, so all done.
		if len(errs) > 0 {
			t.Logf("saw %d errors, ignoring to avoid flakiness", len(errs))
			logErrs()
		}
		return
	}

	t.Errorf("saw %d errors", len(errs))
	logErrs()
}

func TestAfterQueuing(t *testing.T) {
	// This test flakes out on some systems,
	// so we'll try it a few times before declaring it a failure.
	const attempts = 5
	err := errors.New("!=nil")
	for i := 0; i < attempts && err != nil; i++ {
		delta := Duration(20+i*50) * Millisecond
		if err = testAfterQueuing(delta); err != nil {
			t.Logf("attempt %v failed: %v", i, err)
		}
	}
	if err != nil {
		t.Fatal(err)
	}
}

// For gccgo omit 0 for now because it can take too long to start the goroutine.
var slots = []int{5, 3, 6, 6, 6, 1, 1, 2, 7, 9, 4, 8 /*0*/}

type afterResult struct {
	slot int
	t    Time
}

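// await waits for a value on ac and reports which slot produced it,
// along with the delivery time.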
func await(slot int, result chan<- afterResult, ac <-chan Time) {
	result <- afterResult{slot, <-ac}
}

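// testAfterQueuing starts one After call per slot, then checks that the
// results arrive in slot order and at roughly the expected offsets from t0.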
func testAfterQueuing(delta Duration) error {
	// make the result channel buffered because we don't want
	// to depend on channel queueing semantics that might
	// possibly change in the future.
	result := make(chan afterResult, len(slots))

	t0 := Now()
	for _, slot := range slots {
		go await(slot, result, After(Duration(slot)*delta))
	}
	var order []int
	var times []Time
	for range slots {
		r := <-result
		order = append(order, r.slot)
		times = append(times, r.t)
	}
	for i := range order {
		if i > 0 && order[i] < order[i-1] {
			return fmt.Errorf("After calls returned out of order: %v", order)
		}
	}
	for i, t := range times {
		dt := t.Sub(t0)
		target := Duration(order[i]) * delta
		if dt < target-delta/2 || dt > target+delta*10 {
			return fmt.Errorf("After(%s) arrived at %s, expected [%s,%s]", target, dt, target-delta/2, target+delta*10)
		}
	}
	return nil
}

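// TestTimerStopStress starts many timers and stops each one well before it
// fires; any timer that fires anyway reports a failure.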
func TestTimerStopStress(t *testing.T) {
	if testing.Short() {
		return
	}
	for i := 0; i < 100; i++ {
		go func(i int) {
			timer := AfterFunc(2*Second, func() {
				t.Errorf("timer %d was not stopped", i)
			})
			Sleep(1 * Second)
			timer.Stop()
		}(i)
	}
	Sleep(3 * Second)
}

func TestSleepZeroDeadlock(t *testing.T) {
	// Sleep(0) used to hang; the sequence of events was as follows.
	// Sleep(0) sets G's status to Gwaiting, but then immediately returns, leaving the status set.
	// Then the goroutine calls e.g. new and falls down into the scheduler due to pending GC.
	// After the GC nobody wakes up the goroutine from Gwaiting status.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	c := make(chan bool)
	go func() {
		for i := 0; i < 100; i++ {
			runtime.GC()
		}
		c <- true
	}()
	for i := 0; i < 100; i++ {
		Sleep(0)
		tmp := make(chan bool, 1)
		tmp <- true
		<-tmp
	}
	<-c
}

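// testReset exercises Timer.Reset at time scale d: resetting an active
// timer should return true and push back the firing time, while resetting
// an already-expired timer should return false.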
func testReset(d Duration) error {
	t0 := NewTimer(2 * d)
	Sleep(d)
	if !t0.Reset(3 * d) {
		return errors.New("resetting unfired timer returned false")
	}
	Sleep(2 * d)
	select {
	case <-t0.C:
		return errors.New("timer fired early")
	default:
	}
	Sleep(2 * d)
	select {
	case <-t0.C:
	default:
		return errors.New("reset timer did not fire")
	}

	if t0.Reset(50 * Millisecond) {
		return errors.New("resetting expired timer returned true")
	}
	return nil
}

func TestReset(t *testing.T) {
	// We try to run this test with increasingly larger multiples
	// until one works, so that slow, loaded hardware isn't as flaky,
	// but without slowing down fast machines unnecessarily.
	const unit = 25 * Millisecond
	tries := []Duration{
		1 * unit,
		3 * unit,
		7 * unit,
		15 * unit,
	}
	var err error
	for _, d := range tries {
		err = testReset(d)
		if err == nil {
			t.Logf("passed using duration %v", d)
			return
		}
	}
	t.Error(err)
}

// Test that sleeping (via Sleep or Timer) for an interval so large it
// overflows does not result in a short sleep duration. Nor does it interfere
// with execution of other timers. If it does, timers in this or subsequent
// tests may not fire.
func TestOverflowSleep(t *testing.T) {
	const big = Duration(int64(1<<63 - 1))

	go func() {
		Sleep(big)
		// On failure, this may return after the test has completed, so
		// we need to panic instead.
		panic("big sleep returned")
	}()

	select {
	case <-After(big):
		t.Fatalf("big timeout fired")
	case <-After(25 * Millisecond):
		// OK
	}

	const neg = Duration(-1 << 63)
	Sleep(neg) // Returns immediately.
	select {
	case <-After(neg):
		// OK
	case <-After(1 * Second):
		t.Fatalf("negative timeout didn't fire")
	}
}

// Test that a panic while deleting a timer does not leave
// the timers mutex held, deadlocking a ticker.Stop in a defer.
func TestIssue5745(t *testing.T) {
	ticker := NewTicker(Hour)
	defer func() {
		// would deadlock here before the fix due to
		// lock taken before the segfault.
		ticker.Stop()

		if r := recover(); r == nil {
			t.Error("Expected panic, but none happened.")
		}
	}()

	// cause a panic due to a segfault
	var timer *Timer
	timer.Stop()
	t.Error("Should be unreachable.")
}

func TestOverflowPeriodRuntimeTimer(t *testing.T) {
	// This may hang forever if timers are broken. See comment near
	// the end of CheckRuntimeTimerOverflow in internal_test.go.
	CheckRuntimeTimerPeriodOverflow()
}

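// checkZeroPanicString is deferred by the tests below to verify that calling
// a method on an uninitialized (zero-value) Timer panics with the expected
// message.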
func checkZeroPanicString(t *testing.T) {
	e := recover()
	s, _ := e.(string)
	if want := "called on uninitialized Timer"; !strings.Contains(s, want) {
		t.Errorf("panic = %v; want substring %q", e, want)
	}
}

func TestZeroTimerResetPanics(t *testing.T) {
	defer checkZeroPanicString(t)
	var tr Timer
	tr.Reset(1)
}

func TestZeroTimerStopPanics(t *testing.T) {
	defer checkZeroPanicString(t)
	var tr Timer
	tr.Stop()
}

// Test that zero duration timers aren't missed by the scheduler. Regression test for issue 44868.
func TestZeroTimer(t *testing.T) {
	if testing.Short() {
		t.Skip("-short")
	}

	for i := 0; i < 1000000; i++ {
		s := Now()
		ti := NewTimer(0)
		<-ti.C
		if diff := Since(s); diff > 2*Second {
			t.Errorf("Expected time to get value from Timer channel in less than 2 sec, took %v", diff)
		}
	}
}

// Test that rapidly moving a timer earlier doesn't cause it to get dropped.
// Issue 47329.
func TestTimerModifiedEarlier(t *testing.T) {
	if runtime.GOOS == "plan9" && runtime.GOARCH == "arm" {
		testenv.SkipFlaky(t, 50470)
	}

	past := Until(Unix(0, 0))
	count := 1000
	fail := 0
	for i := 0; i < count; i++ {
		timer := NewTimer(Hour)
		for j := 0; j < 10; j++ {
			if !timer.Stop() {
				<-timer.C
			}
			timer.Reset(past)
		}

		deadline := NewTimer(10 * Second)
		defer deadline.Stop()
		now := Now()
		select {
		case <-timer.C:
			if since := Since(now); since > 8*Second {
				t.Errorf("timer took too long (%v)", since)
				fail++
			}
		case <-deadline.C:
			t.Error("deadline expired")
		}
	}

	if fail > 0 {
		t.Errorf("%d failures", fail)
	}
}

// Test that rapidly moving timers earlier and later doesn't cause
// some of the sleep times to be lost.
// Issue 47762.
func TestAdjustTimers(t *testing.T) {
	var rnd = rand.New(rand.NewSource(Now().UnixNano()))

	timers := make([]*Timer, 100)
	states := make([]int, len(timers))
	indices := rnd.Perm(len(timers))

	for len(indices) != 0 {
		var ii = rnd.Intn(len(indices))
		var i = indices[ii]

		var timer = timers[i]
		var state = states[i]
		states[i]++

		switch state {
		case 0:
			timers[i] = NewTimer(0)
		case 1:
			<-timer.C // Timer is now idle.

		// Reset to various long durations, which we'll cancel.
		case 2:
			if timer.Reset(1 * Minute) {
				panic("shouldn't be active (1)")
			}
		case 4:
			if timer.Reset(3 * Minute) {
				panic("shouldn't be active (3)")
			}
		case 6:
			if timer.Reset(2 * Minute) {
				panic("shouldn't be active (2)")
			}

		// Stop and drain a long-duration timer.
		case 3, 5, 7:
			if !timer.Stop() {
				t.Logf("timer %d state %d Stop returned false", i, state)
				<-timer.C
			}

		// Start a short-duration timer we expect to select without blocking.
		case 8:
			if timer.Reset(0) {
				t.Fatal("timer.Reset returned true")
			}
		case 9:
			now := Now()
			<-timer.C
			dur := Since(now)
			if dur > 750*Millisecond {
				t.Errorf("timer %d took %v to complete", i, dur)
			}

		// Timer is done. Swap with tail and remove.
		case 10:
			indices[ii] = indices[len(indices)-1]
			indices = indices[:len(indices)-1]
		}
	}
}

// Benchmark timer latency when the thread that creates the timer is busy with
// other work and the timers must be serviced by other threads.
// https://golang.org/issue/38860
func BenchmarkParallelTimerLatency(b *testing.B) {
	gmp := runtime.GOMAXPROCS(0)
	if gmp < 2 || runtime.NumCPU() < gmp {
		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
	}

	// allocate memory now to avoid GC interference later.
	timerCount := gmp - 1
	stats := make([]struct {
		sum   float64
		max   Duration
		count int64
		_     [5]int64 // cache line padding
	}, timerCount)

	// Ensure the time to start new threads to service timers will not pollute
	// the results.
	warmupScheduler(gmp)

	// Note that, other than the AfterFunc calls this benchmark is measuring,
	// it avoids using any other timers. In particular, the main goroutine uses
	// doWork to spin for some durations because, up through Go 1.15, if all
	// threads are idle sysmon could leave deep sleep when we wake.

	// Ensure sysmon is in deep sleep.
	doWork(30 * Millisecond)

	b.ResetTimer()

	const delay = Millisecond
	var wg sync.WaitGroup
	var count int32
	for i := 0; i < b.N; i++ {
		wg.Add(timerCount)
		atomic.StoreInt32(&count, 0)
		for j := 0; j < timerCount; j++ {
			j := j
			expectedWakeup := Now().Add(delay)
			AfterFunc(delay, func() {
				late := Since(expectedWakeup)
				if late < 0 {
					late = 0
				}
				stats[j].count++
				stats[j].sum += float64(late.Nanoseconds())
				if late > stats[j].max {
					stats[j].max = late
				}
				atomic.AddInt32(&count, 1)
				for atomic.LoadInt32(&count) < int32(timerCount) {
					// spin until all timers fired
				}
				wg.Done()
			})
		}

		for atomic.LoadInt32(&count) < int32(timerCount) {
			// spin until all timers fired
		}
		wg.Wait()

		// Spin for a bit to let the other scheduler threads go idle before the
		// next round.
		doWork(Millisecond)
	}

	var total float64
	var samples float64
	max := Duration(0)
	for _, s := range stats {
		if s.max > max {
			max = s.max
		}
		total += s.sum
		samples += float64(s.count)
	}
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(total/samples, "avg-late-ns")
	b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
}

// Benchmark timer latency with staggered wakeup times and varying CPU bound
// workloads. https://golang.org/issue/38860
func BenchmarkStaggeredTickerLatency(b *testing.B) {
	gmp := runtime.GOMAXPROCS(0)
	if gmp < 2 || runtime.NumCPU() < gmp {
		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
	}

	const delay = 3 * Millisecond
	for _, dur := range []Duration{300 * Microsecond, 2 * Millisecond} {
		b.Run(fmt.Sprintf("work-dur=%s", dur), func(b *testing.B) {
			for tickersPerP := 1; tickersPerP < int(delay/dur)+1; tickersPerP++ {
				tickerCount := gmp * tickersPerP
				b.Run(fmt.Sprintf("tickers-per-P=%d", tickersPerP), func(b *testing.B) {
					// allocate memory now to avoid GC interference later.
					stats := make([]struct {
						sum   float64
						max   Duration
						count int64
						_     [5]int64 // cache line padding
					}, tickerCount)

					// Ensure the time to start new threads to service timers
					// will not pollute the results.
					warmupScheduler(gmp)

					b.ResetTimer()

					var wg sync.WaitGroup
					wg.Add(tickerCount)
					for j := 0; j < tickerCount; j++ {
						j := j
						doWork(delay / Duration(gmp))
						expectedWakeup := Now().Add(delay)
						ticker := NewTicker(delay)
						go func(c int, ticker *Ticker, firstWake Time) {
							defer ticker.Stop()

							for ; c > 0; c-- {
								<-ticker.C
								late := Since(expectedWakeup)
								if late < 0 {
									late = 0
								}
								stats[j].count++
								stats[j].sum += float64(late.Nanoseconds())
								if late > stats[j].max {
									stats[j].max = late
								}
								expectedWakeup = expectedWakeup.Add(delay)
								doWork(dur)
							}
							wg.Done()
						}(b.N, ticker, expectedWakeup)
					}
					wg.Wait()

					var total float64
					var samples float64
					max := Duration(0)
					for _, s := range stats {
						if s.max > max {
							max = s.max
						}
						total += s.sum
						samples += float64(s.count)
					}
					b.ReportMetric(0, "ns/op")
					b.ReportMetric(total/samples, "avg-late-ns")
					b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
				})
			}
		})
	}
}

// warmupScheduler ensures the scheduler has at least targetThreadCount threads
// in its thread pool.
func warmupScheduler(targetThreadCount int) {
	var wg sync.WaitGroup
	var count int32
	for i := 0; i < targetThreadCount; i++ {
		wg.Add(1)
		go func() {
			atomic.AddInt32(&count, 1)
			for atomic.LoadInt32(&count) < int32(targetThreadCount) {
				// spin until all threads started
			}

			// spin a bit more to ensure they are all running on separate CPUs.
			doWork(Millisecond)
			wg.Done()
		}()
	}
	wg.Wait()
}

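// doWork busy-loops for the given duration, keeping the calling thread
// running without involving any timers.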
func doWork(dur Duration) {
	start := Now()
	for Since(start) < dur {
	}
}