    unsafe.Offsetof(p{}.timer0When),
    unsafe.Offsetof(p{}.timerModifiedEarliest),
    unsafe.Offsetof(p{}.gcFractionalMarkTime),
-   unsafe.Offsetof(schedt{}.pollUntil),
    unsafe.Offsetof(schedt{}.timeToRun),
    unsafe.Offsetof(timeHistogram{}.underflow),
    unsafe.Offsetof(profBuf{}.overflow),
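Why the offset check can be dropped: align_runtime_test.go only has to enforce 8-byte alignment for plain integer fields that are accessed with the standalone atomic functions; the typed atomics carry their own alignment guarantee, so a converted field falls out of the list. A minimal sketch of that guarantee, assuming the public sync/atomic.Int64 (Go 1.19+), which makes the same promise as the runtime-internal type; the example struct is made up for illustration:

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// example is hypothetical: the leading byte would ordinarily leave a
// 64-bit field only 4-byte aligned on 32-bit targets, but atomic.Int64
// is guaranteed 8-byte aligned wherever it is embedded.
type example struct {
	pad byte
	val atomic.Int64
}

func main() {
	off := unsafe.Offsetof(example{}.val)
	fmt.Println(off, off%8 == 0) // 8 true
}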
    // Poll network until next timer.
    if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
-       atomic.Store64(&sched.pollUntil, uint64(pollUntil))
+       sched.pollUntil.Store(pollUntil)
        if mp.p != 0 {
            throw("findrunnable: netpoll with p")
        }
        if faketime != 0 {
            // When using fake time, just poll.
            delay = 0
        }
        list := netpoll(delay) // block until new work is available
-       atomic.Store64(&sched.pollUntil, 0)
+       sched.pollUntil.Store(0)
        sched.lastpoll.Store(now)
        if faketime != 0 && list.empty() {
            // Using fake time and nothing is ready; stop M.
            stopm()
            goto top
        }
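The ordering in this hunk is the point of the protocol: lastpoll.Swap(0) lets exactly one M claim the poller role, and the deadline is published to pollUntil before the M blocks in netpoll, then cleared once it returns. A compacted sketch under stated assumptions: sync/atomic stands in for the runtime-internal atomics, and lastpoll, pollUntil, and sleepSide are stand-in names, not the runtime's API:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

var (
	lastpoll  atomic.Int64 // nonzero: time of last poll; zero: an M is polling right now
	pollUntil atomic.Int64 // deadline the sleeping poller honors; zero: not sleeping
)

// sleepSide mimics the hunk's ordering: claim the poller role with Swap,
// publish the deadline before blocking, clear it after.
func sleepSide(deadline int64, block func()) {
	if lastpoll.Swap(0) == 0 {
		return // another M is already polling
	}
	pollUntil.Store(deadline) // publish first, so wakers can compare against it
	block()                   // stands in for netpoll(delay)
	pollUntil.Store(0)
	lastpoll.Store(time.Now().UnixNano())
}

func main() {
	lastpoll.Store(time.Now().UnixNano()) // initial state: nobody polling
	sleepSide(time.Now().Add(time.Millisecond).UnixNano(), func() {})
	fmt.Println(pollUntil.Load() == 0, lastpoll.Load() != 0) // true true
}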
    } else if pollUntil != 0 && netpollinited() {
-       pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
+       pollerPollUntil := sched.pollUntil.Load()
        if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
            netpollBreak()
        }
    }

func wakeNetPoller(when int64) {
    if sched.lastpoll.Load() == 0 {
        // In findrunnable we ensure that when polling the pollUntil
        // field is either zero or the time to which the current
        // poll is expected to run. This can have a spurious wakeup
        // but should never miss a wakeup.
-       pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
+       pollerPollUntil := sched.pollUntil.Load()
        if pollerPollUntil == 0 || pollerPollUntil > when {
            netpollBreak()
        }
    }
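Continuing the sketch above with the wake side that both hunks here implement: compare the published deadline against the one we need and break the poller out of its blocking poll only if it would sleep too long. Zero is ambiguous, since the poller may be between claiming the role and publishing its deadline, so we break anyway; as the comment says, that risks a spurious wakeup but never a missed one. wakeSide and netpollBreak are stand-ins:

// wakeSide reuses the pollUntil variable from the sketch above.
func wakeSide(when int64, netpollBreak func()) {
	if d := pollUntil.Load(); d == 0 || d > when {
		netpollBreak() // poller would sleep past `when`, or its state is unknown: wake it
	}
}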
type schedt struct {
-   // accessed atomically. keep at top to ensure alignment on 32-bit systems.
    goidgen   atomic.Uint64
    lastpoll  atomic.Int64 // time of last network poll, 0 if currently polling
-   pollUntil uint64       // time to which current poll is sleeping
+   pollUntil atomic.Int64 // time to which current poll is sleeping

    lock mutex
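The field conversion is what makes the call-site cleanups above fall out: the access discipline moves into the type, and since pollUntil holds a nanotime deadline, changing it from uint64 to atomic.Int64 also removes the int64/uint64 round-trip casts. A hedged before/after of the call shape, shown with the public sync/atomic; schedStandIn is made up, not the real schedt:

package main

import (
	"fmt"
	"sync/atomic"
)

// schedStandIn is a hypothetical stand-in for the runtime's schedt.
type schedStandIn struct {
	pollUntil atomic.Int64
}

func main() {
	var sched schedStandIn
	sched.pollUntil.Store(12345)         // was: atomic.Store64(&sched.pollUntil, uint64(12345))
	fmt.Println(sched.pollUntil.Load())  // was: int64(atomic.Load64(&sched.pollUntil))
	fmt.Println(sched.pollUntil.Swap(0)) // prints 12345; the field is now 0
}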