// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

	"runtime/internal/atomic"

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     An M must have an associated P to execute Go code; however, it can be
//     blocked or in a syscall without an associated P.
//
// Design doc at https://golang.org/s/go11sched.
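// From user code, the G/M/P split is visible mainly through
// runtime.GOMAXPROCS, which reports or sets the number of P's. A minimal
// standalone sketch (a separate program, not part of this file), showing
// many G's multiplexed over at most GOMAXPROCS P's:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//		"sync"
//	)
//
//	func main() {
//		fmt.Println("P's:", runtime.GOMAXPROCS(0)) // 0 queries without changing
//		var wg sync.WaitGroup
//		for i := 0; i < 8; i++ { // 8 G's share the available M's and P's
//			wg.Add(1)
//			go func(id int) {
//				defer wg.Done()
//				fmt.Println("goroutine", id, "ran")
//			}(i)
//		}
//		wg.Wait()
//	}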
//go:linkname runtime_init runtime.init
//go:linkname main_init main.init

// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
// so all cgo calls can rely on it existing. When main_init is complete,
// it is closed, meaning cgocallbackg can reliably receive from it.
var main_init_done chan bool
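// The close-to-broadcast pattern used for main_init_done is ordinary Go:
// once a channel is closed, every receive on it completes immediately.
// A standalone sketch (separate program, not part of this file):
//
//	initDone := make(chan bool)
//	go func() {
//		// ... run initialization ...
//		close(initDone) // all current and future receives now succeed
//	}()
//	<-initDone // blocks only until initialization has completed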
//go:linkname main_main main.main

// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64

// The main goroutine.

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if sys.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// Record when the world started.
	runtimeInitTime = nanotime()

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
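// For example, a program whose C library requires the main thread (as some
// GUI event loops do) can pin main.main this way; the sketch below is
// user-level code, not part of the runtime:
//
//	package main
//
//	import "runtime"
//
//	func init() {
//		// Runs during initialization, so main.main stays on the main thread.
//		runtime.LockOSThread()
//	}
//
//	func main() {
//		// ... main-thread-only work ...
//	}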
81 throw("runtime.main not on m0")
84 runtime_init() // must be before defer
86 // Defer unlock so that runtime.Goexit during init does the unlock too.
96 main_init_done = make(chan bool)
98 if _cgo_thread_start == nil {
99 throw("_cgo_thread_start missing")
101 if _cgo_malloc == nil {
102 throw("_cgo_malloc missing")
104 if _cgo_free == nil {
105 throw("_cgo_free missing")
107 if GOOS != "windows" {
108 if _cgo_setenv == nil {
109 throw("_cgo_setenv missing")
111 if _cgo_unsetenv == nil {
112 throw("_cgo_unsetenv missing")
115 if _cgo_notify_runtime_init_done == nil {
116 throw("_cgo_notify_runtime_init_done missing")
118 cgocall(_cgo_notify_runtime_init_done, nil)
122 close(main_init_done)
127 if isarchive || islibrary {
128 // A program compiled with -buildmode=c-archive or c-shared
129 // has a main, but it is not executed.
137 // Make racy client program work: if panicking on
138 // another goroutine at the same time as main returns,
139 // let the other goroutine finish printing the panic trace.
140 // Once it does, it will exit. See issue 3934.
142 gopark(nil, nil, "panicwait", traceEvGoStop, 1)
152 // os_beforeExit is called from os.Exit(0).
153 //go:linkname os_beforeExit os.runtime_beforeExit
154 func os_beforeExit() {
160 // start forcegc helper goroutine
165 func forcegchelper() {
169 if forcegc.idle != 0 {
170 throw("forcegc: phase error")
172 atomic.Store(&forcegc.idle, 1)
173 goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1)
174 // this goroutine is explicitly resumed by sysmon
175 if debug.gctrace > 0 {
178 gcStart(gcBackgroundMode, true)
184 // Gosched yields the processor, allowing other goroutines to run. It does not
185 // suspend the current goroutine, so execution resumes automatically.
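// A standalone usage sketch (separate program; doSomeWork is a hypothetical
// placeholder): a tight loop can yield explicitly so that other runnable
// goroutines on the same P get a chance to run.
//
//	for i := 0; i < 1000; i++ {
//		doSomeWork(i)
//		runtime.Gosched() // yield; this goroutine resumes automatically later
//	}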
// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) {
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
	gp.waitreason = reason
	mp.waittraceev = traceEv
	mp.waittraceskip = traceskip
	// can't do anything that might move the G between Ms here.

// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
}

func goready(gp *g, traceskip int) {

func acquireSudog() *sudog {
	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stopTheWorld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from central cache.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	throw("acquireSudog: found s.elem != nil in cache")

func releaseSudog(s *sudog) {
	throw("runtime: sudog with non-nil elem")
	if s.selectdone != nil {
		throw("runtime: sudog with non-nil selectdone")
	}
	throw("runtime: sudog with non-nil next")
	throw("runtime: sudog with non-nil prev")
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	throw("runtime: releaseSudog with non-nil gp.param")
	mp := acquirem() // avoid rescheduling to another P
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of local cache to the central cache.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
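// The same two-level caching pattern outside the runtime, as a standalone
// sketch (separate program using the sync package; the names node, central,
// and get are illustrative, not runtime API):
//
//	type node struct{ next *node }
//
//	var (
//		centralMu sync.Mutex
//		central   *node // central free list, protected by centralMu
//	)
//
//	// get pops from the local slice, refilling it from the central list
//	// in batches so the mutex is taken once per batch, not per object.
//	func get(local *[]*node) *node {
//		if len(*local) == 0 {
//			centralMu.Lock()
//			for len(*local) < cap(*local)/2 && central != nil {
//				n := central
//				central = n.next
//				n.next = nil
//				*local = append(*local, n)
//			}
//			centralMu.Unlock()
//			if len(*local) == 0 {
//				*local = append(*local, new(node))
//			}
//		}
//		n := (*local)[len(*local)-1]
//		(*local)[len(*local)-1] = nil
//		*local = (*local)[:len(*local)-1]
//		return n
//	}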
// funcPC returns the entry PC of the function f.
// It assumes that f is a func value. Otherwise the behavior is undefined.
func funcPC(f interface{}) uintptr {
	return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize))
}

// called from assembly
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}

func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}

func badreflectcall() {
	panic("runtime: arg size to reflect.call more than 1GB")
}

func lockedOSThread() bool {
	return gp.lockedm != nil && gp.m.lockedg != nil
}

func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}
	allgs = append(allgs, gp)
	allglen = uintptr(len(allgs))

// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
// 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
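// In outline, the batching looks like this (an illustrative sketch of the
// pattern, using this file's atomics; goidcache/goidcacheend stand for the
// per-P cache bounds): one atomic add claims a whole block of ids, which
// are then handed out locally with no further synchronization.
//
//	const goidCacheBatch = 16
//
//	func refillGoidCache(_p_ *p) {
//		// Claim ids [goidcache, goidcacheend) with a single atomic op.
//		_p_.goidcache = atomic.Xadd64(&sched.goidgen, goidCacheBatch)
//		_p_.goidcache -= goidCacheBatch - 1
//		_p_.goidcacheend = _p_.goidcache + goidCacheBatch
//	}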
// The bootstrap sequence is:
//
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.

	// raceinit must be the first call to race detector.
	// In particular, it must be done before mallocinit below calls racemapshadow.
	_g_.racectx = raceinit()

	sched.maxmcount = 10000

	// Cache the framepointer experiment. This affects stack unwinding.
	framepointer_enabled = haveexperiment("framepointer")

	sched.lastpoll = uint64(nanotime())
	if n := atoi(gogetenv("GOMAXPROCS")); n > 0 {
		if n > _MaxGomaxprocs {

	if procresize(int32(procs)) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}

	if sys.BuildVersion == "" {
		// Condition should never trigger. This code just serves
		// to ensure runtime·buildVersion is kept in the resulting binary.
		sys.BuildVersion = "unknown"
	}

func dumpgstatus(gp *g) {
	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
	print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
}

	// sched lock is held
	if sched.mcount > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}

func mcommoninit(mp *m) {
	// g0 stack won't make sense for user (and is not necessarily unwindable).
	callers(1, mp.createstack[:])

	mp.fastrand = 0x49f6428a + uint32(mp.id) + uint32(cputicks())
	if mp.fastrand == 0 {
		mp.fastrand = 0x49f6428a
	}

	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
	}

	// Add to allm so garbage collector doesn't free g->m
	// when it is just in a register or thread-local storage.

	// NumCgoCall() iterates over allm without schedlock,
	// so we need to publish it safely.
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
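// The publication pattern in miniature: initialize the object fully, then
// make it reachable with one atomic pointer store; lock-free readers pair
// that with an atomic load. An illustrative sketch in this file's idiom
// (not a quote of the real NumCgoCall):
//
//	// writer (holding whatever lock serializes writers):
//	mp.alllink = allm
//	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
//
//	// lock-free reader:
//	for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
//		// *mp is fully initialized: all writes to it happened
//		// before the atomic store that published it.
//	}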
// Mark gp ready to run.
func ready(gp *g, traceskip int) {
	traceGoUnpark(gp, traceskip)

	status := readgstatus(gp)

	_g_.m.locks++ // disable preemption because it can be holding p in a local var
	if status&^_Gscan != _Gwaiting {
		throw("bad g->status in ready")
	}

	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
	casgstatus(gp, _Gwaiting, _Grunnable)
	runqput(_g_.m.p.ptr(), gp, true)
	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { // TODO: fast atomic

	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}

func gcprocs() int32 {
	// Figure out how many CPUs to use during GC.
	// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
	if n > sched.nmidle+1 { // one M is currently running

func needaddgcproc() bool {
	n -= sched.nmidle + 1 // one M is currently running

func helpgc(nproc int32) {
	for n := int32(1); n < nproc; n++ { // one M is currently running
		if allp[pos].mcache == _g_.m.mcache {
			throw("gcprocs inconsistency")
		mp.mcache = allp[pos].mcache

// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to in order to request that all Gs permanently stop.
const freezeStopWait = 0x7fffffff

// Similar to stopTheWorld but best-effort and can be called several times.
// There is no reverse operation; it is used during crashing.
// This function must not lock any mutexes.
func freezetheworld() {
	// stopwait and preemption requests can be lost
	// due to races with concurrently executing threads,
	// so try several times
	for i := 0; i < 5; i++ {
		// this should tell the scheduler to not start any new goroutines
		sched.stopwait = freezeStopWait
		atomic.Store(&sched.gcwaiting, 1)
		// this should stop running goroutines
		break // no running goroutines

func isscanstatus(status uint32) bool {
	if status == _Gscan {
		throw("isscanstatus: Bad status Gscan")
	}
	return status&_Gscan == _Gscan
}

// All reads and writes of g's status go through readgstatus, casgstatus,
// castogscanstatus, and casfrom_Gscanstatus.
func readgstatus(gp *g) uint32 {
	return atomic.Load(&gp.atomicstatus)
}

// Ownership of gscanvalid:
//
// If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
// then gp owns gp.gscanvalid, and other goroutines must not modify it.
//
// Otherwise, a second goroutine can lock the scan state by setting the
// _Gscan bit in the status, modify gscanvalid, and then unlock the scan state.
//
// Note that the first condition implies an exception to the second:
// if a second goroutine changes gp's status to _Grunning|_Gscan,
// that second goroutine still does not have the right to modify gscanvalid.

// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
// we see an inconsistent state.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
	// Check that transition is valid.
	print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
	throw("casfrom_Gscanstatus:top gp->status is not in scan state")

	if newval == oldval&^_Gscan {
		success = atomic.Cas(&gp.atomicstatus, oldval, newval)
	}
	if newval == _Gwaiting {
		success = atomic.Cas(&gp.atomicstatus, oldval, newval)
	}
	print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
	throw("casfrom_Gscanstatus: gp->status is not in scan state")

	if newval == _Grunning {
		gp.gcscanvalid = false
	}

// This will return false if the gp is not in the expected status and the cas fails.
// This acts like a lock acquire while the casfrom_Gscanstatus acts like a lock release.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
	if newval == oldval|_Gscan {
		return atomic.Cas(&gp.atomicstatus, oldval, newval)
	}
	if newval == _Gscanrunning || newval == _Gscanenqueue {
		return atomic.Cas(&gp.atomicstatus, oldval, newval)
	}
	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
	throw("castogscanstatus")
}

// If asked to move to or from a Gscanstatus this will throw. Use castogscanstatus
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
func casgstatus(gp *g, oldval, newval uint32) {
	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
		print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
		throw("casgstatus: bad incoming values")
	}

	if oldval == _Grunning && gp.gcscanvalid {
		// If oldval == _Grunning, then the actual status must be
		// _Grunning or _Grunning|_Gscan; either way,
		// we own gp.gcscanvalid, so it's safe to read.
		// gp.gcscanvalid must not be true when we are running.
		print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
	}

	// loop if gp->atomicstatus is in a scan state giving
	// GC time to finish and change the state to oldval.
	for !atomic.Cas(&gp.atomicstatus, oldval, newval) {
		if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
			throw("casgstatus: waiting for Gwaiting but is Grunnable")
		}
		// Help GC if needed.
		// if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
		// 	gp.preemptscan = false
		// 	systemstack(func() {
	}

	if newval == _Grunning {
		gp.gcscanvalid = false
	}
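// The _Gscan bit acts as a try-lock on the status word. In outline
// (illustrative pseudocode for the three operations above, using the same
// atomics; s, oldval, newval stand for concrete status values):
//
//	// acquire (castogscanstatus): set the scan bit if the status matches.
//	if atomic.Cas(&gp.atomicstatus, s, s|_Gscan) {
//		// we own the scan state; gp's status cannot change underneath us
//	}
//
//	// release (casfrom_Gscanstatus): clear the scan bit.
//	atomic.Cas(&gp.atomicstatus, s|_Gscan, s)
//
//	// mutate (casgstatus): spin while some scanner holds the bit.
//	for !atomic.Cas(&gp.atomicstatus, oldval, newval) {
//		// status is in a _Gscan state; wait for the holder to release it
//	}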
// casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
// Returns old status. Cannot call casgstatus directly, because we are racing with an
// async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
// it might have become Grunnable by the time we get to the cas. If we called casgstatus,
// it would loop waiting for the status to go back to Gwaiting, which it never will.
func casgcopystack(gp *g) uint32 {
	oldstatus := readgstatus(gp) &^ _Gscan
	if oldstatus != _Gwaiting && oldstatus != _Grunnable {
		throw("copystack: bad status, not Gwaiting or Grunnable")
	}
	if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {

// scang blocks until gp's stack has been scanned.
// It might be scanned by scang or it might be scanned by the goroutine itself.
// Either way, the stack scan has completed when scang returns.
//
// Invariant: we (the caller, markroot for a specific goroutine) own gp.gcscandone.
// Nothing is racing with us now, but gcscandone might be set to true left over
// from an earlier round of stack scanning (we scan twice per GC).
// We use gcscandone to record whether the scan has been done during this round.
// It is important that the scan happens exactly once: if called twice,
// the installation of stack barriers will detect the double scan and die.
	gp.gcscandone = false

	// Endeavor to get gcscandone set to true,
	// either by doing the stack scan ourselves or by coercing gp to scan itself.
	// gp.gcscandone can transition from false to true when we're not looking
	// (if we asked for preemption), so any time we lock the status using
	// castogscanstatus we have to double-check that the scan is still not done.
	switch s := readgstatus(gp); s {
		throw("stopg: invalid status")

		// Stack being switched. Go around again.

	case _Grunnable, _Gsyscall, _Gwaiting:
		// Claim goroutine by setting scan bit.
		// Racing with execution or readying of gp.
		// The scan bit keeps them from running
		// the goroutine until we're done.
		if castogscanstatus(gp, s, s|_Gscan) {
			// Coordinate with traceback
			for !atomic.Cas(&gp.stackLock, 0, 1) {
			atomic.Store(&gp.stackLock, 0)

		// newstack is doing a scan for us right now. Wait.

		// Goroutine running. Try to preempt execution so it can scan itself.
		// The preemption handler (in newstack) does the actual scan.

		// Optimization: if there is already a pending preemption request
		// (from the previous loop iteration), don't bother with the atomics.
		if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt {

		// Ask for preemption and self scan.
		if castogscanstatus(gp, _Grunning, _Gscanrunning) {
			gp.preemptscan = true
			gp.stackguard0 = stackPreempt
			casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)

	gp.preemptscan = false // cancel scan request if no longer needed

// The GC requests that this routine be moved from a scanmumble state to a mumble state.
func restartg(gp *g) {
		throw("restartg: unexpected status")

		casfrom_Gscanstatus(gp, s, s&^_Gscan)

		// Scan is now completed.
		// Goroutine now needs to be made runnable.
		// We put it on the global run queue; ready blocks on the global scheduler lock.
		casfrom_Gscanstatus(gp, _Gscanenqueue, _Gwaiting)
		if gp != getg().m.curg {
			throw("processing Gscanenqueue on wrong m")
		}

// stopTheWorld stops all P's from executing goroutines, interrupting
// all goroutines at GC safe points, and records reason as the reason
// for the stop. On return, only the current goroutine's P is running.
// stopTheWorld must not be called from a system stack and the caller
// must not hold worldsema. The caller must call startTheWorld when
// other P's should resume execution.
//
// stopTheWorld is safe for multiple goroutines to call at the
// same time. Each will execute its own stop, and the stops will
// be serialized.
//
// This is also used by routines that do stack dumps. If the system is
// in panic or being exited, this may not reliably stop all
// goroutines.
func stopTheWorld(reason string) {
	semacquire(&worldsema, false)
	getg().m.preemptoff = reason
	systemstack(stopTheWorldWithSema)
}

// startTheWorld undoes the effects of stopTheWorld.
func startTheWorld() {
	systemstack(startTheWorldWithSema)
	// worldsema must be held over startTheWorldWithSema to ensure
	// gomaxprocs cannot change while worldsema is held.
	semrelease(&worldsema)
	getg().m.preemptoff = ""
}

// Holding worldsema grants an M the right to try to stop the world
// and prevents gomaxprocs from changing concurrently.
var worldsema uint32 = 1

// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling
// preemption first, and then should call stopTheWorldWithSema on the
// system stack:
//
//	semacquire(&worldsema, false)
//	m.preemptoff = "reason"
//	systemstack(stopTheWorldWithSema)
//
// When finished, the caller must either call startTheWorld or undo
// these three operations separately:
//
//	m.preemptoff = ""
//	systemstack(startTheWorldWithSema)
//	semrelease(&worldsema)
//
// It is allowed to acquire worldsema once and then execute multiple
// startTheWorldWithSema/stopTheWorldWithSema pairs.
// Other P's are able to execute between successive calls to
// startTheWorldWithSema and stopTheWorldWithSema.
// Holding worldsema causes any other goroutines invoking
// stopTheWorld to block.
func stopTheWorldWithSema() {
	// If we hold a lock, then we won't be able to stop another M
	// that is blocked trying to acquire the lock.
		throw("stopTheWorld: holding locks")

	sched.stopwait = gomaxprocs
	atomic.Store(&sched.gcwaiting, 1)

	_g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.

	// try to retake all P's in Psyscall status
	for i := 0; i < int(gomaxprocs); i++ {
		if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {

	wait := sched.stopwait > 0

	// wait for remaining P's to stop voluntarily

		// wait for 100us, then try to re-preempt in case of any races
		if notetsleep(&sched.stopnote, 100*1000) {
			noteclear(&sched.stopnote)

	if sched.stopwait != 0 {
		throw("stopTheWorld: not stopped")
	}
	for i := 0; i < int(gomaxprocs); i++ {
		if p.status != _Pgcstop {
			throw("stopTheWorld: not stopped")
		}

func startTheWorldWithSema() {
	_g_.m.locks++ // disable preemption because it can be holding p in a local var
	gp := netpoll(false) // non-blocking
	add := needaddgcproc()

	p1 := procresize(procs)

	if sched.sysmonwait != 0 {
		notewakeup(&sched.sysmonnote)
	}

		throw("startTheWorld: inconsistent mp->nextp")

		// Start M to run P. Do not start another M below.

	// Wakeup an additional proc in case we have excessive runnable goroutines
	// in local queues or in the global queue. If we don't, the proc will park itself.
	// If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {

	// If GC could have used another helper proc, start one now,
	// in the hope that it will be available next time.
	// It would have been even better to start it before the collection,
	// but doing so requires allocating memory, so it's tricky to
	// coordinate. This lazy approach works out in practice:
	// we don't mind if the first couple gc rounds don't have quite
	// the maximum number of procs.

	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}

// Called to start an M.
	if _g_.stack.lo == 0 {
		// Initialize stack bounds from system stack.
		// Cgo may have left stack size in stack.hi.
		size := _g_.stack.hi
		size = 8192 * sys.StackGuardMultiplier
		_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
		_g_.stack.lo = _g_.stack.hi - size + 1024
	}
	// Initialize stack guards so that we can start calling
	// both Go and C functions with stack growth prologues.
	_g_.stackguard0 = _g_.stack.lo + _StackGuard
	_g_.stackguard1 = _g_.stackguard0

	if _g_ != _g_.m.g0 {
		throw("bad runtime·mstart")
	}

	// Record top of stack for use by mcall.
	// Once we call schedule we're never coming back,
	// so other calls can reuse this stack space.
	gosave(&_g_.m.g0.sched)
	_g_.m.g0.sched.pc = ^uintptr(0) // make sure it is never used

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.

	// Create an extra M for callbacks on threads not created by Go.
	if iscgo && !cgoHasExtraM {

	if fn := _g_.m.mstartfn; fn != nil {

	if _g_.m.helpgc != 0 {
	} else if _g_.m != &m0 {
		acquirep(_g_.m.nextp.ptr())
	}

// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
// (it is idle or in a syscall), this will call fn(p) directly while
// preventing the P from exiting its state. This does not ensure that
// fn will run on every CPU executing Go code, but it acts as a global
// memory barrier. GC uses this as a "ragged barrier."
//
// The caller must hold worldsema.
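// A ragged barrier in miniature (standalone sketch with sync/atomic, not
// runtime code; workers, w.runFn, pending, fn, and done are illustrative
// names): each worker runs fn at its own next safe point, and the initiator
// waits for all of them without ever stopping the world.
//
//	// initiator: ask every worker to run fn at its next safe point.
//	atomic.StoreInt32(&pending, int32(len(workers)))
//	for _, w := range workers {
//		atomic.StoreInt32(&w.runFn, 1)
//	}
//	<-done // the barrier completes raggedly, one worker at a time
//
//	// each worker, at every safe point in its own loop:
//	if atomic.CompareAndSwapInt32(&w.runFn, 1, 0) {
//		fn()
//		if atomic.AddInt32(&pending, -1) == 0 {
//			close(done) // last worker releases the initiator
//		}
//	}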
func forEachP(fn func(*p)) {
	_p_ := getg().m.p.ptr()

	if sched.safePointWait != 0 {
		throw("forEachP: sched.safePointWait != 0")
	}
	sched.safePointWait = gomaxprocs - 1
	sched.safePointFn = fn

	// Ask all Ps to run the safe point function.
	for _, p := range allp[:gomaxprocs] {
		atomic.Store(&p.runSafePointFn, 1)

	// Any P entering _Pidle or _Psyscall from now on will observe
	// p.runSafePointFn == 1 and will call runSafePointFn when
	// changing its status to _Pidle/_Psyscall.

	// Run safe point function for all idle Ps. sched.pidle will
	// not change because we hold sched.lock.
	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
		if atomic.Cas(&p.runSafePointFn, 1, 0) {
			sched.safePointWait--

	wait := sched.safePointWait > 0

	// Run fn for the current P.

	// Force Ps currently in _Psyscall into _Pidle and hand them
	// off to induce safe point function execution.
	for i := 0; i < int(gomaxprocs); i++ {
		if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {

	// Wait for remaining Ps to run fn.

		// Wait for 100us, then try to re-preempt in
		// case of any races.
		//
		// Requires system stack.
		if notetsleep(&sched.safePointNote, 100*1000) {
			noteclear(&sched.safePointNote)

	if sched.safePointWait != 0 {
		throw("forEachP: not done")
	}
	for i := 0; i < int(gomaxprocs); i++ {
		if p.runSafePointFn != 0 {
			throw("forEachP: P did not run fn")
		}

	sched.safePointFn = nil

// runSafePointFn runs the safe point function, if any, for this P.
// This should be called like
//
//	if getg().m.p.runSafePointFn != 0 {
//		runSafePointFn()
//	}
//
// runSafePointFn must be checked on any transition in to _Pidle or
// _Psyscall to avoid a race where forEachP sees that the P is running
// just before the P goes into _Pidle/_Psyscall and neither forEachP
// nor the P runs the safe-point function.
func runSafePointFn() {
	p := getg().m.p.ptr()
	// Resolve the race between forEachP running the safe-point
	// function on this P's behalf and this P running the
	// safe-point function directly.
	if !atomic.Cas(&p.runSafePointFn, 1, 0) {

	sched.safePointFn(p)

	sched.safePointWait--
	if sched.safePointWait == 0 {
		notewakeup(&sched.safePointNote)
	}

// When running with cgo, we call _cgo_thread_start
// to start threads for us so that we can play nicely with
// foreign code.
var cgoThreadStart unsafe.Pointer

type cgothreadstart struct {

// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
// fn is recorded as the new m's m.mstartfn.
func allocm(_p_ *p, fn func()) *m {
	_g_.m.locks++ // disable GC because it can be called from sysmon
	acquirep(_p_) // temporarily borrow p for mallocs in this function

	// In case of cgo or Solaris, pthread_create will make us a stack.
	// Windows and Plan 9 will lay out the sched stack on the OS stack.
	if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" {
		mp.g0 = malg(8192 * sys.StackGuardMultiplier)

	if _p_ == _g_.m.p.ptr() {

	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}

// needm is called when a cgo callback happens on a
// thread without an m (a thread not created by Go).
// In this case, needm is expected to find an m to use
// and return with m, g initialized correctly.
// Since m and g are not set now (likely nil, but see below)
// needm is limited in what routines it can call. In particular
// it can only call nosplit functions (textflag 7) and cannot
// do any scheduling that requires an m.
//
// In order to avoid needing heavy lifting here, we adopt
// the following strategy: there is a stack of available m's
// that can be stolen. Using compare-and-swap
// to pop from the stack has ABA races, so we simulate
// a lock by doing an exchange (via casp) to steal the stack
// head and replace the top pointer with MLOCKED (1).
// This serves as a simple spin lock that we can use even
// without an m. The thread that locks the stack in this way
// unlocks the stack by storing a valid stack head pointer.
//
// In order to make sure that there is always an m structure
// available to be stolen, we maintain the invariant that there
// is always one more than needed. At the beginning of the
// program (if cgo is in use) the list is seeded with a single m.
// If needm finds that it has taken the last m off the list, its job
// is - once it has installed its own m so that it can do things like
// allocate memory - to create a spare m and put it on the list.
//
// Each of these extra m's also has a g0 and a curg that are
// pressed into service as the scheduling stack and current
// goroutine for the duration of the cgo callback.
//
// When the callback is done with the m, it calls dropm to
// put the m back on the list.
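// The lock-by-exchange idea in outline (illustrative pseudocode for the
// lockextra/unlockextra pair defined below; "locked" stands for the
// sentinel value 1 stored in place of a real head pointer):
//
//	func lockhead(head *uintptr) uintptr {
//		for {
//			old := atomic.Loaduintptr(head)
//			if old == locked {
//				// Another thread owns the list; spin.
//				osyield()
//				continue
//			}
//			if atomic.Casuintptr(head, old, locked) {
//				// Caller now owns the list; it unlocks by
//				// storing a valid head pointer back.
//				return old
//			}
//		}
//	}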
func needm(x byte) {
	if iscgo && !cgoHasExtraM {
		// Can happen if C/C++ code calls Go from a global ctor.
		// Can not throw, because scheduler is not initialized yet.
		write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))

	// Lock extra list, take head, unlock popped list.
	// nilokay=false is safe here because of the invariant above,
	// that the extra list always contains or will soon contain
	// at least one m.
	mp := lockextra(false)

	// Set needextram when we've just emptied the list,
	// so that the eventual call into cgocallbackg will
	// allocate a new m for the extra list. We delay the
	// allocation until then so that it can be done
	// after exitsyscall makes sure it is okay to be
	// running at all (that is, there's no garbage collection
	// running right now).
	mp.needextram = mp.schedlink == 0
	unlockextra(mp.schedlink.ptr())

	// Install g (= m->g0) and set the stack bounds
	// to match the current stack. We don't actually know
	// how big the stack is, like we don't know how big any
	// scheduling stack is, but we assume there's at least 32 kB,
	// which is more than enough for us.
	_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
	_g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
	_g_.stackguard0 = _g_.stack.lo + _StackGuard

	// Initialize this thread to use the m.

var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")

// newextram allocates an m and puts it on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.
	// Create extra goroutine locked to extra m.
	// The goroutine is the context in which the cgo callback will run.
	// The sched.pc will never be returned to, but setting it to
	// goexit makes clear to the traceback routines where
	// the goroutine stack ends.
	mp := allocm(nil, nil)
	gp.sched.pc = funcPC(goexit) + sys.PCQuantum
	gp.sched.sp = gp.stack.hi
	gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.syscallpc = gp.sched.pc
	gp.syscallsp = gp.sched.sp
	gp.stktopsp = gp.sched.sp
	// malg returns status as Gidle, change to Gsyscall before adding to allg
	// where GC will see it.
	casgstatus(gp, _Gidle, _Gsyscall)
	mp.locked = _LockInternal
	gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
	gp.racectx = racegostart(funcPC(newextram))

	// put on allg for garbage collector

	// Add m to the extra list.
	mnext := lockextra(true)
	mp.schedlink.set(mnext)

// dropm is called when a cgo callback has called needm but is now
// done with the callback and returning back into the non-Go thread.
// It puts the current m back onto the extra list.
//
// The main expense here is the call to signalstack to release the
// m's signal stack, and then the call to needm on the next callback
// from this thread. It is tempting to try to save the m for next time,
// which would eliminate both these costs, but there might not be
// a next time: the current thread (which Go does not control) might exit.
// If we saved the m for that thread, there would be an m leak each time
// such a thread exited. Instead, we acquire and release an m on each
// call. These should typically not be scheduling operations, just a few
// atomics, so the cost should be small.
//
// TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
// variable using pthread_key_create. Unlike the pthread keys we already use
// on OS X, this dummy key would never be read by Go code. It would exist
// only so that we could register a thread-exit-time destructor.
// That destructor would put the m back onto the extra list.
// This is purely a performance optimization. The current version,
// in which dropm happens on each cgo call, is still correct too.
// We may have to keep the current version on systems with cgo
// but without pthreads, like Windows.

	// Undo whatever initialization minit did during needm.

	// Clear m and g, and return m to the extra list.
	// After the call to setg we can only call nosplit functions
	// with no pointer manipulation.
	mnext := lockextra(true)
	mp.schedlink.set(mnext)

// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head
// to extram. If nilokay is true, then lockextra will
// return a nil list head if that's what it finds. If nilokay is false,
// lockextra will keep waiting until the list head is no longer nil.
func lockextra(nilokay bool) *m {
	old := atomic.Loaduintptr(&extram)
	if old == 0 && !nilokay {
	if atomic.Casuintptr(&extram, old, locked) {
		return (*m)(unsafe.Pointer(old))
	}

func unlockextra(mp *m) {
	atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
}

// Create a new m. It will start off with a call to fn, or else the scheduler.
// fn needs to be static and not a heap allocated closure.
// May run with m.p==nil, so write barriers are not allowed.
func newm(fn func(), _p_ *p) {
	mp := allocm(_p_, fn)

	var ts cgothreadstart
	if _cgo_thread_start == nil {
		throw("_cgo_thread_start missing")
	}
	ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
	ts.fn = unsafe.Pointer(funcPC(mstart))
	asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))

	newosproc(mp, unsafe.Pointer(mp.g0.stack.hi))

// Stops execution of the current m until new work is available.
// Returns with acquired P.
	if _g_.m.locks != 0 {
		throw("stopm holding locks")
	}
	throw("stopm holding p")

	_g_.m.spinning = false
	atomic.Xadd(&sched.nmspinning, -1)

	notesleep(&_g_.m.park)
	noteclear(&_g_.m.park)
	if _g_.m.helpgc != 0 {

	acquirep(_g_.m.nextp.ptr())

	if !runqempty(gp.m.nextp.ptr()) {
		// Something (presumably the GC) was readied while the
		// runtime was starting up this M, so the M is no
		// longer spinning.
		if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
			throw("mspinning: nmspinning underflowed")
		}
	} else {
		gp.m.spinning = true
	}

// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P, if no idle P's does nothing.
// May run with m.p==nil, so write barriers are not allowed.
func startm(_p_ *p, spinning bool) {
		atomic.Xadd(&sched.nmspinning, -1)

		throw("startm: m is spinning")
		throw("startm: m has p")
	if spinning && !runqempty(_p_) {
		throw("startm: p has runnable gs")
	}
	mp.spinning = spinning
	notewakeup(&mp.park)

// Hands off P from syscall or locked M.
// Always runs without a P, so write barriers are not allowed.
func handoffp(_p_ *p) {
	// if it has local work, start it straight away
	if !runqempty(_p_) || sched.runqsize != 0 {

	// no local work, check that there are no spinning/idle M's,
	// otherwise our help is not required
	if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic

	if sched.gcwaiting != 0 {
		_p_.status = _Pgcstop
		if sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
	if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
		sched.safePointFn(_p_)
		sched.safePointWait--
		if sched.safePointWait == 0 {
			notewakeup(&sched.safePointNote)
		}
	if sched.runqsize != 0 {

	// If this is the last running P and nobody is polling network,
	// need to wakeup another M to poll network.
	if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {

// Tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).
	// be conservative about spinning threads
	if !atomic.Cas(&sched.nmspinning, 0, 1) {

// Stops execution of the current m that is locked to a g until the g is runnable again.
// Returns with acquired P.
func stoplockedm() {
	if _g_.m.lockedg == nil || _g_.m.lockedg.lockedm != _g_.m {
		throw("stoplockedm: inconsistent locking")
	}
	// Schedule another M to run this p.

	// Wait until another thread schedules lockedg again.
	notesleep(&_g_.m.park)
	noteclear(&_g_.m.park)
	status := readgstatus(_g_.m.lockedg)
	if status&^_Gscan != _Grunnable {
		print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
		throw("stoplockedm: not runnable")
	}
	acquirep(_g_.m.nextp.ptr())

// Schedules the locked m to run the locked gp.
// May run during STW, so write barriers are not allowed.
func startlockedm(gp *g) {
		throw("startlockedm: locked to me")
		throw("startlockedm: m has p")
	// directly handoff current P to the locked m
	notewakeup(&mp.park)

// Stops the current m for stopTheWorld.
// Returns when the world is restarted.
	if sched.gcwaiting == 0 {
		throw("gcstopm: not waiting for gc")
	}
	_g_.m.spinning = false
	atomic.Xadd(&sched.nmspinning, -1)

	_p_.status = _Pgcstop
	if sched.stopwait == 0 {
		notewakeup(&sched.stopnote)
	}

// Schedules gp to run on the current M.
// If inheritTime is true, gp inherits the remaining time in the
// current time slice. Otherwise, it starts a new time slice.
func execute(gp *g, inheritTime bool) {
	casgstatus(gp, _Grunnable, _Grunning)
	gp.stackguard0 = gp.stack.lo + _StackGuard
	_g_.m.p.ptr().schedtick++

	// Check whether the profiler needs to be turned on or off.
	hz := sched.profilehz
	if _g_.m.profilehz != hz {
		resetcpuprofiler(hz)

	// GoSysExit has to happen when we have a P, but before GoStart.
	// So we emit it here.
	if gp.syscallsp != 0 && gp.sysblocktraced {
		// Since gp.sysblocktraced is true, we must emit an event.
		// There is a race between the code that initializes sysexitseq
		// and sysexitticks (in exitsyscall, which runs without a P,
		// and therefore is not stopped with the rest of the world)
		// and the code that initializes a new trace.
		// The recorded sysexitseq and sysexitticks must therefore
		// be treated as "best effort". If they are valid for this trace,
		// then great, use them for greater accuracy.
		// But if they're not valid for this trace, assume that the
		// trace was started after the actual syscall exit (but before
		// we actually managed to start the goroutine, aka right now),
		// and assign a fresh time stamp to keep the log consistent.
		seq, ts := gp.sysexitseq, gp.sysexitticks
		if seq == 0 || int64(seq)-int64(trace.seqStart) < 0 {
			seq, ts = tracestamp()
		}
		traceGoSysExit(seq, ts)

// Finds a runnable goroutine to execute.
// Tries to steal from other P's, get g from global queue, poll network.
func findrunnable() (gp *g, inheritTime bool) {
	if sched.gcwaiting != 0 {

	if _g_.m.p.ptr().runSafePointFn != 0 {

	if fingwait && fingwake {
		if gp := wakefing(); gp != nil {

	if gp, inheritTime := runqget(_g_.m.p.ptr()); gp != nil {
		return gp, inheritTime
	}

	if sched.runqsize != 0 {
		gp := globrunqget(_g_.m.p.ptr(), 0)

	// This netpoll is only an optimization before we resort to stealing.
	// We can safely skip it if there is a thread blocked in netpoll already.
	// If there is any kind of logical race with that blocked thread
	// (e.g. it has already returned from netpoll, but does not set lastpoll yet),
	// this thread will do blocking netpoll below anyway.
	if netpollinited() && sched.lastpoll != 0 {
		if gp := netpoll(false); gp != nil { // non-blocking
			// netpoll returns list of goroutines linked by schedlink.
			injectglist(gp.schedlink.ptr())
			casgstatus(gp, _Gwaiting, _Grunnable)
			traceGoUnpark(gp, 0)

	// If number of spinning M's >= number of busy P's, block.
	// This is necessary to prevent excessive CPU consumption
	// when GOMAXPROCS>>1 but the program parallelism is low.
	if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= uint32(gomaxprocs)-atomic.Load(&sched.npidle) { // TODO: fast atomic

	if !_g_.m.spinning {
		_g_.m.spinning = true
		atomic.Xadd(&sched.nmspinning, 1)
	}
	// random steal from other P's
	for i := 0; i < int(4*gomaxprocs); i++ {
		if sched.gcwaiting != 0 {

		_p_ := allp[fastrand1()%uint32(gomaxprocs)]
		if _p_ == _g_.m.p.ptr() {
			gp, _ = runqget(_p_)
		} else {
			stealRunNextG := i > 2*int(gomaxprocs) // first look for ready queues with more than 1 g
			gp = runqsteal(_g_.m.p.ptr(), _p_, stealRunNextG)
		}

	// We have nothing to do. If we're in the GC mark phase, can
	// safely scan and blacken objects, and have work to do, run
	// idle-time marking rather than give up the P.
	if _p_ := _g_.m.p.ptr(); gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != nil && gcMarkWorkAvailable(_p_) {
		_p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
		gp := _p_.gcBgMarkWorker
		casgstatus(gp, _Gwaiting, _Grunnable)
		traceGoUnpark(gp, 0)

	// return P and block
	if sched.gcwaiting != 0 || _g_.m.p.ptr().runSafePointFn != 0 {

	if sched.runqsize != 0 {
		gp := globrunqget(_g_.m.p.ptr(), 0)

	_g_.m.spinning = false
	atomic.Xadd(&sched.nmspinning, -1)

	// check all runqueues once again
	for i := 0; i < int(gomaxprocs); i++ {
		if _p_ != nil && !runqempty(_p_) {

	if netpollinited() && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
		throw("findrunnable: netpoll with p")
		throw("findrunnable: netpoll with spinning")
		gp := netpoll(true) // block until new work is available
		atomic.Store64(&sched.lastpoll, uint64(nanotime()))
		injectglist(gp.schedlink.ptr())
		casgstatus(gp, _Gwaiting, _Grunnable)
		traceGoUnpark(gp, 0)

func resetspinning() {
	var nmspinning uint32
	_g_.m.spinning = false
	nmspinning = atomic.Xadd(&sched.nmspinning, -1)
	if int32(nmspinning) < 0 {
		throw("findrunnable: negative nmspinning")
	}
	nmspinning = atomic.Load(&sched.nmspinning)

	// M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
	// so see if we need to wakeup another P here.
	if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 {

// Injects the list of runnable G's into the scheduler.
// Can run concurrently with GC.
func injectglist(glist *g) {
	for gp := glist; gp != nil; gp = gp.schedlink.ptr() {
		traceGoUnpark(gp, 0)
	}
	for n = 0; glist != nil; n++ {
		glist = gp.schedlink.ptr()
		casgstatus(gp, _Gwaiting, _Grunnable)
	}
	for ; n != 0 && sched.npidle != 0; n-- {

// One round of scheduler: find a runnable goroutine and execute it.
	if _g_.m.locks != 0 {
		throw("schedule: holding locks")
	}

	if _g_.m.lockedg != nil {
		execute(_g_.m.lockedg, false) // Never returns.
	}

	if sched.gcwaiting != 0 {

	if _g_.m.p.ptr().runSafePointFn != 0 {

	var inheritTime bool
	if trace.enabled || trace.shutdown {
		casgstatus(gp, _Gwaiting, _Grunnable)
		traceGoUnpark(gp, 0)

	if gp == nil && gcBlackenEnabled != 0 {
		gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
	}

	// Check the global runnable queue once in a while to ensure fairness.
	// Otherwise two goroutines can completely occupy the local runqueue
	// by constantly respawning each other.
	if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
		gp = globrunqget(_g_.m.p.ptr(), 1)

	gp, inheritTime = runqget(_g_.m.p.ptr())
	if gp != nil && _g_.m.spinning {
		throw("schedule: spinning with local work")
	}

	gp, inheritTime = findrunnable() // blocks until work is available

	if gp.lockedm != nil {
		// Hands off own p to the locked m,
		// then blocks waiting for a new p.

	execute(gp, inheritTime)
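// The fairness rule in isolation (a standalone sketch, not runtime code;
// tick, localQueue, globalQueue, findWork, and run are illustrative names):
// polling the shared queue on every 61st tick keeps two goroutines that
// keep respawning each other in the local queue from starving the rest.
//
//	tick++
//	var g *G
//	if tick%61 == 0 && globalQueue.Len() > 0 {
//		g = globalQueue.Pop() // forced global-queue poll for fairness
//	} else {
//		g = localQueue.Pop()
//	}
//	if g == nil {
//		g = findWork() // steal, global queue, netpoll, or block
//	}
//	run(g)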
1979 // dropg removes the association between m and the current goroutine m->curg (gp for short).
1980 // Typically a caller sets gp's status away from Grunning and then
1981 // immediately calls dropg to finish the job. The caller is also responsible
1982 // for arranging that gp will be restarted using ready at an
1983 // appropriate time. After calling dropg and arranging for gp to be
1984 // readied later, the caller can do other work but eventually should
1985 // call schedule to restart the scheduling of goroutines on this m.
1989 if _g_.m.lockedg == nil {
1995 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
1996 unlock((*mutex)(lock))
2000 // park continuation on g0.
2001 func park_m(gp *g) {
2005 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip, gp)
2008 casgstatus(gp, _Grunning, _Gwaiting)
2011 if _g_.m.waitunlockf != nil {
2012 fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf))
2013 ok := fn(gp, _g_.m.waitlock)
2014 _g_.m.waitunlockf = nil
2015 _g_.m.waitlock = nil
2018 traceGoUnpark(gp, 2)
2020 casgstatus(gp, _Gwaiting, _Grunnable)
2021 execute(gp, true) // Schedule it back, never returns.
2027 func goschedImpl(gp *g) {
2028 status := readgstatus(gp)
2029 if status&^_Gscan != _Grunning {
2031 throw("bad g status")
2033 casgstatus(gp, _Grunning, _Grunnable)
2042 // Gosched continuation on g0.
2043 func gosched_m(gp *g) {
2050 func gopreempt_m(gp *g) {
2057 // Finishes execution of the current goroutine.
2068 // goexit continuation on g0.
2069 func goexit0(gp *g) {
2072 casgstatus(gp, _Grunning, _Gdead)
2076 gp.paniconfault = false
2077 gp._defer = nil // should be true already but just in case.
2078 gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
2085 if _g_.m.locked&^_LockExternal != 0 {
2086 print("invalid m->locked = ", _g_.m.locked, "\n")
2087 throw("internal lockOSThread error")
2090 gfput(_g_.m.p.ptr(), gp)
2096 func save(pc, sp uintptr) {
2103 _g_.sched.ctxt = nil
2104 _g_.sched.g = guintptr(unsafe.Pointer(_g_))
2107 // The goroutine g is about to enter a system call.
2108 // Record that it's not using the cpu anymore.
2109 // This is called only from the go syscall library and cgocall,
2110 // not from the low-level system calls used by the runtime.
2112 // Entersyscall cannot split the stack: the gosave must
2113 // make g->sched refer to the caller's stack segment, because
2114 // entersyscall is going to return immediately after.
2116 // Nothing entersyscall calls can split the stack either.
2117 // We cannot safely move the stack during an active call to syscall,
2118 // because we do not know which of the uintptr arguments are
2119 // really pointers (back into the stack).
2120 // In practice, this means that we make the fast path run through
2121 // entersyscall doing no-split things, and the slow path has to use systemstack
2122 // to run bigger things on the system stack.
2124 // reentersyscall is the entry point used by cgo callbacks, where explicitly
2125 // saved SP and PC are restored. This is needed when exitsyscall will be called
2126 // from a function further up in the call stack than the parent, as g->syscallsp
2127 // must always point to a valid stack frame. entersyscall below is the normal
2128 // entry point for syscalls, which obtains the SP and PC from the caller.
2131 // At the start of a syscall we emit traceGoSysCall to capture the stack trace.
2132 // If the syscall does not block, that is it, we do not emit any other events.
2133 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
2134 // when syscall returns we emit traceGoSysExit and when the goroutine starts running
2135 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
2136 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
2137 // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
2138 // whoever emits traceGoSysBlock increments p.syscalltick afterwards;
2139 // and we wait for the increment before emitting traceGoSysExit.
2140 // Note that the increment is done even if tracing is not enabled,
2141 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang.
2144 func reentersyscall(pc, sp uintptr) {
2147 // Disable preemption because during this function g is in Gsyscall status,
2148 // but can have inconsistent g->sched, do not let GC observe it.
2151 // Entersyscall must not call any function that might split/grow the stack.
2152 // (See details in comment above.)
2153 // Catch calls that might, by replacing the stack guard with something that
2154 // will trip any stack check and leaving a flag to tell newstack to die.
2155 _g_.stackguard0 = stackPreempt
2156 _g_.throwsplit = true
2158 // Leave SP around for GC and traceback.
2162 casgstatus(_g_, _Grunning, _Gsyscall)
2163 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
2164 systemstack(func() {
2165 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
2166 throw("entersyscall")
2171 systemstack(traceGoSysCall)
2172 // systemstack itself clobbers g.sched.{pc,sp} and we might
2173 // need them later when the G is genuinely blocked in a
2178 if atomic.Load(&sched.sysmonwait) != 0 { // TODO: fast atomic
2179 systemstack(entersyscall_sysmon)
2183 if _g_.m.p.ptr().runSafePointFn != 0 {
2184 // runSafePointFn may stack split if run on this stack
2185 systemstack(runSafePointFn)
2189 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
2190 _g_.sysblocktraced = true
2193 atomic.Store(&_g_.m.p.ptr().status, _Psyscall)
2194 if sched.gcwaiting != 0 {
2195 systemstack(entersyscall_gcwait)
2199 // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
2200 // We set _StackGuard to StackPreempt so that first split stack check calls morestack.
2201 // Morestack detects this case and throws.
2202 _g_.stackguard0 = stackPreempt
2206 // Standard syscall entry used by the go syscall library and normal cgo calls.
2208 func entersyscall(dummy int32) {
2209 reentersyscall(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))
2212 func entersyscall_sysmon() {
2214 if atomic.Load(&sched.sysmonwait) != 0 {
2215 atomic.Store(&sched.sysmonwait, 0)
2216 notewakeup(&sched.sysmonnote)
2221 func entersyscall_gcwait() {
2223 _p_ := _g_.m.p.ptr()
2226 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
2228 traceGoSysBlock(_p_)
2232 if sched.stopwait--; sched.stopwait == 0 {
2233 notewakeup(&sched.stopnote)
2239 // The same as entersyscall(), but with a hint that the syscall is blocking.
2241 func entersyscallblock(dummy int32) {
2244 _g_.m.locks++ // see comment in entersyscall
2245 _g_.throwsplit = true
2246 _g_.stackguard0 = stackPreempt // see comment in entersyscall
2247 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
2248 _g_.sysblocktraced = true
2249 _g_.m.p.ptr().syscalltick++
2251 // Leave SP around for GC and traceback.
2252 pc := getcallerpc(unsafe.Pointer(&dummy))
2253 sp := getcallersp(unsafe.Pointer(&dummy))
2255 _g_.syscallsp = _g_.sched.sp
2256 _g_.syscallpc = _g_.sched.pc
2257 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
2260 sp3 := _g_.syscallsp
2261 systemstack(func() {
2262 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
2263 throw("entersyscallblock")
2266 casgstatus(_g_, _Grunning, _Gsyscall)
2267 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
2268 systemstack(func() {
2269 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
2270 throw("entersyscallblock")
2274 systemstack(entersyscallblock_handoff)
2276 // Resave for traceback during blocked call.
2277 save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))
2282 func entersyscallblock_handoff() {
2285 traceGoSysBlock(getg().m.p.ptr())
2287 handoffp(releasep())
2290 // The goroutine g exited its system call.
2291 // Arrange for it to run on a cpu again.
2292 // This is called only from the go syscall library, not
2293 // from the low-level system calls used by the
2295 func exitsyscall(dummy int32) {
2298 _g_.m.locks++ // see comment in entersyscall
2299 if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp {
2300 throw("exitsyscall: syscall frame is no longer valid")
2304 oldp := _g_.m.p.ptr()
2305 if exitsyscallfast() {
2306 if _g_.m.mcache == nil {
2307 throw("lost mcache")
2310 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
2311 systemstack(traceGoStart)
2314 // There's a cpu for us, so we can run.
2315 _g_.m.p.ptr().syscalltick++
2316 // We need to cas the status and scan before resuming...
2317 casgstatus(_g_, _Gsyscall, _Grunning)
2319 // Garbage collector isn't running (since we are),
2320 // so okay to clear syscallsp.
2324 // restore the preemption request in case we've cleared it in newstack
2325 _g_.stackguard0 = stackPreempt
2327 // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock
2328 _g_.stackguard0 = _g_.stack.lo + _StackGuard
2330 _g_.throwsplit = false
2334 _g_.sysexitticks = 0
2337 // Wait till traceGoSysBlock event is emitted.
2338 // This ensures consistency of the trace (the goroutine is started after it is blocked).
2339 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
2342 // We can't trace syscall exit right now because we don't have a P.
2343 // Tracing code can invoke write barriers that cannot run without a P.
2344 // So instead we remember the syscall exit time and emit the event
2345 // in execute when we have a P.
2346 _g_.sysexitseq, _g_.sysexitticks = tracestamp()
2351 // Call the scheduler.
2354 if _g_.m.mcache == nil {
2355 throw("lost mcache")
2358 // Scheduler returned, so we're allowed to run now.
2359 // Delete the syscallsp information that we left for
2360 // the garbage collector during the system call.
2361 // Must wait until now because until gosched returns
2362 // we don't know for sure that the garbage collector
2365 _g_.m.p.ptr().syscalltick++
2366 _g_.throwsplit = false
2370 func exitsyscallfast() bool {
2373 // Freezetheworld sets stopwait but does not retake P's.
2374 if sched.stopwait == freezeStopWait {
2380 // Try to re-acquire the last P.
2381 if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) {
2382 // There's a cpu for us, so we can run.
2383 _g_.m.mcache = _g_.m.p.ptr().mcache
2384 _g_.m.p.ptr().m.set(_g_.m)
2385 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
2387 // The p was retaken and then entered a syscall again (since _g_.m.syscalltick has changed).
2388 // traceGoSysBlock for this syscall was already emitted,
2389 // but here we effectively retake the p from the new syscall running on the same p.
2390 systemstack(func() {
2391 // Denote blocking of the new syscall.
2392 traceGoSysBlock(_g_.m.p.ptr())
2393 // Denote completion of the current syscall.
2394 traceGoSysExit(tracestamp())
2397 _g_.m.p.ptr().syscalltick++
2402 // Try to get any other idle P.
2403 oldp := _g_.m.p.ptr()
2406 if sched.pidle != 0 {
2408 systemstack(func() {
2409 ok = exitsyscallfast_pidle()
2410 if ok && trace.enabled {
2412 // Wait till traceGoSysBlock event is emitted.
2413 // This ensures consistency of the trace (the goroutine is started after it is blocked).
2414 for oldp.syscalltick == _g_.m.syscalltick {
2418 traceGoSysExit(tracestamp())
2428 func exitsyscallfast_pidle() bool {
2431 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
2432 atomic.Store(&sched.sysmonwait, 0)
2433 notewakeup(&sched.sysmonnote)
2443 // exitsyscall slow path on g0.
2444 // Failed to acquire P, enqueue gp as runnable.
2445 func exitsyscall0(gp *g) {
2448 casgstatus(gp, _Gsyscall, _Grunnable)
2454 } else if atomic.Load(&sched.sysmonwait) != 0 {
2455 atomic.Store(&sched.sysmonwait, 0)
2456 notewakeup(&sched.sysmonnote)
2461 execute(gp, false) // Never returns.
2463 if _g_.m.lockedg != nil {
2464 // Wait until another thread schedules gp and so m again.
2466 execute(gp, false) // Never returns.
2469 schedule() // Never returns.
2475 // Fork can hang if preempted with signals frequently enough (see issue 5517).
2476 // Ensure that we stay on the same M where we disable profiling.
2478 if gp.m.profilehz != 0 {
2482 // This function is called before fork in syscall package.
2483 // Code between fork and exec must not allocate memory nor even try to grow the stack.
2484 // Here we spoil g.stackguard0 to reliably detect any attempts to grow the stack.
2485 // runtime_AfterFork will undo this in the parent process, but not in the child.
2486 gp.stackguard0 = stackFork
2489 // Called from syscall package before fork.
2490 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
2492 func syscall_runtime_BeforeFork() {
2493 systemstack(beforefork)
2499 // See the comment in beforefork.
2500 gp.stackguard0 = gp.stack.lo + _StackGuard
2502 hz := sched.profilehz
2504 resetcpuprofiler(hz)
2509 // Called from syscall package after fork in parent.
2510 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
2512 func syscall_runtime_AfterFork() {
2513 systemstack(afterfork)
2516 // Allocate a new g, with a stack big enough for stacksize bytes.
2517 func malg(stacksize int32) *g {
2520 stacksize = round2(_StackSystem + stacksize)
2521 systemstack(func() {
2522 newg.stack, newg.stkbar = stackalloc(uint32(stacksize))
2524 newg.stackguard0 = newg.stack.lo + _StackGuard
2525 newg.stackguard1 = ^uintptr(0)
2526 newg.stackAlloc = uintptr(stacksize)
2531 // Create a new g running fn with siz bytes of arguments.
2532 // Put it on the queue of g's waiting to run.
2533 // The compiler turns a go statement into a call to this.
2534 // Cannot split the stack because it assumes that the arguments
2535 // are available sequentially after &fn; they would not be
2536 // copied if a stack split occurred.
2538 func newproc(siz int32, fn *funcval) {
2539 argp := add(unsafe.Pointer(&fn), sys.PtrSize)
2540 pc := getcallerpc(unsafe.Pointer(&siz))
2541 systemstack(func() {
2542 newproc1(fn, (*uint8)(argp), siz, 0, pc)
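// Illustrative sketch (editor's addition, not part of the original file): the
// kind of statement the compiler lowers into the newproc call above. The names
// worker, id, and jobs are hypothetical.
//
//	go worker(id, jobs) // becomes roughly: newproc(siz, fn), with siz bytes of
//	                    // arguments copied from the caller's frame after &fn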
2546 // Create a new g running fn with narg bytes of arguments starting
2547 // at argp and returning nret bytes of results. callerpc is the
2548 // address of the go statement that created this. The new g is put
2549 // on the queue of g's waiting to run.
2550 func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr) *g {
2554 _g_.m.throwing = -1 // do not dump full stacks
2555 throw("go of nil func value")
2557 _g_.m.locks++ // disable preemption because it can be holding p in a local var
2559 siz = (siz + 7) &^ 7
2561 // We could allocate a larger initial stack if necessary.
2562 // Not worth it: this is almost always an error.
2563 // 4*sizeof(uintreg): extra space added below
2564 // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
2565 if siz >= _StackMin-4*sys.RegSize-sys.RegSize {
2566 throw("newproc: function arguments too large for new goroutine")
2569 _p_ := _g_.m.p.ptr()
2572 newg = malg(_StackMin)
2573 casgstatus(newg, _Gidle, _Gdead)
2574 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
2576 if newg.stack.hi == 0 {
2577 throw("newproc1: newg missing stack")
2580 if readgstatus(newg) != _Gdead {
2581 throw("newproc1: new g is not Gdead")
2584 totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame
2585 totalSize += -totalSize & (sys.SpAlign - 1) // align to spAlign
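// Editor's note: the line above rounds totalSize up to a multiple of sys.SpAlign
// using the -x & (align-1) trick. On architectures where SpAlign is 1 this is a
// no-op; if SpAlign were 16 and totalSize were 40, then -40 & 15 == 8, so
// totalSize would become 48, the next multiple of 16.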
2586 sp := newg.stack.hi - totalSize
2590 *(*unsafe.Pointer)(unsafe.Pointer(sp)) = nil
2591 spArg += sys.MinFrameSize
2593 memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg))
2595 memclr(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
2598 newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function
2599 newg.sched.g = guintptr(unsafe.Pointer(newg))
2600 gostartcallfn(&newg.sched, fn)
2601 newg.gopc = callerpc
2602 newg.startpc = fn.fn
2603 casgstatus(newg, _Gdead, _Grunnable)
2605 if _p_.goidcache == _p_.goidcacheend {
2606 // Sched.goidgen is the last allocated id,
2607 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
2608 // At startup sched.goidgen=0, so main goroutine receives goid=1.
2609 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
2610 _p_.goidcache -= _GoidCacheBatch - 1
2611 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
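// Editor's note, a worked example assuming _GoidCacheBatch is 16: starting from
// sched.goidgen == 0, the Xadd64 above returns 16, so goidcache becomes
// 16 - (16-1) = 1 and goidcacheend becomes 17. This P then hands out goids 1
// through 16 before refilling, which is how the main goroutine ends up with goid 1.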
2613 newg.goid = int64(_p_.goidcache)
2616 newg.racectx = racegostart(callerpc)
2619 traceGoCreate(newg, newg.startpc)
2621 runqput(_p_, newg, true)
2623 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && unsafe.Pointer(fn.fn) != unsafe.Pointer(funcPC(main)) { // TODO: fast atomic
2627 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
2628 _g_.stackguard0 = stackPreempt
2633 // Put on gfree list.
2634 // If local list is too long, transfer a batch to the global list.
2635 func gfput(_p_ *p, gp *g) {
2636 if readgstatus(gp) != _Gdead {
2637 throw("gfput: bad status (not Gdead)")
2640 stksize := gp.stackAlloc
2642 if stksize != _FixedStack {
2643 // non-standard stack size - free it.
2644 stackfree(gp.stack, gp.stackAlloc)
2651 // Reset stack barriers.
2652 gp.stkbar = gp.stkbar[:0]
2656 gp.schedlink.set(_p_.gfree)
2659 if _p_.gfreecnt >= 64 {
2661 for _p_.gfreecnt >= 32 {
2664 _p_.gfree = gp.schedlink.ptr()
2665 gp.schedlink.set(sched.gfree)
2669 unlock(&sched.gflock)
2673 // Get from gfree list.
2674 // If local list is empty, grab a batch from global list.
2675 func gfget(_p_ *p) *g {
2678 if gp == nil && sched.gfree != nil {
2680 for _p_.gfreecnt < 32 && sched.gfree != nil {
2683 sched.gfree = gp.schedlink.ptr()
2685 gp.schedlink.set(_p_.gfree)
2688 unlock(&sched.gflock)
2692 _p_.gfree = gp.schedlink.ptr()
2694 if gp.stack.lo == 0 {
2695 // Stack was deallocated in gfput. Allocate a new one.
2696 systemstack(func() {
2697 gp.stack, gp.stkbar = stackalloc(_FixedStack)
2699 gp.stackguard0 = gp.stack.lo + _StackGuard
2700 gp.stackAlloc = _FixedStack
2703 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc)
2706 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc)
2713 // Purge all cached G's from gfree list to the global list.
2714 func gfpurge(_p_ *p) {
2716 for _p_.gfreecnt != 0 {
2719 _p_.gfree = gp.schedlink.ptr()
2720 gp.schedlink.set(sched.gfree)
2724 unlock(&sched.gflock)
2727 // Breakpoint executes a breakpoint trap.
2732 // dolockOSThread is called by LockOSThread and lockOSThread below
2733 // after they modify m.locked. Do not allow preemption during this call,
2734 // or else the m might be different in this function than in the caller.
2736 func dolockOSThread() {
2744 // LockOSThread wires the calling goroutine to its current operating system thread.
2745 // Until the calling goroutine exits or calls UnlockOSThread, it will always
2746 // execute in that thread, and no other goroutine can.
2747 func LockOSThread() {
2748 getg().m.locked |= _LockExternal
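// Illustrative sketch (editor's addition): LockOSThread is typically used by code
// that must issue all of its calls from one OS thread, e.g. a C library with
// thread-affine state. The names runVideoLoop, events, and handle are hypothetical.
//
//	func runVideoLoop() {
//		runtime.LockOSThread()
//		defer runtime.UnlockOSThread()
//		for ev := range events {
//			handle(ev) // always runs on the same OS thread
//		}
//	}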
2753 func lockOSThread() {
2754 getg().m.locked += _LockInternal
2758 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
2759 // after they update m->locked. Do not allow preemption during this call,
2760 // or else the m might be different in this function than in the caller.
2762 func dounlockOSThread() {
2764 if _g_.m.locked != 0 {
2773 // UnlockOSThread unwires the calling goroutine from its fixed operating system thread.
2774 // If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op.
2775 func UnlockOSThread() {
2776 getg().m.locked &^= _LockExternal
2781 func unlockOSThread() {
2783 if _g_.m.locked < _LockInternal {
2784 systemstack(badunlockosthread)
2786 _g_.m.locked -= _LockInternal
2790 func badunlockosthread() {
2791 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
2794 func gcount() int32 {
2795 n := int32(allglen) - sched.ngfree
2804 // All these variables can be changed concurrently, so the result can be inconsistent.
2805 // But at least the current goroutine is running.
2812 func mcount() int32 {
2821 func _System() { _System() }
2822 func _ExternalCode() { _ExternalCode() }
2823 func _GC() { _GC() }
2825 // Called if we receive a SIGPROF signal.
2826 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
2831 // Profiling runs concurrently with GC, so it must not allocate.
2834 // Coordinate with stack barrier insertion in scanstack.
2835 for !atomic.Cas(&gp.stackLock, 0, 1) {
2839 // Define that a "user g" is a user-created goroutine, and a "system g"
2840 // is one that is m->g0 or m->gsignal.
2842 // We might be interrupted for profiling halfway through a
2843 // goroutine switch. The switch involves updating three (or four) values:
2844 // g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
2845 // because once it gets updated the new g is running.
2847 // When switching from a user g to a system g, LR is not considered live,
2848 // so the update only affects g, SP, and PC. Since PC must be last,
2849 // the possible partial transitions in ordinary execution are (1) g alone is updated,
2850 // (2) both g and SP are updated, and (3) SP alone is updated.
2851 // If SP or g alone is updated, we can detect the partial transition by checking
2852 // whether the SP is within g's stack bounds. (We could also require that SP
2853 // be changed only after g, but the stack bounds check is needed by other
2854 // cases, so there is no need to impose an additional requirement.)
2856 // There is one exceptional transition to a system g, not in ordinary execution.
2857 // When a signal arrives, the operating system starts the signal handler running
2858 // with an updated PC and SP. The g is updated last, at the beginning of the
2859 // handler. There are two reasons this is okay. First, until g is updated the
2860 // g and SP do not match, so the stack bounds check detects the partial transition.
2861 // Second, signal handlers currently run with signals disabled, so a profiling
2862 // signal cannot arrive during the handler.
2864 // When switching from a system g to a user g, there are three possibilities.
2866 // First, it may be that the g switch has no PC update, because the SP
2867 // either corresponds to a user g throughout (as in asmcgocall)
2868 // or because it has been arranged to look like a user g frame
2869 // (as in cgocallback_gofunc). In this case, since the entire
2870 // transition is a g+SP update, a partial transition updating just one of
2871 // those will be detected by the stack bounds check.
2873 // Second, when returning from a signal handler, the PC and SP updates
2874 // are performed by the operating system in an atomic update, so the g
2875 // update must be done before them. The stack bounds check detects
2876 // the partial transition here, and (again) signal handlers run with signals
2877 // disabled, so a profiling signal cannot arrive then anyway.
2879 // Third, the common case: it may be that the switch updates g, SP, and PC
2880 // separately. If the PC is within any of the functions that do this,
2881 // we don't ask for a traceback. See the function setsSP for more about this.
2883 // There is another apparently viable approach, recorded here in case
2884 // the "PC within setsSP function" check turns out not to be usable.
2885 // It would be possible to delay the update of either g or SP until immediately
2886 // before the PC update instruction. Then, because of the stack bounds check,
2887 // the only problematic interrupt point is just before that PC update instruction,
2888 // and the sigprof handler can detect that instruction and simulate stepping past
2889 // it in order to reach a consistent state. On ARM, the update of g must be made
2890 // in two places (in R10 and also in a TLS slot), so the delayed update would
2891 // need to be the SP update. The sigprof handler must read the instruction at
2892 // the current PC and if it was the known instruction (for example, JMP BX or
2893 // MOV R2, PC), use that other register in place of the PC value.
2894 // The biggest drawback to this solution is that it requires that we can tell
2895 // whether it's safe to read from the memory pointed at by PC.
2896 // In a correct program, we can test PC == nil and otherwise read,
2897 // but if a profiling signal happens at the instant that a program executes
2898 // a bad jump (before the program manages to handle the resulting fault)
2899 // the profiling handler could fault trying to read nonexistent memory.
2901 // To recap, there are no constraints on the assembly being used for the
2902 // transition. We simply require that g and SP match and that the PC is not in one of the functions known to set the SP (see setsSP below).
2905 if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) {
2908 var stk [maxCPUProfStack]uintptr
2910 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
2911 // In a cgo call we can't unwind and symbolize arbitrary C code,
2912 // so instead collect the Go stack that leads to the cgo call.
2913 // This is especially important on Windows, since all syscalls are cgo calls.
2914 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[0], len(stk), nil, nil, 0)
2915 } else if traceback {
2916 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
2918 if !traceback || n <= 0 {
2919 // Normal traceback is impossible or has failed.
2920 // See if it falls into several common cases.
2922 if GOOS == "windows" && n == 0 && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
2923 // Libcall, i.e. a runtime syscall on Windows.
2924 // Collect Go stack that leads to the call.
2925 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
2928 // If all of the above has failed, account it against abstract "System" or "GC".
2930 // "ExternalCode" is better than "etext".
2931 if pc > firstmoduledata.etext {
2932 pc = funcPC(_ExternalCode) + sys.PCQuantum
2935 if mp.preemptoff != "" || mp.helpgc != 0 {
2936 stk[1] = funcPC(_GC) + sys.PCQuantum
2938 stk[1] = funcPC(_System) + sys.PCQuantum
2942 atomic.Store(&gp.stackLock, 0)
2945 // Simple cas-lock to coordinate with setcpuprofilerate.
2946 for !atomic.Cas(&prof.lock, 0, 1) {
2950 cpuprof.add(stk[:n])
2952 atomic.Store(&prof.lock, 0)
2957 // Reports whether a function will set the SP
2958 // to an absolute value. Important that
2959 // we don't traceback when these are at the bottom
2960 // of the stack since we can't be sure that we will find the caller.
2963 // If the function is not on the bottom of the stack
2964 // we assume that it will have set it up so that traceback will be consistent,
2965 // either by being a traceback terminating function
2966 // or putting one on the stack at the right offset.
2967 func setsSP(pc uintptr) bool {
2970 // couldn't find the function for this PC,
2971 // so assume the worst and stop traceback
2975 case gogoPC, systemstackPC, mcallPC, morestackPC:
2981 // Arrange to call fn with a traceback hz times a second.
2982 func setcpuprofilerate_m(hz int32) {
2983 // Force sane arguments.
2988 // Disable preemption, otherwise we can be rescheduled to another thread
2989 // that has profiling enabled.
2993 // Stop the profiler on this thread so that it is safe to lock prof.
2994 // If a profiling signal came in while we had prof locked,
2995 // it would deadlock.
2998 for !atomic.Cas(&prof.lock, 0, 1) {
3002 atomic.Store(&prof.lock, 0)
3005 sched.profilehz = hz
3009 resetcpuprofiler(hz)
3015 // Change number of processors. The world is stopped, sched is locked.
3016 // gcworkbufs are not being modified by either the GC or
3017 // the write barrier code.
3018 // Returns list of Ps with local work, they need to be scheduled by the caller.
3019 func procresize(nprocs int32) *p {
3021 if old < 0 || old > _MaxGomaxprocs || nprocs <= 0 || nprocs > _MaxGomaxprocs {
3022 throw("procresize: invalid arg")
3025 traceGomaxprocs(nprocs)
3028 // update statistics
3030 if sched.procresizetime != 0 {
3031 sched.totaltime += int64(old) * (now - sched.procresizetime)
3033 sched.procresizetime = now
3035 // initialize new P's
3036 for i := int32(0); i < nprocs; i++ {
3041 pp.status = _Pgcstop
3042 pp.sudogcache = pp.sudogbuf[:0]
3043 for i := range pp.deferpool {
3044 pp.deferpool[i] = pp.deferpoolbuf[i][:0]
3046 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
3048 if pp.mcache == nil {
3049 if old == 0 && i == 0 {
3050 if getg().m.mcache == nil {
3051 throw("missing mcache?")
3053 pp.mcache = getg().m.mcache // bootstrap
3055 pp.mcache = allocmcache()
3061 for i := nprocs; i < old; i++ {
3062 p := allp[i]
3064 if p == getg().m.p.ptr() {
3065 // moving to p[0], pretend that we were descheduled
3066 // and then scheduled again to keep the trace sane.
3071 // move all runnable goroutines to the global queue
3072 for p.runqhead != p.runqtail {
3073 // pop from tail of local queue
3075 gp := p.runq[p.runqtail%uint32(len(p.runq))].ptr()
3076 // push onto head of global queue
3080 globrunqputhead(p.runnext.ptr())
3083 // if there's a background worker, make it runnable and put
3084 // it on the global queue so it can clean itself up
3085 if p.gcBgMarkWorker != nil {
3086 casgstatus(p.gcBgMarkWorker, _Gwaiting, _Grunnable)
3088 traceGoUnpark(p.gcBgMarkWorker, 0)
3090 globrunqput(p.gcBgMarkWorker)
3091 p.gcBgMarkWorker = nil
3093 for i := range p.sudogbuf {
3096 p.sudogcache = p.sudogbuf[:0]
3097 for i := range p.deferpool {
3098 for j := range p.deferpoolbuf[i] {
3099 p.deferpoolbuf[i][j] = nil
3101 p.deferpool[i] = p.deferpoolbuf[i][:0]
3103 freemcache(p.mcache)
3108 // can't free P itself because it can be referenced by an M in syscall
3112 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
3113 // continue to use the current P
3114 _g_.m.p.ptr().status = _Prunning
3116 // release the current P and acquire allp[0]
3131 for i := nprocs - 1; i >= 0; i-- {
3133 if _g_.m.p.ptr() == p {
3141 p.link.set(runnablePs)
3145 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
3146 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
3150 // Associate p and the current m.
3151 func acquirep(_p_ *p) {
3154 // have p; write barriers now allowed
3156 _g_.m.mcache = _p_.mcache
3163 // May run during STW, so write barriers are not allowed.
3165 func acquirep1(_p_ *p) {
3168 if _g_.m.p != 0 || _g_.m.mcache != nil {
3169 throw("acquirep: already in go")
3171 if _p_.m != 0 || _p_.status != _Pidle {
3176 print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
3177 throw("acquirep: invalid p state")
3181 _p_.status = _Prunning
3184 // Disassociate p and the current m.
3185 func releasep() *p {
3188 if _g_.m.p == 0 || _g_.m.mcache == nil {
3189 throw("releasep: invalid arg")
3191 _p_ := _g_.m.p.ptr()
3192 if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
3193 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
3194 throw("releasep: invalid p state")
3197 traceProcStop(_g_.m.p.ptr())
3206 func incidlelocked(v int32) {
3208 sched.nmidlelocked += v
3215 // Check for a deadlock situation.
3216 // The check is based on the number of running M's; if it is 0, we have a deadlock.
3218 // For -buildmode=c-shared or -buildmode=c-archive it's OK if
3219 // there are no running goroutines. The calling program is
3220 // assumed to be running.
3221 if islibrary || isarchive {
3225 // If we are dying because of a signal caught on an already idle thread,
3226 // freezetheworld will cause all running threads to block.
3227 // And the runtime will essentially enter a deadlock state,
3228 // except that there is a thread that will call exit soon.
3234 run := sched.mcount - sched.nmidle - sched.nmidlelocked - 1
3239 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", sched.mcount, "\n")
3240 throw("checkdead: inconsistent counts")
3245 for i := 0; i < len(allgs); i++ {
3246 gp := allgs[i]
3247 if isSystemGoroutine(gp) {
3250 s := readgstatus(gp)
3251 switch s &^ _Gscan {
3258 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
3259 throw("checkdead: runnable g")
3263 if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
3264 throw("no goroutines (main called runtime.Goexit) - deadlock!")
3267 // Maybe jump time forward for playground.
3270 casgstatus(gp, _Gwaiting, _Grunnable)
3274 throw("checkdead: no p for timer")
3278 // There should always be a free M since
3279 // nothing is running.
3280 throw("checkdead: no m for timer")
3283 notewakeup(&mp.park)
3287 getg().m.throwing = -1 // do not dump full stacks
3288 throw("all goroutines are asleep - deadlock!")
3291 // forcegcperiod is the maximum time in nanoseconds between garbage
3292 // collections. If we go this long without a garbage collection, one
3293 // is forced to run.
3295 // This is a variable for testing purposes. It normally doesn't change.
3296 var forcegcperiod int64 = 2 * 60 * 1e9
3299 // If a heap span goes unused for 5 minutes after a garbage collection,
3300 // we hand it back to the operating system.
3301 scavengelimit := int64(5 * 60 * 1e9)
3303 if debug.scavenge > 0 {
3304 // Scavenge-a-lot for testing.
3305 forcegcperiod = 10 * 1e6
3306 scavengelimit = 20 * 1e6
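// Editor's note: with the defaults above, forcegcperiod is 2*60*1e9 ns = 2 minutes
// and scavengelimit is 5*60*1e9 ns = 5 minutes; with debug.scavenge > 0 they shrink
// to 10 ms and 20 ms so both paths are exercised frequently during testing.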
3309 lastscavenge := nanotime()
3312 lasttrace := int64(0)
3313 idle := 0 // how many cycles in succession we have not woken anybody up
3316 if idle == 0 { // start with 20us sleep...
3318 } else if idle > 50 { // start doubling the sleep after 1ms...
3321 if delay > 10*1000 { // up to 10ms
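// Editor's note on the sleep schedule sketched by the comments above (some of the
// surrounding lines are elided here): sysmon sleeps 20us per idle cycle, and after
// 50 consecutive idle cycles (about 1 ms of accumulated sleep) the delay doubles
// on each further cycle (40us, 80us, ...) until it is capped at 10 ms.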
3325 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) { // TODO: fast atomic
3327 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
3328 atomic.Store(&sched.sysmonwait, 1)
3330 // Make wake-up period small enough
3331 // for the sampling to be correct.
3332 maxsleep := forcegcperiod / 2
3333 if scavengelimit < forcegcperiod {
3334 maxsleep = scavengelimit / 2
3336 notetsleep(&sched.sysmonnote, maxsleep)
3338 atomic.Store(&sched.sysmonwait, 0)
3339 noteclear(&sched.sysmonnote)
3345 // poll network if not polled for more than 10ms
3346 lastpoll := int64(atomic.Load64(&sched.lastpoll))
3348 unixnow := unixnanotime()
3349 if lastpoll != 0 && lastpoll+10*1000*1000 < now {
3350 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
3351 gp := netpoll(false) // non-blocking - returns list of goroutines
3353 // Need to decrement number of idle locked M's
3354 // (pretending that one more is running) before injectglist.
3355 // Otherwise it can lead to the following situation:
3356 // injectglist grabs all P's but before it starts M's to run the P's,
3357 // another M returns from syscall, finishes running its G,
3358 // observes that there is no work to do and no other running M's
3359 // and reports deadlock.
3365 // retake P's blocked in syscalls
3366 // and preempt long running G's
3367 if retake(now) != 0 {
3372 // check if we need to force a GC
3373 lastgc := int64(atomic.Load64(&memstats.last_gc))
3374 if lastgc != 0 && unixnow-lastgc > forcegcperiod && atomic.Load(&forcegc.idle) != 0 {
3377 forcegc.g.schedlink = 0
3378 injectglist(forcegc.g)
3379 unlock(&forcegc.lock)
3381 // scavenge heap once in a while
3382 if lastscavenge+scavengelimit/2 < now {
3383 mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit))
3387 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace*1000000) <= now {
3389 schedtrace(debug.scheddetail > 0)
3394 var pdesc [_MaxGomaxprocs]struct {
3401 // forcePreemptNS is the time slice given to a G before it is preempted.
3403 const forcePreemptNS = 10 * 1000 * 1000 // 10ms
3405 func retake(now int64) uint32 {
3407 for i := int32(0); i < gomaxprocs; i++ {
3415 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
3416 t := int64(_p_.syscalltick)
3417 if int64(pd.syscalltick) != t {
3418 pd.syscalltick = uint32(t)
3419 pd.syscallwhen = now
3422 // On the one hand we don't want to retake Ps if there is no other work to do,
3423 // but on the other hand we want to retake them eventually
3424 // because they can prevent the sysmon thread from going into a deep sleep.
3425 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
3428 // Need to decrement number of idle locked M's
3429 // (pretending that one more is running) before the CAS.
3430 // Otherwise the M from which we retake can exit the syscall,
3431 // increment nmidle and report deadlock.
3433 if atomic.Cas(&_p_.status, s, _Pidle) {
3435 traceGoSysBlock(_p_)
3443 } else if s == _Prunning {
3444 // Preempt G if it's running for too long.
3445 t := int64(_p_.schedtick)
3446 if int64(pd.schedtick) != t {
3447 pd.schedtick = uint32(t)
3451 if pd.schedwhen+forcePreemptNS > now {
3460 // Tell all goroutines that they have been preempted and they should stop.
3461 // This function is purely best-effort. It can fail to inform a goroutine if a
3462 // processor just started running it.
3463 // No locks need to be held.
3464 // Returns true if preemption request was issued to at least one goroutine.
3465 func preemptall() bool {
3467 for i := int32(0); i < gomaxprocs; i++ {
3469 if _p_ == nil || _p_.status != _Prunning {
3472 if preemptone(_p_) {
3479 // Tell the goroutine running on processor P to stop.
3480 // This function is purely best-effort. It can incorrectly fail to inform the
3481 // goroutine. It can inform the wrong goroutine. Even if it informs the
3482 // correct goroutine, that goroutine might ignore the request if it is
3483 // simultaneously executing newstack.
3484 // No lock needs to be held.
3485 // Returns true if preemption request was issued.
3486 // The actual preemption will happen at some point in the future
3487 // and will be indicated by the gp->status no longer being _Grunning.
3489 func preemptone(_p_ *p) bool {
3491 if mp == nil || mp == getg().m {
3495 if gp == nil || gp == mp.g0 {
3501 // Every call in a goroutine checks for stack overflow by
3502 // comparing the current stack pointer to gp->stackguard0.
3503 // Setting gp->stackguard0 to StackPreempt folds
3504 // preemption into the normal stack overflow check.
3505 gp.stackguard0 = stackPreempt
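// Editor's note, an illustrative sketch of the mechanism described above. The
// compiler-inserted prologue of a splittable function is roughly:
//
//	if SP < g.stackguard0 {
//		morestack() // enters the runtime's newstack
//	}
//
// stackPreempt is larger than any real stack pointer, so after the assignment
// above the very next prologue check fails and newstack observes the preemption
// request instead of actually growing the stack.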
3511 func schedtrace(detailed bool) {
3518 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", sched.mcount, " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
3520 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
3522 // We must be careful while reading data from P's, M's and G's.
3523 // Even if we hold schedlock, most data can be changed concurrently.
3524 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
3525 for i := int32(0); i < gomaxprocs; i++ {
3531 h := atomic.Load(&_p_.runqhead)
3532 t := atomic.Load(&_p_.runqtail)
3538 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n")
3540 // In non-detailed mode, format the lengths of the per-P run queues as:
3541 // [len1 len2 len3 len4]
3547 if i == gomaxprocs-1 {
3558 for mp := allm; mp != nil; mp = mp.alllink {
3561 lockedg := mp.lockedg
3574 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", getg().m.blocked, " lockedg=", id3, "\n")
3578 for gi := 0; gi < len(allgs); gi++ {
3581 lockedm := gp.lockedm
3590 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n")
3597 // Put mp on the midle list.
3597 // Sched must be locked.
3598 // May run during STW, so write barriers are not allowed.
3601 mp.schedlink = sched.midle
3608 // Try to get an m from the midle list.
3608 // Sched must be locked.
3609 // May run during STW, so write barriers are not allowed.
3612 mp := sched.midle.ptr()
3614 sched.midle = mp.schedlink
3620 // Put gp on the global runnable queue.
3621 // Sched must be locked.
3622 // May run during STW, so write barriers are not allowed.
3624 func globrunqput(gp *g) {
3626 if sched.runqtail != 0 {
3627 sched.runqtail.ptr().schedlink.set(gp)
3629 sched.runqhead.set(gp)
3631 sched.runqtail.set(gp)
3635 // Put gp at the head of the global runnable queue.
3636 // Sched must be locked.
3637 // May run during STW, so write barriers are not allowed.
3639 func globrunqputhead(gp *g) {
3640 gp.schedlink = sched.runqhead
3641 sched.runqhead.set(gp)
3642 if sched.runqtail == 0 {
3643 sched.runqtail.set(gp)
3648 // Put a batch of runnable goroutines on the global runnable queue.
3649 // Sched must be locked.
3650 func globrunqputbatch(ghead *g, gtail *g, n int32) {
3652 if sched.runqtail != 0 {
3653 sched.runqtail.ptr().schedlink.set(ghead)
3655 sched.runqhead.set(ghead)
3657 sched.runqtail.set(gtail)
3661 // Try to get a batch of G's from the global runnable queue.
3662 // Sched must be locked.
3663 func globrunqget(_p_ *p, max int32) *g {
3664 if sched.runqsize == 0 {
3668 n := sched.runqsize/gomaxprocs + 1
3669 if n > sched.runqsize {
3672 if max > 0 && n > max {
3675 if n > int32(len(_p_.runq))/2 {
3676 n = int32(len(_p_.runq)) / 2
3680 if sched.runqsize == 0 {
3684 gp := sched.runqhead.ptr()
3685 sched.runqhead = gp.schedlink
3688 gp1 := sched.runqhead.ptr()
3689 sched.runqhead = gp1.schedlink
3690 runqput(_p_, gp1, false)
3696 // Put p on the _Pidle list.
3696 // Sched must be locked.
3697 // May run during STW, so write barriers are not allowed.
3699 func pidleput(_p_ *p) {
3700 if !runqempty(_p_) {
3701 throw("pidleput: P has non-empty run queue")
3703 _p_.link = sched.pidle
3704 sched.pidle.set(_p_)
3705 atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
3709 // Try to get a p from the _Pidle list.
3709 // Sched must be locked.
3710 // May run during STW, so write barriers are not allowed.
3712 func pidleget() *p {
3713 _p_ := sched.pidle.ptr()
3715 sched.pidle = _p_.link
3716 atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
3721 // runqempty returns true if _p_ has no Gs on its local run queue.
3722 // Note that this test is generally racy.
3723 func runqempty(_p_ *p) bool {
3724 return _p_.runqhead == _p_.runqtail && _p_.runnext == 0
3727 // To shake out latent assumptions about scheduling order,
3728 // we introduce some randomness into scheduling decisions
3729 // when running with the race detector.
3730 // The need for this was made obvious by changing the
3731 // (deterministic) scheduling order in Go 1.5 and breaking
3732 // many poorly-written tests.
3733 // With the randomness here, as long as the tests pass
3734 // consistently with -race, they shouldn't have latent scheduling assumptions.
3736 const randomizeScheduler = raceenabled
3738 // runqput tries to put g on the local runnable queue.
3739 // If next is false, runqput adds g to the tail of the runnable queue.
3740 // If next is true, runqput puts g in the _p_.runnext slot.
3741 // If the run queue is full, runqput puts g on the global queue.
3742 // Executed only by the owner P.
3743 func runqput(_p_ *p, gp *g, next bool) {
3744 if randomizeScheduler && next && fastrand1()%2 == 0 {
3750 oldnext := _p_.runnext
3751 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
3757 // Kick the old runnext out to the regular run queue.
3762 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
3764 if t-h < uint32(len(_p_.runq)) {
3765 _p_.runq[t%uint32(len(_p_.runq))].set(gp)
3766 atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
3769 if runqputslow(_p_, gp, h, t) {
3772 // the queue is not full, so the put above must now succeed
3776 // Put g and a batch of work from local runnable queue on global queue.
3777 // Executed only by the owner P.
3778 func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
3779 var batch [len(_p_.runq)/2 + 1]*g
3781 // First, grab a batch from local queue.
3784 if n != uint32(len(_p_.runq)/2) {
3785 throw("runqputslow: queue is not full")
3787 for i := uint32(0); i < n; i++ {
3788 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
3790 if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
3793 batch[n] = gp
3795 if randomizeScheduler {
3796 for i := uint32(1); i <= n; i++ {
3797 j := fastrand1() % (i + 1)
3798 batch[i], batch[j] = batch[j], batch[i]
3802 // Link the goroutines.
3803 for i := uint32(0); i < n; i++ {
3804 batch[i].schedlink.set(batch[i+1])
3807 // Now put the batch on global queue.
3809 globrunqputbatch(batch[0], batch[n], int32(n+1))
3814 // Get g from local runnable queue.
3815 // If inheritTime is true, gp should inherit the remaining time in the
3816 // current time slice. Otherwise, it should start a new time slice.
3817 // Executed only by the owner P.
3818 func runqget(_p_ *p) (gp *g, inheritTime bool) {
3819 // If there's a runnext, it's the next G to run.
3825 if _p_.runnext.cas(next, 0) {
3826 return next.ptr(), true
3831 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
3836 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
3837 if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume
3843 // Grabs a batch of goroutines from _p_'s runnable queue into batch.
3844 // Batch is a ring buffer starting at batchHead.
3845 // Returns number of grabbed goroutines.
3846 // Can be executed by any P.
3847 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
3849 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
3850 t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer
3855 // Try to steal from _p_.runnext.
3856 if next := _p_.runnext; next != 0 {
3857 // Sleep to ensure that _p_ isn't about to run the g we
3858 // are about to steal.
3859 // The important use case here is when the g running on _p_
3860 // ready()s another g and then almost immediately blocks.
3861 // Instead of stealing runnext in this window, back off
3862 // to give _p_ a chance to schedule runnext. This will avoid
3863 // thrashing gs between different Ps.
3865 if !_p_.runnext.cas(next, 0) {
3868 batch[batchHead%uint32(len(batch))] = next
3874 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t
3877 for i := uint32(0); i < n; i++ {
3878 g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
3879 batch[(batchHead+i)%uint32(len(batch))] = g
3881 if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
3887 // Steal half of the elements from the local runnable queue of p2
3888 // and put them onto the local runnable queue of p.
3889 // Returns one of the stolen elements (or nil if failed).
3890 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
3892 n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
3897 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
3901 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
3902 if t-h+n >= uint32(len(_p_.runq)) {
3903 throw("runqsteal: runq overflow")
3905 atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
3909 func testSchedLocalQueue() {
3911 gs := make([]g, len(_p_.runq))
3912 for i := 0; i < len(_p_.runq); i++ {
3913 if g, _ := runqget(_p_); g != nil {
3914 throw("runq is not empty initially")
3916 for j := 0; j < i; j++ {
3917 runqput(_p_, &gs[i], false)
3919 for j := 0; j < i; j++ {
3920 if g, _ := runqget(_p_); g != &gs[i] {
3921 print("bad element at iter ", i, "/", j, "\n")
3922 throw("bad element")
3925 if g, _ := runqget(_p_); g != nil {
3926 throw("runq is not empty afterwards")
3931 func testSchedLocalQueueSteal() {
3934 gs := make([]g, len(p1.runq))
3935 for i := 0; i < len(p1.runq); i++ {
3936 for j := 0; j < i; j++ {
3938 runqput(p1, &gs[j], false)
3940 gp := runqsteal(p2, p1, true)
3961 for j := 0; j < i; j++ {
3963 print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
3964 throw("bad element")
3967 if s != i/2 && s != i/2+1 {
3968 print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
3974 //go:linkname setMaxThreads runtime/debug.setMaxThreads
3975 func setMaxThreads(in int) (out int) {
3977 out = int(sched.maxmcount)
3978 sched.maxmcount = int32(in)
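// Illustrative sketch (editor's addition): the exported wrapper for the function
// above is runtime/debug.SetMaxThreads, which returns the previous limit.
//
//	prev := debug.SetMaxThreads(20000) // raise the OS thread limit
//	defer debug.SetMaxThreads(prev)    // restore it afterwards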
3984 func haveexperiment(name string) bool {
3985 x := sys.Goexperiment
3992 xname, x = x[:i], x[i+1:]
4002 func procPin() int {
4007 return int(mp.p.ptr().id)
4016 //go:linkname sync_runtime_procPin sync.runtime_procPin
4018 func sync_runtime_procPin() int {
4022 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
4024 func sync_runtime_procUnpin() {
4028 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
4030 func sync_atomic_runtime_procPin() int {
4034 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
4036 func sync_atomic_runtime_procUnpin() {
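// Illustrative sketch (editor's addition): a caller such as sync.Pool uses the
// pin/unpin pair to obtain a stable P id while touching per-P state. Roughly,
// with a hypothetical per-P slice named local:
//
//	pid := runtime_procPin() // disables preemption and returns the current P's id
//	slot := &local[pid]      // per-P slot, safe to use while pinned
//	runtime_procUnpin()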
4040 // Active spinning for sync.Mutex.
4041 //go:linkname sync_runtime_canSpin sync.runtime_canSpin
4043 func sync_runtime_canSpin(i int) bool {
4044 // sync.Mutex is cooperative, so we are conservative with spinning.
4045 // Spin only a few times and only if running on a multicore machine and
4046 // GOMAXPROCS>1 and there is at least one other running P and the local runq is empty.
4047 // As opposed to runtime mutexes, we don't do passive spinning here,
4048 // because there can be work on the global runq or on other Ps.
4049 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
4052 if p := getg().m.p.ptr(); !runqempty(p) {
4058 //go:linkname sync_runtime_doSpin sync.runtime_doSpin
4060 func sync_runtime_doSpin() {
4061 procyield(active_spin_cnt)
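// Illustrative sketch (editor's addition): a cooperative lock such as sync.Mutex
// can use the two hooks above in its slow path. tryAcquire and parkOnSemaphore
// are hypothetical names.
//
//	iter := 0
//	for {
//		if tryAcquire() {
//			return
//		}
//		if sync_runtime_canSpin(iter) {
//			sync_runtime_doSpin() // brief busy-wait instead of parking
//			iter++
//			continue
//		}
//		parkOnSemaphore() // give up the thread
//		iter = 0
//	}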