1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
6 #include "arch_GOARCH.h"
7 #include "zaexperiment.h"
15 // Goroutine scheduler
16 // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
18 // The main concepts are:
20 // M - worker thread, or machine.
21 // P - processor, a resource that is required to execute Go code.
22 // M must have an associated P to execute Go code, however it can be
23 // blocked or in a syscall w/o an associated P.
25 // Design doc at http://golang.org/s/go11sched.
29 // Number of goroutine ids to grab from runtime·sched.goidgen to local per-P cache at once.
30 // 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.
35 int32 runtime·gomaxprocs;
36 uint32 runtime·needextram;
39 G runtime·g0; // idle goroutine for m0
43 P* runtime·allp[MaxGomaxprocs+1];
46 int32 runtime·newprocs;
48 Mutex runtime·allglock; // the following vars are protected by this lock or by stoptheworld
51 uintptr runtime·allglen;
52 ForceGCState runtime·forcegc;
54 void runtime·mstart(void);
55 static void runqput(P*, G*);
56 static G* runqget(P*);
57 static bool runqputslow(P*, G*, uint32, uint32);
58 static G* runqsteal(P*, P*);
61 static void mcommoninit(M*);
62 static void schedule(void);
63 static void procresize(int32);
64 static void acquirep(P*);
65 static P* releasep(void);
66 static void newm(void(*)(void), P*);
67 static void stopm(void);
68 static void startm(P*, bool);
69 static void handoffp(P*);
70 static void wakep(void);
71 static void stoplockedm(void);
72 static void startlockedm(G*);
73 static void sysmon(void);
74 static uint32 retake(int64);
75 static void incidlelocked(int32);
76 static void checkdead(void);
77 static void exitsyscall0(G*);
78 void runtime·park_m(G*);
79 static void goexit0(G*);
80 static void gfput(P*, G*);
82 static void gfpurge(P*);
83 static void globrunqput(G*);
84 static void globrunqputbatch(G*, G*, int32);
85 static G* globrunqget(P*, int32);
86 static P* pidleget(void);
87 static void pidleput(P*);
88 static void injectglist(G*);
89 static bool preemptall(void);
90 static bool preemptone(P*);
91 static bool exitsyscallfast(void);
92 static bool haveexperiment(int8*);
93 void runtime·allgadd(G*);
94 static void dropg(void);
96 extern String runtime·buildVersion;
98 // For cgo-using programs with external linking,
99 // export "main" (defined in assembly) so that libc can handle basic
100 // C runtime startup and call the Go program as if it were
101 // the C main function.
102 #pragma cgo_export_static main
104 // Filled in by dynamic linker when Cgo is available.
105 void (*_cgo_init)(void);
106 void (*_cgo_malloc)(void);
107 void (*_cgo_free)(void);
110 void* runtime·cgoMalloc;
111 void* runtime·cgoFree;
113 // The bootstrap sequence is:
117 // make & queue new G
118 // call runtime·mstart
120 // The new G calls runtime·main.
122 runtime·schedinit(void)
127 // raceinit must be the first call to race detector.
128 // In particular, it must be done before mallocinit below calls racemapshadow.
130 g->racectx = runtime·raceinit();
132 runtime·sched.maxmcount = 10000;
134 runtime·tracebackinit();
135 runtime·symtabinit();
137 runtime·mallocinit();
142 runtime·parsedebugvars();
145 runtime·sched.lastpoll = runtime·nanotime();
147 p = runtime·getenv("GOMAXPROCS");
148 if(p != nil && (n = runtime·atoi(p)) > 0) {
149 if(n > MaxGomaxprocs)
155 if(runtime·buildVersion.str == nil) {
156 // Condition should never trigger. This code just serves
157 // to ensure runtime·buildVersion is kept in the resulting binary.
158 runtime·buildVersion.str = (uint8*)"unknown";
159 runtime·buildVersion.len = 7;
162 runtime·cgoMalloc = _cgo_malloc;
163 runtime·cgoFree = _cgo_free;
167 runtime·newsysmon(void)
175 runtime·printf("runtime: gp: gp=%p, goid=%D, gp->atomicstatus=%x\n", gp, gp->goid, runtime·readgstatus(gp));
176 runtime·printf("runtime: g: g=%p, goid=%D, g->atomicstatus=%x\n", g, g->goid, runtime·readgstatus(g));
182 // sched lock is held
183 if(runtime·sched.mcount > runtime·sched.maxmcount){
184 runtime·printf("runtime: program exceeds %d-thread limit\n", runtime·sched.maxmcount);
185 runtime·throw("thread exhaustion");
192 // g0 stack won't make sense for user (and is not necessary unwindable).
194 runtime·callers(1, mp->createstack, nelem(mp->createstack));
196 mp->fastrand = 0x49f6428aUL + mp->id + runtime·cputicks();
198 runtime·lock(&runtime·sched.lock);
199 mp->id = runtime·sched.mcount++;
201 runtime·mpreinit(mp);
203 mp->gsignal->stackguard1 = mp->gsignal->stack.lo + StackGuard;
205 // Add to runtime·allm so garbage collector doesn't free g->m
206 // when it is just in a register or thread-local storage.
207 mp->alllink = runtime·allm;
208 // runtime·NumCgoCall() iterates over allm w/o schedlock,
209 // so we need to publish it safely.
210 runtime·atomicstorep(&runtime·allm, mp);
211 runtime·unlock(&runtime·sched.lock);
214 // Mark gp ready to run.
220 status = runtime·readgstatus(gp);
222 g->m->locks++; // disable preemption because it can be holding p in a local var
223 if((status&~Gscan) != Gwaiting){
225 runtime·throw("bad g->status in ready");
227 // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
228 runtime·casgstatus(gp, Gwaiting, Grunnable);
229 runqput(g->m->p, gp);
230 if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0) // TODO: fast atomic
233 if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
234 g->stackguard0 = StackPreempt;
238 runtime·ready_m(void)
242 gp = g->m->ptrarg[0];
243 g->m->ptrarg[0] = nil;
248 runtime·gcprocs(void)
252 // Figure out how many CPUs to use during GC.
253 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
254 runtime·lock(&runtime·sched.lock);
255 n = runtime·gomaxprocs;
260 if(n > runtime·sched.nmidle+1) // one M is currently running
261 n = runtime·sched.nmidle+1;
262 runtime·unlock(&runtime·sched.lock);
271 runtime·lock(&runtime·sched.lock);
272 n = runtime·gomaxprocs;
277 n -= runtime·sched.nmidle+1; // one M is currently running
278 runtime·unlock(&runtime·sched.lock);
283 runtime·helpgc(int32 nproc)
288 runtime·lock(&runtime·sched.lock);
290 for(n = 1; n < nproc; n++) { // one M is currently running
291 if(runtime·allp[pos]->mcache == g->m->mcache)
295 runtime·throw("runtime·gcprocs inconsistency");
297 mp->mcache = runtime·allp[pos]->mcache;
299 runtime·notewakeup(&mp->park);
301 runtime·unlock(&runtime·sched.lock);
304 // Similar to stoptheworld but best-effort and can be called several times.
305 // There is no reverse operation, used during crashing.
306 // This function must not lock any mutexes.
308 runtime·freezetheworld(void)
312 if(runtime·gomaxprocs == 1)
314 // stopwait and preemption requests can be lost
315 // due to races with concurrently executing threads,
316 // so try several times
317 for(i = 0; i < 5; i++) {
318 // this should tell the scheduler to not start any new goroutines
319 runtime·sched.stopwait = 0x7fffffff;
320 runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1);
321 // this should stop running goroutines
323 break; // no running goroutines
324 runtime·usleep(1000);
327 runtime·usleep(1000);
329 runtime·usleep(1000);
333 isscanstatus(uint32 status)
336 runtime·throw("isscanstatus: Bad status Gscan");
337 return (status&Gscan) == Gscan;
340 // All reads and writes of g's status go through readgstatus, casgstatus
341 // castogscanstatus, casfromgscanstatus.
342 #pragma textflag NOSPLIT
344 runtime·readgstatus(G *gp)
346 return runtime·atomicload(&gp->atomicstatus);
349 // The Gscanstatuses are acting like locks and this releases them.
350 // If it proves to be a performance hit we should be able to make these
351 // simple atomic stores but for now we are going to throw if
352 // we see an inconsistent state.
354 runtime·casfromgscanstatus(G *gp, uint32 oldval, uint32 newval)
356 bool success = false;
358 // Check that transition is valid.
364 if(newval == (oldval&~Gscan))
365 success = runtime·cas(&gp->atomicstatus, oldval, newval);
368 if(newval == Gwaiting)
369 success = runtime·cas(&gp->atomicstatus, oldval, newval);
373 runtime·printf("runtime: casfromgscanstatus failed gp=%p, oldval=%d, newval=%d\n",
376 runtime·throw("casfromgscanstatus: gp->status is not in scan state");
380 // This will return false if the gp is not in the expected status and the cas fails.
381 // This acts like a lock acquire while the casfromgstatus acts like a lock release.
383 runtime·castogscanstatus(G *gp, uint32 oldval, uint32 newval)
389 if(newval == (oldval|Gscan))
390 return runtime·cas(&gp->atomicstatus, oldval, newval);
393 if(newval == Gscanrunning || newval == Gscanenqueue)
394 return runtime·cas(&gp->atomicstatus, oldval, newval);
398 runtime·printf("runtime: castogscanstatus oldval=%d newval=%d\n", oldval, newval);
399 runtime·throw("castogscanstatus");
400 return false; // not reached
403 static void badcasgstatus(void);
404 static void helpcasgstatus(void);
406 // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
407 // and casfromgscanstatus instead.
408 // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
409 // put it in the Gscan state is finished.
410 #pragma textflag NOSPLIT
412 runtime·casgstatus(G *gp, uint32 oldval, uint32 newval)
416 if((oldval&Gscan) || (newval&Gscan) || oldval == newval) {
417 g->m->scalararg[0] = oldval;
418 g->m->scalararg[1] = newval;
423 // loop if gp->atomicstatus is in a scan state giving
424 // GC time to finish and change the state to oldval.
425 while(!runtime·cas(&gp->atomicstatus, oldval, newval)) {
433 uint32 oldval, newval;
435 oldval = g->m->scalararg[0];
436 newval = g->m->scalararg[1];
437 g->m->scalararg[0] = 0;
438 g->m->scalararg[1] = 0;
440 runtime·printf("casgstatus: oldval=%d, newval=%d\n", oldval, newval);
441 runtime·throw("casgstatus: bad incoming values");
449 gp = g->m->ptrarg[0];
451 runtime·gcphasework(gp);
454 // stopg ensures that gp is stopped at a GC safe point where its stack can be scanned
455 // or in the context of a moving collector the pointers can be flipped from pointing
456 // to old object to pointing to new objects.
457 // If stopg returns true, the caller knows gp is at a GC safe point and will remain there until
458 // the caller calls restartg.
459 // If stopg returns false, the caller is not responsible for calling restartg. This can happen
460 // if another thread, either the gp itself or another GC thread is taking the responsibility
461 // to do the GC work related to this thread.
471 s = runtime·readgstatus(gp);
475 runtime·throw("stopg: gp->atomicstatus is not valid");
481 // Loop until a new stack is in place.
487 // Claim goroutine by setting scan bit.
488 if(!runtime·castogscanstatus(gp, s, s|Gscan))
490 // In scan state, do work.
491 runtime·gcphasework(gp);
497 // Goroutine already claimed by another GC helper.
501 if(runtime·gcphase == GCscan) {
502 gp->gcworkdone = true;
504 // Running routines not scanned during
505 // GCscan phase, we only scan non-running routines.
508 // Claim goroutine, so we aren't racing with a status
509 // transition away from Grunning.
510 if(!runtime·castogscanstatus(gp, Grunning, Gscanrunning))
513 // Mark gp for preemption.
514 if(!gp->gcworkdone) {
515 gp->preemptscan = true;
517 gp->stackguard0 = StackPreempt;
521 runtime·casfromgscanstatus(gp, Gscanrunning, Grunning);
525 // Should not be here....
528 // The GC requests that this routine be moved from a scanmumble state to a mumble state.
530 runtime·restartg (G *gp)
534 s = runtime·readgstatus(gp);
538 runtime·throw("restartg: unexpected status");
546 runtime·casfromgscanstatus(gp, s, s&~Gscan);
550 // Scan is now completed.
551 // Goroutine now needs to be made runnable.
552 // We put it on the global run queue; ready blocks on the global scheduler lock.
553 runtime·casfromgscanstatus(gp, Gscanenqueue, Gwaiting);
555 runtime·throw("processing Gscanenqueue on wrong m");
566 runtime·throw("GC not moved to G0");
567 if(runtime·stopg(gp)) {
568 if(!isscanstatus(runtime·readgstatus(gp))) {
570 runtime·throw("GC not in scan state");
572 runtime·restartg(gp);
576 // Runs on g0 and does the actual work after putting the g back on the run queue.
578 mquiesce(G *gpmaster)
585 // enqueue the calling goroutine.
586 runtime·restartg(gpmaster);
588 activeglen = runtime·allglen;
589 for(i = 0; i < activeglen; i++) {
590 gp = runtime·allg[i];
591 if(runtime·readgstatus(gp) == Gdead)
592 gp->gcworkdone = true; // noop scan.
594 gp->gcworkdone = false;
598 // Check that the G's gcwork (such as scanning) has been done. If not do it now.
599 // You can end up doing work here if the page trap on a Grunning Goroutine has
600 // not been sprung or in some race situations. For example a runnable goes dead
601 // and is started up again with a gp->gcworkdone set to false.
602 for(i = 0; i < activeglen; i++) {
603 gp = runtime·allg[i];
604 while (!gp->gcworkdone) {
605 status = runtime·readgstatus(gp);
606 if(status == Gdead) {
607 gp->gcworkdone = true; // scan is a noop
609 //do nothing, scan not needed.
611 if(status == Grunning && gp->stackguard0 == (uintptr)StackPreempt && runtime·notetsleep(&runtime·sched.stopnote, 100*1000)) // nanosecond arg
612 runtime·noteclear(&runtime·sched.stopnote);
618 for(i = 0; i < activeglen; i++) {
619 gp = runtime·allg[i];
620 status = runtime·readgstatus(gp);
621 if(isscanstatus(status)) {
622 runtime·printf("mstopandscang:bottom: post scan bad status gp=%p has status %x\n", gp, status);
625 if(!gp->gcworkdone && status != Gdead) {
626 runtime·printf("mstopandscang:bottom: post scan gp=%p->gcworkdone still false\n", gp);
631 schedule(); // Never returns.
634 // quiesce moves all the goroutines to a GC safepoint which for now is a at preemption point.
635 // If the global runtime·gcphase is GCmark quiesce will ensure that all of the goroutine's stacks
636 // have been scanned before it returns.
638 runtime·quiesce(G* mastergp)
642 runtime·castogscanstatus(mastergp, Grunning, Gscanenqueue);
643 // Now move this to the g0 (aka m) stack.
644 // g0 will potentially scan this thread and put mastergp on the runqueue
649 // This is used by the GC as well as the routines that do stack dumps. In the case
650 // of GC all the routines can be reliably stopped. This is not always the case
651 // when the system is in panic or being exited.
653 runtime·stoptheworld(void)
660 // If we hold a lock, then we won't be able to stop another M
661 // that is blocked trying to acquire the lock.
663 runtime·throw("stoptheworld: holding locks");
665 runtime·lock(&runtime·sched.lock);
666 runtime·sched.stopwait = runtime·gomaxprocs;
667 runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1);
670 g->m->p->status = Pgcstop; // Pgcstop is only diagnostic.
671 runtime·sched.stopwait--;
672 // try to retake all P's in Psyscall status
673 for(i = 0; i < runtime·gomaxprocs; i++) {
676 if(s == Psyscall && runtime·cas(&p->status, s, Pgcstop))
677 runtime·sched.stopwait--;
680 while(p = pidleget()) {
682 runtime·sched.stopwait--;
684 wait = runtime·sched.stopwait > 0;
685 runtime·unlock(&runtime·sched.lock);
687 // wait for remaining P's to stop voluntarily
690 // wait for 100us, then try to re-preempt in case of any races
691 if(runtime·notetsleep(&runtime·sched.stopnote, 100*1000)) {
692 runtime·noteclear(&runtime·sched.stopnote);
698 if(runtime·sched.stopwait)
699 runtime·throw("stoptheworld: not stopped");
700 for(i = 0; i < runtime·gomaxprocs; i++) {
702 if(p->status != Pgcstop)
703 runtime·throw("stoptheworld: not stopped");
714 runtime·starttheworld(void)
721 g->m->locks++; // disable preemption because it can be holding p in a local var
722 gp = runtime·netpoll(false); // non-blocking
724 add = needaddgcproc();
725 runtime·lock(&runtime·sched.lock);
726 if(runtime·newprocs) {
727 procresize(runtime·newprocs);
728 runtime·newprocs = 0;
730 procresize(runtime·gomaxprocs);
731 runtime·sched.gcwaiting = 0;
734 while(p = pidleget()) {
735 // procresize() puts p's with work at the beginning of the list.
736 // Once we reach a p without a run queue, the rest don't have one either.
737 if(p->runqhead == p->runqtail) {
745 if(runtime·sched.sysmonwait) {
746 runtime·sched.sysmonwait = false;
747 runtime·notewakeup(&runtime·sched.sysmonnote);
749 runtime·unlock(&runtime·sched.lock);
758 runtime·throw("starttheworld: inconsistent mp->nextp");
760 runtime·notewakeup(&mp->park);
762 // Start M to run P. Do not start another M below.
769 // If GC could have used another helper proc, start one now,
770 // in the hope that it will be available next time.
771 // It would have been even better to start it before the collection,
772 // but doing so requires allocating memory, so it's tricky to
773 // coordinate. This lazy approach works out in practice:
774 // we don't mind if the first couple gc rounds don't have quite
775 // the maximum number of procs.
779 if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
780 g->stackguard0 = StackPreempt;
783 static void mstart(void);
785 // Called to start an M.
786 #pragma textflag NOSPLIT
792 if(g->stack.lo == 0) {
793 // Initialize stack bounds from system stack.
794 // Cgo may have left stack size in stack.hi.
798 g->stack.hi = (uintptr)&x;
799 g->stack.lo = g->stack.hi - size + 1024;
802 // Initialize stack guards so that we can start calling
803 // both Go and C functions with stack growth prologues.
804 g->stackguard0 = g->stack.lo + StackGuard;
805 g->stackguard1 = g->stackguard0;
813 runtime·throw("bad runtime·mstart");
815 // Record top of stack for use by mcall.
816 // Once we call schedule we're never coming back,
817 // so other calls can reuse this stack space.
818 runtime·gosave(&g->m->g0->sched);
819 g->m->g0->sched.pc = (uintptr)-1; // make sure it is never used
823 // Install signal handlers; after minit so that minit can
824 // prepare the thread to be able to handle the signals.
825 if(g->m == &runtime·m0)
834 } else if(g->m != &runtime·m0) {
835 acquirep(g->m->nextp);
840 // TODO(brainman): This point is never reached, because scheduler
841 // does not release os threads at the moment. But once this path
842 // is enabled, we must remove our seh here.
845 // When running with cgo, we call _cgo_thread_start
846 // to start threads for us so that we can play nicely with
848 void (*_cgo_thread_start)(void*);
850 typedef struct CgoThreadStart CgoThreadStart;
851 struct CgoThreadStart
858 M *runtime·newM(void); // in proc.go
860 // Allocate a new m unassociated with any thread.
861 // Can use p for allocation context if needed.
867 g->m->locks++; // disable GC because it can be called from sysmon
869 acquirep(p); // temporarily borrow p for mallocs in this function
873 // In case of cgo or Solaris, pthread_create will make us a stack.
874 // Windows and Plan 9 will layout sched stack on OS stack.
875 if(runtime·iscgo || Solaris || Windows || Plan9)
876 mp->g0 = runtime·malg(-1);
878 mp->g0 = runtime·malg(8192);
879 runtime·writebarrierptr_nostore(&mp->g0, mp->g0);
881 runtime·writebarrierptr_nostore(&mp->g0->m, mp->g0->m);
886 if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
887 g->stackguard0 = StackPreempt;
892 G *runtime·newG(void); // in proc.go
897 return runtime·newG();
900 static M* lockextra(bool nilokay);
901 static void unlockextra(M*);
903 // needm is called when a cgo callback happens on a
904 // thread without an m (a thread not created by Go).
905 // In this case, needm is expected to find an m to use
906 // and return with m, g initialized correctly.
907 // Since m and g are not set now (likely nil, but see below)
908 // needm is limited in what routines it can call. In particular
909 // it can only call nosplit functions (textflag 7) and cannot
910 // do any scheduling that requires an m.
912 // In order to avoid needing heavy lifting here, we adopt
913 // the following strategy: there is a stack of available m's
914 // that can be stolen. Using compare-and-swap
915 // to pop from the stack has ABA races, so we simulate
916 // a lock by doing an exchange (via casp) to steal the stack
917 // head and replace the top pointer with MLOCKED (1).
918 // This serves as a simple spin lock that we can use even
919 // without an m. The thread that locks the stack in this way
920 // unlocks the stack by storing a valid stack head pointer.
922 // In order to make sure that there is always an m structure
923 // available to be stolen, we maintain the invariant that there
924 // is always one more than needed. At the beginning of the
925 // program (if cgo is in use) the list is seeded with a single m.
926 // If needm finds that it has taken the last m off the list, its job
927 // is - once it has installed its own m so that it can do things like
928 // allocate memory - to create a spare m and put it on the list.
930 // Each of these extra m's also has a g0 and a curg that are
931 // pressed into service as the scheduling stack and current
932 // goroutine for the duration of the cgo callback.
934 // When the callback is done with the m, it calls dropm to
935 // put the m back on the list.
936 #pragma textflag NOSPLIT
938 runtime·needm(byte x)
942 if(runtime·needextram) {
943 // Can happen if C/C++ code calls Go from a global ctor.
944 // Can not throw, because scheduler is not initialized yet.
945 runtime·write(2, "fatal error: cgo callback before cgo call\n",
946 sizeof("fatal error: cgo callback before cgo call\n")-1);
950 // Lock extra list, take head, unlock popped list.
951 // nilokay=false is safe here because of the invariant above,
952 // that the extra list always contains or will soon contain
954 mp = lockextra(false);
956 // Set needextram when we've just emptied the list,
957 // so that the eventual call into cgocallbackg will
958 // allocate a new m for the extra list. We delay the
959 // allocation until then so that it can be done
960 // after exitsyscall makes sure it is okay to be
961 // running at all (that is, there's no garbage collection
962 // running right now).
963 mp->needextram = mp->schedlink == nil;
964 unlockextra(mp->schedlink);
966 // Install g (= m->g0) and set the stack bounds
967 // to match the current stack. We don't actually know
968 // how big the stack is, like we don't know how big any
969 // scheduling stack is, but we assume there's at least 32 kB,
970 // which is more than enough for us.
971 runtime·setg(mp->g0);
972 g->stack.hi = (uintptr)(&x + 1024);
973 g->stack.lo = (uintptr)(&x - 32*1024);
974 g->stackguard0 = g->stack.lo + StackGuard;
976 // Initialize this thread to use the m.
981 // newextram allocates an m and puts it on the extra list.
982 // It is called with a working local m, so that it can do things
983 // like call schedlock and allocate.
985 runtime·newextram(void)
990 // Create extra goroutine locked to extra m.
991 // The goroutine is the context in which the cgo callback will run.
992 // The sched.pc will never be returned to, but setting it to
993 // runtime.goexit makes clear to the traceback routines where
994 // the goroutine stack ends.
995 mp = runtime·allocm(nil);
996 gp = runtime·malg(4096);
997 gp->sched.pc = (uintptr)runtime·goexit + PCQuantum;
998 gp->sched.sp = gp->stack.hi;
999 gp->sched.sp -= 4*sizeof(uintreg); // extra space in case of reads slightly beyond frame
1002 gp->syscallpc = gp->sched.pc;
1003 gp->syscallsp = gp->sched.sp;
1004 // malg returns status as Gidle, change to Gsyscall before adding to allg
1005 // where GC will see it.
1006 runtime·casgstatus(gp, Gidle, Gsyscall);
1009 mp->locked = LockInternal;
1012 gp->goid = runtime·xadd64(&runtime·sched.goidgen, 1);
1014 gp->racectx = runtime·racegostart(runtime·newextram);
1015 // put on allg for garbage collector
1016 runtime·allgadd(gp);
1018 // Add m to the extra list.
1019 mnext = lockextra(true);
1020 mp->schedlink = mnext;
1024 // dropm is called when a cgo callback has called needm but is now
1025 // done with the callback and returning back into the non-Go thread.
1026 // It puts the current m back onto the extra list.
1028 // The main expense here is the call to signalstack to release the
1029 // m's signal stack, and then the call to needm on the next callback
1030 // from this thread. It is tempting to try to save the m for next time,
1031 // which would eliminate both these costs, but there might not be
1032 // a next time: the current thread (which Go does not control) might exit.
1033 // If we saved the m for that thread, there would be an m leak each time
1034 // such a thread exited. Instead, we acquire and release an m on each
1035 // call. These should typically not be scheduling operations, just a few
1036 // atomics, so the cost should be small.
1038 // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
1039 // variable using pthread_key_create. Unlike the pthread keys we already use
1040 // on OS X, this dummy key would never be read by Go code. It would exist
1041 // only so that we could register at thread-exit-time destructor.
1042 // That destructor would put the m back onto the extra list.
1043 // This is purely a performance optimization. The current version,
1044 // in which dropm happens on each cgo call, is still correct too.
1045 // We may have to keep the current version on systems with cgo
1046 // but without pthreads, like Windows.
1052 // Undo whatever initialization minit did during needm.
1055 // Clear m and g, and return m to the extra list.
1056 // After the call to setmg we can only call nosplit functions.
1060 mnext = lockextra(true);
1061 mp->schedlink = mnext;
1067 // lockextra locks the extra list and returns the list head.
1068 // The caller must unlock the list by storing a new list head
1069 // to runtime.extram. If nilokay is true, then lockextra will
1070 // return a nil list head if that's what it finds. If nilokay is false,
1071 // lockextra will keep waiting until the list head is no longer nil.
1072 #pragma textflag NOSPLIT
1074 lockextra(bool nilokay)
1077 void (*yield)(void);
1080 mpx = runtime·atomicloaduintptr((uintptr*)&runtime·extram);
1081 if(mpx == MLOCKED) {
1082 yield = runtime·osyield;
1086 if(mpx == 0 && !nilokay) {
1090 if(!runtime·casuintptr((uintptr*)&runtime·extram, mpx, MLOCKED)) {
1091 yield = runtime·osyield;
1100 #pragma textflag NOSPLIT
1104 runtime·atomicstorep(&runtime·extram, mp);
1108 // Create a new m. It will start off with a call to fn, or else the scheduler.
1110 newm(void(*fn)(void), P *p)
1114 mp = runtime·allocm(p);
1121 if(_cgo_thread_start == nil)
1122 runtime·throw("_cgo_thread_start missing");
1125 ts.fn = runtime·mstart;
1126 runtime·asmcgocall(_cgo_thread_start, &ts);
1129 runtime·newosproc(mp, (byte*)mp->g0->stack.hi);
1132 // Stops execution of the current m until new work is available.
1133 // Returns with acquired P.
1138 runtime·throw("stopm holding locks");
1140 runtime·throw("stopm holding p");
1141 if(g->m->spinning) {
1142 g->m->spinning = false;
1143 runtime·xadd(&runtime·sched.nmspinning, -1);
1147 runtime·lock(&runtime·sched.lock);
1149 runtime·unlock(&runtime·sched.lock);
1150 runtime·notesleep(&g->m->park);
1151 runtime·noteclear(&g->m->park);
1158 acquirep(g->m->nextp);
1165 g->m->spinning = true;
1168 // Schedules some M to run the p (creates an M if necessary).
1169 // If p==nil, tries to get an idle P, if no idle P's does nothing.
1171 startm(P *p, bool spinning)
1176 runtime·lock(&runtime·sched.lock);
1180 runtime·unlock(&runtime·sched.lock);
1182 runtime·xadd(&runtime·sched.nmspinning, -1);
1187 runtime·unlock(&runtime·sched.lock);
1196 runtime·throw("startm: m is spinning");
1198 runtime·throw("startm: m has p");
1199 mp->spinning = spinning;
1201 runtime·notewakeup(&mp->park);
1204 // Hands off P from syscall or locked M.
1208 // if it has local work, start it straight away
1209 if(p->runqhead != p->runqtail || runtime·sched.runqsize) {
1213 // no local work, check that there are no spinning/idle M's,
1214 // otherwise our help is not required
1215 if(runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) == 0 && // TODO: fast atomic
1216 runtime·cas(&runtime·sched.nmspinning, 0, 1)){
1220 runtime·lock(&runtime·sched.lock);
1221 if(runtime·sched.gcwaiting) {
1222 p->status = Pgcstop;
1223 if(--runtime·sched.stopwait == 0)
1224 runtime·notewakeup(&runtime·sched.stopnote);
1225 runtime·unlock(&runtime·sched.lock);
1228 if(runtime·sched.runqsize) {
1229 runtime·unlock(&runtime·sched.lock);
1233 // If this is the last running P and nobody is polling network,
1234 // need to wakeup another M to poll network.
1235 if(runtime·sched.npidle == runtime·gomaxprocs-1 && runtime·atomicload64(&runtime·sched.lastpoll) != 0) {
1236 runtime·unlock(&runtime·sched.lock);
1241 runtime·unlock(&runtime·sched.lock);
1244 // Tries to add one more P to execute G's.
1245 // Called when a G is made runnable (newproc, ready).
1249 // be conservative about spinning threads
1250 if(!runtime·cas(&runtime·sched.nmspinning, 0, 1))
1255 // Stops execution of the current m that is locked to a g until the g is runnable again.
1256 // Returns with acquired P.
1263 if(g->m->lockedg == nil || g->m->lockedg->lockedm != g->m)
1264 runtime·throw("stoplockedm: inconsistent locking");
1266 // Schedule another M to run this p.
1271 // Wait until another thread schedules lockedg again.
1272 runtime·notesleep(&g->m->park);
1273 runtime·noteclear(&g->m->park);
1274 status = runtime·readgstatus(g->m->lockedg);
1275 if((status&~Gscan) != Grunnable){
1276 runtime·printf("runtime:stoplockedm: g is not Grunnable or Gscanrunnable");
1278 runtime·throw("stoplockedm: not runnable");
1280 acquirep(g->m->nextp);
1284 // Schedules the locked m to run the locked gp.
1293 runtime·throw("startlockedm: locked to me");
1295 runtime·throw("startlockedm: m has p");
1296 // directly handoff current P to the locked m
1300 runtime·notewakeup(&mp->park);
1304 // Stops the current m for stoptheworld.
1305 // Returns when the world is restarted.
1311 if(!runtime·sched.gcwaiting)
1312 runtime·throw("gcstopm: not waiting for gc");
1313 if(g->m->spinning) {
1314 g->m->spinning = false;
1315 runtime·xadd(&runtime·sched.nmspinning, -1);
1318 runtime·lock(&runtime·sched.lock);
1319 p->status = Pgcstop;
1320 if(--runtime·sched.stopwait == 0)
1321 runtime·notewakeup(&runtime·sched.stopnote);
1322 runtime·unlock(&runtime·sched.lock);
1326 // Schedules gp to run on the current M.
1333 runtime·casgstatus(gp, Grunnable, Grunning);
1335 gp->preempt = false;
1336 gp->stackguard0 = gp->stack.lo + StackGuard;
1337 g->m->p->schedtick++;
1341 // Check whether the profiler needs to be turned on or off.
1342 hz = runtime·sched.profilehz;
1343 if(g->m->profilehz != hz)
1344 runtime·resetcpuprofiler(hz);
1346 runtime·gogo(&gp->sched);
1349 // Finds a runnable goroutine to execute.
1350 // Tries to steal from other P's, get g from global queue, poll network.
1359 if(runtime·sched.gcwaiting) {
1363 if(runtime·fingwait && runtime·fingwake && (gp = runtime·wakefing()) != nil)
1366 gp = runqget(g->m->p);
1370 if(runtime·sched.runqsize) {
1371 runtime·lock(&runtime·sched.lock);
1372 gp = globrunqget(g->m->p, 0);
1373 runtime·unlock(&runtime·sched.lock);
1378 gp = runtime·netpoll(false); // non-blocking
1380 injectglist(gp->schedlink);
1381 runtime·casgstatus(gp, Gwaiting, Grunnable);
1384 // If number of spinning M's >= number of busy P's, block.
1385 // This is necessary to prevent excessive CPU consumption
1386 // when GOMAXPROCS>>1 but the program parallelism is low.
1387 if(!g->m->spinning && 2 * runtime·atomicload(&runtime·sched.nmspinning) >= runtime·gomaxprocs - runtime·atomicload(&runtime·sched.npidle)) // TODO: fast atomic
1389 if(!g->m->spinning) {
1390 g->m->spinning = true;
1391 runtime·xadd(&runtime·sched.nmspinning, 1);
1393 // random steal from other P's
1394 for(i = 0; i < 2*runtime·gomaxprocs; i++) {
1395 if(runtime·sched.gcwaiting)
1397 p = runtime·allp[runtime·fastrand1()%runtime·gomaxprocs];
1401 gp = runqsteal(g->m->p, p);
1406 // return P and block
1407 runtime·lock(&runtime·sched.lock);
1408 if(runtime·sched.gcwaiting) {
1409 runtime·unlock(&runtime·sched.lock);
1412 if(runtime·sched.runqsize) {
1413 gp = globrunqget(g->m->p, 0);
1414 runtime·unlock(&runtime·sched.lock);
1419 runtime·unlock(&runtime·sched.lock);
1420 if(g->m->spinning) {
1421 g->m->spinning = false;
1422 runtime·xadd(&runtime·sched.nmspinning, -1);
1424 // check all runqueues once again
1425 for(i = 0; i < runtime·gomaxprocs; i++) {
1426 p = runtime·allp[i];
1427 if(p && p->runqhead != p->runqtail) {
1428 runtime·lock(&runtime·sched.lock);
1430 runtime·unlock(&runtime·sched.lock);
1439 if(runtime·xchg64(&runtime·sched.lastpoll, 0) != 0) {
1441 runtime·throw("findrunnable: netpoll with p");
1443 runtime·throw("findrunnable: netpoll with spinning");
1444 gp = runtime·netpoll(true); // block until new work is available
1445 runtime·atomicstore64(&runtime·sched.lastpoll, runtime·nanotime());
1447 runtime·lock(&runtime·sched.lock);
1449 runtime·unlock(&runtime·sched.lock);
1452 injectglist(gp->schedlink);
1453 runtime·casgstatus(gp, Gwaiting, Grunnable);
1468 if(g->m->spinning) {
1469 g->m->spinning = false;
1470 nmspinning = runtime·xadd(&runtime·sched.nmspinning, -1);
1472 runtime·throw("findrunnable: negative nmspinning");
1474 nmspinning = runtime·atomicload(&runtime·sched.nmspinning);
1476 // M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
1477 // so see if we need to wakeup another P here.
1478 if (nmspinning == 0 && runtime·atomicload(&runtime·sched.npidle) > 0)
1482 // Injects the list of runnable G's into the scheduler.
1483 // Can run concurrently with GC.
1485 injectglist(G *glist)
1492 runtime·lock(&runtime·sched.lock);
1493 for(n = 0; glist; n++) {
1495 glist = gp->schedlink;
1496 runtime·casgstatus(gp, Gwaiting, Grunnable);
1499 runtime·unlock(&runtime·sched.lock);
1501 for(; n && runtime·sched.npidle; n--)
1505 // One round of scheduler: find a runnable goroutine and execute it.
1514 runtime·throw("schedule: holding locks");
1518 execute(g->m->lockedg); // Never returns.
1522 if(runtime·sched.gcwaiting) {
1528 // Check the global runnable queue once in a while to ensure fairness.
1529 // Otherwise two goroutines can completely occupy the local runqueue
1530 // by constantly respawning each other.
1531 tick = g->m->p->schedtick;
1532 // This is a fancy way to say tick%61==0,
1533 // it uses 2 MUL instructions instead of a single DIV and so is faster on modern processors.
1534 if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime·sched.runqsize > 0) {
1535 runtime·lock(&runtime·sched.lock);
1536 gp = globrunqget(g->m->p, 1);
1537 runtime·unlock(&runtime·sched.lock);
1542 gp = runqget(g->m->p);
1543 if(gp && g->m->spinning)
1544 runtime·throw("schedule: spinning with local work");
1547 gp = findrunnable(); // blocks until work is available
1552 // Hands off own p to the locked m,
1553 // then blocks waiting for a new p.
1561 // dropg removes the association between m and the current goroutine m->curg (gp for short).
1562 // Typically a caller sets gp's status away from Grunning and then
1563 // immediately calls dropg to finish the job. The caller is also responsible
1564 // for arranging that gp will be restarted using runtime·ready at an
1565 // appropriate time. After calling dropg and arranging for gp to be
1566 // readied later, the caller can do other work but eventually should
1567 // call schedule to restart the scheduling of goroutines on this m.
1571 if(g->m->lockedg == nil) {
1572 g->m->curg->m = nil;
1577 // Puts the current goroutine into a waiting state and calls unlockf.
1578 // If unlockf returns false, the goroutine is resumed.
1580 runtime·park(bool(*unlockf)(G*, void*), void *lock, String reason)
1584 g->m->waitlock = lock;
1585 g->m->waitunlockf = unlockf;
1586 g->waitreason = reason;
1587 fn = runtime·park_m;
1592 runtime·parkunlock_c(G *gp, void *lock)
1595 runtime·unlock(lock);
1599 // Puts the current goroutine into a waiting state and unlocks the lock.
1600 // The goroutine can be made runnable again by calling runtime·ready(gp).
1602 runtime·parkunlock(Mutex *lock, String reason)
1604 runtime·park(runtime·parkunlock_c, lock, reason);
1607 // runtime·park continuation on g0.
1609 runtime·park_m(G *gp)
1613 runtime·casgstatus(gp, Grunning, Gwaiting);
1616 if(g->m->waitunlockf) {
1617 ok = g->m->waitunlockf(gp, g->m->waitlock);
1618 g->m->waitunlockf = nil;
1619 g->m->waitlock = nil;
1621 runtime·casgstatus(gp, Gwaiting, Grunnable);
1622 execute(gp); // Schedule it back, never returns.
1629 // Gosched continuation on g0.
1631 runtime·gosched_m(G *gp)
1635 status = runtime·readgstatus(gp);
1636 if((status&~Gscan) != Grunning){
1638 runtime·throw("bad g status");
1640 runtime·casgstatus(gp, Grunning, Grunnable);
1642 runtime·lock(&runtime·sched.lock);
1644 runtime·unlock(&runtime·sched.lock);
1649 // Finishes execution of the current goroutine.
1650 // Must be NOSPLIT because it is called from Go.
1651 #pragma textflag NOSPLIT
1653 runtime·goexit1(void)
1658 runtime·racegoend();
1663 // runtime·goexit continuation on g0.
1667 runtime·casgstatus(gp, Grunning, Gdead);
1670 g->m->lockedg = nil;
1671 gp->paniconfault = 0;
1672 gp->defer = nil; // should be true already but just in case.
1673 gp->panic = nil; // non-nil for Goexit during panic. points at stack-allocated data.
1674 gp->writebuf.array = nil;
1675 gp->writebuf.len = 0;
1676 gp->writebuf.cap = 0;
1677 gp->waitreason.str = nil;
1678 gp->waitreason.len = 0;
1683 if(g->m->locked & ~LockExternal) {
1684 runtime·printf("invalid m->locked = %d\n", g->m->locked);
1685 runtime·throw("internal lockOSThread error");
1692 #pragma textflag NOSPLIT
1694 save(uintptr pc, uintptr sp)
1704 static void entersyscall_bad(void);
1705 static void entersyscall_sysmon(void);
1706 static void entersyscall_gcwait(void);
1708 // The goroutine g is about to enter a system call.
1709 // Record that it's not using the cpu anymore.
1710 // This is called only from the go syscall library and cgocall,
1711 // not from the low-level system calls used by the runtime.
1713 // Entersyscall cannot split the stack: the runtime·gosave must
1714 // make g->sched refer to the caller's stack segment, because
1715 // entersyscall is going to return immediately after.
1717 // Nothing entersyscall calls can split the stack either.
1718 // We cannot safely move the stack during an active call to syscall,
1719 // because we do not know which of the uintptr arguments are
1720 // really pointers (back into the stack).
1721 // In practice, this means that we make the fast path run through
1722 // entersyscall doing no-split things, and the slow path has to use onM
1723 // to run bigger things on the m stack.
1725 // reentersyscall is the entry point used by cgo callbacks, where explicitly
1726 // saved SP and PC are restored. This is needed when exitsyscall will be called
1727 // from a function further up in the call stack than the parent, as g->syscallsp
1728 // must always point to a valid stack frame. entersyscall below is the normal
1729 // entry point for syscalls, which obtains the SP and PC from the caller.
1730 #pragma textflag NOSPLIT
1732 runtime·reentersyscall(uintptr pc, uintptr sp)
1736 // Disable preemption because during this function g is in Gsyscall status,
1737 // but can have inconsistent g->sched, do not let GC observe it.
1740 // Entersyscall must not call any function that might split/grow the stack.
1741 // (See details in comment above.)
1742 // Catch calls that might, by replacing the stack guard with something that
1743 // will trip any stack check and leaving a flag to tell newstack to die.
1744 g->stackguard0 = StackPreempt;
1747 // Leave SP around for GC and traceback.
1751 runtime·casgstatus(g, Grunning, Gsyscall);
1752 if(g->syscallsp < g->stack.lo || g->stack.hi < g->syscallsp) {
1753 fn = entersyscall_bad;
1757 if(runtime·atomicload(&runtime·sched.sysmonwait)) { // TODO: fast atomic
1758 fn = entersyscall_sysmon;
1765 runtime·atomicstore(&g->m->p->status, Psyscall);
1766 if(runtime·sched.gcwaiting) {
1767 fn = entersyscall_gcwait;
1772 // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
1773 // We set stackguard to StackPreempt so that first split stack check calls morestack.
1774 // Morestack detects this case and throws.
1775 g->stackguard0 = StackPreempt;
1779 // Standard syscall entry used by the go syscall library and normal cgo calls.
1780 #pragma textflag NOSPLIT
1782 ·entersyscall(int32 dummy)
1784 runtime·reentersyscall((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
1788 entersyscall_bad(void)
1793 runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
1794 gp->syscallsp, gp->stack.lo, gp->stack.hi);
1795 runtime·throw("entersyscall");
1799 entersyscall_sysmon(void)
1801 runtime·lock(&runtime·sched.lock);
1802 if(runtime·atomicload(&runtime·sched.sysmonwait)) {
1803 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
1804 runtime·notewakeup(&runtime·sched.sysmonnote);
1806 runtime·unlock(&runtime·sched.lock);
1810 entersyscall_gcwait(void)
1812 runtime·lock(&runtime·sched.lock);
1813 if (runtime·sched.stopwait > 0 && runtime·cas(&g->m->p->status, Psyscall, Pgcstop)) {
1814 if(--runtime·sched.stopwait == 0)
1815 runtime·notewakeup(&runtime·sched.stopnote);
1817 runtime·unlock(&runtime·sched.lock);
1820 static void entersyscallblock_handoff(void);
1822 // The same as runtime·entersyscall(), but with a hint that the syscall is blocking.
1823 #pragma textflag NOSPLIT
1825 ·entersyscallblock(int32 dummy)
1829 g->m->locks++; // see comment in entersyscall
1831 g->stackguard0 = StackPreempt; // see comment in entersyscall
1833 // Leave SP around for GC and traceback.
1834 save((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
1835 g->syscallsp = g->sched.sp;
1836 g->syscallpc = g->sched.pc;
1837 runtime·casgstatus(g, Grunning, Gsyscall);
1838 if(g->syscallsp < g->stack.lo || g->stack.hi < g->syscallsp) {
1839 fn = entersyscall_bad;
1843 fn = entersyscallblock_handoff;
1846 // Resave for traceback during blocked call.
1847 save((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
1853 entersyscallblock_handoff(void)
1855 handoffp(releasep());
1858 // The goroutine g exited its system call.
1859 // Arrange for it to run on a cpu again.
1860 // This is called only from the go syscall library, not
1861 // from the low-level system calls used by the runtime.
1862 #pragma textflag NOSPLIT
1864 ·exitsyscall(int32 dummy)
1868 g->m->locks++; // see comment in entersyscall
1870 if(runtime·getcallersp(&dummy) > g->syscallsp)
1871 runtime·throw("exitsyscall: syscall frame is no longer valid");
1874 if(exitsyscallfast()) {
1875 // There's a cpu for us, so we can run.
1876 g->m->p->syscalltick++;
1877 // We need to cas the status and scan before resuming...
1878 runtime·casgstatus(g, Gsyscall, Grunning);
1880 // Garbage collector isn't running (since we are),
1881 // so okay to clear syscallsp.
1882 g->syscallsp = (uintptr)nil;
1885 // restore the preemption request in case we've cleared it in newstack
1886 g->stackguard0 = StackPreempt;
1888 // otherwise restore the real stackguard, we've spoiled it in entersyscall/entersyscallblock
1889 g->stackguard0 = g->stack.lo + StackGuard;
1897 // Call the scheduler.
1901 // Scheduler returned, so we're allowed to run now.
1902 // Delete the syscallsp information that we left for
1903 // the garbage collector during the system call.
1904 // Must wait until now because until gosched returns
1905 // we don't know for sure that the garbage collector
1907 g->syscallsp = (uintptr)nil;
1908 g->m->p->syscalltick++;
1912 static void exitsyscallfast_pidle(void);
1914 #pragma textflag NOSPLIT
1916 exitsyscallfast(void)
1920 // Freezetheworld sets stopwait but does not retake P's.
1921 if(runtime·sched.stopwait) {
1927 // Try to re-acquire the last P.
1928 if(g->m->p && g->m->p->status == Psyscall && runtime·cas(&g->m->p->status, Psyscall, Prunning)) {
1929 // There's a cpu for us, so we can run.
1930 g->m->mcache = g->m->p->mcache;
1934 // Try to get any other idle P.
1937 if(runtime·sched.pidle) {
1938 fn = exitsyscallfast_pidle;
1940 if(g->m->scalararg[0]) {
1941 g->m->scalararg[0] = 0;
1949 exitsyscallfast_pidle(void)
1953 runtime·lock(&runtime·sched.lock);
1955 if(p && runtime·atomicload(&runtime·sched.sysmonwait)) {
1956 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
1957 runtime·notewakeup(&runtime·sched.sysmonnote);
1959 runtime·unlock(&runtime·sched.lock);
1962 g->m->scalararg[0] = 1;
1964 g->m->scalararg[0] = 0;
1967 // runtime·exitsyscall slow path on g0.
1968 // Failed to acquire P, enqueue gp as runnable.
1974 runtime·casgstatus(gp, Gsyscall, Grunnable);
1976 runtime·lock(&runtime·sched.lock);
1980 else if(runtime·atomicload(&runtime·sched.sysmonwait)) {
1981 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
1982 runtime·notewakeup(&runtime·sched.sysmonnote);
1984 runtime·unlock(&runtime·sched.lock);
1987 execute(gp); // Never returns.
1990 // Wait until another thread schedules gp and so m again.
1992 execute(gp); // Never returns.
1995 schedule(); // Never returns.
2004 // Fork can hang if preempted with signals frequently enough (see issue 5517).
2005 // Ensure that we stay on the same M where we disable profiling.
2007 if(gp->m->profilehz != 0)
2008 runtime·resetcpuprofiler(0);
2010 // This function is called before fork in syscall package.
2011 // Code between fork and exec must not allocate memory nor even try to grow stack.
2012 // Here we spoil g->stackguard to reliably detect any attempts to grow stack.
2013 // runtime_AfterFork will undo this in parent process, but not in child.
2014 gp->stackguard0 = StackFork;
2017 // Called from syscall package before fork.
2018 #pragma textflag NOSPLIT
2020 syscall·runtime_BeforeFork(void)
2035 // See the comment in runtime_BeforeFork.
2036 gp->stackguard0 = gp->stack.lo + StackGuard;
2038 hz = runtime·sched.profilehz;
2040 runtime·resetcpuprofiler(hz);
2044 // Called from syscall package after fork in parent.
2045 #pragma textflag NOSPLIT
2047 syscall·runtime_AfterFork(void)
2055 // Hook used by runtime·malg to call runtime·stackalloc on the
2056 // scheduler stack. This exists because runtime·stackalloc insists
2057 // on being called on the scheduler stack, to avoid trying to grow
2058 // the stack while allocating a new stack segment.
2065 newg = g->m->ptrarg[0];
2066 size = g->m->scalararg[0];
2068 newg->stack = runtime·stackalloc(size);
2070 runtime·gogo(&gp->sched);
2073 // Allocate a new g, with a stack big enough for stacksize bytes.
2075 runtime·malg(int32 stacksize)
2081 if(stacksize >= 0) {
2082 stacksize = runtime·round2(StackSystem + stacksize);
2084 // running on scheduler stack already.
2085 newg->stack = runtime·stackalloc(stacksize);
2087 // have to call stackalloc on scheduler stack.
2088 g->m->scalararg[0] = stacksize;
2089 g->m->ptrarg[0] = newg;
2092 g->m->ptrarg[0] = nil;
2094 newg->stackguard0 = newg->stack.lo + StackGuard;
2095 newg->stackguard1 = ~(uintptr)0;
2108 siz = g->m->scalararg[0];
2109 callerpc = (void*)g->m->scalararg[1];
2110 argp = g->m->ptrarg[0];
2111 fn = (FuncVal*)g->m->ptrarg[1];
2113 runtime·newproc1(fn, argp, siz, 0, callerpc);
2114 g->m->ptrarg[0] = nil;
2115 g->m->ptrarg[1] = nil;
2118 // Create a new g running fn with siz bytes of arguments.
2119 // Put it on the queue of g's waiting to run.
2120 // The compiler turns a go statement into a call to this.
2121 // Cannot split the stack because it assumes that the arguments
2122 // are available sequentially after &fn; they would not be
2123 // copied if a stack split occurred.
2124 #pragma textflag NOSPLIT
2126 runtime·newproc(int32 siz, FuncVal* fn, ...)
2131 if(thechar == '5' || thechar == '9')
2132 argp = (byte*)(&fn+2); // skip caller's saved LR
2134 argp = (byte*)(&fn+1);
2137 g->m->scalararg[0] = siz;
2138 g->m->scalararg[1] = (uintptr)runtime·getcallerpc(&siz);
2139 g->m->ptrarg[0] = argp;
2140 g->m->ptrarg[1] = fn;
2146 void runtime·main(void);
2148 // Create a new g running fn with narg bytes of arguments starting
2149 // at argp and returning nret bytes of results. callerpc is the
2150 // address of the go statement that created this. The new g is put
2151 // on the queue of g's waiting to run.
2153 runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
2161 g->m->throwing = -1; // do not dump full stacks
2162 runtime·throw("go of nil func value");
2164 g->m->locks++; // disable preemption because it can be holding p in a local var
2168 // We could allocate a larger initial stack if necessary.
2169 // Not worth it: this is almost always an error.
2170 // 4*sizeof(uintreg): extra space added below
2171 // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
2172 if(siz >= StackMin - 4*sizeof(uintreg) - sizeof(uintreg))
2173 runtime·throw("runtime.newproc: function arguments too large for new goroutine");
2176 if((newg = gfget(p)) == nil) {
2177 newg = runtime·malg(StackMin);
2178 runtime·casgstatus(newg, Gidle, Gdead);
2179 runtime·allgadd(newg); // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
2181 if(newg->stack.hi == 0)
2182 runtime·throw("newproc1: newg missing stack");
2184 if(runtime·readgstatus(newg) != Gdead)
2185 runtime·throw("newproc1: new g is not Gdead");
2187 sp = (byte*)newg->stack.hi;
2188 sp -= 4*sizeof(uintreg); // extra space in case of reads slightly beyond frame
2190 runtime·memmove(sp, argp, narg);
2191 if(thechar == '5' || thechar == '9') {
2193 sp -= sizeof(void*);
2197 runtime·memclr((byte*)&newg->sched, sizeof newg->sched);
2198 newg->sched.sp = (uintptr)sp;
2199 newg->sched.pc = (uintptr)runtime·goexit + PCQuantum; // +PCQuantum so that previous instruction is in same function
2200 newg->sched.g = newg;
2201 runtime·gostartcallfn(&newg->sched, fn);
2202 newg->gopc = (uintptr)callerpc;
2203 runtime·casgstatus(newg, Gdead, Grunnable);
2205 if(p->goidcache == p->goidcacheend) {
2206 // Sched.goidgen is the last allocated id,
2207 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
2208 // At startup sched.goidgen=0, so main goroutine receives goid=1.
2209 p->goidcache = runtime·xadd64(&runtime·sched.goidgen, GoidCacheBatch);
2210 p->goidcache -= GoidCacheBatch - 1;
2211 p->goidcacheend = p->goidcache + GoidCacheBatch;
2213 newg->goid = p->goidcache++;
2215 newg->racectx = runtime·racegostart((void*)callerpc);
2218 if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0 && fn->fn != runtime·main) // TODO: fast atomic
2221 if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
2222 g->stackguard0 = StackPreempt;
2226 // Put on gfree list.
2227 // If local list is too long, transfer a batch to the global list.
2233 if(runtime·readgstatus(gp) != Gdead)
2234 runtime·throw("gfput: bad status (not Gdead)");
2236 stksize = gp->stack.hi - gp->stack.lo;
2238 if(stksize != FixedStack) {
2239 // non-standard stack size - free it.
2240 runtime·stackfree(gp->stack);
2243 gp->stackguard0 = 0;
2245 gp->schedlink = p->gfree;
2248 if(p->gfreecnt >= 64) {
2249 runtime·lock(&runtime·sched.gflock);
2250 while(p->gfreecnt >= 32) {
2253 p->gfree = gp->schedlink;
2254 gp->schedlink = runtime·sched.gfree;
2255 runtime·sched.gfree = gp;
2256 runtime·sched.ngfree++;
2258 runtime·unlock(&runtime·sched.gflock);
2262 // Get from gfree list.
2263 // If local list is empty, grab a batch from global list.
2272 if(gp == nil && runtime·sched.gfree) {
2273 runtime·lock(&runtime·sched.gflock);
2274 while(p->gfreecnt < 32 && runtime·sched.gfree != nil) {
2276 gp = runtime·sched.gfree;
2277 runtime·sched.gfree = gp->schedlink;
2278 runtime·sched.ngfree--;
2279 gp->schedlink = p->gfree;
2282 runtime·unlock(&runtime·sched.gflock);
2286 p->gfree = gp->schedlink;
2289 if(gp->stack.lo == 0) {
2290 // Stack was deallocated in gfput. Allocate a new one.
2292 gp->stack = runtime·stackalloc(FixedStack);
2294 g->m->scalararg[0] = FixedStack;
2295 g->m->ptrarg[0] = gp;
2298 g->m->ptrarg[0] = nil;
2300 gp->stackguard0 = gp->stack.lo + StackGuard;
2303 runtime·racemalloc((void*)gp->stack.lo, gp->stack.hi - gp->stack.lo);
2309 // Purge all cached G's from gfree list to the global list.
2315 runtime·lock(&runtime·sched.gflock);
2316 while(p->gfreecnt != 0) {
2319 p->gfree = gp->schedlink;
2320 gp->schedlink = runtime·sched.gfree;
2321 runtime·sched.gfree = gp;
2322 runtime·sched.ngfree++;
2324 runtime·unlock(&runtime·sched.gflock);
2327 #pragma textflag NOSPLIT
2329 runtime·Breakpoint(void)
2331 runtime·breakpoint();
2334 // lockOSThread is called by runtime.LockOSThread and runtime.lockOSThread below
2335 // after they modify m->locked. Do not allow preemption during this call,
2336 // or else the m might be different in this function than in the caller.
2337 #pragma textflag NOSPLIT
2345 #pragma textflag NOSPLIT
2347 runtime·LockOSThread(void)
2349 g->m->locked |= LockExternal;
2353 #pragma textflag NOSPLIT
2355 runtime·lockOSThread(void)
2357 g->m->locked += LockInternal;
2362 // unlockOSThread is called by runtime.UnlockOSThread and runtime.unlockOSThread below
2363 // after they update m->locked. Do not allow preemption during this call,
2364 // or else the m might be in different in this function than in the caller.
2365 #pragma textflag NOSPLIT
2367 unlockOSThread(void)
2369 if(g->m->locked != 0)
2371 g->m->lockedg = nil;
2375 #pragma textflag NOSPLIT
2377 runtime·UnlockOSThread(void)
2379 g->m->locked &= ~LockExternal;
2383 static void badunlockOSThread(void);
2385 #pragma textflag NOSPLIT
2387 runtime·unlockOSThread(void)
2391 if(g->m->locked < LockInternal) {
2392 fn = badunlockOSThread;
2395 g->m->locked -= LockInternal;
2400 badunlockOSThread(void)
2402 runtime·throw("runtime: internal error: misuse of lockOSThread/unlockOSThread");
2405 #pragma textflag NOSPLIT
2407 runtime·gcount(void)
2412 n = runtime·allglen - runtime·sched.ngfree;
2413 for(pp=runtime·allp; p=*pp; pp++)
2415 // All these variables can be changed concurrently, so the result can be inconsistent.
2416 // But at least the current goroutine is running.
2423 runtime·mcount(void)
2425 return runtime·sched.mcount;
2428 static struct ProfState {
2433 static void System(void) { System(); }
2434 static void ExternalCode(void) { ExternalCode(); }
2435 static void GC(void) { GC(); }
2437 extern void runtime·cpuproftick(uintptr*, int32);
2438 extern byte runtime·etext[];
2440 // Called if we receive a SIGPROF signal.
2442 runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp, M *mp)
2446 // Do not use global m in this function, use mp instead.
2447 // On windows one m is sending reports about all the g's, so m means a wrong thing.
2457 // Profiling runs concurrently with GC, so it must not allocate.
2460 // Define that a "user g" is a user-created goroutine, and a "system g"
2461 // is one that is m->g0 or m->gsignal. We've only made sure that we
2462 // can unwind user g's, so exclude the system g's.
2464 // It is not quite as easy as testing gp == m->curg (the current user g)
2465 // because we might be interrupted for profiling halfway through a
2466 // goroutine switch. The switch involves updating three (or four) values:
2467 // g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
2468 // because once it gets updated the new g is running.
2470 // When switching from a user g to a system g, LR is not considered live,
2471 // so the update only affects g, SP, and PC. Since PC must be last, there
2472 // the possible partial transitions in ordinary execution are (1) g alone is updated,
2473 // (2) both g and SP are updated, and (3) SP alone is updated.
2474 // If g is updated, we'll see a system g and not look closer.
2475 // If SP alone is updated, we can detect the partial transition by checking
2476 // whether the SP is within g's stack bounds. (We could also require that SP
2477 // be changed only after g, but the stack bounds check is needed by other
2478 // cases, so there is no need to impose an additional requirement.)
2480 // There is one exceptional transition to a system g, not in ordinary execution.
2481 // When a signal arrives, the operating system starts the signal handler running
2482 // with an updated PC and SP. The g is updated last, at the beginning of the
2483 // handler. There are two reasons this is okay. First, until g is updated the
2484 // g and SP do not match, so the stack bounds check detects the partial transition.
2485 // Second, signal handlers currently run with signals disabled, so a profiling
2486 // signal cannot arrive during the handler.
2488 // When switching from a system g to a user g, there are three possibilities.
2490 // First, it may be that the g switch has no PC update, because the SP
2491 // either corresponds to a user g throughout (as in runtime.asmcgocall)
2492 // or because it has been arranged to look like a user g frame
2493 // (as in runtime.cgocallback_gofunc). In this case, since the entire
2494 // transition is a g+SP update, a partial transition updating just one of
2495 // those will be detected by the stack bounds check.
2497 // Second, when returning from a signal handler, the PC and SP updates
2498 // are performed by the operating system in an atomic update, so the g
2499 // update must be done before them. The stack bounds check detects
2500 // the partial transition here, and (again) signal handlers run with signals
2501 // disabled, so a profiling signal cannot arrive then anyway.
2503 // Third, the common case: it may be that the switch updates g, SP, and PC
2504 // separately, as in runtime.gogo.
2506 // Because runtime.gogo is the only instance, we check whether the PC lies
2507 // within that function, and if so, not ask for a traceback. This approach
2508 // requires knowing the size of the runtime.gogo function, which we
2509 // record in arch_*.h and check in runtime_test.go.
2511 // There is another apparently viable approach, recorded here in case
2512 // the "PC within runtime.gogo" check turns out not to be usable.
2513 // It would be possible to delay the update of either g or SP until immediately
2514 // before the PC update instruction. Then, because of the stack bounds check,
2515 // the only problematic interrupt point is just before that PC update instruction,
2516 // and the sigprof handler can detect that instruction and simulate stepping past
2517 // it in order to reach a consistent state. On ARM, the update of g must be made
2518 // in two places (in R10 and also in a TLS slot), so the delayed update would
2519 // need to be the SP update. The sigprof handler must read the instruction at
2520 // the current PC and if it was the known instruction (for example, JMP BX or
2521 // MOV R2, PC), use that other register in place of the PC value.
2522 // The biggest drawback to this solution is that it requires that we can tell
2523 // whether it's safe to read from the memory pointed at by PC.
2524 // In a correct program, we can test PC == nil and otherwise read,
2525 // but if a profiling signal happens at the instant that a program executes
2526 // a bad jump (before the program manages to handle the resulting fault)
2527 // the profiling handler could fault trying to read nonexistent memory.
2529 // To recap, there are no constraints on the assembly being used for the
2530 // transition. We simply require that g and SP match and that the PC is not
2533 if(gp == nil || gp != mp->curg ||
2534 (uintptr)sp < gp->stack.lo || gp->stack.hi < (uintptr)sp ||
2535 ((uint8*)runtime·gogo <= pc && pc < (uint8*)runtime·gogo + RuntimeGogoBytes))
2540 n = runtime·gentraceback((uintptr)pc, (uintptr)sp, (uintptr)lr, gp, 0, stk, nelem(stk), nil, nil, TraceTrap);
2541 if(!traceback || n <= 0) {
2542 // Normal traceback is impossible or has failed.
2543 // See if it falls into several common cases.
2545 if(mp->ncgo > 0 && mp->curg != nil &&
2546 mp->curg->syscallpc != 0 && mp->curg->syscallsp != 0) {
2547 // Cgo, we can't unwind and symbolize arbitrary C code,
2548 // so instead collect Go stack that leads to the cgo call.
2549 // This is especially important on windows, since all syscalls are cgo calls.
2550 n = runtime·gentraceback(mp->curg->syscallpc, mp->curg->syscallsp, 0, mp->curg, 0, stk, nelem(stk), nil, nil, 0);
2553 if(n == 0 && mp->libcallg != nil && mp->libcallpc != 0 && mp->libcallsp != 0) {
2554 // Libcall, i.e. runtime syscall on windows.
2555 // Collect Go stack that leads to the call.
2556 n = runtime·gentraceback(mp->libcallpc, mp->libcallsp, 0, mp->libcallg, 0, stk, nelem(stk), nil, nil, 0);
2560 // If all of the above has failed, account it against abstract "System" or "GC".
2562 // "ExternalCode" is better than "etext".
2563 if((uintptr)pc > (uintptr)runtime·etext)
2564 pc = (byte*)ExternalCode + PCQuantum;
2565 stk[0] = (uintptr)pc;
2566 if(mp->gcing || mp->helpgc)
2567 stk[1] = (uintptr)GC + PCQuantum;
2569 stk[1] = (uintptr)System + PCQuantum;
2574 // Simple cas-lock to coordinate with setcpuprofilerate.
2575 while(!runtime·cas(&prof.lock, 0, 1))
2578 runtime·cpuproftick(stk, n);
2579 runtime·atomicstore(&prof.lock, 0);
2584 // Arrange to call fn with a traceback hz times a second.
2586 runtime·setcpuprofilerate_m(void)
2590 hz = g->m->scalararg[0];
2591 g->m->scalararg[0] = 0;
2593 // Force sane arguments.
2597 // Disable preemption, otherwise we can be rescheduled to another thread
2598 // that has profiling enabled.
2601 // Stop profiler on this thread so that it is safe to lock prof.
2602 // if a profiling signal came in while we had prof locked,
2603 // it would deadlock.
2604 runtime·resetcpuprofiler(0);
2606 while(!runtime·cas(&prof.lock, 0, 1))
2609 runtime·atomicstore(&prof.lock, 0);
2611 runtime·lock(&runtime·sched.lock);
2612 runtime·sched.profilehz = hz;
2613 runtime·unlock(&runtime·sched.lock);
2616 runtime·resetcpuprofiler(hz);
2621 P *runtime·newP(void);
2623 // Change number of processors. The world is stopped, sched is locked.
2624 // gcworkbufs are not being modified by either the GC or
2625 // the write barrier code.
2627 procresize(int32 new)
2634 old = runtime·gomaxprocs;
2635 if(old < 0 || old > MaxGomaxprocs || new <= 0 || new >MaxGomaxprocs)
2636 runtime·throw("procresize: invalid arg");
2637 // initialize new P's
2638 for(i = 0; i < new; i++) {
2639 p = runtime·allp[i];
2643 p->status = Pgcstop;
2644 runtime·atomicstorep(&runtime·allp[i], p);
2646 if(p->mcache == nil) {
2648 p->mcache = g->m->mcache; // bootstrap
2650 p->mcache = runtime·allocmcache();
2654 // redistribute runnable G's evenly
2655 // collect all runnable goroutines in global queue preserving FIFO order
2656 // FIFO order is required to ensure fairness even during frequent GCs
2657 // see http://golang.org/issue/7126
2661 for(i = 0; i < old; i++) {
2662 p = runtime·allp[i];
2663 if(p->runqhead == p->runqtail)
2666 // pop from tail of local queue
2668 gp = p->runq[p->runqtail%nelem(p->runq)];
2669 // push onto head of global queue
2670 gp->schedlink = runtime·sched.runqhead;
2671 runtime·sched.runqhead = gp;
2672 if(runtime·sched.runqtail == nil)
2673 runtime·sched.runqtail = gp;
2674 runtime·sched.runqsize++;
2677 // fill local queues with at most nelem(p->runq)/2 goroutines
2678 // start at 1 because current M already executes some G and will acquire allp[0] below,
2679 // so if we have a spare G we want to put it into allp[1].
2680 for(i = 1; i < new * nelem(p->runq)/2 && runtime·sched.runqsize > 0; i++) {
2681 gp = runtime·sched.runqhead;
2682 runtime·sched.runqhead = gp->schedlink;
2683 if(runtime·sched.runqhead == nil)
2684 runtime·sched.runqtail = nil;
2685 runtime·sched.runqsize--;
2686 runqput(runtime·allp[i%new], gp);
2690 for(i = new; i < old; i++) {
2691 p = runtime·allp[i];
2692 runtime·freemcache(p->mcache);
2696 // can't free P itself because it can be referenced by an M in syscall
2703 p = runtime·allp[0];
2707 for(i = new-1; i > 0; i--) {
2708 p = runtime·allp[i];
2712 runtime·atomicstore((uint32*)&runtime·gomaxprocs, new);
2715 // Associate p and the current m.
2719 if(g->m->p || g->m->mcache)
2720 runtime·throw("acquirep: already in go");
2721 if(p->m || p->status != Pidle) {
2722 runtime·printf("acquirep: p->m=%p(%d) p->status=%d\n", p->m, p->m ? p->m->id : 0, p->status);
2723 runtime·throw("acquirep: invalid p state");
2725 g->m->mcache = p->mcache;
2728 p->status = Prunning;
2731 // Disassociate p and the current m.
2737 if(g->m->p == nil || g->m->mcache == nil)
2738 runtime·throw("releasep: invalid arg");
2740 if(p->m != g->m || p->mcache != g->m->mcache || p->status != Prunning) {
2741 runtime·printf("releasep: m=%p m->p=%p p->m=%p m->mcache=%p p->mcache=%p p->status=%d\n",
2742 g->m, g->m->p, p->m, g->m->mcache, p->mcache, p->status);
2743 runtime·throw("releasep: invalid p state");
2753 incidlelocked(int32 v)
2755 runtime·lock(&runtime·sched.lock);
2756 runtime·sched.nmidlelocked += v;
2759 runtime·unlock(&runtime·sched.lock);
2762 // Check for deadlock situation.
2763 // The check is based on number of running M's, if 0 -> deadlock.
2770 int32 run, grunning, s;
2774 run = runtime·sched.mcount - runtime·sched.nmidle - runtime·sched.nmidlelocked - 1;
2777 // If we are dying because of a signal caught on an already idle thread,
2778 // freezetheworld will cause all running threads to block.
2779 // And runtime will essentially enter into deadlock state,
2780 // except that there is a thread that will call runtime·exit soon.
2781 if(runtime·panicking > 0)
2784 runtime·printf("runtime: checkdead: nmidle=%d nmidlelocked=%d mcount=%d\n",
2785 runtime·sched.nmidle, runtime·sched.nmidlelocked, runtime·sched.mcount);
2786 runtime·throw("checkdead: inconsistent counts");
2789 runtime·lock(&runtime·allglock);
2790 for(i = 0; i < runtime·allglen; i++) {
2791 gp = runtime·allg[i];
2794 s = runtime·readgstatus(gp);
2802 runtime·unlock(&runtime·allglock);
2803 runtime·printf("runtime: checkdead: find g %D in status %d\n", gp->goid, s);
2804 runtime·throw("checkdead: runnable g");
2808 runtime·unlock(&runtime·allglock);
2809 if(grunning == 0) // possible if main goroutine calls runtime·Goexit()
2810 runtime·throw("no goroutines (main called runtime.Goexit) - deadlock!");
2812 // Maybe jump time forward for playground.
2813 if((gp = runtime·timejump()) != nil) {
2814 runtime·casgstatus(gp, Gwaiting, Grunnable);
2818 runtime·throw("checkdead: no p for timer");
2824 runtime·notewakeup(&mp->park);
2829 g->m->throwing = -1; // do not dump full stacks
2830 runtime·throw("all goroutines are asleep - deadlock!");
2836 uint32 idle, delay, nscavenge;
2837 int64 now, unixnow, lastpoll, lasttrace, lastgc;
2838 int64 forcegcperiod, scavengelimit, lastscavenge, maxsleep;
2841 // If we go two minutes without a garbage collection, force one to run.
2842 forcegcperiod = 2*60*1e9;
2843 // If a heap span goes unused for 5 minutes after a garbage collection,
2844 // we hand it back to the operating system.
2845 scavengelimit = 5*60*1e9;
2846 if(runtime·debug.scavenge > 0) {
2847 // Scavenge-a-lot for testing.
2848 forcegcperiod = 10*1e6;
2849 scavengelimit = 20*1e6;
2851 lastscavenge = runtime·nanotime();
2853 // Make wake-up period small enough for the sampling to be correct.
2854 maxsleep = forcegcperiod/2;
2855 if(scavengelimit < forcegcperiod)
2856 maxsleep = scavengelimit/2;
2859 idle = 0; // how many cycles in succession we had not wokeup somebody
2862 if(idle == 0) // start with 20us sleep...
2864 else if(idle > 50) // start doubling the sleep after 1ms...
2866 if(delay > 10*1000) // up to 10ms
2868 runtime·usleep(delay);
2869 if(runtime·debug.schedtrace <= 0 &&
2870 (runtime·sched.gcwaiting || runtime·atomicload(&runtime·sched.npidle) == runtime·gomaxprocs)) { // TODO: fast atomic
2871 runtime·lock(&runtime·sched.lock);
2872 if(runtime·atomicload(&runtime·sched.gcwaiting) || runtime·atomicload(&runtime·sched.npidle) == runtime·gomaxprocs) {
2873 runtime·atomicstore(&runtime·sched.sysmonwait, 1);
2874 runtime·unlock(&runtime·sched.lock);
2875 runtime·notetsleep(&runtime·sched.sysmonnote, maxsleep);
2876 runtime·lock(&runtime·sched.lock);
2877 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
2878 runtime·noteclear(&runtime·sched.sysmonnote);
2882 runtime·unlock(&runtime·sched.lock);
2884 // poll network if not polled for more than 10ms
2885 lastpoll = runtime·atomicload64(&runtime·sched.lastpoll);
2886 now = runtime·nanotime();
2887 unixnow = runtime·unixnanotime();
2888 if(lastpoll != 0 && lastpoll + 10*1000*1000 < now) {
2889 runtime·cas64(&runtime·sched.lastpoll, lastpoll, now);
2890 gp = runtime·netpoll(false); // non-blocking
2892 // Need to decrement number of idle locked M's
2893 // (pretending that one more is running) before injectglist.
2894 // Otherwise it can lead to the following situation:
2895 // injectglist grabs all P's but before it starts M's to run the P's,
2896 // another M returns from syscall, finishes running its G,
2897 // observes that there is no work to do and no other running M's
2898 // and reports deadlock.
2904 // retake P's blocked in syscalls
2905 // and preempt long running G's
2911 // check if we need to force a GC
2912 lastgc = runtime·atomicload64(&mstats.last_gc);
2913 if(lastgc != 0 && unixnow - lastgc > forcegcperiod && runtime·atomicload(&runtime·forcegc.idle)) {
2914 runtime·lock(&runtime·forcegc.lock);
2915 runtime·forcegc.idle = 0;
2916 runtime·forcegc.g->schedlink = nil;
2917 injectglist(runtime·forcegc.g);
2918 runtime·unlock(&runtime·forcegc.lock);
2921 // scavenge heap once in a while
2922 if(lastscavenge + scavengelimit/2 < now) {
2923 runtime·MHeap_Scavenge(nscavenge, now, scavengelimit);
2928 if(runtime·debug.schedtrace > 0 && lasttrace + runtime·debug.schedtrace*1000000ll <= now) {
2930 runtime·schedtrace(runtime·debug.scheddetail);
2935 typedef struct Pdesc Pdesc;
2943 #pragma dataflag NOPTR
2944 static Pdesc pdesc[MaxGomaxprocs];
2955 for(i = 0; i < runtime·gomaxprocs; i++) {
2956 p = runtime·allp[i];
2962 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
2964 if(pd->syscalltick != t) {
2965 pd->syscalltick = t;
2966 pd->syscallwhen = now;
2969 // On the one hand we don't want to retake Ps if there is no other work to do,
2970 // but on the other hand we want to retake them eventually
2971 // because they can prevent the sysmon thread from deep sleep.
2972 if(p->runqhead == p->runqtail &&
2973 runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) > 0 &&
2974 pd->syscallwhen + 10*1000*1000 > now)
2976 // Need to decrement number of idle locked M's
2977 // (pretending that one more is running) before the CAS.
2978 // Otherwise the M from which we retake can exit the syscall,
2979 // increment nmidle and report deadlock.
2981 if(runtime·cas(&p->status, s, Pidle)) {
2986 } else if(s == Prunning) {
2987 // Preempt G if it's running for more than 10ms.
2989 if(pd->schedtick != t) {
2991 pd->schedwhen = now;
2994 if(pd->schedwhen + 10*1000*1000 > now)
3002 // Tell all goroutines that they have been preempted and they should stop.
3003 // This function is purely best-effort. It can fail to inform a goroutine if a
3004 // processor just started running it.
3005 // No locks need to be held.
3006 // Returns true if preemption request was issued to at least one goroutine.
3015 for(i = 0; i < runtime·gomaxprocs; i++) {
3016 p = runtime·allp[i];
3017 if(p == nil || p->status != Prunning)
3019 res |= preemptone(p);
3024 // Tell the goroutine running on processor P to stop.
3025 // This function is purely best-effort. It can incorrectly fail to inform the
3026 // goroutine. It can send inform the wrong goroutine. Even if it informs the
3027 // correct goroutine, that goroutine might ignore the request if it is
3028 // simultaneously executing runtime·newstack.
3029 // No lock needs to be held.
3030 // Returns true if preemption request was issued.
3031 // The actual preemption will happen at some point in the future
3032 // and will be indicated by the gp->status no longer being
3041 if(mp == nil || mp == g->m)
3044 if(gp == nil || gp == mp->g0)
3047 // Every call in a go routine checks for stack overflow by
3048 // comparing the current stack pointer to gp->stackguard0.
3049 // Setting gp->stackguard0 to StackPreempt folds
3050 // preemption into the normal stack overflow check.
3051 gp->stackguard0 = StackPreempt;
3056 runtime·schedtrace(bool detailed)
3058 static int64 starttime;
3060 int64 id1, id2, id3;
3068 now = runtime·nanotime();
3072 runtime·lock(&runtime·sched.lock);
3073 runtime·printf("SCHED %Dms: gomaxprocs=%d idleprocs=%d threads=%d spinningthreads=%d idlethreads=%d runqueue=%d",
3074 (now-starttime)/1000000, runtime·gomaxprocs, runtime·sched.npidle, runtime·sched.mcount,
3075 runtime·sched.nmspinning, runtime·sched.nmidle, runtime·sched.runqsize);
3077 runtime·printf(" gcwaiting=%d nmidlelocked=%d stopwait=%d sysmonwait=%d\n",
3078 runtime·sched.gcwaiting, runtime·sched.nmidlelocked,
3079 runtime·sched.stopwait, runtime·sched.sysmonwait);
3081 // We must be careful while reading data from P's, M's and G's.
3082 // Even if we hold schedlock, most data can be changed concurrently.
3083 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
3084 for(i = 0; i < runtime·gomaxprocs; i++) {
3085 p = runtime·allp[i];
3089 h = runtime·atomicload(&p->runqhead);
3090 t = runtime·atomicload(&p->runqtail);
3092 runtime·printf(" P%d: status=%d schedtick=%d syscalltick=%d m=%d runqsize=%d gfreecnt=%d\n",
3093 i, p->status, p->schedtick, p->syscalltick, mp ? mp->id : -1, t-h, p->gfreecnt);
3095 // In non-detailed mode format lengths of per-P run queues as:
3096 // [len1 len2 len3 len4]
3098 if(runtime·gomaxprocs == 1)
3102 else if(i == runtime·gomaxprocs-1)
3104 runtime·printf(fmt, t-h);
3108 runtime·unlock(&runtime·sched.lock);
3111 for(mp = runtime·allm; mp; mp = mp->alllink) {
3114 lockedg = mp->lockedg;
3123 id3 = lockedg->goid;
3124 runtime·printf(" M%d: p=%D curg=%D mallocing=%d throwing=%d gcing=%d"
3125 " locks=%d dying=%d helpgc=%d spinning=%d blocked=%d lockedg=%D\n",
3127 mp->mallocing, mp->throwing, mp->gcing, mp->locks, mp->dying, mp->helpgc,
3128 mp->spinning, g->m->blocked, id3);
3130 runtime·lock(&runtime·allglock);
3131 for(gi = 0; gi < runtime·allglen; gi++) {
3132 gp = runtime·allg[gi];
3134 lockedm = gp->lockedm;
3135 runtime·printf(" G%D: status=%d(%S) m=%d lockedm=%d\n",
3136 gp->goid, runtime·readgstatus(gp), gp->waitreason, mp ? mp->id : -1,
3137 lockedm ? lockedm->id : -1);
3139 runtime·unlock(&runtime·allglock);
3140 runtime·unlock(&runtime·sched.lock);
3143 // Put mp on midle list.
3144 // Sched must be locked.
3148 mp->schedlink = runtime·sched.midle;
3149 runtime·sched.midle = mp;
3150 runtime·sched.nmidle++;
3154 // Try to get an m from midle list.
3155 // Sched must be locked.
3161 if((mp = runtime·sched.midle) != nil){
3162 runtime·sched.midle = mp->schedlink;
3163 runtime·sched.nmidle--;
3168 // Put gp on the global runnable queue.
3169 // Sched must be locked.
3173 gp->schedlink = nil;
3174 if(runtime·sched.runqtail)
3175 runtime·sched.runqtail->schedlink = gp;
3177 runtime·sched.runqhead = gp;
3178 runtime·sched.runqtail = gp;
3179 runtime·sched.runqsize++;
3182 // Put a batch of runnable goroutines on the global runnable queue.
3183 // Sched must be locked.
3185 globrunqputbatch(G *ghead, G *gtail, int32 n)
3187 gtail->schedlink = nil;
3188 if(runtime·sched.runqtail)
3189 runtime·sched.runqtail->schedlink = ghead;
3191 runtime·sched.runqhead = ghead;
3192 runtime·sched.runqtail = gtail;
3193 runtime·sched.runqsize += n;
3196 // Try get a batch of G's from the global runnable queue.
3197 // Sched must be locked.
3199 globrunqget(P *p, int32 max)
3204 if(runtime·sched.runqsize == 0)
3206 n = runtime·sched.runqsize/runtime·gomaxprocs+1;
3207 if(n > runtime·sched.runqsize)
3208 n = runtime·sched.runqsize;
3209 if(max > 0 && n > max)
3211 if(n > nelem(p->runq)/2)
3212 n = nelem(p->runq)/2;
3213 runtime·sched.runqsize -= n;
3214 if(runtime·sched.runqsize == 0)
3215 runtime·sched.runqtail = nil;
3216 gp = runtime·sched.runqhead;
3217 runtime·sched.runqhead = gp->schedlink;
3220 gp1 = runtime·sched.runqhead;
3221 runtime·sched.runqhead = gp1->schedlink;
3227 // Put p to on pidle list.
3228 // Sched must be locked.
3232 p->link = runtime·sched.pidle;
3233 runtime·sched.pidle = p;
3234 runtime·xadd(&runtime·sched.npidle, 1); // TODO: fast atomic
3237 // Try get a p from pidle list.
3238 // Sched must be locked.
3244 p = runtime·sched.pidle;
3246 runtime·sched.pidle = p->link;
3247 runtime·xadd(&runtime·sched.npidle, -1); // TODO: fast atomic
3252 // Try to put g on local runnable queue.
3253 // If it's full, put onto global queue.
3254 // Executed only by the owner P.
3256 runqput(P *p, G *gp)
3261 h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with consumers
3263 if(t - h < nelem(p->runq)) {
3264 p->runq[t%nelem(p->runq)] = gp;
3265 runtime·atomicstore(&p->runqtail, t+1); // store-release, makes the item available for consumption
3268 if(runqputslow(p, gp, h, t))
3270 // the queue is not full, now the put above must suceed
3274 // Put g and a batch of work from local runnable queue on global queue.
3275 // Executed only by the owner P.
3277 runqputslow(P *p, G *gp, uint32 h, uint32 t)
3279 G *batch[nelem(p->runq)/2+1];
3282 // First, grab a batch from local queue.
3285 if(n != nelem(p->runq)/2)
3286 runtime·throw("runqputslow: queue is not full");
3288 batch[i] = p->runq[(h+i)%nelem(p->runq)];
3289 if(!runtime·cas(&p->runqhead, h, h+n)) // cas-release, commits consume
3292 // Link the goroutines.
3294 batch[i]->schedlink = batch[i+1];
3295 // Now put the batch on global queue.
3296 runtime·lock(&runtime·sched.lock);
3297 globrunqputbatch(batch[0], batch[n], n+1);
3298 runtime·unlock(&runtime·sched.lock);
3302 // Get g from local runnable queue.
3303 // Executed only by the owner P.
3311 h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with other consumers
3315 gp = p->runq[h%nelem(p->runq)];
3316 if(runtime·cas(&p->runqhead, h, h+1)) // cas-release, commits consume
3321 // Grabs a batch of goroutines from local runnable queue.
3322 // batch array must be of size nelem(p->runq)/2. Returns number of grabbed goroutines.
3323 // Can be executed by any P.
3325 runqgrab(P *p, G **batch)
3330 h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with other consumers
3331 t = runtime·atomicload(&p->runqtail); // load-acquire, synchronize with the producer
3336 if(n > nelem(p->runq)/2) // read inconsistent h and t
3339 batch[i] = p->runq[(h+i)%nelem(p->runq)];
3340 if(runtime·cas(&p->runqhead, h, h+n)) // cas-release, commits consume
3346 // Steal half of elements from local runnable queue of p2
3347 // and put onto local runnable queue of p.
3348 // Returns one of the stolen elements (or nil if failed).
3350 runqsteal(P *p, P *p2)
3353 G *batch[nelem(p->runq)/2];
3356 n = runqgrab(p2, batch);
3363 h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with consumers
3365 if(t - h + n >= nelem(p->runq))
3366 runtime·throw("runqsteal: runq overflow");
3367 for(i=0; i<n; i++, t++)
3368 p->runq[t%nelem(p->runq)] = batch[i];
3369 runtime·atomicstore(&p->runqtail, t); // store-release, makes the item available for consumption
3374 runtime·testSchedLocalQueue(void)
3380 p = (P*)runtime·mallocgc(sizeof(*p), nil, FlagNoScan);
3381 gs = (G*)runtime·mallocgc(nelem(p->runq)*sizeof(*gs), nil, FlagNoScan);
3383 for(i = 0; i < nelem(p->runq); i++) {
3384 if(runqget(p) != nil)
3385 runtime·throw("runq is not empty initially");
3386 for(j = 0; j < i; j++)
3388 for(j = 0; j < i; j++) {
3389 if(runqget(p) != &gs[i]) {
3390 runtime·printf("bad element at iter %d/%d\n", i, j);
3391 runtime·throw("bad element");
3394 if(runqget(p) != nil)
3395 runtime·throw("runq is not empty afterwards");
3400 runtime·testSchedLocalQueueSteal(void)
3406 p1 = (P*)runtime·mallocgc(sizeof(*p1), nil, FlagNoScan);
3407 p2 = (P*)runtime·mallocgc(sizeof(*p2), nil, FlagNoScan);
3408 gs = (G*)runtime·mallocgc(nelem(p1->runq)*sizeof(*gs), nil, FlagNoScan);
3410 for(i = 0; i < nelem(p1->runq); i++) {
3411 for(j = 0; j < i; j++) {
3413 runqput(p1, &gs[j]);
3415 gp = runqsteal(p2, p1);
3421 while(gp = runqget(p2)) {
3425 while(gp = runqget(p1))
3427 for(j = 0; j < i; j++) {
3428 if(gs[j].sig != 1) {
3429 runtime·printf("bad element %d(%d) at iter %d\n", j, gs[j].sig, i);
3430 runtime·throw("bad element");
3433 if(s != i/2 && s != i/2+1) {
3434 runtime·printf("bad steal %d, want %d or %d, iter %d\n",
3436 runtime·throw("bad steal");
3442 runtime·setmaxthreads_m(void)
3447 in = g->m->scalararg[0];
3449 runtime·lock(&runtime·sched.lock);
3450 out = runtime·sched.maxmcount;
3451 runtime·sched.maxmcount = in;
3453 runtime·unlock(&runtime·sched.lock);
3455 g->m->scalararg[0] = out;
3458 static int8 experiment[] = GOEXPERIMENT; // defined in zaexperiment.h
3461 haveexperiment(int8 *name)
3465 for(i=0; i<sizeof(experiment); i++) {
3466 if((i == 0 || experiment[i-1] == ',') && experiment[i] == name[0]) {
3467 for(j=0; name[j]; j++)
3468 if(experiment[i+j] != name[j])
3470 if(experiment[i+j] != '\0' && experiment[i+j] != ',')
3479 #pragma textflag NOSPLIT
3481 sync·runtime_procPin(intptr p)
3486 // Disable preemption.
3492 #pragma textflag NOSPLIT
3494 sync·runtime_procUnpin()