1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
6 #include "arch_GOARCH.h"
7 #include "zaexperiment.h"
15 // Goroutine scheduler
16 // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
18 // The main concepts are:
20 // M - worker thread, or machine.
21 // P - processor, a resource that is required to execute Go code.
22 // M must have an associated P to execute Go code; however, it can be
23 // blocked or in a syscall w/o an associated P.
25 // Design doc at http://golang.org/s/go11sched.
29 // Number of goroutine ids to grab from runtime·sched.goidgen to local per-P cache at once.
30 // 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
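// For example, with GoidCacheBatch == 16 a P that has exhausted its cache
// does one runtime·xadd64(&runtime·sched.goidgen, GoidCacheBatch) and then
// hands out the ids [old+1, old+16] with no further atomics; this is a
// sketch of the refill actually performed in newproc1 below.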
35 int32 runtime·gomaxprocs;
36 uint32 runtime·needextram;
39 G runtime·g0; // idle goroutine for m0
43 P* runtime·allp[MaxGomaxprocs+1];
46 int32 runtime·newprocs;
48 Mutex runtime·allglock; // the following vars are protected by this lock or by stoptheworld
51 uintptr runtime·allglen;
52 ForceGCState runtime·forcegc;
54 void runtime·mstart(void);
55 static void runqput(P*, G*);
56 static G* runqget(P*);
57 static bool runqputslow(P*, G*, uint32, uint32);
58 static G* runqsteal(P*, P*);
61 static void mcommoninit(M*);
62 static void schedule(void);
63 static void procresize(int32);
64 static void acquirep(P*);
65 static P* releasep(void);
66 static void newm(void(*)(void), P*);
67 static void stopm(void);
68 static void startm(P*, bool);
69 static void handoffp(P*);
70 static void wakep(void);
71 static void stoplockedm(void);
72 static void startlockedm(G*);
73 static void sysmon(void);
74 static uint32 retake(int64);
75 static void incidlelocked(int32);
76 static void checkdead(void);
77 static void exitsyscall0(G*);
78 void runtime·park_m(G*);
79 static void goexit0(G*);
80 static void gfput(P*, G*);
82 static void gfpurge(P*);
83 static void globrunqput(G*);
84 static void globrunqputbatch(G*, G*, int32);
85 static G* globrunqget(P*, int32);
86 static P* pidleget(void);
87 static void pidleput(P*);
88 static void injectglist(G*);
89 static bool preemptall(void);
90 static bool preemptone(P*);
91 static bool exitsyscallfast(void);
92 static bool haveexperiment(int8*);
93 void runtime·allgadd(G*);
94 static void dropg(void);
96 extern String runtime·buildVersion;
98 // For cgo-using programs with external linking,
99 // export "main" (defined in assembly) so that libc can handle basic
100 // C runtime startup and call the Go program as if it were
101 // the C main function.
102 #pragma cgo_export_static main
104 // Filled in by dynamic linker when Cgo is available.
105 void (*_cgo_init)(void);
106 void (*_cgo_malloc)(void);
107 void (*_cgo_free)(void);
110 void* runtime·cgoMalloc;
111 void* runtime·cgoFree;
113 // The bootstrap sequence is:
117 // make & queue new G
118 // call runtime·mstart
120 // The new G calls runtime·main.
122 runtime·schedinit(void)
127 // raceinit must be the first call to race detector.
128 // In particular, it must be done before mallocinit below calls racemapshadow.
130 g->racectx = runtime·raceinit();
132 runtime·sched.maxmcount = 10000;
134 runtime·tracebackinit();
135 runtime·symtabinit();
137 runtime·mallocinit();
142 runtime·parsedebugvars();
145 runtime·sched.lastpoll = runtime·nanotime();
147 p = runtime·getenv("GOMAXPROCS");
148 if(p != nil && (n = runtime·atoi(p)) > 0) {
149 if(n > MaxGomaxprocs)
155 if(runtime·buildVersion.str == nil) {
156 // Condition should never trigger. This code just serves
157 // to ensure runtime·buildVersion is kept in the resulting binary.
158 runtime·buildVersion.str = (uint8*)"unknown";
159 runtime·buildVersion.len = 7;
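// (The dummy assignment above only creates a reference; an unreferenced
// runtime·buildVersion symbol could otherwise be dropped by the linker.)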
162 runtime·cgoMalloc = _cgo_malloc;
163 runtime·cgoFree = _cgo_free;
167 runtime·newsysmon(void)
175 runtime·printf("runtime: gp: gp=%p, goid=%D, gp->atomicstatus=%x\n", gp, gp->goid, runtime·readgstatus(gp));
176 runtime·printf("runtime: g: g=%p, goid=%D, g->atomicstatus=%x\n", g, g->goid, runtime·readgstatus(g));
182 // sched lock is held
183 if(runtime·sched.mcount > runtime·sched.maxmcount){
184 runtime·printf("runtime: program exceeds %d-thread limit\n", runtime·sched.maxmcount);
185 runtime·throw("thread exhaustion");
192 // g0 stack won't make sense for user (and is not necessarily unwindable).
194 runtime·callers(1, mp->createstack, nelem(mp->createstack));
196 mp->fastrand = 0x49f6428aUL + mp->id + runtime·cputicks();
198 runtime·lock(&runtime·sched.lock);
199 mp->id = runtime·sched.mcount++;
201 runtime·mpreinit(mp);
203 mp->gsignal->stackguard1 = mp->gsignal->stack.lo + StackGuard;
205 // Add to runtime·allm so garbage collector doesn't free g->m
206 // when it is just in a register or thread-local storage.
207 mp->alllink = runtime·allm;
208 // runtime·NumCgoCall() iterates over allm w/o schedlock,
209 // so we need to publish it safely.
210 runtime·atomicstorep(&runtime·allm, mp);
211 runtime·unlock(&runtime·sched.lock);
214 // Mark gp ready to run.
220 status = runtime·readgstatus(gp);
222 g->m->locks++; // disable preemption because it can be holding p in a local var
223 if((status&~Gscan) != Gwaiting){
225 runtime·throw("bad g->status in ready");
227 // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
228 runtime·casgstatus(gp, Gwaiting, Grunnable);
229 runqput(g->m->p, gp);
230 if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0) // TODO: fast atomic
233 if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
234 g->stackguard0 = StackPreempt;
238 runtime·ready_m(void)
242 gp = g->m->ptrarg[0];
243 g->m->ptrarg[0] = nil;
248 runtime·gcprocs(void)
252 // Figure out how many CPUs to use during GC.
253 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
254 runtime·lock(&runtime·sched.lock);
255 n = runtime·gomaxprocs;
260 if(n > runtime·sched.nmidle+1) // one M is currently running
261 n = runtime·sched.nmidle+1;
262 runtime·unlock(&runtime·sched.lock);
271 runtime·lock(&runtime·sched.lock);
272 n = runtime·gomaxprocs;
277 n -= runtime·sched.nmidle+1; // one M is currently running
278 runtime·unlock(&runtime·sched.lock);
283 runtime·helpgc(int32 nproc)
288 runtime·lock(&runtime·sched.lock);
290 for(n = 1; n < nproc; n++) { // one M is currently running
291 if(runtime·allp[pos]->mcache == g->m->mcache)
295 runtime·throw("runtime·gcprocs inconsistency");
297 mp->mcache = runtime·allp[pos]->mcache;
299 runtime·notewakeup(&mp->park);
301 runtime·unlock(&runtime·sched.lock);
304 // Similar to stoptheworld but best-effort and can be called several times.
305 // There is no reverse operation; it is used during crashing.
306 // This function must not lock any mutexes.
308 runtime·freezetheworld(void)
312 if(runtime·gomaxprocs == 1)
314 // stopwait and preemption requests can be lost
315 // due to races with concurrently executing threads,
316 // so try several times
317 for(i = 0; i < 5; i++) {
318 // this should tell the scheduler to not start any new goroutines
319 runtime·sched.stopwait = 0x7fffffff;
320 runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1);
321 // this should stop running goroutines
323 break; // no running goroutines
324 runtime·usleep(1000);
327 runtime·usleep(1000);
329 runtime·usleep(1000);
333 isscanstatus(uint32 status)
336 runtime·throw("isscanstatus: Bad status Gscan");
337 return (status&Gscan) == Gscan;
340 // All reads and writes of g's status go through readgstatus, casgstatus,
341 // castogscanstatus, casfromgscanstatus.
342 #pragma textflag NOSPLIT
344 runtime·readgstatus(G *gp)
346 return runtime·atomicload(&gp->atomicstatus);
349 // The Gscanstatuses are acting like locks and this releases them.
350 // If it proves to be a performance hit we should be able to make these
351 // simple atomic stores but for now we are going to throw if
352 // we see an inconsistent state.
354 runtime·casfromgscanstatus(G *gp, uint32 oldval, uint32 newval)
356 bool success = false;
358 // Check that transition is valid.
364 if(newval == (oldval&~Gscan))
365 success = runtime·cas(&gp->atomicstatus, oldval, newval);
368 if(newval == Gwaiting)
369 success = runtime·cas(&gp->atomicstatus, oldval, newval);
373 runtime·printf("runtime: casfromgscanstatus failed gp=%p, oldval=%d, newval=%d\n",
376 runtime·throw("casfromgscanstatus: gp->status is not in scan state");
380 // This will return false if the gp is not in the expected status and the cas fails.
381 // This acts like a lock acquire while casfromgscanstatus acts like a lock release.
383 runtime·castogscanstatus(G *gp, uint32 oldval, uint32 newval)
389 if(newval == (oldval|Gscan))
390 return runtime·cas(&gp->atomicstatus, oldval, newval);
393 if(newval == Gscanrunning || newval == Gscanenqueue)
394 return runtime·cas(&gp->atomicstatus, oldval, newval);
398 runtime·printf("runtime: castogscanstatus oldval=%d newval=%d\n", oldval, newval);
399 runtime·throw("castogscanstatus");
400 return false; // not reached
403 static void badcasgstatus(void);
404 static void helpcasgstatus(void);
406 // If asked to move to or from a Gscanstatus this will throw. Use castogscanstatus
407 // and casfromgscanstatus instead.
408 // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
409 // put it in the Gscan state is finished.
410 #pragma textflag NOSPLIT
412 runtime·casgstatus(G *gp, uint32 oldval, uint32 newval)
416 if((oldval&Gscan) || (newval&Gscan) || oldval == newval) {
417 g->m->scalararg[0] = oldval;
418 g->m->scalararg[1] = newval;
423 // loop if gp->atomicstatus is in a scan state giving
424 // GC time to finish and change the state to oldval.
425 while(!runtime·cas(&gp->atomicstatus, oldval, newval)) {
426 // Help GC if needed.
427 if(gp->preemptscan && !gp->gcworkdone && (oldval == Grunning || oldval == Gsyscall)) {
428 gp->preemptscan = false;
429 g->m->ptrarg[0] = gp;
439 uint32 oldval, newval;
441 oldval = g->m->scalararg[0];
442 newval = g->m->scalararg[1];
443 g->m->scalararg[0] = 0;
444 g->m->scalararg[1] = 0;
446 runtime·printf("casgstatus: oldval=%d, newval=%d\n", oldval, newval);
447 runtime·throw("casgstatus: bad incoming values");
455 gp = g->m->ptrarg[0];
457 runtime·gcphasework(gp);
460 // stopg ensures that gp is stopped at a GC safe point where its stack can be scanned
461 // or, in the context of a moving collector, the pointers can be flipped from pointing
462 // to old objects to pointing to new objects.
463 // If stopg returns true, the caller knows gp is at a GC safe point and will remain there until
464 // the caller calls restartg.
465 // If stopg returns false, the caller is not responsible for calling restartg. This can happen
466 // if another thread, either the gp itself or another GC thread, is taking the responsibility
467 // to do the GC work related to this thread.
477 s = runtime·readgstatus(gp);
481 runtime·throw("stopg: gp->atomicstatus is not valid");
487 // Loop until a new stack is in place.
493 // Claim goroutine by setting scan bit.
494 if(!runtime·castogscanstatus(gp, s, s|Gscan))
496 // In scan state, do work.
497 runtime·gcphasework(gp);
503 // Goroutine already claimed by another GC helper.
507 // Claim goroutine, so we aren't racing with a status
508 // transition away from Grunning.
509 if(!runtime·castogscanstatus(gp, Grunning, Gscanrunning))
512 // Mark gp for preemption.
513 if(!gp->gcworkdone) {
514 gp->preemptscan = true;
516 gp->stackguard0 = StackPreempt;
520 runtime·casfromgscanstatus(gp, Gscanrunning, Grunning);
524 // Should not be here....
527 // The GC requests that this routine be moved from a scanmumble state to a mumble state.
529 runtime·restartg(G *gp)
533 s = runtime·readgstatus(gp);
537 runtime·throw("restartg: unexpected status");
545 runtime·casfromgscanstatus(gp, s, s&~Gscan);
549 // Scan is now completed.
550 // Goroutine now needs to be made runnable.
551 // We put it on the global run queue; ready blocks on the global scheduler lock.
552 runtime·casfromgscanstatus(gp, Gscanenqueue, Gwaiting);
554 runtime·throw("processing Gscanenqueue on wrong m");
565 runtime·throw("GC not moved to G0");
566 if(runtime·stopg(gp)) {
567 if(!isscanstatus(runtime·readgstatus(gp))) {
569 runtime·throw("GC not in scan state");
571 runtime·restartg(gp);
575 // Runs on g0 and does the actual work after putting the g back on the run queue.
577 mquiesce(G *gpmaster)
584 activeglen = runtime·allglen;
585 // enqueue the calling goroutine.
586 runtime·restartg(gpmaster);
587 for(i = 0; i < activeglen; i++) {
588 gp = runtime·allg[i];
589 if(runtime·readgstatus(gp) == Gdead)
590 gp->gcworkdone = true; // noop scan.
592 gp->gcworkdone = false;
596 // Check that the G's gcwork (such as scanning) has been done. If not, do it now.
597 // You can end up doing work here if the page trap on a Grunning Goroutine has
598 // not been sprung, or in some race situations. For example, a runnable goroutine goes dead
599 // and is started up again with gp->gcworkdone set to false.
600 for(i = 0; i < activeglen; i++) {
601 gp = runtime·allg[i];
602 while (!gp->gcworkdone) {
603 status = runtime·readgstatus(gp);
604 if(status == Gdead) {
605 gp->gcworkdone = true; // scan is a noop
607 // do nothing, scan not needed.
609 if(status == Grunning && gp->stackguard0 == (uintptr)StackPreempt && runtime·notetsleep(&runtime·sched.stopnote, 100*1000)) // nanosecond arg
610 runtime·noteclear(&runtime·sched.stopnote);
616 for(i = 0; i < activeglen; i++) {
617 gp = runtime·allg[i];
618 status = runtime·readgstatus(gp);
619 if(isscanstatus(status)) {
620 runtime·printf("mstopandscang:bottom: post scan bad status gp=%p has status %x\n", gp, status);
623 if(!gp->gcworkdone && status != Gdead) {
624 runtime·printf("mstopandscang:bottom: post scan gp=%p->gcworkdone still false\n", gp);
629 schedule(); // Never returns.
632 // quiesce moves all the goroutines to a GC safepoint, which for now is at a preemption point.
633 // If the global runtime·gcphase is GCmark quiesce will ensure that all of the goroutine's stacks
634 // have been scanned before it returns.
636 runtime·quiesce(G* mastergp)
640 runtime·castogscanstatus(mastergp, Grunning, Gscanenqueue);
641 // Now move this to the g0 (aka m) stack.
642 // g0 will potentially scan this thread and put mastergp on the runqueue
647 // This is used by the GC as well as the routines that do stack dumps. In the case
648 // of GC all the routines can be reliably stopped. This is not always the case
649 // when the system is in panic or being exited.
651 runtime·stoptheworld(void)
658 // If we hold a lock, then we won't be able to stop another M
659 // that is blocked trying to acquire the lock.
661 runtime·throw("stoptheworld: holding locks");
663 runtime·lock(&runtime·sched.lock);
664 runtime·sched.stopwait = runtime·gomaxprocs;
665 runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1);
668 g->m->p->status = Pgcstop; // Pgcstop is only diagnostic.
669 runtime·sched.stopwait--;
670 // try to retake all P's in Psyscall status
671 for(i = 0; i < runtime·gomaxprocs; i++) {
674 if(s == Psyscall && runtime·cas(&p->status, s, Pgcstop))
675 runtime·sched.stopwait--;
678 while(p = pidleget()) {
680 runtime·sched.stopwait--;
682 wait = runtime·sched.stopwait > 0;
683 runtime·unlock(&runtime·sched.lock);
685 // wait for remaining P's to stop voluntarily
688 // wait for 100us, then try to re-preempt in case of any races
689 if(runtime·notetsleep(&runtime·sched.stopnote, 100*1000)) {
690 runtime·noteclear(&runtime·sched.stopnote);
696 if(runtime·sched.stopwait)
697 runtime·throw("stoptheworld: not stopped");
698 for(i = 0; i < runtime·gomaxprocs; i++) {
700 if(p->status != Pgcstop)
701 runtime·throw("stoptheworld: not stopped");
712 runtime·starttheworld(void)
719 g->m->locks++; // disable preemption because it can be holding p in a local var
720 gp = runtime·netpoll(false); // non-blocking
722 add = needaddgcproc();
723 runtime·lock(&runtime·sched.lock);
724 if(runtime·newprocs) {
725 procresize(runtime·newprocs);
726 runtime·newprocs = 0;
728 procresize(runtime·gomaxprocs);
729 runtime·sched.gcwaiting = 0;
732 while(p = pidleget()) {
733 // procresize() puts p's with work at the beginning of the list.
734 // Once we reach a p without a run queue, the rest don't have one either.
735 if(p->runqhead == p->runqtail) {
743 if(runtime·sched.sysmonwait) {
744 runtime·sched.sysmonwait = false;
745 runtime·notewakeup(&runtime·sched.sysmonnote);
747 runtime·unlock(&runtime·sched.lock);
756 runtime·throw("starttheworld: inconsistent mp->nextp");
758 runtime·notewakeup(&mp->park);
760 // Start M to run P. Do not start another M below.
767 // If GC could have used another helper proc, start one now,
768 // in the hope that it will be available next time.
769 // It would have been even better to start it before the collection,
770 // but doing so requires allocating memory, so it's tricky to
771 // coordinate. This lazy approach works out in practice:
772 // we don't mind if the first couple gc rounds don't have quite
773 // the maximum number of procs.
777 if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
778 g->stackguard0 = StackPreempt;
781 static void mstart(void);
783 // Called to start an M.
784 #pragma textflag NOSPLIT
790 if(g->stack.lo == 0) {
791 // Initialize stack bounds from system stack.
792 // Cgo may have left stack size in stack.hi.
796 g->stack.hi = (uintptr)&x;
797 g->stack.lo = g->stack.hi - size + 1024;
800 // Initialize stack guards so that we can start calling
801 // both Go and C functions with stack growth prologues.
802 g->stackguard0 = g->stack.lo + StackGuard;
803 g->stackguard1 = g->stackguard0;
811 runtime·throw("bad runtime·mstart");
813 // Record top of stack for use by mcall.
814 // Once we call schedule we're never coming back,
815 // so other calls can reuse this stack space.
816 runtime·gosave(&g->m->g0->sched);
817 g->m->g0->sched.pc = (uintptr)-1; // make sure it is never used
821 // Install signal handlers; after minit so that minit can
822 // prepare the thread to be able to handle the signals.
823 if(g->m == &runtime·m0)
832 } else if(g->m != &runtime·m0) {
833 acquirep(g->m->nextp);
838 // TODO(brainman): This point is never reached, because scheduler
839 // does not release os threads at the moment. But once this path
840 // is enabled, we must remove our seh here.
843 // When running with cgo, we call _cgo_thread_start
844 // to start threads for us so that we can play nicely with foreign code.
846 void (*_cgo_thread_start)(void*);
848 typedef struct CgoThreadStart CgoThreadStart;
849 struct CgoThreadStart
856 M *runtime·newM(void); // in proc.go
858 // Allocate a new m unassociated with any thread.
859 // Can use p for allocation context if needed.
865 g->m->locks++; // disable GC because it can be called from sysmon
867 acquirep(p); // temporarily borrow p for mallocs in this function
871 // In case of cgo or Solaris, pthread_create will make us a stack.
872 // Windows and Plan 9 will lay out the sched stack on the OS stack.
873 if(runtime·iscgo || Solaris || Windows || Plan9)
874 mp->g0 = runtime·malg(-1);
876 mp->g0 = runtime·malg(8192);
882 if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
883 g->stackguard0 = StackPreempt;
888 G *runtime·newG(void); // in proc.go
893 return runtime·newG();
896 static M* lockextra(bool nilokay);
897 static void unlockextra(M*);
899 // needm is called when a cgo callback happens on a
900 // thread without an m (a thread not created by Go).
901 // In this case, needm is expected to find an m to use
902 // and return with m, g initialized correctly.
903 // Since m and g are not set now (likely nil, but see below)
904 // needm is limited in what routines it can call. In particular
905 // it can only call nosplit functions (textflag 7) and cannot
906 // do any scheduling that requires an m.
908 // In order to avoid needing heavy lifting here, we adopt
909 // the following strategy: there is a stack of available m's
910 // that can be stolen. Using compare-and-swap
911 // to pop from the stack has ABA races, so we simulate
912 // a lock by doing an exchange (via casp) to steal the stack
913 // head and replace the top pointer with MLOCKED (1).
914 // This serves as a simple spin lock that we can use even
915 // without an m. The thread that locks the stack in this way
916 // unlocks the stack by storing a valid stack head pointer.
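//
// A rough sketch of that protocol, using names from this file (the real
// code is in lockextra/unlockextra below):
//
//	for(;;) {
//		mp = runtime·atomicloadp(&runtime·extram);
//		if(mp == MLOCKED || !runtime·casp(&runtime·extram, mp, MLOCKED)) {
//			runtime·osyield();  // locked, or we lost the race; retry
//			continue;
//		}
//		break;  // list is ours; storing a real head unlocks it
//	}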
918 // In order to make sure that there is always an m structure
919 // available to be stolen, we maintain the invariant that there
920 // is always one more than needed. At the beginning of the
921 // program (if cgo is in use) the list is seeded with a single m.
922 // If needm finds that it has taken the last m off the list, its job
923 // is - once it has installed its own m so that it can do things like
924 // allocate memory - to create a spare m and put it on the list.
926 // Each of these extra m's also has a g0 and a curg that are
927 // pressed into service as the scheduling stack and current
928 // goroutine for the duration of the cgo callback.
930 // When the callback is done with the m, it calls dropm to
931 // put the m back on the list.
932 #pragma textflag NOSPLIT
934 runtime·needm(byte x)
938 if(runtime·needextram) {
939 // Can happen if C/C++ code calls Go from a global ctor.
940 // Cannot throw, because the scheduler is not initialized yet.
941 runtime·write(2, "fatal error: cgo callback before cgo call\n",
942 sizeof("fatal error: cgo callback before cgo call\n")-1);
946 // Lock extra list, take head, unlock popped list.
947 // nilokay=false is safe here because of the invariant above,
948 // that the extra list always contains or will soon contain
950 mp = lockextra(false);
952 // Set needextram when we've just emptied the list,
953 // so that the eventual call into cgocallbackg will
954 // allocate a new m for the extra list. We delay the
955 // allocation until then so that it can be done
956 // after exitsyscall makes sure it is okay to be
957 // running at all (that is, there's no garbage collection
958 // running right now).
959 mp->needextram = mp->schedlink == nil;
960 unlockextra(mp->schedlink);
962 // Install g (= m->g0) and set the stack bounds
963 // to match the current stack. We don't actually know
964 // how big the stack is, like we don't know how big any
965 // scheduling stack is, but we assume there's at least 32 kB,
966 // which is more than enough for us.
967 runtime·setg(mp->g0);
968 g->stack.hi = (uintptr)(&x + 1024);
969 g->stack.lo = (uintptr)(&x - 32*1024);
970 g->stackguard0 = g->stack.lo + StackGuard;
972 // Initialize this thread to use the m.
977 // newextram allocates an m and puts it on the extra list.
978 // It is called with a working local m, so that it can do things
979 // like call schedlock and allocate.
981 runtime·newextram(void)
986 // Create extra goroutine locked to extra m.
987 // The goroutine is the context in which the cgo callback will run.
988 // The sched.pc will never be returned to, but setting it to
989 // runtime.goexit makes clear to the traceback routines where
990 // the goroutine stack ends.
991 mp = runtime·allocm(nil);
992 gp = runtime·malg(4096);
993 gp->sched.pc = (uintptr)runtime·goexit;
994 gp->sched.sp = gp->stack.hi;
995 gp->sched.sp -= 4*sizeof(uintreg); // extra space in case of reads slightly beyond frame
998 gp->syscallpc = gp->sched.pc;
999 gp->syscallsp = gp->sched.sp;
1000 // malg returns status as Gidle, change to Gsyscall before adding to allg
1001 // where GC will see it.
1002 runtime·casgstatus(gp, Gidle, Gsyscall);
1005 mp->locked = LockInternal;
1008 gp->goid = runtime·xadd64(&runtime·sched.goidgen, 1);
1010 gp->racectx = runtime·racegostart(runtime·newextram);
1011 // put on allg for garbage collector
1012 runtime·allgadd(gp);
1014 // Add m to the extra list.
1015 mnext = lockextra(true);
1016 mp->schedlink = mnext;
1020 // dropm is called when a cgo callback has called needm but is now
1021 // done with the callback and returning back into the non-Go thread.
1022 // It puts the current m back onto the extra list.
1024 // The main expense here is the call to signalstack to release the
1025 // m's signal stack, and then the call to needm on the next callback
1026 // from this thread. It is tempting to try to save the m for next time,
1027 // which would eliminate both these costs, but there might not be
1028 // a next time: the current thread (which Go does not control) might exit.
1029 // If we saved the m for that thread, there would be an m leak each time
1030 // such a thread exited. Instead, we acquire and release an m on each
1031 // call. These should typically not be scheduling operations, just a few
1032 // atomics, so the cost should be small.
1034 // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
1035 // variable using pthread_key_create. Unlike the pthread keys we already use
1036 // on OS X, this dummy key would never be read by Go code. It would exist
1037 // only so that we could register a thread-exit-time destructor.
1038 // That destructor would put the m back onto the extra list.
1039 // This is purely a performance optimization. The current version,
1040 // in which dropm happens on each cgo call, is still correct too.
1041 // We may have to keep the current version on systems with cgo
1042 // but without pthreads, like Windows.
1048 // Undo whatever initialization minit did during needm.
1051 // Clear m and g, and return m to the extra list.
1052 // After the call to setmg we can only call nosplit functions.
1056 mnext = lockextra(true);
1057 mp->schedlink = mnext;
1061 #define MLOCKED ((M*)1)
1063 // lockextra locks the extra list and returns the list head.
1064 // The caller must unlock the list by storing a new list head
1065 // to runtime.extram. If nilokay is true, then lockextra will
1066 // return a nil list head if that's what it finds. If nilokay is false,
1067 // lockextra will keep waiting until the list head is no longer nil.
1068 #pragma textflag NOSPLIT
1070 lockextra(bool nilokay)
1073 void (*yield)(void);
1076 mp = runtime·atomicloadp(&runtime·extram);
1078 yield = runtime·osyield;
1082 if(mp == nil && !nilokay) {
1086 if(!runtime·casp(&runtime·extram, mp, MLOCKED)) {
1087 yield = runtime·osyield;
1096 #pragma textflag NOSPLIT
1100 runtime·atomicstorep(&runtime·extram, mp);
1104 // Create a new m. It will start off with a call to fn, or else the scheduler.
1106 newm(void(*fn)(void), P *p)
1110 mp = runtime·allocm(p);
1117 if(_cgo_thread_start == nil)
1118 runtime·throw("_cgo_thread_start missing");
1121 ts.fn = runtime·mstart;
1122 runtime·asmcgocall(_cgo_thread_start, &ts);
1125 runtime·newosproc(mp, (byte*)mp->g0->stack.hi);
1128 // Stops execution of the current m until new work is available.
1129 // Returns with acquired P.
1134 runtime·throw("stopm holding locks");
1136 runtime·throw("stopm holding p");
1137 if(g->m->spinning) {
1138 g->m->spinning = false;
1139 runtime·xadd(&runtime·sched.nmspinning, -1);
1143 runtime·lock(&runtime·sched.lock);
1145 runtime·unlock(&runtime·sched.lock);
1146 runtime·notesleep(&g->m->park);
1147 runtime·noteclear(&g->m->park);
1154 acquirep(g->m->nextp);
1161 g->m->spinning = true;
1164 // Schedules some M to run the p (creates an M if necessary).
1165 // If p==nil, tries to get an idle P; if there are no idle P's, it does nothing.
1167 startm(P *p, bool spinning)
1172 runtime·lock(&runtime·sched.lock);
1176 runtime·unlock(&runtime·sched.lock);
1178 runtime·xadd(&runtime·sched.nmspinning, -1);
1183 runtime·unlock(&runtime·sched.lock);
1192 runtime·throw("startm: m is spinning");
1194 runtime·throw("startm: m has p");
1195 mp->spinning = spinning;
1197 runtime·notewakeup(&mp->park);
1200 // Hands off P from syscall or locked M.
1204 // if it has local work, start it straight away
1205 if(p->runqhead != p->runqtail || runtime·sched.runqsize) {
1209 // no local work, check that there are no spinning/idle M's,
1210 // otherwise our help is not required
1211 if(runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) == 0 && // TODO: fast atomic
1212 runtime·cas(&runtime·sched.nmspinning, 0, 1)){
1216 runtime·lock(&runtime·sched.lock);
1217 if(runtime·sched.gcwaiting) {
1218 p->status = Pgcstop;
1219 if(--runtime·sched.stopwait == 0)
1220 runtime·notewakeup(&runtime·sched.stopnote);
1221 runtime·unlock(&runtime·sched.lock);
1224 if(runtime·sched.runqsize) {
1225 runtime·unlock(&runtime·sched.lock);
1229 // If this is the last running P and nobody is polling network,
1230 // need to wake up another M to poll the network.
1231 if(runtime·sched.npidle == runtime·gomaxprocs-1 && runtime·atomicload64(&runtime·sched.lastpoll) != 0) {
1232 runtime·unlock(&runtime·sched.lock);
1237 runtime·unlock(&runtime·sched.lock);
1240 // Tries to add one more P to execute G's.
1241 // Called when a G is made runnable (newproc, ready).
1245 // be conservative about spinning threads
1246 if(!runtime·cas(&runtime·sched.nmspinning, 0, 1))
1251 // Stops execution of the current m that is locked to a g until the g is runnable again.
1252 // Returns with acquired P.
1259 if(g->m->lockedg == nil || g->m->lockedg->lockedm != g->m)
1260 runtime·throw("stoplockedm: inconsistent locking");
1262 // Schedule another M to run this p.
1267 // Wait until another thread schedules lockedg again.
1268 runtime·notesleep(&g->m->park);
1269 runtime·noteclear(&g->m->park);
1270 status = runtime·readgstatus(g->m->lockedg);
1271 if((status&~Gscan) != Grunnable){
1272 runtime·printf("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n");
1274 runtime·throw("stoplockedm: not runnable");
1276 acquirep(g->m->nextp);
1280 // Schedules the locked m to run the locked gp.
1289 runtime·throw("startlockedm: locked to me");
1291 runtime·throw("startlockedm: m has p");
1292 // directly handoff current P to the locked m
1296 runtime·notewakeup(&mp->park);
1300 // Stops the current m for stoptheworld.
1301 // Returns when the world is restarted.
1307 if(!runtime·sched.gcwaiting)
1308 runtime·throw("gcstopm: not waiting for gc");
1309 if(g->m->spinning) {
1310 g->m->spinning = false;
1311 runtime·xadd(&runtime·sched.nmspinning, -1);
1314 runtime·lock(&runtime·sched.lock);
1315 p->status = Pgcstop;
1316 if(--runtime·sched.stopwait == 0)
1317 runtime·notewakeup(&runtime·sched.stopnote);
1318 runtime·unlock(&runtime·sched.lock);
1322 // Schedules gp to run on the current M.
1329 runtime·casgstatus(gp, Grunnable, Grunning);
1331 gp->preempt = false;
1332 gp->stackguard0 = gp->stack.lo + StackGuard;
1333 g->m->p->schedtick++;
1337 // Check whether the profiler needs to be turned on or off.
1338 hz = runtime·sched.profilehz;
1339 if(g->m->profilehz != hz)
1340 runtime·resetcpuprofiler(hz);
1342 runtime·gogo(&gp->sched);
1345 // Finds a runnable goroutine to execute.
1346 // Tries to steal from other P's, get g from global queue, poll network.
1355 if(runtime·sched.gcwaiting) {
1359 if(runtime·fingwait && runtime·fingwake && (gp = runtime·wakefing()) != nil)
1362 gp = runqget(g->m->p);
1366 if(runtime·sched.runqsize) {
1367 runtime·lock(&runtime·sched.lock);
1368 gp = globrunqget(g->m->p, 0);
1369 runtime·unlock(&runtime·sched.lock);
1374 gp = runtime·netpoll(false); // non-blocking
1376 injectglist(gp->schedlink);
1377 runtime·casgstatus(gp, Gwaiting, Grunnable);
1380 // If number of spinning M's >= number of busy P's, block.
1381 // This is necessary to prevent excessive CPU consumption
1382 // when GOMAXPROCS>>1 but the program parallelism is low.
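// For example, with GOMAXPROCS=8 and 6 idle P's only two P's are busy,
// so once one M is already spinning (2*1 >= 8-6) the next would-be
// spinner blocks here instead of burning a CPU.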
1383 if(!g->m->spinning && 2 * runtime·atomicload(&runtime·sched.nmspinning) >= runtime·gomaxprocs - runtime·atomicload(&runtime·sched.npidle)) // TODO: fast atomic
1385 if(!g->m->spinning) {
1386 g->m->spinning = true;
1387 runtime·xadd(&runtime·sched.nmspinning, 1);
1389 // random steal from other P's
1390 for(i = 0; i < 2*runtime·gomaxprocs; i++) {
1391 if(runtime·sched.gcwaiting)
1393 p = runtime·allp[runtime·fastrand1()%runtime·gomaxprocs];
1397 gp = runqsteal(g->m->p, p);
1402 // return P and block
1403 runtime·lock(&runtime·sched.lock);
1404 if(runtime·sched.gcwaiting) {
1405 runtime·unlock(&runtime·sched.lock);
1408 if(runtime·sched.runqsize) {
1409 gp = globrunqget(g->m->p, 0);
1410 runtime·unlock(&runtime·sched.lock);
1415 runtime·unlock(&runtime·sched.lock);
1416 if(g->m->spinning) {
1417 g->m->spinning = false;
1418 runtime·xadd(&runtime·sched.nmspinning, -1);
1420 // check all runqueues once again
1421 for(i = 0; i < runtime·gomaxprocs; i++) {
1422 p = runtime·allp[i];
1423 if(p && p->runqhead != p->runqtail) {
1424 runtime·lock(&runtime·sched.lock);
1426 runtime·unlock(&runtime·sched.lock);
1435 if(runtime·xchg64(&runtime·sched.lastpoll, 0) != 0) {
1437 runtime·throw("findrunnable: netpoll with p");
1439 runtime·throw("findrunnable: netpoll with spinning");
1440 gp = runtime·netpoll(true); // block until new work is available
1441 runtime·atomicstore64(&runtime·sched.lastpoll, runtime·nanotime());
1443 runtime·lock(&runtime·sched.lock);
1445 runtime·unlock(&runtime·sched.lock);
1448 injectglist(gp->schedlink);
1449 runtime·casgstatus(gp, Gwaiting, Grunnable);
1464 if(g->m->spinning) {
1465 g->m->spinning = false;
1466 nmspinning = runtime·xadd(&runtime·sched.nmspinning, -1);
1468 runtime·throw("findrunnable: negative nmspinning");
1470 nmspinning = runtime·atomicload(&runtime·sched.nmspinning);
1472 // M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
1473 // so see if we need to wake up another P here.
1474 if (nmspinning == 0 && runtime·atomicload(&runtime·sched.npidle) > 0)
1478 // Injects the list of runnable G's into the scheduler.
1479 // Can run concurrently with GC.
1481 injectglist(G *glist)
1488 runtime·lock(&runtime·sched.lock);
1489 for(n = 0; glist; n++) {
1491 glist = gp->schedlink;
1492 runtime·casgstatus(gp, Gwaiting, Grunnable);
1495 runtime·unlock(&runtime·sched.lock);
1497 for(; n && runtime·sched.npidle; n--)
1501 // One round of scheduler: find a runnable goroutine and execute it.
1510 runtime·throw("schedule: holding locks");
1514 execute(g->m->lockedg); // Never returns.
1518 if(runtime·sched.gcwaiting) {
1524 // Check the global runnable queue once in a while to ensure fairness.
1525 // Otherwise two goroutines can completely occupy the local runqueue
1526 // by constantly respawning each other.
1527 tick = g->m->p->schedtick;
1528 // This is a fancy way to say tick%61==0,
1529 // it uses 2 MUL instructions instead of a single DIV and so is faster on modern processors.
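// Concretely, 0x4325c53f == ceil(2^36/61), so ((uint64)tick*0x4325c53f)>>36
// equals tick/61 for every uint32 tick, and subtracting 61 times that
// quotient leaves tick%61; this is the standard reciprocal-multiplication
// argument (the accumulated error term stays below 2^36 for 32-bit ticks).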
1530 if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime·sched.runqsize > 0) {
1531 runtime·lock(&runtime·sched.lock);
1532 gp = globrunqget(g->m->p, 1);
1533 runtime·unlock(&runtime·sched.lock);
1538 gp = runqget(g->m->p);
1539 if(gp && g->m->spinning)
1540 runtime·throw("schedule: spinning with local work");
1543 gp = findrunnable(); // blocks until work is available
1548 // Hands off own p to the locked m,
1549 // then blocks waiting for a new p.
1557 // dropg removes the association between m and the current goroutine m->curg (gp for short).
1558 // Typically a caller sets gp's status away from Grunning and then
1559 // immediately calls dropg to finish the job. The caller is also responsible
1560 // for arranging that gp will be restarted using runtime·ready at an
1561 // appropriate time. After calling dropg and arranging for gp to be
1562 // readied later, the caller can do other work but eventually should
1563 // call schedule to restart the scheduling of goroutines on this m.
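// A rough sketch of that pattern, using names from this comment:
//	runtime·casgstatus(gp, Grunning, Gwaiting);  // move gp out of Grunning
//	dropg();                                     // sever the m<->gp link
//	// ... arrange for runtime·ready(gp) to run later ...
//	schedule();                                  // find other work for this m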
1567 if(g->m->lockedg == nil) {
1568 g->m->curg->m = nil;
1573 // Puts the current goroutine into a waiting state and calls unlockf.
1574 // If unlockf returns false, the goroutine is resumed.
1576 runtime·park(bool(*unlockf)(G*, void*), void *lock, String reason)
1580 g->m->waitlock = lock;
1581 g->m->waitunlockf = unlockf;
1582 g->waitreason = reason;
1583 fn = runtime·park_m;
1588 runtime·parkunlock_c(G *gp, void *lock)
1591 runtime·unlock(lock);
1595 // Puts the current goroutine into a waiting state and unlocks the lock.
1596 // The goroutine can be made runnable again by calling runtime·ready(gp).
1598 runtime·parkunlock(Mutex *lock, String reason)
1600 runtime·park(runtime·parkunlock_c, lock, reason);
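// A hedged usage sketch (hypothetical waiter; construction of the String
// reason is elided):
//	runtime·lock(&l);
//	if(!cond) {
//		// parkunlock releases l and blocks this goroutine; another
//		// goroutine sets cond and calls runtime·ready(gp) to wake it.
//		runtime·parkunlock(&l, reason);
//	}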
1603 // runtime·park continuation on g0.
1605 runtime·park_m(G *gp)
1609 runtime·casgstatus(gp, Grunning, Gwaiting);
1612 if(g->m->waitunlockf) {
1613 ok = g->m->waitunlockf(gp, g->m->waitlock);
1614 g->m->waitunlockf = nil;
1615 g->m->waitlock = nil;
1617 runtime·casgstatus(gp, Gwaiting, Grunnable);
1618 execute(gp); // Schedule it back, never returns.
1625 // Gosched continuation on g0.
1627 runtime·gosched_m(G *gp)
1631 status = runtime·readgstatus(gp);
1632 if((status&~Gscan) != Grunning){
1634 runtime·throw("bad g status");
1636 runtime·casgstatus(gp, Grunning, Grunnable);
1638 runtime·lock(&runtime·sched.lock);
1640 runtime·unlock(&runtime·sched.lock);
1645 // Finishes execution of the current goroutine.
1646 // Must be NOSPLIT because it is called from Go.
1647 #pragma textflag NOSPLIT
1649 runtime·goexit1(void)
1654 runtime·racegoend();
1659 // runtime·goexit continuation on g0.
1663 runtime·casgstatus(gp, Grunning, Gdead);
1666 g->m->lockedg = nil;
1667 gp->paniconfault = 0;
1668 gp->defer = nil; // should be nil already but just in case.
1669 gp->panic = nil; // non-nil for Goexit during panic. points at stack-allocated data.
1670 gp->writebuf.array = nil;
1671 gp->writebuf.len = 0;
1672 gp->writebuf.cap = 0;
1673 gp->waitreason.str = nil;
1674 gp->waitreason.len = 0;
1679 if(g->m->locked & ~LockExternal) {
1680 runtime·printf("invalid m->locked = %d\n", g->m->locked);
1681 runtime·throw("internal lockOSThread error");
1688 #pragma textflag NOSPLIT
1690 save(uintptr pc, uintptr sp)
1700 static void entersyscall_bad(void);
1701 static void entersyscall_sysmon(void);
1702 static void entersyscall_gcwait(void);
1704 // The goroutine g is about to enter a system call.
1705 // Record that it's not using the cpu anymore.
1706 // This is called only from the go syscall library and cgocall,
1707 // not from the low-level system calls used by the runtime.
1709 // Entersyscall cannot split the stack: the runtime·gosave must
1710 // make g->sched refer to the caller's stack segment, because
1711 // entersyscall is going to return immediately after.
1713 // Nothing entersyscall calls can split the stack either.
1714 // We cannot safely move the stack during an active call to syscall,
1715 // because we do not know which of the uintptr arguments are
1716 // really pointers (back into the stack).
1717 // In practice, this means that we make the fast path run through
1718 // entersyscall doing no-split things, and the slow path has to use onM
1719 // to run bigger things on the m stack.
1721 // reentersyscall is the entry point used by cgo callbacks, where explicitly
1722 // saved SP and PC are restored. This is needed when exitsyscall will be called
1723 // from a function further up in the call stack than the parent, as g->syscallsp
1724 // must always point to a valid stack frame. entersyscall below is the normal
1725 // entry point for syscalls, which obtains the SP and PC from the caller.
1726 #pragma textflag NOSPLIT
1728 runtime·reentersyscall(uintptr pc, uintptr sp)
1732 // Disable preemption because during this function g is in Gsyscall status,
1733 // but can have an inconsistent g->sched; do not let GC observe it.
1736 // Entersyscall must not call any function that might split/grow the stack.
1737 // (See details in comment above.)
1738 // Catch calls that might, by replacing the stack guard with something that
1739 // will trip any stack check and leaving a flag to tell newstack to die.
1740 g->stackguard0 = StackPreempt;
1743 // Leave SP around for GC and traceback.
1747 runtime·casgstatus(g, Grunning, Gsyscall);
1748 if(g->syscallsp < g->stack.lo || g->stack.hi < g->syscallsp) {
1749 fn = entersyscall_bad;
1753 if(runtime·atomicload(&runtime·sched.sysmonwait)) { // TODO: fast atomic
1754 fn = entersyscall_sysmon;
1761 runtime·atomicstore(&g->m->p->status, Psyscall);
1762 if(runtime·sched.gcwaiting) {
1763 fn = entersyscall_gcwait;
1768 // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
1769 // We set stackguard to StackPreempt so that first split stack check calls morestack.
1770 // Morestack detects this case and throws.
1771 g->stackguard0 = StackPreempt;
1775 // Standard syscall entry used by the go syscall library and normal cgo calls.
1776 #pragma textflag NOSPLIT
1778 ·entersyscall(int32 dummy)
1780 runtime·reentersyscall((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
1784 entersyscall_bad(void)
1789 runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
1790 gp->syscallsp, gp->stack.lo, gp->stack.hi);
1791 runtime·throw("entersyscall");
1795 entersyscall_sysmon(void)
1797 runtime·lock(&runtime·sched.lock);
1798 if(runtime·atomicload(&runtime·sched.sysmonwait)) {
1799 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
1800 runtime·notewakeup(&runtime·sched.sysmonnote);
1802 runtime·unlock(&runtime·sched.lock);
1806 entersyscall_gcwait(void)
1808 runtime·lock(&runtime·sched.lock);
1809 if (runtime·sched.stopwait > 0 && runtime·cas(&g->m->p->status, Psyscall, Pgcstop)) {
1810 if(--runtime·sched.stopwait == 0)
1811 runtime·notewakeup(&runtime·sched.stopnote);
1813 runtime·unlock(&runtime·sched.lock);
1816 static void entersyscallblock_handoff(void);
1818 // The same as runtime·entersyscall(), but with a hint that the syscall is blocking.
1819 #pragma textflag NOSPLIT
1821 ·entersyscallblock(int32 dummy)
1825 g->m->locks++; // see comment in entersyscall
1827 g->stackguard0 = StackPreempt; // see comment in entersyscall
1829 // Leave SP around for GC and traceback.
1830 save((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
1831 g->syscallsp = g->sched.sp;
1832 g->syscallpc = g->sched.pc;
1833 runtime·casgstatus(g, Grunning, Gsyscall);
1834 if(g->syscallsp < g->stack.lo || g->stack.hi < g->syscallsp) {
1835 fn = entersyscall_bad;
1839 fn = entersyscallblock_handoff;
1842 // Resave for traceback during blocked call.
1843 save((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
1849 entersyscallblock_handoff(void)
1851 handoffp(releasep());
1854 // The goroutine g exited its system call.
1855 // Arrange for it to run on a cpu again.
1856 // This is called only from the go syscall library, not
1857 // from the low-level system calls used by the runtime.
1858 #pragma textflag NOSPLIT
1860 ·exitsyscall(int32 dummy)
1864 g->m->locks++; // see comment in entersyscall
1866 if(runtime·getcallersp(&dummy) > g->syscallsp)
1867 runtime·throw("exitsyscall: syscall frame is no longer valid");
1870 if(exitsyscallfast()) {
1871 // There's a cpu for us, so we can run.
1872 g->m->p->syscalltick++;
1873 // We need to cas the status and scan before resuming...
1874 runtime·casgstatus(g, Gsyscall, Grunning);
1876 // Garbage collector isn't running (since we are),
1877 // so okay to clear syscallsp.
1878 g->syscallsp = (uintptr)nil;
1881 // restore the preemption request in case we've cleared it in newstack
1882 g->stackguard0 = StackPreempt;
1884 // otherwise restore the real stackguard; we've spoiled it in entersyscall/entersyscallblock
1885 g->stackguard0 = g->stack.lo + StackGuard;
1893 // Call the scheduler.
1897 // Scheduler returned, so we're allowed to run now.
1898 // Delete the syscallsp information that we left for
1899 // the garbage collector during the system call.
1900 // Must wait until now because until gosched returns
1901 // we don't know for sure that the garbage collector is not running.
1903 g->syscallsp = (uintptr)nil;
1904 g->m->p->syscalltick++;
1908 static void exitsyscallfast_pidle(void);
1910 #pragma textflag NOSPLIT
1912 exitsyscallfast(void)
1916 // Freezetheworld sets stopwait but does not retake P's.
1917 if(runtime·sched.stopwait) {
1922 // Try to re-acquire the last P.
1923 if(g->m->p && g->m->p->status == Psyscall && runtime·cas(&g->m->p->status, Psyscall, Prunning)) {
1924 // There's a cpu for us, so we can run.
1925 g->m->mcache = g->m->p->mcache;
1929 // Try to get any other idle P.
1931 if(runtime·sched.pidle) {
1932 fn = exitsyscallfast_pidle;
1934 if(g->m->scalararg[0]) {
1935 g->m->scalararg[0] = 0;
1943 exitsyscallfast_pidle(void)
1947 runtime·lock(&runtime·sched.lock);
1949 if(p && runtime·atomicload(&runtime·sched.sysmonwait)) {
1950 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
1951 runtime·notewakeup(&runtime·sched.sysmonnote);
1953 runtime·unlock(&runtime·sched.lock);
1956 g->m->scalararg[0] = 1;
1958 g->m->scalararg[0] = 0;
1961 // runtime·exitsyscall slow path on g0.
1962 // Failed to acquire P, enqueue gp as runnable.
1968 runtime·casgstatus(gp, Gsyscall, Grunnable);
1970 runtime·lock(&runtime·sched.lock);
1974 else if(runtime·atomicload(&runtime·sched.sysmonwait)) {
1975 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
1976 runtime·notewakeup(&runtime·sched.sysmonnote);
1978 runtime·unlock(&runtime·sched.lock);
1981 execute(gp); // Never returns.
1984 // Wait until another thread schedules gp and so m again.
1986 execute(gp); // Never returns.
1989 schedule(); // Never returns.
1998 // Fork can hang if preempted with signals frequently enough (see issue 5517).
1999 // Ensure that we stay on the same M where we disable profiling.
2001 if(gp->m->profilehz != 0)
2002 runtime·resetcpuprofiler(0);
2004 // This function is called before fork in syscall package.
2005 // Code between fork and exec must not allocate memory nor even try to grow stack.
2006 // Here we spoil g->stackguard to reliably detect any attempts to grow stack.
2007 // runtime_AfterFork will undo this in parent process, but not in child.
2008 gp->stackguard0 = StackFork;
2011 // Called from syscall package before fork.
2012 #pragma textflag NOSPLIT
2014 syscall·runtime_BeforeFork(void)
2029 // See the comment in runtime_BeforeFork.
2030 gp->stackguard0 = gp->stack.lo + StackGuard;
2032 hz = runtime·sched.profilehz;
2034 runtime·resetcpuprofiler(hz);
2038 // Called from syscall package after fork in parent.
2039 #pragma textflag NOSPLIT
2041 syscall·runtime_AfterFork(void)
2049 // Hook used by runtime·malg to call runtime·stackalloc on the
2050 // scheduler stack. This exists because runtime·stackalloc insists
2051 // on being called on the scheduler stack, to avoid trying to grow
2052 // the stack while allocating a new stack segment.
2059 newg = g->m->ptrarg[0];
2060 size = g->m->scalararg[0];
2062 newg->stack = runtime·stackalloc(size);
2064 runtime·gogo(&gp->sched);
2067 // Allocate a new g, with a stack big enough for stacksize bytes.
2069 runtime·malg(int32 stacksize)
2075 if(stacksize >= 0) {
2076 stacksize = runtime·round2(StackSystem + stacksize);
2078 // running on scheduler stack already.
2079 newg->stack = runtime·stackalloc(stacksize);
2081 // have to call stackalloc on scheduler stack.
2082 g->m->scalararg[0] = stacksize;
2083 g->m->ptrarg[0] = newg;
2086 g->m->ptrarg[0] = nil;
2088 newg->stackguard0 = newg->stack.lo + StackGuard;
2089 newg->stackguard1 = ~(uintptr)0;
2102 siz = g->m->scalararg[0];
2103 callerpc = (void*)g->m->scalararg[1];
2104 argp = g->m->ptrarg[0];
2105 fn = (FuncVal*)g->m->ptrarg[1];
2107 runtime·newproc1(fn, argp, siz, 0, callerpc);
2108 g->m->ptrarg[0] = nil;
2109 g->m->ptrarg[1] = nil;
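// The scalararg/ptrarg dance above is this file's convention for passing
// arguments to code that must run on the m (g0) stack: the caller stores
// values in g->m->scalararg[]/ptrarg[] before switching stacks, and the
// callee (here newproc_m) reads them and clears the pointer slots so the
// garbage collector never sees stale pointers; a summary of the convention
// as used here, not a complete description of the onM mechanism.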
2112 // Create a new g running fn with siz bytes of arguments.
2113 // Put it on the queue of g's waiting to run.
2114 // The compiler turns a go statement into a call to this.
2115 // Cannot split the stack because it assumes that the arguments
2116 // are available sequentially after &fn; they would not be
2117 // copied if a stack split occurred.
2118 #pragma textflag NOSPLIT
2120 runtime·newproc(int32 siz, FuncVal* fn, ...)
2125 if(thechar == '5' || thechar == '9')
2126 argp = (byte*)(&fn+2); // skip caller's saved LR
2128 argp = (byte*)(&fn+1);
2131 g->m->scalararg[0] = siz;
2132 g->m->scalararg[1] = (uintptr)runtime·getcallerpc(&siz);
2133 g->m->ptrarg[0] = argp;
2134 g->m->ptrarg[1] = fn;
2140 void runtime·main(void);
2142 // Create a new g running fn with narg bytes of arguments starting
2143 // at argp and returning nret bytes of results. callerpc is the
2144 // address of the go statement that created this. The new g is put
2145 // on the queue of g's waiting to run.
2147 runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
2155 g->m->throwing = -1; // do not dump full stacks
2156 runtime·throw("go of nil func value");
2158 g->m->locks++; // disable preemption because it can be holding p in a local var
2162 // We could allocate a larger initial stack if necessary.
2163 // Not worth it: this is almost always an error.
2164 // 4*sizeof(uintreg): extra space added below
2165 // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
2166 if(siz >= StackMin - 4*sizeof(uintreg) - sizeof(uintreg))
2167 runtime·throw("runtime.newproc: function arguments too large for new goroutine");
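// For example, with 8-byte uintreg the check above caps the argument block
// at StackMin - 40 bytes: 4*8 bytes of slack added below plus 8 bytes for
// the caller's LR or return address.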
2170 if((newg = gfget(p)) == nil) {
2171 newg = runtime·malg(StackMin);
2172 runtime·casgstatus(newg, Gidle, Gdead);
2173 runtime·allgadd(newg); // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
2175 if(newg->stack.hi == 0)
2176 runtime·throw("newproc1: newg missing stack");
2178 if(runtime·readgstatus(newg) != Gdead)
2179 runtime·throw("newproc1: new g is not Gdead");
2181 sp = (byte*)newg->stack.hi;
2182 sp -= 4*sizeof(uintreg); // extra space in case of reads slightly beyond frame
2184 runtime·memmove(sp, argp, narg);
2185 if(thechar == '5' || thechar == '9') {
2187 sp -= sizeof(void*);
2191 runtime·memclr((byte*)&newg->sched, sizeof newg->sched);
2192 newg->sched.sp = (uintptr)sp;
2193 newg->sched.pc = (uintptr)runtime·goexit + PCQuantum; // +PCQuantum so that previous instruction is in same function
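// (Traceback code resolves a return address by looking up pc-1, so the
// +PCQuantum keeps that lookup inside runtime·goexit rather than in
// whatever function happens to precede it in the binary.)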
2194 newg->sched.g = newg;
2195 runtime·gostartcallfn(&newg->sched, fn);
2196 newg->gopc = (uintptr)callerpc;
2197 runtime·casgstatus(newg, Gdead, Grunnable);
2199 if(p->goidcache == p->goidcacheend) {
2200 // Sched.goidgen is the last allocated id,
2201 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
2202 // At startup sched.goidgen=0, so main goroutine receives goid=1.
2203 p->goidcache = runtime·xadd64(&runtime·sched.goidgen, GoidCacheBatch);
2204 p->goidcache -= GoidCacheBatch - 1;
2205 p->goidcacheend = p->goidcache + GoidCacheBatch;
2207 newg->goid = p->goidcache++;
2209 newg->racectx = runtime·racegostart((void*)callerpc);
2212 if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0 && fn->fn != runtime·main) // TODO: fast atomic
2215 if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
2216 g->stackguard0 = StackPreempt;
2220 // Put on gfree list.
2221 // If local list is too long, transfer a batch to the global list.
2227 if(runtime·readgstatus(gp) != Gdead)
2228 runtime·throw("gfput: bad status (not Gdead)");
2230 stksize = gp->stack.hi - gp->stack.lo;
2232 if(stksize != FixedStack) {
2233 // non-standard stack size - free it.
2234 runtime·stackfree(gp->stack);
2237 gp->stackguard0 = 0;
2239 gp->schedlink = p->gfree;
2242 if(p->gfreecnt >= 64) {
2243 runtime·lock(&runtime·sched.gflock);
2244 while(p->gfreecnt >= 32) {
2247 p->gfree = gp->schedlink;
2248 gp->schedlink = runtime·sched.gfree;
2249 runtime·sched.gfree = gp;
2250 runtime·sched.ngfree++;
2252 runtime·unlock(&runtime·sched.gflock);
2256 // Get from gfree list.
2257 // If local list is empty, grab a batch from global list.
2266 if(gp == nil && runtime·sched.gfree) {
2267 runtime·lock(&runtime·sched.gflock);
2268 while(p->gfreecnt < 32 && runtime·sched.gfree != nil) {
2270 gp = runtime·sched.gfree;
2271 runtime·sched.gfree = gp->schedlink;
2272 runtime·sched.ngfree--;
2273 gp->schedlink = p->gfree;
2276 runtime·unlock(&runtime·sched.gflock);
2280 p->gfree = gp->schedlink;
2283 if(gp->stack.lo == 0) {
2284 // Stack was deallocated in gfput. Allocate a new one.
2286 gp->stack = runtime·stackalloc(FixedStack);
2288 g->m->scalararg[0] = FixedStack;
2289 g->m->ptrarg[0] = gp;
2292 g->m->ptrarg[0] = nil;
2294 gp->stackguard0 = gp->stack.lo + StackGuard;
2297 runtime·racemalloc((void*)gp->stack.lo, gp->stack.hi - gp->stack.lo);
2303 // Purge all cached G's from gfree list to the global list.
2309 runtime·lock(&runtime·sched.gflock);
2310 while(p->gfreecnt != 0) {
2313 p->gfree = gp->schedlink;
2314 gp->schedlink = runtime·sched.gfree;
2315 runtime·sched.gfree = gp;
2316 runtime·sched.ngfree++;
2318 runtime·unlock(&runtime·sched.gflock);
2321 #pragma textflag NOSPLIT
2323 runtime·Breakpoint(void)
2325 runtime·breakpoint();
2328 // lockOSThread is called by runtime.LockOSThread and runtime.lockOSThread below
2329 // after they modify m->locked. Do not allow preemption during this call,
2330 // or else the m might be different in this function than in the caller.
2331 #pragma textflag NOSPLIT
2339 #pragma textflag NOSPLIT
2341 runtime·LockOSThread(void)
2343 g->m->locked |= LockExternal;
2347 #pragma textflag NOSPLIT
2349 runtime·lockOSThread(void)
2351 g->m->locked += LockInternal;
2356 // unlockOSThread is called by runtime.UnlockOSThread and runtime.unlockOSThread below
2357 // after they update m->locked. Do not allow preemption during this call,
2358 // or else the m might be different in this function than in the caller.
2359 #pragma textflag NOSPLIT
2361 unlockOSThread(void)
2363 if(g->m->locked != 0)
2365 g->m->lockedg = nil;
2369 #pragma textflag NOSPLIT
2371 runtime·UnlockOSThread(void)
2373 g->m->locked &= ~LockExternal;
2377 static void badunlockOSThread(void);
2379 #pragma textflag NOSPLIT
2381 runtime·unlockOSThread(void)
2385 if(g->m->locked < LockInternal) {
2386 fn = badunlockOSThread;
2389 g->m->locked -= LockInternal;
2394 badunlockOSThread(void)
2396 runtime·throw("runtime: internal error: misuse of lockOSThread/unlockOSThread");
2399 #pragma textflag NOSPLIT
2401 runtime·gcount(void)
2406 n = runtime·allglen - runtime·sched.ngfree;
2407 for(pp=runtime·allp; p=*pp; pp++)
2409 // All these variables can be changed concurrently, so the result can be inconsistent.
2410 // But at least the current goroutine is running.
2417 runtime·mcount(void)
2419 return runtime·sched.mcount;
2422 static struct ProfState {
2427 static void System(void) {}
2428 static void ExternalCode(void) {}
2429 static void GC(void) {}
2430 extern void runtime·cpuproftick(uintptr*, int32);
2431 extern byte runtime·etext[];
2433 // Called if we receive a SIGPROF signal.
2435 runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp, M *mp)
2439 // Do not use global m in this function, use mp instead.
2440 // On windows one m is sending reports about all the g's, so the global m means the wrong thing.
2450 // Profiling runs concurrently with GC, so it must not allocate.
2453 // Define that a "user g" is a user-created goroutine, and a "system g"
2454 // is one that is m->g0 or m->gsignal. We've only made sure that we
2455 // can unwind user g's, so exclude the system g's.
2457 // It is not quite as easy as testing gp == m->curg (the current user g)
2458 // because we might be interrupted for profiling halfway through a
2459 // goroutine switch. The switch involves updating three (or four) values:
2460 // g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
2461 // because once it gets updated the new g is running.
2463 // When switching from a user g to a system g, LR is not considered live,
2464 // so the update only affects g, SP, and PC. Since PC must be last,
2465 // the possible partial transitions in ordinary execution are (1) g alone is updated,
2466 // (2) both g and SP are updated, and (3) SP alone is updated.
2467 // If g is updated, we'll see a system g and not look closer.
2468 // If SP alone is updated, we can detect the partial transition by checking
2469 // whether the SP is within g's stack bounds. (We could also require that SP
2470 // be changed only after g, but the stack bounds check is needed by other
2471 // cases, so there is no need to impose an additional requirement.)
2473 // There is one exceptional transition to a system g, not in ordinary execution.
2474 // When a signal arrives, the operating system starts the signal handler running
2475 // with an updated PC and SP. The g is updated last, at the beginning of the
2476 // handler. There are two reasons this is okay. First, until g is updated the
2477 // g and SP do not match, so the stack bounds check detects the partial transition.
2478 // Second, signal handlers currently run with signals disabled, so a profiling
2479 // signal cannot arrive during the handler.
2481 // When switching from a system g to a user g, there are three possibilities.
2483 // First, it may be that the g switch has no PC update, because the SP
2484 // either corresponds to a user g throughout (as in runtime.asmcgocall)
2485 // or because it has been arranged to look like a user g frame
2486 // (as in runtime.cgocallback_gofunc). In this case, since the entire
2487 // transition is a g+SP update, a partial transition updating just one of
2488 // those will be detected by the stack bounds check.
2490 // Second, when returning from a signal handler, the PC and SP updates
2491 // are performed by the operating system in an atomic update, so the g
2492 // update must be done before them. The stack bounds check detects
2493 // the partial transition here, and (again) signal handlers run with signals
2494 // disabled, so a profiling signal cannot arrive then anyway.
2496 // Third, the common case: it may be that the switch updates g, SP, and PC
2497 // separately, as in runtime.gogo.
2499 // Because runtime.gogo is the only instance, we check whether the PC lies
2500 // within that function, and if so, do not ask for a traceback. This approach
2501 // requires knowing the size of the runtime.gogo function, which we
2502 // record in arch_*.h and check in runtime_test.go.
2504 // There is another apparently viable approach, recorded here in case
2505 // the "PC within runtime.gogo" check turns out not to be usable.
2506 // It would be possible to delay the update of either g or SP until immediately
2507 // before the PC update instruction. Then, because of the stack bounds check,
2508 // the only problematic interrupt point is just before that PC update instruction,
2509 // and the sigprof handler can detect that instruction and simulate stepping past
2510 // it in order to reach a consistent state. On ARM, the update of g must be made
2511 // in two places (in R10 and also in a TLS slot), so the delayed update would
2512 // need to be the SP update. The sigprof handler must read the instruction at
2513 // the current PC and if it was the known instruction (for example, JMP BX or
2514 // MOV R2, PC), use that other register in place of the PC value.
2515 // The biggest drawback to this solution is that it requires that we can tell
2516 // whether it's safe to read from the memory pointed at by PC.
2517 // In a correct program, we can test PC == nil and otherwise read,
2518 // but if a profiling signal happens at the instant that a program executes
2519 // a bad jump (before the program manages to handle the resulting fault)
2520 // the profiling handler could fault trying to read nonexistent memory.
2522 // To recap, there are no constraints on the assembly being used for the
2523 // transition. We simply require that g and SP match and that the PC is not in runtime.gogo.
2526 if(gp == nil || gp != mp->curg ||
2527 (uintptr)sp < gp->stack.lo || gp->stack.hi < (uintptr)sp ||
2528 ((uint8*)runtime·gogo <= pc && pc < (uint8*)runtime·gogo + RuntimeGogoBytes))
2533 n = runtime·gentraceback((uintptr)pc, (uintptr)sp, (uintptr)lr, gp, 0, stk, nelem(stk), nil, nil, TraceTrap);
2534 if(!traceback || n <= 0) {
2535 // Normal traceback is impossible or has failed.
2536 // See if it falls into one of several common cases.
2538 if(mp->ncgo > 0 && mp->curg != nil &&
2539 mp->curg->syscallpc != 0 && mp->curg->syscallsp != 0) {
2540 // Cgo: we can't unwind and symbolize arbitrary C code,
2541 // so instead collect Go stack that leads to the cgo call.
2542 // This is especially important on windows, since all syscalls are cgo calls.
2543 n = runtime·gentraceback(mp->curg->syscallpc, mp->curg->syscallsp, 0, mp->curg, 0, stk, nelem(stk), nil, nil, 0);
2546 if(n == 0 && mp->libcallg != nil && mp->libcallpc != 0 && mp->libcallsp != 0) {
2547 // Libcall, i.e. runtime syscall on windows.
2548 // Collect Go stack that leads to the call.
2549 n = runtime·gentraceback(mp->libcallpc, mp->libcallsp, 0, mp->libcallg, 0, stk, nelem(stk), nil, nil, 0);
2553 // If all of the above has failed, account it against abstract "System" or "GC".
2555 // "ExternalCode" is better than "etext".
2556 if((uintptr)pc > (uintptr)runtime·etext)
2557 pc = (byte*)ExternalCode + PCQuantum;
2558 stk[0] = (uintptr)pc;
2559 if(mp->gcing || mp->helpgc)
2560 stk[1] = (uintptr)GC + PCQuantum;
2562 stk[1] = (uintptr)System + PCQuantum;
2567 // Simple cas-lock to coordinate with setcpuprofilerate.
2568 while(!runtime·cas(&prof.lock, 0, 1))
2571 runtime·cpuproftick(stk, n);
2572 runtime·atomicstore(&prof.lock, 0);
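// The prof.lock handshake above is a plain test-and-set spinlock. The same
// discipline in ordinary Go, as an illustrative sketch only (the runtime uses
// its own cas/atomicstore primitives, not sync/atomic):
//
//	import "sync/atomic"
//
//	var lock uint32
//
//	func withLock(f func()) {
//		for !atomic.CompareAndSwapUint32(&lock, 0, 1) {
//			// spin: holders only run short, non-blocking sections
//		}
//		f()
//		atomic.StoreUint32(&lock, 0) // release
//	}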
2577 // Arrange to call fn with a traceback hz times a second.
2579 runtime·setcpuprofilerate_m(void)
2583 hz = g->m->scalararg[0];
2584 g->m->scalararg[0] = 0;
2586 // Force sane arguments.
2590 // Disable preemption, otherwise we can be rescheduled to another thread
2591 // that has profiling enabled.
2594 // Stop profiler on this thread so that it is safe to lock prof.
2595 // If a profiling signal came in while we had prof locked,
2596 // it would deadlock.
2597 runtime·resetcpuprofiler(0);
2599 while(!runtime·cas(&prof.lock, 0, 1))
2602 runtime·atomicstore(&prof.lock, 0);
2604 runtime·lock(&runtime·sched.lock);
2605 runtime·sched.profilehz = hz;
2606 runtime·unlock(&runtime·sched.lock);
2609 runtime·resetcpuprofiler(hz);
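// User code reaches this path through runtime.SetCPUProfileRate (which
// runtime/pprof calls internally). A minimal usage sketch:
//
//	runtime.SetCPUProfileRate(100) // deliver SIGPROF ~100 times per second
//	// ... workload to be profiled ...
//	runtime.SetCPUProfileRate(0)   // stop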
2614 P *runtime·newP(void);
2616 // Change number of processors. The world is stopped, sched is locked.
2618 procresize(int32 new)
2625 old = runtime·gomaxprocs;
2626 if(old < 0 || old > MaxGomaxprocs || new <= 0 || new > MaxGomaxprocs)
2627 runtime·throw("procresize: invalid arg");
2628 // initialize new P's
2629 for(i = 0; i < new; i++) {
2630 p = runtime·allp[i];
2634 p->status = Pgcstop;
2635 runtime·atomicstorep(&runtime·allp[i], p);
2637 if(p->mcache == nil) {
2639 p->mcache = g->m->mcache; // bootstrap
2641 p->mcache = runtime·allocmcache();
2645 // redistribute runnable G's evenly
2646 // collect all runnable goroutines in global queue preserving FIFO order
2647 // FIFO order is required to ensure fairness even during frequent GCs
2648 // see http://golang.org/issue/7126
2652 for(i = 0; i < old; i++) {
2653 p = runtime·allp[i];
2654 if(p->runqhead == p->runqtail)
2657 // pop from tail of local queue
2659 gp = p->runq[p->runqtail%nelem(p->runq)];
2660 // push onto head of global queue
2661 gp->schedlink = runtime·sched.runqhead;
2662 runtime·sched.runqhead = gp;
2663 if(runtime·sched.runqtail == nil)
2664 runtime·sched.runqtail = gp;
2665 runtime·sched.runqsize++;
2668 // fill local queues with at most nelem(p->runq)/2 goroutines
2669 // start at 1 because current M already executes some G and will acquire allp[0] below,
2670 // so if we have a spare G we want to put it into allp[1].
2671 for(i = 1; i < new * nelem(p->runq)/2 && runtime·sched.runqsize > 0; i++) {
2672 gp = runtime·sched.runqhead;
2673 runtime·sched.runqhead = gp->schedlink;
2674 if(runtime·sched.runqhead == nil)
2675 runtime·sched.runqtail = nil;
2676 runtime·sched.runqsize--;
2677 runqput(runtime·allp[i%new], gp);
2681 for(i = new; i < old; i++) {
2682 p = runtime·allp[i];
2683 runtime·freemcache(p->mcache);
2687 // can't free P itself because it can be referenced by an M in syscall
2694 p = runtime·allp[0];
2698 for(i = new-1; i > 0; i--) {
2699 p = runtime·allp[i];
2703 runtime·atomicstore((uint32*)&runtime·gomaxprocs, new);
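// procresize is ultimately what serves runtime.GOMAXPROCS from user code
// (the caller stops the world first, per the comment above). Sketch of the
// public entry point:
//
//	prev := runtime.GOMAXPROCS(4) // resize to 4 P's; returns the old value
//	defer runtime.GOMAXPROCS(prev)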
2706 // Associate p and the current m.
2710 if(g->m->p || g->m->mcache)
2711 runtime·throw("acquirep: already in go");
2712 if(p->m || p->status != Pidle) {
2713 runtime·printf("acquirep: p->m=%p(%d) p->status=%d\n", p->m, p->m ? p->m->id : 0, p->status);
2714 runtime·throw("acquirep: invalid p state");
2716 g->m->mcache = p->mcache;
2719 p->status = Prunning;
2722 // Disassociate p and the current m.
2728 if(g->m->p == nil || g->m->mcache == nil)
2729 runtime·throw("releasep: invalid arg");
2731 if(p->m != g->m || p->mcache != g->m->mcache || p->status != Prunning) {
2732 runtime·printf("releasep: m=%p m->p=%p p->m=%p m->mcache=%p p->mcache=%p p->status=%d\n",
2733 g->m, g->m->p, p->m, g->m->mcache, p->mcache, p->status);
2734 runtime·throw("releasep: invalid p state");
2744 incidlelocked(int32 v)
2746 runtime·lock(&runtime·sched.lock);
2747 runtime·sched.nmidlelocked += v;
2750 runtime·unlock(&runtime·sched.lock);
2753 // Check for deadlock situation.
2754 // The check is based on the number of running M's; if it is 0, we have a deadlock.
2761 int32 run, grunning, s;
2765 run = runtime·sched.mcount - runtime·sched.nmidle - runtime·sched.nmidlelocked - 1;
2768 // If we are dying because of a signal caught on an already idle thread,
2769 // freezetheworld will cause all running threads to block.
2770 // The runtime will then essentially be in a deadlocked state,
2771 // except that there is a thread that will call runtime·exit soon.
2772 if(runtime·panicking > 0)
2775 runtime·printf("runtime: checkdead: nmidle=%d nmidlelocked=%d mcount=%d\n",
2776 runtime·sched.nmidle, runtime·sched.nmidlelocked, runtime·sched.mcount);
2777 runtime·throw("checkdead: inconsistent counts");
2780 runtime·lock(&runtime·allglock);
2781 for(i = 0; i < runtime·allglen; i++) {
2782 gp = runtime·allg[i];
2785 s = runtime·readgstatus(gp);
2793 runtime·unlock(&runtime·allglock);
2794 runtime·printf("runtime: checkdead: found g %D in status %d\n", gp->goid, s);
2795 runtime·throw("checkdead: runnable g");
2799 runtime·unlock(&runtime·allglock);
2800 if(grunning == 0) // possible if main goroutine calls runtime·Goexit()
2801 runtime·throw("no goroutines (main called runtime.Goexit) - deadlock!");
2803 // Maybe jump time forward for playground.
2804 if((gp = runtime·timejump()) != nil) {
2805 runtime·casgstatus(gp, Gwaiting, Grunnable);
2809 runtime·throw("checkdead: no p for timer");
2815 runtime·notewakeup(&mp->park);
2820 g->m->throwing = -1; // do not dump full stacks
2821 runtime·throw("all goroutines are asleep - deadlock!");
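// The throw above is the familiar user-visible crash. The smallest program
// that reaches it (illustrative):
//
//	package main
//
//	func main() {
//		ch := make(chan int)
//		<-ch // no sender can ever exist: "all goroutines are asleep - deadlock!"
//	}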
2827 uint32 idle, delay, nscavenge;
2828 int64 now, unixnow, lastpoll, lasttrace, lastgc;
2829 int64 forcegcperiod, scavengelimit, lastscavenge, maxsleep;
2832 // If we go two minutes without a garbage collection, force one to run.
2833 forcegcperiod = 2*60*1e9;
2834 // If a heap span goes unused for 5 minutes after a garbage collection,
2835 // we hand it back to the operating system.
2836 scavengelimit = 5*60*1e9;
2837 if(runtime·debug.scavenge > 0) {
2838 // Scavenge-a-lot for testing.
2839 forcegcperiod = 10*1e6;
2840 scavengelimit = 20*1e6;
2842 lastscavenge = runtime·nanotime();
2844 // Make wake-up period small enough for the sampling to be correct.
2845 maxsleep = forcegcperiod/2;
2846 if(scavengelimit < forcegcperiod)
2847 maxsleep = scavengelimit/2;
2850 idle = 0; // how many cycles in succession we have not woken anyone up
2853 if(idle == 0) // start with 20us sleep...
2855 else if(idle > 50) // start doubling the sleep after 1ms...
2857 if(delay > 10*1000) // up to 10ms
2859 runtime·usleep(delay);
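// The delay schedule above, restated as a self-contained sketch (constants
// taken from the comments: 20us floor, doubling once idle > 50 cycles,
// 10ms cap):
//
//	func nextDelay(idle, delay uint32) uint32 {
//		if idle == 0 {
//			delay = 20 // us
//		} else if idle > 50 {
//			delay *= 2
//		}
//		if delay > 10*1000 {
//			delay = 10 * 1000
//		}
//		return delay
//	}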
2860 if(runtime·debug.schedtrace <= 0 &&
2861 (runtime·sched.gcwaiting || runtime·atomicload(&runtime·sched.npidle) == runtime·gomaxprocs)) { // TODO: fast atomic
2862 runtime·lock(&runtime·sched.lock);
2863 if(runtime·atomicload(&runtime·sched.gcwaiting) || runtime·atomicload(&runtime·sched.npidle) == runtime·gomaxprocs) {
2864 runtime·atomicstore(&runtime·sched.sysmonwait, 1);
2865 runtime·unlock(&runtime·sched.lock);
2866 runtime·notetsleep(&runtime·sched.sysmonnote, maxsleep);
2867 runtime·lock(&runtime·sched.lock);
2868 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
2869 runtime·noteclear(&runtime·sched.sysmonnote);
2873 runtime·unlock(&runtime·sched.lock);
2875 // poll network if not polled for more than 10ms
2876 lastpoll = runtime·atomicload64(&runtime·sched.lastpoll);
2877 now = runtime·nanotime();
2878 unixnow = runtime·unixnanotime();
2879 if(lastpoll != 0 && lastpoll + 10*1000*1000 < now) {
2880 runtime·cas64(&runtime·sched.lastpoll, lastpoll, now);
2881 gp = runtime·netpoll(false); // non-blocking
2883 // Need to decrement number of idle locked M's
2884 // (pretending that one more is running) before injectglist.
2885 // Otherwise it can lead to the following situation:
2886 // injectglist grabs all P's but before it starts M's to run the P's,
2887 // another M returns from syscall, finishes running its G,
2888 // observes that there is no work to do and no other running M's
2889 // and reports deadlock.
2895 // retake P's blocked in syscalls
2896 // and preempt long running G's
2902 // check if we need to force a GC
2903 lastgc = runtime·atomicload64(&mstats.last_gc);
2904 if(lastgc != 0 && unixnow - lastgc > forcegcperiod && runtime·atomicload(&runtime·forcegc.idle)) {
2905 runtime·lock(&runtime·forcegc.lock);
2906 runtime·forcegc.idle = 0;
2907 runtime·forcegc.g->schedlink = nil;
2908 injectglist(runtime·forcegc.g);
2909 runtime·unlock(&runtime·forcegc.lock);
2912 // scavenge heap once in a while
2913 if(lastscavenge + scavengelimit/2 < now) {
2914 runtime·MHeap_Scavenge(nscavenge, now, scavengelimit);
2919 if(runtime·debug.schedtrace > 0 && lasttrace + runtime·debug.schedtrace*1000000ll <= now) {
2921 runtime·schedtrace(runtime·debug.scheddetail);
2926 typedef struct Pdesc Pdesc;
2934 #pragma dataflag NOPTR
2935 static Pdesc pdesc[MaxGomaxprocs];
2946 for(i = 0; i < runtime·gomaxprocs; i++) {
2947 p = runtime·allp[i];
2953 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
2955 if(pd->syscalltick != t) {
2956 pd->syscalltick = t;
2957 pd->syscallwhen = now;
2960 // On the one hand we don't want to retake Ps if there is no other work to do,
2961 // but on the other hand we want to retake them eventually
2962 // because they can prevent the sysmon thread from deep sleep.
2963 if(p->runqhead == p->runqtail &&
2964 runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) > 0 &&
2965 pd->syscallwhen + 10*1000*1000 > now)
2967 // Need to decrement number of idle locked M's
2968 // (pretending that one more is running) before the CAS.
2969 // Otherwise the M from which we retake can exit the syscall,
2970 // increment nmidle and report deadlock.
2972 if(runtime·cas(&p->status, s, Pidle)) {
2977 } else if(s == Prunning) {
2978 // Preempt G if it's running for more than 10ms.
2980 if(pd->schedtick != t) {
2982 pd->schedwhen = now;
2985 if(pd->schedwhen + 10*1000*1000 > now)
2993 // Tell all goroutines that they have been preempted and they should stop.
2994 // This function is purely best-effort. It can fail to inform a goroutine if a
2995 // processor just started running it.
2996 // No locks need to be held.
2997 // Returns true if preemption request was issued to at least one goroutine.
3006 for(i = 0; i < runtime·gomaxprocs; i++) {
3007 p = runtime·allp[i];
3008 if(p == nil || p->status != Prunning)
3010 res |= preemptone(p);
3015 // Tell the goroutine running on processor P to stop.
3016 // This function is purely best-effort. It can incorrectly fail to inform the
3017 // goroutine. It can inform the wrong goroutine. Even if it informs the
3018 // correct goroutine, that goroutine might ignore the request if it is
3019 // simultaneously executing runtime·newstack.
3020 // No lock needs to be held.
3021 // Returns true if preemption request was issued.
3022 // The actual preemption will happen at some point in the future
3023 // and will be indicated by gp->status no longer being Grunning.
3032 if(mp == nil || mp == g->m)
3035 if(gp == nil || gp == mp->g0)
3038 // Every call in a goroutine checks for stack overflow by
3039 // comparing the current stack pointer to gp->stackguard0.
3040 // Setting gp->stackguard0 to StackPreempt folds
3041 // preemption into the normal stack overflow check.
3042 gp->stackguard0 = StackPreempt;
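// Schematically, the compiler-inserted prologue that this piggybacks on looks
// like the following (a hypothetical Go rendering of generated code, not a
// real runtime function):
//
//	func prologue(gp *g, sp uintptr) {
//		if sp < gp.stackguard0 { // the ordinary stack-overflow check
//			morestack() // StackPreempt is larger than any real SP, so the
//		}               // check always fires and morestack can park the g
//	}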
3047 runtime·schedtrace(bool detailed)
3049 static int64 starttime;
3051 int64 id1, id2, id3;
3059 now = runtime·nanotime();
3063 runtime·lock(&runtime·sched.lock);
3064 runtime·printf("SCHED %Dms: gomaxprocs=%d idleprocs=%d threads=%d spinningthreads=%d idlethreads=%d runqueue=%d",
3065 (now-starttime)/1000000, runtime·gomaxprocs, runtime·sched.npidle, runtime·sched.mcount,
3066 runtime·sched.nmspinning, runtime·sched.nmidle, runtime·sched.runqsize);
3068 runtime·printf(" gcwaiting=%d nmidlelocked=%d stopwait=%d sysmonwait=%d\n",
3069 runtime·sched.gcwaiting, runtime·sched.nmidlelocked,
3070 runtime·sched.stopwait, runtime·sched.sysmonwait);
3072 // We must be careful while reading data from P's, M's and G's.
3073 // Even if we hold schedlock, most data can be changed concurrently.
3074 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
3075 for(i = 0; i < runtime·gomaxprocs; i++) {
3076 p = runtime·allp[i];
3080 h = runtime·atomicload(&p->runqhead);
3081 t = runtime·atomicload(&p->runqtail);
3083 runtime·printf(" P%d: status=%d schedtick=%d syscalltick=%d m=%d runqsize=%d gfreecnt=%d\n",
3084 i, p->status, p->schedtick, p->syscalltick, mp ? mp->id : -1, t-h, p->gfreecnt);
3086 // In non-detailed mode, format the lengths of the per-P run queues as:
3087 // [len1 len2 len3 len4]
3089 if(runtime·gomaxprocs == 1)
3093 else if(i == runtime·gomaxprocs-1)
3095 runtime·printf(fmt, t-h);
3099 runtime·unlock(&runtime·sched.lock);
3102 for(mp = runtime·allm; mp; mp = mp->alllink) {
3105 lockedg = mp->lockedg;
3114 id3 = lockedg->goid;
3115 runtime·printf(" M%d: p=%D curg=%D mallocing=%d throwing=%d gcing=%d"
3116 " locks=%d dying=%d helpgc=%d spinning=%d blocked=%d lockedg=%D\n",
3118 mp->mallocing, mp->throwing, mp->gcing, mp->locks, mp->dying, mp->helpgc,
3119 mp->spinning, mp->blocked, id3);
3121 runtime·lock(&runtime·allglock);
3122 for(gi = 0; gi < runtime·allglen; gi++) {
3123 gp = runtime·allg[gi];
3125 lockedm = gp->lockedm;
3126 runtime·printf(" G%D: status=%d(%S) m=%d lockedm=%d\n",
3127 gp->goid, runtime·readgstatus(gp), gp->waitreason, mp ? mp->id : -1,
3128 lockedm ? lockedm->id : -1);
3130 runtime·unlock(&runtime·allglock);
3131 runtime·unlock(&runtime·sched.lock);
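// schedtrace output is enabled from the environment rather than an API:
// GODEBUG=schedtrace=1000 prints the summary line every 1000ms, and
// GODEBUG=schedtrace=1000,scheddetail=1 adds the per-P/M/G detail above.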
3134 // Put mp on midle list.
3135 // Sched must be locked.
3139 mp->schedlink = runtime·sched.midle;
3140 runtime·sched.midle = mp;
3141 runtime·sched.nmidle++;
3145 // Try to get an m from midle list.
3146 // Sched must be locked.
3152 if((mp = runtime·sched.midle) != nil){
3153 runtime·sched.midle = mp->schedlink;
3154 runtime·sched.nmidle--;
3159 // Put gp on the global runnable queue.
3160 // Sched must be locked.
3164 gp->schedlink = nil;
3165 if(runtime·sched.runqtail)
3166 runtime·sched.runqtail->schedlink = gp;
3168 runtime·sched.runqhead = gp;
3169 runtime·sched.runqtail = gp;
3170 runtime·sched.runqsize++;
3173 // Put a batch of runnable goroutines on the global runnable queue.
3174 // Sched must be locked.
3176 globrunqputbatch(G *ghead, G *gtail, int32 n)
3178 gtail->schedlink = nil;
3179 if(runtime·sched.runqtail)
3180 runtime·sched.runqtail->schedlink = ghead;
3182 runtime·sched.runqhead = ghead;
3183 runtime·sched.runqtail = gtail;
3184 runtime·sched.runqsize += n;
3187 // Try to get a batch of G's from the global runnable queue.
3188 // Sched must be locked.
3190 globrunqget(P *p, int32 max)
3195 if(runtime·sched.runqsize == 0)
3197 n = runtime·sched.runqsize/runtime·gomaxprocs+1;
3198 if(n > runtime·sched.runqsize)
3199 n = runtime·sched.runqsize;
3200 if(max > 0 && n > max)
3202 if(n > nelem(p->runq)/2)
3203 n = nelem(p->runq)/2;
3204 runtime·sched.runqsize -= n;
3205 if(runtime·sched.runqsize == 0)
3206 runtime·sched.runqtail = nil;
3207 gp = runtime·sched.runqhead;
3208 runtime·sched.runqhead = gp->schedlink;
3211 gp1 = runtime·sched.runqhead;
3212 runtime·sched.runqhead = gp1->schedlink;
3218 // Put p on the pidle list.
3219 // Sched must be locked.
3223 p->link = runtime·sched.pidle;
3224 runtime·sched.pidle = p;
3225 runtime·xadd(&runtime·sched.npidle, 1); // TODO: fast atomic
3228 // Try to get a p from the pidle list.
3229 // Sched must be locked.
3235 p = runtime·sched.pidle;
3237 runtime·sched.pidle = p->link;
3238 runtime·xadd(&runtime·sched.npidle, -1); // TODO: fast atomic
3243 // Try to put g on local runnable queue.
3244 // If it's full, put onto global queue.
3245 // Executed only by the owner P.
3247 runqput(P *p, G *gp)
3252 h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with consumers
3254 if(t - h < nelem(p->runq)) {
3255 p->runq[t%nelem(p->runq)] = gp;
3256 runtime·atomicstore(&p->runqtail, t+1); // store-release, makes the item available for consumption
3259 if(runqputslow(p, gp, h, t))
3261 // the queue is not full, now the put above must succeed
3265 // Put g and a batch of work from local runnable queue on global queue.
3266 // Executed only by the owner P.
3268 runqputslow(P *p, G *gp, uint32 h, uint32 t)
3270 G *batch[nelem(p->runq)/2+1];
3273 // First, grab a batch from local queue.
3276 if(n != nelem(p->runq)/2)
3277 runtime·throw("runqputslow: queue is not full");
3279 batch[i] = p->runq[(h+i)%nelem(p->runq)];
3280 if(!runtime·cas(&p->runqhead, h, h+n)) // cas-release, commits consume
3283 // Link the goroutines.
3285 batch[i]->schedlink = batch[i+1];
3286 // Now put the batch on global queue.
3287 runtime·lock(&runtime·sched.lock);
3288 globrunqputbatch(batch[0], batch[n], n+1);
3289 runtime·unlock(&runtime·sched.lock);
3293 // Get g from local runnable queue.
3294 // Executed only by the owner P.
3302 h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with other consumers
3306 gp = p->runq[h%nelem(p->runq)];
3307 if(runtime·cas(&p->runqhead, h, h+1)) // cas-release, commits consume
3312 // Grabs a batch of goroutines from local runnable queue.
3313 // batch array must be of size nelem(p->runq)/2. Returns number of grabbed goroutines.
3314 // Can be executed by any P.
3316 runqgrab(P *p, G **batch)
3321 h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with other consumers
3322 t = runtime·atomicload(&p->runqtail); // load-acquire, synchronize with the producer
3327 if(n > nelem(p->runq)/2) // read inconsistent h and t
3330 batch[i] = p->runq[(h+i)%nelem(p->runq)];
3331 if(runtime·cas(&p->runqhead, h, h+n)) // cas-release, commits consume
3337 // Steal half of elements from local runnable queue of p2
3338 // and put onto local runnable queue of p.
3339 // Returns one of the stolen elements (or nil if failed).
3341 runqsteal(P *p, P *p2)
3344 G *batch[nelem(p->runq)/2];
3347 n = runqgrab(p2, batch);
3354 h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with consumers
3356 if(t - h + n >= nelem(p->runq))
3357 runtime·throw("runqsteal: runq overflow");
3358 for(i=0; i<n; i++, t++)
3359 p->runq[t%nelem(p->runq)] = batch[i];
3360 runtime·atomicstore(&p->runqtail, t); // store-release, makes the item available for consumption
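// Taken together, runqput/runqget/runqgrab implement a fixed-size ring:
// the owner P publishes at tail with a store-release, and any P consumes at
// head with a CAS. A condensed Go sketch of the same protocol (the names and
// the G type are placeholders, not runtime API):
//
//	type ring struct {
//		head, tail uint32 // tail is written only by the owner P
//		buf        [256]*G
//	}
//
//	func (r *ring) put(gp *G) bool { // owner only
//		h := atomic.LoadUint32(&r.head) // load-acquire
//		t := r.tail
//		if t-h >= uint32(len(r.buf)) {
//			return false // full: caller spills half to the global queue
//		}
//		r.buf[t%uint32(len(r.buf))] = gp
//		atomic.StoreUint32(&r.tail, t+1) // store-release: publish gp
//		return true
//	}
//
//	func (r *ring) get() *G { // owner or thief
//		for {
//			h := atomic.LoadUint32(&r.head)
//			t := atomic.LoadUint32(&r.tail)
//			if t == h {
//				return nil // empty
//			}
//			gp := r.buf[h%uint32(len(r.buf))]
//			if atomic.CompareAndSwapUint32(&r.head, h, h+1) { // commit consume
//				return gp
//			}
//		}
//	}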
3365 runtime·testSchedLocalQueue(void)
3371 p = (P*)runtime·mallocgc(sizeof(*p), nil, FlagNoScan);
3372 gs = (G*)runtime·mallocgc(nelem(p->runq)*sizeof(*gs), nil, FlagNoScan);
3374 for(i = 0; i < nelem(p->runq); i++) {
3375 if(runqget(p) != nil)
3376 runtime·throw("runq is not empty initially");
3377 for(j = 0; j < i; j++)
3379 for(j = 0; j < i; j++) {
3380 if(runqget(p) != &gs[i]) {
3381 runtime·printf("bad element at iter %d/%d\n", i, j);
3382 runtime·throw("bad element");
3385 if(runqget(p) != nil)
3386 runtime·throw("runq is not empty afterwards");
3391 runtime·testSchedLocalQueueSteal(void)
3397 p1 = (P*)runtime·mallocgc(sizeof(*p1), nil, FlagNoScan);
3398 p2 = (P*)runtime·mallocgc(sizeof(*p2), nil, FlagNoScan);
3399 gs = (G*)runtime·mallocgc(nelem(p1->runq)*sizeof(*gs), nil, FlagNoScan);
3401 for(i = 0; i < nelem(p1->runq); i++) {
3402 for(j = 0; j < i; j++) {
3404 runqput(p1, &gs[j]);
3406 gp = runqsteal(p2, p1);
3412 while(gp = runqget(p2)) {
3416 while(gp = runqget(p1))
3418 for(j = 0; j < i; j++) {
3419 if(gs[j].sig != 1) {
3420 runtime·printf("bad element %d(%d) at iter %d\n", j, gs[j].sig, i);
3421 runtime·throw("bad element");
3424 if(s != i/2 && s != i/2+1) {
3425 runtime·printf("bad steal %d, want %d or %d, iter %d\n",
3427 runtime·throw("bad steal");
3433 runtime·setmaxthreads_m(void)
3438 in = g->m->scalararg[0];
3440 runtime·lock(&runtime·sched.lock);
3441 out = runtime·sched.maxmcount;
3442 runtime·sched.maxmcount = in;
3444 runtime·unlock(&runtime·sched.lock);
3446 g->m->scalararg[0] = out;
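// The exported wrapper is runtime/debug.SetMaxThreads. Usage sketch:
//
//	prev := debug.SetMaxThreads(20000) // returns the previous limit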
3449 static int8 experiment[] = GOEXPERIMENT; // defined in zaexperiment.h
3452 haveexperiment(int8 *name)
3456 for(i=0; i<sizeof(experiment); i++) {
3457 if((i == 0 || experiment[i-1] == ',') && experiment[i] == name[0]) {
3458 for(j=0; name[j]; j++)
3459 if(experiment[i+j] != name[j])
3461 if(experiment[i+j] != '\0' && experiment[i+j] != ',')
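// The scan above is a comma-separated membership test. Equivalent logic as an
// ordinary Go sketch (goexperiment stands in for the GOEXPERIMENT string;
// assumes the standard strings package):
//
//	func haveExperiment(name string) bool {
//		for _, x := range strings.Split(goexperiment, ",") {
//			if x == name {
//				return true
//			}
//		}
//		return false
//	}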
3470 #pragma textflag NOSPLIT
3472 sync·runtime_procPin(intptr p)
3477 // Disable preemption.
3483 #pragma textflag NOSPLIT
3485 sync·runtime_procUnpin()
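// procPin/procUnpin are exported to package sync, where sync.Pool uses them
// to keep a goroutine on its current P while it touches the per-P pool shards.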