1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 #include "runtime.h"
6 #include "arch_GOARCH.h"
7 #include "zaexperiment.h"
8 #include "malloc.h"
9 #include "stack.h"
10 #include "race.h"
11 #include "type.h"
12 #include "mgc0.h"
13 #include "textflag.h"
14
15 // Goroutine scheduler
16 // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
17 //
18 // The main concepts are:
19 // G - goroutine.
20 // M - worker thread, or machine.
21 // P - processor, a resource that is required to execute Go code.
22 //     M must have an associated P to execute Go code; however, it can be
23 //     blocked or in a syscall without an associated P.
24 //
25 // Design doc at http://golang.org/s/go11sched.
26
27 enum
28 {
29         // Number of goroutine ids to grab from runtime·sched.goidgen to local per-P cache at once.
30         // 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
31         GoidCacheBatch = 16,
32 };
33
34 SchedT  runtime·sched;
35 int32   runtime·gomaxprocs;
36 uint32  runtime·needextram;
37 bool    runtime·iscgo;
38 M       runtime·m0;
39 G       runtime·g0;    // idle goroutine for m0
40 G*      runtime·lastg;
41 M*      runtime·allm;
42 M*      runtime·extram;
43 P*      runtime·allp[MaxGomaxprocs+1];
44 int8*   runtime·goos;
45 int32   runtime·ncpu;
46 int32   runtime·newprocs;
47
48 Mutex runtime·allglock;        // the following vars are protected by this lock or by stoptheworld
49 G**     runtime·allg;
50 Slice   runtime·allgs;
51 uintptr runtime·allglen;
52 ForceGCState    runtime·forcegc;
53
54 void runtime·mstart(void);
55 static void runqput(P*, G*);
56 static G* runqget(P*);
57 static bool runqputslow(P*, G*, uint32, uint32);
58 static G* runqsteal(P*, P*);
59 static void mput(M*);
60 static M* mget(void);
61 static void mcommoninit(M*);
62 static void schedule(void);
63 static void procresize(int32);
64 static void acquirep(P*);
65 static P* releasep(void);
66 static void newm(void(*)(void), P*);
67 static void stopm(void);
68 static void startm(P*, bool);
69 static void handoffp(P*);
70 static void wakep(void);
71 static void stoplockedm(void);
72 static void startlockedm(G*);
73 static void sysmon(void);
74 static uint32 retake(int64);
75 static void incidlelocked(int32);
76 static void checkdead(void);
77 static void exitsyscall0(G*);
78 void runtime·park_m(G*);
79 static void goexit0(G*);
80 static void gfput(P*, G*);
81 static G* gfget(P*);
82 static void gfpurge(P*);
83 static void globrunqput(G*);
84 static void globrunqputbatch(G*, G*, int32);
85 static G* globrunqget(P*, int32);
86 static P* pidleget(void);
87 static void pidleput(P*);
88 static void injectglist(G*);
89 static bool preemptall(void);
90 static bool preemptone(P*);
91 static bool exitsyscallfast(void);
92 static bool haveexperiment(int8*);
93 void runtime·allgadd(G*);
94 static void dropg(void);
95
96 extern String runtime·buildVersion;
97
98 // For cgo-using programs with external linking,
99 // export "main" (defined in assembly) so that libc can handle basic
100 // C runtime startup and call the Go program as if it were
101 // the C main function.
102 #pragma cgo_export_static main
103
104 // Filled in by dynamic linker when Cgo is available.
105 void (*_cgo_init)(void);
106 void (*_cgo_malloc)(void);
107 void (*_cgo_free)(void);
108
109 // Copy for Go code.
110 void* runtime·cgoMalloc;
111 void* runtime·cgoFree;
112
113 // The bootstrap sequence is:
114 //
115 //      call osinit
116 //      call schedinit
117 //      make & queue new G
118 //      call runtime·mstart
119 //
120 // The new G calls runtime·main.
121 void
122 runtime·schedinit(void)
123 {
124         int32 n, procs;
125         byte *p;
126
127         // raceinit must be the first call to race detector.
128         // In particular, it must be done before mallocinit below calls racemapshadow.
129         if(raceenabled)
130                 g->racectx = runtime·raceinit();
131
132         runtime·sched.maxmcount = 10000;
133
134         runtime·tracebackinit();
135         runtime·symtabinit();
136         runtime·stackinit();
137         runtime·mallocinit();
138         mcommoninit(g->m);
139         
140         runtime·goargs();
141         runtime·goenvs();
142         runtime·parsedebugvars();
143         runtime·gcinit();
144
145         runtime·sched.lastpoll = runtime·nanotime();
146         procs = 1;
147         p = runtime·getenv("GOMAXPROCS");
148         if(p != nil && (n = runtime·atoi(p)) > 0) {
149                 if(n > MaxGomaxprocs)
150                         n = MaxGomaxprocs;
151                 procs = n;
152         }
153         procresize(procs);
154
155         if(runtime·buildVersion.str == nil) {
156                 // Condition should never trigger.  This code just serves
157                 // to ensure runtime·buildVersion is kept in the resulting binary.
158                 runtime·buildVersion.str = (uint8*)"unknown";
159                 runtime·buildVersion.len = 7;
160         }
161
162         runtime·cgoMalloc = _cgo_malloc;
163         runtime·cgoFree = _cgo_free;
164 }
165
166 void
167 runtime·newsysmon(void)
168 {
169         newm(sysmon, nil);
170 }
171
172 static void
173 dumpgstatus(G* gp)
174 {
175         runtime·printf("runtime: gp: gp=%p, goid=%D, gp->atomicstatus=%x\n", gp, gp->goid, runtime·readgstatus(gp));
176         runtime·printf("runtime:  g:  g=%p, goid=%D,  g->atomicstatus=%x\n", g, g->goid, runtime·readgstatus(g));
177 }
178
179 static void
180 checkmcount(void)
181 {
182         // sched lock is held
183         if(runtime·sched.mcount > runtime·sched.maxmcount){
184                 runtime·printf("runtime: program exceeds %d-thread limit\n", runtime·sched.maxmcount);
185                 runtime·throw("thread exhaustion");
186         }
187 }
188
189 static void
190 mcommoninit(M *mp)
191 {
192         // g0 stack won't make sense for user (and is not necessarily unwindable).
193         if(g != g->m->g0)
194                 runtime·callers(1, mp->createstack, nelem(mp->createstack));
195
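            // Seed this M's fastrand state, which backs runtime·fastrand1.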
196         mp->fastrand = 0x49f6428aUL + mp->id + runtime·cputicks();
197
198         runtime·lock(&runtime·sched.lock);
199         mp->id = runtime·sched.mcount++;
200         checkmcount();
201         runtime·mpreinit(mp);
202         if(mp->gsignal)
203                 mp->gsignal->stackguard1 = mp->gsignal->stack.lo + StackGuard;
204
205         // Add to runtime·allm so garbage collector doesn't free g->m
206         // when it is just in a register or thread-local storage.
207         mp->alllink = runtime·allm;
208         // runtime·NumCgoCall() iterates over allm w/o schedlock,
209         // so we need to publish it safely.
210         runtime·atomicstorep(&runtime·allm, mp);
211         runtime·unlock(&runtime·sched.lock);
212 }
213
214 // Mark gp ready to run.
215 void
216 runtime·ready(G *gp)
217 {
218         uint32 status;
219
220         status = runtime·readgstatus(gp);
221         // Mark runnable.
222         g->m->locks++;  // disable preemption because it can be holding p in a local var
223         if((status&~Gscan) != Gwaiting){
224                 dumpgstatus(gp);
225                 runtime·throw("bad g->status in ready");
226         }
227         // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
228         runtime·casgstatus(gp, Gwaiting, Grunnable);
229         runqput(g->m->p, gp);
230         if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0)  // TODO: fast atomic
231                 wakep();
232         g->m->locks--;
233         if(g->m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
234                 g->stackguard0 = StackPreempt;
235 }
236
237 void
238 runtime·ready_m(void)
239 {
240         G *gp;
241
242         gp = g->m->ptrarg[0];
243         g->m->ptrarg[0] = nil;
244         runtime·ready(gp);
245 }
246
247 int32
248 runtime·gcprocs(void)
249 {
250         int32 n;
251
252         // Figure out how many CPUs to use during GC.
253         // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
254         runtime·lock(&runtime·sched.lock);
255         n = runtime·gomaxprocs;
256         if(n > runtime·ncpu)
257                 n = runtime·ncpu;
258         if(n > MaxGcproc)
259                 n = MaxGcproc;
260         if(n > runtime·sched.nmidle+1) // one M is currently running
261                 n = runtime·sched.nmidle+1;
262         runtime·unlock(&runtime·sched.lock);
263         return n;
264 }
265
266 static bool
267 needaddgcproc(void)
268 {
269         int32 n;
270
271         runtime·lock(&runtime·sched.lock);
272         n = runtime·gomaxprocs;
273         if(n > runtime·ncpu)
274                 n = runtime·ncpu;
275         if(n > MaxGcproc)
276                 n = MaxGcproc;
277         n -= runtime·sched.nmidle+1; // one M is currently running
278         runtime·unlock(&runtime·sched.lock);
279         return n > 0;
280 }
281
282 void
283 runtime·helpgc(int32 nproc)
284 {
285         M *mp;
286         int32 n, pos;
287
288         runtime·lock(&runtime·sched.lock);
289         pos = 0;
290         for(n = 1; n < nproc; n++) {  // one M is currently running
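                    // Don't hand out the mcache the current M is using; skip past that P.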
291                 if(runtime·allp[pos]->mcache == g->m->mcache)
292                         pos++;
293                 mp = mget();
294                 if(mp == nil)
295                         runtime·throw("runtime·gcprocs inconsistency");
296                 mp->helpgc = n;
297                 mp->mcache = runtime·allp[pos]->mcache;
298                 pos++;
299                 runtime·notewakeup(&mp->park);
300         }
301         runtime·unlock(&runtime·sched.lock);
302 }
303
304 // Similar to stoptheworld but best-effort and can be called several times.
305 // There is no reverse operation; it is used during crashing.
306 // This function must not lock any mutexes.
307 void
308 runtime·freezetheworld(void)
309 {
310         int32 i;
311
312         if(runtime·gomaxprocs == 1)
313                 return;
314         // stopwait and preemption requests can be lost
315         // due to races with concurrently executing threads,
316         // so try several times
317         for(i = 0; i < 5; i++) {
318                 // this should tell the scheduler to not start any new goroutines
319                 runtime·sched.stopwait = 0x7fffffff;
320                 runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1);
321                 // this should stop running goroutines
322                 if(!preemptall())
323                         break;  // no running goroutines
324                 runtime·usleep(1000);
325         }
326         // to be sure
327         runtime·usleep(1000);
328         preemptall();
329         runtime·usleep(1000);
330 }
331
332 static bool
333 isscanstatus(uint32 status)
334 {
335         if(status == Gscan)
336                 runtime·throw("isscanstatus: Bad status Gscan");
337         return (status&Gscan) == Gscan;
338 }
339
340 // All reads and writes of g's status go through readgstatus, casgstatus,
341 // castogscanstatus, and casfromgscanstatus.
342 #pragma textflag NOSPLIT
343 uint32
344 runtime·readgstatus(G *gp)
345 {
346         return runtime·atomicload(&gp->atomicstatus);
347 }
348
349 // The Gscanstatuses are acting like locks and this releases them.
350 // If it proves to be a performance hit we should be able to make these
351 // simple atomic stores but for now we are going to throw if
352 // we see an inconsistent state.
353 void
354 runtime·casfromgscanstatus(G *gp, uint32 oldval, uint32 newval)
355 {
356         bool success = false;
357
358         // Check that transition is valid.
359         switch(oldval) {
360         case Gscanrunnable:
361         case Gscanwaiting:
362         case Gscanrunning:
363         case Gscansyscall:
364                 if(newval == (oldval&~Gscan))
365                         success = runtime·cas(&gp->atomicstatus, oldval, newval);
366                 break;
367         case Gscanenqueue:
368                 if(newval == Gwaiting)
369                         success = runtime·cas(&gp->atomicstatus, oldval, newval);
370                 break;
371         }       
372         if(!success){
373                 runtime·printf("runtime: casfromgscanstatus failed gp=%p, oldval=%d, newval=%d\n",  
374                         gp, oldval, newval);
375                 dumpgstatus(gp);
376                 runtime·throw("casfromgscanstatus: gp->status is not in scan state");
377         }
378 }
379
380 // This will return false if the gp is not in the expected status and the cas fails. 
381 // This acts like a lock acquire while casfromgscanstatus acts like a lock release.
382 bool
383 runtime·castogscanstatus(G *gp, uint32 oldval, uint32 newval)
384 {
385         switch(oldval) {
386         case Grunnable:
387         case Gwaiting:
388         case Gsyscall:
389                 if(newval == (oldval|Gscan))
390                         return runtime·cas(&gp->atomicstatus, oldval, newval);
391                 break;
392         case Grunning:
393                 if(newval == Gscanrunning || newval == Gscanenqueue)
394                         return runtime·cas(&gp->atomicstatus, oldval, newval);
395                 break;   
396         }
397
398         runtime·printf("runtime: castogscanstatus oldval=%d newval=%d\n", oldval, newval);
399         runtime·throw("castogscanstatus");
400         return false; // not reached
401 }
402
403 static void badcasgstatus(void);
404 static void helpcasgstatus(void);
405
406 // If asked to move to or from a Gscan status, this will throw. Use castogscanstatus
407 // and casfromgscanstatus instead.
408 // casgstatus will loop if gp->atomicstatus is in a Gscan status until the routine that
409 // put it in the Gscan state is finished.
410 #pragma textflag NOSPLIT
411 void
412 runtime·casgstatus(G *gp, uint32 oldval, uint32 newval)
413 {
414         void (*fn)(void);
415
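            // Bad arguments (a scan bit set, or no state change): report them via onM
            // so the print and throw run on the g0 stack, since this function is NOSPLIT.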
416         if((oldval&Gscan) || (newval&Gscan) || oldval == newval) {
417                 g->m->scalararg[0] = oldval;
418                 g->m->scalararg[1] = newval;
419                 fn = badcasgstatus;
420                 runtime·onM(&fn);
421         }
422
423         // loop if gp->atomicstatus is in a scan state giving
424         // GC time to finish and change the state to oldval.
425         while(!runtime·cas(&gp->atomicstatus, oldval, newval)) {
426
427         }       
428 }
429
430 static void
431 badcasgstatus(void)
432 {
433         uint32 oldval, newval;
434         
435         oldval = g->m->scalararg[0];
436         newval = g->m->scalararg[1];
437         g->m->scalararg[0] = 0;
438         g->m->scalararg[1] = 0;
439
440         runtime·printf("casgstatus: oldval=%d, newval=%d\n", oldval, newval);
441         runtime·throw("casgstatus: bad incoming values");
442 }
443
444 static void
445 helpcasgstatus(void)
446 {
447         G *gp;
448         
449         gp = g->m->ptrarg[0];
450         g->m->ptrarg[0] = 0;
451         runtime·gcphasework(gp);
452 }
453
454 // stopg ensures that gp is stopped at a GC safe point where its stack can be scanned,
455 // or, in the context of a moving collector, the pointers can be flipped from pointing
456 // to old objects to pointing to new objects.
457 // If stopg returns true, the caller knows gp is at a GC safe point and will remain there until
458 // the caller calls restartg.
459 // If stopg returns false, the caller is not responsible for calling restartg. This can happen
460 // if another thread, either gp itself or another GC thread, is taking responsibility
461 // for doing the GC work related to this goroutine.
462 bool
463 runtime·stopg(G *gp)
464 {
465         uint32 s;
466
467         for(;;) {
468                 if(gp->gcworkdone)
469                         return false;
470
471                 s = runtime·readgstatus(gp);
472                 switch(s) {
473                 default:
474                         dumpgstatus(gp);
475                         runtime·throw("stopg: gp->atomicstatus is not valid");
476
477                 case Gdead:
478                         return false;
479
480                 case Gcopystack:
481                         // Loop until a new stack is in place.
482                         break;
483
484                 case Grunnable:
485                 case Gsyscall:
486                 case Gwaiting:
487                         // Claim goroutine by setting scan bit.
488                         if(!runtime·castogscanstatus(gp, s, s|Gscan))
489                                 break;
490                         // In scan state, do work.
491                         runtime·gcphasework(gp);
492                         return true;
493
494                 case Gscanrunnable:
495                 case Gscanwaiting:
496                 case Gscansyscall:
497                         // Goroutine already claimed by another GC helper.
498                         return false;
499
500                 case Grunning:
501                         if(runtime·gcphase == GCscan) {
502                                 gp->gcworkdone = true;
503                                 return false;
504                                 // Running goroutines are not scanned during the
505                                 // GCscan phase; we only scan non-running goroutines.
506                         }
507                                 
508                         // Claim goroutine, so we aren't racing with a status
509                         // transition away from Grunning.
510                         if(!runtime·castogscanstatus(gp, Grunning, Gscanrunning))
511                                 break;
512
513                         // Mark gp for preemption.
514                         if(!gp->gcworkdone) {
515                                 gp->preemptscan = true;
516                                 gp->preempt = true;
517                                 gp->stackguard0 = StackPreempt;
518                         }
519
520                         // Unclaim.
521                         runtime·casfromgscanstatus(gp, Gscanrunning, Grunning);
522                         return false;
523                 }
524         }
525         // Should not be here....
526 }
527
528 // The GC requests that this routine be moved from a scanmumble state to a mumble state.
529 void 
530 runtime·restartg(G *gp)
531 {
532         uint32 s;
533
534         s = runtime·readgstatus(gp);
535         switch(s) {
536         default:
537                 dumpgstatus(gp); 
538                 runtime·throw("restartg: unexpected status");
539
540         case Gdead:
541                 break;
542
543         case Gscanrunnable:
544         case Gscanwaiting:
545         case Gscansyscall:
546                 runtime·casfromgscanstatus(gp, s, s&~Gscan);
547                 break;
548
549         case Gscanenqueue:
550                 // Scan is now completed.
551                 // Goroutine now needs to be made runnable.
552                 // We put it on the global run queue; ready blocks on the global scheduler lock.
553                 runtime·casfromgscanstatus(gp, Gscanenqueue, Gwaiting);
554                 if(gp != g->m->curg)
555                         runtime·throw("processing Gscanenqueue on wrong m");
556                 dropg();
557                 runtime·ready(gp);
558                 break;
559         }
560 }
561
562 static void
563 stopscanstart(G* gp)
564 {
565         if(g == gp)
566                 runtime·throw("GC not moved to G0");
567         if(runtime·stopg(gp)) {
568                 if(!isscanstatus(runtime·readgstatus(gp))) {
569                         dumpgstatus(gp);
570                         runtime·throw("GC not in scan state");
571                 }
572                 runtime·restartg(gp);
573         }
574 }
575
576 // Runs on g0 and does the actual work after putting the g back on the run queue.
577 static void
578 mquiesce(G *gpmaster)
579 {
580         G* gp;
581         uint32 i;
582         uint32 status;
583         uint32 activeglen;
584
585         // enqueue the calling goroutine.
586         runtime·restartg(gpmaster);
587
588         activeglen = runtime·allglen;
589         for(i = 0; i < activeglen; i++) {
590                 gp = runtime·allg[i];
591                 if(runtime·readgstatus(gp) == Gdead) 
592                         gp->gcworkdone = true; // noop scan.
593                 else 
594                         gp->gcworkdone = false; 
595                 stopscanstart(gp); 
596         }
597
598         // Check that each G's gcwork (such as scanning) has been done. If not, do it now.
599         // You can end up doing work here if the page trap on a Grunning goroutine has
600         // not been sprung or in some race situations. For example, a runnable goroutine
601         // goes dead and is started up again with gp->gcworkdone set to false.
602         for(i = 0; i < activeglen; i++) {
603                 gp = runtime·allg[i];
604                 while (!gp->gcworkdone) {
605                         status = runtime·readgstatus(gp);
606                         if(status == Gdead) {
607                                 gp->gcworkdone = true; // scan is a noop
608                                 break;
609                                 //do nothing, scan not needed. 
610                         }
611                         if(status == Grunning && gp->stackguard0 == (uintptr)StackPreempt && runtime·notetsleep(&runtime·sched.stopnote, 100*1000)) // nanosecond arg 
612                                 runtime·noteclear(&runtime·sched.stopnote);
613                         else 
614                                 stopscanstart(gp);
615                 }
616         }
617
618         for(i = 0; i < activeglen; i++) {
619                 gp = runtime·allg[i];
620                 status = runtime·readgstatus(gp);
621                 if(isscanstatus(status)) {
622                         runtime·printf("mstopandscang:bottom: post scan bad status gp=%p has status %x\n", gp, status);
623                         dumpgstatus(gp);
624                 }
625                 if(!gp->gcworkdone && status != Gdead) {
626                         runtime·printf("mstopandscang:bottom: post scan gp=%p->gcworkdone still false\n", gp);
627                         dumpgstatus(gp);
628                 }
629         }
630
631         schedule(); // Never returns.
632 }
633
634 // quiesce moves all the goroutines to a GC safepoint, which for now is at a preemption point.
635 // If the global runtime·gcphase is GCmark, quiesce will ensure that all of the goroutines' stacks
636 // have been scanned before it returns.
637 void
638 runtime·quiesce(G* mastergp)
639 {
640         void (*fn)(G*);
641
642         runtime·castogscanstatus(mastergp, Grunning, Gscanenqueue);
643         // Now move this to the g0 (aka m) stack.
644         // g0 will potentially scan this thread and put mastergp on the runqueue 
645         fn = mquiesce;
646         runtime·mcall(&fn);
647 }
648
649 // This is used by the GC as well as the routines that do stack dumps. In the case
650 // of GC, all the goroutines can be reliably stopped. This is not always the case
651 // when the system is panicking or being exited.
652 void
653 runtime·stoptheworld(void)
654 {
655         int32 i;
656         uint32 s;
657         P *p;
658         bool wait;
659
660         // If we hold a lock, then we won't be able to stop another M
661         // that is blocked trying to acquire the lock.
662         if(g->m->locks > 0)
663                 runtime·throw("stoptheworld: holding locks");
664
665         runtime·lock(&runtime·sched.lock);
666         runtime·sched.stopwait = runtime·gomaxprocs;
667         runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1);
668         preemptall();
669         // stop current P
670         g->m->p->status = Pgcstop; // Pgcstop is only diagnostic.
671         runtime·sched.stopwait--;
672         // try to retake all P's in Psyscall status
673         for(i = 0; i < runtime·gomaxprocs; i++) {
674                 p = runtime·allp[i];
675                 s = p->status;
676                 if(s == Psyscall && runtime·cas(&p->status, s, Pgcstop))
677                         runtime·sched.stopwait--;
678         }
679         // stop idle P's
680         while(p = pidleget()) {
681                 p->status = Pgcstop;
682                 runtime·sched.stopwait--;
683         }
684         wait = runtime·sched.stopwait > 0;
685         runtime·unlock(&runtime·sched.lock);
686
687         // wait for remaining P's to stop voluntarily
688         if(wait) {
689                 for(;;) {
690                         // wait for 100us, then try to re-preempt in case of any races
691                         if(runtime·notetsleep(&runtime·sched.stopnote, 100*1000)) {
692                                 runtime·noteclear(&runtime·sched.stopnote);
693                                 break;
694                         }
695                         preemptall();
696                 }
697         }
698         if(runtime·sched.stopwait)
699                 runtime·throw("stoptheworld: not stopped");
700         for(i = 0; i < runtime·gomaxprocs; i++) {
701                 p = runtime·allp[i];
702                 if(p->status != Pgcstop)
703                         runtime·throw("stoptheworld: not stopped");
704         }
705 }
706
707 static void
708 mhelpgc(void)
709 {
710         g->m->helpgc = -1;
711 }
712
713 void
714 runtime·starttheworld(void)
715 {
716         P *p, *p1;
717         M *mp;
718         G *gp;
719         bool add;
720
721         g->m->locks++;  // disable preemption because it can be holding p in a local var
722         gp = runtime·netpoll(false);  // non-blocking
723         injectglist(gp);
724         add = needaddgcproc();
725         runtime·lock(&runtime·sched.lock);
726         if(runtime·newprocs) {
727                 procresize(runtime·newprocs);
728                 runtime·newprocs = 0;
729         } else
730                 procresize(runtime·gomaxprocs);
731         runtime·sched.gcwaiting = 0;
732
733         p1 = nil;
734         while(p = pidleget()) {
735                 // procresize() puts p's with work at the beginning of the list.
736                 // Once we reach a p without a run queue, the rest don't have one either.
737                 if(p->runqhead == p->runqtail) {
738                         pidleput(p);
739                         break;
740                 }
741                 p->m = mget();
742                 p->link = p1;
743                 p1 = p;
744         }
745         if(runtime·sched.sysmonwait) {
746                 runtime·sched.sysmonwait = false;
747                 runtime·notewakeup(&runtime·sched.sysmonnote);
748         }
749         runtime·unlock(&runtime·sched.lock);
750
751         while(p1) {
752                 p = p1;
753                 p1 = p1->link;
754                 if(p->m) {
755                         mp = p->m;
756                         p->m = nil;
757                         if(mp->nextp)
758                                 runtime·throw("starttheworld: inconsistent mp->nextp");
759                         mp->nextp = p;
760                         runtime·notewakeup(&mp->park);
761                 } else {
762                         // Start M to run P.  Do not start another M below.
763                         newm(nil, p);
764                         add = false;
765                 }
766         }
767
768         if(add) {
769                 // If GC could have used another helper proc, start one now,
770                 // in the hope that it will be available next time.
771                 // It would have been even better to start it before the collection,
772                 // but doing so requires allocating memory, so it's tricky to
773                 // coordinate.  This lazy approach works out in practice:
774                 // we don't mind if the first couple gc rounds don't have quite
775                 // the maximum number of procs.
776                 newm(mhelpgc, nil);
777         }
778         g->m->locks--;
779         if(g->m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
780                 g->stackguard0 = StackPreempt;
781 }
782
783 static void mstart(void);
784
785 // Called to start an M.
786 #pragma textflag NOSPLIT
787 void
788 runtime·mstart(void)
789 {
790         uintptr x, size;
791         
792         if(g->stack.lo == 0) {
793                 // Initialize stack bounds from system stack.
794                 // Cgo may have left stack size in stack.hi.
795                 size = g->stack.hi;
796                 if(size == 0)
797                         size = 8192;
798                 g->stack.hi = (uintptr)&x;
799                 g->stack.lo = g->stack.hi - size + 1024;
800         }
801         
802         // Initialize stack guards so that we can start calling
803         // both Go and C functions with stack growth prologues.
804         g->stackguard0 = g->stack.lo + StackGuard;
805         g->stackguard1 = g->stackguard0;
806         mstart();
807 }
808
809 static void
810 mstart(void)
811 {
812         if(g != g->m->g0)
813                 runtime·throw("bad runtime·mstart");
814
815         // Record top of stack for use by mcall.
816         // Once we call schedule we're never coming back,
817         // so other calls can reuse this stack space.
818         runtime·gosave(&g->m->g0->sched);
819         g->m->g0->sched.pc = (uintptr)-1;  // make sure it is never used
820         runtime·asminit();
821         runtime·minit();
822
823         // Install signal handlers; after minit so that minit can
824         // prepare the thread to be able to handle the signals.
825         if(g->m == &runtime·m0)
826                 runtime·initsig();
827         
828         if(g->m->mstartfn)
829                 g->m->mstartfn();
830
831         if(g->m->helpgc) {
832                 g->m->helpgc = 0;
833                 stopm();
834         } else if(g->m != &runtime·m0) {
835                 acquirep(g->m->nextp);
836                 g->m->nextp = nil;
837         }
838         schedule();
839
840         // TODO(brainman): This point is never reached, because scheduler
841         // does not release os threads at the moment. But once this path
842         // is enabled, we must remove our seh here.
843 }
844
845 // When running with cgo, we call _cgo_thread_start
846 // to start threads for us so that we can play nicely with
847 // foreign code.
848 void (*_cgo_thread_start)(void*);
849
850 typedef struct CgoThreadStart CgoThreadStart;
851 struct CgoThreadStart
852 {
853         G *g;
854         uintptr *tls;
855         void (*fn)(void);
856 };
857
858 M *runtime·newM(void); // in proc.go
859
860 // Allocate a new m unassociated with any thread.
861 // Can use p for allocation context if needed.
862 M*
863 runtime·allocm(P *p)
864 {
865         M *mp;
866
867         g->m->locks++;  // disable GC because it can be called from sysmon
868         if(g->m->p == nil)
869                 acquirep(p);  // temporarily borrow p for mallocs in this function
870         mp = runtime·newM();
871         mcommoninit(mp);
872
873         // In case of cgo or Solaris, pthread_create will make us a stack.
874         // Windows and Plan 9 will lay out the sched stack on the OS stack.
875         if(runtime·iscgo || Solaris || Windows || Plan9)
876                 mp->g0 = runtime·malg(-1);
877         else
878                 mp->g0 = runtime·malg(8192);
879         runtime·writebarrierptr_nostore(&mp->g0, mp->g0);
880         mp->g0->m = mp;
881         runtime·writebarrierptr_nostore(&mp->g0->m, mp->g0->m);
882
883         if(p == g->m->p)
884                 releasep();
885         g->m->locks--;
886         if(g->m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
887                 g->stackguard0 = StackPreempt;
888
889         return mp;
890 }
891
892 G *runtime·newG(void); // in proc.go
893
894 static G*
895 allocg(void)
896 {
897         return runtime·newG();
898 }
899
900 static M* lockextra(bool nilokay);
901 static void unlockextra(M*);
902
903 // needm is called when a cgo callback happens on a
904 // thread without an m (a thread not created by Go).
905 // In this case, needm is expected to find an m to use
906 // and return with m, g initialized correctly.
907 // Since m and g are not set now (likely nil, but see below)
908 // needm is limited in what routines it can call. In particular
909 // it can only call nosplit functions (textflag 7) and cannot
910 // do any scheduling that requires an m.
911 //
912 // In order to avoid needing heavy lifting here, we adopt
913 // the following strategy: there is a stack of available m's
914 // that can be stolen. Using compare-and-swap
915 // to pop from the stack has ABA races, so we simulate
916 // a lock by doing an exchange (via casp) to steal the stack
917 // head and replace the top pointer with MLOCKED (1).
918 // This serves as a simple spin lock that we can use even
919 // without an m. The thread that locks the stack in this way
920 // unlocks the stack by storing a valid stack head pointer.
921 //
922 // In order to make sure that there is always an m structure
923 // available to be stolen, we maintain the invariant that there
924 // is always one more than needed. At the beginning of the
925 // program (if cgo is in use) the list is seeded with a single m.
926 // If needm finds that it has taken the last m off the list, its job
927 // is - once it has installed its own m so that it can do things like
928 // allocate memory - to create a spare m and put it on the list.
929 //
930 // Each of these extra m's also has a g0 and a curg that are
931 // pressed into service as the scheduling stack and current
932 // goroutine for the duration of the cgo callback.
933 //
934 // When the callback is done with the m, it calls dropm to
935 // put the m back on the list.
936 #pragma textflag NOSPLIT
937 void
938 runtime·needm(byte x)
939 {
940         M *mp;
941
942         if(runtime·needextram) {
943                 // Can happen if C/C++ code calls Go from a global ctor.
944                 // Can not throw, because scheduler is not initialized yet.
945                 runtime·write(2, "fatal error: cgo callback before cgo call\n",
946                         sizeof("fatal error: cgo callback before cgo call\n")-1);
947                 runtime·exit(1);
948         }
949
950         // Lock extra list, take head, unlock popped list.
951         // nilokay=false is safe here because of the invariant above,
952         // that the extra list always contains or will soon contain
953         // at least one m.
954         mp = lockextra(false);
955
956         // Set needextram when we've just emptied the list,
957         // so that the eventual call into cgocallbackg will
958         // allocate a new m for the extra list. We delay the
959         // allocation until then so that it can be done
960         // after exitsyscall makes sure it is okay to be
961         // running at all (that is, there's no garbage collection
962         // running right now).
963         mp->needextram = mp->schedlink == nil;
964         unlockextra(mp->schedlink);
965
966         // Install g (= m->g0) and set the stack bounds
967         // to match the current stack. We don't actually know
968         // how big the stack is, like we don't know how big any
969         // scheduling stack is, but we assume there's at least 32 kB,
970         // which is more than enough for us.
971         runtime·setg(mp->g0);
972         g->stack.hi = (uintptr)(&x + 1024);
973         g->stack.lo = (uintptr)(&x - 32*1024);
974         g->stackguard0 = g->stack.lo + StackGuard;
975
976         // Initialize this thread to use the m.
977         runtime·asminit();
978         runtime·minit();
979 }
980
981 // newextram allocates an m and puts it on the extra list.
982 // It is called with a working local m, so that it can do things
983 // like call schedlock and allocate.
984 void
985 runtime·newextram(void)
986 {
987         M *mp, *mnext;
988         G *gp;
989
990         // Create extra goroutine locked to extra m.
991         // The goroutine is the context in which the cgo callback will run.
992         // The sched.pc will never be returned to, but setting it to
993         // runtime.goexit makes clear to the traceback routines where
994         // the goroutine stack ends.
995         mp = runtime·allocm(nil);
996         gp = runtime·malg(4096);
997         gp->sched.pc = (uintptr)runtime·goexit + PCQuantum;
998         gp->sched.sp = gp->stack.hi;
999         gp->sched.sp -= 4*sizeof(uintreg); // extra space in case of reads slightly beyond frame
1000         gp->sched.lr = 0;
1001         gp->sched.g = gp;
1002         gp->syscallpc = gp->sched.pc;
1003         gp->syscallsp = gp->sched.sp;
1004         // malg returns status as Gidle, change to Gsyscall before adding to allg
1005         // where GC will see it.
1006         runtime·casgstatus(gp, Gidle, Gsyscall);
1007         gp->m = mp;
1008         mp->curg = gp;
1009         mp->locked = LockInternal;
1010         mp->lockedg = gp;
1011         gp->lockedm = mp;
1012         gp->goid = runtime·xadd64(&runtime·sched.goidgen, 1);
1013         if(raceenabled)
1014                 gp->racectx = runtime·racegostart(runtime·newextram);
1015         // put on allg for garbage collector
1016         runtime·allgadd(gp);
1017
1018         // Add m to the extra list.
1019         mnext = lockextra(true);
1020         mp->schedlink = mnext;
1021         unlockextra(mp);
1022 }
1023
1024 // dropm is called when a cgo callback has called needm but is now
1025 // done with the callback and returning back into the non-Go thread.
1026 // It puts the current m back onto the extra list.
1027 //
1028 // The main expense here is the call to signalstack to release the
1029 // m's signal stack, and then the call to needm on the next callback
1030 // from this thread. It is tempting to try to save the m for next time,
1031 // which would eliminate both these costs, but there might not be
1032 // a next time: the current thread (which Go does not control) might exit.
1033 // If we saved the m for that thread, there would be an m leak each time
1034 // such a thread exited. Instead, we acquire and release an m on each
1035 // call. These should typically not be scheduling operations, just a few
1036 // atomics, so the cost should be small.
1037 //
1038 // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
1039 // variable using pthread_key_create. Unlike the pthread keys we already use
1040 // on OS X, this dummy key would never be read by Go code. It would exist
1041 // only so that we could register a thread-exit-time destructor.
1042 // That destructor would put the m back onto the extra list.
1043 // This is purely a performance optimization. The current version,
1044 // in which dropm happens on each cgo call, is still correct too.
1045 // We may have to keep the current version on systems with cgo
1046 // but without pthreads, like Windows.
1047 void
1048 runtime·dropm(void)
1049 {
1050         M *mp, *mnext;
1051
1052         // Undo whatever initialization minit did during needm.
1053         runtime·unminit();
1054
1055         // Clear m and g, and return m to the extra list.
1056         // After the call to setg we can only call nosplit functions.
1057         mp = g->m;
1058         runtime·setg(nil);
1059
1060         mnext = lockextra(true);
1061         mp->schedlink = mnext;
1062         unlockextra(mp);
1063 }
1064
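// MLOCKED is the sentinel stored in runtime·extram while the extra M list is
// locked (see lockextra/unlockextra and the comment above needm).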
1065 #define MLOCKED 1
1066
1067 // lockextra locks the extra list and returns the list head.
1068 // The caller must unlock the list by storing a new list head
1069 // to runtime.extram. If nilokay is true, then lockextra will
1070 // return a nil list head if that's what it finds. If nilokay is false,
1071 // lockextra will keep waiting until the list head is no longer nil.
1072 #pragma textflag NOSPLIT
1073 static M*
1074 lockextra(bool nilokay)
1075 {
1076         uintptr mpx;
1077         void (*yield)(void);
1078
1079         for(;;) {
1080                 mpx = runtime·atomicloaduintptr((uintptr*)&runtime·extram);
1081                 if(mpx == MLOCKED) {
1082                         yield = runtime·osyield;
1083                         yield();
1084                         continue;
1085                 }
1086                 if(mpx == 0 && !nilokay) {
1087                         runtime·usleep(1);
1088                         continue;
1089                 }
1090                 if(!runtime·casuintptr((uintptr*)&runtime·extram, mpx, MLOCKED)) {
1091                         yield = runtime·osyield;
1092                         yield();
1093                         continue;
1094                 }
1095                 break;
1096         }
1097         return (M*)mpx;
1098 }
1099
1100 #pragma textflag NOSPLIT
1101 static void
1102 unlockextra(M *mp)
1103 {
1104         runtime·atomicstorep(&runtime·extram, mp);
1105 }
1106
1107
1108 // Create a new m.  It will start off with a call to fn, or else the scheduler.
1109 static void
1110 newm(void(*fn)(void), P *p)
1111 {
1112         M *mp;
1113
1114         mp = runtime·allocm(p);
1115         mp->nextp = p;
1116         mp->mstartfn = fn;
1117
1118         if(runtime·iscgo) {
1119                 CgoThreadStart ts;
1120
1121                 if(_cgo_thread_start == nil)
1122                         runtime·throw("_cgo_thread_start missing");
1123                 ts.g = mp->g0;
1124                 ts.tls = mp->tls;
1125                 ts.fn = runtime·mstart;
1126                 runtime·asmcgocall(_cgo_thread_start, &ts);
1127                 return;
1128         }
1129         runtime·newosproc(mp, (byte*)mp->g0->stack.hi);
1130 }
1131
1132 // Stops execution of the current m until new work is available.
1133 // Returns with acquired P.
1134 static void
1135 stopm(void)
1136 {
1137         if(g->m->locks)
1138                 runtime·throw("stopm holding locks");
1139         if(g->m->p)
1140                 runtime·throw("stopm holding p");
1141         if(g->m->spinning) {
1142                 g->m->spinning = false;
1143                 runtime·xadd(&runtime·sched.nmspinning, -1);
1144         }
1145
1146 retry:
1147         runtime·lock(&runtime·sched.lock);
1148         mput(g->m);
1149         runtime·unlock(&runtime·sched.lock);
1150         runtime·notesleep(&g->m->park);
1151         runtime·noteclear(&g->m->park);
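             // If we were woken to assist the GC, do that work, then park again
             // until there is real scheduling work.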
1152         if(g->m->helpgc) {
1153                 runtime·gchelper();
1154                 g->m->helpgc = 0;
1155                 g->m->mcache = nil;
1156                 goto retry;
1157         }
1158         acquirep(g->m->nextp);
1159         g->m->nextp = nil;
1160 }
1161
1162 static void
1163 mspinning(void)
1164 {
1165         g->m->spinning = true;
1166 }
1167
1168 // Schedules some M to run the p (creates an M if necessary).
1169 // If p==nil, tries to get an idle P; if there are no idle P's, does nothing.
1170 static void
1171 startm(P *p, bool spinning)
1172 {
1173         M *mp;
1174         void (*fn)(void);
1175
1176         runtime·lock(&runtime·sched.lock);
1177         if(p == nil) {
1178                 p = pidleget();
1179                 if(p == nil) {
1180                         runtime·unlock(&runtime·sched.lock);
1181                         if(spinning)
1182                                 runtime·xadd(&runtime·sched.nmspinning, -1);
1183                         return;
1184                 }
1185         }
1186         mp = mget();
1187         runtime·unlock(&runtime·sched.lock);
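             // No idle M was found: create a new one, started in the spinning state if requested.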
1188         if(mp == nil) {
1189                 fn = nil;
1190                 if(spinning)
1191                         fn = mspinning;
1192                 newm(fn, p);
1193                 return;
1194         }
1195         if(mp->spinning)
1196                 runtime·throw("startm: m is spinning");
1197         if(mp->nextp)
1198                 runtime·throw("startm: m has p");
1199         mp->spinning = spinning;
1200         mp->nextp = p;
1201         runtime·notewakeup(&mp->park);
1202 }
1203
1204 // Hands off P from syscall or locked M.
1205 static void
1206 handoffp(P *p)
1207 {
1208         // if it has local work, start it straight away
1209         if(p->runqhead != p->runqtail || runtime·sched.runqsize) {
1210                 startm(p, false);
1211                 return;
1212         }
1213         // no local work, check that there are no spinning/idle M's,
1214         // otherwise our help is not required
1215         if(runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) == 0 &&  // TODO: fast atomic
1216                 runtime·cas(&runtime·sched.nmspinning, 0, 1)){
1217                 startm(p, true);
1218                 return;
1219         }
1220         runtime·lock(&runtime·sched.lock);
1221         if(runtime·sched.gcwaiting) {
1222                 p->status = Pgcstop;
1223                 if(--runtime·sched.stopwait == 0)
1224                         runtime·notewakeup(&runtime·sched.stopnote);
1225                 runtime·unlock(&runtime·sched.lock);
1226                 return;
1227         }
1228         if(runtime·sched.runqsize) {
1229                 runtime·unlock(&runtime·sched.lock);
1230                 startm(p, false);
1231                 return;
1232         }
1233         // If this is the last running P and nobody is polling the network,
1234         // we need to wake up another M to poll the network.
1235         if(runtime·sched.npidle == runtime·gomaxprocs-1 && runtime·atomicload64(&runtime·sched.lastpoll) != 0) {
1236                 runtime·unlock(&runtime·sched.lock);
1237                 startm(p, false);
1238                 return;
1239         }
1240         pidleput(p);
1241         runtime·unlock(&runtime·sched.lock);
1242 }
1243
1244 // Tries to add one more P to execute G's.
1245 // Called when a G is made runnable (newproc, ready).
1246 static void
1247 wakep(void)
1248 {
1249         // be conservative about spinning threads
1250         if(!runtime·cas(&runtime·sched.nmspinning, 0, 1))
1251                 return;
1252         startm(nil, true);
1253 }
1254
1255 // Stops execution of the current m that is locked to a g until the g is runnable again.
1256 // Returns with acquired P.
1257 static void
1258 stoplockedm(void)
1259 {
1260         P *p;
1261         uint32 status;
1262
1263         if(g->m->lockedg == nil || g->m->lockedg->lockedm != g->m)
1264                 runtime·throw("stoplockedm: inconsistent locking");
1265         if(g->m->p) {
1266                 // Schedule another M to run this p.
1267                 p = releasep();
1268                 handoffp(p);
1269         }
1270         incidlelocked(1);
1271         // Wait until another thread schedules lockedg again.
1272         runtime·notesleep(&g->m->park);
1273         runtime·noteclear(&g->m->park);
1274         status = runtime·readgstatus(g->m->lockedg);
1275         if((status&~Gscan) != Grunnable){
1276                 runtime·printf("runtime: stoplockedm: g is not Grunnable or Gscanrunnable\n");
1277                 dumpgstatus(g);
1278                 runtime·throw("stoplockedm: not runnable");
1279         }
1280         acquirep(g->m->nextp);
1281         g->m->nextp = nil;
1282 }
1283
1284 // Schedules the locked m to run the locked gp.
1285 static void
1286 startlockedm(G *gp)
1287 {
1288         M *mp;
1289         P *p;
1290
1291         mp = gp->lockedm;
1292         if(mp == g->m)
1293                 runtime·throw("startlockedm: locked to me");
1294         if(mp->nextp)
1295                 runtime·throw("startlockedm: m has p");
1296         // directly handoff current P to the locked m
1297         incidlelocked(-1);
1298         p = releasep();
1299         mp->nextp = p;
1300         runtime·notewakeup(&mp->park);
1301         stopm();
1302 }
1303
1304 // Stops the current m for stoptheworld.
1305 // Returns when the world is restarted.
1306 static void
1307 gcstopm(void)
1308 {
1309         P *p;
1310
1311         if(!runtime·sched.gcwaiting)
1312                 runtime·throw("gcstopm: not waiting for gc");
1313         if(g->m->spinning) {
1314                 g->m->spinning = false;
1315                 runtime·xadd(&runtime·sched.nmspinning, -1);
1316         }
1317         p = releasep();
1318         runtime·lock(&runtime·sched.lock);
1319         p->status = Pgcstop;
1320         if(--runtime·sched.stopwait == 0)
1321                 runtime·notewakeup(&runtime·sched.stopnote);
1322         runtime·unlock(&runtime·sched.lock);
1323         stopm();
1324 }
1325
1326 // Schedules gp to run on the current M.
1327 // Never returns.
1328 static void
1329 execute(G *gp)
1330 {
1331         int32 hz;
1332         
1333         runtime·casgstatus(gp, Grunnable, Grunning);
1334         gp->waitsince = 0;
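             // Clear any pending preemption request and reset the stack guard to its normal value.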
1335         gp->preempt = false;
1336         gp->stackguard0 = gp->stack.lo + StackGuard;
1337         g->m->p->schedtick++;
1338         g->m->curg = gp;
1339         gp->m = g->m;
1340
1341         // Check whether the profiler needs to be turned on or off.
1342         hz = runtime·sched.profilehz;
1343         if(g->m->profilehz != hz)
1344                 runtime·resetcpuprofiler(hz);
1345
1346         runtime·gogo(&gp->sched);
1347 }
1348
1349 // Finds a runnable goroutine to execute.
1350 // Tries to steal from other P's, get g from global queue, poll network.
1351 static G*
1352 findrunnable(void)
1353 {
1354         G *gp;
1355         P *p;
1356         int32 i;
1357
1358 top:
1359         if(runtime·sched.gcwaiting) {
1360                 gcstopm();
1361                 goto top;
1362         }
1363         if(runtime·fingwait && runtime·fingwake && (gp = runtime·wakefing()) != nil)
1364                 runtime·ready(gp);
1365         // local runq
1366         gp = runqget(g->m->p);
1367         if(gp)
1368                 return gp;
1369         // global runq
1370         if(runtime·sched.runqsize) {
1371                 runtime·lock(&runtime·sched.lock);
1372                 gp = globrunqget(g->m->p, 0);
1373                 runtime·unlock(&runtime·sched.lock);
1374                 if(gp)
1375                         return gp;
1376         }
1377         // poll network
1378         gp = runtime·netpoll(false);  // non-blocking
1379         if(gp) {
1380                 injectglist(gp->schedlink);
1381                 runtime·casgstatus(gp, Gwaiting, Grunnable);
1382                 return gp;
1383         }
1384         // If number of spinning M's >= number of busy P's, block.
1385         // This is necessary to prevent excessive CPU consumption
1386         // when GOMAXPROCS>>1 but the program parallelism is low.
1387         if(!g->m->spinning && 2 * runtime·atomicload(&runtime·sched.nmspinning) >= runtime·gomaxprocs - runtime·atomicload(&runtime·sched.npidle))  // TODO: fast atomic
1388                 goto stop;
1389         if(!g->m->spinning) {
1390                 g->m->spinning = true;
1391                 runtime·xadd(&runtime·sched.nmspinning, 1);
1392         }
1393         // random steal from other P's
1394         for(i = 0; i < 2*runtime·gomaxprocs; i++) {
1395                 if(runtime·sched.gcwaiting)
1396                         goto top;
1397                 p = runtime·allp[runtime·fastrand1()%runtime·gomaxprocs];
1398                 if(p == g->m->p)
1399                         gp = runqget(p);
1400                 else
1401                         gp = runqsteal(g->m->p, p);
1402                 if(gp)
1403                         return gp;
1404         }
1405 stop:
1406         // return P and block
1407         runtime·lock(&runtime·sched.lock);
1408         if(runtime·sched.gcwaiting) {
1409                 runtime·unlock(&runtime·sched.lock);
1410                 goto top;
1411         }
1412         if(runtime·sched.runqsize) {
1413                 gp = globrunqget(g->m->p, 0);
1414                 runtime·unlock(&runtime·sched.lock);
1415                 return gp;
1416         }
1417         p = releasep();
1418         pidleput(p);
1419         runtime·unlock(&runtime·sched.lock);
1420         if(g->m->spinning) {
1421                 g->m->spinning = false;
1422                 runtime·xadd(&runtime·sched.nmspinning, -1);
1423         }
1424         // check all runqueues once again
1425         for(i = 0; i < runtime·gomaxprocs; i++) {
1426                 p = runtime·allp[i];
1427                 if(p && p->runqhead != p->runqtail) {
1428                         runtime·lock(&runtime·sched.lock);
1429                         p = pidleget();
1430                         runtime·unlock(&runtime·sched.lock);
1431                         if(p) {
1432                                 acquirep(p);
1433                                 goto top;
1434                         }
1435                         break;
1436                 }
1437         }
1438         // poll network
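             // Only one M blocks in netpoll at a time: sched.lastpoll is zero while an M is
             // blocked there, so the xchg below both checks for and claims the right to poll.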
1439         if(runtime·xchg64(&runtime·sched.lastpoll, 0) != 0) {
1440                 if(g->m->p)
1441                         runtime·throw("findrunnable: netpoll with p");
1442                 if(g->m->spinning)
1443                         runtime·throw("findrunnable: netpoll with spinning");
1444                 gp = runtime·netpoll(true);  // block until new work is available
1445                 runtime·atomicstore64(&runtime·sched.lastpoll, runtime·nanotime());
1446                 if(gp) {
1447                         runtime·lock(&runtime·sched.lock);
1448                         p = pidleget();
1449                         runtime·unlock(&runtime·sched.lock);
1450                         if(p) {
1451                                 acquirep(p);
1452                                 injectglist(gp->schedlink);
1453                                 runtime·casgstatus(gp, Gwaiting, Grunnable);
1454                                 return gp;
1455                         }
1456                         injectglist(gp);
1457                 }
1458         }
1459         stopm();
1460         goto top;
1461 }
1462
1463 static void
1464 resetspinning(void)
1465 {
1466         int32 nmspinning;
1467
1468         if(g->m->spinning) {
1469                 g->m->spinning = false;
1470                 nmspinning = runtime·xadd(&runtime·sched.nmspinning, -1);
1471                 if(nmspinning < 0)
1472                         runtime·throw("findrunnable: negative nmspinning");
1473         } else
1474                 nmspinning = runtime·atomicload(&runtime·sched.nmspinning);
1475
1476         // M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
1477         // so see if we need to wake up another P here.
1478         if (nmspinning == 0 && runtime·atomicload(&runtime·sched.npidle) > 0)
1479                 wakep();
1480 }
1481
1482 // Injects the list of runnable G's into the scheduler.
1483 // Can run concurrently with GC.
1484 static void
1485 injectglist(G *glist)
1486 {
1487         int32 n;
1488         G *gp;
1489
1490         if(glist == nil)
1491                 return;
1492         runtime·lock(&runtime·sched.lock);
1493         for(n = 0; glist; n++) {
1494                 gp = glist;
1495                 glist = gp->schedlink;
1496                 runtime·casgstatus(gp, Gwaiting, Grunnable); 
1497                 globrunqput(gp);
1498         }
1499         runtime·unlock(&runtime·sched.lock);
1500
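             // Start up to n M's on idle P's so the injected goroutines are picked up promptly.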
1501         for(; n && runtime·sched.npidle; n--)
1502                 startm(nil, false);
1503 }
1504
1505 // One round of scheduler: find a runnable goroutine and execute it.
1506 // Never returns.
1507 static void
1508 schedule(void)
1509 {
1510         G *gp;
1511         uint32 tick;
1512
1513         if(g->m->locks)
1514                 runtime·throw("schedule: holding locks");
1515
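             // An M locked to a goroutine runs only that goroutine: block until it is
             // runnable again, then run it (execute never returns).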
1516         if(g->m->lockedg) {
1517                 stoplockedm();
1518                 execute(g->m->lockedg);  // Never returns.
1519         }
1520
1521 top:
1522         if(runtime·sched.gcwaiting) {
1523                 gcstopm();
1524                 goto top;
1525         }
1526
1527         gp = nil;
1528         // Check the global runnable queue once in a while to ensure fairness.
1529         // Otherwise two goroutines can completely occupy the local runqueue
1530         // by constantly respawning each other.
1531         tick = g->m->p->schedtick;
1532         // This is a fancy way to say tick%61==0; it uses 2 MUL instructions
1533         // instead of a single DIV and so is faster on modern processors.
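        // A worked check of that identity: 0x4325c53f is ceil(2^36/61), so
        // ((uint64)tick*0x4325c53f)>>36 equals tick/61 for any uint32 tick.
        // For tick = 100: 100*0x4325c53f = 112654879900, >>36 gives 1,
        // and 100 - 1*61 = 39 = 100%61.
        // For tick = 122: 122*0x4325c53f = 137438953478, >>36 gives 2,
        // and 122 - 2*61 = 0, so the global queue is checked on that tick.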
1534         if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime·sched.runqsize > 0) {
1535                 runtime·lock(&runtime·sched.lock);
1536                 gp = globrunqget(g->m->p, 1);
1537                 runtime·unlock(&runtime·sched.lock);
1538                 if(gp)
1539                         resetspinning();
1540         }
1541         if(gp == nil) {
1542                 gp = runqget(g->m->p);
1543                 if(gp && g->m->spinning)
1544                         runtime·throw("schedule: spinning with local work");
1545         }
1546         if(gp == nil) {
1547                 gp = findrunnable();  // blocks until work is available
1548                 resetspinning();
1549         }
1550
1551         if(gp->lockedm) {
1552                 // Hands off own p to the locked m,
1553                 // then blocks waiting for a new p.
1554                 startlockedm(gp);
1555                 goto top;
1556         }
1557
1558         execute(gp);
1559 }
1560
1561 // dropg removes the association between m and the current goroutine m->curg (gp for short).
1562 // Typically a caller sets gp's status away from Grunning and then
1563 // immediately calls dropg to finish the job. The caller is also responsible
1564 // for arranging that gp will be restarted using runtime·ready at an
1565 // appropriate time. After calling dropg and arranging for gp to be
1566 // readied later, the caller can do other work but eventually should
1567 // call schedule to restart the scheduling of goroutines on this m.
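// (runtime·park_m below follows this pattern: it moves gp from Grunning to
// Gwaiting, calls dropg, and eventually calls schedule; a later
// runtime·ready(gp) is what makes gp runnable again.)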
1568 static void
1569 dropg(void)
1570 {
1571         if(g->m->lockedg == nil) {
1572                 g->m->curg->m = nil;
1573                 g->m->curg = nil;
1574         }
1575 }
1576
1577 // Puts the current goroutine into a waiting state and calls unlockf.
1578 // If unlockf returns false, the goroutine is resumed.
1579 void
1580 runtime·park(bool(*unlockf)(G*, void*), void *lock, String reason)
1581 {
1582         void (*fn)(G*);
1583
1584         g->m->waitlock = lock;
1585         g->m->waitunlockf = unlockf;
1586         g->waitreason = reason;
1587         fn = runtime·park_m;
1588         runtime·mcall(&fn);
1589 }
1590
1591 bool
1592 runtime·parkunlock_c(G *gp, void *lock)
1593 {
1594         USED(gp);
1595         runtime·unlock(lock);
1596         return true;
1597 }
1598
1599 // Puts the current goroutine into a waiting state and unlocks the lock.
1600 // The goroutine can be made runnable again by calling runtime·ready(gp).
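// Illustrative pattern (l, reason and gp here are hypothetical names): a waiter
// holding Mutex l queues itself and parks,
//
//	runtime·lock(&l);
//	// ...link g onto a wait list protected by l...
//	runtime·parkunlock(&l, reason);
//
// and the waker, after locking l and dequeuing the G, calls runtime·ready(gp).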
1601 void
1602 runtime·parkunlock(Mutex *lock, String reason)
1603 {
1604         runtime·park(runtime·parkunlock_c, lock, reason);
1605 }
1606
1607 // runtime·park continuation on g0.
1608 void
1609 runtime·park_m(G *gp)
1610 {
1611         bool ok;
1612
1613         runtime·casgstatus(gp, Grunning, Gwaiting);
1614         dropg();
1615
1616         if(g->m->waitunlockf) {
1617                 ok = g->m->waitunlockf(gp, g->m->waitlock);
1618                 g->m->waitunlockf = nil;
1619                 g->m->waitlock = nil;
1620                 if(!ok) {
1621                         runtime·casgstatus(gp, Gwaiting, Grunnable); 
1622                         execute(gp);  // Schedule it back, never returns.
1623                 }
1624         }
1625
1626         schedule();
1627 }
1628
1629 // Gosched continuation on g0.
1630 void
1631 runtime·gosched_m(G *gp)
1632 {
1633         uint32 status;
1634
1635         status = runtime·readgstatus(gp);
1636         if((status&~Gscan) != Grunning){
1637                 dumpgstatus(gp);
1638                 runtime·throw("bad g status");
1639         }
1640         runtime·casgstatus(gp, Grunning, Grunnable);
1641         dropg();
1642         runtime·lock(&runtime·sched.lock);
1643         globrunqput(gp);
1644         runtime·unlock(&runtime·sched.lock);
1645
1646         schedule();
1647 }
1648
1649 // Finishes execution of the current goroutine.
1650 // Must be NOSPLIT because it is called from Go.
1651 #pragma textflag NOSPLIT
1652 void
1653 runtime·goexit1(void)
1654 {
1655         void (*fn)(G*);
1656
1657         if(raceenabled)
1658                 runtime·racegoend();
1659         fn = goexit0;
1660         runtime·mcall(&fn);
1661 }
1662
1663 // runtime·goexit continuation on g0.
1664 static void
1665 goexit0(G *gp)
1666 {
1667         runtime·casgstatus(gp, Grunning, Gdead);
1668         gp->m = nil;
1669         gp->lockedm = nil;
1670         g->m->lockedg = nil;
1671         gp->paniconfault = 0;
1672         gp->defer = nil; // should be nil already but just in case.
1673         gp->panic = nil; // non-nil for Goexit during panic. points at stack-allocated data.
1674         gp->writebuf.array = nil;
1675         gp->writebuf.len = 0;
1676         gp->writebuf.cap = 0;
1677         gp->waitreason.str = nil;
1678         gp->waitreason.len = 0;
1679         gp->param = nil;
1680
1681         dropg();
1682
1683         if(g->m->locked & ~LockExternal) {
1684                 runtime·printf("invalid m->locked = %d\n", g->m->locked);
1685                 runtime·throw("internal lockOSThread error");
1686         }       
1687         g->m->locked = 0;
1688         gfput(g->m->p, gp);
1689         schedule();
1690 }
1691
1692 #pragma textflag NOSPLIT
1693 static void
1694 save(uintptr pc, uintptr sp)
1695 {
1696         g->sched.pc = pc;
1697         g->sched.sp = sp;
1698         g->sched.lr = 0;
1699         g->sched.ret = 0;
1700         g->sched.ctxt = 0;
1701         g->sched.g = g;
1702 }
1703
1704 static void entersyscall_bad(void);
1705 static void entersyscall_sysmon(void);
1706 static void entersyscall_gcwait(void);
1707
1708 // The goroutine g is about to enter a system call.
1709 // Record that it's not using the cpu anymore.
1710 // This is called only from the go syscall library and cgocall,
1711 // not from the low-level system calls used by the runtime.
1712 //
1713 // Entersyscall cannot split the stack: the runtime·gosave must
1714 // make g->sched refer to the caller's stack segment, because
1715 // entersyscall is going to return immediately after.
1716 //
1717 // Nothing entersyscall calls can split the stack either.
1718 // We cannot safely move the stack during an active call to syscall,
1719 // because we do not know which of the uintptr arguments are
1720 // really pointers (back into the stack).
1721 // In practice, this means that we make the fast path run through
1722 // entersyscall doing no-split things, and the slow path has to use onM
1723 // to run bigger things on the m stack.
1724 //
1725 // reentersyscall is the entry point used by cgo callbacks, where explicitly
1726 // saved SP and PC are restored. This is needed when exitsyscall will be called
1727 // from a function further up in the call stack than the parent, as g->syscallsp
1728 // must always point to a valid stack frame. entersyscall below is the normal
1729 // entry point for syscalls, which obtains the SP and PC from the caller.
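// In short, the fast path below is: save(pc, sp); move g from Grunning to
// Gsyscall; drop the mcache; and mark the P as Psyscall so that sysmon's
// retake() can hand the P off if the syscall runs for more than a tick.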
1730 #pragma textflag NOSPLIT
1731 void
1732 runtime·reentersyscall(uintptr pc, uintptr sp)
1733 {
1734         void (*fn)(void);
1735
1736         // Disable preemption because during this function g is in Gsyscall status,
1737         // but can have inconsistent g->sched; do not let GC observe it.
1738         g->m->locks++;
1739         
1740         // Entersyscall must not call any function that might split/grow the stack.
1741         // (See details in comment above.)
1742         // Catch calls that might, by replacing the stack guard with something that
1743         // will trip any stack check and leaving a flag to tell newstack to die.
1744         g->stackguard0 = StackPreempt;
1745         g->throwsplit = 1;
1746
1747         // Leave SP around for GC and traceback.
1748         save(pc, sp);
1749         g->syscallsp = sp;
1750         g->syscallpc = pc;
1751         runtime·casgstatus(g, Grunning, Gsyscall);
1752         if(g->syscallsp < g->stack.lo || g->stack.hi < g->syscallsp) {
1753                 fn = entersyscall_bad;
1754                 runtime·onM(&fn);
1755         }
1756
1757         if(runtime·atomicload(&runtime·sched.sysmonwait)) {  // TODO: fast atomic
1758                 fn = entersyscall_sysmon;
1759                 runtime·onM(&fn);
1760                 save(pc, sp);
1761         }
1762
1763         g->m->mcache = nil;
1764         g->m->p->m = nil;
1765         runtime·atomicstore(&g->m->p->status, Psyscall);
1766         if(runtime·sched.gcwaiting) {
1767                 fn = entersyscall_gcwait;
1768                 runtime·onM(&fn);
1769                 save(pc, sp);
1770         }
1771
1772         // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
1773         // We set stackguard to StackPreempt so that first split stack check calls morestack.
1774         // Morestack detects this case and throws.
1775         g->stackguard0 = StackPreempt;
1776         g->m->locks--;
1777 }
1778
1779 // Standard syscall entry used by the go syscall library and normal cgo calls.
1780 #pragma textflag NOSPLIT
1781 void
1782 ·entersyscall(int32 dummy)
1783 {
1784         runtime·reentersyscall((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
1785 }
1786
1787 static void
1788 entersyscall_bad(void)
1789 {
1790         G *gp;
1791         
1792         gp = g->m->curg;
1793         runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
1794                 gp->syscallsp, gp->stack.lo, gp->stack.hi);
1795         runtime·throw("entersyscall");
1796 }
1797
1798 static void
1799 entersyscall_sysmon(void)
1800 {
1801         runtime·lock(&runtime·sched.lock);
1802         if(runtime·atomicload(&runtime·sched.sysmonwait)) {
1803                 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
1804                 runtime·notewakeup(&runtime·sched.sysmonnote);
1805         }
1806         runtime·unlock(&runtime·sched.lock);
1807 }
1808
1809 static void
1810 entersyscall_gcwait(void)
1811 {
1812         runtime·lock(&runtime·sched.lock);
1813         if (runtime·sched.stopwait > 0 && runtime·cas(&g->m->p->status, Psyscall, Pgcstop)) {
1814                 if(--runtime·sched.stopwait == 0)
1815                         runtime·notewakeup(&runtime·sched.stopnote);
1816         }
1817         runtime·unlock(&runtime·sched.lock);
1818 }
1819
1820 static void entersyscallblock_handoff(void);
1821
1822 // The same as runtime·entersyscall(), but with a hint that the syscall is blocking.
1823 #pragma textflag NOSPLIT
1824 void
1825 ·entersyscallblock(int32 dummy)
1826 {
1827         void (*fn)(void);
1828
1829         g->m->locks++;  // see comment in entersyscall
1830         g->throwsplit = 1;
1831         g->stackguard0 = StackPreempt;  // see comment in entersyscall
1832
1833         // Leave SP around for GC and traceback.
1834         save((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
1835         g->syscallsp = g->sched.sp;
1836         g->syscallpc = g->sched.pc;
1837         runtime·casgstatus(g, Grunning, Gsyscall);
1838         if(g->syscallsp < g->stack.lo || g->stack.hi < g->syscallsp) {
1839                 fn = entersyscall_bad;
1840                 runtime·onM(&fn);
1841         }
1842         
1843         fn = entersyscallblock_handoff;
1844         runtime·onM(&fn);
1845
1846         // Resave for traceback during blocked call.
1847         save((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
1848
1849         g->m->locks--;
1850 }
1851
1852 static void
1853 entersyscallblock_handoff(void)
1854 {
1855         handoffp(releasep());
1856 }
1857
1858 // The goroutine g exited its system call.
1859 // Arrange for it to run on a cpu again.
1860 // This is called only from the go syscall library, not
1861 // from the low-level system calls used by the runtime.
1862 #pragma textflag NOSPLIT
1863 void
1864 ·exitsyscall(int32 dummy)
1865 {
1866         void (*fn)(G*);
1867
1868         g->m->locks++;  // see comment in entersyscall
1869
1870         if(runtime·getcallersp(&dummy) > g->syscallsp)
1871                 runtime·throw("exitsyscall: syscall frame is no longer valid");
1872
1873         g->waitsince = 0;
1874         if(exitsyscallfast()) {
1875                 // There's a cpu for us, so we can run.
1876                 g->m->p->syscalltick++;
1877                 // We need to cas the status and scan before resuming...
1878                 runtime·casgstatus(g, Gsyscall, Grunning);
1879
1880                 // Garbage collector isn't running (since we are),
1881                 // so okay to clear syscallsp.
1882                 g->syscallsp = (uintptr)nil;
1883                 g->m->locks--;
1884                 if(g->preempt) {
1885                         // restore the preemption request in case we've cleared it in newstack
1886                         g->stackguard0 = StackPreempt;
1887                 } else {
1888                         // otherwise restore the real stackguard; we spoiled it in entersyscall/entersyscallblock
1889                         g->stackguard0 = g->stack.lo + StackGuard;
1890                 }
1891                 g->throwsplit = 0;
1892                 return;
1893         }
1894
1895         g->m->locks--;
1896
1897         // Call the scheduler.
1898         fn = exitsyscall0;
1899         runtime·mcall(&fn);
1900
1901         // Scheduler returned, so we're allowed to run now.
1902         // Delete the syscallsp information that we left for
1903         // the garbage collector during the system call.
1904         // Must wait until now because until gosched returns
1905         // we don't know for sure that the garbage collector
1906         // is not running.
1907         g->syscallsp = (uintptr)nil;
1908         g->m->p->syscalltick++;
1909         g->throwsplit = 0;
1910 }
1911
1912 static void exitsyscallfast_pidle(void);
1913
1914 #pragma textflag NOSPLIT
1915 static bool
1916 exitsyscallfast(void)
1917 {
1918         void (*fn)(void);
1919
1920         // Freezetheworld sets stopwait but does not retake P's.
1921         if(runtime·sched.stopwait) {
1922                 g->m->mcache = nil; 
1923                 g->m->p = nil;
1924                 return false;
1925         }
1926
1927         // Try to re-acquire the last P.
1928         if(g->m->p && g->m->p->status == Psyscall && runtime·cas(&g->m->p->status, Psyscall, Prunning)) {
1929                 // There's a cpu for us, so we can run.
1930                 g->m->mcache = g->m->p->mcache;
1931                 g->m->p->m = g->m;
1932                 return true;
1933         }
1934         // Try to get any other idle P.
1935         g->m->mcache = nil;
1936         g->m->p = nil;
1937         if(runtime·sched.pidle) {
1938                 fn = exitsyscallfast_pidle;
1939                 runtime·onM(&fn);
1940                 if(g->m->scalararg[0]) {
1941                         g->m->scalararg[0] = 0;
1942                         return true;
1943                 }
1944         }
1945         return false;
1946 }
1947
1948 static void
1949 exitsyscallfast_pidle(void)
1950 {
1951         P *p;
1952
1953         runtime·lock(&runtime·sched.lock);
1954         p = pidleget();
1955         if(p && runtime·atomicload(&runtime·sched.sysmonwait)) {
1956                 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
1957                 runtime·notewakeup(&runtime·sched.sysmonnote);
1958         }
1959         runtime·unlock(&runtime·sched.lock);
1960         if(p) {
1961                 acquirep(p);
1962                 g->m->scalararg[0] = 1;
1963         } else
1964                 g->m->scalararg[0] = 0;
1965 }
1966
1967 // runtime·exitsyscall slow path on g0.
1968 // Failed to acquire P, enqueue gp as runnable.
1969 static void
1970 exitsyscall0(G *gp)
1971 {
1972         P *p;
1973
1974         runtime·casgstatus(gp, Gsyscall, Grunnable);
1975         dropg();
1976         runtime·lock(&runtime·sched.lock);
1977         p = pidleget();
1978         if(p == nil)
1979                 globrunqput(gp);
1980         else if(runtime·atomicload(&runtime·sched.sysmonwait)) {
1981                 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
1982                 runtime·notewakeup(&runtime·sched.sysmonnote);
1983         }
1984         runtime·unlock(&runtime·sched.lock);
1985         if(p) {
1986                 acquirep(p);
1987                 execute(gp);  // Never returns.
1988         }
1989         if(g->m->lockedg) {
1990                 // Wait until another thread schedules gp and so m again.
1991                 stoplockedm();
1992                 execute(gp);  // Never returns.
1993         }
1994         stopm();
1995         schedule();  // Never returns.
1996 }
1997
1998 static void
1999 beforefork(void)
2000 {
2001         G *gp;
2002         
2003         gp = g->m->curg;
2004         // Fork can hang if preempted with signals frequently enough (see issue 5517).
2005         // Ensure that we stay on the same M where we disable profiling.
2006         gp->m->locks++;
2007         if(gp->m->profilehz != 0)
2008                 runtime·resetcpuprofiler(0);
2009
2010         // This function is called before fork in the syscall package.
2011         // Code between fork and exec must not allocate memory nor even try to grow the stack.
2012         // Here we spoil g->stackguard to reliably detect any attempts to grow the stack.
2013         // runtime_AfterFork will undo this in the parent process, but not in the child.
2014         gp->stackguard0 = StackFork;
2015 }
2016
2017 // Called from syscall package before fork.
2018 #pragma textflag NOSPLIT
2019 void
2020 syscall·runtime_BeforeFork(void)
2021 {
2022         void (*fn)(void);
2023         
2024         fn = beforefork;
2025         runtime·onM(&fn);
2026 }
2027
2028 static void
2029 afterfork(void)
2030 {
2031         int32 hz;
2032         G *gp;
2033         
2034         gp = g->m->curg;
2035         // See the comment in runtime_BeforeFork.
2036         gp->stackguard0 = gp->stack.lo + StackGuard;
2037
2038         hz = runtime·sched.profilehz;
2039         if(hz != 0)
2040                 runtime·resetcpuprofiler(hz);
2041         gp->m->locks--;
2042 }
2043
2044 // Called from syscall package after fork in parent.
2045 #pragma textflag NOSPLIT
2046 void
2047 syscall·runtime_AfterFork(void)
2048 {
2049         void (*fn)(void);
2050         
2051         fn = afterfork;
2052         runtime·onM(&fn);
2053 }
2054
2055 // Hook used by runtime·malg to call runtime·stackalloc on the
2056 // scheduler stack.  This exists because runtime·stackalloc insists
2057 // on being called on the scheduler stack, to avoid trying to grow
2058 // the stack while allocating a new stack segment.
2059 static void
2060 mstackalloc(G *gp)
2061 {
2062         G *newg;
2063         uintptr size;
2064
2065         newg = g->m->ptrarg[0];
2066         size = g->m->scalararg[0];
2067
2068         newg->stack = runtime·stackalloc(size);
2069
2070         runtime·gogo(&gp->sched);
2071 }
2072
2073 // Allocate a new g, with a stack big enough for stacksize bytes.
2074 G*
2075 runtime·malg(int32 stacksize)
2076 {
2077         G *newg;
2078         void (*fn)(G*);
2079
2080         newg = allocg();
2081         if(stacksize >= 0) {
2082                 stacksize = runtime·round2(StackSystem + stacksize);
2083                 if(g == g->m->g0) {
2084                         // running on scheduler stack already.
2085                         newg->stack = runtime·stackalloc(stacksize);
2086                 } else {
2087                         // have to call stackalloc on scheduler stack.
2088                         g->m->scalararg[0] = stacksize;
2089                         g->m->ptrarg[0] = newg;
2090                         fn = mstackalloc;
2091                         runtime·mcall(&fn);
2092                         g->m->ptrarg[0] = nil;
2093                 }
2094                 newg->stackguard0 = newg->stack.lo + StackGuard;
2095                 newg->stackguard1 = ~(uintptr)0;
2096         }
2097         return newg;
2098 }
2099
2100 static void
2101 newproc_m(void)
2102 {
2103         byte *argp;
2104         void *callerpc;
2105         FuncVal *fn;
2106         int32 siz;
2107
2108         siz = g->m->scalararg[0];
2109         callerpc = (void*)g->m->scalararg[1];   
2110         argp = g->m->ptrarg[0];
2111         fn = (FuncVal*)g->m->ptrarg[1];
2112
2113         runtime·newproc1(fn, argp, siz, 0, callerpc);
2114         g->m->ptrarg[0] = nil;
2115         g->m->ptrarg[1] = nil;
2116 }
2117
2118 // Create a new g running fn with siz bytes of arguments.
2119 // Put it on the queue of g's waiting to run.
2120 // The compiler turns a go statement into a call to this.
2121 // Cannot split the stack because it assumes that the arguments
2122 // are available sequentially after &fn; they would not be
2123 // copied if a stack split occurred.
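// For example, the compiler lowers
//
//	go f(x, y)
//
// into (conceptually) a call of the form
//
//	runtime·newproc(siz, f, x, y);
//
// where siz is the byte size of f's argument frame and x, y are laid out on
// the caller's stack immediately after fn, which is why the stack must not move.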
2124 #pragma textflag NOSPLIT
2125 void
2126 runtime·newproc(int32 siz, FuncVal* fn, ...)
2127 {
2128         byte *argp;
2129         void (*mfn)(void);
2130
2131         if(thechar == '5' || thechar == '9')
2132                 argp = (byte*)(&fn+2);  // skip caller's saved LR
2133         else
2134                 argp = (byte*)(&fn+1);
2135
2136         g->m->locks++;
2137         g->m->scalararg[0] = siz;
2138         g->m->scalararg[1] = (uintptr)runtime·getcallerpc(&siz);
2139         g->m->ptrarg[0] = argp;
2140         g->m->ptrarg[1] = fn;
2141         mfn = newproc_m;
2142         runtime·onM(&mfn);
2143         g->m->locks--;
2144 }
2145
2146 void runtime·main(void);
2147
2148 // Create a new g running fn with narg bytes of arguments starting
2149 // at argp and returning nret bytes of results.  callerpc is the
2150 // address of the go statement that created this.  The new g is put
2151 // on the queue of g's waiting to run.
2152 G*
2153 runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
2154 {
2155         byte *sp;
2156         G *newg;
2157         P *p;
2158         int32 siz;
2159
2160         if(fn == nil) {
2161                 g->m->throwing = -1;  // do not dump full stacks
2162                 runtime·throw("go of nil func value");
2163         }
2164         g->m->locks++;  // disable preemption because it can be holding p in a local var
2165         siz = narg + nret;
2166         siz = (siz+7) & ~7;
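        // Round the frame size up to a multiple of 8; e.g. siz=13 becomes 16,
        // and siz=16 stays 16.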
2167
2168         // We could allocate a larger initial stack if necessary.
2169         // Not worth it: this is almost always an error.
2170         // 4*sizeof(uintreg): extra space added below
2171         // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
2172         if(siz >= StackMin - 4*sizeof(uintreg) - sizeof(uintreg))
2173                 runtime·throw("runtime.newproc: function arguments too large for new goroutine");
2174
2175         p = g->m->p;
2176         if((newg = gfget(p)) == nil) {
2177                 newg = runtime·malg(StackMin);
2178                 runtime·casgstatus(newg, Gidle, Gdead);
2179                 runtime·allgadd(newg); // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
2180         }
2181         if(newg->stack.hi == 0)
2182                 runtime·throw("newproc1: newg missing stack");
2183
2184         if(runtime·readgstatus(newg) != Gdead) 
2185                 runtime·throw("newproc1: new g is not Gdead");
2186
2187         sp = (byte*)newg->stack.hi;
2188         sp -= 4*sizeof(uintreg); // extra space in case of reads slightly beyond frame
2189         sp -= siz;
2190         runtime·memmove(sp, argp, narg);
2191         if(thechar == '5' || thechar == '9') {
2192                 // caller's LR
2193                 sp -= sizeof(void*);
2194                 *(void**)sp = nil;
2195         }
2196
2197         runtime·memclr((byte*)&newg->sched, sizeof newg->sched);
2198         newg->sched.sp = (uintptr)sp;
2199         newg->sched.pc = (uintptr)runtime·goexit + PCQuantum; // +PCQuantum so that previous instruction is in same function
2200         newg->sched.g = newg;
2201         runtime·gostartcallfn(&newg->sched, fn);
2202         newg->gopc = (uintptr)callerpc;
2203         runtime·casgstatus(newg, Gdead, Grunnable);
2204
2205         if(p->goidcache == p->goidcacheend) {
2206                 // Sched.goidgen is the last allocated id,
2207                 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
2208                 // At startup sched.goidgen=0, so main goroutine receives goid=1.
2209                 p->goidcache = runtime·xadd64(&runtime·sched.goidgen, GoidCacheBatch);
2210                 p->goidcache -= GoidCacheBatch - 1;
2211                 p->goidcacheend = p->goidcache + GoidCacheBatch;
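                // Worked example: the first refill sees xadd64 return 16, so this
                // P hands out goids 1..16 (goidcache=1, goidcacheend=17); the next
                // refill anywhere returns 32 and hands out 17..32, and so on.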
2212         }
2213         newg->goid = p->goidcache++;
2214         if(raceenabled)
2215                 newg->racectx = runtime·racegostart((void*)callerpc);
2216         runqput(p, newg);
2217
2218         if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0 && fn->fn != runtime·main)  // TODO: fast atomic
2219                 wakep();
2220         g->m->locks--;
2221         if(g->m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
2222                 g->stackguard0 = StackPreempt;
2223         return newg;
2224 }
2225
2226 // Put on gfree list.
2227 // If local list is too long, transfer a batch to the global list.
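// "Too long" here means more than 64 G's: once the local count reaches 64 we
// move G's to the global list until fewer than 32 remain, so a P hovering
// around the threshold does not take runtime·sched.gflock on every call.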
2228 static void
2229 gfput(P *p, G *gp)
2230 {
2231         uintptr stksize;
2232
2233         if(runtime·readgstatus(gp) != Gdead) 
2234                 runtime·throw("gfput: bad status (not Gdead)");
2235
2236         stksize = gp->stack.hi - gp->stack.lo;
2237         
2238         if(stksize != FixedStack) {
2239                 // non-standard stack size - free it.
2240                 runtime·stackfree(gp->stack);
2241                 gp->stack.lo = 0;
2242                 gp->stack.hi = 0;
2243                 gp->stackguard0 = 0;
2244         }
2245         gp->schedlink = p->gfree;
2246         p->gfree = gp;
2247         p->gfreecnt++;
2248         if(p->gfreecnt >= 64) {
2249                 runtime·lock(&runtime·sched.gflock);
2250                 while(p->gfreecnt >= 32) {
2251                         p->gfreecnt--;
2252                         gp = p->gfree;
2253                         p->gfree = gp->schedlink;
2254                         gp->schedlink = runtime·sched.gfree;
2255                         runtime·sched.gfree = gp;
2256                         runtime·sched.ngfree++;
2257                 }
2258                 runtime·unlock(&runtime·sched.gflock);
2259         }
2260 }
2261
2262 // Get from gfree list.
2263 // If local list is empty, grab a batch from global list.
2264 static G*
2265 gfget(P *p)
2266 {
2267         G *gp;
2268         void (*fn)(G*);
2269
2270 retry:
2271         gp = p->gfree;
2272         if(gp == nil && runtime·sched.gfree) {
2273                 runtime·lock(&runtime·sched.gflock);
2274                 while(p->gfreecnt < 32 && runtime·sched.gfree != nil) {
2275                         p->gfreecnt++;
2276                         gp = runtime·sched.gfree;
2277                         runtime·sched.gfree = gp->schedlink;
2278                         runtime·sched.ngfree--;
2279                         gp->schedlink = p->gfree;
2280                         p->gfree = gp;
2281                 }
2282                 runtime·unlock(&runtime·sched.gflock);
2283                 goto retry;
2284         }
2285         if(gp) {
2286                 p->gfree = gp->schedlink;
2287                 p->gfreecnt--;
2288
2289                 if(gp->stack.lo == 0) {
2290                         // Stack was deallocated in gfput.  Allocate a new one.
2291                         if(g == g->m->g0) {
2292                                 gp->stack = runtime·stackalloc(FixedStack);
2293                         } else {
2294                                 g->m->scalararg[0] = FixedStack;
2295                                 g->m->ptrarg[0] = gp;
2296                                 fn = mstackalloc;
2297                                 runtime·mcall(&fn);
2298                                 g->m->ptrarg[0] = nil;
2299                         }
2300                         gp->stackguard0 = gp->stack.lo + StackGuard;
2301                 } else {
2302                         if(raceenabled)
2303                                 runtime·racemalloc((void*)gp->stack.lo, gp->stack.hi - gp->stack.lo);
2304                 }
2305         }
2306         return gp;
2307 }
2308
2309 // Purge all cached G's from gfree list to the global list.
2310 static void
2311 gfpurge(P *p)
2312 {
2313         G *gp;
2314
2315         runtime·lock(&runtime·sched.gflock);
2316         while(p->gfreecnt != 0) {
2317                 p->gfreecnt--;
2318                 gp = p->gfree;
2319                 p->gfree = gp->schedlink;
2320                 gp->schedlink = runtime·sched.gfree;
2321                 runtime·sched.gfree = gp;
2322                 runtime·sched.ngfree++;
2323         }
2324         runtime·unlock(&runtime·sched.gflock);
2325 }
2326
2327 #pragma textflag NOSPLIT
2328 void
2329 runtime·Breakpoint(void)
2330 {
2331         runtime·breakpoint();
2332 }
2333
2334 // lockOSThread is called by runtime.LockOSThread and runtime.lockOSThread below
2335 // after they modify m->locked. Do not allow preemption during this call,
2336 // or else the m might be different in this function than in the caller.
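// As the functions below suggest, m->locked packs two kinds of state:
// LockExternal is a single flag bit set on behalf of the user-visible
// LockOSThread, while LockInternal is added and subtracted as a counter for
// nested runtime-internal lockOSThread calls (hence |=/&= for one and +=/-=
// for the other).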
2337 #pragma textflag NOSPLIT
2338 static void
2339 lockOSThread(void)
2340 {
2341         g->m->lockedg = g;
2342         g->lockedm = g->m;
2343 }
2344
2345 #pragma textflag NOSPLIT
2346 void
2347 runtime·LockOSThread(void)
2348 {
2349         g->m->locked |= LockExternal;
2350         lockOSThread();
2351 }
2352
2353 #pragma textflag NOSPLIT
2354 void
2355 runtime·lockOSThread(void)
2356 {
2357         g->m->locked += LockInternal;
2358         lockOSThread();
2359 }
2360
2361
2362 // unlockOSThread is called by runtime.UnlockOSThread and runtime.unlockOSThread below
2363 // after they update m->locked. Do not allow preemption during this call,
2364 // or else the m might be different in this function than in the caller.
2365 #pragma textflag NOSPLIT
2366 static void
2367 unlockOSThread(void)
2368 {
2369         if(g->m->locked != 0)
2370                 return;
2371         g->m->lockedg = nil;
2372         g->lockedm = nil;
2373 }
2374
2375 #pragma textflag NOSPLIT
2376 void
2377 runtime·UnlockOSThread(void)
2378 {
2379         g->m->locked &= ~LockExternal;
2380         unlockOSThread();
2381 }
2382
2383 static void badunlockOSThread(void);
2384
2385 #pragma textflag NOSPLIT
2386 void
2387 runtime·unlockOSThread(void)
2388 {
2389         void (*fn)(void);
2390
2391         if(g->m->locked < LockInternal) {
2392                 fn = badunlockOSThread;
2393                 runtime·onM(&fn);
2394         }
2395         g->m->locked -= LockInternal;
2396         unlockOSThread();
2397 }
2398
2399 static void
2400 badunlockOSThread(void)
2401 {
2402         runtime·throw("runtime: internal error: misuse of lockOSThread/unlockOSThread");
2403 }
2404
2405 #pragma textflag NOSPLIT
2406 int32
2407 runtime·gcount(void)
2408 {
2409         P *p, **pp;
2410         int32 n;
2411
2412         n = runtime·allglen - runtime·sched.ngfree;
2413         for(pp=runtime·allp; p=*pp; pp++)
2414                 n -= p->gfreecnt;
2415         // All these variables can be changed concurrently, so the result can be inconsistent.
2416         // But at least the current goroutine is running.
2417         if(n < 1)
2418                 n = 1;
2419         return n;
2420 }
2421
2422 int32
2423 runtime·mcount(void)
2424 {
2425         return runtime·sched.mcount;
2426 }
2427
2428 static struct ProfState {
2429         uint32 lock;
2430         int32 hz;
2431 } prof;
2432
2433 static void System(void) { System(); }
2434 static void ExternalCode(void) { ExternalCode(); }
2435 static void GC(void) { GC(); }
2436
2437 extern void runtime·cpuproftick(uintptr*, int32);
2438 extern byte runtime·etext[];
2439
2440 // Called if we receive a SIGPROF signal.
2441 void
2442 runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp, M *mp)
2443 {
2444         int32 n;
2445         bool traceback;
2446         // Do not use the global m in this function; use mp instead.
2447         // On windows one m sends reports about all the g's, so the global m would refer to the wrong thread.
2448         byte m;
2449         uintptr stk[100];
2450
2451         m = 0;
2452         USED(m);
2453
2454         if(prof.hz == 0)
2455                 return;
2456
2457         // Profiling runs concurrently with GC, so it must not allocate.
2458         mp->mallocing++;
2459
2460         // Define that a "user g" is a user-created goroutine, and a "system g"
2461         // is one that is m->g0 or m->gsignal. We've only made sure that we
2462         // can unwind user g's, so exclude the system g's.
2463         //
2464         // It is not quite as easy as testing gp == m->curg (the current user g)
2465         // because we might be interrupted for profiling halfway through a
2466         // goroutine switch. The switch involves updating three (or four) values:
2467         // g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
2468         // because once it gets updated the new g is running.
2469         //
2470         // When switching from a user g to a system g, LR is not considered live,
2471         // so the update only affects g, SP, and PC. Since PC must be last, the
2472         // possible partial transitions in ordinary execution are (1) g alone is updated,
2473         // (2) both g and SP are updated, and (3) SP alone is updated.
2474         // If g is updated, we'll see a system g and not look closer.
2475         // If SP alone is updated, we can detect the partial transition by checking
2476         // whether the SP is within g's stack bounds. (We could also require that SP
2477         // be changed only after g, but the stack bounds check is needed by other
2478         // cases, so there is no need to impose an additional requirement.)
2479         //
2480         // There is one exceptional transition to a system g, not in ordinary execution.
2481         // When a signal arrives, the operating system starts the signal handler running
2482         // with an updated PC and SP. The g is updated last, at the beginning of the
2483         // handler. There are two reasons this is okay. First, until g is updated the
2484         // g and SP do not match, so the stack bounds check detects the partial transition.
2485         // Second, signal handlers currently run with signals disabled, so a profiling
2486         // signal cannot arrive during the handler.
2487         //
2488         // When switching from a system g to a user g, there are three possibilities.
2489         //
2490         // First, it may be that the g switch has no PC update, because the SP
2491         // either corresponds to a user g throughout (as in runtime.asmcgocall)
2492         // or because it has been arranged to look like a user g frame
2493         // (as in runtime.cgocallback_gofunc). In this case, since the entire
2494         // transition is a g+SP update, a partial transition updating just one of 
2495         // those will be detected by the stack bounds check.
2496         //
2497         // Second, when returning from a signal handler, the PC and SP updates
2498         // are performed by the operating system in an atomic update, so the g
2499         // update must be done before them. The stack bounds check detects
2500         // the partial transition here, and (again) signal handlers run with signals
2501         // disabled, so a profiling signal cannot arrive then anyway.
2502         //
2503         // Third, the common case: it may be that the switch updates g, SP, and PC
2504         // separately, as in runtime.gogo.
2505         //
2506         // Because runtime.gogo is the only instance, we check whether the PC lies
2507         // within that function, and if so, do not ask for a traceback. This approach
2508         // requires knowing the size of the runtime.gogo function, which we
2509         // record in arch_*.h and check in runtime_test.go.
2510         //
2511         // There is another apparently viable approach, recorded here in case
2512         // the "PC within runtime.gogo" check turns out not to be usable.
2513         // It would be possible to delay the update of either g or SP until immediately
2514         // before the PC update instruction. Then, because of the stack bounds check,
2515         // the only problematic interrupt point is just before that PC update instruction,
2516         // and the sigprof handler can detect that instruction and simulate stepping past
2517         // it in order to reach a consistent state. On ARM, the update of g must be made
2518         // in two places (in R10 and also in a TLS slot), so the delayed update would
2519         // need to be the SP update. The sigprof handler must read the instruction at
2520         // the current PC and if it was the known instruction (for example, JMP BX or 
2521         // MOV R2, PC), use that other register in place of the PC value.
2522         // The biggest drawback to this solution is that it requires that we can tell
2523         // whether it's safe to read from the memory pointed at by PC.
2524         // In a correct program, we can test PC == nil and otherwise read,
2525         // but if a profiling signal happens at the instant that a program executes
2526         // a bad jump (before the program manages to handle the resulting fault)
2527         // the profiling handler could fault trying to read nonexistent memory.
2528         //
2529         // To recap, there are no constraints on the assembly being used for the
2530         // transition. We simply require that g and SP match and that the PC is not
2531         // in runtime.gogo.
2532         traceback = true;
2533         if(gp == nil || gp != mp->curg ||
2534            (uintptr)sp < gp->stack.lo || gp->stack.hi < (uintptr)sp ||
2535            ((uint8*)runtime·gogo <= pc && pc < (uint8*)runtime·gogo + RuntimeGogoBytes))
2536                 traceback = false;
2537
2538         n = 0;
2539         if(traceback)
2540                 n = runtime·gentraceback((uintptr)pc, (uintptr)sp, (uintptr)lr, gp, 0, stk, nelem(stk), nil, nil, TraceTrap);
2541         if(!traceback || n <= 0) {
2542                 // Normal traceback is impossible or has failed.
2543                 // See if it falls into several common cases.
2544                 n = 0;
2545                 if(mp->ncgo > 0 && mp->curg != nil &&
2546                         mp->curg->syscallpc != 0 && mp->curg->syscallsp != 0) {
2547                         // Cgo: we can't unwind and symbolize arbitrary C code,
2548                         // so instead collect Go stack that leads to the cgo call.
2549                         // This is especially important on windows, since all syscalls are cgo calls.
2550                         n = runtime·gentraceback(mp->curg->syscallpc, mp->curg->syscallsp, 0, mp->curg, 0, stk, nelem(stk), nil, nil, 0);
2551                 }
2552 #ifdef GOOS_windows
2553                 if(n == 0 && mp->libcallg != nil && mp->libcallpc != 0 && mp->libcallsp != 0) {
2554                         // Libcall, i.e. runtime syscall on windows.
2555                         // Collect Go stack that leads to the call.
2556                         n = runtime·gentraceback(mp->libcallpc, mp->libcallsp, 0, mp->libcallg, 0, stk, nelem(stk), nil, nil, 0);
2557                 }
2558 #endif
2559                 if(n == 0) {
2560                         // If all of the above has failed, account it against abstract "System" or "GC".
2561                         n = 2;
2562                         // "ExternalCode" is better than "etext".
2563                         if((uintptr)pc > (uintptr)runtime·etext)
2564                                 pc = (byte*)ExternalCode + PCQuantum;
2565                         stk[0] = (uintptr)pc;
2566                         if(mp->gcing || mp->helpgc)
2567                                 stk[1] = (uintptr)GC + PCQuantum;
2568                         else
2569                                 stk[1] = (uintptr)System + PCQuantum;
2570                 }
2571         }
2572
2573         if(prof.hz != 0) {
2574                 // Simple cas-lock to coordinate with setcpuprofilerate.
2575                 while(!runtime·cas(&prof.lock, 0, 1))
2576                         runtime·osyield();
2577                 if(prof.hz != 0)
2578                         runtime·cpuproftick(stk, n);
2579                 runtime·atomicstore(&prof.lock, 0);
2580         }
2581         mp->mallocing--;
2582 }
2583
2584 // Arrange to call fn with a traceback hz times a second.
2585 void
2586 runtime·setcpuprofilerate_m(void)
2587 {
2588         int32 hz;
2589         
2590         hz = g->m->scalararg[0];
2591         g->m->scalararg[0] = 0;
2592
2593         // Force sane arguments.
2594         if(hz < 0)
2595                 hz = 0;
2596
2597         // Disable preemption; otherwise we can be rescheduled to another thread
2598         // that has profiling enabled.
2599         g->m->locks++;
2600
2601         // Stop profiler on this thread so that it is safe to lock prof.
2602         // If a profiling signal came in while we had prof locked,
2603         // it would deadlock.
2604         runtime·resetcpuprofiler(0);
2605
2606         while(!runtime·cas(&prof.lock, 0, 1))
2607                 runtime·osyield();
2608         prof.hz = hz;
2609         runtime·atomicstore(&prof.lock, 0);
2610
2611         runtime·lock(&runtime·sched.lock);
2612         runtime·sched.profilehz = hz;
2613         runtime·unlock(&runtime·sched.lock);
2614
2615         if(hz != 0)
2616                 runtime·resetcpuprofiler(hz);
2617
2618         g->m->locks--;
2619 }
2620
2621 P *runtime·newP(void);
2622
2623 // Change number of processors.  The world is stopped, sched is locked.
2624 // gcworkbufs are not being modified by either the GC or 
2625 // the write barrier code.
2626 static void
2627 procresize(int32 new)
2628 {
2629         int32 i, old;
2630         bool empty;
2631         G *gp;
2632         P *p;
2633
2634         old = runtime·gomaxprocs;
2635         if(old < 0 || old > MaxGomaxprocs || new <= 0 || new > MaxGomaxprocs)
2636                 runtime·throw("procresize: invalid arg");
2637         // initialize new P's
2638         for(i = 0; i < new; i++) {
2639                 p = runtime·allp[i];
2640                 if(p == nil) {
2641                         p = runtime·newP();
2642                         p->id = i;
2643                         p->status = Pgcstop;
2644                         runtime·atomicstorep(&runtime·allp[i], p);
2645                 }
2646                 if(p->mcache == nil) {
2647                         if(old==0 && i==0)
2648                                 p->mcache = g->m->mcache;  // bootstrap
2649                         else
2650                                 p->mcache = runtime·allocmcache();
2651                 }
2652         }
2653
2654         // redistribute runnable G's evenly
2655         // collect all runnable goroutines in global queue preserving FIFO order
2656         // FIFO order is required to ensure fairness even during frequent GCs
2657         // see http://golang.org/issue/7126
2658         empty = false;
2659         while(!empty) {
2660                 empty = true;
2661                 for(i = 0; i < old; i++) {
2662                         p = runtime·allp[i];
2663                         if(p->runqhead == p->runqtail)
2664                                 continue;
2665                         empty = false;
2666                         // pop from tail of local queue
2667                         p->runqtail--;
2668                         gp = p->runq[p->runqtail%nelem(p->runq)];
2669                         // push onto head of global queue
2670                         gp->schedlink = runtime·sched.runqhead;
2671                         runtime·sched.runqhead = gp;
2672                         if(runtime·sched.runqtail == nil)
2673                                 runtime·sched.runqtail = gp;
2674                         runtime·sched.runqsize++;
2675                 }
2676         }
2677         // fill local queues with at most nelem(p->runq)/2 goroutines
2678         // start at 1 because current M already executes some G and will acquire allp[0] below,
2679         // so if we have a spare G we want to put it into allp[1].
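        // For example, with new=2 and 5 queued G's, i runs 1..5 and i%new
        // alternates 1,0,1,0,1, so the G's land on allp[1], allp[0], allp[1],
        // allp[0], allp[1].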
2680         for(i = 1; i < new * nelem(p->runq)/2 && runtime·sched.runqsize > 0; i++) {
2681                 gp = runtime·sched.runqhead;
2682                 runtime·sched.runqhead = gp->schedlink;
2683                 if(runtime·sched.runqhead == nil)
2684                         runtime·sched.runqtail = nil;
2685                 runtime·sched.runqsize--;
2686                 runqput(runtime·allp[i%new], gp);
2687         }
2688
2689         // free unused P's
2690         for(i = new; i < old; i++) {
2691                 p = runtime·allp[i];
2692                 runtime·freemcache(p->mcache);
2693                 p->mcache = nil;
2694                 gfpurge(p);
2695                 p->status = Pdead;
2696                 // can't free P itself because it can be referenced by an M in syscall
2697         }
2698
2699         if(g->m->p)
2700                 g->m->p->m = nil;
2701         g->m->p = nil;
2702         g->m->mcache = nil;
2703         p = runtime·allp[0];
2704         p->m = nil;
2705         p->status = Pidle;
2706         acquirep(p);
2707         for(i = new-1; i > 0; i--) {
2708                 p = runtime·allp[i];
2709                 p->status = Pidle;
2710                 pidleput(p);
2711         }
2712         runtime·atomicstore((uint32*)&runtime·gomaxprocs, new);
2713 }
2714
2715 // Associate p and the current m.
2716 static void
2717 acquirep(P *p)
2718 {
2719         if(g->m->p || g->m->mcache)
2720                 runtime·throw("acquirep: already in go");
2721         if(p->m || p->status != Pidle) {
2722                 runtime·printf("acquirep: p->m=%p(%d) p->status=%d\n", p->m, p->m ? p->m->id : 0, p->status);
2723                 runtime·throw("acquirep: invalid p state");
2724         }
2725         g->m->mcache = p->mcache;
2726         g->m->p = p;
2727         p->m = g->m;
2728         p->status = Prunning;
2729 }
2730
2731 // Disassociate p and the current m.
2732 static P*
2733 releasep(void)
2734 {
2735         P *p;
2736
2737         if(g->m->p == nil || g->m->mcache == nil)
2738                 runtime·throw("releasep: invalid arg");
2739         p = g->m->p;
2740         if(p->m != g->m || p->mcache != g->m->mcache || p->status != Prunning) {
2741                 runtime·printf("releasep: m=%p m->p=%p p->m=%p m->mcache=%p p->mcache=%p p->status=%d\n",
2742                         g->m, g->m->p, p->m, g->m->mcache, p->mcache, p->status);
2743                 runtime·throw("releasep: invalid p state");
2744         }
2745         g->m->p = nil;
2746         g->m->mcache = nil;
2747         p->m = nil;
2748         p->status = Pidle;
2749         return p;
2750 }
2751
2752 static void
2753 incidlelocked(int32 v)
2754 {
2755         runtime·lock(&runtime·sched.lock);
2756         runtime·sched.nmidlelocked += v;
2757         if(v > 0)
2758                 checkdead();
2759         runtime·unlock(&runtime·sched.lock);
2760 }
2761
2762 // Check for deadlock situation.
2763 // The check is based on the number of running M's; if it is 0, we are deadlocked.
2764 static void
2765 checkdead(void)
2766 {
2767         G *gp;
2768         P *p;
2769         M *mp;
2770         int32 run, grunning, s;
2771         uintptr i;
2772
2773         // -1 for sysmon
2774         run = runtime·sched.mcount - runtime·sched.nmidle - runtime·sched.nmidlelocked - 1;
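        // For example, with mcount=4, nmidle=2 and nmidlelocked=1 we get
        // run = 4-2-1-1 = 0, so we fall through and inspect the G's below.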
2775         if(run > 0)
2776                 return;
2777         // If we are dying because of a signal caught on an already idle thread,
2778         // freezetheworld will cause all running threads to block.
2779         // And the runtime will essentially enter a deadlock state,
2780         // except that there is a thread that will call runtime·exit soon.
2781         if(runtime·panicking > 0)
2782                 return;
2783         if(run < 0) {
2784                 runtime·printf("runtime: checkdead: nmidle=%d nmidlelocked=%d mcount=%d\n",
2785                         runtime·sched.nmidle, runtime·sched.nmidlelocked, runtime·sched.mcount);
2786                 runtime·throw("checkdead: inconsistent counts");
2787         }
2788         grunning = 0;
2789         runtime·lock(&runtime·allglock);
2790         for(i = 0; i < runtime·allglen; i++) {
2791                 gp = runtime·allg[i];
2792                 if(gp->issystem)
2793                         continue;
2794                 s = runtime·readgstatus(gp);
2795                 switch(s&~Gscan) {
2796                 case Gwaiting:
2797                         grunning++;
2798                         break;
2799                 case Grunnable:
2800                 case Grunning:
2801                 case Gsyscall:
2802                         runtime·unlock(&runtime·allglock);
2803                         runtime·printf("runtime: checkdead: find g %D in status %d\n", gp->goid, s);
2804                         runtime·throw("checkdead: runnable g");
2805                         break;
2806                 }
2807         }
2808         runtime·unlock(&runtime·allglock);
2809         if(grunning == 0)  // possible if main goroutine calls runtime·Goexit()
2810                 runtime·throw("no goroutines (main called runtime.Goexit) - deadlock!");
2811
2812         // Maybe jump time forward for playground.
2813         if((gp = runtime·timejump()) != nil) {
2814                 runtime·casgstatus(gp, Gwaiting, Grunnable);
2815                 globrunqput(gp);
2816                 p = pidleget();
2817                 if(p == nil)
2818                         runtime·throw("checkdead: no p for timer");
2819                 mp = mget();
2820                 if(mp == nil)
2821                         newm(nil, p);
2822                 else {
2823                         mp->nextp = p;
2824                         runtime·notewakeup(&mp->park);
2825                 }
2826                 return;
2827         }
2828
2829         g->m->throwing = -1;  // do not dump full stacks
2830         runtime·throw("all goroutines are asleep - deadlock!");
2831 }
2832
2833 static void
2834 sysmon(void)
2835 {
2836         uint32 idle, delay, nscavenge;
2837         int64 now, unixnow, lastpoll, lasttrace, lastgc;
2838         int64 forcegcperiod, scavengelimit, lastscavenge, maxsleep;
2839         G *gp;
2840
2841         // If we go two minutes without a garbage collection, force one to run.
2842         forcegcperiod = 2*60*1e9;
2843         // If a heap span goes unused for 5 minutes after a garbage collection,
2844         // we hand it back to the operating system.
2845         scavengelimit = 5*60*1e9;
2846         if(runtime·debug.scavenge > 0) {
2847                 // Scavenge-a-lot for testing.
2848                 forcegcperiod = 10*1e6;
2849                 scavengelimit = 20*1e6;
2850         }
2851         lastscavenge = runtime·nanotime();
2852         nscavenge = 0;
2853         // Make wake-up period small enough for the sampling to be correct.
2854         maxsleep = forcegcperiod/2;
2855         if(scavengelimit < forcegcperiod)
2856                 maxsleep = scavengelimit/2;
2857
2858         lasttrace = 0;
2859         idle = 0;  // how many cycles in succession we have gone without waking anybody up
2860         delay = 0;
2861         for(;;) {
2862                 if(idle == 0)  // start with 20us sleep...
2863                         delay = 20;
2864                 else if(idle > 50)  // start doubling the sleep after 1ms...
2865                         delay *= 2;
2866                 if(delay > 10*1000)  // up to 10ms
2867                         delay = 10*1000;
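                // Net effect: roughly 1ms of 20us naps, then the nap doubles each
                // idle cycle (40us, 80us, ...) until it is clamped at 10ms.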
2868                 runtime·usleep(delay);
2869                 if(runtime·debug.schedtrace <= 0 &&
2870                         (runtime·sched.gcwaiting || runtime·atomicload(&runtime·sched.npidle) == runtime·gomaxprocs)) {  // TODO: fast atomic
2871                         runtime·lock(&runtime·sched.lock);
2872                         if(runtime·atomicload(&runtime·sched.gcwaiting) || runtime·atomicload(&runtime·sched.npidle) == runtime·gomaxprocs) {
2873                                 runtime·atomicstore(&runtime·sched.sysmonwait, 1);
2874                                 runtime·unlock(&runtime·sched.lock);
2875                                 runtime·notetsleep(&runtime·sched.sysmonnote, maxsleep);
2876                                 runtime·lock(&runtime·sched.lock);
2877                                 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
2878                                 runtime·noteclear(&runtime·sched.sysmonnote);
2879                                 idle = 0;
2880                                 delay = 20;
2881                         }
2882                         runtime·unlock(&runtime·sched.lock);
2883                 }
2884                 // poll network if not polled for more than 10ms
2885                 lastpoll = runtime·atomicload64(&runtime·sched.lastpoll);
2886                 now = runtime·nanotime();
2887                 unixnow = runtime·unixnanotime();
2888                 if(lastpoll != 0 && lastpoll + 10*1000*1000 < now) {
2889                         runtime·cas64(&runtime·sched.lastpoll, lastpoll, now);
2890                         gp = runtime·netpoll(false);  // non-blocking
2891                         if(gp) {
2892                                 // Need to decrement number of idle locked M's
2893                                 // (pretending that one more is running) before injectglist.
2894                                 // Otherwise it can lead to the following situation:
2895                                 // injectglist grabs all P's but before it starts M's to run the P's,
2896                                 // another M returns from syscall, finishes running its G,
2897                                 // observes that there is no work to do and no other running M's
2898                                 // and reports deadlock.
2899                                 incidlelocked(-1);
2900                                 injectglist(gp);
2901                                 incidlelocked(1);
2902                         }
2903                 }
2904                 // retake P's blocked in syscalls
2905                 // and preempt long running G's
2906                 if(retake(now))
2907                         idle = 0;
2908                 else
2909                         idle++;
2910
2911                 // check if we need to force a GC
2912                 lastgc = runtime·atomicload64(&mstats.last_gc);
2913                 if(lastgc != 0 && unixnow - lastgc > forcegcperiod && runtime·atomicload(&runtime·forcegc.idle)) {
2914                         runtime·lock(&runtime·forcegc.lock);
2915                         runtime·forcegc.idle = 0;
2916                         runtime·forcegc.g->schedlink = nil;
2917                         injectglist(runtime·forcegc.g);
2918                         runtime·unlock(&runtime·forcegc.lock);
2919                 }
2920
2921                 // scavenge heap once in a while
2922                 if(lastscavenge + scavengelimit/2 < now) {
2923                         runtime·MHeap_Scavenge(nscavenge, now, scavengelimit);
2924                         lastscavenge = now;
2925                         nscavenge++;
2926                 }
2927
2928                 if(runtime·debug.schedtrace > 0 && lasttrace + runtime·debug.schedtrace*1000000ll <= now) {
2929                         lasttrace = now;
2930                         runtime·schedtrace(runtime·debug.scheddetail);
2931                 }
2932         }
2933 }
2934
2935 typedef struct Pdesc Pdesc;
2936 struct Pdesc
2937 {
2938         uint32  schedtick;
2939         int64   schedwhen;
2940         uint32  syscalltick;
2941         int64   syscallwhen;
2942 };
2943 #pragma dataflag NOPTR
2944 static Pdesc pdesc[MaxGomaxprocs];
2945
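// retake compares each P's schedtick/syscalltick against the copy cached in
// pdesc.  An unchanged tick means the P has been sitting in the same syscall
// (or running the same G) since the previous sysmon pass, and the *when
// timestamp recorded on the first such observation bounds how long it has
// been stuck.  Illustrative timeline: sysmon sees syscalltick=7 at t0 and
// records syscallwhen=t0; if the tick is still 7 on a later pass and the
// conditions below hold, the P is retaken and handed off to another M.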
2946 static uint32
2947 retake(int64 now)
2948 {
2949         uint32 i, s, n;
2950         int64 t;
2951         P *p;
2952         Pdesc *pd;
2953
2954         n = 0;
2955         for(i = 0; i < runtime·gomaxprocs; i++) {
2956                 p = runtime·allp[i];
2957                 if(p==nil)
2958                         continue;
2959                 pd = &pdesc[i];
2960                 s = p->status;
2961                 if(s == Psyscall) {
2962                         // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
2963                         t = p->syscalltick;
2964                         if(pd->syscalltick != t) {
2965                                 pd->syscalltick = t;
2966                                 pd->syscallwhen = now;
2967                                 continue;
2968                         }
2969                         // On the one hand we don't want to retake Ps if there is no other work to do,
2970                         // but on the other hand we want to retake them eventually
2971                         // because they can prevent the sysmon thread from deep sleep.
2972                         if(p->runqhead == p->runqtail &&
2973                                 runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) > 0 &&
2974                                 pd->syscallwhen + 10*1000*1000 > now)
2975                                 continue;
2976                         // Need to decrement number of idle locked M's
2977                         // (pretending that one more is running) before the CAS.
2978                         // Otherwise the M from which we retake can exit the syscall,
2979                         // increment nmidle and report deadlock.
2980                         incidlelocked(-1);
2981                         if(runtime·cas(&p->status, s, Pidle)) {
2982                                 n++;
2983                                 handoffp(p);
2984                         }
2985                         incidlelocked(1);
2986                 } else if(s == Prunning) {
2987                         // Preempt G if it's running for more than 10ms.
2988                         t = p->schedtick;
2989                         if(pd->schedtick != t) {
2990                                 pd->schedtick = t;
2991                                 pd->schedwhen = now;
2992                                 continue;
2993                         }
2994                         if(pd->schedwhen + 10*1000*1000 > now)
2995                                 continue;
2996                         preemptone(p);
2997                 }
2998         }
2999         return n;
3000 }
3001
3002 // Tell all goroutines that they have been preempted and they should stop.
3003 // This function is purely best-effort.  It can fail to inform a goroutine if a
3004 // processor just started running it.
3005 // No locks need to be held.
3006 // Returns true if preemption request was issued to at least one goroutine.
3007 static bool
3008 preemptall(void)
3009 {
3010         P *p;
3011         int32 i;
3012         bool res;
3013
3014         res = false;
3015         for(i = 0; i < runtime·gomaxprocs; i++) {
3016                 p = runtime·allp[i];
3017                 if(p == nil || p->status != Prunning)
3018                         continue;
3019                 res |= preemptone(p);
3020         }
3021         return res;
3022 }
3023
3024 // Tell the goroutine running on processor P to stop.
3025 // This function is purely best-effort.  It can incorrectly fail to inform the
3026 // goroutine.  It can inform the wrong goroutine.  Even if it informs the
3027 // correct goroutine, that goroutine might ignore the request if it is
3028 // simultaneously executing runtime·newstack.
3029 // No lock needs to be held.
3030 // Returns true if preemption request was issued.
3031 // The actual preemption will happen at some point in the future
3032 // and will be indicated by the gp->status no longer being
3033 // Grunning.
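// The compiler emits a stack check in every function prologue; a rough sketch
// (illustrative, the real test lives in the generated prologue and
// runtime·newstack):
//
//      if(SP < g->stackguard0)
//              runtime·morestack();
//
// StackPreempt is a very large unsigned value, so once stackguard0 is set to
// it the next call takes the morestack path, and newstack sees gp->preempt
// and reschedules the goroutine instead of growing the stack.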
3034 static bool
3035 preemptone(P *p)
3036 {
3037         M *mp;
3038         G *gp;
3039
3040         mp = p->m;
3041         if(mp == nil || mp == g->m)
3042                 return false;
3043         gp = mp->curg;
3044         if(gp == nil || gp == mp->g0)
3045                 return false;
3046         gp->preempt = true;
3047         // Every call in a goroutine checks for stack overflow by
3048         // comparing the current stack pointer to gp->stackguard0.
3049         // Setting gp->stackguard0 to StackPreempt folds
3050         // preemption into the normal stack overflow check.
3051         gp->stackguard0 = StackPreempt;
3052         return true;
3053 }
3054
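// Scheduler trace, enabled with GODEBUG=schedtrace=N (milliseconds between
// traces); scheddetail=1 adds the per-P, per-M and per-G lines.  A
// non-detailed line looks roughly like (values illustrative):
//
//      SCHED 2013ms: gomaxprocs=4 idleprocs=2 threads=7 spinningthreads=0 idlethreads=3 runqueue=0 [1 0 0 0]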
3055 void
3056 runtime·schedtrace(bool detailed)
3057 {
3058         static int64 starttime;
3059         int64 now;
3060         int64 id1, id2, id3;
3061         int32 i, t, h;
3062         uintptr gi;
3063         int8 *fmt;
3064         M *mp, *lockedm;
3065         G *gp, *lockedg;
3066         P *p;
3067
3068         now = runtime·nanotime();
3069         if(starttime == 0)
3070                 starttime = now;
3071
3072         runtime·lock(&runtime·sched.lock);
3073         runtime·printf("SCHED %Dms: gomaxprocs=%d idleprocs=%d threads=%d spinningthreads=%d idlethreads=%d runqueue=%d",
3074                 (now-starttime)/1000000, runtime·gomaxprocs, runtime·sched.npidle, runtime·sched.mcount,
3075                 runtime·sched.nmspinning, runtime·sched.nmidle, runtime·sched.runqsize);
3076         if(detailed) {
3077                 runtime·printf(" gcwaiting=%d nmidlelocked=%d stopwait=%d sysmonwait=%d\n",
3078                         runtime·sched.gcwaiting, runtime·sched.nmidlelocked,
3079                         runtime·sched.stopwait, runtime·sched.sysmonwait);
3080         }
3081         // We must be careful while reading data from P's, M's and G's.
3082         // Even if we hold schedlock, most data can be changed concurrently.
3083         // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
3084         for(i = 0; i < runtime·gomaxprocs; i++) {
3085                 p = runtime·allp[i];
3086                 if(p == nil)
3087                         continue;
3088                 mp = p->m;
3089                 h = runtime·atomicload(&p->runqhead);
3090                 t = runtime·atomicload(&p->runqtail);
3091                 if(detailed)
3092                         runtime·printf("  P%d: status=%d schedtick=%d syscalltick=%d m=%d runqsize=%d gfreecnt=%d\n",
3093                                 i, p->status, p->schedtick, p->syscalltick, mp ? mp->id : -1, t-h, p->gfreecnt);
3094                 else {
3095                         // In non-detailed mode format lengths of per-P run queues as:
3096                         // [len1 len2 len3 len4]
3097                         fmt = " %d";
3098                         if(runtime·gomaxprocs == 1)
3099                                 fmt = " [%d]\n";
3100                         else if(i == 0)
3101                                 fmt = " [%d";
3102                         else if(i == runtime·gomaxprocs-1)
3103                                 fmt = " %d]\n";
3104                         runtime·printf(fmt, t-h);
3105                 }
3106         }
3107         if(!detailed) {
3108                 runtime·unlock(&runtime·sched.lock);
3109                 return;
3110         }
3111         for(mp = runtime·allm; mp; mp = mp->alllink) {
3112                 p = mp->p;
3113                 gp = mp->curg;
3114                 lockedg = mp->lockedg;
3115                 id1 = -1;
3116                 if(p)
3117                         id1 = p->id;
3118                 id2 = -1;
3119                 if(gp)
3120                         id2 = gp->goid;
3121                 id3 = -1;
3122                 if(lockedg)
3123                         id3 = lockedg->goid;
3124                 runtime·printf("  M%d: p=%D curg=%D mallocing=%d throwing=%d gcing=%d"
3125                         " locks=%d dying=%d helpgc=%d spinning=%d blocked=%d lockedg=%D\n",
3126                         mp->id, id1, id2,
3127                         mp->mallocing, mp->throwing, mp->gcing, mp->locks, mp->dying, mp->helpgc,
3128                         mp->spinning, mp->blocked, id3);
3129         }
3130         runtime·lock(&runtime·allglock);
3131         for(gi = 0; gi < runtime·allglen; gi++) {
3132                 gp = runtime·allg[gi];
3133                 mp = gp->m;
3134                 lockedm = gp->lockedm;
3135                 runtime·printf("  G%D: status=%d(%S) m=%d lockedm=%d\n",
3136                         gp->goid, runtime·readgstatus(gp), gp->waitreason, mp ? mp->id : -1,
3137                         lockedm ? lockedm->id : -1);
3138         }
3139         runtime·unlock(&runtime·allglock);
3140         runtime·unlock(&runtime·sched.lock);
3141 }
3142
3143 // Put mp on midle list.
3144 // Sched must be locked.
3145 static void
3146 mput(M *mp)
3147 {
3148         mp->schedlink = runtime·sched.midle;
3149         runtime·sched.midle = mp;
3150         runtime·sched.nmidle++;
3151         checkdead();
3152 }
3153
3154 // Try to get an m from midle list.
3155 // Sched must be locked.
3156 static M*
3157 mget(void)
3158 {
3159         M *mp;
3160
3161         if((mp = runtime·sched.midle) != nil){
3162                 runtime·sched.midle = mp->schedlink;
3163                 runtime·sched.nmidle--;
3164         }
3165         return mp;
3166 }
3167
3168 // Put gp on the global runnable queue.
3169 // Sched must be locked.
3170 static void
3171 globrunqput(G *gp)
3172 {
3173         gp->schedlink = nil;
3174         if(runtime·sched.runqtail)
3175                 runtime·sched.runqtail->schedlink = gp;
3176         else
3177                 runtime·sched.runqhead = gp;
3178         runtime·sched.runqtail = gp;
3179         runtime·sched.runqsize++;
3180 }
3181
3182 // Put a batch of runnable goroutines on the global runnable queue.
3183 // Sched must be locked.
3184 static void
3185 globrunqputbatch(G *ghead, G *gtail, int32 n)
3186 {
3187         gtail->schedlink = nil;
3188         if(runtime·sched.runqtail)
3189                 runtime·sched.runqtail->schedlink = ghead;
3190         else
3191                 runtime·sched.runqhead = ghead;
3192         runtime·sched.runqtail = gtail;
3193         runtime·sched.runqsize += n;
3194 }
3195
3196 // Try to get a batch of G's from the global runnable queue.
3197 // Sched must be locked.
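// The batch size is the caller's fair share, runqsize/gomaxprocs+1, clamped
// to max and to half of the local run queue.  For example (illustrative
// numbers), with runqsize=100 and gomaxprocs=4 up to 26 G's are taken: the
// first is returned to run immediately and the rest are moved onto p's local
// queue.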
3198 static G*
3199 globrunqget(P *p, int32 max)
3200 {
3201         G *gp, *gp1;
3202         int32 n;
3203
3204         if(runtime·sched.runqsize == 0)
3205                 return nil;
3206         n = runtime·sched.runqsize/runtime·gomaxprocs+1;
3207         if(n > runtime·sched.runqsize)
3208                 n = runtime·sched.runqsize;
3209         if(max > 0 && n > max)
3210                 n = max;
3211         if(n > nelem(p->runq)/2)
3212                 n = nelem(p->runq)/2;
3213         runtime·sched.runqsize -= n;
3214         if(runtime·sched.runqsize == 0)
3215                 runtime·sched.runqtail = nil;
3216         gp = runtime·sched.runqhead;
3217         runtime·sched.runqhead = gp->schedlink;
3218         n--;
3219         while(n--) {
3220                 gp1 = runtime·sched.runqhead;
3221                 runtime·sched.runqhead = gp1->schedlink;
3222                 runqput(p, gp1);
3223         }
3224         return gp;
3225 }
3226
3227 // Put p on the pidle list.
3228 // Sched must be locked.
3229 static void
3230 pidleput(P *p)
3231 {
3232         p->link = runtime·sched.pidle;
3233         runtime·sched.pidle = p;
3234         runtime·xadd(&runtime·sched.npidle, 1);  // TODO: fast atomic
3235 }
3236
3237 // Try to get a p from the pidle list.
3238 // Sched must be locked.
3239 static P*
3240 pidleget(void)
3241 {
3242         P *p;
3243
3244         p = runtime·sched.pidle;
3245         if(p) {
3246                 runtime·sched.pidle = p->link;
3247                 runtime·xadd(&runtime·sched.npidle, -1);  // TODO: fast atomic
3248         }
3249         return p;
3250 }
3251
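// The local run queue is a fixed-size ring buffer indexed by free-running
// uint32 counters: runqtail is advanced only by the owner P, runqhead by
// consumers via CAS.  The queue length is t-h and slots are addressed modulo
// nelem(p->runq), so unsigned wraparound is harmless; e.g. h=0xffffffff and
// t=0x00000001 still give a length of 2.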
3252 // Try to put g on local runnable queue.
3253 // If it's full, put onto global queue.
3254 // Executed only by the owner P.
3255 static void
3256 runqput(P *p, G *gp)
3257 {
3258         uint32 h, t;
3259
3260 retry:
3261         h = runtime·atomicload(&p->runqhead);  // load-acquire, synchronize with consumers
3262         t = p->runqtail;
3263         if(t - h < nelem(p->runq)) {
3264                 p->runq[t%nelem(p->runq)] = gp;
3265                 runtime·atomicstore(&p->runqtail, t+1);  // store-release, makes the item available for consumption
3266                 return;
3267         }
3268         if(runqputslow(p, gp, h, t))
3269                 return;
3270         // the queue is not full, now the put above must succeed
3271         goto retry;
3272 }
3273
3274 // Put g and a batch of work from local runnable queue on global queue.
3275 // Executed only by the owner P.
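// Moving half of a full queue (plus gp itself, nelem(p->runq)/2+1 G's) under
// a single runtime·sched.lock acquisition amortizes the cost of touching the
// global queue.  The CAS on runqhead can still lose to a thief that consumed
// part of the queue first; in that case this returns false and runqput simply
// retries its fast path.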
3276 static bool
3277 runqputslow(P *p, G *gp, uint32 h, uint32 t)
3278 {
3279         G *batch[nelem(p->runq)/2+1];
3280         uint32 n, i;
3281
3282         // First, grab a batch from local queue.
3283         n = t-h;
3284         n = n/2;
3285         if(n != nelem(p->runq)/2)
3286                 runtime·throw("runqputslow: queue is not full");
3287         for(i=0; i<n; i++)
3288                 batch[i] = p->runq[(h+i)%nelem(p->runq)];
3289         if(!runtime·cas(&p->runqhead, h, h+n))  // cas-release, commits consume
3290                 return false;
3291         batch[n] = gp;
3292         // Link the goroutines.
3293         for(i=0; i<n; i++)
3294                 batch[i]->schedlink = batch[i+1];
3295         // Now put the batch on global queue.
3296         runtime·lock(&runtime·sched.lock);
3297         globrunqputbatch(batch[0], batch[n], n+1);
3298         runtime·unlock(&runtime·sched.lock);
3299         return true;
3300 }
3301
3302 // Get g from local runnable queue.
3303 // Executed only by the owner P.
3304 static G*
3305 runqget(P *p)
3306 {
3307         G *gp;
3308         uint32 t, h;
3309
3310         for(;;) {
3311                 h = runtime·atomicload(&p->runqhead);  // load-acquire, synchronize with other consumers
3312                 t = p->runqtail;
3313                 if(t == h)
3314                         return nil;
3315                 gp = p->runq[h%nelem(p->runq)];
3316                 if(runtime·cas(&p->runqhead, h, h+1))  // cas-release, commits consume
3317                         return gp;
3318         }
3319 }
3320
3321 // Grabs a batch of goroutines from local runnable queue.
3322 // batch array must be of size nelem(p->runq)/2. Returns number of grabbed goroutines.
3323 // Can be executed by any P.
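// n = n - n/2 rounds up, so the larger half is taken: a queue of length 5
// yields 3, a queue of length 1 yields 1.  An apparent length greater than
// nelem(p->runq)/2 can only come from reading h and t at different moments,
// so such a snapshot is discarded and the loads are retried.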
3324 static uint32
3325 runqgrab(P *p, G **batch)
3326 {
3327         uint32 t, h, n, i;
3328
3329         for(;;) {
3330                 h = runtime·atomicload(&p->runqhead);  // load-acquire, synchronize with other consumers
3331                 t = runtime·atomicload(&p->runqtail);  // load-acquire, synchronize with the producer
3332                 n = t-h;
3333                 n = n - n/2;
3334                 if(n == 0)
3335                         break;
3336                 if(n > nelem(p->runq)/2)  // read inconsistent h and t
3337                         continue;
3338                 for(i=0; i<n; i++)
3339                         batch[i] = p->runq[(h+i)%nelem(p->runq)];
3340                 if(runtime·cas(&p->runqhead, h, h+n))  // cas-release, commits consume
3341                         break;
3342         }
3343         return n;
3344 }
3345
3346 // Steal half of elements from local runnable queue of p2
3347 // and put onto local runnable queue of p.
3348 // Returns one of the stolen elements (or nil if failed).
3349 static G*
3350 runqsteal(P *p, P *p2)
3351 {
3352         G *gp;
3353         G *batch[nelem(p->runq)/2];
3354         uint32 t, h, n, i;
3355
3356         n = runqgrab(p2, batch);
3357         if(n == 0)
3358                 return nil;
3359         n--;
3360         gp = batch[n];
3361         if(n == 0)
3362                 return gp;
3363         h = runtime·atomicload(&p->runqhead);  // load-acquire, synchronize with consumers
3364         t = p->runqtail;
3365         if(t - h + n >= nelem(p->runq))
3366                 runtime·throw("runqsteal: runq overflow");
3367         for(i=0; i<n; i++, t++)
3368                 p->runq[t%nelem(p->runq)] = batch[i];
3369         runtime·atomicstore(&p->runqtail, t);  // store-release, makes the item available for consumption
3370         return gp;
3371 }
3372
3373 void
3374 runtime·testSchedLocalQueue(void)
3375 {
3376         P *p;
3377         G *gs;
3378         int32 i, j;
3379
3380         p = (P*)runtime·mallocgc(sizeof(*p), nil, FlagNoScan);
3381         gs = (G*)runtime·mallocgc(nelem(p->runq)*sizeof(*gs), nil, FlagNoScan);
3382
3383         for(i = 0; i < nelem(p->runq); i++) {
3384                 if(runqget(p) != nil)
3385                         runtime·throw("runq is not empty initially");
3386                 for(j = 0; j < i; j++)
3387                         runqput(p, &gs[i]);
3388                 for(j = 0; j < i; j++) {
3389                         if(runqget(p) != &gs[i]) {
3390                                 runtime·printf("bad element at iter %d/%d\n", i, j);
3391                                 runtime·throw("bad element");
3392                         }
3393                 }
3394                 if(runqget(p) != nil)
3395                         runtime·throw("runq is not empty afterwards");
3396         }
3397 }
3398
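// Exercises runqsteal: each gs[j].sig counts how many times that G was
// dequeued from either P, so every element must end up with sig==1, and the
// number of G's that arrived via p2 must be i/2 or i/2+1 for the steal to
// have taken roughly half the queue.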
3399 void
3400 runtime·testSchedLocalQueueSteal(void)
3401 {
3402         P *p1, *p2;
3403         G *gs, *gp;
3404         int32 i, j, s;
3405
3406         p1 = (P*)runtime·mallocgc(sizeof(*p1), nil, FlagNoScan);
3407         p2 = (P*)runtime·mallocgc(sizeof(*p2), nil, FlagNoScan);
3408         gs = (G*)runtime·mallocgc(nelem(p1->runq)*sizeof(*gs), nil, FlagNoScan);
3409
3410         for(i = 0; i < nelem(p1->runq); i++) {
3411                 for(j = 0; j < i; j++) {
3412                         gs[j].sig = 0;
3413                         runqput(p1, &gs[j]);
3414                 }
3415                 gp = runqsteal(p2, p1);
3416                 s = 0;
3417                 if(gp) {
3418                         s++;
3419                         gp->sig++;
3420                 }
3421                 while((gp = runqget(p2)) != nil) {
3422                         s++;
3423                         gp->sig++;
3424                 }
3425                 while((gp = runqget(p1)) != nil)
3426                         gp->sig++;
3427                 for(j = 0; j < i; j++) {
3428                         if(gs[j].sig != 1) {
3429                                 runtime·printf("bad element %d(%d) at iter %d\n", j, gs[j].sig, i);
3430                                 runtime·throw("bad element");
3431                         }
3432                 }
3433                 if(s != i/2 && s != i/2+1) {
3434                         runtime·printf("bad steal %d, want %d or %d, iter %d\n",
3435                                 s, i/2, i/2+1, i);
3436                         runtime·throw("bad steal");
3437                 }
3438         }
3439 }
3440
3441 void
3442 runtime·setmaxthreads_m(void)
3443 {
3444         int32 in;
3445         int32 out;
3446
3447         in = g->m->scalararg[0];
3448
3449         runtime·lock(&runtime·sched.lock);
3450         out = runtime·sched.maxmcount;
3451         runtime·sched.maxmcount = in;
3452         checkmcount();
3453         runtime·unlock(&runtime·sched.lock);
3454
3455         g->m->scalararg[0] = out;
3456 }
3457
3458 static int8 experiment[] = GOEXPERIMENT; // defined in zaexperiment.h
3459
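// Reports whether name appears in the comma-separated GOEXPERIMENT string
// baked in at build time.  For example (names purely illustrative), with
// experiment[] = "fieldtrack,framepointer", haveexperiment("framepointer")
// matches because the candidate starts right after a comma and ends at the
// terminating NUL.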
3460 static bool
3461 haveexperiment(int8 *name)
3462 {
3463         int32 i, j;
3464         
3465         for(i=0; i<sizeof(experiment); i++) {
3466                 if((i == 0 || experiment[i-1] == ',') && experiment[i] == name[0]) {
3467                         for(j=0; name[j]; j++)
3468                                 if(experiment[i+j] != name[j])
3469                                         goto nomatch;
3470                         if(experiment[i+j] != '\0' && experiment[i+j] != ',')
3471                                 goto nomatch;
3472                         return 1;
3473                 }
3474         nomatch:;
3475         }
3476         return 0;
3477 }
3478
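// procPin disables preemption by bumping m->locks and reports the id of the P
// the calling goroutine is running on; procUnpin drops the lock count again.
// The id stays meaningful until the matching unpin, which is what lets
// callers in package sync (for example sync.Pool) keep per-P state without
// further locking.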
3479 #pragma textflag NOSPLIT
3480 void
3481 sync·runtime_procPin(intptr p)
3482 {
3483         M *mp;
3484
3485         mp = g->m;
3486         // Disable preemption.
3487         mp->locks++;
3488         p = mp->p->id;
3489         FLUSH(&p);
3490 }
3491
3492 #pragma textflag NOSPLIT
3493 void
3494 sync·runtime_procUnpin()
3495 {
3496         g->m->locks--;
3497 }