src/runtime/proc.c
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 #include "runtime.h"
6 #include "arch_GOARCH.h"
7 #include "zaexperiment.h"
8 #include "malloc.h"
9 #include "stack.h"
10 #include "race.h"
11 #include "type.h"
12 #include "mgc0.h"
13 #include "textflag.h"
14
15 // Goroutine scheduler
16 // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
17 //
18 // The main concepts are:
19 // G - goroutine.
20 // M - worker thread, or machine.
21 // P - processor, a resource that is required to execute Go code.
22 //     M must have an associated P to execute Go code; however, it can be
23 //     blocked or in a syscall without an associated P.
24 //
25 // Design doc at http://golang.org/s/go11sched.
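//
// For orientation (this just points at helpers defined later in this file,
// it is not new behavior): an M binds a P with acquirep(p) before running Go
// code, drops it with releasep(), and passes a P it can no longer service to
// another M via handoffp(p); see stoplockedm and handoffp below.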
26
27 enum
28 {
29         // Number of goroutine ids to grab from runtime·sched.goidgen to local per-P cache at once.
30         // 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
31         GoidCacheBatch = 16,
32 };
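// A minimal sketch of how GoidCacheBatch is meant to be used. The real refill
// lives in newproc1 (not part of this excerpt), and the per-P goidcache /
// goidcacheend fields are assumed from runtime.h rather than restated here:
//
//      if(p->goidcache == p->goidcacheend) {
//              // One atomic add reserves a whole batch of ids;
//              // xadd64 returns the last id of the reserved batch.
//              p->goidcache = runtime·xadd64(&runtime·sched.goidgen, GoidCacheBatch);
//              p->goidcache -= GoidCacheBatch-1;
//              p->goidcacheend = p->goidcache + GoidCacheBatch;
//      }
//      newg->goid = p->goidcache++;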
33
34 SchedT  runtime·sched;
35 int32   runtime·gomaxprocs;
36 uint32  runtime·needextram;
37 bool    runtime·iscgo;
38 M       runtime·m0;
39 G       runtime·g0;    // idle goroutine for m0
40 G*      runtime·lastg;
41 M*      runtime·allm;
42 M*      runtime·extram;
43 P*      runtime·allp[MaxGomaxprocs+1];
44 int8*   runtime·goos;
45 int32   runtime·ncpu;
46 int32   runtime·newprocs;
47
48 Mutex runtime·allglock;        // the following vars are protected by this lock or by stoptheworld
49 G**     runtime·allg;
50 Slice   runtime·allgs;
51 uintptr runtime·allglen;
52 ForceGCState    runtime·forcegc;
53
54 void runtime·mstart(void);
55 static void runqput(P*, G*);
56 static G* runqget(P*);
57 static bool runqputslow(P*, G*, uint32, uint32);
58 static G* runqsteal(P*, P*);
59 static void mput(M*);
60 static M* mget(void);
61 static void mcommoninit(M*);
62 static void schedule(void);
63 static void procresize(int32);
64 static void acquirep(P*);
65 static P* releasep(void);
66 static void newm(void(*)(void), P*);
67 static void stopm(void);
68 static void startm(P*, bool);
69 static void handoffp(P*);
70 static void wakep(void);
71 static void stoplockedm(void);
72 static void startlockedm(G*);
73 static void sysmon(void);
74 static uint32 retake(int64);
75 static void incidlelocked(int32);
76 static void checkdead(void);
77 static void exitsyscall0(G*);
78 void runtime·park_m(G*);
79 static void goexit0(G*);
80 static void gfput(P*, G*);
81 static G* gfget(P*);
82 static void gfpurge(P*);
83 static void globrunqput(G*);
84 static void globrunqputbatch(G*, G*, int32);
85 static G* globrunqget(P*, int32);
86 static P* pidleget(void);
87 static void pidleput(P*);
88 static void injectglist(G*);
89 static bool preemptall(void);
90 static bool preemptone(P*);
91 static bool exitsyscallfast(void);
92 static bool haveexperiment(int8*);
93 void runtime·allgadd(G*);
94 static void dropg(void);
95
96 extern String runtime·buildVersion;
97
98 // For cgo-using programs with external linking,
99 // export "main" (defined in assembly) so that libc can handle basic
100 // C runtime startup and call the Go program as if it were
101 // the C main function.
102 #pragma cgo_export_static main
103
104 // Filled in by dynamic linker when Cgo is available.
105 void (*_cgo_init)(void);
106 void (*_cgo_malloc)(void);
107 void (*_cgo_free)(void);
108
109 // Copy for Go code.
110 void* runtime·cgoMalloc;
111 void* runtime·cgoFree;
112
113 // The bootstrap sequence is:
114 //
115 //      call osinit
116 //      call schedinit
117 //      make & queue new G
118 //      call runtime·mstart
119 //
120 // The new G calls runtime·main.
121 void
122 runtime·schedinit(void)
123 {
124         int32 n, procs;
125         byte *p;
126
127         // raceinit must be the first call to race detector.
128         // In particular, it must be done before mallocinit below calls racemapshadow.
129         if(raceenabled)
130                 g->racectx = runtime·raceinit();
131
132         runtime·sched.maxmcount = 10000;
133
134         runtime·tracebackinit();
135         runtime·symtabinit();
136         runtime·stackinit();
137         runtime·mallocinit();
138         mcommoninit(g->m);
139         
140         runtime·goargs();
141         runtime·goenvs();
142         runtime·parsedebugvars();
143         runtime·gcinit();
144
145         runtime·sched.lastpoll = runtime·nanotime();
146         procs = 1;
147         p = runtime·getenv("GOMAXPROCS");
148         if(p != nil && (n = runtime·atoi(p)) > 0) {
149                 if(n > MaxGomaxprocs)
150                         n = MaxGomaxprocs;
151                 procs = n;
152         }
153         procresize(procs);
154
155         if(runtime·buildVersion.str == nil) {
156                 // Condition should never trigger.  This code just serves
157                 // to ensure runtime·buildVersion is kept in the resulting binary.
158                 runtime·buildVersion.str = (uint8*)"unknown";
159                 runtime·buildVersion.len = 7;
160         }
161
162         runtime·cgoMalloc = _cgo_malloc;
163         runtime·cgoFree = _cgo_free;
164 }
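// For context on the bootstrap comment above: the sequence is assumed to be
// driven by the platform assembly entry point (runtime·rt0_go in asm_*.s),
// which calls runtime·osinit and runtime·schedinit, queues a goroutine that
// runs runtime·main via runtime·newproc, and finally calls runtime·mstart to
// enter the scheduler on m0.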
165
166 void
167 runtime·newsysmon(void)
168 {
169         newm(sysmon, nil);
170 }
171
172 static void
173 dumpgstatus(G* gp)
174 {
175         runtime·printf("runtime: gp: gp=%p, goid=%D, gp->atomicstatus=%x\n", gp, gp->goid, runtime·readgstatus(gp));
176         runtime·printf("runtime:  g:  g=%p, goid=%D,  g->atomicstatus=%x\n", g, g->goid, runtime·readgstatus(g));
177 }
178
179 static void
180 checkmcount(void)
181 {
182         // sched lock is held
183         if(runtime·sched.mcount > runtime·sched.maxmcount){
184                 runtime·printf("runtime: program exceeds %d-thread limit\n", runtime·sched.maxmcount);
185                 runtime·throw("thread exhaustion");
186         }
187 }
188
189 static void
190 mcommoninit(M *mp)
191 {
192         // g0 stack won't make sense for user (and is not necessarily unwindable).
193         if(g != g->m->g0)
194                 runtime·callers(1, mp->createstack, nelem(mp->createstack));
195
196         mp->fastrand = 0x49f6428aUL + mp->id + runtime·cputicks();
197
198         runtime·lock(&runtime·sched.lock);
199         mp->id = runtime·sched.mcount++;
200         checkmcount();
201         runtime·mpreinit(mp);
202         if(mp->gsignal)
203                 mp->gsignal->stackguard1 = mp->gsignal->stack.lo + StackGuard;
204
205         // Add to runtime·allm so garbage collector doesn't free g->m
206         // when it is just in a register or thread-local storage.
207         mp->alllink = runtime·allm;
208         // runtime·NumCgoCall() iterates over allm w/o schedlock,
209         // so we need to publish it safely.
210         runtime·atomicstorep(&runtime·allm, mp);
211         runtime·unlock(&runtime·sched.lock);
212 }
213
214 // Mark gp ready to run.
215 void
216 runtime·ready(G *gp)
217 {
218         uint32 status;
219
220         status = runtime·readgstatus(gp);
221         // Mark runnable.
222         g->m->locks++;  // disable preemption because it can be holding p in a local var
223         if((status&~Gscan) != Gwaiting){
224                 dumpgstatus(gp);
225                 runtime·throw("bad g->status in ready");
226         }
227         // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
228         runtime·casgstatus(gp, Gwaiting, Grunnable);
229         runqput(g->m->p, gp);
230         if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0)  // TODO: fast atomic
231                 wakep();
232         g->m->locks--;
233         if(g->m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
234                 g->stackguard0 = StackPreempt;
235 }
236
237 void
238 runtime·ready_m(void)
239 {
240         G *gp;
241
242         gp = g->m->ptrarg[0];
243         g->m->ptrarg[0] = nil;
244         runtime·ready(gp);
245 }
246
247 int32
248 runtime·gcprocs(void)
249 {
250         int32 n;
251
252         // Figure out how many CPUs to use during GC.
253         // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
254         runtime·lock(&runtime·sched.lock);
255         n = runtime·gomaxprocs;
256         if(n > runtime·ncpu)
257                 n = runtime·ncpu;
258         if(n > MaxGcproc)
259                 n = MaxGcproc;
260         if(n > runtime·sched.nmidle+1) // one M is currently running
261                 n = runtime·sched.nmidle+1;
262         runtime·unlock(&runtime·sched.lock);
263         return n;
264 }
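// Worked example for the clamping above (illustrative numbers only): with
// GOMAXPROCS=8, 4 CPUs, MaxGcproc=32 and 2 idle M's, gcprocs returns
// min(8, 4, 32, 2+1) = 3.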
265
266 static bool
267 needaddgcproc(void)
268 {
269         int32 n;
270
271         runtime·lock(&runtime·sched.lock);
272         n = runtime·gomaxprocs;
273         if(n > runtime·ncpu)
274                 n = runtime·ncpu;
275         if(n > MaxGcproc)
276                 n = MaxGcproc;
277         n -= runtime·sched.nmidle+1; // one M is currently running
278         runtime·unlock(&runtime·sched.lock);
279         return n > 0;
280 }
281
282 void
283 runtime·helpgc(int32 nproc)
284 {
285         M *mp;
286         int32 n, pos;
287
288         runtime·lock(&runtime·sched.lock);
289         pos = 0;
290         for(n = 1; n < nproc; n++) {  // one M is currently running
291                 if(runtime·allp[pos]->mcache == g->m->mcache)
292                         pos++;
293                 mp = mget();
294                 if(mp == nil)
295                         runtime·throw("runtime·gcprocs inconsistency");
296                 mp->helpgc = n;
297                 mp->mcache = runtime·allp[pos]->mcache;
298                 pos++;
299                 runtime·notewakeup(&mp->park);
300         }
301         runtime·unlock(&runtime·sched.lock);
302 }
303
304 // Similar to stoptheworld but best-effort and can be called several times.
305 // There is no reverse operation; it is used during crashing.
306 // This function must not lock any mutexes.
307 void
308 runtime·freezetheworld(void)
309 {
310         int32 i;
311
312         if(runtime·gomaxprocs == 1)
313                 return;
314         // stopwait and preemption requests can be lost
315         // due to races with concurrently executing threads,
316         // so try several times
317         for(i = 0; i < 5; i++) {
318                 // this should tell the scheduler to not start any new goroutines
319                 runtime·sched.stopwait = 0x7fffffff;
320                 runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1);
321                 // this should stop running goroutines
322                 if(!preemptall())
323                         break;  // no running goroutines
324                 runtime·usleep(1000);
325         }
326         // to be sure
327         runtime·usleep(1000);
328         preemptall();
329         runtime·usleep(1000);
330 }
331
332 static bool
333 isscanstatus(uint32 status)
334 {
335         if(status == Gscan)
336                 runtime·throw("isscanstatus: Bad status Gscan");
337         return (status&Gscan) == Gscan;
338 }
339
340 // All reads and writes of g's status go through readgstatus, casgstatus,
341 // castogscanstatus, and casfromgscanstatus.
342 #pragma textflag NOSPLIT
343 uint32
344 runtime·readgstatus(G *gp)
345 {
346         return runtime·atomicload(&gp->atomicstatus);
347 }
348
349 // The Gscan statuses act like locks, and this releases them.
350 // If it proves to be a performance hit we should be able to make these
351 // simple atomic stores but for now we are going to throw if
352 // we see an inconsistent state.
353 void
354 runtime·casfromgscanstatus(G *gp, uint32 oldval, uint32 newval)
355 {
356         bool success = false;
357
358         // Check that transition is valid.
359         switch(oldval) {
360         case Gscanrunnable:
361         case Gscanwaiting:
362         case Gscanrunning:
363         case Gscansyscall:
364                 if(newval == (oldval&~Gscan))
365                         success = runtime·cas(&gp->atomicstatus, oldval, newval);
366                 break;
367         case Gscanenqueue:
368                 if(newval == Gwaiting)
369                         success = runtime·cas(&gp->atomicstatus, oldval, newval);
370                 break;
371         }       
372         if(!success){
373                 runtime·printf("runtime: casfromgscanstatus failed gp=%p, oldval=%d, newval=%d\n",  
374                         gp, oldval, newval);
375                 dumpgstatus(gp);
376                 runtime·throw("casfromgscanstatus: gp->status is not in scan state");
377         }
378 }
379
380 // This will return false if the gp is not in the expected status and the cas fails.
381 // This acts like a lock acquire while casfromgscanstatus acts like a lock release.
382 bool
383 runtime·castogscanstatus(G *gp, uint32 oldval, uint32 newval)
384 {
385         switch(oldval) {
386         case Grunnable:
387         case Gwaiting:
388         case Gsyscall:
389                 if(newval == (oldval|Gscan))
390                         return runtime·cas(&gp->atomicstatus, oldval, newval);
391                 break;
392         case Grunning:
393                 if(newval == Gscanrunning || newval == Gscanenqueue)
394                         return runtime·cas(&gp->atomicstatus, oldval, newval);
395                 break;   
396         }
397
398         runtime·printf("runtime: castogscanstatus oldval=%d newval=%d\n", oldval, newval);
399         runtime·throw("castogscanstatus");
400         return false; // not reached
401 }
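// A minimal sketch of the lock-like protocol formed by the two helpers above.
// It compresses into one place what stopg (acquire + scan) and restartg
// (release) below do in two steps; it is not a new API:
//
//      if(runtime·castogscanstatus(gp, Grunnable, Gscanrunnable)) {
//              // gp is claimed; its stack can be scanned safely here.
//              runtime·gcphasework(gp);
//              runtime·casfromgscanstatus(gp, Gscanrunnable, Grunnable);
//      }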
402
403 static void badcasgstatus(void);
404 static void helpcasgstatus(void);
405
406 // If asked to move to or from a Gscan status, this will throw. Use castogscanstatus
407 // and casfromgscanstatus instead.
408 // casgstatus will loop if g->atomicstatus is in a Gscan status until the routine that
409 // put it in the Gscan state is finished.
410 #pragma textflag NOSPLIT
411 void
412 runtime·casgstatus(G *gp, uint32 oldval, uint32 newval)
413 {
414         void (*fn)(void);
415
416         if((oldval&Gscan) || (newval&Gscan) || oldval == newval) {
417                 g->m->scalararg[0] = oldval;
418                 g->m->scalararg[1] = newval;
419                 fn = badcasgstatus;
420                 runtime·onM(&fn);
421         }
422
423         // loop if gp->atomicstatus is in a scan state giving
424         // GC time to finish and change the state to oldval.
425         while(!runtime·cas(&gp->atomicstatus, oldval, newval)) {
426                 // Help GC if needed. 
427                 if(gp->preemptscan && !gp->gcworkdone && (oldval == Grunning || oldval == Gsyscall)) {
428                         gp->preemptscan = false;
429                         g->m->ptrarg[0] = gp;
430                         fn = helpcasgstatus;
431                         runtime·onM(&fn);
432                 }
433         }       
434 }
435
436 static void
437 badcasgstatus(void)
438 {
439         uint32 oldval, newval;
440         
441         oldval = g->m->scalararg[0];
442         newval = g->m->scalararg[1];
443         g->m->scalararg[0] = 0;
444         g->m->scalararg[1] = 0;
445
446         runtime·printf("casgstatus: oldval=%d, newval=%d\n", oldval, newval);
447         runtime·throw("casgstatus: bad incoming values");
448 }
449
450 static void
451 helpcasgstatus(void)
452 {
453         G *gp;
454         
455         gp = g->m->ptrarg[0];
456         g->m->ptrarg[0] = 0;
457         runtime·gcphasework(gp);
458 }
459
460 // stopg ensures that gp is stopped at a GC safe point where its stack can be scanned
461 // or, in the context of a moving collector, its pointers can be flipped from pointing
462 // to old objects to pointing to new objects.
463 // If stopg returns true, the caller knows gp is at a GC safe point and will remain there until
464 // the caller calls restartg.
465 // If stopg returns false, the caller is not responsible for calling restartg. This can happen
466 // if another thread, either gp itself or another GC thread, has taken responsibility
467 // for the GC work related to this goroutine.
468 bool
469 runtime·stopg(G *gp)
470 {
471         uint32 s;
472
473         for(;;) {
474                 if(gp->gcworkdone)
475                         return false;
476
477                 s = runtime·readgstatus(gp);
478                 switch(s) {
479                 default:
480                         dumpgstatus(gp);
481                         runtime·throw("stopg: gp->atomicstatus is not valid");
482
483                 case Gdead:
484                         return false;
485
486                 case Gcopystack:
487                         // Loop until a new stack is in place.
488                         break;
489
490                 case Grunnable:
491                 case Gsyscall:
492                 case Gwaiting:
493                         // Claim goroutine by setting scan bit.
494                         if(!runtime·castogscanstatus(gp, s, s|Gscan))
495                                 break;
496                         // In scan state, do work.
497                         runtime·gcphasework(gp);
498                         return true;
499
500                 case Gscanrunnable:
501                 case Gscanwaiting:
502                 case Gscansyscall:
503                         // Goroutine already claimed by another GC helper.
504                         return false;
505
506                 case Grunning:
507                         // Claim goroutine, so we aren't racing with a status
508                         // transition away from Grunning.
509                         if(!runtime·castogscanstatus(gp, Grunning, Gscanrunning))
510                                 break;
511
512                         // Mark gp for preemption.
513                         if(!gp->gcworkdone) {
514                                 gp->preemptscan = true;
515                                 gp->preempt = true;
516                                 gp->stackguard0 = StackPreempt;
517                         }
518
519                         // Unclaim.
520                         runtime·casfromgscanstatus(gp, Gscanrunning, Grunning);
521                         return false;
522                 }
523         }
524         // Should not be here....
525 }
526
527 // The GC requests that this routine be moved from a scanmumble state to a mumble state.
528 void 
529 runtime·restartg(G *gp)
530 {
531         uint32 s;
532
533         s = runtime·readgstatus(gp);
534         switch(s) {
535         default:
536                 dumpgstatus(gp); 
537                 runtime·throw("restartg: unexpected status");
538
539         case Gdead:
540                 break;
541
542         case Gscanrunnable:
543         case Gscanwaiting:
544         case Gscansyscall:
545                 runtime·casfromgscanstatus(gp, s, s&~Gscan);
546                 break;
547
548         case Gscanenqueue:
549                 // Scan is now completed.
550                 // Goroutine now needs to be made runnable.
551                 // We put it on the global run queue; ready blocks on the global scheduler lock.
552                 runtime·casfromgscanstatus(gp, Gscanenqueue, Gwaiting);
553                 if(gp != g->m->curg)
554                         runtime·throw("processing Gscanenqueue on wrong m");
555                 dropg();
556                 runtime·ready(gp);
557                 break;
558         }
559 }
560
561 static void
562 stopscanstart(G* gp)
563 {
564         if(g == gp)
565                 runtime·throw("GC not moved to G0");
566         if(runtime·stopg(gp)) {
567                 if(!isscanstatus(runtime·readgstatus(gp))) {
568                         dumpgstatus(gp);
569                         runtime·throw("GC not in scan state");
570                 }
571                 runtime·restartg(gp);
572         }
573 }
574
575 // Runs on g0 and does the actual work after putting the g back on the run queue.
576 static void
577 mquiesce(G *gpmaster)
578 {
579         G* gp;
580         uint32 i;
581         uint32 status;
582         uint32 activeglen;
583
584         activeglen = runtime·allglen;
585         // enqueue the calling goroutine.
586         runtime·restartg(gpmaster);
587         for(i = 0; i < activeglen; i++) {
588                 gp = runtime·allg[i];
589                 if(runtime·readgstatus(gp) == Gdead) 
590                         gp->gcworkdone = true; // noop scan.
591                 else 
592                         gp->gcworkdone = false; 
593                 stopscanstart(gp); 
594         }
595
596         // Check that the G's gcwork (such as scanning) has been done. If not, do it now.
597         // You can end up doing work here if the page trap on a Grunning goroutine has
598         // not been sprung, or in some race situations. For example, a runnable goroutine
599         // goes dead and is started up again with gp->gcworkdone set to false.
600         for(i = 0; i < activeglen; i++) {
601                 gp = runtime·allg[i];
602                 while (!gp->gcworkdone) {
603                         status = runtime·readgstatus(gp);
604                         if(status == Gdead) {
605                                 gp->gcworkdone = true; // scan is a noop
606                                 break;
607                                 //do nothing, scan not needed. 
608                         }
609                         if(status == Grunning && gp->stackguard0 == (uintptr)StackPreempt && runtime·notetsleep(&runtime·sched.stopnote, 100*1000)) // nanosecond arg 
610                                 runtime·noteclear(&runtime·sched.stopnote);
611                         else 
612                                 stopscanstart(gp);
613                 }
614         }
615
616         for(i = 0; i < activeglen; i++) {
617                 gp = runtime·allg[i];
618                 status = runtime·readgstatus(gp);
619                 if(isscanstatus(status)) {
620                         runtime·printf("mquiesce:bottom: post scan bad status gp=%p has status %x\n", gp, status);
621                         dumpgstatus(gp);
622                 }
623                 if(!gp->gcworkdone && status != Gdead) {
624                         runtime·printf("mquiesce:bottom: post scan gp=%p->gcworkdone still false\n", gp);
625                         dumpgstatus(gp);
626                 }
627         }
628
629         schedule(); // Never returns.
630 }
631
632 // quiesce moves all the goroutines to a GC safepoint, which for now is at a preemption point.
633 // If the global runtime·gcphase is GCmark, quiesce will ensure that all of the goroutines' stacks
634 // have been scanned before it returns.
635 void
636 runtime·quiesce(G* mastergp)
637 {
638         void (*fn)(G*);
639
640         runtime·castogscanstatus(mastergp, Grunning, Gscanenqueue);
641         // Now move this to the g0 (aka m) stack.
642         // g0 will potentially scan this thread and put mastergp on the runqueue 
643         fn = mquiesce;
644         runtime·mcall(&fn);
645 }
646
647 // This is used by the GC as well as the routines that do stack dumps. In the case
648 // of GC, all the goroutines can be reliably stopped. This is not always the case
649 // when the system is panicking or being exited.
650 void
651 runtime·stoptheworld(void)
652 {
653         int32 i;
654         uint32 s;
655         P *p;
656         bool wait;
657
658         // If we hold a lock, then we won't be able to stop another M
659         // that is blocked trying to acquire the lock.
660         if(g->m->locks > 0)
661                 runtime·throw("stoptheworld: holding locks");
662
663         runtime·lock(&runtime·sched.lock);
664         runtime·sched.stopwait = runtime·gomaxprocs;
665         runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1);
666         preemptall();
667         // stop current P
668         g->m->p->status = Pgcstop; // Pgcstop is only diagnostic.
669         runtime·sched.stopwait--;
670         // try to retake all P's in Psyscall status
671         for(i = 0; i < runtime·gomaxprocs; i++) {
672                 p = runtime·allp[i];
673                 s = p->status;
674                 if(s == Psyscall && runtime·cas(&p->status, s, Pgcstop))
675                         runtime·sched.stopwait--;
676         }
677         // stop idle P's
678         while(p = pidleget()) {
679                 p->status = Pgcstop;
680                 runtime·sched.stopwait--;
681         }
682         wait = runtime·sched.stopwait > 0;
683         runtime·unlock(&runtime·sched.lock);
684
685         // wait for remaining P's to stop voluntarily
686         if(wait) {
687                 for(;;) {
688                         // wait for 100us, then try to re-preempt in case of any races
689                         if(runtime·notetsleep(&runtime·sched.stopnote, 100*1000)) {
690                                 runtime·noteclear(&runtime·sched.stopnote);
691                                 break;
692                         }
693                         preemptall();
694                 }
695         }
696         if(runtime·sched.stopwait)
697                 runtime·throw("stoptheworld: not stopped");
698         for(i = 0; i < runtime·gomaxprocs; i++) {
699                 p = runtime·allp[i];
700                 if(p->status != Pgcstop)
701                         runtime·throw("stoptheworld: not stopped");
702         }
703 }
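// Callers (such as the garbage collector) are expected to pair this with
// runtime·starttheworld below once the work that required a stopped world is
// done.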
704
705 static void
706 mhelpgc(void)
707 {
708         g->m->helpgc = -1;
709 }
710
711 void
712 runtime·starttheworld(void)
713 {
714         P *p, *p1;
715         M *mp;
716         G *gp;
717         bool add;
718
719         g->m->locks++;  // disable preemption because it can be holding p in a local var
720         gp = runtime·netpoll(false);  // non-blocking
721         injectglist(gp);
722         add = needaddgcproc();
723         runtime·lock(&runtime·sched.lock);
724         if(runtime·newprocs) {
725                 procresize(runtime·newprocs);
726                 runtime·newprocs = 0;
727         } else
728                 procresize(runtime·gomaxprocs);
729         runtime·sched.gcwaiting = 0;
730
731         p1 = nil;
732         while(p = pidleget()) {
733                 // procresize() puts p's with work at the beginning of the list.
734                 // Once we reach a p with an empty run queue, the rest have empty run queues too.
735                 if(p->runqhead == p->runqtail) {
736                         pidleput(p);
737                         break;
738                 }
739                 p->m = mget();
740                 p->link = p1;
741                 p1 = p;
742         }
743         if(runtime·sched.sysmonwait) {
744                 runtime·sched.sysmonwait = false;
745                 runtime·notewakeup(&runtime·sched.sysmonnote);
746         }
747         runtime·unlock(&runtime·sched.lock);
748
749         while(p1) {
750                 p = p1;
751                 p1 = p1->link;
752                 if(p->m) {
753                         mp = p->m;
754                         p->m = nil;
755                         if(mp->nextp)
756                                 runtime·throw("starttheworld: inconsistent mp->nextp");
757                         mp->nextp = p;
758                         runtime·notewakeup(&mp->park);
759                 } else {
760                         // Start M to run P.  Do not start another M below.
761                         newm(nil, p);
762                         add = false;
763                 }
764         }
765
766         if(add) {
767                 // If GC could have used another helper proc, start one now,
768                 // in the hope that it will be available next time.
769                 // It would have been even better to start it before the collection,
770                 // but doing so requires allocating memory, so it's tricky to
771                 // coordinate.  This lazy approach works out in practice:
772                 // we don't mind if the first couple of GC rounds don't have quite
773                 // the maximum number of procs.
774                 newm(mhelpgc, nil);
775         }
776         g->m->locks--;
777         if(g->m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
778                 g->stackguard0 = StackPreempt;
779 }
780
781 static void mstart(void);
782
783 // Called to start an M.
784 #pragma textflag NOSPLIT
785 void
786 runtime·mstart(void)
787 {
788         uintptr x, size;
789         
790         if(g->stack.lo == 0) {
791                 // Initialize stack bounds from system stack.
792                 // Cgo may have left stack size in stack.hi.
793                 size = g->stack.hi;
794                 if(size == 0)
795                         size = 8192;
796                 g->stack.hi = (uintptr)&x;
797                 g->stack.lo = g->stack.hi - size + 1024;
798         }
799         
800         // Initialize stack guards so that we can start calling
801         // both Go and C functions with stack growth prologues.
802         g->stackguard0 = g->stack.lo + StackGuard;
803         g->stackguard1 = g->stackguard0;
804         mstart();
805 }
806
807 static void
808 mstart(void)
809 {
810         if(g != g->m->g0)
811                 runtime·throw("bad runtime·mstart");
812
813         // Record top of stack for use by mcall.
814         // Once we call schedule we're never coming back,
815         // so other calls can reuse this stack space.
816         runtime·gosave(&g->m->g0->sched);
817         g->m->g0->sched.pc = (uintptr)-1;  // make sure it is never used
818         runtime·asminit();
819         runtime·minit();
820
821         // Install signal handlers; after minit so that minit can
822         // prepare the thread to be able to handle the signals.
823         if(g->m == &runtime·m0)
824                 runtime·initsig();
825         
826         if(g->m->mstartfn)
827                 g->m->mstartfn();
828
829         if(g->m->helpgc) {
830                 g->m->helpgc = 0;
831                 stopm();
832         } else if(g->m != &runtime·m0) {
833                 acquirep(g->m->nextp);
834                 g->m->nextp = nil;
835         }
836         schedule();
837
838         // TODO(brainman): This point is never reached, because the scheduler
839         // does not release OS threads at the moment. But once this path
840         // is enabled, we must remove our SEH here.
841 }
842
843 // When running with cgo, we call _cgo_thread_start
844 // to start threads for us so that we can play nicely with
845 // foreign code.
846 void (*_cgo_thread_start)(void*);
847
848 typedef struct CgoThreadStart CgoThreadStart;
849 struct CgoThreadStart
850 {
851         G *g;
852         uintptr *tls;
853         void (*fn)(void);
854 };
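// newm below fills in a CgoThreadStart (the new m's g0, its tls array, and
// runtime·mstart as the entry function) and passes it to _cgo_thread_start
// via asmcgocall, so the actual thread creation happens in cgo-compiled C
// code.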
855
856 M *runtime·newM(void); // in proc.go
857
858 // Allocate a new m unassociated with any thread.
859 // Can use p for allocation context if needed.
860 M*
861 runtime·allocm(P *p)
862 {
863         M *mp;
864
865         g->m->locks++;  // disable GC because it can be called from sysmon
866         if(g->m->p == nil)
867                 acquirep(p);  // temporarily borrow p for mallocs in this function
868         mp = runtime·newM();
869         mcommoninit(mp);
870
871         // In case of cgo or Solaris, pthread_create will make us a stack.
872         // Windows and Plan 9 will lay out the sched stack on the OS stack.
873         if(runtime·iscgo || Solaris || Windows || Plan9)
874                 mp->g0 = runtime·malg(-1);
875         else
876                 mp->g0 = runtime·malg(8192);
877         mp->g0->m = mp;
878
879         if(p == g->m->p)
880                 releasep();
881         g->m->locks--;
882         if(g->m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
883                 g->stackguard0 = StackPreempt;
884
885         return mp;
886 }
887
888 G *runtime·newG(void); // in proc.go
889
890 static G*
891 allocg(void)
892 {
893         return runtime·newG();
894 }
895
896 static M* lockextra(bool nilokay);
897 static void unlockextra(M*);
898
899 // needm is called when a cgo callback happens on a
900 // thread without an m (a thread not created by Go).
901 // In this case, needm is expected to find an m to use
902 // and return with m, g initialized correctly.
903 // Since m and g are not set now (likely nil, but see below),
904 // needm is limited in what routines it can call. In particular
905 // it can only call nosplit functions (textflag 7) and cannot
906 // do any scheduling that requires an m.
907 //
908 // In order to avoid needing heavy lifting here, we adopt
909 // the following strategy: there is a stack of available m's
910 // that can be stolen. Using compare-and-swap
911 // to pop from the stack has ABA races, so we simulate
912 // a lock by doing an exchange (via casp) to steal the stack
913 // head and replace the top pointer with MLOCKED (1).
914 // This serves as a simple spin lock that we can use even
915 // without an m. The thread that locks the stack in this way
916 // unlocks the stack by storing a valid stack head pointer.
917 //
918 // In order to make sure that there is always an m structure
919 // available to be stolen, we maintain the invariant that there
920 // is always one more than needed. At the beginning of the
921 // program (if cgo is in use) the list is seeded with a single m.
922 // If needm finds that it has taken the last m off the list, its job
923 // is - once it has installed its own m so that it can do things like
924 // allocate memory - to create a spare m and put it on the list.
925 //
926 // Each of these extra m's also has a g0 and a curg that are
927 // pressed into service as the scheduling stack and current
928 // goroutine for the duration of the cgo callback.
929 //
930 // When the callback is done with the m, it calls dropm to
931 // put the m back on the list.
932 #pragma textflag NOSPLIT
933 void
934 runtime·needm(byte x)
935 {
936         M *mp;
937
938         if(runtime·needextram) {
939                 // Can happen if C/C++ code calls Go from a global ctor.
940                 // Cannot throw, because the scheduler is not initialized yet.
941                 runtime·write(2, "fatal error: cgo callback before cgo call\n",
942                         sizeof("fatal error: cgo callback before cgo call\n")-1);
943                 runtime·exit(1);
944         }
945
946         // Lock extra list, take head, unlock popped list.
947         // nilokay=false is safe here because of the invariant above,
948         // that the extra list always contains or will soon contain
949         // at least one m.
950         mp = lockextra(false);
951
952         // Set needextram when we've just emptied the list,
953         // so that the eventual call into cgocallbackg will
954         // allocate a new m for the extra list. We delay the
955         // allocation until then so that it can be done
956         // after exitsyscall makes sure it is okay to be
957         // running at all (that is, there's no garbage collection
958         // running right now).
959         mp->needextram = mp->schedlink == nil;
960         unlockextra(mp->schedlink);
961
962         // Install g (= m->g0) and set the stack bounds
963         // to match the current stack. We don't actually know
964         // how big the stack is, like we don't know how big any
965         // scheduling stack is, but we assume there's at least 32 kB,
966         // which is more than enough for us.
967         runtime·setg(mp->g0);
968         g->stack.hi = (uintptr)(&x + 1024);
969         g->stack.lo = (uintptr)(&x - 32*1024);
970         g->stackguard0 = g->stack.lo + StackGuard;
971
972         // Initialize this thread to use the m.
973         runtime·asminit();
974         runtime·minit();
975 }
976
977 // newextram allocates an m and puts it on the extra list.
978 // It is called with a working local m, so that it can do things
979 // like call schedlock and allocate.
980 void
981 runtime·newextram(void)
982 {
983         M *mp, *mnext;
984         G *gp;
985
986         // Create extra goroutine locked to extra m.
987         // The goroutine is the context in which the cgo callback will run.
988         // The sched.pc will never be returned to, but setting it to
989         // runtime.goexit makes clear to the traceback routines where
990         // the goroutine stack ends.
991         mp = runtime·allocm(nil);
992         gp = runtime·malg(4096);
993         gp->sched.pc = (uintptr)runtime·goexit;
994         gp->sched.sp = gp->stack.hi;
995         gp->sched.sp -= 4*sizeof(uintreg); // extra space in case of reads slightly beyond frame
996         gp->sched.lr = 0;
997         gp->sched.g = gp;
998         gp->syscallpc = gp->sched.pc;
999         gp->syscallsp = gp->sched.sp;
1000         // malg returns status as Gidle; change it to Gsyscall before adding to allg,
1001         // where GC will see it.
1002         runtime·casgstatus(gp, Gidle, Gsyscall);
1003         gp->m = mp;
1004         mp->curg = gp;
1005         mp->locked = LockInternal;
1006         mp->lockedg = gp;
1007         gp->lockedm = mp;
1008         gp->goid = runtime·xadd64(&runtime·sched.goidgen, 1);
1009         if(raceenabled)
1010                 gp->racectx = runtime·racegostart(runtime·newextram);
1011         // put on allg for garbage collector
1012         runtime·allgadd(gp);
1013
1014         // Add m to the extra list.
1015         mnext = lockextra(true);
1016         mp->schedlink = mnext;
1017         unlockextra(mp);
1018 }
1019
1020 // dropm is called when a cgo callback has called needm but is now
1021 // done with the callback and returning back into the non-Go thread.
1022 // It puts the current m back onto the extra list.
1023 //
1024 // The main expense here is the call to signalstack to release the
1025 // m's signal stack, and then the call to needm on the next callback
1026 // from this thread. It is tempting to try to save the m for next time,
1027 // which would eliminate both these costs, but there might not be
1028 // a next time: the current thread (which Go does not control) might exit.
1029 // If we saved the m for that thread, there would be an m leak each time
1030 // such a thread exited. Instead, we acquire and release an m on each
1031 // call. These should typically not be scheduling operations, just a few
1032 // atomics, so the cost should be small.
1033 //
1034 // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
1035 // variable using pthread_key_create. Unlike the pthread keys we already use
1036 // on OS X, this dummy key would never be read by Go code. It would exist
1037 // only so that we could register a thread-exit-time destructor.
1038 // That destructor would put the m back onto the extra list.
1039 // This is purely a performance optimization. The current version,
1040 // in which dropm happens on each cgo call, is still correct too.
1041 // We may have to keep the current version on systems with cgo
1042 // but without pthreads, like Windows.
1043 void
1044 runtime·dropm(void)
1045 {
1046         M *mp, *mnext;
1047
1048         // Undo whatever initialization minit did during needm.
1049         runtime·unminit();
1050
1051         // Clear m and g, and return m to the extra list.
1052         // After the call to setg we can only call nosplit functions.
1053         mp = g->m;
1054         runtime·setg(nil);
1055
1056         mnext = lockextra(true);
1057         mp->schedlink = mnext;
1058         unlockextra(mp);
1059 }
1060
1061 #define MLOCKED ((M*)1)
1062
1063 // lockextra locks the extra list and returns the list head.
1064 // The caller must unlock the list by storing a new list head
1065 // to runtime.extram. If nilokay is true, then lockextra will
1066 // return a nil list head if that's what it finds. If nilokay is false,
1067 // lockextra will keep waiting until the list head is no longer nil.
1068 #pragma textflag NOSPLIT
1069 static M*
1070 lockextra(bool nilokay)
1071 {
1072         M *mp;
1073         void (*yield)(void);
1074
1075         for(;;) {
1076                 mp = runtime·atomicloadp(&runtime·extram);
1077                 if(mp == MLOCKED) {
1078                         yield = runtime·osyield;
1079                         yield();
1080                         continue;
1081                 }
1082                 if(mp == nil && !nilokay) {
1083                         runtime·usleep(1);
1084                         continue;
1085                 }
1086                 if(!runtime·casp(&runtime·extram, mp, MLOCKED)) {
1087                         yield = runtime·osyield;
1088                         yield();
1089                         continue;
1090                 }
1091                 break;
1092         }
1093         return mp;
1094 }
1095
1096 #pragma textflag NOSPLIT
1097 static void
1098 unlockextra(M *mp)
1099 {
1100         runtime·atomicstorep(&runtime·extram, mp);
1101 }
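// Usage pattern for the two helpers above, exactly as it appears in needm,
// newextram and dropm in this file (repeated here only to make the push/pop
// protocol explicit):
//
//      // pop an m off the extra list
//      mp = lockextra(false);
//      unlockextra(mp->schedlink);
//
//      // push an m onto the extra list
//      mnext = lockextra(true);
//      mp->schedlink = mnext;
//      unlockextra(mp);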
1102
1103
1104 // Create a new m.  It will start off with a call to fn, or else the scheduler.
1105 static void
1106 newm(void(*fn)(void), P *p)
1107 {
1108         M *mp;
1109
1110         mp = runtime·allocm(p);
1111         mp->nextp = p;
1112         mp->mstartfn = fn;
1113
1114         if(runtime·iscgo) {
1115                 CgoThreadStart ts;
1116
1117                 if(_cgo_thread_start == nil)
1118                         runtime·throw("_cgo_thread_start missing");
1119                 ts.g = mp->g0;
1120                 ts.tls = mp->tls;
1121                 ts.fn = runtime·mstart;
1122                 runtime·asmcgocall(_cgo_thread_start, &ts);
1123                 return;
1124         }
1125         runtime·newosproc(mp, (byte*)mp->g0->stack.hi);
1126 }
1127
1128 // Stops execution of the current m until new work is available.
1129 // Returns with acquired P.
1130 static void
1131 stopm(void)
1132 {
1133         if(g->m->locks)
1134                 runtime·throw("stopm holding locks");
1135         if(g->m->p)
1136                 runtime·throw("stopm holding p");
1137         if(g->m->spinning) {
1138                 g->m->spinning = false;
1139                 runtime·xadd(&runtime·sched.nmspinning, -1);
1140         }
1141
1142 retry:
1143         runtime·lock(&runtime·sched.lock);
1144         mput(g->m);
1145         runtime·unlock(&runtime·sched.lock);
1146         runtime·notesleep(&g->m->park);
1147         runtime·noteclear(&g->m->park);
1148         if(g->m->helpgc) {
1149                 runtime·gchelper();
1150                 g->m->helpgc = 0;
1151                 g->m->mcache = nil;
1152                 goto retry;
1153         }
1154         acquirep(g->m->nextp);
1155         g->m->nextp = nil;
1156 }
1157
1158 static void
1159 mspinning(void)
1160 {
1161         g->m->spinning = true;
1162 }
1163
1164 // Schedules some M to run the p (creates an M if necessary).
1165 // If p==nil, tries to get an idle P; if there are no idle P's, it does nothing.
1166 static void
1167 startm(P *p, bool spinning)
1168 {
1169         M *mp;
1170         void (*fn)(void);
1171
1172         runtime·lock(&runtime·sched.lock);
1173         if(p == nil) {
1174                 p = pidleget();
1175                 if(p == nil) {
1176                         runtime·unlock(&runtime·sched.lock);
1177                         if(spinning)
1178                                 runtime·xadd(&runtime·sched.nmspinning, -1);
1179                         return;
1180                 }
1181         }
1182         mp = mget();
1183         runtime·unlock(&runtime·sched.lock);
1184         if(mp == nil) {
1185                 fn = nil;
1186                 if(spinning)
1187                         fn = mspinning;
1188                 newm(fn, p);
1189                 return;
1190         }
1191         if(mp->spinning)
1192                 runtime·throw("startm: m is spinning");
1193         if(mp->nextp)
1194                 runtime·throw("startm: m has p");
1195         mp->spinning = spinning;
1196         mp->nextp = p;
1197         runtime·notewakeup(&mp->park);
1198 }
1199
1200 // Hands off P from syscall or locked M.
1201 static void
1202 handoffp(P *p)
1203 {
1204         // if it has local work, start it straight away
1205         if(p->runqhead != p->runqtail || runtime·sched.runqsize) {
1206                 startm(p, false);
1207                 return;
1208         }
1209         // no local work, check that there are no spinning/idle M's,
1210         // otherwise our help is not required
1211         if(runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) == 0 &&  // TODO: fast atomic
1212                 runtime·cas(&runtime·sched.nmspinning, 0, 1)){
1213                 startm(p, true);
1214                 return;
1215         }
1216         runtime·lock(&runtime·sched.lock);
1217         if(runtime·sched.gcwaiting) {
1218                 p->status = Pgcstop;
1219                 if(--runtime·sched.stopwait == 0)
1220                         runtime·notewakeup(&runtime·sched.stopnote);
1221                 runtime·unlock(&runtime·sched.lock);
1222                 return;
1223         }
1224         if(runtime·sched.runqsize) {
1225                 runtime·unlock(&runtime·sched.lock);
1226                 startm(p, false);
1227                 return;
1228         }
1229         // If this is the last running P and nobody is polling the network,
1230         // we need to wake up another M to poll the network.
1231         if(runtime·sched.npidle == runtime·gomaxprocs-1 && runtime·atomicload64(&runtime·sched.lastpoll) != 0) {
1232                 runtime·unlock(&runtime·sched.lock);
1233                 startm(p, false);
1234                 return;
1235         }
1236         pidleput(p);
1237         runtime·unlock(&runtime·sched.lock);
1238 }
1239
1240 // Tries to add one more P to execute G's.
1241 // Called when a G is made runnable (newproc, ready).
1242 static void
1243 wakep(void)
1244 {
1245         // be conservative about spinning threads
1246         if(!runtime·cas(&runtime·sched.nmspinning, 0, 1))
1247                 return;
1248         startm(nil, true);
1249 }
1250
1251 // Stops execution of the current m that is locked to a g until the g is runnable again.
1252 // Returns with acquired P.
1253 static void
1254 stoplockedm(void)
1255 {
1256         P *p;
1257         uint32 status;
1258
1259         if(g->m->lockedg == nil || g->m->lockedg->lockedm != g->m)
1260                 runtime·throw("stoplockedm: inconsistent locking");
1261         if(g->m->p) {
1262                 // Schedule another M to run this p.
1263                 p = releasep();
1264                 handoffp(p);
1265         }
1266         incidlelocked(1);
1267         // Wait until another thread schedules lockedg again.
1268         runtime·notesleep(&g->m->park);
1269         runtime·noteclear(&g->m->park);
1270         status = runtime·readgstatus(g->m->lockedg);
1271         if((status&~Gscan) != Grunnable){
1272                 runtime·printf("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n");
1273                 dumpgstatus(g);
1274                 runtime·throw("stoplockedm: not runnable");
1275         }
1276         acquirep(g->m->nextp);
1277         g->m->nextp = nil;
1278 }
1279
1280 // Schedules the locked m to run the locked gp.
1281 static void
1282 startlockedm(G *gp)
1283 {
1284         M *mp;
1285         P *p;
1286
1287         mp = gp->lockedm;
1288         if(mp == g->m)
1289                 runtime·throw("startlockedm: locked to me");
1290         if(mp->nextp)
1291                 runtime·throw("startlockedm: m has p");
1292         // directly handoff current P to the locked m
1293         incidlelocked(-1);
1294         p = releasep();
1295         mp->nextp = p;
1296         runtime·notewakeup(&mp->park);
1297         stopm();
1298 }
1299
1300 // Stops the current m for stoptheworld.
1301 // Returns when the world is restarted.
1302 static void
1303 gcstopm(void)
1304 {
1305         P *p;
1306
1307         if(!runtime·sched.gcwaiting)
1308                 runtime·throw("gcstopm: not waiting for gc");
1309         if(g->m->spinning) {
1310                 g->m->spinning = false;
1311                 runtime·xadd(&runtime·sched.nmspinning, -1);
1312         }
1313         p = releasep();
1314         runtime·lock(&runtime·sched.lock);
1315         p->status = Pgcstop;
1316         if(--runtime·sched.stopwait == 0)
1317                 runtime·notewakeup(&runtime·sched.stopnote);
1318         runtime·unlock(&runtime·sched.lock);
1319         stopm();
1320 }
1321
1322 // Schedules gp to run on the current M.
1323 // Never returns.
1324 static void
1325 execute(G *gp)
1326 {
1327         int32 hz;
1328         
1329         runtime·casgstatus(gp, Grunnable, Grunning);
1330         gp->waitsince = 0;
1331         gp->preempt = false;
1332         gp->stackguard0 = gp->stack.lo + StackGuard;
1333         g->m->p->schedtick++;
1334         g->m->curg = gp;
1335         gp->m = g->m;
1336
1337         // Check whether the profiler needs to be turned on or off.
1338         hz = runtime·sched.profilehz;
1339         if(g->m->profilehz != hz)
1340                 runtime·resetcpuprofiler(hz);
1341
1342         runtime·gogo(&gp->sched);
1343 }
1344
1345 // Finds a runnable goroutine to execute.
1346 // Tries to steal from other P's, get g from global queue, poll network.
1347 static G*
1348 findrunnable(void)
1349 {
1350         G *gp;
1351         P *p;
1352         int32 i;
1353
1354 top:
1355         if(runtime·sched.gcwaiting) {
1356                 gcstopm();
1357                 goto top;
1358         }
1359         if(runtime·fingwait && runtime·fingwake && (gp = runtime·wakefing()) != nil)
1360                 runtime·ready(gp);
1361         // local runq
1362         gp = runqget(g->m->p);
1363         if(gp)
1364                 return gp;
1365         // global runq
1366         if(runtime·sched.runqsize) {
1367                 runtime·lock(&runtime·sched.lock);
1368                 gp = globrunqget(g->m->p, 0);
1369                 runtime·unlock(&runtime·sched.lock);
1370                 if(gp)
1371                         return gp;
1372         }
1373         // poll network
1374         gp = runtime·netpoll(false);  // non-blocking
1375         if(gp) {
1376                 injectglist(gp->schedlink);
1377                 runtime·casgstatus(gp, Gwaiting, Grunnable);
1378                 return gp;
1379         }
1380         // If number of spinning M's >= number of busy P's, block.
1381         // This is necessary to prevent excessive CPU consumption
1382         // when GOMAXPROCS>>1 but the program parallelism is low.
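        // Illustrative arithmetic for the check below: with GOMAXPROCS=8 and
        // 3 idle P's there are 5 busy P's, so once nmspinning reaches 3
        // (2*3 >= 8-3) an additional would-be spinner blocks instead of spinning.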
1383         if(!g->m->spinning && 2 * runtime·atomicload(&runtime·sched.nmspinning) >= runtime·gomaxprocs - runtime·atomicload(&runtime·sched.npidle))  // TODO: fast atomic
1384                 goto stop;
1385         if(!g->m->spinning) {
1386                 g->m->spinning = true;
1387                 runtime·xadd(&runtime·sched.nmspinning, 1);
1388         }
1389         // random steal from other P's
1390         for(i = 0; i < 2*runtime·gomaxprocs; i++) {
1391                 if(runtime·sched.gcwaiting)
1392                         goto top;
1393                 p = runtime·allp[runtime·fastrand1()%runtime·gomaxprocs];
1394                 if(p == g->m->p)
1395                         gp = runqget(p);
1396                 else
1397                         gp = runqsteal(g->m->p, p);
1398                 if(gp)
1399                         return gp;
1400         }
1401 stop:
1402         // return P and block
1403         runtime·lock(&runtime·sched.lock);
1404         if(runtime·sched.gcwaiting) {
1405                 runtime·unlock(&runtime·sched.lock);
1406                 goto top;
1407         }
1408         if(runtime·sched.runqsize) {
1409                 gp = globrunqget(g->m->p, 0);
1410                 runtime·unlock(&runtime·sched.lock);
1411                 return gp;
1412         }
1413         p = releasep();
1414         pidleput(p);
1415         runtime·unlock(&runtime·sched.lock);
1416         if(g->m->spinning) {
1417                 g->m->spinning = false;
1418                 runtime·xadd(&runtime·sched.nmspinning, -1);
1419         }
1420         // check all runqueues once again
1421         for(i = 0; i < runtime·gomaxprocs; i++) {
1422                 p = runtime·allp[i];
1423                 if(p && p->runqhead != p->runqtail) {
1424                         runtime·lock(&runtime·sched.lock);
1425                         p = pidleget();
1426                         runtime·unlock(&runtime·sched.lock);
1427                         if(p) {
1428                                 acquirep(p);
1429                                 goto top;
1430                         }
1431                         break;
1432                 }
1433         }
1434         // poll network
1435         if(runtime·xchg64(&runtime·sched.lastpoll, 0) != 0) {
1436                 if(g->m->p)
1437                         runtime·throw("findrunnable: netpoll with p");
1438                 if(g->m->spinning)
1439                         runtime·throw("findrunnable: netpoll with spinning");
1440                 gp = runtime·netpoll(true);  // block until new work is available
1441                 runtime·atomicstore64(&runtime·sched.lastpoll, runtime·nanotime());
1442                 if(gp) {
1443                         runtime·lock(&runtime·sched.lock);
1444                         p = pidleget();
1445                         runtime·unlock(&runtime·sched.lock);
1446                         if(p) {
1447                                 acquirep(p);
1448                                 injectglist(gp->schedlink);
1449                                 runtime·casgstatus(gp, Gwaiting, Grunnable);
1450                                 return gp;
1451                         }
1452                         injectglist(gp);
1453                 }
1454         }
1455         stopm();
1456         goto top;
1457 }
1458
1459 static void
1460 resetspinning(void)
1461 {
1462         int32 nmspinning;
1463
1464         if(g->m->spinning) {
1465                 g->m->spinning = false;
1466                 nmspinning = runtime·xadd(&runtime·sched.nmspinning, -1);
1467                 if(nmspinning < 0)
1468                         runtime·throw("findrunnable: negative nmspinning");
1469         } else
1470                 nmspinning = runtime·atomicload(&runtime·sched.nmspinning);
1471
1472         // M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
1473         // so see if we need to wake up another P here.
1474         if (nmspinning == 0 && runtime·atomicload(&runtime·sched.npidle) > 0)
1475                 wakep();
1476 }
1477
1478 // Injects the list of runnable G's into the scheduler.
1479 // Can run concurrently with GC.
1480 static void
1481 injectglist(G *glist)
1482 {
1483         int32 n;
1484         G *gp;
1485
1486         if(glist == nil)
1487                 return;
1488         runtime·lock(&runtime·sched.lock);
1489         for(n = 0; glist; n++) {
1490                 gp = glist;
1491                 glist = gp->schedlink;
1492                 runtime·casgstatus(gp, Gwaiting, Grunnable); 
1493                 globrunqput(gp);
1494         }
1495         runtime·unlock(&runtime·sched.lock);
1496
1497         for(; n && runtime·sched.npidle; n--)
1498                 startm(nil, false);
1499 }
1500
1501 // One round of scheduler: find a runnable goroutine and execute it.
1502 // Never returns.
1503 static void
1504 schedule(void)
1505 {
1506         G *gp;
1507         uint32 tick;
1508
1509         if(g->m->locks)
1510                 runtime·throw("schedule: holding locks");
1511
1512         if(g->m->lockedg) {
1513                 stoplockedm();
1514                 execute(g->m->lockedg);  // Never returns.
1515         }
1516
1517 top:
1518         if(runtime·sched.gcwaiting) {
1519                 gcstopm();
1520                 goto top;
1521         }
1522
1523         gp = nil;
1524         // Check the global runnable queue once in a while to ensure fairness.
1525         // Otherwise two goroutines can completely occupy the local runqueue
1526         // by constantly respawning each other.
1527         tick = g->m->p->schedtick;
1528         // This is a fancy way to say tick%61==0;
1529         // it uses 2 MUL instructions instead of a single DIV and so is faster on modern processors.
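        // (0x4325c53f is 2^36/61 rounded up, so for any uint32 tick the
        // multiply-and-shift below equals tick/61 and the whole expression is tick%61.
        // For example, tick=122: the multiply-shift yields 2 and 122 - 2*61 == 0,
        // so the global queue is checked on that tick.)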
1530         if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime·sched.runqsize > 0) {
1531                 runtime·lock(&runtime·sched.lock);
1532                 gp = globrunqget(g->m->p, 1);
1533                 runtime·unlock(&runtime·sched.lock);
1534                 if(gp)
1535                         resetspinning();
1536         }
1537         if(gp == nil) {
1538                 gp = runqget(g->m->p);
1539                 if(gp && g->m->spinning)
1540                         runtime·throw("schedule: spinning with local work");
1541         }
1542         if(gp == nil) {
1543                 gp = findrunnable();  // blocks until work is available
1544                 resetspinning();
1545         }
1546
1547         if(gp->lockedm) {
1548                 // Hands off own p to the locked m,
1549                 // then blocks waiting for a new p.
1550                 startlockedm(gp);
1551                 goto top;
1552         }
1553
1554         execute(gp);
1555 }
1556
1557 // dropg removes the association between m and the current goroutine m->curg (gp for short).
1558 // Typically a caller sets gp's status away from Grunning and then
1559 // immediately calls dropg to finish the job. The caller is also responsible
1560 // for arranging that gp will be restarted using runtime·ready at an
1561 // appropriate time. After calling dropg and arranging for gp to be
1562 // readied later, the caller can do other work but eventually should
1563 // call schedule to restart the scheduling of goroutines on this m.
1564 static void
1565 dropg(void)
1566 {
1567         if(g->m->lockedg == nil) {
1568                 g->m->curg->m = nil;
1569                 g->m->curg = nil;
1570         }
1571 }
1572
1573 // Puts the current goroutine into a waiting state and calls unlockf.
1574 // If unlockf returns false, the goroutine is resumed.
1575 void
1576 runtime·park(bool(*unlockf)(G*, void*), void *lock, String reason)
1577 {
1578         void (*fn)(G*);
1579
1580         g->m->waitlock = lock;
1581         g->m->waitunlockf = unlockf;
1582         g->waitreason = reason;
1583         fn = runtime·park_m;
1584         runtime·mcall(&fn);
1585 }
1586
1587 bool
1588 runtime·parkunlock_c(G *gp, void *lock)
1589 {
1590         USED(gp);
1591         runtime·unlock(lock);
1592         return true;
1593 }
1594
1595 // Puts the current goroutine into a waiting state and unlocks the lock.
1596 // The goroutine can be made runnable again by calling runtime·ready(gp).
1597 void
1598 runtime·parkunlock(Mutex *lock, String reason)
1599 {
1600         runtime·park(runtime·parkunlock_c, lock, reason);
1601 }
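// Typical use (illustrative sketch, not lifted from any particular caller):
// a blocking primitive locks its Mutex, records the waiting g where another
// goroutine can find it, and calls runtime·parkunlock(&l, reason); the waking
// side later calls runtime·ready(gp) to make the goroutine runnable again.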
1602
1603 // runtime·park continuation on g0.
1604 void
1605 runtime·park_m(G *gp)
1606 {
1607         bool ok;
1608
1609         runtime·casgstatus(gp, Grunning, Gwaiting);
1610         dropg();
1611
1612         if(g->m->waitunlockf) {
1613                 ok = g->m->waitunlockf(gp, g->m->waitlock);
1614                 g->m->waitunlockf = nil;
1615                 g->m->waitlock = nil;
1616                 if(!ok) {
1617                         runtime·casgstatus(gp, Gwaiting, Grunnable); 
1618                         execute(gp);  // Schedule it back, never returns.
1619                 }
1620         }
1621
1622         schedule();
1623 }
1624
1625 // Gosched continuation on g0.
1626 void
1627 runtime·gosched_m(G *gp)
1628 {
1629         uint32 status;
1630
1631         status = runtime·readgstatus(gp);
1632         if((status&~Gscan) != Grunning){
1633                 dumpgstatus(gp);
1634                 runtime·throw("bad g status");
1635         }
1636         runtime·casgstatus(gp, Grunning, Grunnable);
1637         dropg();
1638         runtime·lock(&runtime·sched.lock);
1639         globrunqput(gp);
1640         runtime·unlock(&runtime·sched.lock);
1641
1642         schedule();
1643 }
1644
1645 // Finishes execution of the current goroutine.
1646 // Must be NOSPLIT because it is called from Go.
1647 #pragma textflag NOSPLIT
1648 void
1649 runtime·goexit1(void)
1650 {
1651         void (*fn)(G*);
1652
1653         if(raceenabled)
1654                 runtime·racegoend();
1655         fn = goexit0;
1656         runtime·mcall(&fn);
1657 }
1658
1659 // runtime·goexit continuation on g0.
1660 static void
1661 goexit0(G *gp)
1662 {
1663         runtime·casgstatus(gp, Grunning, Gdead);
1664         gp->m = nil;
1665         gp->lockedm = nil;
1666         g->m->lockedg = nil;
1667         gp->paniconfault = 0;
1668         gp->defer = nil; // should be nil already but just in case.
1669         gp->panic = nil; // non-nil for Goexit during panic. points at stack-allocated data.
1670         gp->writebuf.array = nil;
1671         gp->writebuf.len = 0;
1672         gp->writebuf.cap = 0;
1673         gp->waitreason.str = nil;
1674         gp->waitreason.len = 0;
1675         gp->param = nil;
1676
1677         dropg();
1678
1679         if(g->m->locked & ~LockExternal) {
1680                 runtime·printf("invalid m->locked = %d\n", g->m->locked);
1681                 runtime·throw("internal lockOSThread error");
1682         }       
1683         g->m->locked = 0;
1684         gfput(g->m->p, gp);
1685         schedule();
1686 }
1687
1688 #pragma textflag NOSPLIT
1689 static void
1690 save(uintptr pc, uintptr sp)
1691 {
1692         g->sched.pc = pc;
1693         g->sched.sp = sp;
1694         g->sched.lr = 0;
1695         g->sched.ret = 0;
1696         g->sched.ctxt = 0;
1697         g->sched.g = g;
1698 }
1699
1700 static void entersyscall_bad(void);
1701 static void entersyscall_sysmon(void);
1702 static void entersyscall_gcwait(void);
1703
1704 // The goroutine g is about to enter a system call.
1705 // Record that it's not using the cpu anymore.
1706 // This is called only from the go syscall library and cgocall,
1707 // not from the low-level system calls used by the runtime.
1708 //
1709 // Entersyscall cannot split the stack: the runtime·gosave must
1710 // make g->sched refer to the caller's stack segment, because
1711 // entersyscall is going to return immediately after.
1712 //
1713 // Nothing entersyscall calls can split the stack either.
1714 // We cannot safely move the stack during an active call to syscall,
1715 // because we do not know which of the uintptr arguments are
1716 // really pointers (back into the stack).
1717 // In practice, this means that we make the fast path run through
1718 // entersyscall doing no-split things, and the slow path has to use onM
1719 // to run bigger things on the m stack.
1720 //
1721 // reentersyscall is the entry point used by cgo callbacks, where explicitly
1722 // saved SP and PC are restored. This is needed when exitsyscall will be called
1723 // from a function further up in the call stack than the parent, as g->syscallsp
1724 // must always point to a valid stack frame. entersyscall below is the normal
1725 // entry point for syscalls, which obtains the SP and PC from the caller.
1726 #pragma textflag NOSPLIT
1727 void
1728 runtime·reentersyscall(uintptr pc, uintptr sp)
1729 {
1730         void (*fn)(void);
1731
1732         // Disable preemption because during this function g is in Gsyscall status,
1733         // but can have inconsistent g->sched, do not let GC observe it.
1734         g->m->locks++;
1735         
1736         // Entersyscall must not call any function that might split/grow the stack.
1737         // (See details in comment above.)
1738         // Catch calls that might, by replacing the stack guard with something that
1739         // will trip any stack check and leaving a flag to tell newstack to die.
1740         g->stackguard0 = StackPreempt;
1741         g->throwsplit = 1;
1742
1743         // Leave SP around for GC and traceback.
1744         save(pc, sp);
1745         g->syscallsp = sp;
1746         g->syscallpc = pc;
1747         runtime·casgstatus(g, Grunning, Gsyscall);
1748         if(g->syscallsp < g->stack.lo || g->stack.hi < g->syscallsp) {
1749                 fn = entersyscall_bad;
1750                 runtime·onM(&fn);
1751         }
1752
1753         if(runtime·atomicload(&runtime·sched.sysmonwait)) {  // TODO: fast atomic
1754                 fn = entersyscall_sysmon;
1755                 runtime·onM(&fn);
1756                 save(pc, sp);
1757         }
1758
1759         g->m->mcache = nil;
1760         g->m->p->m = nil;
1761         runtime·atomicstore(&g->m->p->status, Psyscall);
1762         if(runtime·sched.gcwaiting) {
1763                 fn = entersyscall_gcwait;
1764                 runtime·onM(&fn);
1765                 save(pc, sp);
1766         }
1767
1768         // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
1769         // We set stackguard to StackPreempt so that first split stack check calls morestack.
1770         // Morestack detects this case and throws.
1771         g->stackguard0 = StackPreempt;
1772         g->m->locks--;
1773 }
1774
1775 // Standard syscall entry used by the go syscall library and normal cgo calls.
1776 #pragma textflag NOSPLIT
1777 void
1778 ·entersyscall(int32 dummy)
1779 {
1780         runtime·reentersyscall((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
1781 }
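// Illustrative pairing (sketch, not copied from the syscall package): a wrapper
// in the go syscall library calls entersyscall, performs the raw system call,
// and then calls exitsyscall below; while the call blocks, the P left in
// Psyscall can be retaken by sysmon and handed to another M.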
1782
1783 static void
1784 entersyscall_bad(void)
1785 {
1786         G *gp;
1787         
1788         gp = g->m->curg;
1789         runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
1790                 gp->syscallsp, gp->stack.lo, gp->stack.hi);
1791         runtime·throw("entersyscall");
1792 }
1793
1794 static void
1795 entersyscall_sysmon(void)
1796 {
1797         runtime·lock(&runtime·sched.lock);
1798         if(runtime·atomicload(&runtime·sched.sysmonwait)) {
1799                 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
1800                 runtime·notewakeup(&runtime·sched.sysmonnote);
1801         }
1802         runtime·unlock(&runtime·sched.lock);
1803 }
1804
1805 static void
1806 entersyscall_gcwait(void)
1807 {
1808         runtime·lock(&runtime·sched.lock);
1809         if (runtime·sched.stopwait > 0 && runtime·cas(&g->m->p->status, Psyscall, Pgcstop)) {
1810                 if(--runtime·sched.stopwait == 0)
1811                         runtime·notewakeup(&runtime·sched.stopnote);
1812         }
1813         runtime·unlock(&runtime·sched.lock);
1814 }
1815
1816 static void entersyscallblock_handoff(void);
1817
1818 // The same as runtime·entersyscall(), but with a hint that the syscall is blocking.
1819 #pragma textflag NOSPLIT
1820 void
1821 ·entersyscallblock(int32 dummy)
1822 {
1823         void (*fn)(void);
1824
1825         g->m->locks++;  // see comment in entersyscall
1826         g->throwsplit = 1;
1827         g->stackguard0 = StackPreempt;  // see comment in entersyscall
1828
1829         // Leave SP around for GC and traceback.
1830         save((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
1831         g->syscallsp = g->sched.sp;
1832         g->syscallpc = g->sched.pc;
1833         runtime·casgstatus(g, Grunning, Gsyscall);
1834         if(g->syscallsp < g->stack.lo || g->stack.hi < g->syscallsp) {
1835                 fn = entersyscall_bad;
1836                 runtime·onM(&fn);
1837         }
1838         
1839         fn = entersyscallblock_handoff;
1840         runtime·onM(&fn);
1841
1842         // Resave for traceback during blocked call.
1843         save((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
1844
1845         g->m->locks--;
1846 }
1847
1848 static void
1849 entersyscallblock_handoff(void)
1850 {
1851         handoffp(releasep());
1852 }
1853
1854 // The goroutine g exited its system call.
1855 // Arrange for it to run on a cpu again.
1856 // This is called only from the go syscall library, not
1857 // from the low-level system calls used by the runtime.
1858 #pragma textflag NOSPLIT
1859 void
1860 ·exitsyscall(int32 dummy)
1861 {
1862         void (*fn)(G*);
1863
1864         g->m->locks++;  // see comment in entersyscall
1865
1866         if(runtime·getcallersp(&dummy) > g->syscallsp)
1867                 runtime·throw("exitsyscall: syscall frame is no longer valid");
1868
1869         g->waitsince = 0;
1870         if(exitsyscallfast()) {
1871                 // There's a cpu for us, so we can run.
1872                 g->m->p->syscalltick++;
1873                 // We need to cas the status and scan before resuming...
1874                 runtime·casgstatus(g, Gsyscall, Grunning);
1875
1876                 // Garbage collector isn't running (since we are),
1877                 // so okay to clear syscallsp.
1878                 g->syscallsp = (uintptr)nil;
1879                 g->m->locks--;
1880                 if(g->preempt) {
1881                         // restore the preemption request in case we've cleared it in newstack
1882                         g->stackguard0 = StackPreempt;
1883                 } else {
1884                         // otherwise restore the real stackguard; we've spoiled it in entersyscall/entersyscallblock
1885                         g->stackguard0 = g->stack.lo + StackGuard;
1886                 }
1887                 g->throwsplit = 0;
1888                 return;
1889         }
1890
1891         g->m->locks--;
1892
1893         // Call the scheduler.
1894         fn = exitsyscall0;
1895         runtime·mcall(&fn);
1896
1897         // Scheduler returned, so we're allowed to run now.
1898         // Delete the syscallsp information that we left for
1899         // the garbage collector during the system call.
1900         // Must wait until now because until gosched returns
1901         // we don't know for sure that the garbage collector
1902         // is not running.
1903         g->syscallsp = (uintptr)nil;
1904         g->m->p->syscalltick++;
1905         g->throwsplit = 0;
1906 }
1907
1908 static void exitsyscallfast_pidle(void);
1909
1910 #pragma textflag NOSPLIT
1911 static bool
1912 exitsyscallfast(void)
1913 {
1914         void (*fn)(void);
1915
1916         // Freezetheworld sets stopwait but does not retake P's.
1917         if(runtime·sched.stopwait) {
1918                 g->m->p = nil;
1919                 return false;
1920         }
1921
1922         // Try to re-acquire the last P.
1923         if(g->m->p && g->m->p->status == Psyscall && runtime·cas(&g->m->p->status, Psyscall, Prunning)) {
1924                 // There's a cpu for us, so we can run.
1925                 g->m->mcache = g->m->p->mcache;
1926                 g->m->p->m = g->m;
1927                 return true;
1928         }
1929         // Try to get any other idle P.
1930         g->m->p = nil;
1931         if(runtime·sched.pidle) {
1932                 fn = exitsyscallfast_pidle;
1933                 runtime·onM(&fn);
1934                 if(g->m->scalararg[0]) {
1935                         g->m->scalararg[0] = 0;
1936                         return true;
1937                 }
1938         }
1939         return false;
1940 }
1941
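// Runs on the m stack via onM (see exitsyscallfast above).  onM cannot return a
// value, so success is reported back through g->m->scalararg[0].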
1942 static void
1943 exitsyscallfast_pidle(void)
1944 {
1945         P *p;
1946
1947         runtime·lock(&runtime·sched.lock);
1948         p = pidleget();
1949         if(p && runtime·atomicload(&runtime·sched.sysmonwait)) {
1950                 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
1951                 runtime·notewakeup(&runtime·sched.sysmonnote);
1952         }
1953         runtime·unlock(&runtime·sched.lock);
1954         if(p) {
1955                 acquirep(p);
1956                 g->m->scalararg[0] = 1;
1957         } else
1958                 g->m->scalararg[0] = 0;
1959 }
1960
1961 // runtime·exitsyscall slow path on g0.
1962 // Failed to acquire P, enqueue gp as runnable.
1963 static void
1964 exitsyscall0(G *gp)
1965 {
1966         P *p;
1967
1968         runtime·casgstatus(gp, Gsyscall, Grunnable);
1969         dropg();
1970         runtime·lock(&runtime·sched.lock);
1971         p = pidleget();
1972         if(p == nil)
1973                 globrunqput(gp);
1974         else if(runtime·atomicload(&runtime·sched.sysmonwait)) {
1975                 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
1976                 runtime·notewakeup(&runtime·sched.sysmonnote);
1977         }
1978         runtime·unlock(&runtime·sched.lock);
1979         if(p) {
1980                 acquirep(p);
1981                 execute(gp);  // Never returns.
1982         }
1983         if(g->m->lockedg) {
1984                 // Wait until another thread schedules gp and so m again.
1985                 stoplockedm();
1986                 execute(gp);  // Never returns.
1987         }
1988         stopm();
1989         schedule();  // Never returns.
1990 }
1991
1992 static void
1993 beforefork(void)
1994 {
1995         G *gp;
1996         
1997         gp = g->m->curg;
1998         // Fork can hang if preempted with signals frequently enough (see issue 5517).
1999         // Ensure that we stay on the same M where we disable profiling.
2000         gp->m->locks++;
2001         if(gp->m->profilehz != 0)
2002                 runtime·resetcpuprofiler(0);
2003
2004         // This function is called before fork in syscall package.
2005         // Code between fork and exec must not allocate memory nor even try to grow stack.
2006         // Here we spoil g->stackguard to reliably detect any attempts to grow stack.
2007         // runtime_AfterFork will undo this in parent process, but not in child.
2008         gp->stackguard0 = StackFork;
2009 }
2010
2011 // Called from syscall package before fork.
2012 #pragma textflag NOSPLIT
2013 void
2014 syscall·runtime_BeforeFork(void)
2015 {
2016         void (*fn)(void);
2017         
2018         fn = beforefork;
2019         runtime·onM(&fn);
2020 }
2021
2022 static void
2023 afterfork(void)
2024 {
2025         int32 hz;
2026         G *gp;
2027         
2028         gp = g->m->curg;
2029         // See the comment in runtime_BeforeFork.
2030         gp->stackguard0 = gp->stack.lo + StackGuard;
2031
2032         hz = runtime·sched.profilehz;
2033         if(hz != 0)
2034                 runtime·resetcpuprofiler(hz);
2035         gp->m->locks--;
2036 }
2037
2038 // Called from syscall package after fork in parent.
2039 #pragma textflag NOSPLIT
2040 void
2041 syscall·runtime_AfterFork(void)
2042 {
2043         void (*fn)(void);
2044         
2045         fn = afterfork;
2046         runtime·onM(&fn);
2047 }
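// Illustrative sequence (sketch): the syscall package brackets fork with these
// hooks, roughly runtime_BeforeFork(); pid = fork(); runtime_AfterFork() in the
// parent; the child goes straight to exec without re-enabling profiling or
// restoring the stack guard.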
2048
2049 // Hook used by runtime·malg to call runtime·stackalloc on the
2050 // scheduler stack.  This exists because runtime·stackalloc insists
2051 // on being called on the scheduler stack, to avoid trying to grow
2052 // the stack while allocating a new stack segment.
2053 static void
2054 mstackalloc(G *gp)
2055 {
2056         G *newg;
2057         uintptr size;
2058
2059         newg = g->m->ptrarg[0];
2060         size = g->m->scalararg[0];
2061
2062         newg->stack = runtime·stackalloc(size);
2063
2064         runtime·gogo(&gp->sched);
2065 }
2066
2067 // Allocate a new g, with a stack big enough for stacksize bytes.
2068 G*
2069 runtime·malg(int32 stacksize)
2070 {
2071         G *newg;
2072         void (*fn)(G*);
2073
2074         newg = allocg();
2075         if(stacksize >= 0) {
2076                 stacksize = runtime·round2(StackSystem + stacksize);
2077                 if(g == g->m->g0) {
2078                         // running on scheduler stack already.
2079                         newg->stack = runtime·stackalloc(stacksize);
2080                 } else {
2081                         // have to call stackalloc on scheduler stack.
2082                         g->m->scalararg[0] = stacksize;
2083                         g->m->ptrarg[0] = newg;
2084                         fn = mstackalloc;
2085                         runtime·mcall(&fn);
2086                         g->m->ptrarg[0] = nil;
2087                 }
2088                 newg->stackguard0 = newg->stack.lo + StackGuard;
2089                 newg->stackguard1 = ~(uintptr)0;
2090         }
2091         return newg;
2092 }
2093
2094 static void
2095 newproc_m(void)
2096 {
2097         byte *argp;
2098         void *callerpc;
2099         FuncVal *fn;
2100         int32 siz;
2101
2102         siz = g->m->scalararg[0];
2103         callerpc = (void*)g->m->scalararg[1];   
2104         argp = g->m->ptrarg[0];
2105         fn = (FuncVal*)g->m->ptrarg[1];
2106
2107         runtime·newproc1(fn, argp, siz, 0, callerpc);
2108         g->m->ptrarg[0] = nil;
2109         g->m->ptrarg[1] = nil;
2110 }
2111
2112 // Create a new g running fn with siz bytes of arguments.
2113 // Put it on the queue of g's waiting to run.
2114 // The compiler turns a go statement into a call to this.
2115 // Cannot split the stack because it assumes that the arguments
2116 // are available sequentially after &fn; they would not be
2117 // copied if a stack split occurred.
2118 #pragma textflag NOSPLIT
2119 void
2120 runtime·newproc(int32 siz, FuncVal* fn, ...)
2121 {
2122         byte *argp;
2123         void (*mfn)(void);
2124
2125         if(thechar == '5' || thechar == '9')
2126                 argp = (byte*)(&fn+2);  // skip caller's saved LR
2127         else
2128                 argp = (byte*)(&fn+1);
2129
2130         g->m->locks++;
2131         g->m->scalararg[0] = siz;
2132         g->m->scalararg[1] = (uintptr)runtime·getcallerpc(&siz);
2133         g->m->ptrarg[0] = argp;
2134         g->m->ptrarg[1] = fn;
2135         mfn = newproc_m;
2136         runtime·onM(&mfn);
2137         g->m->locks--;
2138 }
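// For example (sketch), a statement such as go f(x, y) is compiled into a call
// to runtime·newproc(siz, f, x, y), with the argument bytes laid out right
// after &fn, which is how argp above finds them for newproc1 to copy.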
2139
2140 void runtime·main(void);
2141
2142 // Create a new g running fn with narg bytes of arguments starting
2143 // at argp and returning nret bytes of results.  callerpc is the
2144 // address of the go statement that created this.  The new g is put
2145 // on the queue of g's waiting to run.
2146 G*
2147 runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
2148 {
2149         byte *sp;
2150         G *newg;
2151         P *p;
2152         int32 siz;
2153
2154         if(fn == nil) {
2155                 g->m->throwing = -1;  // do not dump full stacks
2156                 runtime·throw("go of nil func value");
2157         }
2158         g->m->locks++;  // disable preemption because it can be holding p in a local var
2159         siz = narg + nret;
2160         siz = (siz+7) & ~7;
2161
2162         // We could allocate a larger initial stack if necessary.
2163         // Not worth it: this is almost always an error.
2164         // 4*sizeof(uintreg): extra space added below
2165         // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
2166         if(siz >= StackMin - 4*sizeof(uintreg) - sizeof(uintreg))
2167                 runtime·throw("runtime.newproc: function arguments too large for new goroutine");
2168
2169         p = g->m->p;
2170         if((newg = gfget(p)) == nil) {
2171                 newg = runtime·malg(StackMin);
2172                 runtime·casgstatus(newg, Gidle, Gdead);
2173                 runtime·allgadd(newg); // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
2174         }
2175         if(newg->stack.hi == 0)
2176                 runtime·throw("newproc1: newg missing stack");
2177
2178         if(runtime·readgstatus(newg) != Gdead) 
2179                 runtime·throw("newproc1: new g is not Gdead");
2180
2181         sp = (byte*)newg->stack.hi;
2182         sp -= 4*sizeof(uintreg); // extra space in case of reads slightly beyond frame
2183         sp -= siz;
2184         runtime·memmove(sp, argp, narg);
2185         if(thechar == '5' || thechar == '9') {
2186                 // caller's LR
2187                 sp -= sizeof(void*);
2188                 *(void**)sp = nil;
2189         }
2190
2191         runtime·memclr((byte*)&newg->sched, sizeof newg->sched);
2192         newg->sched.sp = (uintptr)sp;
2193         newg->sched.pc = (uintptr)runtime·goexit + PCQuantum; // +PCQuantum so that previous instruction is in same function
2194         newg->sched.g = newg;
2195         runtime·gostartcallfn(&newg->sched, fn);
2196         newg->gopc = (uintptr)callerpc;
2197         runtime·casgstatus(newg, Gdead, Grunnable);
2198
2199         if(p->goidcache == p->goidcacheend) {
2200                 // Sched.goidgen is the last allocated id,
2201                 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
2202                 // At startup sched.goidgen=0, so main goroutine receives goid=1.
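                // For example, the first refill below moves sched.goidgen from 0 to 16
                // and hands this P the goid range [1,16]; the next refill hands out [17,32].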
2203                 p->goidcache = runtime·xadd64(&runtime·sched.goidgen, GoidCacheBatch);
2204                 p->goidcache -= GoidCacheBatch - 1;
2205                 p->goidcacheend = p->goidcache + GoidCacheBatch;
2206         }
2207         newg->goid = p->goidcache++;
2208         if(raceenabled)
2209                 newg->racectx = runtime·racegostart((void*)callerpc);
2210         runqput(p, newg);
2211
2212         if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0 && fn->fn != runtime·main)  // TODO: fast atomic
2213                 wakep();
2214         g->m->locks--;
2215         if(g->m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
2216                 g->stackguard0 = StackPreempt;
2217         return newg;
2218 }
2219
2220 // Put on gfree list.
2221 // If local list is too long, transfer a batch to the global list.
2222 static void
2223 gfput(P *p, G *gp)
2224 {
2225         uintptr stksize;
2226
2227         if(runtime·readgstatus(gp) != Gdead) 
2228                 runtime·throw("gfput: bad status (not Gdead)");
2229
2230         stksize = gp->stack.hi - gp->stack.lo;
2231         
2232         if(stksize != FixedStack) {
2233                 // non-standard stack size - free it.
2234                 runtime·stackfree(gp->stack);
2235                 gp->stack.lo = 0;
2236                 gp->stack.hi = 0;
2237                 gp->stackguard0 = 0;
2238         }
2239         gp->schedlink = p->gfree;
2240         p->gfree = gp;
2241         p->gfreecnt++;
2242         if(p->gfreecnt >= 64) {
2243                 runtime·lock(&runtime·sched.gflock);
2244                 while(p->gfreecnt >= 32) {
2245                         p->gfreecnt--;
2246                         gp = p->gfree;
2247                         p->gfree = gp->schedlink;
2248                         gp->schedlink = runtime·sched.gfree;
2249                         runtime·sched.gfree = gp;
2250                         runtime·sched.ngfree++;
2251                 }
2252                 runtime·unlock(&runtime·sched.gflock);
2253         }
2254 }
2255
2256 // Get from gfree list.
2257 // If local list is empty, grab a batch from global list.
2258 static G*
2259 gfget(P *p)
2260 {
2261         G *gp;
2262         void (*fn)(G*);
2263
2264 retry:
2265         gp = p->gfree;
2266         if(gp == nil && runtime·sched.gfree) {
2267                 runtime·lock(&runtime·sched.gflock);
2268                 while(p->gfreecnt < 32 && runtime·sched.gfree != nil) {
2269                         p->gfreecnt++;
2270                         gp = runtime·sched.gfree;
2271                         runtime·sched.gfree = gp->schedlink;
2272                         runtime·sched.ngfree--;
2273                         gp->schedlink = p->gfree;
2274                         p->gfree = gp;
2275                 }
2276                 runtime·unlock(&runtime·sched.gflock);
2277                 goto retry;
2278         }
2279         if(gp) {
2280                 p->gfree = gp->schedlink;
2281                 p->gfreecnt--;
2282
2283                 if(gp->stack.lo == 0) {
2284                         // Stack was deallocated in gfput.  Allocate a new one.
2285                         if(g == g->m->g0) {
2286                                 gp->stack = runtime·stackalloc(FixedStack);
2287                         } else {
2288                                 g->m->scalararg[0] = FixedStack;
2289                                 g->m->ptrarg[0] = gp;
2290                                 fn = mstackalloc;
2291                                 runtime·mcall(&fn);
2292                                 g->m->ptrarg[0] = nil;
2293                         }
2294                         gp->stackguard0 = gp->stack.lo + StackGuard;
2295                 } else {
2296                         if(raceenabled)
2297                                 runtime·racemalloc((void*)gp->stack.lo, gp->stack.hi - gp->stack.lo);
2298                 }
2299         }
2300         return gp;
2301 }
2302
2303 // Purge all cached G's from gfree list to the global list.
2304 static void
2305 gfpurge(P *p)
2306 {
2307         G *gp;
2308
2309         runtime·lock(&runtime·sched.gflock);
2310         while(p->gfreecnt != 0) {
2311                 p->gfreecnt--;
2312                 gp = p->gfree;
2313                 p->gfree = gp->schedlink;
2314                 gp->schedlink = runtime·sched.gfree;
2315                 runtime·sched.gfree = gp;
2316                 runtime·sched.ngfree++;
2317         }
2318         runtime·unlock(&runtime·sched.gflock);
2319 }
2320
2321 #pragma textflag NOSPLIT
2322 void
2323 runtime·Breakpoint(void)
2324 {
2325         runtime·breakpoint();
2326 }
2327
2328 // lockOSThread is called by runtime.LockOSThread and runtime.lockOSThread below
2329 // after they modify m->locked. Do not allow preemption during this call,
2330 // or else the m might be different in this function than in the caller.
2331 #pragma textflag NOSPLIT
2332 static void
2333 lockOSThread(void)
2334 {
2335         g->m->lockedg = g;
2336         g->lockedm = g->m;
2337 }
2338
2339 #pragma textflag NOSPLIT
2340 void
2341 runtime·LockOSThread(void)
2342 {
2343         g->m->locked |= LockExternal;
2344         lockOSThread();
2345 }
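// Illustrative use from Go code (sketch of the exported API):
//      runtime.LockOSThread()
//      defer runtime.UnlockOSThread()
// useful when a C library or OS facility requires that subsequent calls stay on
// the same thread.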
2346
2347 #pragma textflag NOSPLIT
2348 void
2349 runtime·lockOSThread(void)
2350 {
2351         g->m->locked += LockInternal;
2352         lockOSThread();
2353 }
2354
2355
2356 // unlockOSThread is called by runtime.UnlockOSThread and runtime.unlockOSThread below
2357 // after they update m->locked. Do not allow preemption during this call,
2358 // or else the m might be different in this function than in the caller.
2359 #pragma textflag NOSPLIT
2360 static void
2361 unlockOSThread(void)
2362 {
2363         if(g->m->locked != 0)
2364                 return;
2365         g->m->lockedg = nil;
2366         g->lockedm = nil;
2367 }
2368
2369 #pragma textflag NOSPLIT
2370 void
2371 runtime·UnlockOSThread(void)
2372 {
2373         g->m->locked &= ~LockExternal;
2374         unlockOSThread();
2375 }
2376
2377 static void badunlockOSThread(void);
2378
2379 #pragma textflag NOSPLIT
2380 void
2381 runtime·unlockOSThread(void)
2382 {
2383         void (*fn)(void);
2384
2385         if(g->m->locked < LockInternal) {
2386                 fn = badunlockOSThread;
2387                 runtime·onM(&fn);
2388         }
2389         g->m->locked -= LockInternal;
2390         unlockOSThread();
2391 }
2392
2393 static void
2394 badunlockOSThread(void)
2395 {
2396         runtime·throw("runtime: internal error: misuse of lockOSThread/unlockOSThread");
2397 }
2398
2399 #pragma textflag NOSPLIT
2400 int32
2401 runtime·gcount(void)
2402 {
2403         P *p, **pp;
2404         int32 n;
2405
2406         n = runtime·allglen - runtime·sched.ngfree;
2407         for(pp=runtime·allp; p=*pp; pp++)
2408                 n -= p->gfreecnt;
2409         // All these variables can be changed concurrently, so the result can be inconsistent.
2410         // But at least the current goroutine is running.
2411         if(n < 1)
2412                 n = 1;
2413         return n;
2414 }
2415
2416 int32
2417 runtime·mcount(void)
2418 {
2419         return runtime·sched.mcount;
2420 }
2421
2422 static struct ProfState {
2423         uint32 lock;
2424         int32 hz;
2425 } prof;
2426
2427 static void System(void) {}
2428 static void ExternalCode(void) {}
2429 static void GC(void) {}
2430 extern void runtime·cpuproftick(uintptr*, int32);
2431 extern byte runtime·etext[];
2432
2433 // Called if we receive a SIGPROF signal.
2434 void
2435 runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp, M *mp)
2436 {
2437         int32 n;
2438         bool traceback;
2439         // Do not use global m in this function, use mp instead.
2440         // On windows one m is sending reports about all the g's, so the global m would refer to the wrong thread.
2441         byte m;
2442         uintptr stk[100];
2443
2444         m = 0;
2445         USED(m);
2446
2447         if(prof.hz == 0)
2448                 return;
2449
2450         // Profiling runs concurrently with GC, so it must not allocate.
2451         mp->mallocing++;
2452
2453         // Define that a "user g" is a user-created goroutine, and a "system g"
2454         // is one that is m->g0 or m->gsignal. We've only made sure that we
2455         // can unwind user g's, so exclude the system g's.
2456         //
2457         // It is not quite as easy as testing gp == m->curg (the current user g)
2458         // because we might be interrupted for profiling halfway through a
2459         // goroutine switch. The switch involves updating three (or four) values:
2460         // g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
2461         // because once it gets updated the new g is running.
2462         //
2463         // When switching from a user g to a system g, LR is not considered live,
2464         // so the update only affects g, SP, and PC. Since PC must be last,
2465         // the possible partial transitions in ordinary execution are (1) g alone is updated,
2466         // (2) both g and SP are updated, and (3) SP alone is updated.
2467         // If g is updated, we'll see a system g and not look closer.
2468         // If SP alone is updated, we can detect the partial transition by checking
2469         // whether the SP is within g's stack bounds. (We could also require that SP
2470         // be changed only after g, but the stack bounds check is needed by other
2471         // cases, so there is no need to impose an additional requirement.)
2472         //
2473         // There is one exceptional transition to a system g, not in ordinary execution.
2474         // When a signal arrives, the operating system starts the signal handler running
2475         // with an updated PC and SP. The g is updated last, at the beginning of the
2476         // handler. There are two reasons this is okay. First, until g is updated the
2477         // g and SP do not match, so the stack bounds check detects the partial transition.
2478         // Second, signal handlers currently run with signals disabled, so a profiling
2479         // signal cannot arrive during the handler.
2480         //
2481         // When switching from a system g to a user g, there are three possibilities.
2482         //
2483         // First, it may be that the g switch has no PC update, because the SP
2484         // either corresponds to a user g throughout (as in runtime.asmcgocall)
2485         // or because it has been arranged to look like a user g frame
2486         // (as in runtime.cgocallback_gofunc). In this case, since the entire
2487         // transition is a g+SP update, a partial transition updating just one of 
2488         // those will be detected by the stack bounds check.
2489         //
2490         // Second, when returning from a signal handler, the PC and SP updates
2491         // are performed by the operating system in an atomic update, so the g
2492         // update must be done before them. The stack bounds check detects
2493         // the partial transition here, and (again) signal handlers run with signals
2494         // disabled, so a profiling signal cannot arrive then anyway.
2495         //
2496         // Third, the common case: it may be that the switch updates g, SP, and PC
2497         // separately, as in runtime.gogo.
2498         //
2499         // Because runtime.gogo is the only instance, we check whether the PC lies
2500         // within that function, and if so, do not ask for a traceback. This approach
2501         // requires knowing the size of the runtime.gogo function, which we
2502         // record in arch_*.h and check in runtime_test.go.
2503         //
2504         // There is another apparently viable approach, recorded here in case
2505         // the "PC within runtime.gogo" check turns out not to be usable.
2506         // It would be possible to delay the update of either g or SP until immediately
2507         // before the PC update instruction. Then, because of the stack bounds check,
2508         // the only problematic interrupt point is just before that PC update instruction,
2509         // and the sigprof handler can detect that instruction and simulate stepping past
2510         // it in order to reach a consistent state. On ARM, the update of g must be made
2511         // in two places (in R10 and also in a TLS slot), so the delayed update would
2512         // need to be the SP update. The sigprof handler must read the instruction at
2513         // the current PC and if it was the known instruction (for example, JMP BX or 
2514         // MOV R2, PC), use that other register in place of the PC value.
2515         // The biggest drawback to this solution is that it requires that we can tell
2516         // whether it's safe to read from the memory pointed at by PC.
2517         // In a correct program, we can test PC == nil and otherwise read,
2518         // but if a profiling signal happens at the instant that a program executes
2519         // a bad jump (before the program manages to handle the resulting fault)
2520         // the profiling handler could fault trying to read nonexistent memory.
2521         //
2522         // To recap, there are no constraints on the assembly being used for the
2523         // transition. We simply require that g and SP match and that the PC is not
2524         // in runtime.gogo.
2525         traceback = true;
2526         if(gp == nil || gp != mp->curg ||
2527            (uintptr)sp < gp->stack.lo || gp->stack.hi < (uintptr)sp ||
2528            ((uint8*)runtime·gogo <= pc && pc < (uint8*)runtime·gogo + RuntimeGogoBytes))
2529                 traceback = false;
2530
2531         n = 0;
2532         if(traceback)
2533                 n = runtime·gentraceback((uintptr)pc, (uintptr)sp, (uintptr)lr, gp, 0, stk, nelem(stk), nil, nil, TraceTrap);
2534         if(!traceback || n <= 0) {
2535                 // Normal traceback is impossible or has failed.
2536                 // See if it falls into several common cases.
2537                 n = 0;
2538                 if(mp->ncgo > 0 && mp->curg != nil &&
2539                         mp->curg->syscallpc != 0 && mp->curg->syscallsp != 0) {
2540                         // Cgo, we can't unwind and symbolize arbitrary C code,
2541                         // so instead collect Go stack that leads to the cgo call.
2542                         // This is especially important on windows, since all syscalls are cgo calls.
2543                         n = runtime·gentraceback(mp->curg->syscallpc, mp->curg->syscallsp, 0, mp->curg, 0, stk, nelem(stk), nil, nil, 0);
2544                 }
2545 #ifdef GOOS_windows
2546                 if(n == 0 && mp->libcallg != nil && mp->libcallpc != 0 && mp->libcallsp != 0) {
2547                         // Libcall, i.e. runtime syscall on windows.
2548                         // Collect Go stack that leads to the call.
2549                         n = runtime·gentraceback(mp->libcallpc, mp->libcallsp, 0, mp->libcallg, 0, stk, nelem(stk), nil, nil, 0);
2550                 }
2551 #endif
2552                 if(n == 0) {
2553                         // If all of the above has failed, account it against abstract "System" or "GC".
2554                         n = 2;
2555                         // "ExternalCode" is better than "etext".
2556                         if((uintptr)pc > (uintptr)runtime·etext)
2557                                 pc = (byte*)ExternalCode + PCQuantum;
2558                         stk[0] = (uintptr)pc;
2559                         if(mp->gcing || mp->helpgc)
2560                                 stk[1] = (uintptr)GC + PCQuantum;
2561                         else
2562                                 stk[1] = (uintptr)System + PCQuantum;
2563                 }
2564         }
2565
2566         if(prof.hz != 0) {
2567                 // Simple cas-lock to coordinate with setcpuprofilerate.
2568                 while(!runtime·cas(&prof.lock, 0, 1))
2569                         runtime·osyield();
2570                 if(prof.hz != 0)
2571                         runtime·cpuproftick(stk, n);
2572                 runtime·atomicstore(&prof.lock, 0);
2573         }
2574         mp->mallocing--;
2575 }
2576
2577 // Arrange to call fn with a traceback hz times a second.
2578 void
2579 runtime·setcpuprofilerate_m(void)
2580 {
2581         int32 hz;
2582         
2583         hz = g->m->scalararg[0];
2584         g->m->scalararg[0] = 0;
2585
2586         // Force sane arguments.
2587         if(hz < 0)
2588                 hz = 0;
2589
2590         // Disable preemption, otherwise we can be rescheduled to another thread
2591         // that has profiling enabled.
2592         g->m->locks++;
2593
2594         // Stop profiler on this thread so that it is safe to lock prof.
2595         // if a profiling signal came in while we had prof locked,
2596         // it would deadlock.
2597         runtime·resetcpuprofiler(0);
2598
2599         while(!runtime·cas(&prof.lock, 0, 1))
2600                 runtime·osyield();
2601         prof.hz = hz;
2602         runtime·atomicstore(&prof.lock, 0);
2603
2604         runtime·lock(&runtime·sched.lock);
2605         runtime·sched.profilehz = hz;
2606         runtime·unlock(&runtime·sched.lock);
2607
2608         if(hz != 0)
2609                 runtime·resetcpuprofiler(hz);
2610
2611         g->m->locks--;
2612 }
2613
2614 P *runtime·newP(void);
2615
2616 // Change number of processors.  The world is stopped, sched is locked.
2617 static void
2618 procresize(int32 new)
2619 {
2620         int32 i, old;
2621         bool empty;
2622         G *gp;
2623         P *p;
2624
2625         old = runtime·gomaxprocs;
2626         if(old < 0 || old > MaxGomaxprocs || new <= 0 || new > MaxGomaxprocs)
2627                 runtime·throw("procresize: invalid arg");
2628         // initialize new P's
2629         for(i = 0; i < new; i++) {
2630                 p = runtime·allp[i];
2631                 if(p == nil) {
2632                         p = runtime·newP();
2633                         p->id = i;
2634                         p->status = Pgcstop;
2635                         runtime·atomicstorep(&runtime·allp[i], p);
2636                 }
2637                 if(p->mcache == nil) {
2638                         if(old==0 && i==0)
2639                                 p->mcache = g->m->mcache;  // bootstrap
2640                         else
2641                                 p->mcache = runtime·allocmcache();
2642                 }
2643         }
2644
2645         // redistribute runnable G's evenly
2646         // collect all runnable goroutines in global queue preserving FIFO order
2647         // FIFO order is required to ensure fairness even during frequent GCs
2648         // see http://golang.org/issue/7126
2649         empty = false;
2650         while(!empty) {
2651                 empty = true;
2652                 for(i = 0; i < old; i++) {
2653                         p = runtime·allp[i];
2654                         if(p->runqhead == p->runqtail)
2655                                 continue;
2656                         empty = false;
2657                         // pop from tail of local queue
2658                         p->runqtail--;
2659                         gp = p->runq[p->runqtail%nelem(p->runq)];
2660                         // push onto head of global queue
2661                         gp->schedlink = runtime·sched.runqhead;
2662                         runtime·sched.runqhead = gp;
2663                         if(runtime·sched.runqtail == nil)
2664                                 runtime·sched.runqtail = gp;
2665                         runtime·sched.runqsize++;
2666                 }
2667         }
2668         // fill local queues with at most nelem(p->runq)/2 goroutines
2669         // start at 1 because current M already executes some G and will acquire allp[0] below,
2670         // so if we have a spare G we want to put it into allp[1].
2671         for(i = 1; i < new * nelem(p->runq)/2 && runtime·sched.runqsize > 0; i++) {
2672                 gp = runtime·sched.runqhead;
2673                 runtime·sched.runqhead = gp->schedlink;
2674                 if(runtime·sched.runqhead == nil)
2675                         runtime·sched.runqtail = nil;
2676                 runtime·sched.runqsize--;
2677                 runqput(runtime·allp[i%new], gp);
2678         }
2679
2680         // free unused P's
2681         for(i = new; i < old; i++) {
2682                 p = runtime·allp[i];
2683                 runtime·freemcache(p->mcache);
2684                 p->mcache = nil;
2685                 gfpurge(p);
2686                 p->status = Pdead;
2687                 // can't free P itself because it can be referenced by an M in syscall
2688         }
2689
2690         if(g->m->p)
2691                 g->m->p->m = nil;
2692         g->m->p = nil;
2693         g->m->mcache = nil;
2694         p = runtime·allp[0];
2695         p->m = nil;
2696         p->status = Pidle;
2697         acquirep(p);
2698         for(i = new-1; i > 0; i--) {
2699                 p = runtime·allp[i];
2700                 p->status = Pidle;
2701                 pidleput(p);
2702         }
2703         runtime·atomicstore((uint32*)&runtime·gomaxprocs, new);
2704 }
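// Note (editorial sketch): at bootstrap procresize is called with the initial
// GOMAXPROCS; a later runtime.GOMAXPROCS call stops the world, records the new
// value in runtime·newprocs, and the world-restart path calls procresize again
// with that value.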
2705
2706 // Associate p and the current m.
2707 static void
2708 acquirep(P *p)
2709 {
2710         if(g->m->p || g->m->mcache)
2711                 runtime·throw("acquirep: already in go");
2712         if(p->m || p->status != Pidle) {
2713                 runtime·printf("acquirep: p->m=%p(%d) p->status=%d\n", p->m, p->m ? p->m->id : 0, p->status);
2714                 runtime·throw("acquirep: invalid p state");
2715         }
2716         g->m->mcache = p->mcache;
2717         g->m->p = p;
2718         p->m = g->m;
2719         p->status = Prunning;
2720 }
2721
2722 // Disassociate p and the current m.
2723 static P*
2724 releasep(void)
2725 {
2726         P *p;
2727
2728         if(g->m->p == nil || g->m->mcache == nil)
2729                 runtime·throw("releasep: invalid arg");
2730         p = g->m->p;
2731         if(p->m != g->m || p->mcache != g->m->mcache || p->status != Prunning) {
2732                 runtime·printf("releasep: m=%p m->p=%p p->m=%p m->mcache=%p p->mcache=%p p->status=%d\n",
2733                         g->m, g->m->p, p->m, g->m->mcache, p->mcache, p->status);
2734                 runtime·throw("releasep: invalid p state");
2735         }
2736         g->m->p = nil;
2737         g->m->mcache = nil;
2738         p->m = nil;
2739         p->status = Pidle;
2740         return p;
2741 }
2742
2743 static void
2744 incidlelocked(int32 v)
2745 {
2746         runtime·lock(&runtime·sched.lock);
2747         runtime·sched.nmidlelocked += v;
2748         if(v > 0)
2749                 checkdead();
2750         runtime·unlock(&runtime·sched.lock);
2751 }
2752
2753 // Check for deadlock situation.
2754 // The check is based on number of running M's, if 0 -> deadlock.
2755 static void
2756 checkdead(void)
2757 {
2758         G *gp;
2759         P *p;
2760         M *mp;
2761         int32 run, grunning, s;
2762         uintptr i;
2763
2764         // -1 for sysmon
2765         run = runtime·sched.mcount - runtime·sched.nmidle - runtime·sched.nmidlelocked - 1;
2766         if(run > 0)
2767                 return;
2768         // If we are dying because of a signal caught on an already idle thread,
2769         // freezetheworld will cause all running threads to block.
2770         // And the runtime will essentially enter a deadlock state,
2771         // except that there is a thread that will call runtime·exit soon.
2772         if(runtime·panicking > 0)
2773                 return;
2774         if(run < 0) {
2775                 runtime·printf("runtime: checkdead: nmidle=%d nmidlelocked=%d mcount=%d\n",
2776                         runtime·sched.nmidle, runtime·sched.nmidlelocked, runtime·sched.mcount);
2777                 runtime·throw("checkdead: inconsistent counts");
2778         }
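        // Count user goroutines blocked in Gwaiting.  If there are none at all, the
        // main goroutine must have called runtime.Goexit; otherwise every remaining
        // goroutine is blocked and we report deadlock below.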
2779         grunning = 0;
2780         runtime·lock(&runtime·allglock);
2781         for(i = 0; i < runtime·allglen; i++) {
2782                 gp = runtime·allg[i];
2783                 if(gp->issystem)
2784                         continue;
2785                 s = runtime·readgstatus(gp);
2786                 switch(s&~Gscan) {
2787                 case Gwaiting:
2788                         grunning++;
2789                         break;
2790                 case Grunnable:
2791                 case Grunning:
2792                 case Gsyscall:
2793                         runtime·unlock(&runtime·allglock);
2794                         runtime·printf("runtime: checkdead: found g %D in status %d\n", gp->goid, s);
2795                         runtime·throw("checkdead: runnable g");
2796                         break;
2797                 }
2798         }
2799         runtime·unlock(&runtime·allglock);
2800         if(grunning == 0)  // possible if main goroutine calls runtime·Goexit()
2801                 runtime·throw("no goroutines (main called runtime.Goexit) - deadlock!");
2802
2803         // Maybe jump time forward for playground.
2804         if((gp = runtime·timejump()) != nil) {
2805                 runtime·casgstatus(gp, Gwaiting, Grunnable);
2806                 globrunqput(gp);
2807                 p = pidleget();
2808                 if(p == nil)
2809                         runtime·throw("checkdead: no p for timer");
2810                 mp = mget();
2811                 if(mp == nil)
2812                         newm(nil, p);
2813                 else {
2814                         mp->nextp = p;
2815                         runtime·notewakeup(&mp->park);
2816                 }
2817                 return;
2818         }
2819
2820         g->m->throwing = -1;  // do not dump full stacks
2821         runtime·throw("all goroutines are asleep - deadlock!");
2822 }
2823
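// sysmon runs for the life of the program on its own M, without a P.  Each
// iteration it sleeps 20us-10ms (backing off while idle), polls the network if
// nobody has polled for over 10ms, retakes Ps stuck in syscalls and preempts
// long-running Gs, forces a GC if none has run for forcegcperiod, and
// periodically returns unused heap spans to the OS.  (Summary of the loop below.)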
2824 static void
2825 sysmon(void)
2826 {
2827         uint32 idle, delay, nscavenge;
2828         int64 now, unixnow, lastpoll, lasttrace, lastgc;
2829         int64 forcegcperiod, scavengelimit, lastscavenge, maxsleep;
2830         G *gp;
2831
2832         // If we go two minutes without a garbage collection, force one to run.
2833         forcegcperiod = 2*60*1e9;
2834         // If a heap span goes unused for 5 minutes after a garbage collection,
2835         // we hand it back to the operating system.
2836         scavengelimit = 5*60*1e9;
2837         if(runtime·debug.scavenge > 0) {
2838                 // Scavenge-a-lot for testing.
2839                 forcegcperiod = 10*1e6;
2840                 scavengelimit = 20*1e6;
2841         }
2842         lastscavenge = runtime·nanotime();
2843         nscavenge = 0;
2844         // Make wake-up period small enough for the sampling to be correct.
2845         maxsleep = forcegcperiod/2;
2846         if(scavengelimit < forcegcperiod)
2847                 maxsleep = scavengelimit/2;
2848
2849         lasttrace = 0;
2850         idle = 0;  // how many cycles in succession we have not woken anybody up
2851         delay = 0;
2852         for(;;) {
2853                 if(idle == 0)  // start with 20us sleep...
2854                         delay = 20;
2855                 else if(idle > 50)  // start doubling the sleep after 1ms...
2856                         delay *= 2;
2857                 if(delay > 10*1000)  // up to 10ms
2858                         delay = 10*1000;
2859                 runtime·usleep(delay);
2860                 if(runtime·debug.schedtrace <= 0 &&
2861                         (runtime·sched.gcwaiting || runtime·atomicload(&runtime·sched.npidle) == runtime·gomaxprocs)) {  // TODO: fast atomic
2862                         runtime·lock(&runtime·sched.lock);
2863                         if(runtime·atomicload(&runtime·sched.gcwaiting) || runtime·atomicload(&runtime·sched.npidle) == runtime·gomaxprocs) {
2864                                 runtime·atomicstore(&runtime·sched.sysmonwait, 1);
2865                                 runtime·unlock(&runtime·sched.lock);
2866                                 runtime·notetsleep(&runtime·sched.sysmonnote, maxsleep);
2867                                 runtime·lock(&runtime·sched.lock);
2868                                 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
2869                                 runtime·noteclear(&runtime·sched.sysmonnote);
2870                                 idle = 0;
2871                                 delay = 20;
2872                         }
2873                         runtime·unlock(&runtime·sched.lock);
2874                 }
2875                 // poll network if not polled for more than 10ms
2876                 lastpoll = runtime·atomicload64(&runtime·sched.lastpoll);
2877                 now = runtime·nanotime();
2878                 unixnow = runtime·unixnanotime();
2879                 if(lastpoll != 0 && lastpoll + 10*1000*1000 < now) {
2880                         runtime·cas64(&runtime·sched.lastpoll, lastpoll, now);
2881                         gp = runtime·netpoll(false);  // non-blocking
2882                         if(gp) {
2883                                 // Need to decrement number of idle locked M's
2884                                 // (pretending that one more is running) before injectglist.
2885                                 // Otherwise it can lead to the following situation:
2886                                 // injectglist grabs all P's but before it starts M's to run the P's,
2887                                 // another M returns from syscall, finishes running its G,
2888                                 // observes that there is no work to do and no other running M's
2889                                 // and reports deadlock.
2890                                 incidlelocked(-1);
2891                                 injectglist(gp);
2892                                 incidlelocked(1);
2893                         }
2894                 }
2895                 // retake P's blocked in syscalls
2896                 // and preempt long running G's
2897                 if(retake(now))
2898                         idle = 0;
2899                 else
2900                         idle++;
2901
2902                 // check if we need to force a GC
2903                 lastgc = runtime·atomicload64(&mstats.last_gc);
2904                 if(lastgc != 0 && unixnow - lastgc > forcegcperiod && runtime·atomicload(&runtime·forcegc.idle)) {
2905                         runtime·lock(&runtime·forcegc.lock);
2906                         runtime·forcegc.idle = 0;
2907                         runtime·forcegc.g->schedlink = nil;
2908                         injectglist(runtime·forcegc.g);
2909                         runtime·unlock(&runtime·forcegc.lock);
2910                 }
2911
2912                 // scavenge heap once in a while
2913                 if(lastscavenge + scavengelimit/2 < now) {
2914                         runtime·MHeap_Scavenge(nscavenge, now, scavengelimit);
2915                         lastscavenge = now;
2916                         nscavenge++;
2917                 }
2918
2919                 if(runtime·debug.schedtrace > 0 && lasttrace + runtime·debug.schedtrace*1000000ll <= now) {
2920                         lasttrace = now;
2921                         runtime·schedtrace(runtime·debug.scheddetail);
2922                 }
2923         }
2924 }
2925
2926 typedef struct Pdesc Pdesc;
2927 struct Pdesc
2928 {
2929         uint32  schedtick;
2930         int64   schedwhen;
2931         uint32  syscalltick;
2932         int64   syscallwhen;
2933 };
2934 #pragma dataflag NOPTR
2935 static Pdesc pdesc[MaxGomaxprocs];
2936
2937 static uint32
2938 retake(int64 now)
2939 {
2940         uint32 i, s, n;
2941         int64 t;
2942         P *p;
2943         Pdesc *pd;
2944
2945         n = 0;
2946         for(i = 0; i < runtime·gomaxprocs; i++) {
2947                 p = runtime·allp[i];
2948                 if(p==nil)
2949                         continue;
2950                 pd = &pdesc[i];
2951                 s = p->status;
2952                 if(s == Psyscall) {
2953                         // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
2954                         t = p->syscalltick;
2955                         if(pd->syscalltick != t) {
2956                                 pd->syscalltick = t;
2957                                 pd->syscallwhen = now;
2958                                 continue;
2959                         }
2960                         // On the one hand we don't want to retake Ps if there is no other work to do,
2961                         // but on the other hand we want to retake them eventually
2962                         // because they can prevent the sysmon thread from deep sleep.
2963                         if(p->runqhead == p->runqtail &&
2964                                 runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) > 0 &&
2965                                 pd->syscallwhen + 10*1000*1000 > now)
2966                                 continue;
2967                         // Need to decrement number of idle locked M's
2968                         // (pretending that one more is running) before the CAS.
2969                         // Otherwise the M from which we retake can exit the syscall,
2970                         // increment nmidle and report deadlock.
2971                         incidlelocked(-1);
2972                         if(runtime·cas(&p->status, s, Pidle)) {
2973                                 n++;
2974                                 handoffp(p);
2975                         }
2976                         incidlelocked(1);
2977                 } else if(s == Prunning) {
2978                         // Preempt G if it's running for more than 10ms.
2979                         t = p->schedtick;
2980                         if(pd->schedtick != t) {
2981                                 pd->schedtick = t;
2982                                 pd->schedwhen = now;
2983                                 continue;
2984                         }
2985                         if(pd->schedwhen + 10*1000*1000 > now)
2986                                 continue;
2987                         preemptone(p);
2988                 }
2989         }
2990         return n;
2991 }
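
// retake relies on a generation-counter idiom: pdesc caches the last tick
// value observed for each P together with the time of that observation.  If
// the tick has not advanced by a later pass, the P has been sitting in the
// same syscall (or running the same G) the whole time, and the elapsed time
// can be measured from the cached timestamp.  Illustrative sketch of the
// idiom in standard C11 -- observer and stuck are hypothetical names, not
// runtime API:
//
//	#include <stdint.h>
//	#include <stdbool.h>
//
//	typedef struct {
//		uint32_t tick;  // last observed tick
//		int64_t  when;  // time of that observation
//	} observer;
//
//	// Returns true if the tick has stayed at the same value for longer
//	// than threshold nanoseconds.
//	static bool
//	stuck(observer *o, uint32_t tick, int64_t now, int64_t threshold)
//	{
//		if(o->tick != tick) {
//			// Progress was made; restart the clock.
//			o->tick = tick;
//			o->when = now;
//			return false;
//		}
//		return o->when + threshold <= now;
//	}
//
// retake uses this shape twice: once for syscalltick (with an extra 10ms
// grace period when there is no other work to do) and once for schedtick
// with the 10ms preemption quantum.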
2992
2993 // Tell all goroutines that they have been preempted and they should stop.
2994 // This function is purely best-effort.  It can fail to inform a goroutine if a
2995 // processor just started running it.
2996 // No locks need to be held.
2997 // Returns true if preemption request was issued to at least one goroutine.
2998 static bool
2999 preemptall(void)
3000 {
3001         P *p;
3002         int32 i;
3003         bool res;
3004
3005         res = false;
3006         for(i = 0; i < runtime·gomaxprocs; i++) {
3007                 p = runtime·allp[i];
3008                 if(p == nil || p->status != Prunning)
3009                         continue;
3010                 res |= preemptone(p);
3011         }
3012         return res;
3013 }
3014
3015 // Tell the goroutine running on processor P to stop.
3016 // This function is purely best-effort.  It can incorrectly fail to inform the
3017 // goroutine.  It can inform the wrong goroutine.  Even if it informs the
3018 // correct goroutine, that goroutine might ignore the request if it is
3019 // simultaneously executing runtime·newstack.
3020 // No lock needs to be held.
3021 // Returns true if preemption request was issued.
3022 // The actual preemption will happen at some point in the future
3023 // and will be indicated by gp->status no longer being
3024 // Grunning.
3025 static bool
3026 preemptone(P *p)
3027 {
3028         M *mp;
3029         G *gp;
3030
3031         mp = p->m;
3032         if(mp == nil || mp == g->m)
3033                 return false;
3034         gp = mp->curg;
3035         if(gp == nil || gp == mp->g0)
3036                 return false;
3037         gp->preempt = true;
3038         // Every call in a goroutine checks for stack overflow by
3039         // comparing the current stack pointer to gp->stackguard0.
3040         // Setting gp->stackguard0 to StackPreempt folds
3041         // preemption into the normal stack overflow check.
3042         gp->stackguard0 = StackPreempt;
3043         return true;
3044 }
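
// Preemption piggybacks on the stack-overflow check that the compiler emits
// in every function prologue: the prologue compares the stack pointer against
// gp->stackguard0, and StackPreempt is chosen so large that the comparison
// always fails, diverting the goroutine into the stack-growth slow path,
// which then looks at gp->preempt (see runtime·newstack).  Illustrative
// sketch of the idea in standard C11 -- goroutine_sim, prologue_check and
// STACK_PREEMPT_SIM are hypothetical names, and the poison value shown is
// not the runtime's actual sentinel:
//
//	#include <stdint.h>
//	#include <stdbool.h>
//
//	#define STACK_PREEMPT_SIM UINTPTR_MAX  // larger than any real sp
//
//	struct goroutine_sim {
//		uintptr_t stackguard0;  // normally the stack limit plus slack
//		bool preempt;           // set together with the poison value
//	};
//
//	// Emulates the prologue test: true means take the slow path
//	// (grow the stack or honor a preemption request).
//	static bool
//	prologue_check(struct goroutine_sim *gp, uintptr_t sp)
//	{
//		return sp < gp->stackguard0;  // stacks grow down
//	}
//
//	// Requesting preemption never touches the running code directly; it
//	// only makes the next prologue_check fail:
//	//	gp->preempt = true;
//	//	gp->stackguard0 = STACK_PREEMPT_SIM;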
3045
3046 void
3047 runtime·schedtrace(bool detailed)
3048 {
3049         static int64 starttime;
3050         int64 now;
3051         int64 id1, id2, id3;
3052         int32 i, t, h;
3053         uintptr gi;
3054         int8 *fmt;
3055         M *mp, *lockedm;
3056         G *gp, *lockedg;
3057         P *p;
3058
3059         now = runtime·nanotime();
3060         if(starttime == 0)
3061                 starttime = now;
3062
3063         runtime·lock(&runtime·sched.lock);
3064         runtime·printf("SCHED %Dms: gomaxprocs=%d idleprocs=%d threads=%d spinningthreads=%d idlethreads=%d runqueue=%d",
3065                 (now-starttime)/1000000, runtime·gomaxprocs, runtime·sched.npidle, runtime·sched.mcount,
3066                 runtime·sched.nmspinning, runtime·sched.nmidle, runtime·sched.runqsize);
3067         if(detailed) {
3068                 runtime·printf(" gcwaiting=%d nmidlelocked=%d stopwait=%d sysmonwait=%d\n",
3069                         runtime·sched.gcwaiting, runtime·sched.nmidlelocked,
3070                         runtime·sched.stopwait, runtime·sched.sysmonwait);
3071         }
3072         // We must be careful while reading data from P's, M's and G's.
3073         // Even if we hold schedlock, most data can be changed concurrently.
3074         // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
3075         for(i = 0; i < runtime·gomaxprocs; i++) {
3076                 p = runtime·allp[i];
3077                 if(p == nil)
3078                         continue;
3079                 mp = p->m;
3080                 h = runtime·atomicload(&p->runqhead);
3081                 t = runtime·atomicload(&p->runqtail);
3082                 if(detailed)
3083                         runtime·printf("  P%d: status=%d schedtick=%d syscalltick=%d m=%d runqsize=%d gfreecnt=%d\n",
3084                                 i, p->status, p->schedtick, p->syscalltick, mp ? mp->id : -1, t-h, p->gfreecnt);
3085                 else {
3086                         // In non-detailed mode format lengths of per-P run queues as:
3087                         // [len1 len2 len3 len4]
3088                         fmt = " %d";
3089                         if(runtime·gomaxprocs == 1)
3090                                 fmt = " [%d]\n";
3091                         else if(i == 0)
3092                                 fmt = " [%d";
3093                         else if(i == runtime·gomaxprocs-1)
3094                                 fmt = " %d]\n";
3095                         runtime·printf(fmt, t-h);
3096                 }
3097         }
3098         if(!detailed) {
3099                 runtime·unlock(&runtime·sched.lock);
3100                 return;
3101         }
3102         for(mp = runtime·allm; mp; mp = mp->alllink) {
3103                 p = mp->p;
3104                 gp = mp->curg;
3105                 lockedg = mp->lockedg;
3106                 id1 = -1;
3107                 if(p)
3108                         id1 = p->id;
3109                 id2 = -1;
3110                 if(gp)
3111                         id2 = gp->goid;
3112                 id3 = -1;
3113                 if(lockedg)
3114                         id3 = lockedg->goid;
3115                 runtime·printf("  M%d: p=%D curg=%D mallocing=%d throwing=%d gcing=%d"
3116                         " locks=%d dying=%d helpgc=%d spinning=%d blocked=%d lockedg=%D\n",
3117                         mp->id, id1, id2,
3118                         mp->mallocing, mp->throwing, mp->gcing, mp->locks, mp->dying, mp->helpgc,
3119                         mp->spinning, mp->blocked, id3);
3120         }
3121         runtime·lock(&runtime·allglock);
3122         for(gi = 0; gi < runtime·allglen; gi++) {
3123                 gp = runtime·allg[gi];
3124                 mp = gp->m;
3125                 lockedm = gp->lockedm;
3126                 runtime·printf("  G%D: status=%d(%S) m=%d lockedm=%d\n",
3127                         gp->goid, runtime·readgstatus(gp), gp->waitreason, mp ? mp->id : -1,
3128                         lockedm ? lockedm->id : -1);
3129         }
3130         runtime·unlock(&runtime·allglock);
3131         runtime·unlock(&runtime·sched.lock);
3132 }
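
// In non-detailed mode the trace is a single line assembled from the format
// strings above; for example (values invented purely for illustration):
//
//	SCHED 2014ms: gomaxprocs=4 idleprocs=1 threads=6 spinningthreads=0 idlethreads=2 runqueue=3 [0 1 0 2]
//
// where the bracketed list holds the per-P local run queue lengths (t-h).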
3133
3134 // Put mp on midle list.
3135 // Sched must be locked.
3136 static void
3137 mput(M *mp)
3138 {
3139         mp->schedlink = runtime·sched.midle;
3140         runtime·sched.midle = mp;
3141         runtime·sched.nmidle++;
3142         checkdead();
3143 }
3144
3145 // Try to get an m from midle list.
3146 // Sched must be locked.
3147 static M*
3148 mget(void)
3149 {
3150         M *mp;
3151
3152         if((mp = runtime·sched.midle) != nil){
3153                 runtime·sched.midle = mp->schedlink;
3154                 runtime·sched.nmidle--;
3155         }
3156         return mp;
3157 }
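
// The midle list (like the pidle list below) is an intrusive LIFO stack: the
// link pointer lives inside the element itself (M.schedlink, P.link), so push
// and pop are O(1) and allocation-free.  Illustrative sketch of the idiom in
// standard C -- node, push and pop are hypothetical names:
//
//	#include <stddef.h>
//
//	struct node {
//		struct node *next;  // plays the role of M.schedlink / P.link
//		// ... payload ...
//	};
//
//	static void
//	push(struct node **head, struct node *n)
//	{
//		n->next = *head;
//		*head = n;
//	}
//
//	static struct node*
//	pop(struct node **head)
//	{
//		struct node *n;
//
//		n = *head;
//		if(n != NULL)
//			*head = n->next;
//		return n;
//	}
//
// The runtime versions additionally maintain a counter (nmidle, npidle) and
// are only called with runtime·sched.lock held.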
3158
3159 // Put gp on the global runnable queue.
3160 // Sched must be locked.
3161 static void
3162 globrunqput(G *gp)
3163 {
3164         gp->schedlink = nil;
3165         if(runtime·sched.runqtail)
3166                 runtime·sched.runqtail->schedlink = gp;
3167         else
3168                 runtime·sched.runqhead = gp;
3169         runtime·sched.runqtail = gp;
3170         runtime·sched.runqsize++;
3171 }
3172
3173 // Put a batch of runnable goroutines on the global runnable queue.
3174 // Sched must be locked.
3175 static void
3176 globrunqputbatch(G *ghead, G *gtail, int32 n)
3177 {
3178         gtail->schedlink = nil;
3179         if(runtime·sched.runqtail)
3180                 runtime·sched.runqtail->schedlink = ghead;
3181         else
3182                 runtime·sched.runqhead = ghead;
3183         runtime·sched.runqtail = gtail;
3184         runtime·sched.runqsize += n;
3185 }
3186
3187 // Try to get a batch of G's from the global runnable queue.
3188 // Sched must be locked.
3189 static G*
3190 globrunqget(P *p, int32 max)
3191 {
3192         G *gp, *gp1;
3193         int32 n;
3194
3195         if(runtime·sched.runqsize == 0)
3196                 return nil;
3197         n = runtime·sched.runqsize/runtime·gomaxprocs+1;
3198         if(n > runtime·sched.runqsize)
3199                 n = runtime·sched.runqsize;
3200         if(max > 0 && n > max)
3201                 n = max;
3202         if(n > nelem(p->runq)/2)
3203                 n = nelem(p->runq)/2;
3204         runtime·sched.runqsize -= n;
3205         if(runtime·sched.runqsize == 0)
3206                 runtime·sched.runqtail = nil;
3207         gp = runtime·sched.runqhead;
3208         runtime·sched.runqhead = gp->schedlink;
3209         n--;
3210         while(n--) {
3211                 gp1 = runtime·sched.runqhead;
3212                 runtime·sched.runqhead = gp1->schedlink;
3213                 runqput(p, gp1);
3214         }
3215         return gp;
3216 }
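
// The batch size spreads the global queue roughly evenly over all P's while
// never overfilling the local queue.  Worked example with illustrative
// numbers: runqsize=100 and gomaxprocs=4 gives n = 100/4+1 = 26; a caller
// passing max=16 clamps that to 16, and n is never allowed to exceed half
// the local run queue capacity (nelem(p->runq)/2).  The first of the n
// goroutines is returned to be run directly; the remaining n-1 are moved to
// p's local queue via runqput.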
3217
3218 // Put p on the pidle list.
3219 // Sched must be locked.
3220 static void
3221 pidleput(P *p)
3222 {
3223         p->link = runtime·sched.pidle;
3224         runtime·sched.pidle = p;
3225         runtime·xadd(&runtime·sched.npidle, 1);  // TODO: fast atomic
3226 }
3227
3228 // Try to get a p from the pidle list.
3229 // Sched must be locked.
3230 static P*
3231 pidleget(void)
3232 {
3233         P *p;
3234
3235         p = runtime·sched.pidle;
3236         if(p) {
3237                 runtime·sched.pidle = p->link;
3238                 runtime·xadd(&runtime·sched.npidle, -1);  // TODO: fast atomic
3239         }
3240         return p;
3241 }
3242
3243 // Try to put g on local runnable queue.
3244 // If it's full, put onto global queue.
3245 // Executed only by the owner P.
3246 static void
3247 runqput(P *p, G *gp)
3248 {
3249         uint32 h, t;
3250
3251 retry:
3252         h = runtime·atomicload(&p->runqhead);  // load-acquire, synchronize with consumers
3253         t = p->runqtail;
3254         if(t - h < nelem(p->runq)) {
3255                 p->runq[t%nelem(p->runq)] = gp;
3256                 runtime·atomicstore(&p->runqtail, t+1);  // store-release, makes the item available for consumption
3257                 return;
3258         }
3259         if(runqputslow(p, gp, h, t))
3260                 return;
3261         // the queue is not full, now the put above must succeed
3262         goto retry;
3263 }
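
// runqput is the single-producer side of a bounded ring buffer: only the
// owner P ever advances runqtail, so the fast path needs just an acquire
// load of the head (to see how much room consumers have freed) and a release
// store of the tail (to publish the new element); no CAS is required.  An
// illustrative sketch with C11 atomics -- ring, ring_put and RING_CAP are
// hypothetical names standing in for the P run queue and nelem(p->runq):
//
//	#include <stdatomic.h>
//	#include <stdbool.h>
//	#include <stdint.h>
//
//	#define RING_CAP 256u  // illustrative capacity
//
//	struct ring {
//		_Atomic uint32_t head;  // advanced by owner and thieves (consumers)
//		_Atomic uint32_t tail;  // advanced only by the owner (producer)
//		void *buf[RING_CAP];
//	};
//
//	// Owner-only.  Returns false when the ring is full (runqput would
//	// fall back to runqputslow at that point).
//	static bool
//	ring_put(struct ring *r, void *item)
//	{
//		uint32_t h, t;
//
//		h = atomic_load_explicit(&r->head, memory_order_acquire);
//		t = atomic_load_explicit(&r->tail, memory_order_relaxed);
//		if(t - h >= RING_CAP)
//			return false;
//		r->buf[t % RING_CAP] = item;
//		atomic_store_explicit(&r->tail, t + 1, memory_order_release);
//		return true;
//	}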
3264
3265 // Put g and a batch of work from local runnable queue on global queue.
3266 // Executed only by the owner P.
3267 static bool
3268 runqputslow(P *p, G *gp, uint32 h, uint32 t)
3269 {
3270         G *batch[nelem(p->runq)/2+1];
3271         uint32 n, i;
3272
3273         // First, grab a batch from local queue.
3274         n = t-h;
3275         n = n/2;
3276         if(n != nelem(p->runq)/2)
3277                 runtime·throw("runqputslow: queue is not full");
3278         for(i=0; i<n; i++)
3279                 batch[i] = p->runq[(h+i)%nelem(p->runq)];
3280         if(!runtime·cas(&p->runqhead, h, h+n))  // cas-release, commits consume
3281                 return false;
3282         batch[n] = gp;
3283         // Link the goroutines.
3284         for(i=0; i<n; i++)
3285                 batch[i]->schedlink = batch[i+1];
3286         // Now put the batch on global queue.
3287         runtime·lock(&runtime·sched.lock);
3288         globrunqputbatch(batch[0], batch[n], n+1);
3289         runtime·unlock(&runtime·sched.lock);
3290         return true;
3291 }
3292
3293 // Get g from local runnable queue.
3294 // Executed only by the owner P.
3295 static G*
3296 runqget(P *p)
3297 {
3298         G *gp;
3299         uint32 t, h;
3300
3301         for(;;) {
3302                 h = runtime·atomicload(&p->runqhead);  // load-acquire, synchronize with other consumers
3303                 t = p->runqtail;
3304                 if(t == h)
3305                         return nil;
3306                 gp = p->runq[h%nelem(p->runq)];
3307                 if(runtime·cas(&p->runqhead, h, h+1))  // cas-release, commits consume
3308                         return gp;
3309         }
3310 }
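
// Consumers (the owner in runqget, thieves in runqgrab/runqsteal) pop from
// the head with a CAS loop: the element is read first, and the CAS on the
// head both claims it and publishes the new head, so a lost race simply
// retries.  Companion sketch to ring_put above, using the same hypothetical
// struct ring and RING_CAP:
//
//	// Any thread.  Returns NULL when the ring is empty.
//	static void*
//	ring_get(struct ring *r)
//	{
//		uint32_t h, t;
//		void *item;
//
//		for(;;) {
//			h = atomic_load_explicit(&r->head, memory_order_acquire);
//			t = atomic_load_explicit(&r->tail, memory_order_acquire);
//			if(t == h)
//				return NULL;
//			item = r->buf[h % RING_CAP];
//			if(atomic_compare_exchange_weak_explicit(&r->head, &h, h + 1,
//					memory_order_acq_rel, memory_order_relaxed))
//				return item;
//		}
//	}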
3311
3312 // Grabs a batch of goroutines from local runnable queue.
3313 // batch array must be of size nelem(p->runq)/2. Returns number of grabbed goroutines.
3314 // Can be executed by any P.
3315 static uint32
3316 runqgrab(P *p, G **batch)
3317 {
3318         uint32 t, h, n, i;
3319
3320         for(;;) {
3321                 h = runtime·atomicload(&p->runqhead);  // load-acquire, synchronize with other consumers
3322                 t = runtime·atomicload(&p->runqtail);  // load-acquire, synchronize with the producer
3323                 n = t-h;
3324                 n = n - n/2;
3325                 if(n == 0)
3326                         break;
3327                 if(n > nelem(p->runq)/2)  // read inconsistent h and t
3328                         continue;
3329                 for(i=0; i<n; i++)
3330                         batch[i] = p->runq[(h+i)%nelem(p->runq)];
3331                 if(runtime·cas(&p->runqhead, h, h+n))  // cas-release, commits consume
3332                         break;
3333         }
3334         return n;
3335 }
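
// n = n - n/2 rounds up, so a thief takes at least half of what it saw: for
// t-h = 5 it grabs 3, for t-h = 1 it grabs the single element.  Because head
// and tail are read with two separate loads, an inconsistent pair can make
// t-h appear larger than the queue capacity; the n > nelem(p->runq)/2 test
// catches that case and retries instead of copying garbage slots.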
3336
3337 // Steal half of the elements from the local runnable queue of p2
3338 // and put them onto the local runnable queue of p.
3339 // Returns one of the stolen elements (or nil if failed).
3340 static G*
3341 runqsteal(P *p, P *p2)
3342 {
3343         G *gp;
3344         G *batch[nelem(p->runq)/2];
3345         uint32 t, h, n, i;
3346
3347         n = runqgrab(p2, batch);
3348         if(n == 0)
3349                 return nil;
3350         n--;
3351         gp = batch[n];
3352         if(n == 0)
3353                 return gp;
3354         h = runtime·atomicload(&p->runqhead);  // load-acquire, synchronize with consumers
3355         t = p->runqtail;
3356         if(t - h + n >= nelem(p->runq))
3357                 runtime·throw("runqsteal: runq overflow");
3358         for(i=0; i<n; i++, t++)
3359                 p->runq[t%nelem(p->runq)] = batch[i];
3360         runtime·atomicstore(&p->runqtail, t);  // store-release, makes the item available for consumption
3361         return gp;
3362 }
3363
3364 void
3365 runtime·testSchedLocalQueue(void)
3366 {
3367         P *p;
3368         G *gs;
3369         int32 i, j;
3370
3371         p = (P*)runtime·mallocgc(sizeof(*p), nil, FlagNoScan);
3372         gs = (G*)runtime·mallocgc(nelem(p->runq)*sizeof(*gs), nil, FlagNoScan);
3373
3374         for(i = 0; i < nelem(p->runq); i++) {
3375                 if(runqget(p) != nil)
3376                         runtime·throw("runq is not empty initially");
3377                 for(j = 0; j < i; j++)
3378                         runqput(p, &gs[i]);
3379                 for(j = 0; j < i; j++) {
3380                         if(runqget(p) != &gs[i]) {
3381                                 runtime·printf("bad element at iter %d/%d\n", i, j);
3382                                 runtime·throw("bad element");
3383                         }
3384                 }
3385                 if(runqget(p) != nil)
3386                         runtime·throw("runq is not empty afterwards");
3387         }
3388 }
3389
3390 void
3391 runtime·testSchedLocalQueueSteal(void)
3392 {
3393         P *p1, *p2;
3394         G *gs, *gp;
3395         int32 i, j, s;
3396
3397         p1 = (P*)runtime·mallocgc(sizeof(*p1), nil, FlagNoScan);
3398         p2 = (P*)runtime·mallocgc(sizeof(*p2), nil, FlagNoScan);
3399         gs = (G*)runtime·mallocgc(nelem(p1->runq)*sizeof(*gs), nil, FlagNoScan);
3400
3401         for(i = 0; i < nelem(p1->runq); i++) {
3402                 for(j = 0; j < i; j++) {
3403                         gs[j].sig = 0;
3404                         runqput(p1, &gs[j]);
3405                 }
3406                 gp = runqsteal(p2, p1);
3407                 s = 0;
3408                 if(gp) {
3409                         s++;
3410                         gp->sig++;
3411                 }
3412                 while(gp = runqget(p2)) {
3413                         s++;
3414                         gp->sig++;
3415                 }
3416                 while(gp = runqget(p1))
3417                         gp->sig++;
3418                 for(j = 0; j < i; j++) {
3419                         if(gs[j].sig != 1) {
3420                                 runtime·printf("bad element %d(%d) at iter %d\n", j, gs[j].sig, i);
3421                                 runtime·throw("bad element");
3422                         }
3423                 }
3424                 if(s != i/2 && s != i/2+1) {
3425                         runtime·printf("bad steal %d, want %d or %d, iter %d\n",
3426                                 s, i/2, i/2+1, i);
3427                         runtime·throw("bad steal");
3428                 }
3429         }
3430 }
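
// The accepted steal counts above follow from runqgrab taking the rounded-up
// half: after i goroutines are queued on p1, the thief receives i - i/2 of
// them, which is i/2 when i is even and i/2+1 when i is odd, so both values
// must be allowed.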
3431
3432 void
3433 runtime·setmaxthreads_m(void)
3434 {
3435         int32 in;
3436         int32 out;
3437
3438         in = g->m->scalararg[0];
3439
3440         runtime·lock(&runtime·sched.lock);
3441         out = runtime·sched.maxmcount;
3442         runtime·sched.maxmcount = in;
3443         checkmcount();
3444         runtime·unlock(&runtime·sched.lock);
3445
3446         g->m->scalararg[0] = out;
3447 }
3448
3449 static int8 experiment[] = GOEXPERIMENT; // defined in zaexperiment.h
3450
3451 static bool
3452 haveexperiment(int8 *name)
3453 {
3454         int32 i, j;
3455         
3456         for(i=0; i<sizeof(experiment); i++) {
3457                 if((i == 0 || experiment[i-1] == ',') && experiment[i] == name[0]) {
3458                         for(j=0; name[j]; j++)
3459                                 if(experiment[i+j] != name[j])
3460                                         goto nomatch;
3461                         if(experiment[i+j] != '\0' && experiment[i+j] != ',')
3462                                 goto nomatch;
3463                         return 1;
3464                 }
3465         nomatch:;
3466         }
3467         return 0;
3468 }
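
// haveexperiment is a hand-rolled search of the comma-separated GOEXPERIMENT
// string for an exact, whole-token match (a name must be bounded by the start
// of the string or a comma on the left and by a comma or NUL on the right).
// The same check written against the standard C library -- an illustrative
// sketch, with has_token as a hypothetical name:
//
//	#include <stdbool.h>
//	#include <string.h>
//
//	static bool
//	has_token(const char *list, const char *name)
//	{
//		size_t n = strlen(name);
//		const char *p = list;
//
//		while(*p) {
//			if(strncmp(p, name, n) == 0 && (p[n] == '\0' || p[n] == ','))
//				return true;
//			p = strchr(p, ',');
//			if(p == NULL)
//				return false;
//			p++;
//		}
//		return false;
//	}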
3469
3470 #pragma textflag NOSPLIT
3471 void
3472 sync·runtime_procPin(intptr p)
3473 {
3474         M *mp;
3475
3476         mp = g->m;
3477         // Disable preemption.
3478         mp->locks++;
3479         p = mp->p->id;
3480         FLUSH(&p);
3481 }
3482
3483 #pragma textflag NOSPLIT
3484 void
3485 sync·runtime_procUnpin()
3486 {
3487         g->m->locks--;
3488 }