// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.

The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1
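
	// For illustration only (a hypothetical helper, not part of the
	// runtime), the same rounding written as a function: propagate the
	// highest set bit of n-1 into every lower position, then add 1.
	//
	//	func roundUpPow2(n uintptr) uintptr {
	//		n--
	//		n |= n >> 1
	//		n |= n >> 2
	//		n |= n >> 4
	//		n |= n >> 8
	//		n |= n >> 16
	//		n |= n >> 32 // harmless on 32-bit: shifting past the width yields 0
	//		return n + 1
	//	}
	//
	// With _StackMin = 2048 and _StackSystem = 0 (e.g. linux/amd64),
	// _FixedStack0 is already a power of 2, so _FixedStack stays 2048;
	// on Windows, _StackSystem pushes the sum past 2048 and the OR
	// cascade rounds it back up to the next power of 2.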

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 720*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
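
	// Worked example (illustrative): on linux/amd64,
	// sys.StackGuardMultiplier is 1 and _StackSystem is 0, so
	// _StackGuard = 720 and _StackLimit = 720 - 0 - 128 = 592 bytes
	// for the deepest chain of NOSPLIT functions.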

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	_StackPreempt = uintptrMask & -1314
	_StackFork    = uintptrMask & -1234
)
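
// Worked example (illustrative): uintptrMask and -1314 combine as untyped
// constants, so on 64-bit systems
//
//	uintptrMask & -1314 = (1<<64 - 1) & -1314 = 1<<64 - 1314 = 0xfffffffffffffade
//
// and on 32-bit systems the same expression is 0xfffffade, matching the
// comment above. Any real SP is far below these values, so the prologue's
// unsigned comparison always branches into morestack.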

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug = 0

	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy

	stackCache = 1
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1
	poisonStack = uintptrMask & 0x6868686868686868

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//
//	order = log_2(size/FixedStack)
//
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex
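
// Illustrative sketch (a hypothetical helper, not part of the runtime):
// computing the order of a small stack size per the formula above. With
// _FixedStack = 2048, a 2 KB stack is order 0, 4 KB is order 1, 8 KB is
// order 2, and so on.
func exampleStackOrder(size uintptr) uint8 {
	order := uint8(0)
	for s := uintptr(_FixedStack); s < size; s <<= 1 {
		order++
	}
	return order
}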

// List of stack spans to be freed at the end of GC. Protected by
// stackpoolmu.
var stackFreeQueue mSpanList

// Cached value of haveexperiment("framepointer")
var framepointer_enabled bool

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	stackFreeQueue.init()
}

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocStack(_StackCacheSize >> _PageShift)
		if s == nil {
			throw("out of memory")
		}
		if s.ref != 0 {
			throw("bad ref")
		}
		if s.freelist.ptr() != nil {
			throw("bad freelist")
		}
		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
			x := gclinkptr(uintptr(s.start)<<_PageShift + i)
			x.ptr().next = s.freelist
			s.freelist = x
		}
		list.insert(s)
	}
	x := s.freelist
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.freelist = x.ptr().next
	s.ref++
	if s.freelist.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mheap_.lookup(unsafe.Pointer(x))
	if s.state != _MSpanStack {
		throw("freeing stack not in a stack span")
	}
	if s.freelist.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.freelist
	s.freelist = x
	s.ref--
	if gcphase == _GCoff && s.ref == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.freelist = 0
		mheap_.freeStack(s)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}
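
// The refill/release pair above gives the per-P cache simple hysteresis:
// refill pulls stacks from the global pool until the cache holds
// _StackCacheSize/2 bytes, and release pushes stacks back until it drops
// to _StackCacheSize/2, so a cache hovering near a boundary does not
// bounce whole batches between the local and global pools on every
// alloc/free. A minimal sketch of the same pattern on plain slices
// (hypothetical, not runtime code):
//
//	func refill(local, global *[]int, half int) {
//		for len(*local) < half && len(*global) > 0 {
//			n := len(*global) - 1
//			*local = append(*local, (*global)[n])
//			*global = (*global)[:n]
//		}
//	}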

func stackalloc(n uint32) (stack, []stkbar) {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	// Compute the size of stack barrier array.
	maxstkbar := gcMaxStackBarriers(int(n))
	nstkbar := unsafe.Sizeof(stkbar{}) * uintptr(maxstkbar)

	if debug.efence != 0 || stackFromSystem != 0 {
		v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		top := uintptr(n) - nstkbar
		stkbarSlice := slice{add(v, top), 0, maxstkbar}
		return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		s := mheap_.allocStack(round(uintptr(n), _PageSize) >> _PageShift)
		if s == nil {
			throw("out of memory")
		}
		v = unsafe.Pointer(s.start << _PageShift)
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	top := uintptr(n) - nstkbar
	stkbarSlice := slice{add(v, top), 0, maxstkbar}
	return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
}

func stackfree(stk stack, n uintptr) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclr(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mheap_.lookup(v)
		if s.state != _MSpanStack {
			println(hex(s.start<<_PageShift), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mheap_.freeStack(s)
		} else {
			// Otherwise, add it to a list of stack spans
			// to be freed at the end of GC.
			//
			// TODO(austin): Make it possible to re-use
			// these spans as stacks, like we do for small
			// stack spans. (See issue #11466.)
			lock(&stackpoolmu)
			stackFreeQueue.insert(s)
			unlock(&stackpoolmu)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*unsafe.Pointer)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", p, "\n")
	}
	if adjinfo.old.lo <= uintptr(p) && uintptr(p) < adjinfo.old.hi {
		*pp = add(p, adjinfo.delta)
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", p, " -> ", *pp, "\n")
		}
	}
}
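
// Worked example (illustrative): if the old stack is [0x1000, 0x2000) and
// the new stack is [0x7000, 0x8000), then delta = new.hi - old.hi = 0x6000,
// and a stack pointer 0x1840 is rewritten to 0x1840 + 0x6000 = 0x7840.
// Pointers outside [old.lo, old.hi) are left untouched.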

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}
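
// Worked example (illustrative): a bitmap byte 0b00000101 marks words 0
// and 2 of the described memory as pointers. For i = 2:
//
//	bytedata[2/8] = 0b00000101
//	(0b00000101 >> (2 % 8)) & 1 = 1
//
// so word 2 is a pointer slot and must be adjusted if it points into the
// old stack.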

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print("        ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) == 1 {
			pp := (*uintptr)(add(scanp, i*sys.PtrSize))
			p := *pp
			if f != nil && 0 < p && p < _PageSize && debug.invalidptr != 0 || p == poisonStack {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid stack pointer")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", p, " ", funcname(f), "\n")
				}
				*pp = p + delta
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of stack of a goroutine that does an systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.TheChar {
	case '7':
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		if stackDebug >= 3 {
			print("      locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.TheChar == '6' && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", uintptr(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
	}
	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
		adjustpointer(adjinfo, unsafe.Pointer(&s.selectdone))
	}
}

func adjuststkbar(gp *g, adjinfo *adjustinfo) {
	for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
		adjustpointer(adjinfo, unsafe.Pointer(&gp.stkbar[i].savedLRPtr))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new, newstkbar := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// adjust pointers in the to-be-copied frames
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// adjust other miscellaneous things that have pointers into stacks.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	adjustsudogs(gp, &adjinfo)
	adjuststkbar(gp, &adjinfo)

	// copy the stack to the new location
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfb)
	}
	memmove(unsafe.Pointer(new.hi-used), unsafe.Pointer(old.hi-used), used)

	// copy old stack barriers to new stack barrier array
	newstkbar = newstkbar[:len(gp.stkbar)]
	copy(newstkbar, gp.stkbar)

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	oldsize := gp.stackAlloc
	gp.stackAlloc = newsize
	gp.stkbar = newstkbar
	gp.stktopsp += adjinfo.delta

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old, oldsize)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
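//
// For example (illustrative arithmetic), doubling from a 2 KB stack up to
// 1 MB copies 2K+4K+...+512K ≈ 1 MB in total, so the total copying cost
// stays proportional to the final stack size rather than quadratic in it.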
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}
	if thisg.m.curg.throwsplit {
		gp := thisg.m.curg
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	gp := thisg.m.curg
	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0
	rewindmorestack(&gp.sched)

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = "stack growth"

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.TheChar == '6' || sys.TheChar == '8' {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if gp.sched.ctxt != nil {
		// morestack wrote sched.ctxt on its way in here,
		// without a write barrier. Run the write barrier now.
		// It is not possible to be preempted between then
		// and now, so it's okay.
		writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				scanstack(gp)
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := int(gp.stackAlloc)
	newsize := oldsize * 2
	if uintptr(newsize) > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	casgstatus(gp, _Gwaiting, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, uintptr(newsize))
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
func shrinkstack(gp *g) {
	if readgstatus(gp) == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack, gp.stackAlloc)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stkbar = nil
			gp.stkbarPos = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}

	oldsize := gp.stackAlloc
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	oldstatus := casgcopystack(gp)
	copystack(gp, newsize)
	casgstatus(gp, _Gcopystack, oldstatus)
}
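
// Worked example (illustrative): for an 8 KB stack, avail = 8192, so the
// stack shrinks to 4 KB only when used = hi - sp + _StackLimit stays below
// 2048 bytes; with _StackLimit = 592 (linux/amd64), the goroutine's live
// frames must occupy less than about 1.4 KB for the shrink to happen.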

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.ref == 0 {
				list.remove(s)
				s.freelist = 0
				mheap_.freeStack(s)
			}
			s = next
		}
	}

	// Free queued stack spans.
	for !stackFreeQueue.isEmpty() {
		s := stackFreeQueue.first
		stackFreeQueue.remove(s)
		mheap_.freeStack(s)
	}

	unlock(&stackpoolmu)
}

//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute C code on Go stack")
	})
}