// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
8 "runtime/internal/atomic"
/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig:
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/
const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1
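	// A worked example of the rounding (illustrative values, not part
	// of the original source): on linux/amd64 _StackSystem is 0, so
	// _FixedStack0 = 2048, already a power of 2, and _FixedStack
	// stays 2048. On windows/amd64 _StackSystem is 512*8 = 4096, so
	// _FixedStack0 = 6144; the shifted ORs smear the top bit of
	// 6143 across every lower bit (0x17ff -> 0x1fff), and adding 1
	// yields _FixedStack = 8192.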
	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 720*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128
	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
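	// Worked numbers (not part of the original source): with
	// sys.StackGuardMultiplier == 1 and _StackSystem == 0 (e.g.
	// linux/amd64), _StackGuard is 720 and _StackLimit is
	// 720 - 0 - 128 = 592 bytes of headroom for NOSPLIT chains.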
	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	_StackPreempt = uintptrMask & -1314
	_StackFork    = uintptrMask & -1234
)
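// Worked values (not part of the original source): uintptrMask & -1314
// is 0xfffffade on 32-bit systems and 0xfffffffffffffade on 64-bit
// systems, so the marker is larger than any real stack pointer and the
// split stack check against stackguard0 always fails once it is
// installed.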
const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug = 0

	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy

	stackCache = 1
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1
	poisonStack = uintptrMask & 0x6868686868686868
	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)
// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex
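// For example (illustrative values, not part of the original source):
// with _FixedStack == 2048 and _NumStackOrders == 4, as on linux/amd64,
// the orders hold free stacks of 2 KB, 4 KB, 8 KB, and 16 KB, and an
// 8 KB stack belongs to order log_2(8192/2048) = 2.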
// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
}
// Cached value of haveexperiment("framepointer")
var framepointer_enabled bool
func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}
// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
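// A few values for orientation (illustrative, not part of the original
// source): stacklog2(1) == 0, stacklog2(4) == 2, stacklog2(31) == 4.
// stackalloc and stackfree use stacklog2(s.npages) to index
// stackLarge.free for stacks too big for the fixed-order pools.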
// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocStack(_StackCacheSize >> _PageShift)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.stackfreelist.ptr() != nil {
			throw("bad stackfreelist")
		}
		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
			x := gclinkptr(uintptr(s.start)<<_PageShift + i)
			x.ptr().next = s.stackfreelist
			s.stackfreelist = x
		}
		list.insert(s)
	}
	x := s.stackfreelist
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.stackfreelist = x.ptr().next
	s.allocCount++
	if s.stackfreelist.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}
// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mheap_.lookup(unsafe.Pointer(x))
	if s.state != _MSpanStack {
		throw("freeing stack not in a stack span")
	}
	if s.stackfreelist.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.stackfreelist
	s.stackfreelist = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.stackfreelist = 0
		mheap_.freeStack(s)
	}
}
// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}
func stackalloc(n uint32) (stack, []stkbar) {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	// Compute the size of stack barrier array.
	maxstkbar := gcMaxStackBarriers(int(n))
	nstkbar := unsafe.Sizeof(stkbar{}) * uintptr(maxstkbar)

	if debug.efence != 0 || stackFromSystem != 0 {
		v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		top := uintptr(n) - nstkbar
		stkbarSlice := slice{add(v, top), 0, maxstkbar}
		return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
	}
	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocStack(npage)
			if s == nil {
				throw("out of memory")
			}
		}
		v = unsafe.Pointer(s.start << _PageShift)
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	top := uintptr(n) - nstkbar
	stkbarSlice := slice{add(v, top), 0, maxstkbar}
	return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
}
func stackfree(stk stack, n uintptr) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclr(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mheap_.lookup(v)
		if s.state != _MSpanStack {
			println(hex(s.start<<_PageShift), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mheap_.freeStack(s)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}
var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}
// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp
type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}
// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}
// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}
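// For instance (worked example, not part of the original source),
// ptrbit(&bv, 10) reads bit 2 of bv.bytedata[1]; combined with ptrnames
// above, a 1 bit means "ptr" (adjust the slot) and 0 means "scalar".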
// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := bv.n
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print("        ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) == 1 {
			pp := (*uintptr)(add(scanp, i*sys.PtrSize))
		retry:
			p := *pp
			if f != nil && 0 < p && p < _PageSize && debug.invalidptr != 0 || p == poisonStack {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid stack pointer")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", p, " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}
// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		if stackDebug >= 3 {
			print("      locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", frame.arglen, "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
	}
	return true
}
func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
}
func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}
func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}
func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
		adjustpointer(adjinfo, unsafe.Pointer(&s.selectdone))
	}
}
func adjuststkbar(gp *g, adjinfo *adjustinfo) {
	for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
		adjustpointer(adjinfo, unsafe.Pointer(&gp.stkbar[i].savedLRPtr))
	}
}
func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}
func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
		p = uintptr(unsafe.Pointer(sg.selectdone)) + unsafe.Sizeof(sg.selectdone)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}
// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// It's important that we *only* do this for async
	// copystack; otherwise, gp may be in the middle of
	// putting itself on wait queues and this would
	// self-deadlock.
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		lock(&sg.c.lock)
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point into
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		unlock(&sg.c.lock)
	}

	return sgsize
}
// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
//
// If sync is true, this is a self-triggered stack growth and, in
// particular, no other G may be writing to gp's stack (e.g., via a
// channel operation). If sync is false, copystack protects against
// concurrent channel operations.
func copystack(gp *g, newsize uintptr, sync bool) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new, newstkbar := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if sync {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs can point into the stack. During concurrent
		// shrinking, these areas may be written to. Find the
		// highest such pointer so we can handle everything
		// there and below carefully. (This shouldn't be far
		// from the bottom of the stack, so there's little
		// cost in handling everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location.
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Disallow sigprof scans of this stack and block if there's
	// one in progress.
	gcLockStackBarriers(gp)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	adjuststkbar(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// copy old stack barriers to new stack barrier array
	newstkbar = newstkbar[:len(gp.stkbar)]
	copy(newstkbar, gp.stkbar)

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	oldsize := gp.stackAlloc
	gp.stackAlloc = newsize
	gp.stkbar = newstkbar
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	gcUnlockStackBarriers(gp)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old, oldsize)
}
// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
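// For example round2(1) == 1, round2(5) == 8, round2(8) == 8
// (illustrative values, not part of the original source). Stack sizes
// must be powers of 2 because stackalloc checks n&(n-1) != 0, so
// callers round requested sizes up with round2 before allocating.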
// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}
	if thisg.m.curg.throwsplit {
		gp := thisg.m.curg
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	gp := thisg.m.curg
	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0
	rewindmorestack(&gp.sched)
	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}
	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if gp.sched.ctxt != nil {
		// morestack wrote sched.ctxt on its way in here,
		// without a write barrier. Run the write barrier now.
		// It is not possible to be preempted between then
		// and now, so it's okay.
		writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
	}
	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		// Synchronize with scang.
		casgstatus(gp, _Grunning, _Gwaiting)
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				scanstack(gp)
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			// This clears gcscanvalid.
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}
	// Allocate a bigger segment and move the stack.
	oldsize := int(gp.stackAlloc)
	newsize := oldsize * 2
	if uintptr(newsize) > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}
	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, uintptr(newsize), true)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}
// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}
// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
	gstatus := readgstatus(gp)
	if gstatus&^_Gscan == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack, gp.stackAlloc)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stkbar = nil
			gp.stkbarPos = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if gstatus&_Gscan == 0 {
		throw("bad status in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}

	oldsize := gp.stackAlloc
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
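	// Illustrative numbers (not part of the original source): an 8 KB
	// stack whose SP sits 1 KB below stack.hi counts
	// used = 1024 + _StackLimit bytes; with _StackLimit == 592 that is
	// 1616 < 8192/4 == 2048, so the stack is shrunk to 4 KB.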
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize, false)
}
// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.stackfreelist = 0
				mheap_.freeStack(s)
			}
			s = next
		}
	}

	unlock(&stackpoolmu)
	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			mheap_.freeStack(s)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}
//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute C code on Go stack")
	})
}