// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy

	stackCache = 1
)

const (
	uintptrMask = 1<<(8*ptrSize) - 1
	poisonGC    = uintptrMask & 0xf969696969696969
	poisonStack = uintptrMask & 0x6868686868686868

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)
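
// Illustrative sketch (not part of the original file): how the sentinel
// values above come out. -1314 is ...fffffade in two's complement, so ANDing
// with uintptrMask truncates it to the pointer width: 0xfffffade on 32-bit,
// 0xfffffffffffffade on 64-bit. Either value compares greater than any real
// stack pointer, so a bounds check against stackguard0 is guaranteed to fail
// and route execution into morestack.
//
//	const stackPreempt32 = (1<<32 - 1) & -1314 // 0xfffffade
//	const stackPreempt64 = (1<<64 - 1) & -1314 // 0xfffffffffffffade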

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mspan
var stackpoolmu mutex

// List of stacks queued for freeing at the end of GC; see copystack and
// shrinkfinish. Protected by stackpoolmu.
var stackfreequeue stack
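
// stackOrderForSize is an illustrative sketch (not part of the original
// file). With the usual _FixedStack = 2048 and _NumStackOrders = 4, the
// pooled sizes are 2K, 4K, 8K, and 16K at orders 0..3, and the order of a
// pooled size is recovered by halving down to _FixedStack, exactly as
// stackalloc and stackfree do inline below.
func stackOrderForSize(n uintptr) uint8 {
	order := uint8(0)
	for n2 := n; n2 > _FixedStack; n2 >>= 1 {
		order++
	}
	return order // e.g. 8192 -> order 2, since 8192 == _FixedStack<<2
}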

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		gothrow("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		mSpanList_Init(&stackpool[i])
	}
}

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.next
	if s == list {
		// no free stacks. Allocate another span worth.
		s = mHeap_AllocStack(&mheap_, _StackCacheSize>>_PageShift)
		if s == nil {
			gothrow("out of memory")
		}
		if s.freelist.ptr() != nil {
			gothrow("bad freelist")
		}
		// Carve the span into order-sized stacks and thread them
		// onto the span's free list.
		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
			x := gclinkptr(uintptr(s.start)<<_PageShift + i)
			x.ptr().next = s.freelist
			s.freelist = x
		}
		mSpanList_Insert(list, s)
	}
85 gothrow("span has no free stacks")
87 s.freelist = x.ptr().next
89 if s.freelist.ptr() == nil {
90 // all stacks in s are allocated.

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mHeap_Lookup(&mheap_, (unsafe.Pointer)(x))
	if s.state != _MSpanStack {
		gothrow("freeing stack not in a stack span")
	}
	if s.freelist.ptr() == nil {
		// s will now have a free stack
		mSpanList_Insert(&stackpool[order], s)
	}
	x.ptr().next = s.freelist
	s.freelist = x
	s.ref--
	if s.ref == 0 {
		// span is completely free - return to heap
		mSpanList_Remove(s)
		s.freelist = 0
		mHeap_FreeStack(&mheap_, s)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}
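
// Illustrative note (not part of the original file): refill and release both
// aim for _StackCacheSize/2 per order. Refilling stops at half capacity and
// releasing drains back down to half, so an M that alternates between
// allocating and freeing a single stack stays inside the band and does not
// bounce stacks to the global pool on every operation.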

func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		gothrow("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		gothrow("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
		if v == nil {
			gothrow("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if c == nil || thisg.m.gcing != 0 || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = (unsafe.Pointer)(x)
	} else {
		s := mHeap_AllocStack(&mheap_, round(uintptr(n), _PageSize)>>_PageShift)
		if s == nil {
			gothrow("out of memory")
		}
		v = (unsafe.Pointer)(s.start << _PageShift)
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
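
// Illustrative note (not part of the original file): with the typical values
// _FixedStack = 2048, _NumStackOrders = 4, and _StackCacheSize = 32768, a
// stackalloc(4096) is order 1 and is served from the per-M cache, while a
// stackalloc(32768) fails the n < _FixedStack<<_NumStackOrders test and gets
// a dedicated span straight from the heap.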

func stackfree(stk stack) {
	gp := getg()
	n := stk.hi - stk.lo
	v := (unsafe.Pointer)(stk.lo)
	if n&(n-1) != 0 {
		gothrow("stack not a power of 2")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclr(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if c == nil || gp.m.gcing != 0 || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mHeap_Lookup(&mheap_, v)
		if s.state != _MSpanStack {
			println(hex(s.start<<_PageShift), v)
			gothrow("bad span state")
		}
		mHeap_FreeStack(&mheap_, s)
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
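
// Illustrative note (not part of the original file): runtime.main replaces
// this placeholder early in startup; in this era of the runtime it picks
// decimal limits (1,000,000,000 bytes on 64-bit, 250,000,000 on 32-bit) so
// the stack-overflow message reads nicely.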

var mapnames = []string{
	_BitsDead:    "---",
	_BitsScalar:  "scalar",
	_BitsPointer: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*unsafe.Pointer)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", p, "\n")
	}
	if adjinfo.old.lo <= uintptr(p) && uintptr(p) < adjinfo.old.hi {
		*pp = add(p, adjinfo.delta)
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", p, " -> ", *pp, "\n")
		}
	}
}
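
// Illustrative example (not part of the original file): if the old stack is
// [0x1000, 0x2000) and the new one is [0x5000, 0x6000), then
// delta = newbase - oldbase = 0x6000 - 0x2000 = 0x4000, and a saved pointer
// 0x1a38 into the old stack is rewritten to 0x1a38 + 0x4000 = 0x5a38, which
// sits at the same offset below the new stack base.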

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

func ptrbits(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/4] >> ((i & 3) * 2)) & 3
}
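
// Illustrative example (not part of the original file): the stack maps pack
// _BitsPerPointer = 2 bits per word, four words per byte, low bits first.
// For a map byte 0x26 = 0b00_10_01_10:
//
//	ptrbits(bv, 0) == 2 // _BitsPointer
//	ptrbits(bv, 1) == 1 // _BitsScalar
//	ptrbits(bv, 2) == 2 // _BitsPointer
//	ptrbits(bv, 3) == 0 // _BitsDead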

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n / _BitsPerPointer)
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print("        ", add(scanp, i*ptrSize), ":", mapnames[ptrbits(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*ptrSize))), " # ", i, " ", bv.bytedata[i/4], "\n")
		}
		switch ptrbits(&bv, i) {
		default:
			gothrow("unexpected pointer bits")
		case _BitsDead:
			if debug.gcdead != 0 {
				*(*unsafe.Pointer)(add(scanp, i*ptrSize)) = unsafe.Pointer(uintptr(poisonStack))
			}
		case _BitsScalar:
			// ok
		case _BitsPointer:
			p := *(*unsafe.Pointer)(add(scanp, i*ptrSize))
			up := uintptr(p)
			if f != nil && 0 < up && up < _PageSize && invalidptr != 0 || up == poisonGC || up == poisonStack {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", gofuncname(f), " at ", add(scanp, i*ptrSize), ": ", p, "\n")
				gothrow("invalid stack pointer")
			}
			if minp <= up && up < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", p, " ", gofuncname(f), "\n")
				}
				*(*unsafe.Pointer)(add(scanp, i*ptrSize)) = unsafe.Pointer(up + delta)
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	if thechar != '6' && thechar != '8' {
		minsize = ptrSize
	} else {
		minsize = 0
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			gothrow("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			gothrow("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = (uintptr(bv.n) * ptrSize) / _BitsPerPointer
		if stackDebug >= 3 {
			print("      locals ", pcdata, "/", stackmap.n, " ", size/ptrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", uintptr(frame.arglen), "\n")
				gothrow("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				gothrow("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
	}
	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, (unsafe.Pointer)(&gp.sched.ctxt))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, (unsafe.Pointer)(&d.fn))
		adjustpointer(adjinfo, (unsafe.Pointer)(&d.argp))
		adjustpointer(adjinfo, (unsafe.Pointer)(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, (unsafe.Pointer)(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, (unsafe.Pointer)(&s.elem))
		adjustpointer(adjinfo, (unsafe.Pointer)(&s.selectdone))
	}
}
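
// Illustrative note (not part of the original file): a SudoG can point into a
// stack because a blocked channel operation records the address of its local
// operand. In, say, "v := <-ch" the receiver parks with s.elem aimed at v's
// stack slot so the sender can write straight into it; if this stack moves
// while the goroutine is parked, s.elem must move with it.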

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		gothrow("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		gothrow("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", old.hi-old.lo, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// adjust pointers in the to-be-copied frames
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// adjust other miscellaneous things that have pointers into stacks.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	adjustsudogs(gp, &adjinfo)

	// copy the stack to the new location
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfb)
	}
	memmove(unsafe.Pointer(new.hi-used), unsafe.Pointer(old.hi-used), used)

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	if newsize > old.hi-old.lo {
		// growing, free stack immediately
		stackfree(old)
	} else {
		// shrinking, queue up free operation. We can't actually free the stack
		// just yet because we might run into the following situation:
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is shrunk
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The marking fails because
		//    the pointer looks like a pointer into a free span.
		// By not freeing, we prevent step #4 until GC is done.
		lock(&stackpoolmu)
		*(*stack)(unsafe.Pointer(old.lo)) = stackfreequeue
		stackfreequeue = old
		unlock(&stackpoolmu)
	}
}
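
// Illustrative note (not part of the original file): stackfreequeue is an
// intrusive list. The queued stack is dead memory, so its first word is
// reused to hold the previous queue head; deferring the free therefore needs
// no allocation, and shrinkfinish later walks the chain from stackfreequeue,
// calling stackfree on each entry.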

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
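
// Illustrative example (not part of the original file):
//
//	round2(1)    == 1
//	round2(4096) == 4096 // powers of 2 are unchanged
//	round2(5000) == 8192 // everything else rounds up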

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost: the stack
// doubles each time, so each byte is copied O(1) times across all grows.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.stackguard0 == stackFork {
		gothrow("stack growth after fork")
	}
	if thisg.m.morebuf.g != thisg.m.curg {
		print("runtime: newstack called from g=", thisg.m.morebuf.g, "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g)
		gothrow("runtime: wrong goroutine in newstack")
	}
	if thisg.m.curg.throwsplit {
		gp := thisg.m.curg
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
		gothrow("runtime: stack split at bad time")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning or Gscanrunning.

	gp := thisg.m.curg
	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = nil

	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = "stack growth"

	rewindmorestack(&gp.sched)

	if gp.stack.lo == 0 {
		gothrow("missing stack in newstack")
	}
	sp := gp.sched.sp
	if thechar == '6' || thechar == '8' {
		// The call to morestack cost a word.
		sp -= ptrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		gothrow("runtime: split stack overflow")
	}

	if gp.sched.ctxt != nil {
		// morestack wrote sched.ctxt on its way in here,
		// without a write barrier. Run the write barrier now.
		// It is not possible to be preempted between then
		// and now, so it's okay.
		writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
	}

	if gp.stackguard0 == stackPreempt {
		if gp == thisg.m.g0 {
			gothrow("runtime: preempt g0")
		}
		if thisg.m.p == nil && thisg.m.locks == 0 {
			gothrow("runtime: g is running but p is not")
		}
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as it sees a _Gwaiting and does the stack scan.
				// If so this stack will be scanned twice which does not change correctness.
			}
			gcphasework(gp)
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gp.preempt = false
			gp.preemptscan = false // Tells the GC preemption was successful.
			gogo(&gp.sched)        // never return
		}

		// Be conservative about where we preempt.
		// We are interested in preempting user Go code, not runtime code.
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.gcing != 0 || thisg.m.p.status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			casgstatus(gp, _Gwaiting, _Grunning)
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gosched_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := int(gp.stack.hi - gp.stack.lo)
	newsize := oldsize * 2
	if uintptr(newsize) > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		gothrow("stack overflow")
	}

	casgstatus(gp, _Gwaiting, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, uintptr(newsize))
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = (unsafe.Pointer)(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, (unsafe.Pointer)(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
func shrinkstack(gp *g) {
	if readgstatus(gp) == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack)
			gp.stack.lo = 0
			gp.stack.hi = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		gothrow("missing stack in shrinkstack")
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	if newsize < _FixedStack {
		return // don't shrink below the minimum-sized stack
	}
	used := gp.stack.hi - gp.sched.sp
	if used >= oldsize/4 {
		return // still using at least 1/4 of the segment.
	}
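
	// Illustrative example (not part of the original file): a goroutine with
	// a 32K stack of which only 6K is live passes both tests (newsize 16K >=
	// _FixedStack, and 6K < 32K/4 = 8K) and is shrunk to 16K; with 9K live it
	// would be left alone.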

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if goos_windows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	oldstatus := casgcopystack(gp)
	copystack(gp, newsize)
	casgstatus(gp, _Gcopystack, oldstatus)
}

// Do any delayed stack freeing that was queued up during GC.
func shrinkfinish() {
	lock(&stackpoolmu)
	s := stackfreequeue
	stackfreequeue = stack{}
	unlock(&stackpoolmu)
	for s.lo != 0 {
		t := *(*stack)(unsafe.Pointer(s.lo))
		stackfree(s)
		s = t
	}
}
811 gothrow("attempt to execute C code on Go stack")