1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Cgo call and callback support.
7 // To call into the C function f from Go, the cgo-generated code calls
8 // runtime.cgocall(_cgo_Cfunc_f, frame), where _cgo_Cfunc_f is a
9 // gcc-compiled function written by cgo.
11 // runtime.cgocall (below) calls entersyscall so as not to block
12 // other goroutines or the garbage collector, and then calls
13 // runtime.asmcgocall(_cgo_Cfunc_f, frame).
15 // runtime.asmcgocall (in asm_$GOARCH.s) switches to the m->g0 stack
16 // (assumed to be an operating system-allocated stack, so safe to run
17 // gcc-compiled code on) and calls _cgo_Cfunc_f(frame).
19 // _cgo_Cfunc_f invokes the actual C function f with arguments
20 // taken from the frame structure, records the results in the frame,
21 // and returns to runtime.asmcgocall.
23 // After it regains control, runtime.asmcgocall switches back to the
24 // original g (m->curg)'s stack and returns to runtime.cgocall.
26 // After it regains control, runtime.cgocall calls exitsyscall, which blocks
27 // until this m can run Go code without violating the $GOMAXPROCS limit,
28 // and then unlocks g from m.
30 // The above description skipped over the possibility of the gcc-compiled
31 // function f calling back into Go. If that happens, we continue down
32 // the rabbit hole during the execution of f.
34 // To make it possible for gcc-compiled C code to call a Go function p.GoF,
35 // cgo writes a gcc-compiled function named GoF (not p.GoF, since gcc doesn't
36 // know about packages). The gcc-compiled C function f calls GoF.
38 // GoF calls crosscall2(_cgoexp_GoF, frame, framesize, ctxt).
39 // Crosscall2 (in cgo/asm_$GOARCH.s) is a four-argument adapter from
40 // the gcc function call ABI to the gc function call ABI.
41 // It is called from gcc to call gc functions. In this case it calls
42 // _cgoexp_GoF(frame, framesize), still running on m.g0's stack
43 // and outside the $GOMAXPROCS limit. Thus, this code cannot yet
44 // call arbitrary Go code directly and must be careful not to allocate
45 // memory or use up m.g0's stack.
47 // _cgoexp_GoF (generated by cmd/cgo) calls
48 // runtime.cgocallback(funcPC(p.GoF), frame, framesize, ctxt).
49 // (The reason for having _cgoexp_GoF instead of writing a crosscall3
50 // to make this call directly is that _cgoexp_GoF, because it is compiled
51 // with gc instead of gcc, can refer to dotted names like
52 // runtime.cgocallback and p.GoF.)
54 // runtime.cgocallback (in asm_$GOARCH.s) turns the raw PC of p.GoF
55 // into a Go function value and calls runtime.cgocallback_gofunc.
57 // runtime.cgocallback_gofunc (in asm_$GOARCH.s) switches from m.g0's
58 // stack to the original g (m.curg)'s stack, on which it calls
59 // runtime.cgocallbackg(p.GoF, frame, framesize).
60 // As part of the stack switch, runtime.cgocallback saves the current
61 // SP as m.g0.sched.sp, so that any use of m.g0's stack during the
62 // execution of the callback will be done below the existing stack frames.
63 // Before overwriting m.g0.sched.sp, it pushes the old value on the
64 // m.g0 stack, so that it can be restored later.
66 // runtime.cgocallbackg (below) is now running on a real goroutine
67 // stack (not an m.g0 stack). First it calls runtime.exitsyscall, which will
68 // block until the $GOMAXPROCS limit allows running this goroutine.
69 // Once exitsyscall has returned, it is safe to do things like call the memory
70 // allocator or invoke the Go callback function p.GoF. runtime.cgocallbackg
71 // first defers a function to unwind m.g0.sched.sp, so that if p.GoF
72 // panics, m.g0.sched.sp will be restored to its old value: the m.g0 stack
73 // and the m.curg stack will be unwound in lock step.
74 // Then it calls p.GoF. Finally it pops but does not execute the deferred
75 // function, calls runtime.entersyscall, and returns to runtime.cgocallback.
77 // After it regains control, runtime.cgocallback switches back to
78 // m.g0's stack (the pointer is still in m.g0.sched.sp), restores the old
79 // m.g0.sched.sp value from the stack, and returns to _cgoexp_GoF.
81 // _cgoexp_GoF immediately returns to crosscall2, which restores the
82 // callee-save registers for gcc and returns to GoF, which returns to f.
87 "runtime/internal/atomic"
88 "runtime/internal/sys"
92 // Addresses collected in a cgo backtrace when crashing.
93 // Length must match arg.Max in x_cgo_callers in runtime/cgo/gcc_traceback.c.
// A fixed-size uintptr array (PC values) so that it can be filled in
// without allocating; the 32-entry capacity is mirrored on the C side.
94 type cgoCallers [32]uintptr
96 // argset matches runtime/cgo/linux_syscall.c:argset_t
102 // wrapper for syscall package to call cgocall for libc (cgo) calls.
103 //go:linkname syscall_cgocaller syscall.cgocaller
// syscall_cgocaller makes a libc call on behalf of the syscall package.
// It packs the variadic uintptr arguments into an argset (matching
// argset_t on the C side) and hands a single pointer to cgocall.
// NOTE(review): the `return as.retval`-style epilogue is not visible in
// this chunk; confirm against the full file.
106 func syscall_cgocaller(fn unsafe.Pointer, args ...uintptr) uintptr {
// &args[0]: the backing array of the variadic slice is what C reads.
107 as := argset{args: unsafe.Pointer(&args[0])}
108 cgocall(fn, unsafe.Pointer(&as))
112 // Call from Go to C.
114 // This must be nosplit because it's used for syscalls on some
115 // platforms. Syscalls may have untyped arguments on the stack, so
116 // it's not safe to grow or scan the stack.
119 func cgocall(fn, arg unsafe.Pointer) int32 {
// solaris, illumos, and windows route ordinary system calls through
// cgocall (see the nosplit comment above), so they are allowed even
// when the program itself does not use cgo.
120 if !iscgo && GOOS != "solaris" && GOOS != "illumos" && GOOS != "windows" {
121 throw("cgocall unavailable")
// Race detector: publish all prior writes before crossing into C;
// paired with the raceacquire on racecgosync after the call returns.
129 racereleasemerge(unsafe.Pointer(&racecgosync))
139 // Announce we are entering a system call
140 // so that the scheduler knows to create another
141 // M to run goroutines while we are in the
144 // The call to asmcgocall is guaranteed not to
145 // grow the stack and does not allocate memory,
146 // so it is safe to call while "in a system call", outside
147 // the $GOMAXPROCS accounting.
149 // fn may call back into Go code, in which case we'll exit the
150 // "system call", run the Go code (which may grow the stack),
151 // and then re-enter the "system call" reusing the PC and SP
152 // saved by entersyscall here.
155 // Tell asynchronous preemption that we're entering external
156 // code. We do this after entersyscall because this may block
157 // and cause an async preemption to fail, but at this point a
158 // sync preemption will succeed (though this is not a matter
160 osPreemptExtEnter(mp)
// Switch to the g0 stack and run the gcc-compiled fn (see the file
// header); errno is the value fn returned.
163 errno := asmcgocall(fn, arg)
165 // Update accounting before exitsyscall because exitsyscall may
166 // reschedule us on to a different M.
174 // Note that raceacquire must be called only after exitsyscall has
175 // wired this M to a P.
177 raceacquire(unsafe.Pointer(&racecgosync))
180 // From the garbage collector's perspective, time can move
181 // backwards in the sequence above. If there's a callback into
182 // Go code, GC will see this function at the call to
183 // asmcgocall. When the Go call later returns to C, the
184 // syscall PC/SP is rolled back and the GC sees this function
185 // back at the call to entersyscall. Normally, fn and arg
186 // would be live at entersyscall and dead at asmcgocall, so if
187 // time moved backwards, GC would see these arguments as dead
188 // and then live. Prevent these undead arguments from crashing
189 // GC by forcing them to stay live across this time warp.
197 // Call from C back to Go.
// cgocallbackg runs a Go callback on behalf of C code; it executes on
// the goroutine's own stack after cgocallback switched off g0 (see the
// file header). ctxt is the context argument passed through crosscall2.
199 func cgocallbackg(ctxt uintptr) {
200 println("runtime: bad g in cgocallback")
// cgocallbackg1 does the real work of a C-to-Go callback: it locates
// the callback arguments on the g0 stack, maintains gp.cgoCtxt for
// profiling tracebacks, and invokes the Go function via reflectcall.
242 func cgocallbackg1(ctxt uintptr) {
// Make sure there are extra M's available for other C threads that
// may call into Go while we run.
244 if gp.m.needextram || atomic.Load(&extraMWaiters) > 0 {
245 gp.m.needextram = false
246 systemstack(newextram)
250 s := append(gp.cgoCtxt, ctxt)
252 // Now we need to set gp.cgoCtxt = s, but we could get
253 // a SIGPROF signal while manipulating the slice, and
254 // the SIGPROF handler could pick up gp.cgoCtxt while
255 // tracing up the stack. We need to ensure that the
256 // handler always sees a valid slice, so set the
257 // values in an order such that it always does.
258 p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
259 atomicstorep(unsafe.Pointer(&p.array), unsafe.Pointer(&s[0]))
264 // Decrease the length of the slice by one, safely.
265 p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
271 // The C call to Go came from a thread not currently running
272 // any Go. In the case of -buildmode=c-archive or c-shared,
273 // this call may be coming in before package initialization
274 // is complete. Wait until it is.
278 // Add entry to defer stack in case of panic.
280 defer unwindm(&restore)
// Race detector: acquire pairs with the release done in cgocall
// before control passed to C.
283 raceacquire(unsafe.Pointer(&racecgosync))
293 // Location of callback arguments depends on stack frame layout
294 // and size of stack frame of cgocallback_gofunc.
295 sp := gp.m.g0.sched.sp
298 throw("cgocallbackg is unimplemented on arch")
300 // On arm, stack frame is two words and there's a saved LR between
301 // SP and the stack frame and between the stack frame and the arguments.
302 cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
304 // On arm64, stack frame is four words and there's a saved LR between
305 // SP and the stack frame and between the stack frame and the arguments.
306 // Additional two words (16-byte alignment) are for saving FP.
307 cb = (*args)(unsafe.Pointer(sp + 7*sys.PtrSize))
309 // On amd64, stack frame is two words, plus caller PC and BP.
310 cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
312 // On 386, stack frame is three words, plus caller PC.
313 cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
314 case "ppc64", "ppc64le", "s390x":
315 // On ppc64 and s390x, the callback arguments are in the arguments area of
316 // cgocallback's stack frame. The stack looks like this:
317 // +--------------------+------------------------------+
319 // | cgoexp_$fn +------------------------------+
320 // | | fixed frame area |
321 // +--------------------+------------------------------+
322 // | | arguments area |
323 // | cgocallback +------------------------------+ <- sp + 2*minFrameSize + 2*ptrSize
324 // | | fixed frame area |
325 // +--------------------+------------------------------+ <- sp + minFrameSize + 2*ptrSize
326 // | | local variables (2 pointers) |
327 // | cgocallback_gofunc +------------------------------+ <- sp + minFrameSize
328 // | | fixed frame area |
329 // +--------------------+------------------------------+ <- sp
330 cb = (*args)(unsafe.Pointer(sp + 2*sys.MinFrameSize + 2*sys.PtrSize))
331 case "mips64", "mips64le":
332 // On mips64x, stack frame is two words and there's a saved LR between
333 // SP and the stack frame and between the stack frame and the arguments.
334 cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
335 case "mips", "mipsle":
336 // On mipsx, stack frame is two words and there's a saved LR between
337 // SP and the stack frame and between the stack frame and the arguments.
338 cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
342 // NOTE(rsc): passing nil for argtype means that the copying of the
343 // results back into cb.arg happens without any corresponding write barriers.
344 // For cgo, cb.arg points into a C stack frame and therefore doesn't
345 // hold any pointers that the GC can find anyway - the write barrier
// Invoke the exported Go function: cb.fn with the cb.argsize-byte
// argument block at cb.arg.
347 reflectcall(nil, unsafe.Pointer(cb.fn), cb.arg, uint32(cb.argsize), 0)
350 racereleasemerge(unsafe.Pointer(&racecgosync))
353 // Tell msan that we wrote to the entire argument block.
354 // This tells msan that we set the results.
355 // Since we have already called the function it doesn't
356 // matter that we are writing to the non-result parameters.
357 msanwrite(cb.arg, cb.argsize)
360 // Do not unwind m->g0->sched.sp.
361 // Our caller, cgocallback, will do that.
// unwindm is the deferred cleanup for cgocallbackg1: it restores the
// g0 SP that cgocallback saved so the g0 stack unwinds in lock step
// with the Go stack if the callback panics, and undoes per-callback
// accounting. restore reports whether the SP restore is needed.
365 func unwindm(restore *bool) {
367 // Restore sp saved by cgocallback during
368 // unwind of g's stack (see comment at top of file).
370 sched := &mp.g0.sched
373 throw("unwindm not implemented")
// The saved SP sits just above the fixed frame area (or at a fixed
// 16-byte offset on the arches in the second case below).
374 case "386", "amd64", "arm", "ppc64", "ppc64le", "mips64", "mips64le", "s390x", "mips", "mipsle":
375 sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + sys.MinFrameSize))
377 sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + 16))
380 // Do the accounting that cgocall will not have a chance to do
383 // In the case where a Go call originates from C, ncgo is 0
384 // and there is no matching cgocall to end.
394 // Undo the call to lockOSThread in cgocallbackg.
395 // We must still stay on the same m.
399 // called from assembly
// badcgocallback reports a misaligned stack detected by the
// cgocallback assembly; throw never returns.
400 func badcgocallback() {
401 throw("misaligned stack in cgocallback")
404 // called from (incomplete) assembly
406 throw("cgo not implemented")
// racecgosync is never written through directly; its address serves as
// the race detector's proxy for synchronization that happens in C code
// (see the raceacquire/racereleasemerge pairs in this file).
409 var racecgosync uint64 // represents possible synchronization in C code
411 // Pointer checking for cgo code.
413 // We want to detect all cases where a program that does not use
414 // unsafe makes a cgo call passing a Go pointer to memory that
415 // contains a Go pointer. Here a Go pointer is defined as a pointer
416 // to memory allocated by the Go runtime. Programs that use unsafe
417 // can evade this restriction easily, so we don't try to catch them.
418 // The cgo program will rewrite all possibly bad pointer arguments to
419 // call cgoCheckPointer, where we can catch cases of a Go pointer
420 // pointing to a Go pointer.
422 // Complicating matters, taking the address of a slice or array
423 // element permits the C program to access all elements of the slice
424 // or array. In that case we will see a pointer to a single element,
425 // but we need to check the entire data structure.
427 // The cgoCheckPointer call takes additional arguments indicating that
428 // it was called on an address expression. An additional argument of
429 // true means that it only needs to check a single element. An
430 // additional argument of a slice or array means that it needs to
431 // check the entire slice/array, but nothing else. Otherwise, the
432 // pointer could be anything, and we check the entire heap object,
433 // which is conservative but safe.
435 // When and if we implement a moving garbage collector,
436 // cgoCheckPointer will pin the pointer for the duration of the cgo
437 // call. (This is necessary but not sufficient; the cgo program will
438 // also have to change to pin Go pointers that cannot point to Go
441 // cgoCheckPointer checks if the argument contains a Go pointer that
442 // points to a Go pointer, and panics if it does.
// ptr is the value being passed to C; arg, when non-nil, indicates the
// call was on an address expression (see the file comment above).
443 func cgoCheckPointer(ptr interface{}, arg interface{}) {
// Checking is disabled with GODEBUG=cgocheck=0.
444 if debug.cgocheck == 0 {
452 if arg != nil && (t.kind&kindMask == kindPtr || t.kind&kindMask == kindUnsafePointer) {
453 if t.kind&kindDirectIface == 0 {
455 p = *(*unsafe.Pointer)(p)
// Non-Go pointers never need checking.
457 if p == nil || !cgoIsGoPointer(p) {
461 switch aep._type.kind & kindMask {
463 if t.kind&kindMask == kindUnsafePointer {
464 // We don't know the type of the element.
467 pt := (*ptrtype)(unsafe.Pointer(t))
468 cgoCheckArg(pt.elem, p, true, false, cgoCheckPointerFail)
471 // Check the slice rather than the pointer.
475 // Check the array rather than the pointer.
476 // Pass top as false since we have a pointer
482 throw("can't happen")
486 cgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, top, cgoCheckPointerFail)
// Panic messages for the cgo pointer checks: the first for arguments
// passed to C (cgoCheckPointer/cgoCheckArg), the second for results
// returned from exported Go functions (cgoCheckResult).
489 const cgoCheckPointerFail = "cgo argument has Go pointer to Go pointer"
490 const cgoResultFail = "cgo result has Go pointer"
492 // cgoCheckArg is the real work of cgoCheckPointer. The argument p
493 // is either a pointer to the value (of type t), or the value itself,
494 // depending on indir. The top parameter is whether we are at the top
495 // level, where Go pointers are allowed.
// It panics with msg if it finds a disallowed Go pointer.
496 func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
497 if t.ptrdata == 0 || p == nil {
498 // If the type has no pointers there is nothing to do.
// Recurse structurally, driven by the type's kind.
502 switch t.kind & kindMask {
504 throw("can't happen")
506 at := (*arraytype)(unsafe.Pointer(t))
509 throw("can't happen")
511 cgoCheckArg(at.elem, p, at.elem.kind&kindDirectIface == 0, top, msg)
// Check every element of the array in place.
514 for i := uintptr(0); i < at.len; i++ {
515 cgoCheckArg(at.elem, p, true, top, msg)
516 p = add(p, at.elem.size)
518 case kindChan, kindMap:
519 // These types contain internal pointers that will
520 // always be allocated in the Go heap. It's never OK
521 // to pass them to C.
522 panic(errorString(msg))
525 p = *(*unsafe.Pointer)(p)
527 if !cgoIsGoPointer(p) {
530 panic(errorString(msg))
536 // A type known at compile time is OK since it's
537 // constant. A type not known at compile time will be
538 // in the heap and will not be OK.
539 if inheap(uintptr(unsafe.Pointer(it))) {
540 panic(errorString(msg))
// The interface's data word follows its type word.
542 p = *(*unsafe.Pointer)(add(p, sys.PtrSize))
543 if !cgoIsGoPointer(p) {
547 panic(errorString(msg))
549 cgoCheckArg(it, p, it.kind&kindDirectIface == 0, false, msg)
551 st := (*slicetype)(unsafe.Pointer(t))
554 if p == nil || !cgoIsGoPointer(p) {
558 panic(errorString(msg))
// Pointer-free element types need no per-element check.
560 if st.elem.ptrdata == 0 {
// Check the full capacity, not just the length: C code that gets
// the slice can reach every element of the backing array.
563 for i := 0; i < s.cap; i++ {
564 cgoCheckArg(st.elem, p, true, false, msg)
565 p = add(p, st.elem.size)
568 ss := (*stringStruct)(p)
569 if !cgoIsGoPointer(ss.str) {
573 panic(errorString(msg))
576 st := (*structtype)(unsafe.Pointer(t))
578 if len(st.fields) != 1 {
579 throw("can't happen")
581 cgoCheckArg(st.fields[0].typ, p, st.fields[0].typ.kind&kindDirectIface == 0, top, msg)
// Check each pointer-bearing field at its offset.
584 for _, f := range st.fields {
585 if f.typ.ptrdata == 0 {
588 cgoCheckArg(f.typ, add(p, f.offset()), true, top, msg)
590 case kindPtr, kindUnsafePointer:
592 p = *(*unsafe.Pointer)(p)
598 if !cgoIsGoPointer(p) {
602 panic(errorString(msg))
// Pointee type is unknown here; scan the whole object it points to.
605 cgoCheckUnknownPointer(p, msg)
609 // cgoCheckUnknownPointer is called for an arbitrary pointer into Go
610 // memory. It checks whether that Go memory contains any other
611 // pointer into Go memory. If it does, we panic.
612 // The return values are unused but useful to see in panic tracebacks.
613 func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) {
614 if inheap(uintptr(p)) {
// Find the heap object containing p and walk its pointer bitmap.
615 b, span, _ := findObject(uintptr(p), 0, 0)
620 hbits := heapBitsForAddr(base)
622 for i = uintptr(0); i < n; i += sys.PtrSize {
623 if !hbits.morePointers() {
624 // No more possible pointers.
// Any pointer-typed word holding a Go pointer is a violation.
627 if hbits.isPointer() && cgoIsGoPointer(*(*unsafe.Pointer)(unsafe.Pointer(base + i))) {
628 panic(errorString(msg))
// Not in the heap: check the data/bss sections of each module.
636 for _, datap := range activeModules() {
637 if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
638 // We have no way to know the size of the object.
639 // We have to assume that it might contain a pointer.
640 panic(errorString(msg))
642 // In the text or noptr sections, we know that the
643 // pointer does not point to a Go pointer.
649 // cgoIsGoPointer reports whether the pointer is a Go pointer--a
650 // pointer to Go memory. We only care about Go memory that might
// contain pointers.
653 //go:nowritebarrierrec
654 func cgoIsGoPointer(p unsafe.Pointer) bool {
// Heap and stack memory is Go memory.
659 if inHeapOrStack(uintptr(p)) {
// So are the data and bss sections of every active module.
663 for _, datap := range activeModules() {
664 if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
672 // cgoInRange reports whether p is between start and end.
// The interval is half-open: start is included, end is excluded.
674 //go:nowritebarrierrec
675 func cgoInRange(p unsafe.Pointer, start, end uintptr) bool {
676 return start <= uintptr(p) && uintptr(p) < end
679 // cgoCheckResult is called to check the result parameter of an
680 // exported Go function. It panics if the result is or contains a Go
// pointer.
682 func cgoCheckResult(val interface{}) {
// Checking is disabled with GODEBUG=cgocheck=0.
683 if debug.cgocheck == 0 {
// top is false: Go pointers are never allowed in results returned to C.
689 cgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, false, cgoResultFail)