1 // Copyright 2012 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
11 "runtime/internal/atomic"
12 "runtime/internal/sys"
16 // sigTabT is the type of an entry in the global sigtable array.
17 // sigtable is inherently system dependent, and appears in OS-specific files,
18 // but sigTabT is the same for all Unixy systems.
19 // The sigtable array is indexed by a system signal number to get the flags
20 // and printable name of each signal.
26 //go:linkname os_sigpipe os.sigpipe
// signame returns the printable name of signal sig from sigtable,
// guarding against signal numbers with no sigtable entry.
31 func signame(sig uint32) string {
32 if sig >= uint32(len(sigtable)) {
35 return sigtable[sig].name
43 // sigPreempt is the signal used for non-cooperative preemption.
45 // There's no good way to choose this signal, but there are some
48 // 1. It should be a signal that's passed-through by debuggers by
49 // default. On Linux, this is SIGALRM, SIGURG, SIGCHLD, SIGIO,
50 // SIGVTALRM, SIGPROF, and SIGWINCH, plus some glibc-internal signals.
52 // 2. It shouldn't be used internally by libc in mixed Go/C binaries
53 // because libc may assume it's the only thing that can handle these
54 // signals. For example SIGCANCEL or SIGSETXID.
56 // 3. It should be a signal that can happen spuriously without
57 // consequences. For example, SIGALRM is a bad choice because the
58 // signal handler can't tell if it was caused by the real process
59 // alarm or not (arguably this means the signal is broken, but I
60 // digress). SIGUSR1 and SIGUSR2 are also bad because those are often
61 // used in meaningful ways by applications.
63 // 4. We need to deal with platforms without real-time signals (like
64 // macOS), so those are out.
66 // We use SIGURG because it meets all of these criteria, is extremely
67 // unlikely to be used by an application for its "real" meaning (both
68 // because out-of-band data is basically unused and because SIGURG
69 // doesn't report which socket has the condition, making it pretty
70 // useless), and even if it is, the application has to be ready for
71 // spurious SIGURG. SIGIO wouldn't be a bad choice either, but is more
72 // likely to be used for real.
73 const sigPreempt = _SIGURG
75 // Stores the signal handlers registered before Go installed its own.
76 // These signal handlers will be invoked in cases where Go doesn't want to
77 // handle a particular signal (e.g., signal occurred on a non-Go thread).
78 // See sigfwdgo for more information on when the signals are forwarded.
80 // This is read by the signal handler; accesses should use
81 // atomic.Loaduintptr and atomic.Storeuintptr.
82 var fwdSig [_NSIG]uintptr
84 // handlingSig is indexed by signal number and is non-zero if we are
85 // currently handling the signal. Or, to put it another way, whether
86 // the signal handler is currently set to the Go signal handler or not.
87 // This is uint32 rather than bool so that we can use atomic instructions.
88 var handlingSig [_NSIG]uint32
90 // channels for synchronizing signal mask updates with the signal mask
// These channels are serviced by the dedicated signal-mask thread (see
// ensureSigM); a send on disableSigChan/enableSigChan is acknowledged
// via maskUpdatedChan once the mask change has been applied.
93 disableSigChan chan uint32
94 enableSigChan chan uint32
95 maskUpdatedChan chan struct{}
99 // _NSIG is the number of signals on this operating system.
100 // sigtable should describe what to do for all the possible signals.
// Sanity check: sigtable must have exactly one entry per signal.
// NOTE(review): the enclosing function header is not visible in this
// view; this appears to run during early runtime setup — confirm.
101 if len(sigtable) != _NSIG {
102 print("runtime: len(sigtable)=", len(sigtable), " _NSIG=", _NSIG, "\n")
103 throw("bad sigtable len")
109 // Initialize signals.
110 // Called by libpreinit so runtime may not be initialized.
113 //go:nowritebarrierrec
114 func initsig(preinit bool) {
116 // It's now OK for signal handlers to run.
120 // For c-archive/c-shared this is called by libpreinit with
122 if (isarchive || islibrary) && !preinit {
// Record any pre-existing handler for each signal in fwdSig, then
// decide whether to replace it with the Go handler.
126 for i := uint32(0); i < _NSIG; i++ {
128 if t.flags == 0 || t.flags&_SigDefault != 0 {
132 // We don't need to use atomic operations here because
133 // there shouldn't be any other goroutines running yet.
134 fwdSig[i] = getsig(i)
136 if !sigInstallGoHandler(i) {
137 // Even if we are not installing a signal handler,
138 // set SA_ONSTACK if necessary.
139 if fwdSig[i] != _SIG_DFL && fwdSig[i] != _SIG_IGN {
141 } else if fwdSig[i] == _SIG_IGN {
// Install the Go signal handler for this signal.
148 setsig(i, abi.FuncPCABIInternal(sighandler))
// sigInstallGoHandler reports whether the Go runtime should install its
// own handler for sig, based on the inherited handler, sig's sigtable
// flags, and the build mode.
153 //go:nowritebarrierrec
154 func sigInstallGoHandler(sig uint32) bool {
155 // For some signals, we respect an inherited SIG_IGN handler
156 // rather than insist on installing our own default handler.
157 // Even these signals can be fetched using the os/signal package.
159 case _SIGHUP, _SIGINT:
160 if atomic.Loaduintptr(&fwdSig[sig]) == _SIG_IGN {
165 if (GOOS == "linux" || GOOS == "android") && !iscgo && sig == sigPerThreadSyscall {
166 // sigPerThreadSyscall is the same signal used by glibc for
167 // per-thread syscalls on Linux. We use it for the same purpose
168 // in non-cgo binaries.
173 if t.flags&_SigSetStack != 0 {
177 // When built using c-archive or c-shared, only install signal
178 // handlers for synchronous signals and SIGPIPE and sigPreempt.
179 if (isarchive || islibrary) && t.flags&_SigPanic == 0 && sig != _SIGPIPE && sig != sigPreempt {
186 // sigenable enables the Go signal handler to catch the signal sig.
187 // It is only called while holding the os/signal.handlers lock,
188 // via os/signal.enableSignal and signal_enable.
189 func sigenable(sig uint32) {
190 if sig >= uint32(len(sigtable)) {
194 // SIGPROF is handled specially for profiling.
200 if t.flags&_SigNotify != 0 {
// First enabler wins the CAS; save the current handler in fwdSig so
// sigdisable can restore it, then install the Go handler.
204 if atomic.Cas(&handlingSig[sig], 0, 1) {
205 atomic.Storeuintptr(&fwdSig[sig], getsig(sig))
206 setsig(sig, abi.FuncPCABIInternal(sighandler))
211 // sigdisable disables the Go signal handler for the signal sig.
212 // It is only called while holding the os/signal.handlers lock,
213 // via os/signal.disableSignal and signal_disable.
214 func sigdisable(sig uint32) {
215 if sig >= uint32(len(sigtable)) {
219 // SIGPROF is handled specially for profiling.
225 if t.flags&_SigNotify != 0 {
// Ask the signal-mask thread (see ensureSigM) to block delivery again.
227 disableSigChan <- sig
230 // If initsig does not install a signal handler for a
231 // signal, then to go back to the state before Notify
232 // we should remove the one we installed.
233 if !sigInstallGoHandler(sig) {
234 atomic.Store(&handlingSig[sig], 0)
235 setsig(sig, atomic.Loaduintptr(&fwdSig[sig]))
240 // sigignore ignores the signal sig.
241 // It is only called while holding the os/signal.handlers lock,
242 // via os/signal.ignoreSignal and signal_ignore.
243 func sigignore(sig uint32) {
244 if sig >= uint32(len(sigtable)) {
248 // SIGPROF is handled specially for profiling.
254 if t.flags&_SigNotify != 0 {
// Mark the signal as not handled by Go, then tell the OS to ignore it.
255 atomic.Store(&handlingSig[sig], 0)
256 setsig(sig, _SIG_IGN)
260 // clearSignalHandlers clears all signal handlers that are not ignored
261 // back to the default. This is called by the child after a fork, so that
262 // we can enable the signal mask for the exec without worrying about
263 // running a signal handler in the child.
266 //go:nowritebarrierrec
267 func clearSignalHandlers() {
268 for i := uint32(0); i < _NSIG; i++ {
// Only signals currently routed to the Go handler need resetting.
269 if atomic.Load(&handlingSig[i]) != 0 {
275 // setProcessCPUProfilerTimer is called when the profiling timer changes.
276 // It is called with prof.signalLock held. hz is the new timer, and is 0 if
277 // profiling is being disabled. Enable or disable the signal as
278 // required for -buildmode=c-archive.
279 func setProcessCPUProfilerTimer(hz int32) {
281 // Enable the Go signal handler if not enabled.
282 if atomic.Cas(&handlingSig[_SIGPROF], 0, 1) {
283 h := getsig(_SIGPROF)
284 // If no signal handler was installed before, then we record
285 // _SIG_IGN here. When we turn off profiling (below) we'll start
286 // ignoring SIGPROF signals. We do this, rather than change
287 // to SIG_DFL, because there may be a pending SIGPROF
288 // signal that has not yet been delivered to some other thread.
289 // If we change to SIG_DFL when turning off profiling, the
290 // program will crash when that SIGPROF is delivered. We assume
291 // that programs that use profiling don't want to crash on a
292 // stray SIGPROF. See issue 19320.
293 // We do the change here instead of when turning off profiling,
294 // because there we may race with a signal handler running
295 // concurrently, in particular, sigfwdgo may observe _SIG_DFL and
296 // die. See issue 43828.
300 atomic.Storeuintptr(&fwdSig[_SIGPROF], h)
301 setsig(_SIGPROF, abi.FuncPCABIInternal(sighandler))
// Arm the process-wide interval timer so SIGPROF fires at the
// requested rate.
305 it.it_interval.tv_sec = 0
306 it.it_interval.set_usec(1000000 / hz)
307 it.it_value = it.it_interval
308 setitimer(_ITIMER_PROF, &it, nil)
// Disarm the profiling timer.
310 setitimer(_ITIMER_PROF, &itimerval{}, nil)
312 // If the Go signal handler should be disabled by default,
313 // switch back to the signal handler that was installed
314 // when we enabled profiling. We don't try to handle the case
315 // of a program that changes the SIGPROF handler while Go
316 // profiling is enabled.
317 if !sigInstallGoHandler(_SIGPROF) {
318 if atomic.Cas(&handlingSig[_SIGPROF], 1, 0) {
319 h := atomic.Loaduintptr(&fwdSig[_SIGPROF])
326 // setThreadCPUProfilerHz makes any thread-specific changes required to
327 // implement profiling at a rate of hz.
328 // No changes required on Unix systems when using setitimer.
329 func setThreadCPUProfilerHz(hz int32) {
// Record the rate on the current M; presumably consulted when SIGPROF
// arrives on this thread — confirm in sigprof.
330 getg().m.profilehz = hz
// NOTE(review): the enclosing function header is not visible in this
// view; this appears to implement os.sigpipe (see the linkname
// directive earlier in the file) — confirm against the full file.
// If SIGPIPE is ignored or an os/signal listener accepts it, there is
// nothing to do; otherwise the process dies with SIGPIPE.
334 if signal_ignored(_SIGPIPE) || sigsend(_SIGPIPE) {
337 dieFromSignal(_SIGPIPE)
340 // doSigPreempt handles a preemption signal on gp.
341 func doSigPreempt(gp *g, ctxt *sigctxt) {
342 // Check if this G wants to be preempted and is safe to
344 if wantAsyncPreempt(gp) {
345 if ok, newpc := isAsyncSafePoint(gp, ctxt.sigpc(), ctxt.sigsp(), ctxt.siglr()); ok {
346 // Adjust the PC and inject a call to asyncPreempt.
347 ctxt.pushCall(abi.FuncPCABI0(asyncPreempt), newpc)
351 // Acknowledge the preemption.
352 gp.m.preemptGen.Add(1)
353 gp.m.signalPending.Store(0)
// Darwin/iOS track outstanding preempt signals (see preemptM).
355 if GOOS == "darwin" || GOOS == "ios" {
356 pendingPreemptSignals.Add(-1)
360 const preemptMSupported = true
362 // preemptM sends a preemption request to mp. This request may be
363 // handled asynchronously and may be coalesced with other requests to
364 // the M. When the request is received, if the running G or P are
365 // marked for preemption and the goroutine is at an asynchronous
366 // safe-point, it will preempt the goroutine. It always atomically
367 // increments mp.preemptGen after handling a preemption request.
368 func preemptM(mp *m) {
369 // On Darwin, don't try to preempt threads during exec.
371 if GOOS == "darwin" || GOOS == "ios" {
// Winning the CAS means no signal is pending yet; send exactly one.
375 if mp.signalPending.CompareAndSwap(0, 1) {
376 if GOOS == "darwin" || GOOS == "ios" {
377 pendingPreemptSignals.Add(1)
380 // If multiple threads are preempting the same M, it may send many
381 // signals to the same M such that it hardly make progress, causing
382 // live-lock problem. Apparently this could happen on darwin. See
384 // Only send a signal if there isn't already one pending.
385 signalM(mp, sigPreempt)
388 if GOOS == "darwin" || GOOS == "ios" {
393 // sigFetchG fetches the value of G safely when running in a signal handler.
394 // On some architectures, the g value may be clobbered when running in a VDSO.
398 func sigFetchG(c *sigctxt) *g {
400 case "arm", "arm64", "ppc64", "ppc64le", "riscv64", "s390x":
401 if !iscgo && inVDSOPage(c.sigpc()) {
402 // When using cgo, we save the g on TLS and load it from there
403 // in sigtramp. Just use that.
404 // Otherwise, before making a VDSO call we save the g to the
405 // bottom of the signal stack. Fetch from there.
406 // TODO: in efence mode, stack is sysAlloc'd, so this wouldn't
// The gsignal stack lives in a manually-managed span; the saved g
// pointer sits at the span base.
410 if s != nil && s.state.get() == mSpanManual && s.base() < sp && sp < s.limit {
411 gp := *(**g)(unsafe.Pointer(s.base()))
420 // sigtrampgo is called from the signal handler function, sigtramp,
421 // written in assembly code.
422 // This is called by the signal handler, and the world may be stopped.
424 // It must be nosplit because getg() is still the G that was running
425 // (if any) when the signal was delivered, but it's (usually) called
426 // on the gsignal stack. Until this switches the G to gsignal, the
427 // stack bounds check won't work.
430 //go:nowritebarrierrec
431 func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
// Give any pre-Go handler the first chance at the signal.
432 if sigfwdgo(sig, info, ctx) {
435 c := &sigctxt{info, ctx}
440 // Some platforms (Linux) have per-thread timers, which we use in
441 // combination with the process-wide timer. Avoid double-counting.
442 if validSIGPROF(nil, c) {
443 sigprofNonGoPC(c.sigpc())
447 if sig == sigPreempt && preemptMSupported && debug.asyncpreemptoff == 0 {
448 // This is probably a signal from preemptM sent
449 // while executing Go code but received while
450 // executing non-Go code.
451 // We got past sigfwdgo, so we know that there is
452 // no non-Go signal handler for sigPreempt.
453 // The default behavior for sigPreempt is to ignore
454 // the signal, so badsignal will be a no-op anyway.
455 if GOOS == "darwin" || GOOS == "ios" {
456 pendingPreemptSignals.Add(-1)
// No Go g on this thread: treat as a signal on a non-Go thread.
461 badsignal(uintptr(sig), c)
467 // If some non-Go code called sigaltstack, adjust.
468 var gsignalStack gsignalStack
469 setStack := adjustSignalStack(sig, gp.m, &gsignalStack)
471 gp.m.gsignal.stktopsp = getcallersp()
// A signal during fork should never happen (see signalDuringFork).
474 if gp.stackguard0 == stackFork {
475 signalDuringFork(sig)
479 sighandler(sig, info, ctx, gp)
482 restoreGsignalStack(&gsignalStack)
486 // If the signal handler receives a SIGPROF signal on a non-Go thread,
487 // it tries to collect a traceback into sigprofCallers.
488 // sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback.
489 var sigprofCallers cgoCallers
// Guard flag for sigprofCallers; accessed with atomic.Load/Store.
490 var sigprofCallersUse uint32
492 // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
493 // and the signal handler collected a stack trace in sigprofCallers.
494 // When this is called, sigprofCallersUse will be non-zero.
495 // g is nil, and what we can do is very limited.
497 // It is called from the signal handling functions written in assembly code that
498 // are active for cgo programs, cgoSigtramp and sigprofNonGoWrapper, which have
499 // not verified that the SIGPROF delivery corresponds to the best available
500 // profiling source for this thread.
503 //go:nowritebarrierrec
504 func sigprofNonGo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
505 if prof.hz.Load() != 0 {
506 c := &sigctxt{info, ctx}
507 // Some platforms (Linux) have per-thread timers, which we use in
508 // combination with the process-wide timer. Avoid double-counting.
509 if validSIGPROF(nil, c) {
// The captured trace is zero-terminated; count its PCs.
511 for n < len(sigprofCallers) && sigprofCallers[n] != 0 {
514 cpuprof.addNonGo(sigprofCallers[:n])
// Release sigprofCallers for reuse by the signal handler.
518 atomic.Store(&sigprofCallersUse, 0)
521 // sigprofNonGoPC is called when a profiling signal arrived on a
522 // non-Go thread and we have a single PC value, not a stack trace.
523 // g is nil, and what we can do is very limited.
526 //go:nowritebarrierrec
527 func sigprofNonGoPC(pc uintptr) {
528 if prof.hz.Load() != 0 {
// Attribute the sample to external (non-Go) code.
531 abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum,
533 cpuprof.addNonGo(stk)
537 // adjustSignalStack adjusts the current stack guard based on the
538 // stack pointer that is actually in use while handling a signal.
539 // We do this in case some non-Go code called sigaltstack.
540 // This reports whether the stack was adjusted, and if so stores the old
541 // signal stack in *gsigstack.
544 func adjustSignalStack(sig uint32, mp *m, gsigStack *gsignalStack) bool {
// Approximate the handler's SP with the address of a stack-resident
// argument.
545 sp := uintptr(unsafe.Pointer(&sig))
// Already on this M's gsignal stack: no adjustment needed.
546 if sp >= mp.gsignal.stack.lo && sp < mp.gsignal.stack.hi {
// Ask the OS for any alternate signal stack installed by non-Go code.
551 sigaltstack(nil, &st)
552 stsp := uintptr(unsafe.Pointer(st.ss_sp))
553 if st.ss_flags&_SS_DISABLE == 0 && sp >= stsp && sp < stsp+st.ss_size {
554 setGsignalStack(&st, gsigStack)
558 if sp >= mp.g0.stack.lo && sp < mp.g0.stack.hi {
559 // The signal was delivered on the g0 stack.
560 // This can happen when linked with C code
561 // using the thread sanitizer, which collects
562 // signals then delivers them itself by calling
563 // the signal handler directly when C code,
564 // including C code called via cgo, calls a
565 // TSAN-intercepted function such as malloc.
567 // We check this condition last as g0.stack.lo
568 // may be not very accurate (see mstart).
569 st := stackt{ss_size: mp.g0.stack.hi - mp.g0.stack.lo}
570 setSignalstackSP(&st, mp.g0.stack.lo)
571 setGsignalStack(&st, gsigStack)
575 // sp is not within gsignal stack, g0 stack, or sigaltstack. Bad.
578 if st.ss_flags&_SS_DISABLE != 0 {
587 // crashing is the number of m's we have waited for when implementing
588 // GOTRACEBACK=crash when a signal is received.
591 // testSigtrap and testSigusr1 are used by the runtime tests. If
592 // non-nil, it is called on SIGTRAP/SIGUSR1. If it returns true, the
593 // normal behavior on this signal is suppressed.
// Test hooks; nil in normal operation.
594 var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool
595 var testSigusr1 func(gp *g) bool
597 // sighandler is invoked when a signal occurs. The global g will be
598 // set to a gsignal goroutine and we will be running on the alternate
599 // signal stack. The parameter gp will be the value of the global g
600 // when the signal occurred. The sig, info, and ctxt parameters are
601 // from the system signal handler: they are the parameters passed when
602 // the SA is passed to the sigaction system call.
604 // The garbage collector may have stopped the world, so write barriers
607 //go:nowritebarrierrec
608 func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
609 // The g executing the signal handler. This is almost always
610 // mp.gsignal. See delayedSignal for an exception.
613 c := &sigctxt{info, ctxt}
615 // Cgo TSAN (not the Go race detector) intercepts signals and calls the
616 // signal handler at a later time. When the signal handler is called, the
617 // memory may have changed, but the signal context remains old. The
618 // unmatched signal context and memory makes it unsafe to unwind or inspect
619 // the stack. So we ignore delayed non-fatal signals that will cause a stack
620 // inspection (profiling signal and preemption signal).
621 // cgo_yield is only non-nil for TSAN, and is specifically used to trigger
622 // signal delivery. We use that as an indicator of delayed signals.
623 // For delayed signals, the handler is called on the g0 stack (see
624 // adjustSignalStack).
625 delayedSignal := *cgo_yield != nil && mp != nil && gsignal.stack == mp.g0.stack
628 // Some platforms (Linux) have per-thread timers, which we use in
629 // combination with the process-wide timer. Avoid double-counting.
630 if !delayedSignal && validSIGPROF(mp, c) {
631 sigprof(c.sigpc(), c.sigsp(), c.siglr(), gp, mp)
636 if sig == _SIGTRAP && testSigtrap != nil && testSigtrap(info, (*sigctxt)(noescape(unsafe.Pointer(c))), gp) {
640 if sig == _SIGUSR1 && testSigusr1 != nil && testSigusr1(gp) {
644 if (GOOS == "linux" || GOOS == "android") && sig == sigPerThreadSyscall {
645 // sigPerThreadSyscall is the same signal used by glibc for
646 // per-thread syscalls on Linux. We use it for the same purpose
647 // in non-cgo binaries. Since this signal is not _SigNotify,
648 // there is nothing more to do once we run the syscall.
649 runPerThreadSyscall()
653 if sig == sigPreempt && debug.asyncpreemptoff == 0 && !delayedSignal {
654 // Might be a preemption signal.
656 // Even if this was definitely a preemption signal, it
657 // may have been coalesced with another signal, so we
658 // still let it through to the application.
// Signals with no sigtable entry are treated as fatal (_SigThrow).
661 flags := int32(_SigThrow)
662 if sig < uint32(len(sigtable)) {
663 flags = sigtable[sig].flags
665 if !c.sigFromUser() && flags&_SigPanic != 0 && gp.throwsplit {
666 // We can't safely sigpanic because it may grow the
667 // stack. Abort in the signal handler instead.
670 if isAbortPC(c.sigpc()) {
671 // On many architectures, the abort function just
672 // causes a memory fault. Don't turn that into a panic.
675 if !c.sigFromUser() && flags&_SigPanic != 0 {
676 // The signal is going to cause a panic.
677 // Arrange the stack so that it looks like the point
678 // where the signal occurred made a call to the
679 // function sigpanic. Then set the PC to sigpanic.
681 // Have to pass arguments out of band since
682 // augmenting the stack frame would break
683 // the unwinding code.
685 gp.sigcode0 = uintptr(c.sigcode())
686 gp.sigcode1 = uintptr(c.fault())
689 c.preparePanic(sig, gp)
693 if c.sigFromUser() || flags&_SigNotify != 0 {
699 if c.sigFromUser() && signal_ignored(sig) {
703 if flags&_SigKill != 0 {
707 // _SigThrow means that we should exit now.
708 // If we get here with _SigPanic, it means that the signal
709 // was sent to us by a program (c.sigFromUser() is true);
710 // in that case, if we didn't handle it in sigsend, we exit now.
711 if flags&(_SigThrow|_SigPanic) == 0 {
// Unrecoverable signal: set up the fatal throw state on this M.
715 mp.throwing = throwTypeRuntime
722 if sig < uint32(len(sigtable)) {
723 print(sigtable[sig].name, "\n")
725 print("Signal ", sig, "\n")
728 print("PC=", hex(c.sigpc()), " m=", mp.id, " sigcode=", c.sigcode(), "\n")
729 if mp.incgo && gp == mp.g0 && mp.curg != nil {
730 print("signal arrived during cgo execution\n")
731 // Switch to curg so that we get a traceback of the Go code
732 // leading up to the cgocall, which switched from curg to g0.
735 if sig == _SIGILL || sig == _SIGFPE {
736 // It would be nice to know how long the instruction is.
737 // Unfortunately, that's complicated to do in general (mostly for x86
738 // and s930x, but other archs have non-standard instruction lengths also).
739 // Opt to print 16 bytes, which covers most instructions.
742 // We have to be careful, though. If we're near the end of
743 // a page and the following page isn't mapped, we could
744 // segfault. So make sure we don't straddle a page (even though
745 // that could lead to printing an incomplete instruction).
746 // We're assuming here we can read at least the page containing the PC.
747 // I suppose it is possible that the page is mapped executable but not readable?
// Clamp the dump so it never crosses into the next (possibly unmapped)
// page.
749 if n > physPageSize-pc%physPageSize {
750 n = physPageSize - pc%physPageSize
752 print("instruction bytes:")
753 b := (*[maxN]byte)(unsafe.Pointer(pc))
754 for i := uintptr(0); i < n; i++ {
755 print(" ", hex(b[i]))
761 level, _, docrash := gotraceback()
764 tracebacktrap(c.sigpc(), c.sigsp(), c.siglr(), gp)
765 if crashing > 0 && gp != mp.curg && mp.curg != nil && readgstatus(mp.curg)&^_Gscan == _Grunning {
766 // tracebackothers on original m skipped this one; trace it now.
767 goroutineheader(mp.curg)
768 traceback(^uintptr(0), ^uintptr(0), 0, mp.curg)
769 } else if crashing == 0 {
778 if crashing < mcount()-int32(extraMCount) {
779 // There are other m's that need to dump their stacks.
780 // Relay SIGQUIT to the next m by sending it to the current process.
781 // All m's that have already received SIGQUIT have signal masks blocking
782 // receipt of any signals, so the SIGQUIT will go to an m that hasn't seen it yet.
783 // When the last m receives the SIGQUIT, it will fall through to the call to
784 // crash below. Just in case the relaying gets botched, each m involved in
785 // the relay sleeps for 5 seconds and then does the crash/exit itself.
786 // In expected operation, the last m has received the SIGQUIT and run
787 // crash/exit and the process is gone, all long before any of the
788 // 5-second sleeps have finished.
791 usleep(5 * 1000 * 1000)
801 // sigpanic turns a synchronous signal into a run-time panic.
802 // If the signal handler sees a synchronous panic, it arranges the
803 // stack to look like the function where the signal occurred called
804 // sigpanic, sets the signal's PC value to sigpanic, and returns from
805 // the signal handler. The effect is that the program will act as
806 // though the function that got the signal simply called sigpanic
809 // This must NOT be nosplit because the linker doesn't know where
810 // sigpanic calls can be injected.
812 // The signal handler must not inject a call to sigpanic if
813 // getg().throwsplit, since sigpanic may need to grow the stack.
815 // This is exported via linkname to assembly in runtime/cgo.
817 //go:linkname sigpanic
// NOTE(review): the func header for sigpanic is not visible in this
// view; the body below dispatches on gp.sig — confirm against the full
// file.
821 throw("unexpected signal during runtime execution")
826 if gp.sigcode0 == _BUS_ADRERR && gp.sigcode1 < 0x1000 {
829 // Support runtime/debug.SetPanicOnFault.
831 panicmemAddr(gp.sigcode1)
833 print("unexpected fault address ", hex(gp.sigcode1), "\n")
// A small fault address indicates a nil-or-near-nil dereference
// (presumably the SIGSEGV case — confirm).
836 if (gp.sigcode0 == 0 || gp.sigcode0 == _SEGV_MAPERR || gp.sigcode0 == _SEGV_ACCERR) && gp.sigcode1 < 0x1000 {
839 // Support runtime/debug.SetPanicOnFault.
841 panicmemAddr(gp.sigcode1)
843 if inUserArenaChunk(gp.sigcode1) {
844 // We could check that the arena chunk is explicitly set to fault,
845 // but the fact that we faulted on accessing it is enough to prove
847 print("accessed data from freed user arena ", hex(gp.sigcode1), "\n")
849 print("unexpected fault address ", hex(gp.sigcode1), "\n")
862 if gp.sig >= uint32(len(sigtable)) {
863 // can't happen: we looked up gp.sig in sigtable to decide to call sigpanic
864 throw("unexpected signal value")
// Other synchronous signals panic with their printable name.
866 panic(errorString(sigtable[gp.sig].name))
869 // dieFromSignal kills the program with a signal.
870 // This provides the expected exit status for the shell.
871 // This is only called with fatal signals expected to kill the process.
874 //go:nowritebarrierrec
875 func dieFromSignal(sig uint32) {
877 // Mark the signal as unhandled to ensure it is forwarded.
878 atomic.Store(&handlingSig[sig], 0)
881 // That should have killed us. On some systems, though, raise
882 // sends the signal to the whole process rather than to just
883 // the current thread, which means that the signal may not yet
884 // have been delivered. Give other threads a chance to run and
885 // pick up the signal.
890 // If that didn't work, try _SIG_DFL.
// Escalate: restore the default disposition so the next raise is fatal.
891 setsig(sig, _SIG_DFL)
898 // If we are still somehow running, just exit with the wrong status.
902 // raisebadsignal is called when a signal is received on a non-Go
903 // thread, and the Go program does not want to handle it (that is, the
904 // program has not called os/signal.Notify for the signal).
905 func raisebadsignal(sig uint32, c *sigctxt) {
907 // Ignore profiling signals that arrive on non-Go threads.
// Fall back to the handler installed before Go's (may be _SIG_DFL).
915 handler = atomic.Loaduintptr(&fwdSig[sig])
918 // Reset the signal handler and raise the signal.
919 // We are currently running inside a signal handler, so the
920 // signal is blocked. We need to unblock it before raising the
921 // signal, or the signal we raise will be ignored until we return
922 // from the signal handler. We know that the signal was unblocked
923 // before entering the handler, or else we would not have received
924 // it. That means that we don't have to worry about blocking it
929 // If we're linked into a non-Go program we want to try to
930 // avoid modifying the original context in which the signal
931 // was raised. If the handler is the default, we know it
932 // is non-recoverable, so we don't have to worry about
933 // re-installing sighandler. At this point we can just
934 // return and the signal will be re-raised and caught by
935 // the default handler with the correct context.
937 // On FreeBSD, the libthr sigaction code prevents
938 // this from working so we fall through to raise.
939 if GOOS != "freebsd" && (isarchive || islibrary) && handler == _SIG_DFL && !c.sigFromUser() {
945 // Give the signal a chance to be delivered.
946 // In almost all real cases the program is about to crash,
947 // so sleeping here is not a waste of time.
950 // If the signal didn't cause the program to exit, restore the
951 // Go signal handler and carry on.
953 // We may receive another instance of the signal before we
954 // restore the Go handler, but that is not so bad: we know
955 // that the Go program has been ignoring the signal.
956 setsig(sig, abi.FuncPCABIInternal(sighandler))
// NOTE(review): the enclosing function header is not visible in this
// view; this appears to be part of crash(), which dies with SIGABRT —
// confirm against the full file.
961 // OS X core dumps are linear dumps of the mapped memory,
962 // from the first virtual byte to the last, with zeros in the gaps.
963 // Because of the way we arrange the address space on 64-bit systems,
964 // this means the OS X core file will be >128 GB and even on a zippy
965 // workstation can take OS X well over an hour to write (uninterruptible).
966 // Save users from making that mistake.
967 if GOOS == "darwin" && GOARCH == "amd64" {
971 dieFromSignal(_SIGABRT)
974 // ensureSigM starts one global, sleeping thread to make sure at least one thread
975 // is available to catch signals enabled for os/signal.
// NOTE(review): the function header and goroutine launch are not
// visible in this view; the code below is the body run by that thread —
// confirm against the full file.
977 if maskUpdatedChan != nil {
980 maskUpdatedChan = make(chan struct{})
981 disableSigChan = make(chan uint32)
982 enableSigChan = make(chan uint32)
984 // Signal masks are per-thread, so make sure this goroutine stays on one
987 defer UnlockOSThread()
988 // The sigBlocked mask contains the signals not active for os/signal,
989 // initially all signals except the essential. When signal.Notify()/Stop is called,
990 // sigenable/sigdisable in turn notify this thread to update its signal
992 sigBlocked := sigset_all
993 for i := range sigtable {
994 if !blockableSig(uint32(i)) {
995 sigdelset(&sigBlocked, i)
998 sigprocmask(_SIG_SETMASK, &sigBlocked, nil)
// Serve mask updates: unblock on enable, re-block on disable.
1001 case sig := <-enableSigChan:
1003 sigdelset(&sigBlocked, int(sig))
1005 case sig := <-disableSigChan:
1006 if sig > 0 && blockableSig(sig) {
1007 sigaddset(&sigBlocked, int(sig))
1010 sigprocmask(_SIG_SETMASK, &sigBlocked, nil)
// Acknowledge the mask change to the sender in sigenable/sigdisable.
1011 maskUpdatedChan <- struct{}{}
1016 // This is called when we receive a signal when there is no signal stack.
1017 // This can only happen if non-Go code calls sigaltstack to disable the
1019 func noSignalStack(sig uint32) {
1020 println("signal", sig, "received on thread with no signal stack")
// Fatal: the Go runtime requires an alternate signal stack.
1021 throw("non-Go code disabled sigaltstack")
1024 // This is called if we receive a signal when there is a signal stack
1025 // but we are not on it. This can only happen if non-Go code called
1026 // sigaction without setting the SS_ONSTACK flag.
1027 func sigNotOnStack(sig uint32) {
1028 println("signal", sig, "received but handler not on signal stack")
// Fatal: handlers must run on the alternate stack (SA_ONSTACK).
1029 throw("non-Go code set up signal handler without SA_ONSTACK flag")
1032 // signalDuringFork is called if we receive a signal while doing a fork.
1033 // We do not want signals at that time, as a signal sent to the process
1034 // group may be delivered to the child process, causing confusion.
1035 // This should never be called, because we block signals across the fork;
1036 // this function is just a safety check. See issue 18600 for background.
1037 func signalDuringFork(sig uint32) {
1038 println("signal", sig, "received during fork")
// Fatal by design: see issue 18600.
1039 throw("signal received during fork")
1042 // This runs on a foreign stack, without an m or a g. No stack split.
1046 //go:nowritebarrierrec
1047 func badsignal(sig uintptr, c *sigctxt) {
1048 if !iscgo && !cgoHasExtraM {
1049 // There is no extra M. needm will not be able to grab
1050 // an M. Instead of hanging, just crash.
1051 // Cannot call split-stack function as there is no G.
1052 writeErrStr("fatal: bad g in signal handler\n")
// Deliberately fault at a small address to crash without needing a G.
1054 *(*uintptr)(unsafe.Pointer(uintptr(123))) = 2
// Offer the signal to os/signal listeners; otherwise fall back to the
// pre-Go disposition via raisebadsignal.
1057 if !sigsend(uint32(sig)) {
1058 // A foreign thread received the signal sig, and the
1059 // Go code does not want to handle it.
1060 raisebadsignal(uint32(sig), c)
1066 func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer)
1068 // Determines if the signal should be handled by Go and if not, forwards the
1069 // signal to the handler that was installed before Go's. Returns whether the
1070 // signal was forwarded.
1071 // This is called by the signal handler, and the world may be stopped.
1074 //go:nowritebarrierrec
1075 func sigfwdgo(sig uint32, info *siginfo, ctx unsafe.Pointer) bool {
1076 if sig >= uint32(len(sigtable)) {
1079 fwdFn := atomic.Loaduintptr(&fwdSig[sig])
1080 flags := sigtable[sig].flags
1082 // If we aren't handling the signal, forward it.
1083 if atomic.Load(&handlingSig[sig]) == 0 || !signalsOK {
1084 // If the signal is ignored, doing nothing is the same as forwarding.
1085 if fwdFn == _SIG_IGN || (fwdFn == _SIG_DFL && flags&_SigIgn != 0) {
1088 // We are not handling the signal and there is no other handler to forward to.
1089 // Crash with the default behavior.
1090 if fwdFn == _SIG_DFL {
1091 setsig(sig, _SIG_DFL)
1096 sigfwd(fwdFn, sig, info, ctx)
1100 // This function and its caller sigtrampgo assumes SIGPIPE is delivered on the
1101 // originating thread. This property does not hold on macOS (golang.org/issue/33384),
1102 // so we have no choice but to ignore SIGPIPE.
1103 if (GOOS == "darwin" || GOOS == "ios") && sig == _SIGPIPE {
1107 // If there is no handler to forward to, no need to forward.
1108 if fwdFn == _SIG_DFL {
1112 c := &sigctxt{info, ctx}
1113 // Only forward synchronous signals and SIGPIPE.
1114 // Unfortunately, user generated SIGPIPEs will also be forwarded, because si_code
1115 // is set to _SI_USER even for a SIGPIPE raised from a write to a closed socket
1117 if (c.sigFromUser() || flags&_SigPanic == 0) && sig != _SIGPIPE {
1120 // Determine if the signal occurred inside Go code. We test that:
1121 // (1) we weren't in VDSO page,
1122 // (2) we were in a goroutine (i.e., m.curg != nil), and
1123 // (3) we weren't in CGO.
// Running ordinary Go code: Go keeps the signal.
1125 if gp != nil && gp.m != nil && gp.m.curg != nil && !gp.m.incgo {
1129 // Signal not handled by Go, forward it.
// _SIG_IGN is not a callable handler; ignoring requires no call.
1130 if fwdFn != _SIG_IGN {
1131 sigfwd(fwdFn, sig, info, ctx)
1137 // sigsave saves the current thread's signal mask into *p.
1138 // This is used to preserve the non-Go signal mask when a non-Go
1139 // thread calls a Go function.
1140 // This is nosplit and nowritebarrierrec because it is called by needm
1141 // which may be called on a non-Go thread with no g available.
1144 //go:nowritebarrierrec
1145 func sigsave(p *sigset) {
// With a nil new mask, sigprocmask only reads the current mask into *p.
1146 sigprocmask(_SIG_SETMASK, nil, p)
1149 // msigrestore sets the current thread's signal mask to sigmask.
1150 // This is used to restore the non-Go signal mask when a non-Go thread
1151 // calls a Go function.
1152 // This is nosplit and nowritebarrierrec because it is called by dropm
1153 // after g has been cleared.
1156 //go:nowritebarrierrec
1157 func msigrestore(sigmask sigset) {
// Replace (not merge) the thread's mask with the saved one.
1158 sigprocmask(_SIG_SETMASK, &sigmask, nil)
1161 // sigsetAllExiting is used by sigblock(true) when a thread is
1162 // exiting. sigset_all is defined in OS specific code, and per GOOS
1163 // behavior may override this default for sigsetAllExiting: see
// NOTE(review): the OS-specific override presumably happens at init time
// in per-GOOS files — confirm against the OS-specific sources.
1165 var sigsetAllExiting = sigset_all
1167 // sigblock blocks signals in the current thread's signal mask.
1168 // This is used to block signals while setting up and tearing down g
1169 // when a non-Go thread calls a Go function. When a thread is exiting
1170 // we use the sigsetAllExiting value, otherwise the OS specific
1171 // definition of sigset_all is used.
1172 // This is nosplit and nowritebarrierrec because it is called by needm
1173 // which may be called on a non-Go thread with no g available.
1176 //go:nowritebarrierrec
1177 func sigblock(exiting bool) {
// Exiting thread: use the (possibly OS-overridden) exit mask.
1179 sigprocmask(_SIG_SETMASK, &sigsetAllExiting, nil)
// Otherwise block everything in the OS-specific sigset_all.
1182 sigprocmask(_SIG_SETMASK, &sigset_all, nil)
1185 // unblocksig removes sig from the current thread's signal mask.
1186 // This is nosplit and nowritebarrierrec because it is called from
1187 // dieFromSignal, which can be called by sigfwdgo while running in the
1188 // signal handler, on the signal stack, with no g available.
1191 //go:nowritebarrierrec
1192 func unblocksig(sig uint32) {
// Build a one-signal set and unblock just that signal; _SIG_UNBLOCK
// leaves the rest of the thread's mask untouched.
1194 sigaddset(&set, int(sig))
1195 sigprocmask(_SIG_UNBLOCK, &set, nil)
1198 // minitSignals is called when initializing a new m to set the
1199 // thread's alternate signal stack and signal mask.
// NOTE(review): presumably delegates to minitSignalStack and
// minitSignalMask below — confirm in the full source.
1200 func minitSignals() {
1205 // minitSignalStack is called when initializing a new m to set the
1206 // alternate signal stack. If the alternate signal stack is not set
1207 // for the thread (the normal case) then set the alternate signal
1208 // stack to the gsignal stack. If the alternate signal stack is set
1209 // for the thread (the case when a non-Go thread sets the alternate
1210 // signal stack and then calls a Go function) then set the gsignal
1211 // stack to the alternate signal stack. We also set the alternate
1212 // signal stack to the gsignal stack if cgo is not used (regardless
1213 // of whether it is already set). Record which choice was made in
1214 // newSigstack, so that it can be undone in unminit.
1215 func minitSignalStack() {
// Query the thread's current alternate stack without changing it.
1218 sigaltstack(nil, &st)
1219 if st.ss_flags&_SS_DISABLE != 0 || !iscgo {
// No usable foreign alt stack (or pure Go): install our own.
1220 signalstack(&mp.gsignal.stack)
1221 mp.newSigstack = true
// Foreign alt stack exists: adopt it as the gsignal stack and
// remember the old values so unminit can restore them.
1223 setGsignalStack(&st, &mp.goSigStack)
1224 mp.newSigstack = false
1228 // minitSignalMask is called when initializing a new m to set the
1229 // thread's signal mask. When this is called all signals have been
1230 // blocked for the thread. This starts with m.sigmask, which was set
1231 // either from initSigmask for a newly created thread or by calling
1232 // sigsave if this is a non-Go thread calling a Go function. It
1233 // removes all essential signals from the mask, thus causing those
1234 // signals to not be blocked. Then it sets the thread's signal mask.
1235 // After this is called the thread can receive signals.
1236 func minitSignalMask() {
// Work on a copy so m.sigmask itself is preserved.
1237 nmask := getg().m.sigmask
1238 for i := range sigtable {
// Unblockable signals (see blockableSig) must never stay masked.
1239 if !blockableSig(uint32(i)) {
1240 sigdelset(&nmask, i)
1243 sigprocmask(_SIG_SETMASK, &nmask, nil)
1246 // unminitSignals is called from dropm, via unminit, to undo the
1247 // effect of calling minit on a non-Go thread.
1250 func unminitSignals() {
1251 if getg().m.newSigstack {
// We installed the alt stack ourselves in minitSignalStack;
// disable it so the OS forgets about the soon-to-be-freed stack.
1252 st := stackt{ss_flags: _SS_DISABLE}
1253 sigaltstack(&st, nil)
1255 // We got the signal stack from someone else. Restore
1256 // the Go-allocated stack in case this M gets reused
1257 // for another thread (e.g., it's an extram). Also, on
1258 // Android, libc allocates a signal stack for all
1259 // threads, so it's important to restore the Go stack
1260 // even on Go-created threads so we can free it.
1261 restoreGsignalStack(&getg().m.goSigStack)
1265 // blockableSig reports whether sig may be blocked by the signal mask.
1266 // We never want to block the signals marked _SigUnblock;
1267 // these are the synchronous signals that turn into a Go panic.
1268 // We never want to block the preemption signal if it is being used.
1269 // In a Go program--not a c-archive/c-shared--we never want to block
1270 // the signals marked _SigKill or _SigThrow, as otherwise it's possible
1271 // for all running threads to block them and delay their delivery until
1272 // we start a new thread. When linked into a C program we let the C code
1273 // decide on the disposition of those signals.
1274 func blockableSig(sig uint32) bool {
1275 flags := sigtable[sig].flags
// Synchronous panic signals must stay deliverable.
1276 if flags&_SigUnblock != 0 {
// The async-preemption signal must stay deliverable while preemption
// is supported and not disabled via GODEBUG=asyncpreemptoff.
1279 if sig == sigPreempt && preemptMSupported && debug.asyncpreemptoff == 0 {
// c-archive/c-shared: the host C program owns these dispositions.
1282 if isarchive || islibrary {
1285 return flags&(_SigKill|_SigThrow) == 0
1288 // gsignalStack saves the fields of the gsignal stack changed by
// setGsignalStack: the stack bounds, both stack guards, and stktopsp
// (see the save/restore pairs in setGsignalStack/restoreGsignalStack).
1290 type gsignalStack struct {
1297 // setGsignalStack sets the gsignal stack of the current m to an
1298 // alternate signal stack returned from the sigaltstack system call.
1299 // It saves the old values in *old for use by restoreGsignalStack.
1300 // This is used when handling a signal if non-Go code has set the
1301 // alternate signal stack.
1304 //go:nowritebarrierrec
1305 func setGsignalStack(st *stackt, old *gsignalStack) {
// Save the current gsignal stack fields so they can be restored later.
1308 old.stack = gp.m.gsignal.stack
1309 old.stackguard0 = gp.m.gsignal.stackguard0
1310 old.stackguard1 = gp.m.gsignal.stackguard1
1311 old.stktopsp = gp.m.gsignal.stktopsp
// Point gsignal at the foreign alternate stack [ss_sp, ss_sp+ss_size).
1313 stsp := uintptr(unsafe.Pointer(st.ss_sp))
1314 gp.m.gsignal.stack.lo = stsp
1315 gp.m.gsignal.stack.hi = stsp + st.ss_size
// Both guards sit _StackGuard bytes above the low end of the new stack.
1316 gp.m.gsignal.stackguard0 = stsp + _StackGuard
1317 gp.m.gsignal.stackguard1 = stsp + _StackGuard
1320 // restoreGsignalStack restores the gsignal stack to the value it had
1321 // before entering the signal handler.
1324 //go:nowritebarrierrec
1325 func restoreGsignalStack(st *gsignalStack) {
1326 gp := getg().m.gsignal
// Undo setGsignalStack by copying the saved fields back onto gsignal.
1328 gp.stackguard0 = st.stackguard0
1329 gp.stackguard1 = st.stackguard1
1330 gp.stktopsp = st.stktopsp
1333 // signalstack sets the current thread's alternate signal stack to s.
1336 func signalstack(s *stack) {
// Describe s to the OS: size is the full span, sp is the low address.
1337 st := stackt{ss_size: s.hi - s.lo}
1338 setSignalstackSP(&st, s.lo)
1339 sigaltstack(&st, nil)
1342 // setsigsegv is used on darwin/arm64 to fake a segmentation fault.
1344 // This is exported via linkname to assembly in runtime/cgo.
1347 //go:linkname setsigsegv
1348 func setsigsegv(pc uintptr) {
// Fill in the fields a real SIGSEGV delivery would set: a map-error
// code and (for now) a zero fault address.
1352 gp.sigcode0 = _SEGV_MAPERR
1353 gp.sigcode1 = 0 // TODO: emulate si_addr