Source file src/runtime/signal_unix.go

     1  // Copyright 2012 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build unix
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/abi"
    11  	"runtime/internal/atomic"
    12  	"runtime/internal/sys"
    13  	"unsafe"
    14  )
    15  
    16  // sigTabT is the type of an entry in the global sigtable array.
    17  // sigtable is inherently system dependent, and appears in OS-specific files,
    18  // but sigTabT is the same for all Unixy systems.
    19  // The sigtable array is indexed by a system signal number to get the flags
    20  // and printable name of each signal.
    21  type sigTabT struct {
    22  	flags int32
    23  	name  string
    24  }
    25  
    26  //go:linkname os_sigpipe os.sigpipe
    27  func os_sigpipe() {
    28  	systemstack(sigpipe)
    29  }
    30  
    31  func signame(sig uint32) string {
    32  	if sig >= uint32(len(sigtable)) {
    33  		return ""
    34  	}
    35  	return sigtable[sig].name
    36  }
    37  
    38  const (
    39  	_SIG_DFL uintptr = 0
    40  	_SIG_IGN uintptr = 1
    41  )
    42  
    43  // sigPreempt is the signal used for non-cooperative preemption.
    44  //
    45  // There's no good way to choose this signal, but there are some
    46  // heuristics:
    47  //
     48  // 1. It should be a signal that's passed through by debuggers by
    49  // default. On Linux, this is SIGALRM, SIGURG, SIGCHLD, SIGIO,
    50  // SIGVTALRM, SIGPROF, and SIGWINCH, plus some glibc-internal signals.
    51  //
    52  // 2. It shouldn't be used internally by libc in mixed Go/C binaries
    53  // because libc may assume it's the only thing that can handle these
    54  // signals. For example SIGCANCEL or SIGSETXID.
    55  //
    56  // 3. It should be a signal that can happen spuriously without
    57  // consequences. For example, SIGALRM is a bad choice because the
    58  // signal handler can't tell if it was caused by the real process
    59  // alarm or not (arguably this means the signal is broken, but I
    60  // digress). SIGUSR1 and SIGUSR2 are also bad because those are often
    61  // used in meaningful ways by applications.
    62  //
    63  // 4. We need to deal with platforms without real-time signals (like
    64  // macOS), so those are out.
    65  //
    66  // We use SIGURG because it meets all of these criteria, is extremely
    67  // unlikely to be used by an application for its "real" meaning (both
    68  // because out-of-band data is basically unused and because SIGURG
    69  // doesn't report which socket has the condition, making it pretty
    70  // useless), and even if it is, the application has to be ready for
    71  // spurious SIGURG. SIGIO wouldn't be a bad choice either, but is more
    72  // likely to be used for real.
    73  const sigPreempt = _SIGURG
    74  
    75  // Stores the signal handlers registered before Go installed its own.
    76  // These signal handlers will be invoked in cases where Go doesn't want to
    77  // handle a particular signal (e.g., signal occurred on a non-Go thread).
    78  // See sigfwdgo for more information on when the signals are forwarded.
    79  //
    80  // This is read by the signal handler; accesses should use
    81  // atomic.Loaduintptr and atomic.Storeuintptr.
    82  var fwdSig [_NSIG]uintptr
    83  
    84  // handlingSig is indexed by signal number and is non-zero if we are
    85  // currently handling the signal. Or, to put it another way, whether
    86  // the signal handler is currently set to the Go signal handler or not.
    87  // This is uint32 rather than bool so that we can use atomic instructions.
    88  var handlingSig [_NSIG]uint32
    89  
    90  // channels for synchronizing signal mask updates with the signal mask
    91  // thread
    92  var (
    93  	disableSigChan  chan uint32
    94  	enableSigChan   chan uint32
    95  	maskUpdatedChan chan struct{}
    96  )
    97  
    98  func init() {
    99  	// _NSIG is the number of signals on this operating system.
   100  	// sigtable should describe what to do for all the possible signals.
   101  	if len(sigtable) != _NSIG {
   102  		print("runtime: len(sigtable)=", len(sigtable), " _NSIG=", _NSIG, "\n")
   103  		throw("bad sigtable len")
   104  	}
   105  }
   106  
   107  var signalsOK bool
   108  
   109  // Initialize signals.
   110  // Called by libpreinit so runtime may not be initialized.
   111  //
   112  //go:nosplit
   113  //go:nowritebarrierrec
   114  func initsig(preinit bool) {
   115  	if !preinit {
   116  		// It's now OK for signal handlers to run.
   117  		signalsOK = true
   118  	}
   119  
   120  	// For c-archive/c-shared this is called by libpreinit with
   121  	// preinit == true.
   122  	if (isarchive || islibrary) && !preinit {
   123  		return
   124  	}
   125  
   126  	for i := uint32(0); i < _NSIG; i++ {
   127  		t := &sigtable[i]
   128  		if t.flags == 0 || t.flags&_SigDefault != 0 {
   129  			continue
   130  		}
   131  
   132  		// We don't need to use atomic operations here because
   133  		// there shouldn't be any other goroutines running yet.
   134  		fwdSig[i] = getsig(i)
   135  
   136  		if !sigInstallGoHandler(i) {
   137  			// Even if we are not installing a signal handler,
   138  			// set SA_ONSTACK if necessary.
   139  			if fwdSig[i] != _SIG_DFL && fwdSig[i] != _SIG_IGN {
   140  				setsigstack(i)
   141  			} else if fwdSig[i] == _SIG_IGN {
   142  				sigInitIgnored(i)
   143  			}
   144  			continue
   145  		}
   146  
   147  		handlingSig[i] = 1
   148  		setsig(i, abi.FuncPCABIInternal(sighandler))
   149  	}
   150  }
   151  
   152  //go:nosplit
   153  //go:nowritebarrierrec
   154  func sigInstallGoHandler(sig uint32) bool {
   155  	// For some signals, we respect an inherited SIG_IGN handler
   156  	// rather than insist on installing our own default handler.
   157  	// Even these signals can be fetched using the os/signal package.
   158  	switch sig {
   159  	case _SIGHUP, _SIGINT:
   160  		if atomic.Loaduintptr(&fwdSig[sig]) == _SIG_IGN {
   161  			return false
   162  		}
   163  	}
   164  
   165  	if (GOOS == "linux" || GOOS == "android") && !iscgo && sig == sigPerThreadSyscall {
   166  		// sigPerThreadSyscall is the same signal used by glibc for
   167  		// per-thread syscalls on Linux. We use it for the same purpose
   168  		// in non-cgo binaries.
   169  		return true
   170  	}
   171  
   172  	t := &sigtable[sig]
   173  	if t.flags&_SigSetStack != 0 {
   174  		return false
   175  	}
   176  
   177  	// When built using c-archive or c-shared, only install signal
   178  	// handlers for synchronous signals and SIGPIPE and sigPreempt.
   179  	if (isarchive || islibrary) && t.flags&_SigPanic == 0 && sig != _SIGPIPE && sig != sigPreempt {
   180  		return false
   181  	}
   182  
   183  	return true
   184  }
   185  
   186  // sigenable enables the Go signal handler to catch the signal sig.
   187  // It is only called while holding the os/signal.handlers lock,
   188  // via os/signal.enableSignal and signal_enable.
   189  func sigenable(sig uint32) {
   190  	if sig >= uint32(len(sigtable)) {
   191  		return
   192  	}
   193  
   194  	// SIGPROF is handled specially for profiling.
   195  	if sig == _SIGPROF {
   196  		return
   197  	}
   198  
   199  	t := &sigtable[sig]
   200  	if t.flags&_SigNotify != 0 {
   201  		ensureSigM()
   202  		enableSigChan <- sig
   203  		<-maskUpdatedChan
   204  		if atomic.Cas(&handlingSig[sig], 0, 1) {
   205  			atomic.Storeuintptr(&fwdSig[sig], getsig(sig))
   206  			setsig(sig, abi.FuncPCABIInternal(sighandler))
   207  		}
   208  	}
   209  }
   210  
   211  // sigdisable disables the Go signal handler for the signal sig.
   212  // It is only called while holding the os/signal.handlers lock,
   213  // via os/signal.disableSignal and signal_disable.
   214  func sigdisable(sig uint32) {
   215  	if sig >= uint32(len(sigtable)) {
   216  		return
   217  	}
   218  
   219  	// SIGPROF is handled specially for profiling.
   220  	if sig == _SIGPROF {
   221  		return
   222  	}
   223  
   224  	t := &sigtable[sig]
   225  	if t.flags&_SigNotify != 0 {
   226  		ensureSigM()
   227  		disableSigChan <- sig
   228  		<-maskUpdatedChan
   229  
   230  		// If initsig does not install a signal handler for a
   231  		// signal, then to go back to the state before Notify
   232  		// we should remove the one we installed.
   233  		if !sigInstallGoHandler(sig) {
   234  			atomic.Store(&handlingSig[sig], 0)
   235  			setsig(sig, atomic.Loaduintptr(&fwdSig[sig]))
   236  		}
   237  	}
   238  }
   239  
   240  // sigignore ignores the signal sig.
   241  // It is only called while holding the os/signal.handlers lock,
   242  // via os/signal.ignoreSignal and signal_ignore.
   243  func sigignore(sig uint32) {
   244  	if sig >= uint32(len(sigtable)) {
   245  		return
   246  	}
   247  
   248  	// SIGPROF is handled specially for profiling.
   249  	if sig == _SIGPROF {
   250  		return
   251  	}
   252  
   253  	t := &sigtable[sig]
   254  	if t.flags&_SigNotify != 0 {
   255  		atomic.Store(&handlingSig[sig], 0)
   256  		setsig(sig, _SIG_IGN)
   257  	}
   258  }
   259  
   260  // clearSignalHandlers clears all signal handlers that are not ignored
   261  // back to the default. This is called by the child after a fork, so that
   262  // we can enable the signal mask for the exec without worrying about
   263  // running a signal handler in the child.
   264  //
   265  //go:nosplit
   266  //go:nowritebarrierrec
   267  func clearSignalHandlers() {
   268  	for i := uint32(0); i < _NSIG; i++ {
   269  		if atomic.Load(&handlingSig[i]) != 0 {
   270  			setsig(i, _SIG_DFL)
   271  		}
   272  	}
   273  }
   274  
   275  // setProcessCPUProfilerTimer is called when the profiling timer changes.
   276  // It is called with prof.signalLock held. hz is the new timer, and is 0 if
   277  // profiling is being disabled. Enable or disable the signal as
   278  // required for -buildmode=c-archive.
   279  func setProcessCPUProfilerTimer(hz int32) {
   280  	if hz != 0 {
   281  		// Enable the Go signal handler if not enabled.
   282  		if atomic.Cas(&handlingSig[_SIGPROF], 0, 1) {
   283  			h := getsig(_SIGPROF)
   284  			// If no signal handler was installed before, then we record
   285  			// _SIG_IGN here. When we turn off profiling (below) we'll start
   286  			// ignoring SIGPROF signals. We do this, rather than change
   287  			// to SIG_DFL, because there may be a pending SIGPROF
   288  			// signal that has not yet been delivered to some other thread.
   289  			// If we change to SIG_DFL when turning off profiling, the
   290  			// program will crash when that SIGPROF is delivered. We assume
   291  			// that programs that use profiling don't want to crash on a
   292  			// stray SIGPROF. See issue 19320.
   293  			// We do the change here instead of when turning off profiling,
   294  			// because there we may race with a signal handler running
   295  			// concurrently, in particular, sigfwdgo may observe _SIG_DFL and
   296  			// die. See issue 43828.
   297  			if h == _SIG_DFL {
   298  				h = _SIG_IGN
   299  			}
   300  			atomic.Storeuintptr(&fwdSig[_SIGPROF], h)
   301  			setsig(_SIGPROF, abi.FuncPCABIInternal(sighandler))
   302  		}
   303  
   304  		var it itimerval
   305  		it.it_interval.tv_sec = 0
   306  		it.it_interval.set_usec(1000000 / hz)
   307  		it.it_value = it.it_interval
   308  		setitimer(_ITIMER_PROF, &it, nil)
   309  	} else {
   310  		setitimer(_ITIMER_PROF, &itimerval{}, nil)
   311  
   312  		// If the Go signal handler should be disabled by default,
   313  		// switch back to the signal handler that was installed
   314  		// when we enabled profiling. We don't try to handle the case
   315  		// of a program that changes the SIGPROF handler while Go
   316  		// profiling is enabled.
   317  		if !sigInstallGoHandler(_SIGPROF) {
   318  			if atomic.Cas(&handlingSig[_SIGPROF], 1, 0) {
   319  				h := atomic.Loaduintptr(&fwdSig[_SIGPROF])
   320  				setsig(_SIGPROF, h)
   321  			}
   322  		}
   323  	}
   324  }
   325  
   326  // setThreadCPUProfilerHz makes any thread-specific changes required to
   327  // implement profiling at a rate of hz.
   328  // No changes required on Unix systems when using setitimer.
   329  func setThreadCPUProfilerHz(hz int32) {
   330  	getg().m.profilehz = hz
   331  }
   332  
   333  func sigpipe() {
   334  	if signal_ignored(_SIGPIPE) || sigsend(_SIGPIPE) {
   335  		return
   336  	}
   337  	dieFromSignal(_SIGPIPE)
   338  }
   339  
   340  // doSigPreempt handles a preemption signal on gp.
   341  func doSigPreempt(gp *g, ctxt *sigctxt) {
   342  	// Check if this G wants to be preempted and is safe to
   343  	// preempt.
   344  	if wantAsyncPreempt(gp) {
   345  		if ok, newpc := isAsyncSafePoint(gp, ctxt.sigpc(), ctxt.sigsp(), ctxt.siglr()); ok {
   346  			// Adjust the PC and inject a call to asyncPreempt.
   347  			ctxt.pushCall(abi.FuncPCABI0(asyncPreempt), newpc)
   348  		}
   349  	}
   350  
   351  	// Acknowledge the preemption.
   352  	atomic.Xadd(&gp.m.preemptGen, 1)
   353  	atomic.Store(&gp.m.signalPending, 0)
   354  
   355  	if GOOS == "darwin" || GOOS == "ios" {
   356  		atomic.Xadd(&pendingPreemptSignals, -1)
   357  	}
   358  }
   359  
   360  const preemptMSupported = true
   361  
   362  // preemptM sends a preemption request to mp. This request may be
   363  // handled asynchronously and may be coalesced with other requests to
   364  // the M. When the request is received, if the running G or P are
   365  // marked for preemption and the goroutine is at an asynchronous
   366  // safe-point, it will preempt the goroutine. It always atomically
   367  // increments mp.preemptGen after handling a preemption request.
   368  func preemptM(mp *m) {
   369  	// On Darwin, don't try to preempt threads during exec.
   370  	// Issue #41702.
   371  	if GOOS == "darwin" || GOOS == "ios" {
   372  		execLock.rlock()
   373  	}
   374  
   375  	if atomic.Cas(&mp.signalPending, 0, 1) {
   376  		if GOOS == "darwin" || GOOS == "ios" {
   377  			atomic.Xadd(&pendingPreemptSignals, 1)
   378  		}
   379  
    380  		// If multiple threads are preempting the same M, they may send many
    381  		// signals to the same M such that it can hardly make progress, causing
    382  		// a live-lock problem. Apparently this could happen on darwin. See
   383  		// issue #37741.
   384  		// Only send a signal if there isn't already one pending.
   385  		signalM(mp, sigPreempt)
   386  	}
   387  
   388  	if GOOS == "darwin" || GOOS == "ios" {
   389  		execLock.runlock()
   390  	}
   391  }
   392  
   393  // sigFetchG fetches the value of G safely when running in a signal handler.
   394  // On some architectures, the g value may be clobbered when running in a VDSO.
   395  // See issue #32912.
   396  //
   397  //go:nosplit
   398  func sigFetchG(c *sigctxt) *g {
   399  	switch GOARCH {
   400  	case "arm", "arm64", "ppc64", "ppc64le", "riscv64", "s390x":
   401  		if !iscgo && inVDSOPage(c.sigpc()) {
   402  			// When using cgo, we save the g on TLS and load it from there
   403  			// in sigtramp. Just use that.
   404  			// Otherwise, before making a VDSO call we save the g to the
   405  			// bottom of the signal stack. Fetch from there.
   406  			// TODO: in efence mode, stack is sysAlloc'd, so this wouldn't
   407  			// work.
   408  			sp := getcallersp()
   409  			s := spanOf(sp)
   410  			if s != nil && s.state.get() == mSpanManual && s.base() < sp && sp < s.limit {
   411  				gp := *(**g)(unsafe.Pointer(s.base()))
   412  				return gp
   413  			}
   414  			return nil
   415  		}
   416  	}
   417  	return getg()
   418  }
   419  
   420  // sigtrampgo is called from the signal handler function, sigtramp,
   421  // written in assembly code.
   422  // This is called by the signal handler, and the world may be stopped.
   423  //
   424  // It must be nosplit because getg() is still the G that was running
   425  // (if any) when the signal was delivered, but it's (usually) called
   426  // on the gsignal stack. Until this switches the G to gsignal, the
   427  // stack bounds check won't work.
   428  //
   429  //go:nosplit
   430  //go:nowritebarrierrec
   431  func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
   432  	if sigfwdgo(sig, info, ctx) {
   433  		return
   434  	}
   435  	c := &sigctxt{info, ctx}
   436  	g := sigFetchG(c)
   437  	setg(g)
   438  	if g == nil {
   439  		if sig == _SIGPROF {
   440  			// Some platforms (Linux) have per-thread timers, which we use in
   441  			// combination with the process-wide timer. Avoid double-counting.
   442  			if validSIGPROF(nil, c) {
   443  				sigprofNonGoPC(c.sigpc())
   444  			}
   445  			return
   446  		}
   447  		if sig == sigPreempt && preemptMSupported && debug.asyncpreemptoff == 0 {
   448  			// This is probably a signal from preemptM sent
   449  			// while executing Go code but received while
   450  			// executing non-Go code.
   451  			// We got past sigfwdgo, so we know that there is
   452  			// no non-Go signal handler for sigPreempt.
   453  			// The default behavior for sigPreempt is to ignore
   454  			// the signal, so badsignal will be a no-op anyway.
   455  			if GOOS == "darwin" || GOOS == "ios" {
   456  				atomic.Xadd(&pendingPreemptSignals, -1)
   457  			}
   458  			return
   459  		}
   460  		c.fixsigcode(sig)
   461  		badsignal(uintptr(sig), c)
   462  		return
   463  	}
   464  
   465  	setg(g.m.gsignal)
   466  
   467  	// If some non-Go code called sigaltstack, adjust.
   468  	var gsignalStack gsignalStack
   469  	setStack := adjustSignalStack(sig, g.m, &gsignalStack)
   470  	if setStack {
   471  		g.m.gsignal.stktopsp = getcallersp()
   472  	}
   473  
   474  	if g.stackguard0 == stackFork {
   475  		signalDuringFork(sig)
   476  	}
   477  
   478  	c.fixsigcode(sig)
   479  	sighandler(sig, info, ctx, g)
   480  	setg(g)
   481  	if setStack {
   482  		restoreGsignalStack(&gsignalStack)
   483  	}
   484  }
   485  
   486  // If the signal handler receives a SIGPROF signal on a non-Go thread,
   487  // it tries to collect a traceback into sigprofCallers.
   488  // sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback.
   489  var sigprofCallers cgoCallers
   490  var sigprofCallersUse uint32
   491  
   492  // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
   493  // and the signal handler collected a stack trace in sigprofCallers.
   494  // When this is called, sigprofCallersUse will be non-zero.
   495  // g is nil, and what we can do is very limited.
   496  //
   497  // It is called from the signal handling functions written in assembly code that
   498  // are active for cgo programs, cgoSigtramp and sigprofNonGoWrapper, which have
   499  // not verified that the SIGPROF delivery corresponds to the best available
   500  // profiling source for this thread.
   501  //
   502  //go:nosplit
   503  //go:nowritebarrierrec
   504  func sigprofNonGo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
   505  	if prof.hz != 0 {
   506  		c := &sigctxt{info, ctx}
   507  		// Some platforms (Linux) have per-thread timers, which we use in
   508  		// combination with the process-wide timer. Avoid double-counting.
   509  		if validSIGPROF(nil, c) {
   510  			n := 0
   511  			for n < len(sigprofCallers) && sigprofCallers[n] != 0 {
   512  				n++
   513  			}
   514  			cpuprof.addNonGo(sigprofCallers[:n])
   515  		}
   516  	}
   517  
   518  	atomic.Store(&sigprofCallersUse, 0)
   519  }
   520  
   521  // sigprofNonGoPC is called when a profiling signal arrived on a
   522  // non-Go thread and we have a single PC value, not a stack trace.
   523  // g is nil, and what we can do is very limited.
   524  //
   525  //go:nosplit
   526  //go:nowritebarrierrec
   527  func sigprofNonGoPC(pc uintptr) {
   528  	if prof.hz != 0 {
   529  		stk := []uintptr{
   530  			pc,
   531  			abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum,
   532  		}
   533  		cpuprof.addNonGo(stk)
   534  	}
   535  }
   536  
   537  // adjustSignalStack adjusts the current stack guard based on the
   538  // stack pointer that is actually in use while handling a signal.
   539  // We do this in case some non-Go code called sigaltstack.
   540  // This reports whether the stack was adjusted, and if so stores the old
   541  // signal stack in *gsigstack.
   542  //
   543  //go:nosplit
   544  func adjustSignalStack(sig uint32, mp *m, gsigStack *gsignalStack) bool {
   545  	sp := uintptr(unsafe.Pointer(&sig))
   546  	if sp >= mp.gsignal.stack.lo && sp < mp.gsignal.stack.hi {
   547  		return false
   548  	}
   549  
   550  	var st stackt
   551  	sigaltstack(nil, &st)
   552  	stsp := uintptr(unsafe.Pointer(st.ss_sp))
   553  	if st.ss_flags&_SS_DISABLE == 0 && sp >= stsp && sp < stsp+st.ss_size {
   554  		setGsignalStack(&st, gsigStack)
   555  		return true
   556  	}
   557  
   558  	if sp >= mp.g0.stack.lo && sp < mp.g0.stack.hi {
   559  		// The signal was delivered on the g0 stack.
   560  		// This can happen when linked with C code
   561  		// using the thread sanitizer, which collects
   562  		// signals then delivers them itself by calling
   563  		// the signal handler directly when C code,
   564  		// including C code called via cgo, calls a
   565  		// TSAN-intercepted function such as malloc.
   566  		//
   567  		// We check this condition last as g0.stack.lo
   568  		// may be not very accurate (see mstart).
   569  		st := stackt{ss_size: mp.g0.stack.hi - mp.g0.stack.lo}
   570  		setSignalstackSP(&st, mp.g0.stack.lo)
   571  		setGsignalStack(&st, gsigStack)
   572  		return true
   573  	}
   574  
   575  	// sp is not within gsignal stack, g0 stack, or sigaltstack. Bad.
   576  	setg(nil)
   577  	needm()
   578  	if st.ss_flags&_SS_DISABLE != 0 {
   579  		noSignalStack(sig)
   580  	} else {
   581  		sigNotOnStack(sig)
   582  	}
   583  	dropm()
   584  	return false
   585  }
   586  
   587  // crashing is the number of m's we have waited for when implementing
   588  // GOTRACEBACK=crash when a signal is received.
   589  var crashing int32
   590  
    591  // testSigtrap and testSigusr1 are used by the runtime tests. If
    592  // non-nil, they are called on SIGTRAP/SIGUSR1, respectively. If the
    593  // function returns true, the normal behavior on this signal is suppressed.
   594  var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool
   595  var testSigusr1 func(gp *g) bool
   596  
   597  // sighandler is invoked when a signal occurs. The global g will be
   598  // set to a gsignal goroutine and we will be running on the alternate
   599  // signal stack. The parameter g will be the value of the global g
   600  // when the signal occurred. The sig, info, and ctxt parameters are
    601  // from the system signal handler: they are the arguments that the kernel
    602  // passes to the handler registered with the sigaction system call.
   603  //
   604  // The garbage collector may have stopped the world, so write barriers
   605  // are not allowed.
   606  //
   607  //go:nowritebarrierrec
   608  func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
   609  	_g_ := getg()
   610  	c := &sigctxt{info, ctxt}
   611  	mp := _g_.m
   612  
   613  	// Cgo TSAN (not the Go race detector) intercepts signals and calls the
   614  	// signal handler at a later time. When the signal handler is called, the
   615  	// memory may have changed, but the signal context remains old. The
    616  	// unmatched signal context and memory make it unsafe to unwind or inspect
   617  	// the stack. So we ignore delayed non-fatal signals that will cause a stack
   618  	// inspection (profiling signal and preemption signal).
   619  	// cgo_yield is only non-nil for TSAN, and is specifically used to trigger
   620  	// signal delivery. We use that as an indicator of delayed signals.
   621  	// For delayed signals, the handler is called on the g0 stack (see
   622  	// adjustSignalStack).
   623  	delayedSignal := *cgo_yield != nil && mp != nil && _g_.stack == mp.g0.stack
   624  
   625  	if sig == _SIGPROF {
   626  		// Some platforms (Linux) have per-thread timers, which we use in
   627  		// combination with the process-wide timer. Avoid double-counting.
   628  		if !delayedSignal && validSIGPROF(mp, c) {
   629  			sigprof(c.sigpc(), c.sigsp(), c.siglr(), gp, mp)
   630  		}
   631  		return
   632  	}
   633  
   634  	if sig == _SIGTRAP && testSigtrap != nil && testSigtrap(info, (*sigctxt)(noescape(unsafe.Pointer(c))), gp) {
   635  		return
   636  	}
   637  
   638  	if sig == _SIGUSR1 && testSigusr1 != nil && testSigusr1(gp) {
   639  		return
   640  	}
   641  
   642  	if (GOOS == "linux" || GOOS == "android") && sig == sigPerThreadSyscall {
   643  		// sigPerThreadSyscall is the same signal used by glibc for
   644  		// per-thread syscalls on Linux. We use it for the same purpose
   645  		// in non-cgo binaries. Since this signal is not _SigNotify,
   646  		// there is nothing more to do once we run the syscall.
   647  		runPerThreadSyscall()
   648  		return
   649  	}
   650  
   651  	if sig == sigPreempt && debug.asyncpreemptoff == 0 && !delayedSignal {
   652  		// Might be a preemption signal.
   653  		doSigPreempt(gp, c)
   654  		// Even if this was definitely a preemption signal, it
   655  		// may have been coalesced with another signal, so we
   656  		// still let it through to the application.
   657  	}
   658  
   659  	flags := int32(_SigThrow)
   660  	if sig < uint32(len(sigtable)) {
   661  		flags = sigtable[sig].flags
   662  	}
   663  	if c.sigcode() != _SI_USER && flags&_SigPanic != 0 && gp.throwsplit {
   664  		// We can't safely sigpanic because it may grow the
   665  		// stack. Abort in the signal handler instead.
   666  		flags = _SigThrow
   667  	}
   668  	if isAbortPC(c.sigpc()) {
   669  		// On many architectures, the abort function just
   670  		// causes a memory fault. Don't turn that into a panic.
   671  		flags = _SigThrow
   672  	}
   673  	if c.sigcode() != _SI_USER && flags&_SigPanic != 0 {
   674  		// The signal is going to cause a panic.
   675  		// Arrange the stack so that it looks like the point
   676  		// where the signal occurred made a call to the
   677  		// function sigpanic. Then set the PC to sigpanic.
   678  
   679  		// Have to pass arguments out of band since
   680  		// augmenting the stack frame would break
   681  		// the unwinding code.
   682  		gp.sig = sig
   683  		gp.sigcode0 = uintptr(c.sigcode())
   684  		gp.sigcode1 = uintptr(c.fault())
   685  		gp.sigpc = c.sigpc()
   686  
   687  		c.preparePanic(sig, gp)
   688  		return
   689  	}
   690  
   691  	if c.sigcode() == _SI_USER || flags&_SigNotify != 0 {
   692  		if sigsend(sig) {
   693  			return
   694  		}
   695  	}
   696  
   697  	if c.sigcode() == _SI_USER && signal_ignored(sig) {
   698  		return
   699  	}
   700  
   701  	if flags&_SigKill != 0 {
   702  		dieFromSignal(sig)
   703  	}
   704  
   705  	// _SigThrow means that we should exit now.
   706  	// If we get here with _SigPanic, it means that the signal
   707  	// was sent to us by a program (c.sigcode() == _SI_USER);
   708  	// in that case, if we didn't handle it in sigsend, we exit now.
   709  	if flags&(_SigThrow|_SigPanic) == 0 {
   710  		return
   711  	}
   712  
   713  	_g_.m.throwing = throwTypeRuntime
   714  	_g_.m.caughtsig.set(gp)
   715  
   716  	if crashing == 0 {
   717  		startpanic_m()
   718  	}
   719  
   720  	if sig < uint32(len(sigtable)) {
   721  		print(sigtable[sig].name, "\n")
   722  	} else {
   723  		print("Signal ", sig, "\n")
   724  	}
   725  
   726  	print("PC=", hex(c.sigpc()), " m=", _g_.m.id, " sigcode=", c.sigcode(), "\n")
   727  	if _g_.m.incgo && gp == _g_.m.g0 && _g_.m.curg != nil {
   728  		print("signal arrived during cgo execution\n")
   729  		// Switch to curg so that we get a traceback of the Go code
   730  		// leading up to the cgocall, which switched from curg to g0.
   731  		gp = _g_.m.curg
   732  	}
   733  	if sig == _SIGILL || sig == _SIGFPE {
   734  		// It would be nice to know how long the instruction is.
   735  		// Unfortunately, that's complicated to do in general (mostly for x86
    736  		// and s390x, but other archs have non-standard instruction lengths also).
   737  		// Opt to print 16 bytes, which covers most instructions.
   738  		const maxN = 16
   739  		n := uintptr(maxN)
   740  		// We have to be careful, though. If we're near the end of
   741  		// a page and the following page isn't mapped, we could
   742  		// segfault. So make sure we don't straddle a page (even though
   743  		// that could lead to printing an incomplete instruction).
   744  		// We're assuming here we can read at least the page containing the PC.
   745  		// I suppose it is possible that the page is mapped executable but not readable?
   746  		pc := c.sigpc()
   747  		if n > physPageSize-pc%physPageSize {
   748  			n = physPageSize - pc%physPageSize
   749  		}
   750  		print("instruction bytes:")
   751  		b := (*[maxN]byte)(unsafe.Pointer(pc))
   752  		for i := uintptr(0); i < n; i++ {
   753  			print(" ", hex(b[i]))
   754  		}
   755  		println()
   756  	}
   757  	print("\n")
   758  
   759  	level, _, docrash := gotraceback()
   760  	if level > 0 {
   761  		goroutineheader(gp)
   762  		tracebacktrap(c.sigpc(), c.sigsp(), c.siglr(), gp)
   763  		if crashing > 0 && gp != _g_.m.curg && _g_.m.curg != nil && readgstatus(_g_.m.curg)&^_Gscan == _Grunning {
   764  			// tracebackothers on original m skipped this one; trace it now.
   765  			goroutineheader(_g_.m.curg)
   766  			traceback(^uintptr(0), ^uintptr(0), 0, _g_.m.curg)
   767  		} else if crashing == 0 {
   768  			tracebackothers(gp)
   769  			print("\n")
   770  		}
   771  		dumpregs(c)
   772  	}
   773  
   774  	if docrash {
   775  		crashing++
   776  		if crashing < mcount()-int32(extraMCount) {
   777  			// There are other m's that need to dump their stacks.
   778  			// Relay SIGQUIT to the next m by sending it to the current process.
   779  			// All m's that have already received SIGQUIT have signal masks blocking
   780  			// receipt of any signals, so the SIGQUIT will go to an m that hasn't seen it yet.
   781  			// When the last m receives the SIGQUIT, it will fall through to the call to
   782  			// crash below. Just in case the relaying gets botched, each m involved in
   783  			// the relay sleeps for 5 seconds and then does the crash/exit itself.
   784  			// In expected operation, the last m has received the SIGQUIT and run
   785  			// crash/exit and the process is gone, all long before any of the
   786  			// 5-second sleeps have finished.
   787  			print("\n-----\n\n")
   788  			raiseproc(_SIGQUIT)
   789  			usleep(5 * 1000 * 1000)
   790  		}
   791  		crash()
   792  	}
   793  
   794  	printDebugLog()
   795  
   796  	exit(2)
   797  }
   798  
   799  // sigpanic turns a synchronous signal into a run-time panic.
   800  // If the signal handler sees a synchronous panic, it arranges the
   801  // stack to look like the function where the signal occurred called
   802  // sigpanic, sets the signal's PC value to sigpanic, and returns from
   803  // the signal handler. The effect is that the program will act as
   804  // though the function that got the signal simply called sigpanic
   805  // instead.
   806  //
   807  // This must NOT be nosplit because the linker doesn't know where
   808  // sigpanic calls can be injected.
   809  //
   810  // The signal handler must not inject a call to sigpanic if
   811  // getg().throwsplit, since sigpanic may need to grow the stack.
   812  //
   813  // This is exported via linkname to assembly in runtime/cgo.
   814  //
   815  //go:linkname sigpanic
   816  func sigpanic() {
   817  	g := getg()
   818  	if !canpanic(g) {
   819  		throw("unexpected signal during runtime execution")
   820  	}
   821  
   822  	switch g.sig {
   823  	case _SIGBUS:
   824  		if g.sigcode0 == _BUS_ADRERR && g.sigcode1 < 0x1000 {
   825  			panicmem()
   826  		}
   827  		// Support runtime/debug.SetPanicOnFault.
   828  		if g.paniconfault {
   829  			panicmemAddr(g.sigcode1)
   830  		}
   831  		print("unexpected fault address ", hex(g.sigcode1), "\n")
   832  		throw("fault")
   833  	case _SIGSEGV:
   834  		if (g.sigcode0 == 0 || g.sigcode0 == _SEGV_MAPERR || g.sigcode0 == _SEGV_ACCERR) && g.sigcode1 < 0x1000 {
   835  			panicmem()
   836  		}
   837  		// Support runtime/debug.SetPanicOnFault.
   838  		if g.paniconfault {
   839  			panicmemAddr(g.sigcode1)
   840  		}
   841  		print("unexpected fault address ", hex(g.sigcode1), "\n")
   842  		throw("fault")
   843  	case _SIGFPE:
   844  		switch g.sigcode0 {
   845  		case _FPE_INTDIV:
   846  			panicdivide()
   847  		case _FPE_INTOVF:
   848  			panicoverflow()
   849  		}
   850  		panicfloat()
   851  	}
   852  
   853  	if g.sig >= uint32(len(sigtable)) {
   854  		// can't happen: we looked up g.sig in sigtable to decide to call sigpanic
   855  		throw("unexpected signal value")
   856  	}
   857  	panic(errorString(sigtable[g.sig].name))
   858  }
   859  
   860  // dieFromSignal kills the program with a signal.
   861  // This provides the expected exit status for the shell.
   862  // This is only called with fatal signals expected to kill the process.
   863  //
   864  //go:nosplit
   865  //go:nowritebarrierrec
   866  func dieFromSignal(sig uint32) {
   867  	unblocksig(sig)
   868  	// Mark the signal as unhandled to ensure it is forwarded.
   869  	atomic.Store(&handlingSig[sig], 0)
   870  	raise(sig)
   871  
   872  	// That should have killed us. On some systems, though, raise
   873  	// sends the signal to the whole process rather than to just
   874  	// the current thread, which means that the signal may not yet
   875  	// have been delivered. Give other threads a chance to run and
   876  	// pick up the signal.
   877  	osyield()
   878  	osyield()
   879  	osyield()
   880  
   881  	// If that didn't work, try _SIG_DFL.
   882  	setsig(sig, _SIG_DFL)
   883  	raise(sig)
   884  
   885  	osyield()
   886  	osyield()
   887  	osyield()
   888  
   889  	// If we are still somehow running, just exit with the wrong status.
   890  	exit(2)
   891  }
   892  
   893  // raisebadsignal is called when a signal is received on a non-Go
   894  // thread, and the Go program does not want to handle it (that is, the
   895  // program has not called os/signal.Notify for the signal).
   896  func raisebadsignal(sig uint32, c *sigctxt) {
   897  	if sig == _SIGPROF {
   898  		// Ignore profiling signals that arrive on non-Go threads.
   899  		return
   900  	}
   901  
   902  	var handler uintptr
   903  	if sig >= _NSIG {
   904  		handler = _SIG_DFL
   905  	} else {
   906  		handler = atomic.Loaduintptr(&fwdSig[sig])
   907  	}
   908  
   909  	// Reset the signal handler and raise the signal.
   910  	// We are currently running inside a signal handler, so the
   911  	// signal is blocked. We need to unblock it before raising the
   912  	// signal, or the signal we raise will be ignored until we return
   913  	// from the signal handler. We know that the signal was unblocked
   914  	// before entering the handler, or else we would not have received
   915  	// it. That means that we don't have to worry about blocking it
   916  	// again.
   917  	unblocksig(sig)
   918  	setsig(sig, handler)
   919  
   920  	// If we're linked into a non-Go program we want to try to
   921  	// avoid modifying the original context in which the signal
   922  	// was raised. If the handler is the default, we know it
   923  	// is non-recoverable, so we don't have to worry about
   924  	// re-installing sighandler. At this point we can just
   925  	// return and the signal will be re-raised and caught by
   926  	// the default handler with the correct context.
   927  	//
   928  	// On FreeBSD, the libthr sigaction code prevents
   929  	// this from working so we fall through to raise.
   930  	if GOOS != "freebsd" && (isarchive || islibrary) && handler == _SIG_DFL && c.sigcode() != _SI_USER {
   931  		return
   932  	}
   933  
   934  	raise(sig)
   935  
   936  	// Give the signal a chance to be delivered.
   937  	// In almost all real cases the program is about to crash,
   938  	// so sleeping here is not a waste of time.
   939  	usleep(1000)
   940  
   941  	// If the signal didn't cause the program to exit, restore the
   942  	// Go signal handler and carry on.
   943  	//
   944  	// We may receive another instance of the signal before we
   945  	// restore the Go handler, but that is not so bad: we know
   946  	// that the Go program has been ignoring the signal.
   947  	setsig(sig, abi.FuncPCABIInternal(sighandler))
   948  }
   949  
   950  //go:nosplit
   951  func crash() {
   952  	// OS X core dumps are linear dumps of the mapped memory,
   953  	// from the first virtual byte to the last, with zeros in the gaps.
   954  	// Because of the way we arrange the address space on 64-bit systems,
   955  	// this means the OS X core file will be >128 GB and even on a zippy
   956  	// workstation can take OS X well over an hour to write (uninterruptible).
   957  	// Save users from making that mistake.
   958  	if GOOS == "darwin" && GOARCH == "amd64" {
   959  		return
   960  	}
   961  
   962  	dieFromSignal(_SIGABRT)
   963  }
   964  
   965  // ensureSigM starts one global, sleeping thread to make sure at least one thread
   966  // is available to catch signals enabled for os/signal.
   967  func ensureSigM() {
   968  	if maskUpdatedChan != nil {
   969  		return
   970  	}
   971  	maskUpdatedChan = make(chan struct{})
   972  	disableSigChan = make(chan uint32)
   973  	enableSigChan = make(chan uint32)
   974  	go func() {
   975  		// Signal masks are per-thread, so make sure this goroutine stays on one
   976  		// thread.
   977  		LockOSThread()
   978  		defer UnlockOSThread()
   979  		// The sigBlocked mask contains the signals not active for os/signal,
   980  		// initially all signals except the essential. When signal.Notify()/Stop is called,
   981  		// sigenable/sigdisable in turn notify this thread to update its signal
   982  		// mask accordingly.
   983  		sigBlocked := sigset_all
   984  		for i := range sigtable {
   985  			if !blockableSig(uint32(i)) {
   986  				sigdelset(&sigBlocked, i)
   987  			}
   988  		}
   989  		sigprocmask(_SIG_SETMASK, &sigBlocked, nil)
   990  		for {
   991  			select {
   992  			case sig := <-enableSigChan:
   993  				if sig > 0 {
   994  					sigdelset(&sigBlocked, int(sig))
   995  				}
   996  			case sig := <-disableSigChan:
   997  				if sig > 0 && blockableSig(sig) {
   998  					sigaddset(&sigBlocked, int(sig))
   999  				}
  1000  			}
  1001  			sigprocmask(_SIG_SETMASK, &sigBlocked, nil)
  1002  			maskUpdatedChan <- struct{}{}
  1003  		}
  1004  	}()
  1005  }
  1006  
  1007  // This is called when we receive a signal when there is no signal stack.
  1008  // This can only happen if non-Go code calls sigaltstack to disable the
  1009  // signal stack.
  1010  func noSignalStack(sig uint32) {
  1011  	println("signal", sig, "received on thread with no signal stack")
  1012  	throw("non-Go code disabled sigaltstack")
  1013  }
  1014  
  1015  // This is called if we receive a signal when there is a signal stack
  1016  // but we are not on it. This can only happen if non-Go code called
   1017  // sigaction without setting the SA_ONSTACK flag.
  1018  func sigNotOnStack(sig uint32) {
  1019  	println("signal", sig, "received but handler not on signal stack")
  1020  	throw("non-Go code set up signal handler without SA_ONSTACK flag")
  1021  }
  1022  
  1023  // signalDuringFork is called if we receive a signal while doing a fork.
  1024  // We do not want signals at that time, as a signal sent to the process
  1025  // group may be delivered to the child process, causing confusion.
  1026  // This should never be called, because we block signals across the fork;
  1027  // this function is just a safety check. See issue 18600 for background.
  1028  func signalDuringFork(sig uint32) {
  1029  	println("signal", sig, "received during fork")
  1030  	throw("signal received during fork")
  1031  }
  1032  
  1033  var badginsignalMsg = "fatal: bad g in signal handler\n"
  1034  
  1035  // This runs on a foreign stack, without an m or a g. No stack split.
  1036  //
  1037  //go:nosplit
  1038  //go:norace
  1039  //go:nowritebarrierrec
  1040  func badsignal(sig uintptr, c *sigctxt) {
  1041  	if !iscgo && !cgoHasExtraM {
  1042  		// There is no extra M. needm will not be able to grab
  1043  		// an M. Instead of hanging, just crash.
  1044  		// Cannot call split-stack function as there is no G.
  1045  		s := stringStructOf(&badginsignalMsg)
  1046  		write(2, s.str, int32(s.len))
  1047  		exit(2)
  1048  		*(*uintptr)(unsafe.Pointer(uintptr(123))) = 2
  1049  	}
  1050  	needm()
  1051  	if !sigsend(uint32(sig)) {
  1052  		// A foreign thread received the signal sig, and the
  1053  		// Go code does not want to handle it.
  1054  		raisebadsignal(uint32(sig), c)
  1055  	}
  1056  	dropm()
  1057  }
  1058  
  1059  //go:noescape
  1060  func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer)
  1061  
  1062  // Determines if the signal should be handled by Go and if not, forwards the
  1063  // signal to the handler that was installed before Go's. Returns whether the
  1064  // signal was forwarded.
  1065  // This is called by the signal handler, and the world may be stopped.
  1066  //
  1067  //go:nosplit
  1068  //go:nowritebarrierrec
  1069  func sigfwdgo(sig uint32, info *siginfo, ctx unsafe.Pointer) bool {
  1070  	if sig >= uint32(len(sigtable)) {
  1071  		return false
  1072  	}
  1073  	fwdFn := atomic.Loaduintptr(&fwdSig[sig])
  1074  	flags := sigtable[sig].flags
  1075  
  1076  	// If we aren't handling the signal, forward it.
  1077  	if atomic.Load(&handlingSig[sig]) == 0 || !signalsOK {
  1078  		// If the signal is ignored, doing nothing is the same as forwarding.
  1079  		if fwdFn == _SIG_IGN || (fwdFn == _SIG_DFL && flags&_SigIgn != 0) {
  1080  			return true
  1081  		}
  1082  		// We are not handling the signal and there is no other handler to forward to.
  1083  		// Crash with the default behavior.
  1084  		if fwdFn == _SIG_DFL {
  1085  			setsig(sig, _SIG_DFL)
  1086  			dieFromSignal(sig)
  1087  			return false
  1088  		}
  1089  
  1090  		sigfwd(fwdFn, sig, info, ctx)
  1091  		return true
  1092  	}
  1093  
   1094  	// This function and its caller sigtrampgo assume that SIGPIPE is delivered on the
  1095  	// originating thread. This property does not hold on macOS (golang.org/issue/33384),
  1096  	// so we have no choice but to ignore SIGPIPE.
  1097  	if (GOOS == "darwin" || GOOS == "ios") && sig == _SIGPIPE {
  1098  		return true
  1099  	}
  1100  
  1101  	// If there is no handler to forward to, no need to forward.
  1102  	if fwdFn == _SIG_DFL {
  1103  		return false
  1104  	}
  1105  
  1106  	c := &sigctxt{info, ctx}
  1107  	// Only forward synchronous signals and SIGPIPE.
  1108  	// Unfortunately, user generated SIGPIPEs will also be forwarded, because si_code
  1109  	// is set to _SI_USER even for a SIGPIPE raised from a write to a closed socket
  1110  	// or pipe.
  1111  	if (c.sigcode() == _SI_USER || flags&_SigPanic == 0) && sig != _SIGPIPE {
  1112  		return false
  1113  	}
  1114  	// Determine if the signal occurred inside Go code. We test that:
  1115  	//   (1) we weren't in VDSO page,
  1116  	//   (2) we were in a goroutine (i.e., m.curg != nil), and
  1117  	//   (3) we weren't in CGO.
  1118  	g := sigFetchG(c)
  1119  	if g != nil && g.m != nil && g.m.curg != nil && !g.m.incgo {
  1120  		return false
  1121  	}
  1122  
  1123  	// Signal not handled by Go, forward it.
  1124  	if fwdFn != _SIG_IGN {
  1125  		sigfwd(fwdFn, sig, info, ctx)
  1126  	}
  1127  
  1128  	return true
  1129  }
  1130  
  1131  // sigsave saves the current thread's signal mask into *p.
  1132  // This is used to preserve the non-Go signal mask when a non-Go
  1133  // thread calls a Go function.
  1134  // This is nosplit and nowritebarrierrec because it is called by needm
  1135  // which may be called on a non-Go thread with no g available.
  1136  //
  1137  //go:nosplit
  1138  //go:nowritebarrierrec
  1139  func sigsave(p *sigset) {
  1140  	sigprocmask(_SIG_SETMASK, nil, p)
  1141  }
  1142  
  1143  // msigrestore sets the current thread's signal mask to sigmask.
  1144  // This is used to restore the non-Go signal mask when a non-Go thread
  1145  // calls a Go function.
  1146  // This is nosplit and nowritebarrierrec because it is called by dropm
  1147  // after g has been cleared.
  1148  //
  1149  //go:nosplit
  1150  //go:nowritebarrierrec
  1151  func msigrestore(sigmask sigset) {
  1152  	sigprocmask(_SIG_SETMASK, &sigmask, nil)
  1153  }
  1154  
  1155  // sigsetAllExiting is used by sigblock(true) when a thread is
  1156  // exiting. sigset_all is defined in OS specific code, and per GOOS
  1157  // behavior may override this default for sigsetAllExiting: see
  1158  // osinit().
  1159  var sigsetAllExiting = sigset_all
  1160  
  1161  // sigblock blocks signals in the current thread's signal mask.
  1162  // This is used to block signals while setting up and tearing down g
  1163  // when a non-Go thread calls a Go function. When a thread is exiting
  1164  // we use the sigsetAllExiting value, otherwise the OS specific
  1165  // definition of sigset_all is used.
  1166  // This is nosplit and nowritebarrierrec because it is called by needm
  1167  // which may be called on a non-Go thread with no g available.
  1168  //
  1169  //go:nosplit
  1170  //go:nowritebarrierrec
  1171  func sigblock(exiting bool) {
  1172  	if exiting {
  1173  		sigprocmask(_SIG_SETMASK, &sigsetAllExiting, nil)
  1174  		return
  1175  	}
  1176  	sigprocmask(_SIG_SETMASK, &sigset_all, nil)
  1177  }
  1178  
  1179  // unblocksig removes sig from the current thread's signal mask.
  1180  // This is nosplit and nowritebarrierrec because it is called from
  1181  // dieFromSignal, which can be called by sigfwdgo while running in the
  1182  // signal handler, on the signal stack, with no g available.
  1183  //
  1184  //go:nosplit
  1185  //go:nowritebarrierrec
  1186  func unblocksig(sig uint32) {
  1187  	var set sigset
  1188  	sigaddset(&set, int(sig))
  1189  	sigprocmask(_SIG_UNBLOCK, &set, nil)
  1190  }
  1191  
  1192  // minitSignals is called when initializing a new m to set the
  1193  // thread's alternate signal stack and signal mask.
  1194  func minitSignals() {
  1195  	minitSignalStack()
  1196  	minitSignalMask()
  1197  }
  1198  
  1199  // minitSignalStack is called when initializing a new m to set the
  1200  // alternate signal stack. If the alternate signal stack is not set
  1201  // for the thread (the normal case) then set the alternate signal
  1202  // stack to the gsignal stack. If the alternate signal stack is set
  1203  // for the thread (the case when a non-Go thread sets the alternate
  1204  // signal stack and then calls a Go function) then set the gsignal
  1205  // stack to the alternate signal stack. We also set the alternate
  1206  // signal stack to the gsignal stack if cgo is not used (regardless
  1207  // of whether it is already set). Record which choice was made in
  1208  // newSigstack, so that it can be undone in unminit.
  1209  func minitSignalStack() {
  1210  	_g_ := getg()
  1211  	var st stackt
  1212  	sigaltstack(nil, &st)
  1213  	if st.ss_flags&_SS_DISABLE != 0 || !iscgo {
  1214  		signalstack(&_g_.m.gsignal.stack)
  1215  		_g_.m.newSigstack = true
  1216  	} else {
  1217  		setGsignalStack(&st, &_g_.m.goSigStack)
  1218  		_g_.m.newSigstack = false
  1219  	}
  1220  }
  1221  
  1222  // minitSignalMask is called when initializing a new m to set the
  1223  // thread's signal mask. When this is called all signals have been
  1224  // blocked for the thread.  This starts with m.sigmask, which was set
  1225  // either from initSigmask for a newly created thread or by calling
  1226  // sigsave if this is a non-Go thread calling a Go function. It
  1227  // removes all essential signals from the mask, thus causing those
  1228  // signals to not be blocked. Then it sets the thread's signal mask.
  1229  // After this is called the thread can receive signals.
  1230  func minitSignalMask() {
  1231  	nmask := getg().m.sigmask
  1232  	for i := range sigtable {
  1233  		if !blockableSig(uint32(i)) {
  1234  			sigdelset(&nmask, i)
  1235  		}
  1236  	}
  1237  	sigprocmask(_SIG_SETMASK, &nmask, nil)
  1238  }
  1239  
  1240  // unminitSignals is called from dropm, via unminit, to undo the
  1241  // effect of calling minit on a non-Go thread.
  1242  //
  1243  //go:nosplit
  1244  func unminitSignals() {
  1245  	if getg().m.newSigstack {
  1246  		st := stackt{ss_flags: _SS_DISABLE}
  1247  		sigaltstack(&st, nil)
  1248  	} else {
  1249  		// We got the signal stack from someone else. Restore
  1250  		// the Go-allocated stack in case this M gets reused
  1251  		// for another thread (e.g., it's an extram). Also, on
  1252  		// Android, libc allocates a signal stack for all
  1253  		// threads, so it's important to restore the Go stack
  1254  		// even on Go-created threads so we can free it.
  1255  		restoreGsignalStack(&getg().m.goSigStack)
  1256  	}
  1257  }
  1258  
  1259  // blockableSig reports whether sig may be blocked by the signal mask.
  1260  // We never want to block the signals marked _SigUnblock;
  1261  // these are the synchronous signals that turn into a Go panic.
  1262  // We never want to block the preemption signal if it is being used.
  1263  // In a Go program--not a c-archive/c-shared--we never want to block
  1264  // the signals marked _SigKill or _SigThrow, as otherwise it's possible
  1265  // for all running threads to block them and delay their delivery until
  1266  // we start a new thread. When linked into a C program we let the C code
  1267  // decide on the disposition of those signals.
  1268  func blockableSig(sig uint32) bool {
  1269  	flags := sigtable[sig].flags
  1270  	if flags&_SigUnblock != 0 {
  1271  		return false
  1272  	}
  1273  	if sig == sigPreempt && preemptMSupported && debug.asyncpreemptoff == 0 {
  1274  		return false
  1275  	}
  1276  	if isarchive || islibrary {
  1277  		return true
  1278  	}
  1279  	return flags&(_SigKill|_SigThrow) == 0
  1280  }
  1281  
  1282  // gsignalStack saves the fields of the gsignal stack changed by
  1283  // setGsignalStack.
  1284  type gsignalStack struct {
  1285  	stack       stack
  1286  	stackguard0 uintptr
  1287  	stackguard1 uintptr
  1288  	stktopsp    uintptr
  1289  }
  1290  
  1291  // setGsignalStack sets the gsignal stack of the current m to an
  1292  // alternate signal stack returned from the sigaltstack system call.
  1293  // It saves the old values in *old for use by restoreGsignalStack.
  1294  // This is used when handling a signal if non-Go code has set the
  1295  // alternate signal stack.
  1296  //
  1297  //go:nosplit
  1298  //go:nowritebarrierrec
  1299  func setGsignalStack(st *stackt, old *gsignalStack) {
  1300  	g := getg()
  1301  	if old != nil {
  1302  		old.stack = g.m.gsignal.stack
  1303  		old.stackguard0 = g.m.gsignal.stackguard0
  1304  		old.stackguard1 = g.m.gsignal.stackguard1
  1305  		old.stktopsp = g.m.gsignal.stktopsp
  1306  	}
  1307  	stsp := uintptr(unsafe.Pointer(st.ss_sp))
  1308  	g.m.gsignal.stack.lo = stsp
  1309  	g.m.gsignal.stack.hi = stsp + st.ss_size
  1310  	g.m.gsignal.stackguard0 = stsp + _StackGuard
  1311  	g.m.gsignal.stackguard1 = stsp + _StackGuard
  1312  }
  1313  
  1314  // restoreGsignalStack restores the gsignal stack to the value it had
  1315  // before entering the signal handler.
  1316  //
  1317  //go:nosplit
  1318  //go:nowritebarrierrec
  1319  func restoreGsignalStack(st *gsignalStack) {
  1320  	gp := getg().m.gsignal
  1321  	gp.stack = st.stack
  1322  	gp.stackguard0 = st.stackguard0
  1323  	gp.stackguard1 = st.stackguard1
  1324  	gp.stktopsp = st.stktopsp
  1325  }
  1326  
  1327  // signalstack sets the current thread's alternate signal stack to s.
  1328  //
  1329  //go:nosplit
  1330  func signalstack(s *stack) {
  1331  	st := stackt{ss_size: s.hi - s.lo}
  1332  	setSignalstackSP(&st, s.lo)
  1333  	sigaltstack(&st, nil)
  1334  }
  1335  
  1336  // setsigsegv is used on darwin/arm64 to fake a segmentation fault.
  1337  //
  1338  // This is exported via linkname to assembly in runtime/cgo.
  1339  //
  1340  //go:nosplit
  1341  //go:linkname setsigsegv
  1342  func setsigsegv(pc uintptr) {
  1343  	g := getg()
  1344  	g.sig = _SIGSEGV
  1345  	g.sigpc = pc
  1346  	g.sigcode0 = _SEGV_MAPERR
  1347  	g.sigcode1 = 0 // TODO: emulate si_addr
  1348  }
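
For orientation, the sketch below (not part of signal_unix.go) shows how application
code reaches the machinery above through the os/signal package: signal.Notify ends up
in sigenable via signal_enable, which installs sighandler and unblocks the signal on
the thread started by ensureSigM, and a delivered signal flows back out through
sigsend to the registered channel. It uses only the public os, os/signal, and syscall
APIs; the choice of SIGINT is just an example.

	package main

	import (
		"fmt"
		"os"
		"os/signal"
		"syscall"
	)

	func main() {
		// signal.Notify reaches the runtime's signal_enable/sigenable,
		// which installs the Go sighandler for SIGINT (if it is not
		// already installed) and unblocks it on the sigM thread.
		ch := make(chan os.Signal, 1)
		signal.Notify(ch, syscall.SIGINT)

		// When SIGINT arrives, sighandler calls sigsend, and the
		// os/signal package delivers the signal to this channel.
		fmt.Println("received", <-ch)
	}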
  1349  
