...

Source file src/runtime/runtime1.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"internal/bytealg"
     9  	"internal/goarch"
    10  	"runtime/internal/atomic"
    11  	"unsafe"
    12  )
    13  
    14  // Keep a cached value to make gotraceback fast,
    15  // since we call it on every call to gentraceback.
    16  // The cached value is a uint32 in which the low bits
    17  // are the "crash" and "all" settings and the remaining
    18  // bits are the traceback value (0 off, 1 on, 2 include system).
    19  const (
    20  	tracebackCrash = 1 << iota
    21  	tracebackAll
    22  	tracebackShift = iota
    23  )
    24  
    25  var traceback_cache uint32 = 2 << tracebackShift
    26  var traceback_env uint32
    27  
    28  // gotraceback returns the current traceback settings.
    29  //
    30  // If level is 0, suppress all tracebacks.
    31  // If level is 1, show tracebacks, but exclude runtime frames.
    32  // If level is 2, show tracebacks including runtime frames.
    33  // If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
    34  // If crash is set, crash (core dump, etc) after tracebacking.
    35  //
    36  //go:nosplit
    37  func gotraceback() (level int32, all, crash bool) {
    38  	_g_ := getg()
    39  	t := atomic.Load(&traceback_cache)
    40  	crash = t&tracebackCrash != 0
    41  	all = _g_.m.throwing >= throwTypeUser || t&tracebackAll != 0
    42  	if _g_.m.traceback != 0 {
    43  		level = int32(_g_.m.traceback)
    44  	} else if _g_.m.throwing >= throwTypeRuntime {
    45  		// Always include runtime frames in runtime throws unless
    46  		// otherwise overridden by m.traceback.
    47  		level = 2
    48  	} else {
    49  		level = int32(t >> tracebackShift)
    50  	}
    51  	return
    52  }
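
As a standalone illustration of the traceback_cache encoding described above (this is not part of the runtime source), the sketch below packs and unpacks a level together with the crash and all flags using the same bit layout:

	package main

	import "fmt"

	// Same layout as the runtime: bit 0 = crash, bit 1 = all,
	// remaining bits = traceback level.
	const (
		tracebackCrash = 1 << iota
		tracebackAll
		tracebackShift = iota
	)

	func main() {
		// GOTRACEBACK=crash corresponds to level 2 with both flags set.
		t := uint32(2)<<tracebackShift | tracebackAll | tracebackCrash
		fmt.Println(t>>tracebackShift, t&tracebackAll != 0, t&tracebackCrash != 0) // 2 true true
	}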
    53  
    54  var (
    55  	argc int32
    56  	argv **byte
    57  )
    58  
    59  // nosplit for use in linux startup sysargs
    60  //
    61  //go:nosplit
    62  func argv_index(argv **byte, i int32) *byte {
    63  	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
    64  }
    65  
    66  func args(c int32, v **byte) {
    67  	argc = c
    68  	argv = v
    69  	sysargs(c, v)
    70  }
    71  
    72  func goargs() {
    73  	if GOOS == "windows" {
    74  		return
    75  	}
    76  	argslice = make([]string, argc)
    77  	for i := int32(0); i < argc; i++ {
    78  		argslice[i] = gostringnocopy(argv_index(argv, i))
    79  	}
    80  }
    81  
    82  func goenvs_unix() {
    83  	// TODO(austin): ppc64 in dynamic linking mode doesn't
    84  	// guarantee env[] will immediately follow argv. Might cause
    85  	// problems.
    86  	n := int32(0)
    87  	for argv_index(argv, argc+1+n) != nil {
    88  		n++
    89  	}
    90  
    91  	envs = make([]string, n)
    92  	for i := int32(0); i < n; i++ {
    93  		envs[i] = gostring(argv_index(argv, argc+1+i))
    94  	}
    95  }
    96  
    97  func environ() []string {
    98  	return envs
    99  }
   100  
   101  // TODO: These should be locals in testAtomic64, but we don't 8-byte
   102  // align stack variables on 386.
   103  var test_z64, test_x64 uint64
   104  
   105  func testAtomic64() {
   106  	test_z64 = 42
   107  	test_x64 = 0
   108  	if atomic.Cas64(&test_z64, test_x64, 1) {
   109  		throw("cas64 failed")
   110  	}
   111  	if test_x64 != 0 {
   112  		throw("cas64 failed")
   113  	}
   114  	test_x64 = 42
   115  	if !atomic.Cas64(&test_z64, test_x64, 1) {
   116  		throw("cas64 failed")
   117  	}
   118  	if test_x64 != 42 || test_z64 != 1 {
   119  		throw("cas64 failed")
   120  	}
   121  	if atomic.Load64(&test_z64) != 1 {
   122  		throw("load64 failed")
   123  	}
   124  	atomic.Store64(&test_z64, (1<<40)+1)
   125  	if atomic.Load64(&test_z64) != (1<<40)+1 {
   126  		throw("store64 failed")
   127  	}
   128  	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
   129  		throw("xadd64 failed")
   130  	}
   131  	if atomic.Load64(&test_z64) != (2<<40)+2 {
   132  		throw("xadd64 failed")
   133  	}
   134  	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
   135  		throw("xchg64 failed")
   136  	}
   137  	if atomic.Load64(&test_z64) != (3<<40)+3 {
   138  		throw("xchg64 failed")
   139  	}
   140  }
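
Ordinary programs reach the same 64-bit primitives through sync/atomic rather than runtime/internal/atomic. For illustration only, here is the CAS/Store/Add/Swap/Load sequence exercised above, rewritten against the public package:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	func main() {
		var z uint64 = 42
		fmt.Println(atomic.CompareAndSwapUint64(&z, 0, 1))  // false: z holds 42, not 0
		fmt.Println(atomic.CompareAndSwapUint64(&z, 42, 1)) // true: z becomes 1
		atomic.StoreUint64(&z, (1<<40)+1)
		fmt.Println(atomic.AddUint64(&z, (1<<40)+1) == (2<<40)+2)  // true
		fmt.Println(atomic.SwapUint64(&z, (3<<40)+3) == (2<<40)+2) // true: Swap returns the old value
		fmt.Println(atomic.LoadUint64(&z) == (3<<40)+3)            // true
	}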
   141  
   142  func check() {
   143  	var (
   144  		a     int8
   145  		b     uint8
   146  		c     int16
   147  		d     uint16
   148  		e     int32
   149  		f     uint32
   150  		g     int64
   151  		h     uint64
   152  		i, i1 float32
   153  		j, j1 float64
   154  		k     unsafe.Pointer
   155  		l     *uint16
   156  		m     [4]byte
   157  	)
   158  	type x1t struct {
   159  		x uint8
   160  	}
   161  	type y1t struct {
   162  		x1 x1t
   163  		y  uint8
   164  	}
   165  	var x1 x1t
   166  	var y1 y1t
   167  
   168  	if unsafe.Sizeof(a) != 1 {
   169  		throw("bad a")
   170  	}
   171  	if unsafe.Sizeof(b) != 1 {
   172  		throw("bad b")
   173  	}
   174  	if unsafe.Sizeof(c) != 2 {
   175  		throw("bad c")
   176  	}
   177  	if unsafe.Sizeof(d) != 2 {
   178  		throw("bad d")
   179  	}
   180  	if unsafe.Sizeof(e) != 4 {
   181  		throw("bad e")
   182  	}
   183  	if unsafe.Sizeof(f) != 4 {
   184  		throw("bad f")
   185  	}
   186  	if unsafe.Sizeof(g) != 8 {
   187  		throw("bad g")
   188  	}
   189  	if unsafe.Sizeof(h) != 8 {
   190  		throw("bad h")
   191  	}
   192  	if unsafe.Sizeof(i) != 4 {
   193  		throw("bad i")
   194  	}
   195  	if unsafe.Sizeof(j) != 8 {
   196  		throw("bad j")
   197  	}
   198  	if unsafe.Sizeof(k) != goarch.PtrSize {
   199  		throw("bad k")
   200  	}
   201  	if unsafe.Sizeof(l) != goarch.PtrSize {
   202  		throw("bad l")
   203  	}
   204  	if unsafe.Sizeof(x1) != 1 {
   205  		throw("bad unsafe.Sizeof x1")
   206  	}
   207  	if unsafe.Offsetof(y1.y) != 1 {
   208  		throw("bad offsetof y1.y")
   209  	}
   210  	if unsafe.Sizeof(y1) != 2 {
   211  		throw("bad unsafe.Sizeof y1")
   212  	}
   213  
   214  	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
   215  		throw("bad timediv")
   216  	}
   217  
   218  	var z uint32
   219  	z = 1
   220  	if !atomic.Cas(&z, 1, 2) {
   221  		throw("cas1")
   222  	}
   223  	if z != 2 {
   224  		throw("cas2")
   225  	}
   226  
   227  	z = 4
   228  	if atomic.Cas(&z, 5, 6) {
   229  		throw("cas3")
   230  	}
   231  	if z != 4 {
   232  		throw("cas4")
   233  	}
   234  
   235  	z = 0xffffffff
   236  	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
   237  		throw("cas5")
   238  	}
   239  	if z != 0xfffffffe {
   240  		throw("cas6")
   241  	}
   242  
   243  	m = [4]byte{1, 1, 1, 1}
   244  	atomic.Or8(&m[1], 0xf0)
   245  	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
   246  		throw("atomicor8")
   247  	}
   248  
   249  	m = [4]byte{0xff, 0xff, 0xff, 0xff}
   250  	atomic.And8(&m[1], 0x1)
   251  	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
   252  		throw("atomicand8")
   253  	}
   254  
   255  	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
   256  	if j == j {
   257  		throw("float64nan")
   258  	}
   259  	if !(j != j) {
   260  		throw("float64nan1")
   261  	}
   262  
   263  	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
   264  	if j == j1 {
   265  		throw("float64nan2")
   266  	}
   267  	if !(j != j1) {
   268  		throw("float64nan3")
   269  	}
   270  
   271  	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
   272  	if i == i {
   273  		throw("float32nan")
   274  	}
    275  	if !(i != i) {
   276  		throw("float32nan1")
   277  	}
   278  
   279  	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
   280  	if i == i1 {
   281  		throw("float32nan2")
   282  	}
    283  	if !(i != i1) {
   284  		throw("float32nan3")
   285  	}
   286  
   287  	testAtomic64()
   288  
   289  	if _FixedStack != round2(_FixedStack) {
   290  		throw("FixedStack is not power-of-2")
   291  	}
   292  
   293  	if !checkASM() {
   294  		throw("assembly checks failed")
   295  	}
   296  }
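
The float comparisons near the end of check rely on IEEE 754 semantics: a NaN compares unequal to every value, including itself. A quick standalone illustration:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		nan := math.NaN()
		fmt.Println(nan == nan, nan != nan) // false true
	}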
   297  
   298  type dbgVar struct {
   299  	name  string
   300  	value *int32
   301  }
   302  
   303  // Holds variables parsed from GODEBUG env var,
   304  // except for "memprofilerate" since there is an
   305  // existing int var for that value, which may
   306  // already have an initial value.
   307  var debug struct {
   308  	cgocheck           int32
   309  	clobberfree        int32
   310  	efence             int32
   311  	gccheckmark        int32
   312  	gcpacertrace       int32
   313  	gcshrinkstackoff   int32
   314  	gcstoptheworld     int32
   315  	gctrace            int32
   316  	invalidptr         int32
   317  	madvdontneed       int32 // for Linux; issue 28466
   318  	scavtrace          int32
   319  	scheddetail        int32
   320  	schedtrace         int32
   321  	tracebackancestors int32
   322  	asyncpreemptoff    int32
   323  	harddecommit       int32
   324  	adaptivestackstart int32
   325  
   326  	// debug.malloc is used as a combined debug check
   327  	// in the malloc function and should be set
   328  	// if any of the below debug options is != 0.
   329  	malloc         bool
   330  	allocfreetrace int32
   331  	inittrace      int32
   332  	sbrk           int32
   333  }
   334  
   335  var dbgvars = []dbgVar{
   336  	{"allocfreetrace", &debug.allocfreetrace},
   337  	{"clobberfree", &debug.clobberfree},
   338  	{"cgocheck", &debug.cgocheck},
   339  	{"efence", &debug.efence},
   340  	{"gccheckmark", &debug.gccheckmark},
   341  	{"gcpacertrace", &debug.gcpacertrace},
   342  	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
   343  	{"gcstoptheworld", &debug.gcstoptheworld},
   344  	{"gctrace", &debug.gctrace},
   345  	{"invalidptr", &debug.invalidptr},
   346  	{"madvdontneed", &debug.madvdontneed},
   347  	{"sbrk", &debug.sbrk},
   348  	{"scavtrace", &debug.scavtrace},
   349  	{"scheddetail", &debug.scheddetail},
   350  	{"schedtrace", &debug.schedtrace},
   351  	{"tracebackancestors", &debug.tracebackancestors},
   352  	{"asyncpreemptoff", &debug.asyncpreemptoff},
   353  	{"inittrace", &debug.inittrace},
   354  	{"harddecommit", &debug.harddecommit},
   355  	{"adaptivestackstart", &debug.adaptivestackstart},
   356  }
   357  
   358  func parsedebugvars() {
   359  	// defaults
   360  	debug.cgocheck = 1
   361  	debug.invalidptr = 1
   362  	debug.adaptivestackstart = 1 // go119 - set this to 0 to turn larger initial goroutine stacks off
   363  	if GOOS == "linux" {
   364  		// On Linux, MADV_FREE is faster than MADV_DONTNEED,
   365  		// but doesn't affect many of the statistics that
   366  		// MADV_DONTNEED does until the memory is actually
   367  		// reclaimed. This generally leads to poor user
   368  		// experience, like confusing stats in top and other
   369  		// monitoring tools; and bad integration with
   370  		// management systems that respond to memory usage.
   371  		// Hence, default to MADV_DONTNEED.
   372  		debug.madvdontneed = 1
   373  	}
   374  
   375  	for p := gogetenv("GODEBUG"); p != ""; {
   376  		field := ""
   377  		i := bytealg.IndexByteString(p, ',')
   378  		if i < 0 {
   379  			field, p = p, ""
   380  		} else {
   381  			field, p = p[:i], p[i+1:]
   382  		}
   383  		i = bytealg.IndexByteString(field, '=')
   384  		if i < 0 {
   385  			continue
   386  		}
   387  		key, value := field[:i], field[i+1:]
   388  
   389  		// Update MemProfileRate directly here since it
   390  		// is int, not int32, and should only be updated
   391  		// if specified in GODEBUG.
   392  		if key == "memprofilerate" {
   393  			if n, ok := atoi(value); ok {
   394  				MemProfileRate = n
   395  			}
   396  		} else {
   397  			for _, v := range dbgvars {
   398  				if v.name == key {
   399  					if n, ok := atoi32(value); ok {
   400  						*v.value = n
   401  					}
   402  				}
   403  			}
   404  		}
   405  	}
   406  
   407  	debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0
   408  
   409  	setTraceback(gogetenv("GOTRACEBACK"))
   410  	traceback_env = traceback_cache
   411  }
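
parsedebugvars treats GODEBUG as a comma-separated list of name=value fields and silently skips any field without an '='. A standalone sketch of the same parsing loop, using the strings package in place of the internal bytealg helpers (illustration only; the setting values here are arbitrary):

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		godebug := "gctrace=1,madvdontneed=1,bogusfield,schedtrace=1000"
		settings := map[string]string{}
		for p := godebug; p != ""; {
			var field string
			if i := strings.IndexByte(p, ','); i < 0 {
				field, p = p, ""
			} else {
				field, p = p[:i], p[i+1:]
			}
			i := strings.IndexByte(field, '=')
			if i < 0 {
				continue // fields without '=' are ignored, as in the runtime
			}
			settings[field[:i]] = field[i+1:]
		}
		fmt.Println(settings) // map[gctrace:1 madvdontneed:1 schedtrace:1000]
	}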
   412  
   413  //go:linkname setTraceback runtime/debug.SetTraceback
   414  func setTraceback(level string) {
   415  	var t uint32
   416  	switch level {
   417  	case "none":
   418  		t = 0
   419  	case "single", "":
   420  		t = 1 << tracebackShift
   421  	case "all":
   422  		t = 1<<tracebackShift | tracebackAll
   423  	case "system":
   424  		t = 2<<tracebackShift | tracebackAll
   425  	case "crash":
   426  		t = 2<<tracebackShift | tracebackAll | tracebackCrash
   427  	default:
   428  		t = tracebackAll
   429  		if n, ok := atoi(level); ok && n == int(uint32(n)) {
   430  			t |= uint32(n) << tracebackShift
   431  		}
   432  	}
   433  	// when C owns the process, simply exit'ing the process on fatal errors
   434  	// and panics is surprising. Be louder and abort instead.
   435  	if islibrary || isarchive {
   436  		t |= tracebackCrash
   437  	}
   438  
   439  	t |= traceback_env
   440  
   441  	atomic.Store(&traceback_cache, t)
   442  }
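
Because of the linkname above, setTraceback is the implementation behind the exported runtime/debug.SetTraceback, so a program can change the traceback level at run time. For example:

	package main

	import "runtime/debug"

	func main() {
		// Equivalent to running with GOTRACEBACK=crash: include runtime frames,
		// dump all goroutines, and abort (core dump where supported) on a fatal error.
		debug.SetTraceback("crash")
		panic("demonstrating the crash setting")
	}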
   443  
    444  // Poor man's 64-bit division.
   445  // This is a very special function, do not use it if you are not sure what you are doing.
   446  // int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
   447  // Handles overflow in a time-specific manner.
   448  // This keeps us within no-split stack limits on 32-bit processors.
   449  //
   450  //go:nosplit
   451  func timediv(v int64, div int32, rem *int32) int32 {
   452  	res := int32(0)
   453  	for bit := 30; bit >= 0; bit-- {
   454  		if v >= int64(div)<<uint(bit) {
   455  			v = v - (int64(div) << uint(bit))
   456  			// Before this for loop, res was 0, thus all these
   457  			// power of 2 increments are now just bitsets.
   458  			res |= 1 << uint(bit)
   459  		}
   460  	}
   461  	if v >= int64(div) {
   462  		if rem != nil {
   463  			*rem = 0
   464  		}
   465  		return 0x7fffffff
   466  	}
   467  	if rem != nil {
   468  		*rem = int32(v)
   469  	}
   470  	return res
   471  }
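
Below is a standalone sketch of timediv's behavior; the function body is copied verbatim from the listing above so the example compiles outside the runtime. It shows the ordinary case and the saturating result when the quotient does not fit in an int32:

	package main

	import "fmt"

	// Copied from the runtime source above so this illustration is self-contained.
	func timediv(v int64, div int32, rem *int32) int32 {
		res := int32(0)
		for bit := 30; bit >= 0; bit-- {
			if v >= int64(div)<<uint(bit) {
				v = v - (int64(div) << uint(bit))
				res |= 1 << uint(bit)
			}
		}
		if v >= int64(div) {
			if rem != nil {
				*rem = 0
			}
			return 0x7fffffff
		}
		if rem != nil {
			*rem = int32(v)
		}
		return res
	}

	func main() {
		var rem int32
		fmt.Println(timediv(12345*1000000000+54321, 1000000000, &rem), rem) // 12345 54321
		// A quotient that does not fit in 31 bits saturates to 0x7fffffff, with rem set to 0.
		fmt.Println(timediv(1<<62, 1, &rem), rem) // 2147483647 0
	}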
   472  
   473  // Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
   474  
   475  //go:nosplit
   476  func acquirem() *m {
   477  	_g_ := getg()
   478  	_g_.m.locks++
   479  	return _g_.m
   480  }
   481  
   482  //go:nosplit
   483  func releasem(mp *m) {
   484  	_g_ := getg()
   485  	mp.locks--
   486  	if mp.locks == 0 && _g_.preempt {
   487  		// restore the preemption request in case we've cleared it in newstack
   488  		_g_.stackguard0 = stackPreempt
   489  	}
   490  }
   491  
   492  //go:linkname reflect_typelinks reflect.typelinks
   493  func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
   494  	modules := activeModules()
   495  	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
   496  	ret := [][]int32{modules[0].typelinks}
   497  	for _, md := range modules[1:] {
   498  		sections = append(sections, unsafe.Pointer(md.types))
   499  		ret = append(ret, md.typelinks)
   500  	}
   501  	return sections, ret
   502  }
   503  
   504  // reflect_resolveNameOff resolves a name offset from a base pointer.
   505  //
   506  //go:linkname reflect_resolveNameOff reflect.resolveNameOff
   507  func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
   508  	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
   509  }
   510  
   511  // reflect_resolveTypeOff resolves an *rtype offset from a base type.
   512  //
   513  //go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
   514  func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
   515  	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
   516  }
   517  
   518  // reflect_resolveTextOff resolves a function pointer offset from a base type.
   519  //
   520  //go:linkname reflect_resolveTextOff reflect.resolveTextOff
   521  func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
   522  	return (*_type)(rtype).textOff(textOff(off))
   523  
   524  }
   525  
   526  // reflectlite_resolveNameOff resolves a name offset from a base pointer.
   527  //
   528  //go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
   529  func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
   530  	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
   531  }
   532  
   533  // reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
   534  //
   535  //go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
   536  func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
   537  	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
   538  }
   539  
   540  // reflect_addReflectOff adds a pointer to the reflection offset lookup map.
   541  //
   542  //go:linkname reflect_addReflectOff reflect.addReflectOff
   543  func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
   544  	reflectOffsLock()
   545  	if reflectOffs.m == nil {
   546  		reflectOffs.m = make(map[int32]unsafe.Pointer)
   547  		reflectOffs.minv = make(map[unsafe.Pointer]int32)
   548  		reflectOffs.next = -1
   549  	}
   550  	id, found := reflectOffs.minv[ptr]
   551  	if !found {
   552  		id = reflectOffs.next
   553  		reflectOffs.next-- // use negative offsets as IDs to aid debugging
   554  		reflectOffs.m[id] = ptr
   555  		reflectOffs.minv[ptr] = id
   556  	}
   557  	reflectOffsUnlock()
   558  	return id
   559  }
   560  
