Source file src/runtime/proc.go
1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "runtime/internal/atomic"
12 "runtime/internal/sys"
13 "unsafe"
14 )
15
16 // set using cmd/go/internal/modload.ModInfoProg
17 var modinfo string
18
19 // Goroutine scheduler
20 // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
21 //
22 // The main concepts are:
23 // G - goroutine.
24 // M - worker thread, or machine.
25 // P - processor, a resource that is required to execute Go code.
26 //     M must have an associated P to execute Go code, however it can be
27 //     blocked or in a syscall w/o an associated P.
28 //
29 // Design doc at https://golang.org/s/go11sched.
30 //
31 // Worker thread parking/unparking.
32 // We need to balance between keeping enough running worker threads to utilize
33 // available hardware parallelism and parking excessive running worker threads
34 // to conserve CPU resources and power.
35 //
36 // An M is "spinning" if it is out of local work and did not find work in the
37 // global run queue or netpoller. Spinning threads spin looking for work in
38 // per-P run queues before parking; the state is denoted in m.spinning and in
39 // sched.nmspinning.
40 //
41 // If there is at least one spinning thread, new threads are not unparked when
42 // work is submitted; instead, the last spinning thread that finds work must
43 // unpark a new spinning thread before it stops spinning. This approximates
44 // adequate parallelism while avoiding excessive thread parking/unparking.
45
113 var (
114 m0 m
115 g0 g
116 mcache0 *mcache
117 raceprocctx0 uintptr
118 )
119
120 //go:linkname runtime_inittask runtime..inittask
121 var runtime_inittask initTask
122
123 //go:linkname main_inittask main..inittask
124 var main_inittask initTask
125
126 // main_init_done is a signal used by cgocallbackg that initialization
127 // has been completed. It is made before _cgo_notify_runtime_init_done,
128 // so all cgo calls can rely on it existing. When main_init is complete,
129 // it is closed, meaning cgocallbackg can reliably receive from it.
130 var main_init_done chan bool
131
132 //go:linkname main_main main.main
133 func main_main()
134
135 // mainStarted indicates that the main M has started.
136 var mainStarted bool
137
138 // runtimeInitTime is the nanotime() at which the runtime started.
139 var runtimeInitTime int64
140
141 // Value to use for signal mask for newly created M's.
142 var initSigmask sigset
143
144 // The main goroutine.
145 func main() {
146 g := getg()
147
148
149
150 g.m.g0.racectx = 0
151
152
153
154
155 if goarch.PtrSize == 8 {
156 maxstacksize = 1000000000
157 } else {
158 maxstacksize = 250000000
159 }
160
161
162
163
164 maxstackceiling = 2 * maxstacksize
165
166
167 mainStarted = true
168
169 if GOARCH != "wasm" {
170 systemstack(func() {
171 newm(sysmon, nil, -1)
172 })
173 }
174
175
176
177
178
179
180
181 lockOSThread()
182
183 if g.m != &m0 {
184 throw("runtime.main not on m0")
185 }
186
187
188
189 runtimeInitTime = nanotime()
190 if runtimeInitTime == 0 {
191 throw("nanotime returning zero")
192 }
193
194 if debug.inittrace != 0 {
195 inittrace.id = getg().goid
196 inittrace.active = true
197 }
198
199 doInit(&runtime_inittask)
200
201
202 needUnlock := true
203 defer func() {
204 if needUnlock {
205 unlockOSThread()
206 }
207 }()
208
209 gcenable()
210
211 main_init_done = make(chan bool)
212 if iscgo {
213 if _cgo_thread_start == nil {
214 throw("_cgo_thread_start missing")
215 }
216 if GOOS != "windows" {
217 if _cgo_setenv == nil {
218 throw("_cgo_setenv missing")
219 }
220 if _cgo_unsetenv == nil {
221 throw("_cgo_unsetenv missing")
222 }
223 }
224 if _cgo_notify_runtime_init_done == nil {
225 throw("_cgo_notify_runtime_init_done missing")
226 }
227
228
229 startTemplateThread()
230 cgocall(_cgo_notify_runtime_init_done, nil)
231 }
232
233 doInit(&main_inittask)
234
235
236
237 inittrace.active = false
238
239 close(main_init_done)
240
241 needUnlock = false
242 unlockOSThread()
243
244 if isarchive || islibrary {
245
246
247 return
248 }
249 fn := main_main
250 fn()
251 if raceenabled {
252 racefini()
253 }
254
255
256
257
258
259 if atomic.Load(&runningPanicDefers) != 0 {
260
261 for c := 0; c < 1000; c++ {
262 if atomic.Load(&runningPanicDefers) == 0 {
263 break
264 }
265 Gosched()
266 }
267 }
268 if atomic.Load(&panicking) != 0 {
269 gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
270 }
271
272 exit(0)
273 for {
274 var x *int32
275 *x = 0
276 }
277 }
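
// The ordering visible above (runtime init, package inits via doInit, then
// main_main) is observable from user code: every init function completes
// before main runs. A minimal standalone illustration (not part of this
// file):
//
//	package main
//
//	import "fmt"
//
//	func init() { fmt.Println("init runs first") }
//
//	func main() { fmt.Println("main runs after all inits") }
//
// Running with GODEBUG=inittrace=1 prints per-package init timings, which
// is the inittrace machinery toggled in main above.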
278
279
280
281
282 func os_beforeExit() {
283 if raceenabled {
284 racefini()
285 }
286 }
287
288
289 func init() {
290 go forcegchelper()
291 }
292
293 func forcegchelper() {
294 forcegc.g = getg()
295 lockInit(&forcegc.lock, lockRankForcegc)
296 for {
297 lock(&forcegc.lock)
298 if forcegc.idle != 0 {
299 throw("forcegc: phase error")
300 }
301 atomic.Store(&forcegc.idle, 1)
302 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceEvGoBlock, 1)
303
304 if debug.gctrace > 0 {
305 println("GC forced")
306 }
307
308 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
309 }
310 }
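
// forcegchelper is the goroutine behind the periodic forced GC that sysmon
// triggers on an idle heap. User code can force a collection directly; a
// small illustration (not part of this file):
//
//	package main
//
//	import "runtime"
//
//	func main() {
//		var ms runtime.MemStats
//		runtime.GC() // blocks until a full collection completes
//		runtime.ReadMemStats(&ms)
//		println("GC cycles:", ms.NumGC, "app-forced:", ms.NumForcedGC)
//	}
//
// With GODEBUG=gctrace=1 set, a sysmon-forced cycle also logs the
// "GC forced" line printed in the loop above.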
311
312 // Gosched yields the processor, allowing other goroutines to run. It does
313 // not suspend the current goroutine, so execution resumes automatically.
316 func Gosched() {
317 checkTimeouts()
318 mcall(gosched_m)
319 }
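
// A typical use of Gosched is yielding inside a long CPU-bound loop so that
// other goroutines sharing the P get a turn (illustrative, not part of this
// file):
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		runtime.GOMAXPROCS(1) // one P makes the yield easy to observe
//		done := make(chan struct{})
//		go func() {
//			fmt.Println("other goroutine ran")
//			close(done)
//		}()
//		for i := 0; i < 1000; i++ {
//			runtime.Gosched() // mcall(gosched_m): requeue me, run another G
//		}
//		<-done
//	}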
320
321 // goschedguarded yields the processor like gosched, but also checks
322 // for forbidden states and opts out of the yield in those cases.
325 func goschedguarded() {
326 mcall(goschedguarded_m)
327 }
328
329 // Puts the current goroutine into a waiting state and calls unlockf on the
330 // system stack.
331 //
332 // If unlockf returns false, the goroutine is resumed.
333 //
334 // unlockf must not access this G's stack, as it may be moved between
335 // the call to gopark and the call to unlockf.
336 //
337 // Note that because unlockf is called after putting the G into a waiting
338 // state, the G may have already been readied by the time unlockf is called,
339 // unless there is external synchronization preventing the G from being
340 // readied. If unlockf returns false, it must guarantee that the G cannot be
341 // externally readied.
342 //
343 // Reason explains why the goroutine has been parked. It is displayed in stack
344 // traces and heap dumps. Reasons should be unique and descriptive. Do not
345 // re-use reasons, add new ones.
346 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
347 if reason != waitReasonSleep {
348 checkTimeouts()
349 }
350 mp := acquirem()
351 gp := mp.curg
352 status := readgstatus(gp)
353 if status != _Grunning && status != _Gscanrunning {
354 throw("gopark: bad g status")
355 }
356 mp.waitlock = lock
357 mp.waitunlockf = unlockf
358 gp.waitreason = reason
359 mp.waittraceev = traceEv
360 mp.waittraceskip = traceskip
361 releasem(mp)
362
363 mcall(park_m)
364 }
365
366 // Puts the current goroutine into a waiting state and unlocks the lock.
367 // The goroutine can be made runnable again by calling goready(gp).
368 func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
369 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
370 }
371
372 func goready(gp *g, traceskip int) {
373 systemstack(func() {
374 ready(gp, traceskip, true)
375 })
376 }
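
// gopark and goready are the primitives beneath blocking channel operations:
// a receive on an empty channel parks the receiver, and the matching send
// readies it. The public-API equivalent of that park/ready pair
// (illustrative):
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		ch := make(chan int) // unbuffered
//		go func() {
//			ch <- 42 // readies the parked receiver
//		}()
//		fmt.Println(<-ch) // parks until the send arrives
//	}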
377
378
379 func acquireSudog() *sudog {
380 // Delicate dance: the semaphore implementation calls
381 // acquireSudog, acquireSudog calls new(sudog),
382 // new calls malloc, malloc can call the garbage collector,
383 // and the garbage collector calls the semaphore implementation
384 // in stopTheWorld.
385 // Break the cycle by doing acquirem/releasem around new(sudog).
386 // The acquirem/releasem increments m.locks during new(sudog),
387 // which keeps the garbage collector from being invoked.
388 mp := acquirem()
389 pp := mp.p.ptr()
390 if len(pp.sudogcache) == 0 {
391 lock(&sched.sudoglock)
392
393 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
394 s := sched.sudogcache
395 sched.sudogcache = s.next
396 s.next = nil
397 pp.sudogcache = append(pp.sudogcache, s)
398 }
399 unlock(&sched.sudoglock)
400
401 if len(pp.sudogcache) == 0 {
402 pp.sudogcache = append(pp.sudogcache, new(sudog))
403 }
404 }
405 n := len(pp.sudogcache)
406 s := pp.sudogcache[n-1]
407 pp.sudogcache[n-1] = nil
408 pp.sudogcache = pp.sudogcache[:n-1]
409 if s.elem != nil {
410 throw("acquireSudog: found s.elem != nil in cache")
411 }
412 releasem(mp)
413 return s
414 }
415
416
417 func releaseSudog(s *sudog) {
418 if s.elem != nil {
419 throw("runtime: sudog with non-nil elem")
420 }
421 if s.isSelect {
422 throw("runtime: sudog with non-false isSelect")
423 }
424 if s.next != nil {
425 throw("runtime: sudog with non-nil next")
426 }
427 if s.prev != nil {
428 throw("runtime: sudog with non-nil prev")
429 }
430 if s.waitlink != nil {
431 throw("runtime: sudog with non-nil waitlink")
432 }
433 if s.c != nil {
434 throw("runtime: sudog with non-nil c")
435 }
436 gp := getg()
437 if gp.param != nil {
438 throw("runtime: releaseSudog with non-nil gp.param")
439 }
440 mp := acquirem()
441 pp := mp.p.ptr()
442 if len(pp.sudogcache) == cap(pp.sudogcache) {
443
444 var first, last *sudog
445 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
446 n := len(pp.sudogcache)
447 p := pp.sudogcache[n-1]
448 pp.sudogcache[n-1] = nil
449 pp.sudogcache = pp.sudogcache[:n-1]
450 if first == nil {
451 first = p
452 } else {
453 last.next = p
454 }
455 last = p
456 }
457 lock(&sched.sudoglock)
458 last.next = sched.sudogcache
459 sched.sudogcache = first
460 unlock(&sched.sudoglock)
461 }
462 pp.sudogcache = append(pp.sudogcache, s)
463 releasem(mp)
464 }
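
// acquireSudog/releaseSudog form a two-level free list: a per-P slice backed
// by the global lock-protected sched.sudogcache, transferring half a cache at
// a time to bound contention. The closest standard-library analogue for user
// code is sync.Pool (illustrative sketch, not the runtime mechanism):
//
//	package main
//
//	import (
//		"fmt"
//		"sync"
//	)
//
//	var bufPool = sync.Pool{
//		New: func() any { return make([]byte, 4096) },
//	}
//
//	func main() {
//		b := bufPool.Get().([]byte) // reuses a cached buffer when possible
//		n := copy(b, "hello")
//		fmt.Println(string(b[:n]))
//		bufPool.Put(b) // return it for the next caller
//	}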
465
466
467 func badmcall(fn func(*g)) {
468 throw("runtime: mcall called on m->g0 stack")
469 }
470
471 func badmcall2(fn func(*g)) {
472 throw("runtime: mcall function returned")
473 }
474
475 func badreflectcall() {
476 panic(plainError("arg size to reflect.call more than 1GB"))
477 }
478
479 var badmorestackg0Msg = "fatal: morestack on g0\n"
480
481
482
483 func badmorestackg0() {
484 sp := stringStructOf(&badmorestackg0Msg)
485 write(2, sp.str, int32(sp.len))
486 }
487
488 var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"
489
490
491
492 func badmorestackgsignal() {
493 sp := stringStructOf(&badmorestackgsignalMsg)
494 write(2, sp.str, int32(sp.len))
495 }
496
497
498 func badctxt() {
499 throw("ctxt != 0")
500 }
501
502 func lockedOSThread() bool {
503 gp := getg()
504 return gp.lockedm != 0 && gp.m.lockedg != 0
505 }
506
507 var (
508
509
510
511
512
513
514 allglock mutex
515 allgs []*g
516
517
518
519
520
521
522
523
524
525
526
527
528
529 allglen uintptr
530 allgptr **g
531 )
532
533 func allgadd(gp *g) {
534 if readgstatus(gp) == _Gidle {
535 throw("allgadd: bad status Gidle")
536 }
537
538 lock(&allglock)
539 allgs = append(allgs, gp)
540 if &allgs[0] != allgptr {
541 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
542 }
543 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
544 unlock(&allglock)
545 }
546
547
548
549
550 func allGsSnapshot() []*g {
551 assertWorldStoppedOrLockHeld(&allglock)
552
553
554
555
556
557
558 return allgs[:len(allgs):len(allgs)]
559 }
560
561
562 func atomicAllG() (**g, uintptr) {
563 length := atomic.Loaduintptr(&allglen)
564 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
565 return ptr, length
566 }
567
568
569 func atomicAllGIndex(ptr **g, i uintptr) *g {
570 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
571 }
572
573
574
575
576 func forEachG(fn func(gp *g)) {
577 lock(&allglock)
578 for _, gp := range allgs {
579 fn(gp)
580 }
581 unlock(&allglock)
582 }
583
584
585
586
587
588 func forEachGRace(fn func(gp *g)) {
589 ptr, length := atomicAllG()
590 for i := uintptr(0); i < length; i++ {
591 gp := atomicAllGIndex(ptr, i)
592 fn(gp)
593 }
594 return
595 }
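
// atomicAllG/forEachGRace let readers walk allgs without the lock: the
// backing array pointer and length are published atomically, and the array
// is append-only, so a stale (ptr, len) pair is still a valid prefix. The
// same publish-a-snapshot pattern in user code with atomic.Pointer
// (illustrative; assumes Go 1.19+ generic atomics):
//
//	package main
//
//	import (
//		"fmt"
//		"sync/atomic"
//	)
//
//	var snapshot atomic.Pointer[[]int]
//
//	func publish(s []int) {
//		s = s[:len(s):len(s)] // fixed-capacity view, like allGsSnapshot
//		snapshot.Store(&s)
//	}
//
//	func main() {
//		publish([]int{1, 2, 3})
//		fmt.Println(*snapshot.Load()) // concurrent readers see a consistent slice
//	}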
596
597 const (
598
599
600 _GoidCacheBatch = 16
601 )
602
603
604
605 func cpuinit() {
606 const prefix = "GODEBUG="
607 var env string
608
609 switch GOOS {
610 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
611 cpu.DebugOptions = true
612
613
614
615
616 n := int32(0)
617 for argv_index(argv, argc+1+n) != nil {
618 n++
619 }
620
621 for i := int32(0); i < n; i++ {
622 p := argv_index(argv, argc+1+i)
623 s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)}))
624
625 if hasPrefix(s, prefix) {
626 env = gostring(p)[len(prefix):]
627 break
628 }
629 }
630 }
631
632 cpu.Initialize(env)
633
634
635
636 switch GOARCH {
637 case "386", "amd64":
638 x86HasPOPCNT = cpu.X86.HasPOPCNT
639 x86HasSSE41 = cpu.X86.HasSSE41
640 x86HasFMA = cpu.X86.HasFMA
641
642 case "arm":
643 armHasVFPv4 = cpu.ARM.HasVFPv4
644
645 case "arm64":
646 arm64HasATOMICS = cpu.ARM64.HasATOMICS
647 }
648 }
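
// cpuinit copies internal/cpu feature bits into runtime globals. User code
// that needs the same information can use golang.org/x/sys/cpu, which
// exposes equivalent fields (illustrative; assumes that module is available):
//
//	package main
//
//	import (
//		"fmt"
//
//		"golang.org/x/sys/cpu"
//	)
//
//	func main() {
//		fmt.Println("POPCNT:", cpu.X86.HasPOPCNT)
//		fmt.Println("SSE4.1:", cpu.X86.HasSSE41)
//	}
//
// As with the GOARCH switch above, the X86 fields are only meaningful on
// 386/amd64; other architectures have their own structs (cpu.ARM64, cpu.ARM).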
649
650
651
652
653
654
655
656
657
658 func schedinit() {
659 lockInit(&sched.lock, lockRankSched)
660 lockInit(&sched.sysmonlock, lockRankSysmon)
661 lockInit(&sched.deferlock, lockRankDefer)
662 lockInit(&sched.sudoglock, lockRankSudog)
663 lockInit(&deadlock, lockRankDeadlock)
664 lockInit(&paniclk, lockRankPanic)
665 lockInit(&allglock, lockRankAllg)
666 lockInit(&allpLock, lockRankAllp)
667 lockInit(&reflectOffs.lock, lockRankReflectOffs)
668 lockInit(&finlock, lockRankFin)
669 lockInit(&trace.bufLock, lockRankTraceBuf)
670 lockInit(&trace.stringsLock, lockRankTraceStrings)
671 lockInit(&trace.lock, lockRankTrace)
672 lockInit(&cpuprof.lock, lockRankCpuprof)
673 lockInit(&trace.stackTab.lock, lockRankTraceStackTab)
674
675
676
677 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
678
679
680
681 _g_ := getg()
682 if raceenabled {
683 _g_.racectx, raceprocctx0 = raceinit()
684 }
685
686 sched.maxmcount = 10000
687
688
689 worldStopped()
690
691 moduledataverify()
692 stackinit()
693 mallocinit()
694 cpuinit()
695 alginit()
696 fastrandinit()
697 mcommoninit(_g_.m, -1)
698 modulesinit()
699 typelinksinit()
700 itabsinit()
701 stkobjinit()
702
703 sigsave(&_g_.m.sigmask)
704 initSigmask = _g_.m.sigmask
705
706 if offset := unsafe.Offsetof(sched.timeToRun); offset%8 != 0 {
707 println(offset)
708 throw("sched.timeToRun not aligned to 8 bytes")
709 }
710
711 goargs()
712 goenvs()
713 parsedebugvars()
714 gcinit()
715
716 lock(&sched.lock)
717 sched.lastpoll = uint64(nanotime())
718 procs := ncpu
719 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
720 procs = n
721 }
722 if procresize(procs) != nil {
723 throw("unknown runnable goroutine during bootstrap")
724 }
725 unlock(&sched.lock)
726
727
728 worldStarted()
729
730
731
732
733 if debug.cgocheck > 1 {
734 writeBarrier.cgo = true
735 writeBarrier.enabled = true
736 for _, p := range allp {
737 p.wbBuf.reset()
738 }
739 }
740
741 if buildVersion == "" {
742
743
744 buildVersion = "unknown"
745 }
746 if len(modinfo) == 1 {
747
748
749 modinfo = ""
750 }
751 }
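
// schedinit sizes the initial P set from ncpu or $GOMAXPROCS. The same knob
// is available at run time (illustrative):
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		fmt.Println("procs:", runtime.GOMAXPROCS(0)) // 0 queries without changing
//		prev := runtime.GOMAXPROCS(2)                // resize the P set
//		fmt.Println("was:", prev, "now:", runtime.GOMAXPROCS(0))
//	}
//
// runtime.GOMAXPROCS stops the world to resize; the restart path runs the
// same procresize used during bootstrap above.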
752
753 func dumpgstatus(gp *g) {
754 _g_ := getg()
755 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
756 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
757 }
758
759
760 func checkmcount() {
761 assertLockHeld(&sched.lock)
762
763 if mcount() > sched.maxmcount {
764 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
765 throw("thread exhaustion")
766 }
767 }
768
769
770
771
772
773 func mReserveID() int64 {
774 assertLockHeld(&sched.lock)
775
776 if sched.mnext+1 < sched.mnext {
777 throw("runtime: thread ID overflow")
778 }
779 id := sched.mnext
780 sched.mnext++
781 checkmcount()
782 return id
783 }
784
785
786 func mcommoninit(mp *m, id int64) {
787 _g_ := getg()
788
789
790 if _g_ != _g_.m.g0 {
791 callers(1, mp.createstack[:])
792 }
793
794 lock(&sched.lock)
795
796 if id >= 0 {
797 mp.id = id
798 } else {
799 mp.id = mReserveID()
800 }
801
802 lo := uint32(int64Hash(uint64(mp.id), fastrandseed))
803 hi := uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
804 if lo|hi == 0 {
805 hi = 1
806 }
807
808
809 if goarch.BigEndian {
810 mp.fastrand = uint64(lo)<<32 | uint64(hi)
811 } else {
812 mp.fastrand = uint64(hi)<<32 | uint64(lo)
813 }
814
815 mpreinit(mp)
816 if mp.gsignal != nil {
817 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
818 }
819
820
821
822 mp.alllink = allm
823
824
825
826 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
827 unlock(&sched.lock)
828
829
830 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
831 mp.cgoCallers = new(cgoCallers)
832 }
833 }
834
835 var fastrandseed uintptr
836
837 func fastrandinit() {
838 s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
839 getRandomData(s)
840 }
841
842
843 func ready(gp *g, traceskip int, next bool) {
844 if trace.enabled {
845 traceGoUnpark(gp, traceskip)
846 }
847
848 status := readgstatus(gp)
849
850
851 _g_ := getg()
852 mp := acquirem()
853 if status&^_Gscan != _Gwaiting {
854 dumpgstatus(gp)
855 throw("bad g->status in ready")
856 }
857
858
859 casgstatus(gp, _Gwaiting, _Grunnable)
860 runqput(_g_.m.p.ptr(), gp, next)
861 wakep()
862 releasem(mp)
863 }
864
865
866
867 const freezeStopWait = 0x7fffffff
868
869
870
871 var freezing uint32
872
873
874
875
876 func freezetheworld() {
877 atomic.Store(&freezing, 1)
878
879
880
881 for i := 0; i < 5; i++ {
882
883 sched.stopwait = freezeStopWait
884 atomic.Store(&sched.gcwaiting, 1)
885
886 if !preemptall() {
887 break
888 }
889 usleep(1000)
890 }
891
892 usleep(1000)
893 preemptall()
894 usleep(1000)
895 }
896
897
898
899
900
901 func readgstatus(gp *g) uint32 {
902 return atomic.Load(&gp.atomicstatus)
903 }
904
905
906
907
908
909 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
910 success := false
911
912
913 switch oldval {
914 default:
915 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
916 dumpgstatus(gp)
917 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
918 case _Gscanrunnable,
919 _Gscanwaiting,
920 _Gscanrunning,
921 _Gscansyscall,
922 _Gscanpreempted:
923 if newval == oldval&^_Gscan {
924 success = atomic.Cas(&gp.atomicstatus, oldval, newval)
925 }
926 }
927 if !success {
928 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
929 dumpgstatus(gp)
930 throw("casfrom_Gscanstatus: gp->status is not in scan state")
931 }
932 releaseLockRank(lockRankGscan)
933 }
934
935
936
937 func castogscanstatus(gp *g, oldval, newval uint32) bool {
938 switch oldval {
939 case _Grunnable,
940 _Grunning,
941 _Gwaiting,
942 _Gsyscall:
943 if newval == oldval|_Gscan {
944 r := atomic.Cas(&gp.atomicstatus, oldval, newval)
945 if r {
946 acquireLockRank(lockRankGscan)
947 }
948 return r
949
950 }
951 }
952 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
953 throw("castogscanstatus")
954 panic("not reached")
955 }
956
957
958
959
960
961
962
963 func casgstatus(gp *g, oldval, newval uint32) {
964 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
965 systemstack(func() {
966 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
967 throw("casgstatus: bad incoming values")
968 })
969 }
970
971 acquireLockRank(lockRankGscan)
972 releaseLockRank(lockRankGscan)
973
974
975 const yieldDelay = 5 * 1000
976 var nextYield int64
977
978
979
980 for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
981 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
982 throw("casgstatus: waiting for Gwaiting but is Grunnable")
983 }
984 if i == 0 {
985 nextYield = nanotime() + yieldDelay
986 }
987 if nanotime() < nextYield {
988 for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
989 procyield(1)
990 }
991 } else {
992 osyield()
993 nextYield = nanotime() + yieldDelay/2
994 }
995 }
996
997
998 if oldval == _Grunning {
999
1000 if gp.trackingSeq%gTrackingPeriod == 0 {
1001 gp.tracking = true
1002 }
1003 gp.trackingSeq++
1004 }
1005 if gp.tracking {
1006 if oldval == _Grunnable {
1007
1008
1009
1010 now := nanotime()
1011 gp.runnableTime += now - gp.runnableStamp
1012 gp.runnableStamp = 0
1013 }
1014 if newval == _Grunnable {
1015
1016
1017 now := nanotime()
1018 gp.runnableStamp = now
1019 } else if newval == _Grunning {
1020
1021
1022
1023 gp.tracking = false
1024 sched.timeToRun.record(gp.runnableTime)
1025 gp.runnableTime = 0
1026 }
1027 }
1028 }
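
// The retry loop above spins briefly (procyield) and then yields the OS
// thread (osyield) while a racing status transition completes. The same
// bounded-spin-then-yield shape in portable user code (illustrative sketch):
//
//	package main
//
//	import (
//		"runtime"
//		"sync/atomic"
//	)
//
//	func casWithBackoff(addr *uint32, old, new uint32) {
//		for spins := 0; !atomic.CompareAndSwapUint32(addr, old, new); spins++ {
//			if spins < 10 {
//				continue // brief busy wait: the owner is likely mid-transition
//			}
//			runtime.Gosched() // stop burning CPU; let the owner finish
//			spins = 0
//		}
//	}
//
//	func main() {
//		var status uint32 // 0 = idle
//		casWithBackoff(&status, 0, 1)
//		println("status:", status)
//	}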
1029
1030
1031
1032
1033
1034
1035
1036
1037 func casgcopystack(gp *g) uint32 {
1038 for {
1039 oldstatus := readgstatus(gp) &^ _Gscan
1040 if oldstatus != _Gwaiting && oldstatus != _Grunnable {
1041 throw("copystack: bad status, not Gwaiting or Grunnable")
1042 }
1043 if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
1044 return oldstatus
1045 }
1046 }
1047 }
1048
1049
1050
1051
1052
1053 func casGToPreemptScan(gp *g, old, new uint32) {
1054 if old != _Grunning || new != _Gscan|_Gpreempted {
1055 throw("bad g transition")
1056 }
1057 acquireLockRank(lockRankGscan)
1058 for !atomic.Cas(&gp.atomicstatus, _Grunning, _Gscan|_Gpreempted) {
1059 }
1060 }
1061
1062
1063
1064
1065 func casGFromPreempted(gp *g, old, new uint32) bool {
1066 if old != _Gpreempted || new != _Gwaiting {
1067 throw("bad g transition")
1068 }
1069 return atomic.Cas(&gp.atomicstatus, _Gpreempted, _Gwaiting)
1070 }
1071
1072 // stopTheWorld stops all P's from executing goroutines, interrupting
1073 // all goroutines at GC safe points and records reason as the reason for
1074 // the stop. On return, only the current goroutine's P is running.
1075 // stopTheWorld must not be called from a system stack and the caller
1076 // must not hold worldsema. The caller must call startTheWorld when
1077 // other P's should resume executing.
1078 //
1079 // stopTheWorld is safe for multiple goroutines to call at the
1080 // same time. Each will execute its own stop, and the stops will
1081 // be serialized.
1082 //
1083 // This is also used by routines that do stack dumps. If the system is
1084 // in panic or being exited, this may not reliably stop all
1085 // goroutines.
1086 func stopTheWorld(reason string) {
1087 semacquire(&worldsema)
1088 gp := getg()
1089 gp.m.preemptoff = reason
1090 systemstack(func() {
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101 casgstatus(gp, _Grunning, _Gwaiting)
1102 stopTheWorldWithSema()
1103 casgstatus(gp, _Gwaiting, _Grunning)
1104 })
1105 }
1106
1107
1108 func startTheWorld() {
1109 systemstack(func() { startTheWorldWithSema(false) })
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126 mp := acquirem()
1127 mp.preemptoff = ""
1128 semrelease1(&worldsema, true, 0)
1129 releasem(mp)
1130 }
1131
1132
1133
1134
1135 func stopTheWorldGC(reason string) {
1136 semacquire(&gcsema)
1137 stopTheWorld(reason)
1138 }
1139
1140
1141 func startTheWorldGC() {
1142 startTheWorld()
1143 semrelease(&gcsema)
1144 }
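
// stopTheWorld surfaces in user programs through APIs that need a globally
// consistent snapshot; runtime.ReadMemStats is one such caller
// (illustrative):
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		var ms runtime.MemStats
//		runtime.ReadMemStats(&ms) // briefly stops the world, as above
//		fmt.Println("heap in use:", ms.HeapInuse, "bytes")
//	}
//
// The pause is short but real; hot paths should prefer the runtime/metrics
// package, which reads most of the same data without a global stop.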
1145
1146
1147 var worldsema uint32 = 1
1148
1149
1150
1151
1152
1153
1154
1155 var gcsema uint32 = 1
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179 func stopTheWorldWithSema() {
1180 _g_ := getg()
1181
1182
1183
1184 if _g_.m.locks > 0 {
1185 throw("stopTheWorld: holding locks")
1186 }
1187
1188 lock(&sched.lock)
1189 sched.stopwait = gomaxprocs
1190 atomic.Store(&sched.gcwaiting, 1)
1191 preemptall()
1192
1193 _g_.m.p.ptr().status = _Pgcstop
1194 sched.stopwait--
1195
1196 for _, p := range allp {
1197 s := p.status
1198 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
1199 if trace.enabled {
1200 traceGoSysBlock(p)
1201 traceProcStop(p)
1202 }
1203 p.syscalltick++
1204 sched.stopwait--
1205 }
1206 }
1207
1208 now := nanotime()
1209 for {
1210 p, _ := pidleget(now)
1211 if p == nil {
1212 break
1213 }
1214 p.status = _Pgcstop
1215 sched.stopwait--
1216 }
1217 wait := sched.stopwait > 0
1218 unlock(&sched.lock)
1219
1220
1221 if wait {
1222 for {
1223
1224 if notetsleep(&sched.stopnote, 100*1000) {
1225 noteclear(&sched.stopnote)
1226 break
1227 }
1228 preemptall()
1229 }
1230 }
1231
1232
1233 bad := ""
1234 if sched.stopwait != 0 {
1235 bad = "stopTheWorld: not stopped (stopwait != 0)"
1236 } else {
1237 for _, p := range allp {
1238 if p.status != _Pgcstop {
1239 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1240 }
1241 }
1242 }
1243 if atomic.Load(&freezing) != 0 {
1244
1245
1246
1247
1248 lock(&deadlock)
1249 lock(&deadlock)
1250 }
1251 if bad != "" {
1252 throw(bad)
1253 }
1254
1255 worldStopped()
1256 }
1257
1258 func startTheWorldWithSema(emitTraceEvent bool) int64 {
1259 assertWorldStopped()
1260
1261 mp := acquirem()
1262 if netpollinited() {
1263 list := netpoll(0)
1264 injectglist(&list)
1265 }
1266 lock(&sched.lock)
1267
1268 procs := gomaxprocs
1269 if newprocs != 0 {
1270 procs = newprocs
1271 newprocs = 0
1272 }
1273 p1 := procresize(procs)
1274 sched.gcwaiting = 0
1275 if sched.sysmonwait != 0 {
1276 sched.sysmonwait = 0
1277 notewakeup(&sched.sysmonnote)
1278 }
1279 unlock(&sched.lock)
1280
1281 worldStarted()
1282
1283 for p1 != nil {
1284 p := p1
1285 p1 = p1.link.ptr()
1286 if p.m != 0 {
1287 mp := p.m.ptr()
1288 p.m = 0
1289 if mp.nextp != 0 {
1290 throw("startTheWorld: inconsistent mp->nextp")
1291 }
1292 mp.nextp.set(p)
1293 notewakeup(&mp.park)
1294 } else {
1295
1296 newm(nil, p, -1)
1297 }
1298 }
1299
1300
1301 startTime := nanotime()
1302 if emitTraceEvent {
1303 traceGCSTWDone()
1304 }
1305
1306
1307
1308
1309 wakep()
1310
1311 releasem(mp)
1312
1313 return startTime
1314 }
1315
1316
1317
1318 func usesLibcall() bool {
1319 switch GOOS {
1320 case "aix", "darwin", "illumos", "ios", "solaris", "windows":
1321 return true
1322 case "openbsd":
1323 return GOARCH == "386" || GOARCH == "amd64" || GOARCH == "arm" || GOARCH == "arm64"
1324 }
1325 return false
1326 }
1327
1328
1329
1330 func mStackIsSystemAllocated() bool {
1331 switch GOOS {
1332 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
1333 return true
1334 case "openbsd":
1335 switch GOARCH {
1336 case "386", "amd64", "arm", "arm64":
1337 return true
1338 }
1339 }
1340 return false
1341 }
1342
1343
1344
1345 func mstart()
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356 func mstart0() {
1357 _g_ := getg()
1358
1359 osStack := _g_.stack.lo == 0
1360 if osStack {
1361
1362
1363
1364
1365
1366
1367
1368
1369 size := _g_.stack.hi
1370 if size == 0 {
1371 size = 8192 * sys.StackGuardMultiplier
1372 }
1373 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1374 _g_.stack.lo = _g_.stack.hi - size + 1024
1375 }
1376
1377
1378 _g_.stackguard0 = _g_.stack.lo + _StackGuard
1379
1380
1381 _g_.stackguard1 = _g_.stackguard0
1382 mstart1()
1383
1384
1385 if mStackIsSystemAllocated() {
1386
1387
1388
1389 osStack = true
1390 }
1391 mexit(osStack)
1392 }
1393
1394
1395
1396
1397
1398 func mstart1() {
1399 _g_ := getg()
1400
1401 if _g_ != _g_.m.g0 {
1402 throw("bad runtime·mstart")
1403 }
1404
1405
1406
1407
1408
1409
1410
1411 _g_.sched.g = guintptr(unsafe.Pointer(_g_))
1412 _g_.sched.pc = getcallerpc()
1413 _g_.sched.sp = getcallersp()
1414
1415 asminit()
1416 minit()
1417
1418
1419
1420 if _g_.m == &m0 {
1421 mstartm0()
1422 }
1423
1424 if fn := _g_.m.mstartfn; fn != nil {
1425 fn()
1426 }
1427
1428 if _g_.m != &m0 {
1429 acquirep(_g_.m.nextp.ptr())
1430 _g_.m.nextp = 0
1431 }
1432 schedule()
1433 }
1434
1435
1436
1437
1438
1439
1440
1441 func mstartm0() {
1442
1443
1444
1445 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1446 cgoHasExtraM = true
1447 newextram()
1448 }
1449 initsig(false)
1450 }
1451
1452
1453
1454
1455 func mPark() {
1456 gp := getg()
1457 notesleep(&gp.m.park)
1458 noteclear(&gp.m.park)
1459 }
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471 func mexit(osStack bool) {
1472 g := getg()
1473 m := g.m
1474
1475 if m == &m0 {
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487 handoffp(releasep())
1488 lock(&sched.lock)
1489 sched.nmfreed++
1490 checkdead()
1491 unlock(&sched.lock)
1492 mPark()
1493 throw("locked m0 woke up")
1494 }
1495
1496 sigblock(true)
1497 unminit()
1498
1499
1500 if m.gsignal != nil {
1501 stackfree(m.gsignal.stack)
1502
1503
1504
1505
1506 m.gsignal = nil
1507 }
1508
1509
1510 lock(&sched.lock)
1511 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1512 if *pprev == m {
1513 *pprev = m.alllink
1514 goto found
1515 }
1516 }
1517 throw("m not found in allm")
1518 found:
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528 m.freeWait.Store(freeMWait)
1529 m.freelink = sched.freem
1530 sched.freem = m
1531 unlock(&sched.lock)
1532
1533 atomic.Xadd64(&ncgocall, int64(m.ncgocall))
1534
1535
1536 handoffp(releasep())
1537
1538
1539
1540
1541
1542 lock(&sched.lock)
1543 sched.nmfreed++
1544 checkdead()
1545 unlock(&sched.lock)
1546
1547 if GOOS == "darwin" || GOOS == "ios" {
1548
1549
1550 if atomic.Load(&m.signalPending) != 0 {
1551 atomic.Xadd(&pendingPreemptSignals, -1)
1552 }
1553 }
1554
1555
1556
1557 mdestroy(m)
1558
1559 if osStack {
1560
1561 m.freeWait.Store(freeMRef)
1562
1563
1564
1565 return
1566 }
1567
1568
1569
1570
1571
1572 exitThread(&m.freeWait)
1573 }
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586 func forEachP(fn func(*p)) {
1587 mp := acquirem()
1588 _p_ := getg().m.p.ptr()
1589
1590 lock(&sched.lock)
1591 if sched.safePointWait != 0 {
1592 throw("forEachP: sched.safePointWait != 0")
1593 }
1594 sched.safePointWait = gomaxprocs - 1
1595 sched.safePointFn = fn
1596
1597
1598 for _, p := range allp {
1599 if p != _p_ {
1600 atomic.Store(&p.runSafePointFn, 1)
1601 }
1602 }
1603 preemptall()
1604
1605
1606
1607
1608
1609
1610
1611 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
1612 if atomic.Cas(&p.runSafePointFn, 1, 0) {
1613 fn(p)
1614 sched.safePointWait--
1615 }
1616 }
1617
1618 wait := sched.safePointWait > 0
1619 unlock(&sched.lock)
1620
1621
1622 fn(_p_)
1623
1624
1625
1626 for _, p := range allp {
1627 s := p.status
1628 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
1629 if trace.enabled {
1630 traceGoSysBlock(p)
1631 traceProcStop(p)
1632 }
1633 p.syscalltick++
1634 handoffp(p)
1635 }
1636 }
1637
1638
1639 if wait {
1640 for {
1641
1642
1643
1644
1645 if notetsleep(&sched.safePointNote, 100*1000) {
1646 noteclear(&sched.safePointNote)
1647 break
1648 }
1649 preemptall()
1650 }
1651 }
1652 if sched.safePointWait != 0 {
1653 throw("forEachP: not done")
1654 }
1655 for _, p := range allp {
1656 if p.runSafePointFn != 0 {
1657 throw("forEachP: P did not run fn")
1658 }
1659 }
1660
1661 lock(&sched.lock)
1662 sched.safePointFn = nil
1663 unlock(&sched.lock)
1664 releasem(mp)
1665 }
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678 func runSafePointFn() {
1679 p := getg().m.p.ptr()
1680
1681
1682
1683 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
1684 return
1685 }
1686 sched.safePointFn(p)
1687 lock(&sched.lock)
1688 sched.safePointWait--
1689 if sched.safePointWait == 0 {
1690 notewakeup(&sched.safePointNote)
1691 }
1692 unlock(&sched.lock)
1693 }
1694
1695
1696
1697
1698 var cgoThreadStart unsafe.Pointer
1699
1700 type cgothreadstart struct {
1701 g guintptr
1702 tls *uint64
1703 fn unsafe.Pointer
1704 }
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715 func allocm(_p_ *p, fn func(), id int64) *m {
1716 allocmLock.rlock()
1717
1718
1719
1720
1721 acquirem()
1722
1723 _g_ := getg()
1724 if _g_.m.p == 0 {
1725 acquirep(_p_)
1726 }
1727
1728
1729
1730 if sched.freem != nil {
1731 lock(&sched.lock)
1732 var newList *m
1733 for freem := sched.freem; freem != nil; {
1734 wait := freem.freeWait.Load()
1735 if wait == freeMWait {
1736 next := freem.freelink
1737 freem.freelink = newList
1738 newList = freem
1739 freem = next
1740 continue
1741 }
1742
1743
1744
1745 if wait == freeMStack {
1746
1747
1748
1749 systemstack(func() {
1750 stackfree(freem.g0.stack)
1751 })
1752 }
1753 freem = freem.freelink
1754 }
1755 sched.freem = newList
1756 unlock(&sched.lock)
1757 }
1758
1759 mp := new(m)
1760 mp.mstartfn = fn
1761 mcommoninit(mp, id)
1762
1763
1764
1765 if iscgo || mStackIsSystemAllocated() {
1766 mp.g0 = malg(-1)
1767 } else {
1768 mp.g0 = malg(8192 * sys.StackGuardMultiplier)
1769 }
1770 mp.g0.m = mp
1771
1772 if _p_ == _g_.m.p.ptr() {
1773 releasep()
1774 }
1775
1776 releasem(_g_.m)
1777 allocmLock.runlock()
1778 return mp
1779 }
1780
1781 // needm is called when a cgo callback happens on a
1782 // thread without an m (a thread not created by Go).
1783 // In this case, needm is expected to find an m to use
1784 // and return with m, g initialized correctly.
1785 // Since m and g are not set now (likely nil, but see below)
1786 // needm is limited in what routines it can call. In particular
1787 // it can only call nosplit functions (textflag 7) and cannot
1788 // do any scheduling that requires an m.
1789 //
1790 // In order to avoid needing heavy lifting here, we adopt
1791 // the following strategy: there is a stack of available m's
1792 // that can be stolen. Using compare-and-swap
1793 // to pop from the stack has ABA races, so we simulate
1794 // a lock by doing an exchange (via Casuintptr) to steal the stack
1795 // head, and by locking the thread to the m while we use it.
1816 func needm() {
1817 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1818
1819
1820
1821
1822
1823
1824 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
1825 exit(1)
1826 }
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836 var sigmask sigset
1837 sigsave(&sigmask)
1838 sigblock(false)
1839
1840
1841
1842
1843
1844 mp := lockextra(false)
1845
1846
1847
1848
1849
1850
1851
1852
1853 mp.needextram = mp.schedlink == 0
1854 extraMCount--
1855 unlockextra(mp.schedlink.ptr())
1856
1857
1858 mp.sigmask = sigmask
1859
1860
1861
1862 osSetupTLS(mp)
1863
1864
1865
1866
1867
1868
1869 setg(mp.g0)
1870 _g_ := getg()
1871 _g_.stack.hi = getcallersp() + 1024
1872 _g_.stack.lo = getcallersp() - 32*1024
1873 _g_.stackguard0 = _g_.stack.lo + _StackGuard
1874
1875
1876 asminit()
1877 minit()
1878
1879
1880 casgstatus(mp.curg, _Gdead, _Gsyscall)
1881 atomic.Xadd(&sched.ngsys, -1)
1882 }
1883
1884 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
1885
1886
1887
1888
1889 func newextram() {
1890 c := atomic.Xchg(&extraMWaiters, 0)
1891 if c > 0 {
1892 for i := uint32(0); i < c; i++ {
1893 oneNewExtraM()
1894 }
1895 } else {
1896
1897 mp := lockextra(true)
1898 unlockextra(mp)
1899 if mp == nil {
1900 oneNewExtraM()
1901 }
1902 }
1903 }
1904
1905
1906 func oneNewExtraM() {
1907
1908
1909
1910
1911
1912 mp := allocm(nil, nil, -1)
1913 gp := malg(4096)
1914 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
1915 gp.sched.sp = gp.stack.hi
1916 gp.sched.sp -= 4 * goarch.PtrSize
1917 gp.sched.lr = 0
1918 gp.sched.g = guintptr(unsafe.Pointer(gp))
1919 gp.syscallpc = gp.sched.pc
1920 gp.syscallsp = gp.sched.sp
1921 gp.stktopsp = gp.sched.sp
1922
1923
1924
1925
1926 casgstatus(gp, _Gidle, _Gdead)
1927 gp.m = mp
1928 mp.curg = gp
1929 mp.lockedInt++
1930 mp.lockedg.set(gp)
1931 gp.lockedm.set(mp)
1932 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
1933 if raceenabled {
1934 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
1935 }
1936
1937 allgadd(gp)
1938
1939
1940
1941
1942
1943 atomic.Xadd(&sched.ngsys, +1)
1944
1945
1946 mnext := lockextra(true)
1947 mp.schedlink.set(mnext)
1948 extraMCount++
1949 unlockextra(mp)
1950 }
1951 // dropm is called when a cgo callback has called needm but is now
1952 // done with the callback and returning back into the non-Go thread.
1953 // It puts the current m back onto the extra list.
1954 //
1955 // The main expense here is the call to signalstack to release the
1956 // m's signal stack, and then the call to needm on the next callback
1957 // from this thread. It is tempting to try to save the m for next time,
1958 // which would eliminate both. But there might not be a next time:
1959 // the current thread (which Go does not control) might exit.
1975 func dropm() {
1976
1977
1978
1979 mp := getg().m
1980
1981
1982 casgstatus(mp.curg, _Gsyscall, _Gdead)
1983 mp.curg.preemptStop = false
1984 atomic.Xadd(&sched.ngsys, +1)
1985
1986
1987
1988
1989
1990 sigmask := mp.sigmask
1991 sigblock(false)
1992 unminit()
1993
1994 mnext := lockextra(true)
1995 extraMCount++
1996 mp.schedlink.set(mnext)
1997
1998 setg(nil)
1999
2000
2001 unlockextra(mp)
2002
2003 msigrestore(sigmask)
2004 }
2005
2006
2007 func getm() uintptr {
2008 return uintptr(unsafe.Pointer(getg().m))
2009 }
2010
2011 var extram uintptr
2012 var extraMCount uint32
2013 var extraMWaiters uint32
2014
2015
2016
2017
2018
2019
2020
2021
2022 func lockextra(nilokay bool) *m {
2023 const locked = 1
2024
2025 incr := false
2026 for {
2027 old := atomic.Loaduintptr(&extram)
2028 if old == locked {
2029 osyield_no_g()
2030 continue
2031 }
2032 if old == 0 && !nilokay {
2033 if !incr {
2034
2035
2036
2037 atomic.Xadd(&extraMWaiters, 1)
2038 incr = true
2039 }
2040 usleep_no_g(1)
2041 continue
2042 }
2043 if atomic.Casuintptr(&extram, old, locked) {
2044 return (*m)(unsafe.Pointer(old))
2045 }
2046 osyield_no_g()
2047 continue
2048 }
2049 }
2050
2051
2052 func unlockextra(mp *m) {
2053 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
2054 }
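
// lockextra is a spinlock whose lock word doubles as the list head, with the
// sentinel value 1 meaning "locked". The same CAS spinlock shape over a plain
// flag word (illustrative sketch; a real sync.Mutex is almost always better):
//
//	package main
//
//	import (
//		"runtime"
//		"sync/atomic"
//	)
//
//	type spinLock uint32
//
//	func (l *spinLock) lock() {
//		for !atomic.CompareAndSwapUint32((*uint32)(l), 0, 1) {
//			runtime.Gosched() // analogous to osyield_no_g above
//		}
//	}
//
//	func (l *spinLock) unlock() { atomic.StoreUint32((*uint32)(l), 0) }
//
//	func main() {
//		var l spinLock
//		l.lock()
//		println("in critical section")
//		l.unlock()
//	}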
2055
2056 var (
2057
2058
2059
2060 allocmLock rwmutex
2061
2062
2063
2064
2065 execLock rwmutex
2066 )
2067
2068
2069
2070
2071 var newmHandoff struct {
2072 lock mutex
2073
2074
2075
2076 newm muintptr
2077
2078
2079
2080 waiting bool
2081 wake note
2082
2083
2084
2085
2086 haveTemplateThread uint32
2087 }
2088
2089
2090
2091
2092
2093
2094
2095
2096 func newm(fn func(), _p_ *p, id int64) {
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107 acquirem()
2108
2109 mp := allocm(_p_, fn, id)
2110 mp.nextp.set(_p_)
2111 mp.sigmask = initSigmask
2112 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124 lock(&newmHandoff.lock)
2125 if newmHandoff.haveTemplateThread == 0 {
2126 throw("on a locked thread with no template thread")
2127 }
2128 mp.schedlink = newmHandoff.newm
2129 newmHandoff.newm.set(mp)
2130 if newmHandoff.waiting {
2131 newmHandoff.waiting = false
2132 notewakeup(&newmHandoff.wake)
2133 }
2134 unlock(&newmHandoff.lock)
2135
2136
2137
2138 releasem(getg().m)
2139 return
2140 }
2141 newm1(mp)
2142 releasem(getg().m)
2143 }
2144
2145 func newm1(mp *m) {
2146 if iscgo {
2147 var ts cgothreadstart
2148 if _cgo_thread_start == nil {
2149 throw("_cgo_thread_start missing")
2150 }
2151 ts.g.set(mp.g0)
2152 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2153 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2154 if msanenabled {
2155 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2156 }
2157 if asanenabled {
2158 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2159 }
2160 execLock.rlock()
2161 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2162 execLock.runlock()
2163 return
2164 }
2165 execLock.rlock()
2166 newosproc(mp)
2167 execLock.runlock()
2168 }
2169
2170
2171
2172
2173
2174 func startTemplateThread() {
2175 if GOARCH == "wasm" {
2176 return
2177 }
2178
2179
2180
2181 mp := acquirem()
2182 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2183 releasem(mp)
2184 return
2185 }
2186 newm(templateThread, nil, -1)
2187 releasem(mp)
2188 }
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202 func templateThread() {
2203 lock(&sched.lock)
2204 sched.nmsys++
2205 checkdead()
2206 unlock(&sched.lock)
2207
2208 for {
2209 lock(&newmHandoff.lock)
2210 for newmHandoff.newm != 0 {
2211 newm := newmHandoff.newm.ptr()
2212 newmHandoff.newm = 0
2213 unlock(&newmHandoff.lock)
2214 for newm != nil {
2215 next := newm.schedlink.ptr()
2216 newm.schedlink = 0
2217 newm1(newm)
2218 newm = next
2219 }
2220 lock(&newmHandoff.lock)
2221 }
2222 newmHandoff.waiting = true
2223 noteclear(&newmHandoff.wake)
2224 unlock(&newmHandoff.lock)
2225 notesleep(&newmHandoff.wake)
2226 }
2227 }
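
// The template thread exists because a thread locked to a goroutine (or one
// created by C code) must not be used to clone new Ms. The user-facing call
// that creates this situation is runtime.LockOSThread (illustrative):
//
//	package main
//
//	import "runtime"
//
//	func main() {
//		runtime.LockOSThread()         // wire this goroutine to its thread,
//		defer runtime.UnlockOSThread() // as GUI and GL libraries require
//
//		// While locked, any newm the runtime needs on this thread's
//		// behalf is forwarded to the template thread via newmHandoff.
//		println("running on a locked OS thread")
//	}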
2228
2229
2230
2231 func stopm() {
2232 _g_ := getg()
2233
2234 if _g_.m.locks != 0 {
2235 throw("stopm holding locks")
2236 }
2237 if _g_.m.p != 0 {
2238 throw("stopm holding p")
2239 }
2240 if _g_.m.spinning {
2241 throw("stopm spinning")
2242 }
2243
2244 lock(&sched.lock)
2245 mput(_g_.m)
2246 unlock(&sched.lock)
2247 mPark()
2248 acquirep(_g_.m.nextp.ptr())
2249 _g_.m.nextp = 0
2250 }
2251
2252 func mspinning() {
2253
2254 getg().m.spinning = true
2255 }
2256
2257 // Schedules some M to run the p (creates an M if necessary).
2258 // If p==nil, tries to get an idle P, if no idle P's does nothing.
2259 // May run with m.p==nil, so write barriers are not allowed.
2260 // If spinning is set, the caller has incremented nmspinning and startm will
2261 // either decrement nmspinning or set m.spinning in the newly started M.
2262 //
2263 // Callers passing a non-nil P must call from a non-preemptible context. See
2264 // comment on acquirem below.
2265 //
2266 // Must not have write barriers because this may be called without a P.
2267 //
2268 //go:nowritebarrierrec
2269 func startm(_p_ *p, spinning bool) {
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286 mp := acquirem()
2287 lock(&sched.lock)
2288 if _p_ == nil {
2289 _p_, _ = pidleget(0)
2290 if _p_ == nil {
2291 unlock(&sched.lock)
2292 if spinning {
2293
2294
2295 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2296 throw("startm: negative nmspinning")
2297 }
2298 }
2299 releasem(mp)
2300 return
2301 }
2302 }
2303 nmp := mget()
2304 if nmp == nil {
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317 id := mReserveID()
2318 unlock(&sched.lock)
2319
2320 var fn func()
2321 if spinning {
2322
2323 fn = mspinning
2324 }
2325 newm(fn, _p_, id)
2326
2327
2328 releasem(mp)
2329 return
2330 }
2331 unlock(&sched.lock)
2332 if nmp.spinning {
2333 throw("startm: m is spinning")
2334 }
2335 if nmp.nextp != 0 {
2336 throw("startm: m has p")
2337 }
2338 if spinning && !runqempty(_p_) {
2339 throw("startm: p has runnable gs")
2340 }
2341
2342 nmp.spinning = spinning
2343 nmp.nextp.set(_p_)
2344 notewakeup(&nmp.park)
2345
2346
2347 releasem(mp)
2348 }
2349
2350 // Hands off P from syscall or locked M.
2351 // Always runs without a P, so write barriers are not allowed.
2352 //
2353 //go:nowritebarrierrec
2354 func handoffp(_p_ *p) {
2355
2356
2357
2358
2359 if !runqempty(_p_) || sched.runqsize != 0 {
2360 startm(_p_, false)
2361 return
2362 }
2363
2364 if (trace.enabled || trace.shutdown) && traceReaderAvailable() {
2365 startm(_p_, false)
2366 return
2367 }
2368
2369 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
2370 startm(_p_, false)
2371 return
2372 }
2373
2374
2375 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) {
2376 startm(_p_, true)
2377 return
2378 }
2379 lock(&sched.lock)
2380 if sched.gcwaiting != 0 {
2381 _p_.status = _Pgcstop
2382 sched.stopwait--
2383 if sched.stopwait == 0 {
2384 notewakeup(&sched.stopnote)
2385 }
2386 unlock(&sched.lock)
2387 return
2388 }
2389 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
2390 sched.safePointFn(_p_)
2391 sched.safePointWait--
2392 if sched.safePointWait == 0 {
2393 notewakeup(&sched.safePointNote)
2394 }
2395 }
2396 if sched.runqsize != 0 {
2397 unlock(&sched.lock)
2398 startm(_p_, false)
2399 return
2400 }
2401
2402
2403 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
2404 unlock(&sched.lock)
2405 startm(_p_, false)
2406 return
2407 }
2408
2409
2410
2411 when := nobarrierWakeTime(_p_)
2412 pidleput(_p_, 0)
2413 unlock(&sched.lock)
2414
2415 if when != 0 {
2416 wakeNetPoller(when)
2417 }
2418 }
2419
2420 // Tries to add one more P to execute G's.
2421 // Called when a G is made runnable (newproc, ready).
2422 func wakep() {
2423 if atomic.Load(&sched.npidle) == 0 {
2424 return
2425 }
2426
2427 if atomic.Load(&sched.nmspinning) != 0 || !atomic.Cas(&sched.nmspinning, 0, 1) {
2428 return
2429 }
2430 startm(nil, true)
2431 }
2432
2433
2434
2435 func stoplockedm() {
2436 _g_ := getg()
2437
2438 if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
2439 throw("stoplockedm: inconsistent locking")
2440 }
2441 if _g_.m.p != 0 {
2442
2443 _p_ := releasep()
2444 handoffp(_p_)
2445 }
2446 incidlelocked(1)
2447
2448 mPark()
2449 status := readgstatus(_g_.m.lockedg.ptr())
2450 if status&^_Gscan != _Grunnable {
2451 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
2452 dumpgstatus(_g_.m.lockedg.ptr())
2453 throw("stoplockedm: not runnable")
2454 }
2455 acquirep(_g_.m.nextp.ptr())
2456 _g_.m.nextp = 0
2457 }
2458
2459
2460
2461
2462
2463 func startlockedm(gp *g) {
2464 _g_ := getg()
2465
2466 mp := gp.lockedm.ptr()
2467 if mp == _g_.m {
2468 throw("startlockedm: locked to me")
2469 }
2470 if mp.nextp != 0 {
2471 throw("startlockedm: m has p")
2472 }
2473
2474 incidlelocked(-1)
2475 _p_ := releasep()
2476 mp.nextp.set(_p_)
2477 notewakeup(&mp.park)
2478 stopm()
2479 }
2480
2481
2482
2483 func gcstopm() {
2484 _g_ := getg()
2485
2486 if sched.gcwaiting == 0 {
2487 throw("gcstopm: not waiting for gc")
2488 }
2489 if _g_.m.spinning {
2490 _g_.m.spinning = false
2491
2492
2493 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2494 throw("gcstopm: negative nmspinning")
2495 }
2496 }
2497 _p_ := releasep()
2498 lock(&sched.lock)
2499 _p_.status = _Pgcstop
2500 sched.stopwait--
2501 if sched.stopwait == 0 {
2502 notewakeup(&sched.stopnote)
2503 }
2504 unlock(&sched.lock)
2505 stopm()
2506 }
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517 func execute(gp *g, inheritTime bool) {
2518 _g_ := getg()
2519
2520 if goroutineProfile.active {
2521
2522
2523
2524 tryRecordGoroutineProfile(gp, osyield)
2525 }
2526
2527
2528
2529 _g_.m.curg = gp
2530 gp.m = _g_.m
2531 casgstatus(gp, _Grunnable, _Grunning)
2532 gp.waitsince = 0
2533 gp.preempt = false
2534 gp.stackguard0 = gp.stack.lo + _StackGuard
2535 if !inheritTime {
2536 _g_.m.p.ptr().schedtick++
2537 }
2538
2539
2540 hz := sched.profilehz
2541 if _g_.m.profilehz != hz {
2542 setThreadCPUProfiler(hz)
2543 }
2544
2545 if trace.enabled {
2546
2547
2548 if gp.syscallsp != 0 && gp.sysblocktraced {
2549 traceGoSysExit(gp.sysexitticks)
2550 }
2551 traceGoStart()
2552 }
2553
2554 gogo(&gp.sched)
2555 }
2556
2557 // Finds a runnable goroutine to execute.
2558 // Tries to steal from other P's, get g from local or global queue, poll network.
2559 // tryWakeP indicates that the returned goroutine is not normal (GC worker, trace
2560 // reader) so the caller should try to wake a P.
2561 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
2562 _g_ := getg()
2563
2564
2565
2566
2567
2568 top:
2569 _p_ := _g_.m.p.ptr()
2570 if sched.gcwaiting != 0 {
2571 gcstopm()
2572 goto top
2573 }
2574 if _p_.runSafePointFn != 0 {
2575 runSafePointFn()
2576 }
2577
2578
2579
2580
2581
2582 now, pollUntil, _ := checkTimers(_p_, 0)
2583
2584
2585 if trace.enabled || trace.shutdown {
2586 gp = traceReader()
2587 if gp != nil {
2588 casgstatus(gp, _Gwaiting, _Grunnable)
2589 traceGoUnpark(gp, 0)
2590 return gp, false, true
2591 }
2592 }
2593
2594
2595 if gcBlackenEnabled != 0 {
2596 gp, now = gcController.findRunnableGCWorker(_p_, now)
2597 if gp != nil {
2598 return gp, false, true
2599 }
2600 }
2601
2602
2603
2604
2605 if _p_.schedtick%61 == 0 && sched.runqsize > 0 {
2606 lock(&sched.lock)
2607 gp = globrunqget(_p_, 1)
2608 unlock(&sched.lock)
2609 if gp != nil {
2610 return gp, false, false
2611 }
2612 }
2613
2614
2615 if fingwait && fingwake {
2616 if gp := wakefing(); gp != nil {
2617 ready(gp, 0, true)
2618 }
2619 }
2620 if *cgo_yield != nil {
2621 asmcgocall(*cgo_yield, nil)
2622 }
2623
2624
2625 if gp, inheritTime := runqget(_p_); gp != nil {
2626 return gp, inheritTime, false
2627 }
2628
2629
2630 if sched.runqsize != 0 {
2631 lock(&sched.lock)
2632 gp := globrunqget(_p_, 0)
2633 unlock(&sched.lock)
2634 if gp != nil {
2635 return gp, false, false
2636 }
2637 }
2638
2639
2640
2641
2642
2643
2644
2645
2646 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
2647 if list := netpoll(0); !list.empty() {
2648 gp := list.pop()
2649 injectglist(&list)
2650 casgstatus(gp, _Gwaiting, _Grunnable)
2651 if trace.enabled {
2652 traceGoUnpark(gp, 0)
2653 }
2654 return gp, false, false
2655 }
2656 }
2657
2658
2659
2660
2661
2662
2663 procs := uint32(gomaxprocs)
2664 if _g_.m.spinning || 2*atomic.Load(&sched.nmspinning) < procs-atomic.Load(&sched.npidle) {
2665 if !_g_.m.spinning {
2666 _g_.m.spinning = true
2667 atomic.Xadd(&sched.nmspinning, 1)
2668 }
2669
2670 gp, inheritTime, tnow, w, newWork := stealWork(now)
2671 now = tnow
2672 if gp != nil {
2673
2674 return gp, inheritTime, false
2675 }
2676 if newWork {
2677
2678
2679 goto top
2680 }
2681 if w != 0 && (pollUntil == 0 || w < pollUntil) {
2682
2683 pollUntil = w
2684 }
2685 }
2686
2687
2688
2689
2690
2691 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) && gcController.addIdleMarkWorker() {
2692 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
2693 if node != nil {
2694 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2695 gp := node.gp.ptr()
2696 casgstatus(gp, _Gwaiting, _Grunnable)
2697 if trace.enabled {
2698 traceGoUnpark(gp, 0)
2699 }
2700 return gp, false, false
2701 }
2702 gcController.removeIdleMarkWorker()
2703 }
2704
2705
2706
2707
2708
2709 gp, otherReady := beforeIdle(now, pollUntil)
2710 if gp != nil {
2711 casgstatus(gp, _Gwaiting, _Grunnable)
2712 if trace.enabled {
2713 traceGoUnpark(gp, 0)
2714 }
2715 return gp, false, false
2716 }
2717 if otherReady {
2718 goto top
2719 }
2720
2721
2722
2723
2724
2725 allpSnapshot := allp
2726
2727
2728 idlepMaskSnapshot := idlepMask
2729 timerpMaskSnapshot := timerpMask
2730
2731
2732 lock(&sched.lock)
2733 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
2734 unlock(&sched.lock)
2735 goto top
2736 }
2737 if sched.runqsize != 0 {
2738 gp := globrunqget(_p_, 0)
2739 unlock(&sched.lock)
2740 return gp, false, false
2741 }
2742 if releasep() != _p_ {
2743 throw("findrunnable: wrong p")
2744 }
2745 now = pidleput(_p_, now)
2746 unlock(&sched.lock)
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768 wasSpinning := _g_.m.spinning
2769 if _g_.m.spinning {
2770 _g_.m.spinning = false
2771 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2772 throw("findrunnable: negative nmspinning")
2773 }
2774
2775
2776
2777
2778
2779
2780
2781
2782 _p_ = checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
2783 if _p_ != nil {
2784 acquirep(_p_)
2785 _g_.m.spinning = true
2786 atomic.Xadd(&sched.nmspinning, 1)
2787 goto top
2788 }
2789
2790
2791 _p_, gp = checkIdleGCNoP()
2792 if _p_ != nil {
2793 acquirep(_p_)
2794 _g_.m.spinning = true
2795 atomic.Xadd(&sched.nmspinning, 1)
2796
2797
2798 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2799 casgstatus(gp, _Gwaiting, _Grunnable)
2800 if trace.enabled {
2801 traceGoUnpark(gp, 0)
2802 }
2803 return gp, false, false
2804 }
2805
2806
2807
2808
2809
2810
2811
2812 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
2813 }
2814
2815
2816 if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
2817 atomic.Store64(&sched.pollUntil, uint64(pollUntil))
2818 if _g_.m.p != 0 {
2819 throw("findrunnable: netpoll with p")
2820 }
2821 if _g_.m.spinning {
2822 throw("findrunnable: netpoll with spinning")
2823 }
2824
2825 now = nanotime()
2826 delay := int64(-1)
2827 if pollUntil != 0 {
2828 delay = pollUntil - now
2829 if delay < 0 {
2830 delay = 0
2831 }
2832 }
2833 if faketime != 0 {
2834
2835 delay = 0
2836 }
2837 list := netpoll(delay)
2838 atomic.Store64(&sched.pollUntil, 0)
2839 atomic.Store64(&sched.lastpoll, uint64(now))
2840 if faketime != 0 && list.empty() {
2841
2842
2843 stopm()
2844 goto top
2845 }
2846 lock(&sched.lock)
2847 _p_, _ = pidleget(now)
2848 unlock(&sched.lock)
2849 if _p_ == nil {
2850 injectglist(&list)
2851 } else {
2852 acquirep(_p_)
2853 if !list.empty() {
2854 gp := list.pop()
2855 injectglist(&list)
2856 casgstatus(gp, _Gwaiting, _Grunnable)
2857 if trace.enabled {
2858 traceGoUnpark(gp, 0)
2859 }
2860 return gp, false, false
2861 }
2862 if wasSpinning {
2863 _g_.m.spinning = true
2864 atomic.Xadd(&sched.nmspinning, 1)
2865 }
2866 goto top
2867 }
2868 } else if pollUntil != 0 && netpollinited() {
2869 pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
2870 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
2871 netpollBreak()
2872 }
2873 }
2874 stopm()
2875 goto top
2876 }
2877
2878
2879
2880
2881
2882 func pollWork() bool {
2883 if sched.runqsize != 0 {
2884 return true
2885 }
2886 p := getg().m.p.ptr()
2887 if !runqempty(p) {
2888 return true
2889 }
2890 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
2891 if list := netpoll(0); !list.empty() {
2892 injectglist(&list)
2893 return true
2894 }
2895 }
2896 return false
2897 }
2898
2899
2900
2901
2902
2903
2904
2905 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
2906 pp := getg().m.p.ptr()
2907
2908 ranTimer := false
2909
2910 const stealTries = 4
2911 for i := 0; i < stealTries; i++ {
2912 stealTimersOrRunNextG := i == stealTries-1
2913
2914 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
2915 if sched.gcwaiting != 0 {
2916
2917 return nil, false, now, pollUntil, true
2918 }
2919 p2 := allp[enum.position()]
2920 if pp == p2 {
2921 continue
2922 }
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
2938 tnow, w, ran := checkTimers(p2, now)
2939 now = tnow
2940 if w != 0 && (pollUntil == 0 || w < pollUntil) {
2941 pollUntil = w
2942 }
2943 if ran {
2944
2945
2946
2947
2948
2949
2950
2951
2952 if gp, inheritTime := runqget(pp); gp != nil {
2953 return gp, inheritTime, now, pollUntil, ranTimer
2954 }
2955 ranTimer = true
2956 }
2957 }
2958
2959
2960 if !idlepMask.read(enum.position()) {
2961 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
2962 return gp, false, now, pollUntil, ranTimer
2963 }
2964 }
2965 }
2966 }
2967
2968
2969
2970
2971 return nil, false, now, pollUntil, ranTimer
2972 }
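
// stealWork visits victims in a random order (stealOrder) to spread
// contention, and only steals timers or runnext on the final pass. A
// miniature of the random-victim-order idea with hypothetical queues
// (illustrative; the runtime steals half a queue, not one item):
//
//	package main
//
//	import (
//		"fmt"
//		"math/rand"
//	)
//
//	func steal(self int, queues [][]int) (int, bool) {
//		for _, victim := range rand.Perm(len(queues)) { // random order
//			if victim == self || len(queues[victim]) == 0 {
//				continue
//			}
//			job := queues[victim][0]
//			queues[victim] = queues[victim][1:]
//			return job, true
//		}
//		return 0, false
//	}
//
//	func main() {
//		queues := [][]int{{}, {10, 11}, {20}}
//		job, ok := steal(0, queues)
//		fmt.Println(job, ok)
//	}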
2973
2974
2975
2976
2977
2978
2979 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
2980 for id, p2 := range allpSnapshot {
2981 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
2982 lock(&sched.lock)
2983 pp, _ := pidleget(0)
2984 unlock(&sched.lock)
2985 if pp != nil {
2986 return pp
2987 }
2988
2989
2990 break
2991 }
2992 }
2993
2994 return nil
2995 }
2996
2997
2998
2999
3000 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3001 for id, p2 := range allpSnapshot {
3002 if timerpMaskSnapshot.read(uint32(id)) {
3003 w := nobarrierWakeTime(p2)
3004 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3005 pollUntil = w
3006 }
3007 }
3008 }
3009
3010 return pollUntil
3011 }
3012
3013
3014
3015
3016
3017 func checkIdleGCNoP() (*p, *g) {
3018
3019
3020
3021
3022
3023
3024 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3025 return nil, nil
3026 }
3027 if !gcMarkWorkAvailable(nil) {
3028 return nil, nil
3029 }
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048 lock(&sched.lock)
3049 pp, now := pidleget(0)
3050 if pp == nil {
3051 unlock(&sched.lock)
3052 return nil, nil
3053 }
3054
3055
3056 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3057 pidleput(pp, now)
3058 unlock(&sched.lock)
3059 return nil, nil
3060 }
3061
3062 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3063 if node == nil {
3064 pidleput(pp, now)
3065 unlock(&sched.lock)
3066 gcController.removeIdleMarkWorker()
3067 return nil, nil
3068 }
3069
3070 unlock(&sched.lock)
3071
3072 return pp, node.gp.ptr()
3073 }
3074
3075
3076
3077
3078 func wakeNetPoller(when int64) {
3079 if atomic.Load64(&sched.lastpoll) == 0 {
3080
3081
3082
3083
3084 pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
3085 if pollerPollUntil == 0 || pollerPollUntil > when {
3086 netpollBreak()
3087 }
3088 } else {
3089
3090
3091 if GOOS != "plan9" {
3092 wakep()
3093 }
3094 }
3095 }
3096
3097 func resetspinning() {
3098 _g_ := getg()
3099 if !_g_.m.spinning {
3100 throw("resetspinning: not a spinning m")
3101 }
3102 _g_.m.spinning = false
3103 nmspinning := atomic.Xadd(&sched.nmspinning, -1)
3104 if int32(nmspinning) < 0 {
3105 throw("findrunnable: negative nmspinning")
3106 }
3107
3108
3109
3110 wakep()
3111 }
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121 func injectglist(glist *gList) {
3122 if glist.empty() {
3123 return
3124 }
3125 if trace.enabled {
3126 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
3127 traceGoUnpark(gp, 0)
3128 }
3129 }
3130
3131
3132
3133 head := glist.head.ptr()
3134 var tail *g
3135 qsize := 0
3136 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
3137 tail = gp
3138 qsize++
3139 casgstatus(gp, _Gwaiting, _Grunnable)
3140 }
3141
3142
3143 var q gQueue
3144 q.head.set(head)
3145 q.tail.set(tail)
3146 *glist = gList{}
3147
3148 startIdle := func(n int) {
3149 for ; n != 0 && sched.npidle != 0; n-- {
3150 startm(nil, false)
3151 }
3152 }
3153
3154 pp := getg().m.p.ptr()
3155 if pp == nil {
3156 lock(&sched.lock)
3157 globrunqputbatch(&q, int32(qsize))
3158 unlock(&sched.lock)
3159 startIdle(qsize)
3160 return
3161 }
3162
3163 npidle := int(atomic.Load(&sched.npidle))
3164 var globq gQueue
3165 var n int
3166 for n = 0; n < npidle && !q.empty(); n++ {
3167 g := q.pop()
3168 globq.pushBack(g)
3169 }
3170 if n > 0 {
3171 lock(&sched.lock)
3172 globrunqputbatch(&globq, int32(n))
3173 unlock(&sched.lock)
3174 startIdle(n)
3175 qsize -= n
3176 }
3177
3178 if !q.empty() {
3179 runqputbatch(pp, &q, qsize)
3180 }
3181 }
3182
3183 // One round of scheduler: find a runnable goroutine and execute it.
3184 // Never returns.
3185 func schedule() {
3186 _g_ := getg()
3187
3188 if _g_.m.locks != 0 {
3189 throw("schedule: holding locks")
3190 }
3191
3192 if _g_.m.lockedg != 0 {
3193 stoplockedm()
3194 execute(_g_.m.lockedg.ptr(), false)
3195 }
3196
3197
3198
3199 if _g_.m.incgo {
3200 throw("schedule: in cgo")
3201 }
3202
3203 top:
3204 pp := _g_.m.p.ptr()
3205 pp.preempt = false
3206
3207
3208
3209
3210 if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
3211 throw("schedule: spinning with local work")
3212 }
3213
3214 gp, inheritTime, tryWakeP := findRunnable()
3215
3216
3217
3218
3219 if _g_.m.spinning {
3220 resetspinning()
3221 }
3222
3223 if sched.disable.user && !schedEnabled(gp) {
3224
3225
3226
3227 lock(&sched.lock)
3228 if schedEnabled(gp) {
3229
3230
3231 unlock(&sched.lock)
3232 } else {
3233 sched.disable.runnable.pushBack(gp)
3234 sched.disable.n++
3235 unlock(&sched.lock)
3236 goto top
3237 }
3238 }
3239
3240
3241
3242 if tryWakeP {
3243 wakep()
3244 }
3245 if gp.lockedm != 0 {
3246
3247
3248 startlockedm(gp)
3249 goto top
3250 }
3251
3252 execute(gp, inheritTime)
3253 }
3254
3255
3256
3257
3258
3259
3260
3261
3262 func dropg() {
3263 _g_ := getg()
3264
3265 setMNoWB(&_g_.m.curg.m, nil)
3266 setGNoWB(&_g_.m.curg, nil)
3267 }
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279 func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
3280
3281
3282 next := int64(atomic.Load64(&pp.timer0When))
3283 nextAdj := int64(atomic.Load64(&pp.timerModifiedEarliest))
3284 if next == 0 || (nextAdj != 0 && nextAdj < next) {
3285 next = nextAdj
3286 }
3287
3288 if next == 0 {
3289
3290 return now, 0, false
3291 }
3292
3293 if now == 0 {
3294 now = nanotime()
3295 }
3296 if now < next {
3297
3298
3299
3300
3301 if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) {
3302 return now, next, false
3303 }
3304 }
3305
3306 lock(&pp.timersLock)
3307
3308 if len(pp.timers) > 0 {
3309 adjusttimers(pp, now)
3310 for len(pp.timers) > 0 {
3311
3312
3313 if tw := runtimer(pp, now); tw != 0 {
3314 if tw > 0 {
3315 pollUntil = tw
3316 }
3317 break
3318 }
3319 ran = true
3320 }
3321 }
3322
3323
3324
3325
3326 if pp == getg().m.p.ptr() && int(atomic.Load(&pp.deletedTimers)) > len(pp.timers)/4 {
3327 clearDeletedTimers(pp)
3328 }
3329
3330 unlock(&pp.timersLock)
3331
3332 return now, pollUntil, ran
3333 }
3334
3335 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
3336 unlock((*mutex)(lock))
3337 return true
3338 }
3339
3340
3341 func park_m(gp *g) {
3342 _g_ := getg()
3343
3344 if trace.enabled {
3345 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
3346 }
3347
3348 casgstatus(gp, _Grunning, _Gwaiting)
3349 dropg()
3350
3351 if fn := _g_.m.waitunlockf; fn != nil {
3352 ok := fn(gp, _g_.m.waitlock)
3353 _g_.m.waitunlockf = nil
3354 _g_.m.waitlock = nil
3355 if !ok {
3356 if trace.enabled {
3357 traceGoUnpark(gp, 2)
3358 }
3359 casgstatus(gp, _Gwaiting, _Grunnable)
3360 execute(gp, true)
3361 }
3362 }
3363 schedule()
3364 }
3365
3366 func goschedImpl(gp *g) {
3367 status := readgstatus(gp)
3368 if status&^_Gscan != _Grunning {
3369 dumpgstatus(gp)
3370 throw("bad g status")
3371 }
3372 casgstatus(gp, _Grunning, _Grunnable)
3373 dropg()
3374 lock(&sched.lock)
3375 globrunqput(gp)
3376 unlock(&sched.lock)
3377
3378 schedule()
3379 }
3380
3381
3382 func gosched_m(gp *g) {
3383 if trace.enabled {
3384 traceGoSched()
3385 }
3386 goschedImpl(gp)
3387 }
3388
3389
3390 func goschedguarded_m(gp *g) {
3391
3392 if !canPreemptM(gp.m) {
3393 gogo(&gp.sched)
3394 }
3395
3396 if trace.enabled {
3397 traceGoSched()
3398 }
3399 goschedImpl(gp)
3400 }
3401
3402 func gopreempt_m(gp *g) {
3403 if trace.enabled {
3404 traceGoPreempt()
3405 }
3406 goschedImpl(gp)
3407 }
3408
3409
3410
3411
3412 func preemptPark(gp *g) {
3413 if trace.enabled {
3414 traceGoPark(traceEvGoBlock, 0)
3415 }
3416 status := readgstatus(gp)
3417 if status&^_Gscan != _Grunning {
3418 dumpgstatus(gp)
3419 throw("bad g status")
3420 }
3421 gp.waitreason = waitReasonPreempted
3422
3423 if gp.asyncSafePoint {
3424
3425
3426
3427 f := findfunc(gp.sched.pc)
3428 if !f.valid() {
3429 throw("preempt at unknown pc")
3430 }
3431 if f.flag&funcFlag_SPWRITE != 0 {
3432 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
3433 throw("preempt SPWRITE")
3434 }
3435 }
3436
3437
3438
3439
3440
3441
3442
3443 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
3444 dropg()
3445 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
3446 schedule()
3447 }
3448
3449
3450
3451
3452 func goyield() {
3453 checkTimeouts()
3454 mcall(goyield_m)
3455 }
3456
3457 func goyield_m(gp *g) {
3458 if trace.enabled {
3459 traceGoPreempt()
3460 }
3461 pp := gp.m.p.ptr()
3462 casgstatus(gp, _Grunning, _Grunnable)
3463 dropg()
3464 runqput(pp, gp, false)
3465 schedule()
3466 }
3467
3468 // Finishes execution of the current goroutine.
3469 func goexit1() {
3470 if raceenabled {
3471 racegoend()
3472 }
3473 if trace.enabled {
3474 traceGoEnd()
3475 }
3476 mcall(goexit0)
3477 }
3478
3479 // goexit continuation on g0.
3480 func goexit0(gp *g) {
3481 _g_ := getg()
3482 _p_ := _g_.m.p.ptr()
3483
3484 casgstatus(gp, _Grunning, _Gdead)
3485 gcController.addScannableStack(_p_, -int64(gp.stack.hi-gp.stack.lo))
3486 if isSystemGoroutine(gp, false) {
3487 atomic.Xadd(&sched.ngsys, -1)
3488 }
3489 gp.m = nil
3490 locked := gp.lockedm != 0
3491 gp.lockedm = 0
3492 _g_.m.lockedg = 0
3493 gp.preemptStop = false
3494 gp.paniconfault = false
3495 gp._defer = nil
3496 gp._panic = nil
3497 gp.writebuf = nil
3498 gp.waitreason = 0
3499 gp.param = nil
3500 gp.labels = nil
3501 gp.timer = nil
3502
3503 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
3504
3505
3506
3507 assistWorkPerByte := gcController.assistWorkPerByte.Load()
3508 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
3509 atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
3510 gp.gcAssistBytes = 0
3511 }
3512
3513 dropg()
3514
3515 if GOARCH == "wasm" {
3516 gfput(_p_, gp)
3517 schedule()
3518 }
3519
3520 if _g_.m.lockedInt != 0 {
3521 print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
3522 throw("internal lockOSThread error")
3523 }
3524 gfput(_p_, gp)
3525 if locked {
3526 // The goroutine may have locked this thread because
3527 // it put it in an unusual kernel state. Kill it
3528 // rather than returning it to the thread pool.
3529
3530 // Return to mstart, which will release the P and exit
3531 // the thread.
3532 if GOOS != "plan9" {
3533 gogo(&_g_.m.g0.sched)
3534 } else {
3535
3536
3537 _g_.m.lockedExt = 0
3538 }
3539 }
3540 schedule()
3541 }
3542
3543 // save updates getg().sched to refer to pc and sp so that a following
3544 // gogo will restore pc/sp.
3545 //
3546 // save must not have write barriers because invoking a write barrier
3547 // can clobber getg().sched.
3548 //
3549 //go:nosplit
3550 //go:nowritebarrierrec
3551 func save(pc, sp uintptr) {
3552 _g_ := getg()
3553
3554 if _g_ == _g_.m.g0 || _g_ == _g_.m.gsignal {
3555
3556
3557
3558
3559
3560 throw("save on system g not allowed")
3561 }
3562
3563 _g_.sched.pc = pc
3564 _g_.sched.sp = sp
3565 _g_.sched.lr = 0
3566 _g_.sched.ret = 0
3567
3568
3569
3570 if _g_.sched.ctxt != nil {
3571 badctxt()
3572 }
3573 }
3574
3575
3576 // The goroutine g is about to enter a system call.
3577 // Record that it's not using the cpu anymore.
3578 // This is called only from the go syscall library and cgocall,
3579 // not from the low-level system calls used by the runtime.
3580 //
3581 // Entersyscall cannot split the stack: the save must
3582 // make g->sched refer to the caller's stack segment, because
3583 // entersyscall is going to return immediately after.
3584 //
3585 // Nothing entersyscall calls can split the stack either.
3586 // We cannot safely move the stack during an active call to syscall,
3587 // because we do not know which of the uintptr arguments are
3588 // really pointers (back into the stack).
3589 // In practice, this means that we make the fast path run through
3590 // entersyscall doing no-split things, and the slow path has to use systemstack
3591 // to run bigger things on the system stack.
3592 //
3593 // reentersyscall is the entry point used by cgo callbacks, where explicitly
3594 // saved SP and PC are restored. This is needed when exitsyscall will be called
3595 // from a function further up in the call stack than the parent, as g->syscallsp
3596 // must always point to a valid stack frame. entersyscall below is the normal
3597 // entry point for syscalls, which obtains the SP and PC from the caller.
3598 //
3599 // Syscall tracing:
3600 // At the start of a syscall we emit traceGoSysCall to capture the stack trace.
3601 // If the syscall does not block, that is it, we do not emit any other events.
3602 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
3603 // when the syscall returns we emit traceGoSysExit, and when the goroutine
3604 // starts running again we emit traceGoStart.
3605 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
3606 // we remember the current value of syscalltick in m
3607 // (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick), whoever emits
3608 // traceGoSysBlock increments p.syscalltick afterwards,
3609 // and we wait for that increment before emitting traceGoSysExit.
3610 // The increment is done even if tracing is not enabled, because tracing can be
3611 // enabled in the middle of a syscall and we don't want the wait to hang.
//go:nosplit
3612 func reentersyscall(pc, sp uintptr) {
3613 _g_ := getg()
3614
3615
3616
3617 _g_.m.locks++
3618
3619
3620
3621
3622
3623 _g_.stackguard0 = stackPreempt
3624 _g_.throwsplit = true
3625
3626
3627 save(pc, sp)
3628 _g_.syscallsp = sp
3629 _g_.syscallpc = pc
3630 casgstatus(_g_, _Grunning, _Gsyscall)
3631 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
3632 systemstack(func() {
3633 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3634 throw("entersyscall")
3635 })
3636 }
3637
3638 if trace.enabled {
3639 systemstack(traceGoSysCall)
3640
3641
3642
3643 save(pc, sp)
3644 }
3645
3646 if atomic.Load(&sched.sysmonwait) != 0 {
3647 systemstack(entersyscall_sysmon)
3648 save(pc, sp)
3649 }
3650
3651 if _g_.m.p.ptr().runSafePointFn != 0 {
3652
3653 systemstack(runSafePointFn)
3654 save(pc, sp)
3655 }
3656
3657 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
3658 _g_.sysblocktraced = true
3659 pp := _g_.m.p.ptr()
3660 pp.m = 0
3661 _g_.m.oldp.set(pp)
3662 _g_.m.p = 0
3663 atomic.Store(&pp.status, _Psyscall)
3664 if sched.gcwaiting != 0 {
3665 systemstack(entersyscall_gcwait)
3666 save(pc, sp)
3667 }
3668
3669 _g_.m.locks--
3670 }
3671
3672 // Standard syscall entry used by the go syscall library and normal cgo calls.
3673 //
3674 // This is exported via linkname to assembly in the syscall package and x/sys.
3675 //
3676 //go:nosplit
3677 //go:linkname entersyscall
3678 func entersyscall() {
3679 reentersyscall(getcallerpc(), getcallersp())
3680 }
3681
3682 func entersyscall_sysmon() {
3683 lock(&sched.lock)
3684 if atomic.Load(&sched.sysmonwait) != 0 {
3685 atomic.Store(&sched.sysmonwait, 0)
3686 notewakeup(&sched.sysmonnote)
3687 }
3688 unlock(&sched.lock)
3689 }
3690
3691 func entersyscall_gcwait() {
3692 _g_ := getg()
3693 _p_ := _g_.m.oldp.ptr()
3694
3695 lock(&sched.lock)
3696 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
3697 if trace.enabled {
3698 traceGoSysBlock(_p_)
3699 traceProcStop(_p_)
3700 }
3701 _p_.syscalltick++
3702 if sched.stopwait--; sched.stopwait == 0 {
3703 notewakeup(&sched.stopnote)
3704 }
3705 }
3706 unlock(&sched.lock)
3707 }
3708
3709 // The same as entersyscall(), but with a hint that the syscall is blocking.
3710 //
3711 //go:nosplit
3712 func entersyscallblock() {
3713 _g_ := getg()
3714
3715 _g_.m.locks++
3716 _g_.throwsplit = true
3717 _g_.stackguard0 = stackPreempt
3718 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
3719 _g_.sysblocktraced = true
3720 _g_.m.p.ptr().syscalltick++
3721
3722
3723 pc := getcallerpc()
3724 sp := getcallersp()
3725 save(pc, sp)
3726 _g_.syscallsp = _g_.sched.sp
3727 _g_.syscallpc = _g_.sched.pc
3728 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
3729 sp1 := sp
3730 sp2 := _g_.sched.sp
3731 sp3 := _g_.syscallsp
3732 systemstack(func() {
3733 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3734 throw("entersyscallblock")
3735 })
3736 }
3737 casgstatus(_g_, _Grunning, _Gsyscall)
3738 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
3739 systemstack(func() {
3740 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3741 throw("entersyscallblock")
3742 })
3743 }
3744
3745 systemstack(entersyscallblock_handoff)
3746
3747
3748 save(getcallerpc(), getcallersp())
3749
3750 _g_.m.locks--
3751 }
3752
3753 func entersyscallblock_handoff() {
3754 if trace.enabled {
3755 traceGoSysCall()
3756 traceGoSysBlock(getg().m.p.ptr())
3757 }
3758 handoffp(releasep())
3759 }
3760
3761 // The goroutine g exited its system call.
3762 // Arrange for it to run on a cpu again.
3763 // This is called only from the go syscall library, not
3764 // from the low-level system calls used by the runtime.
3765 //
3766 // Write barriers are not allowed because our P may have been stolen.
3767 //
3768 // This is exported via linkname to assembly in the syscall package.
3769 //
3770 //go:nosplit
3771 //go:nowritebarrierrec
3772 //go:linkname exitsyscall
3773 func exitsyscall() {
3774 _g_ := getg()
3775
3776 _g_.m.locks++
3777 if getcallersp() > _g_.syscallsp {
3778 throw("exitsyscall: syscall frame is no longer valid")
3779 }
3780
3781 _g_.waitsince = 0
3782 oldp := _g_.m.oldp.ptr()
3783 _g_.m.oldp = 0
3784 if exitsyscallfast(oldp) {
3785 // When exitsyscallfast returns success, we have a P so can now use
3786 // write barriers.
3787 if goroutineProfile.active {
3788
3789
3790
3791 systemstack(func() {
3792 tryRecordGoroutineProfileWB(_g_)
3793 })
3794 }
3795 if trace.enabled {
3796 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
3797 systemstack(traceGoStart)
3798 }
3799 }
3800
3801 _g_.m.p.ptr().syscalltick++
3802
3803 casgstatus(_g_, _Gsyscall, _Grunning)
3804
3805
3806
3807 _g_.syscallsp = 0
3808 _g_.m.locks--
3809 if _g_.preempt {
3810
3811 _g_.stackguard0 = stackPreempt
3812 } else {
3813
3814 _g_.stackguard0 = _g_.stack.lo + _StackGuard
3815 }
3816 _g_.throwsplit = false
3817
3818 if sched.disable.user && !schedEnabled(_g_) {
3819
3820 Gosched()
3821 }
3822
3823 return
3824 }
3825
3826 _g_.sysexitticks = 0
3827 if trace.enabled {
3828
3829
3830 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
3831 osyield()
3832 }
3833 // We can't trace syscall exit right now because we don't have a P.
3834 // Tracing code can invoke write barriers that cannot run without a P.
3835 // So instead we remember the syscall exit time and emit the event
3836 // in execute when we have a P.
3837 _g_.sysexitticks = cputicks()
3838 }
3839
3840 _g_.m.locks--
3841
3842
3843 mcall(exitsyscall0)
3844 // Scheduler returned, so we're allowed to run now.
3845 // Delete the syscallsp information that we left for
3846 // the garbage collector during the system call.
3847 // Must wait until now because until gosched returns
3848 // we don't know for sure that the garbage collector
3849 // is not running.
3850
3851 _g_.syscallsp = 0
3852 _g_.m.p.ptr().syscalltick++
3853 _g_.throwsplit = false
3854 }
3855
3856
3857 func exitsyscallfast(oldp *p) bool {
3858 _g_ := getg()
3859
3860
3861 if sched.stopwait == freezeStopWait {
3862 return false
3863 }
3864
3865
3866 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
3867
3868 wirep(oldp)
3869 exitsyscallfast_reacquired()
3870 return true
3871 }
3872
3873
3874 if sched.pidle != 0 {
3875 var ok bool
3876 systemstack(func() {
3877 ok = exitsyscallfast_pidle()
3878 if ok && trace.enabled {
3879 if oldp != nil {
3880
3881
3882 for oldp.syscalltick == _g_.m.syscalltick {
3883 osyield()
3884 }
3885 }
3886 traceGoSysExit(0)
3887 }
3888 })
3889 if ok {
3890 return true
3891 }
3892 }
3893 return false
3894 }
3895
3896 // exitsyscallfast_reacquired is the exitsyscall path on which this G
3897 // has successfully reacquired the P it was running on before the
3898 // syscall.
3899 //
3900 //go:nosplit
3901 func exitsyscallfast_reacquired() {
3902 _g_ := getg()
3903 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
3904 if trace.enabled {
3905
3906
3907
3908 systemstack(func() {
3909
3910 traceGoSysBlock(_g_.m.p.ptr())
3911
3912 traceGoSysExit(0)
3913 })
3914 }
3915 _g_.m.p.ptr().syscalltick++
3916 }
3917 }
3918
3919 func exitsyscallfast_pidle() bool {
3920 lock(&sched.lock)
3921 _p_, _ := pidleget(0)
3922 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
3923 atomic.Store(&sched.sysmonwait, 0)
3924 notewakeup(&sched.sysmonnote)
3925 }
3926 unlock(&sched.lock)
3927 if _p_ != nil {
3928 acquirep(_p_)
3929 return true
3930 }
3931 return false
3932 }
3933
3934 // exitsyscall slow path on g0.
3935 // Failed to acquire P, enqueue gp as runnable.
3936 //
3937 // Called via mcall, so gp is the calling g from this M.
3938 //
3939 //go:nowritebarrierrec
3940 func exitsyscall0(gp *g) {
3941 casgstatus(gp, _Gsyscall, _Grunnable)
3942 dropg()
3943 lock(&sched.lock)
3944 var _p_ *p
3945 if schedEnabled(gp) {
3946 _p_, _ = pidleget(0)
3947 }
3948 var locked bool
3949 if _p_ == nil {
3950 globrunqput(gp)
3951
3952
3953
3954
3955
3956
3957 locked = gp.lockedm != 0
3958 } else if atomic.Load(&sched.sysmonwait) != 0 {
3959 atomic.Store(&sched.sysmonwait, 0)
3960 notewakeup(&sched.sysmonnote)
3961 }
3962 unlock(&sched.lock)
3963 if _p_ != nil {
3964 acquirep(_p_)
3965 execute(gp, false)
3966 }
3967 if locked {
3968
3969
3970
3971
3972 stoplockedm()
3973 execute(gp, false)
3974 }
3975 stopm()
3976 schedule()
3977 }
3978
3979 // Called from syscall package before fork.
3980 //
3981 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
3982 //go:nosplit
3983 func syscall_runtime_BeforeFork() {
3984 gp := getg().m.curg
3985
3986
3987
3988
3989 gp.m.locks++
3990 sigsave(&gp.m.sigmask)
3991 sigblock(false)
3992
3993
3994
3995
3996
3997 gp.stackguard0 = stackFork
3998 }
3999
4000 // Called from syscall package after fork in parent.
4001 //
4002 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
4003 //go:nosplit
4004 func syscall_runtime_AfterFork() {
4005 gp := getg().m.curg
4006
4007
4008 gp.stackguard0 = gp.stack.lo + _StackGuard
4009
4010 msigrestore(gp.m.sigmask)
4011
4012 gp.m.locks--
4013 }
4014
4015 // inForkedChild is true while manipulating signals in the child process.
4016 // This is used to avoid calling libc functions in case we are using vfork.
4017 var inForkedChild bool
4018
4019 // Called from syscall package after fork in child.
4020 // It resets non-sigignored signals to the default handler, and
4021 // restores the signal mask in preparation for the exec.
4022 //
4023 // Because this might be called during a vfork, and therefore may be
4024 // temporarily sharing address space with the parent process, this must
4025 // not change any global variables or calling into C code that may do so.
4026 //
4027 //go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild
4028 //go:nosplit
4029 //go:nowritebarrierrec
4030 func syscall_runtime_AfterForkInChild() {
4031
4032
4033
4034
4035 inForkedChild = true
4036
4037 clearSignalHandlers()
4038
4039
4040
4041 msigrestore(getg().m.sigmask)
4042
4043 inForkedChild = false
4044 }
4045
4046 // pendingPreemptSignals is the number of preemption signals
4047 // that have been sent but not received. This is only used on Darwin.
4048 // For #41702.
4049 var pendingPreemptSignals uint32
4050
4051 // Called from syscall package before Exec.
4052 //
4053 //go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec
4054 func syscall_runtime_BeforeExec() {
4055
4056 execLock.lock()
4057
4058
4059
4060 if GOOS == "darwin" || GOOS == "ios" {
4061 for int32(atomic.Load(&pendingPreemptSignals)) > 0 {
4062 osyield()
4063 }
4064 }
4065 }
4066
4067 // Called from syscall package after Exec.
4068 //
4069 //go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec
4070 func syscall_runtime_AfterExec() {
4071 execLock.unlock()
4072 }
4073
4074 // Allocate a new g, with a stack big enough for stacksize bytes.
4075 func malg(stacksize int32) *g {
4076 newg := new(g)
4077 if stacksize >= 0 {
4078 stacksize = round2(_StackSystem + stacksize)
4079 systemstack(func() {
4080 newg.stack = stackalloc(uint32(stacksize))
4081 })
4082 newg.stackguard0 = newg.stack.lo + _StackGuard
4083 newg.stackguard1 = ^uintptr(0)
4084
4085
4086 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
4087 }
4088 return newg
4089 }
4090
4091 // Create a new g running fn.
4092 // Put it on the queue of g's waiting to run.
4093 // The compiler turns a go statement into a call to this.
4094 func newproc(fn *funcval) {
4095 gp := getg()
4096 pc := getcallerpc()
4097 systemstack(func() {
4098 newg := newproc1(fn, gp, pc)
4099
4100 _p_ := getg().m.p.ptr()
4101 runqput(_p_, newg, true)
4102
4103 if mainStarted {
4104 wakep()
4105 }
4106 })
4107 }
4108
4109 // Create a new g in state _Grunnable, starting at fn. callerpc is the
4110 // address of the go statement that created this. The caller is responsible
4111 // for adding the new g to the scheduler.
4112 func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
4113 _g_ := getg()
4114
4115 if fn == nil {
4116 fatal("go of nil func value")
4117 }
4118 acquirem()
4119
4120 _p_ := _g_.m.p.ptr()
4121 newg := gfget(_p_)
4122 if newg == nil {
4123 newg = malg(_StackMin)
4124 casgstatus(newg, _Gidle, _Gdead)
4125 allgadd(newg)
4126 }
4127 if newg.stack.hi == 0 {
4128 throw("newproc1: newg missing stack")
4129 }
4130
4131 if readgstatus(newg) != _Gdead {
4132 throw("newproc1: new g is not Gdead")
4133 }
4134
4135 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
4136 totalSize = alignUp(totalSize, sys.StackAlign)
4137 sp := newg.stack.hi - totalSize
4138 spArg := sp
4139 if usesLR {
4140
4141 *(*uintptr)(unsafe.Pointer(sp)) = 0
4142 prepGoExitFrame(sp)
4143 spArg += sys.MinFrameSize
4144 }
4145
4146 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
4147 newg.sched.sp = sp
4148 newg.stktopsp = sp
4149 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
4150 newg.sched.g = guintptr(unsafe.Pointer(newg))
4151 gostartcallfn(&newg.sched, fn)
4152 newg.gopc = callerpc
4153 newg.ancestors = saveAncestors(callergp)
4154 newg.startpc = fn.fn
4155 if isSystemGoroutine(newg, false) {
4156 atomic.Xadd(&sched.ngsys, +1)
4157 } else {
4158
4159 if _g_.m.curg != nil {
4160 newg.labels = _g_.m.curg.labels
4161 }
4162 if goroutineProfile.active {
4163
4164
4165
4166
4167
4168 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
4169 }
4170 }
4171
4172 newg.trackingSeq = uint8(fastrand())
4173 if newg.trackingSeq%gTrackingPeriod == 0 {
4174 newg.tracking = true
4175 }
4176 casgstatus(newg, _Gdead, _Grunnable)
4177 gcController.addScannableStack(_p_, int64(newg.stack.hi-newg.stack.lo))
4178
4179 if _p_.goidcache == _p_.goidcacheend {
4180
4181
4182
4183 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
4184 _p_.goidcache -= _GoidCacheBatch - 1
4185 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
4186 }
4187 newg.goid = int64(_p_.goidcache)
4188 _p_.goidcache++
4189 if raceenabled {
4190 newg.racectx = racegostart(callerpc)
4191 if newg.labels != nil {
4192
4193
4194 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
4195 }
4196 }
4197 if trace.enabled {
4198 traceGoCreate(newg, newg.startpc)
4199 }
4200 releasem(_g_.m)
4201
4202 return newg
4203 }
4204
4205 // saveAncestors copies previous ancestors of the given caller g and
4206 // includes info for the current caller into a new set of tracebacks for
4207 // a g being created.
4208 func saveAncestors(callergp *g) *[]ancestorInfo {
4209
4210 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
4211 return nil
4212 }
4213 var callerAncestors []ancestorInfo
4214 if callergp.ancestors != nil {
4215 callerAncestors = *callergp.ancestors
4216 }
4217 n := int32(len(callerAncestors)) + 1
4218 if n > debug.tracebackancestors {
4219 n = debug.tracebackancestors
4220 }
4221 ancestors := make([]ancestorInfo, n)
4222 copy(ancestors[1:], callerAncestors)
4223
4224 var pcs [_TracebackMaxFrames]uintptr
4225 npcs := gcallers(callergp, 0, pcs[:])
4226 ipcs := make([]uintptr, npcs)
4227 copy(ipcs, pcs[:])
4228 ancestors[0] = ancestorInfo{
4229 pcs: ipcs,
4230 goid: callergp.goid,
4231 gopc: callergp.gopc,
4232 }
4233
4234 ancestorsp := new([]ancestorInfo)
4235 *ancestorsp = ancestors
4236 return ancestorsp
4237 }
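// Illustrative usage note (not part of the original source): ancestor
// tracking is off by default and enabled via the tracebackancestors
// GODEBUG setting, e.g.
//
//	GODEBUG=tracebackancestors=5 ./program
//
// which records up to 5 levels of "created by" ancestry that then appear
// in goroutine tracebacks.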
4238
4239 // Put on gfree list.
4240 // If local list is too long, transfer a batch to the global list.
4241 func gfput(_p_ *p, gp *g) {
4242 if readgstatus(gp) != _Gdead {
4243 throw("gfput: bad status (not Gdead)")
4244 }
4245
4246 stksize := gp.stack.hi - gp.stack.lo
4247
4248 if stksize != uintptr(startingStackSize) {
4249
4250 stackfree(gp.stack)
4251 gp.stack.lo = 0
4252 gp.stack.hi = 0
4253 gp.stackguard0 = 0
4254 }
4255
4256 _p_.gFree.push(gp)
4257 _p_.gFree.n++
4258 if _p_.gFree.n >= 64 {
4259 var (
4260 inc int32
4261 stackQ gQueue
4262 noStackQ gQueue
4263 )
4264 for _p_.gFree.n >= 32 {
4265 gp = _p_.gFree.pop()
4266 _p_.gFree.n--
4267 if gp.stack.lo == 0 {
4268 noStackQ.push(gp)
4269 } else {
4270 stackQ.push(gp)
4271 }
4272 inc++
4273 }
4274 lock(&sched.gFree.lock)
4275 sched.gFree.noStack.pushAll(noStackQ)
4276 sched.gFree.stack.pushAll(stackQ)
4277 sched.gFree.n += inc
4278 unlock(&sched.gFree.lock)
4279 }
4280 }
4281
4282 // Get from gfree list.
4283 // If local list is empty, grab a batch from global list.
4284 func gfget(_p_ *p) *g {
4285 retry:
4286 if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
4287 lock(&sched.gFree.lock)
4288
4289 for _p_.gFree.n < 32 {
4290
4291 gp := sched.gFree.stack.pop()
4292 if gp == nil {
4293 gp = sched.gFree.noStack.pop()
4294 if gp == nil {
4295 break
4296 }
4297 }
4298 sched.gFree.n--
4299 _p_.gFree.push(gp)
4300 _p_.gFree.n++
4301 }
4302 unlock(&sched.gFree.lock)
4303 goto retry
4304 }
4305 gp := _p_.gFree.pop()
4306 if gp == nil {
4307 return nil
4308 }
4309 _p_.gFree.n--
4310 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
4311
4312
4313
4314 systemstack(func() {
4315 stackfree(gp.stack)
4316 gp.stack.lo = 0
4317 gp.stack.hi = 0
4318 gp.stackguard0 = 0
4319 })
4320 }
4321 if gp.stack.lo == 0 {
4322
4323 systemstack(func() {
4324 gp.stack = stackalloc(startingStackSize)
4325 })
4326 gp.stackguard0 = gp.stack.lo + _StackGuard
4327 } else {
4328 if raceenabled {
4329 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4330 }
4331 if msanenabled {
4332 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4333 }
4334 if asanenabled {
4335 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4336 }
4337 }
4338 return gp
4339 }
4340
4341 // Purge all cached G's from gfree list to the global list.
4342 func gfpurge(_p_ *p) {
4343 var (
4344 inc int32
4345 stackQ gQueue
4346 noStackQ gQueue
4347 )
4348 for !_p_.gFree.empty() {
4349 gp := _p_.gFree.pop()
4350 _p_.gFree.n--
4351 if gp.stack.lo == 0 {
4352 noStackQ.push(gp)
4353 } else {
4354 stackQ.push(gp)
4355 }
4356 inc++
4357 }
4358 lock(&sched.gFree.lock)
4359 sched.gFree.noStack.pushAll(noStackQ)
4360 sched.gFree.stack.pushAll(stackQ)
4361 sched.gFree.n += inc
4362 unlock(&sched.gFree.lock)
4363 }
4364
4365
4366 func Breakpoint() {
4367 breakpoint()
4368 }
4369
4370 // dolockOSThread is called by LockOSThread and lockOSThread below
4371 // after they modify m.locked. Do not allow preemption during this call,
4372 // or else the m might be different in this function than in the caller.
4373 //
4374 //go:nosplit
4375 func dolockOSThread() {
4376 if GOARCH == "wasm" {
4377 return
4378 }
4379 _g_ := getg()
4380 _g_.m.lockedg.set(_g_)
4381 _g_.lockedm.set(_g_.m)
4382 }
4383
4384 // LockOSThread wires the calling goroutine to its current operating system thread.
4385 // The calling goroutine will always execute in that thread,
4386 // and no other goroutine will execute in it,
4387 // until the calling goroutine has made as many calls to
4388 // UnlockOSThread as to LockOSThread.
4389 // If the calling goroutine exits without unlocking the thread,
4390 // the thread will be terminated.
4391 //
4392 // All init functions are run on the startup thread. Calling LockOSThread
4393 // from an init function will cause the main function to be invoked on
4394 // that thread.
4395 //
4396 // A goroutine should call LockOSThread before calling OS services or
4397 // non-Go library functions that depend on per-thread state.
4398 //
4399 //go:nosplit
4400 func LockOSThread() {
4401 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
4402
4403
4404
4405 startTemplateThread()
4406 }
4407 _g_ := getg()
4408 _g_.m.lockedExt++
4409 if _g_.m.lockedExt == 0 {
4410 _g_.m.lockedExt--
4411 panic("LockOSThread nesting overflow")
4412 }
4413 dolockOSThread()
4414 }
4415
4416
4417 func lockOSThread() {
4418 getg().m.lockedInt++
4419 dolockOSThread()
4420 }
4421
4422 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
4423 // after they update m->locked. Do not allow preemption during this call,
4424 // or else the m might be different in this function than in the caller.
4425 //
4426 //go:nosplit
4427 func dounlockOSThread() {
4428 if GOARCH == "wasm" {
4429 return
4430 }
4431 _g_ := getg()
4432 if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
4433 return
4434 }
4435 _g_.m.lockedg = 0
4436 _g_.lockedm = 0
4437 }
4438
4439 // UnlockOSThread undoes an earlier call to LockOSThread.
4440 // If this drops the number of active LockOSThread calls on the
4441 // calling goroutine to zero, it unwires the calling goroutine from
4442 // its fixed operating system thread.
4443 // If there are no active LockOSThread calls, this is a no-op.
4444 //
4445 // Before calling UnlockOSThread, the caller must ensure that the OS
4446 // thread is suitable for running other goroutines. If the caller made
4447 // any permanent changes to the state of the thread that would affect
4448 // other goroutines, it should not call this function and thus leave
4449 // the goroutine locked to the OS thread until the goroutine (and
4450 // hence the thread) exits.
4451 //
4452 //go:nosplit
4453 func UnlockOSThread() {
4454 _g_ := getg()
4455 if _g_.m.lockedExt == 0 {
4456 return
4457 }
4458 _g_.m.lockedExt--
4459 dounlockOSThread()
4460 }
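// Illustrative sketch (not part of the original source): user code pairs
// the two calls around work that depends on per-thread state, for example
// a C library that uses thread-local storage:
//
//	runtime.LockOSThread()
//	defer runtime.UnlockOSThread()
//	callThreadSensitiveCode() // hypothetical work bound to one OS thread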
4461
4462
4463 func unlockOSThread() {
4464 _g_ := getg()
4465 if _g_.m.lockedInt == 0 {
4466 systemstack(badunlockosthread)
4467 }
4468 _g_.m.lockedInt--
4469 dounlockOSThread()
4470 }
4471
4472 func badunlockosthread() {
4473 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
4474 }
4475
4476 func gcount() int32 {
4477 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
4478 for _, _p_ := range allp {
4479 n -= _p_.gFree.n
4480 }
4481
4482
4483
4484 if n < 1 {
4485 n = 1
4486 }
4487 return n
4488 }
4489
4490 func mcount() int32 {
4491 return int32(sched.mnext - sched.nmfreed)
4492 }
4493
4494 var prof struct {
4495 signalLock uint32
4496 hz int32
4497 }
4498
4499 func _System() { _System() }
4500 func _ExternalCode() { _ExternalCode() }
4501 func _LostExternalCode() { _LostExternalCode() }
4502 func _GC() { _GC() }
4503 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
4504 func _VDSO() { _VDSO() }
4505
4506 // Called if we receive a SIGPROF signal.
4507 // Called by the signal handler, may run during STW.
4508 //
4509 //go:nowritebarrierrec
4510 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
4511 if prof.hz == 0 {
4512 return
4513 }
4514
4515
4516
4517
4518 if mp != nil && mp.profilehz == 0 {
4519 return
4520 }
4521
4522
4523
4524
4525
4526
4527
4528 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
4529 if f := findfunc(pc); f.valid() {
4530 if hasPrefix(funcname(f), "runtime/internal/atomic") {
4531 cpuprof.lostAtomic++
4532 return
4533 }
4534 }
4535 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
4536
4537
4538
4539 cpuprof.lostAtomic++
4540 return
4541 }
4542 }
4543
4544
4545
4546
4547
4548
4549
4550 getg().m.mallocing++
4551
4552 var stk [maxCPUProfStack]uintptr
4553 n := 0
4554 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
4555 cgoOff := 0
4556
4557
4558
4559
4560
4561 if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
4562 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
4563 cgoOff++
4564 }
4565 copy(stk[:], mp.cgoCallers[:cgoOff])
4566 mp.cgoCallers[0] = 0
4567 }
4568
4569
4570 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0)
4571 if n > 0 {
4572 n += cgoOff
4573 }
4574 } else {
4575 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
4576 }
4577
4578 if n <= 0 {
4579
4580
4581 n = 0
4582 if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
4583
4584
4585 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
4586 }
4587 if n == 0 && mp != nil && mp.vdsoSP != 0 {
4588 n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
4589 }
4590 if n == 0 {
4591
4592 n = 2
4593 if inVDSOPage(pc) {
4594 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
4595 } else if pc > firstmoduledata.etext {
4596
4597 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
4598 }
4599 stk[0] = pc
4600 if mp.preemptoff != "" {
4601 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
4602 } else {
4603 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
4604 }
4605 }
4606 }
4607
4608 if prof.hz != 0 {
4609
4610
4611
4612 var tagPtr *unsafe.Pointer
4613 if gp != nil && gp.m != nil && gp.m.curg != nil {
4614 tagPtr = &gp.m.curg.labels
4615 }
4616 cpuprof.add(tagPtr, stk[:n])
4617
4618 gprof := gp
4619 var pp *p
4620 if gp != nil && gp.m != nil {
4621 if gp.m.curg != nil {
4622 gprof = gp.m.curg
4623 }
4624 pp = gp.m.p.ptr()
4625 }
4626 traceCPUSample(gprof, pp, stk[:n])
4627 }
4628 getg().m.mallocing--
4629 }
4630
4631 // setcpuprofilerate sets the CPU profiling rate to hz times per second.
4632 // If hz <= 0, setcpuprofilerate turns off CPU profiling.
4633 func setcpuprofilerate(hz int32) {
4634
4635 if hz < 0 {
4636 hz = 0
4637 }
4638
4639
4640
4641 _g_ := getg()
4642 _g_.m.locks++
4643
4644
4645
4646
4647 setThreadCPUProfiler(0)
4648
4649 for !atomic.Cas(&prof.signalLock, 0, 1) {
4650 osyield()
4651 }
4652 if prof.hz != hz {
4653 setProcessCPUProfiler(hz)
4654 prof.hz = hz
4655 }
4656 atomic.Store(&prof.signalLock, 0)
4657
4658 lock(&sched.lock)
4659 sched.profilehz = hz
4660 unlock(&sched.lock)
4661
4662 if hz != 0 {
4663 setThreadCPUProfiler(hz)
4664 }
4665
4666 _g_.m.locks--
4667 }
4668
4669 // init initializes pp, which may be a freshly allocated p or a
4670 // previously destroyed p, and transitions it to status _Pgcstop.
4671 func (pp *p) init(id int32) {
4672 pp.id = id
4673 pp.status = _Pgcstop
4674 pp.sudogcache = pp.sudogbuf[:0]
4675 pp.deferpool = pp.deferpoolbuf[:0]
4676 pp.wbBuf.reset()
4677 if pp.mcache == nil {
4678 if id == 0 {
4679 if mcache0 == nil {
4680 throw("missing mcache?")
4681 }
4682
4683
4684 pp.mcache = mcache0
4685 } else {
4686 pp.mcache = allocmcache()
4687 }
4688 }
4689 if raceenabled && pp.raceprocctx == 0 {
4690 if id == 0 {
4691 pp.raceprocctx = raceprocctx0
4692 raceprocctx0 = 0
4693 } else {
4694 pp.raceprocctx = raceproccreate()
4695 }
4696 }
4697 lockInit(&pp.timersLock, lockRankTimers)
4698
4699
4700
4701 timerpMask.set(id)
4702
4703
4704 idlepMask.clear(id)
4705 }
4706
4707 // destroy releases all of the resources associated with pp and
4708 // transitions it to status _Pdead.
4709 //
4710 // sched.lock must be held and the world must be stopped.
4711 func (pp *p) destroy() {
4712 assertLockHeld(&sched.lock)
4713 assertWorldStopped()
4714
4715
4716 for pp.runqhead != pp.runqtail {
4717
4718 pp.runqtail--
4719 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
4720
4721 globrunqputhead(gp)
4722 }
4723 if pp.runnext != 0 {
4724 globrunqputhead(pp.runnext.ptr())
4725 pp.runnext = 0
4726 }
4727 if len(pp.timers) > 0 {
4728 plocal := getg().m.p.ptr()
4729
4730
4731
4732
4733 lock(&plocal.timersLock)
4734 lock(&pp.timersLock)
4735 moveTimers(plocal, pp.timers)
4736 pp.timers = nil
4737 pp.numTimers = 0
4738 pp.deletedTimers = 0
4739 atomic.Store64(&pp.timer0When, 0)
4740 unlock(&pp.timersLock)
4741 unlock(&plocal.timersLock)
4742 }
4743
4744 if gcphase != _GCoff {
4745 wbBufFlush1(pp)
4746 pp.gcw.dispose()
4747 }
4748 for i := range pp.sudogbuf {
4749 pp.sudogbuf[i] = nil
4750 }
4751 pp.sudogcache = pp.sudogbuf[:0]
4752 for j := range pp.deferpoolbuf {
4753 pp.deferpoolbuf[j] = nil
4754 }
4755 pp.deferpool = pp.deferpoolbuf[:0]
4756 systemstack(func() {
4757 for i := 0; i < pp.mspancache.len; i++ {
4758
4759 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
4760 }
4761 pp.mspancache.len = 0
4762 lock(&mheap_.lock)
4763 pp.pcache.flush(&mheap_.pages)
4764 unlock(&mheap_.lock)
4765 })
4766 freemcache(pp.mcache)
4767 pp.mcache = nil
4768 gfpurge(pp)
4769 traceProcFree(pp)
4770 if raceenabled {
4771 if pp.timerRaceCtx != 0 {
4772
4773
4774
4775
4776
4777 mp := getg().m
4778 phold := mp.p.ptr()
4779 mp.p.set(pp)
4780
4781 racectxend(pp.timerRaceCtx)
4782 pp.timerRaceCtx = 0
4783
4784 mp.p.set(phold)
4785 }
4786 raceprocdestroy(pp.raceprocctx)
4787 pp.raceprocctx = 0
4788 }
4789 pp.gcAssistTime = 0
4790 pp.status = _Pdead
4791 }
4792
4793 // Change number of processors.
4794 //
4795 // sched.lock must be held, and the world must be stopped.
4796 //
4797 // gcworkbufs must not be being modified by either the GC or the write barrier
4798 // code, so the GC must not be running if the number of Ps actually changes.
4799 //
4800 // Returns list of Ps with local work, they need to be scheduled by the caller.
4801 func procresize(nprocs int32) *p {
4802 assertLockHeld(&sched.lock)
4803 assertWorldStopped()
4804
4805 old := gomaxprocs
4806 if old < 0 || nprocs <= 0 {
4807 throw("procresize: invalid arg")
4808 }
4809 if trace.enabled {
4810 traceGomaxprocs(nprocs)
4811 }
4812
4813
4814 now := nanotime()
4815 if sched.procresizetime != 0 {
4816 sched.totaltime += int64(old) * (now - sched.procresizetime)
4817 }
4818 sched.procresizetime = now
4819
4820 maskWords := (nprocs + 31) / 32
4821
4822
4823 if nprocs > int32(len(allp)) {
4824
4825
4826 lock(&allpLock)
4827 if nprocs <= int32(cap(allp)) {
4828 allp = allp[:nprocs]
4829 } else {
4830 nallp := make([]*p, nprocs)
4831
4832
4833 copy(nallp, allp[:cap(allp)])
4834 allp = nallp
4835 }
4836
4837 if maskWords <= int32(cap(idlepMask)) {
4838 idlepMask = idlepMask[:maskWords]
4839 timerpMask = timerpMask[:maskWords]
4840 } else {
4841 nidlepMask := make([]uint32, maskWords)
4842
4843 copy(nidlepMask, idlepMask)
4844 idlepMask = nidlepMask
4845
4846 ntimerpMask := make([]uint32, maskWords)
4847 copy(ntimerpMask, timerpMask)
4848 timerpMask = ntimerpMask
4849 }
4850 unlock(&allpLock)
4851 }
4852
4853
4854 for i := old; i < nprocs; i++ {
4855 pp := allp[i]
4856 if pp == nil {
4857 pp = new(p)
4858 }
4859 pp.init(i)
4860 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
4861 }
4862
4863 _g_ := getg()
4864 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
4865
4866 _g_.m.p.ptr().status = _Prunning
4867 _g_.m.p.ptr().mcache.prepareForSweep()
4868 } else {
4869
4870
4871
4872
4873
4874 if _g_.m.p != 0 {
4875 if trace.enabled {
4876
4877
4878
4879 traceGoSched()
4880 traceProcStop(_g_.m.p.ptr())
4881 }
4882 _g_.m.p.ptr().m = 0
4883 }
4884 _g_.m.p = 0
4885 p := allp[0]
4886 p.m = 0
4887 p.status = _Pidle
4888 acquirep(p)
4889 if trace.enabled {
4890 traceGoStart()
4891 }
4892 }
4893
4894
4895 mcache0 = nil
4896
4897
4898 for i := nprocs; i < old; i++ {
4899 p := allp[i]
4900 p.destroy()
4901
4902 }
4903
4904
4905 if int32(len(allp)) != nprocs {
4906 lock(&allpLock)
4907 allp = allp[:nprocs]
4908 idlepMask = idlepMask[:maskWords]
4909 timerpMask = timerpMask[:maskWords]
4910 unlock(&allpLock)
4911 }
4912
4913 var runnablePs *p
4914 for i := nprocs - 1; i >= 0; i-- {
4915 p := allp[i]
4916 if _g_.m.p.ptr() == p {
4917 continue
4918 }
4919 p.status = _Pidle
4920 if runqempty(p) {
4921 pidleput(p, now)
4922 } else {
4923 p.m.set(mget())
4924 p.link.set(runnablePs)
4925 runnablePs = p
4926 }
4927 }
4928 stealOrder.reset(uint32(nprocs))
4929 var int32p *int32 = &gomaxprocs
4930 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
4931 if old != nprocs {
4932
4933 gcCPULimiter.resetCapacity(now, nprocs)
4934 }
4935 return runnablePs
4936 }
4937
4938 // Associate p and the current m.
4939 //
4940 // This function is allowed to have write barriers even if the caller
4941 // isn't because it immediately acquires _p_.
4942 //
4943 //go:yeswritebarrierrec
4944 func acquirep(_p_ *p) {
4945
4946 wirep(_p_)
4947
4948
4949
4950
4951
4952 _p_.mcache.prepareForSweep()
4953
4954 if trace.enabled {
4955 traceProcStart()
4956 }
4957 }
4958
4959 // wirep is the first step of acquirep, which actually associates the
4960 // current M to _p_. This is broken out so we can disallow write
4961 // barriers for this part, since we don't yet have a P.
4962 //
4963 //go:nowritebarrierrec
4964 //go:nosplit
4965 func wirep(_p_ *p) {
4966 _g_ := getg()
4967
4968 if _g_.m.p != 0 {
4969 throw("wirep: already in go")
4970 }
4971 if _p_.m != 0 || _p_.status != _Pidle {
4972 id := int64(0)
4973 if _p_.m != 0 {
4974 id = _p_.m.ptr().id
4975 }
4976 print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
4977 throw("wirep: invalid p state")
4978 }
4979 _g_.m.p.set(_p_)
4980 _p_.m.set(_g_.m)
4981 _p_.status = _Prunning
4982 }
4983
4984
4985 func releasep() *p {
4986 _g_ := getg()
4987
4988 if _g_.m.p == 0 {
4989 throw("releasep: invalid arg")
4990 }
4991 _p_ := _g_.m.p.ptr()
4992 if _p_.m.ptr() != _g_.m || _p_.status != _Prunning {
4993 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " p->status=", _p_.status, "\n")
4994 throw("releasep: invalid p state")
4995 }
4996 if trace.enabled {
4997 traceProcStop(_g_.m.p.ptr())
4998 }
4999 _g_.m.p = 0
5000 _p_.m = 0
5001 _p_.status = _Pidle
5002 return _p_
5003 }
5004
5005 func incidlelocked(v int32) {
5006 lock(&sched.lock)
5007 sched.nmidlelocked += v
5008 if v > 0 {
5009 checkdead()
5010 }
5011 unlock(&sched.lock)
5012 }
5013
5014 // Check for deadlock situation.
5015 // The check is based on number of running M's, if 0 -> deadlock.
5016 // sched.lock must be held.
5017 func checkdead() {
5018 assertLockHeld(&sched.lock)
5019
5020
5021
5022
5023 if islibrary || isarchive {
5024 return
5025 }
5026
5027
5028
5029
5030
5031 if panicking > 0 {
5032 return
5033 }
5034
5035
5036
5037
5038
5039 var run0 int32
5040 if !iscgo && cgoHasExtraM {
5041 mp := lockextra(true)
5042 haveExtraM := extraMCount > 0
5043 unlockextra(mp)
5044 if haveExtraM {
5045 run0 = 1
5046 }
5047 }
5048
5049 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
5050 if run > run0 {
5051 return
5052 }
5053 if run < 0 {
5054 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
5055 throw("checkdead: inconsistent counts")
5056 }
5057
5058 grunning := 0
5059 forEachG(func(gp *g) {
5060 if isSystemGoroutine(gp, false) {
5061 return
5062 }
5063 s := readgstatus(gp)
5064 switch s &^ _Gscan {
5065 case _Gwaiting,
5066 _Gpreempted:
5067 grunning++
5068 case _Grunnable,
5069 _Grunning,
5070 _Gsyscall:
5071 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
5072 throw("checkdead: runnable g")
5073 }
5074 })
5075 if grunning == 0 {
5076 unlock(&sched.lock)
5077 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
5078 }
5079
5080
5081 if faketime != 0 {
5082 if when := timeSleepUntil(); when < maxWhen {
5083 faketime = when
5084
5085
5086 pp, _ := pidleget(faketime)
5087 if pp == nil {
5088
5089
5090 throw("checkdead: no p for timer")
5091 }
5092 mp := mget()
5093 if mp == nil {
5094
5095
5096 throw("checkdead: no m for timer")
5097 }
5098
5099
5100
5101 atomic.Xadd(&sched.nmspinning, 1)
5102 mp.spinning = true
5103 mp.nextp.set(pp)
5104 notewakeup(&mp.park)
5105 return
5106 }
5107 }
5108
5109
5110 for _, _p_ := range allp {
5111 if len(_p_.timers) > 0 {
5112 return
5113 }
5114 }
5115
5116 unlock(&sched.lock)
5117 fatal("all goroutines are asleep - deadlock!")
5118 }
5119
5120 // forcegcperiod is the maximum time in nanoseconds between garbage
5121 // collections. If we go this long without a garbage collection, one
5122 // is forced to run.
5123 //
5124 // This is a variable for testing purposes. It normally doesn't change.
5125 var forcegcperiod int64 = 2 * 60 * 1e9
5126
5127 // needSysmonWorkaround is true if the workaround for
5128 // golang.org/issue/42515 is needed on NetBSD.
5129 var needSysmonWorkaround bool = false
5130
5131 // Always runs without a P, so write barriers are not allowed.
5132 //
5133 //go:nowritebarrierrec
5134 func sysmon() {
5135 lock(&sched.lock)
5136 sched.nmsys++
5137 checkdead()
5138 unlock(&sched.lock)
5139
5140 lasttrace := int64(0)
5141 idle := 0
5142 delay := uint32(0)
5143
5144 for {
5145 if idle == 0 {
5146 delay = 20
5147 } else if idle > 50 {
5148 delay *= 2
5149 }
5150 if delay > 10*1000 {
5151 delay = 10 * 1000
5152 }
5153 usleep(delay)
5154
5155
5156
5157
5158
5159
5160
5161
5162
5163
5164
5165
5166
5167
5168
5169
5170 now := nanotime()
5171 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
5172 lock(&sched.lock)
5173 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
5174 syscallWake := false
5175 next := timeSleepUntil()
5176 if next > now {
5177 atomic.Store(&sched.sysmonwait, 1)
5178 unlock(&sched.lock)
5179
5180
5181 sleep := forcegcperiod / 2
5182 if next-now < sleep {
5183 sleep = next - now
5184 }
5185 shouldRelax := sleep >= osRelaxMinNS
5186 if shouldRelax {
5187 osRelax(true)
5188 }
5189 syscallWake = notetsleep(&sched.sysmonnote, sleep)
5190 if shouldRelax {
5191 osRelax(false)
5192 }
5193 lock(&sched.lock)
5194 atomic.Store(&sched.sysmonwait, 0)
5195 noteclear(&sched.sysmonnote)
5196 }
5197 if syscallWake {
5198 idle = 0
5199 delay = 20
5200 }
5201 }
5202 unlock(&sched.lock)
5203 }
5204
5205 lock(&sched.sysmonlock)
5206
5207
5208 now = nanotime()
5209
5210 // trigger libc interceptors if needed
5211 if *cgo_yield != nil {
5212 asmcgocall(*cgo_yield, nil)
5213 }
5214 // poll network if not polled for more than 10ms
5215 lastpoll := int64(atomic.Load64(&sched.lastpoll))
5216 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
5217 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
5218 list := netpoll(0)
5219 if !list.empty() {
5220
5221
5222
5223
5224
5225
5226
5227 incidlelocked(-1)
5228 injectglist(&list)
5229 incidlelocked(1)
5230 }
5231 }
5232 if GOOS == "netbsd" && needSysmonWorkaround {
5233
5234
5235
5236
5237
5238
5239
5240
5241
5242
5243
5244
5245
5246
5247
5248 if next := timeSleepUntil(); next < now {
5249 startm(nil, false)
5250 }
5251 }
5252 if scavenger.sysmonWake.Load() != 0 {
5253 // Kick the scavenger awake if someone requested it.
5254 scavenger.wake()
5255 }
5256 // retake P's blocked in syscalls
5257 // and preempt long running G's
5258 if retake(now) != 0 {
5259 idle = 0
5260 } else {
5261 idle++
5262 }
5263 // check if we need to force a GC
5264 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
5265 lock(&forcegc.lock)
5266 forcegc.idle = 0
5267 var list gList
5268 list.push(forcegc.g)
5269 injectglist(&list)
5270 unlock(&forcegc.lock)
5271 }
5272 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
5273 lasttrace = now
5274 schedtrace(debug.scheddetail > 0)
5275 }
5276 unlock(&sched.sysmonlock)
5277 }
5278 }
5279
5280 type sysmontick struct {
5281 schedtick uint32
5282 schedwhen int64
5283 syscalltick uint32
5284 syscallwhen int64
5285 }
5286
5287 // forcePreemptNS is the time slice given to a G before it is
5288 // preempted.
5289 const forcePreemptNS = 10 * 1000 * 1000
5290
5291 func retake(now int64) uint32 {
5292 n := 0
5293
5294
5295 lock(&allpLock)
5296
5297
5298
5299 for i := 0; i < len(allp); i++ {
5300 _p_ := allp[i]
5301 if _p_ == nil {
5302
5303
5304 continue
5305 }
5306 pd := &_p_.sysmontick
5307 s := _p_.status
5308 sysretake := false
5309 if s == _Prunning || s == _Psyscall {
5310
5311 t := int64(_p_.schedtick)
5312 if int64(pd.schedtick) != t {
5313 pd.schedtick = uint32(t)
5314 pd.schedwhen = now
5315 } else if pd.schedwhen+forcePreemptNS <= now {
5316 preemptone(_p_)
5317
5318
5319 sysretake = true
5320 }
5321 }
5322 if s == _Psyscall {
5323
5324 t := int64(_p_.syscalltick)
5325 if !sysretake && int64(pd.syscalltick) != t {
5326 pd.syscalltick = uint32(t)
5327 pd.syscallwhen = now
5328 continue
5329 }
5330
5331
5332
5333 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
5334 continue
5335 }
5336
5337 unlock(&allpLock)
5338
5339
5340
5341
5342 incidlelocked(-1)
5343 if atomic.Cas(&_p_.status, s, _Pidle) {
5344 if trace.enabled {
5345 traceGoSysBlock(_p_)
5346 traceProcStop(_p_)
5347 }
5348 n++
5349 _p_.syscalltick++
5350 handoffp(_p_)
5351 }
5352 incidlelocked(1)
5353 lock(&allpLock)
5354 }
5355 }
5356 unlock(&allpLock)
5357 return uint32(n)
5358 }
5359
5360 // Tell all goroutines that they have been preempted and they should stop.
5361 // This function is purely best-effort. It can fail to inform a goroutine if a
5362 // processor just started running it.
5363 // No locks need to be held.
5364 // Returns true if preemption request was issued to at least one goroutine.
5365 func preemptall() bool {
5366 res := false
5367 for _, _p_ := range allp {
5368 if _p_.status != _Prunning {
5369 continue
5370 }
5371 if preemptone(_p_) {
5372 res = true
5373 }
5374 }
5375 return res
5376 }
5377
5378 // Tell the goroutine running on processor P to stop.
5379 // This function is purely best-effort. It can incorrectly fail to inform the
5380 // goroutine. It can inform the wrong goroutine. Even if it informs the
5381 // correct goroutine, that goroutine might ignore the request if it is
5382 // simultaneously executing newstack.
5383 // No lock needs to be held.
5384 // Returns true if preemption request was issued.
5385 // The actual preemption will happen at some point in the future
5386 // and will be indicated by the gp->status no longer being
5387 // Grunning.
5388 func preemptone(_p_ *p) bool {
5389 mp := _p_.m.ptr()
5390 if mp == nil || mp == getg().m {
5391 return false
5392 }
5393 gp := mp.curg
5394 if gp == nil || gp == mp.g0 {
5395 return false
5396 }
5397
5398 gp.preempt = true
5399
5400
5401
5402
5403
5404 gp.stackguard0 = stackPreempt
5405
5406
5407 if preemptMSupported && debug.asyncpreemptoff == 0 {
5408 _p_.preempt = true
5409 preemptM(mp)
5410 }
5411
5412 return true
5413 }
5414
5415 var starttime int64
5416
5417 func schedtrace(detailed bool) {
5418 now := nanotime()
5419 if starttime == 0 {
5420 starttime = now
5421 }
5422
5423 lock(&sched.lock)
5424 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
5425 if detailed {
5426 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
5427 }
5428
5429
5430
5431 for i, _p_ := range allp {
5432 mp := _p_.m.ptr()
5433 h := atomic.Load(&_p_.runqhead)
5434 t := atomic.Load(&_p_.runqtail)
5435 if detailed {
5436 id := int64(-1)
5437 if mp != nil {
5438 id = mp.id
5439 }
5440 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, " timerslen=", len(_p_.timers), "\n")
5441 } else {
5442 // In non-detailed mode format lengths of per-P run queues as:
5443 // [len1 len2 len3 len4]
5444 print(" ")
5445 if i == 0 {
5446 print("[")
5447 }
5448 print(t - h)
5449 if i == len(allp)-1 {
5450 print("]\n")
5451 }
5452 }
5453 }
5454
5455 if !detailed {
5456 unlock(&sched.lock)
5457 return
5458 }
5459
5460 for mp := allm; mp != nil; mp = mp.alllink {
5461 _p_ := mp.p.ptr()
5462 gp := mp.curg
5463 lockedg := mp.lockedg.ptr()
5464 id1 := int32(-1)
5465 if _p_ != nil {
5466 id1 = _p_.id
5467 }
5468 id2 := int64(-1)
5469 if gp != nil {
5470 id2 = gp.goid
5471 }
5472 id3 := int64(-1)
5473 if lockedg != nil {
5474 id3 = lockedg.goid
5475 }
5476 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
5477 }
5478
5479 forEachG(func(gp *g) {
5480 mp := gp.m
5481 lockedm := gp.lockedm.ptr()
5482 id1 := int64(-1)
5483 if mp != nil {
5484 id1 = mp.id
5485 }
5486 id2 := int64(-1)
5487 if lockedm != nil {
5488 id2 = lockedm.id
5489 }
5490 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n")
5491 })
5492 unlock(&sched.lock)
5493 }
5494
5495 // schedEnableUser enables or disables the scheduling of user
5496 // goroutines.
5497 //
5498 // This does not stop already running user goroutines, so the caller
5499 // should first stop the world when disabling user goroutines.
5500 func schedEnableUser(enable bool) {
5501 lock(&sched.lock)
5502 if sched.disable.user == !enable {
5503 unlock(&sched.lock)
5504 return
5505 }
5506 sched.disable.user = !enable
5507 if enable {
5508 n := sched.disable.n
5509 sched.disable.n = 0
5510 globrunqputbatch(&sched.disable.runnable, n)
5511 unlock(&sched.lock)
5512 for ; n != 0 && sched.npidle != 0; n-- {
5513 startm(nil, false)
5514 }
5515 } else {
5516 unlock(&sched.lock)
5517 }
5518 }
5519
5520 // schedEnabled reports whether gp should be scheduled. It returns
5521 // false if scheduling of gp is disabled.
5522 //
5523 // sched.lock must be held.
5524 func schedEnabled(gp *g) bool {
5525 assertLockHeld(&sched.lock)
5526
5527 if sched.disable.user {
5528 return isSystemGoroutine(gp, true)
5529 }
5530 return true
5531 }
5532
5533 // Put mp on midle list.
5534 // sched.lock must be held.
5535 // May run during STW, so write barriers are not allowed.
5536 //
5537 //go:nowritebarrierrec
5538 func mput(mp *m) {
5539 assertLockHeld(&sched.lock)
5540
5541 mp.schedlink = sched.midle
5542 sched.midle.set(mp)
5543 sched.nmidle++
5544 checkdead()
5545 }
5546
5547 // Try to get an m from midle list.
5548 // sched.lock must be held.
5549 // May run during STW, so write barriers are not allowed.
5550 //
5551 //go:nowritebarrierrec
5552 func mget() *m {
5553 assertLockHeld(&sched.lock)
5554
5555 mp := sched.midle.ptr()
5556 if mp != nil {
5557 sched.midle = mp.schedlink
5558 sched.nmidle--
5559 }
5560 return mp
5561 }
5562
5563 // Put gp on the global runnable queue.
5564 // sched.lock must be held.
5565 // May run during STW, so write barriers are not allowed.
5566 //
5567 //go:nowritebarrierrec
5568 func globrunqput(gp *g) {
5569 assertLockHeld(&sched.lock)
5570
5571 sched.runq.pushBack(gp)
5572 sched.runqsize++
5573 }
5574
5575 // Put gp at the head of the global runnable queue.
5576 // sched.lock must be held.
5577 // May run during STW, so write barriers are not allowed.
5578 //
5579 //go:nowritebarrierrec
5580 func globrunqputhead(gp *g) {
5581 assertLockHeld(&sched.lock)
5582
5583 sched.runq.push(gp)
5584 sched.runqsize++
5585 }
5586
5587 // Put a batch of runnable goroutines on the global runnable queue.
5588 // This clears *batch.
5589 // sched.lock must be held.
5590 // May run during STW, so write barriers are not allowed.
5591 //
5592 //go:nowritebarrierrec
5593 func globrunqputbatch(batch *gQueue, n int32) {
5594 assertLockHeld(&sched.lock)
5595
5596 sched.runq.pushBackAll(*batch)
5597 sched.runqsize += n
5598 *batch = gQueue{}
5599 }
5600
5601 // Try get a batch of G's from the global runnable queue.
5602 // sched.lock must be held.
5603 func globrunqget(_p_ *p, max int32) *g {
5604 assertLockHeld(&sched.lock)
5605
5606 if sched.runqsize == 0 {
5607 return nil
5608 }
5609
5610 n := sched.runqsize/gomaxprocs + 1
5611 if n > sched.runqsize {
5612 n = sched.runqsize
5613 }
5614 if max > 0 && n > max {
5615 n = max
5616 }
5617 if n > int32(len(_p_.runq))/2 {
5618 n = int32(len(_p_.runq)) / 2
5619 }
5620
5621 sched.runqsize -= n
5622
5623 gp := sched.runq.pop()
5624 n--
5625 for ; n > 0; n-- {
5626 gp1 := sched.runq.pop()
5627 runqput(_p_, gp1, false)
5628 }
5629 return gp
5630 }
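// Worked example (not part of the original source): with sched.runqsize
// = 64 and gomaxprocs = 8, globrunqget takes n = 64/8 + 1 = 9 goroutines:
// one is returned to run immediately and the remaining 8 are moved into
// _p_'s local queue with runqput. The max argument and the
// len(_p_.runq)/2 cap bound n so a single P cannot drain the global
// queue.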
5631
5632 // pMask is an atomic bitstring with one bit per P.
5633 type pMask []uint32
5634
5635
5636 func (p pMask) read(id uint32) bool {
5637 word := id / 32
5638 mask := uint32(1) << (id % 32)
5639 return (atomic.Load(&p[word]) & mask) != 0
5640 }
5641
5642
5643 func (p pMask) set(id int32) {
5644 word := id / 32
5645 mask := uint32(1) << (id % 32)
5646 atomic.Or(&p[word], mask)
5647 }
5648
5649
5650 func (p pMask) clear(id int32) {
5651 word := id / 32
5652 mask := uint32(1) << (id % 32)
5653 atomic.And(&p[word], ^mask)
5654 }
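// Worked example (not part of the original source): the word/bit math
// above maps a P's id onto the mask. For id 70, word = 70/32 = 2 and
// mask = 1<<(70%32) = 1<<6, so:
//
//	var m pMask = make([]uint32, 3) // covers ids 0..95
//	m.set(70)                       // atomically ORs bit 6 into m[2]
//	ok := m.read(70)                // true
//	m.clear(70)                     // atomically ANDs the bit away
//	_ = ok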
5655
5656 // updateTimerPMask clears pp's timer mask if it has no timers on its heap.
5657 //
5658 // Ideally, the timer mask would be kept immediately consistent on any timer
5659 // operations. Unfortunately, updating a shared global data structure in the
5660 // timer hot path adds too much overhead in applications frequently switching
5661 // between no timers and some timers.
5662 //
5663 // As a compromise, the timer mask is updated only on pidleget / pidleput. A
5664 // running P (returned by pidleget) may add a timer at any time, so its mask
5665 // must be set. An idle P (passed to pidleput) cannot add new timers while
5666 // idle, so if it has no timers at that time, its mask may be cleared.
5667 //
5668 // Thus, we get the following effects on timer-stealing in findrunnable:
5669 //
5670 //   - Idle Ps with no timers when they go idle are never checked in findrunnable
5671 //     (for work- or timer-stealing; this is the ideal case).
5672 //   - Running Ps must always be checked.
5673 //   - Idle Ps whose timers are stolen must continue to be checked until they run
5674 //     again, even after timer expiration.
5675 //
5676 // When the P starts running again, the mask should be set, as a timer may be
5677 // added at any time.
5678 //
5679 // TODO(prattmic): Additional targeted updates may improve the above cases,
5680 // e.g., updating the mask when stealing a timer.
5681 func updateTimerPMask(pp *p) {
5682 if atomic.Load(&pp.numTimers) > 0 {
5683 return
5684 }
5685
5686
5687
5688
5689 lock(&pp.timersLock)
5690 if atomic.Load(&pp.numTimers) == 0 {
5691 timerpMask.clear(pp.id)
5692 }
5693 unlock(&pp.timersLock)
5694 }
5695
5696 // pidleput puts p on the _Pidle list. now must be a relatively recent call
5697 // to nanotime or zero. Returns now or the current time if now was zero.
5698 //
5699 // This releases ownership of p. Once sched.lock is released it is no longer
5700 // safe to use p.
5701 //
5702 // sched.lock must be held.
5703 //
5704 // May run during STW, so write barriers are not allowed.
5705 //
5706 //go:nowritebarrierrec
5707 func pidleput(_p_ *p, now int64) int64 {
5708 assertLockHeld(&sched.lock)
5709
5710 if !runqempty(_p_) {
5711 throw("pidleput: P has non-empty run queue")
5712 }
5713 if now == 0 {
5714 now = nanotime()
5715 }
5716 updateTimerPMask(_p_)
5717 idlepMask.set(_p_.id)
5718 _p_.link = sched.pidle
5719 sched.pidle.set(_p_)
5720 atomic.Xadd(&sched.npidle, 1)
5721 if !_p_.limiterEvent.start(limiterEventIdle, now) {
5722 throw("must be able to track idle limiter event")
5723 }
5724 return now
5725 }
5726
5727 // pidleget tries to get a p from the _Pidle list, acquiring ownership of p in
5728 // the process.
5729 //
5730 // sched.lock must be held. May run during STW, so write barriers are not
5731 // allowed.
5732 //
5733 //go:nowritebarrierrec
5734 func pidleget(now int64) (*p, int64) {
5735 assertLockHeld(&sched.lock)
5736
5737 _p_ := sched.pidle.ptr()
5738 if _p_ != nil {
5739
5740 if now == 0 {
5741 now = nanotime()
5742 }
5743 timerpMask.set(_p_.id)
5744 idlepMask.clear(_p_.id)
5745 sched.pidle = _p_.link
5746 atomic.Xadd(&sched.npidle, -1)
5747 _p_.limiterEvent.stop(limiterEventIdle, now)
5748 }
5749 return _p_, now
5750 }
5751
5752 // runqempty reports whether _p_ has no Gs on its local run queue.
5753 // It never returns true spuriously.
5754 func runqempty(_p_ *p) bool {
5755 // Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail,
5756 // 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext.
5757 // Simply observing that runqhead == runqtail and then observing that runnext == nil
5758 // does not mean the queue is empty.
5759 for {
5760 head := atomic.Load(&_p_.runqhead)
5761 tail := atomic.Load(&_p_.runqtail)
5762 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
5763 if tail == atomic.Load(&_p_.runqtail) {
5764 return head == tail && runnext == 0
5765 }
5766 }
5767 }
5768
5769 // To shake out latent assumptions about scheduling order,
5770 // we introduce some randomness into scheduling decisions
5771 // when running with the race detector.
5772 // The need for this was made obvious by changing the
5773 // (deterministic) scheduling order in Go 1.5 and breaking
5774 // many poorly-written tests.
5775 // With the randomness here, as long as the tests pass
5776 // consistently with -race, they shouldn't have latent scheduling
5777 // assumptions.
5778 const randomizeScheduler = raceenabled
5779
5780 // runqput tries to put g on the local runnable queue.
5781 // If next is false, runqput adds g to the tail of the runnable queue.
5782 // If next is true, runqput puts g in the _p_.runnext slot.
5783 // If the run queue is full, runnext puts g on the global queue.
5784 // Executed only by the owner P.
5785 func runqput(_p_ *p, gp *g, next bool) {
5786 if randomizeScheduler && next && fastrandn(2) == 0 {
5787 next = false
5788 }
5789
5790 if next {
5791 retryNext:
5792 oldnext := _p_.runnext
5793 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
5794 goto retryNext
5795 }
5796 if oldnext == 0 {
5797 return
5798 }
5799
5800 gp = oldnext.ptr()
5801 }
5802
5803 retry:
5804 h := atomic.LoadAcq(&_p_.runqhead)
5805 t := _p_.runqtail
5806 if t-h < uint32(len(_p_.runq)) {
5807 _p_.runq[t%uint32(len(_p_.runq))].set(gp)
5808 atomic.StoreRel(&_p_.runqtail, t+1)
5809 return
5810 }
5811 if runqputslow(_p_, gp, h, t) {
5812 return
5813 }
5814
5815 goto retry
5816 }
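// Illustrative sketch (not part of the original source, and assuming
// randomizeScheduler is off, i.e. a non-race build): with next=true a new
// g displaces the current runnext, and the displaced g goes to the tail:
//
//	runqput(pp, g1, true)  // runnext = g1
//	runqput(pp, g2, true)  // runnext = g2; g1 moves to the queue tail
//	gp, inherit := runqget(pp) // gp == g2, inherit == true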
5817
5818 // Put g and a batch of work from local runnable queue on global queue.
5819 // Executed only by the owner P.
5820 func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
5821 var batch [len(_p_.runq)/2 + 1]*g
5822
5823
5824 n := t - h
5825 n = n / 2
5826 if n != uint32(len(_p_.runq)/2) {
5827 throw("runqputslow: queue is not full")
5828 }
5829 for i := uint32(0); i < n; i++ {
5830 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
5831 }
5832 if !atomic.CasRel(&_p_.runqhead, h, h+n) {
5833 return false
5834 }
5835 batch[n] = gp
5836
5837 if randomizeScheduler {
5838 for i := uint32(1); i <= n; i++ {
5839 j := fastrandn(i + 1)
5840 batch[i], batch[j] = batch[j], batch[i]
5841 }
5842 }
5843
5844
5845 for i := uint32(0); i < n; i++ {
5846 batch[i].schedlink.set(batch[i+1])
5847 }
5848 var q gQueue
5849 q.head.set(batch[0])
5850 q.tail.set(batch[n])
5851
5852
5853 lock(&sched.lock)
5854 globrunqputbatch(&q, int32(n+1))
5855 unlock(&sched.lock)
5856 return true
5857 }
5858
5859 // runqputbatch tries to put all the G's on q on the local runnable queue.
5860 // If the queue is full, they are put on the global queue; in that case
5861 // this will temporarily acquire the scheduler lock.
5862 // Executed only by the owner P.
5863 func runqputbatch(pp *p, q *gQueue, qsize int) {
5864 h := atomic.LoadAcq(&pp.runqhead)
5865 t := pp.runqtail
5866 n := uint32(0)
5867 for !q.empty() && t-h < uint32(len(pp.runq)) {
5868 gp := q.pop()
5869 pp.runq[t%uint32(len(pp.runq))].set(gp)
5870 t++
5871 n++
5872 }
5873 qsize -= int(n)
5874
5875 if randomizeScheduler {
5876 off := func(o uint32) uint32 {
5877 return (pp.runqtail + o) % uint32(len(pp.runq))
5878 }
5879 for i := uint32(1); i < n; i++ {
5880 j := fastrandn(i + 1)
5881 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
5882 }
5883 }
5884
5885 atomic.StoreRel(&pp.runqtail, t)
5886 if !q.empty() {
5887 lock(&sched.lock)
5888 globrunqputbatch(q, int32(qsize))
5889 unlock(&sched.lock)
5890 }
5891 }
5892
5893 // Get g from local runnable queue.
5894 // If inheritTime is true, gp should inherit the remaining time in the
5895 // current time slice. Otherwise, it should start a new time slice.
5896 // Executed only by the owner P.
5897 func runqget(_p_ *p) (gp *g, inheritTime bool) {
5898
5899 next := _p_.runnext
5900
5901
5902
5903 if next != 0 && _p_.runnext.cas(next, 0) {
5904 return next.ptr(), true
5905 }
5906
5907 for {
5908 h := atomic.LoadAcq(&_p_.runqhead)
5909 t := _p_.runqtail
5910 if t == h {
5911 return nil, false
5912 }
5913 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
5914 if atomic.CasRel(&_p_.runqhead, h, h+1) {
5915 return gp, false
5916 }
5917 }
5918 }
5919
5920 // runqdrain drains the local runnable queue of _p_ and returns all goroutines in it.
5921 // Executed only by the owner P.
5922 func runqdrain(_p_ *p) (drainQ gQueue, n uint32) {
5923 oldNext := _p_.runnext
5924 if oldNext != 0 && _p_.runnext.cas(oldNext, 0) {
5925 drainQ.pushBack(oldNext.ptr())
5926 n++
5927 }
5928
5929 retry:
5930 h := atomic.LoadAcq(&_p_.runqhead)
5931 t := _p_.runqtail
5932 qn := t - h
5933 if qn == 0 {
5934 return
5935 }
5936 if qn > uint32(len(_p_.runq)) {
5937 goto retry
5938 }
5939
5940 if !atomic.CasRel(&_p_.runqhead, h, h+qn) {
5941 goto retry
5942 }
5943
5944 // We've inverted the order in which it gets G's from the local P's runnable queue
5945 // and then advances the head pointer because we don't want to mess up the statuses of G's
5946 // while runqdrain() and runqsteal() are running in parallel.
5947 // Thus we should advance the head pointer before draining the local P into a gQueue,
5948 // so that we can update any gp.schedlink only after we take the full ownership of G,
5949 // meanwhile, other P's can't access to all G's in local P's runnable queue and steal them.
5950 // See https://groups.google.com/g/golang-dev/c/0pTKxEKhHSc/m/6Q85QjdVBQAJ for more details.
5951 for i := uint32(0); i < qn; i++ {
5952 gp := _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
5953 drainQ.pushBack(gp)
5954 n++
5955 }
5956 return
5957 }
5958
5959 // Grabs a batch of goroutines from _p_'s runnable queue into batch.
5960 // Batch is a ring buffer starting at batchHead.
5961 // Returns number of grabbed goroutines.
5962 // Can be executed by any P.
5963 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
5964 for {
5965 h := atomic.LoadAcq(&_p_.runqhead)
5966 t := atomic.LoadAcq(&_p_.runqtail)
5967 n := t - h
5968 n = n - n/2
5969 if n == 0 {
5970 if stealRunNextG {
5971
5972 if next := _p_.runnext; next != 0 {
5973 if _p_.status == _Prunning {
5974 // Sleep to ensure that _p_ isn't about to run the g
5975 // we are about to steal.
5976 // The important use case here is when the g running
5977 // on _p_ ready()s another g and then almost
5978 // immediately blocks. Instead of stealing runnext
5979 // in this window, back off to give _p_ a chance to
5980 // schedule runnext. This will avoid thrashing gs
5981 // between different Ps.
5982 // A sync chan send/recv takes ~50ns as of time of
5983 // writing, so 3us gives ~50x overshoot.
5984 if GOOS != "windows" && GOOS != "openbsd" && GOOS != "netbsd" {
5985 usleep(3)
5986 } else {
5987 // On some platforms system timer granularity is
5988 // 1-15ms, which is way too much for this
5989 // optimization. So just yield.
5990 osyield()
5991 }
5992 }
5993 if !_p_.runnext.cas(next, 0) {
5994 continue
5995 }
5996 batch[batchHead%uint32(len(batch))] = next
5997 return 1
5998 }
5999 }
6000 return 0
6001 }
6002 if n > uint32(len(_p_.runq)/2) {
6003 continue
6004 }
6005 for i := uint32(0); i < n; i++ {
6006 g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
6007 batch[(batchHead+i)%uint32(len(batch))] = g
6008 }
6009 if atomic.CasRel(&_p_.runqhead, h, h+n) {
6010 return n
6011 }
6012 }
6013 }
6014
6015 // Steal half of elements from local runnable queue of p2
6016 // and put onto local runnable queue of p.
6017 // Returns one of the stolen elements (or nil if failed).
6018 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
6019 t := _p_.runqtail
6020 n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
6021 if n == 0 {
6022 return nil
6023 }
6024 n--
6025 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
6026 if n == 0 {
6027 return gp
6028 }
6029 h := atomic.LoadAcq(&_p_.runqhead)
6030 if t-h+n >= uint32(len(_p_.runq)) {
6031 throw("runqsteal: runq overflow")
6032 }
6033 atomic.StoreRel(&_p_.runqtail, t+n)
6034 return gp
6035 }
6036
6037 // A gQueue is a dequeue of Gs linked through g.schedlink. A G can only
6038 // be on one gQueue or gList at a time.
6039 type gQueue struct {
6040 head guintptr
6041 tail guintptr
6042 }
6043
6044
6045 func (q *gQueue) empty() bool {
6046 return q.head == 0
6047 }
6048
6049
6050 func (q *gQueue) push(gp *g) {
6051 gp.schedlink = q.head
6052 q.head.set(gp)
6053 if q.tail == 0 {
6054 q.tail.set(gp)
6055 }
6056 }
6057
6058
6059 func (q *gQueue) pushBack(gp *g) {
6060 gp.schedlink = 0
6061 if q.tail != 0 {
6062 q.tail.ptr().schedlink.set(gp)
6063 } else {
6064 q.head.set(gp)
6065 }
6066 q.tail.set(gp)
6067 }
6068
6069
6070
6071 func (q *gQueue) pushBackAll(q2 gQueue) {
6072 if q2.tail == 0 {
6073 return
6074 }
6075 q2.tail.ptr().schedlink = 0
6076 if q.tail != 0 {
6077 q.tail.ptr().schedlink = q2.head
6078 } else {
6079 q.head = q2.head
6080 }
6081 q.tail = q2.tail
6082 }
6083
6084
6085
6086 func (q *gQueue) pop() *g {
6087 gp := q.head.ptr()
6088 if gp != nil {
6089 q.head = gp.schedlink
6090 if q.head == 0 {
6091 q.tail = 0
6092 }
6093 }
6094 return gp
6095 }
6096
6097
6098 func (q *gQueue) popList() gList {
6099 stack := gList{q.head}
6100 *q = gQueue{}
6101 return stack
6102 }
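// Illustrative sketch (not part of the original source): used with
// pushBack and pop, a gQueue behaves as a FIFO:
//
//	var q gQueue
//	q.pushBack(g1) // g1, g2 are hypothetical *g values
//	q.pushBack(g2)
//	q.pop() // returns g1
//	q.pop() // returns g2
//	q.pop() // returns nil; the queue is empty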
6103
6104 // A gList is a list of Gs linked through g.schedlink. A G can only be
6105 // on one gQueue or gList at a time.
6106 type gList struct {
6107 head guintptr
6108 }
6109
6110
6111 func (l *gList) empty() bool {
6112 return l.head == 0
6113 }
6114
6115
6116 func (l *gList) push(gp *g) {
6117 gp.schedlink = l.head
6118 l.head.set(gp)
6119 }
6120
6121
6122 func (l *gList) pushAll(q gQueue) {
6123 if !q.empty() {
6124 q.tail.ptr().schedlink = l.head
6125 l.head = q.head
6126 }
6127 }
6128
6129
6130 func (l *gList) pop() *g {
6131 gp := l.head.ptr()
6132 if gp != nil {
6133 l.head = gp.schedlink
6134 }
6135 return gp
6136 }
6137
6138
6139 func setMaxThreads(in int) (out int) {
6140 lock(&sched.lock)
6141 out = int(sched.maxmcount)
6142 if in > 0x7fffffff {
6143 sched.maxmcount = 0x7fffffff
6144 } else {
6145 sched.maxmcount = int32(in)
6146 }
6147 checkmcount()
6148 unlock(&sched.lock)
6149 return
6150 }
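// Illustrative usage note (not part of the original source): setMaxThreads
// backs runtime/debug.SetMaxThreads, which returns the previous limit:
//
//	prev := debug.SetMaxThreads(20000) // hypothetical new limit
//	defer debug.SetMaxThreads(prev)    // restore the old limit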
6151
6152
6153 func procPin() int {
6154 _g_ := getg()
6155 mp := _g_.m
6156
6157 mp.locks++
6158 return int(mp.p.ptr().id)
6159 }
6160
6161
6162 func procUnpin() {
6163 _g_ := getg()
6164 _g_.m.locks--
6165 }
6166
6167
6168
6169 func sync_runtime_procPin() int {
6170 return procPin()
6171 }
6172
6173
6174
6175 func sync_runtime_procUnpin() {
6176 procUnpin()
6177 }
6178
6179
6180
6181 func sync_atomic_runtime_procPin() int {
6182 return procPin()
6183 }
6184
6185
6186
6187 func sync_atomic_runtime_procUnpin() {
6188 procUnpin()
6189 }
6190
6191
6192
6193
6194
6195 func sync_runtime_canSpin(i int) bool {
6196
6197
6198
6199
6200
6201 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
6202 return false
6203 }
6204 if p := getg().m.p.ptr(); !runqempty(p) {
6205 return false
6206 }
6207 return true
6208 }
6209
6210
6211
6212 func sync_runtime_doSpin() {
6213 procyield(active_spin_cnt)
6214 }
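// Illustrative sketch (not part of the original source): sync.Mutex uses
// the two helpers above, via linkname, roughly like this before parking
// on a contended lock:
//
//	iter := 0
//	for mutexIsLocked && sync_runtime_canSpin(iter) { // hypothetical condition
//		sync_runtime_doSpin() // burn a few cycles in procyield
//		iter++
//	}
//	// once spinning is denied, fall back to blocking on the semaphore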
6215
6216 var stealOrder randomOrder
6217
6218 // randomOrder/randomEnum are helper types for randomized work stealing.
6219 // They allow to enumerate all Ps in different pseudo-random orders without repetitions.
6220 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
6221 // are coprime, then the sequence (i + X) % GOMAXPROCS gives the required enumeration.
6222 type randomOrder struct {
6223 count uint32
6224 coprimes []uint32
6225 }
6226
6227 type randomEnum struct {
6228 i uint32
6229 count uint32
6230 pos uint32
6231 inc uint32
6232 }
6233
6234 func (ord *randomOrder) reset(count uint32) {
6235 ord.count = count
6236 ord.coprimes = ord.coprimes[:0]
6237 for i := uint32(1); i <= count; i++ {
6238 if gcd(i, count) == 1 {
6239 ord.coprimes = append(ord.coprimes, i)
6240 }
6241 }
6242 }
6243
6244 func (ord *randomOrder) start(i uint32) randomEnum {
6245 return randomEnum{
6246 count: ord.count,
6247 pos: i % ord.count,
6248 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
6249 }
6250 }
6251
6252 func (enum *randomEnum) done() bool {
6253 return enum.i == enum.count
6254 }
6255
6256 func (enum *randomEnum) next() {
6257 enum.i++
6258 enum.pos = (enum.pos + enum.inc) % enum.count
6259 }
6260
6261 func (enum *randomEnum) position() uint32 {
6262 return enum.pos
6263 }
6264
6265 func gcd(a, b uint32) uint32 {
6266 for b != 0 {
6267 a, b = b, a%b
6268 }
6269 return a
6270 }
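// Worked example (not part of the original source): stepping by an
// increment that is coprime to count visits every position exactly once.
// With count = 4 the coprimes are {1, 3}; starting at pos = 2 with
// inc = 3 the walk visits 2, 1, 0, 3 and then stops:
//
//	var ord randomOrder
//	ord.reset(4)
//	for enum := ord.start(fastrand()); !enum.done(); enum.next() {
//		_ = enum.position() // each of 0..3, in a pseudo-random order
//	}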
6271
6272 // An initTask represents the set of initializations that need to be done for a package.
6273 // Keep in sync with ../../test/initempty.go:initTask
6274 type initTask struct {
6275
6276 state uintptr
6277 ndeps uintptr
6278 nfns uintptr
6279 // followed by ndeps instances of an *initTask, one per package depended on
6280 // followed by nfns pcs, one per init function to run
6281 }
6282
6283 // inittrace stores statistics for init functions which are
6284 // updated by malloc and newproc when active is true.
6285 var inittrace tracestat
6286
6287 type tracestat struct {
6288 active bool
6289 id int64
6290 allocs uint64
6291 bytes uint64
6292 }
6293
6294 func doInit(t *initTask) {
6295 switch t.state {
6296 case 2:
6297 return
6298 case 1:
6299 throw("recursive call during initialization - linker skew")
6300 default:
6301 t.state = 1
6302
6303 for i := uintptr(0); i < t.ndeps; i++ {
6304 p := add(unsafe.Pointer(t), (3+i)*goarch.PtrSize)
6305 t2 := *(**initTask)(p)
6306 doInit(t2)
6307 }
6308
6309 if t.nfns == 0 {
6310 t.state = 2
6311 return
6312 }
6313
6314 var (
6315 start int64
6316 before tracestat
6317 )
6318
6319 if inittrace.active {
6320 start = nanotime()
6321
6322 before = inittrace
6323 }
6324
6325 firstFunc := add(unsafe.Pointer(t), (3+t.ndeps)*goarch.PtrSize)
6326 for i := uintptr(0); i < t.nfns; i++ {
6327 p := add(firstFunc, i*goarch.PtrSize)
6328 f := *(*func())(unsafe.Pointer(&p))
6329 f()
6330 }
6331
6332 if inittrace.active {
6333 end := nanotime()
6334
6335 after := inittrace
6336
6337 f := *(*func())(unsafe.Pointer(&firstFunc))
6338 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
6339
6340 var sbuf [24]byte
6341 print("init ", pkg, " @")
6342 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
6343 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
6344 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
6345 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
6346 print("\n")
6347 }
6348
6349 t.state = 2
6350 }
6351 }
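// Illustrative layout sketch (not part of the original source): the
// pointer arithmetic in doInit walks an initTask that the linker lays out
// as a fixed header followed by two variable-length arrays:
//
//	word 0: state (0 = not started, 1 = running, 2 = done)
//	word 1: ndeps
//	word 2: nfns
//	words 3 .. 3+ndeps-1:        *initTask pointers, one per dependency
//	words 3+ndeps .. +nfns-1:    func() values, this package's init funcs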
6352