Source file
src/runtime/stack.go
Documentation: runtime
1
2
3
4
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goos"
12 "runtime/internal/atomic"
13 "runtime/internal/sys"
14 "unsafe"
15 )
16
17
66
// Stack sizing constants. All values are in bytes.
const (
	// _StackSystem is extra per-stack space required by the OS/arch:
	// room for Windows (512 pointers), Plan 9 (512 bytes), and
	// iOS on arm64 (1024 bytes). Zero everywhere else.
	_StackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024

	// _StackMin is the minimum size of a goroutine stack.
	_StackMin = 2048

	// _FixedStack is the smallest fixed-size stack class:
	// _StackMin+_StackSystem rounded up to the next power of 2.
	// The _FixedStackN intermediates perform the round-up at
	// compile time by smearing the high bit rightward.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

	// _StackBig: frame-size threshold above which the compiler emits
	// a different stack-check sequence. (NOTE(review): inferred from
	// the name — confirm against the stack-check codegen.)
	_StackBig = 4096

	// _StackGuard is the size of the guard region below the stack
	// bound at which the prologue stack check triggers; scaled by
	// sys.StackGuardMultiplier (e.g. for race-enabled builds) plus
	// the per-OS _StackSystem allowance.
	_StackGuard = 928*sys.StackGuardMultiplier + _StackSystem

	// _StackSmall: frames at most this large can use an abbreviated
	// check. (NOTE(review): inferred from the name — confirm.)
	_StackSmall = 128

	// _StackLimit is the guard headroom left after subtracting the
	// system and small-frame allowances; used below (shrinkstack) as
	// the space reserved for nosplit call chains.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)
112
const (
	// Compile-time debug switches for this file. All are normally 0;
	// setting them changes how stacks are allocated, freed, and copied
	// for debugging purposes.
	stackDebug       = 0 // verbosity of debug prints throughout this file (0-4)
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // fault on freeing a stack instead of reusing the memory
	stackPoisonCopy  = 0 // fill stacks with a poison byte around copystack
	stackNoCache     = 0 // bypass the per-P small-stack cache

	// debugCheckBP enables extra validation of saved frame pointers
	// during stack adjustment (see adjustframe and adjustctxt).
	debugCheckBP = false
)
128
const (
	// uintptrMask has every bit of a uintptr set.
	uintptrMask = 1<<(8*goarch.PtrSize) - 1

	// The values below are sentinels stored in g.stackguard0. Each is
	// larger than any real stack pointer, so it forces the prologue
	// stack check to fail and enter the stack-growth path, which then
	// recognizes the specific sentinel (see newstack).

	// stackPreempt requests a preemption at the next stack check.
	stackPreempt = uintptrMask & -1314

	// stackFork marks a goroutine whose stack must not grow;
	// newstack throws if it sees this after a fork.
	stackFork = uintptrMask & -1234

	// stackForceMove forces the stack to be moved without growing it
	// (newstack keeps the old size when it sees this).
	stackForceMove = uintptrMask & -275

	// stackPoisonMin is the smallest of the sentinel/poison values.
	stackPoisonMin = uintptrMask & -4096
)
151
152
153
154
155
156
157
// stackpool holds the global pools of free small stacks, one pool per
// size order (stack size _FixedStack<<order). Each entry is padded out
// to a cache-line multiple to avoid false sharing between orders.
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize]byte
}

// stackpoolItem is one order's pool: a list of spans containing free
// stacks of that size, guarded by its own mutex.
type stackpoolItem struct {
	mu   mutex
	span mSpanList
}

// stackLarge caches free large stack spans, bucketed by
// log2 of the span size in pages.
var stackLarge struct {
	lock mutex
	free [heapAddrBits - pageShift]mSpanList
}
174
175 func stackinit() {
176 if _StackCacheSize&_PageMask != 0 {
177 throw("cache size must be a multiple of page size")
178 }
179 for i := range stackpool {
180 stackpool[i].item.span.init()
181 lockInit(&stackpool[i].item.mu, lockRankStackpool)
182 }
183 for i := range stackLarge.free {
184 stackLarge.free[i].init()
185 lockInit(&stackLarge.lock, lockRankStackLarge)
186 }
187 }
188
189
// stacklog2 returns floor(log2(n)); it returns 0 for n <= 1.
func stacklog2(n uintptr) int {
	bits := 0
	for ; n > 1; n >>= 1 {
		bits++
	}
	return bits
}
198
199
200
// stackpoolalloc allocates one stack of size _FixedStack<<order from
// the global pool for that order, carving a fresh span into pieces
// if the pool is empty. The caller must hold stackpool[order].item.mu.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if s == nil {
		// No free stacks. Allocate another span worth and thread
		// every _FixedStack<<order-sized piece onto its free list.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// All stacks in s are now allocated; take the span off
		// the free list so it is not searched again.
		list.remove(s)
	}
	return x
}
238
239
// stackpoolfree returns a stack of the given order to the global pool.
// The caller must hold stackpool[order].item.mu.
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s was fully allocated and therefore off the pool list;
		// it now has a free stack again, so put it back.
		stackpool[order].item.span.insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap — but
		// only while the GC is off; during a GC cycle the span is
		// kept (note the gcphase check) so the memory is not
		// recycled out from under concurrent stack scanning.
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, spanAllocStack)
	}
}
274
275
276
277
278
// stackcacherefill pulls stacks of the given order from the global
// pool into c's per-P cache until the cache holds _StackCacheSize/2
// bytes of them, taking the pool lock only once.
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}
	// Grab half of the allowed capacity so both refills and
	// releases (which drain back down to half) are infrequent.
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}
299
300
// stackcacherelease returns stacks from c's per-P cache to the global
// pool until the cache is back down to _StackCacheSize/2 bytes for
// the given order.
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}
318
319
// stackcache_clear returns every cached stack of every order in c to
// the global pools, leaving the per-P cache empty.
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}
337
338
339
340
341
342
343
// stackalloc allocates an n-byte stack and returns its bounds.
// n must be a power of 2. Small stacks come from the per-P cache or
// the fixed-size global pools; large ones come from the stackLarge
// cache or directly from the heap. Must run on the scheduler (g0)
// stack.
func stackalloc(n uint32) stack {
	// stackalloc must be called on the scheduler stack, so that we
	// never try to grow a stack during the code that stackalloc runs.
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		// Debug mode: give each stack its own page-aligned system
		// allocation so stackfree can unmap or fault it.
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator;
	// anything else takes the large-stack path below.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		// Map n to its size order: n == _FixedStack<<order.
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
			// No P, caching disabled, or preemption-off region:
			// take from the global pool under its lock instead of
			// touching the per-P cache.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			// Fast path: pop from this P's cache, refilling it
			// from the global pool if empty.
			c := thisg.m.p.ptr().mcache
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		// Large stack: try the stackLarge cache first.
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

		if s == nil {
			// Nothing cached; allocate a new span from the heap.
			s = mheap_.allocManual(npage, spanAllocStack)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	// Inform the race detector and sanitizers about the new stack memory.
	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if asanenabled {
		asanunpoison(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
440
441
442
443
444
445
446
// stackfree frees a stack allocated by stackalloc. Small stacks go
// back to the per-P cache or the global pool; large spans are
// returned to the heap, or parked in stackLarge while a GC cycle is
// in progress (see the gcphase check below).
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		// Clobber the freed stack so stale reads fail loudly.
		memclrNoHeapPointers(v, n)
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		// Debug mode: either fault the pages so any further use
		// traps, or return them to the system.
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if asanenabled {
		asanpoison(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		// Small stack: map n back to its size order (mirrors stackalloc).
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			// Push onto this P's cache, first draining half of it
			// back to the global pool if the cache is full.
			c := gp.m.p.ptr().mcache
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// GC is off: free the whole span back to the heap now.
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
		} else {
			// A GC is running: don't return the span to the heap
			// (keeping the memory stable for concurrent scanning);
			// park it in the large-stack cache for reuse instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}
520
// maxstacksize is the maximum size a goroutine stack may grow to.
// The value here is an initial default; it may be overridden during
// runtime startup or via the runtime API (not visible in this file —
// NOTE(review): confirm where it is finalized).
var maxstacksize uintptr = 1 << 20

// maxstackceiling is an additional hard cap checked alongside
// maxstacksize in newstack.
var maxstackceiling = maxstacksize

// ptrnames maps a stack-map bit value to a readable name for debug
// printing: bit 0 = scalar slot, bit 1 = pointer slot.
var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
// adjustinfo carries the parameters of a stack move while pointers
// into the old stack are being adjusted.
type adjustinfo struct {
	old   stack   // bounds of the old stack
	delta uintptr // new.hi - old.hi; added to pointers into the old stack
	cache pcvalueCache

	// sghi is the highest address of any sudog.elem slot on the
	// stack (0 if there are none). Slots at or below it may be
	// written concurrently by channel operations, so they are
	// adjusted with CAS (see adjustpointers).
	sghi uintptr
}
566
567
568
569 func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
570 pp := (*uintptr)(vpp)
571 p := *pp
572 if stackDebug >= 4 {
573 print(" ", pp, ":", hex(p), "\n")
574 }
575 if adjinfo.old.lo <= p && p < adjinfo.old.hi {
576 *pp = p + adjinfo.delta
577 if stackDebug >= 3 {
578 print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
579 }
580 }
581 }
582
583
584
// bitvector is a per-word pointer bitmap: bit i of bytedata reports
// whether word i of the described region holds a pointer.
type bitvector struct {
	n        int32 // number of bits
	bytedata *uint8
}

// ptrbit returns bit i of bv (0 = scalar, 1 = pointer).
// No bounds check is performed; callers must ensure i < bv.n.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}
598
599
600
// adjustpointers relocates, by adjinfo.delta, every word at scanp
// that bv marks as a pointer and that points into the old stack.
// f (if valid) is used only for error reporting. Words below
// adjinfo.sghi may be written concurrently by channel operations,
// so those are updated with a CAS-and-retry loop instead of a
// plain store.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	// If this region overlaps possible channel-operand slots
	// (below sghi), use CAS to avoid racing with a concurrent writer.
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print(" ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		// Process one bitmap byte at a time, clearing each set
		// (pointer) bit as it is handled.
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.Ctz8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// A small nonzero value in a pointer slot looks
				// like junk — likely a liveness-map error.
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					// Retry if a channel operation raced with us.
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}
648
649
// adjustframe relocates all stack-resident pointers in a single
// frame during a stack move. It is used as a gentraceback callback
// (see copystack); returning true continues the frame walk.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	if frame.continpc == 0 {
		// Frame is dead; nothing to adjust.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.funcID == funcID_systemstack_switch {
		// Skip systemstack_switch frames entirely; they carry
		// nothing that needs adjusting here.
		return true
	}

	locals, args, objs := getStackMap(frame, &adjinfo.cache, true)

	// Adjust local variables if the stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust the saved base pointer, if there is one. On amd64, a
	// frame with varp-argp == 2 words has a saved BP stored between
	// the return address and the locals.
	if goarch.ArchFamily == goarch.AMD64 && frame.argp-frame.varp == 2*goarch.PtrSize {
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		if debugCheckBP {
			// A saved frame pointer should be nil or point into
			// the old stack.
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments. No funcInfo is passed: argument slots are
	// not checked against minLegalPointer here.
	if args.n > 0 {
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (live or not), using each
	// object's own pointer bitmap or GC program.
	if frame.varp != 0 {
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			// Negative offsets are relative to varp (locals);
			// non-negative ones to argp (args/results).
			base := frame.varp
			if off >= 0 {
				base = frame.argp
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet —
				// its slot lies below the current SP.
				continue
			}
			ptrdata := obj.ptrdata()
			gcdata := obj.gcdata()
			var s *mspan
			if obj.useGCProg() {
				// Expand the GC program into a temporary bitmap.
				s = materializeGCProg(ptrdata, gcdata)
				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
			}
			for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
				if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
			if s != nil {
				dematerializeGCProg(s)
			}
		}
	}

	return true
}
740
// adjustctxt relocates gp's scheduler context pointer and, when
// frame pointers are enabled, the saved base pointer in gp.sched.
func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		// The saved top-frame pointer should be nil or point into
		// the old stack.
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}
756
// adjustdefers relocates every pointer in gp's defer chain that may
// point into the old stack: the chain head in the G, then each
// record's fn, sp, panic link, list link, varp, and fd fields.
func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust the chain head itself first, then walk the (already
	// adjusted) chain fixing up each record's fields. Note that the
	// stack contents were copied before this runs (see copystack).
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
		adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
	}
}
771
// adjustpanics relocates the head of gp's panic list. The panic
// records themselves live in stack frames and are handled by the
// frame walk (NOTE(review): confirm — only the head is touched here).
func adjustpanics(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

// adjustsudogs relocates the elem pointer of every sudog gp is
// waiting on, since elem may point at a channel-operand slot in
// gp's own stack.
func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}
785
786 func fillstack(stk stack, b byte) {
787 for p := stk.lo; p < stk.hi; p++ {
788 *(*byte)(unsafe.Pointer(p)) = b
789 }
790 }
791
// findsghi returns the highest address within stk that is the end of
// some waiting sudog's element slot, or 0 if no waiting sudog points
// into stk. This bounds the stack region that channel operations may
// write concurrently during a stack copy (see adjustinfo.sghi).
func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}
802
803
804
805
// syncadjustsudogs adjusts gp's sudogs and, with all involved channel
// locks held, copies the part of the old stack that unlocked channel
// operations could otherwise write concurrently. It returns the
// number of bytes of stack copied, which the caller subtracts from
// its own copy so that region is not copied twice.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock each distinct channel once to block concurrent
	// send/receive. A special leaf lock rank is used because the
	// normal hchan rank order does not apply here (see
	// lockRankHchanLeaf).
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
		}
		lastc = sg.c
	}

	// Adjust sudogs while the channels are quiesced.
	adjustsudogs(gp, adjinfo)

	// Copy the region the sudogs point into while still holding the
	// locks, so channel operations cannot race with the copy.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock each distinct channel once, mirroring the lock loop.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}
854
855
856
// copystack moves gp's stack to a freshly allocated stack of newsize
// bytes, adjusts every pointer into the old stack, installs the new
// bounds, and frees the old stack. gp must be stopped in a state
// where its stack may be moved (see callers in newstack/shrinkstack).
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// Report the allocation of the new stack and the free of the old
	// one as a single delta to the GC's scannable-stack accounting.
	gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))

	// Allocate the new stack.
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute the relocation delta.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel operations if needed.
	ncopy := used
	if !gp.activeStackChans {
		if newsize < old.hi-old.lo && atomic.Load8(&gp.parkingOnChan) != 0 {
			// Shrinking while gp is actively parking on a channel
			// would race with the park; growth is gp-initiated and
			// deliberately does not synchronize with channels.
			throw("racy sudog adjustment due to parking on channel")
		}
		adjustsudogs(gp, &adjinfo)
	} else {
		// Channel locks have been released, so other goroutines may
		// be writing into gp's stack. Find the highest such slot so
		// everything at or below it is handled carefully.
		adjinfo.sghi = findsghi(gp, old)

		// syncadjustsudogs copies the racy region under channel
		// locks; skip re-copying those bytes below.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the (rest of the) used stack to its new location.
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust the remaining structures holding stack pointers. These
	// must be fixed before the gentraceback below, which reads them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap in the new stack bounds.
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // note: may overwrite a pending preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Walk the new stack, adjusting frame-resident pointers.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// Free (and optionally poison) the old stack.
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}
940
941
// round2 returns the smallest power of 2 that is >= x
// (1 for any x <= 1). Callers must keep x <= 1<<30; larger
// values cannot be rounded up within int32.
func round2(x int32) int32 {
	r := int32(1)
	for r < x {
		r <<= 1
	}
	return r
}
949
950
951
952
953
954
955
956
957
958
959
960
961
// newstack handles a failed prologue stack check: it either acts on
// a preemption request encoded as a sentinel in gp.stackguard0, or
// grows (normally doubles) the stack by copying it. It runs on the
// g0 stack, with the interrupted goroutine's state in m.morebuf and
// gp.sched, and does not return normally — it gogo's back into the
// goroutine (or parks it).
func newstack() {
	thisg := getg()

	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// A split here is fatal. Record syscallsp/pc so the
		// traceback machinery has something to work with.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry()
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	// Save and clear morebuf; its values are consumed below.
	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// stackguard0 may change underfoot if another thread is about to
	// preempt gp. Read it once and use that value throughout.
	stackguard0 := atomic.Loaduintptr(&gp.stackguard0)

	preempt := stackguard0 == stackPreempt
	if preempt {
		if !canPreemptM(thisg.m) {
			// Not a safe point to preempt (locks held, mallocing,
			// or preemption disabled). Restore the real guard and
			// resume; the preempt flag stays set for next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never returns
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
		// The call into morestack pushed a return address.
		sp -= goarch.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// We are at a synchronous safe point now, so perform
			// the stack shrink that was deferred until one.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like the goroutine called runtime.Gosched.
		gopreempt_m(gp) // never returns
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2

	// Keep doubling until the frame about to be run fits with guard
	// space to spare (funcMaxSPDelta bounds its stack use).
	if f := findfunc(gp.sched.pc); f.valid() {
		max := uintptr(funcMaxSPDelta(f))
		needed := max + _StackGuard
		used := gp.stack.hi - gp.sched.sp
		for newsize-used < needed {
			newsize *= 2
		}
	}

	if stackguard0 == stackForceMove {
		// Forced stack movement (debugging): move without growing,
		// so repeated moves don't exhaust the address space.
		newsize = oldsize
	}

	if newsize > maxstacksize || newsize > maxstackceiling {
		if maxstacksize < maxstackceiling {
			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		} else {
			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
		}
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must have been executing to reach newstack, so
	// it must be _Grunning; park it in _Gcopystack so the concurrent
	// GC will not scan the stack while it is being copied.
	casgstatus(gp, _Grunning, _Gcopystack)

	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}
1119
1120
// nilfunc faults deliberately (nil-pointer write); it is used as the
// call target when a goroutine is started with a nil function value,
// so the crash happens at a recognizable PC.
func nilfunc() {
	*(*uint8)(nil) = 0
}

// gostartcallfn adjusts gobuf as if it had executed a call to fv
// (or to nilfunc when fv is nil) and then stopped before the first
// instruction of that function.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}
1136
1137
1138
1139
// isShrinkStackSafe reports whether it is safe to attempt to shrink
// gp's stack. It is unsafe when:
//   - gp is in a syscall (syscallsp != 0): the syscall frame may hold
//     stack pointers the copier cannot see;
//   - gp is at an asynchronous safe point (asyncSafePoint): precise
//     pointer maps may be unavailable;
//   - gp is parking on a channel (parkingOnChan): sudog adjustment
//     would race with the park (see the matching check in copystack).
func isShrinkStackSafe(gp *g) bool {
	return gp.syscallsp == 0 && !gp.asyncSafePoint && atomic.Load8(&gp.parkingOnChan) == 0
}
1155
1156
1157
1158
1159
// shrinkstack attempts to halve gp's stack, doing so only when the
// goroutine is using less than a quarter of its current stack. The
// caller must own gp's stack (hold its _Gscan bit, or be gp's own M
// on the system stack) and must already satisfy isShrinkStackSafe.
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't hold the _Gscan bit. The only other legal owner
		// is gp's own M shrinking its current user G while running
		// on the system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}
	// Refuse self-shrinks during a libcall: such frames may hold
	// stack addresses disguised as uintptrs (NOTE(review): inferred
	// from the libcallsp check — confirm).
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == funcID_gcBgMarkWorker {
		// The GC background mark worker's stack is exempt from
		// shrinking (see gcBgMarkWorker).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink below the minimum fixed-size stack class.
	if newsize < _FixedStack {
		return
	}
	// Only shrink if the in-use portion — everything from SP up,
	// plus _StackLimit of guard headroom for nosplit chains — is
	// under a quarter of the current stack.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}
1216
1217
// freeStackSpans returns unused stack memory to the heap: spans in
// the small-stack pools with no stacks allocated, and everything
// parked in the large-stack cache. (Callers run this when it is safe
// to free stack spans — see the gcphase checks in stackfree.)
func freeStackSpans() {
	// Scan the small-stack pools for empty spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			next := s.next // save the link before removing s
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free every cached large stack span.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}
1249
1250
1251
// getStackMap returns the locals and arguments live-pointer maps and
// the stack-object list for frame. The debug flag only gates extra
// debug printing.
func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead; return empty maps.
		return
	}

	f := frame.fn
	pcdata := int32(-1)
	if targetpc != f.entry() {
		// Back up one byte so the lookup lands inside the CALL
		// instruction rather than at the return address; at the
		// entry point itself, keep -1 (entry map).
		targetpc--
		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
	}
	if pcdata == -1 {
		// No valid pcdata value but there may still be a stackmap
		// for this function — most likely we are in the prologue;
		// assume index 0 and hope for the best.
		pcdata = 0
	}

	// Local variables.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch goarch.ArchFamily {
	case goarch.ARM64:
		minsize = sys.StackAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		stackid := pcdata
		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stkmap == nil || stkmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// nbit == 0 means there are no pointer-holding locals.
		if stkmap.nbit > 0 {
			if stackid < 0 || stackid >= stkmap.n {
				// don't know where we are
				print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			locals = stackmapdata(stkmap, stackid)
			if stackDebug >= 3 && debug {
				print(" locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
			}
		} else if stackDebug >= 3 && debug {
			print(" no locals to adjust\n")
		}
	}

	// Arguments.
	if frame.arglen > 0 {
		if frame.argmap != nil {
			// The frame carries its own argument map; clip it to
			// the actual argument length.
			args = *frame.argmap
			n := int32(frame.arglen / goarch.PtrSize)
			if n < args.n {
				args.n = n // don't use more of the map than arglen covers
			}
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			if stackmap.nbit > 0 {
				args = stackmapdata(stackmap, pcdata)
			}
		}
	}

	// Stack objects. On register-ABI architectures, frames that come
	// with their own argmap (NOTE(review): reflect call stubs —
	// confirm) get a faked record describing an abi.RegArgs at a
	// fixed offset (see stkobjinit); otherwise read the records the
	// compiler emitted in funcdata.
	if (GOARCH == "amd64" || GOARCH == "arm64" || GOARCH == "ppc64" || GOARCH == "ppc64le" || GOARCH == "riscv64") &&
		unsafe.Sizeof(abi.RegArgs{}) > 0 && frame.argmap != nil {
		objs = methodValueCallFrameObjs[:]
	} else {
		p := funcdata(f, _FUNCDATA_StackObjects)
		if p != nil {
			// funcdata layout: a count word followed by the records.
			n := *(*uintptr)(p)
			p = add(p, goarch.PtrSize)
			r0 := (*stackObjectRecord)(noescape(p))
			objs = unsafe.Slice(r0, int(n))
			// The noescape keeps frame's contents from being
			// marked as leaking through this slice.
		}
	}

	return
}
1361
// methodValueCallFrameObjs holds the single faked stack-object record
// used by getStackMap for frames that store an abi.RegArgs at a fixed
// offset. It is filled in by stkobjinit.
var methodValueCallFrameObjs [1]stackObjectRecord

// stkobjinit builds methodValueCallFrameObjs[0]: a stack-object
// record describing an internal/abi.RegArgs value placed just below
// the frame's argument pointer (negative off = varp-relative).
func stkobjinit() {
	var abiRegArgsEface any = abi.RegArgs{}
	abiRegArgsType := efaceOf(&abiRegArgsEface)._type
	if abiRegArgsType.kind&kindGCProg != 0 {
		throw("abiRegArgsType needs GC Prog, update methodValueCallFrameObjs")
	}
	// gcdataoff is stored relative to the rodata of the module
	// containing the record itself (see stackObjectRecord.gcdata),
	// so find the module that holds methodValueCallFrameObjs.
	ptr := uintptr(unsafe.Pointer(&methodValueCallFrameObjs[0]))
	var mod *moduledata
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.gofunc <= ptr && ptr < datap.end {
			mod = datap
			break
		}
	}
	if mod == nil {
		throw("methodValueCallFrameObjs is not in a module")
	}
	methodValueCallFrameObjs[0] = stackObjectRecord{
		off:       -int32(alignUp(abiRegArgsType.size, 8)), // 8-byte aligned, just below varp
		size:      int32(abiRegArgsType.size),
		_ptrdata:  int32(abiRegArgsType.ptrdata),
		gcdataoff: uint32(uintptr(unsafe.Pointer(abiRegArgsType.gcdata)) - mod.rodata),
	}
}
1390
1391
1392
// stackObjectRecord describes one stack object — a local or argument
// whose pointer layout the stack copier and GC must know. (The layout
// must match whatever emits these records into funcdata — NOTE(review):
// compiler/linker side; keep in sync.)
type stackObjectRecord struct {
	// off is the object's offset in the frame:
	// if negative, relative to varp (locals);
	// if non-negative, relative to argp (args/results).
	// See the matching base selection in adjustframe.
	off       int32
	size      int32
	_ptrdata  int32  // ptrdata, negated if the record uses a GC program
	gcdataoff uint32 // offset of gcdata from the containing module's rodata
}

// useGCProg reports whether the record's pointer data is encoded as
// a GC program rather than a plain bitmap (signaled by negative
// _ptrdata).
func (r *stackObjectRecord) useGCProg() bool {
	return r._ptrdata < 0
}

// ptrdata returns the magnitude of _ptrdata: the number of bytes of
// the object that may contain pointers.
func (r *stackObjectRecord) ptrdata() uintptr {
	x := r._ptrdata
	if x < 0 {
		return uintptr(-x)
	}
	return uintptr(x)
}

// gcdata returns the object's pointer bitmap or GC program, resolved
// via the rodata base of the module containing r.
func (r *stackObjectRecord) gcdata() *byte {
	ptr := uintptr(unsafe.Pointer(r))
	var mod *moduledata
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.gofunc <= ptr && ptr < datap.end {
			mod = datap
			break
		}
	}
	// A nil-mod panic here means r is a copy of a record rather than
	// a pointer into module data; the original pointer must be used.
	res := mod.rodata + uintptr(r.gcdataoff)
	return (*byte)(unsafe.Pointer(res))
}
1431
1432
1433
1434
1435
// morestackc reports the fatal error raised when system-stack code
// attempts a stack split on a user stack. (NOTE(review): reached via
// the assembly morestack path — confirm the caller.)
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}

// startingStackSize is the stack size for newly created goroutines.
// It starts at _FixedStack and may be raised adaptively by
// gcComputeStartingStackSize; that function keeps it a power of 2
// in [_FixedStack, maxstacksize].
var startingStackSize uint32 = _FixedStack
1445
// gcComputeStartingStackSize recomputes startingStackSize from the
// per-P stack-scan statistics accumulated during the last GC cycle:
// the average scanned stack size plus guard space, clamped to
// [_FixedStack, maxstacksize] and rounded up to a power of 2.
// It is a no-op unless debug.adaptivestackstart is enabled.
func gcComputeStartingStackSize() {
	if debug.adaptivestackstart == 0 {
		return
	}
	// Sum each P's counters and reset them for the next cycle.
	var scannedStackSize uint64
	var scannedStacks uint64
	for _, p := range allp {
		scannedStackSize += p.scannedStackSize
		scannedStacks += p.scannedStacks
		p.scannedStackSize = 0
		p.scannedStacks = 0
	}
	if scannedStacks == 0 {
		startingStackSize = _FixedStack
		return
	}
	// Add _StackGuard so a goroutine using the average amount of
	// stack does not immediately trigger a growth.
	avg := scannedStackSize/scannedStacks + _StackGuard
	if avg > uint64(maxstacksize) {
		avg = uint64(maxstacksize)
	}
	if avg < _FixedStack {
		avg = _FixedStack
	}
	// The int32 conversion and round2 rely on avg being well below
	// 1<<31 after the maxstacksize clamp (NOTE(review): round2 does
	// not terminate for inputs above 1<<30 — the clamp is load-bearing).
	startingStackSize = uint32(round2(int32(avg)))
}
1485
View as plain text