src/runtime/mprof.go
// Profiling support: memory (malloc), block, mutex, goroutine,
// thread-creation, and stack profiles.

package runtime

import (
	"internal/abi"
	"runtime/internal/atomic"
	"unsafe"
)

var (
	// profInsertLock protects changes to the start of all *bucket linked lists.
	profInsertLock mutex
	// profBlockLock protects the contents of every blockRecord struct.
	profBlockLock mutex
	// profMemActiveLock protects the active field of every memRecord struct.
	profMemActiveLock mutex
	// profMemFutureLock is a set of locks that protect the respective elements
	// of the future array of every memRecord struct.
	profMemFutureLock [len(memRecord{}.future)]mutex
)

const (
	// profile types
	memProfile bucketType = 1 + iota
	blockProfile
	mutexProfile

	// size of bucket hash table
	buckHashSize = 179999

	// max depth of stack to record in bucket
	maxStack = 32
)

type bucketType int

// A bucket holds per-call-stack profiling information for one profile type.
// The bucket header below is followed in memory by the recorded stack
// (nstk uintptrs) and then the record data: a memRecord for memProfile
// buckets, or a blockRecord for blockProfile and mutexProfile buckets.
// Buckets are found by hashing the call stack into buckhash and are never
// freed; insertions into the linked lists are made under profInsertLock
// (see stkbucket).
type bucket struct {
	next    *bucket
	allnext *bucket
	typ     bucketType
	hash    uintptr
	size    uintptr
	nstk    uintptr
}

// A memRecord is the bucket data for a bucket of type memProfile,
// part of the memory profile.
type memRecord struct {
	// active is the currently published profile. A profiling
	// cycle can be accumulated into active once it is complete.
	active memRecordCycle

	// future records the profile events we're counting for cycles
	// that have not yet been published. It has three elements so
	// that allocations can be counted up to two GC cycles ahead of
	// publication:
	//
	//   - mProf_Malloc counts allocations into future[cycle+2],
	//   - mProf_Free counts frees into future[cycle+1],
	//   - mProf_FlushLocked moves future[cycle] into active.
	//
	// Publishing a cycle only after a full GC has had a chance to
	// free its allocations keeps the profile from being skewed
	// toward very recent, still-live allocations. Elements are
	// indexed by cycle number modulo len(future) and protected by
	// the corresponding profMemFutureLock element.
	future [3]memRecordCycle
}

// memRecordCycle is the set of allocation and free totals
// accumulated during a single profiling cycle.
type memRecordCycle struct {
	allocs, frees           uintptr
	alloc_bytes, free_bytes uintptr
}

// add accumulates b into a. It does not zero b.
func (a *memRecordCycle) add(b *memRecordCycle) {
	a.allocs += b.allocs
	a.frees += b.frees
	a.alloc_bytes += b.alloc_bytes
	a.free_bytes += b.free_bytes
}

// A blockRecord is the bucket data for a bucket of type blockProfile,
// which is used in blocking and mutex profiles.
type blockRecord struct {
	count  float64
	cycles int64
}

var (
	mbuckets atomic.UnsafePointer // *bucket, memory profile buckets
	bbuckets atomic.UnsafePointer // *bucket, blocking profile buckets
	xbuckets atomic.UnsafePointer // *bucket, mutex profile buckets
	buckhash atomic.UnsafePointer // *buckhashArray

	mProfCycle mProfCycleHolder
)

type buckhashArray [buckHashSize]atomic.UnsafePointer

const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)

// mProfCycleHolder holds the global heap profile cycle number (wrapped at
// mProfCycleWrap, stored starting at bit 1), and a flag (stored at bit 0)
// saying whether future[cycle] in all buckets has been queued to flush into
// the active profile.
type mProfCycleHolder struct {
	value atomic.Uint32
}

// read returns the current cycle count.
func (c *mProfCycleHolder) read() (cycle uint32) {
	v := c.value.Load()
	cycle = v >> 1
	return cycle
}

// setFlushed sets the flushed flag. It returns the current cycle count and the
// previous value of the flushed flag.
func (c *mProfCycleHolder) setFlushed() (cycle uint32, alreadyFlushed bool) {
	for {
		prev := c.value.Load()
		cycle = prev >> 1
		alreadyFlushed = (prev & 0x1) != 0
		next := prev | 0x1
		if c.value.CompareAndSwap(prev, next) {
			return cycle, alreadyFlushed
		}
	}
}

// increment increases the cycle count by one, wrapping the value at
// mProfCycleWrap. It clears the flushed flag.
func (c *mProfCycleHolder) increment() {
	// The cycle is wrapped explicitly at mProfCycleWrap rather than relying
	// on uint32 overflow so that the value stays congruent with the
	// memRecord.future index (cycle % len(future)) across the wrap.
	for {
		prev := c.value.Load()
		cycle := prev >> 1
		cycle = (cycle + 1) % mProfCycleWrap
		next := cycle << 1
		if c.value.CompareAndSwap(prev, next) {
			break
		}
	}
}

// newBucket allocates a bucket with the given type and number of stack entries.
func newBucket(typ bucketType, nstk int) *bucket {
	size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
	switch typ {
	default:
		throw("invalid profile bucket type")
	case memProfile:
		size += unsafe.Sizeof(memRecord{})
	case blockProfile, mutexProfile:
		size += unsafe.Sizeof(blockRecord{})
	}

	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
	b.typ = typ
	b.nstk = uintptr(nstk)
	return b
}

// stk returns the slice in b holding the stack.
func (b *bucket) stk() []uintptr {
	stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
	return stk[:b.nstk:b.nstk]
}

// mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord {
	if b.typ != memProfile {
		throw("bad use of bucket.mp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*memRecord)(data)
}

// bp returns the blockRecord associated with the blockProfile or
// mutexProfile bucket b.
func (b *bucket) bp() *blockRecord {
	if b.typ != blockProfile && b.typ != mutexProfile {
		throw("bad use of bucket.bp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*blockRecord)(data)
}

// stkbucket returns the bucket for stk[0:nstk],
// allocating a new bucket if needed.
func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
	bh := (*buckhashArray)(buckhash.Load())
	if bh == nil {
		lock(&profInsertLock)
		// check again under the lock
		bh = (*buckhashArray)(buckhash.Load())
		if bh == nil {
			bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys))
			if bh == nil {
				throw("runtime: cannot allocate memory")
			}
			buckhash.StoreNoWB(unsafe.Pointer(bh))
		}
		unlock(&profInsertLock)
	}

	// Hash stack.
	var h uintptr
	for _, pc := range stk {
		h += pc
		h += h << 10
		h ^= h >> 6
	}
	// hash in size
	h += size
	h += h << 10
	h ^= h >> 6
	// finalize
	h += h << 3
	h ^= h >> 11

	i := int(h % buckHashSize)
	// first check optimistically, without the lock
	for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			return b
		}
	}

	if !alloc {
		return nil
	}

	lock(&profInsertLock)
	// check again under the insertion lock
	for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			unlock(&profInsertLock)
			return b
		}
	}

	// Create new bucket.
	b := newBucket(typ, len(stk))
	copy(b.stk(), stk)
	b.hash = h
	b.size = size

	var allnext *atomic.UnsafePointer
	if typ == memProfile {
		allnext = &mbuckets
	} else if typ == mutexProfile {
		allnext = &xbuckets
	} else {
		allnext = &bbuckets
	}

	b.next = (*bucket)(bh[i].Load())
	b.allnext = (*bucket)(allnext.Load())

	bh[i].StoreNoWB(unsafe.Pointer(b))
	allnext.StoreNoWB(unsafe.Pointer(b))

	unlock(&profInsertLock)
	return b
}

func eqslice(x, y []uintptr) bool {
	if len(x) != len(y) {
		return false
	}
	for i, xi := range x {
		if xi != y[i] {
			return false
		}
	}
	return true
}

// mProf_NextCycle publishes the next heap profile cycle and creates a
// fresh heap profile cycle. This operation is fast and can be done
// during STW. The caller must call mProf_Flush before calling
// mProf_NextCycle again.
//
// This is called by mark termination during STW so allocations and
// frees after the world is started again count towards a new heap
// profiling cycle.
func mProf_NextCycle() {
	mProfCycle.increment()
}

// mProf_Flush flushes the events from the current heap profiling
// cycle into the active profile. After this it is safe to start a new
// heap profiling cycle with mProf_NextCycle.
//
// This is called by GC after mark termination starts the world. In
// contrast with mProf_NextCycle, this is somewhat expensive, but safe
// to do concurrently.
func mProf_Flush() {
	cycle, alreadyFlushed := mProfCycle.setFlushed()
	if alreadyFlushed {
		return
	}

	index := cycle % uint32(len(memRecord{}.future))
	lock(&profMemActiveLock)
	lock(&profMemFutureLock[index])
	mProf_FlushLocked(index)
	unlock(&profMemFutureLock[index])
	unlock(&profMemActiveLock)
}

// mProf_FlushLocked flushes the events from the heap profiling cycle at index
// into the active profile. The caller must hold the lock for the cycle at
// index and for the active profile.
func mProf_FlushLocked(index uint32) {
	assertLockHeld(&profMemActiveLock)
	assertLockHeld(&profMemFutureLock[index])
	head := (*bucket)(mbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		mp := b.mp()

		// Flush cycle C into the published profile and clear
		// it for reuse.
		mpc := &mp.future[index]
		mp.active.add(mpc)
		*mpc = memRecordCycle{}
	}
}

// mProf_PostSweep records that all sweep frees for this GC cycle have
// completed. This has the effect of publishing the heap profile
// snapshot as of the last mark termination without advancing the heap
// profile cycle.
func mProf_PostSweep() {
	// Flush cycle C+1 to the active profile so everything as of
	// the last mark termination becomes visible. *Don't* advance
	// the cycle, since we're still accumulating allocs in cycle
	// C+2, which have to become C+1 in the next mark termination
	// and so on.
	cycle := mProfCycle.read() + 1

	index := cycle % uint32(len(memRecord{}.future))
	lock(&profMemActiveLock)
	lock(&profMemFutureLock[index])
	mProf_FlushLocked(index)
	unlock(&profMemFutureLock[index])
	unlock(&profMemActiveLock)
}

// Called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
	var stk [maxStack]uintptr
	nstk := callers(4, stk[:])

	index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future))

	b := stkbucket(memProfile, size, stk[:nstk], true)
	mp := b.mp()
	mpc := &mp.future[index]

	lock(&profMemFutureLock[index])
	mpc.allocs++
	mpc.alloc_bytes += size
	unlock(&profMemFutureLock[index])

	// Setprofilebucket locks a bunch of other mutexes, so we call it
	// outside of the profiler locks. This reduces potential contention
	// and chances of deadlocks. Since the object must be alive during the
	// call to mProf_Malloc, it's fine to do this non-atomically.
	systemstack(func() {
		setprofilebucket(p, b)
	})
}

// Called when freeing a profiled block.
func mProf_Free(b *bucket, size uintptr) {
	index := (mProfCycle.read() + 1) % uint32(len(memRecord{}.future))

	mp := b.mp()
	mpc := &mp.future[index]

	lock(&profMemFutureLock[index])
	mpc.frees++
	mpc.free_bytes += size
	unlock(&profMemFutureLock[index])
}

var blockprofilerate uint64 // in CPU ticks

// SetBlockProfileRate controls the fraction of goroutine blocking events
// that are reported in the blocking profile. The profiler aims to sample
// an average of one blocking event per rate nanoseconds spent blocked.
//
// To include every blocking event in the profile, pass rate = 1.
// To turn off profiling entirely, pass rate <= 0.
func SetBlockProfileRate(rate int) {
	var r int64
	if rate <= 0 {
		r = 0 // disable profiling
	} else if rate == 1 {
		r = 1 // profile everything
	} else {
		// convert ns to cycles, use float64 to prevent overflow during multiplication
		r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
		if r == 0 {
			r = 1
		}
	}

	atomic.Store64(&blockprofilerate, uint64(r))
}
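
// Illustrative sketch (not part of this file): a typical caller enables block
// profiling before the workload runs and then dumps the profile through the
// runtime/pprof package. The rate of 1 and the use of pprof.Lookup("block")
// here are example choices, not requirements of this API.
//
//	runtime.SetBlockProfileRate(1) // sample every blocking event
//	defer runtime.SetBlockProfileRate(0)
//	// ... run the workload ...
//	pprof.Lookup("block").WriteTo(os.Stderr, 1)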

func blockevent(cycles int64, skip int) {
	if cycles <= 0 {
		cycles = 1
	}

	rate := int64(atomic.Load64(&blockprofilerate))
	if blocksampled(cycles, rate) {
		saveblockevent(cycles, rate, skip+1, blockProfile)
	}
}

// blocksampled returns true for all events where cycles >= rate. Shorter
// events have a cycles/rate random chance of returning true.
func blocksampled(cycles, rate int64) bool {
	if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
		return false
	}
	return true
}

func saveblockevent(cycles, rate int64, skip int, which bucketType) {
	gp := getg()
	var nstk int
	var stk [maxStack]uintptr
	if gp.m.curg == nil || gp.m.curg == gp {
		nstk = callers(skip, stk[:])
	} else {
		nstk = gcallers(gp.m.curg, skip, stk[:])
	}
	b := stkbucket(which, 0, stk[:nstk], true)
	bp := b.bp()

	lock(&profBlockLock)
	if which == blockProfile && cycles < rate {
		// Remove sampling bias: scale short events up by rate/cycles so the
		// profile estimates the total number and duration of events.
		bp.count += float64(rate) / float64(cycles)
		bp.cycles += rate
	} else {
		bp.count++
		bp.cycles += cycles
	}
	unlock(&profBlockLock)
}

var mutexprofilerate uint64 // fraction sampled

// SetMutexProfileFraction controls the fraction of mutex contention events
// that are reported in the mutex profile. On average 1/rate events are
// reported. The previous rate is returned.
//
// To turn off profiling entirely, pass rate 0.
// To just read the current rate, pass rate < 0.
// (For n>1 the details of sampling may change.)
func SetMutexProfileFraction(rate int) int {
	if rate < 0 {
		return int(mutexprofilerate)
	}
	old := mutexprofilerate
	atomic.Store64(&mutexprofilerate, uint64(rate))
	return int(old)
}
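
// Illustrative sketch (not part of this file): callers commonly sample a
// fraction of contention events and restore the previous setting when done.
// The fraction of 5 is an arbitrary example value; the profile itself is
// usually read via the runtime/pprof "mutex" profile.
//
//	prev := runtime.SetMutexProfileFraction(5) // report ~1/5 of contention events
//	defer runtime.SetMutexProfileFraction(prev)
//	// ... run the workload, then write pprof.Lookup("mutex") somewhere ...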

// mutexevent records a mutex contention event lasting the given number of
// cycles, sampled at a rate of 1/mutexprofilerate.
func mutexevent(cycles int64, skip int) {
	if cycles < 0 {
		cycles = 0
	}
	rate := int64(atomic.Load64(&mutexprofilerate))
	if rate > 0 && int64(fastrand())%rate == 0 {
		saveblockevent(cycles, rate, skip+1, mutexProfile)
	}
}

// Go interface to profile data.

// A StackRecord describes a single execution stack.
type StackRecord struct {
	Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *StackRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfileRate controls the fraction of memory allocations
// that are recorded and reported in the memory profile.
// The profiler aims to sample an average of
// one allocation per MemProfileRate bytes allocated.
//
// To include every allocated block in the profile, set MemProfileRate to 1.
// To turn off profiling entirely, set MemProfileRate to 0.
//
// The tools that process the memory profiles assume that the
// profile rate is constant across the lifetime of the program
// and equal to the current value. Programs that change the
// memory profiling rate should do so just once, as early as
// possible in the execution of the program (for example,
// at the beginning of main).
var MemProfileRate int = defaultMemProfileRate(512 * 1024)

// defaultMemProfileRate returns 0 if disableMemoryProfiling is set.
// It exists primarily for the godoc rendering of MemProfileRate
// above.
func defaultMemProfileRate(v int) int {
	if disableMemoryProfiling {
		return 0
	}
	return v
}
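
// Illustrative sketch (not part of this file): per the MemProfileRate doc
// above, a program that changes the rate does so once, at startup. The rate
// of 64 KiB here is only an example value.
//
//	func main() {
//		runtime.MemProfileRate = 64 * 1024 // sample ~1 allocation per 64 KiB
//		// ... rest of the program ...
//	}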

// disableMemoryProfiling is set by the linker if runtime.MemProfile is not
// used and the link type guarantees nobody else could use it elsewhere.
var disableMemoryProfiling bool

// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
type MemProfileRecord struct {
	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
	AllocObjects, FreeObjects int64       // number of objects allocated, freed
	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
}

// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }

// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (r *MemProfileRecord) InUseObjects() int64 {
	return r.AllocObjects - r.FreeObjects
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *MemProfileRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}
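
// Illustrative sketch (not part of this file): given a slice of records
// already filled by MemProfile (below), in-use totals can be summed with the
// accessors above. The "records" variable is assumed to come from the caller.
//
//	var inUseBytes, inUseObjects int64
//	for i := range records {
//		inUseBytes += records[i].InUseBytes()
//		inUseObjects += records[i].InUseObjects()
//	}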

// MemProfile returns a profile of memory allocated and freed per allocation
// site.
//
// MemProfile returns n, the number of records in the current memory profile.
// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
// If len(p) < n, MemProfile does not change p and returns n, false.
//
// If inuseZero is true, the profile includes allocation records
// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
// These are sites where memory was allocated, but it has all
// been released back to the runtime.
//
// The returned profile may be up to two garbage collection cycles old.
// This is to avoid skewing the profile toward very recent allocations that
// have not yet had a chance to be freed.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.memprofile flag instead
// of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
	cycle := mProfCycle.read()
	// If we're between mProf_NextCycle and mProf_Flush, take care
	// of flushing to the active profile so we only have to look
	// at the active profile below.
	index := cycle % uint32(len(memRecord{}.future))
	lock(&profMemActiveLock)
	lock(&profMemFutureLock[index])
	mProf_FlushLocked(index)
	unlock(&profMemFutureLock[index])
	clear := true
	head := (*bucket)(mbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		mp := b.mp()
		if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
			n++
		}
		if mp.active.allocs != 0 || mp.active.frees != 0 {
			clear = false
		}
	}
	if clear {
		// Absolutely no data, suggesting that a garbage collection
		// has not yet happened. In order to allow profiling when
		// garbage collection is disabled from the beginning of execution,
		// accumulate all of the cycles, and recount buckets.
		n = 0
		for b := head; b != nil; b = b.allnext {
			mp := b.mp()
			for c := range mp.future {
				lock(&profMemFutureLock[c])
				mp.active.add(&mp.future[c])
				mp.future[c] = memRecordCycle{}
				unlock(&profMemFutureLock[c])
			}
			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
				n++
			}
		}
	}
	if n <= len(p) {
		ok = true
		idx := 0
		for b := head; b != nil; b = b.allnext {
			mp := b.mp()
			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
				record(&p[idx], b)
				idx++
			}
		}
	}
	unlock(&profMemActiveLock)
	return
}
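
// Illustrative sketch (not part of this file): the n/ok contract above leads
// to the usual grow-and-retry loop. The extra headroom added to n is a choice
// of the example, guarding against records appearing between the two calls.
//
//	var records []runtime.MemProfileRecord
//	for {
//		n, ok := runtime.MemProfile(records, false)
//		if ok {
//			records = records[:n]
//			break
//		}
//		records = make([]runtime.MemProfileRecord, n+50)
//	}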

// Write b's data to r.
func record(r *MemProfileRecord, b *bucket) {
	mp := b.mp()
	r.AllocBytes = int64(mp.active.alloc_bytes)
	r.FreeBytes = int64(mp.active.free_bytes)
	r.AllocObjects = int64(mp.active.allocs)
	r.FreeObjects = int64(mp.active.frees)
	if raceenabled {
		racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
	}
	if msanenabled {
		msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
	}
	if asanenabled {
		asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
	}
	copy(r.Stack0[:], b.stk())
	for i := int(b.nstk); i < len(r.Stack0); i++ {
		r.Stack0[i] = 0
	}
}

func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
	lock(&profMemActiveLock)
	head := (*bucket)(mbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		mp := b.mp()
		fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
	}
	unlock(&profMemActiveLock)
}

// BlockProfileRecord describes blocking events originated
// at a particular call sequence (stack trace).
type BlockProfileRecord struct {
	Count  int64
	Cycles int64
	StackRecord
}

// BlockProfile returns n, the number of records in the current blocking profile.
// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
// If len(p) < n, BlockProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.blockprofile flag instead
// of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&profBlockLock)
	head := (*bucket)(bbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := head; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = int64(bp.count)
			// Prevent callers from having to worry about division by zero.
			if r.Count == 0 {
				r.Count = 1
			}
			r.Cycles = bp.cycles
			if raceenabled {
				racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
			}
			if msanenabled {
				msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
			}
			if asanenabled {
				asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
			}
			i := copy(r.Stack0[:], b.stk())
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}
	}
	unlock(&profBlockLock)
	return
}

// MutexProfile returns n, the number of records in the current mutex profile.
// If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
// Otherwise, MutexProfile does not change p, and returns n, false.
//
// Most clients should use the runtime/pprof package
// instead of calling MutexProfile directly.
func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&profBlockLock)
	head := (*bucket)(xbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := head; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = int64(bp.count)
			r.Cycles = bp.cycles
			i := copy(r.Stack0[:], b.stk())
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}
	}
	unlock(&profBlockLock)
	return
}

// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
	first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
	for mp := first; mp != nil; mp = mp.alllink {
		n++
	}
	if n <= len(p) {
		ok = true
		i := 0
		for mp := first; mp != nil; mp = mp.alllink {
			p[i].Stack0 = mp.createstack
			i++
		}
	}
	return
}

// runtime_goroutineProfileWithLabels is made available to the runtime/pprof
// package via linkname.
func runtime_goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	return goroutineProfileWithLabels(p, labels)
}

const go119ConcurrentGoroutineProfile = true

// labels may be nil. If labels is non-nil, it must have the same length as p.
func goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	if labels != nil && len(labels) != len(p) {
		labels = nil
	}

	if go119ConcurrentGoroutineProfile {
		return goroutineProfileWithLabelsConcurrent(p, labels)
	}
	return goroutineProfileWithLabelsSync(p, labels)
}

var goroutineProfile = struct {
	sema    uint32
	active  bool
	offset  atomic.Int64
	records []StackRecord
	labels  []unsafe.Pointer
}{
	sema: 1,
}

// goroutineProfileState indicates the status of a goroutine's stack for the
// current in-progress goroutine profile. Goroutines' stacks are initially
// "Absent" from the profile, and end up "Satisfied" by the time the profile
// is complete. While a goroutine's stack is being captured, its state is
// "InProgress" and it is not allowed to run; see tryRecordGoroutineProfile.
type goroutineProfileState uint32

const (
	goroutineProfileAbsent goroutineProfileState = iota
	goroutineProfileInProgress
	goroutineProfileSatisfied
)

type goroutineProfileStateHolder atomic.Uint32

func (p *goroutineProfileStateHolder) Load() goroutineProfileState {
	return goroutineProfileState((*atomic.Uint32)(p).Load())
}

func (p *goroutineProfileStateHolder) Store(value goroutineProfileState) {
	(*atomic.Uint32)(p).Store(uint32(value))
}

func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileState) bool {
	return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new))
}

func goroutineProfileWithLabelsConcurrent(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	semacquire(&goroutineProfile.sema)

	ourg := getg()

	stopTheWorld("profile")
	// Using gcount while the world is stopped gives a consistent view of the
	// number of live goroutines, minus goroutines that are permanently marked
	// as "system". The finalizer goroutine needs special handling because it
	// can vary between counting as a user or a system goroutine.
	n = int(gcount())
	if fingRunning {
		n++
	}

	if n > len(p) {
		// There's not enough space in p to store the whole profile, so (per the
		// contract of runtime.GoroutineProfile) we're not allowed to write to p
		// at all and must return n, false.
		startTheWorld()
		semrelease(&goroutineProfile.sema)
		return n, false
	}

	// Save current goroutine.
	sp := getcallersp()
	pc := getcallerpc()
	systemstack(func() {
		saveg(pc, sp, ourg, &p[0])
	})
	ourg.goroutineProfiled.Store(goroutineProfileSatisfied)
	goroutineProfile.offset.Store(1)

	// Prepare for all other goroutines to enter the profile. Aside from ourg,
	// every goroutine struct in the allgs list has its goroutineProfiled field
	// cleared. Any goroutine created while goroutineProfile.active is set will
	// start with its goroutineProfiled field set to goroutineProfileSatisfied.
	goroutineProfile.active = true
	goroutineProfile.records = p
	goroutineProfile.labels = labels
	// The finalizer goroutine needs special handling because it can vary over
	// time between being a user goroutine (eligible for this profile) and a
	// system goroutine (to be excluded). Pick one before restarting the world.
	if fing != nil {
		fing.goroutineProfiled.Store(goroutineProfileSatisfied)
		if readgstatus(fing) != _Gdead && !isSystemGoroutine(fing, false) {
			doRecordGoroutineProfile(fing)
		}
	}
	startTheWorld()

	// Visit each goroutine that existed as of the startTheWorld call above.
	// Goroutines created after that point are either already marked as
	// satisfied or are not part of this profile. Any goroutine the scheduler
	// tries to run concurrently with this loop will add itself to the profile
	// first (see tryRecordGoroutineProfile), before its stack can change.
	forEachGRace(func(gp1 *g) {
		tryRecordGoroutineProfile(gp1, Gosched)
	})

	stopTheWorld("profile cleanup")
	endOffset := goroutineProfile.offset.Swap(0)
	goroutineProfile.active = false
	goroutineProfile.records = nil
	goroutineProfile.labels = nil
	startTheWorld()

	// Restore the invariant that every goroutine struct in allgs has its
	// goroutineProfiled field cleared.
	forEachGRace(func(gp1 *g) {
		gp1.goroutineProfiled.Store(goroutineProfileAbsent)
	})

	if raceenabled {
		raceacquire(unsafe.Pointer(&labelSync))
	}

	if n != int(endOffset) {
		// The number of goroutines changed while we were collecting the
		// profile. Returning a possibly truncated profile is preferable to
		// crashing the whole process here.
	}

	semrelease(&goroutineProfile.sema)
	return n, true
}

// tryRecordGoroutineProfileWB asserts that write barriers are allowed and
// calls tryRecordGoroutineProfile.
func tryRecordGoroutineProfileWB(gp1 *g) {
	if getg().m.p.ptr() == nil {
		throw("no P available, write barriers are forbidden")
	}
	tryRecordGoroutineProfile(gp1, osyield)
}

// tryRecordGoroutineProfile ensures that gp1 has the appropriate
// representation in the current goroutine profile: either that it should not
// be profiled, or that a snapshot of its call stack and labels is now in the
// profile.
func tryRecordGoroutineProfile(gp1 *g, yield func()) {
	if readgstatus(gp1) == _Gdead {
		// Dead goroutines should not appear in the profile. Goroutines that
		// start while profile collection is active are marked as satisfied
		// before they leave _Gdead, so checking _Gdead first is sufficient.
		return
	}
	if isSystemGoroutine(gp1, true) {
		// System goroutines should not appear in the profile.
		return
	}

	for {
		prev := gp1.goroutineProfiled.Load()
		if prev == goroutineProfileSatisfied {
			// This goroutine is already in the profile (or was created after
			// collection started and therefore does not belong in it).
			break
		}
		if prev == goroutineProfileInProgress {
			// Something else is adding gp1 to the goroutine profile right now.
			// Give that a moment to finish.
			yield()
			continue
		}

		// While gp1.goroutineProfiled is goroutineProfileInProgress, gp1 may
		// look runnable but is not actually allowed to run. Disable preemption
		// for ourselves so we finish recording gp1 promptly rather than
		// leaving it stuck in that state.
		mp := acquirem()
		if gp1.goroutineProfiled.CompareAndSwap(goroutineProfileAbsent, goroutineProfileInProgress) {
			doRecordGoroutineProfile(gp1)
			gp1.goroutineProfiled.Store(goroutineProfileSatisfied)
		}
		releasem(mp)
	}
}

// doRecordGoroutineProfile writes gp1's call stack and labels into the
// in-progress goroutine profile. gp1 must not be running; preemption is
// disabled by the caller (see tryRecordGoroutineProfile).
func doRecordGoroutineProfile(gp1 *g) {
	if readgstatus(gp1) == _Grunning {
		print("doRecordGoroutineProfile gp1=", gp1.goid, "\n")
		throw("cannot read stack of running goroutine")
	}

	offset := int(goroutineProfile.offset.Add(1)) - 1

	if offset >= len(goroutineProfile.records) {
		// Should be impossible, but better to return a truncated profile than
		// to write out of bounds and crash the process.
		return
	}

	// saveg walks gp1's stack via gentraceback. Run it on the system stack so
	// we do not grow or move our own stack while doing so.
	systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &goroutineProfile.records[offset]) })

	if goroutineProfile.labels != nil {
		goroutineProfile.labels[offset] = gp1.labels
	}
}

func goroutineProfileWithLabelsSync(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	gp := getg()

	isOK := func(gp1 *g) bool {
		// Checking isSystemGoroutine here makes GoroutineProfile
		// consistent with both NumGoroutine and Stack.
		return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false)
	}

	stopTheWorld("profile")

	// World is stopped, no locking required.
	n = 1
	forEachGRace(func(gp1 *g) {
		if isOK(gp1) {
			n++
		}
	})

	if n <= len(p) {
		ok = true
		r, lbl := p, labels

		// Save current goroutine.
		sp := getcallersp()
		pc := getcallerpc()
		systemstack(func() {
			saveg(pc, sp, gp, &r[0])
		})
		r = r[1:]

		// If we have a place to put our goroutine labelmap, insert it there.
		if labels != nil {
			lbl[0] = gp.labels
			lbl = lbl[1:]
		}

		// Save other goroutines.
		forEachGRace(func(gp1 *g) {
			if !isOK(gp1) {
				return
			}

			if len(r) == 0 {
				// Should be impossible, but better to return a
				// truncated profile than to crash the entire process.
				return
			}

			// Run saveg on the system stack so we do not grow or move our
			// own stack while reading gp1's.
			systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &r[0]) })
			if labels != nil {
				lbl[0] = gp1.labels
				lbl = lbl[1:]
			}
			r = r[1:]
		})
	}

	if raceenabled {
		raceacquire(unsafe.Pointer(&labelSync))
	}

	startTheWorld()
	return n, ok
}

// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool) {
	return goroutineProfileWithLabels(p, nil)
}
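
// Illustrative sketch (not part of this file): sizing the record slice from
// NumGoroutine plus some headroom mirrors the n/ok contract above. One retry
// is shown; a robust caller would loop until ok, and the headroom of 10 is
// only an example value.
//
//	records := make([]runtime.StackRecord, runtime.NumGoroutine()+10)
//	n, ok := runtime.GoroutineProfile(records)
//	if !ok {
//		records = make([]runtime.StackRecord, n+10)
//		n, ok = runtime.GoroutineProfile(records)
//	}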

// saveg records gp's stack, starting at pc/sp, into r, and zero-terminates
// the record if the stack is shorter than r.Stack0.
func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
	n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, 0)
	if n < len(r.Stack0) {
		r.Stack0[n] = 0
	}
}

// Stack formats a stack trace of the calling goroutine into buf
// and returns the number of bytes written to buf.
// If all is true, Stack formats stack traces of all other goroutines
// into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int {
	if all {
		stopTheWorld("stack trace")
	}

	n := 0
	if len(buf) > 0 {
		gp := getg()
		sp := getcallersp()
		pc := getcallerpc()
		systemstack(func() {
			g0 := getg()
			// Force traceback=1 to override GOTRACEBACK setting,
			// so that Stack's results are consistent regardless of
			// GOTRACEBACK.
			g0.m.traceback = 1
			g0.writebuf = buf[0:0:len(buf)]
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
			if all {
				tracebackothers(gp)
			}
			g0.m.traceback = 0
			n = len(g0.writebuf)
			g0.writebuf = nil
		})
	}

	if all {
		startTheWorld()
	}
	return n
}
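
// Illustrative sketch (not part of this file): because Stack only reports how
// many bytes it wrote, callers that want a complete trace typically grow the
// buffer until it no longer fills up. The initial size and doubling here are
// arbitrary example choices.
//
//	buf := make([]byte, 64<<10)
//	for {
//		n := runtime.Stack(buf, true)
//		if n < len(buf) {
//			buf = buf[:n]
//			break
//		}
//		buf = make([]byte, 2*len(buf))
//	}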

// Tracing of alloc/free/gc.

var tracelock mutex

func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	if typ == nil {
		print("tracealloc(", p, ", ", hex(size), ")\n")
	} else {
		print("tracealloc(", p, ", ", hex(size), ", ", typ.string(), ")\n")
	}
	if gp.m.curg == nil || gp == gp.m.curg {
		goroutineheader(gp)
		pc := getcallerpc()
		sp := getcallersp()
		systemstack(func() {
			traceback(pc, sp, 0, gp)
		})
	} else {
		goroutineheader(gp.m.curg)
		traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
	}
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

func tracefree(p unsafe.Pointer, size uintptr) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracefree(", p, ", ", hex(size), ")\n")
	goroutineheader(gp)
	pc := getcallerpc()
	sp := getcallersp()
	systemstack(func() {
		traceback(pc, sp, 0, gp)
	})
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

func tracegc() {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracegc()\n")
	// running on m->g0 stack; show all non-g0 goroutines
	tracebackothers(gp)
	print("end tracegc\n")
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}