Source file
src/runtime/trace.go
Documentation: runtime
1
2
3
4
5
6
7
8
9
10
11
12
13 package runtime
14
15 import (
16 "internal/goarch"
17 "runtime/internal/atomic"
18 "runtime/internal/sys"
19 "unsafe"
20 )
21
22
// Event types in the trace, args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvGCSTWStart        = 9  // GC STW start [timestamp, kind]
	traceEvGCSTWDone         = 10 // GC STW done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // gcController.heapLive change [timestamp, heap_alloc]
	traceEvHeapGoal          = 34 // gcController.heapGoal() change [timestamp, heap goal in bytes]
	traceEvTimerGoroutine    = 35 // not currently used; denoted timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
	traceEvUserTaskCreate    = 45 // trace.NewTask [timestamp, internal task id, internal parent task id, name string, stack]
	traceEvUserTaskEnd       = 46 // end of a task [timestamp, internal task id, stack]
	traceEvUserRegion        = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), name string, stack]
	traceEvUserLog           = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
	traceEvCPUSample         = 49 // CPU profiling sample [timestamp, real timestamp, real P id (-1 when absent), goroutine id, stack]
	traceEvCount             = 50
	// Byte is used but only 6 bits are available for event type.
	// The remaining 2 bits are used to specify the number of arguments.
	// That means, the max event type value is 63.
)
79
const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in fewer bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	//
	// traceTickDiv evaluates to 64 on 386/amd64 and 16 elsewhere
	// (goarch.Is386/goarch.IsAmd64 are 0 or 1).
	traceTickDiv = 16 + 48*(goarch.Is386|goarch.IsAmd64)

	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128

	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1

	// Maximum number of bytes needed to encode uint64 in base-128 (varint).
	traceBytesPerNumber = 10

	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6

	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine is scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting for end user.
	traceFutileWakeup byte = 128
)
109
110
// trace is global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock locks
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled = false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	seqGC         uint64      // GC start sequence number
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        guintptr        // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	// cpuLogRead accepts CPU profile samples from the signal handler where
	// they're generated. It's the reading side of a profBuf; samples are
	// drained by traceReadCPU.
	cpuLogRead *profBuf

	// cpuLogBuf is a trace buffer dedicated to events created from CPU
	// profile samples by traceReadCPU; only the trace reader (or stopped
	// trace) may touch it.
	cpuLogBuf traceBufPtr

	signalLock  atomic.Uint32 // protects use of cpuLogWrite from the signal handler (see traceCPUSample)
	cpuLogWrite *profBuf      // copy of cpuLogRead for use in signal handlers, set/read with atomics

	// Dictionary of string IDs emitted as traceEvString records.
	stringsLock mutex
	strings     map[string]uint64 // maps string to unique id
	stringSeq   uint64            // last allocated string id

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}
166
167
// traceBufHeader is per-P tracing buffer.
type traceBufHeader struct {
	link      traceBufPtr             // in trace.empty/full lists
	lastTicks uint64                  // when we wrote the last event (cputicks/traceTickDiv)
	pos       int                     // next write offset in arr
	stk       [traceStackSize]uintptr // scratch buffer for traceback
}
174
175
176
177
// traceBuf is per-P tracing buffer. The array size is chosen so that the
// whole buffer (header plus payload) is exactly 64 KiB.
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.pos
}
182
183
184
185
186
187
188
189
// traceBufPtr is a *traceBuf that is not traced by the garbage collector.
// Trace buffers are allocated from non-GC'd memory via sysAlloc (see
// traceFlush), so storing them as bare uintptrs keeps the GC from scanning
// or adjusting them.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}
197
198
199
200
201
202
// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	//
	// Do not stop the world during GC so we ensure we always see
	// a consistent view of GC-related events (e.g. a start is always
	// paired with an end).
	stopTheWorldGC("start tracing")

	// Prevent sysmon from running any code that could generate events.
	lock(&sched.sysmonlock)

	// We are in stop-the-world, but syscalls can finish and write to trace
	// concurrently. Exitsyscall could check trace.enabled long before and
	// then suddenly wake up and decide to write to trace at a random point
	// in time. However, such syscall will use the global trace.buf buffer,
	// because we've disabled all P's buffers. Lock trace.bufLock to protect
	// against this race.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return errorString("tracing is already enabled")
	}

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall
	// could already emit an event. To instruct traceEvent that it must not
	// ignore the preliminary events emitted below, we set startingtrace;
	// trace.enabled is set only after all of them have been written, so
	// the trace stays internally consistent.
	_g_ := getg()
	_g_.m.startingtrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	mp := acquirem()
	stkBuf := make([]uintptr, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)
	releasem(mp)

	// profBuf header (after the timestamp) is [pp.id, gp.goid]: 2 words.
	profBuf := newProfBuf(2, profBufWordCount, profBufTagCount)
	trace.cpuLogRead = profBuf

	// We must not acquire trace.signalLock outside of a signal handler: a
	// profiling signal may arrive at any time and try to acquire it, leading
	// to deadlock. Because we can't use that lock to protect updates to
	// trace.cpuLogWrite (only use of the structure it references), reads and
	// writes of the pointer must be atomic.
	atomicstorep(unsafe.Pointer(&trace.cpuLogWrite), unsafe.Pointer(profBuf))

	// Emit a GoCreate (and possibly a state) event for every live goroutine.
	forEachGRace(func(gp *g) {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and
			// subtracts PCQuantum.
			id := trace.stackTab.put([]uintptr{startPCforTrace(gp.startpc) + sys.PCQuantum})
			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.traceseq++
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	})
	traceProcStart()
	traceGoStart()
	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysexitticks after ticksStart but before traceEvGoInSyscall
	// timestamp. As a result, the time spent in syscall will look negative.
	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// String id 0 is reserved for the empty string; ids are handed out by
	// traceString starting from 1.
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	_g_.m.startingtrace = false
	trace.enabled = true

	// Register the mark worker labels so traceEvGoStartLabel can refer to
	// them by string id.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(pid)

	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC()
	return nil
}
313
314
315
// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's
	// below, and also to avoid races with traceEvent.
	stopTheWorldGC("stop tracing")

	// See the comment in StartTrace.
	lock(&sched.sysmonlock)

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return
	}

	traceGoSched()

	// Detach the CPU-profile feed and drain whatever samples are pending.
	atomicstorep(unsafe.Pointer(&trace.cpuLogWrite), nil)
	trace.cpuLogRead.close()
	traceReadCPU()

	// Loop over all allocated Ps because dead Ps may still have trace
	// buffers.
	for _, p := range allp[:cap(allp)] {
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}
	if trace.cpuLogBuf != 0 {
		buf := trace.cpuLogBuf
		trace.cpuLogBuf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	// Wait until timeEnd differs from timeStart; ReadTrace divides by the
	// difference when computing the tick frequency, so it must be nonzero.
	// (On coarse-clock systems the trace may have started in the same tick.)
	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC()

	// The world is started but we've set trace.shutdown, so new tracing
	// can't start. Wait for the trace reader to flush pending buffers and
	// stop (ReadTrace releases shutdownSema when it observes shutdown).
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because
	// they do stop-the-world. Everything below is sanity checking plus
	// freeing the buffers.
	lock(&trace.lock)
	for _, p := range allp[:cap(allp)] {
		if p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != 0 {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}
	trace.strings = nil
	trace.shutdown = false
	trace.cpuLogRead = nil
	unlock(&trace.lock)
}
415
416
417
418
419
420
// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != 0 {
		// More than one goroutine reads trace. This is bad.
		// But we rather do not crash the program because of tracing,
		// because tracing can be enabled at runtime on prod servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.19 trace\x00\x00\x00")
	}
	// Optimistically look for CPU profile samples. This may write new stack
	// records, and may write new tracing buffers.
	if !trace.footerWritten && !trace.shutdown {
		traceReadCPU()
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader.set(getg())
		goparkunlock(&trace.lock, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}

	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		if freq <= 0 {
			throw("trace: ReadTrace got invalid frequency")
		}
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		// This will emit a bunch of full buffers, we will pick them up
		// on the next iteration.
		trace.stackTab.dump()
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization in StopTrace.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also bad, but see the comment above.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}
511
512
513
514 func traceReader() *g {
515 if !traceReaderAvailable() {
516 return nil
517 }
518 lock(&trace.lock)
519 if !traceReaderAvailable() {
520 unlock(&trace.lock)
521 return nil
522 }
523 gp := trace.reader.ptr()
524 trace.reader.set(nil)
525 unlock(&trace.lock)
526 return gp
527 }
528
529
530
531
532 func traceReaderAvailable() bool {
533 return trace.reader != 0 && (trace.fullHead != 0 || trace.shutdown)
534 }
535
536
// traceProcFree frees trace buffer associated with pp, queueing any pending
// contents onto the full-buffer list for the reader.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}
547
548
549 func traceFullQueue(buf traceBufPtr) {
550 buf.ptr().link = 0
551 if trace.fullHead == 0 {
552 trace.fullHead = buf
553 } else {
554 trace.fullTail.ptr().link = buf
555 }
556 trace.fullTail = buf
557 }
558
559
560 func traceFullDequeue() traceBufPtr {
561 buf := trace.fullHead
562 if buf == 0 {
563 return 0
564 }
565 trace.fullHead = buf.ptr().link
566 if trace.fullHead == 0 {
567 trace.fullTail = 0
568 }
569 buf.ptr().link = 0
570 return buf
571 }
572
573
574
575
576
577
// traceEvent writes a single event to trace buffer, flushing the buffer if
// necessary. ev is event type. If skip > 0, write current stack id as the last
// argument (skipping skip top frames). If skip = 0, this event type should
// contain a stack, but we don't want to collect and remember it for this
// particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired
	// bufLock. This protects from races between traceEvent and StartTrace/
	// StopTrace.
	//
	// The caller checked that trace.enabled == true, but trace.enabled might
	// have been turned off between the check and now. Check again.
	// traceAcquireBuffer did mp.locks++, StopTrace does stopTheWorld, and
	// stopTheWorld waits for mp.locks to go back to zero, so if we see
	// trace.enabled == true now, we know it's true for the rest of the
	// function. Exitsyscall can run even during stopTheWorld. The race with
	// StartTrace/StopTrace during tracing in exitsyscall is resolved by
	// locking trace.bufLock in traceAcquireBuffer.
	//
	// Note trace_userTaskCreate runs the same check.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	if skip > 0 {
		if getg() == mp.curg {
			skip++ // +1 because stack is captured in traceEventLocked.
		}
	}
	traceEventLocked(0, mp, pid, bufp, ev, 0, skip, args...)
	traceReleaseBuffer(pid)
}
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
// traceEventLocked writes a single event of type ev to the trace buffer bufp,
// flushing the buffer if necessary. pid is the id of the current P, or
// traceGlobProc if we're tracing without a real P.
//
// extraBytes is the number of additional bytes the caller will write right
// after this event, for which space must be reserved up front.
//
// A stack is attached either by id (stackID != 0) or by capturing one now
// (skip >= 0, with skip frames dropped); skip < 0 and stackID == 0 means
// no stack argument.
func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, stackID uint32, skip int, args ...uint64) {
	buf := bufp.ptr()
	// Worst case: event type, length, sequence, timestamp, stack id and two add params.
	maxSize := 2 + 5*traceBytesPerNumber + extraBytes
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}

	// Timestamps are delta-encoded against the previous event in this
	// buffer; force a minimum delta of 1 so events stay strictly ordered
	// even when cputicks()/traceTickDiv collapses to the same value.
	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	if tickDiff == 0 {
		ticks = buf.lastTicks + 1
		tickDiff = 1
	}

	buf.lastTicks = ticks
	narg := byte(len(args))
	if stackID != 0 || skip >= 0 {
		narg++ // the stack counts as one more argument
	}
	// We have only 2 bits for number of arguments.
	// If number is >= 3, then the event type is followed by event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if stackID != 0 {
		buf.varint(uint64(stackID))
	} else if skip == 0 {
		buf.varint(0) // this event type carries a stack slot, but none was collected
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
}
678
679
680
681
// traceCPUSample writes a CPU profile sample stack to the execution tracer's
// profiling buffer. It is called from a signal handler, so is limited in what
// it can do (no allocation, no locks other than the dedicated spin lock).
func traceCPUSample(gp *g, pp *p, stk []uintptr) {
	if !trace.enabled {
		// Tracing is usually turned off; exit before doing anything expensive.
		return
	}

	// Match the clock used in traceEventLocked (cputicks, divided later).
	now := cputicks()
	// The "header" here is the ID of the P that was running the profiled
	// code, followed by the ID of the goroutine.
	var hdr [2]uint64
	if pp != nil {
		// The low bit indicates whether a real P was there.
		hdr[0] = uint64(pp.id)<<1 | 0b1
	} else {
		hdr[0] = 0b10
	}
	if gp != nil {
		hdr[1] = uint64(gp.goid)
	}

	// Allow only one writer at a time. This is a spin lock because we're in
	// a signal handler and cannot block.
	for !trace.signalLock.CompareAndSwap(0, 1) {
		// TODO: Is it safe to osyield here? https://go.dev/issue/52672
		osyield()
	}

	// trace.cpuLogWrite is read atomically because StartTrace/StopTrace
	// update it without taking signalLock (see comment in StartTrace).
	if log := (*profBuf)(atomic.Loadp(unsafe.Pointer(&trace.cpuLogWrite))); log != nil {
		// Note: we don't pass a tag pointer here (how should profiling tags
		// interact with the execution tracer?), but if we did we'd need to be
		// careful about write barriers. See the long comment in profBuf.write.
		log.write(nil, now, hdr[:], stk)
	}

	trace.signalLock.Store(0)
}
722
// traceReadCPU attempts to add events from trace.cpuLogRead to the trace
// buffer. It is only safe to call while the trace is stopped or on the trace
// reader's goroutine, since it mutates trace.cpuLogBuf.
func traceReadCPU() {
	bufp := &trace.cpuLogBuf

	for {
		data, tags, _ := trace.cpuLogRead.read(profBufNonBlocking)
		if len(data) == 0 {
			break
		}
		for len(data) > 0 {
			// Each record is [total length, timestamp, P id, goroutine id,
			// PCs...]; validate the framing before trusting it.
			if len(data) < 4 || data[0] > uint64(len(data)) {
				break // truncated profile
			}
			if data[0] < 4 || tags != nil && len(tags) < 1 {
				break // malformed profile
			}
			if len(tags) < 1 {
				break // mismatched profile records and tags
			}
			timestamp := data[1]
			ppid := data[2] >> 1
			if hasP := (data[2] & 0b1) != 0; !hasP {
				// No real P: report an all-ones P id (see traceCPUSample).
				ppid = ^uint64(0)
			}
			goid := data[3]
			stk := data[4:data[0]]
			empty := len(stk) == 1 && data[2] == 0 && data[3] == 0
			data = data[data[0]:]
			// No support here for reporting goroutine tags at the moment; if
			// that information is to be part of the execution trace, we'd
			// probably want to see when the tags are applied and when they
			// change, instead of only seeing them when we get a CPU sample.
			tags = tags[1:]

			if empty {
				// Looks like an overflow record from the profBuf. Not much to
				// do here, we only want to report full records.
				//
				// TODO: should we start a goroutine to drain the profBuf,
				// rather than relying on a high-enough volume of tracing
				// events to keep ReadTrace busy? https://go.dev/issue/52674
				continue
			}

			buf := bufp.ptr()
			if buf == nil {
				*bufp = traceFlush(*bufp, 0)
				buf = bufp.ptr()
			}
			// Copy the sample PCs into the buffer's scratch stack, capped at
			// its capacity, then register the stack for an id.
			for i := range stk {
				if i >= len(buf.stk) {
					break
				}
				buf.stk[i] = uintptr(stk[i])
			}
			stackID := trace.stackTab.put(buf.stk[:len(stk)])

			traceEventLocked(0, nil, 0, bufp, traceEvCPUSample, stackID, 1, timestamp/traceTickDiv, ppid, goid)
		}
	}
}
783
784 func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
785 _g_ := getg()
786 gp := mp.curg
787 var nstk int
788 if gp == _g_ {
789 nstk = callers(skip+1, buf)
790 } else if gp != nil {
791 gp = mp.curg
792 nstk = gcallers(gp, skip, buf)
793 }
794 if nstk > 0 {
795 nstk--
796 }
797 if nstk > 0 && gp.goid == 1 {
798 nstk--
799 }
800 id := trace.stackTab.put(buf[:nstk])
801 return uint64(id)
802 }
803
804
// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
// With a P, the P's own buffer is returned (no lock needed: the buffer is
// P-local); without one, the global buffer is returned with trace.bufLock
// held and pid set to traceGlobProc. Must be paired with traceReleaseBuffer.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}
813
814
815 func traceReleaseBuffer(pid int32) {
816 if pid == traceGlobProc {
817 unlock(&trace.bufLock)
818 }
819 releasem(getg().m)
820 }
821
822
// traceFlush puts buf onto stack of full buffers and returns an empty buffer,
// initialized with a batch header for pid.
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	// trace.lock may already be held by ReadTrace (tracked via lockOwner),
	// in which case we must not lock it again.
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	// Reuse an empty buffer if available, otherwise allocate a fresh one
	// from non-GC'd memory.
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// Initialize the buffer for a new batch: force lastTicks to advance so
	// the first event's tick delta is nonzero.
	ticks := uint64(cputicks()) / traceTickDiv
	if ticks == bufp.lastTicks {
		ticks = bufp.lastTicks + 1
	}
	bufp.lastTicks = ticks
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(ticks)

	if dolock {
		unlock(&trace.lock)
	}
	return buf
}
860
861
// traceString adds a string to trace.strings and returns the id, emitting a
// traceEvString record the first time the string is seen. The empty string
// maps to id 0 and is never emitted.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}

	lock(&trace.stringsLock)
	if raceenabled {
		// raceacquire is necessary because the map access
		// below is race annotated.
		raceacquire(unsafe.Pointer(&trace.stringsLock))
	}

	if id, ok := trace.strings[s]; ok {
		if raceenabled {
			racerelease(unsafe.Pointer(&trace.stringsLock))
		}
		unlock(&trace.stringsLock)

		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	if raceenabled {
		racerelease(unsafe.Pointer(&trace.stringsLock))
	}
	unlock(&trace.stringsLock)

	// memory allocation in above may trigger tracing and
	// cause *bufp changes. Following code now works with *bufp,
	// so there must be no memory allocation or any activities
	// that causes tracing after this point.

	buf := bufp.ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}
	buf.byte(traceEvString)
	buf.varint(id)

	// double-check the string and the length can fit.
	// Otherwise, truncate the string.
	slen := len(s)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}

	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], s[:slen])

	bufp.set(buf)
	return id, bufp
}
919
920
// traceAppend appends v to buf in little-endian base-128 (varint) encoding
// and returns the extended slice.
func traceAppend(buf []byte, v uint64) []byte {
	for v >= 0x80 {
		buf = append(buf, byte(v)|0x80)
		v >>= 7
	}
	return append(buf, byte(v))
}
928
929
930 func (buf *traceBuf) varint(v uint64) {
931 pos := buf.pos
932 for ; v >= 0x80; v >>= 7 {
933 buf.arr[pos] = 0x80 | byte(v)
934 pos++
935 }
936 buf.arr[pos] = byte(v)
937 pos++
938 buf.pos = pos
939 }
940
941
942 func (buf *traceBuf) byte(v byte) {
943 buf.arr[buf.pos] = v
944 buf.pos++
945 }
946
947
948
// traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids.
// It is lock-free for reading (see find); lock protects writers (put).
type traceStackTable struct {
	lock mutex
	seq  uint32     // last allocated stack id
	mem  traceAlloc // backing storage for traceStack records
	tab  [1 << 13]traceStackPtr // hash buckets, chained via traceStack.link
}
955
956
// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr // next entry in the same hash bucket
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr; storage allocated by newStack
}

// traceStackPtr is a *traceStack that is not traced by the garbage collector
// (entries live in traceAlloc's sysAlloc'd memory).
type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }
968
969
// stack returns slice of PCs, reinterpreting the zero-length stk field as
// the n-element array that newStack allocated after the struct.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}
973
974
975
// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		stkpc[i] = pc
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	// Publish with an atomic store so lock-free readers (find) see a fully
	// initialized record.
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}
1007
1008
// find checks if the stack trace pcs is already present in the table,
// returning its id or 0. Safe to call without tab.lock (see put).
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			// Hash and length match; verify the PCs element by element.
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}
1024
1025
// newStack allocates a new stack of size n, with room for n PCs after the
// traceStack header.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*goarch.PtrSize))
}
1029
1030
1031 func allFrames(pcs []uintptr) []Frame {
1032 frames := make([]Frame, 0, len(pcs))
1033 ci := CallersFrames(pcs)
1034 for {
1035 f, more := ci.Next()
1036 frames = append(frames, f)
1037 if !more {
1038 return frames
1039 }
1040 }
1041 }
1042
1043
1044
// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	// Scratch buffer sized for the worst case: id + frame count plus four
	// varints (PC, func, file, line) per frame.
	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
	bufp := traceFlush(0, 0)
	for _, stk := range tab.tab {
		stk := stk.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			frames := allFrames(stk.stack())
			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
			for _, f := range frames {
				var frame traceFrame
				// May flush bufp; also registers the function/file strings.
				frame, bufp = traceFrameForPC(bufp, 0, f)
				tmpbuf = traceAppend(tmpbuf, uint64(f.PC))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
			}
			// Now copy to the buffer as a traceEvStack record.
			size := 1 + traceBytesPerNumber + len(tmpbuf)
			if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
				bufp = traceFlush(bufp, 0)
			}
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(bufp)
	unlock(&trace.lock)

	// Release all backing memory and reset the table for reuse.
	tab.mem.drop()
	*tab = traceStackTable{}
	lockInit(&((*tab).lock), lockRankTraceStackTab)
}
1083
// traceFrame is frame information cached by traceFrameForPC.
type traceFrame struct {
	funcID uint64 // string id of the function name
	fileID uint64 // string id of the file name
	line   uint64
}
1089
1090
1091
// traceFrameForPC records the frame information.
// It may allocate memory (via traceString's map insert).
func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame

	fn := f.Function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		// Truncate from the front, keeping the tail: it is the most
		// specific part of a long qualified name.
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.Line)
	file := f.File
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, (*bufp)
}
1110
1111
1112
// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr // most recently allocated block
	off  uintptr            // next free offset in head's data
}

// traceAllocBlock is a block in traceAlloc. Blocks are allocated from
// non-GC'd memory via sysAlloc (see alloc), so next is stored as a bare
// uintptr rather than a pointer the GC would try to trace. Each block is
// exactly 64 KiB including the next field.
type traceAllocBlock struct {
	next traceAllocBlockPtr
	data [64<<10 - goarch.PtrSize]byte
}

// traceAllocBlockPtr is a *traceAllocBlock that is not traced by the GC.
type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }
1135
1136
// alloc allocates an n-byte block, rounding n up to pointer alignment and
// starting a fresh 64 KiB block when the current one is full.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = alignUp(n, goarch.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		// Note: len of the fixed-size data array is a compile-time constant,
		// so this does not dereference a.head even when it is 0.
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}
1155
1156
1157 func (a *traceAlloc) drop() {
1158 for a.head != 0 {
1159 block := a.head.ptr()
1160 a.head.set(block.next.ptr())
1161 sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
1162 }
1163 }
1164
1165
1166
// The following functions write specific events to trace.

// traceGomaxprocs records a change of GOMAXPROCS, with the caller's stack.
func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

// traceProcStart records the current M's P starting to run.
func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

// traceProcStop records pp stopping.
func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls,
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

// traceGCStart records the start of a GC cycle with its sequence number.
func traceGCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

// traceGCDone records the end of a GC cycle.
func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

// traceGCSTWStart records the beginning of a stop-the-world; kind encodes
// the reason.
func traceGCSTWStart(kind int) {
	traceEvent(traceEvGCSTWStart, -1, uint64(kind))
}

// traceGCSTWDone records the end of a stop-the-world.
func traceGCSTWDone() {
	traceEvent(traceEvGCSTWDone, -1)
}
1202
1203
1204
1205
1206
1207
// traceGCSweepStart prepares to trace a sweep loop. This does not emit any
// events until traceGCSweepSpan is called at least once.
//
// traceGCSweepStart must be paired with traceGCSweepDone and there must be no
// preemption points between these two calls (the state lives on the P).
func traceGCSweepStart() {
	// Delay the actual GCSweepStart event until the first span sweep. If we
	// don't sweep anything, don't emit any events.
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		throw("double traceGCSweepStart")
	}
	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
}

// traceGCSweepSpan traces the sweep of a single page.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone pair;
// however, it will not emit any trace events in this case.
func traceGCSweepSpan(bytesSwept uintptr) {
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		if _p_.traceSwept == 0 {
			// First span of this sweep loop: emit the deferred start event.
			traceEvent(traceEvGCSweepStart, 1)
		}
		_p_.traceSwept += bytesSwept
	}
}

// traceGCSweepDone finishes tracing a sweep loop. If any memory was actually
// swept (a GCSweepStart was emitted), it emits the matching GCSweepDone with
// the total swept and reclaimed byte counts.
func traceGCSweepDone() {
	_p_ := getg().m.p.ptr()
	if !_p_.traceSweep {
		throw("missing traceGCSweepStart")
	}
	if _p_.traceSwept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
	}
	_p_.traceSweep = false
}
1242
// traceGCMarkAssistStart records the start of a GC mark assist, with stack.
func traceGCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

// traceGCMarkAssistDone records the end of a GC mark assist.
func traceGCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}

// traceGoCreate records creation of goroutine newg with start PC pc.
func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]uintptr{startPCforTrace(pc) + sys.PCQuantum})
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}
1258
// traceGoStart records the current goroutine starting to run. GC mark
// workers get a labeled start event; a goroutine resuming on the same P that
// last ran it gets the compact GoStartLocal form.
func traceGoStart() {
	_g_ := getg().m.curg
	_p_ := _g_.m.p
	_g_.traceseq++
	if _p_.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
	} else if _g_.tracelastp == _p_ {
		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
	} else {
		_g_.tracelastp = _p_
		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
	}
}
1272
// traceGoEnd records the current goroutine finishing.
func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

// traceGoSched records the current goroutine yielding via Gosched.
func traceGoSched() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSched, 1)
}

// traceGoPreempt records the current goroutine being preempted.
func traceGoPreempt() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoPreempt, 1)
}

// traceGoPark records the current goroutine blocking with the given block
// event type. If the traceFutileWakeup bit is set in traceEv, a FutileWakeup
// event is emitted first and the bit is masked off the block event.
func traceGoPark(traceEv byte, skip int) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}
1295
// traceGoUnpark records gp becoming runnable. The compact GoUnblockLocal
// form is used when gp last ran on the current P.
func traceGoUnpark(gp *g, skip int) {
	_p_ := getg().m.p
	gp.traceseq++
	if gp.tracelastp == _p_ {
		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
	} else {
		gp.tracelastp = _p_
		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
	}
}

// traceGoSysCall records the current goroutine entering a syscall.
func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}
1310
// traceGoSysExit records the current goroutine returning from a syscall.
// ts is the cputicks timestamp of the actual syscall exit, or 0 if unknown.
func traceGoSysExit(ts int64) {
	if ts != 0 && ts < trace.ticksStart {
		// There is a race between the code that initializes sysexitticks
		// (in exitsyscall, which runs without a P, and therefore is not
		// stopped with the rest of the world) and the code that initializes
		// a new trace. The recorded sysexitticks must therefore be treated
		// as "best effort". If they are valid for this trace, then great,
		// use them for greater accuracy. But if they're not valid for this
		// trace, assume that the trace was started after the actual syscall
		// exit (but before we actually managed to start the goroutine,
		// aka right now), and assign a fresh time stamp to keep the log
		// consistent.
		ts = 0
	}
	_g_ := getg().m.curg
	_g_.traceseq++
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
}
1329
// traceGoSysBlock records pp's goroutine being declared blocked in a syscall.
func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as
	// blocked, to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}
1340
// traceHeapAlloc records the current value of gcController.heapLive.
func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, gcController.heapLive)
}

// traceHeapGoal records the current heap goal, emitting 0 when the goal is
// ^uint64(0) (heap-based triggering disabled).
func traceHeapGoal() {
	heapGoal := gcController.heapGoal()
	if heapGoal == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvHeapGoal, -1, 0)
	} else {
		traceEvent(traceEvHeapGoal, -1, heapGoal)
	}
}
1354
1355
1356
1357
1358
// trace_userTaskCreate emits a UserTaskCreate event carrying the new task id,
// its parent task id, and a string id for the task type.
// NOTE(review): the trace_ prefix suggests this is linked from the
// runtime/trace package via go:linkname — confirm the directive upstream.
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	if !trace.enabled {
		return
	}

	// Same double-check as in traceEvent.
	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	typeStringID, bufp := traceString(bufp, pid, taskType)
	traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 0, 3, id, parentID, typeStringID)
	traceReleaseBuffer(pid)
}
1375
1376
// trace_userTaskEnd emits a UserTaskEnd event for task id.
func trace_userTaskEnd(id uint64) {
	traceEvent(traceEvUserTaskEnd, 2, id)
}
1380
1381
// trace_userRegion emits a UserRegion event for task id with the given mode
// (start/end) and a string id for the region name.
func trace_userRegion(id, mode uint64, name string) {
	if !trace.enabled {
		return
	}

	// Same double-check as in traceEvent.
	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	nameStringID, bufp := traceString(bufp, pid, name)
	traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 0, 3, id, mode, nameStringID)
	traceReleaseBuffer(pid)
}
1397
1398
// trace_userLog emits a UserLog event for task id with a string id for the
// category and the (possibly truncated) message appended inline.
func trace_userLog(id uint64, category, message string) {
	if !trace.enabled {
		return
	}

	// Same double-check as in traceEvent.
	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	categoryID, bufp := traceString(bufp, pid, category)

	// The log message is appended after the normal event arguments;
	// reserve room for its length varint plus the bytes themselves.
	extraSpace := traceBytesPerNumber + len(message)
	traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 0, 3, id, categoryID)
	// traceEventLocked reserved extra space for the message and its length,
	// so buf now has room for the following.
	buf := bufp.ptr()

	// double-check the message and its length can fit.
	// Otherwise, truncate the message.
	slen := len(message)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}
	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], message[:slen])

	traceReleaseBuffer(pid)
}
1429
1430
1431
// startPCforTrace returns the PC to show in the trace for a goroutine with
// start PC pc. If pc refers to a compiler-generated wrapper (it carries
// _FUNCDATA_WrapInfo), the PC of the wrapped function is returned instead.
func startPCforTrace(pc uintptr) uintptr {
	f := findfunc(pc)
	if !f.valid() {
		return pc // should not happen, but don't care
	}
	w := funcdata(f, _FUNCDATA_WrapInfo)
	if w == nil {
		return pc // not a wrapper
	}
	// WrapInfo holds the text offset of the wrapped function's entry.
	return f.datap.textAddr(*(*uint32)(w))
}
1443
View as plain text