Source file
src/runtime/mgcsweep.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: sweeping

// The sweeper consists of two different algorithms:
//
// * The object reclaimer finds and frees unmarked slots in spans. It
//   can free a whole span if none of the objects are marked, but that
//   isn't its goal. This can be driven either synchronously by
//   mcentral.cacheSpan for mcentral spans, or asynchronously by
//   sweepone, which looks at all the mcentral lists.
//
// * The span reclaimer looks for spans that contain no marked objects
//   and frees whole spans. This is a separate algorithm because
//   freeing whole spans is the hardest task for the object reclaimer,
//   but is critical when allocating new spans. The entry point for
//   this is mheap_.reclaim, and it's driven by a sequential scan of
//   the page marks bitmap in the heap arenas.
//
// Both algorithms ultimately call mspan.sweep, which sweeps a single
// heap span.
package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

var sweep sweepdata

// State of background sweep.
type sweepdata struct {
	lock    mutex
	g       *g
	parked  bool
	started bool

	nbgsweep    uint32
	npausesweep uint32

	// active tracks outstanding sweepers and the sweep
	// termination condition.
	active activeSweep

	// centralIndex is the current unswept span class.
	// It represents an index into the mcentral span
	// sets. Accessed and updated via its load and
	// update methods. Not protected by a lock.
	//
	// Reset at mark termination.
	// Used by mheap.nextSpanForSweep.
	centralIndex sweepClass
}

// sweepClass is a spanClass and one bit to represent whether we're currently
// sweeping partial or full spans.
type sweepClass uint32

const (
	numSweepClasses            = numSpanClasses * 2
	sweepClassDone  sweepClass = sweepClass(^uint32(0))
)

func (s *sweepClass) load() sweepClass {
	return sweepClass(atomic.Load((*uint32)(s)))
}

func (s *sweepClass) update(sNew sweepClass) {
	// Only update *s if its current value is less than sNew,
	// since *s increases monotonically.
	sOld := s.load()
	for sOld < sNew && !atomic.Cas((*uint32)(s), uint32(sOld), uint32(sNew)) {
		sOld = s.load()
	}
	// TODO(mknyszek): This isn't the only place we have an atomic
	// monotonically increasing counter. It would be nice to have
	// an "atomic max" operation for this.
}

func (s *sweepClass) clear() {
	atomic.Store((*uint32)(s), 0)
}

// split returns the underlying span class as well as
// whether we're interested in the full or partial
// unswept lists for that class, indicated as a boolean
// (true means "full").
func (s sweepClass) split() (spc spanClass, full bool) {
	return spanClass(s >> 1), s&1 == 0
}
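// For example, walking consecutive sweepClass values visits each span
// class twice, full spans first:
//
//	sweepClass(4).split() // spanClass(2), full == true  (4&1 == 0)
//	sweepClass(5).split() // spanClass(2), full == false (5&1 == 1)
//
// so ascending order is: class 0 full, class 0 partial, class 1 full,
// class 1 partial, and so on, which is the order in which
// nextSpanForSweep below scans the mcentral sweep buffers.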
// nextSpanForSweep finds and pops the next span for sweeping from the
// central sweep buffers. It returns ownership of the span to the caller.
// Returns nil if no such span exists.
func (h *mheap) nextSpanForSweep() *mspan {
	sg := h.sweepgen
	for sc := sweep.centralIndex.load(); sc < numSweepClasses; sc++ {
		spc, full := sc.split()
		c := &h.central[spc].mcentral
		var s *mspan
		if full {
			s = c.fullUnswept(sg).pop()
		} else {
			s = c.partialUnswept(sg).pop()
		}
		if s != nil {
			// Write down that we found something so future sweepers
			// can start from here.
			sweep.centralIndex.update(sc)
			return s
		}
	}
	// Write down that we found nothing.
	sweep.centralIndex.update(sweepClassDone)
	return nil
}
const sweepDrainedMask = 1 << 31

// activeSweep is a type that captures whether sweeping
// is done, and whether there are any outstanding sweepers.
//
// Every potential sweeper must call begin() before they look
// for work, and end() after they've finished sweeping.
type activeSweep struct {
	// state is divided into two parts.
	//
	// The top bit (masked by sweepDrainedMask) is a boolean
	// value indicating whether all the sweep work has been
	// drained from the queue.
	//
	// The rest of the bits are a counter, indicating the
	// number of outstanding concurrent sweepers.
	state atomic.Uint32
}
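// Concretely, a few representative state values (derived from the
// definitions above, shown for illustration):
//
//	state == 0                    // not drained, no sweepers
//	state == 3                    // not drained, 3 outstanding sweepers
//	state == sweepDrainedMask     // drained, no sweepers: sweeping is done
//	state == sweepDrainedMask | 1 // drained, but 1 sweeper still running
//
// Only the third value satisfies isDone below.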
// begin registers a new sweeper. Returns a sweepLocker
// for acquiring spans for sweeping. Any outstanding sweeper blocks
// sweep termination.
//
// If the sweepLocker is invalid, the caller can be sure that all
// outstanding sweep work has been drained, so there is nothing left
// to sweep. Note that there may be sweepers currently running, so
// this does not indicate that all sweeping has completed.
//
// Even if the sweepLocker is invalid, its sweepGen is always valid.
func (a *activeSweep) begin() sweepLocker {
	for {
		state := a.state.Load()
		if state&sweepDrainedMask != 0 {
			return sweepLocker{mheap_.sweepgen, false}
		}
		if a.state.CompareAndSwap(state, state+1) {
			return sweepLocker{mheap_.sweepgen, true}
		}
	}
}

// end deregisters a sweeper. Must be called once for each time
// begin is called if the sweepLocker is valid.
func (a *activeSweep) end(sl sweepLocker) {
	if sl.sweepGen != mheap_.sweepgen {
		throw("sweeper left outstanding across sweep generations")
	}
	for {
		state := a.state.Load()
		if (state&^sweepDrainedMask)-1 >= sweepDrainedMask {
			throw("mismatched begin/end of activeSweep")
		}
		if a.state.CompareAndSwap(state, state-1) {
			if state != sweepDrainedMask {
				return
			}
			if debug.gcpacertrace > 0 {
				print("pacer: sweep done at heap size ", gcController.heapLive>>20, "MB; allocated ", (gcController.heapLive-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept.Load(), " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
			}
			return
		}
	}
}
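// Taken together, begin and end bracket every sweeper. A minimal sketch
// of the protocol (the same shape sweepone below follows):
//
//	sl := sweep.active.begin()
//	if !sl.valid {
//		return // all sweep work has been drained
//	}
//	if s, ok := sl.tryAcquire(span); ok {
//		s.sweep(false)
//	}
//	sweep.active.end(sl)
//
// The last end call on a drained queue is the one that completes the
// sweep phase.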
// markDrained attempts to mark the sweep queue as drained, indicating
// that there is no more sweep work to pick up. Returns true if this
// call made the transition, and false if the queue was already marked
// drained.
func (a *activeSweep) markDrained() bool {
	for {
		state := a.state.Load()
		if state&sweepDrainedMask != 0 {
			return false
		}
		if a.state.CompareAndSwap(state, state|sweepDrainedMask) {
			return true
		}
	}
}

// sweepers returns the current number of active sweepers.
func (a *activeSweep) sweepers() uint32 {
	return a.state.Load() &^ sweepDrainedMask
}

// isDone returns true if all sweep work has been drained and no more
// outstanding sweepers exist. That is, when the sweep phase is
// completely done.
func (a *activeSweep) isDone() bool {
	return a.state.Load() == sweepDrainedMask
}

// reset sets up the activeSweep for the next sweep cycle.
//
// The world must be stopped.
func (a *activeSweep) reset() {
	assertWorldStopped()
	a.state.Store(0)
}

// finishsweep_m ensures that all spans are swept.
//
// The world must be stopped. This ensures there are no sweeps in
// progress.
//
//go:nowritebarrier
func finishsweep_m() {
	assertWorldStopped()

	// Sweeping must be complete before marking commences, so
	// sweep any unswept spans. If this is a concurrent GC, there
	// shouldn't be any spans left to sweep, so this should finish
	// instantly. If GC was forced before the concurrent sweep
	// finished, there may be spans to sweep.
	for sweepone() != ^uintptr(0) {
		sweep.npausesweep++
	}

	// Make sure there aren't any outstanding sweepers left.
	// At this point, with the world stopped, it means one of two
	// things: either sweeping has yet to be started, or it is
	// already done. Either way, there should be no active sweepers.
	if sweep.active.sweepers() != 0 {
		throw("active sweepers found at start of mark phase")
	}

	// Reset all the unswept buffers, which should be empty.
	// Do this in sweep termination as opposed to mark termination
	// so that we can catch unswept spans and reclaim blocks as
	// soon as possible.
	sg := mheap_.sweepgen
	for i := range mheap_.central {
		c := &mheap_.central[i].mcentral
		c.partialUnswept(sg).reset()
		c.fullUnswept(sg).reset()
	}

	// Sweeping is done, so if the scavenger isn't already awake,
	// wake it up. There's definitely work for it to do at this
	// point.
	scavenger.wake()

	nextMarkBitArenaEpoch()
}

func bgsweep(c chan int) {
	sweep.g = getg()

	lockInit(&sweep.lock, lockRankSweep)
	lock(&sweep.lock)
	sweep.parked = true
	c <- 1
	goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)

	for {
		for sweepone() != ^uintptr(0) {
			sweep.nbgsweep++
			Gosched()
		}
		for freeSomeWbufs(true) {
			Gosched()
		}
		lock(&sweep.lock)
		if !isSweepDone() {
			// This can happen if a GC runs between
			// sweepone returning ^0 above
			// and the lock being acquired.
			unlock(&sweep.lock)
			continue
		}
		sweep.parked = true
		goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
	}
}
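// bgsweep parks on sweep.lock between GC cycles. The wake-up side lives
// in gcSweep (mgc.go) and pairs with the goparkunlock calls above; it
// looks roughly like:
//
//	lock(&sweep.lock)
//	if sweep.parked {
//		sweep.parked = false
//		ready(sweep.g, 0, true)
//	}
//	unlock(&sweep.lock)
//
// sweep.parked guards against waking a goroutine that is still running
// its sweep loop.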
// sweepLocker acquires sweep ownership of spans.
type sweepLocker struct {
	// sweepGen is the sweep generation of the heap.
	sweepGen uint32
	valid    bool
}

// sweepLocked represents sweep ownership of a span.
type sweepLocked struct {
	*mspan
}

// tryAcquire attempts to acquire sweep ownership of span s. If it
// successfully acquires ownership, it blocks sweep completion.
func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool) {
	if !l.valid {
		throw("use of invalid sweepLocker")
	}
	// Check before attempting to acquire, since the span may already be swept.
	if atomic.Load(&s.sweepgen) != l.sweepGen-2 {
		return sweepLocked{}, false
	}
	// Attempt to acquire sweep ownership of s.
	if !atomic.Cas(&s.sweepgen, l.sweepGen-2, l.sweepGen-1) {
		return sweepLocked{}, false
	}
	return sweepLocked{s}, true
}
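// The sweepgen values tryAcquire manipulates follow the lifecycle
// documented on mspan.sweepgen in mheap.go:
//
//	sweepgen == h.sweepgen - 2: the span needs sweeping
//	sweepgen == h.sweepgen - 1: the span is currently being swept
//	sweepgen == h.sweepgen:     the span is swept and ready to use
//	sweepgen == h.sweepgen + 1: the span was cached before sweep began and still needs sweeping
//	sweepgen == h.sweepgen + 3: the span was swept and then cached
//
// h.sweepgen increases by 2 every GC cycle, so the CAS from sweepGen-2
// to sweepGen-1 is what transfers exclusive sweep ownership of s to
// this locker.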
// sweepone sweeps some unswept heap span and returns the number of pages returned
// to the heap, or ^uintptr(0) if there was nothing to sweep.
func sweepone() uintptr {
	gp := getg()

	// Increment locks to ensure that the goroutine is not preempted
	// in the middle of sweep thus leaving the span in an inconsistent state for next GC.
	gp.m.locks++

	// TODO(austin): Consider queuing for dedicated sweeps.

	sl := sweep.active.begin()
	if !sl.valid {
		gp.m.locks--
		return ^uintptr(0)
	}

	// Find a span to sweep.
	npages := ^uintptr(0)
	var noMoreWork bool
	for {
		s := mheap_.nextSpanForSweep()
		if s == nil {
			noMoreWork = sweep.active.markDrained()
			break
		}
		if state := s.state.get(); state != mSpanInUse {
			// This can happen if direct sweeping already
			// swept this span, but in that case the sweep
			// generation should always be up-to-date.
			if !(s.sweepgen == sl.sweepGen || s.sweepgen == sl.sweepGen+3) {
				print("runtime: bad span s.state=", state, " s.sweepgen=", s.sweepgen, " sweepgen=", sl.sweepGen, "\n")
				throw("non in-use span in unswept list")
			}
			continue
		}
		if s, ok := sl.tryAcquire(s); ok {
			// Sweep the span we found.
			npages = s.npages
			if s.sweep(false) {
				// Whole span was freed. Count it toward the
				// page reclaimer credit since these pages can
				// now be used for span allocation.
				mheap_.reclaimCredit.Add(npages)
			} else {
				// Span is still in-use, so this returned no
				// pages to the heap and the span needs to
				// move to the swept in-use list.
				npages = 0
			}
			break
		}
	}
	sweep.active.end(sl)

	if noMoreWork {
		// The sweep list is empty. There may still be
		// concurrent sweeps running, but we're at least very
		// close to done sweeping.

		// Move the scavenge gen forward (signaling
		// that there's new work to do) and wake the scavenger.
		//
		// The scavenger is signaled by the last sweeper because once
		// sweeping is done, we will definitely have useful work for
		// the scavenger to do, since the scavenger only runs over the
		// heap once per GC cycle. This update is not done during sweep
		// termination because in some cases there may be a long delay
		// between sweep done and sweep termination (e.g. not enough
		// allocations to trigger a GC) which would be nice to fill in
		// with scavenging work.
		if debug.scavtrace > 0 {
			systemstack(func() {
				lock(&mheap_.lock)
				released := atomic.Loaduintptr(&mheap_.pages.scav.released)
				printScavTrace(released, false)
				atomic.Storeuintptr(&mheap_.pages.scav.released, 0)
				unlock(&mheap_.lock)
			})
		}
		scavenger.ready()
	}

	gp.m.locks--
	return npages
}

// isSweepDone reports whether all spans are swept.
//
// Note that this condition may transition from false to true at any
// time as the sweeper runs. It may transition from true to false if a
// GC runs; to prevent that the caller must be non-preemptible or must
// somehow block GC progress.
func isSweepDone() bool {
	return sweep.active.isDone()
}

// Returns only when span s has been swept.
//
//go:nowritebarrier
func (s *mspan) ensureSwept() {
	// Caller must disable preemption.
	// Otherwise when this function returns the span can become unswept again
	// (if GC is triggered on another goroutine).
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("mspan.ensureSwept: m is not locked")
	}

	// If this operation fails, then that means that there are
	// no more spans to be swept. In this case, either s has already
	// been swept, or is about to be acquired for sweeping and swept.
	sl := sweep.active.begin()
	if sl.valid {
		// The caller must be sure that the span is a mSpanInUse span.
		if s, ok := sl.tryAcquire(s); ok {
			s.sweep(false)
			sweep.active.end(sl)
			return
		}
		sweep.active.end(sl)
	}

	// Unfortunately we can't sweep the span ourselves. Somebody else
	// got to it first. We don't have efficient means to wait, but
	// that's rare, so just spin until the span's sweep generation
	// catches up.
	for {
		spangen := atomic.Load(&s.sweepgen)
		if spangen == sl.sweepGen || spangen == sl.sweepGen+3 {
			break
		}
		osyield()
	}
}
// sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to heap.
// If preserve=true, don't return it to heap nor relink in mcentral lists;
// caller takes care of it.
func (sl *sweepLocked) sweep(preserve bool) bool {
	// It's critical that we enter this function with preemption disabled,
	// GC must not start while we are in the middle of this function.
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("mspan.sweep: m is not locked")
	}

	s := sl.mspan
	if !preserve {
		// We'll release ownership of this span. Nil it out to
		// prevent the caller from accidentally using it.
		sl.mspan = nil
	}

	sweepgen := mheap_.sweepgen
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state")
	}

	if trace.enabled {
		traceGCSweepSpan(s.npages * _PageSize)
	}

	mheap_.pagesSwept.Add(int64(s.npages))

	spc := s.spanclass
	size := s.elemsize

	// The allocBits indicate which unmarked objects don't need to be
	// processed since they were free at the end of the last GC cycle
	// and were not allocated since then.
	// If the allocBits index is >= s.freeindex and the bit
	// is not marked then the object remains unallocated
	// since the last GC.
	// This situation is analogous to being on a freelist.

	// Unlink & free special records for any objects we're about to free.
	// Two complications here:
	// 1. An object can have both finalizer and profile special records.
	//    In such case we need to queue finalizer for execution,
	//    mark the object as live and preserve the profile special.
	// 2. A tiny object can have several finalizers setup for different offsets.
	//    If such object is not marked, we need to queue all finalizers at once.
	// Both 1 and 2 are possible at the same time.
	hadSpecials := s.specials != nil
	siter := newSpecialsIter(s)
	for siter.valid() {
		// A finalizer can be set for an inner byte of an object, find object beginning.
		objIndex := uintptr(siter.s.offset) / size
		p := s.base() + objIndex*size
		mbits := s.markBitsForIndex(objIndex)
		if !mbits.isMarked() {
			// This object is not marked and has at least one special record.
			// Pass 1: see if it has at least one finalizer.
			hasFin := false
			endOffset := p - s.base() + size
			for tmp := siter.s; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
				if tmp.kind == _KindSpecialFinalizer {
					// Stop freeing of object if it has a finalizer.
					mbits.setMarkedNonAtomic()
					hasFin = true
					break
				}
			}
			// Pass 2: queue all finalizers _or_ handle profile record.
			for siter.valid() && uintptr(siter.s.offset) < endOffset {
				// Find the exact byte for which the special was setup
				// (as opposed to object beginning).
				special := siter.s
				p := s.base() + uintptr(special.offset)
				if special.kind == _KindSpecialFinalizer || !hasFin {
					siter.unlinkAndNext()
					freeSpecial(special, unsafe.Pointer(p), size)
				} else {
					// The object has finalizers, so we keep it alive.
					// All other specials only apply when an object is
					// freed, so just keep the special record.
					siter.next()
				}
			}
		} else {
			// object is still live
			if siter.s.kind == _KindSpecialReachable {
				special := siter.unlinkAndNext()
				(*specialReachable)(unsafe.Pointer(special)).reachable = true
				freeSpecial(special, unsafe.Pointer(p), size)
			} else {
				// keep special record
				siter.next()
			}
		}
	}
	if hadSpecials && s.specials == nil {
		spanHasNoSpecials(s)
	}

	if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled || asanenabled {
		// Find all newly freed objects. This doesn't have to be
		// efficient; allocfreetrace has massive overhead.
		mbits := s.markBitsForBase()
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < s.nelems; i++ {
			if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
				x := s.base() + i*s.elemsize
				if debug.allocfreetrace != 0 {
					tracefree(unsafe.Pointer(x), size)
				}
				if debug.clobberfree != 0 {
					clobberfree(unsafe.Pointer(x), size)
				}
				if raceenabled {
					racefree(unsafe.Pointer(x), size)
				}
				if msanenabled {
					msanfree(unsafe.Pointer(x), size)
				}
				if asanenabled {
					asanpoison(unsafe.Pointer(x), size)
				}
			}
			mbits.advance()
			abits.advance()
		}
	}

	// Check for zombie objects.
	if s.freeindex < s.nelems {
		// Everything < freeindex is allocated and hence
		// cannot be zombies.
		//
		// Check the first bitmap byte, where we have to be
		// careful with freeindex.
		obj := s.freeindex
		if (*s.gcmarkBits.bytep(obj/8)&^*s.allocBits.bytep(obj/8))>>(obj%8) != 0 {
			s.reportZombies()
		}
		// Check the remaining bytes.
		for i := obj/8 + 1; i < divRoundUp(s.nelems, 8); i++ {
			if *s.gcmarkBits.bytep(i)&^*s.allocBits.bytep(i) != 0 {
				s.reportZombies()
			}
		}
	}

	// Count the number of free objects in this span.
	nalloc := uint16(s.countAlloc())
	nfreed := s.allocCount - nalloc
	if nalloc > s.allocCount {
		// The zombie check above should have caught this in
		// more detail.
		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
		throw("sweep increased allocation count")
	}

	s.allocCount = nalloc
	s.freeindex = 0 // reset allocation index to start of span
	s.freeIndexForScan = 0
	if trace.enabled {
		getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
	}

	// gcmarkBits becomes the allocBits.
	// Get a fresh cleared gcmarkBits in preparation for next GC.
	s.allocBits = s.gcmarkBits
	s.gcmarkBits = newMarkBits(s.nelems)

	// Initialize alloc bits cache.
	s.refillAllocCache(0)

	// The span must be in our exclusive ownership until we update sweepgen,
	// check for potential races.
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state after sweep")
	}
	if s.sweepgen == sweepgen+1 || s.sweepgen == sweepgen+3 {
		throw("swept cached span")
	}

	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
	// because of the potential for a concurrent free/SetFinalizer.
	//
	// But we need to set it before we make the span available for allocation
	// (return it to heap or mcentral), because allocation code assumes that a
	// span is already swept if available for allocation.
	//
	// Serialization point.
	// At this point the mark bits are cleared and allocation ready
	// to go so release the span.
	atomic.Store(&s.sweepgen, sweepgen)

	if spc.sizeclass() != 0 {
		// Handle spans for small objects.
		if nfreed > 0 {
			// Only mark the span as needing zeroing if we've freed any
			// objects, because a fresh span that had been allocated into,
			// wasn't totally filled, but then swept, still has all of its
			// free slots zeroed.
			s.needzero = 1
			stats := memstats.heapStats.acquire()
			atomic.Xadd64(&stats.smallFreeCount[spc.sizeclass()], int64(nfreed))
			memstats.heapStats.release()

			// Count the frees in the inconsistent, internal stats.
			gcController.totalFree.Add(int64(nfreed) * int64(s.elemsize))
		}
		if !preserve {
			// The caller may not have removed this span from whatever
			// unswept set it's on, but taken ownership of the span for
			// sweeping by updating sweepgen. If this span still is in
			// an unswept set, then the mcentral will pop it off the
			// set, check its sweepgen, and ignore it.
			if nalloc == 0 {
				// Free totally free span directly back to the heap.
				mheap_.freeSpan(s)
				return true
			}
			// Return span back to the right mcentral list.
			if uintptr(nalloc) == s.nelems {
				mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
			} else {
				mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s)
			}
		}
	} else if !preserve {
		// Handle spans for large objects.
		if nfreed != 0 {
			// Free large object span to heap.

			// NOTE(rsc,dvyukov): The original implementation of efence
			// in CL 22060046 used sysFree instead of sysFault, so that
			// the operating system would eventually give the memory
			// back to us again, so that an efence program could run
			// longer without running out of memory. Unfortunately,
			// calling sysFree here without any kind of adjustment of the
			// heap data structures means that when the memory does
			// come back to us, we have no way to know that it's safe
			// to reuse it or not. So we have to leave it as-is until
			// a new GC cycle frees it.
			if debug.efence > 0 {
				s.limit = 0 // prevent mlookup from finding this span
				sysFault(unsafe.Pointer(s.base()), size)
			} else {
				mheap_.freeSpan(s)
			}

			// Count the free in the consistent, external stats.
			stats := memstats.heapStats.acquire()
			atomic.Xadd64(&stats.largeFreeCount, 1)
			atomic.Xadd64(&stats.largeFree, int64(size))
			memstats.heapStats.release()

			// Count the free in the inconsistent, internal stats.
			gcController.totalFree.Add(int64(size))

			return true
		}

		// Add a large span directly onto the full+swept list.
		mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
	}
	return false
}
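// To make the bookkeeping in sweep concrete, consider a hypothetical
// span with 8 slots, allocCount == 5, and gcmarkBits == 0b00010011
// after marking (objects 0, 1, and 4 survived):
//
//	nalloc = countAlloc() = 3 // popcount of gcmarkBits
//	nfreed = 5 - 3 = 2        // objects freed by this sweep
//
// After gcmarkBits is installed as allocBits, slots 2, 3, 5, 6, and 7
// read as free, freeindex restarts at 0, and a fresh, cleared
// gcmarkBits is ready for the next mark phase.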
// reportZombies reports any marked but free objects in s and throws.
//
// This generally means one of the following:
//
// 1. User code converted a pointer to a uintptr and then back
// unsafely, and a GC ran while the uintptr was the only reference to
// an object.
//
// 2. User code (or a compiler bug) constructed a bad pointer that
// points to a free slot, often a past-the-end pointer.
//
// 3. The GC two cycles ago missed a pointer and freed a live object,
// but it was still live in the last cycle, so this GC cycle found a
// pointer to that object and marked it.
func (s *mspan) reportZombies() {
	printlock()
	print("runtime: marked free object in span ", s, ", elemsize=", s.elemsize, " freeindex=", s.freeindex, " (bad use of unsafe.Pointer? try -d=checkptr)\n")
	mbits := s.markBitsForBase()
	abits := s.allocBitsForIndex(0)
	for i := uintptr(0); i < s.nelems; i++ {
		addr := s.base() + i*s.elemsize
		print(hex(addr))
		alloc := i < s.freeindex || abits.isMarked()
		if alloc {
			print(" alloc")
		} else {
			print(" free ")
		}
		if mbits.isMarked() {
			print(" marked ")
		} else {
			print(" unmarked")
		}
		zombie := mbits.isMarked() && !alloc
		if zombie {
			print(" zombie")
		}
		print("\n")
		if zombie {
			length := s.elemsize
			if length > 1024 {
				length = 1024
			}
			hexdumpWords(addr, addr+length, nil)
		}
		mbits.advance()
		abits.advance()
	}
	throw("found pointer to free object")
}
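// Case 1 above typically comes from code like the following
// (hypothetical user code, shown only to illustrate the failure mode;
// T is any heap-allocated type):
//
//	u := uintptr(unsafe.Pointer(new(T))) // the only reference is now an integer
//	runtime.GC()                         // the object is unmarked and freed
//	p := (*T)(unsafe.Pointer(u))         // resurrects a pointer to a free slot
//
// If a later mark phase reaches p, it sets a mark bit for a slot the
// allocator considers free, and the zombie check in sweep lands here.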
// deductSweepCredit deducts sweep credit for allocating a span of
// size spanBytes. This must be performed *before* the span is
// allocated to ensure the system has enough credit. If necessary, it
// performs sweeping to prevent going in debt. If the caller will
// also sweep pages (e.g., for a large allocation), it can pass a
// non-zero callerSweepPages to leave that many pages unswept.
//
// deductSweepCredit makes a worst-case assumption that all spanBytes
// bytes of the ultimately allocated span will be available for object
// allocation.
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
// enough sweeping so that all pages are swept during the concurrent
// sweep phase between GC cycles.
//
// mheap_ must NOT be locked.
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
		return
	}

	if trace.enabled {
		traceGCSweepStart()
	}

retry:
	sweptBasis := mheap_.pagesSweptBasis.Load()

	// Fix debt if necessary.
	newHeapLive := uintptr(atomic.Load64(&gcController.heapLive)-mheap_.sweepHeapLiveBasis) + spanBytes
	pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
	for pagesTarget > int64(mheap_.pagesSwept.Load()-sweptBasis) {
		if sweepone() == ^uintptr(0) {
			mheap_.sweepPagesPerByte = 0
			break
		}
		if mheap_.pagesSweptBasis.Load() != sweptBasis {
			// Sweep pacing changed. Recompute debt.
			goto retry
		}
	}

	if trace.enabled {
		traceGCSweepDone()
	}
}
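// As a worked example of the debt computation (hypothetical numbers,
// callerSweepPages == 0): with sweepPagesPerByte == 0.001,
// heapLive - sweepHeapLiveBasis == 4<<20, and a 16KB span being
// allocated:
//
//	newHeapLive = 4<<20 + 16<<10 = 4210688 bytes
//	pagesTarget = int64(0.001 * 4210688) = 4210 pages
//
// so the loop keeps calling sweepone until at least 4210 pages have
// been swept since pagesSweptBasis was last reset by gcPaceSweeper.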
// clobberfree sets the memory content at x to bad content, for debugging
// purposes.
func clobberfree(x unsafe.Pointer, size uintptr) {
	// size (span.elemsize) is always a multiple of 4.
	for i := uintptr(0); i < size; i += 4 {
		*(*uint32)(add(x, i)) = 0xdeadbeef
	}
}

// gcPaceSweeper updates the sweeper's pacing parameters.
//
// Must be called whenever the GC's pacing is updated.
//
// The world must be stopped, or mheap_.lock must be held.
func gcPaceSweeper(trigger uint64) {
	assertWorldStoppedOrLockHeld(&mheap_.lock)

	// Update sweep pacing.
	if isSweepDone() {
		mheap_.sweepPagesPerByte = 0
	} else {
		// Concurrent sweep needs to sweep all of the in-use
		// pages by the time the allocated heap reaches the GC
		// trigger. Compute the ratio of in-use pages to sweep
		// per byte allocated, accounting for the fact that
		// some might already be swept.
		heapLiveBasis := atomic.Load64(&gcController.heapLive)
		heapDistance := int64(trigger) - int64(heapLiveBasis)
		// Add a little margin so rounding errors and
		// concurrent sweep are less likely to leave pages
		// unswept when GC starts.
		heapDistance -= 1024 * 1024
		if heapDistance < _PageSize {
			// Avoid setting the sweep ratio extremely high.
			heapDistance = _PageSize
		}
		pagesSwept := mheap_.pagesSwept.Load()
		pagesInUse := mheap_.pagesInUse.Load()
		sweepDistancePages := int64(pagesInUse) - int64(pagesSwept)
		if sweepDistancePages <= 0 {
			mheap_.sweepPagesPerByte = 0
		} else {
			mheap_.sweepPagesPerByte = float64(sweepDistancePages) / float64(heapDistance)
			mheap_.sweepHeapLiveBasis = heapLiveBasis
			// Write pagesSweptBasis last, since this
			// signals concurrent sweepers to recompute
			// their debt.
			mheap_.pagesSweptBasis.Store(pagesSwept)
		}
	}
}
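// As a worked example (hypothetical numbers): with a 64MB trigger,
// 32MB of live heap, and 2048 in-use pages left to sweep, after the
// 1MB margin:
//
//	heapDistance = 64<<20 - 32<<20 - 1<<20 = 31<<20 bytes
//	sweepPagesPerByte = 2048 / float64(31<<20) ≈ 6.3e-5 pages/byte
//
// i.e. proportional sweep must retire roughly one page per 16KB
// allocated to finish sweeping before the heap grows back to the
// trigger.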