// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
5 package runtime
6
7 import (
8 "internal/goarch"
9 "runtime/internal/atomic"
10 "unsafe"
11 )
12
const (
	// _WorkbufSize is the size in bytes of one work buffer, including
	// its header. workbuf's obj array is sized from this constant.
	_WorkbufSize = 2048

	// workbufAlloc is the number of bytes allocated at a time for new
	// workbufs. Each allocation is carved into workbufAlloc/_WorkbufSize
	// buffers (see getempty). It must be a multiple of pageSize and of
	// _WorkbufSize — enforced by the init check below.
	workbufAlloc = 32 << 10
)
24
25 func init() {
26 if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
27 throw("bad workbufAlloc")
28 }
29 }
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
// A gcWork caches garbage-collector work (pointers to scan) for a single
// worker, batching transfers to and from the global work lists.
type gcWork struct {
	// wbuf1 is the buffer currently being filled or drained and wbuf2 is
	// the spare. put and tryGet swap the two when wbuf1 fills up or runs
	// dry, so the pair amortizes trips to the global full/empty lists.
	// Either both are nil (before init) or neither is; dispose resets
	// both to nil.
	wbuf1, wbuf2 *workbuf

	// bytesMarked is the number of bytes marked via this gcWork.
	// dispose aggregates it into the global work.bytesMarked.
	bytesMarked uint64

	// heapScanWork is heap scan work accumulated on this gcWork.
	// dispose flushes it to gcController.heapScanWork.
	heapScanWork int64

	// flushedWork records that a non-empty buffer was returned to the
	// global full list (set by put, putBatch, balance, and dispose).
	// NOTE(review): the consumer that reads and clears this flag is not
	// visible in this file — presumably the mark-termination check;
	// confirm against its callers.
	flushedWork bool
}
91
92
93
94
95
96
97
98
99 func (w *gcWork) init() {
100 w.wbuf1 = getempty()
101 wbuf2 := trygetfull()
102 if wbuf2 == nil {
103 wbuf2 = getempty()
104 }
105 w.wbuf2 = wbuf2
106 }
107
108
109
110
111
// put enqueues a pointer for the garbage collector to trace.
// obj must point to the beginning of a heap object.
// NOTE(review): callers may also pass oblet starts — confirm against
// the scan code that produces these pointers.
func (w *gcWork) put(obj uintptr) {
	flushed := false
	wbuf := w.wbuf1
	// Record that this may acquire the wbufSpans or heap lock to
	// allocate a workbuf (both init and getempty below can take
	// those paths).
	lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if wbuf == nil {
		// First use of this gcWork: set up both buffers.
		w.init()
		wbuf = w.wbuf1
		// wbuf is empty at this point.
	} else if wbuf.nobj == len(wbuf.obj) {
		// Primary buffer is full: rotate in the spare.
		w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
		wbuf = w.wbuf1
		if wbuf.nobj == len(wbuf.obj) {
			// Both buffers are full: publish one on the global
			// full list and replace it with an empty buffer.
			putfull(wbuf)
			w.flushedWork = true
			wbuf = getempty()
			w.wbuf1 = wbuf
			flushed = true
		}
	}

	wbuf.obj[wbuf.nobj] = obj
	wbuf.nobj++

	// If we put a buffer on the full list, notify the GC controller
	// so it can enlist more mark workers. This is deliberately done
	// last, after the new pointer is safely queued.
	if flushed && gcphase == _GCmark {
		gcController.enlistWorker()
	}
}
146
147
148
149
150
151 func (w *gcWork) putFast(obj uintptr) bool {
152 wbuf := w.wbuf1
153 if wbuf == nil || wbuf.nobj == len(wbuf.obj) {
154 return false
155 }
156
157 wbuf.obj[wbuf.nobj] = obj
158 wbuf.nobj++
159 return true
160 }
161
162
163
164
165
// putBatch performs a put on every pointer in obj. See put for
// constraints on these pointers.
func (w *gcWork) putBatch(obj []uintptr) {
	if len(obj) == 0 {
		return
	}

	flushed := false
	wbuf := w.wbuf1
	if wbuf == nil {
		// First use of this gcWork: set up both buffers.
		w.init()
		wbuf = w.wbuf1
	}

	for len(obj) > 0 {
		// While the primary buffer has no room, publish it on the
		// global full list; the spare becomes primary and a fresh
		// empty buffer becomes the new spare.
		for wbuf.nobj == len(wbuf.obj) {
			putfull(wbuf)
			w.flushedWork = true
			w.wbuf1, w.wbuf2 = w.wbuf2, getempty()
			wbuf = w.wbuf1
			flushed = true
		}
		// Copy as many pointers as fit into the current buffer.
		n := copy(wbuf.obj[wbuf.nobj:], obj)
		wbuf.nobj += n
		obj = obj[n:]
	}

	// As in put, enlisting workers is delayed until all pointers
	// are safely queued.
	if flushed && gcphase == _GCmark {
		gcController.enlistWorker()
	}
}
195
196
197
198
199
200
201
202
// tryGet dequeues a pointer for the garbage collector to trace.
//
// If there are no pointers remaining in this gcWork or in the global
// queue, tryGet returns 0. Note that there may still be pointers held
// by other gcWork instances.
func (w *gcWork) tryGet() uintptr {
	wbuf := w.wbuf1
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1
		// wbuf is empty at this point.
	}
	if wbuf.nobj == 0 {
		// Primary buffer is drained: rotate in the spare.
		w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
		wbuf = w.wbuf1
		if wbuf.nobj == 0 {
			// Both local buffers are empty; trade the empty buffer
			// for a full one from the global queue, if any.
			owbuf := wbuf
			wbuf = trygetfull()
			if wbuf == nil {
				return 0
			}
			putempty(owbuf)
			w.wbuf1 = wbuf
		}
	}

	wbuf.nobj--
	return wbuf.obj[wbuf.nobj]
}
227
228
229
230
231
232
233 func (w *gcWork) tryGetFast() uintptr {
234 wbuf := w.wbuf1
235 if wbuf == nil || wbuf.nobj == 0 {
236 return 0
237 }
238
239 wbuf.nobj--
240 return wbuf.obj[wbuf.nobj]
241 }
242
243
244
245
246
247
248
249
// dispose returns any cached pointers to the global queues and flushes
// this gcWork's accumulated statistics into the global totals, leaving
// the gcWork ready for reuse (both buffers nil, counters zero).
func (w *gcWork) dispose() {
	if wbuf := w.wbuf1; wbuf != nil {
		// Return each buffer to the matching global list:
		// empty buffers to work.empty, partially/fully loaded
		// buffers to work.full.
		if wbuf.nobj == 0 {
			putempty(wbuf)
		} else {
			putfull(wbuf)
			w.flushedWork = true
		}
		w.wbuf1 = nil

		wbuf = w.wbuf2
		if wbuf.nobj == 0 {
			putempty(wbuf)
		} else {
			putfull(wbuf)
			w.flushedWork = true
		}
		w.wbuf2 = nil
	}
	if w.bytesMarked != 0 {
		// Aggregate this worker's mark progress into the global
		// counter with a single atomic add.
		atomic.Xadd64(&work.bytesMarked, int64(w.bytesMarked))
		w.bytesMarked = 0
	}
	if w.heapScanWork != 0 {
		// Credit accumulated scan work to the GC controller.
		gcController.heapScanWork.Add(w.heapScanWork)
		w.heapScanWork = 0
	}
}
282
283
284
285
286
// balance moves some work cached in this gcWork back onto the global
// queue so other workers can take it.
func (w *gcWork) balance() {
	if w.wbuf1 == nil {
		// Not yet initialized: nothing cached.
		return
	}
	if wbuf := w.wbuf2; wbuf.nobj != 0 {
		// The spare holds work: publish the whole buffer and
		// replace it with an empty one.
		putfull(wbuf)
		w.flushedWork = true
		w.wbuf2 = getempty()
	} else if wbuf := w.wbuf1; wbuf.nobj > 4 {
		// The primary holds more than a handful of pointers:
		// split off half for the global list (handoff does the
		// putfull) and keep working from the returned half.
		w.wbuf1 = handoff(wbuf)
		w.flushedWork = true
	} else {
		// Too little work to be worth sharing.
		return
	}
	// We added work to the global list, so wake more workers.
	if gcphase == _GCmark {
		gcController.enlistWorker()
	}
}
306
307
308
309
310 func (w *gcWork) empty() bool {
311 return w.wbuf1 == nil || (w.wbuf1.nobj == 0 && w.wbuf2.nobj == 0)
312 }
313
314
315
316
317
// workbufhdr is the header embedded in every workbuf.
type workbufhdr struct {
	// node links the buffer into the lock-free work.empty/work.full
	// lists. It must stay the first field: pop() results are cast
	// directly to *workbuf (see getempty/trygetfull).
	node lfnode
	// nobj is the number of valid entries in the obj array.
	nobj int
}
322
323
// workbuf is a fixed-size batch of object pointers exchanged through the
// global work lists. The obj array is sized so the whole struct occupies
// exactly _WorkbufSize bytes.
type workbuf struct {
	workbufhdr
	// obj holds the queued pointers; account for the header above.
	obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr
}
329
330
331
332
333
334
335 func (b *workbuf) checknonempty() {
336 if b.nobj == 0 {
337 throw("workbuf is empty")
338 }
339 }
340
341 func (b *workbuf) checkempty() {
342 if b.nobj != 0 {
343 throw("workbuf is not empty")
344 }
345 }
346
347
348
349
350
// getempty returns an empty work buffer, popping one off the global
// work.empty list if possible and otherwise carving a whole span's
// worth of fresh buffers out of manually-managed heap memory.
func getempty() *workbuf {
	var b *workbuf
	if work.empty != 0 {
		b = (*workbuf)(work.empty.pop())
		if b != nil {
			b.checkempty()
		}
	}
	// Record that this may acquire the wbufSpans or heap lock on the
	// allocation paths below.
	lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if b == nil {
		// Allocate more workbufs.
		var s *mspan
		if work.wbufSpans.free.first != nil {
			// Prefer reusing a previously freed workbuf span.
			// The first-field read above is an optimistic unlocked
			// check; re-verify under the lock.
			lock(&work.wbufSpans.lock)
			s = work.wbufSpans.free.first
			if s != nil {
				work.wbufSpans.free.remove(s)
				work.wbufSpans.busy.insert(s)
			}
			unlock(&work.wbufSpans.lock)
		}
		if s == nil {
			// No reusable span: allocate a fresh manually-managed
			// span from the heap, on the system stack.
			systemstack(func() {
				s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
			})
			if s == nil {
				throw("out of memory")
			}
			// Record the new span in the busy list.
			lock(&work.wbufSpans.lock)
			work.wbufSpans.busy.insert(s)
			unlock(&work.wbufSpans.lock)
		}
		// Slice up the span into new workbufs: keep the first for
		// the caller and put the rest on the empty list.
		for i := uintptr(0); i+_WorkbufSize <= workbufAlloc; i += _WorkbufSize {
			newb := (*workbuf)(unsafe.Pointer(s.base() + i))
			newb.nobj = 0
			lfnodeValidate(&newb.node)
			if i == 0 {
				b = newb
			} else {
				putempty(newb)
			}
		}
	}
	return b
}
402
403
404
405
406
// putempty puts a workbuf onto the global work.empty list.
// b must be empty (checked).
func putempty(b *workbuf) {
	b.checkempty()
	work.empty.push(&b.node)
}
411
412
413
414
415
416
// putfull puts a workbuf onto the global work.full list.
// b must be non-empty (checked).
func putfull(b *workbuf) {
	b.checknonempty()
	work.full.push(&b.node)
}
421
422
423
424
425
426 func trygetfull() *workbuf {
427 b := (*workbuf)(work.full.pop())
428 if b != nil {
429 b.checknonempty()
430 return b
431 }
432 return b
433 }
434
435
// handoff splits b roughly in half: the top half of its pointers move
// into a fresh buffer that is returned to the caller, while b — still
// holding the bottom half — is published on the global full list for
// other workers to steal.
func handoff(b *workbuf) *workbuf {
	// Make a new buffer with half of b's pointers.
	b1 := getempty()
	n := b.nobj / 2
	b.nobj -= n
	b1.nobj = n
	memmove(unsafe.Pointer(&b1.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), uintptr(n)*unsafe.Sizeof(b1.obj[0]))

	// Publish b (with its remaining half) on the full list.
	putfull(b)
	return b1
}
448
449
450
451
// prepareFreeWorkbufs moves all workbuf spans from the busy list to the
// free list so freeSomeWbufs can return them to the heap. It may only
// be called when every workbuf is on the empty list (work.full == 0).
func prepareFreeWorkbufs() {
	lock(&work.wbufSpans.lock)
	if work.full != 0 {
		throw("cannot free workbufs when work.full != 0")
	}
	// Since all workbufs are empty, we don't care which buffers live
	// in which spans: wipe the entire empty list and move every span
	// wholesale from busy to free.
	work.empty = 0
	work.wbufSpans.free.takeAll(&work.wbufSpans.busy)
	unlock(&work.wbufSpans.lock)
}
464
465
466
// freeSomeWbufs frees a batch of workbuf spans back to the heap and
// reports whether it should be called again to free more. If
// preemptible is true, it stops early when the goroutine is asked to
// preempt. It is a no-op (returning false) outside of _GCoff.
func freeSomeWbufs(preemptible bool) bool {
	// batchSize bounds how many spans one call frees, limiting the
	// time spent holding the wbufSpans lock.
	const batchSize = 64
	lock(&work.wbufSpans.lock)
	if gcphase != _GCoff || work.wbufSpans.free.isEmpty() {
		unlock(&work.wbufSpans.lock)
		return false
	}
	// Free spans on the system stack; check the user goroutine's
	// preempt flag between spans so long frees stay responsive.
	systemstack(func() {
		gp := getg().m.curg
		for i := 0; i < batchSize && !(preemptible && gp.preempt); i++ {
			span := work.wbufSpans.free.first
			if span == nil {
				break
			}
			work.wbufSpans.free.remove(span)
			mheap_.freeManual(span, spanAllocWorkBuf)
		}
	})
	more := !work.wbufSpans.free.isEmpty()
	unlock(&work.wbufSpans.lock)
	return more
}
489