Source file
src/runtime/mpagealloc_64bit.go
Documentation: runtime
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// NOTE(review): this file's original build constraint was stripped by extraction.
// The 64-bit architecture list below matches upstream Go; confirm against the
// toolchain version in use.
//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)
13
14 const (
15
16 summaryLevels = 5
17
18
19 pageAlloc32Bit = 0
20 pageAlloc64Bit = 1
21
22
23
24
25
26
27 pallocChunksL1Bits = 13
28 )
29
30
31
32
33
34 var levelBits = [summaryLevels]uint{
35 summaryL0Bits,
36 summaryLevelBits,
37 summaryLevelBits,
38 summaryLevelBits,
39 summaryLevelBits,
40 }
41
42
43
44
45
46
47
48
49 var levelShift = [summaryLevels]uint{
50 heapAddrBits - summaryL0Bits,
51 heapAddrBits - summaryL0Bits - 1*summaryLevelBits,
52 heapAddrBits - summaryL0Bits - 2*summaryLevelBits,
53 heapAddrBits - summaryL0Bits - 3*summaryLevelBits,
54 heapAddrBits - summaryL0Bits - 4*summaryLevelBits,
55 }
56
57
58
59
60
61 var levelLogPages = [summaryLevels]uint{
62 logPallocChunkPages + 4*summaryLevelBits,
63 logPallocChunkPages + 3*summaryLevelBits,
64 logPallocChunkPages + 2*summaryLevelBits,
65 logPallocChunkPages + 1*summaryLevelBits,
66 logPallocChunkPages,
67 }
68
69
70
71
72 func (p *pageAlloc) sysInit() {
73
74
75 for l, shift := range levelShift {
76 entries := 1 << (heapAddrBits - shift)
77
78
79 b := alignUp(uintptr(entries)*pallocSumBytes, physPageSize)
80 r := sysReserve(nil, b)
81 if r == nil {
82 throw("failed to reserve page summary memory")
83 }
84
85
86 sl := notInHeapSlice{(*notInHeap)(r), 0, entries}
87 p.summary[l] = *(*[]pallocSum)(unsafe.Pointer(&sl))
88 }
89
90
91 nbytes := uintptr(1<<heapAddrBits) / pallocChunkBytes / 8
92 r := sysReserve(nil, nbytes)
93 sl := notInHeapSlice{(*notInHeap)(r), int(nbytes), int(nbytes)}
94 p.scav.index.chunks = *(*[]atomic.Uint8)(unsafe.Pointer(&sl))
95 }
96
97
98
99
100
101
102
103
104
105
106
107 func (p *pageAlloc) sysGrow(base, limit uintptr) {
108 if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 {
109 print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n")
110 throw("sysGrow bounds not aligned to pallocChunkBytes")
111 }
112
113
114
115
116 addrRangeToSummaryRange := func(level int, r addrRange) (int, int) {
117 sumIdxBase, sumIdxLimit := addrsToSummaryRange(level, r.base.addr(), r.limit.addr())
118 return blockAlignSummaryRange(level, sumIdxBase, sumIdxLimit)
119 }
120
121
122
123
124 summaryRangeToSumAddrRange := func(level, sumIdxBase, sumIdxLimit int) addrRange {
125 baseOffset := alignDown(uintptr(sumIdxBase)*pallocSumBytes, physPageSize)
126 limitOffset := alignUp(uintptr(sumIdxLimit)*pallocSumBytes, physPageSize)
127 base := unsafe.Pointer(&p.summary[level][0])
128 return addrRange{
129 offAddr{uintptr(add(base, baseOffset))},
130 offAddr{uintptr(add(base, limitOffset))},
131 }
132 }
133
134
135
136
137 addrRangeToSumAddrRange := func(level int, r addrRange) addrRange {
138 sumIdxBase, sumIdxLimit := addrRangeToSummaryRange(level, r)
139 return summaryRangeToSumAddrRange(level, sumIdxBase, sumIdxLimit)
140 }
141
142
143
144
145
146
147
148
149
150
151 inUseIndex := p.inUse.findSucc(base)
152
153
154 for l := range p.summary {
155
156 needIdxBase, needIdxLimit := addrRangeToSummaryRange(l, makeAddrRange(base, limit))
157
158
159
160
161
162 if needIdxLimit > len(p.summary[l]) {
163 p.summary[l] = p.summary[l][:needIdxLimit]
164 }
165
166
167 need := summaryRangeToSumAddrRange(l, needIdxBase, needIdxLimit)
168
169
170
171
172
173 if inUseIndex > 0 {
174 need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex-1]))
175 }
176 if inUseIndex < len(p.inUse.ranges) {
177 need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex]))
178 }
179
180 if need.size() == 0 {
181 continue
182 }
183
184
185 sysMap(unsafe.Pointer(need.base.addr()), need.size(), p.sysStat)
186 sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
187 p.summaryMappedReady += need.size()
188 }
189
190
191 p.summaryMappedReady += p.scav.index.grow(base, limit, p.sysStat)
192 }
193
194
195
196
197 func (s *scavengeIndex) grow(base, limit uintptr, sysStat *sysMemStat) uintptr {
198 if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 {
199 print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n")
200 throw("sysGrow bounds not aligned to pallocChunkBytes")
201 }
202
203
204
205
206
207
208
209
210
211
212
213 haveMin := s.min.Load()
214 haveMax := s.max.Load()
215 needMin := int32(alignDown(uintptr(chunkIndex(base)/8), physPageSize))
216 needMax := int32(alignUp(uintptr((chunkIndex(limit)+7)/8), physPageSize))
217
218 if needMax < haveMin {
219 needMax = haveMin
220 }
221 if needMin > haveMax {
222 needMin = haveMax
223 }
224 have := makeAddrRange(
225
226 uintptr(unsafe.Pointer(&s.chunks[0]))+uintptr(haveMin),
227 uintptr(unsafe.Pointer(&s.chunks[0]))+uintptr(haveMax),
228 )
229 need := makeAddrRange(
230
231 uintptr(unsafe.Pointer(&s.chunks[0]))+uintptr(needMin),
232 uintptr(unsafe.Pointer(&s.chunks[0]))+uintptr(needMax),
233 )
234
235
236 need = need.subtract(have)
237
238
239 if need.size() != 0 {
240 sysMap(unsafe.Pointer(need.base.addr()), need.size(), sysStat)
241 sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
242
243 if haveMin == 0 || needMin < haveMin {
244 s.min.Store(needMin)
245 }
246 if haveMax == 0 || needMax > haveMax {
247 s.max.Store(needMax)
248 }
249 }
250
251
252 minHeapIdx := s.minHeapIdx.Load()
253 if baseIdx := int32(chunkIndex(base) / 8); minHeapIdx == 0 || baseIdx < minHeapIdx {
254 s.minHeapIdx.Store(baseIdx)
255 }
256 return need.size()
257 }
258