// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator.
//
// This was originally based on tcmalloc, but has diverged quite a bit.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free set of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using a free bitmap.
//
// The allocator's data structures are:
//
//	fixalloc: a free-list allocator for fixed-size off-heap objects,
//		used to manage storage used by the allocator.
//	mheap: the malloc heap, managed at page (8192-byte) granularity.
//	mspan: a run of in-use pages managed by the mheap.
//	mcentral: collects all spans of a given size class.
//	mcache: a per-P cache of mspans with free space.
//	mstats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding mspan in this P's mcache.
//	   Scan the mspan's free bitmap to find a free slot.
//	   If there is a free slot, allocate it.
//	   This can all be done without acquiring a lock.
//
//	2. If the mspan has no free slots, obtain a new mspan
//	   from the mcentral's list of mspans of the required size
//	   class that have free space.
//	   Obtaining a whole span amortizes the cost of locking
//	   the mcentral.
//
//	3. If the mcentral's mspan list is empty, obtain a run
//	   of pages from the mheap to use for the mspan.
//
//	4. If the mheap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Sweeping an mspan and freeing objects on it proceeds up a similar
// hierarchy:
//
//	1. If the mspan is being swept in response to allocation, it
//	   is returned to the mcache to satisfy the allocation.
//
//	2. Otherwise, if the mspan still has allocated objects in it,
//	   it is placed on the mcentral free list for the mspan's size
//	   class.
//
//	3. Otherwise, if all objects in the mspan are free, the mspan's
//	   pages are returned to the mheap and the mspan is now dead.
//
// Allocating and freeing a large object uses the mheap
// directly, bypassing the mcache and mcentral.
//
// If mspan.needzero is false, then free object slots in the mspan are
// already zeroed. Otherwise if needzero is true, objects are zeroed as
// they are allocated. There are various benefits to delaying zeroing
// this way:
//
//	1. Stack frame allocation can avoid zeroing altogether.
//
//	2. It exhibits better temporal locality, since the program is
//	   probably about to write to the memory.
//
//	3. We don't zero pages that never get reused.

// Virtual memory layout
//
// The heap consists of a set of arenas, which are 64MB on 64-bit and
// 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
// aligned to the arena size.
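// Because arenas are both sized and aligned to heapArenaBytes, the arena
// containing an address starts at that address rounded down to a multiple
// of heapArenaBytes (a 64MB boundary on, for example, linux/amd64).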
//
// Each arena has an associated heapArena object that stores the
// metadata for that arena: the heap bitmap for all words in the arena
// and the span map for all pages in the arena. heapArena objects are
// themselves allocated off-heap.
//
// Since arenas are aligned, the address space can be viewed as a
// series of arena frames. The arena map (mheap_.arenas) maps from
// arena frame number to *heapArena, or nil for parts of the address
// space not backed by the Go heap. The arena map is structured as a
// two-level array consisting of an "L1" arena map and many "L2" arena
// maps; however, since arenas are large, on many architectures the
// arena map consists of a single, large L2 map.
//
// The arena map covers the entire possible address space, allowing
// the Go heap to use any part of the address space. The allocator
// attempts to keep arenas contiguous so that large spans (and hence
// large objects) can cross arenas.

package runtime

import (
	"internal/goarch"
	"internal/goos"
	"runtime/internal/atomic"
	"runtime/internal/math"
	"runtime/internal/sys"
	"unsafe"
)

const (
	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize

	concurrentSweep = _ConcurrentSweep

	_PageSize = 1 << _PageShift
	_PageMask = _PageSize - 1

	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
	_64bit = 1 << (^uintptr(0) >> 63) / 2

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = 16
	_TinySizeClass = int8(2)

	_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//	OS               | FixedStack | NumStackOrders
	//	-----------------+------------+---------------
	//	linux/darwin/bsd | 2KB        | 4
	//	windows/32       | 4KB        | 3
	//	windows/64       | 8KB        | 2
	//	plan9            | 4KB        | 3
	_NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9

	// heapAddrBits is the number of bits in a heap address. On
	// amd64, addresses are sign-extended beyond heapAddrBits. On
	// other arches, they are zero-extended.
	//
	// On most 64-bit platforms, we limit this to 48 bits based on a
	// combination of hardware and OS limitations.
	//
	// amd64 hardware limits addresses to 48 bits, sign-extended
	// to 64 bits. Addresses where the top 16 bits are not either
	// all 0 or all 1 are "non-canonical" and invalid. Because of
	// these "negative" addresses, we offset addresses by 1<<47
	// (arenaBaseOffset) on amd64 before computing indexes into
	// the heap arenas index. In 2017, amd64 hardware added
	// support for 57 bit addresses; however, currently only Linux
	// supports this extension and the kernel will never choose an
	// address above 1<<47 unless mmap is called with a hint
	// address above 1<<47 (which we never do).
	//
	// arm64 hardware (as of ARMv8) limits user addresses to 48
	// bits, in the range [0, 1<<48).
	//
	// ppc64, mips64, and s390x support arbitrary 64 bit addresses
	// in hardware. On Linux, Go leans on stricter OS limits. Based
	// on Linux's processor.h, the user address space is limited as
	// follows on 64-bit architectures:
	//
	//	Architecture  Name              Maximum Value (exclusive)
	//	---------------------------------------------------------------------
	//	amd64         TASK_SIZE_MAX     0x007ffffffff000 (47 bit addresses)
	//	arm64         TASK_SIZE_64      0x01000000000000 (48 bit addresses)
	//	ppc64{,le}    TASK_SIZE_USER64  0x00400000000000 (46 bit addresses)
	//	mips64{,le}   TASK_SIZE64       0x00010000000000 (40 bit addresses)
	//	s390x         TASK_SIZE         1<<64 (64 bit addresses)
	//
	// These limits may increase over time, but are currently at
	// most 48 bits except on s390x. On all architectures, Linux
	// starts placing mmap'd regions at addresses that are
	// significantly below 48 bits, so even if it's possible to
	// exceed Go's 48 bit limit, it's extremely unlikely in
	// practice.
	//
	// On 32-bit platforms, we accept the full 32-bit address
	// space because doing so is cheap.
	// mips32 only has access to the low 2GB of virtual memory, so
	// we further limit it to 31 bits.
	//
	// On ios/arm64, although 64-bit pointers are presumably
	// available, pointers are truncated to 33 bits in iOS <14.
	// Furthermore, only the top 4 GiB of the address space are
	// actually available to the application. In iOS >=14, more
	// of the address space is available, and the OS can now
	// provide addresses outside of those 33 bits. Pick 40 bits
	// as a reasonable balance between address space usage by the
	// page allocator, and flexibility for what mmap'd regions
	// we'll accept for the heap. We can't just move to the full
	// 48 bits because this uses too much address space for older
	// iOS versions.
	// TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64
	// to a 48-bit address space like every other arm64 platform.
	//
	// WebAssembly currently has a limit of 4GB linear memory.
	heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 40*goos.IsIos*goarch.IsArm64

	// maxAlloc is the maximum size of an allocation. On 64-bit,
	// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
	// 32-bit, however, this is one less than 1<<32 because the
	// number of bytes in the address space doesn't actually fit
	// in a uintptr.
	maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1

	// The number of bits in a heap address, the size of heap
	// arenas, and the L1 and L2 arena map sizes are related by
	//
	//	(1 << addr bits) = arena size * L1 entries * L2 entries
	//
	// Currently, we balance these as follows:
	//
	//	Platform        Addr bits  Arena size  L1 entries   L2 entries
	//	--------------  ---------  ----------  ----------  -----------
	//	*/64-bit               48        64MB           1    4M (32MB)
	//	windows/64-bit         48         4MB          64    1M  (8MB)
	//	ios/arm64              33         4MB           1  2048  (8KB)
	//	*/32-bit               32         4MB           1  1024  (4KB)
	//	*/mips(le)             31         4MB           1   512  (2KB)

	// heapArenaBytes is the size of a heap arena. The heap
	// consists of mappings of size heapArenaBytes, aligned to
	// heapArenaBytes. The initial heap mapping is one arena.
	//
	// This is currently 64MB on 64-bit non-Windows and 4MB on
	// 32-bit and on Windows.
	// We use smaller arenas on Windows because all committed
	// memory is charged to the process, even if it's not
	// touched. Hence, for processes with small heaps, the
	// mapped arena space needs to be commensurate.
	// This is particularly important with the race detector,
	// since it significantly amplifies the cost of committed
	// memory.
	heapArenaBytes = 1 << logHeapArenaBytes

	// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
	// prefer using heapArenaBytes where possible (we need the
	// constant to compute some other constants).
	logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64

	// heapArenaBitmapBytes is the size of each heap arena's bitmap.
	heapArenaBitmapBytes = heapArenaBytes / (goarch.PtrSize * 8 / 2)

	pagesPerArena = heapArenaBytes / pageSize

	// arenaL1Bits is the number of bits of the arena number
	// covered by the first level arena map.
	//
	// This number should be small, since the first level arena
	// map requires PtrSize*(1<<arenaL1Bits) of space in the
	// binary's BSS. It can be zero, in which case the first level
	// index is effectively unused. There is a performance benefit
	// to this, since the generated code can be more efficient,
	// but it comes at the cost of having a large L2 mapping.
	//
	// We use the L1 map on 64-bit Windows because the arena size
	// is small, but the address space is still 48 bits, and
	// there's a high cost to having a large L2.
	arenaL1Bits = 6 * (_64bit * goos.IsWindows)

	// arenaL2Bits is the number of bits of the arena number
	// covered by the second level arena index.
	//
	// The size of each arena map allocation is proportional to
	// 1<<arenaL2Bits, so it's important that this not be too
	// large. 48 bits leads to 32MB arena index allocations, which
	// is about the practical threshold.
	arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits

	// arenaL1Shift is the number of bits to shift an arena frame
	// number by to compute an index into the first level arena map.
	arenaL1Shift = arenaL2Bits

	// arenaBits is the total bits in a combined arena map index.
	// This is split between the index into the L1 arena map and
	// the L2 arena map.
	arenaBits = arenaL1Bits + arenaL2Bits

	// arenaBaseOffset is the pointer value that corresponds to
	// index 0 in the heap arena map.
	//
	// On amd64, the address space is 48 bits, sign extended to 64
	// bits. This offset lets us handle "negative" addresses (or
	// high addresses if viewed as unsigned).
	//
	// On aix/ppc64, this offset allows us to keep heapAddrBits at
	// 48. Otherwise, it would have to be 60 in order to handle mmap
	// addresses (in the range 0x0a00000000000000 - 0x0affffffffffffff),
	// but in that case the memory reserved in (s *pageAlloc).init for
	// chunks causes significant slowdowns.
	//
	// On other platforms, the user address space is contiguous
	// and starts at 0, so no offset is necessary.
	arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix
	// A typed version of this constant that will make it into DWARF (for viewcore).
	arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 CPUs.
	_MaxGcproc = 32

	// minLegalPointer is the smallest possible legal pointer.
	// This is the smallest possible architectural page size,
	// since we assume that the first page is never mapped.
	//
	// This should agree with minZeroPage in the compiler.
	minLegalPointer uintptr = 4096
)

// physPageSize is the size in bytes of the OS's physical pages.
// Mapping and unmapping operations must be done at multiples of
// physPageSize.
//
// This must be set by the OS init code (typically in osinit) before
// mallocinit.
var physPageSize uintptr

// physHugePageSize is the size in bytes of the OS's default physical huge
// page size whose allocation is opaque to the application. It is assumed
// and verified to be a power of two.
//
// If set, this must be set by the OS init code (typically in osinit) before
// mallocinit. However, setting it at all is optional, and leaving the default
// value is always safe (though potentially less efficient).
//
// Since physHugePageSize is always assumed to be a power of two,
// physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
// The purpose of physHugePageShift is to avoid doing divisions in
// performance critical functions.
var (
	physHugePageSize  uintptr
	physHugePageShift uint
)

func mallocinit() {
	if class_to_size[_TinySizeClass] != _TinySize {
		throw("bad TinySizeClass")
	}

	if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
		// heapBits expects modular arithmetic on bitmap
		// addresses to work.
		throw("heapArenaBitmapBytes not a power of 2")
	}

	// Check physPageSize.
	if physPageSize == 0 {
		// The OS init code failed to fetch the physical page size.
		throw("failed to get system page size")
	}
	if physPageSize > maxPhysPageSize {
		print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
		throw("bad system page size")
	}
	if physPageSize < minPhysPageSize {
		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
		throw("bad system page size")
	}
	if physPageSize&(physPageSize-1) != 0 {
		print("system page size (", physPageSize, ") must be a power of 2\n")
		throw("bad system page size")
	}
	if physHugePageSize&(physHugePageSize-1) != 0 {
		print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
		throw("bad system huge page size")
	}
	if physHugePageSize > maxPhysHugePageSize {
		// physHugePageSize is greater than the maximum supported huge page size.
		// Don't throw here, like in the other cases, since a system configured
		// in this way isn't wrong; we just don't have the code to support it.
		// Instead, silently set the huge page size to zero.
		physHugePageSize = 0
	}
	if physHugePageSize != 0 {
		// Since physHugePageSize is a power of 2, it suffices to increase
		// physHugePageShift until 1<<physHugePageShift == physHugePageSize.
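		// For example, with 2MB huge pages (1<<21 bytes) the loop below
		// leaves physHugePageShift == 21.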
		for 1<<physHugePageShift != physHugePageSize {
			physHugePageShift++
		}
	}
	if pagesPerArena%pagesPerSpanRoot != 0 {
		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n")
		throw("bad pagesPerSpanRoot")
	}
	if pagesPerArena%pagesPerReclaimerChunk != 0 {
		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
		throw("bad pagesPerReclaimerChunk")
	}

	// Initialize the heap.
	mheap_.init()
	mcache0 = allocmcache()
	lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas)
	lockInit(&profInsertLock, lockRankProfInsert)
	lockInit(&profBlockLock, lockRankProfBlock)
	lockInit(&profMemActiveLock, lockRankProfMemActive)
	for i := range profMemFutureLock {
		lockInit(&profMemFutureLock[i], lockRankProfMemFuture)
	}
	lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)

	// Create initial arena growth hints.
	if goarch.PtrSize == 8 {
		// On a 64-bit machine, we pick the following hints
		// because:
		//
		// 1. Starting from the middle of the address space
		// makes it easier to grow out a contiguous range
		// without running into some other mapping.
		//
		// 2. This makes Go heap addresses more easily
		// recognizable when debugging.
		//
		// 3. Stack scanning in gccgo is still conservative,
		// so it's important that addresses be distinguishable
		// from other data.
		//
		// Starting at 0x00c0 means that the valid memory addresses
		// will begin with 0x00c0, 0x00c1, ...
		// In little-endian, that's c0 00, c1 00, ... None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer, which reserves all memory up to 0x0100.
		// These choices reduce the odds of a conservative garbage collector
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// However, on arm64, we ignore all of the above advice and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On ios/arm64, the address space is even smaller.
		//
		// On AIX, mmap starts at 0x0A00000000000000 for 64-bit
		// processes.
		for i := 0x7f; i >= 0; i-- {
			var p uintptr
			switch {
			case raceenabled:
				// The TSAN runtime requires the heap
				// to be in the range [0x00c000000000,
				// 0x00e000000000).
				p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
				if p >= uintptrMask&0x00e000000000 {
					continue
				}
			case GOARCH == "arm64" && GOOS == "ios":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			case GOOS == "aix":
				if i == 0 {
					// We don't use addresses directly after 0x0A00000000000000
					// to avoid collisions with other mmaps done by non-Go programs.
					continue
				}
				p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
			hint.addr = p
			hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		}
	} else {
		// On a 32-bit machine, we're much more concerned
		// about keeping the usable heap contiguous.
		// Hence:
		//
		// 1. We reserve space for all heapArenas up front so
		// they don't get interleaved with the heap. They're
		// ~258MB, so this isn't too bad. (We could reserve a
		// smaller amount of space up front if this is a
		// problem.)
		//
		// 2. We hint the heap to start right above the end of
		// the binary so we have the best chance of keeping it
		// contiguous.
		//
		// 3. We try to stake out a reasonably large initial
		// heap reservation.

		const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
		meta := uintptr(sysReserve(nil, arenaMetaSize))
		if meta != 0 {
			mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
		}

		// We want to start the arena low, but if we're linked
		// against C code, it's possible global constructors
		// have called malloc and adjusted the process' brk.
		// Query the brk so we can avoid trying to map the
		// region over it (which will cause the kernel to put
		// the region somewhere else, likely at a high
		// address).
		procBrk := sbrk0()

		// If we ask for the end of the data segment but the
		// operating system requires a little more space
		// before we can start allocating, it will give out a
		// slightly higher pointer. Except QEMU, which is
		// buggy, as usual: it won't adjust the pointer
		// upward. So adjust it upward a little bit ourselves:
		// 1/4 MB to get away from the running binary image.
		p := firstmoduledata.end
		if p < procBrk {
			p = procBrk
		}
		if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
			p = mheap_.heapArenaAlloc.end
		}
		p = alignUp(p+(256<<10), heapArenaBytes)
		// Because we're worried about fragmentation on
		// 32-bit, we try to make a large initial reservation.
		arenaSizes := []uintptr{
			512 << 20,
			256 << 20,
			128 << 20,
		}
		for _, arenaSize := range arenaSizes {
			a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
			if a != nil {
				mheap_.arena.init(uintptr(a), size, false)
				p = mheap_.arena.end // For hint below
				break
			}
		}
		hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
		hint.addr = p
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
	}
}

// sysAlloc allocates heap arena space for at least n bytes. The
// returned pointer is always heapArenaBytes-aligned and backed by
// h.arenas metadata. The returned size is always a multiple of
// heapArenaBytes. sysAlloc returns nil on failure.
// There is no corresponding free function.
//
// sysAlloc returns a memory region in the Reserved state. This region must
// be transitioned to Prepared and then Ready before use.
//
// h must be locked.
func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
	assertLockHeld(&h.lock)

	n = alignUp(n, heapArenaBytes)

	// First, try the arena pre-reservation.
	// Newly-used mappings are considered released.
	v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased)
	if v != nil {
		size = n
		goto mapped
	}

	// Try to grow the heap at a hint address.
	for h.arenaHints != nil {
		hint := h.arenaHints
		p := hint.addr
		if hint.down {
			p -= n
		}
		if p+n < p {
			// We can't use this, so don't ask.
			v = nil
		} else if arenaIndex(p+n-1) >= 1<<arenaBits {
			// Outside addressable heap. Can't use.
			v = nil
		} else {
			v = sysReserve(unsafe.Pointer(p), n)
		}
		if p == uintptr(v) {
			// Success. Update the hint.
			if !hint.down {
				p += n
			}
			hint.addr = p
			size = n
			break
		}
		// Failed. Discard this hint and try the next.
		//
		// TODO: This would be cleaner if sysReserve could be
		// told to only return the requested address. In
		// particular, this is already how Windows behaves, so
		// it would simplify things there.
		if v != nil {
			sysFreeOS(v, n)
		}
		h.arenaHints = hint.next
		h.arenaHintAlloc.free(unsafe.Pointer(hint))
	}

	if size == 0 {
		if raceenabled {
			// The race detector assumes the heap lives in
			// [0x00c000000000, 0x00e000000000), but we
			// just ran out of hints in this region. Give
			// a nice failure.
			throw("too many address space collisions for -race mode")
		}

		// All of the hints failed, so we'll take any
		// (sufficiently aligned) address the kernel will give
		// us.
		v, size = sysReserveAligned(nil, n, heapArenaBytes)
		if v == nil {
			return nil, 0
		}

		// Create new hints for extending this region.
		hint := (*arenaHint)(h.arenaHintAlloc.alloc())
		hint.addr, hint.down = uintptr(v), true
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		hint = (*arenaHint)(h.arenaHintAlloc.alloc())
		hint.addr = uintptr(v) + size
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
	}

	// Check for bad pointers or pointers we can't use.
	{
		var bad string
		p := uintptr(v)
		if p+size < p {
			bad = "region exceeds uintptr range"
		} else if arenaIndex(p) >= 1<<arenaBits {
			bad = "base outside usable address space"
		} else if arenaIndex(p+size-1) >= 1<<arenaBits {
			bad = "end outside usable address space"
		}
		if bad != "" {
			// This should be impossible on most architectures,
			// but it would be really confusing to debug.
			print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
			throw("memory reservation exceeds address space limit")
		}
	}

	if uintptr(v)&(heapArenaBytes-1) != 0 {
		throw("misrounded allocation in sysAlloc")
	}

mapped:
	// Create arena metadata.
	for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
		l2 := h.arenas[ri.l1()]
		if l2 == nil {
			// Allocate an L2 arena map.
			//
			// Use sysAllocOS instead of sysAlloc or persistentalloc because there's no
			// statistic we can comfortably account for this space in. With this structure,
			// we rely on demand paging to avoid large overheads, but tracking which memory
			// is paged in is too expensive. Trying to account for the whole region means
			// that it will appear like an enormous memory overhead in statistics, even though
			// it is not.
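			//
			// On most 64-bit platforms this is a single reservation of about
			// 32MB (4M entries; see the table above heapArenaBytes), of which
			// demand paging keeps the touched portion small.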
			l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2)))
			if l2 == nil {
				throw("out of memory allocating heap arena map")
			}
			atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
		}

		if l2[ri.l2()] != nil {
			throw("arena already initialized")
		}
		var r *heapArena
		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
		if r == nil {
			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
			if r == nil {
				throw("out of memory allocating heap arena metadata")
			}
		}

		// Add the arena to the arenas list.
		if len(h.allArenas) == cap(h.allArenas) {
			size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize
			if size == 0 {
				size = physPageSize
			}
			newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
			if newArray == nil {
				throw("out of memory allocating allArenas")
			}
			oldSlice := h.allArenas
			*(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)}
			copy(h.allArenas, oldSlice)
			// Do not free the old backing array because
			// there may be concurrent readers. Since we
			// double the array each time, this can lead
			// to at most 2x waste.
		}
		h.allArenas = h.allArenas[:len(h.allArenas)+1]
		h.allArenas[len(h.allArenas)-1] = ri

		// Store atomically just in case an object from the
		// new heap arena becomes visible before the heap lock
		// is released (which shouldn't happen, but there's
		// little downside to this).
		atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
	}

	// Tell the race detector about the new heap memory.
	if raceenabled {
		racemapshadow(v, size)
	}

	return
}

// sysReserveAligned is like sysReserve, but the returned pointer is
// aligned to align bytes. It may reserve either size or size+align bytes,
// so it returns the size that was reserved.
func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
	// Since the alignment is rather large in uses of this
	// function, we're not likely to get it by chance, so we ask
	// for a larger region and remove the parts we don't need.
	retries := 0
retry:
	p := uintptr(sysReserve(v, size+align))
	switch {
	case p == 0:
		return nil, 0
	case p&(align-1) == 0:
		// We got lucky and got an aligned region, so we can
		// use the whole thing.
		return unsafe.Pointer(p), size + align
	case GOOS == "windows":
		// On Windows we can't release pieces of a
		// reservation, so we release the whole thing and
		// re-reserve the aligned sub-region. This may race,
		// so we may have to try again.
		sysFreeOS(unsafe.Pointer(p), size+align)
		p = alignUp(p, align)
		p2 := sysReserve(unsafe.Pointer(p), size)
		if p != uintptr(p2) {
			// Must have raced. Try again.
			sysFreeOS(p2, size)
			if retries++; retries == 100 {
				throw("failed to allocate aligned heap memory; too many retries")
			}
			goto retry
		}
		// Success.
		return p2, size
	default:
		// Trim off the unaligned parts.
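		// We reserved [p, p+size+align). Keep the aligned middle
		// [pAligned, pAligned+size) and return the unaligned head and
		// tail to the OS.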
		pAligned := alignUp(p, align)
		sysFreeOS(unsafe.Pointer(p), pAligned-p)
		end := pAligned + size
		endLen := (p + size + align) - end
		if endLen > 0 {
			sysFreeOS(unsafe.Pointer(end), endLen)
		}
		return unsafe.Pointer(pAligned), size
	}
}

// base address for all 0-byte allocations
var zerobase uintptr

// nextFreeFast returns the next free object if one is quickly available.
// Otherwise it returns 0.
func nextFreeFast(s *mspan) gclinkptr {
	theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
	if theBit < 64 {
		result := s.freeindex + uintptr(theBit)
		if result < s.nelems {
			freeidx := result + 1
			if freeidx%64 == 0 && freeidx != s.nelems {
				return 0
			}
			s.allocCache >>= uint(theBit + 1)
			s.freeindex = freeidx
			s.allocCount++
			return gclinkptr(result*s.elemsize + s.base())
		}
	}
	return 0
}

// nextFree returns the next free object from the cached span if one is available.
// Otherwise it refills the cache with a span with an available object and
// returns that object along with a flag indicating that this was a heavyweight
// allocation. If it is a heavyweight allocation the caller must determine
// whether a new GC cycle needs to be started or, if the GC is active, whether
// this goroutine needs to assist the GC.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
	s = c.alloc[spc]
	shouldhelpgc = false
	freeIndex := s.nextFreeIndex()
	if freeIndex == s.nelems {
		// The span is full.
		if uintptr(s.allocCount) != s.nelems {
			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
		}
		c.refill(spc)
		shouldhelpgc = true
		s = c.alloc[spc]

		freeIndex = s.nextFreeIndex()
	}

	if freeIndex >= s.nelems {
		throw("freeIndex is not valid")
	}

	v = gclinkptr(freeIndex*s.elemsize + s.base())
	s.allocCount++
	if uintptr(s.allocCount) > s.nelems {
		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
		throw("s.allocCount > s.nelems")
	}
	return
}

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if gcphase == _GCmarktermination {
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}
	userSize := size
	if asanenabled {
		// In the ASAN runtime library, malloc() allocates extra memory,
		// the redzone, around the user-requested memory region, and the
		// redzones are marked as unaddressable. We perform the same
		// operations in Go to detect overflows and underflows.
		size += computeRZlog(size)
	}

	if debug.malloc {
		if debug.sbrk != 0 {
			align := uintptr(16)
			if typ != nil {
				// TODO(austin): This should be just
				//	align = uintptr(typ.align)
				// but that's only 4 on 32-bit platforms,
				// even if there's a uint64 field in typ (see #599).
				// This causes 64-bit atomic accesses to panic.
				// Hence, we use stricter alignment that matches
				// the normal allocator better.
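				//
				// Pick the largest of 8, 4, 2, or 1 that divides size evenly.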
				if size&7 == 0 {
					align = 8
				} else if size&3 == 0 {
					align = 4
				} else if size&1 == 0 {
					align = 2
				} else {
					align = 1
				}
			}
			return persistentalloc(size, align, &memstats.other_sys)
		}

		if inittrace.active && inittrace.id == getg().goid {
			// Init functions are executed sequentially in a single goroutine.
			inittrace.allocs += 1
		}
	}

	// assistG is the G to charge for this allocation, or nil if
	// GC is not currently active.
	var assistG *g
	if gcBlackenEnabled != 0 {
		// Charge the current user G for this allocation.
		assistG = getg()
		if assistG.m.curg != nil {
			assistG = assistG.m.curg
		}
		// Charge the allocation against the G. We'll account
		// for internal fragmentation at the end of mallocgc.
		assistG.gcAssistBytes -= int64(size)

		if assistG.gcAssistBytes < 0 {
			// This G is in debt. Assist the GC to correct
			// this before allocating. This must happen
			// before disabling preemption.
			gcAssistAlloc(assistG)
		}
	}

	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	shouldhelpgc := false
	dataSize := userSize
	c := getMCache(mp)
	if c == nil {
		throw("mallocgc called without a P or outside bootstrapping")
	}
	var span *mspan
	var x unsafe.Pointer
	noscan := typ == nil || typ.ptrdata == 0
	// In some cases block zeroing can profitably (for latency reduction purposes)
	// be delayed till preemption is possible; delayedZeroing tracks that state.
	delayedZeroing := false
	if size <= maxSmallSize {
		if noscan && size < maxTinySize {
			// Tiny allocator.
			//
			// The tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be noscan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// The size of the memory block used for combining (maxTinySize) is
			// tunable. The current setting is 16 bytes, which relates to a 2x
			// worst-case memory wastage (when all but one of the subobjects
			// are unreachable).
			// 8 bytes would result in no wastage at all, but would provide fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst-case wastage.
			// The best-case saving is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed
			// explicitly. So when an object will be freed explicitly, we ensure
			// that its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows setting
			// finalizers for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces heap size by ~20%.
			off := c.tinyoffset
			// Align tiny pointer for required (conservative) alignment.
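			// For example, a 5-byte request leaves off unchanged, while a
			// 12-byte request rounds off up to a multiple of 4 (8 on 32-bit
			// systems, see below).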
			if size&7 == 0 {
				off = alignUp(off, 8)
			} else if goarch.PtrSize == 4 && size == 12 {
				// Conservatively align 12-byte objects to 8 bytes on 32-bit
				// systems so that objects whose first field is a 64-bit
				// value are aligned to 8 bytes and do not cause a fault on
				// atomic access. See issue 37262.
				// TODO(mknyszek): Remove this workaround if/when issue 36606
				// is resolved.
				off = alignUp(off, 8)
			} else if size&3 == 0 {
				off = alignUp(off, 4)
			} else if size&1 == 0 {
				off = alignUp(off, 2)
			}
			if off+size <= maxTinySize && c.tiny != 0 {
				// The object fits into existing tiny block.
				x = unsafe.Pointer(c.tiny + off)
				c.tinyoffset = off + size
				c.tinyAllocs++
				mp.mallocing = 0
				releasem(mp)
				return x
			}
			// Allocate a new maxTinySize block.
			span = c.alloc[tinySpanClass]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(tinySpanClass)
			}
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on amount of remaining free space.
			if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
				// Note: disabled when race detector is on, see comment near end of this function.
				c.tiny = uintptr(x)
				c.tinyoffset = size
			}
			size = maxTinySize
		} else {
			var sizeclass uint8
			if size <= smallSizeMax-8 {
				sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
			} else {
				sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
			}
			size = uintptr(class_to_size[sizeclass])
			spc := makeSpanClass(sizeclass, noscan)
			span = c.alloc[spc]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(spc)
			}
			x = unsafe.Pointer(v)
			if needzero && span.needzero != 0 {
				memclrNoHeapPointers(unsafe.Pointer(v), size)
			}
		}
	} else {
		shouldhelpgc = true
		// For large allocations, keep track of zeroed state so that
		// bulk zeroing can happen later in a preemptible context.
		span = c.allocLarge(size, noscan)
		span.freeindex = 1
		span.allocCount = 1
		size = span.elemsize
		x = unsafe.Pointer(span.base())
		if needzero && span.needzero != 0 {
			if noscan {
				delayedZeroing = true
			} else {
				memclrNoHeapPointers(x, size)
				// We've in theory cleared almost the whole span here,
				// and could take the extra step of actually clearing
				// the whole thing. However, don't. Any GC bits for the
				// uncleared parts will be zero, and it's just going to
				// be needzero = 1 once freed anyway.
			}
		}
	}

	var scanSize uintptr
	if !noscan {
		heapBitsSetType(uintptr(x), size, dataSize, typ)
		if dataSize > typ.size {
			// Array allocation. If there are any
			// pointers, GC has to scan to the last
			// element.
			if typ.ptrdata != 0 {
				scanSize = dataSize - typ.size + typ.ptrdata
			}
		} else {
			scanSize = typ.ptrdata
		}
		c.scanAlloc += scanSize
	}

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()
	// As x and the heap bits are initialized, update
	// freeIndexForScan now so x is seen by the GC
	// (including conservative scan) as an allocated object.
	// While this pointer can't escape into user code as a
	// _live_ pointer until we return, conservative scanning
	// may find a dead pointer that happens to point into this
	// object. Delaying this update until now ensures that
	// conservative scanning considers this pointer dead until
	// this point.
	span.freeIndexForScan = span.freeindex

	// Allocate black during GC.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase != _GCoff {
		gcmarknewobject(span, uintptr(x), size, scanSize)
	}

	if raceenabled {
		racemalloc(x, size)
	}

	if msanenabled {
		msanmalloc(x, size)
	}

	if asanenabled {
		// We should only read/write the memory with the size asked by the user.
		// The rest of the allocated memory should be poisoned, so that we can
		// report errors when accessing poisoned memory.
		// The allocated memory is larger than the required userSize; it also
		// includes the redzone and some other padding bytes.
		rzBeg := unsafe.Add(x, userSize)
		asanpoison(rzBeg, size-userSize)
		asanunpoison(x, userSize)
	}

	if rate := MemProfileRate; rate > 0 {
		// Note cache c only valid while m acquired; see #47302
		if rate != 1 && size < c.nextSample {
			c.nextSample -= size
		} else {
			profilealloc(mp, x, size)
		}
	}
	mp.mallocing = 0
	releasem(mp)

	// Pointer-free data can be zeroed late in a context where preemption can occur.
	// x will keep the memory alive.
	if delayedZeroing {
		if !noscan {
			throw("delayed zeroing on data that may contain pointers")
		}
		memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
	}

	if debug.malloc {
		if debug.allocfreetrace != 0 {
			tracealloc(x, size, typ)
		}

		if inittrace.active && inittrace.id == getg().goid {
			// Init functions are executed sequentially in a single goroutine.
			inittrace.bytes += uint64(size)
		}
	}

	if assistG != nil {
		// Account for internal fragmentation in the assist
		// debt now that we know it.
		assistG.gcAssistBytes -= int64(size - dataSize)
	}

	if shouldhelpgc {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled && noscan && dataSize < maxTinySize {
		// Pad tinysize allocations so they are aligned with the end
		// of the tinyalloc region. This ensures that any arithmetic
		// that goes off the top end of the object will be detectable
		// by checkptr (issue 38872).
		// Note that we disable tinyalloc when raceenabled for this to work.
		// TODO: This padding is only performed when the race detector
		// is enabled. It would be nice to enable it if any package
		// was compiled with checkptr, but there's no easy way to
		// detect that (especially at compile time).
		// TODO: enable this padding for all allocations, not just
		// tinyalloc ones. It's tricky because of pointer maps.
		// Maybe just all noscan objects?
		x = add(x, size-dataSize)
	}

	return x
}

// memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
// on chunks of the buffer to be zeroed, with opportunities for preemption
// along the way. memclrNoHeapPointers contains no safepoints and also
// cannot be preemptively scheduled, so this provides a still-efficient
// block clear that can also be preempted on a reasonable granularity.
//
// Use this with care; if the data being cleared is tagged to contain
// pointers, this allows the GC to run before it is all cleared.
func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
	v := uintptr(x)
	// Got this from benchmarking. 128k is too small, 512k is too large.
	const chunkBytes = 256 * 1024
	vsize := v + size
	for voff := v; voff < vsize; voff = voff + chunkBytes {
		if getg().preempt {
			// may hold locks, e.g., profiling
			goschedguarded()
		}
		// clear min(avail, lump) bytes
		n := vsize - voff
		if n > chunkBytes {
			n = chunkBytes
		}
		memclrNoHeapPointers(unsafe.Pointer(voff), n)
	}
}

// newobject is the implementation of the new builtin.
// The compiler (both frontend and SSA backend) knows the signature
// of this function.
func newobject(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}

//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}

//go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}

// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer {
	if n == 1 {
		return mallocgc(typ.size, typ, true)
	}
	mem, overflow := math.MulUintptr(typ.size, uintptr(n))
	if overflow || mem > maxAlloc || n < 0 {
		panic(plainError("runtime: allocation size out of range"))
	}
	return mallocgc(mem, typ, true)
}

//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}

func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	c := getMCache(mp)
	if c == nil {
		throw("profilealloc called without a P or outside bootstrapping")
	}
	c.nextSample = nextSample()
	mProf_Malloc(x, size)
}

// nextSample returns the next sampling point for heap profiling. The goal is
// to sample allocations on average every MemProfileRate bytes, but with a
// completely random distribution over the allocation timeline; this
// corresponds to a Poisson process with parameter MemProfileRate. In Poisson
// processes, the distance between two samples follows the exponential
// distribution (with mean MemProfileRate), so the best return value is a
// random number taken from an exponential distribution whose mean is
// MemProfileRate.
func nextSample() uintptr {
	if MemProfileRate == 1 {
		// Callers assign our return value to
		// mcache.next_sample, but next_sample is not used
		// when the rate is 1. So avoid the math below and
		// just return something.
		return 0
	}
	if GOOS == "plan9" {
		// Plan 9 doesn't support floating point in note handler.
		if g := getg(); g == g.m.gsignal {
			return nextSampleNoFP()
		}
	}

	return uintptr(fastexprand(MemProfileRate))
}

// fastexprand returns a random number from an exponential distribution with
// the specified mean.
func fastexprand(mean int) int32 {
	// Avoid overflow. Maximum possible step is
	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
	switch {
	case mean > 0x7000000:
		mean = 0x7000000
	case mean == 0:
		return 0
	}

	// Take a random sample of the exponential distribution with mean `mean`.
	// The probability density function is (1/mean)*exp(-x/mean), so the CDF is
	// p = 1 - exp(-x/mean), so
	//	q = 1 - p = exp(-x/mean)
	//	log_e(q) = -x/mean
	//	x = -log_e(q) * mean
	//	x = log_2(q) * (-log_e(2)) * mean    ; Using log_2 for efficiency
	const randomBitCount = 26
	q := fastrandn(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(mean))) + 1
}

// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() uintptr {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return uintptr(fastrandn(uint32(2 * rate)))
	}
	return 0
}

type persistentAlloc struct {
	base *notInHeap
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}

// persistentChunkSize is the number of bytes we allocate when we grow
// a persistentAlloc.
const persistentChunkSize = 256 << 10

// persistentChunks is a list of all the persistent chunks we have
// allocated. The list is maintained through the first word in the
// persistent chunk. This is updated atomically.
var persistentChunks *notInHeap

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses the default alignment (currently 8).
// The returned memory will be zeroed.
// sysStat must be non-nil.
//
// Consider marking persistentalloc'd types go:notinheap.
func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	var p *notInHeap
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return unsafe.Pointer(p)
}

// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
	const (
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return (*notInHeap)(sysAlloc(size, sysStat))
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = alignUp(persistent.off, align)
	if persistent.off+size > persistentChunkSize || persistent.base == nil {
		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}

		// Add the new chunk to the persistentChunks list.
		for {
			chunks := uintptr(unsafe.Pointer(persistentChunks))
			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
				break
			}
		}
		persistent.off = alignUp(goarch.PtrSize, align)
	}
	p := persistent.base.add(persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		sysStat.add(int64(size))
		memstats.other_sys.add(-int64(size))
	}
	return p
}

// inPersistentAlloc reports whether p points to memory allocated by
// persistentalloc. This must be nosplit because it is called by the
// cgo checker code, which is called by the write barrier code.
//
//go:nosplit
func inPersistentAlloc(p uintptr) bool {
	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
	for chunk != 0 {
		if p >= chunk && p < chunk+persistentChunkSize {
			return true
		}
		chunk = *(*uintptr)(unsafe.Pointer(chunk))
	}
	return false
}

// linearAlloc is a simple linear allocator that pre-reserves a region
// of memory and then optionally maps that region into the Ready state
// as needed.
//
// The caller is responsible for locking.
type linearAlloc struct {
	next   uintptr // next free byte
	mapped uintptr // one byte past end of mapped space
	end    uintptr // end of reserved space

	mapMemory bool // transition memory from Reserved to Ready if true
}

func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
	if base+size < base {
		// Chop off the last byte. The runtime isn't prepared
		// to deal with situations where the bounds could overflow.
		// Leave that memory reserved, though, so we don't map it
		// later.
		size -= 1
	}
	l.next, l.mapped = base, base
	l.end = base + size
	l.mapMemory = mapMemory
}

func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	p := alignUp(l.next, align)
	if p+size > l.end {
		return nil
	}
	l.next = p + size
	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
		if l.mapMemory {
			// Transition from Reserved to Prepared to Ready.
			n := pEnd - l.mapped
			sysMap(unsafe.Pointer(l.mapped), n, sysStat)
			sysUsed(unsafe.Pointer(l.mapped), n, n)
		}
		l.mapped = pEnd
	}
	return unsafe.Pointer(p)
}

// notInHeap is off-heap memory allocated by a lower-level allocator
// like sysAlloc or persistentAlloc.
//
// In general, it's better to use real types marked as go:notinheap,
// but this serves as a generic type for situations where that isn't
// possible (like in the allocators).
//
// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
//
//go:notinheap
type notInHeap struct{}

func (p *notInHeap) add(bytes uintptr) *notInHeap {
	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
}

// computeRZlog computes the size of the redzone.
// Refer to the implementation of compiler-rt.
func computeRZlog(userSize uintptr) uintptr {
	switch {
	case userSize <= (64 - 16):
		return 16 << 0
	case userSize <= (128 - 32):
		return 16 << 1
	case userSize <= (512 - 64):
		return 16 << 2
	case userSize <= (4096 - 128):
		return 16 << 3
	case userSize <= (1<<14)-256:
		return 16 << 4
	case userSize <= (1<<15)-512:
		return 16 << 5
	case userSize <= (1<<16)-1024:
		return 16 << 6
	default:
		return 16 << 7
	}
}
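// For example, a 40-byte user allocation gets a 16-byte redzone (so mallocgc
// asks for 56 bytes in total), while a 100-byte allocation gets a 64-byte
// redzone.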