1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
7 // This was originally based on tcmalloc, but has diverged quite a bit.
8 // http://goog-perftools.sourceforge.net/doc/tcmalloc.html
10 // The main allocator works in runs of pages.
11 // Small allocation sizes (up to and including 32 kB) are
12 // rounded to one of about 70 size classes, each of which
13 // has its own free set of objects of exactly that size.
14 // Any free page of memory can be split into a set of objects
15 // of one size class, which are then managed using a free bitmap.
17 // The allocator's data structures are:
19 // fixalloc: a free-list allocator for fixed-size off-heap objects,
20 // used to manage storage used by the allocator.
21 // mheap: the malloc heap, managed at page (8192-byte) granularity.
22 // mspan: a run of in-use pages managed by the mheap.
23 // mcentral: collects all spans of a given size class.
24 // mcache: a per-P cache of mspans with free space.
25 // mstats: allocation statistics.
27 // Allocating a small object proceeds up a hierarchy of caches:
29 // 1. Round the size up to one of the small size classes
30 // and look in the corresponding mspan in this P's mcache.
31 // Scan the mspan's free bitmap to find a free slot.
32 // If there is a free slot, allocate it.
33 // This can all be done without acquiring a lock.
35 // 2. If the mspan has no free slots, obtain a new mspan
36 // from the mcentral's list of mspans of the required size
37 // class that have free space.
38 // Obtaining a whole span amortizes the cost of locking
41 // 3. If the mcentral's mspan list is empty, obtain a run
42 // of pages from the mheap to use for the mspan.
44 // 4. If the mheap is empty or has no page runs large enough,
45 // allocate a new group of pages (at least 1MB) from the
46 // operating system. Allocating a large run of pages
47 // amortizes the cost of talking to the operating system.
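//
// As a rough sketch of the small-object fast path (the real code lives in
// mallocgc, nextFreeFast, and mcache.nextFree below; names are taken from
// this file):
//
//	span := c.alloc[spc]    // the P's cached mspan for this size class
//	v := nextFreeFast(span) // scan span.allocCache for a free slot
//	if v == 0 {
//		v, span, _ = c.nextFree(spc) // slow path: refill from mcentral/mheap
//	}
//
// Only the refill path may take locks.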
49 // Sweeping an mspan and freeing objects on it proceeds up a similar
52 // 1. If the mspan is being swept in response to allocation, it
53 // is returned to the mcache to satisfy the allocation.
55 // 2. Otherwise, if the mspan still has allocated objects in it,
56 // it is placed on the mcentral free list for the mspan's size
59 // 3. Otherwise, if all objects in the mspan are free, the mspan's
60 // pages are returned to the mheap and the mspan is now dead.
62 // Allocating and freeing a large object uses the mheap
63 // directly, bypassing the mcache and mcentral.
65 // If mspan.needzero is false, then free object slots in the mspan are
66 // already zeroed. Otherwise if needzero is true, objects are zeroed as
67 // they are allocated. There are various benefits to delaying zeroing
70 // 1. Stack frame allocation can avoid zeroing altogether.
72 // 2. It exhibits better temporal locality, since the program is
73 // probably about to write to the memory.
75 // 3. We don't zero pages that never get reused.
77 // Virtual memory layout
79 // The heap consists of a set of arenas, which are 64MB on 64-bit and
80 // 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
81 // aligned to the arena size.
83 // Each arena has an associated heapArena object that stores the
84 // metadata for that arena: the heap bitmap for all words in the arena
85 // and the span map for all pages in the arena. heapArena objects are
86 // themselves allocated off-heap.
88 // Since arenas are aligned, the address space can be viewed as a
89 // series of arena frames. The arena map (mheap_.arenas) maps from
90 // arena frame number to *heapArena, or nil for parts of the address
91 // space not backed by the Go heap. The arena map is structured as a
92 // two-level array consisting of a single "L1" arena map and many "L2" arena
93 // maps; however, since arenas are large, on many architectures, the
94 // arena map consists of a single, large L2 map.
96 // The arena map covers the entire possible address space, allowing
97 // the Go heap to use any part of the address space. The allocator
98 // attempts to keep arenas contiguous so that large spans (and hence
99 // large objects) can cross arenas.
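//
// For illustration, looking up the metadata for a heap address p goes through
// the arena map roughly like this (a sketch of arenaIndex, which lives in
// mheap.go):
//
//	ai := arenaIdx((p - arenaBaseOffset) / heapArenaBytes) // arena frame number
//	ha := mheap_.arenas[ai.l1()][ai.l2()]                  // *heapArena, or nil
//
// On platforms where arenaL1Bits is 0 (most of them), ai.l1() is always 0 and
// the arena map is effectively a single flat L2 array.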
105 "internal/goexperiment"
107 "runtime/internal/atomic"
108 "runtime/internal/math"
109 "runtime/internal/sys"
114 maxTinySize = _TinySize
115 tinySizeClass = _TinySizeClass
116 maxSmallSize = _MaxSmallSize
118 pageShift = _PageShift
121 _PageSize = 1 << _PageShift
122 _PageMask = _PageSize - 1
124 // _64bit = 1 on 64-bit systems, 0 on 32-bit systems
125 _64bit = 1 << (^uintptr(0) >> 63) / 2
127 // Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
129 _TinySizeClass = int8(2)
131 _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc
133 // Per-P, per order stack segment cache size.
134 _StackCacheSize = 32 * 1024
136 // Number of orders that get caching. Order 0 is FixedStack
137 // and each successive order is twice as large.
138 // We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
139 // will be allocated directly.
140 // Since FixedStack is different on different systems, we
141 // must vary NumStackOrders to keep the same maximum cached size.
142 // OS | FixedStack | NumStackOrders
143 // -----------------+------------+---------------
144 // linux/darwin/bsd | 2KB | 4
145 // windows/32 | 4KB | 3
146 // windows/64 | 8KB | 2
148 _NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
150 // heapAddrBits is the number of bits in a heap address. On
151 // amd64, addresses are sign-extended beyond heapAddrBits. On
152 // other arches, they are zero-extended.
154 // On most 64-bit platforms, we limit this to 48 bits based on a
155 // combination of hardware and OS limitations.
157 // amd64 hardware limits addresses to 48 bits, sign-extended
158 // to 64 bits. Addresses where the top 16 bits are not either
159 // all 0 or all 1 are "non-canonical" and invalid. Because of
160 // these "negative" addresses, we offset addresses by 1<<47
161 // (arenaBaseOffset) on amd64 before computing indexes into
162 // the heap arenas index. In 2017, amd64 hardware added
163 // support for 57 bit addresses; however, currently only Linux
164 // supports this extension and the kernel will never choose an
165 // address above 1<<47 unless mmap is called with a hint
166 // address above 1<<47 (which we never do).
168 // arm64 hardware (as of ARMv8) limits user addresses to 48
169 // bits, in the range [0, 1<<48).
171 // ppc64, mips64, and s390x support arbitrary 64 bit addresses
172 // in hardware. On Linux, Go leans on stricter OS limits. Based
173 // on Linux's processor.h, the user address space is limited as
174 // follows on 64-bit architectures:
176 // Architecture Name Maximum Value (exclusive)
177 // ---------------------------------------------------------------------
178 // amd64 TASK_SIZE_MAX 0x007ffffffff000 (47 bit addresses)
179 // arm64 TASK_SIZE_64 0x01000000000000 (48 bit addresses)
180 // ppc64{,le} TASK_SIZE_USER64 0x00400000000000 (46 bit addresses)
181 // mips64{,le} TASK_SIZE64 0x00010000000000 (40 bit addresses)
182 // s390x TASK_SIZE 1<<64 (64 bit addresses)
184 // These limits may increase over time, but are currently at
185 // most 48 bits except on s390x. On all architectures, Linux
186 // starts placing mmap'd regions at addresses that are
187 // significantly below 48 bits, so even if it's possible to
188 // exceed Go's 48 bit limit, it's extremely unlikely in
191 // On 32-bit platforms, we accept the full 32-bit address
192 // space because doing so is cheap.
193 // mips32 only has access to the low 2GB of virtual memory, so
194 // we further limit it to 31 bits.
196 // On ios/arm64, although 64-bit pointers are presumably
197 // available, pointers are truncated to 33 bits in iOS <14.
198 // Furthermore, only the top 4 GiB of the address space are
199 // actually available to the application. In iOS >=14, more
200 // of the address space is available, and the OS can now
201 // provide addresses outside of those 33 bits. Pick 40 bits
202 // as a reasonable balance between address space usage by the
203 // page allocator, and flexibility for what mmap'd regions
204 // we'll accept for the heap. We can't just move to the full
205 // 48 bits because this uses too much address space for older
207 // TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64
208 // to a 48-bit address space like every other arm64 platform.
210 // WebAssembly currently has a limit of 4GB linear memory.
211 heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 40*goos.IsIos*goarch.IsArm64
213 // maxAlloc is the maximum size of an allocation. On 64-bit,
214 // it's theoretically possible to allocate 1<<heapAddrBits bytes. On
215 // 32-bit, however, this is one less than 1<<32 because the
216 // number of bytes in the address space doesn't actually fit
218 maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1
220 // The number of bits in a heap address, the size of heap
221 // arenas, and the L1 and L2 arena map sizes are related by
223 // (1 << addr bits) = arena size * L1 entries * L2 entries
225 // Currently, we balance these as follows:
227 // Platform Addr bits Arena size L1 entries L2 entries
228 // -------------- --------- ---------- ---------- -----------
229 // */64-bit 48 64MB 1 4M (32MB)
230 // windows/64-bit 48 4MB 64 1M (8MB)
231 // ios/arm64 33 4MB 1 2048 (8KB)
232 // */32-bit 32 4MB 1 1024 (4KB)
233 // */mips(le) 31 4MB 1 512 (2KB)
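//
// For example, on a generic 64-bit platform the relation above works out to
// 1<<48 = 2^26 (64MB arena) * 2^0 (1 L1 entry) * 2^22 (4M L2 entries), and on
// windows/64-bit to 1<<48 = 2^22 (4MB arena) * 2^6 (64 L1 entries) * 2^20
// (1M L2 entries).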
235 // heapArenaBytes is the size of a heap arena. The heap
236 // consists of mappings of size heapArenaBytes, aligned to
237 // heapArenaBytes. The initial heap mapping is one arena.
239 // This is currently 64MB on 64-bit non-Windows and 4MB on
240 // 32-bit and on Windows. We use smaller arenas on Windows
241 // because all committed memory is charged to the process,
242 // even if it's not touched. Hence, for processes with small
243 // heaps, the mapped arena space needs to be commensurate.
244 // This is particularly important with the race detector,
245 // since it significantly amplifies the cost of committed
247 heapArenaBytes = 1 << logHeapArenaBytes
249 heapArenaWords = heapArenaBytes / goarch.PtrSize
251 // logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
252 // prefer using heapArenaBytes where possible (we need the
253 // constant to compute some other constants).
254 logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64
256 // heapArenaBitmapWords is the size of each heap arena's bitmap in uintptrs.
257 heapArenaBitmapWords = heapArenaWords / (8 * goarch.PtrSize)
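// For example, on 64-bit with 64MB arenas: heapArenaWords = 2^26/2^3 = 2^23,
// so heapArenaBitmapWords = 2^23/64 = 2^17 words, i.e. 1MB of bitmap per arena.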
259 pagesPerArena = heapArenaBytes / pageSize
261 // arenaL1Bits is the number of bits of the arena number
262 // covered by the first level arena map.
264 // This number should be small, since the first level arena
265 // map requires PtrSize*(1<<arenaL1Bits) of space in the
266 // binary's BSS. It can be zero, in which case the first level
267 // index is effectively unused. There is a performance benefit
268 // to this, since the generated code can be more efficient,
269 // but it comes at the cost of having a large L2 mapping.
271 // We use the L1 map on 64-bit Windows because the arena size
272 // is small, but the address space is still 48 bits, and
273 // there's a high cost to having a large L2.
274 arenaL1Bits = 6 * (_64bit * goos.IsWindows)
276 // arenaL2Bits is the number of bits of the arena number
277 // covered by the second level arena index.
279 // The size of each arena map allocation is proportional to
280 // 1<<arenaL2Bits, so it's important that this not be too
281 // large. 48 bits leads to 32MB arena index allocations, which
282 // is about the practical threshold.
283 arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits
285 // arenaL1Shift is the number of bits to shift an arena frame
286 // number by to compute an index into the first level arena map.
287 arenaL1Shift = arenaL2Bits
289 // arenaBits is the total bits in a combined arena map index.
290 // This is split between the index into the L1 arena map and
292 arenaBits = arenaL1Bits + arenaL2Bits
294 // arenaBaseOffset is the pointer value that corresponds to
295 // index 0 in the heap arena map.
297 // On amd64, the address space is 48 bits, sign extended to 64
298 // bits. This offset lets us handle "negative" addresses (or
299 // high addresses if viewed as unsigned).
301 // On aix/ppc64, this offset allows heapAddrBits to stay at
302 // 48. Otherwise, it would have to be 60 in order to handle mmap addresses
303 // (in the range 0x0a00000000000000 - 0x0affffffffffffff). But in that
304 // case, the memory reserved in (s *pageAlloc).init for chunks
305 // causes significant slowdowns.
307 // On other platforms, the user address space is contiguous
308 // and starts at 0, so no offset is necessary.
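// For example, on amd64 arena frame 0 corresponds to the lowest "negative"
// canonical address 0xffff800000000000, and address 0 lands in frame
// (1<<47)/heapArenaBytes = 1<<21, the middle of the 4M-entry arena map.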
309 arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix
310 // A typed version of this constant that will make it into DWARF (for viewcore).
311 arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)
313 // Max number of threads to run garbage collection.
314 // 2, 3, and 4 are all plausible maximums depending
315 // on the hardware details of the machine. The garbage
316 // collector scales well to 32 cpus.
319 // minLegalPointer is the smallest possible legal pointer.
320 // This is the smallest possible architectural page size,
321 // since we assume that the first page is never mapped.
323 // This should agree with minZeroPage in the compiler.
324 minLegalPointer uintptr = 4096
326 // minHeapForMetadataHugePages sets a threshold on when certain kinds of
327 // heap metadata, currently the arenas map L2 entries and page alloc bitmap
328 // mappings, are allowed to be backed by huge pages. If the heap goal ever
329 // exceeds this threshold, then huge pages are enabled.
331 // These numbers are chosen with the assumption that huge pages are on the
332 // order of a few MiB in size.
334 // The kind of metadata this applies to has a very low overhead when compared
335 // to address space used, but their constant overheads for small heaps would
336 // be very high if they were to be backed by huge pages (e.g. a few MiB makes
337 // a huge difference for an 8 MiB heap, but barely any difference for a 1 GiB
338 // heap). The benefit of huge pages is also not worth it for small heaps,
339 // because only a very, very small part of the metadata is used for small heaps.
341 // N.B. If the heap goal exceeds this threshold and then shrinks back to a very
342 // small size, huge pages will still be enabled for this mapping. The reason is
343 // that disabling them isn't worthwhile unless we're also returning the physical
344 // memory for these metadata mappings back to the OS. That would be quite complex
345 // to do in general as the heap is likely fragmented after a reduction in heap size.
346 minHeapForMetadataHugePages = 1 << 30
349 // physPageSize is the size in bytes of the OS's physical pages.
350 // Mapping and unmapping operations must be done at multiples of
353 // This must be set by the OS init code (typically in osinit) before
355 var physPageSize uintptr
357 // physHugePageSize is the size in bytes of the OS's default physical huge
358 // page size whose allocation is opaque to the application. It is assumed
359 // and verified to be a power of two.
361 // If set, this must be set by the OS init code (typically in osinit) before
362 // mallocinit. However, setting it at all is optional, and leaving the default
363 // value is always safe (though potentially less efficient).
365 // Since physHugePageSize is always assumed to be a power of two,
366 // physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
367 // The purpose of physHugePageShift is to avoid doing divisions in
368 // performance critical functions.
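//
// For example, code that needs to round an address down to a huge page
// boundary can use a mask or a shift instead of dividing (an illustrative
// sketch; assumes physHugePageSize != 0):
//
//	base := addr &^ (physHugePageSize - 1) // addr - addr%physHugePageSize
//	idx := addr >> physHugePageShift       // addr / physHugePageSize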
370 physHugePageSize uintptr
371 physHugePageShift uint
375 if class_to_size[_TinySizeClass] != _TinySize {
376 throw("bad TinySizeClass")
379 if heapArenaBitmapWords&(heapArenaBitmapWords-1) != 0 {
380 // heapBits expects modular arithmetic on bitmap
381 // addresses to work.
382 throw("heapArenaBitmapWords not a power of 2")
385 // Check physPageSize.
386 if physPageSize == 0 {
387 // The OS init code failed to fetch the physical page size.
388 throw("failed to get system page size")
390 if physPageSize > maxPhysPageSize {
391 print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
392 throw("bad system page size")
394 if physPageSize < minPhysPageSize {
395 print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
396 throw("bad system page size")
398 if physPageSize&(physPageSize-1) != 0 {
399 print("system page size (", physPageSize, ") must be a power of 2\n")
400 throw("bad system page size")
402 if physHugePageSize&(physHugePageSize-1) != 0 {
403 print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
404 throw("bad system huge page size")
406 if physHugePageSize > maxPhysHugePageSize {
407 // physHugePageSize is greater than the maximum supported huge page size.
408 // Don't throw here, as in the other cases: a system configured
409 // this way isn't wrong, we just don't have the code to support it.
410 // Instead, silently set the huge page size to zero.
413 if physHugePageSize != 0 {
414 // Since physHugePageSize is a power of 2, it suffices to increase
415 // physHugePageShift until 1<<physHugePageShift == physHugePageSize.
416 for 1<<physHugePageShift != physHugePageSize {
420 if pagesPerArena%pagesPerSpanRoot != 0 {
421 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n")
422 throw("bad pagesPerSpanRoot")
424 if pagesPerArena%pagesPerReclaimerChunk != 0 {
425 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
426 throw("bad pagesPerReclaimerChunk")
428 if goexperiment.AllocHeaders {
429 // Check that the minimum size (exclusive) for a malloc header is also
430 // a size class boundary. This is important for making sure checks align
431 // across different parts of the runtime.
432 minSizeForMallocHeaderIsSizeClass := false
433 for i := 0; i < len(class_to_size); i++ {
434 if minSizeForMallocHeader == uintptr(class_to_size[i]) {
435 minSizeForMallocHeaderIsSizeClass = true
439 if !minSizeForMallocHeaderIsSizeClass {
440 throw("min size of malloc header is not a size class boundary")
442 // Check that the pointer bitmap for all small sizes without a malloc header
444 if minSizeForMallocHeader/goarch.PtrSize > 8*goarch.PtrSize {
445 throw("max pointer/scan bitmap size for headerless objects is too large")
449 if minTagBits > taggedPointerBits {
450 throw("taggedPointerbits too small")
453 // Initialize the heap.
455 mcache0 = allocmcache()
456 lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas)
457 lockInit(&profInsertLock, lockRankProfInsert)
458 lockInit(&profBlockLock, lockRankProfBlock)
459 lockInit(&profMemActiveLock, lockRankProfMemActive)
460 for i := range profMemFutureLock {
461 lockInit(&profMemFutureLock[i], lockRankProfMemFuture)
463 lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)
465 // Create initial arena growth hints.
466 if goarch.PtrSize == 8 {
467 // On a 64-bit machine, we pick the following hints
470 // 1. Starting from the middle of the address space
471 // makes it easier to grow out a contiguous range
472 // without running into some other mapping.
474 // 2. This makes Go heap addresses more easily
475 // recognizable when debugging.
477 // 3. Stack scanning in gccgo is still conservative,
478 // so it's important that addresses be distinguishable
481 // Starting at 0x00c0 means that the valid memory addresses
482 // will begin with 0x00c0, 0x00c1, ...
483 // In little-endian, that's c0 00, c1 00, ... None of those are valid
484 // UTF-8 sequences, and they are otherwise as far away from
485 // ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
486 // addresses. An earlier attempt to use 0x11f8 caused out of memory errors
487 // on OS X during thread allocations. 0x00c0 causes conflicts with
488 // AddressSanitizer which reserves all memory up to 0x0100.
489 // These choices reduce the odds of a conservative garbage collector
490 // not collecting memory because some non-pointer block of memory
491 // had a bit pattern that matched a memory address.
493 // However, on arm64, we ignore all this advice above and slam the
494 // allocation at 0x40 << 32 because when using 4k pages with 3-level
495 // translation buffers, the user address space is limited to 39 bits.
496 // On ios/arm64, the address space is even smaller.
498 // On AIX, mmap starts at 0x0A00000000000000 for 64-bit.
501 // Space mapped for user arenas comes immediately after the range
502 // originally reserved for the regular heap when race mode is not
503 // enabled because user arena chunks can never be used for regular heap
504 // allocations and we want to avoid fragmenting the address space.
506 // In race mode we have no choice but to just use the same hints because
507 // the race detector requires that the heap be mapped contiguously.
508 for i := 0x7f; i >= 0; i-- {
512 // The TSAN runtime requires the heap
513 // to be in the range [0x00c000000000,
515 p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
516 if p >= uintptrMask&0x00e000000000 {
519 case GOARCH == "arm64" && GOOS == "ios":
520 p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
521 case GOARCH == "arm64":
522 p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
525 // We don't use addresses directly after 0x0A00000000000000
526 // to avoid collisions with other mmaps done by non-Go programs.
529 p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
531 p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
533 // Switch to generating hints for user arenas if we've gone
534 // through about half the hints. In race mode, take only about
535 // a quarter; we don't have very much space to work with.
536 hintList := &mheap_.arenaHints
537 if (!raceenabled && i > 0x3f) || (raceenabled && i > 0x5f) {
538 hintList = &mheap_.userArena.arenaHints
540 hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
542 hint.next, *hintList = *hintList, hint
545 // On a 32-bit machine, we're much more concerned
546 // about keeping the usable heap contiguous.
549 // 1. We reserve space for all heapArenas up front so
550 // they don't get interleaved with the heap. They're
551 // ~258MB, so this isn't too bad. (We could reserve a
552 // smaller amount of space up front if this is a
555 // 2. We hint the heap to start right above the end of
556 // the binary so we have the best chance of keeping it
559 // 3. We try to stake out a reasonably large initial
562 const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
563 meta := uintptr(sysReserve(nil, arenaMetaSize))
565 mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
568 // We want to start the arena low, but if we're linked
569 // against C code, it's possible global constructors
570 // have called malloc and adjusted the process' brk.
571 // Query the brk so we can avoid trying to map the
572 // region over it (which will cause the kernel to put
573 // the region somewhere else, likely at a high
577 // If we ask for the end of the data segment but the
578 // operating system requires a little more space
579 // before we can start allocating, it will give out a
580 // slightly higher pointer. Except QEMU, which is
581 // buggy, as usual: it won't adjust the pointer
582 // upward. So adjust it upward a little bit ourselves:
583 // 1/4 MB to get away from the running binary image.
584 p := firstmoduledata.end
588 if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
589 p = mheap_.heapArenaAlloc.end
591 p = alignUp(p+(256<<10), heapArenaBytes)
592 // Because we're worried about fragmentation on
593 // 32-bit, we try to make a large initial reservation.
594 arenaSizes := []uintptr{
599 for _, arenaSize := range arenaSizes {
600 a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
602 mheap_.arena.init(uintptr(a), size, false)
603 p = mheap_.arena.end // For hint below
607 hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
609 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
611 // Place the hint for user arenas just after the large reservation.
613 // While this potentially competes with the hint above, in practice we probably
614 // aren't going to be getting this far anyway on 32-bit platforms.
615 userArenaHint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
616 userArenaHint.addr = p
617 userArenaHint.next, mheap_.userArena.arenaHints = mheap_.userArena.arenaHints, userArenaHint
619 // Initialize the memory limit here because the allocator is going to look at it
620 // but we haven't called gcinit yet and we're definitely going to allocate memory before then.
621 gcController.memoryLimit.Store(maxInt64)
624 // sysAlloc allocates heap arena space for at least n bytes. The
625 // returned pointer is always heapArenaBytes-aligned and backed by
626 // h.arenas metadata. The returned size is always a multiple of
627 // heapArenaBytes. sysAlloc returns nil on failure.
628 // There is no corresponding free function.
630 // hintList is a list of hint addresses for where to allocate new
631 // heap arenas. It must be non-nil.
633 // register indicates whether the heap arena should be registered
636 // sysAlloc returns a memory region in the Reserved state. This region must
637 // be transitioned to Prepared and then Ready before use.
640 func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, register bool) (v unsafe.Pointer, size uintptr) {
641 assertLockHeld(&h.lock)
643 n = alignUp(n, heapArenaBytes)
645 if hintList == &h.arenaHints {
646 // First, try the arena pre-reservation.
647 // Newly-used mappings are considered released.
649 // Only do this if we're using the regular heap arena hints.
650 // This behavior is only for the heap.
651 v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased)
658 // Try to grow the heap at a hint address.
659 for *hintList != nil {
666 // We can't use this, so don't ask.
668 } else if arenaIndex(p+n-1) >= 1<<arenaBits {
669 // Outside addressable heap. Can't use.
672 v = sysReserve(unsafe.Pointer(p), n)
675 // Success. Update the hint.
683 // Failed. Discard this hint and try the next.
685 // TODO: This would be cleaner if sysReserve could be
686 // told to only return the requested address. In
687 // particular, this is already how Windows behaves, so
688 // it would simplify things there.
692 *hintList = hint.next
693 h.arenaHintAlloc.free(unsafe.Pointer(hint))
698 // The race detector assumes the heap lives in
699 // [0x00c000000000, 0x00e000000000), but we
700 // just ran out of hints in this region. Give
702 throw("too many address space collisions for -race mode")
705 // All of the hints failed, so we'll take any
706 // (sufficiently aligned) address the kernel will give
708 v, size = sysReserveAligned(nil, n, heapArenaBytes)
713 // Create new hints for extending this region.
714 hint := (*arenaHint)(h.arenaHintAlloc.alloc())
715 hint.addr, hint.down = uintptr(v), true
716 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
717 hint = (*arenaHint)(h.arenaHintAlloc.alloc())
718 hint.addr = uintptr(v) + size
719 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
722 // Check for bad pointers or pointers we can't use.
727 bad = "region exceeds uintptr range"
728 } else if arenaIndex(p) >= 1<<arenaBits {
729 bad = "base outside usable address space"
730 } else if arenaIndex(p+size-1) >= 1<<arenaBits {
731 bad = "end outside usable address space"
734 // This should be impossible on most architectures,
735 // but it would be really confusing to debug.
736 print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
737 throw("memory reservation exceeds address space limit")
741 if uintptr(v)&(heapArenaBytes-1) != 0 {
742 throw("misrounded allocation in sysAlloc")
746 // Create arena metadata.
747 for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
748 l2 := h.arenas[ri.l1()]
750 // Allocate an L2 arena map.
752 // Use sysAllocOS instead of sysAlloc or persistentalloc because there's no
753 // statistic we can comfortably account for this space in. With this structure,
754 // we rely on demand paging to avoid large overheads, but tracking which memory
755 // is paged in is too expensive. Trying to account for the whole region means
756 // that it will appear like an enormous memory overhead in statistics, even though
758 l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2)))
760 throw("out of memory allocating heap arena map")
762 if h.arenasHugePages {
763 sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
765 sysNoHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
767 atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
770 if l2[ri.l2()] != nil {
771 throw("arena already initialized")
774 r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
776 r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
778 throw("out of memory allocating heap arena metadata")
782 // Register the arena in allArenas if requested.
784 if len(h.allArenas) == cap(h.allArenas) {
785 size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize
789 newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
791 throw("out of memory allocating allArenas")
793 oldSlice := h.allArenas
794 *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)}
795 copy(h.allArenas, oldSlice)
796 // Do not free the old backing array because
797 // there may be concurrent readers. Since we
798 // double the array each time, this can lead
799 // to at most 2x waste.
801 h.allArenas = h.allArenas[:len(h.allArenas)+1]
802 h.allArenas[len(h.allArenas)-1] = ri
805 // Store atomically just in case an object from the
806 // new heap arena becomes visible before the heap lock
807 // is released (which shouldn't happen, but there's
808 // little downside to this).
809 atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
812 // Tell the race detector about the new heap memory.
814 racemapshadow(v, size)
820 // sysReserveAligned is like sysReserve, but the returned pointer is
821 // aligned to align bytes. It may reserve either n or n+align bytes,
822 // so it returns the size that was reserved.
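// As a worked example with made-up addresses: for align = 64MB, if sysReserve
// returns p = 0x7f0001000000 for a size+align request, then pAligned becomes
// 0x7f0004000000; the 48MB prefix [p, pAligned) and the 16MB suffix
// [pAligned+size, p+size+align) are freed, leaving size bytes aligned to 64MB.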
823 func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
824 // Since the alignment is rather large in uses of this
825 // function, we're not likely to get it by chance, so we ask
826 // for a larger region and remove the parts we don't need.
829 p := uintptr(sysReserve(v, size+align))
833 case p&(align-1) == 0:
834 return unsafe.Pointer(p), size + align
835 case GOOS == "windows":
836 // On Windows we can't release pieces of a
837 // reservation, so we release the whole thing and
838 // re-reserve the aligned sub-region. This may race,
839 // so we may have to try again.
840 sysFreeOS(unsafe.Pointer(p), size+align)
841 p = alignUp(p, align)
842 p2 := sysReserve(unsafe.Pointer(p), size)
843 if p != uintptr(p2) {
844 // Must have raced. Try again.
846 if retries++; retries == 100 {
847 throw("failed to allocate aligned heap memory; too many retries")
854 // Trim off the unaligned parts.
855 pAligned := alignUp(p, align)
856 sysFreeOS(unsafe.Pointer(p), pAligned-p)
857 end := pAligned + size
858 endLen := (p + size + align) - end
860 sysFreeOS(unsafe.Pointer(end), endLen)
862 return unsafe.Pointer(pAligned), size
866 // enableMetadataHugePages enables huge pages for various sources of heap metadata.
868 // A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant
869 // time, but may take time proportional to the size of the mapped heap beyond that.
871 // This function is idempotent.
873 // The heap lock must not be held over this operation, since it will briefly acquire
875 func (h *mheap) enableMetadataHugePages() {
876 // Enable huge pages for page structure.
877 h.pages.enableChunkHugePages()
879 // Grab the lock and set arenasHugePages if it's not already set.
881 // Once arenasHugePages is set, all new L2 entries will be eligible for
882 // huge pages. We'll set all the old entries after we release the lock.
884 if h.arenasHugePages {
888 h.arenasHugePages = true
891 // N.B. The arenas L1 map is quite small on all platforms, so it's fine to
892 // just iterate over the whole thing.
893 for i := range h.arenas {
894 l2 := (*[1 << arenaL2Bits]*heapArena)(atomic.Loadp(unsafe.Pointer(&h.arenas[i])))
898 sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
902 // base address for all 0-byte allocations
905 // nextFreeFast returns the next free object if one is quickly available.
906 // Otherwise it returns 0.
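// For example, if s.freeindex is 128 and s.allocCache is ...10100 (binary),
// TrailingZeros64 returns 2: the object at index 130 is handed out, the cache
// is shifted right by 3, and s.freeindex becomes 131.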
907 func nextFreeFast(s *mspan) gclinkptr {
908 theBit := sys.TrailingZeros64(s.allocCache) // Is there a free object in the allocCache?
910 result := s.freeindex + uint16(theBit)
911 if result < s.nelems {
912 freeidx := result + 1
913 if freeidx%64 == 0 && freeidx != s.nelems {
916 s.allocCache >>= uint(theBit + 1)
917 s.freeindex = freeidx
919 return gclinkptr(uintptr(result)*s.elemsize + s.base())
925 // nextFree returns the next free object from the cached span if one is available.
926 // Otherwise it refills the cache with a span with an available object and
927 // returns that object along with a flag indicating that this was a heavyweight
928 // allocation. If it is a heavyweight allocation, the caller must
929 // determine whether a new GC cycle needs to be started or, if the GC is active,
930 // whether this goroutine needs to assist the GC.
932 // Must run in a non-preemptible context since otherwise the owner of
934 func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
937 freeIndex := s.nextFreeIndex()
938 if freeIndex == s.nelems {
940 if s.allocCount != s.nelems {
941 println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
942 throw("s.allocCount != s.nelems && freeIndex == s.nelems")
948 freeIndex = s.nextFreeIndex()
951 if freeIndex >= s.nelems {
952 throw("freeIndex is not valid")
955 v = gclinkptr(uintptr(freeIndex)*s.elemsize + s.base())
957 if s.allocCount > s.nelems {
958 println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
959 throw("s.allocCount > s.nelems")
964 // Allocate an object of size bytes.
965 // Small objects are allocated from the per-P cache's free lists.
966 // Large objects (> 32 kB) are allocated straight from the heap.
967 func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
968 if gcphase == _GCmarktermination {
969 throw("mallocgc called with gcphase == _GCmarktermination")
973 return unsafe.Pointer(&zerobase)
976 // It's possible for any malloc to trigger sweeping, which may in
977 // turn queue finalizers. Record this dynamic lock edge.
978 lockRankMayQueueFinalizer()
982 // In the ASan runtime library, the malloc() function allocates extra memory,
983 // the redzone, around the user-requested memory region, and the redzones are marked
984 // as unaddressable. We perform the same operations in Go to detect overflows or
986 size += computeRZlog(size)
993 // TODO(austin): This should be just
994 // align = uintptr(typ.align)
995 // but that's only 4 on 32-bit platforms,
996 // even if there's a uint64 field in typ (see #599).
997 // This causes 64-bit atomic accesses to panic.
998 // Hence, we use stricter alignment that matches
999 // the normal allocator better.
1002 } else if size&3 == 0 {
1004 } else if size&1 == 0 {
1010 return persistentalloc(size, align, &memstats.other_sys)
1013 if inittrace.active && inittrace.id == getg().goid {
1014 // Init functions are executed sequentially in a single goroutine.
1015 inittrace.allocs += 1
1019 // assistG is the G to charge for this allocation, or nil if
1020 // GC is not currently active.
1021 assistG := deductAssistCredit(size)
1023 // Set mp.mallocing to keep from being preempted by GC.
1025 if mp.mallocing != 0 {
1026 throw("malloc deadlock")
1028 if mp.gsignal == getg() {
1029 throw("malloc during signal")
1033 shouldhelpgc := false
1034 dataSize := userSize
1037 throw("mallocgc called without a P or outside bootstrapping")
1041 var x unsafe.Pointer
1042 noscan := typ == nil || typ.PtrBytes == 0
1043 // In some cases block zeroing can profitably (for latency reduction purposes)
1044 // be delayed till preemption is possible; delayedZeroing tracks that state.
1045 delayedZeroing := false
1046 // Determine if it's a 'small' object that goes into a size-classed span.
1048 // Note: This comparison looks a little strange, but it exists to smooth out
1049 // the crossover between the largest size class and large objects that have
1050 // their own spans. The small window of object sizes between maxSmallSize-mallocHeaderSize
1051 // and maxSmallSize will be considered large, even though they might fit in
1052 // a size class. In practice this is completely fine, since the largest small
1053 // size class has a single object in it already, precisely to make the transition
1054 // to large objects smooth.
1055 if size <= maxSmallSize-mallocHeaderSize {
1056 if noscan && size < maxTinySize {
1059 // Tiny allocator combines several tiny allocation requests
1060 // into a single memory block. The resulting memory block
1061 // is freed when all subobjects are unreachable. The subobjects
1062 // must be noscan (have no pointers); this ensures that
1063 // the amount of potentially wasted memory is bounded.
1065 // The size of the memory block used for combining (maxTinySize) is tunable.
1066 // The current setting is 16 bytes, which gives 2x worst-case memory
1067 // wastage (when all but one of the subobjects are unreachable).
1068 // 8 bytes would result in no wastage at all, but provides fewer
1069 // opportunities for combining.
1070 // 32 bytes provides more opportunities for combining,
1071 // but can lead to 4x worst case wastage.
1072 // The best-case benefit is 8x regardless of block size.
1074 // Objects obtained from tiny allocator must not be freed explicitly.
1075 // So when an object will be freed explicitly, we ensure that
1076 // its size >= maxTinySize.
1078 // SetFinalizer has a special case for objects potentially coming
1079 // from the tiny allocator; in such a case it allows setting finalizers
1080 // for an inner byte of a memory block.
1082 // The main targets of the tiny allocator are small strings and
1083 // standalone escaping variables. On a json benchmark
1084 // the allocator reduces the number of allocations by ~12% and
1085 // reduces heap size by ~20%.
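//
// For example, three tiny noscan allocations of 5, 3, and 8 bytes can share a
// single 16-byte block at offsets 0, 5, and 8 (the 8-byte object is 8-aligned
// by the rules below); the block is freed only once all three objects are
// unreachable.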
1087 // Align tiny pointer for required (conservative) alignment.
1089 off = alignUp(off, 8)
1090 } else if goarch.PtrSize == 4 && size == 12 {
1091 // Conservatively align 12-byte objects to 8 bytes on 32-bit
1092 // systems so that an object whose first field is a 64-bit
1093 // value is aligned to 8 bytes and does not cause a fault on
1094 // atomic access. See issue 37262.
1095 // TODO(mknyszek): Remove this workaround if/when issue 36606
1097 off = alignUp(off, 8)
1098 } else if size&3 == 0 {
1099 off = alignUp(off, 4)
1100 } else if size&1 == 0 {
1101 off = alignUp(off, 2)
1103 if off+size <= maxTinySize && c.tiny != 0 {
1104 // The object fits into existing tiny block.
1105 x = unsafe.Pointer(c.tiny + off)
1106 c.tinyoffset = off + size
1112 // Allocate a new maxTinySize block.
1113 span = c.alloc[tinySpanClass]
1114 v := nextFreeFast(span)
1116 v, span, shouldhelpgc = c.nextFree(tinySpanClass)
1118 x = unsafe.Pointer(v)
1119 (*[2]uint64)(x)[0] = 0
1120 (*[2]uint64)(x)[1] = 0
1121 // See if we need to replace the existing tiny block with the new one
1122 // based on amount of remaining free space.
1123 if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
1124 // Note: disabled when race detector is on, see comment near end of this function.
1130 hasHeader := !noscan && !heapBitsInSpan(size)
1131 if goexperiment.AllocHeaders && hasHeader {
1132 size += mallocHeaderSize
1135 if size <= smallSizeMax-8 {
1136 sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
1138 sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
1140 size = uintptr(class_to_size[sizeclass])
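// For example, a 17-byte noscan request: divRoundUp(17, smallSizeDiv) with
// smallSizeDiv = 8 gives 3, size_to_class8[3] is the 24-byte size class, and
// size is rounded up to 24.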
1141 spc := makeSpanClass(sizeclass, noscan)
1143 v := nextFreeFast(span)
1145 v, span, shouldhelpgc = c.nextFree(spc)
1147 x = unsafe.Pointer(v)
1148 if needzero && span.needzero != 0 {
1149 memclrNoHeapPointers(x, size)
1151 if goexperiment.AllocHeaders && hasHeader {
1152 header = (**_type)(x)
1153 x = add(x, mallocHeaderSize)
1154 size -= mallocHeaderSize
1159 // For large allocations, keep track of zeroed state so that
1160 // bulk zeroing can happen later in a preemptible context.
1161 span = c.allocLarge(size, noscan)
1164 size = span.elemsize
1165 x = unsafe.Pointer(span.base())
1166 if needzero && span.needzero != 0 {
1168 delayedZeroing = true
1170 memclrNoHeapPointers(x, size)
1173 if goexperiment.AllocHeaders && !noscan {
1174 header = &span.largeType
1178 if goexperiment.AllocHeaders {
1179 c.scanAlloc += heapSetType(uintptr(x), dataSize, typ, header, span)
1181 var scanSize uintptr
1182 heapBitsSetType(uintptr(x), size, dataSize, typ)
1183 if dataSize > typ.Size_ {
1184 // Array allocation. If there are any
1185 // pointers, GC has to scan to the last
1187 if typ.PtrBytes != 0 {
1188 scanSize = dataSize - typ.Size_ + typ.PtrBytes
1191 scanSize = typ.PtrBytes
1193 c.scanAlloc += scanSize
1197 // Ensure that the stores above that initialize x to
1198 // type-safe memory and set the heap bits occur before
1199 // the caller can make x observable to the garbage
1200 // collector. Otherwise, on weakly ordered machines,
1201 // the garbage collector could follow a pointer to x,
1202 // but see uninitialized memory or stale heap bits.
1203 publicationBarrier()
1204 // As x and the heap bits are initialized, update
1205 // freeIndexForScan now so x is seen by the GC
1206 // (including conservative scan) as an allocated object.
1207 // While this pointer can't escape into user code as a
1208 // _live_ pointer until we return, conservative scanning
1209 // may find a dead pointer that happens to point into this
1210 // object. Delaying this update until now ensures that
1211 // conservative scanning considers this pointer dead until
1213 span.freeIndexForScan = span.freeindex
1215 // Allocate black during GC.
1216 // All slots hold nil so no scanning is needed.
1217 // This may be racing with GC so do it atomically if there can be
1218 // a race marking the bit.
1219 if gcphase != _GCoff {
1220 // Pass the full size of the allocation to the number of bytes
1223 // If !goexperiment.AllocHeaders, "size" doesn't include the
1224 // allocation header, so use span.elemsize unconditionally.
1225 gcmarknewobject(span, uintptr(x), span.elemsize)
1237 // We should only read/write the memory with the size requested by the user.
1238 // The rest of the allocated memory should be poisoned, so that we can report
1239 // errors when accessing poisoned memory.
1240 // The allocated memory is larger than the requested userSize; it also includes
1241 // the redzone and some other padding bytes.
1242 rzBeg := unsafe.Add(x, userSize)
1243 asanpoison(rzBeg, size-userSize)
1244 asanunpoison(x, userSize)
1247 if rate := MemProfileRate; rate > 0 {
1248 // Note cache c only valid while m acquired; see #47302
1249 if rate != 1 && size < c.nextSample {
1250 c.nextSample -= size
1252 profilealloc(mp, x, size)
1258 // Pointer-free data can be zeroed late in a context where preemption can occur.
1259 // x will keep the memory alive.
1262 throw("delayed zeroing on data that may contain pointers")
1264 if goexperiment.AllocHeaders && header != nil {
1265 throw("unexpected malloc header in delayed zeroing of large object")
1267 memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
1271 if debug.allocfreetrace != 0 {
1272 tracealloc(x, size, typ)
1275 if inittrace.active && inittrace.id == getg().goid {
1276 // Init functions are executed sequentially in a single goroutine.
1277 inittrace.bytes += uint64(size)
1282 // Account for internal fragmentation in the assist
1283 // debt now that we know it.
1284 assistG.gcAssistBytes -= int64(size - dataSize)
1288 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
1293 if raceenabled && noscan && dataSize < maxTinySize {
1294 // Pad tinysize allocations so they are aligned with the end
1295 // of the tinyalloc region. This ensures that any arithmetic
1296 // that goes off the top end of the object will be detectable
1297 // by checkptr (issue 38872).
1298 // Note that we disable tinyalloc when raceenabled for this to work.
1299 // TODO: This padding is only performed when the race detector
1300 // is enabled. It would be nice to enable it if any package
1301 // was compiled with checkptr, but there's no easy way to
1302 // detect that (especially at compile time).
1303 // TODO: enable this padding for all allocations, not just
1304 // tinyalloc ones. It's tricky because of pointer maps.
1305 // Maybe just all noscan objects?
1306 x = add(x, size-dataSize)
1312 // deductAssistCredit reduces the current G's assist credit
1313 // by size bytes, and assists the GC if necessary.
1315 // Caller must be preemptible.
1317 // Returns the G for which the assist credit was accounted.
1318 func deductAssistCredit(size uintptr) *g {
1320 if gcBlackenEnabled != 0 {
1321 // Charge the current user G for this allocation.
1323 if assistG.m.curg != nil {
1324 assistG = assistG.m.curg
1326 // Charge the allocation against the G. We'll account
1327 // for internal fragmentation at the end of mallocgc.
1328 assistG.gcAssistBytes -= int64(size)
1330 if assistG.gcAssistBytes < 0 {
1331 // This G is in debt. Assist the GC to correct
1332 // this before allocating. This must happen
1333 // before disabling preemption.
1334 gcAssistAlloc(assistG)
1340 // memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
1341 // on chunks of the buffer to be zeroed, with opportunities for preemption
1342 // along the way. memclrNoHeapPointers contains no safepoints and also
1343 // cannot be preemptively scheduled, so this provides a still-efficient
1344 // block clear that can also be preempted at a reasonable granularity.
1346 // Use this with care; if the data being cleared is tagged to contain
1347 // pointers, this allows the GC to run before it is all cleared.
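// For example, zeroing a 10MB large object is split into 40 calls of 256KB
// each, with a preemption check between chunks.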
1348 func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
1350 // got this from benchmarking. 128k is too small, 512k is too large.
1351 const chunkBytes = 256 * 1024
1353 for voff := v; voff < vsize; voff = voff + chunkBytes {
1355 // may hold locks, e.g., profiling
1358 // clear min(avail, lump) bytes
1363 memclrNoHeapPointers(unsafe.Pointer(voff), n)
1367 // implementation of the new builtin.
1368 // The compiler (both frontend and SSA backend) knows the signature
1369 // of this function.
1370 func newobject(typ *_type) unsafe.Pointer {
1371 return mallocgc(typ.Size_, typ, true)
1374 //go:linkname reflect_unsafe_New reflect.unsafe_New
1375 func reflect_unsafe_New(typ *_type) unsafe.Pointer {
1376 return mallocgc(typ.Size_, typ, true)
1379 //go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
1380 func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
1381 return mallocgc(typ.Size_, typ, true)
1384 // newarray allocates an array of n elements of type typ.
1385 func newarray(typ *_type, n int) unsafe.Pointer {
1387 return mallocgc(typ.Size_, typ, true)
1389 mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
1390 if overflow || mem > maxAlloc || n < 0 {
1391 panic(plainError("runtime: allocation size out of range"))
1393 return mallocgc(mem, typ, true)
1396 //go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
1397 func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
1398 return newarray(typ, n)
1401 func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
1404 throw("profilealloc called without a P or outside bootstrapping")
1406 c.nextSample = nextSample()
1407 mProf_Malloc(x, size)
1410 // nextSample returns the next sampling point for heap profiling. The goal is
1411 // to sample allocations on average every MemProfileRate bytes, but with a
1412 // completely random distribution over the allocation timeline; this
1413 // corresponds to a Poisson process with parameter MemProfileRate. In Poisson
1414 // processes, the distance between two samples follows the exponential
1415 // distribution with mean MemProfileRate, so the best return value is a random
1416 // number taken from an exponential distribution whose mean is MemProfileRate.
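// For example, with the default MemProfileRate of 512 KiB, sample points are
// on average 512 KiB of allocated bytes apart, independent of object size.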
1417 func nextSample() uintptr {
1418 if MemProfileRate == 1 {
1419 // Callers assign our return value to
1420 // mcache.next_sample, but next_sample is not used
1421 // when the rate is 1. So avoid the math below and
1422 // just return something.
1425 if GOOS == "plan9" {
1426 // Plan 9 doesn't support floating point in note handler.
1427 if gp := getg(); gp == gp.m.gsignal {
1428 return nextSampleNoFP()
1432 return uintptr(fastexprand(MemProfileRate))
1435 // fastexprand returns a random number from an exponential distribution with
1436 // the specified mean.
1437 func fastexprand(mean int) int32 {
1438 // Avoid overflow. Maximum possible step is
1439 // -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
1441 case mean > 0x7000000:
1447 // Take a random sample of the exponential distribution with mean "mean",
1448 // i.e. exp(-x/mean). Its probability density function is (1/mean)*exp(-x/mean),
1449 // so the CDF is p = 1 - exp(-x/mean), so
1450 // q = 1 - p == exp(-x/mean)
1451 // log_e(q) = -x/mean
1452 // -log_e(q) * mean = x
1453 // x = -log_e(q) * mean
1454 // x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency
1455 const randomBitCount = 26
1456 q := fastrandn(1<<randomBitCount) + 1
1457 qlog := fastlog2(float64(q)) - randomBitCount
1461 const minusLog2 = -0.6931471805599453 // -ln(2)
1462 return int32(qlog*(minusLog2*float64(mean))) + 1
1465 // nextSampleNoFP is similar to nextSample, but uses older,
1466 // simpler code to avoid floating point.
1467 func nextSampleNoFP() uintptr {
1468 // Set first allocation sample size.
1469 rate := MemProfileRate
1470 if rate > 0x3fffffff { // make 2*rate not overflow
1474 return uintptr(fastrandn(uint32(2 * rate)))
1479 type persistentAlloc struct {
1484 var globalAlloc struct {
1489 // persistentChunkSize is the number of bytes we allocate when we grow
1490 // a persistentAlloc.
1491 const persistentChunkSize = 256 << 10
1493 // persistentChunks is a list of all the persistent chunks we have
1494 // allocated. The list is maintained through the first word in the
1495 // persistent chunk. This is updated atomically.
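//
// For illustration, walking the list mirrors inPersistentAlloc below:
//
//	for c := uintptr(unsafe.Pointer(persistentChunks)); c != 0; c = *(*uintptr)(unsafe.Pointer(c)) {
//		// [c, c+persistentChunkSize) is one persistent chunk
//	}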
1496 var persistentChunks *notInHeap
1498 // Wrapper around sysAlloc that can allocate small chunks.
1499 // There is no associated free operation.
1500 // Intended for things like function/type/debug-related persistent data.
1501 // If align is 0, uses default align (currently 8).
1502 // The returned memory will be zeroed.
1503 // sysStat must be non-nil.
1505 // Consider marking persistentalloc'd types not in heap by embedding
1506 // runtime/internal/sys.NotInHeap.
1507 func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
1509 systemstack(func() {
1510 p = persistentalloc1(size, align, sysStat)
1512 return unsafe.Pointer(p)
1515 // Must run on system stack because stack growth can (re)invoke it.
1519 func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
1521 maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
1525 throw("persistentalloc: size == 0")
1528 if align&(align-1) != 0 {
1529 throw("persistentalloc: align is not a power of 2")
1531 if align > _PageSize {
1532 throw("persistentalloc: align is too large")
1538 if size >= maxBlock {
1539 return (*notInHeap)(sysAlloc(size, sysStat))
1543 var persistent *persistentAlloc
1544 if mp != nil && mp.p != 0 {
1545 persistent = &mp.p.ptr().palloc
1547 lock(&globalAlloc.mutex)
1548 persistent = &globalAlloc.persistentAlloc
1550 persistent.off = alignUp(persistent.off, align)
1551 if persistent.off+size > persistentChunkSize || persistent.base == nil {
1552 persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
1553 if persistent.base == nil {
1554 if persistent == &globalAlloc.persistentAlloc {
1555 unlock(&globalAlloc.mutex)
1557 throw("runtime: cannot allocate memory")
1560 // Add the new chunk to the persistentChunks list.
1562 chunks := uintptr(unsafe.Pointer(persistentChunks))
1563 *(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
1564 if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
1568 persistent.off = alignUp(goarch.PtrSize, align)
1570 p := persistent.base.add(persistent.off)
1571 persistent.off += size
1573 if persistent == &globalAlloc.persistentAlloc {
1574 unlock(&globalAlloc.mutex)
1577 if sysStat != &memstats.other_sys {
1578 sysStat.add(int64(size))
1579 memstats.other_sys.add(-int64(size))
1584 // inPersistentAlloc reports whether p points to memory allocated by
1585 // persistentalloc. This must be nosplit because it is called by the
1586 // cgo checker code, which is called by the write barrier code.
1589 func inPersistentAlloc(p uintptr) bool {
1590 chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
1592 if p >= chunk && p < chunk+persistentChunkSize {
1595 chunk = *(*uintptr)(unsafe.Pointer(chunk))
1600 // linearAlloc is a simple linear allocator that pre-reserves a region
1601 // of memory and then optionally maps that region into the Ready state
1604 // The caller is responsible for locking.
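//
// For example, on 32-bit mallocinit reserves the heapArena metadata space up
// front and initializes a linearAlloc over it:
//
//	mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
//	...
//	r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
//
// Each alloc call bumps next and, since mapMemory is true there, maps newly
// needed pages into the Ready state.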
1605 type linearAlloc struct {
1606 next uintptr // next free byte
1607 mapped uintptr // one byte past end of mapped space
1608 end uintptr // end of reserved space
1610 mapMemory bool // transition memory from Reserved to Ready if true
1613 func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
1614 if base+size < base {
1615 // Chop off the last byte. The runtime isn't prepared
1616 // to deal with situations where the bounds could overflow.
1617 // Leave that memory reserved, though, so we don't map it
1621 l.next, l.mapped = base, base
1623 l.mapMemory = mapMemory
1626 func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
1627 p := alignUp(l.next, align)
1632 if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
1634 // Transition from Reserved to Prepared to Ready.
1635 n := pEnd - l.mapped
1636 sysMap(unsafe.Pointer(l.mapped), n, sysStat)
1637 sysUsed(unsafe.Pointer(l.mapped), n, n)
1641 return unsafe.Pointer(p)
1644 // notInHeap is off-heap memory allocated by a lower-level allocator
1645 // like sysAlloc or persistentAlloc.
1647 // In general, it's better to use real types which embed
1648 // runtime/internal/sys.NotInHeap, but this serves as a generic type
1649 // for situations where that isn't possible (like in the allocators).
1651 // TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
1652 type notInHeap struct{ _ sys.NotInHeap }
1654 func (p *notInHeap) add(bytes uintptr) *notInHeap {
1655 return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
1658 // computeRZlog computes the size of the redzone.
1659 // See the compiler-rt implementation.
1660 func computeRZlog(userSize uintptr) uintptr {
1662 case userSize <= (64 - 16):
1664 case userSize <= (128 - 32):
1666 case userSize <= (512 - 64):
1668 case userSize <= (4096 - 128):
1670 case userSize <= (1<<14)-256:
1672 case userSize <= (1<<15)-512:
1674 case userSize <= (1<<16)-1024: