1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
7 // This was originally based on tcmalloc, but has diverged quite a bit.
8 // http://goog-perftools.sourceforge.net/doc/tcmalloc.html
10 // The main allocator works in runs of pages.
11 // Small allocation sizes (up to and including 32 kB) are
12 // rounded to one of about 70 size classes, each of which
13 // has its own free set of objects of exactly that size.
14 // Any free page of memory can be split into a set of objects
15 // of one size class, which are then managed using a free bitmap.
17 // The allocator's data structures are:
19 // fixalloc: a free-list allocator for fixed-size off-heap objects,
20 // used to manage storage used by the allocator.
21 // mheap: the malloc heap, managed at page (8192-byte) granularity.
22 // mspan: a run of in-use pages managed by the mheap.
23 // mcentral: collects all spans of a given size class.
24 // mcache: a per-P cache of mspans with free space.
25 // mstats: allocation statistics.
27 // Allocating a small object proceeds up a hierarchy of caches:
29 // 1. Round the size up to one of the small size classes
30 // and look in the corresponding mspan in this P's mcache.
31 // Scan the mspan's free bitmap to find a free slot.
32 // If there is a free slot, allocate it.
33 // This can all be done without acquiring a lock.
35 // 2. If the mspan has no free slots, obtain a new mspan
36 // from the mcentral's list of mspans of the required size
37 // class that have free space.
38 // Obtaining a whole span amortizes the cost of locking the mcentral.
41 // 3. If the mcentral's mspan list is empty, obtain a run
42 // of pages from the mheap to use for the mspan.
44 // 4. If the mheap is empty or has no page runs large enough,
45 // allocate a new group of pages (at least 1MB) from the
46 // operating system. Allocating a large run of pages
47 // amortizes the cost of talking to the operating system.
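//
// As a hedged sketch (not the real logic; mallocgc below implements it,
// including all slow paths), step 1's fast path amounts to roughly:
//
//	sizeclass := size_to_class8[divRoundUp(size, smallSizeDiv)] // round up to a small size class
//	span := c.alloc[makeSpanClass(sizeclass, noscan)]           // this P's cached mspan
//	v := nextFreeFast(span)                                     // scan the span's allocCache bitmap
//
// Only if v is 0 does the allocator fall through to steps 2-4.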
49 // Sweeping an mspan and freeing objects on it proceeds up a similar hierarchy:
52 // 1. If the mspan is being swept in response to allocation, it
53 // is returned to the mcache to satisfy the allocation.
55 // 2. Otherwise, if the mspan still has allocated objects in it,
56 // it is placed on the mcentral free list for the mspan's size class.
59 // 3. Otherwise, if all objects in the mspan are free, the mspan's
60 // pages are returned to the mheap and the mspan is now dead.
62 // Allocating and freeing a large object uses the mheap
63 // directly, bypassing the mcache and mcentral.
65 // If mspan.needzero is false, then free object slots in the mspan are
66 // already zeroed. Otherwise if needzero is true, objects are zeroed as
67 // they are allocated. There are various benefits to delaying zeroing this way:
70 // 1. Stack frame allocation can avoid zeroing altogether.
72 // 2. It exhibits better temporal locality, since the program is
73 // probably about to write to the memory.
75 // 3. We don't zero pages that never get reused.
77 // Virtual memory layout
79 // The heap consists of a set of arenas, which are 64MB on 64-bit and
80 // 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
81 // aligned to the arena size.
83 // Each arena has an associated heapArena object that stores the
84 // metadata for that arena: the heap bitmap for all words in the arena
85 // and the span map for all pages in the arena. heapArena objects are
86 // themselves allocated off-heap.
88 // Since arenas are aligned, the address space can be viewed as a
89 // series of arena frames. The arena map (mheap_.arenas) maps from
90 // arena frame number to *heapArena, or nil for parts of the address
91 // space not backed by the Go heap. The arena map is structured as a
92 // two-level array consisting of a "L1" arena map and many "L2" arena
93 // maps; however, since arenas are large, on many architectures, the
94 // arena map consists of a single, large L2 map.
96 // The arena map covers the entire possible address space, allowing
97 // the Go heap to use any part of the address space. The allocator
98 // attempts to keep arenas contiguous so that large spans (and hence
99 // large objects) can cross arenas.
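//
// A minimal sketch (mirroring arenaIndex and the arena map lookup in
// mheap.go) of how an address p resolves to its heapArena metadata:
//
//	ai := arenaIndex(p) // arena frame number: (p - arenaBaseOffset) / heapArenaBytes
//	ha := mheap_.arenas[ai.l1()][ai.l2()]
//	// ha is nil if p is not backed by the Go heap.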
106 "runtime/internal/atomic"
107 "runtime/internal/math"
108 "runtime/internal/sys"
113 maxTinySize = _TinySize
114 tinySizeClass = _TinySizeClass
115 maxSmallSize = _MaxSmallSize
117 pageShift = _PageShift
120 concurrentSweep = _ConcurrentSweep
122 _PageSize = 1 << _PageShift
123 _PageMask = _PageSize - 1
125 // _64bit = 1 on 64-bit systems, 0 on 32-bit systems
126 _64bit = 1 << (^uintptr(0) >> 63) / 2
128 // Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
130 _TinySizeClass = int8(2)
132 _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc
134 // Per-P, per order stack segment cache size.
135 _StackCacheSize = 32 * 1024
137 // Number of orders that get caching. Order 0 is FixedStack
138 // and each successive order is twice as large.
139 // We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
140 // will be allocated directly.
141 // Since FixedStack is different on different systems, we
142 // must vary NumStackOrders to keep the same maximum cached size.
143 // OS | FixedStack | NumStackOrders
144 // -----------------+------------+---------------
145 // linux/darwin/bsd | 2KB | 4
146 // windows/32 | 4KB | 3
147 // windows/64 | 8KB | 2
149 _NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
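// For example, on windows/amd64 this evaluates to 4 - 8/4*1 - 0 = 2 and on
// linux (any arch) to 4 - 0 - 0 = 4, matching the table above.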
151 // heapAddrBits is the number of bits in a heap address. On
152 // amd64, addresses are sign-extended beyond heapAddrBits. On
153 // other arches, they are zero-extended.
155 // On most 64-bit platforms, we limit this to 48 bits based on a
156 // combination of hardware and OS limitations.
158 // amd64 hardware limits addresses to 48 bits, sign-extended
159 // to 64 bits. Addresses where the top 16 bits are not either
160 // all 0 or all 1 are "non-canonical" and invalid. Because of
161 // these "negative" addresses, we offset addresses by 1<<47
162 // (arenaBaseOffset) on amd64 before computing indexes into
163 // the heap arenas index. In 2017, amd64 hardware added
164 // support for 57 bit addresses; however, currently only Linux
165 // supports this extension and the kernel will never choose an
166 // address above 1<<47 unless mmap is called with a hint
167 // address above 1<<47 (which we never do).
169 // arm64 hardware (as of ARMv8) limits user addresses to 48
170 // bits, in the range [0, 1<<48).
172 // ppc64, mips64, and s390x support arbitrary 64 bit addresses
173 // in hardware. On Linux, Go leans on stricter OS limits. Based
174 // on Linux's processor.h, the user address space is limited as
175 // follows on 64-bit architectures:
177 // Architecture Name Maximum Value (exclusive)
178 // ---------------------------------------------------------------------
179 // amd64 TASK_SIZE_MAX 0x007ffffffff000 (47 bit addresses)
180 // arm64 TASK_SIZE_64 0x01000000000000 (48 bit addresses)
181 // ppc64{,le} TASK_SIZE_USER64 0x00400000000000 (46 bit addresses)
182 // mips64{,le} TASK_SIZE64 0x00010000000000 (40 bit addresses)
183 // s390x TASK_SIZE 1<<64 (64 bit addresses)
185 // These limits may increase over time, but are currently at
186 // most 48 bits except on s390x. On all architectures, Linux
187 // starts placing mmap'd regions at addresses that are
188 // significantly below 48 bits, so even if it's possible to
189 // exceed Go's 48 bit limit, it's extremely unlikely in practice.
192 // On 32-bit platforms, we accept the full 32-bit address
193 // space because doing so is cheap.
194 // mips32 only has access to the low 2GB of virtual memory, so
195 // we further limit it to 31 bits.
197 // On ios/arm64, although 64-bit pointers are presumably
198 // available, pointers are truncated to 33 bits in iOS <14.
199 // Furthermore, only the top 4 GiB of the address space are
200 // actually available to the application. In iOS >=14, more
201 // of the address space is available, and the OS can now
202 // provide addresses outside of those 33 bits. Pick 40 bits
203 // as a reasonable balance between address space usage by the
204 // page allocator, and flexibility for what mmap'd regions
205 // we'll accept for the heap. We can't just move to the full
206 // 48 bits because this uses too much address space for older iOS versions.
208 // TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64
209 // to a 48-bit address space like every other arm64 platform.
211 // WebAssembly currently has a limit of 4GB linear memory.
212 heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 40*goos.IsIos*goarch.IsArm64
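// Working the expression above through for a few platforms: linux/amd64 gives
// 1*1*1*48 + 0 + 0 = 48, js/wasm gives 0 + 1*32 + 0 = 32, and ios/arm64 gives
// 0 + 0 + 40*1*1 = 40.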
214 // maxAlloc is the maximum size of an allocation. On 64-bit,
215 // it's theoretically possible to allocate 1<<heapAddrBits bytes. On
216 // 32-bit, however, this is one less than 1<<32 because the
217 // number of bytes in the address space doesn't actually fit in a uintptr.
219 maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1
221 // The number of bits in a heap address, the size of heap
222 // arenas, and the L1 and L2 arena map sizes are related by
224 // (1 << addr bits) = arena size * L1 entries * L2 entries
226 // Currently, we balance these as follows:
228 // Platform Addr bits Arena size L1 entries L2 entries
229 // -------------- --------- ---------- ---------- -----------
230 // */64-bit 48 64MB 1 4M (32MB)
231 // windows/64-bit 48 4MB 64 1M (8MB)
232 // ios/arm64 33 4MB 1 2048 (8KB)
233 // */32-bit 32 4MB 1 1024 (4KB)
234 // */mips(le) 31 4MB 1 512 (2KB)
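//
// As a quick check of the relation above against this table: on */64-bit,
// 64MB * 1 * 4M = 2^26 * 2^22 = 2^48; on windows/64-bit, 4MB * 64 * 1M =
// 2^22 * 2^6 * 2^20 = 2^48; on ios/arm64, 4MB * 1 * 2048 = 2^22 * 2^11 = 2^33.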
236 // heapArenaBytes is the size of a heap arena. The heap
237 // consists of mappings of size heapArenaBytes, aligned to
238 // heapArenaBytes. The initial heap mapping is one arena.
240 // This is currently 64MB on 64-bit non-Windows and 4MB on
241 // 32-bit and on Windows. We use smaller arenas on Windows
242 // because all committed memory is charged to the process,
243 // even if it's not touched. Hence, for processes with small
244 // heaps, the mapped arena space needs to be commensurate.
245 // This is particularly important with the race detector,
246 // since it significantly amplifies the cost of committed memory.
248 heapArenaBytes = 1 << logHeapArenaBytes
250 heapArenaWords = heapArenaBytes / goarch.PtrSize
252 // logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
253 // prefer using heapArenaBytes where possible (we need the
254 // constant to compute some other constants).
255 logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64
257 // heapArenaBitmapWords is the size of each heap arena's bitmap in uintptrs.
258 heapArenaBitmapWords = heapArenaWords / (8 * goarch.PtrSize)
260 pagesPerArena = heapArenaBytes / pageSize
262 // arenaL1Bits is the number of bits of the arena number
263 // covered by the first level arena map.
265 // This number should be small, since the first level arena
266 // map requires PtrSize*(1<<arenaL1Bits) of space in the
267 // binary's BSS. It can be zero, in which case the first level
268 // index is effectively unused. There is a performance benefit
269 // to this, since the generated code can be more efficient,
270 // but it comes at the cost of having a large L2 mapping.
272 // We use the L1 map on 64-bit Windows because the arena size
273 // is small, but the address space is still 48 bits, and
274 // there's a high cost to having a large L2.
275 arenaL1Bits = 6 * (_64bit * goos.IsWindows)
277 // arenaL2Bits is the number of bits of the arena number
278 // covered by the second level arena index.
280 // The size of each arena map allocation is proportional to
281 // 1<<arenaL2Bits, so it's important that this not be too
282 // large. 48 bits leads to 32MB arena index allocations, which
283 // is about the practical threshold.
284 arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits
286 // arenaL1Shift is the number of bits to shift an arena frame
287 // number by to compute an index into the first level arena map.
288 arenaL1Shift = arenaL2Bits
290 // arenaBits is the total bits in a combined arena map index.
291 // This is split between the index into the L1 arena map and the L2 arena map.
293 arenaBits = arenaL1Bits + arenaL2Bits
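// A hedged sketch of how an arena index i splits across the two levels
// (mirroring arenaIdx.l1 and arenaIdx.l2 in mheap.go):
//
//	l1 := i >> arenaL1Shift        // index into mheap_.arenas
//	l2 := i & (1<<arenaL2Bits - 1) // index into the L2 map
//
// When arenaL1Bits is 0, l1 is always 0 and the single L2 map covers
// the whole address space.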
295 // arenaBaseOffset is the pointer value that corresponds to
296 // index 0 in the heap arena map.
298 // On amd64, the address space is 48 bits, sign extended to 64
299 // bits. This offset lets us handle "negative" addresses (or
300 // high addresses if viewed as unsigned).
302 // On aix/ppc64, this offset allows keeping heapAddrBits at
303 // 48. Otherwise, it would have to be 60 in order to handle mmap addresses
304 // (in the range 0x0a00000000000000 - 0x0affffffffffffff), but in that
305 // case the memory reserved in (s *pageAlloc).init for chunks
306 // causes significant slowdowns.
308 // On other platforms, the user address space is contiguous
309 // and starts at 0, so no offset is necessary.
310 arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix
311 // A typed version of this constant that will make it into DWARF (for viewcore).
312 arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)
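// Interpreted as a signed value, 0xffff800000000000 is -1<<47, so on amd64
// subtracting it from an address (mod 2^64) is the same as adding 1<<47: the
// "negative" canonical range [0xffff800000000000, 2^64) maps to arena frames
// starting at 0, and the non-negative range [0, 1<<47) maps to frames
// starting at (1<<47)/heapArenaBytes.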
314 // Max number of threads to run garbage collection.
315 // 2, 3, and 4 are all plausible maximums depending
316 // on the hardware details of the machine. The garbage
317 // collector scales well to 32 cpus.
320 // minLegalPointer is the smallest possible legal pointer.
321 // This is the smallest possible architectural page size,
322 // since we assume that the first page is never mapped.
324 // This should agree with minZeroPage in the compiler.
325 minLegalPointer uintptr = 4096
327 // minHeapForMetadataHugePages sets a threshold on when certain kinds of
328 // heap metadata, currently the arenas map L2 entries and page alloc bitmap
329 // mappings, are allowed to be backed by huge pages. If the heap goal ever
330 // exceeds this threshold, then huge pages are enabled.
332 // These numbers are chosen with the assumption that huge pages are on the
333 // order of a few MiB in size.
335 // The kinds of metadata this applies to have a very low overhead when compared
336 // to address space used, but their constant overheads for small heaps would
337 // be very high if they were to be backed by huge pages (e.g. a few MiB makes
338 // a huge difference for an 8 MiB heap, but barely any difference for a 1 GiB
339 // heap). The benefit of huge pages is also not worth it for small heaps,
340 // because only a very, very small part of the metadata is used for small heaps.
342 // N.B. If the heap goal exceeds this threshold and then shrinks back to a very
343 // small size again, huge pages will still be enabled for this mapping. The reason
344 // is that there's no point in turning them back off unless we're also returning the
345 // physical memory for these metadata mappings to the OS, and that would be quite
346 // complex to do in general as the heap is likely fragmented after a reduction in heap size.
347 minHeapForMetadataHugePages = 1 << 30
350 // physPageSize is the size in bytes of the OS's physical pages.
351 // Mapping and unmapping operations must be done at multiples of physPageSize.
354 // This must be set by the OS init code (typically in osinit) before mallocinit.
356 var physPageSize uintptr
358 // physHugePageSize is the size in bytes of the OS's default physical huge
359 // page size whose allocation is opaque to the application. It is assumed
360 // and verified to be a power of two.
362 // If set, this must be set by the OS init code (typically in osinit) before
363 // mallocinit. However, setting it at all is optional, and leaving the default
364 // value is always safe (though potentially less efficient).
366 // Since physHugePageSize is always assumed to be a power of two,
367 // physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
368 // The purpose of physHugePageShift is to avoid doing divisions in
369 // performance critical functions.
371 physHugePageSize uintptr
372 physHugePageShift uint
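// A hedged illustration of why the shift matters: code on hot paths can
// replace division and rounding by physHugePageSize with bit operations,
// for example
//
//	nHuge := n >> physHugePageShift     // n / physHugePageSize
//	base := p &^ (physHugePageSize - 1) // round p down to a huge page boundary
//
// where n and p are hypothetical sizes/addresses.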
376 if class_to_size[_TinySizeClass] != _TinySize {
377 throw("bad TinySizeClass")
380 if heapArenaBitmapWords&(heapArenaBitmapWords-1) != 0 {
381 // heapBits expects modular arithmetic on bitmap
382 // addresses to work.
383 throw("heapArenaBitmapWords not a power of 2")
386 // Check physPageSize.
387 if physPageSize == 0 {
388 // The OS init code failed to fetch the physical page size.
389 throw("failed to get system page size")
391 if physPageSize > maxPhysPageSize {
392 print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
393 throw("bad system page size")
395 if physPageSize < minPhysPageSize {
396 print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
397 throw("bad system page size")
399 if physPageSize&(physPageSize-1) != 0 {
400 print("system page size (", physPageSize, ") must be a power of 2\n")
401 throw("bad system page size")
403 if physHugePageSize&(physHugePageSize-1) != 0 {
404 print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
405 throw("bad system huge page size")
407 if physHugePageSize > maxPhysHugePageSize {
408 // physHugePageSize is greater than the maximum supported huge page size.
409 // Don't throw here as in the other cases, since a system configured
410 // this way isn't wrong; we just don't have the code to support it.
411 // Instead, silently set the huge page size to zero.
414 if physHugePageSize != 0 {
415 // Since physHugePageSize is a power of 2, it suffices to increase
416 // physHugePageShift until 1<<physHugePageShift == physHugePageSize.
417 for 1<<physHugePageShift != physHugePageSize {
421 if pagesPerArena%pagesPerSpanRoot != 0 {
422 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n")
423 throw("bad pagesPerSpanRoot")
425 if pagesPerArena%pagesPerReclaimerChunk != 0 {
426 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
427 throw("bad pagesPerReclaimerChunk")
430 if minTagBits > taggedPointerBits {
431 throw("taggedPointerBits too small")
434 // Initialize the heap.
436 mcache0 = allocmcache()
437 lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas)
438 lockInit(&profInsertLock, lockRankProfInsert)
439 lockInit(&profBlockLock, lockRankProfBlock)
440 lockInit(&profMemActiveLock, lockRankProfMemActive)
441 for i := range profMemFutureLock {
442 lockInit(&profMemFutureLock[i], lockRankProfMemFuture)
444 lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)
446 // Create initial arena growth hints.
447 if goarch.PtrSize == 8 {
448 // On a 64-bit machine, we pick the following hints
451 // 1. Starting from the middle of the address space
452 // makes it easier to grow out a contiguous range
453 // without running into some other mapping.
455 // 2. This makes Go heap addresses more easily
456 // recognizable when debugging.
458 // 3. Stack scanning in gccgo is still conservative,
459 // so it's important that addresses be distinguishable from other data.
462 // Starting at 0x00c0 means that the valid memory addresses
463 // will begin 0x00c0, 0x00c1, ...
464 // In little-endian, that's c0 00, c1 00, ... None of those are valid
465 // UTF-8 sequences, and they are otherwise as far away from
466 // ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
467 // addresses. An earlier attempt to use 0x11f8 caused out of memory errors
468 // on OS X during thread allocations. 0x00c0 causes conflicts with
469 // AddressSanitizer which reserves all memory up to 0x0100.
470 // These choices reduce the odds of a conservative garbage collector
471 // not collecting memory because some non-pointer block of memory
472 // had a bit pattern that matched a memory address.
474 // However, on arm64, we ignore all this advice above and slam the
475 // allocation at 0x40 << 32 because when using 4k pages with 3-level
476 // translation buffers, the user address space is limited to 39 bits.
477 // On ios/arm64, the address space is even smaller.
479 // On AIX, mmap starts at 0x0A00000000000000 for 64-bit.
482 // Space mapped for user arenas comes immediately after the range
483 // originally reserved for the regular heap when race mode is not
484 // enabled because user arena chunks can never be used for regular heap
485 // allocations and we want to avoid fragmenting the address space.
487 // In race mode we have no choice but to just use the same hints because
488 // the race detector requires that the heap be mapped contiguously.
489 for i := 0x7f; i >= 0; i-- {
493 // The TSAN runtime requires the heap
494 // to be in the range [0x00c000000000,
496 p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
497 if p >= uintptrMask&0x00e000000000 {
500 case GOARCH == "arm64" && GOOS == "ios":
501 p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
502 case GOARCH == "arm64":
503 p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
506 // We don't use addresses directly after 0x0A00000000000000
507 // to avoid collisions with other mmaps done by non-Go programs.
510 p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
512 p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
514 // Switch to generating hints for user arenas if we've gone
515 // through about half the hints. In race mode, take only about
516 // a quarter; we don't have very much space to work with.
517 hintList := &mheap_.arenaHints
518 if (!raceenabled && i > 0x3f) || (raceenabled && i > 0x5f) {
519 hintList = &mheap_.userArena.arenaHints
521 hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
523 hint.next, *hintList = *hintList, hint
526 // On a 32-bit machine, we're much more concerned
527 // about keeping the usable heap contiguous.
530 // 1. We reserve space for all heapArenas up front so
531 // they don't get interleaved with the heap. They're
532 // ~258MB, so this isn't too bad. (We could reserve a
533 // smaller amount of space up front if this is a problem.)
536 // 2. We hint the heap to start right above the end of
537 // the binary so we have the best chance of keeping it contiguous.
540 // 3. We try to stake out a reasonably large initial heap reservation.
543 const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
544 meta := uintptr(sysReserve(nil, arenaMetaSize))
546 mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
549 // We want to start the arena low, but if we're linked
550 // against C code, it's possible global constructors
551 // have called malloc and adjusted the process' brk.
552 // Query the brk so we can avoid trying to map the
553 // region over it (which will cause the kernel to put
554 // the region somewhere else, likely at a high address).
558 // If we ask for the end of the data segment but the
559 // operating system requires a little more space
560 // before we can start allocating, it will give out a
561 // slightly higher pointer. Except QEMU, which is
562 // buggy, as usual: it won't adjust the pointer
563 // upward. So adjust it upward a little bit ourselves:
564 // 1/4 MB to get away from the running binary image.
565 p := firstmoduledata.end
569 if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
570 p = mheap_.heapArenaAlloc.end
572 p = alignUp(p+(256<<10), heapArenaBytes)
573 // Because we're worried about fragmentation on
574 // 32-bit, we try to make a large initial reservation.
575 arenaSizes := []uintptr{
580 for _, arenaSize := range arenaSizes {
581 a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
583 mheap_.arena.init(uintptr(a), size, false)
584 p = mheap_.arena.end // For hint below
588 hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
590 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
592 // Place the hint for user arenas just after the large reservation.
594 // While this potentially competes with the hint above, in practice we probably
595 // aren't going to be getting this far anyway on 32-bit platforms.
596 userArenaHint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
597 userArenaHint.addr = p
598 userArenaHint.next, mheap_.userArena.arenaHints = mheap_.userArena.arenaHints, userArenaHint
600 // Initialize the memory limit here because the allocator is going to look at it
601 // but we haven't called gcinit yet and we're definitely going to allocate memory before then.
602 gcController.memoryLimit.Store(maxInt64)
605 // sysAlloc allocates heap arena space for at least n bytes. The
606 // returned pointer is always heapArenaBytes-aligned and backed by
607 // h.arenas metadata. The returned size is always a multiple of
608 // heapArenaBytes. sysAlloc returns nil on failure.
609 // There is no corresponding free function.
611 // hintList is a list of hint addresses for where to allocate new
612 // heap arenas. It must be non-nil.
614 // register indicates whether the heap arena should be registered
617 // sysAlloc returns a memory region in the Reserved state. This region must
618 // be transitioned to Prepared and then Ready before use.
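//
// A hedged, simplified sketch of those state transitions (the real flow is
// split across mheap.grow and the page allocator):
//
//	v, size := h.sysAlloc(n, &h.arenaHints, true) // Reserved
//	sysMap(v, size, &gcController.heapReleased)   // Reserved -> Prepared
//	sysUsed(v, size, size)                        // Prepared -> Ready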
621 func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, register bool) (v unsafe.Pointer, size uintptr) {
622 assertLockHeld(&h.lock)
624 n = alignUp(n, heapArenaBytes)
626 if hintList == &h.arenaHints {
627 // First, try the arena pre-reservation.
628 // Newly-used mappings are considered released.
630 // Only do this if we're using the regular heap arena hints.
631 // This behavior is only for the heap.
632 v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased)
639 // Try to grow the heap at a hint address.
640 for *hintList != nil {
647 // We can't use this, so don't ask.
649 } else if arenaIndex(p+n-1) >= 1<<arenaBits {
650 // Outside addressable heap. Can't use.
653 v = sysReserve(unsafe.Pointer(p), n)
656 // Success. Update the hint.
664 // Failed. Discard this hint and try the next.
666 // TODO: This would be cleaner if sysReserve could be
667 // told to only return the requested address. In
668 // particular, this is already how Windows behaves, so
669 // it would simplify things there.
673 *hintList = hint.next
674 h.arenaHintAlloc.free(unsafe.Pointer(hint))
679 // The race detector assumes the heap lives in
680 // [0x00c000000000, 0x00e000000000), but we
681 // just ran out of hints in this region. Give a nice failure.
683 throw("too many address space collisions for -race mode")
686 // All of the hints failed, so we'll take any
687 // (sufficiently aligned) address the kernel will give us.
689 v, size = sysReserveAligned(nil, n, heapArenaBytes)
694 // Create new hints for extending this region.
695 hint := (*arenaHint)(h.arenaHintAlloc.alloc())
696 hint.addr, hint.down = uintptr(v), true
697 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
698 hint = (*arenaHint)(h.arenaHintAlloc.alloc())
699 hint.addr = uintptr(v) + size
700 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
703 // Check for bad pointers or pointers we can't use.
708 bad = "region exceeds uintptr range"
709 } else if arenaIndex(p) >= 1<<arenaBits {
710 bad = "base outside usable address space"
711 } else if arenaIndex(p+size-1) >= 1<<arenaBits {
712 bad = "end outside usable address space"
715 // This should be impossible on most architectures,
716 // but it would be really confusing to debug.
717 print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
718 throw("memory reservation exceeds address space limit")
722 if uintptr(v)&(heapArenaBytes-1) != 0 {
723 throw("misrounded allocation in sysAlloc")
727 // Create arena metadata.
728 for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
729 l2 := h.arenas[ri.l1()]
731 // Allocate an L2 arena map.
733 // Use sysAllocOS instead of sysAlloc or persistentalloc because there's no
734 // statistic we can comfortably account for this space in. With this structure,
735 // we rely on demand paging to avoid large overheads, but tracking which memory
736 // is paged in is too expensive. Trying to account for the whole region means
737 // that it will appear like an enormous memory overhead in statistics, even though it is not.
739 l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2)))
741 throw("out of memory allocating heap arena map")
743 if h.arenasHugePages {
744 sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
746 sysNoHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
748 atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
751 if l2[ri.l2()] != nil {
752 throw("arena already initialized")
755 r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
757 r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
759 throw("out of memory allocating heap arena metadata")
763 // Register the arena in allArenas if requested.
765 if len(h.allArenas) == cap(h.allArenas) {
766 size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize
770 newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
772 throw("out of memory allocating allArenas")
774 oldSlice := h.allArenas
775 *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)}
776 copy(h.allArenas, oldSlice)
777 // Do not free the old backing array because
778 // there may be concurrent readers. Since we
779 // double the array each time, this can lead
780 // to at most 2x waste.
782 h.allArenas = h.allArenas[:len(h.allArenas)+1]
783 h.allArenas[len(h.allArenas)-1] = ri
786 // Store atomically just in case an object from the
787 // new heap arena becomes visible before the heap lock
788 // is released (which shouldn't happen, but there's
789 // little downside to this).
790 atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
793 // Tell the race detector about the new heap memory.
795 racemapshadow(v, size)
801 // sysReserveAligned is like sysReserve, but the returned pointer is
802 // aligned to align bytes. It may reserve either n or n+align bytes,
803 // so it returns the size that was reserved.
804 func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
805 // Since the alignment is rather large in uses of this
806 // function, we're not likely to get it by chance, so we ask
807 // for a larger region and remove the parts we don't need.
810 p := uintptr(sysReserve(v, size+align))
814 case p&(align-1) == 0:
815 return unsafe.Pointer(p), size + align
816 case GOOS == "windows":
817 // On Windows we can't release pieces of a
818 // reservation, so we release the whole thing and
819 // re-reserve the aligned sub-region. This may race,
820 // so we may have to try again.
821 sysFreeOS(unsafe.Pointer(p), size+align)
822 p = alignUp(p, align)
823 p2 := sysReserve(unsafe.Pointer(p), size)
824 if p != uintptr(p2) {
825 // Must have raced. Try again.
827 if retries++; retries == 100 {
828 throw("failed to allocate aligned heap memory; too many retries")
835 // Trim off the unaligned parts.
836 pAligned := alignUp(p, align)
837 sysFreeOS(unsafe.Pointer(p), pAligned-p)
838 end := pAligned + size
839 endLen := (p + size + align) - end
841 sysFreeOS(unsafe.Pointer(end), endLen)
843 return unsafe.Pointer(pAligned), size
847 // enableMetadataHugePages enables huge pages for various sources of heap metadata.
849 // A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant
850 // time, but may take time proportional to the size of the mapped heap beyond that.
852 // This function is idempotent.
854 // The heap lock must not be held over this operation, since it will briefly acquire the heap lock.
856 func (h *mheap) enableMetadataHugePages() {
857 // Enable huge pages for page structure.
858 h.pages.enableChunkHugePages()
860 // Grab the lock and set arenasHugePages if it's not already set.
862 // Once arenasHugePages is set, all new L2 entries will be eligible for
863 // huge pages. We'll set all the old entries after we release the lock.
865 if h.arenasHugePages {
869 h.arenasHugePages = true
872 // N.B. The arenas L1 map is quite small on all platforms, so it's fine to
873 // just iterate over the whole thing.
874 for i := range h.arenas {
875 l2 := (*[1 << arenaL2Bits]*heapArena)(atomic.Loadp(unsafe.Pointer(&h.arenas[i])))
879 sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
883 // base address for all 0-byte allocations
886 // nextFreeFast returns the next free object if one is quickly available.
887 // Otherwise it returns 0.
888 func nextFreeFast(s *mspan) gclinkptr {
889 theBit := sys.TrailingZeros64(s.allocCache) // Is there a free object in the allocCache?
891 result := s.freeindex + uintptr(theBit)
892 if result < s.nelems {
893 freeidx := result + 1
894 if freeidx%64 == 0 && freeidx != s.nelems {
897 s.allocCache >>= uint(theBit + 1)
898 s.freeindex = freeidx
900 return gclinkptr(result*s.elemsize + s.base())
906 // nextFree returns the next free object from the cached span if one is available.
907 // Otherwise it refills the cache with a span with an available object and
908 // returns that object along with a flag indicating that this was a heavyweight
909 // allocation. If it is a heavyweight allocation, the caller must determine
910 // whether a new GC cycle needs to be started or, if the GC is active, whether
911 // this goroutine needs to assist the GC.
913 // Must run in a non-preemptible context since otherwise the owner of c could change.
915 func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
918 freeIndex := s.nextFreeIndex()
919 if freeIndex == s.nelems {
921 if uintptr(s.allocCount) != s.nelems {
922 println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
923 throw("s.allocCount != s.nelems && freeIndex == s.nelems")
929 freeIndex = s.nextFreeIndex()
932 if freeIndex >= s.nelems {
933 throw("freeIndex is not valid")
936 v = gclinkptr(freeIndex*s.elemsize + s.base())
938 if uintptr(s.allocCount) > s.nelems {
939 println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
940 throw("s.allocCount > s.nelems")
945 // Allocate an object of size bytes.
946 // Small objects are allocated from the per-P cache's free lists.
947 // Large objects (> 32 kB) are allocated straight from the heap.
948 func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
949 if gcphase == _GCmarktermination {
950 throw("mallocgc called with gcphase == _GCmarktermination")
954 return unsafe.Pointer(&zerobase)
957 // It's possible for any malloc to trigger sweeping, which may in
958 // turn queue finalizers. Record this dynamic lock edge.
959 lockRankMayQueueFinalizer()
963 // In the ASAN runtime library, the malloc() function allocates extra memory,
964 // the redzone, around the user-requested memory region, and the redzones are marked
965 // as unaddressable. We perform the same operations in Go to detect overflows or underflows.
967 size += computeRZlog(size)
974 // TODO(austin): This should be just
975 // align = uintptr(typ.align)
976 // but that's only 4 on 32-bit platforms,
977 // even if there's a uint64 field in typ (see #599).
978 // This causes 64-bit atomic accesses to panic.
979 // Hence, we use stricter alignment that matches
980 // the normal allocator better.
983 } else if size&3 == 0 {
985 } else if size&1 == 0 {
991 return persistentalloc(size, align, &memstats.other_sys)
994 if inittrace.active && inittrace.id == getg().goid {
995 // Init functions are executed sequentially in a single goroutine.
996 inittrace.allocs += 1
1000 // assistG is the G to charge for this allocation, or nil if
1001 // GC is not currently active.
1002 assistG := deductAssistCredit(size)
1004 // Set mp.mallocing to keep from being preempted by GC.
1006 if mp.mallocing != 0 {
1007 throw("malloc deadlock")
1009 if mp.gsignal == getg() {
1010 throw("malloc during signal")
1014 shouldhelpgc := false
1015 dataSize := userSize
1018 throw("mallocgc called without a P or outside bootstrapping")
1021 var x unsafe.Pointer
1022 noscan := typ == nil || typ.PtrBytes == 0
1023 // In some cases block zeroing can profitably (for latency reduction purposes)
1024 // be delayed till preemption is possible; delayedZeroing tracks that state.
1025 delayedZeroing := false
1026 if size <= maxSmallSize {
1027 if noscan && size < maxTinySize {
1030 // Tiny allocator combines several tiny allocation requests
1031 // into a single memory block. The resulting memory block
1032 // is freed when all subobjects are unreachable. The subobjects
1033 // must be noscan (have no pointers); this ensures that
1034 // the amount of potentially wasted memory is bounded.
1036 // Size of the memory block used for combining (maxTinySize) is tunable.
1037 // Current setting is 16 bytes, which relates to 2x worst case memory
1038 // wastage (when all but one subobjects are unreachable).
1039 // 8 bytes would result in no wastage at all, but provides less
1040 // opportunities for combining.
1041 // 32 bytes provides more opportunities for combining,
1042 // but can lead to 4x worst case wastage.
1043 // The best-case gain is 8x regardless of block size.
1045 // Objects obtained from tiny allocator must not be freed explicitly.
1046 // So when an object will be freed explicitly, we ensure that
1047 // its size >= maxTinySize.
1049 // SetFinalizer has a special case for objects potentially coming
1050 // from the tiny allocator; in such a case it allows setting finalizers
1051 // for an inner byte of a memory block.
1053 // The main targets of tiny allocator are small strings and
1054 // standalone escaping variables. On a json benchmark
1055 // the allocator reduces the number of allocations by ~12% and
1056 // reduces heap size by ~20%.
1058 // Align tiny pointer for required (conservative) alignment.
1060 off = alignUp(off, 8)
1061 } else if goarch.PtrSize == 4 && size == 12 {
1062 // Conservatively align 12-byte objects to 8 bytes on 32-bit
1063 // systems so that objects whose first field is a 64-bit
1064 // value are aligned to 8 bytes and do not cause a fault on
1065 // atomic access. See issue 37262.
1066 // TODO(mknyszek): Remove this workaround if/when issue 36606 is resolved.
1068 off = alignUp(off, 8)
1069 } else if size&3 == 0 {
1070 off = alignUp(off, 4)
1071 } else if size&1 == 0 {
1072 off = alignUp(off, 2)
1074 if off+size <= maxTinySize && c.tiny != 0 {
1075 // The object fits into existing tiny block.
1076 x = unsafe.Pointer(c.tiny + off)
1077 c.tinyoffset = off + size
1083 // Allocate a new maxTinySize block.
1084 span = c.alloc[tinySpanClass]
1085 v := nextFreeFast(span)
1087 v, span, shouldhelpgc = c.nextFree(tinySpanClass)
1089 x = unsafe.Pointer(v)
1090 (*[2]uint64)(x)[0] = 0
1091 (*[2]uint64)(x)[1] = 0
1092 // See if we need to replace the existing tiny block with the new one
1093 // based on amount of remaining free space.
1094 if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
1095 // Note: disabled when race detector is on, see comment near end of this function.
1102 if size <= smallSizeMax-8 {
1103 sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
1105 sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
1107 size = uintptr(class_to_size[sizeclass])
1108 spc := makeSpanClass(sizeclass, noscan)
1110 v := nextFreeFast(span)
1112 v, span, shouldhelpgc = c.nextFree(spc)
1114 x = unsafe.Pointer(v)
1115 if needzero && span.needzero != 0 {
1116 memclrNoHeapPointers(x, size)
1121 // For large allocations, keep track of zeroed state so that
1122 // bulk zeroing can happen later in a preemptible context.
1123 span = c.allocLarge(size, noscan)
1126 size = span.elemsize
1127 x = unsafe.Pointer(span.base())
1128 if needzero && span.needzero != 0 {
1130 delayedZeroing = true
1132 memclrNoHeapPointers(x, size)
1133 // We've in theory cleared almost the whole span here,
1134 // and could take the extra step of actually clearing
1135 // the whole thing. However, don't. Any GC bits for the
1136 // uncleared parts will be zero, and it's just going to
1137 // be needzero = 1 once freed anyway.
1143 var scanSize uintptr
1144 heapBitsSetType(uintptr(x), size, dataSize, typ)
1145 if dataSize > typ.Size_ {
1146 // Array allocation. If there are any
1147 // pointers, GC has to scan to the last element of the array.
1149 if typ.PtrBytes != 0 {
1150 scanSize = dataSize - typ.Size_ + typ.PtrBytes
1153 scanSize = typ.PtrBytes
1155 c.scanAlloc += scanSize
1158 // Ensure that the stores above that initialize x to
1159 // type-safe memory and set the heap bits occur before
1160 // the caller can make x observable to the garbage
1161 // collector. Otherwise, on weakly ordered machines,
1162 // the garbage collector could follow a pointer to x,
1163 // but see uninitialized memory or stale heap bits.
1164 publicationBarrier()
1165 // As x and the heap bits are initialized, update
1166 // freeIndexForScan now so x is seen by the GC
1167 // (including conservative scan) as an allocated object.
1168 // While this pointer can't escape into user code as a
1169 // _live_ pointer until we return, conservative scanning
1170 // may find a dead pointer that happens to point into this
1171 // object. Delaying this update until now ensures that
1172 // conservative scanning considers this pointer dead until this point.
1174 span.freeIndexForScan = span.freeindex
1176 // Allocate black during GC.
1177 // All slots hold nil so no scanning is needed.
1178 // This may be racing with GC so do it atomically if there can be
1179 // a race marking the bit.
1180 if gcphase != _GCoff {
1181 gcmarknewobject(span, uintptr(x), size)
1193 // We should only read/write memory of the size requested by the user.
1194 // The rest of the allocated memory should be poisoned, so that we can report
1195 // errors when accessing poisoned memory.
1196 // The allocated memory is larger than the requested userSize; it also includes
1197 // the redzone and some other padding bytes.
1198 rzBeg := unsafe.Add(x, userSize)
1199 asanpoison(rzBeg, size-userSize)
1200 asanunpoison(x, userSize)
1203 if rate := MemProfileRate; rate > 0 {
1204 // Note cache c only valid while m acquired; see #47302
1205 if rate != 1 && size < c.nextSample {
1206 c.nextSample -= size
1208 profilealloc(mp, x, size)
1214 // Pointerfree data can be zeroed late in a context where preemption can occur.
1215 // x will keep the memory alive.
1218 throw("delayed zeroing on data that may contain pointers")
1220 memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
1224 if debug.allocfreetrace != 0 {
1225 tracealloc(x, size, typ)
1228 if inittrace.active && inittrace.id == getg().goid {
1229 // Init functions are executed sequentially in a single goroutine.
1230 inittrace.bytes += uint64(size)
1235 // Account for internal fragmentation in the assist
1236 // debt now that we know it.
1237 assistG.gcAssistBytes -= int64(size - dataSize)
1241 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
1246 if raceenabled && noscan && dataSize < maxTinySize {
1247 // Pad tinysize allocations so they are aligned with the end
1248 // of the tinyalloc region. This ensures that any arithmetic
1249 // that goes off the top end of the object will be detectable
1250 // by checkptr (issue 38872).
1251 // Note that we disable tinyalloc when raceenabled for this to work.
1252 // TODO: This padding is only performed when the race detector
1253 // is enabled. It would be nice to enable it if any package
1254 // was compiled with checkptr, but there's no easy way to
1255 // detect that (especially at compile time).
1256 // TODO: enable this padding for all allocations, not just
1257 // tinyalloc ones. It's tricky because of pointer maps.
1258 // Maybe just all noscan objects?
1259 x = add(x, size-dataSize)
1265 // deductAssistCredit reduces the current G's assist credit
1266 // by size bytes, and assists the GC if necessary.
1268 // Caller must be preemptible.
1270 // Returns the G for which the assist credit was accounted.
1271 func deductAssistCredit(size uintptr) *g {
1273 if gcBlackenEnabled != 0 {
1274 // Charge the current user G for this allocation.
1276 if assistG.m.curg != nil {
1277 assistG = assistG.m.curg
1279 // Charge the allocation against the G. We'll account
1280 // for internal fragmentation at the end of mallocgc.
1281 assistG.gcAssistBytes -= int64(size)
1283 if assistG.gcAssistBytes < 0 {
1284 // This G is in debt. Assist the GC to correct
1285 // this before allocating. This must happen
1286 // before disabling preemption.
1287 gcAssistAlloc(assistG)
1293 // memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
1294 // on chunks of the buffer to be zeroed, with opportunities for preemption
1295 // along the way. memclrNoHeapPointers contains no safepoints and also
1296 // cannot be preemptively scheduled, so this provides a still-efficient
1297 // block clear that can also be preempted on a reasonable granularity.
1299 // Use this with care; if the data being cleared is tagged to contain
1300 // pointers, this allows the GC to run before it is all cleared.
1301 func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
1303 // We got this value from benchmarking: 128k is too small, 512k is too large.
1304 const chunkBytes = 256 * 1024
1306 for voff := v; voff < vsize; voff = voff + chunkBytes {
1308 // may hold locks, e.g., profiling
1311 // clear min(avail, lump) bytes
1316 memclrNoHeapPointers(unsafe.Pointer(voff), n)
1320 // implementation of the new builtin.
1321 // The compiler (both frontend and SSA backend) knows the signature
1322 // of this function.
1323 func newobject(typ *_type) unsafe.Pointer {
1324 return mallocgc(typ.Size_, typ, true)
1327 //go:linkname reflect_unsafe_New reflect.unsafe_New
1328 func reflect_unsafe_New(typ *_type) unsafe.Pointer {
1329 return mallocgc(typ.Size_, typ, true)
1332 //go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
1333 func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
1334 return mallocgc(typ.Size_, typ, true)
1337 // newarray allocates an array of n elements of type typ.
1338 func newarray(typ *_type, n int) unsafe.Pointer {
1340 return mallocgc(typ.Size_, typ, true)
1342 mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
1343 if overflow || mem > maxAlloc || n < 0 {
1344 panic(plainError("runtime: allocation size out of range"))
1346 return mallocgc(mem, typ, true)
1349 //go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
1350 func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
1351 return newarray(typ, n)
1354 func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
1357 throw("profilealloc called without a P or outside bootstrapping")
1359 c.nextSample = nextSample()
1360 mProf_Malloc(x, size)
1363 // nextSample returns the next sampling point for heap profiling. The goal is
1364 // to sample allocations on average every MemProfileRate bytes, but with a
1365 // completely random distribution over the allocation timeline; this
1366 // corresponds to a Poisson process with rate 1/MemProfileRate. In Poisson
1367 // processes, the distance between two samples follows an exponential
1368 // distribution with mean MemProfileRate, so the best return value is a random
1369 // number taken from an exponential distribution whose mean is MemProfileRate.
1370 func nextSample() uintptr {
1371 if MemProfileRate == 1 {
1372 // Callers assign our return value to
1373 // mcache.nextSample, but nextSample is not used
1374 // when the rate is 1. So avoid the math below and
1375 // just return something.
1378 if GOOS == "plan9" {
1379 // Plan 9 doesn't support floating point in note handler.
1380 if gp := getg(); gp == gp.m.gsignal {
1381 return nextSampleNoFP()
1385 return uintptr(fastexprand(MemProfileRate))
1388 // fastexprand returns a random number from an exponential distribution with
1389 // the specified mean.
1390 func fastexprand(mean int) int32 {
1391 // Avoid overflow. Maximum possible step is
1392 // -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
1394 case mean > 0x7000000:
1400 // Take a random sample of the exponential distribution exp(-x/mean).
1401 // The probability density function is (1/mean)*exp(-x/mean), so the CDF is
1402 // p = 1 - exp(-x/mean), so
1403 //   q = 1 - p == exp(-x/mean)
1404 //   log_e(q) = -x/mean
1405 //   -log_e(q) = x/mean
1406 //   x = -log_e(q) * mean
1407 // x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency
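// For example, with mean = 512*1024 (the default MemProfileRate) and a draw
// of q = 1<<25, qlog is 25 - 26 = -1, so the sample is about
// ln(2) * 512KiB, roughly 363409 bytes.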
1408 const randomBitCount = 26
1409 q := fastrandn(1<<randomBitCount) + 1
1410 qlog := fastlog2(float64(q)) - randomBitCount
1414 const minusLog2 = -0.6931471805599453 // -ln(2)
1415 return int32(qlog*(minusLog2*float64(mean))) + 1
1418 // nextSampleNoFP is similar to nextSample, but uses older,
1419 // simpler code to avoid floating point.
1420 func nextSampleNoFP() uintptr {
1421 // Set first allocation sample size.
1422 rate := MemProfileRate
1423 if rate > 0x3fffffff { // make 2*rate not overflow
1427 return uintptr(fastrandn(uint32(2 * rate)))
1432 type persistentAlloc struct {
1437 var globalAlloc struct {
1442 // persistentChunkSize is the number of bytes we allocate when we grow
1443 // a persistentAlloc.
1444 const persistentChunkSize = 256 << 10
1446 // persistentChunks is a list of all the persistent chunks we have
1447 // allocated. The list is maintained through the first word in the
1448 // persistent chunk. This is updated atomically.
1449 var persistentChunks *notInHeap
1451 // Wrapper around sysAlloc that can allocate small chunks.
1452 // There is no associated free operation.
1453 // Intended for things like function/type/debug-related persistent data.
1454 // If align is 0, uses default align (currently 8).
1455 // The returned memory will be zeroed.
1456 // sysStat must be non-nil.
1458 // Consider marking persistentalloc'd types not in heap by embedding
1459 // runtime/internal/sys.NotInHeap.
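//
// A hedged usage sketch (someType is a hypothetical example; the runtime
// calls this for long-lived, never-freed metadata):
//
//	p := persistentalloc(unsafe.Sizeof(someType{}), goarch.PtrSize, &memstats.other_sys)
//	t := (*someType)(p) // zeroed, never freed, and never scanned by the GC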
1460 func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
1462 systemstack(func() {
1463 p = persistentalloc1(size, align, sysStat)
1465 return unsafe.Pointer(p)
1468 // Must run on system stack because stack growth can (re)invoke it.
1472 func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
1474 maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
1478 throw("persistentalloc: size == 0")
1481 if align&(align-1) != 0 {
1482 throw("persistentalloc: align is not a power of 2")
1484 if align > _PageSize {
1485 throw("persistentalloc: align is too large")
1491 if size >= maxBlock {
1492 return (*notInHeap)(sysAlloc(size, sysStat))
1496 var persistent *persistentAlloc
1497 if mp != nil && mp.p != 0 {
1498 persistent = &mp.p.ptr().palloc
1500 lock(&globalAlloc.mutex)
1501 persistent = &globalAlloc.persistentAlloc
1503 persistent.off = alignUp(persistent.off, align)
1504 if persistent.off+size > persistentChunkSize || persistent.base == nil {
1505 persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
1506 if persistent.base == nil {
1507 if persistent == &globalAlloc.persistentAlloc {
1508 unlock(&globalAlloc.mutex)
1510 throw("runtime: cannot allocate memory")
1513 // Add the new chunk to the persistentChunks list.
1515 chunks := uintptr(unsafe.Pointer(persistentChunks))
1516 *(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
1517 if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
1521 persistent.off = alignUp(goarch.PtrSize, align)
1523 p := persistent.base.add(persistent.off)
1524 persistent.off += size
1526 if persistent == &globalAlloc.persistentAlloc {
1527 unlock(&globalAlloc.mutex)
1530 if sysStat != &memstats.other_sys {
1531 sysStat.add(int64(size))
1532 memstats.other_sys.add(-int64(size))
1537 // inPersistentAlloc reports whether p points to memory allocated by
1538 // persistentalloc. This must be nosplit because it is called by the
1539 // cgo checker code, which is called by the write barrier code.
1542 func inPersistentAlloc(p uintptr) bool {
1543 chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
1545 if p >= chunk && p < chunk+persistentChunkSize {
1548 chunk = *(*uintptr)(unsafe.Pointer(chunk))
1553 // linearAlloc is a simple linear allocator that pre-reserves a region
1554 // of memory and then optionally maps that region into the Ready state as needed.
1557 // The caller is responsible for locking.
1558 type linearAlloc struct {
1559 next uintptr // next free byte
1560 mapped uintptr // one byte past end of mapped space
1561 end uintptr // end of reserved space
1563 mapMemory bool // transition memory from Reserved to Ready if true
1566 func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
1567 if base+size < base {
1568 // Chop off the last byte. The runtime isn't prepared
1569 // to deal with situations where the bounds could overflow.
1570 // Leave that memory reserved, though, so we don't map it for something else.
1574 l.next, l.mapped = base, base
1576 l.mapMemory = mapMemory
1579 func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
1580 p := alignUp(l.next, align)
1585 if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
1587 // Transition from Reserved to Prepared to Ready.
1588 n := pEnd - l.mapped
1589 sysMap(unsafe.Pointer(l.mapped), n, sysStat)
1590 sysUsed(unsafe.Pointer(l.mapped), n, n)
1594 return unsafe.Pointer(p)
1597 // notInHeap is off-heap memory allocated by a lower-level allocator
1598 // like sysAlloc or persistentAlloc.
1600 // In general, it's better to use real types which embed
1601 // runtime/internal/sys.NotInHeap, but this serves as a generic type
1602 // for situations where that isn't possible (like in the allocators).
1604 // TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
1605 type notInHeap struct{ _ sys.NotInHeap }
1607 func (p *notInHeap) add(bytes uintptr) *notInHeap {
1608 return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
1611 // computeRZlog computes the size of the redzone.
1612 // Refer to the implementation of the compiler-rt.
1613 func computeRZlog(userSize uintptr) uintptr {
1615 case userSize <= (64 - 16):
1617 case userSize <= (128 - 32):
1619 case userSize <= (512 - 64):
1621 case userSize <= (4096 - 128):
1623 case userSize <= (1<<14)-256:
1625 case userSize <= (1<<15)-512:
1627 case userSize <= (1<<16)-1024: