1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
7 // This was originally based on tcmalloc, but has diverged quite a bit.
8 // http://goog-perftools.sourceforge.net/doc/tcmalloc.html
10 // The main allocator works in runs of pages.
11 // Small allocation sizes (up to and including 32 kB) are
12 // rounded to one of about 70 size classes, each of which
13 // has its own free set of objects of exactly that size.
14 // Any free page of memory can be split into a set of objects
15 // of one size class, which are then managed using a free bitmap.
17 // The allocator's data structures are:
19 // fixalloc: a free-list allocator for fixed-size off-heap objects,
20 // used to manage storage used by the allocator.
21 // mheap: the malloc heap, managed at page (8192-byte) granularity.
22 // mspan: a run of in-use pages managed by the mheap.
23 // mcentral: collects all spans of a given size class.
24 // mcache: a per-P cache of mspans with free space.
25 // mstats: allocation statistics.
27 // Allocating a small object proceeds up a hierarchy of caches:
29 // 1. Round the size up to one of the small size classes
30 // and look in the corresponding mspan in this P's mcache.
31 // Scan the mspan's free bitmap to find a free slot.
32 // If there is a free slot, allocate it.
33 // This can all be done without acquiring a lock.
35 // 2. If the mspan has no free slots, obtain a new mspan
36 // from the mcentral's list of mspans of the required size
37 // class that have free space.
38 // Obtaining a whole span amortizes the cost of locking the mcentral.
41 // 3. If the mcentral's mspan list is empty, obtain a run
42 // of pages from the mheap to use for the mspan.
44 // 4. If the mheap is empty or has no page runs large enough,
45 // allocate a new group of pages (at least 1MB) from the
46 // operating system. Allocating a large run of pages
47 // amortizes the cost of talking to the operating system.
49 // Sweeping an mspan and freeing objects on it proceeds up a similar hierarchy:
52 // 1. If the mspan is being swept in response to allocation, it
53 // is returned to the mcache to satisfy the allocation.
55 // 2. Otherwise, if the mspan still has allocated objects in it,
56 // it is placed on the mcentral free list for the mspan's size class.
59 // 3. Otherwise, if all objects in the mspan are free, the mspan's
60 // pages are returned to the mheap and the mspan is now dead.
62 // Allocating and freeing a large object uses the mheap
63 // directly, bypassing the mcache and mcentral.
65 // If mspan.needzero is false, then free object slots in the mspan are
66 // already zeroed. Otherwise if needzero is true, objects are zeroed as
67 // they are allocated. There are various benefits to delaying zeroing this way:
70 // 1. Stack frame allocation can avoid zeroing altogether.
72 // 2. It exhibits better temporal locality, since the program is
73 // probably about to write to the memory.
75 // 3. We don't zero pages that never get reused.
77 // Virtual memory layout
79 // The heap consists of a set of arenas, which are 64MB on 64-bit and
80 // 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
81 // aligned to the arena size.
83 // Each arena has an associated heapArena object that stores the
84 // metadata for that arena: the heap bitmap for all words in the arena
85 // and the span map for all pages in the arena. heapArena objects are
86 // themselves allocated off-heap.
88 // Since arenas are aligned, the address space can be viewed as a
89 // series of arena frames. The arena map (mheap_.arenas) maps from
90 // arena frame number to *heapArena, or nil for parts of the address
91 // space not backed by the Go heap. The arena map is structured as a
92 // two-level array consisting of an "L1" arena map and many "L2" arena
93 // maps; however, since arenas are large, on many architectures, the
94 // arena map consists of a single, large L2 map.
96 // The arena map covers the entire possible address space, allowing
97 // the Go heap to use any part of the address space. The allocator
98 // attempts to keep arenas contiguous so that large spans (and hence
99 // large objects) can cross arenas.
104 "runtime/internal/atomic"
105 "runtime/internal/math"
106 "runtime/internal/sys"
113 maxTinySize = _TinySize
114 tinySizeClass = _TinySizeClass
115 maxSmallSize = _MaxSmallSize
117 pageShift = _PageShift
120 // By construction, single page spans of the smallest object class
121 // have the most objects per span.
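// (With 8192-byte pages and a smallest size class of 8 bytes, this works out to 1024.)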
122 maxObjsPerSpan = pageSize / 8
124 concurrentSweep = _ConcurrentSweep
126 _PageSize = 1 << _PageShift
127 _PageMask = _PageSize - 1
129 // _64bit = 1 on 64-bit systems, 0 on 32-bit systems
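// (^uintptr(0) >> 63 is 1 when uintptr is 64 bits wide and 0 when it is 32 bits
// wide, so the expression below evaluates to 2/2 = 1 or 1/2 = 0.)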
130 _64bit = 1 << (^uintptr(0) >> 63) / 2
132 // Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
134 _TinySizeClass = int8(2)
136 _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc
138 // Per-P, per order stack segment cache size.
139 _StackCacheSize = 32 * 1024
141 // Number of orders that get caching. Order 0 is FixedStack
142 // and each successive order is twice as large.
143 // We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
144 // will be allocated directly.
145 // Since FixedStack is different on different systems, we
146 // must vary NumStackOrders to keep the same maximum cached size.
147 // OS | FixedStack | NumStackOrders
148 // -----------------+------------+---------------
149 // linux/darwin/bsd | 2KB | 4
150 // windows/32 | 4KB | 3
151 // windows/64 | 8KB | 2
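// For example, this evaluates to 4-8/4*0-1*0 = 4 on linux/amd64,
// 4-4/4*1 = 3 on windows/386, and 4-8/4*1 = 2 on windows/amd64,
// matching the table above.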
153 _NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9
155 // heapAddrBits is the number of bits in a heap address. On
156 // amd64, addresses are sign-extended beyond heapAddrBits. On
157 // other arches, they are zero-extended.
159 // On most 64-bit platforms, we limit this to 48 bits based on a
160 // combination of hardware and OS limitations.
162 // amd64 hardware limits addresses to 48 bits, sign-extended
163 // to 64 bits. Addresses where the top 16 bits are not either
164 // all 0 or all 1 are "non-canonical" and invalid. Because of
165 // these "negative" addresses, we offset addresses by 1<<47
166 // (arenaBaseOffset) on amd64 before computing indexes into
167 // the heap arenas index. In 2017, amd64 hardware added
168 // support for 57 bit addresses; however, currently only Linux
169 // supports this extension and the kernel will never choose an
170 // address above 1<<47 unless mmap is called with a hint
171 // address above 1<<47 (which we never do).
173 // arm64 hardware (as of ARMv8) limits user addresses to 48
174 // bits, in the range [0, 1<<48).
176 // ppc64, mips64, and s390x support arbitrary 64 bit addresses
177 // in hardware. On Linux, Go leans on stricter OS limits. Based
178 // on Linux's processor.h, the user address space is limited as
179 // follows on 64-bit architectures:
181 // Architecture Name Maximum Value (exclusive)
182 // ---------------------------------------------------------------------
183 // amd64 TASK_SIZE_MAX 0x007ffffffff000 (47 bit addresses)
184 // arm64 TASK_SIZE_64 0x01000000000000 (48 bit addresses)
185 // ppc64{,le} TASK_SIZE_USER64 0x00400000000000 (46 bit addresses)
186 // mips64{,le} TASK_SIZE64 0x00010000000000 (40 bit addresses)
187 // s390x TASK_SIZE 1<<64 (64 bit addresses)
189 // These limits may increase over time, but are currently at
190 // most 48 bits except on s390x. On all architectures, Linux
191 // starts placing mmap'd regions at addresses that are
192 // significantly below 48 bits, so even if it's possible to
193 // exceed Go's 48 bit limit, it's extremely unlikely in practice.
196 // On 32-bit platforms, we accept the full 32-bit address
197 // space because doing so is cheap.
198 // mips32 only has access to the low 2GB of virtual memory, so
199 // we further limit it to 31 bits.
201 // On ios/arm64, although 64-bit pointers are presumably
202 // available, pointers are truncated to 33 bits. Furthermore,
203 // only the top 4 GiB of the address space are actually available
204 // to the application, but we allow the whole 33 bits anyway for simplicity.
206 // TODO(mknyszek): Consider limiting it to 32 bits and using
207 // arenaBaseOffset to offset into the top 4 GiB.
209 // WebAssembly currently has a limit of 4GB linear memory.
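// Putting this together, the expression below evaluates to 48 on most 64-bit
// platforms, 32 on wasm and most 32-bit platforms, 31 on mips/mipsle, and
// 33 on ios/arm64.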
210 heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 33*sys.GoosIos*sys.GoarchArm64
212 // maxAlloc is the maximum size of an allocation. On 64-bit,
213 // it's theoretically possible to allocate 1<<heapAddrBits bytes. On
214 // 32-bit, however, this is one less than 1<<32 because the
215 // number of bytes in the address space doesn't actually fit in a uintptr.
217 maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1
219 // The number of bits in a heap address, the size of heap
220 // arenas, and the L1 and L2 arena map sizes are related by
222 // (1 << addr bits) = arena size * L1 entries * L2 entries
224 // Currently, we balance these as follows:
226 // Platform Addr bits Arena size L1 entries L2 entries
227 // -------------- --------- ---------- ---------- -----------
228 // */64-bit 48 64MB 1 4M (32MB)
229 // windows/64-bit 48 4MB 64 1M (8MB)
230 // */32-bit 32 4MB 1 1024 (4KB)
231 // */mips(le) 31 4MB 1 512 (2KB)
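// For example, on a typical 64-bit platform: 1<<48 bytes / 64MB per arena
// = 4M arena frames, and 4M 8-byte *heapArena pointers occupy 32MB. On
// windows/64-bit the 64M frames are split into 64 L1 entries, each pointing
// to an L2 map of 1M entries (8MB).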
233 // heapArenaBytes is the size of a heap arena. The heap
234 // consists of mappings of size heapArenaBytes, aligned to
235 // heapArenaBytes. The initial heap mapping is one arena.
237 // This is currently 64MB on 64-bit non-Windows and 4MB on
238 // 32-bit and on Windows. We use smaller arenas on Windows
239 // because all committed memory is charged to the process,
240 // even if it's not touched. Hence, for processes with small
241 // heaps, the mapped arena space needs to be commensurate.
242 // This is particularly important with the race detector,
243 // since it significantly amplifies the cost of committed memory.
245 heapArenaBytes = 1 << logHeapArenaBytes
247 // logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
248 // prefer using heapArenaBytes where possible (we need the
249 // constant to compute some other constants).
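// The expression below evaluates to 6+20 = 26 (64MB arenas) on 64-bit
// non-Windows, non-wasm platforms, and to 2+20 = 22 (4MB arenas) on Windows,
// 32-bit platforms, and wasm.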
250 logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoarchWasm)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (2+20)*sys.GoarchWasm
252 // heapArenaBitmapBytes is the size of each heap arena's bitmap.
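// The heap bitmap uses two bits per pointer-sized word, so this works out to
// (heapArenaBytes / PtrSize) * 2 / 8 bytes, e.g. 2MB per 64MB arena on
// 64-bit platforms.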
253 heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2)
255 pagesPerArena = heapArenaBytes / pageSize
257 // arenaL1Bits is the number of bits of the arena number
258 // covered by the first level arena map.
260 // This number should be small, since the first level arena
261 // map requires PtrSize*(1<<arenaL1Bits) of space in the
262 // binary's BSS. It can be zero, in which case the first level
263 // index is effectively unused. There is a performance benefit
264 // to this, since the generated code can be more efficient,
265 // but it comes at the cost of having a large L2 mapping.
267 // We use the L1 map on 64-bit Windows because the arena size
268 // is small, but the address space is still 48 bits, and
269 // there's a high cost to having a large L2.
270 arenaL1Bits = 6 * (_64bit * sys.GoosWindows)
272 // arenaL2Bits is the number of bits of the arena number
273 // covered by the second level arena index.
275 // The size of each arena map allocation is proportional to
276 // 1<<arenaL2Bits, so it's important that this not be too
277 // large. 48 bits leads to 32MB arena index allocations, which
278 // is about the practical threshold.
279 arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits
281 // arenaL1Shift is the number of bits to shift an arena frame
282 // number by to compute an index into the first level arena map.
283 arenaL1Shift = arenaL2Bits
285 // arenaBits is the total bits in a combined arena map index.
286 // This is split between the index into the L1 arena map and the index into the L2 arena map.
288 arenaBits = arenaL1Bits + arenaL2Bits
290 // arenaBaseOffset is the pointer value that corresponds to
291 // index 0 in the heap arena map.
293 // On amd64, the address space is 48 bits, sign extended to 64
294 // bits. This offset lets us handle "negative" addresses (or
295 // high addresses if viewed as unsigned).
297 // On aix/ppc64, this offset allows us to keep heapAddrBits at
298 // 48. Otherwise, it would have to be 60 in order to handle mmap addresses
299 // (in the range 0x0a00000000000000 - 0x0affffffffffffff). But in that
300 // case, the memory reserved in (s *pageAlloc).init for chunks
301 // causes significant slowdowns.
303 // On other platforms, the user address space is contiguous
304 // and starts at 0, so no offset is necessary.
305 arenaBaseOffset = 0xffff800000000000*sys.GoarchAmd64 + 0x0a00000000000000*sys.GoosAix
306 // A typed version of this constant that will make it into DWARF (for viewcore).
307 arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)
309 // Max number of threads to run garbage collection.
310 // 2, 3, and 4 are all plausible maximums depending
311 // on the hardware details of the machine. The garbage
312 // collector scales well to 32 cpus.
315 // minLegalPointer is the smallest possible legal pointer.
316 // This is the smallest possible architectural page size,
317 // since we assume that the first page is never mapped.
319 // This should agree with minZeroPage in the compiler.
320 minLegalPointer uintptr = 4096
323 // physPageSize is the size in bytes of the OS's physical pages.
324 // Mapping and unmapping operations must be done at multiples of physPageSize.
327 // This must be set by the OS init code (typically in osinit) before mallocinit.
329 var physPageSize uintptr
331 // physHugePageSize is the size in bytes of the OS's default physical huge
332 // page size whose allocation is opaque to the application. It is assumed
333 // and verified to be a power of two.
335 // If set, this must be set by the OS init code (typically in osinit) before
336 // mallocinit. However, setting it at all is optional, and leaving the default
337 // value is always safe (though potentially less efficient).
339 // Since physHugePageSize is always assumed to be a power of two,
340 // physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
341 // The purpose of physHugePageShift is to avoid doing divisions in
342 // performance critical functions.
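// For example, p / physHugePageSize can be computed as p >> physHugePageShift,
// and rounding p down to a huge page boundary can be written as
// p &^ (physHugePageSize - 1).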
344 physHugePageSize uintptr
345 physHugePageShift uint
348 // OS memory management abstraction layer
350 // Regions of the address space managed by the runtime may be in one of four
351 // states at any given time:
352 // 1) None - Unreserved and unmapped, the default state of any region.
353 // 2) Reserved - Owned by the runtime, but accessing it would cause a fault.
354 // Does not count against the process' memory footprint.
355 // 3) Prepared - Reserved, intended not to be backed by physical memory (though
356 // an OS may implement this lazily). Can transition efficiently to
357 // Ready. Accessing memory in such a region is undefined (may
358 // fault, may give back unexpected zeroes, etc.).
359 // 4) Ready - may be accessed safely.
361 // This set of states is more than is strictly necessary to support all the
362 // currently supported platforms. One could get by with just None, Reserved, and
363 // Ready. However, the Prepared state gives us flexibility for performance
364 // purposes. For example, on POSIX-y operating systems, Reserved is usually a
365 // private anonymous mmap'd region with PROT_NONE set, and to transition
366 // to Ready would require setting PROT_READ|PROT_WRITE. However the
367 // underspecification of Prepared lets us use just MADV_FREE to transition from
368 // Ready to Prepared. Thus, with the Prepared state, we can set the permission
369 // bits just once early on, and we can efficiently tell the OS that it's free to
370 // take pages away from us when we don't strictly need them.
372 // For each OS there is a common set of helpers defined that transition
373 // memory regions between these states. The helpers are as follows:
375 // sysAlloc transitions an OS-chosen region of memory from None to Ready.
376 // More specifically, it obtains a large chunk of zeroed memory from the
377 // operating system, typically on the order of a hundred kilobytes
378 // or a megabyte. This memory is always immediately available for use.
380 // sysFree transitions a memory region from any state to None. Therefore, it
381 // returns memory unconditionally. It is used if an out-of-memory error has been
382 // detected midway through an allocation or to carve out an aligned section of
383 // the address space. It is okay if sysFree is a no-op only if sysReserve always
384 // returns a memory region aligned to the heap allocator's alignment restrictions.
387 // sysReserve transitions a memory region from None to Reserved. It reserves
388 // address space in such a way that it would cause a fatal fault upon access
389 // (either via permissions or not committing the memory). Such a reservation is
390 // thus never backed by physical memory.
391 // If the pointer passed to it is non-nil, the caller wants the
392 // reservation there, but sysReserve can still choose another
393 // location if that one is unavailable.
394 // NOTE: sysReserve returns OS-aligned memory, but the heap allocator
395 // may use larger alignment, so the caller must be careful to realign the
396 // memory obtained by sysReserve.
398 // sysMap transitions a memory region from Reserved to Prepared. It ensures the
399 // memory region can be efficiently transitioned to Ready.
401 // sysUsed transitions a memory region from Prepared to Ready. It notifies the
402 // operating system that the memory region is needed and ensures that the region
403 // may be safely accessed. This is typically a no-op on systems that don't have
404 // an explicit commit step and hard over-commit limits, but is critical on
405 // Windows, for example.
407 // sysUnused transitions a memory region from Ready to Prepared. It notifies the
408 // operating system that the physical pages backing this memory region are no
409 // longer needed and can be reused for other purposes. The contents of a
410 // sysUnused memory region are considered forfeit and the region must not be
411 // accessed again until sysUsed is called.
413 // sysFault transitions a memory region from Ready or Prepared to Reserved. It
414 // marks a region such that it will always fault if accessed. Used only for
415 // debugging the runtime.
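// Summarizing the helpers above, the possible transitions are:
//
// None --sysAlloc--> Ready
// None --sysReserve--> Reserved
// Reserved --sysMap--> Prepared
// Prepared --sysUsed--> Ready
// Ready --sysUnused--> Prepared
// Ready or Prepared --sysFault--> Reserved
// any state --sysFree--> None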
418 if class_to_size[_TinySizeClass] != _TinySize {
419 throw("bad TinySizeClass")
424 if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
425 // heapBits expects modular arithmetic on bitmap
426 // addresses to work.
427 throw("heapArenaBitmapBytes not a power of 2")
430 // Copy class sizes out for statistics table.
431 for i := range class_to_size {
432 memstats.by_size[i].size = uint32(class_to_size[i])
435 // Check physPageSize.
436 if physPageSize == 0 {
437 // The OS init code failed to fetch the physical page size.
438 throw("failed to get system page size")
440 if physPageSize > maxPhysPageSize {
441 print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
442 throw("bad system page size")
444 if physPageSize < minPhysPageSize {
445 print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
446 throw("bad system page size")
448 if physPageSize&(physPageSize-1) != 0 {
449 print("system page size (", physPageSize, ") must be a power of 2\n")
450 throw("bad system page size")
452 if physHugePageSize&(physHugePageSize-1) != 0 {
453 print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
454 throw("bad system huge page size")
456 if physHugePageSize > maxPhysHugePageSize {
457 // physHugePageSize is greater than the maximum supported huge page size.
458 // Don't throw here, like in the other cases, since a system configured
459 // in this way isn't wrong; we just don't have the code to support it.
460 // Instead, silently set the huge page size to zero.
463 if physHugePageSize != 0 {
464 // Since physHugePageSize is a power of 2, it suffices to increase
465 // physHugePageShift until 1<<physHugePageShift == physHugePageSize.
466 for 1<<physHugePageShift != physHugePageSize {
470 if pagesPerArena%pagesPerSpanRoot != 0 {
471 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n")
472 throw("bad pagesPerSpanRoot")
474 if pagesPerArena%pagesPerReclaimerChunk != 0 {
475 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
476 throw("bad pagesPerReclaimerChunk")
479 // Initialize the heap.
481 mcache0 = allocmcache()
482 lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas)
483 lockInit(&proflock, lockRankProf)
484 lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)
486 // Create initial arena growth hints.
487 if sys.PtrSize == 8 {
488 // On a 64-bit machine, we pick the following hints
491 // 1. Starting from the middle of the address space
492 // makes it easier to grow out a contiguous range
493 // without running into some other mapping.
495 // 2. This makes Go heap addresses more easily
496 // recognizable when debugging.
498 // 3. Stack scanning in gccgo is still conservative,
499 // so it's important that addresses be distinguishable from other data.
502 // Starting at 0x00c0 means that the valid memory addresses
503 // will begin with 0x00c0, 0x00c1, ...
504 // In little-endian, that's c0 00, c1 00, ... None of those are valid
505 // UTF-8 sequences, and they are otherwise as far away from
506 // ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
507 // addresses. An earlier attempt to use 0x11f8 caused out of memory errors
508 // on OS X during thread allocations. 0x00c0 causes conflicts with
509 // AddressSanitizer which reserves all memory up to 0x0100.
510 // These choices reduce the odds of a conservative garbage collector
511 // not collecting memory because some non-pointer block of memory
512 // had a bit pattern that matched a memory address.
514 // However, on arm64, we ignore all this advice above and slam the
515 // allocation at 0x40 << 32 because when using 4k pages with 3-level
516 // translation buffers, the user address space is limited to 39 bits.
517 // On ios/arm64, the address space is even smaller.
519 // On AIX, mmap starts at 0x0A00000000000000 for 64-bit.
521 for i := 0x7f; i >= 0; i-- {
525 // The TSAN runtime requires the heap
526 // to be in the range [0x00c000000000,
528 p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
529 if p >= uintptrMask&0x00e000000000 {
532 case GOARCH == "arm64" && GOOS == "ios":
533 p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
534 case GOARCH == "arm64":
535 p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
538 // We don't use addresses directly after 0x0A00000000000000
539 // to avoid collisions with other mmaps done by non-Go programs.
542 p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
544 p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
546 hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
548 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
551 // On a 32-bit machine, we're much more concerned
552 // about keeping the usable heap contiguous.
555 // 1. We reserve space for all heapArenas up front so
556 // they don't get interleaved with the heap. They're
557 // ~258MB, so this isn't too bad. (We could reserve a
558 // smaller amount of space up front if this is a problem.)
561 // 2. We hint the heap to start right above the end of
562 // the binary so we have the best chance of keeping it contiguous.
565 // 3. We try to stake out a reasonably large initial heap reservation.
568 const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
569 meta := uintptr(sysReserve(nil, arenaMetaSize))
571 mheap_.heapArenaAlloc.init(meta, arenaMetaSize)
574 // We want to start the arena low, but if we're linked
575 // against C code, it's possible global constructors
576 // have called malloc and adjusted the process' brk.
577 // Query the brk so we can avoid trying to map the
578 // region over it (which will cause the kernel to put
579 // the region somewhere else, likely at a high address).
583 // If we ask for the end of the data segment but the
584 // operating system requires a little more space
585 // before we can start allocating, it will give out a
586 // slightly higher pointer. Except QEMU, which is
587 // buggy, as usual: it won't adjust the pointer
588 // upward. So adjust it upward a little bit ourselves:
589 // 1/4 MB to get away from the running binary image.
590 p := firstmoduledata.end
594 if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
595 p = mheap_.heapArenaAlloc.end
597 p = alignUp(p+(256<<10), heapArenaBytes)
598 // Because we're worried about fragmentation on
599 // 32-bit, we try to make a large initial reservation.
600 arenaSizes := []uintptr{
605 for _, arenaSize := range arenaSizes {
606 a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
608 mheap_.arena.init(uintptr(a), size)
609 p = mheap_.arena.end // For hint below
613 hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
615 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
619 // sysAlloc allocates heap arena space for at least n bytes. The
620 // returned pointer is always heapArenaBytes-aligned and backed by
621 // h.arenas metadata. The returned size is always a multiple of
622 // heapArenaBytes. sysAlloc returns nil on failure.
623 // There is no corresponding free function.
625 // sysAlloc returns a memory region in the Prepared state. This region must
626 // be transitioned to Ready before use.
629 func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
630 assertLockHeld(&h.lock)
632 n = alignUp(n, heapArenaBytes)
634 // First, try the arena pre-reservation.
635 v = h.arena.alloc(n, heapArenaBytes, &memstats.heap_sys)
641 // Try to grow the heap at a hint address.
642 for h.arenaHints != nil {
649 // We can't use this, so don't ask.
651 } else if arenaIndex(p+n-1) >= 1<<arenaBits {
652 // Outside addressable heap. Can't use.
655 v = sysReserve(unsafe.Pointer(p), n)
658 // Success. Update the hint.
666 // Failed. Discard this hint and try the next.
668 // TODO: This would be cleaner if sysReserve could be
669 // told to only return the requested address. In
670 // particular, this is already how Windows behaves, so
671 // it would simplify things there.
675 h.arenaHints = hint.next
676 h.arenaHintAlloc.free(unsafe.Pointer(hint))
681 // The race detector assumes the heap lives in
682 // [0x00c000000000, 0x00e000000000), but we
683 // just ran out of hints in this region. Give up.
685 throw("too many address space collisions for -race mode")
688 // All of the hints failed, so we'll take any
689 // (sufficiently aligned) address the kernel will give us.
691 v, size = sysReserveAligned(nil, n, heapArenaBytes)
696 // Create new hints for extending this region.
697 hint := (*arenaHint)(h.arenaHintAlloc.alloc())
698 hint.addr, hint.down = uintptr(v), true
699 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
700 hint = (*arenaHint)(h.arenaHintAlloc.alloc())
701 hint.addr = uintptr(v) + size
702 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
705 // Check for bad pointers or pointers we can't use.
710 bad = "region exceeds uintptr range"
711 } else if arenaIndex(p) >= 1<<arenaBits {
712 bad = "base outside usable address space"
713 } else if arenaIndex(p+size-1) >= 1<<arenaBits {
714 bad = "end outside usable address space"
717 // This should be impossible on most architectures,
718 // but it would be really confusing to debug.
719 print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
720 throw("memory reservation exceeds address space limit")
724 if uintptr(v)&(heapArenaBytes-1) != 0 {
725 throw("misrounded allocation in sysAlloc")
728 // Transition from Reserved to Prepared.
729 sysMap(v, size, &memstats.heap_sys)
732 // Create arena metadata.
733 for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
734 l2 := h.arenas[ri.l1()]
736 // Allocate an L2 arena map.
737 l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), sys.PtrSize, nil))
739 throw("out of memory allocating heap arena map")
741 atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
744 if l2[ri.l2()] != nil {
745 throw("arena already initialized")
748 r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
750 r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
752 throw("out of memory allocating heap arena metadata")
756 // Add the arena to the arenas list.
757 if len(h.allArenas) == cap(h.allArenas) {
758 size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize
762 newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gcMiscSys))
764 throw("out of memory allocating allArenas")
766 oldSlice := h.allArenas
767 *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)}
768 copy(h.allArenas, oldSlice)
769 // Do not free the old backing array because
770 // there may be concurrent readers. Since we
771 // double the array each time, this can lead
772 // to at most 2x waste.
774 h.allArenas = h.allArenas[:len(h.allArenas)+1]
775 h.allArenas[len(h.allArenas)-1] = ri
777 // Store atomically just in case an object from the
778 // new heap arena becomes visible before the heap lock
779 // is released (which shouldn't happen, but there's
780 // little downside to this).
781 atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
784 // Tell the race detector about the new heap memory.
786 racemapshadow(v, size)
792 // sysReserveAligned is like sysReserve, but the returned pointer is
793 // aligned to align bytes. It may reserve either n or n+align bytes,
794 // so it returns the size that was reserved.
795 func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
796 // Since the alignment is rather large in uses of this
797 // function, we're not likely to get it by chance, so we ask
798 // for a larger region and remove the parts we don't need.
801 p := uintptr(sysReserve(v, size+align))
805 case p&(align-1) == 0:
806 // We got lucky and got an aligned region, so we can
807 // use the whole thing.
808 return unsafe.Pointer(p), size + align
809 case GOOS == "windows":
810 // On Windows we can't release pieces of a
811 // reservation, so we release the whole thing and
812 // re-reserve the aligned sub-region. This may race,
813 // so we may have to try again.
814 sysFree(unsafe.Pointer(p), size+align, nil)
815 p = alignUp(p, align)
816 p2 := sysReserve(unsafe.Pointer(p), size)
817 if p != uintptr(p2) {
818 // Must have raced. Try again.
819 sysFree(p2, size, nil)
820 if retries++; retries == 100 {
821 throw("failed to allocate aligned heap memory; too many retries")
828 // Trim off the unaligned parts.
829 pAligned := alignUp(p, align)
830 sysFree(unsafe.Pointer(p), pAligned-p, nil)
831 end := pAligned + size
832 endLen := (p + size + align) - end
834 sysFree(unsafe.Pointer(end), endLen, nil)
836 return unsafe.Pointer(pAligned), size
840 // base address for all 0-byte allocations
843 // nextFreeFast returns the next free object if one is quickly available.
844 // Otherwise it returns 0.
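// (s.allocCache is a 64-bit window of the span's allocation bitmap starting at
// s.freeindex, stored so that 1 bits mark free slots; Ctz64 therefore locates
// the next free slot without consulting the bitmap itself.)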
845 func nextFreeFast(s *mspan) gclinkptr {
846 theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
848 result := s.freeindex + uintptr(theBit)
849 if result < s.nelems {
850 freeidx := result + 1
851 if freeidx%64 == 0 && freeidx != s.nelems {
854 s.allocCache >>= uint(theBit + 1)
855 s.freeindex = freeidx
857 return gclinkptr(result*s.elemsize + s.base())
863 // nextFree returns the next free object from the cached span if one is available.
864 // Otherwise it refills the cache with a span with an available object and
865 // returns that object along with a flag indicating that this was a heavy
866 // weight allocation. If it is a heavy weight allocation, the caller must
867 // determine whether a new GC cycle needs to be started or, if the GC is active,
868 // whether this goroutine needs to assist the GC.
870 // Must run in a non-preemptible context since otherwise the owner of c could change.
872 func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
875 freeIndex := s.nextFreeIndex()
876 if freeIndex == s.nelems {
878 if uintptr(s.allocCount) != s.nelems {
879 println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
880 throw("s.allocCount != s.nelems && freeIndex == s.nelems")
886 freeIndex = s.nextFreeIndex()
889 if freeIndex >= s.nelems {
890 throw("freeIndex is not valid")
893 v = gclinkptr(freeIndex*s.elemsize + s.base())
895 if uintptr(s.allocCount) > s.nelems {
896 println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
897 throw("s.allocCount > s.nelems")
902 // Allocate an object of size bytes.
903 // Small objects are allocated from the per-P cache's free lists.
904 // Large objects (> 32 kB) are allocated straight from the heap.
905 func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
906 if gcphase == _GCmarktermination {
907 throw("mallocgc called with gcphase == _GCmarktermination")
911 return unsafe.Pointer(&zerobase)
918 // TODO(austin): This should be just
919 // align = uintptr(typ.align)
920 // but that's only 4 on 32-bit platforms,
921 // even if there's a uint64 field in typ (see #599).
922 // This causes 64-bit atomic accesses to panic.
923 // Hence, we use stricter alignment that matches
924 // the normal allocator better.
927 } else if size&3 == 0 {
929 } else if size&1 == 0 {
935 return persistentalloc(size, align, &memstats.other_sys)
938 if inittrace.active && inittrace.id == getg().goid {
939 // Init functions are executed sequentially in a single goroutine.
940 inittrace.allocs += 1
944 // assistG is the G to charge for this allocation, or nil if
945 // GC is not currently active.
947 if gcBlackenEnabled != 0 {
948 // Charge the current user G for this allocation.
950 if assistG.m.curg != nil {
951 assistG = assistG.m.curg
953 // Charge the allocation against the G. We'll account
954 // for internal fragmentation at the end of mallocgc.
955 assistG.gcAssistBytes -= int64(size)
957 if assistG.gcAssistBytes < 0 {
958 // This G is in debt. Assist the GC to correct
959 // this before allocating. This must happen
960 // before disabling preemption.
961 gcAssistAlloc(assistG)
965 // Set mp.mallocing to keep from being preempted by GC.
967 if mp.mallocing != 0 {
968 throw("malloc deadlock")
970 if mp.gsignal == getg() {
971 throw("malloc during signal")
975 shouldhelpgc := false
979 throw("mallocgc called without a P or outside bootstrapping")
983 noscan := typ == nil || typ.ptrdata == 0
984 if size <= maxSmallSize {
985 if noscan && size < maxTinySize {
988 // The tiny allocator combines several tiny allocation requests
989 // into a single memory block. The resulting memory block
990 // is freed when all subobjects are unreachable. The subobjects
991 // must be noscan (don't have pointers); this ensures that
992 // the amount of potentially wasted memory is bounded.
994 // The size of the memory block used for combining (maxTinySize) is tunable.
995 // The current setting is 16 bytes, which implies at most 2x worst case memory
996 // wastage (when all but one subobject are unreachable).
997 // 8 bytes would result in no wastage at all, but provides fewer
998 // opportunities for combining.
999 // 32 bytes provides more opportunities for combining,
1000 // but can lead to 4x worst case wastage.
1001 // The best case saving is 8x regardless of block size.
1003 // Objects obtained from tiny allocator must not be freed explicitly.
1004 // So when an object will be freed explicitly, we ensure that
1005 // its size >= maxTinySize.
1007 // SetFinalizer has a special case for objects potentially coming
1008 // from the tiny allocator; in such a case it allows setting finalizers
1009 // for an inner byte of a memory block.
1011 // The main targets of the tiny allocator are small strings and
1012 // standalone escaping variables. On a json benchmark
1013 // the allocator reduces the number of allocations by ~12% and
1014 // reduces the heap size by ~20%.
1016 // Align tiny pointer for required (conservative) alignment.
1018 off = alignUp(off, 8)
1019 } else if sys.PtrSize == 4 && size == 12 {
1020 // Conservatively align 12-byte objects to 8 bytes on 32-bit
1021 // systems so that objects whose first field is a 64-bit
1022 // value are aligned to 8 bytes and do not cause a fault on
1023 // atomic access. See issue 37262.
1024 // TODO(mknyszek): Remove this workaround if/when issue 36606 is resolved.
1026 off = alignUp(off, 8)
1027 } else if size&3 == 0 {
1028 off = alignUp(off, 4)
1029 } else if size&1 == 0 {
1030 off = alignUp(off, 2)
1032 if off+size <= maxTinySize && c.tiny != 0 {
1033 // The object fits into existing tiny block.
1034 x = unsafe.Pointer(c.tiny + off)
1035 c.tinyoffset = off + size
1041 // Allocate a new maxTinySize block.
1042 span = c.alloc[tinySpanClass]
1043 v := nextFreeFast(span)
1045 v, span, shouldhelpgc = c.nextFree(tinySpanClass)
1047 x = unsafe.Pointer(v)
1048 (*[2]uint64)(x)[0] = 0
1049 (*[2]uint64)(x)[1] = 0
1050 // See if we need to replace the existing tiny block with the new one
1051 // based on the amount of remaining free space.
1052 if size < c.tinyoffset || c.tiny == 0 {
1059 if size <= smallSizeMax-8 {
1060 sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
1062 sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
1064 size = uintptr(class_to_size[sizeclass])
1065 spc := makeSpanClass(sizeclass, noscan)
1067 v := nextFreeFast(span)
1069 v, span, shouldhelpgc = c.nextFree(spc)
1071 x = unsafe.Pointer(v)
1072 if needzero && span.needzero != 0 {
1073 memclrNoHeapPointers(unsafe.Pointer(v), size)
1078 span = c.allocLarge(size, needzero, noscan)
1081 x = unsafe.Pointer(span.base())
1082 size = span.elemsize
1085 var scanSize uintptr
1087 // If allocating a defer+arg block, now that we've picked a malloc size
1088 // large enough to hold everything, cut the "asked for" size down to
1089 // just the defer header, so that the GC bitmap will record the arg block
1090 // as containing nothing at all (as if it were unused space at the end of
1091 // a malloc block caused by size rounding).
1092 // The defer arg areas are scanned as part of scanstack.
1093 if typ == deferType {
1094 dataSize = unsafe.Sizeof(_defer{})
1096 heapBitsSetType(uintptr(x), size, dataSize, typ)
1097 if dataSize > typ.size {
1098 // Array allocation. If there are any
1099 // pointers, GC has to scan to the last element.
1101 if typ.ptrdata != 0 {
1102 scanSize = dataSize - typ.size + typ.ptrdata
1105 scanSize = typ.ptrdata
1107 c.scanAlloc += scanSize
1110 // Ensure that the stores above that initialize x to
1111 // type-safe memory and set the heap bits occur before
1112 // the caller can make x observable to the garbage
1113 // collector. Otherwise, on weakly ordered machines,
1114 // the garbage collector could follow a pointer to x,
1115 // but see uninitialized memory or stale heap bits.
1116 publicationBarrier()
1118 // Allocate black during GC.
1119 // All slots hold nil so no scanning is needed.
1120 // This may be racing with GC so do it atomically if there can be
1121 // a race marking the bit.
1122 if gcphase != _GCoff {
1123 gcmarknewobject(span, uintptr(x), size, scanSize)
1138 if debug.allocfreetrace != 0 {
1139 tracealloc(x, size, typ)
1142 if inittrace.active && inittrace.id == getg().goid {
1143 // Init functions are executed sequentially in a single goroutine.
1144 inittrace.bytes += uint64(size)
1148 if rate := MemProfileRate; rate > 0 {
1149 if rate != 1 && size < c.nextSample {
1150 c.nextSample -= size
1153 profilealloc(mp, x, size)
1159 // Account for internal fragmentation in the assist
1160 // debt now that we know it.
1161 assistG.gcAssistBytes -= int64(size - dataSize)
1165 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
1173 // implementation of new builtin
1174 // compiler (both frontend and SSA backend) knows the signature of this function.
1176 func newobject(typ *_type) unsafe.Pointer {
1177 return mallocgc(typ.size, typ, true)
1180 //go:linkname reflect_unsafe_New reflect.unsafe_New
1181 func reflect_unsafe_New(typ *_type) unsafe.Pointer {
1182 return mallocgc(typ.size, typ, true)
1185 //go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
1186 func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
1187 return mallocgc(typ.size, typ, true)
1190 // newarray allocates an array of n elements of type typ.
1191 func newarray(typ *_type, n int) unsafe.Pointer {
1193 return mallocgc(typ.size, typ, true)
1195 mem, overflow := math.MulUintptr(typ.size, uintptr(n))
1196 if overflow || mem > maxAlloc || n < 0 {
1197 panic(plainError("runtime: allocation size out of range"))
1199 return mallocgc(mem, typ, true)
1202 //go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
1203 func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
1204 return newarray(typ, n)
1207 func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
1210 throw("profilealloc called without a P or outside bootstrapping")
1212 c.nextSample = nextSample()
1213 mProf_Malloc(x, size)
1216 // nextSample returns the next sampling point for heap profiling. The goal is
1217 // to sample allocations on average every MemProfileRate bytes, but with a
1218 // completely random distribution over the allocation timeline; this
1219 // corresponds to a Poisson process with parameter MemProfileRate. In Poisson
1220 // processes, the distance between two samples follows the exponential
1221 // distribution (exp(MemProfileRate)), so the best return value is a random
1222 // number taken from an exponential distribution whose mean is MemProfileRate.
1223 func nextSample() uintptr {
1224 if MemProfileRate == 1 {
1225 // Callers assign our return value to
1226 // mcache.next_sample, but next_sample is not used
1227 // when the rate is 1. So avoid the math below and
1228 // just return something.
1231 if GOOS == "plan9" {
1232 // Plan 9 doesn't support floating point in note handler.
1233 if g := getg(); g == g.m.gsignal {
1234 return nextSampleNoFP()
1238 return uintptr(fastexprand(MemProfileRate))
1241 // fastexprand returns a random number from an exponential distribution with
1242 // the specified mean.
1243 func fastexprand(mean int) int32 {
1244 // Avoid overflow. Maximum possible step is
1245 // -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
1247 case mean > 0x7000000:
1253 // Take a random sample of the exponential distribution with the given mean.
1254 // The probability density function is (1/mean)*exp(-x/mean), so the CDF is
1255 // p = 1 - exp(-x/mean), so
1256 // q = 1 - p == exp(-x/mean)
1257 // log_e(q) = -x/mean
1259 // x = -log_e(q) * mean
1260 // x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency
1261 const randomBitCount = 26
1262 q := fastrand()%(1<<randomBitCount) + 1
1263 qlog := fastlog2(float64(q)) - randomBitCount
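// qlog is log_2(q / 2^randomBitCount), the log of a uniform sample in (0, 1],
// so it is at most 0 (up to fastlog2's approximation error) and the product
// below is non-negative.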
1267 const minusLog2 = -0.6931471805599453 // -ln(2)
1268 return int32(qlog*(minusLog2*float64(mean))) + 1
1271 // nextSampleNoFP is similar to nextSample, but uses older,
1272 // simpler code to avoid floating point.
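// It returns a value chosen uniformly in [0, 2*rate), whose mean is the
// profiling rate.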
1273 func nextSampleNoFP() uintptr {
1274 // Set first allocation sample size.
1275 rate := MemProfileRate
1276 if rate > 0x3fffffff { // make 2*rate not overflow
1280 return uintptr(fastrand() % uint32(2*rate))
1285 type persistentAlloc struct {
1290 var globalAlloc struct {
1295 // persistentChunkSize is the number of bytes we allocate when we grow
1296 // a persistentAlloc.
1297 const persistentChunkSize = 256 << 10
1299 // persistentChunks is a list of all the persistent chunks we have
1300 // allocated. The list is maintained through the first word in the
1301 // persistent chunk. This is updated atomically.
1302 var persistentChunks *notInHeap
1304 // Wrapper around sysAlloc that can allocate small chunks.
1305 // There is no associated free operation.
1306 // Intended for things like function/type/debug-related persistent data.
1307 // If align is 0, uses default align (currently 8).
1308 // The returned memory will be zeroed.
1310 // Consider marking persistentalloc'd types go:notinheap.
1311 func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
1313 systemstack(func() {
1314 p = persistentalloc1(size, align, sysStat)
1316 return unsafe.Pointer(p)
1319 // Must run on system stack because stack growth can (re)invoke it.
1322 func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
1324 maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
1328 throw("persistentalloc: size == 0")
1331 if align&(align-1) != 0 {
1332 throw("persistentalloc: align is not a power of 2")
1334 if align > _PageSize {
1335 throw("persistentalloc: align is too large")
1341 if size >= maxBlock {
1342 return (*notInHeap)(sysAlloc(size, sysStat))
1346 var persistent *persistentAlloc
1347 if mp != nil && mp.p != 0 {
1348 persistent = &mp.p.ptr().palloc
1350 lock(&globalAlloc.mutex)
1351 persistent = &globalAlloc.persistentAlloc
1353 persistent.off = alignUp(persistent.off, align)
1354 if persistent.off+size > persistentChunkSize || persistent.base == nil {
1355 persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
1356 if persistent.base == nil {
1357 if persistent == &globalAlloc.persistentAlloc {
1358 unlock(&globalAlloc.mutex)
1360 throw("runtime: cannot allocate memory")
1363 // Add the new chunk to the persistentChunks list.
1365 chunks := uintptr(unsafe.Pointer(persistentChunks))
1366 *(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
1367 if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
1371 persistent.off = alignUp(sys.PtrSize, align)
1373 p := persistent.base.add(persistent.off)
1374 persistent.off += size
1376 if persistent == &globalAlloc.persistentAlloc {
1377 unlock(&globalAlloc.mutex)
1380 if sysStat != &memstats.other_sys {
1381 sysStat.add(int64(size))
1382 memstats.other_sys.add(-int64(size))
1387 // inPersistentAlloc reports whether p points to memory allocated by
1388 // persistentalloc. This must be nosplit because it is called by the
1389 // cgo checker code, which is called by the write barrier code.
1391 func inPersistentAlloc(p uintptr) bool {
1392 chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
1394 if p >= chunk && p < chunk+persistentChunkSize {
1397 chunk = *(*uintptr)(unsafe.Pointer(chunk))
1402 // linearAlloc is a simple linear allocator that pre-reserves a region
1403 // of memory and then maps that region into the Ready state as needed. The
1404 // caller is responsible for locking.
1405 type linearAlloc struct {
1406 next uintptr // next free byte
1407 mapped uintptr // one byte past end of mapped space
1408 end uintptr // end of reserved space
1411 func (l *linearAlloc) init(base, size uintptr) {
1412 if base+size < base {
1413 // Chop off the last byte. The runtime isn't prepared
1414 // to deal with situations where the bounds could overflow.
1415 // Leave that memory reserved, though, so we don't map it for something else.
1419 l.next, l.mapped = base, base
1423 func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
1424 p := alignUp(l.next, align)
1429 if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
1430 // Transition from Reserved to Prepared to Ready.
1431 sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
1432 sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
1435 return unsafe.Pointer(p)
1438 // notInHeap is off-heap memory allocated by a lower-level allocator
1439 // like sysAlloc or persistentAlloc.
1441 // In general, it's better to use real types marked as go:notinheap,
1442 // but this serves as a generic type for situations where that isn't
1443 // possible (like in the allocators).
1445 // TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
1448 type notInHeap struct{}
1450 func (p *notInHeap) add(bytes uintptr) *notInHeap {
1451 return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))