1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
7 // This was originally based on tcmalloc, but has diverged quite a bit.
8 // http://goog-perftools.sourceforge.net/doc/tcmalloc.html
10 // The main allocator works in runs of pages.
11 // Small allocation sizes (up to and including 32 kB) are
12 // rounded to one of about 70 size classes, each of which
13 // has its own free set of objects of exactly that size.
14 // Any free page of memory can be split into a set of objects
15 // of one size class, which are then managed using a free bitmap.
17 // The allocator's data structures are:
19 // fixalloc: a free-list allocator for fixed-size off-heap objects,
20 // used to manage storage used by the allocator.
21 // mheap: the malloc heap, managed at page (8192-byte) granularity.
22 // mspan: a run of in-use pages managed by the mheap.
23 // mcentral: collects all spans of a given size class.
24 // mcache: a per-P cache of mspans with free space.
25 // mstats: allocation statistics.
27 // Allocating a small object proceeds up a hierarchy of caches:
29 // 1. Round the size up to one of the small size classes
30 // and look in the corresponding mspan in this P's mcache.
31 // Scan the mspan's free bitmap to find a free slot.
32 // If there is a free slot, allocate it.
33 // This can all be done without acquiring a lock.
35 // 2. If the mspan has no free slots, obtain a new mspan
36 // from the mcentral's list of mspans of the required size
37 // class that have free space.
38 // Obtaining a whole span amortizes the cost of locking the mcentral.
41 // 3. If the mcentral's mspan list is empty, obtain a run
42 // of pages from the mheap to use for the mspan.
44 // 4. If the mheap is empty or has no page runs large enough,
45 // allocate a new group of pages (at least 1MB) from the
46 // operating system. Allocating a large run of pages
47 // amortizes the cost of talking to the operating system.
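//
// As a concrete (illustrative) example of this path: a 33-byte noscan
// allocation is rounded up to the 48-byte size class (step 1) and is normally
// satisfied entirely from the per-P mcache; only when that cached span has no
// free slots does the request fall through to the mcentral (step 2), the
// mheap (step 3), or the operating system (step 4). The exact size classes
// are generated in sizeclasses.go.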
49 // Sweeping an mspan and freeing objects on it proceeds up a similar hierarchy:
52 // 1. If the mspan is being swept in response to allocation, it
53 // is returned to the mcache to satisfy the allocation.
55 // 2. Otherwise, if the mspan still has allocated objects in it,
56 // it is placed on the mcentral free list for the mspan's size class.
59 // 3. Otherwise, if all objects in the mspan are free, the mspan's
60 // pages are returned to the mheap and the mspan is now dead.
62 // Allocating and freeing a large object uses the mheap
63 // directly, bypassing the mcache and mcentral.
65 // If mspan.needzero is false, then free object slots in the mspan are
66 // already zeroed. Otherwise if needzero is true, objects are zeroed as
67 // they are allocated. There are various benefits to delaying zeroing this way:
70 // 1. Stack frame allocation can avoid zeroing altogether.
72 // 2. It exhibits better temporal locality, since the program is
73 // probably about to write to the memory.
75 // 3. We don't zero pages that never get reused.
77 // Virtual memory layout
79 // The heap consists of a set of arenas, which are 64MB on 64-bit and
80 // 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
81 // aligned to the arena size.
83 // Each arena has an associated heapArena object that stores the
84 // metadata for that arena: the heap bitmap for all words in the arena
85 // and the span map for all pages in the arena. heapArena objects are
86 // themselves allocated off-heap.
88 // Since arenas are aligned, the address space can be viewed as a
89 // series of arena frames. The arena map (mheap_.arenas) maps from
90 // arena frame number to *heapArena, or nil for parts of the address
91 // space not backed by the Go heap. The arena map is structured as a
92 // two-level array consisting of a "L1" arena map and many "L2" arena
93 // maps; however, since arenas are large, on many architectures, the
94 // arena map consists of a single, large L2 map.
96 // The arena map covers the entire possible address space, allowing
97 // the Go heap to use any part of the address space. The allocator
98 // attempts to keep arenas contiguous so that large spans (and hence
99 // large objects) can cross arenas.
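//
// As a rough illustration (simplified from the arenaIndex, arenaIdx.l1, and
// arenaIdx.l2 helpers in mheap.go), looking up the metadata for an address p
// amounts to:
//
//	ai := arenaIdx((p - arenaBaseOffset) / heapArenaBytes) // arena frame number
//	l2 := mheap_.arenas[ai.l1()]                            // L2 arena map, or nil
//	ha := l2[ai.l2()]                                        // *heapArena, or nil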
106 "runtime/internal/atomic"
107 "runtime/internal/math"
108 "runtime/internal/sys"
115 maxTinySize = _TinySize
116 tinySizeClass = _TinySizeClass
117 maxSmallSize = _MaxSmallSize
119 pageShift = _PageShift
122 // By construction, single page spans of the smallest object class
123 // have the most objects per span.
124 maxObjsPerSpan = pageSize / 8
126 concurrentSweep = _ConcurrentSweep
128 _PageSize = 1 << _PageShift
129 _PageMask = _PageSize - 1
131 // _64bit = 1 on 64-bit systems, 0 on 32-bit systems
132 _64bit = 1 << (^uintptr(0) >> 63) / 2
134 // Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
136 _TinySizeClass = int8(2)
138 _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc
140 // Per-P, per order stack segment cache size.
141 _StackCacheSize = 32 * 1024
143 // Number of orders that get caching. Order 0 is FixedStack
144 // and each successive order is twice as large.
145 // We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
146 // will be allocated directly.
147 // Since FixedStack is different on different systems, we
148 // must vary NumStackOrders to keep the same maximum cached size.
149 // OS | FixedStack | NumStackOrders
150 // -----------------+------------+---------------
151 // linux/darwin/bsd | 2KB | 4
152 // windows/32 | 4KB | 3
153 // windows/64 | 8KB | 2
// plan9 | 4KB | 3
155 _NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
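// For illustration, the formula above reproduces the table: linux/amd64
// (FixedStack 2KB) gives 4 - 8/4*0 - 0 = 4 orders (2K, 4K, 8K, 16K),
// windows/amd64 (8KB) gives 4 - 8/4*1 = 2 orders (8K, 16K), windows/386
// (4KB) gives 4 - 4/4*1 = 3 orders, and plan9 gives 4 - 0 - 1 = 3 orders.
// In each case the largest cached stack is 16KB.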
157 // heapAddrBits is the number of bits in a heap address. On
158 // amd64, addresses are sign-extended beyond heapAddrBits. On
159 // other arches, they are zero-extended.
161 // On most 64-bit platforms, we limit this to 48 bits based on a
162 // combination of hardware and OS limitations.
164 // amd64 hardware limits addresses to 48 bits, sign-extended
165 // to 64 bits. Addresses where the top 16 bits are not either
166 // all 0 or all 1 are "non-canonical" and invalid. Because of
167 // these "negative" addresses, we offset addresses by 1<<47
168 // (arenaBaseOffset) on amd64 before computing indexes into
169 // the heap arenas index. In 2017, amd64 hardware added
170 // support for 57 bit addresses; however, currently only Linux
171 // supports this extension and the kernel will never choose an
172 // address above 1<<47 unless mmap is called with a hint
173 // address above 1<<47 (which we never do).
175 // arm64 hardware (as of ARMv8) limits user addresses to 48
176 // bits, in the range [0, 1<<48).
178 // ppc64, mips64, and s390x support arbitrary 64 bit addresses
179 // in hardware. On Linux, Go leans on stricter OS limits. Based
180 // on Linux's processor.h, the user address space is limited as
181 // follows on 64-bit architectures:
183 // Architecture Name Maximum Value (exclusive)
184 // ---------------------------------------------------------------------
185 // amd64 TASK_SIZE_MAX 0x007ffffffff000 (47 bit addresses)
186 // arm64 TASK_SIZE_64 0x01000000000000 (48 bit addresses)
187 // ppc64{,le} TASK_SIZE_USER64 0x00400000000000 (46 bit addresses)
188 // mips64{,le} TASK_SIZE64 0x00010000000000 (40 bit addresses)
189 // s390x TASK_SIZE 1<<64 (64 bit addresses)
191 // These limits may increase over time, but are currently at
192 // most 48 bits except on s390x. On all architectures, Linux
193 // starts placing mmap'd regions at addresses that are
194 // significantly below 48 bits, so even if it's possible to
195 // exceed Go's 48 bit limit, it's extremely unlikely in practice.
198 // On 32-bit platforms, we accept the full 32-bit address
199 // space because doing so is cheap.
200 // mips32 only has access to the low 2GB of virtual memory, so
201 // we further limit it to 31 bits.
203 // On ios/arm64, although 64-bit pointers are presumably
204 // available, pointers are truncated to 33 bits. Furthermore,
205 // only the top 4 GiB of the address space are actually available
206 // to the application, but we allow the whole 33 bits anyway for simplicity.
208 // TODO(mknyszek): Consider limiting it to 32 bits and using
209 // arenaBaseOffset to offset into the top 4 GiB.
211 // WebAssembly currently has a limit of 4GB linear memory.
212 heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 33*goos.IsIos*goarch.IsArm64
214 // maxAlloc is the maximum size of an allocation. On 64-bit,
215 // it's theoretically possible to allocate 1<<heapAddrBits bytes. On
216 // 32-bit, however, this is one less than 1<<32 because the
217 // number of bytes in the address space doesn't actually fit in a uintptr.
219 maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1
221 // The number of bits in a heap address, the size of heap
222 // arenas, and the L1 and L2 arena map sizes are related by
224 // (1 << addr bits) = arena size * L1 entries * L2 entries
226 // Currently, we balance these as follows:
228 // Platform Addr bits Arena size L1 entries L2 entries
229 // -------------- --------- ---------- ---------- -----------
230 // */64-bit 48 64MB 1 4M (32MB)
231 // windows/64-bit 48 4MB 64 1M (8MB)
232 // ios/arm64 33 4MB 1 2048 (8KB)
233 // */32-bit 32 4MB 1 1024 (4KB)
234 // */mips(le) 31 4MB 1 512 (2KB)
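//
// These columns satisfy heapAddrBits = logHeapArenaBytes + arenaL1Bits +
// arenaL2Bits. For example, on linux/amd64 that is 48 = 26 (64MB arenas) +
// 0 + 22 (4M L2 entries), and on windows/amd64 it is 48 = 22 (4MB arenas) +
// 6 (64 L1 entries) + 20 (1M L2 entries).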
236 // heapArenaBytes is the size of a heap arena. The heap
237 // consists of mappings of size heapArenaBytes, aligned to
238 // heapArenaBytes. The initial heap mapping is one arena.
240 // This is currently 64MB on 64-bit non-Windows and 4MB on
241 // 32-bit and on Windows. We use smaller arenas on Windows
242 // because all committed memory is charged to the process,
243 // even if it's not touched. Hence, for processes with small
244 // heaps, the mapped arena space needs to be commensurate.
245 // This is particularly important with the race detector,
246 // since it significantly amplifies the cost of committed memory.
248 heapArenaBytes = 1 << logHeapArenaBytes
250 // logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
251 // prefer using heapArenaBytes where possible (we need the
252 // constant to compute some other constants).
253 logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64
255 // heapArenaBitmapBytes is the size of each heap arena's bitmap.
256 heapArenaBitmapBytes = heapArenaBytes / (goarch.PtrSize * 8 / 2)
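// With 2 bitmap bits per pointer-sized word, this works out to
// heapArenaBytes/32 on 64-bit (a 2MB bitmap per 64MB arena) and
// heapArenaBytes/16 on 32-bit (a 256KB bitmap per 4MB arena).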
258 pagesPerArena = heapArenaBytes / pageSize
260 // arenaL1Bits is the number of bits of the arena number
261 // covered by the first level arena map.
263 // This number should be small, since the first level arena
264 // map requires PtrSize*(1<<arenaL1Bits) of space in the
265 // binary's BSS. It can be zero, in which case the first level
266 // index is effectively unused. There is a performance benefit
267 // to this, since the generated code can be more efficient,
268 // but it comes at the cost of having a large L2 mapping.
270 // We use the L1 map on 64-bit Windows because the arena size
271 // is small, but the address space is still 48 bits, and
272 // there's a high cost to having a large L2.
273 arenaL1Bits = 6 * (_64bit * goos.IsWindows)
275 // arenaL2Bits is the number of bits of the arena number
276 // covered by the second level arena index.
278 // The size of each arena map allocation is proportional to
279 // 1<<arenaL2Bits, so it's important that this not be too
280 // large. 48 bits leads to 32MB arena index allocations, which
281 // is about the practical threshold.
282 arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits
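// For example, on linux/amd64 this is 48 - 26 - 0 = 22 bits, so each L2 map
// is 1<<22 * 8 bytes = 32MB, while on windows/amd64 it is 48 - 22 - 6 = 20
// bits, i.e. an 8MB L2 map (matching the table above).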
284 // arenaL1Shift is the number of bits to shift an arena frame
285 // number by to compute an index into the first level arena map.
286 arenaL1Shift = arenaL2Bits
288 // arenaBits is the total bits in a combined arena map index.
289 // This is split between the index into the L1 arena map and the index into the L2 arena map.
291 arenaBits = arenaL1Bits + arenaL2Bits
293 // arenaBaseOffset is the pointer value that corresponds to
294 // index 0 in the heap arena map.
296 // On amd64, the address space is 48 bits, sign extended to 64
297 // bits. This offset lets us handle "negative" addresses (or
298 // high addresses if viewed as unsigned).
300 // On aix/ppc64, this offset allows keeping heapAddrBits at
301 // 48. Otherwise, it would have to be 60 in order to handle mmap addresses
302 // (in the range 0x0a00000000000000 - 0x0affffffffffffff). But in that
303 // case, the memory reserved in (s *pageAlloc).init for chunks
304 // would cause significant slowdowns.
306 // On other platforms, the user address space is contiguous
307 // and starts at 0, so no offset is necessary.
308 arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix
309 // A typed version of this constant that will make it into DWARF (for viewcore).
310 arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)
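//
// For example (illustrative only; the index arithmetic itself lives in
// arenaIndex in mheap.go): on amd64 the lowest canonical "negative" address,
// 0xffff800000000000, maps to arena frame 0, address 0 maps to frame
// (1<<47)/heapArenaBytes, and the highest user address, 0x00007fffffffffff,
// falls in the last frame below 1<<48.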
312 // Max number of threads to run garbage collection.
313 // 2, 3, and 4 are all plausible maximums depending
314 // on the hardware details of the machine. The garbage
315 // collector scales well to 32 cpus.
318 // minLegalPointer is the smallest possible legal pointer.
319 // This is the smallest possible architectural page size,
320 // since we assume that the first page is never mapped.
322 // This should agree with minZeroPage in the compiler.
323 minLegalPointer uintptr = 4096
326 // physPageSize is the size in bytes of the OS's physical pages.
327 // Mapping and unmapping operations must be done at multiples of physPageSize.
330 // This must be set by the OS init code (typically in osinit) before mallocinit.
332 var physPageSize uintptr
334 // physHugePageSize is the size in bytes of the OS's default physical huge
335 // page size whose allocation is opaque to the application. It is assumed
336 // and verified to be a power of two.
338 // If set, this must be set by the OS init code (typically in osinit) before
339 // mallocinit. However, setting it at all is optional, and leaving the default
340 // value is always safe (though potentially less efficient).
342 // Since physHugePageSize is always assumed to be a power of two,
343 // physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
344 // The purpose of physHugePageShift is to avoid doing divisions in
345 // performance critical functions.
347 physHugePageSize uintptr
348 physHugePageShift uint
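// Illustrative use (not part of this file): because physHugePageSize is a
// power of two, a division or rounding operation such as
//
//	n / physHugePageSize
//	alignDown(n, physHugePageSize)
//
// can instead be written as
//
//	n >> physHugePageShift
//	n &^ (physHugePageSize - 1)
//
// which avoids integer division on hot paths.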
351 // OS memory management abstraction layer
353 // Regions of the address space managed by the runtime may be in one of four
354 // states at any given time:
355 // 1) None - Unreserved and unmapped, the default state of any region.
356 // 2) Reserved - Owned by the runtime, but accessing it would cause a fault.
357 // Does not count against the process' memory footprint.
358 // 3) Prepared - Reserved, intended not to be backed by physical memory (though
359 // an OS may implement this lazily). Can transition efficiently to
360 // Ready. Accessing memory in such a region is undefined (may
361 // fault, may give back unexpected zeroes, etc.).
362 // 4) Ready - may be accessed safely.
364 // This set of states is more than is strictly necessary to support all the
365 // currently supported platforms. One could get by with just None, Reserved, and
366 // Ready. However, the Prepared state gives us flexibility for performance
367 // purposes. For example, on POSIX-y operating systems, Reserved is usually a
368 // private anonymous mmap'd region with PROT_NONE set, and to transition
369 // to Ready would require setting PROT_READ|PROT_WRITE. However the
370 // underspecification of Prepared lets us use just MADV_FREE to transition from
371 // Ready to Prepared. Thus with the Prepared state we can set the permission
372 // bits just once early on, we can efficiently tell the OS that it's free to
373 // take pages away from us when we don't strictly need them.
375 // For each OS there is a common set of helpers defined that transition
376 // memory regions between these states. The helpers are as follows:
378 // sysAlloc transitions an OS-chosen region of memory from None to Ready.
379 // More specifically, it obtains a large chunk of zeroed memory from the
380 // operating system, typically on the order of a hundred kilobytes
381 // or a megabyte. This memory is always immediately available for use.
383 // sysFree transitions a memory region from any state to None. Therefore, it
384 // returns memory unconditionally. It is used if an out-of-memory error has been
385 // detected midway through an allocation or to carve out an aligned section of
386 // the address space. It is okay if sysFree is a no-op only if sysReserve always
387 // returns a memory region aligned to the heap allocator's alignment restrictions.
390 // sysReserve transitions a memory region from None to Reserved. It reserves
391 // address space in such a way that it would cause a fatal fault upon access
392 // (either via permissions or not committing the memory). Such a reservation is
393 // thus never backed by physical memory.
394 // If the pointer passed to it is non-nil, the caller wants the
395 // reservation there, but sysReserve can still choose another
396 // location if that one is unavailable.
397 // NOTE: sysReserve returns OS-aligned memory, but the heap allocator
398 // may use larger alignment, so the caller must be careful to realign the
399 // memory obtained by sysReserve.
401 // sysMap transitions a memory region from Reserved to Prepared. It ensures the
402 // memory region can be efficiently transitioned to Ready.
404 // sysUsed transitions a memory region from Prepared to Ready. It notifies the
405 // operating system that the memory region is needed and ensures that the region
406 // may be safely accessed. This is typically a no-op on systems that don't have
407 // an explicit commit step and hard over-commit limits, but is critical on
408 // Windows, for example.
410 // sysUnused transitions a memory region from Ready to Prepared. It notifies the
411 // operating system that the physical pages backing this memory region are no
412 // longer needed and can be reused for other purposes. The contents of a
413 // sysUnused memory region are considered forfeit and the region must not be
414 // accessed again until sysUsed is called.
416 // sysFault transitions a memory region from Ready or Prepared to Reserved. It
417 // marks a region such that it will always fault if accessed. Used only for
418 // debugging the runtime.
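//
// As a rough POSIX-flavored illustration (the real per-OS implementations
// live in the mem_*.go files and differ in detail):
//
//	sysReserve ~ mmap(PROT_NONE, MAP_ANON|MAP_PRIVATE)         None           -> Reserved
//	sysMap     ~ mmap(PROT_READ|PROT_WRITE, MAP_FIXED, ...)    Reserved       -> Prepared
//	sysUsed    ~ often a no-op (an explicit commit on Windows)  Prepared       -> Ready
//	sysUnused  ~ madvise(MADV_FREE or MADV_DONTNEED)            Ready          -> Prepared
//	sysFault   ~ mmap(PROT_NONE, MAP_FIXED)                     Ready/Prepared -> Reserved
//	sysFree    ~ munmap                                         any            -> None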
421 if class_to_size[_TinySizeClass] != _TinySize {
422 throw("bad TinySizeClass")
425 if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
426 // heapBits expects modular arithmetic on bitmap
427 // addresses to work.
428 throw("heapArenaBitmapBytes not a power of 2")
431 // Copy class sizes out for statistics table.
432 for i := range class_to_size {
433 memstats.by_size[i].size = uint32(class_to_size[i])
436 // Check physPageSize.
437 if physPageSize == 0 {
438 // The OS init code failed to fetch the physical page size.
439 throw("failed to get system page size")
441 if physPageSize > maxPhysPageSize {
442 print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
443 throw("bad system page size")
445 if physPageSize < minPhysPageSize {
446 print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
447 throw("bad system page size")
449 if physPageSize&(physPageSize-1) != 0 {
450 print("system page size (", physPageSize, ") must be a power of 2\n")
451 throw("bad system page size")
453 if physHugePageSize&(physHugePageSize-1) != 0 {
454 print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
455 throw("bad system huge page size")
457 if physHugePageSize > maxPhysHugePageSize {
458 // physHugePageSize is greater than the maximum supported huge page size.
459 // Don't throw here, like in the other cases, since a system configured
460 // in this way isn't wrong; we just don't have the code to support it.
461 // Instead, silently set the huge page size to zero.
464 if physHugePageSize != 0 {
465 // Since physHugePageSize is a power of 2, it suffices to increase
466 // physHugePageShift until 1<<physHugePageShift == physHugePageSize.
467 for 1<<physHugePageShift != physHugePageSize {
471 if pagesPerArena%pagesPerSpanRoot != 0 {
472 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n")
473 throw("bad pagesPerSpanRoot")
475 if pagesPerArena%pagesPerReclaimerChunk != 0 {
476 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
477 throw("bad pagesPerReclaimerChunk")
480 // Initialize the heap.
482 mcache0 = allocmcache()
483 lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas)
484 lockInit(&proflock, lockRankProf)
485 lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)
487 // Create initial arena growth hints.
488 if goarch.PtrSize == 8 {
489 // On a 64-bit machine, we pick the following hints because:
492 // 1. Starting from the middle of the address space
493 // makes it easier to grow out a contiguous range
494 // without running into some other mapping.
496 // 2. This makes Go heap addresses more easily
497 // recognizable when debugging.
499 // 3. Stack scanning in gccgo is still conservative,
500 // so it's important that addresses be distinguishable from other data.
503 // Starting at 0x00c0 means that the valid memory addresses
504 // will begin with 0x00c0, 0x00c1, ...
505 // In little-endian, that's c0 00, c1 00, ... None of those are valid
506 // UTF-8 sequences, and they are otherwise as far away from
507 // ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
508 // addresses. An earlier attempt to use 0x11f8 caused out of memory errors
509 // on OS X during thread allocations. 0x00c0 causes conflicts with
510 // AddressSanitizer which reserves all memory up to 0x0100.
511 // These choices reduce the odds of a conservative garbage collector
512 // not collecting memory because some non-pointer block of memory
513 // had a bit pattern that matched a memory address.
515 // However, on arm64, we ignore all this advice above and slam the
516 // allocation at 0x40 << 32 because when using 4k pages with 3-level
517 // translation buffers, the user address space is limited to 39 bits
518 // On ios/arm64, the address space is even smaller.
520 // On AIX, mmap starts at 0x0A00000000000000 for 64-bit processes.
522 for i := 0x7f; i >= 0; i-- {
526 // The TSAN runtime requires the heap
527 // to be in the range [0x00c000000000, 0x00e000000000).
529 p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
530 if p >= uintptrMask&0x00e000000000 {
533 case GOARCH == "arm64" && GOOS == "ios":
534 p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
535 case GOARCH == "arm64":
536 p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
539 // We don't use addresses directly after 0x0A00000000000000
540 // to avoid collisions with other mmaps done by non-Go programs.
543 p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
545 p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
547 hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
549 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
552 // On a 32-bit machine, we're much more concerned
553 // about keeping the usable heap contiguous.
556 // 1. We reserve space for all heapArenas up front so
557 // they don't get interleaved with the heap. They're
558 // ~258MB, so this isn't too bad. (We could reserve a
559 // smaller amount of space up front if this is a problem.)
562 // 2. We hint the heap to start right above the end of
563 // the binary so we have the best chance of keeping it contiguous.
566 // 3. We try to stake out a reasonably large initial heap reservation.
569 const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
570 meta := uintptr(sysReserve(nil, arenaMetaSize))
572 mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
575 // We want to start the arena low, but if we're linked
576 // against C code, it's possible global constructors
577 // have called malloc and adjusted the process' brk.
578 // Query the brk so we can avoid trying to map the
579 // region over it (which will cause the kernel to put
580 // the region somewhere else, likely at a high
584 // If we ask for the end of the data segment but the
585 // operating system requires a little more space
586 // before we can start allocating, it will give out a
587 // slightly higher pointer. Except QEMU, which is
588 // buggy, as usual: it won't adjust the pointer
589 // upward. So adjust it upward a little bit ourselves:
590 // 1/4 MB to get away from the running binary image.
591 p := firstmoduledata.end
595 if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
596 p = mheap_.heapArenaAlloc.end
598 p = alignUp(p+(256<<10), heapArenaBytes)
599 // Because we're worried about fragmentation on
600 // 32-bit, we try to make a large initial reservation.
601 arenaSizes := []uintptr{
606 for _, arenaSize := range arenaSizes {
607 a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
609 mheap_.arena.init(uintptr(a), size, false)
610 p = mheap_.arena.end // For hint below
614 hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
616 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
620 // sysAlloc allocates heap arena space for at least n bytes. The
621 // returned pointer is always heapArenaBytes-aligned and backed by
622 // h.arenas metadata. The returned size is always a multiple of
623 // heapArenaBytes. sysAlloc returns nil on failure.
624 // There is no corresponding free function.
626 // sysAlloc returns a memory region in the Reserved state. This region must
627 // be transitioned to Prepared and then Ready before use.
630 func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
631 assertLockHeld(&h.lock)
633 n = alignUp(n, heapArenaBytes)
635 // First, try the arena pre-reservation.
636 v = h.arena.alloc(n, heapArenaBytes, &memstats.heap_sys)
642 // Try to grow the heap at a hint address.
643 for h.arenaHints != nil {
650 // We can't use this, so don't ask.
652 } else if arenaIndex(p+n-1) >= 1<<arenaBits {
653 // Outside addressable heap. Can't use.
656 v = sysReserve(unsafe.Pointer(p), n)
659 // Success. Update the hint.
667 // Failed. Discard this hint and try the next.
669 // TODO: This would be cleaner if sysReserve could be
670 // told to only return the requested address. In
671 // particular, this is already how Windows behaves, so
672 // it would simplify things there.
676 h.arenaHints = hint.next
677 h.arenaHintAlloc.free(unsafe.Pointer(hint))
682 // The race detector assumes the heap lives in
683 // [0x00c000000000, 0x00e000000000), but we
684 // just ran out of hints in this region. Give a nice failure.
686 throw("too many address space collisions for -race mode")
689 // All of the hints failed, so we'll take any
690 // (sufficiently aligned) address the kernel will give us.
692 v, size = sysReserveAligned(nil, n, heapArenaBytes)
697 // Create new hints for extending this region.
698 hint := (*arenaHint)(h.arenaHintAlloc.alloc())
699 hint.addr, hint.down = uintptr(v), true
700 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
701 hint = (*arenaHint)(h.arenaHintAlloc.alloc())
702 hint.addr = uintptr(v) + size
703 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
706 // Check for bad pointers or pointers we can't use.
711 bad = "region exceeds uintptr range"
712 } else if arenaIndex(p) >= 1<<arenaBits {
713 bad = "base outside usable address space"
714 } else if arenaIndex(p+size-1) >= 1<<arenaBits {
715 bad = "end outside usable address space"
718 // This should be impossible on most architectures,
719 // but it would be really confusing to debug.
720 print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
721 throw("memory reservation exceeds address space limit")
725 if uintptr(v)&(heapArenaBytes-1) != 0 {
726 throw("misrounded allocation in sysAlloc")
730 // Create arena metadata.
731 for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
732 l2 := h.arenas[ri.l1()]
734 // Allocate an L2 arena map.
735 l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), goarch.PtrSize, nil))
737 throw("out of memory allocating heap arena map")
739 atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
742 if l2[ri.l2()] != nil {
743 throw("arena already initialized")
746 r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
748 r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
750 throw("out of memory allocating heap arena metadata")
754 // Add the arena to the arenas list.
755 if len(h.allArenas) == cap(h.allArenas) {
756 size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize
760 newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
762 throw("out of memory allocating allArenas")
764 oldSlice := h.allArenas
765 *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)}
766 copy(h.allArenas, oldSlice)
767 // Do not free the old backing array because
768 // there may be concurrent readers. Since we
769 // double the array each time, this can lead
770 // to at most 2x waste.
772 h.allArenas = h.allArenas[:len(h.allArenas)+1]
773 h.allArenas[len(h.allArenas)-1] = ri
775 // Store atomically just in case an object from the
776 // new heap arena becomes visible before the heap lock
777 // is released (which shouldn't happen, but there's
778 // little downside to this).
779 atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
782 // Tell the race detector about the new heap memory.
784 racemapshadow(v, size)
790 // sysReserveAligned is like sysReserve, but the returned pointer is
791 // aligned to align bytes. It may reserve either n or n+align bytes,
792 // so it returns the size that was reserved.
793 func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
794 // Since the alignment is rather large in uses of this
795 // function, we're not likely to get it by chance, so we ask
796 // for a larger region and remove the parts we don't need.
799 p := uintptr(sysReserve(v, size+align))
803 case p&(align-1) == 0:
804 // We got lucky and got an aligned region, so we can
805 // use the whole thing.
806 return unsafe.Pointer(p), size + align
807 case GOOS == "windows":
808 // On Windows we can't release pieces of a
809 // reservation, so we release the whole thing and
810 // re-reserve the aligned sub-region. This may race,
811 // so we may have to try again.
812 sysFree(unsafe.Pointer(p), size+align, nil)
813 p = alignUp(p, align)
814 p2 := sysReserve(unsafe.Pointer(p), size)
815 if p != uintptr(p2) {
816 // Must have raced. Try again.
817 sysFree(p2, size, nil)
818 if retries++; retries == 100 {
819 throw("failed to allocate aligned heap memory; too many retries")
826 // Trim off the unaligned parts.
827 pAligned := alignUp(p, align)
828 sysFree(unsafe.Pointer(p), pAligned-p, nil)
829 end := pAligned + size
830 endLen := (p + size + align) - end
832 sysFree(unsafe.Pointer(end), endLen, nil)
834 return unsafe.Pointer(pAligned), size
838 // base address for all 0-byte allocations
841 // nextFreeFast returns the next free object if one is quickly available.
842 // Otherwise it returns 0.
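//
// For example (illustrative only): if s.freeindex is 128 and the low bits of
// s.allocCache are ...10100, then Ctz64 returns 2, so object 130 is handed
// out, freeindex advances to 131, and allocCache is shifted right by 3 so the
// next lookup again starts at bit 0.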
843 func nextFreeFast(s *mspan) gclinkptr {
844 theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
846 result := s.freeindex + uintptr(theBit)
847 if result < s.nelems {
848 freeidx := result + 1
849 if freeidx%64 == 0 && freeidx != s.nelems {
852 s.allocCache >>= uint(theBit + 1)
853 s.freeindex = freeidx
855 return gclinkptr(result*s.elemsize + s.base())
861 // nextFree returns the next free object from the cached span if one is available.
862 // Otherwise it refills the cache with a span with an available object and
863 // returns that object along with a flag indicating that this was a heavy
864 // weight allocation. If it is a heavy weight allocation, the caller must
865 // determine whether a new GC cycle needs to be started or, if the GC is active,
866 // whether this goroutine needs to assist the GC.
868 // Must run in a non-preemptible context since otherwise the owner of c could change.
870 func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
873 freeIndex := s.nextFreeIndex()
874 if freeIndex == s.nelems {
876 if uintptr(s.allocCount) != s.nelems {
877 println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
878 throw("s.allocCount != s.nelems && freeIndex == s.nelems")
884 freeIndex = s.nextFreeIndex()
887 if freeIndex >= s.nelems {
888 throw("freeIndex is not valid")
891 v = gclinkptr(freeIndex*s.elemsize + s.base())
893 if uintptr(s.allocCount) > s.nelems {
894 println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
895 throw("s.allocCount > s.nelems")
900 // Allocate an object of size bytes.
901 // Small objects are allocated from the per-P cache's free lists.
902 // Large objects (> 32 kB) are allocated straight from the heap.
903 func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
904 if gcphase == _GCmarktermination {
905 throw("mallocgc called with gcphase == _GCmarktermination")
909 return unsafe.Pointer(&zerobase)
916 // TODO(austin): This should be just
917 // align = uintptr(typ.align)
918 // but that's only 4 on 32-bit platforms,
919 // even if there's a uint64 field in typ (see #599).
920 // This causes 64-bit atomic accesses to panic.
921 // Hence, we use stricter alignment that matches
922 // the normal allocator better.
925 } else if size&3 == 0 {
927 } else if size&1 == 0 {
933 return persistentalloc(size, align, &memstats.other_sys)
936 if inittrace.active && inittrace.id == getg().goid {
937 // Init functions are executed sequentially in a single goroutine.
938 inittrace.allocs += 1
942 // assistG is the G to charge for this allocation, or nil if
943 // GC is not currently active.
945 if gcBlackenEnabled != 0 {
946 // Charge the current user G for this allocation.
948 if assistG.m.curg != nil {
949 assistG = assistG.m.curg
951 // Charge the allocation against the G. We'll account
952 // for internal fragmentation at the end of mallocgc.
953 assistG.gcAssistBytes -= int64(size)
955 if assistG.gcAssistBytes < 0 {
956 // This G is in debt. Assist the GC to correct
957 // this before allocating. This must happen
958 // before disabling preemption.
959 gcAssistAlloc(assistG)
963 // Set mp.mallocing to keep from being preempted by GC.
965 if mp.mallocing != 0 {
966 throw("malloc deadlock")
968 if mp.gsignal == getg() {
969 throw("malloc during signal")
973 shouldhelpgc := false
977 throw("mallocgc called without a P or outside bootstrapping")
981 noscan := typ == nil || typ.ptrdata == 0
982 // In some cases block zeroing can profitably (for latency reduction purposes)
983 // be delayed till preemption is possible; isZeroed tracks that state.
985 if size <= maxSmallSize {
986 if noscan && size < maxTinySize {
989 // Tiny allocator combines several tiny allocation requests
990 // into a single memory block. The resulting memory block
991 // is freed when all subobjects are unreachable. The subobjects
992 // must be noscan (have no pointers); this ensures that
993 // the amount of potentially wasted memory is bounded.
995 // Size of the memory block used for combining (maxTinySize) is tunable.
996 // Current setting is 16 bytes, which corresponds to 2x worst-case memory
997 // wastage (when all but one of the subobjects are unreachable).
998 // 8 bytes would result in no wastage at all, but provides fewer
999 // opportunities for combining.
1000 // 32 bytes provides more opportunities for combining,
1001 // but can lead to 4x worst-case wastage.
1002 // The best-case gain is 8x regardless of block size.
1004 // Objects obtained from tiny allocator must not be freed explicitly.
1005 // So when an object will be freed explicitly, we ensure that
1006 // its size >= maxTinySize.
1008 // SetFinalizer has a special case for objects potentially coming
1009 // from the tiny allocator; in that case it allows a finalizer to be set
1010 // for an inner byte of a memory block.
1012 // The main targets of the tiny allocator are small strings and
1013 // standalone escaping variables. On a json benchmark
1014 // the allocator reduces number of allocations by ~12% and
1015 // reduces heap size by ~20%.
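//
// For example, successive noscan allocations of 4, 5, and 6 bytes can all be
// packed into one 16-byte block (at offsets 0, 4, and 10 given the alignment
// rules below), and that block is freed only once all three objects become
// unreachable.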
1017 // Align tiny pointer for required (conservative) alignment.
1019 off = alignUp(off, 8)
1020 } else if goarch.PtrSize == 4 && size == 12 {
1021 // Conservatively align 12-byte objects to 8 bytes on 32-bit
1022 // systems so that objects whose first field is a 64-bit
1023 // value is aligned to 8 bytes and does not cause a fault on
1024 // atomic access. See issue 37262.
1025 // TODO(mknyszek): Remove this workaround if/when issue 36606
1027 off = alignUp(off, 8)
1028 } else if size&3 == 0 {
1029 off = alignUp(off, 4)
1030 } else if size&1 == 0 {
1031 off = alignUp(off, 2)
1033 if off+size <= maxTinySize && c.tiny != 0 {
1034 // The object fits into existing tiny block.
1035 x = unsafe.Pointer(c.tiny + off)
1036 c.tinyoffset = off + size
1042 // Allocate a new maxTinySize block.
1043 span = c.alloc[tinySpanClass]
1044 v := nextFreeFast(span)
1046 v, span, shouldhelpgc = c.nextFree(tinySpanClass)
1048 x = unsafe.Pointer(v)
1049 (*[2]uint64)(x)[0] = 0
1050 (*[2]uint64)(x)[1] = 0
1051 // See if we need to replace the existing tiny block with the new one
1052 // based on amount of remaining free space.
1053 if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
1054 // Note: disabled when race detector is on, see comment near end of this function.
1061 if size <= smallSizeMax-8 {
1062 sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
1064 sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
1066 size = uintptr(class_to_size[sizeclass])
1067 spc := makeSpanClass(sizeclass, noscan)
1069 v := nextFreeFast(span)
1071 v, span, shouldhelpgc = c.nextFree(spc)
1073 x = unsafe.Pointer(v)
1074 if needzero && span.needzero != 0 {
1075 memclrNoHeapPointers(unsafe.Pointer(v), size)
1080 // For large allocations, keep track of zeroed state so that
1081 // bulk zeroing can happen later in a preemptible context.
1082 span, isZeroed = c.allocLarge(size, needzero && !noscan, noscan)
1085 x = unsafe.Pointer(span.base())
1086 size = span.elemsize
1089 var scanSize uintptr
1091 heapBitsSetType(uintptr(x), size, dataSize, typ)
1092 if dataSize > typ.size {
1093 // Array allocation. If there are any
1094 // pointers, GC has to scan to the last element.
1096 if typ.ptrdata != 0 {
1097 scanSize = dataSize - typ.size + typ.ptrdata
1100 scanSize = typ.ptrdata
1102 c.scanAlloc += scanSize
1105 // Ensure that the stores above that initialize x to
1106 // type-safe memory and set the heap bits occur before
1107 // the caller can make x observable to the garbage
1108 // collector. Otherwise, on weakly ordered machines,
1109 // the garbage collector could follow a pointer to x,
1110 // but see uninitialized memory or stale heap bits.
1111 publicationBarrier()
1113 // Allocate black during GC.
1114 // All slots hold nil so no scanning is needed.
1115 // This may be racing with GC so do it atomically if there can be
1116 // a race marking the bit.
1117 if gcphase != _GCoff {
1118 gcmarknewobject(span, uintptr(x), size, scanSize)
1132 // Pointer-free data can be zeroed late in a context where preemption can occur.
1133 // x will keep the memory alive.
1134 if !isZeroed && needzero {
1135 memclrNoHeapPointersChunked(size, x)
1139 if debug.allocfreetrace != 0 {
1140 tracealloc(x, size, typ)
1143 if inittrace.active && inittrace.id == getg().goid {
1144 // Init functions are executed sequentially in a single goroutine.
1145 inittrace.bytes += uint64(size)
1149 if rate := MemProfileRate; rate > 0 {
1150 if rate != 1 && size < c.nextSample {
1151 c.nextSample -= size
1154 profilealloc(mp, x, size)
1160 // Account for internal fragmentation in the assist
1161 // debt now that we know it.
1162 assistG.gcAssistBytes -= int64(size - dataSize)
1166 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
1171 if raceenabled && noscan && dataSize < maxTinySize {
1172 // Pad tinysize allocations so they are aligned with the end
1173 // of the tinyalloc region. This ensures that any arithmetic
1174 // that goes off the top end of the object will be detectable
1175 // by checkptr (issue 38872).
1176 // Note that we disable tinyalloc when raceenabled for this to work.
1177 // TODO: This padding is only performed when the race detector
1178 // is enabled. It would be nice to enable it if any package
1179 // was compiled with checkptr, but there's no easy way to
1180 // detect that (especially at compile time).
1181 // TODO: enable this padding for all allocations, not just
1182 // tinyalloc ones. It's tricky because of pointer maps.
1183 // Maybe just all noscan objects?
1184 x = add(x, size-dataSize)
1190 // memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
1191 // on chunks of the buffer to be zeroed, with opportunities for preemption
1192 // along the way. memclrNoHeapPointers contains no safepoints and also
1193 // cannot be preemptively scheduled, so this provides a still-efficient
1194 // block clear that can also be preempted on a reasonable granularity.
1196 // Use this with care; if the data being cleared is tagged to contain
1197 // pointers, this allows the GC to run before it is all cleared.
1198 func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
1200 // got this from benchmarking. 128k is too small, 512k is too large.
1201 const chunkBytes = 256 * 1024
1203 for voff := v; voff < vsize; voff = voff + chunkBytes {
1205 // may hold locks, e.g., profiling
1208 // clear min(remaining, chunkBytes) bytes
1213 memclrNoHeapPointers(unsafe.Pointer(voff), n)
1217 // implementation of new builtin
1218 // compiler (both frontend and SSA backend) knows the signature of this function
1220 func newobject(typ *_type) unsafe.Pointer {
1221 return mallocgc(typ.size, typ, true)
1224 //go:linkname reflect_unsafe_New reflect.unsafe_New
1225 func reflect_unsafe_New(typ *_type) unsafe.Pointer {
1226 return mallocgc(typ.size, typ, true)
1229 //go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
1230 func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
1231 return mallocgc(typ.size, typ, true)
1234 // newarray allocates an array of n elements of type typ.
1235 func newarray(typ *_type, n int) unsafe.Pointer {
1237 return mallocgc(typ.size, typ, true)
1239 mem, overflow := math.MulUintptr(typ.size, uintptr(n))
1240 if overflow || mem > maxAlloc || n < 0 {
1241 panic(plainError("runtime: allocation size out of range"))
1243 return mallocgc(mem, typ, true)
1246 //go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
1247 func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
1248 return newarray(typ, n)
1251 func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
1254 throw("profilealloc called without a P or outside bootstrapping")
1256 c.nextSample = nextSample()
1257 mProf_Malloc(x, size)
1260 // nextSample returns the next sampling point for heap profiling. The goal is
1261 // to sample allocations on average every MemProfileRate bytes, but with a
1262 // completely random distribution over the allocation timeline; this
1263 // corresponds to a Poisson process with parameter MemProfileRate. In Poisson
1264 // processes, the distance between two samples follows the exponential
1265 // distribution (exp(MemProfileRate)), so the best return value is a random
1266 // number taken from an exponential distribution whose mean is MemProfileRate.
1267 func nextSample() uintptr {
1268 if MemProfileRate == 1 {
1269 // Callers assign our return value to
1270 // mcache.next_sample, but next_sample is not used
1271 // when the rate is 1. So avoid the math below and
1272 // just return something.
1275 if GOOS == "plan9" {
1276 // Plan 9 doesn't support floating point in note handler.
1277 if g := getg(); g == g.m.gsignal {
1278 return nextSampleNoFP()
1282 return uintptr(fastexprand(MemProfileRate))
1285 // fastexprand returns a random number from an exponential distribution with
1286 // the specified mean.
1287 func fastexprand(mean int) int32 {
1288 // Avoid overflow. Maximum possible step is
1289 // -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
1291 case mean > 0x7000000:
1297 // Take a random sample of the exponential distribution exp(-mean*x).
1298 // The probability distribution function is mean*exp(-mean*x), so the CDF is
1299 // p = 1 - exp(-mean*x), so
1300 // q = 1 - p == exp(-mean*x)
1301 // log_e(q) = -mean*x
1302 // -log_e(q)/mean = x
1303 // x = -log_e(q) * mean
1304 // x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency
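// For example, with q drawn uniformly from [1, 1<<randomBitCount], q == 1<<26
// gives qlog == 0 and a step of 1 byte, while q == 1 gives qlog == -26 and
// the maximum step of roughly 26*ln(2)*mean (the "approximately 20 * mean"
// noted above).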
1305 const randomBitCount = 26
1306 q := fastrand()%(1<<randomBitCount) + 1
1307 qlog := fastlog2(float64(q)) - randomBitCount
1311 const minusLog2 = -0.6931471805599453 // -ln(2)
1312 return int32(qlog*(minusLog2*float64(mean))) + 1
1315 // nextSampleNoFP is similar to nextSample, but uses older,
1316 // simpler code to avoid floating point.
1317 func nextSampleNoFP() uintptr {
1318 // Set first allocation sample size.
1319 rate := MemProfileRate
1320 if rate > 0x3fffffff { // make 2*rate not overflow
1324 return uintptr(fastrand() % uint32(2*rate))
1329 type persistentAlloc struct {
1334 var globalAlloc struct {
1339 // persistentChunkSize is the number of bytes we allocate when we grow
1340 // a persistentAlloc.
1341 const persistentChunkSize = 256 << 10
1343 // persistentChunks is a list of all the persistent chunks we have
1344 // allocated. The list is maintained through the first word in the
1345 // persistent chunk. This is updated atomically.
1346 var persistentChunks *notInHeap
1348 // Wrapper around sysAlloc that can allocate small chunks.
1349 // There is no associated free operation.
1350 // Intended for things like function/type/debug-related persistent data.
1351 // If align is 0, uses default align (currently 8).
1352 // The returned memory will be zeroed.
1354 // Consider marking persistentalloc'd types go:notinheap.
1355 func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
1357 systemstack(func() {
1358 p = persistentalloc1(size, align, sysStat)
1360 return unsafe.Pointer(p)
1363 // Must run on system stack because stack growth can (re)invoke it.
1366 func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
1368 maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
1372 throw("persistentalloc: size == 0")
1375 if align&(align-1) != 0 {
1376 throw("persistentalloc: align is not a power of 2")
1378 if align > _PageSize {
1379 throw("persistentalloc: align is too large")
1385 if size >= maxBlock {
1386 return (*notInHeap)(sysAlloc(size, sysStat))
1390 var persistent *persistentAlloc
1391 if mp != nil && mp.p != 0 {
1392 persistent = &mp.p.ptr().palloc
1394 lock(&globalAlloc.mutex)
1395 persistent = &globalAlloc.persistentAlloc
1397 persistent.off = alignUp(persistent.off, align)
1398 if persistent.off+size > persistentChunkSize || persistent.base == nil {
1399 persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
1400 if persistent.base == nil {
1401 if persistent == &globalAlloc.persistentAlloc {
1402 unlock(&globalAlloc.mutex)
1404 throw("runtime: cannot allocate memory")
1407 // Add the new chunk to the persistentChunks list.
1409 chunks := uintptr(unsafe.Pointer(persistentChunks))
1410 *(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
1411 if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
1415 persistent.off = alignUp(goarch.PtrSize, align)
1417 p := persistent.base.add(persistent.off)
1418 persistent.off += size
1420 if persistent == &globalAlloc.persistentAlloc {
1421 unlock(&globalAlloc.mutex)
1424 if sysStat != &memstats.other_sys {
1425 sysStat.add(int64(size))
1426 memstats.other_sys.add(-int64(size))
1431 // inPersistentAlloc reports whether p points to memory allocated by
1432 // persistentalloc. This must be nosplit because it is called by the
1433 // cgo checker code, which is called by the write barrier code.
1435 func inPersistentAlloc(p uintptr) bool {
1436 chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
1438 if p >= chunk && p < chunk+persistentChunkSize {
1441 chunk = *(*uintptr)(unsafe.Pointer(chunk))
1446 // linearAlloc is a simple linear allocator that pre-reserves a region
1447 // of memory and then optionally maps that region into the Ready state as needed.
1450 // The caller is responsible for locking.
1451 type linearAlloc struct {
1452 next uintptr // next free byte
1453 mapped uintptr // one byte past end of mapped space
1454 end uintptr // end of reserved space
1456 mapMemory bool // transition memory from Reserved to Ready if true
1459 func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
1460 if base+size < base {
1461 // Chop off the last byte. The runtime isn't prepared
1462 // to deal with situations where the bounds could overflow.
1463 // Leave that memory reserved, though, so we don't map it
1467 l.next, l.mapped = base, base
1469 l.mapMemory = mapMemory
1472 func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
1473 p := alignUp(l.next, align)
1478 if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
1480 // Transition from Reserved to Prepared to Ready.
1481 sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
1482 sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
1486 return unsafe.Pointer(p)
1489 // notInHeap is off-heap memory allocated by a lower-level allocator
1490 // like sysAlloc or persistentAlloc.
1492 // In general, it's better to use real types marked as go:notinheap,
1493 // but this serves as a generic type for situations where that isn't
1494 // possible (like in the allocators).
1496 // TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
1499 type notInHeap struct{}
1501 func (p *notInHeap) add(bytes uintptr) *notInHeap {
1502 return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))