// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Memory allocator, based on tcmalloc.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html
//
// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 100 size classes, each of which
// has its own free list of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using a free list
// allocator.
//
// The allocator's data structures are:
//
//	FixAlloc: a free-list allocator for fixed-size objects,
//		used to manage storage used by the allocator.
//	MHeap: the malloc heap, managed at page (4096-byte) granularity.
//	MSpan: a run of pages managed by the MHeap.
//	MCentral: a shared free list for a given size class.
//	MCache: a per-thread (in Go, per-P) cache for small objects.
//	MStats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding MCache free list.
//	   If the list is not empty, allocate an object from it.
//	   This can all be done without acquiring a lock.
//
//	2. If the MCache free list is empty, replenish it by
//	   taking a bunch of objects from the MCentral free list.
//	   Moving a bunch amortizes the cost of acquiring the MCentral lock.
//
//	3. If the MCentral free list is empty, replenish it by
//	   allocating a run of pages from the MHeap and then
//	   chopping that memory into objects of the given size.
//	   Allocating many objects amortizes the cost of locking
//	   the heap.
//
//	4. If the MHeap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Freeing a small object proceeds up the same hierarchy:
//
//	1. Look up the size class for the object and add it to
//	   the MCache free list.
//
//	2. If the MCache free list is too long or the MCache has
//	   too much memory, return some to the MCentral free lists.
//
//	3. If all the objects in a given span have returned to
//	   the MCentral list, return that span to the page heap.
//
//	4. If the heap has too much memory, return some to the
//	   operating system.
//
//	TODO(rsc): Step 4 is not implemented.
//
// Allocating and freeing a large object uses the page heap
// directly, bypassing the MCache and MCentral free lists.
//
// The small objects on the MCache and MCentral free lists
// may or may not be zeroed. They are zeroed if and only if
// the second word of the object is zero. A span in the
// page heap is zeroed unless s->needzero is set. When a span
// is allocated to break into small objects, it is zeroed if needed
// and s->needzero is set. There are two main benefits to delaying the
// zeroing this way:
//
//	1. stack frames allocated from the small object lists
//	   or the page heap can avoid zeroing altogether.
//	2. the cost of zeroing when reusing a small object is
//	   charged to the mutator, not the garbage collector.
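//
// As an illustrative sketch of the allocation hierarchy above (pseudocode
// only, not code from this file; the names are schematic):
//
//	if obj := mcacheFreeList[sizeclass].pop(); obj != nil {
//		return obj // 1. per-P cache, no lock needed
//	}
//	refillFromMCentral(sizeclass) // 2. locks only this size class
//	// 3. an empty MCentral carves up a fresh span obtained from the MHeap
//	// 4. an empty MHeap asks the operating system for more pages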
83 "runtime/internal/sys"
	flagNoScan = _FlagNoScan
	flagNoZero = _FlagNoZero

	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift

	// By construction, single page spans of the smallest object class
	// have the most objects per span.
	maxObjsPerSpan = pageSize / 8

	mSpanInUse = _MSpanInUse

	concurrentSweep = _ConcurrentSweep
	_PageSize = 1 << _PageShift
	_PageMask = _PageSize - 1

	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
	_64bit = 1 << (^uintptr(0) >> 63) / 2

	// Computed constant. The definition of MaxSmallSize and the
	// algorithm in msize.go produce some number of different allocation
	// size classes. NumSizeClasses is that number. It's needed here
	// because there are static arrays of this length; when msize runs its
	// size choosing algorithm it double-checks that NumSizeClasses agrees.
	// Tunable constants.
	_MaxSmallSize = 32 << 10

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.

	_FixAllocChunk  = 16 << 10               // Chunk size for FixAlloc
	_MaxMHeapList   = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap.
	_HeapAllocChunk = 1 << 20                // Chunk size for heap growth
	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//   OS               | FixedStack | NumStackOrders
	//   -----------------+------------+---------------
	//   linux/darwin/bsd | 2KB        | 4
	//   windows/32       | 4KB        | 3
	//   windows/64       | 8KB        | 2
	//   plan9            | 4KB        | 3
	_NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9
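	// For illustration, evaluating the expression above (sys.PtrSize is
	// 4 or 8 and the Goos* constants are 0 or 1):
	//   linux/amd64:   4 - 8/4*0 - 1*0 = 4
	//   windows/amd64: 4 - 8/4*1 - 1*0 = 2
	//   windows/386:   4 - 4/4*1 - 1*0 = 3
	//   plan9:         4 - 0     - 1*1 = 3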
	// Number of bits in page to span calculations (4k pages).
	// On Windows 64-bit we limit the arena to 32GB or 35 bits.
	// Windows counts memory used by page table into committed memory
	// of the process, so we can't reserve too much memory.
	// See https://golang.org/issue/5402 and https://golang.org/issue/5236.
	// On other 64-bit platforms, we limit the arena to 512GB, or 39 bits.
	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address.
	// On Darwin/arm64, we cannot reserve more than ~5GB of virtual memory,
	// but as most devices have less than 4GB of physical memory anyway, we
	// try to be conservative here, and only ask for a 2GB heap.
	_MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*32
	_MHeapMap_Bits      = _MHeapMap_TotalBits - _PageShift

	_MaxMem = uintptr(1<<_MHeapMap_TotalBits - 1)
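	// For illustration, the expression above selects exactly one term per platform:
	//   windows/amd64:          35 bits -> _MaxMem just under 32GB
	//   other 64-bit platforms: 39 bits -> _MaxMem just under 512GB
	//   darwin/arm64:           31 bits -> _MaxMem just under 2GB
	//   32-bit platforms:       32 bits -> _MaxMem spans the full 4GB address space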
	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 cpus.

	// Page number (address>>pageShift)

const _MaxArena32 = 2 << 30
// OS-defined helpers:
//
// sysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
// NOTE: sysAlloc returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysAlloc.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes.
// SysUsed notifies the operating system that the contents
// of the memory region are needed again.
//
// SysFree returns memory to the operating system unconditionally;
// this is only used if an out-of-memory error has been detected
// midway through an allocation. It is okay if SysFree is a no-op.
//
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
// location if that one is unavailable. On some systems and in some
// cases SysReserve will simply check that the address space is
// available and not actually reserve it. If SysReserve returns
// non-nil, it sets *reserved to true if the address space is
// reserved, false if it has merely been checked.
// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by SysReserve.
//
// SysMap maps previously reserved address space for use.
// The reserved argument is true if the address space was really
// reserved, not merely checked.
//
// SysFault marks an (already sysAlloc'd) region to fault
// if accessed. Used only for debugging the runtime.
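// The sketch below is illustrative only and is not part of the allocator:
// it strings the helpers described above into the typical reserve-then-map
// sequence, with a nil hint so the OS chooses the location. The function
// name and the sysStat argument are hypothetical.
func exampleReserveThenMap(n uintptr, sysStat *uint64) unsafe.Pointer {
	var reserved bool
	// Reserve address space; *reserved reports whether it was truly
	// reserved or merely checked to be available.
	p := sysReserve(nil, n, &reserved)
	if p == nil {
		return nil
	}
	// Commit the reserved range before first use.
	sysMap(p, n, reserved, sysStat)
	return p
}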
	if class_to_size[_TinySizeClass] != _TinySize {
		throw("bad TinySizeClass")
	}

	var p, bitmapSize, spansSize, pSize, limit uintptr
	// limit = runtime.memlimit();
	// See https://golang.org/issue/5049
	// TODO(rsc): Fix after 1.1.
	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found. The arena begins with a bitmap large
	// enough to hold 4 bits per allocated word.
	if sys.PtrSize == 8 && (limit == 0 || limit > 1<<30) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 512 GB (MaxMem) should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
		// Allocating a 512 GB region takes away 39 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 9 bits
		// in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer which reserves all memory up to 0x0100.
		// These choices are both for debuggability and to reduce the
		// odds of a conservative garbage collector (as is still used in gccgo)
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// Actually we reserve 544 GB (because the bitmap ends up being 32 GB)
		// but it hardly matters: e0 00 is not valid UTF-8 either.
		//
		// If this fails we fall back to the 32 bit memory mechanism.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On darwin/arm64, the address space is even smaller.
		arenaSize := round(_MaxMem, _PageSize)
		bitmapSize = arenaSize / (sys.PtrSize * 8 / 4)
		spansSize = arenaSize / _PageSize * sys.PtrSize
		spansSize = round(spansSize, _PageSize)
		for i := 0; i <= 0x7f; i++ {
			switch {
			case GOARCH == "arm64" && GOOS == "darwin":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
	}

	if p == 0 {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle another 2GB of mappings (256 MB),
		// along with a reservation for an initial arena.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere and hope it's in the 2GB
		// following the bitmap (presumably the executable begins
		// near the bottom of memory, so we'll have to use up
		// most of memory before the kernel resorts to giving out
		// memory before the beginning of the text segment).
		//
		// Alternatively we could reserve 512 MB bitmap, enough
		// for 4GB of mappings, and then accept any memory the
		// kernel threw at us, but normally that's a waste of 512 MB
		// of address space, which is probably too much in a 32-bit world.
		//
		// If we fail to allocate, try again with a smaller arena.
		// This is necessary on Android L where we share a process
		// with ART, which reserves virtual memory aggressively.
		arenaSizes := []uintptr{
		for _, arenaSize := range arenaSizes {
			bitmapSize = _MaxArena32 / (sys.PtrSize * 8 / 4)
			spansSize = _MaxArena32 / _PageSize * sys.PtrSize
			if limit > 0 && arenaSize+bitmapSize+spansSize > limit {
				bitmapSize = (limit / 9) &^ ((1 << _PageShift) - 1)
				arenaSize = bitmapSize * 8
				spansSize = arenaSize / _PageSize * sys.PtrSize
			}
			spansSize = round(spansSize, _PageSize)
			// SysReserve treats the address we ask for, end, as a hint,
			// not as an absolute requirement. If we ask for the end
			// of the data segment but the operating system requires
			// a little more space before we can start allocating, it will
			// give out a slightly higher pointer. Except QEMU, which
			// is buggy, as usual: it won't adjust the pointer upward.
			// So adjust it upward a little bit ourselves: 1/4 MB to get
			// away from the running binary image and then round up
			// to a MB boundary.
			p = round(firstmoduledata.end+(1<<18), 1<<20)
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}

		if p == 0 {
			throw("runtime: cannot reserve arena virtual address space")
		}
	}
	// PageSize can be larger than OS definition of page size,
	// so SysReserve can give us a PageSize-unaligned pointer.
	// To overcome this we ask for PageSize more and round up the pointer.
	p1 := round(p, _PageSize)

	mheap_.spans = (**mspan)(unsafe.Pointer(p1))
	mheap_.bitmap = p1 + spansSize
	mheap_.arena_start = p1 + (spansSize + bitmapSize)
	mheap_.arena_used = mheap_.arena_start
	mheap_.arena_end = p + pSize
	mheap_.arena_reserved = reserved
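	// To summarize the layout just set up (illustrative, derived from the
	// assignments above):
	//
	//	p1                            spans array (spansSize bytes)
	//	p1 + spansSize                heap bitmap (bitmapSize bytes)
	//	p1 + spansSize + bitmapSize   arena_start, the arena itself
	//	p  + pSize                    arena_end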
	if mheap_.arena_start&(_PageSize-1) != 0 {
		println("bad pagesize", hex(p), hex(p1), hex(spansSize), hex(bitmapSize), hex(_PageSize), "start", hex(mheap_.arena_start))
		throw("misrounded allocation in mallocinit")
	}
	// Initialize the rest of the allocator.
	mheap_.init(spansSize)
	_g_ := getg()
	_g_.m.mcache = allocmcache()
}
// sysReserveHigh reserves space somewhere high in the address space.
// sysReserve doesn't actually reserve the full amount requested on
// 64-bit systems, because of problems with ulimit. Instead it checks
// that it can get the first 64 kB and assumes it can grab the rest as
// needed. This doesn't work well with the "let the kernel pick an address"
// mode, so don't do that. Pick a high address instead.
func sysReserveHigh(n uintptr, reserved *bool) unsafe.Pointer {
	if sys.PtrSize == 4 {
		return sysReserve(nil, n, reserved)
	}

	for i := 0; i <= 0x7f; i++ {
		p := uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
		p = uintptr(sysReserve(unsafe.Pointer(p), n, reserved))
		if p != 0 {
			return unsafe.Pointer(p)
		}
	}

	return sysReserve(nil, n, reserved)
}
func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
	if n > h.arena_end-h.arena_used {
		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
		// Reserve some more space.
		p_size := round(n+_PageSize, 256<<20)
		new_end := h.arena_end + p_size // Careful: can overflow
		if h.arena_end <= new_end && new_end <= h.arena_start+_MaxArena32 {
			// TODO: It would be bad if part of the arena
			// is reserved and part is not.
			p := uintptr(sysReserve(unsafe.Pointer(h.arena_end), p_size, &reserved))
			if p == h.arena_end {
				h.arena_end = new_end
				h.arena_reserved = reserved
			} else if h.arena_start <= p && p+p_size <= h.arena_start+_MaxArena32 {
				// Keep everything page-aligned.
				// Our pages are bigger than hardware pages.
				h.arena_end = p + p_size
				used := p + (-p & (_PageSize - 1))
				h.arena_reserved = reserved
			} else {
				// We haven't added this allocation to
				// the stats, so subtract it from a
				// fake stat (but avoid underflow).
				stat := uint64(p_size)
				sysFree(unsafe.Pointer(p), p_size, &stat)
			}
		}
	}

	if n <= h.arena_end-h.arena_used {
		// Keep taking from our reservation.
		p := h.arena_used
		sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
		if raceenabled {
			racemapshadow(unsafe.Pointer(p), n)
		}

		if p&(_PageSize-1) != 0 {
			throw("misrounded allocation in MHeap_SysAlloc")
		}
		return unsafe.Pointer(p)
	}

	// If using 64-bit, our reservation is all we have.
	if h.arena_end-h.arena_start >= _MaxArena32 {
		return nil
	}

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS
	// and hope that it is in the range we allocated bitmap for.
	p_size := round(n, _PageSize) + _PageSize
	p := uintptr(sysAlloc(p_size, &memstats.heap_sys))

	if p < h.arena_start || p+p_size-h.arena_start >= _MaxArena32 {
		if top-h.arena_start > _MaxArena32 {
			top = h.arena_start + _MaxArena32
		}
		print("runtime: memory allocated by OS (", hex(p), ") not in usable range [", hex(h.arena_start), ",", hex(top), ")\n")
		sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys)
		return nil
	}

	p += -p & (_PageSize - 1)
	if p+n > h.arena_used {
		if p_end > h.arena_end {
			h.arena_end = p_end
		}
		if raceenabled {
			racemapshadow(unsafe.Pointer(p), n)
		}
	}

	if p&(_PageSize-1) != 0 {
		throw("misrounded allocation in MHeap_SysAlloc")
	}
	return unsafe.Pointer(p)
}
// base address for all 0-byte allocations
var zerobase uintptr

const (
	_FlagNoScan = 1 << 0 // GC doesn't have to scan object
	_FlagNoZero = 1 << 1 // don't zero memory
)
// nextFreeFast returns the next free object if one is quickly available.
// Otherwise it returns 0.
func (c *mcache) nextFreeFast(sizeclass int8) gclinkptr {
	s := c.alloc[sizeclass]
	ctzIndex := uint8(s.allocCache & 0xff)
	theBit := uint64(ctzVals[ctzIndex])
	freeidx := s.freeindex // help the pre-SSA compiler out here with CSE.
	result := freeidx + uintptr(theBit)
	if result < s.nelems {
		s.allocCache >>= (theBit + 1)
		if freeidx%64 == 0 && freeidx != s.nelems {
			// We just incremented s.freeindex so it isn't 0,
			// so we are moving to the next allocCache chunk.
			whichByte := freeidx / 8
			s.refillAllocCache(whichByte)
		}
		s.freeindex = freeidx
		v := gclinkptr(result*s.elemsize + s.base())
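// The sketch below is illustrative only and is not used by the allocator:
// it counts trailing zero bits in a 64-bit word with a simple loop, which
// is the quantity the ctzVals table lookup above produces for the low byte
// of s.allocCache (assuming ctzVals is a 256-entry trailing-zero-count table).
func exampleTrailingZeros64(x uint64) uint64 {
	if x == 0 {
		return 64 // no set bit anywhere
	}
	var n uint64
	for x&1 == 0 {
		x >>= 1
		n++
	}
	return n
}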
// nextFree returns the next free object from the cached span if one is available.
// Otherwise it refills the cache with a span with an available object and
// returns that object along with a flag indicating that this was a
// heavyweight allocation. If it is a heavyweight allocation the caller must
// determine whether a new GC cycle needs to be started or if the GC is active
// whether this goroutine needs to assist the GC.
func (c *mcache) nextFree(sizeclass int8) (v gclinkptr, shouldhelpgc bool) {
	s := c.alloc[sizeclass]
	freeIndex := s.nextFreeIndex()
	if freeIndex == s.nelems {
		if uintptr(s.allocCount) != s.nelems {
			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
		}
		c.refill(int32(sizeclass))
		shouldhelpgc = true
		s = c.alloc[sizeclass]

		freeIndex = s.nextFreeIndex()
	}

	if freeIndex >= s.nelems {
		throw("freeIndex is not valid")
	}

	v = gclinkptr(freeIndex*s.elemsize + s.base())
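	// For illustration (hypothetical values): with s.base() == 0x00c420100000,
	// s.elemsize == 48 and freeIndex == 3, the line above yields the address
	// 0x00c420100000 + 3*48 = 0x00c420100090, i.e. the fourth object slot in
	// the span.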
	if uintptr(s.allocCount) > s.nelems {
		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
		throw("s.allocCount > s.nelems")
	}
// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
	if gcphase == _GCmarktermination {
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}

	if flags&flagNoScan == 0 && typ == nil {
		throw("malloc missing type")
	}

		align = uintptr(typ.align)
		return persistentalloc(size, align, &memstats.other_sys)
	// assistG is the G to charge for this allocation, or nil if
	// GC is not currently active.
	if gcBlackenEnabled != 0 {
		// Charge the current user G for this allocation.
		if assistG.m.curg != nil {
			assistG = assistG.m.curg
		}
		// Charge the allocation against the G. We'll account
		// for internal fragmentation at the end of mallocgc.
		assistG.gcAssistBytes -= int64(size)

		if assistG.gcAssistBytes < 0 {
			// This G is in debt. Assist the GC to correct
			// this before allocating. This must happen
			// before disabling preemption.
			gcAssistAlloc(assistG)
		}
	}

	// Set mp.mallocing to keep from being preempted by GC.
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}

	shouldhelpgc := false
	if size <= maxSmallSize {
		if flags&flagNoScan != 0 && size < maxTinySize {
			// The tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be FlagNoScan (don't have pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// The size of the memory block used for combining (maxTinySize) is tunable.
			// The current setting is 16 bytes, which relates to 2x worst case memory
			// wastage (when all but one of the subobjects are unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst case wastage.
			// The best case saving is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed explicitly.
			// So when an object will be freed explicitly, we ensure that
			// its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows setting finalizers
			// for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces heap size by ~20%.
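			//
			// A worked example (illustrative values only): suppose
			// c.tinyoffset is 5 and a 7-byte noscan request arrives.
			// 7 is odd, so the alignment code below leaves the offset at 5,
			// and since 5+7 <= 16 the object is carved out of the current
			// tiny block, leaving tinyoffset at 12. A following 8-byte
			// request would round the offset up to 16, no longer fit, and
			// start a new 16-byte block.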
			// Align tiny pointer for required (conservative) alignment.
			} else if size&3 == 0 {
			} else if size&1 == 0 {
			}
			if off+size <= maxTinySize && c.tiny != 0 {
				// The object fits into existing tiny block.
				x = unsafe.Pointer(c.tiny + off)
				c.tinyoffset = off + size
			}
			// Allocate a new maxTinySize block.
			v = c.nextFreeFast(tinySizeClass)
			if v == 0 {
				v, shouldhelpgc = c.nextFree(tinySizeClass)
			}
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on amount of remaining free space.
			if size < c.tinyoffset || c.tiny == 0 {
				sizeclass = size_to_class8[(size+7)>>3]
			} else {
				sizeclass = size_to_class128[(size-1024+127)>>7]
			}
			size = uintptr(class_to_size[sizeclass])

			v = c.nextFreeFast(sizeclass)
			if v == 0 {
				v, shouldhelpgc = c.nextFree(sizeclass)
			}
			x = unsafe.Pointer(v)
			if flags&flagNoZero == 0 {
				memclr(unsafe.Pointer(v), size)
				// TODO:(rlh) Only clear if object is not known to be zeroed.
			}
			s = largeAlloc(size, flags)
		x = unsafe.Pointer(s.base())

	if flags&flagNoScan != 0 {
		heapBitsSetTypeNoScan(uintptr(x), size)
	} else {
		// If allocating a defer+arg block, now that we've picked a malloc size
		// large enough to hold everything, cut the "asked for" size down to
		// just the defer header, so that the GC bitmap will record the arg block
		// as containing nothing at all (as if it were unused space at the end of
		// a malloc block caused by size rounding).
		// The defer arg areas are scanned as part of scanstack.
		if typ == deferType {
			dataSize = unsafe.Sizeof(_defer{})
		}
		heapBitsSetType(uintptr(x), size, dataSize, typ)
		if dataSize > typ.size {
			// Array allocation. If there are any
			// pointers, GC has to scan to the last
			// element.
			if typ.ptrdata != 0 {
				c.local_scan += dataSize - typ.size + typ.ptrdata
			}
		} else {
			c.local_scan += typ.ptrdata
		}
	}
	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	// GCmarkterminate allocates black.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase == _GCmarktermination || gcBlackenPromptly {
		gcmarknewobject_m(uintptr(x), size)
	}
	// The object x is about to be reused but tracefree and msanfree
	// need to be informed.
	// TODO:(rlh) It is quite possible that this object is being allocated
	// out of a fresh span and that there is no preceding call to
	// tracealloc with this object. If this is an issue then initialization
	// of the fresh span needs to leave some crumbs around that can be used to
	// avoid these calls. Furthermore these crumbs are likely the same as
	// those needed to determine if the object needs to be zeroed.
	// In the case of msanfree it does not make sense to call msanfree
	// followed by msanmalloc. msanfree indicates that the bytes are not
	// initialized but msanmalloc is about to indicate that they are.
	// It makes no difference whether msanmalloc has been called on these
	// bytes or not.
	if debug.allocfreetrace != 0 {
		tracefree(unsafe.Pointer(x), size)
	}
	if debug.allocfreetrace != 0 {
		tracealloc(x, size, typ)
	}

	if rate := MemProfileRate; rate > 0 {
		if size < uintptr(rate) && int32(size) < c.next_sample {
			c.next_sample -= int32(size)
		} else {
			profilealloc(mp, x, size)
		}
	}

	if assistG != nil {
		// Account for internal fragmentation in the assist
		// debt now that we know it.
		assistG.gcAssistBytes -= int64(size - dataSize)
	}

	if shouldhelpgc && gcShouldStart(false) {
		gcStart(gcBackgroundMode, false)
	}

	return x
}
func largeAlloc(size uintptr, flag uint32) *mspan {
	// print("largeAlloc size=", size, "\n")

	if size+_PageSize < size {
		throw("out of memory")
	}
	npages := size >> _PageShift
	if size&_PageMask != 0 {
		npages++
	}
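	// For illustration (hypothetical request): a size of 3*_PageSize+1 bytes
	// gives npages = 3 from the shift, then the non-zero remainder bumps it
	// to 4, so the span covers four pages and s.limit (set below) marks the
	// end of the originally requested size inside that span.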
	// Deduct credit for this span allocation and sweep if
	// necessary. mHeap_Alloc will also sweep npages, so this only
	// pays the debt down to npage pages.
	deductSweepCredit(npages*_PageSize, npages)

	s := mheap_.alloc(npages, 0, true, flag&_FlagNoZero == 0)
	if s == nil {
		throw("out of memory")
	}
	s.limit = s.base() + size
	heapBitsForSpan(s.base()).initSpan(s)
	return s
}
// implementation of new builtin
func newobject(typ *_type) unsafe.Pointer {
	flags := uint32(0)
	if typ.kind&kindNoPointers != 0 {
		flags |= flagNoScan
	}
	return mallocgc(typ.size, typ, flags)
}

//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return newobject(typ)
}

// implementation of make builtin for slices
func newarray(typ *_type, n uintptr) unsafe.Pointer {
	flags := uint32(0)
	if typ.kind&kindNoPointers != 0 {
		flags |= flagNoScan
	}
	if int(n) < 0 || (typ.size > 0 && n > _MaxMem/typ.size) {
		panic("runtime: allocation size out of range")
	}
	return mallocgc(typ.size*n, typ, flags)
}

//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n uintptr) unsafe.Pointer {
	return newarray(typ, n)
}
// rawmem returns a chunk of pointerless memory. It is
// not zeroed.
func rawmem(size uintptr) unsafe.Pointer {
	return mallocgc(size, nil, flagNoScan|flagNoZero)
}

func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	mp.mcache.next_sample = nextSample()
	mProf_Malloc(x, size)
}
// nextSample returns the next sampling point for heap profiling.
// It produces a random variable with a geometric distribution and
// mean MemProfileRate. This is done by generating a uniformly
// distributed random number and applying the cumulative distribution
// function for an exponential.
func nextSample() int32 {
	// Plan 9 doesn't support floating point in the note handler.
	if g := getg(); g == g.m.gsignal {
		return nextSampleNoFP()
	}

	period := MemProfileRate

	// make nextSample not overflow. Maximum possible step is
	// -ln(1/(1<<kRandomBitCount)) * period, approximately 20 * period.
	switch {
	case period > 0x7000000:
	// Let m be the sample rate,
	// the probability density function is m*exp(-mx), so the CDF is
	// p = 1 - exp(-mx), so
	// q = 1 - p == exp(-mx)
	// log_e(q) = -mx
	// -log_e(q)/m = x
	// x = -log_e(q) * period                 ; period = 1/m
	// x = log_2(q) * (-log_e(2)) * period    ; Using log_2 for efficiency
	const randomBitCount = 26
	q := fastrand1()%(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(period))) + 1
}
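// For illustration: with the default MemProfileRate (512 KiB), a draw of
// q == 1<<25 (the middle of the range) gives qlog == 25 - 26 == -1, so the
// value returned above is about 0.693*period, which is the median of an
// exponential distribution whose mean is period, as derived in the comment.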
// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() int32 {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return int32(int(fastrand1()) % (2 * rate))
	}
	return 0
}
type persistentAlloc struct {
	base unsafe.Pointer
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}
// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	var p unsafe.Pointer
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return p
}
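// An illustrative (hypothetical) call: persistentalloc(unsafe.Sizeof(mspan{}), 0, &memstats.other_sys)
// returns a never-freed, zeroed block big enough for an mspan, charged to the
// other_sys statistic and aligned to the default 8 bytes.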
// Must run on system stack because stack growth can (re)invoke it.
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	const maxBlock = 64 << 10 // VM reservation granularity is 64K on windows

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align&(align-1) != 0 {
		throw("persistentalloc: align is not a power of 2")
	}
	if align > _PageSize {
		throw("persistentalloc: align is too large")
	}

	if size >= maxBlock {
		return sysAlloc(size, sysStat)
	}

	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = round(persistent.off, align)
	if persistent.off+size > chunk || persistent.base == nil {
		persistent.base = sysAlloc(chunk, &memstats.other_sys)
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}
	}
	p := add(persistent.base, persistent.off)
	persistent.off += size
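	// For illustration (hypothetical state): with align == 8 and an incoming
	// persistent.off of 13, the round call above bumps the offset to 16, so
	// the block handed out at persistent.base+16 honors the requested
	// power-of-two alignment before off advances by size.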
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		mSysStatInc(sysStat, size)
		mSysStatDec(&memstats.other_sys, size)