// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator, based on tcmalloc.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html
//
// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 100 size classes, each of which
// has its own free list of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using free list
//
// The allocator's data structures are:
//
//	FixAlloc: a free-list allocator for fixed-size objects,
//		used to manage storage used by the allocator.
//	MHeap: the malloc heap, managed at page (4096-byte) granularity.
//	MSpan: a run of pages managed by the MHeap.
//	MCentral: a shared free list for a given size class.
//	MCache: a per-thread (in Go, per-P) cache for small objects.
//	MStats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding MCache free list.
//	   If the list is not empty, allocate an object from it.
//	   This can all be done without acquiring a lock.
//
//	2. If the MCache free list is empty, replenish it by
//	   taking a bunch of objects from the MCentral free list.
//	   Moving a bunch amortizes the cost of acquiring the MCentral lock.
//
//	3. If the MCentral free list is empty, replenish it by
//	   allocating a run of pages from the MHeap and then
//	   chopping that memory into objects of the given size.
//	   Allocating many objects amortizes the cost of locking
//	   the heap.
//
//	4. If the MHeap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Freeing a small object proceeds up the same hierarchy:
//
//	1. Look up the size class for the object and add it to
//	   the MCache free list.
//
//	2. If the MCache free list is too long or the MCache has
//	   too much memory, return some to the MCentral free lists.
//
//	3. If all the objects in a given span have returned to
//	   the MCentral list, return that span to the page heap.
//
//	4. If the heap has too much memory, return some to the
//	   operating system.
//
// TODO(rsc): Step 4 is not implemented.
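//
// A rough sketch of that lookup order, with hypothetical helper names standing
// in for the real fast and slow paths (nextFreeFast, mcache.refill, and
// mheap.alloc further below):
//
//	func allocSmallSketch(c *mcache, size uintptr) gclinkptr {
//		sc := sizeClassFor(size)        // step 1: round up to a size class
//		if v := cacheAlloc(c, sc); v != 0 {
//			return v                // lock-free hit in the per-P MCache
//		}
//		refillFromCentral(c, sc)        // step 2: bulk refill under the MCentral lock
//		return cacheAlloc(c, sc)        // steps 3 and 4 happen inside the refill path
//	}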
//
// Allocating and freeing a large object uses the page heap
// directly, bypassing the MCache and MCentral free lists.
//
// The small objects on the MCache and MCentral free lists
// may or may not be zeroed. They are zeroed if and only if
// the second word of the object is zero. A span in the
// page heap is zeroed unless s->needzero is set. When a span
// is allocated to break into small objects, it is zeroed if needed
// and s->needzero is set. There are two main benefits to delaying the
// zeroing:
//
//	1. stack frames allocated from the small object lists
//	   or the page heap can avoid zeroing altogether.
//	2. the cost of zeroing when reusing a small object is
//	   charged to the mutator, not the garbage collector.
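//
// A minimal sketch of that decision, assuming a simplified span type with just
// the fields needed here (the real logic lives where spans are carved into
// size-class objects):
//
//	type spanSketch struct {
//		base     unsafe.Pointer
//		nbytes   uintptr
//		needzero bool
//	}
//
//	func prepareSpan(s *spanSketch) {
//		if s.needzero {
//			memclr(s.base, s.nbytes) // zero only if a previous user dirtied it
//		}
//		s.needzero = true // small-object reuse will need zeroing later
//	}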
83 "runtime/internal/sys"
90 maxTinySize = _TinySize
91 tinySizeClass = _TinySizeClass
92 maxSmallSize = _MaxSmallSize
94 pageShift = _PageShift
97 // By construction, single page spans of the smallest object class
98 // have the most objects per span.
99 maxObjsPerSpan = pageSize / 8
101 mSpanInUse = _MSpanInUse
103 concurrentSweep = _ConcurrentSweep
108 _PageSize = 1 << _PageShift
109 _PageMask = _PageSize - 1
113 // _64bit = 1 on 64-bit systems, 0 on 32-bit systems
114 _64bit = 1 << (^uintptr(0) >> 63) / 2
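	// (^uintptr(0) >> 63 is 1 when uintptr is 64 bits wide and 0 when it is
	// 32 bits wide, so the expression above evaluates, left to right, to
	// (1<<1)/2 = 1 on 64-bit systems and (1<<0)/2 = 0 on 32-bit systems.)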
	// Computed constant. The definition of MaxSmallSize and the
	// algorithm in msize.go produce some number of different allocation
	// size classes. NumSizeClasses is that number. It's needed here
	// because there are static arrays of this length; when msize runs its
	// size choosing algorithm it double-checks that NumSizeClasses agrees.
	// Tunable constants.
	_MaxSmallSize = 32 << 10

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.

	_FixAllocChunk  = 16 << 10               // Chunk size for FixAlloc
	_MaxMHeapList   = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap.
	_HeapAllocChunk = 1 << 20                // Chunk size for heap growth

	// Per-P, per-order stack segment cache size.
	_StackCacheSize = 32 * 1024
	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//   OS               | FixedStack | NumStackOrders
	//   -----------------+------------+---------------
	//   linux/darwin/bsd | 2KB        | 4
	//   windows/32       | 4KB        | 3
	//   windows/64       | 8KB        | 2
	_NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9
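	// (For example, on linux/amd64: 4 - 8/4*0 - 1*0 = 4 orders, caching
	// 2KB, 4KB, 8KB, and 16KB stacks. On windows/amd64: 4 - 8/4*1 - 0 = 2,
	// so only 8KB and 16KB stacks are cached, keeping the same 16KB maximum.)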
	// Number of bits in page to span calculations (4k pages).
	// On Windows 64-bit we limit the arena to 32GB or 35 bits.
	// Windows counts memory used by the page table into committed memory
	// of the process, so we can't reserve too much memory.
	// See https://golang.org/issue/5402 and https://golang.org/issue/5236.
	// On other 64-bit platforms, we limit the arena to 512GB, or 39 bits.
	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address space.
	// On Darwin/arm64, we cannot reserve more than ~5GB of virtual memory,
	// but as most devices have less than 4GB of physical memory anyway, we
	// try to be conservative here, and only ask for a 2GB heap.
	_MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*32
	_MHeapMap_Bits      = _MHeapMap_TotalBits - _PageShift

	_MaxMem = uintptr(1<<_MHeapMap_TotalBits - 1)
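	// (Worked example for linux/amd64: only the second term of the sum is
	// non-zero, so _MHeapMap_TotalBits = 39 and _MaxMem = 1<<39 - 1, i.e. the
	// 512GB arena mentioned above. On darwin/arm64 the third term gives 31
	// bits, the 2GB heap.)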
	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 cpus.

// Page number (address>>pageShift)

const _MaxArena32 = 2 << 30
// OS-defined helpers:
//
// sysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
//
// NOTE: sysAlloc returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysAlloc.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes.
// SysUsed notifies the operating system that the contents
// of the memory region are needed again.
//
// SysFree returns the memory region to the operating system
// unconditionally; this is only used if an out-of-memory error has been
// detected midway through an allocation. It is okay if SysFree is a no-op.
//
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
// location if that one is unavailable. On some systems and in some
// cases SysReserve will simply check that the address space is
// available and not actually reserve it. If SysReserve returns
// non-nil, it sets *reserved to true if the address space is
// reserved, false if it has merely been checked.
// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by SysReserve.
//
// SysMap maps previously reserved address space for use.
// The reserved argument is true if the address space was really
// reserved, not merely checked.
//
// SysFault marks an (already sysAlloc'd) region to fault
// if accessed. Used only for debugging the runtime.
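//
// The typical pattern, sketched here with the signatures used later in this
// file (mallocinit and mheap.sysAlloc are the real callers; size stands for
// whatever amount the caller needs), is to reserve address space once and map
// pieces of it on demand:
//
//	var reserved bool
//	p := sysReserve(nil, size, &reserved)         // address space only
//	sysMap(p, size, reserved, &memstats.heap_sys) // back it with memory before use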
	if class_to_size[_TinySizeClass] != _TinySize {
		throw("bad TinySizeClass")

	var p, bitmapSize, spansSize, pSize, limit uintptr

	// limit = runtime.memlimit();
	// See https://golang.org/issue/5049
	// TODO(rsc): Fix after 1.1.
	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found. The arena begins with a bitmap large
	// enough to hold 4 bits per allocated word.
	if sys.PtrSize == 8 && (limit == 0 || limit > 1<<30) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 512 GB (MaxMem) should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
		// Allocating a 512 GB region takes away 39 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 9 bits
		// in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer which reserves all memory up to 0x0100.
		// These choices are both for debuggability and to reduce the
		// odds of a conservative garbage collector (as is still used in gccgo)
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// Actually we reserve 544 GB (because the bitmap ends up being 32 GB)
		// but it hardly matters: e0 00 is not valid UTF-8 either.
		//
		// If this fails we fall back to the 32 bit memory mechanism.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On darwin/arm64, the address space is even smaller.
		arenaSize := round(_MaxMem, _PageSize)
		bitmapSize = arenaSize / (sys.PtrSize * 8 / 4)
		spansSize = arenaSize / _PageSize * sys.PtrSize
		spansSize = round(spansSize, _PageSize)
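		// (With the 512 GB arena: bitmapSize = 512 GB / 16 = 32 GB, since the
		// bitmap stores 4 bits per 8-byte word, which is where the ~544 GB
		// figure above comes from; spansSize adds one *mspan pointer per
		// arena page on top of that.)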
		for i := 0; i <= 0x7f; i++ {
			case GOARCH == "arm64" && GOOS == "darwin":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)

			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle another 2GB of mappings (256 MB),
		// along with a reservation for an initial arena.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere and hope it's in the 2GB
		// following the bitmap (presumably the executable begins
		// near the bottom of memory, so we'll have to use up
		// most of memory before the kernel resorts to giving out
		// memory before the beginning of the text segment).
		//
		// Alternatively we could reserve 512 MB bitmap, enough
		// for 4GB of mappings, and then accept any memory the
		// kernel threw at us, but normally that's a waste of 512 MB
		// of address space, which is probably too much in a 32-bit world.
		//
		// If we fail to allocate, try again with a smaller arena.
		// This is necessary on Android L where we share a process
		// with ART, which reserves virtual memory aggressively.
		arenaSizes := []uintptr{

		for _, arenaSize := range arenaSizes {
			bitmapSize = _MaxArena32 / (sys.PtrSize * 8 / 4)
			spansSize = _MaxArena32 / _PageSize * sys.PtrSize
			if limit > 0 && arenaSize+bitmapSize+spansSize > limit {
				bitmapSize = (limit / 9) &^ ((1 << _PageShift) - 1)
				arenaSize = bitmapSize * 8
				spansSize = arenaSize / _PageSize * sys.PtrSize
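				// (On 32-bit, the bitmap is 1/8 the size of the arena it
				// describes (sys.PtrSize*8/4 == 8), so splitting limit 1:8
				// between bitmap and arena keeps bitmapSize+arenaSize within
				// the limit.)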
			spansSize = round(spansSize, _PageSize)

			// SysReserve treats the address we ask for, end, as a hint,
			// not as an absolute requirement. If we ask for the end
			// of the data segment but the operating system requires
			// a little more space before we can start allocating, it will
			// give out a slightly higher pointer. Except QEMU, which
			// is buggy, as usual: it won't adjust the pointer upward.
			// So adjust it upward a little bit ourselves: 1/4 MB to get
			// away from the running binary image and then round up
			// to a MB boundary.
			p = round(firstmoduledata.end+(1<<18), 1<<20)
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))

			throw("runtime: cannot reserve arena virtual address space")
	// PageSize can be larger than OS definition of page size,
	// so SysReserve can give us a PageSize-unaligned pointer.
	// To overcome this we ask for PageSize more and round up the pointer.
	p1 := round(p, _PageSize)

	mheap_.spans = (**mspan)(unsafe.Pointer(p1))
	mheap_.bitmap = p1 + spansSize
	mheap_.arena_start = p1 + (spansSize + bitmapSize)
	mheap_.arena_used = mheap_.arena_start
	mheap_.arena_end = p + pSize
	mheap_.arena_reserved = reserved

	if mheap_.arena_start&(_PageSize-1) != 0 {
		println("bad pagesize", hex(p), hex(p1), hex(spansSize), hex(bitmapSize), hex(_PageSize), "start", hex(mheap_.arena_start))
		throw("misrounded allocation in mallocinit")

	// Initialize the rest of the allocator.
	mheap_.init(spansSize)

	_g_.m.mcache = allocmcache()
// sysReserveHigh reserves space somewhere high in the address space.
// sysReserve doesn't actually reserve the full amount requested on
// 64-bit systems, because of problems with ulimit. Instead it checks
// that it can get the first 64 kB and assumes it can grab the rest as
// needed. This doesn't work well with the "let the kernel pick an address"
// mode, so don't do that. Pick a high address instead.
func sysReserveHigh(n uintptr, reserved *bool) unsafe.Pointer {
	if sys.PtrSize == 4 {
		return sysReserve(nil, n, reserved)

	for i := 0; i <= 0x7f; i++ {
		p := uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
		p = uintptr(sysReserve(unsafe.Pointer(p), n, reserved))
			return unsafe.Pointer(p)

	return sysReserve(nil, n, reserved)
func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
	if n > h.arena_end-h.arena_used {
		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
		// Reserve some more space.
		p_size := round(n+_PageSize, 256<<20)
		new_end := h.arena_end + p_size // Careful: can overflow
		if h.arena_end <= new_end && new_end <= h.arena_start+_MaxArena32 {
			// TODO: It would be bad if part of the arena
			// is reserved and part is not.
			p := uintptr(sysReserve(unsafe.Pointer(h.arena_end), p_size, &reserved))

			if p == h.arena_end {
				h.arena_end = new_end
				h.arena_reserved = reserved
			} else if h.arena_start <= p && p+p_size <= h.arena_start+_MaxArena32 {
				// Keep everything page-aligned.
				// Our pages are bigger than hardware pages.
				h.arena_end = p + p_size
				used := p + (-p & (_PageSize - 1))

				h.arena_reserved = reserved

				// We haven't added this allocation to
				// the stats, so subtract it from a
				// fake stat (but avoid underflow).
				stat := uint64(p_size)
				sysFree(unsafe.Pointer(p), p_size, &stat)

	if n <= h.arena_end-h.arena_used {
		// Keep taking from our reservation.
		sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)

			racemapshadow(unsafe.Pointer(p), n)

		if p&(_PageSize-1) != 0 {
			throw("misrounded allocation in MHeap_SysAlloc")
		return unsafe.Pointer(p)
	// If using 64-bit, our reservation is all we have.
	if h.arena_end-h.arena_start >= _MaxArena32 {

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS
	// and hope that it is in the range we allocated bitmap for.
	p_size := round(n, _PageSize) + _PageSize
	p := uintptr(sysAlloc(p_size, &memstats.heap_sys))

	if p < h.arena_start || p+p_size-h.arena_start >= _MaxArena32 {
		if top-h.arena_start > _MaxArena32 {
			top = h.arena_start + _MaxArena32
		print("runtime: memory allocated by OS (", hex(p), ") not in usable range [", hex(h.arena_start), ",", hex(top), ")\n")
		sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys)

	p += -p & (_PageSize - 1)
	if p+n > h.arena_used {

		if p_end > h.arena_end {

			racemapshadow(unsafe.Pointer(p), n)

	if p&(_PageSize-1) != 0 {
		throw("misrounded allocation in MHeap_SysAlloc")
	return unsafe.Pointer(p)
// base address for all 0-byte allocations

// nextFreeFast returns the next free object if one is quickly available.
// Otherwise it returns 0.
func (c *mcache) nextFreeFast(sizeclass int8) gclinkptr {
	s := c.alloc[sizeclass]
	ctzIndex := uint8(s.allocCache & 0xff)
		theBit := uint64(ctzVals[ctzIndex])
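		// (allocCache is a bitmap of the object slots starting at s.freeindex,
		// with set bits marking free slots; ctzVals maps the low byte to the
		// index of its lowest set bit, so theBit is the offset of the next
		// free slot from freeindex.)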
		freeidx := s.freeindex // help the pre ssa compiler out here with cse.
		result := freeidx + uintptr(theBit)
		if result < s.nelems {
			s.allocCache >>= (theBit + 1)

			if freeidx%64 == 0 && freeidx != s.nelems {
				// We just incremented s.freeindex so it isn't 0
				// so we are moving to the next allocCache word.
				whichByte := freeidx / 8
				s.refillAllocCache(whichByte)

			s.freeindex = freeidx
			v := gclinkptr(result*s.elemsize + s.base())

// nextFree returns the next free object from the cached span if one is available.
// Otherwise it refills the cache with a span with an available object and
// returns that object along with a flag indicating that this was a heavy
// weight allocation. If it is a heavy weight allocation the caller must
// determine whether a new GC cycle needs to be started or if the GC is active
// whether this goroutine needs to assist the GC.
func (c *mcache) nextFree(sizeclass int8) (v gclinkptr, shouldhelpgc bool) {
	s := c.alloc[sizeclass]

	freeIndex := s.nextFreeIndex()
	if freeIndex == s.nelems {
		if uintptr(s.allocCount) != s.nelems {
			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
			throw("s.allocCount != s.nelems && freeIndex == s.nelems")

		c.refill(int32(sizeclass))

		s = c.alloc[sizeclass]

		freeIndex = s.nextFreeIndex()

	if freeIndex >= s.nelems {
		throw("freeIndex is not valid")

	v = gclinkptr(freeIndex*s.elemsize + s.base())

	if uintptr(s.allocCount) > s.nelems {
		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
		throw("s.allocCount > s.nelems")

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if gcphase == _GCmarktermination {
		throw("mallocgc called with gcphase == _GCmarktermination")

		return unsafe.Pointer(&zerobase)

			align = uintptr(typ.align)
		return persistentalloc(size, align, &memstats.other_sys)

	// assistG is the G to charge for this allocation, or nil if
	// GC is not currently active.
	if gcBlackenEnabled != 0 {
		// Charge the current user G for this allocation.
		if assistG.m.curg != nil {
			assistG = assistG.m.curg

		// Charge the allocation against the G. We'll account
		// for internal fragmentation at the end of mallocgc.
		assistG.gcAssistBytes -= int64(size)

		if assistG.gcAssistBytes < 0 {
			// This G is in debt. Assist the GC to correct
			// this before allocating. This must happen
			// before disabling preemption.
			gcAssistAlloc(assistG)

	// Set mp.mallocing to keep from being preempted by GC.
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	if mp.gsignal == getg() {
		throw("malloc during signal")

	shouldhelpgc := false

	noscan := typ == nil || typ.kind&kindNoPointers != 0
	if size <= maxSmallSize {
		if noscan && size < maxTinySize {
			// Tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be noscan (don't have pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// Size of the memory block used for combining (maxTinySize) is tunable.
			// Current setting is 16 bytes, which relates to 2x worst case memory
			// wastage (when all but one subobjects are unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst case wastage.
			// The best case saving is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed explicitly.
			// So when an object will be freed explicitly, we ensure that
			// its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows setting finalizers
			// for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces heap size by ~20%.
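			//
			// As an illustrative sketch of the bookkeeping below (c.tiny and
			// c.tinyoffset are the real fields; the constants here are just an
			// example request):
			//
			//	off := round(c.tinyoffset, 4)             // align a 4-byte request
			//	if off+4 <= maxTinySize && c.tiny != 0 {
			//		x := unsafe.Pointer(c.tiny + off) // fits in the current block
			//		c.tinyoffset = off + 4
			//	}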
			// Align tiny pointer for required (conservative) alignment.
			} else if size&3 == 0 {
			} else if size&1 == 0 {
			if off+size <= maxTinySize && c.tiny != 0 {
				// The object fits into existing tiny block.
				x = unsafe.Pointer(c.tiny + off)
				c.tinyoffset = off + size

			// Allocate a new maxTinySize block.

			v = c.nextFreeFast(tinySizeClass)
				v, shouldhelpgc = c.nextFree(tinySizeClass)
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on amount of remaining free space.
			if size < c.tinyoffset || c.tiny == 0 {
			sizeclass = size_to_class8[(size+7)>>3]
				sizeclass = size_to_class128[(size-1024+127)>>7]
			size = uintptr(class_to_size[sizeclass])

			v = c.nextFreeFast(sizeclass)
				v, shouldhelpgc = c.nextFree(sizeclass)
			x = unsafe.Pointer(v)
				memclr(unsafe.Pointer(v), size)
			// TODO:(rlh) Only clear if object is not known to be zeroed.

			s = largeAlloc(size, needzero)

		x = unsafe.Pointer(s.base())

		heapBitsSetTypeNoScan(uintptr(x), size)
		// If allocating a defer+arg block, now that we've picked a malloc size
		// large enough to hold everything, cut the "asked for" size down to
		// just the defer header, so that the GC bitmap will record the arg block
		// as containing nothing at all (as if it were unused space at the end of
		// a malloc block caused by size rounding).
		// The defer arg areas are scanned as part of scanstack.
		if typ == deferType {
			dataSize = unsafe.Sizeof(_defer{})
		heapBitsSetType(uintptr(x), size, dataSize, typ)
		if dataSize > typ.size {
			// Array allocation. If there are any
			// pointers, GC has to scan to the last
			// element of the array.
			if typ.ptrdata != 0 {
				scanSize = dataSize - typ.size + typ.ptrdata
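				// (For example, an array of four 16-byte elements whose
				// ptrdata is 8: dataSize = 64, so scanSize = 64 - 16 + 8 = 56,
				// skipping the pointer-free tail of the last element.)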
			scanSize = typ.ptrdata

		c.local_scan += scanSize

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.

	// Allocate black during GC.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase != _GCoff {
		gcmarknewobject(uintptr(x), size, scanSize)
	// The object x is about to be reused but tracefree and msanfree
	// need to be informed.
	// TODO:(rlh) It is quite possible that this object is being allocated
	// out of a fresh span and that there is no preceding call to
	// tracealloc with this object. If this is an issue then initialization
	// of the fresh span needs to leave some crumbs around that can be used to
	// avoid these calls. Furthermore these crumbs are likely the same as
	// those needed to determine if the object needs to be zeroed.
	// In the case of msanfree it does not make sense to call msanfree
	// followed by msanmalloc. msanfree indicates that the bytes are not
	// initialized but msanmalloc is about to indicate that they are.
	// It makes no difference whether msanmalloc has been called on these
	// bytes or not.
	if debug.allocfreetrace != 0 {
		tracefree(unsafe.Pointer(x), size)

	if debug.allocfreetrace != 0 {
		tracealloc(x, size, typ)
	if rate := MemProfileRate; rate > 0 {
		if size < uintptr(rate) && int32(size) < c.next_sample {
			c.next_sample -= int32(size)
			profilealloc(mp, x, size)

		// Account for internal fragmentation in the assist
		// debt now that we know it.
		assistG.gcAssistBytes -= int64(size - dataSize)

	if shouldhelpgc && gcShouldStart(false) {
		gcStart(gcBackgroundMode, false)

func largeAlloc(size uintptr, needzero bool) *mspan {
	// print("largeAlloc size=", size, "\n")

	if size+_PageSize < size {
		throw("out of memory")

	npages := size >> _PageShift
	if size&_PageMask != 0 {

	// Deduct credit for this span allocation and sweep if
	// necessary. mHeap_Alloc will also sweep npages, so this only
	// pays the debt down to npage pages.
	deductSweepCredit(npages*_PageSize, npages)

	s := mheap_.alloc(npages, 0, true, needzero)
		throw("out of memory")
	s.limit = s.base() + size
	heapBitsForSpan(s.base()).initSpan(s)

// implementation of new builtin
func newobject(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)

//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return newobject(typ)

// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer {
	if n < 0 || uintptr(n) > maxSliceCap(typ.size) {
		panic(plainError("runtime: allocation size out of range"))
	return mallocgc(typ.size*uintptr(n), typ, true)

//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)

func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	mp.mcache.next_sample = nextSample()
	mProf_Malloc(x, size)

// nextSample returns the next sampling point for heap profiling.
// It produces a random variable with a geometric distribution and
// mean MemProfileRate. This is done by generating a uniformly
// distributed random number and applying the cumulative distribution
// function for an exponential.
func nextSample() int32 {
		// Plan 9 doesn't support floating point in note handler.
		if g := getg(); g == g.m.gsignal {
			return nextSampleNoFP()

	period := MemProfileRate

	// make nextSample not overflow. Maximum possible step is
	// -ln(1/(1<<kRandomBitCount)) * period, approximately 20 * period.
	case period > 0x7000000:

	// Let m be the sample rate,
	// the probability distribution function is m*exp(-mx), so the CDF is
	// p = 1 - exp(-mx), so
	// q = 1 - p == exp(-mx)
	// x = -log_e(q) * period
	// x = log_2(q) * (-log_e(2)) * period    ; Using log_2 for efficiency
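	//
	// (Sanity check: q is drawn uniformly from [1, 1<<randomBitCount], so for
	// the median draw q = 1<<(randomBitCount-1) we get qlog = -1 and
	// x = ln(2) * period, which is exactly the median of an exponential
	// distribution with mean period.)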
	const randomBitCount = 26
	q := fastrand1()%(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount

	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(period))) + 1

// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() int32 {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow

		return int32(int(fastrand1()) % (2 * rate))

type persistentAlloc struct {

var globalAlloc struct {

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
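//
// An illustrative call (the size and stat counter here are just examples):
//
//	p := persistentalloc(1024, 0, &memstats.other_sys)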
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
		p = persistentalloc1(size, align, sysStat)

// Must run on system stack because stack growth can (re)invoke it.
func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer {
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows

		throw("persistentalloc: size == 0")

	if align&(align-1) != 0 {
		throw("persistentalloc: align is not a power of 2")
	if align > _PageSize {
		throw("persistentalloc: align is too large")

	if size >= maxBlock {
		return sysAlloc(size, sysStat)

	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	persistent.off = round(persistent.off, align)
	if persistent.off+size > chunk || persistent.base == nil {
		persistent.base = sysAlloc(chunk, &memstats.other_sys)
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			throw("runtime: cannot allocate memory")

	p := add(persistent.base, persistent.off)
	persistent.off += size
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)

	if sysStat != &memstats.other_sys {
		mSysStatInc(sysStat, size)
		mSysStatDec(&memstats.other_sys, size)