// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.go for overview.

	"runtime/internal/atomic"
	"runtime/internal/sys"
const (
	// minPhysPageSize is a lower-bound on the physical page size. The
	// true physical page size may be larger than this. In contrast,
	// sys.PhysPageSize is an upper-bound on the physical page size.
	minPhysPageSize = 4096

	// maxPhysPageSize is the maximum page size the runtime supports.
	maxPhysPageSize = 512 << 10

	// maxPhysHugePageSize sets an upper-bound on the maximum huge page size
	// that the runtime supports.
	maxPhysHugePageSize = pallocChunkBytes
	// pagesPerReclaimerChunk indicates how many pages to scan from the
	// pageInUse bitmap at a time. Used by the page reclaimer.
	//
	// Higher values reduce contention on scanning indexes (such as
	// h.reclaimIndex), but increase the minimum latency of the
	// operation.
	//
	// The time required to scan this many pages can vary a lot depending
	// on how many spans are actually freed. Experimentally, it can
	// scan for pages at ~300 GB/ms on a 2.6GHz Core i7, but can only
	// free spans at ~32 MB/ms. Using 512 pages bounds this at
	// roughly 100µs.
	//
	// Must be a multiple of the pageInUse bitmap element size and
	// must also evenly divide pagesPerArena.
	pagesPerReclaimerChunk = 512
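	// (Illustrative arithmetic, not from the original source: assuming
	// 8 KiB pages, 512 pages is 4 MiB of spans, and freeing at the
	// ~32 MB/ms rate measured above takes ~0.13 ms, which is where the
	// rough latency bound above comes from.)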
	// physPageAlignedStacks indicates whether stack allocations must be
	// physical page aligned. This is a requirement for MAP_STACK on
	// OpenBSD.
	physPageAlignedStacks = GOOS == "openbsd"
)
// Main malloc heap.
// The heap itself is the "free" and "scav" treaps,
// but all the other global data is here too.
//
// mheap must not be heap-allocated because it contains mSpanLists,
// which must not be heap-allocated.
type mheap struct {
	_ sys.NotInHeap

	// lock must only be acquired on the system stack, otherwise a g
	// could self-deadlock if its stack grows with the lock held.
	lock mutex

	pages pageAlloc // page allocation data structure

	sweepgen uint32 // sweep generation, see comment in mspan; written during STW
	// allspans is a slice of all mspans ever created. Each mspan
	// appears exactly once.
	//
	// The memory for allspans is manually managed and can be
	// reallocated and moved as the heap grows.
	//
	// In general, allspans is protected by mheap_.lock, which
	// prevents concurrent access as well as freeing the backing
	// store. Accesses during STW might not hold the lock, but
	// must ensure that allocation cannot happen around the
	// access (since that may free the backing store).
	allspans []*mspan // all spans out there
	// These parameters represent a linear function from gcController.heapLive
	// to page sweep count. The proportional sweep system works to
	// stay in the black by keeping the current page sweep count
	// above this line at the current gcController.heapLive.
	//
	// The line has slope sweepPagesPerByte and passes through a
	// basis point at (sweepHeapLiveBasis, pagesSweptBasis). At
	// any given time, the system is at (gcController.heapLive,
	// pagesSwept) in this space.
	//
	// It is important that the line pass through a point we
	// control rather than simply starting at a 0,0 origin
	// because that lets us adjust sweep pacing at any time while
	// accounting for current progress. If we could only adjust
	// the slope, it would create a discontinuity in debt if any
	// progress has already been made.
	pagesInUse         atomic.Uintptr // pages of spans in stats mSpanInUse
	pagesSwept         atomic.Uint64  // pages swept this cycle
	pagesSweptBasis    atomic.Uint64  // pagesSwept to use as the origin of the sweep ratio
	sweepHeapLiveBasis uint64         // value of gcController.heapLive to use as the origin of sweep ratio; written with lock, read without
	sweepPagesPerByte  float64        // proportional sweep ratio; written with lock, read without
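	// (Illustrative sketch of the line described above, not runtime
	// code; the names are the fields above, the target is hypothetical:
	//
	//	pagesTarget := int64(sweepPagesPerByte*float64(heapLive-sweepHeapLiveBasis)) + int64(pagesSweptBasis)
	//
	// Sweeping is on pace whenever pagesSwept >= pagesTarget.)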
	// Page reclaimer state

	// reclaimIndex is the page index in allArenas of next page to
	// reclaim. Specifically, it refers to page (i %
	// pagesPerArena) of arena allArenas[i / pagesPerArena].
	//
	// If this is >= 1<<63, the page reclaimer is done scanning
	// all the page marks.
	reclaimIndex atomic.Uint64

	// reclaimCredit is spare credit for extra pages swept. Since
	// the page reclaimer works in large chunks, it may reclaim
	// more than requested. Any spare pages released go to this
	// credit pool.
	reclaimCredit atomic.Uintptr

	_ cpu.CacheLinePad // prevents false-sharing between arenas and preceding variables

	// arenas is the heap arena map. It points to the metadata for
	// the heap for every arena frame of the entire usable virtual
	// address space.
	//
	// Use arenaIndex to compute indexes into this array.
	//
	// For regions of the address space that are not backed by the
	// Go heap, the arena map contains nil.
	//
	// Modifications are protected by mheap_.lock. Reads can be
	// performed without locking; however, a given entry can
	// transition from nil to non-nil at any time when the lock
	// isn't held. (Entries never transition back to nil.)
	//
	// In general, this is a two-level mapping consisting of an L1
	// map and possibly many L2 maps. This saves space when there
	// are a huge number of arena frames. However, on many
	// platforms (even 64-bit), arenaL1Bits is 0, making this
	// effectively a single-level map. In this case, arenas[0]
	// will never be nil.
	arenas [1 << arenaL1Bits]*[1 << arenaL2Bits]*heapArena
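	// (Illustrative lookup, assuming p points into a mapped arena:
	//
	//	ai := arenaIndex(p)
	//	ha := mheap_.arenas[ai.l1()][ai.l2()]
	//
	// See arenaIndex, arenaIdx.l1, and arenaIdx.l2 below.)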
	// arenasHugePages indicates whether arenas' L2 entries are eligible
	// to be backed by huge pages.
	arenasHugePages bool

	// heapArenaAlloc is pre-reserved space for allocating heapArena
	// objects. This is only used on 32-bit, where we pre-reserve
	// this space to avoid interleaving it with the heap itself.
	heapArenaAlloc linearAlloc
	// arenaHints is a list of addresses at which to attempt to
	// add more heap arenas. This is initially populated with a
	// set of general hint addresses, and grown with the bounds of
	// actual heap arena ranges.
	arenaHints *arenaHint

	// arena is a pre-reserved space for allocating heap arenas
	// (the actual arenas). This is only used on 32-bit.
	arena linearAlloc
	// allArenas is the arenaIndex of every mapped arena. This can
	// be used to iterate through the address space.
	//
	// Access is protected by mheap_.lock. However, since this is
	// append-only and old backing arrays are never freed, it is
	// safe to acquire mheap_.lock, copy the slice header, and
	// then release mheap_.lock.
	allArenas []arenaIdx

	// sweepArenas is a snapshot of allArenas taken at the
	// beginning of the sweep cycle. This can be read safely by
	// simply blocking GC (by disabling preemption).
	sweepArenas []arenaIdx

	// markArenas is a snapshot of allArenas taken at the beginning
	// of the mark cycle. Because allArenas is append-only, neither
	// this slice nor its contents will change during the mark, so
	// it can be read safely.
	markArenas []arenaIdx
	// curArena is the arena that the heap is currently growing
	// into. This should always be physPageSize-aligned.
	curArena struct {
		base, end uintptr
	}

	// central free lists for small size classes.
	// the padding makes sure that the mcentrals are
	// spaced CacheLinePadSize bytes apart, so that each mcentral.lock
	// gets its own cache line.
	// central is indexed by spanClass.
	central [numSpanClasses]struct {
		mcentral mcentral
		pad      [(cpu.CacheLinePadSize - unsafe.Sizeof(mcentral{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
	}
	spanalloc              fixalloc // allocator for span*
	cachealloc             fixalloc // allocator for mcache*
	specialfinalizeralloc  fixalloc // allocator for specialfinalizer*
	specialprofilealloc    fixalloc // allocator for specialprofile*
	specialReachableAlloc  fixalloc // allocator for specialReachable
	specialPinCounterAlloc fixalloc // allocator for specialPinCounter
	speciallock            mutex    // lock for special record allocators.
	arenaHintAlloc         fixalloc // allocator for arenaHints
	// Protected by mheap_.lock.
	userArena struct {
		// arenaHints is a list of addresses at which to attempt to
		// add more heap arenas for user arena chunks. This is initially
		// populated with a set of general hint addresses, and grown with
		// the bounds of actual heap arena ranges.
		arenaHints *arenaHint

		// quarantineList is a list of user arena spans that have been set to fault, but
		// are waiting for all pointers into them to go away. Sweeping handles
		// identifying when this is true, and moves the span to the ready list.
		quarantineList mSpanList

		// readyList is a list of empty user arena spans that are ready for reuse.
		readyList mSpanList
	}

	unused *specialfinalizer // never set, just here to force the specialfinalizer type into DWARF
}

var mheap_ mheap
// A heapArena stores metadata for a heap arena. heapArenas are stored
// outside of the Go heap and accessed via the mheap_.arenas index.
type heapArena struct {
	_ sys.NotInHeap

	// heapArenaPtrScalar contains pointer/scalar data about the heap for this heap arena.
	heapArenaPtrScalar
	// spans maps from virtual address page ID within this arena to *mspan.
	// For allocated spans, their pages map to the span itself.
	// For free spans, only the lowest and highest pages map to the span itself.
	// Internal pages map to an arbitrary span.
	// For pages that have never been allocated, spans entries are nil.
	//
	// Modifications are protected by mheap.lock. Reads can be
	// performed without locking, but ONLY from indexes that are
	// known to contain in-use or stack spans. This means there
	// must not be a safe-point between establishing that an
	// address is live and looking it up in the spans array.
	spans [pagesPerArena]*mspan
	// pageInUse is a bitmap that indicates which spans are in
	// state mSpanInUse. This bitmap is indexed by page number,
	// but only the bit corresponding to the first page in each
	// span is used.
	//
	// Reads and writes are atomic.
	pageInUse [pagesPerArena / 8]uint8
	// pageMarks is a bitmap that indicates which spans have any
	// marked objects on them. Like pageInUse, only the bit
	// corresponding to the first page in each span is used.
	//
	// Writes are done atomically during marking. Reads are
	// non-atomic and lock-free since they only occur during
	// sweeping (and hence never race with writes).
	//
	// This is used to quickly find whole spans that can be freed.
	//
	// TODO(austin): It would be nice if this was uint64 for
	// faster scanning, but we don't have 64-bit atomic bit
	// operations.
	pageMarks [pagesPerArena / 8]uint8
	// pageSpecials is a bitmap that indicates which spans have
	// specials (finalizers or other). Like pageInUse, only the bit
	// corresponding to the first page in each span is used.
	//
	// Writes are done atomically whenever a special is added to
	// a span and whenever the last special is removed from a span.
	// Reads are done atomically to find spans containing specials
	// during marking.
	pageSpecials [pagesPerArena / 8]uint8
	// checkmarks stores the debug.gccheckmark state. It is only
	// used if debug.gccheckmark > 0.
	checkmarks *checkmarksMap
	// zeroedBase marks the first byte of the first page in this
	// arena which hasn't been used yet and is therefore already
	// zero. zeroedBase is relative to the arena base.
	// Increases monotonically until it hits heapArenaBytes.
	//
	// This field is sufficient to determine if an allocation
	// needs to be zeroed because the page allocator follows an
	// address-ordered first-fit policy.
	//
	// Read atomically and written with an atomic CAS.
	zeroedBase uintptr
}

// arenaHint is a hint for where to grow the heap arenas. See
// mheap_.arenaHints.
type arenaHint struct {
	_    sys.NotInHeap
	addr uintptr
	down bool
	next *arenaHint
}

// An mspan is a run of pages.
//
// When a mspan is in the heap free treap, state == mSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
// If the mspan is in the heap scav treap, then in addition to the
// above scavenged == true. scavenged == false in all other cases.
//
// When a mspan is allocated, state == mSpanInUse or mSpanManual
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
//
// Every mspan is in one doubly-linked list, either in the mheap's
// busy list or one of the mcentral's span lists.
//
// An mspan representing actual memory has state mSpanInUse,
// mSpanManual, or mSpanFree. Transitions between these states are
// constrained as follows:
//
//   - A span may transition from free to in-use or manual during any GC
//     phase.
//
//   - During sweeping (gcphase == _GCoff), a span may transition from
//     in-use to free (as a result of sweeping) or manual to free (as a
//     result of stacks being freed).
//
//   - During GC (gcphase != _GCoff), a span *must not* transition from
//     manual or in-use to free. Because concurrent GC may read a pointer
//     and then look up its span, the span state must be monotonic.
//
// Setting mspan.state to mSpanInUse or mSpanManual must be done
// atomically and only after all other span fields are valid.
// Likewise, if inspecting a span is contingent on it being
// mSpanInUse, the state should be loaded atomically and checked
// before depending on other fields. This allows the garbage collector
// to safely deal with potentially invalid pointers, since resolving
// such pointers may race with a span being allocated.
type mSpanState uint8
const (
	mSpanDead   mSpanState = iota
	mSpanInUse             // allocated for garbage collected heap
	mSpanManual            // allocated for manual management (e.g., stack allocator)
)
// mSpanStateNames are the names of the span states, indexed by
// mSpanState.
var mSpanStateNames = []string{
	"mSpanDead",
	"mSpanInUse",
	"mSpanManual",
}
// mSpanStateBox holds an atomic.Uint8 to provide atomic operations on
// an mSpanState. This is a separate type to disallow accidental comparison
// or assignment with mSpanState.
type mSpanStateBox struct {
	s atomic.Uint8
}

// It is nosplit to match get, below.
//
//go:nosplit
func (b *mSpanStateBox) set(s mSpanState) {
	b.s.Store(uint8(s))
}

// It is nosplit because it's called indirectly by typedmemclr,
// which must not be preempted.
//
//go:nosplit
func (b *mSpanStateBox) get() mSpanState {
	return mSpanState(b.s.Load())
}
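// As a sketch of the protocol described above mSpanState (illustrative,
// not runtime code): a writer fills in all span fields and only then
// calls s.state.set(mSpanInUse); a reader holding a suspect pointer
// first checks the state before trusting any other field, e.g.
//
//	if s := spanOf(p); s != nil && s.state.get() == mSpanInUse {
//		// s.base(), s.limit, etc. are now safe to read.
//	}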

// mSpanList heads a linked list of spans.
type mSpanList struct {
	_     sys.NotInHeap
	first *mspan // first span in list, or nil if none
	last  *mspan // last span in list, or nil if none
}
type mspan struct {
	_    sys.NotInHeap
	next *mspan     // next span in list, or nil if none
	prev *mspan     // previous span in list, or nil if none
	list *mSpanList // For debugging. TODO: Remove.

	startAddr uintptr // address of first byte of span aka s.base()
	npages    uintptr // number of pages in span

	manualFreeList gclinkptr // list of free objects in mSpanManual spans
	// freeindex is the slot index between 0 and nelems at which to begin scanning
	// for the next free object in this span.
	// Each allocation scans allocBits starting at freeindex until it encounters a 0
	// indicating a free object. freeindex is then adjusted so that subsequent scans begin
	// just past the newly discovered free object.
	//
	// If freeindex == nelem, this span has no free objects.
	//
	// allocBits is a bitmap of objects in this span.
	// If n >= freeindex and allocBits[n/8] & (1<<(n%8)) is 0
	// then object n is free;
	// otherwise, object n is allocated. Bits starting at nelem are
	// undefined and should never be referenced.
	//
	// Object n starts at address n*elemsize + (start << pageShift).
	freeindex uint16
	// TODO: Look up nelems from sizeclass and remove this field if it
	// helps performance.
	nelems uint16 // number of objects in the span.
	// freeIndexForScan is like freeindex, except that freeindex is
	// used by the allocator whereas freeIndexForScan is used by the
	// GC scanner. They are two fields so that the GC sees the object
	// is allocated only when the object and the heap bits are
	// initialized (see also the assignment of freeIndexForScan in
	// mallocgc, and issue 54596).
	freeIndexForScan uint16
	// Cache of the allocBits at freeindex. allocCache is shifted
	// such that the lowest bit corresponds to the bit freeindex.
	// allocCache holds the complement of allocBits, thus allowing
	// ctz (count trailing zero) to use it directly.
	// allocCache may contain bits beyond s.nelems; the caller must ignore
	// these.
	allocCache uint64
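	// (Illustrative sketch, not runtime code: with this encoding the
	// allocator's fast path finds the next free slot with a single
	// trailing-zero count, roughly
	//
	//	theBit := sys.TrailingZeros64(s.allocCache) // 0 means freeindex itself is free
	//	result := s.freeindex + uint16(theBit)
	//
	// before shifting allocCache and advancing freeindex.)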
	// allocBits and gcmarkBits hold pointers to a span's mark and
	// allocation bits. The pointers are 8 byte aligned.
	// There are four arenas where this data is held.
	// free: Dirty arenas that are no longer accessed
	//       and can be reused.
	// next: Holds information to be used in the next GC cycle.
	// current: Information being used during this GC cycle.
	// previous: Information being used during the last GC cycle.
	// A new GC cycle starts with the call to finishsweep_m.
	// finishsweep_m moves the previous arena to the free arena,
	// the current arena to the previous arena, and
	// the next arena to the current arena.
	// The next arena is populated as the spans request
	// memory to hold gcmarkBits for the next GC cycle as well
	// as allocBits for newly allocated spans.
	//
	// The pointer arithmetic is done "by hand" instead of using
	// arrays to avoid bounds checks along critical performance
	// paths.
	// The sweep will free the old allocBits and set allocBits to the
	// gcmarkBits. The gcmarkBits are replaced with a fresh zeroed
	// bitmap.
	allocBits  *gcBits
	gcmarkBits *gcBits
	pinnerBits *gcBits // bitmap for pinned objects; accessed atomically
	// sweep generation:
	// if sweepgen == h->sweepgen - 2, the span needs sweeping
	// if sweepgen == h->sweepgen - 1, the span is currently being swept
	// if sweepgen == h->sweepgen, the span is swept and ready to use
	// if sweepgen == h->sweepgen + 1, the span was cached before sweep began and is still cached, and needs sweeping
	// if sweepgen == h->sweepgen + 3, the span was swept and then cached and is still cached
	// h->sweepgen is incremented by 2 after every GC
	sweepgen uint32
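	// (Illustrative reading of the table above, not runtime code:
	//
	//	switch sg := mheap_.sweepgen; s.sweepgen {
	//	case sg - 2: // needs sweeping
	//	case sg - 1: // being swept
	//	case sg:     // swept, ready to use
	//	}
	//
	// A sweeper claims a span by CASing sweepgen from sg-2 to sg-1.)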
	divMul                uint32        // for divide by elemsize
	allocCount            uint16        // number of allocated objects
	spanclass             spanClass     // size class and noscan (uint8)
	state                 mSpanStateBox // mSpanInUse etc; accessed atomically (get/set methods)
	needzero              uint8         // needs to be zeroed before allocation
	isUserArenaChunk      bool          // whether or not this span represents a user arena
	allocCountBeforeCache uint16        // a copy of allocCount that is stored just before this span is cached
	elemsize              uintptr       // computed from sizeclass or from npages
	limit                 uintptr       // end of data in span
	speciallock           mutex         // guards specials list and changes to pinnerBits
	specials              *special      // linked list of special records sorted by offset.
	userArenaChunkFree    addrRange     // interval for managing chunk allocation
func (s *mspan) base() uintptr {
	return s.startAddr
}

func (s *mspan) layout() (size, n, total uintptr) {
	total = s.npages << _PageShift
	size = s.elemsize
	if size > 0 {
		n = total / size
	}
	return
}

// recordspan adds a newly allocated span to h.allspans.
//
// This only happens the first time a span is allocated from
// mheap.spanalloc (it is not called when a span is reused).
//
// Write barriers are disallowed here because it can be called from
// gcWork when allocating new workbufs. However, because it's an
// indirect call from the fixalloc initializer, the compiler can't see
// this.
//
// The heap lock must be held.
//
//go:nowritebarrierrec
func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
	h := (*mheap)(vh)
	s := (*mspan)(p)

	assertLockHeld(&h.lock)
	if len(h.allspans) >= cap(h.allspans) {
		n := 64 * 1024 / goarch.PtrSize
		if n < cap(h.allspans)*3/2 {
			n = cap(h.allspans) * 3 / 2
		}
		var new []*mspan
		sp := (*slice)(unsafe.Pointer(&new))
		sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys)
		if sp.array == nil {
			throw("runtime: cannot allocate memory")
		}
		sp.len = len(h.allspans)
		sp.cap = n
		if len(h.allspans) > 0 {
			copy(new, h.allspans)
		}
		oldAllspans := h.allspans
		*(*notInHeapSlice)(unsafe.Pointer(&h.allspans)) = *(*notInHeapSlice)(unsafe.Pointer(&new))
		if len(oldAllspans) != 0 {
			sysFree(unsafe.Pointer(&oldAllspans[0]), uintptr(cap(oldAllspans))*unsafe.Sizeof(oldAllspans[0]), &memstats.other_sys)
		}
	}
	h.allspans = h.allspans[:len(h.allspans)+1]
	h.allspans[len(h.allspans)-1] = s
}

// A spanClass represents the size class and noscan-ness of a span.
//
// Each size class has a noscan spanClass and a scan spanClass. The
// noscan spanClass contains only noscan objects, which do not contain
// pointers and thus do not need to be scanned by the garbage
// collector.
type spanClass uint8

const (
	numSpanClasses = _NumSizeClasses << 1
	tinySpanClass  = spanClass(tinySizeClass<<1 | 1)
)
func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
	return spanClass(sizeclass<<1) | spanClass(bool2int(noscan))
}

func (sc spanClass) sizeclass() int8 {
	return int8(sc >> 1)
}

func (sc spanClass) noscan() bool {
	return sc&1 != 0
}
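// The encoding above round-trips; for example (illustrative only):
//
//	sc := makeSpanClass(5, true) // size class 5, noscan
//	_ = sc.sizeclass()           // == 5
//	_ = sc.noscan()              // == true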

// arenaIndex returns the index into mheap_.arenas of the arena
// containing metadata for p. This index combines an index into the
// L1 map and an index into the L2 map and should be used as
// mheap_.arenas[ai.l1()][ai.l2()].
//
// If p is outside the range of valid heap addresses, either l1() or
// l2() will be out of bounds.
//
// It is nosplit because it's called by spanOf and several other
// nosplit functions.
//
//go:nosplit
func arenaIndex(p uintptr) arenaIdx {
	return arenaIdx((p - arenaBaseOffset) / heapArenaBytes)
}

// arenaBase returns the low address of the region covered by heap
// arena i.
func arenaBase(i arenaIdx) uintptr {
	return uintptr(i)*heapArenaBytes + arenaBaseOffset
}
// l1 returns the "l1" portion of an arenaIdx.
//
// Marked nosplit because it's called by spanOf and other nosplit
// functions.
//
//go:nosplit
func (i arenaIdx) l1() uint {
	if arenaL1Bits == 0 {
		// Let the compiler optimize this away if there's no
		// L1 map.
		return 0
	} else {
		return uint(i) >> arenaL1Shift
	}
}

// l2 returns the "l2" portion of an arenaIdx.
//
// Marked nosplit because it's called by spanOf and other nosplit funcs.
//
//go:nosplit
func (i arenaIdx) l2() uint {
	if arenaL1Bits == 0 {
		return uint(i)
	} else {
		return uint(i) & (1<<arenaL2Bits - 1)
	}
}

// inheap reports whether b is a pointer into a (potentially dead) heap object.
// It returns false for pointers into mSpanManual spans.
// Non-preemptible because it is used by write barriers.
//
//go:nowritebarrier
//go:nosplit
func inheap(b uintptr) bool {
	return spanOfHeap(b) != nil
}

// inHeapOrStack is a variant of inheap that returns true for pointers
// into any allocated heap span.
//
//go:nowritebarrier
//go:nosplit
func inHeapOrStack(b uintptr) bool {
	s := spanOf(b)
	if s == nil || b < s.base() {
		return false
	}
	switch s.state.get() {
	case mSpanInUse, mSpanManual:
		return b < s.limit
	default:
		return false
	}
}

// spanOf returns the span of p. If p does not point into the heap
// arena or no span has ever contained p, spanOf returns nil.
//
// If p does not point to allocated memory, this may return a non-nil
// span that does *not* contain p. If this is a possibility, the
// caller should either call spanOfHeap or check the span bounds
// explicitly.
//
// Must be nosplit because it has callers that are nosplit.
//
//go:nosplit
func spanOf(p uintptr) *mspan {
	// This function looks big, but we use a lot of constant
	// folding around arenaL1Bits to get it under the inlining
	// budget. Also, many of the checks here are safety checks
	// that Go needs to do anyway, so the generated code is quite
	// short.
	ri := arenaIndex(p)
	if arenaL1Bits == 0 {
		// If there's no L1, then ri.l1() can't be out of bounds but ri.l2() can.
		if ri.l2() >= uint(len(mheap_.arenas[0])) {
			return nil
		}
	} else {
		// If there's an L1, then ri.l1() can be out of bounds but ri.l2() can't.
		if ri.l1() >= uint(len(mheap_.arenas)) {
			return nil
		}
	}
	l2 := mheap_.arenas[ri.l1()]
	if arenaL1Bits != 0 && l2 == nil { // Should never happen if there's no L1.
		return nil
	}
	ha := l2[ri.l2()]
	if ha == nil {
		return nil
	}
	return ha.spans[(p/pageSize)%pagesPerArena]
}

// spanOfUnchecked is equivalent to spanOf, but the caller must ensure
// that p points into an allocated heap arena.
//
// Must be nosplit because it has callers that are nosplit.
//
//go:nosplit
func spanOfUnchecked(p uintptr) *mspan {
	ai := arenaIndex(p)
	return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena]
}

// spanOfHeap is like spanOf, but returns nil if p does not point to a
// heap object.
//
// Must be nosplit because it has callers that are nosplit.
//
//go:nosplit
func spanOfHeap(p uintptr) *mspan {
	s := spanOf(p)
	// s is nil if it's never been allocated. Otherwise, we check
	// its state first because we don't trust this pointer, so we
	// have to synchronize with span initialization. Then, it's
	// still possible we picked up a stale span pointer, so we
	// have to check the span's bounds.
	if s == nil || s.state.get() != mSpanInUse || p < s.base() || p >= s.limit {
		return nil
	}
	return s
}

// pageIndexOf returns the arena, page index, and page mask for pointer p.
// The caller must ensure p is in the heap.
func pageIndexOf(p uintptr) (arena *heapArena, pageIdx uintptr, pageMask uint8) {
	ai := arenaIndex(p)
	arena = mheap_.arenas[ai.l1()][ai.l2()]
	pageIdx = ((p / pageSize) / 8) % uintptr(len(arena.pageInUse))
	pageMask = byte(1 << ((p / pageSize) % 8))
	return
}
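// (Illustrative use of pageIndexOf, not runtime code: to test whether
// the span starting at p's page is in state mSpanInUse,
//
//	arena, idx, mask := pageIndexOf(p)
//	inUse := atomic.Load8(&arena.pageInUse[idx])&mask != 0
//
// which mirrors how the page reclaimer consults this bitmap.)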

// Initialize the heap.
func (h *mheap) init() {
	lockInit(&h.lock, lockRankMheap)
	lockInit(&h.speciallock, lockRankMheapSpecial)

	h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
	h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
	h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
	h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
	h.specialReachableAlloc.init(unsafe.Sizeof(specialReachable{}), nil, nil, &memstats.other_sys)
	h.specialPinCounterAlloc.init(unsafe.Sizeof(specialPinCounter{}), nil, nil, &memstats.other_sys)
	h.arenaHintAlloc.init(unsafe.Sizeof(arenaHint{}), nil, nil, &memstats.other_sys)

	// Don't zero mspan allocations. Background sweeping can
	// inspect a span concurrently with allocating it, so it's
	// important that the span's sweepgen survive across freeing
	// and re-allocating a span to prevent background sweeping
	// from improperly cas'ing it from 0.
	//
	// This is safe because mspan contains no heap pointers.
	h.spanalloc.zero = false

	// h->mapcache needs no init

	for i := range h.central {
		h.central[i].mcentral.init(spanClass(i))
	}

	h.pages.init(&h.lock, &memstats.gcMiscSys, false)
}

// reclaim sweeps and reclaims at least npage pages into the heap.
// It is called before allocating npage pages to keep growth in check.
//
// reclaim implements the page-reclaimer half of the sweeper.
//
// h.lock must NOT be held.
func (h *mheap) reclaim(npage uintptr) {
	// TODO(austin): Half of the time spent freeing spans is in
	// locking/unlocking the heap (even with low contention). We
	// could make the slow path here several times faster by
	// batching heap frees.

	// Bail early if there's no more reclaim work.
	if h.reclaimIndex.Load() >= 1<<63 {
		return
	}

	// Disable preemption so the GC can't start while we're
	// sweeping, so we can read h.sweepArenas, and so
	// traceGCSweepStart/Done pair on the P.
	mp := acquirem()

	if traceEnabled() {
		traceGCSweepStart()
	}

	arenas := h.sweepArenas
	locked := false
	for npage > 0 {
		// Pull from accumulated credit first.
		if credit := h.reclaimCredit.Load(); credit > 0 {
			take := credit
			if take > npage {
				// Take only what we need.
				take = npage
			}
			if h.reclaimCredit.CompareAndSwap(credit, credit-take) {
				npage -= take
			}
			continue
		}

		// Claim a chunk of work.
		idx := uintptr(h.reclaimIndex.Add(pagesPerReclaimerChunk) - pagesPerReclaimerChunk)
		if idx/pagesPerArena >= uintptr(len(arenas)) {
			// Page reclaiming is done.
			h.reclaimIndex.Store(1 << 63)
			break
		}

		if !locked {
			// Lock the heap for reclaimChunk.
			lock(&h.lock)
			locked = true
		}

		// Scan this chunk.
		nfound := h.reclaimChunk(arenas, idx, pagesPerReclaimerChunk)
		if nfound <= npage {
			npage -= nfound
		} else {
			// Put spare pages toward global credit.
			h.reclaimCredit.Add(nfound - npage)
			npage = 0
		}
	}
	if locked {
		unlock(&h.lock)
	}

	if traceEnabled() {
		traceGCSweepDone()
	}
	releasem(mp)
}

// reclaimChunk sweeps unmarked spans that start at page indexes [pageIdx, pageIdx+n).
// It returns the number of pages returned to the heap.
//
// h.lock must be held and the caller must be non-preemptible. Note: h.lock may be
// temporarily unlocked and re-locked in order to do sweeping or if tracing is
// enabled.
func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
	// The heap lock must be held because this accesses the
	// heapArena.spans arrays using potentially non-live pointers.
	// In particular, if a span were freed and merged concurrently
	// with this probing heapArena.spans, it would be possible to
	// observe arbitrary, stale span pointers.
	assertLockHeld(&h.lock)

	n0 := n
	var nFreed uintptr
	sl := sweep.active.begin()
	if !sl.valid {
		return 0
	}
	for n > 0 {
		ai := arenas[pageIdx/pagesPerArena]
		ha := h.arenas[ai.l1()][ai.l2()]

		// Get a chunk of the bitmap to work on.
		arenaPage := uint(pageIdx % pagesPerArena)
		inUse := ha.pageInUse[arenaPage/8:]
		marked := ha.pageMarks[arenaPage/8:]
		if uintptr(len(inUse)) > n/8 {
			inUse = inUse[:n/8]
			marked = marked[:n/8]
		}

		// Scan this bitmap chunk for spans that are in-use
		// but have no marked objects on them.
		for i := range inUse {
			inUseUnmarked := atomic.Load8(&inUse[i]) &^ marked[i]
			if inUseUnmarked == 0 {
				continue
			}

			for j := uint(0); j < 8; j++ {
				if inUseUnmarked&(1<<j) != 0 {
					s := ha.spans[arenaPage+uint(i)*8+j]
					if s, ok := sl.tryAcquire(s); ok {
						npages := s.npages
						unlock(&h.lock)
						if s.sweep(false) {
							nFreed += npages
						}
						lock(&h.lock)
						// Reload inUse. It's possible nearby
						// spans were freed when we dropped the
						// lock and we don't want to get stale
						// pointers from the spans array.
						inUseUnmarked = atomic.Load8(&inUse[i]) &^ marked[i]
					}
				}
			}
		}

		// Advance.
		pageIdx += uintptr(len(inUse) * 8)
		n -= uintptr(len(inUse) * 8)
	}
	sweep.active.end(sl)
	if traceEnabled() {
		unlock(&h.lock)
		// Account for pages scanned but not reclaimed.
		traceGCSweepSpan((n0 - nFreed) * pageSize)
		lock(&h.lock)
	}

	assertLockHeld(&h.lock) // Must be locked on return.
	return nFreed
}

// spanAllocType represents the type of allocation to make, or
// the type of allocation to be freed.
type spanAllocType uint8

const (
	spanAllocHeap          spanAllocType = iota // heap span
	spanAllocStack                              // stack span
	spanAllocPtrScalarBits                      // unrolled GC prog bitmap span
	spanAllocWorkBuf                            // work buf span
)

// manual returns true if the span allocation is manually managed.
func (s spanAllocType) manual() bool {
	return s != spanAllocHeap
}

// alloc allocates a new span of npage pages from the GC'd heap.
//
// spanclass indicates the span's size class and scannability.
//
// Returns a span that has been fully initialized. span.needzero indicates
// whether the span has been zeroed. Note that it may not be.
func (h *mheap) alloc(npages uintptr, spanclass spanClass) *mspan {
	// Don't do any operations that lock the heap on the G stack.
	// It might trigger stack growth, and the stack growth code needs
	// to be able to allocate heap.
	var s *mspan
	systemstack(func() {
		// To prevent excessive heap growth, before allocating n pages
		// we need to sweep and reclaim at least n pages.
		if !isSweepDone() {
			h.reclaim(npages)
		}
		s = h.allocSpan(npages, spanAllocHeap, spanclass)
	})
	return s
}

// allocManual allocates a manually-managed span of npage pages.
// allocManual returns nil if allocation fails.
//
// allocManual adds the bytes used to *stat, which should be a
// memstats in-use field. Unlike allocations in the GC'd heap, the
// allocation does *not* count toward heapInUse.
//
// The memory backing the returned span may not be zeroed if
// span.needzero is set.
//
// allocManual must be called on the system stack because it may
// acquire the heap lock via allocSpan. See mheap for details.
//
// If new code is written to call allocManual, do NOT use an
// existing spanAllocType value and instead declare a new one.
//
//go:systemstack
func (h *mheap) allocManual(npages uintptr, typ spanAllocType) *mspan {
	if !typ.manual() {
		throw("manual span allocation called with non-manually-managed type")
	}
	return h.allocSpan(npages, typ, 0)
}

// setSpans modifies the span map so [spanOf(base), spanOf(base+npage*pageSize))
// is s.
func (h *mheap) setSpans(base, npage uintptr, s *mspan) {
	p := base / pageSize
	ai := arenaIndex(base)
	ha := h.arenas[ai.l1()][ai.l2()]
	for n := uintptr(0); n < npage; n++ {
		i := (p + n) % pagesPerArena
		if i == 0 {
			ai = arenaIndex(base + n*pageSize)
			ha = h.arenas[ai.l1()][ai.l2()]
		}
		ha.spans[i] = s
	}
}

// allocNeedsZero checks if the region of address space [base, base+npage*pageSize),
// assumed to be allocated, needs to be zeroed, updating heap arena metadata for
// future allocations.
//
// This must be called each time pages are allocated from the heap, even if the page
// allocator can otherwise prove the memory it's allocating is already zero because
// they're fresh from the operating system. It updates heapArena metadata that is
// critical for future page allocations.
//
// There are no locking constraints on this method.
func (h *mheap) allocNeedsZero(base, npage uintptr) (needZero bool) {
	for npage > 0 {
		ai := arenaIndex(base)
		ha := h.arenas[ai.l1()][ai.l2()]

		zeroedBase := atomic.Loaduintptr(&ha.zeroedBase)
		arenaBase := base % heapArenaBytes
		if arenaBase < zeroedBase {
			// We extended into the non-zeroed part of the
			// arena, so this region needs to be zeroed before use.
			//
			// zeroedBase is monotonically increasing, so if we see this now then
			// we can be sure we need to zero this memory region.
			//
			// We still need to update zeroedBase for this arena, and
			// potentially more arenas.
			needZero = true
		}
		// We may observe arenaBase > zeroedBase if we're racing with one or more
		// allocations which are acquiring memory directly before us in the address
		// space. But, because we know no one else is acquiring *this* memory, it's
		// still safe to not zero.

		// Compute how far into the arena we extend, capped
		// at heapArenaBytes.
		arenaLimit := arenaBase + npage*pageSize
		if arenaLimit > heapArenaBytes {
			arenaLimit = heapArenaBytes
		}
		// Increase ha.zeroedBase so it's >= arenaLimit.
		// We may be racing with other updates.
		for arenaLimit > zeroedBase {
			if atomic.Casuintptr(&ha.zeroedBase, zeroedBase, arenaLimit) {
				break
			}
			zeroedBase = atomic.Loaduintptr(&ha.zeroedBase)
			// Double check basic conditions of zeroedBase.
			if zeroedBase <= arenaLimit && zeroedBase > arenaBase {
				// The zeroedBase moved into the space we were trying to
				// claim. That's very bad, and indicates someone allocated
				// the same region we did.
				throw("potentially overlapping in-use allocations detected")
			}
		}

		// Move base forward and subtract from npage to move into
		// the next arena, or finish.
		base += arenaLimit - arenaBase
		npage -= (arenaLimit - arenaBase) / pageSize
	}
	return
}

// tryAllocMSpan attempts to allocate an mspan object from
// the P-local cache, but may fail.
//
// h.lock need not be held.
//
// This caller must ensure that its P won't change underneath
// it during this function. Currently we ensure this by requiring
// that the function run on the system stack, because that's
// the only place it is used now. In the future, this requirement
// may be relaxed if its use is necessary elsewhere.
//
//go:systemstack
func (h *mheap) tryAllocMSpan() *mspan {
	pp := getg().m.p.ptr()
	// If we don't have a p or the cache is empty, we can't do
	// anything productive.
	if pp == nil || pp.mspancache.len == 0 {
		return nil
	}
	// Pull off the last entry in the cache.
	s := pp.mspancache.buf[pp.mspancache.len-1]
	pp.mspancache.len--
	return s
}

// allocMSpanLocked allocates an mspan object.
//
// h.lock must be held.
//
// allocMSpanLocked must be called on the system stack because
// its caller holds the heap lock. See mheap for details.
// Running on the system stack also ensures that we won't
// switch Ps during this function. See tryAllocMSpan for details.
//
//go:systemstack
func (h *mheap) allocMSpanLocked() *mspan {
	assertLockHeld(&h.lock)

	pp := getg().m.p.ptr()
	if pp == nil {
		// We don't have a p so just do the normal thing.
		return (*mspan)(h.spanalloc.alloc())
	}
	// Refill the cache if necessary.
	if pp.mspancache.len == 0 {
		const refillCount = len(pp.mspancache.buf) / 2
		for i := 0; i < refillCount; i++ {
			pp.mspancache.buf[i] = (*mspan)(h.spanalloc.alloc())
		}
		pp.mspancache.len = refillCount
	}
	// Pull off the last entry in the cache.
	s := pp.mspancache.buf[pp.mspancache.len-1]
	pp.mspancache.len--
	return s
}

// freeMSpanLocked frees an mspan object.
//
// h.lock must be held.
//
// freeMSpanLocked must be called on the system stack because
// its caller holds the heap lock. See mheap for details.
// Running on the system stack also ensures that we won't
// switch Ps during this function. See tryAllocMSpan for details.
//
//go:systemstack
func (h *mheap) freeMSpanLocked(s *mspan) {
	assertLockHeld(&h.lock)

	pp := getg().m.p.ptr()
	// First try to free the mspan directly to the cache.
	if pp != nil && pp.mspancache.len < len(pp.mspancache.buf) {
		pp.mspancache.buf[pp.mspancache.len] = s
		pp.mspancache.len++
		return
	}
	// Failing that (or if we don't have a p), just free it to
	// the heap.
	h.spanalloc.free(unsafe.Pointer(s))
}

// allocSpan allocates an mspan which owns npages worth of memory.
//
// If typ.manual() == false, allocSpan allocates a heap span of class spanclass
// and updates heap accounting. If manual == true, allocSpan allocates a
// manually-managed span (spanclass is ignored), and the caller is
// responsible for any accounting related to its use of the span. Either
// way, allocSpan will atomically add the bytes in the newly allocated
// span to *sysStat.
//
// The returned span is fully initialized.
//
// h.lock must not be held.
//
// allocSpan must be called on the system stack both because it acquires
// the heap lock and because it must block GC transitions.
//
//go:systemstack
func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass) (s *mspan) {
	// Function-global state.
	gp := getg()
	base, scav := uintptr(0), uintptr(0)
	growth := uintptr(0)

	// On some platforms we need to provide physical page aligned stack
	// allocations. Where the page size is less than the physical page
	// size, we already manage to do this by default.
	needPhysPageAlign := physPageAlignedStacks && typ == spanAllocStack && pageSize < physPageSize

	// If the allocation is small enough, try the page cache!
	// The page cache does not support aligned allocations, so we cannot use
	// it if we need to provide a physical page aligned stack allocation.
	pp := gp.m.p.ptr()
	if !needPhysPageAlign && pp != nil && npages < pageCachePages/4 {
		c := &pp.pcache

		// If the cache is empty, refill it.
		if c.empty() {
			lock(&h.lock)
			*c = h.pages.allocToCache()
			unlock(&h.lock)
		}

		// Try to allocate from the cache.
		base, scav = c.alloc(npages)
		if base != 0 {
			s = h.tryAllocMSpan()
			if s != nil {
				goto HaveSpan
			}
			// We have a base but no mspan, so we need
			// to lock the heap.
		}
	}

	// For one reason or another, we couldn't get the
	// whole job done without the heap lock.
	lock(&h.lock)

	if needPhysPageAlign {
		// Overallocate by a physical page to allow for later alignment.
		extraPages := physPageSize / pageSize

		// Find a big enough region first, but then only allocate the
		// aligned portion. We can't just allocate and then free the
		// edges because we need to account for scavenged memory, and
		// that's difficult with alloc.
		//
		// Note that we skip updates to searchAddr here. It's OK if
		// it's stale and higher than normal; it'll operate correctly,
		// just come with a performance cost.
		base, _ = h.pages.find(npages + extraPages)
		if base == 0 {
			var ok bool
			growth, ok = h.grow(npages + extraPages)
			if !ok {
				unlock(&h.lock)
				return nil
			}
			base, _ = h.pages.find(npages + extraPages)
			if base == 0 {
				throw("grew heap, but no adequate free space found")
			}
		}
		base = alignUp(base, physPageSize)
		scav = h.pages.allocRange(base, npages)
	}

	if base == 0 {
		// Try to acquire a base address.
		base, scav = h.pages.alloc(npages)
		if base == 0 {
			var ok bool
			growth, ok = h.grow(npages)
			if !ok {
				unlock(&h.lock)
				return nil
			}
			base, scav = h.pages.alloc(npages)
			if base == 0 {
				throw("grew heap, but no adequate free space found")
			}
		}
	}
	if s == nil {
		// We failed to get an mspan earlier, so grab
		// one now that we have the heap lock.
		s = h.allocMSpanLocked()
	}
	unlock(&h.lock)

HaveSpan:
	// Decide if we need to scavenge in response to what we just allocated.
	// Specifically, we track the maximum amount of memory to scavenge of all
	// the alternatives below, assuming that the maximum satisfies *all*
	// conditions we check (e.g. if we need to scavenge X to satisfy the
	// memory limit and Y to satisfy heap-growth scavenging, and Y > X, then
	// it's fine to pick Y, because the memory limit is still satisfied).
	//
	// It's fine to do this after allocating because we expect any scavenged
	// pages not to get touched until we return. Simultaneously, it's important
	// to do this before calling sysUsed because that may commit address space.
	bytesToScavenge := uintptr(0)
	forceScavenge := false
	if limit := gcController.memoryLimit.Load(); !gcCPULimiter.limiting() {
		// Assist with scavenging to maintain the memory limit by the amount
		// that we expect to page in.
		inuse := gcController.mappedReady.Load()
		// Be careful about overflow, especially with uintptrs. Even on 32-bit platforms
		// someone can set a really big memory limit that isn't maxInt64.
		if uint64(scav)+inuse > uint64(limit) {
			bytesToScavenge = uintptr(uint64(scav) + inuse - uint64(limit))
			forceScavenge = true
		}
	}
	if goal := scavenge.gcPercentGoal.Load(); goal != ^uint64(0) && growth > 0 {
		// We just caused a heap growth, so scavenge down what will soon be used.
		// By scavenging inline we deal with the failure to allocate out of
		// memory fragments by scavenging the memory fragments that are least
		// likely to be re-used.
		//
		// Only bother with this because we're not using a memory limit. We don't
		// care about heap growths as long as we're under the memory limit, and the
		// previous check for scavenging already handles that.
		if retained := heapRetained(); retained+uint64(growth) > goal {
			// The scavenging algorithm requires the heap lock to be dropped so it
			// can acquire it only sparingly. This is a potentially expensive operation
			// so it frees up other goroutines to allocate in the meanwhile. In fact,
			// they can make use of the growth we just created.
			todo := growth
			if overage := uintptr(retained + uint64(growth) - goal); todo > overage {
				todo = overage
			}
			if todo > bytesToScavenge {
				bytesToScavenge = todo
			}
		}
	}
	// There are a few very limited circumstances where we won't have a P here.
	// It's OK to simply skip scavenging in these cases. Something else will notice
	// and pick up the tab.
	var now int64
	if pp != nil && bytesToScavenge > 0 {
		// Measure how long we spent scavenging and add that measurement to the assist
		// time so we can track it for the GC CPU limiter.
		//
		// Limiter event tracking might be disabled if we end up here
		// while on a mark worker.
		start := nanotime()
		track := pp.limiterEvent.start(limiterEventScavengeAssist, start)

		// Scavenge, but back out if the limiter turns on.
		released := h.pages.scavenge(bytesToScavenge, func() bool {
			return gcCPULimiter.limiting()
		}, forceScavenge)

		mheap_.pages.scav.releasedEager.Add(released)

		// Finish up accounting.
		now = nanotime()
		if track {
			pp.limiterEvent.stop(limiterEventScavengeAssist, now)
		}
		scavenge.assistTime.Add(now - start)
	}
	// Initialize the span.
	h.initSpan(s, typ, spanclass, base, npages)

	// Commit and account for any scavenged memory that the span now owns.
	nbytes := npages * pageSize
	if scav != 0 {
		// sysUsed all the pages that are actually available
		// in the span since some of them might be scavenged.
		sysUsed(unsafe.Pointer(base), nbytes, scav)
		gcController.heapReleased.add(-int64(scav))
	}
	// Update stats.
	gcController.heapFree.add(-int64(nbytes - scav))
	if typ == spanAllocHeap {
		gcController.heapInUse.add(int64(nbytes))
	}
	// Update consistent stats.
	stats := memstats.heapStats.acquire()
	atomic.Xaddint64(&stats.committed, int64(scav))
	atomic.Xaddint64(&stats.released, -int64(scav))
	switch typ {
	case spanAllocHeap:
		atomic.Xaddint64(&stats.inHeap, int64(nbytes))
	case spanAllocStack:
		atomic.Xaddint64(&stats.inStacks, int64(nbytes))
	case spanAllocPtrScalarBits:
		atomic.Xaddint64(&stats.inPtrScalarBits, int64(nbytes))
	case spanAllocWorkBuf:
		atomic.Xaddint64(&stats.inWorkBufs, int64(nbytes))
	}
	memstats.heapStats.release()

	pageTraceAlloc(pp, now, base, npages)
	return s
}

// initSpan initializes a blank span s which will represent the range
// [base, base+npages*pageSize). typ is the type of span being allocated.
func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages uintptr) {
	// At this point, both s != nil and base != 0, and the heap
	// lock is no longer held. Initialize the span.
	s.init(base, npages)
	if h.allocNeedsZero(base, npages) {
		s.needzero = 1
	}
	nbytes := npages * pageSize
	if typ.manual() {
		s.manualFreeList = 0
		s.nelems = 0
		s.limit = s.base() + s.npages*pageSize
		s.state.set(mSpanManual)
	} else {
		// We must set span properties before the span is published anywhere
		// since we're not holding the heap lock.
		s.spanclass = spanclass
		if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
			s.elemsize = nbytes
			s.nelems = 1
			s.divMul = 0
		} else {
			s.elemsize = uintptr(class_to_size[sizeclass])
			s.nelems = uint16(nbytes / s.elemsize)
			s.divMul = class_to_divmagic[sizeclass]
		}

		// Initialize mark and allocation structures.
		s.freeindex = 0
		s.freeIndexForScan = 0
		s.allocCache = ^uint64(0) // all 1s indicating all free.
		s.gcmarkBits = newMarkBits(uintptr(s.nelems))
		s.allocBits = newAllocBits(uintptr(s.nelems))

		// It's safe to access h.sweepgen without the heap lock because it's
		// only ever updated with the world stopped and we run on the
		// systemstack which blocks a STW transition.
		atomic.Store(&s.sweepgen, h.sweepgen)

		// Now that the span is filled in, set its state. This
		// is a publication barrier for the other fields in
		// the span. While valid pointers into this span
		// should never be visible until the span is returned,
		// if the garbage collector finds an invalid pointer,
		// access to the span may race with initialization of
		// the span. We resolve this race by atomically
		// setting the state after the span is fully
		// initialized, and atomically checking the state in
		// any situation where a pointer is suspect.
		s.state.set(mSpanInUse)
	}

	// Publish the span in various locations.

	// This is safe to call without the lock held because the slots
	// related to this span will only ever be read or modified by
	// this thread until pointers into the span are published (and
	// we execute a publication barrier at the end of this function
	// before that happens) or pageInUse is updated.
	h.setSpans(s.base(), npages, s)

	if !typ.manual() {
		// Mark in-use span in arena page bitmap.
		//
		// This publishes the span to the page sweeper, so
		// it's imperative that the span be completely initialized
		// prior to this line.
		arena, pageIdx, pageMask := pageIndexOf(s.base())
		atomic.Or8(&arena.pageInUse[pageIdx], pageMask)

		// Update related page sweeper stats.
		h.pagesInUse.Add(npages)
	}

	// Make sure the newly allocated span will be observed
	// by the GC before pointers into the span are published.
	publicationBarrier()
}

// Try to add at least npage pages of memory to the heap,
// returning how much the heap grew by and whether it worked.
//
// h.lock must be held.
func (h *mheap) grow(npage uintptr) (uintptr, bool) {
	assertLockHeld(&h.lock)

	// We must grow the heap in whole palloc chunks.
	// We call sysMap below but note that because we
	// round up to pallocChunkPages which is on the order
	// of MiB (generally at least the huge page size) we
	// won't be calling it too much.
	ask := alignUp(npage, pallocChunkPages) * pageSize

	totalGrowth := uintptr(0)
	// This may overflow because ask could be very large
	// and is otherwise unrelated to h.curArena.base.
	end := h.curArena.base + ask
	nBase := alignUp(end, physPageSize)
	if nBase > h.curArena.end || /* overflow */ end < h.curArena.base {
		// Not enough room in the current arena. Allocate more
		// arena space. This may not be contiguous with the
		// current arena, so we have to request the full ask.
		av, asize := h.sysAlloc(ask, &h.arenaHints, true)
		if av == nil {
			inUse := gcController.heapFree.load() + gcController.heapReleased.load() + gcController.heapInUse.load()
			print("runtime: out of memory: cannot allocate ", ask, "-byte block (", inUse, " in use)\n")
			return 0, false
		}

		if uintptr(av) == h.curArena.end {
			// The new space is contiguous with the old
			// space, so just extend the current space.
			h.curArena.end = uintptr(av) + asize
		} else {
			// The new space is discontiguous. Track what
			// remains of the current space and switch to
			// the new space. This should be rare.
			if size := h.curArena.end - h.curArena.base; size != 0 {
				// Transition this space from Reserved to Prepared and mark it
				// as released since we'll be able to start using it after updating
				// the page allocator and releasing the lock at any time.
				sysMap(unsafe.Pointer(h.curArena.base), size, &gcController.heapReleased)
				// Update stats.
				stats := memstats.heapStats.acquire()
				atomic.Xaddint64(&stats.released, int64(size))
				memstats.heapStats.release()
				// Update the page allocator's structures to make this
				// space ready for allocation.
				h.pages.grow(h.curArena.base, size)
				totalGrowth += size
			}
			// Switch to the new space.
			h.curArena.base = uintptr(av)
			h.curArena.end = uintptr(av) + asize
		}

		// Recalculate nBase.
		// We know this won't overflow, because sysAlloc returned
		// a valid region starting at h.curArena.base which is at
		// least ask bytes in size.
		nBase = alignUp(h.curArena.base+ask, physPageSize)
	}

	// Grow into the current arena.
	v := h.curArena.base
	h.curArena.base = nBase

	// Transition the space we're going to use from Reserved to Prepared.
	//
	// The allocation is always aligned to the heap arena
	// size which is always > physPageSize, so it's safe to
	// just add directly to heapReleased.
	sysMap(unsafe.Pointer(v), nBase-v, &gcController.heapReleased)

	// The memory just allocated counts as both released
	// and idle, even though it's not yet backed by spans.
	stats := memstats.heapStats.acquire()
	atomic.Xaddint64(&stats.released, int64(nBase-v))
	memstats.heapStats.release()

	// Update the page allocator's structures to make this
	// space ready for allocation.
	h.pages.grow(v, nBase-v)
	totalGrowth += nBase - v
	return totalGrowth, true
}

// Free the span back into the heap.
func (h *mheap) freeSpan(s *mspan) {
	systemstack(func() {
		pageTraceFree(getg().m.p.ptr(), 0, s.base(), s.npages)

		lock(&h.lock)
		if msanenabled {
			// Tell msan that this entire span is no longer in use.
			base := unsafe.Pointer(s.base())
			bytes := s.npages << _PageShift
			msanfree(base, bytes)
		}
		if asanenabled {
			// Tell asan that this entire span is no longer in use.
			base := unsafe.Pointer(s.base())
			bytes := s.npages << _PageShift
			asanpoison(base, bytes)
		}
		h.freeSpanLocked(s, spanAllocHeap)
		unlock(&h.lock)
	})
}

// freeManual frees a manually-managed span returned by allocManual.
// typ must be the same as the spanAllocType passed to the allocManual that
// allocated s.
//
// This must only be called when gcphase == _GCoff. See mSpanState for
// an explanation.
//
// freeManual must be called on the system stack because it acquires
// the heap lock. See mheap for details.
//
//go:systemstack
func (h *mheap) freeManual(s *mspan, typ spanAllocType) {
	pageTraceFree(getg().m.p.ptr(), 0, s.base(), s.npages)

	s.needzero = 1
	lock(&h.lock)
	h.freeSpanLocked(s, typ)
	unlock(&h.lock)
}

func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
	assertLockHeld(&h.lock)

	switch s.state.get() {
	case mSpanManual:
		if s.allocCount != 0 {
			throw("mheap.freeSpanLocked - invalid stack free")
		}
	case mSpanInUse:
		if s.isUserArenaChunk {
			throw("mheap.freeSpanLocked - invalid free of user arena chunk")
		}
		if s.allocCount != 0 || s.sweepgen != h.sweepgen {
			print("mheap.freeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
			throw("mheap.freeSpanLocked - invalid free")
		}
		h.pagesInUse.Add(-s.npages)

		// Clear in-use bit in arena page bitmap.
		arena, pageIdx, pageMask := pageIndexOf(s.base())
		atomic.And8(&arena.pageInUse[pageIdx], ^pageMask)
	default:
		throw("mheap.freeSpanLocked - invalid span state")
	}

	// Update stats.
	//
	// Mirrors the code in allocSpan.
	nbytes := s.npages * pageSize
	gcController.heapFree.add(int64(nbytes))
	if typ == spanAllocHeap {
		gcController.heapInUse.add(-int64(nbytes))
	}
	// Update consistent stats.
	stats := memstats.heapStats.acquire()
	switch typ {
	case spanAllocHeap:
		atomic.Xaddint64(&stats.inHeap, -int64(nbytes))
	case spanAllocStack:
		atomic.Xaddint64(&stats.inStacks, -int64(nbytes))
	case spanAllocPtrScalarBits:
		atomic.Xaddint64(&stats.inPtrScalarBits, -int64(nbytes))
	case spanAllocWorkBuf:
		atomic.Xaddint64(&stats.inWorkBufs, -int64(nbytes))
	}
	memstats.heapStats.release()

	// Mark the space as free.
	h.pages.free(s.base(), s.npages)

	// Free the span structure. We no longer have a use for it.
	s.state.set(mSpanDead)
	h.freeMSpanLocked(s)
}

// scavengeAll acquires the heap lock (blocking any additional
// manipulation of the page allocator) and iterates over the whole
// heap, scavenging every free page available.
//
// Must run on the system stack because it acquires the heap lock.
//
//go:systemstack
func (h *mheap) scavengeAll() {
	// Disallow malloc or panic while holding the heap lock. We do
	// this here because this is a non-mallocgc entry-point to
	// the mheap API.
	gp := getg()
	gp.m.mallocing++

	// Force scavenge everything.
	released := h.pages.scavenge(^uintptr(0), nil, true)

	gp.m.mallocing--

	if debug.scavtrace > 0 {
		printScavTrace(0, released, true)
	}
}

//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
func runtime_debug_freeOSMemory() {
	GC()
	systemstack(func() { mheap_.scavengeAll() })
}

// Initialize a new span with the given start and npages.
func (span *mspan) init(base uintptr, npages uintptr) {
	// span is *not* zeroed.
	span.next = nil
	span.prev = nil
	span.list = nil
	span.startAddr = base
	span.npages = npages
	span.allocCount = 0
	span.spanclass = 0
	span.elemsize = 0
	span.speciallock.key = 0
	span.specials = nil
	span.needzero = 0
	span.freeindex = 0
	span.freeIndexForScan = 0
	span.allocBits = nil
	span.gcmarkBits = nil
	span.pinnerBits = nil
	span.state.set(mSpanDead)
	lockInit(&span.speciallock, lockRankMspanSpecial)
}

func (span *mspan) inList() bool {
	return span.list != nil
}

// Initialize an empty doubly-linked list.
func (list *mSpanList) init() {
	list.first = nil
	list.last = nil
}

func (list *mSpanList) remove(span *mspan) {
	if span.list != list {
		print("runtime: failed mSpanList.remove span.npages=", span.npages,
			" span=", span, " prev=", span.prev, " span.list=", span.list, " list=", list, "\n")
		throw("mSpanList.remove")
	}
	if list.first == span {
		list.first = span.next
	} else {
		span.prev.next = span.next
	}
	if list.last == span {
		list.last = span.prev
	} else {
		span.next.prev = span.prev
	}
	span.next = nil
	span.prev = nil
	span.list = nil
}

func (list *mSpanList) isEmpty() bool {
	return list.first == nil
}

func (list *mSpanList) insert(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("runtime: failed mSpanList.insert", span, span.next, span.prev, span.list)
		throw("mSpanList.insert")
	}
	span.next = list.first
	if list.first != nil {
		// The list contains at least one span; link it in.
		// The last span in the list doesn't change.
		list.first.prev = span
	} else {
		// The list contains no spans, so this is also the last span.
		list.last = span
	}
	list.first = span
	span.list = list
}

func (list *mSpanList) insertBack(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("runtime: failed mSpanList.insertBack", span, span.next, span.prev, span.list)
		throw("mSpanList.insertBack")
	}
	span.prev = list.last
	if list.last != nil {
		// The list contains at least one span.
		list.last.next = span
	} else {
		// The list contains no spans, so this is also the first span.
		list.first = span
	}
	list.last = span
	span.list = list
}

// takeAll removes all spans from other and inserts them at the front
// of list.
func (list *mSpanList) takeAll(other *mSpanList) {
	if other.isEmpty() {
		return
	}

	// Reparent everything in other to list.
	for s := other.first; s != nil; s = s.next {
		s.list = list
	}

	// Concatenate the lists.
	if list.isEmpty() {
		*list = *other
	} else {
		// Neither list is empty. Put other before list.
		other.last.next = list.first
		list.first.prev = other.last
		list.first = other.first
	}

	other.first, other.last = nil, nil
}

const (
	_KindSpecialFinalizer = 1
	_KindSpecialProfile   = 2
	// _KindSpecialReachable is a special used for tracking
	// reachability during testing.
	_KindSpecialReachable = 3
	// _KindSpecialPinCounter is a special used for objects that are pinned
	// multiple times.
	_KindSpecialPinCounter = 4
	// Note: The finalizer special must be first because if we're freeing
	// an object, a finalizer special will cause the freeing operation
	// to abort, and we want to keep the other special records around
	// if that happens.
)

type special struct {
	_      sys.NotInHeap
	next   *special // linked list in span
	offset uint16   // span offset of object
	kind   byte     // kind of special
}

// spanHasSpecials marks a span as having specials in the arena bitmap.
func spanHasSpecials(s *mspan) {
	arenaPage := (s.base() / pageSize) % pagesPerArena
	ai := arenaIndex(s.base())
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	atomic.Or8(&ha.pageSpecials[arenaPage/8], uint8(1)<<(arenaPage%8))
}
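
// For illustration only: with 8 KiB pages and 64 MiB arenas
// (pagesPerArena = 8192 on such configurations), a span whose base is
// 11 pages past the start of its arena has arenaPage = 11, so the call
// above sets bit 11%8 = 3 of pageSpecials[11/8] = pageSpecials[1].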

// spanHasNoSpecials marks a span as having no specials in the arena bitmap.
func spanHasNoSpecials(s *mspan) {
	arenaPage := (s.base() / pageSize) % pagesPerArena
	ai := arenaIndex(s.base())
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	atomic.And8(&ha.pageSpecials[arenaPage/8], ^(uint8(1) << (arenaPage % 8)))
}

// Adds the special record s to the list of special records for
// the object p. All fields of s should be filled in except for
// offset & next, which this routine will fill in.
// Returns true if the special was successfully added, false otherwise.
// (The add will fail only if a record with the same p and s->kind
// already exists.)
func addspecial(p unsafe.Pointer, s *special) bool {
	span := spanOfHeap(uintptr(p))
	if span == nil {
		throw("addspecial on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - span.base()
	kind := s.kind

	lock(&span.speciallock)

	// Find splice point, check for existing record.
	iter, exists := span.specialFindSplicePoint(offset, kind)
	if !exists {
		// Splice in record, fill in offset.
		s.offset = uint16(offset)
		s.next = *iter
		*iter = s
		spanHasSpecials(span)
	}

	unlock(&span.speciallock)
	releasem(mp)
	return !exists // already exists
}

// Removes the Special record of the given kind for the object p.
// Returns the record if the record existed, nil otherwise.
// The caller must FixAlloc_Free the result.
func removespecial(p unsafe.Pointer, kind uint8) *special {
	span := spanOfHeap(uintptr(p))
	if span == nil {
		throw("removespecial on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - span.base()

	var result *special
	lock(&span.speciallock)

	iter, exists := span.specialFindSplicePoint(offset, kind)
	if exists {
		s := *iter
		*iter = s.next
		result = s
	}
	if span.specials == nil {
		spanHasNoSpecials(span)
	}
	unlock(&span.speciallock)
	releasem(mp)
	return result
}

// Find a splice point in the sorted list and check for an already existing
// record. Returns a pointer to the next-reference in the list predecessor.
// Returns true, if the referenced item is an exact match.
func (span *mspan) specialFindSplicePoint(offset uintptr, kind byte) (**special, bool) {
	// Find splice point, check for existing record.
	iter := &span.specials
	found := false
	for {
		s := *iter
		if s == nil {
			break
		}
		if offset == uintptr(s.offset) && kind == s.kind {
			found = true
			break
		}
		if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && kind < s.kind) {
			break
		}
		iter = &s.next
	}
	return iter, found
}
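
// For illustration only: the specials list is kept sorted by
// (offset, kind), so a finalizer special (kind 1) at offset 16
// precedes a profile special (kind 2) at the same offset, which in
// turn precedes any special at offset 32. addspecial relies on this
// ordering to detect an existing record with the same offset and kind
// in a single pass.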

// The described object has a finalizer set for it.
//
// specialfinalizer is allocated from non-GC'd memory, so any heap
// pointers must be specially handled.
type specialfinalizer struct {
	_       sys.NotInHeap
	special special
	fn      *funcval // May be a heap pointer.
	nret    uintptr
	fint    *_type   // May be a heap pointer, but always live.
	ot      *ptrtype // May be a heap pointer, but always live.
}

// Adds a finalizer to the object p. Returns true if it succeeded.
func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
	lock(&mheap_.speciallock)
	s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialFinalizer
	s.fn = f
	s.nret = nret
	s.fint = fint
	s.ot = ot
	if addspecial(p, &s.special) {
		// This is responsible for maintaining the same
		// GC-related invariants as markrootSpans in any
		// situation where it's possible that markrootSpans
		// has already run but mark termination hasn't yet.
		if gcphase != _GCoff {
			base, span, _ := findObject(uintptr(p), 0, 0)
			mp := acquirem()
			gcw := &mp.p.ptr().gcw
			// Mark everything reachable from the object
			// so it's retained for the finalizer.
			if !span.spanclass.noscan() {
				scanobject(base, gcw)
			}
			// Mark the finalizer itself, since the
			// special isn't part of the GC'd heap.
			scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
			releasem(mp)
		}
		return true
	}

	// There was an old finalizer
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
	return false
}
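
// For illustration only: addfinalizer is reached from the exported
// runtime.SetFinalizer API (openFD and closeFD are hypothetical):
//
//	type resource struct{ fd int }
//
//	r := &resource{fd: openFD()}
//	runtime.SetFinalizer(r, func(r *resource) {
//		closeFD(r.fd) // runs some time after r becomes unreachable
//	})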

// Removes the finalizer (if any) from the object p.
func removefinalizer(p unsafe.Pointer) {
	s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
	if s == nil {
		return // there wasn't a finalizer to remove
	}
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
}

// The described object is being heap profiled.
type specialprofile struct {
	_       sys.NotInHeap
	special special
	b       *bucket
}

// Set the heap profile bucket associated with addr to b.
func setprofilebucket(p unsafe.Pointer, b *bucket) {
	lock(&mheap_.speciallock)
	s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialProfile
	s.b = b
	if !addspecial(p, &s.special) {
		throw("setprofilebucket: profile already set")
	}
}

// specialReachable tracks whether an object is reachable on the next
// GC cycle. This is used by testing.
type specialReachable struct {
	special   special
	done      bool
	reachable bool
}

// specialPinCounter tracks whether an object is pinned multiple times.
type specialPinCounter struct {
	special special
	counter uintptr
}
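
// For illustration only: specialPinCounter records are created via the
// exported runtime.Pinner API when the same object is pinned more than
// once (obj is any pinnable pointer):
//
//	var pinner runtime.Pinner
//	pinner.Pin(obj) // first pin is tracked in the span's pinnerBits
//	pinner.Pin(obj) // further pins are counted by a pin counter special
//	// ... pass obj to C, etc. ...
//	pinner.Unpin()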

// specialsIter helps iterate over specials lists.
type specialsIter struct {
	pprev **special
	s     *special
}

func newSpecialsIter(span *mspan) specialsIter {
	return specialsIter{&span.specials, span.specials}
}

func (i *specialsIter) valid() bool {
	return i.s != nil
}

func (i *specialsIter) next() {
	i.pprev = &i.s.next
	i.s = *i.pprev
}

// unlinkAndNext removes the current special from the list and moves
// the iterator to the next special. It returns the unlinked special.
func (i *specialsIter) unlinkAndNext() *special {
	cur := i.s
	i.s = cur.next
	*i.pprev = i.s
	return cur
}
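
// For illustration only: a sweep-style walk over a span's specials
// with this iterator (shouldFree is hypothetical):
//
//	iter := newSpecialsIter(span)
//	for iter.valid() {
//		if shouldFree(iter.s) {
//			s := iter.unlinkAndNext()
//			p := unsafe.Pointer(span.base() + uintptr(s.offset))
//			freeSpecial(s, p, span.elemsize)
//		} else {
//			iter.next()
//		}
//	}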

// freeSpecial performs any cleanup on special s and deallocates it.
// s must already be unlinked from the specials list.
func freeSpecial(s *special, p unsafe.Pointer, size uintptr) {
	switch s.kind {
	case _KindSpecialFinalizer:
		sf := (*specialfinalizer)(unsafe.Pointer(s))
		queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
		lock(&mheap_.speciallock)
		mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
		unlock(&mheap_.speciallock)
	case _KindSpecialProfile:
		sp := (*specialprofile)(unsafe.Pointer(s))
		mProf_Free(sp.b, size)
		lock(&mheap_.speciallock)
		mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
		unlock(&mheap_.speciallock)
	case _KindSpecialReachable:
		sp := (*specialReachable)(unsafe.Pointer(s))
		sp.done = true
		// The creator frees these.
	case _KindSpecialPinCounter:
		lock(&mheap_.speciallock)
		mheap_.specialPinCounterAlloc.free(unsafe.Pointer(s))
		unlock(&mheap_.speciallock)
	default:
		throw("bad special kind")
		panic("not reached")
	}
}

// gcBits is an alloc/mark bitmap. This is always used as gcBits.x.
type gcBits struct {
	_ sys.NotInHeap
	x uint8
}

// bytep returns a pointer to the n'th byte of b.
func (b *gcBits) bytep(n uintptr) *uint8 {
	return addb(&b.x, n)
}

// bitp returns a pointer to the byte containing bit n and a mask for
// selecting that bit from *bytep.
func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8) {
	return b.bytep(n / 8), 1 << (n % 8)
}
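
// For illustration only: bitp(19) returns bytep(19/8) = bytep(2) and
// mask 1<<(19%8) = 1<<3 = 0x08, so bit 19 is tested as *bytep & 0x08.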

const gcBitsChunkBytes = uintptr(64 << 10)
const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})

type gcBitsHeader struct {
	free uintptr // free is the index into bits of the next free byte.
	next uintptr // *gcBits triggers recursive type bug. (issue 14620)
}

type gcBitsArena struct {
	_ sys.NotInHeap
	// gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand.
	free uintptr // free is the index into bits of the next free byte; read/write atomically
	next *gcBitsArena
	bits [gcBitsChunkBytes - gcBitsHeaderBytes]gcBits
}

var gcBitsArenas struct {
	lock     mutex
	free     *gcBitsArena
	next     *gcBitsArena // Read atomically. Write atomically under lock.
	current  *gcBitsArena
	previous *gcBitsArena
}

// tryAlloc allocates from b or returns nil if b does not have enough room.
// This is safe to call concurrently.
func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits {
	if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) {
		return nil
	}
	// Try to allocate from this block.
	end := atomic.Xadduintptr(&b.free, bytes)
	if end > uintptr(len(b.bits)) {
		return nil
	}
	// There was enough room.
	start := end - bytes
	return &b.bits[start]
}
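
// For illustration only: two racing callers may both pass the initial
// check and Xadd b.free past len(b.bits). The losers see
// end > len(b.bits) and return nil, leaving b.free pointing past the
// end. That is harmless: the arena is simply treated as full from
// then on, and callers fall back to another arena.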

// newMarkBits returns a pointer to 8 byte aligned bytes
// to be used for a span's mark bits.
func newMarkBits(nelems uintptr) *gcBits {
	blocksNeeded := (nelems + 63) / 64
	bytesNeeded := blocksNeeded * 8
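	// For illustration only: a span with nelems = 300 objects needs
	// blocksNeeded = (300+63)/64 = 5 uint64 blocks, i.e.
	// bytesNeeded = 40 bytes (320 bits), the smallest 8-byte-aligned
	// size that covers 300 mark bits.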

	// Try directly allocating from the current head arena.
	head := (*gcBitsArena)(atomic.Loadp(unsafe.Pointer(&gcBitsArenas.next)))
	if p := head.tryAlloc(bytesNeeded); p != nil {
		return p
	}

	// There's not enough room in the head arena. We may need to
	// allocate a new arena.
	lock(&gcBitsArenas.lock)
	// Try the head arena again, since it may have changed. Now
	// that we hold the lock, the list head can't change, but its
	// free position still can.
	if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
		unlock(&gcBitsArenas.lock)
		return p
	}

	// Allocate a new arena. This may temporarily drop the lock.
	fresh := newArenaMayUnlock()
	// If newArenaMayUnlock dropped the lock, another thread may
	// have put a fresh arena on the "next" list. Try allocating
	// from next again.
	if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
		// Put fresh back on the free list.
		// TODO: Mark it "already zeroed"
		fresh.next = gcBitsArenas.free
		gcBitsArenas.free = fresh
		unlock(&gcBitsArenas.lock)
		return p
	}

	// Allocate from the fresh arena. We haven't linked it in yet, so
	// this cannot race and is guaranteed to succeed.
	p := fresh.tryAlloc(bytesNeeded)
	if p == nil {
		throw("markBits overflow")
	}

	// Add the fresh arena to the "next" list.
	fresh.next = gcBitsArenas.next
	atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), unsafe.Pointer(fresh))

	unlock(&gcBitsArenas.lock)
	return p
}

// newAllocBits returns a pointer to 8 byte aligned bytes
// to be used for this span's alloc bits.
// newAllocBits is used to provide newly initialized spans with
// allocation bits. For spans that are not being initialized, the
// mark bits are repurposed as allocation bits when the span is swept.
func newAllocBits(nelems uintptr) *gcBits {
	return newMarkBits(nelems)
}

// nextMarkBitArenaEpoch establishes a new epoch for the arenas
// holding the mark bits. The arenas are named relative to the
// current GC cycle, which is demarcated by the call to finishweep_m.
//
// All current spans have been swept.
// During that sweep each span allocated room for its gcmarkBits in
// the gcBitsArenas.next block. gcBitsArenas.next becomes gcBitsArenas.current,
// where the GC will mark objects; after each span is swept, these bits
// will be used to allocate objects.
// gcBitsArenas.current becomes gcBitsArenas.previous, where the spans'
// gcAllocBits live until all the spans have been swept during this GC cycle.
// Sweeping a span extinguishes its references to gcBitsArenas.previous
// by pointing its gcAllocBits into gcBitsArenas.current.
// gcBitsArenas.previous is then released to the gcBitsArenas.free list.
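//
// Schematically (a sketch, not code), each call rotates the arenas
//
//	next -> current -> previous -> free list
//
// and resets gcBitsArenas.next to nil so that newMarkBits allocates a
// fresh arena on demand.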
func nextMarkBitArenaEpoch() {
	lock(&gcBitsArenas.lock)
	if gcBitsArenas.previous != nil {
		if gcBitsArenas.free == nil {
			gcBitsArenas.free = gcBitsArenas.previous
		} else {
			// Find end of previous arenas.
			last := gcBitsArenas.previous
			for last = gcBitsArenas.previous; last.next != nil; last = last.next {
			}
			last.next = gcBitsArenas.free
			gcBitsArenas.free = gcBitsArenas.previous
		}
	}
	gcBitsArenas.previous = gcBitsArenas.current
	gcBitsArenas.current = gcBitsArenas.next
	atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), nil) // newMarkBits calls newArena when needed
	unlock(&gcBitsArenas.lock)
}

// newArenaMayUnlock allocates and zeroes a gcBits arena.
// The caller must hold gcBitsArena.lock. This may temporarily release it.
func newArenaMayUnlock() *gcBitsArena {
	var result *gcBitsArena
	if gcBitsArenas.free == nil {
		unlock(&gcBitsArenas.lock)
		result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gcMiscSys))
		if result == nil {
			throw("runtime: cannot allocate memory")
		}
		lock(&gcBitsArenas.lock)
	} else {
		result = gcBitsArenas.free
		gcBitsArenas.free = gcBitsArenas.free.next
		memclrNoHeapPointers(unsafe.Pointer(result), gcBitsChunkBytes)
	}
	result.next = nil
	// If result.bits is not 8 byte aligned, adjust the index so
	// that &result.bits[result.free] is 8 byte aligned.
	if unsafe.Offsetof(gcBitsArena{}.bits)&7 == 0 {
		result.free = 0
	} else {
		result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7)
	}
	return result
}
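
// For illustration only: if &result.bits[0] has an address whose low
// three bits are 101b (5), newArenaMayUnlock sets result.free = 8-5 = 3,
// so the first allocation, at &result.bits[3], lands on an 8-byte
// boundary as newMarkBits requires.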