// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.go for overview.

	"runtime/internal/atomic"
	"runtime/internal/sys"
// The heap itself is the "free[]" and "large" arrays,
// but all the other global data is here too.
	free [_MaxMHeapList]mSpanList // free lists of given length
	freelarge mSpanList // free lists length >= _MaxMHeapList
	busy [_MaxMHeapList]mSpanList // busy lists of large objects of given length
	busylarge mSpanList // busy lists of large objects length >= _MaxMHeapList
	allspans **mspan // all spans out there
	gcspans **mspan // copy of allspans referenced by gc marker or sweeper

	sweepgen uint32 // sweep generation, see comment in mspan
	sweepdone uint32 // all spans are swept

	pagesInUse uint64 // pages of spans in stats _MSpanInUse; R/W with mheap.lock
	spanBytesAlloc uint64 // bytes of spans allocated this cycle; updated atomically
	pagesSwept uint64 // pages swept this cycle; updated atomically
	sweepPagesPerByte float64 // proportional sweep ratio; written with lock, read without
	// TODO(austin): pagesInUse should be a uintptr, but the 386
	// compiler can't 8-byte align fields.
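	//
	// For intuition (a simplified invariant, not a statement of the exact
	// implementation): proportional sweeping tries to keep
	//
	//	pagesSwept >= sweepPagesPerByte * spanBytesAlloc
	//
	// so that sweeping keeps pace with span allocation over the cycle.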
	largefree uint64 // bytes freed for large objects (>maxsmallsize)
	nlargefree uint64 // number of frees for large objects (>maxsmallsize)
	nsmallfree [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)

	// range of addresses we might see in the heap
	arena_used uintptr // always mHeap_Map{Bits,Spans} before updating

	// central free lists for small size classes.
	// the padding makes sure that the MCentrals are
	// spaced CacheLineSize bytes apart, so that each MCentral.lock
	// gets its own cache line.
	central [_NumSizeClasses]struct {
		pad [sys.CacheLineSize]byte

	spanalloc fixalloc // allocator for span*
	cachealloc fixalloc // allocator for mcache*
	specialfinalizeralloc fixalloc // allocator for specialfinalizer*
	specialprofilealloc fixalloc // allocator for specialprofile*
	speciallock mutex // lock for special record allocators.
// An MSpan is a run of pages.
//
// When an MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When an MSpan is allocated, state == MSpanInUse or MSpanStack
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
//
// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
// MCentral's span lists.
//
// An MSpan representing actual memory has state _MSpanInUse,
// _MSpanStack, or _MSpanFree. Transitions between these states are
// constrained as follows:
//
// * A span may transition from free to in-use or stack during any GC
//   phase.
//
// * During sweeping (gcphase == _GCoff), a span may transition from
//   in-use to free (as a result of sweeping) or stack to free (as a
//   result of stacks being freed).
//
// * During GC (gcphase != _GCoff), a span *must not* transition from
//   stack or in-use to free. Because concurrent GC may read a pointer
//   and then look up its span, the span state must be monotonic.
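//
// For example, these rules can be summarized as follows (an illustrative
// sketch only; no such checker exists in the runtime, and only the
// transitions mentioned above are considered):
//
//	// transitionOK reports whether a span state change obeys the rules above.
//	func transitionOK(from, to uint8, sweeping bool) bool {
//		switch {
//		case from == _MSpanFree:
//			// free -> in-use or free -> stack may happen at any time.
//			return to == _MSpanInUse || to == _MSpanStack
//		case to == _MSpanFree:
//			// in-use -> free and stack -> free may only happen while
//			// sweeping (gcphase == _GCoff), never during GC.
//			return sweeping
//		}
//		return false
//	}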
	_MSpanInUse = iota // allocated for garbage collected heap
	_MSpanStack // allocated for use by stack allocator

// mSpanList heads a linked list of spans.
//
// Linked list structure is based on BSD's "tail queue" data structure.
type mSpanList struct {
	first *mspan // first span in list, or nil if none
	last **mspan // last span's next field, or first if none
	next *mspan // next span in list, or nil if none
	prev **mspan // previous span's next field, or list head's first field if none
	list *mSpanList // For debugging. TODO: Remove.

	// TODO(rlh): Eliminate start field and use startAddr >> PageShift instead.
	startAddr uintptr // uintptr(s.start << _PageShift) aka s.base()
	start pageID // starting page number
	npages uintptr // number of pages in span
	stackfreelist gclinkptr // list of free stacks, avoids overloading freelist

	// freeindex is the slot index between 0 and nelems at which to begin scanning
	// for the next free object in this span.
	// Each allocation scans allocBits starting at freeindex until it encounters a 0
	// indicating a free object. freeindex is then adjusted so that subsequent scans begin
	// just past the newly discovered free object.
	//
	// If freeindex == nelems, this span has no free objects.
	//
	// allocBits is a bitmap of objects in this span.
	// If n >= freeindex and allocBits[n/8] & (1<<(n%8)) is 0
	// then object n is free;
	// otherwise, object n is allocated. Bits starting at nelems are
	// undefined and should never be referenced.
	//
	// Object n starts at address n*elemsize + (start << pageShift).
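	//
	// For example, the scan described above amounts to roughly the following
	// (an illustrative sketch only, not the runtime's implementation):
	//
	//	// nextFreeIndex returns the index of the next free object at or
	//	// after freeindex, or nelems if the span has no free objects left.
	//	func nextFreeIndex(allocBits []uint8, freeindex, nelems uintptr) uintptr {
	//		for i := freeindex; i < nelems; i++ {
	//			if allocBits[i/8]&(1<<(i%8)) == 0 {
	//				return i // clear bit: object i is free
	//			}
	//		}
	//		return nelems
	//	}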
	// TODO: Look up nelems from sizeclass and remove this field if it
	// helps performance.
	nelems uintptr // number of objects in the span.

	// Cache of the allocBits at freeindex. allocCache is shifted
	// such that the lowest bit corresponds to the bit freeindex.
	// allocCache holds the complement of allocBits, thus allowing
	// ctz64 (count trailing zeros) to use it directly.
	// allocCache may contain bits beyond s.nelems; the caller must ignore
	// these.
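	//
	// For example (an illustrative sketch only; the helper name is
	// hypothetical):
	//
	//	// nextFreeFast returns the index of the next free object implied by
	//	// allocCache, or freeindex+64 if the cached word has no free bits.
	//	func nextFreeFast(allocCache uint64, freeindex uintptr) uintptr {
	//		// allocCache is the complement of allocBits, so a 1 bit means
	//		// "free" and counting trailing zeros finds the first free slot.
	//		return freeindex + uintptr(sys.Ctz64(allocCache))
	//	}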
	allocBits *[maxObjsPerSpan / 8]uint8
	gcmarkBits *[maxObjsPerSpan / 8]uint8

	// allocBits and gcmarkBits currently point to either markbits1
	// or markbits2. At the end of a GC cycle allocBits and
	// gcmarkBits swap roles simply by swapping pointers.
	// This level of indirection also facilitates an implementation
	// where markbits1 and markbits2 are not inlined in mspan.
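	//
	// For example, the role swap amounts to no more than the following
	// (an illustrative sketch only; not a method that exists here):
	//
	//	// swapMarkBits makes the just-computed mark bits the new allocation
	//	// bits and recycles the old allocation bits for the next cycle's marks.
	//	func (s *mspan) swapMarkBits() {
	//		s.allocBits, s.gcmarkBits = s.gcmarkBits, s.allocBits
	//	}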
	markbits1 [maxObjsPerSpan / 8]uint8 // A bit for each obj.
	markbits2 [maxObjsPerSpan / 8]uint8 // A bit for each obj.

	// if sweepgen == h->sweepgen - 2, the span needs sweeping
	// if sweepgen == h->sweepgen - 1, the span is currently being swept
	// if sweepgen == h->sweepgen, the span is swept and ready to use
	// h->sweepgen is incremented by 2 after every GC
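	//
	// For example (an illustrative sketch only; the helper name is
	// hypothetical):
	//
	//	// spanNeedsSweep reports whether s still has to be swept in the
	//	// current sweep generation.
	//	func spanNeedsSweep(h *mheap, s *mspan) bool {
	//		return atomic.Load(&s.sweepgen) == h.sweepgen-2
	//	}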
	divMul uint32 // for divide by elemsize - divMagic.mul
	allocCount uint16 // capacity - number of objects in freelist
	sizeclass uint8 // size class
	incache bool // being used by an mcache
	state uint8 // mspaninuse etc
	needzero uint8 // needs to be zeroed before allocation
	divShift uint8 // for divide by elemsize - divMagic.shift
	divShift2 uint8 // for divide by elemsize - divMagic.shift2
	elemsize uintptr // computed from sizeclass or from npages
	unusedsince int64 // first time spotted by gc in mspanfree state
	npreleased uintptr // number of pages released to the os
	limit uintptr // end of data in span
	speciallock mutex // guards specials list
	specials *special // linked list of special records sorted by offset.
	baseMask uintptr // if non-0, elemsize is a power of 2, & this will get object allocation base
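
	// The divMul/divShift/divShift2/baseMask fields let the runtime find the
	// base of the object containing a pointer p without a hardware divide.
	// One plausible way to use them (an illustrative sketch only):
	//
	//	func objectBase(s *mspan, p uintptr) uintptr {
	//		if s.baseMask != 0 {
	//			// elemsize is a power of 2: mask off the low bits.
	//			return p & s.baseMask
	//		}
	//		// Divide by elemsize using a magic multiply and shifts.
	//		n := uintptr(uint64(p-s.base()) >> s.divShift * uint64(s.divMul) >> s.divShift2)
	//		return s.base() + n*s.elemsize
	//	}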
func (s *mspan) base() uintptr {
	return s.startAddr
}

func (s *mspan) layout() (size, n, total uintptr) {
	total = s.npages << _PageShift

var h_allspans []*mspan // TODO: make this h.allspans once mheap can be defined in Go

// h_spans is a lookup table to map virtual address page IDs to *mspan.
// For allocated spans, their pages map to the span itself.
// For free spans, only the lowest and highest pages map to the span itself. Internal
// pages map to an arbitrary span.
// For pages that have never been allocated, h_spans entries are nil.
var h_spans []*mspan // TODO: make this h.spans once mheap can be defined in Go
func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
	if len(h_allspans) >= cap(h_allspans) {
		n := 64 * 1024 / sys.PtrSize
		if n < cap(h_allspans)*3/2 {
			n = cap(h_allspans) * 3 / 2
		sp := (*slice)(unsafe.Pointer(&new))
		sp.array = sysAlloc(uintptr(n)*sys.PtrSize, &memstats.other_sys)
			throw("runtime: cannot allocate memory")
		sp.len = len(h_allspans)
		if len(h_allspans) > 0 {
			copy(new, h_allspans)
			// Don't free the old array if it's referenced by sweep.
			// See the comment in mgc.go.
			if h.allspans != mheap_.gcspans {
				sysFree(unsafe.Pointer(h.allspans), uintptr(cap(h_allspans))*sys.PtrSize, &memstats.other_sys)
		h.allspans = (**mspan)(sp.array)
	h_allspans = append(h_allspans, s)
	h.nspan = uint32(len(h_allspans))
// inheap reports whether b is a pointer into a (potentially dead) heap object.
// It returns false for pointers into stack spans.
// Non-preemptible because it is used by write barriers.
func inheap(b uintptr) bool {
	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
	// Not a beginning of a block; consult the span table to find the block beginning.
	x -= mheap_.arena_start >> _PageShift
	if s == nil || pageID(k) < s.start || b >= s.limit || s.state != mSpanInUse {

// TODO: spanOf and spanOfUnchecked are open-coded in a lot of places.
// Use the functions instead.

// spanOf returns the span of p. If p does not point into the heap or
// no span contains p, spanOf returns nil.
func spanOf(p uintptr) *mspan {
	if p == 0 || p < mheap_.arena_start || p >= mheap_.arena_used {
	return spanOfUnchecked(p)

// spanOfUnchecked is equivalent to spanOf, but the caller must ensure
// that p points into the heap (that is, mheap_.arena_start <= p <
// mheap_.arena_used).
func spanOfUnchecked(p uintptr) *mspan {
	return h_spans[(p-mheap_.arena_start)>>_PageShift]
func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
	_g_.m.mcache.local_nlookup++
	if sys.PtrSize == 4 && _g_.m.mcache.local_nlookup >= 1<<30 {
		// purge cache stats to prevent overflow
		purgecachedstats(_g_.m.mcache)

	s := mheap_.lookupMaybe(unsafe.Pointer(v))
	if s.sizeclass == 0 {
		*size = s.npages << _PageShift
// Initialize the heap.
func (h *mheap) init(spans_size uintptr) {
	h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
	h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
	h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
	h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)

	// h->mapcache needs no init
	for i := range h.free {
	for i := range h.central {
		h.central[i].mcentral.init(int32(i))

	sp := (*slice)(unsafe.Pointer(&h_spans))
	sp.array = unsafe.Pointer(h.spans)
	sp.len = int(spans_size / sys.PtrSize)
	sp.cap = int(spans_size / sys.PtrSize)
// mHeap_MapSpans makes sure that the spans are mapped
// up to the new value of arena_used.
//
// It must be called with the expected new value of arena_used,
// *before* h.arena_used has been updated.
// Waiting to update arena_used until after the memory has been mapped
// avoids faults when other threads try to access the bitmap immediately
// after observing the change to arena_used.
func (h *mheap) mapSpans(arena_used uintptr) {
	// Map spans array, PageSize at a time.
	n = n / _PageSize * sys.PtrSize
	n = round(n, sys.PhysPageSize)
	if h.spans_mapped >= n {
	sysMap(add(unsafe.Pointer(h.spans), h.spans_mapped), n-h.spans_mapped, h.arena_reserved, &memstats.other_sys)
// Sweeps spans in list until it reclaims at least npages into the heap.
// Returns the actual number of pages reclaimed.
func (h *mheap) reclaimList(list *mSpanList, npages uintptr) uintptr {
	sg := mheap_.sweepgen
	for s := list.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// swept spans are at the end of the list
			// the span could have been moved elsewhere
		if s.sweepgen == sg-1 {
			// the span is being swept by the background sweeper, skip
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
// Sweeps and reclaims at least npage pages into the heap.
// Called before allocating npage pages.
func (h *mheap) reclaim(npage uintptr) {
	// First try to sweep busy spans with large objects of size >= npage;
	// this has good chances of reclaiming the necessary space.
	for i := int(npage); i < len(h.busy); i++ {
		if h.reclaimList(&h.busy[i], npage) != 0 {

	// Then try even larger objects.
	if h.reclaimList(&h.busylarge, npage) != 0 {

	// Now try smaller objects.
	// One such object is not enough, so we need to reclaim several of them.
	reclaimed := uintptr(0)
	for i := 0; i < int(npage) && i < len(h.busy); i++ {
		reclaimed += h.reclaimList(&h.busy[i], npage-reclaimed)
		if reclaimed >= npage {

	// Now sweep everything that is not yet swept.
		if n == ^uintptr(0) { // all spans are swept
		if reclaimed >= npage {
// Allocate a new span of npage pages from the heap for GC'd memory
// and record its size class in the HeapMap and HeapMapCache.
func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
		throw("_mheap_alloc not on g0 stack")

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	if h.sweepdone == 0 {
		// TODO(austin): This tends to sweep a large number of
		// spans in order to find a few completely free spans
		// (for example, in the garbage benchmark, this sweeps
		// ~30x the number of pages it's trying to allocate).
		// If GC kept a bit for whether there were any marks
		// in a span, we could release these free spans
		// at the end of GC and eliminate this entirely.

	// transfer stats from cache to global
	memstats.heap_scan += uint64(_g_.m.mcache.local_scan)
	_g_.m.mcache.local_scan = 0
	memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
	_g_.m.mcache.local_tinyallocs = 0

	s := h.allocSpanLocked(npage)
		// Record span info, because gc needs to be
		// able to map interior pointer to containing span.
		atomic.Store(&s.sweepgen, h.sweepgen)
		s.state = _MSpanInUse
		s.sizeclass = uint8(sizeclass)
			s.elemsize = s.npages << _PageShift
			s.elemsize = uintptr(class_to_size[sizeclass])
			m := &class_to_divmagic[sizeclass]
			s.divShift2 = m.shift2
			s.baseMask = m.baseMask
		// update stats, sweep lists
		h.pagesInUse += uint64(npage)
			memstats.heap_objects++
			atomic.Xadd64(&memstats.heap_live, int64(npage<<_PageShift))
			// Swept spans are at the end of lists.
			if s.npages < uintptr(len(h.free)) {
				h.busy[s.npages].insertBack(s)
				h.busylarge.insertBack(s)

	// heap_scan and heap_live were updated.
	if gcBlackenEnabled != 0 {
		gcController.revise()

	// h_spans is accessed concurrently without synchronization
	// from other threads. Hence, there must be a store/store
	// barrier here to ensure the writes to h_spans above happen
	// before the caller can publish a pointer p to an object
	// allocated from s. As soon as this happens, the garbage
	// collector running on another processor could read p and
	// look up s in h_spans. The unlock acts as the barrier to
	// order these writes. On the read side, the data dependency
	// between p and the index in h_spans orders the reads.
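	//
	// Schematically (an illustrative interleaving, not literal code):
	//
	//	this function                     GC on another P
	//	  h_spans[...] = s
	//	  unlock(&h.lock)   // release barrier
	//	  (caller publishes p)
	//	                                  reads p
	//	                                  s := h_spans[(p-arena_start)>>_PageShift]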
func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool) *mspan {
	// Don't do any operations that lock the heap on the G stack.
	// It might trigger stack growth, and the stack growth code needs
	// to be able to allocate heap.
		s = h.alloc_m(npage, sizeclass, large)

	if needzero && s.needzero != 0 {
		memclr(unsafe.Pointer(s.base()), s.npages<<_PageShift)

func (h *mheap) allocStack(npage uintptr) *mspan {
		throw("mheap_allocstack not on g0 stack")
	s := h.allocSpanLocked(npage)
	s.state = _MSpanStack
	memstats.stacks_inuse += uint64(s.npages << _PageShift)

	// This unlock acts as a release barrier. See mHeap_Alloc_m.
// Allocates a span of the given size. h must be locked.
// The returned span has been removed from the
// free list, but its state is still MSpanFree.
func (h *mheap) allocSpanLocked(npage uintptr) *mspan {
	// Try in fixed-size lists up to max.
	for i := int(npage); i < len(h.free); i++ {

	// Best fit in list of large spans.
	s = h.allocLarge(npage)
		s = h.allocLarge(npage)

	if s.state != _MSpanFree {
		throw("MHeap_AllocLocked - MSpan not free")
	if s.npages < npage {
		throw("MHeap_AllocLocked - bad npages")
		throw("still in list")
	if s.npreleased > 0 {
		sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
		memstats.heap_released -= uint64(s.npreleased << _PageShift)

	if s.npages > npage {
		// Trim extra and put it back in the heap.
		t := (*mspan)(h.spanalloc.alloc())
		t.init(s.start+pageID(npage), s.npages-npage)
		p := uintptr(t.start)
		p -= (h.arena_start >> _PageShift)
		h_spans[p+t.npages-1] = t
		t.needzero = s.needzero
		s.state = _MSpanStack // prevent coalescing with s
		t.state = _MSpanStack
		h.freeSpanLocked(t, false, false, s.unusedsince)

	p := uintptr(s.start)
	p -= (h.arena_start >> _PageShift)
	for n := uintptr(0); n < npage; n++ {

	memstats.heap_inuse += uint64(npage << _PageShift)
	memstats.heap_idle -= uint64(npage << _PageShift)

	//println("spanalloc", hex(s.start<<_PageShift))
		throw("still in list")
// Allocate a span of exactly npage pages from the list of large spans.
func (h *mheap) allocLarge(npage uintptr) *mspan {
	return bestFit(&h.freelarge, npage, nil)

// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
func bestFit(list *mSpanList, npage uintptr, best *mspan) *mspan {
	for s := list.first; s != nil; s = s.next {
		if s.npages < npage {
		if best == nil || s.npages < best.npages || (s.npages == best.npages && s.start < best.start) {
// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
func (h *mheap) grow(npage uintptr) bool {
	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB.
	npage = round(npage, (64<<10)/_PageSize)
	ask := npage << _PageShift
	if ask < _HeapAllocChunk {
		ask = _HeapAllocChunk

		if ask > npage<<_PageShift {
			ask = npage << _PageShift
			print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s := (*mspan)(h.spanalloc.alloc())
	s.init(pageID(uintptr(v)>>_PageShift), ask>>_PageShift)
	p := uintptr(s.start)
	p -= (h.arena_start >> _PageShift)
	for i := p; i < p+s.npages; i++ {
	atomic.Store(&s.sweepgen, h.sweepgen)
	s.state = _MSpanInUse
	h.pagesInUse += uint64(s.npages)
	h.freeSpanLocked(s, false, true, 0)
// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
func (h *mheap) lookup(v unsafe.Pointer) *mspan {
	return h_spans[p>>_PageShift]

// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
func (h *mheap) lookupMaybe(v unsafe.Pointer) *mspan {
	if uintptr(v) < h.arena_start || uintptr(v) >= h.arena_used {
	p := uintptr(v) >> _PageShift
	q -= h.arena_start >> _PageShift
	if s == nil || p < uintptr(s.start) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != _MSpanInUse {
// Free the span back into the heap.
func (h *mheap) freeSpan(s *mspan, acct int32) {
		memstats.heap_scan += uint64(mp.mcache.local_scan)
		mp.mcache.local_scan = 0
		memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
		mp.mcache.local_tinyallocs = 0

		// Tell msan that this entire span is no longer in use.
		base := unsafe.Pointer(s.base())
		bytes := s.npages << _PageShift
		msanfree(base, bytes)

		memstats.heap_objects--
		if gcBlackenEnabled != 0 {
			// heap_scan changed.
			gcController.revise()
		h.freeSpanLocked(s, true, true, 0)

func (h *mheap) freeStack(s *mspan) {
		throw("mheap_freestack not on g0 stack")
	memstats.stacks_inuse -= uint64(s.npages << _PageShift)
	h.freeSpanLocked(s, true, true, 0)
// s must be on a busy list (h.busy or h.busylarge) or unlinked.
func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
		if s.allocCount != 0 {
			throw("MHeap_FreeSpanLocked - invalid stack free")
		if s.allocCount != 0 || s.sweepgen != h.sweepgen {
			print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.start<<_PageShift), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
			throw("MHeap_FreeSpanLocked - invalid free")
		h.pagesInUse -= uint64(s.npages)
		throw("MHeap_FreeSpanLocked - invalid span state")

		memstats.heap_inuse -= uint64(s.npages << _PageShift)
		memstats.heap_idle += uint64(s.npages << _PageShift)
		h.busyList(s.npages).remove(s)

	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s.unusedsince = unusedsince
	if unusedsince == 0 {
		s.unusedsince = nanotime()

	// Coalesce with earlier, later spans.
	p := uintptr(s.start)
	p -= h.arena_start >> _PageShift
		if t != nil && t.state == _MSpanFree {
			s.startAddr = uintptr(s.start << _PageShift)
			s.npreleased = t.npreleased // absorb released pages
			s.needzero |= t.needzero
			h.freeList(t.npages).remove(t)
			h.spanalloc.free(unsafe.Pointer(t))
	if (p+s.npages)*sys.PtrSize < h.spans_mapped {
		t := h_spans[p+s.npages]
		if t != nil && t.state == _MSpanFree {
			s.npreleased += t.npreleased
			s.needzero |= t.needzero
			h_spans[p+s.npages-1] = s
			h.freeList(t.npages).remove(t)
			h.spanalloc.free(unsafe.Pointer(t))

	// Insert s into appropriate list.
	h.freeList(s.npages).insert(s)
func (h *mheap) freeList(npages uintptr) *mSpanList {
	if npages < uintptr(len(h.free)) {
		return &h.free[npages]

func (h *mheap) busyList(npages uintptr) *mSpanList {
	if npages < uintptr(len(h.free)) {
		return &h.busy[npages]
func scavengelist(list *mSpanList, now, limit uint64) uintptr {
	var sumreleased uintptr
	for s := list.first; s != nil; s = s.next {
		if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
			start := uintptr(s.start) << _PageShift
			end := start + s.npages<<_PageShift
			if sys.PhysPageSize > _PageSize {
				// We can only release pages in
				// PhysPageSize blocks, so round start
				// and end in. (Otherwise, madvise
				// will round them *out* and release
				// more memory than we want.)
				start = (start + sys.PhysPageSize - 1) &^ (sys.PhysPageSize - 1)
				end &^= sys.PhysPageSize - 1
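				// For example (illustrative only): with an 8KB _PageSize and a
				// 16KB PhysPageSize, a span covering [0x1a000, 0x22000) rounds
				// in to [0x1c000, 0x20000), so only physical pages that lie
				// entirely within the span are released.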
			released := len - (s.npreleased << _PageShift)
			if sys.PhysPageSize > _PageSize && released == 0 {
			memstats.heap_released += uint64(released)
			sumreleased += released
			s.npreleased = len >> _PageShift
			sysUnused(unsafe.Pointer(start), len)

func (h *mheap) scavenge(k int32, now, limit uint64) {
	var sumreleased uintptr
	for i := 0; i < len(h.free); i++ {
		sumreleased += scavengelist(&h.free[i], now, limit)
	sumreleased += scavengelist(&h.freelarge, now, limit)

	if debug.gctrace > 0 {
			print("scvg", k, ": ", sumreleased>>20, " MB released\n")
		// TODO(dvyukov): these stats are incorrect as we don't subtract stack usage from heap.
		// But we can't call ReadMemStats on g0 holding locks.
		print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
func runtime_debug_freeOSMemory() {
	gcStart(gcForceBlockMode, false)
	systemstack(func() { mheap_.scavenge(-1, ^uint64(0), 0) })

// Initialize a new span with the given start and npages.
func (span *mspan) init(start pageID, npages uintptr) {
	span.startAddr = uintptr(start << _PageShift)
	span.state = _MSpanDead
	span.speciallock.key = 0
	span.allocBits = &span.markbits1
	span.gcmarkBits = &span.markbits2
	// Determine if clearing the bitmaps here is actually needed. It happens
	// once per span, so it isn't expensive. This is to be replaced by an
	// arena-based system where things can be cleared all at once, so don't
	// worry about optimizing this.
	for i := 0; i < len(span.markbits1); i++ {
		span.allocBits[i] = 0
		span.gcmarkBits[i] = 0
func (span *mspan) inList() bool {
	return span.prev != nil

// Initialize an empty doubly-linked list.
func (list *mSpanList) init() {
	list.last = &list.first

func (list *mSpanList) remove(span *mspan) {
	if span.prev == nil || span.list != list {
		println("runtime: failed MSpanList_Remove", span, span.prev, span.list, list)
		throw("MSpanList_Remove")
	if span.next != nil {
		span.next.prev = span.prev
		// TODO: After we remove the span.list != list check above,
		// we could at least still check list.last == &span.next here.
		list.last = span.prev
	*span.prev = span.next

func (list *mSpanList) isEmpty() bool {
	return list.first == nil

func (list *mSpanList) insert(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("runtime: failed MSpanList_Insert", span, span.next, span.prev, span.list)
		throw("MSpanList_Insert")
	span.next = list.first
	if list.first != nil {
		list.first.prev = &span.next
		list.last = &span.next
	span.prev = &list.first

func (list *mSpanList) insertBack(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("failed MSpanList_InsertBack", span, span.next, span.prev, span.list)
		throw("MSpanList_InsertBack")
	span.prev = list.last
	list.last = &span.next
	_KindSpecialFinalizer = 1
	_KindSpecialProfile = 2
	// Note: The finalizer special must be first because if we're freeing
	// an object, a finalizer special will cause the freeing operation
	// to abort, and we want to keep the other special records around
	// if that happens.

type special struct {
	next *special // linked list in span
	offset uint16 // span offset of object
	kind byte // kind of special

// Adds the special record s to the list of special records for
// the object p. All fields of s should be filled in except for
// offset & next, which this routine will fill in.
// Returns true if the special was successfully added, false otherwise.
// (The add will fail only if a record with the same p and s->kind
// already exists.)
func addspecial(p unsafe.Pointer, s *special) bool {
	span := mheap_.lookupMaybe(p)
		throw("addspecial on invalid pointer")

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.

	offset := uintptr(p) - uintptr(span.start<<_PageShift)

	lock(&span.speciallock)

	// Find splice point, check for existing record.
		if offset == uintptr(x.offset) && kind == x.kind {
			unlock(&span.speciallock)
			return false // already exists
		if offset < uintptr(x.offset) || (offset == uintptr(x.offset) && kind < x.kind) {

	// Splice in record, fill in offset.
	s.offset = uint16(offset)
	unlock(&span.speciallock)

// Removes the Special record of the given kind for the object p.
// Returns the record if the record existed, nil otherwise.
// The caller must FixAlloc_Free the result.
func removespecial(p unsafe.Pointer, kind uint8) *special {
	span := mheap_.lookupMaybe(p)
		throw("removespecial on invalid pointer")

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.

	offset := uintptr(p) - uintptr(span.start<<_PageShift)

	lock(&span.speciallock)
		// This function is used for finalizers only, so we don't check for
		// "interior" specials (p must be exactly equal to s->offset).
		if offset == uintptr(s.offset) && kind == s.kind {
			unlock(&span.speciallock)
	unlock(&span.speciallock)
// The described object has a finalizer set for it.
type specialfinalizer struct {

// Adds a finalizer to the object p. Returns true if it succeeded.
func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
	lock(&mheap_.speciallock)
	s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialFinalizer
	if addspecial(p, &s.special) {
		// This is responsible for maintaining the same
		// GC-related invariants as markrootSpans in any
		// situation where it's possible that markrootSpans
		// has already run but mark termination hasn't yet.
		if gcphase != _GCoff {
			_, base, _ := findObject(p)
			gcw := &mp.p.ptr().gcw
			// Mark everything reachable from the object
			// so it's retained for the finalizer.
			scanobject(uintptr(base), gcw)
			// Mark the finalizer itself, since the
			// special isn't part of the GC'd heap.
			scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw)
			if gcBlackenPromptly {

	// There was an old finalizer
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)

// Removes the finalizer (if any) from the object p.
func removefinalizer(p unsafe.Pointer) {
	s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
		return // there wasn't a finalizer to remove
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
// The described object is being heap profiled.
type specialprofile struct {

// Set the heap profile bucket associated with addr to b.
func setprofilebucket(p unsafe.Pointer, b *bucket) {
	lock(&mheap_.speciallock)
	s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialProfile
	if !addspecial(p, &s.special) {
		throw("setprofilebucket: profile already set")

// Do whatever cleanup needs to be done to deallocate s. It has
// already been unlinked from the MSpan specials list.
func freespecial(s *special, p unsafe.Pointer, size uintptr) {
	case _KindSpecialFinalizer:
		sf := (*specialfinalizer)(unsafe.Pointer(s))
		queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
		lock(&mheap_.speciallock)
		mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
		unlock(&mheap_.speciallock)
	case _KindSpecialProfile:
		sp := (*specialprofile)(unsafe.Pointer(s))
		mProf_Free(sp.b, size)
		lock(&mheap_.speciallock)
		mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
		unlock(&mheap_.speciallock)
		throw("bad special kind")
		panic("not reached")