next *mspan // next span in list, or nil if none
prev **mspan // previous span's next field, or list head's first field if none
list *mSpanList // For debugging. TODO: Remove.
+ // TODO(rlh): Eliminate start field and use startAddr >> PageShift instead.
+ startAddr uintptr // uintptr(s.start << _PageShift) aka s.base()
+ start pageID // starting page number
+ npages uintptr // number of pages in span
+ stackfreelist gclinkptr // list of free stacks, avoids overloading freelist
+
+ // freeindex is the slot index between 0 and nelems at which to begin scanning
+ // for the next free object in this span.
+ // Each allocation scans allocBits starting at freeindex until it encounters a 0
+ // indicating a free object. freeindex is then adjusted so that subsequent scans begin
+ // just past the newly discovered free object.
+ //
+ // If freeindex == nelems, this span has no free objects.
+ //
+ // allocBits is a bitmap of objects in this span.
+ // If n >= freeindex and allocBits[n/8] & (1<<(n%8)) is 0
+ // then object n is free;
+ // otherwise, object n is allocated. Bits starting at nelems are
+ // undefined and should never be referenced.
+ //
+ // Object n starts at address n*elemsize + (start << pageShift).
+ freeindex uintptr
+ // TODO: Look up nelems from sizeclass and remove this field if it
+ // helps performance.
+ nelems uintptr // number of objects in the span.
+
+ // Cache of the allocBits at freeindex. allocCache is shifted
+ // such that the lowest bit corresponds to the bit freeindex.
+ // allocCache holds the complement of allocBits, thus allowing
+ // ctz64 (count trailing zeros) to use it directly.
+ // allocCache may contain bits beyond s.nelems; the caller must ignore
+ // these.
+ allocCache uint64
+ allocBits *[maxObjsPerSpan / 8]uint8
+ gcmarkBits *[maxObjsPerSpan / 8]uint8
+
+ // allocBits and gcmarkBits currently point to either markbits1
+ // or markbits2. At the end of a GC cycle allocBits and
+ // gcmarkBits swap roles simply by swapping pointers.
+ // This level of indirection also facilitates an implementation
+ // where markbits1 and markbits2 are not inlined in mspan.
+ markbits1 [maxObjsPerSpan / 8]uint8 // A bit for each obj.
+ markbits2 [maxObjsPerSpan / 8]uint8 // A bit for each obj.
- start pageID // starting page number
- npages uintptr // number of pages in span
- freelist gclinkptr // list of free objects
// sweep generation:
// if sweepgen == h->sweepgen - 2, the span needs sweeping
// if sweepgen == h->sweepgen - 1, the span is currently being swept
sweepgen uint32
divMul uint32 // for divide by elemsize - divMagic.mul
- ref uint16 // capacity - number of objects in freelist
+ allocCount uint16 // capacity - number of objects in freelist
sizeclass uint8 // size class
incache bool // being used by an mcache
state uint8 // mspaninuse etc
}
func (s *mspan) base() uintptr {
- return uintptr(s.start << _PageShift)
+ return s.startAddr
}
func (s *mspan) layout() (size, n, total uintptr) {
return 0
}
- p := uintptr(s.start) << _PageShift
+ p := s.base()
if s.sizeclass == 0 {
// Large object.
if base != nil {
// able to map interior pointer to containing span.
atomic.Store(&s.sweepgen, h.sweepgen)
s.state = _MSpanInUse
- s.freelist = 0
- s.ref = 0
+ s.allocCount = 0
s.sizeclass = uint8(sizeclass)
if sizeclass == 0 {
s.elemsize = s.npages << _PageShift
if s != nil {
if needzero && s.needzero != 0 {
- memclr(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
+ memclr(unsafe.Pointer(s.base()), s.npages<<_PageShift)
}
s.needzero = 0
}
s := h.allocSpanLocked(npage)
if s != nil {
s.state = _MSpanStack
- s.freelist = 0
- s.ref = 0
+ s.stackfreelist = 0
+ s.allocCount = 0
memstats.stacks_inuse += uint64(s.npages << _PageShift)
}
throw("still in list")
}
if s.npreleased > 0 {
- sysUsed(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
+ sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
memstats.heap_released -= uint64(s.npreleased << _PageShift)
s.npreleased = 0
}
mp.mcache.local_scan = 0
memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
mp.mcache.local_tinyallocs = 0
+ if msanenabled {
+ // Tell msan that this entire span is no longer in use.
+ base := unsafe.Pointer(s.base())
+ bytes := s.npages << _PageShift
+ msanfree(base, bytes)
+ }
if acct != 0 {
memstats.heap_objects--
}
func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
switch s.state {
case _MSpanStack:
- if s.ref != 0 {
+ if s.allocCount != 0 {
throw("MHeap_FreeSpanLocked - invalid stack free")
}
case _MSpanInUse:
- if s.ref != 0 || s.sweepgen != h.sweepgen {
- print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.start<<_PageShift), " ref ", s.ref, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
+ if s.allocCount != 0 || s.sweepgen != h.sweepgen {
+ print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.start<<_PageShift), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
throw("MHeap_FreeSpanLocked - invalid free")
}
h.pagesInUse -= uint64(s.npages)
t := h_spans[p-1]
if t != nil && t.state == _MSpanFree {
s.start = t.start
+ s.startAddr = uintptr(s.start << _PageShift)
s.npages += t.npages
s.npreleased = t.npreleased // absorb released pages
s.needzero |= t.needzero
span.prev = nil
span.list = nil
span.start = start
+ span.startAddr = uintptr(start << _PageShift)
span.npages = npages
- span.freelist = 0
- span.ref = 0
+ span.allocCount = 0
span.sizeclass = 0
span.incache = false
span.elemsize = 0
span.speciallock.key = 0
span.specials = nil
span.needzero = 0
+ span.freeindex = 0
+ span.allocBits = &span.markbits1
+ span.gcmarkBits = &span.markbits2
+ // TODO: determine if this zeroing is actually needed. It runs once
+ // per span so it isn't expensive. This is to be replaced by an
+ // arena-based system where things can be cleared all at once, so
+ // don't worry about optimizing this.
+ for i := 0; i < len(span.markbits1); i++ {
+ span.allocBits[i] = 0
+ span.gcmarkBits[i] = 0
+ }
}
func (span *mspan) inList() bool {
func (list *mSpanList) remove(span *mspan) {
if span.prev == nil || span.list != list {
- println("failed MSpanList_Remove", span, span.prev, span.list, list)
+ println("runtime: failed MSpanList_Remove", span, span.prev, span.list, list)
throw("MSpanList_Remove")
}
if span.next != nil {
func (list *mSpanList) insert(span *mspan) {
if span.next != nil || span.prev != nil || span.list != nil {
- println("failed MSpanList_Insert", span, span.next, span.prev, span.list)
+ println("runtime: failed MSpanList_Insert", span, span.next, span.prev, span.list)
throw("MSpanList_Insert")
}
span.next = list.first