const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
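// sysReserve only reserves address space; the returned region starts in the
// Reserved state and is not yet backed by usable memory.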
meta := uintptr(sysReserve(nil, arenaMetaSize))
if meta != 0 {
- mheap_.heapArenaAlloc.init(meta, arenaMetaSize)
+ mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
}
// We want to start the arena low, but if we're linked
for _, arenaSize := range arenaSizes {
a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
if a != nil {
- mheap_.arena.init(uintptr(a), size)
+ mheap_.arena.init(uintptr(a), size, false)
p = mheap_.arena.end // For hint below
break
}
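Note the two init calls above: the arena-metadata allocator passes true, so its linearAlloc transitions memory to Ready on its own, while the heap arena passes false because, as the mheap.grow hunk further down shows, the heap now calls sysMap itself when it grows and accounts the new space as released until it is actually used.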
// heapArenaBytes. sysAlloc returns nil on failure.
// There is no corresponding free function.
//
-// sysAlloc returns a memory region in the Prepared state. This region must
-// be transitioned to Ready before use.
+// sysAlloc returns a memory region in the Reserved state. This region must
+// be transitioned to Prepared and then Ready before use.
//
// h must be locked.
func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
throw("misrounded allocation in sysAlloc")
}
- // Transition from Reserved to Prepared.
- sysMap(v, size, &memstats.heap_sys)
-
mapped:
// Create arena metadata.
for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
}
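The Reserved, Prepared, and Ready states referred to in the comment above correspond to OS-level operations; on Linux this is roughly a PROT_NONE reservation that is later made accessible. A minimal userspace sketch of that pattern, assuming Linux and using mprotect in place of the runtime's internal sysMap/sysUsed, just to make the states concrete:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	const reserveSize = 1 << 20 // 1 MiB of address space

	// "Reserved": address space only, not yet usable.
	region, err := syscall.Mmap(-1, 0, reserveSize,
		syscall.PROT_NONE,
		syscall.MAP_PRIVATE|syscall.MAP_ANON|syscall.MAP_NORESERVE)
	if err != nil {
		panic(err)
	}
	defer syscall.Munmap(region)

	// "Prepared"/"Ready": commit the first page so it can be read and written.
	page := region[:4096]
	if err := syscall.Mprotect(page, syscall.PROT_READ|syscall.PROT_WRITE); err != nil {
		panic(err)
	}
	page[0] = 42
	fmt.Println(page[0]) // 42
}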
// linearAlloc is a simple linear allocator that pre-reserves a region
-// of memory and then maps that region into the Ready state as needed. The
-// caller is responsible for locking.
+// of memory and then optionally maps that region into the Ready state
+// as needed.
+//
+// The caller is responsible for locking.
type linearAlloc struct {
next uintptr // next free byte
mapped uintptr // one byte past end of mapped space
end uintptr // end of reserved space
+
+ mapMemory bool // transition memory from Reserved to Ready if true
}
-func (l *linearAlloc) init(base, size uintptr) {
+func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
if base+size < base {
// Chop off the last byte. The runtime isn't prepared
// to deal with situations where the bounds could overflow.
}
l.next, l.mapped = base, base
l.end = base + size
+ l.mapMemory = mapMemory
}
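The base+size < base test in init is the usual unsigned wraparound guard. A tiny standalone illustration (the helper name is invented here):

package main

import "fmt"

// overflows reports whether base+size wraps around the address space,
// mirroring the base+size < base check in linearAlloc.init.
func overflows(base, size uintptr) bool {
	return base+size < base
}

func main() {
	fmt.Println(overflows(0x1000, 0x1000))         // false
	fmt.Println(overflows(^uintptr(0)-0x10, 0x20)) // true: the region would wrap past the top of the address space
}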
func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
}
l.next = p + size
if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
- // Transition from Reserved to Prepared to Ready.
- sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
- sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
+ if l.mapMemory {
+ // Transition from Reserved to Prepared to Ready.
+ sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
+ sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
+ }
l.mapped = pEnd
}
return unsafe.Pointer(p)
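Setting aside the Reserved/Prepared/Ready bookkeeping, alloc is a plain bump allocator. A self-contained sketch of the same pattern over an ordinary byte slice (the type and method names here are invented for illustration):

package main

import "fmt"

// alignUp rounds n up to the next multiple of a (a must be a power of two),
// the same arithmetic the runtime's alignUp helper uses.
func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

// bumpAlloc hands out offsets from a fixed buffer, advancing a cursor.
type bumpAlloc struct {
	buf  []byte
	next uintptr // offset of the next free byte
}

// alloc returns an aligned offset into buf, or ^uintptr(0) if the buffer is exhausted.
func (b *bumpAlloc) alloc(size, align uintptr) uintptr {
	p := alignUp(b.next, align)
	if p+size > uintptr(len(b.buf)) {
		return ^uintptr(0)
	}
	b.next = p + size
	return p
}

func main() {
	b := &bumpAlloc{buf: make([]byte, 1<<16)}
	fmt.Println(b.alloc(24, 8)) // 0
	fmt.Println(b.alloc(1, 64)) // 64: cursor rounded up from 24 to the next 64-byte boundary
}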
assertLockHeld(&h.lock)
// We must grow the heap in whole palloc chunks.
+ // We call sysMap below, but because we round up to
+ // pallocChunkPages, which is on the order of MiB
+ // (generally >= the huge page size), we won't be
+ // calling it very often.
ask := alignUp(npage, pallocChunkPages) * pageSize
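// For example, with 8 KiB pages and 512-page palloc chunks, asking for
// 3 pages rounds ask up to a single 4 MiB chunk.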
totalGrowth := uintptr(0)
// remains of the current space and switch to
// the new space. This should be rare.
if size := h.curArena.end - h.curArena.base; size != 0 {
+ // Transition this space from Reserved to Prepared and mark it
+ // as released: once we update the page allocator and release
+ // the lock, it may start being used at any time.
+ sysMap(unsafe.Pointer(h.curArena.base), size, &memstats.heap_sys)
+ // Update stats.
+ atomic.Xadd64(&memstats.heap_released, int64(size))
+ stats := memstats.heapStats.acquire()
+ atomic.Xaddint64(&stats.released, int64(size))
+ memstats.heapStats.release()
+ // Update the page allocator's structures to make this
+ // space ready for allocation.
h.pages.grow(h.curArena.base, size)
totalGrowth += size
}
h.curArena.end = uintptr(av) + asize
}
- // The memory just allocated counts as both released
- // and idle, even though it's not yet backed by spans.
- //
- // The allocation is always aligned to the heap arena
- // size which is always > physPageSize, so its safe to
- // just add directly to heap_released.
- atomic.Xadd64(&memstats.heap_released, int64(asize))
- stats := memstats.heapStats.acquire()
- atomic.Xaddint64(&stats.released, int64(asize))
- memstats.heapStats.release()
-
// Recalculate nBase.
// We know this won't overflow, because sysAlloc returned
// a valid region starting at h.curArena.base which is at
// Grow into the current arena.
v := h.curArena.base
h.curArena.base = nBase
+
+ // Transition the space we're going to use from Reserved to Prepared.
+ sysMap(unsafe.Pointer(v), nBase-v, &memstats.heap_sys)
+
+ // The memory just allocated counts as both released
+ // and idle, even though it's not yet backed by spans.
+ //
+ // The allocation is always aligned to the heap arena
+ // size which is always > physPageSize, so it's safe to
+ // just add directly to heap_released.
+ atomic.Xadd64(&memstats.heap_released, int64(nBase-v))
+ stats := memstats.heapStats.acquire()
+ atomic.Xaddint64(&stats.released, int64(nBase-v))
+ memstats.heapStats.release()
+
+ // Update the page allocator's structures to make this
+ // space ready for allocation.
h.pages.grow(v, nBase-v)
totalGrowth += nBase - v
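The net effect of the change, as far as this diff shows: sysAlloc now returns Reserved address space only, mheap.grow maps just the chunk-aligned region it is about to use into the Prepared state and counts it as released and idle, and the transition to Ready is deferred until that memory is actually handed out for spans.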