return (*pageCache)(c).alloc(npages)
}
func (c *PageCache) Flush(s *PageAlloc) {
- (*pageCache)(c).flush((*pageAlloc)(s))
+ cp := (*pageCache)(c)
+ sp := (*pageAlloc)(s)
+
+ systemstack(func() {
+ // None of the tests need any higher-level locking, so we just
+ // take the lock internally.
+ lock(sp.mheapLock)
+ cp.flush(sp)
+ unlock(sp.mheapLock)
+ })
}
// Expose chunk index type.
type PageAlloc pageAlloc
func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
- return (*pageAlloc)(p).alloc(npages)
+ pp := (*pageAlloc)(p)
+
+ var addr, scav uintptr
+ systemstack(func() {
+ // None of the tests need any higher-level locking, so we just
+ // take the lock internally.
+ lock(pp.mheapLock)
+ addr, scav = pp.alloc(npages)
+ unlock(pp.mheapLock)
+ })
+ return addr, scav
}
func (p *PageAlloc) AllocToCache() PageCache {
- return PageCache((*pageAlloc)(p).allocToCache())
+ pp := (*pageAlloc)(p)
+
+ var c PageCache
+ systemstack(func() {
+ // None of the tests need any higher-level locking, so we just
+ // take the lock internally.
+ lock(pp.mheapLock)
+ c = PageCache(pp.allocToCache())
+ unlock(pp.mheapLock)
+ })
+ return c
}
func (p *PageAlloc) Free(base, npages uintptr) {
- (*pageAlloc)(p).free(base, npages)
+ pp := (*pageAlloc)(p)
+
+ systemstack(func() {
+ // None of the tests need any higher-level locking, so we just
+ // take the lock internally.
+ lock(pp.mheapLock)
+ pp.free(base, npages)
+ unlock(pp.mheapLock)
+ })
}
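For reference, the wrappers above are what the page-allocator tests drive; a hypothetical usage sketch follows. Helper names and signatures such as NewPageAlloc, FreePageAlloc, BaseChunkIdx, and BitRange are assumed from the existing export_test.go helpers and are not part of this change.

// Hypothetical test sketch only; the real tests dot-import package runtime,
// so the export_test.go helpers are referenced unqualified.
func TestPageAllocAllocFreeSketch(t *testing.T) {
	b := NewPageAlloc(map[ChunkIdx][]BitRange{BaseChunkIdx: {}}, nil)
	defer FreePageAlloc(b)

	addr, _ := b.Alloc(1) // now takes p.mheapLock internally, on the system stack
	if addr == 0 {
		t.Fatal("expected a 1-page allocation to succeed")
	}
	b.Free(addr, 1)
}

Because the wrappers take the lock themselves, tests written in this style need no changes when the internal lock assertions land.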
func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}
func (p *PageAlloc) Scavenge(nbytes uintptr, mayUnlock bool) (r uintptr) {
pp := (*pageAlloc)(p)
systemstack(func() {
+ // None of the tests need any higher-level locking, so we just
+ // take the lock internally.
lock(pp.mheapLock)
r = pp.scavenge(nbytes, mayUnlock)
unlock(pp.mheapLock)
addr := chunkBase(chunkIdx(i))
// Mark the chunk's existence in the pageAlloc.
- p.grow(addr, pallocChunkBytes)
+ systemstack(func() {
+ lock(p.mheapLock)
+ p.grow(addr, pallocChunkBytes)
+ unlock(p.mheapLock)
+ })
// Initialize the bitmap and update pageAlloc metadata.
chunk := p.chunkOf(chunkIndex(addr))
}
// Update heap metadata for the allocRange calls above.
- p.update(addr, pallocChunkPages, false, false)
+ systemstack(func() {
+ lock(p.mheapLock)
+ p.update(addr, pallocChunkPages, false, false)
+ unlock(p.mheapLock)
+ })
}
+
systemstack(func() {
lock(p.mheapLock)
p.scavengeStartGen()
unlock(p.mheapLock)
})
+
return (*PageAlloc)(p)
}
//
// h must be locked.
func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
+ assertLockHeld(&h.lock)
+
n = alignUp(n, heapArenaBytes)
// First, try the arena pre-reservation.
//
// mheap_.lock must be held or the world must be stopped.
func gcSetTriggerRatio(triggerRatio float64) {
+ assertWorldStoppedOrLockHeld(&mheap_.lock)
+
// Compute the next GC goal, which is when the allocated heap
// has grown by GOGC/100 over the heap marked by the last
// cycle.
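As a rough illustration of the goal described in the comment above, the next goal grows the heap marked by the last cycle by GOGC/100. This is a minimal sketch that ignores the clamping and trigger computation the rest of gcSetTriggerRatio performs:

// nextGCGoalSketch is a hypothetical helper for illustration only; it assumes gogc >= 0.
func nextGCGoalSketch(heapMarked uint64, gogc int64) uint64 {
	return heapMarked + heapMarked*uint64(gogc)/100
}

For example, with 64 MiB marked and GOGC=100, the next goal is roughly 128 MiB.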
//
// mheap_.lock must be held or the world must be stopped.
func gcEffectiveGrowthRatio() float64 {
+ assertWorldStoppedOrLockHeld(&mheap_.lock)
+
egogc := float64(atomic.Load64(&memstats.next_gc)-memstats.heap_marked) / float64(memstats.heap_marked)
if egogc < 0 {
// Shouldn't happen, but just in case.
//
//go:systemstack
func (p *pageAlloc) scavenge(nbytes uintptr, mayUnlock bool) uintptr {
+ assertLockHeld(p.mheapLock)
+
var (
addrs addrRange
gen uint32
//
//go:systemstack
func (p *pageAlloc) scavengeStartGen() {
+ assertLockHeld(p.mheapLock)
+
if debug.scavtrace > 0 {
printScavTrace(p.scav.gen, p.scav.released, false)
}
//
//go:systemstack
func (p *pageAlloc) scavengeReserve() (addrRange, uint32) {
+ assertLockHeld(p.mheapLock)
+
// Start by reserving the minimum.
r := p.scav.inUse.removeLast(p.scav.reservationBytes)
//
//go:systemstack
func (p *pageAlloc) scavengeUnreserve(r addrRange, gen uint32) {
+ assertLockHeld(p.mheapLock)
+
if r.size() == 0 || gen != p.scav.gen {
return
}
//
//go:systemstack
func (p *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (uintptr, addrRange) {
+ assertLockHeld(p.mheapLock)
+
// Defensively check if we've received an empty address range.
// If so, just return.
if work.size() == 0 {
// If we found something, scavenge it and return!
if npages != 0 {
work.limit = offAddr{p.scavengeRangeLocked(maxChunk, base, npages)}
+
+ assertLockHeld(p.mheapLock) // Must be locked on return.
return uintptr(npages) * pageSize, work
}
}
base, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
if npages > 0 {
work.limit = offAddr{p.scavengeRangeLocked(candidateChunkIdx, base, npages)}
+
+ assertLockHeld(p.mheapLock) // Must be locked on return.
return uintptr(npages) * pageSize, work
}
// We were fooled, so let's continue from where we left off.
work.limit = offAddr{chunkBase(candidateChunkIdx)}
}
+
+ assertLockHeld(p.mheapLock) // Must be locked on return.
return 0, work
}
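The assertions above spell out scavengeOne's locking contract: p.mheapLock is held on entry, may be dropped and re-acquired internally when mayUnlock is true, and must be held again on every return path. Below is a toy, self-contained sketch of that contract using a hypothetical trackedMutex, since sync.Mutex cannot be queried for ownership; the runtime's assertLockHeld instead operates on the runtime's own mutex type.

import "sync"

// trackedMutex is a hypothetical stand-in for a lock whose ownership can be
// asserted. The held flag is only meaningful for this single-goroutine sketch.
type trackedMutex struct {
	mu   sync.Mutex
	held bool
}

func (m *trackedMutex) lock()   { m.mu.Lock(); m.held = true }
func (m *trackedMutex) unlock() { m.held = false; m.mu.Unlock() }
func (m *trackedMutex) assertHeld() {
	if !m.held {
		panic("lock not held")
	}
}

// scavengeLike mirrors the contract checked above: lock held on entry,
// optionally released around work when mayUnlock is set, held again on return.
func scavengeLike(l *trackedMutex, mayUnlock bool) {
	l.assertHeld()
	if mayUnlock {
		l.unlock()
		// ... work that must not run with the lock held ...
		l.lock()
	}
	l.assertHeld() // must be locked on return
}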
//
// p.mheapLock must be held.
func (p *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) uintptr {
+ assertLockHeld(p.mheapLock)
+
p.chunkOf(ci).scavenged.setRange(base, npages)
// Compute the full address for the start of the range.
// indirect call from the fixalloc initializer, the compiler can't see
// this.
//
+// The heap lock must be held.
+//
//go:nowritebarrierrec
func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
h := (*mheap)(vh)
s := (*mspan)(p)
+
+ assertLockHeld(&h.lock)
+
if len(h.allspans) >= cap(h.allspans) {
n := 64 * 1024 / sys.PtrSize
if n < cap(h.allspans)*3/2 {
//
// reclaim implements the page-reclaimer half of the sweeper.
//
-// h must NOT be locked.
+// h.lock must NOT be held.
func (h *mheap) reclaim(npage uintptr) {
// TODO(austin): Half of the time spent freeing spans is in
// locking/unlocking the heap (even with low contention). We
// In particular, if a span were freed and merged concurrently
// with this probing heapArena.spans, it would be possible to
// observe arbitrary, stale span pointers.
+ assertLockHeld(&h.lock)
+
n0 := n
var nFreed uintptr
sg := h.sweepgen
traceGCSweepSpan((n0 - nFreed) * pageSize)
lock(&h.lock)
}
+
+ assertLockHeld(&h.lock) // Must be locked on return.
return nFreed
}
// tryAllocMSpan attempts to allocate an mspan object from
// the P-local cache, but may fail.
//
-// h need not be locked.
+// h.lock need not be held.
//
// This caller must ensure that its P won't change underneath
// it during this function. Currently to ensure that we enforce
// allocMSpanLocked allocates an mspan object.
//
-// h must be locked.
+// h.lock must be held.
//
// allocMSpanLocked must be called on the system stack because
// its caller holds the heap lock. See mheap for details.
//
//go:systemstack
func (h *mheap) allocMSpanLocked() *mspan {
+ assertLockHeld(&h.lock)
+
pp := getg().m.p.ptr()
if pp == nil {
// We don't have a p so just do the normal thing.
// freeMSpanLocked frees an mspan object.
//
-// h must be locked.
+// h.lock must be held.
//
// freeMSpanLocked must be called on the system stack because
// its caller holds the heap lock. See mheap for details.
//
//go:systemstack
func (h *mheap) freeMSpanLocked(s *mspan) {
+ assertLockHeld(&h.lock)
+
pp := getg().m.p.ptr()
// First try to free the mspan directly to the cache.
if pp != nil && pp.mspancache.len < len(pp.mspancache.buf) {
//
// The returned span is fully initialized.
//
-// h must not be locked.
+// h.lock must not be held.
//
// allocSpan must be called on the system stack both because it acquires
// the heap lock and because it must block GC transitions.
// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
//
-// h must be locked.
+// h.lock must be held.
func (h *mheap) grow(npage uintptr) bool {
+ assertLockHeld(&h.lock)
+
// We must grow the heap in whole palloc chunks.
ask := alignUp(npage, pallocChunkPages) * pageSize
}
func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
+ assertLockHeld(&h.lock)
+
switch s.state.get() {
case mSpanManual:
if s.allocCount != 0 {
//
// p.mheapLock must be held.
func (p *pageAlloc) grow(base, size uintptr) {
+ assertLockHeld(p.mheapLock)
+
// Round up to chunks, since we can't deal with increments smaller
// than chunks. Also, sysGrow expects aligned values.
limit := alignUp(base+size, pallocChunkBytes)
//
// p.mheapLock must be held.
func (p *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
+ assertLockHeld(p.mheapLock)
+
// base, limit, start, and end are inclusive.
limit := base + npages*pageSize - 1
sc, ec := chunkIndex(base), chunkIndex(limit)
//
// p.mheapLock must be held.
func (p *pageAlloc) allocRange(base, npages uintptr) uintptr {
+ assertLockHeld(p.mheapLock)
+
limit := base + npages*pageSize - 1
sc, ec := chunkIndex(base), chunkIndex(limit)
si, ei := chunkPageIndex(base), chunkPageIndex(limit)
//
// p.mheapLock must be held.
func (p *pageAlloc) findMappedAddr(addr offAddr) offAddr {
+ assertLockHeld(p.mheapLock)
+
// If we're not in a test, validate first by checking mheap_.arenas.
// This is a fast path which is only safe to use outside of testing.
ai := arenaIndex(addr.addr())
//
// p.mheapLock must be held.
func (p *pageAlloc) find(npages uintptr) (uintptr, offAddr) {
+ assertLockHeld(p.mheapLock)
+
// Search algorithm.
//
// This algorithm walks each level l of the radix tree from the root level
// should be ignored.
//
// p.mheapLock must be held.
+//
+// Must run on the system stack because p.mheapLock must be held.
+//
+//go:systemstack
func (p *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
+ assertLockHeld(p.mheapLock)
+
// If the searchAddr refers to a region which has a higher address than
// any known chunk, then we know we're out of memory.
if chunkIndex(p.searchAddr.addr()) >= p.end {
// free returns npages worth of memory starting at base back to the page heap.
//
// p.mheapLock must be held.
+//
+// Must run on the system stack because p.mheapLock must be held.
+//
+//go:systemstack
func (p *pageAlloc) free(base, npages uintptr) {
+ assertLockHeld(p.mheapLock)
+
// If we're freeing pages below the p.searchAddr, update searchAddr.
if b := (offAddr{base}); b.lessThan(p.searchAddr) {
p.searchAddr = b
// into s. Then, it clears the cache, such that empty returns
// true.
//
-// p.mheapLock must be held or the world must be stopped.
+// p.mheapLock must be held.
+//
+// Must run on the system stack because p.mheapLock must be held.
+//
+//go:systemstack
func (c *pageCache) flush(p *pageAlloc) {
+ assertLockHeld(p.mheapLock)
+
if c.empty() {
return
}
// chunk.
//
// p.mheapLock must be held.
+//
+// Must run on the system stack because p.mheapLock must be held.
+//
+//go:systemstack
func (p *pageAlloc) allocToCache() pageCache {
+ assertLockHeld(p.mheapLock)
+
// If the searchAddr refers to a region which has a higher address than
// any known chunk, then we know we're out of memory.
if chunkIndex(p.searchAddr.addr()) >= p.end {
mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
}
pp.mspancache.len = 0
+ lock(&mheap_.lock)
pp.pcache.flush(&mheap_.pages)
+ unlock(&mheap_.lock)
})
freemcache(pp.mcache)
pp.mcache = nil