1 // Copyright 2019 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
12 const pageCachePages = 8 * unsafe.Sizeof(pageCache{}.cache)
// pageCache represents a per-p cache of pages the allocator can
// allocate from without a lock. More specifically, it represents
// a pageCachePages*pageSize chunk of memory with 0 or more free
// pages in it.
type pageCache struct {
	base  uintptr // base address of the chunk
	cache uint64  // 64-bit bitmap representing free pages (1 means free)
	scav  uint64  // 64-bit bitmap representing scavenged pages (1 means scavenged)
}
24 // empty reports whether the page cache has no free pages.
25 func (c *pageCache) empty() bool {
29 // alloc allocates npages from the page cache and is the main entry
30 // point for allocation.
32 // Returns a base address and the amount of scavenged memory in the
33 // allocated region in bytes.
35 // Returns a base address of zero on failure, in which case the
36 // amount of scavenged memory should be ignored.
37 func (c *pageCache) alloc(npages uintptr) (uintptr, uintptr) {
42 i := uintptr(sys.TrailingZeros64(c.cache))
43 scav := (c.scav >> i) & 1
44 c.cache &^= 1 << i // set bit to mark in-use
45 c.scav &^= 1 << i // clear bit to mark unscavenged
46 return c.base + i*pageSize, uintptr(scav) * pageSize
48 return c.allocN(npages)
51 // allocN is a helper which attempts to allocate npages worth of pages
52 // from the cache. It represents the general case for allocating from
55 // Returns a base address and the amount of scavenged memory in the
56 // allocated region in bytes.
57 func (c *pageCache) allocN(npages uintptr) (uintptr, uintptr) {
58 i := findBitRange64(c.cache, uint(npages))
62 mask := ((uint64(1) << npages) - 1) << i
63 scav := sys.OnesCount64(c.scav & mask)
64 c.cache &^= mask // mark in-use bits
65 c.scav &^= mask // clear scavenged bits
66 return c.base + uintptr(i*pageSize), uintptr(scav) * pageSize
69 // flush empties out unallocated free pages in the given cache
70 // into s. Then, it clears the cache, such that empty returns
73 // p.mheapLock must be held.
75 // Must run on the system stack because p.mheapLock must be held.
78 func (c *pageCache) flush(p *pageAlloc) {
79 assertLockHeld(p.mheapLock)
84 ci := chunkIndex(c.base)
85 pi := chunkPageIndex(c.base)
87 // This method is called very infrequently, so just do the
88 // slower, safer thing by iterating over each bit individually.
89 for i := uint(0); i < 64; i++ {
90 if c.cache&(1<<i) != 0 {
91 p.chunkOf(ci).free1(pi + i)
93 // Update density statistics.
94 p.scav.index.free(ci, pi+i, 1)
96 if c.scav&(1<<i) != 0 {
97 p.chunkOf(ci).scavenged.setRange(pi+i, 1)
101 // Since this is a lot like a free, we need to make sure
102 // we update the searchAddr just like free does.
103 if b := (offAddr{c.base}); b.lessThan(p.searchAddr) {
106 p.update(c.base, pageCachePages, false, false)
110 // allocToCache acquires a pageCachePages-aligned chunk of free pages which
111 // may not be contiguous, and returns a pageCache structure which owns the
114 // p.mheapLock must be held.
116 // Must run on the system stack because p.mheapLock must be held.
119 func (p *pageAlloc) allocToCache() pageCache {
120 assertLockHeld(p.mheapLock)
122 // If the searchAddr refers to a region which has a higher address than
123 // any known chunk, then we know we're out of memory.
124 if chunkIndex(p.searchAddr.addr()) >= p.end {
128 ci := chunkIndex(p.searchAddr.addr()) // chunk index
129 var chunk *pallocData
130 if p.summary[len(p.summary)-1][ci] != 0 {
131 // Fast path: there's free pages at or near the searchAddr address.
132 chunk = p.chunkOf(ci)
133 j, _ := chunk.find(1, chunkPageIndex(p.searchAddr.addr()))
135 throw("bad summary data")
138 base: chunkBase(ci) + alignDown(uintptr(j), 64)*pageSize,
139 cache: ^chunk.pages64(j),
140 scav: chunk.scavenged.block64(j),
143 // Slow path: the searchAddr address had nothing there, so go find
144 // the first free page the slow way.
147 // We failed to find adequate free space, so mark the searchAddr as OoM
148 // and return an empty pageCache.
149 p.searchAddr = maxSearchAddr()
152 ci = chunkIndex(addr)
153 chunk = p.chunkOf(ci)
155 base: alignDown(addr, 64*pageSize),
156 cache: ^chunk.pages64(chunkPageIndex(addr)),
157 scav: chunk.scavenged.block64(chunkPageIndex(addr)),
161 // Set the page bits as allocated and clear the scavenged bits, but
162 // be careful to only set and clear the relevant bits.
163 cpi := chunkPageIndex(c.base)
164 chunk.allocPages64(cpi, c.cache)
165 chunk.scavenged.clearBlock64(cpi, c.cache&c.scav /* free and scavenged */)
167 // Update as an allocation, but note that it's not contiguous.
168 p.update(c.base, pageCachePages, false, true)
170 // Update density statistics.
171 p.scav.index.alloc(ci, uint(sys.OnesCount64(c.cache)))
173 // Set the search address to the last page represented by the cache.
174 // Since all of the pages in this block are going to the cache, and we
175 // searched for the first free page, we can confidently start at the
178 // However, p.searchAddr is not allowed to point into unmapped heap memory
179 // unless it is maxSearchAddr, so make it the last page as opposed to
181 p.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}