p.chunkOf(ci).allocRange(base, npages)
p.update(addr, uintptr(npages), true, true)
- // Grab whether the chunk is hugepage backed and if it is,
- // clear it. We're about to break up this huge page.
- p.scav.index.setNoHugePage(ci)
-
// With that done, it's safe to unlock.
unlock(p.mheapLock)
func (s *scavengeIndex) alloc(ci chunkIdx, npages uint) {
sc := s.chunks[ci].load()
sc.alloc(npages, s.gen)
- if !sc.isHugePage() && sc.inUse > scavChunkHiOccPages {
- // Mark that we're considering this chunk as backed by huge pages.
- sc.setHugePage()
-
- // TODO(mknyszek): Consider eagerly backing memory with huge pages
- // here. In the past we've attempted to use sysHugePageCollapse
- // (which uses MADV_COLLAPSE on Linux, and is unsupported elswhere)
- // for this purpose, but that caused performance issues in production
- // environments.
- }
+ // TODO(mknyszek): Consider eagerly backing memory with huge pages
+ // here and track whether we believe this chunk is backed by huge pages.
+ // In the past we've attempted to use sysHugePageCollapse (which uses
+ // MADV_COLLAPSE on Linux, and is unsupported elsewhere) for this purpose,
+ // but that caused performance issues in production environments.
s.chunks[ci].store(sc)
}
s.chunks[ci].store(val)
}
-// setNoHugePage updates the backed-by-hugepages status of a particular chunk.
-// Returns true if the set was successful (not already backed by huge pages).
-//
-// setNoHugePage may only run concurrently with find.
-func (s *scavengeIndex) setNoHugePage(ci chunkIdx) {
- val := s.chunks[ci].load()
- if !val.isHugePage() {
- return
- }
- val.setNoHugePage()
- s.chunks[ci].store(val)
-}
-
// atomicScavChunkData is an atomic wrapper around a scavChunkData
// that stores it in its packed form.
type atomicScavChunkData struct {
// file. The reason we say "HasFree" here is so the zero value is
// correct for a newly-grown chunk. (New memory is scavenged.)
scavChunkHasFree scavChunkFlags = 1 << iota
- // scavChunkNoHugePage indicates whether this chunk has had any huge
- // pages broken by the scavenger.
- //
- // The negative here is unfortunate, but necessary to make it so that
- // the zero value of scavChunkData accurately represents the state of
- // a newly-grown chunk. (New memory is marked as backed by huge pages.)
- scavChunkNoHugePage
// scavChunkMaxFlags is the maximum number of flags we can have, given how
// a scavChunkData is packed into 8 bytes.
*sc |= scavChunkHasFree
}
-// isHugePage returns false if the noHugePage flag is set.
-func (sc *scavChunkFlags) isHugePage() bool {
- return (*sc)&scavChunkNoHugePage == 0
-}
-
-// setHugePage clears the noHugePage flag.
-func (sc *scavChunkFlags) setHugePage() {
- *sc &^= scavChunkNoHugePage
-}
-
-// setNoHugePage sets the noHugePage flag.
-func (sc *scavChunkFlags) setNoHugePage() {
- *sc |= scavChunkNoHugePage
-}
-
// shouldScavenge returns true if the corresponding chunk should be interrogated
// by the scavenger.
func (sc scavChunkData) shouldScavenge(currGen uint32, force bool) bool {