Also, clean up atomics on released-per-cycle while we're here.
For #57069.
Change-Id: I14026e8281f01dea1e8c8de6aa8944712b7b24d9
Reviewed-on: https://go-review.googlesource.com/c/go/+/495916
Reviewed-by: Michael Pratt <mpratt@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
scavenger as well as the total amount of memory returned to the operating system
and an estimate of physical memory utilization. The format of this line is subject
to change, but currently it is:
- scav # KiB work, # KiB total, #% util
+ scav # KiB work (bg), # KiB work (eager), # KiB now, #% util
where the fields are as follows:
- # KiB work the amount of memory returned to the OS since the last line
- # KiB total the total amount of memory returned to the OS
- #% util the fraction of all unscavenged memory which is in-use
+ # KiB work (bg) the amount of memory returned to the OS in the background since
+ the last line
+ # KiB work (eager) the amount of memory returned to the OS eagerly since the last line
+ # KiB now the amount of address space currently returned to the OS
+ #% util the fraction of all unscavenged heap memory which is in-use
If the line ends with "(forced)", then scavenging was forced by a
debug.FreeOSMemory() call.
scavenger.park()
continue
}
- atomic.Xadduintptr(&mheap_.pages.scav.released, released)
+ mheap_.pages.scav.releasedBg.Add(released)
scavenger.sleep(workTime)
}
}
// application.
//
// scavenger.lock must be held.
-func printScavTrace(released uintptr, forced bool) {
+func printScavTrace(releasedBg, releasedEager uintptr, forced bool) {
assertLockHeld(&scavenger.lock)
printlock()
print("scav ",
- released>>10, " KiB work, ",
- gcController.heapReleased.load()>>10, " KiB total, ",
+ releasedBg>>10, " KiB work (bg), ",
+ releasedEager>>10, " KiB work (eager), ",
+ gcController.heapReleased.load()>>10, " KiB now, ",
(gcController.heapInUse.load()*100)/heapRetained(), "% util",
)
if forced {
if debug.scavtrace > 0 {
systemstack(func() {
lock(&mheap_.lock)
- released := atomic.Loaduintptr(&mheap_.pages.scav.released)
- printScavTrace(released, false)
- atomic.Storeuintptr(&mheap_.pages.scav.released, 0)
+
+ // Get released stats.
+ releasedBg := mheap_.pages.scav.releasedBg.Load()
+ releasedEager := mheap_.pages.scav.releasedEager.Load()
+
+ // Print the line.
+ printScavTrace(releasedBg, releasedEager, false)
+
+ // Update the stats.
+ mheap_.pages.scav.releasedBg.Add(-releasedBg)
+ mheap_.pages.scav.releasedEager.Add(-releasedEager)
unlock(&mheap_.lock)
})
}
track := pp.limiterEvent.start(limiterEventScavengeAssist, start)
// Scavenge, but back out if the limiter turns on.
- h.pages.scavenge(bytesToScavenge, func() bool {
+ released := h.pages.scavenge(bytesToScavenge, func() bool {
return gcCPULimiter.limiting()
}, forceScavenge)
+ mheap_.pages.scav.releasedEager.Add(released)
+
// Finish up accounting.
now = nanotime()
if track {
gp.m.mallocing--
if debug.scavtrace > 0 {
- printScavTrace(released, true)
+ printScavTrace(0, released, true)
}
}
package runtime
import (
+ "runtime/internal/atomic"
"unsafe"
)
// scavenge.
index scavengeIndex
- // released is the amount of memory released this scavenge cycle.
- //
- // Updated atomically.
- released uintptr
+ // releasedBg is the amount of memory released in the background this
+ // scavenge cycle.
+ releasedBg atomic.Uintptr
+
+ // releasedEager is the amount of memory released eagerly this scavenge
+ // cycle.
+ releasedEager atomic.Uintptr
}
// mheap_.lock. This level of indirection makes it possible