// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
10 "runtime/internal/atomic"
11 "runtime/internal/sys"
// Statistics.
// If you edit this structure, also edit type MemStats below.
type mstats struct {
	// General statistics.
	alloc       uint64 // bytes allocated and not yet freed
	total_alloc uint64 // bytes allocated (even if freed)
	sys         uint64 // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
	nlookup     uint64 // number of pointer lookups
	nmalloc     uint64 // number of mallocs
	nfree       uint64 // number of frees
	// Statistics about malloc heap.
	// Protected by mheap.lock.
	heap_alloc    uint64 // bytes allocated and not yet freed (same as alloc above)
	heap_sys      uint64 // bytes obtained from system
	heap_idle     uint64 // bytes in idle spans
	heap_inuse    uint64 // bytes in non-idle spans
	heap_released uint64 // bytes released to the OS
	heap_objects  uint64 // total number of allocated objects
	// Statistics about allocation of low-level fixed-size structures.
	// Protected by FixAlloc locks.
	stacks_inuse uint64 // this number is included in heap_inuse above
	stacks_sys   uint64 // always 0 in mstats
	mspan_inuse  uint64 // mspan structures
	mspan_sys    uint64
	mcache_inuse uint64 // mcache structures
	mcache_sys   uint64
	buckhash_sys uint64 // profiling bucket hash table
	gc_sys       uint64
	other_sys    uint64
	// Statistics about garbage collector.
	// Protected by mheap or stopping the world during GC.
	next_gc         uint64 // next gc (in heap_alloc time)
	last_gc         uint64 // last gc (in absolute time)
	pause_total_ns  uint64
	pause_ns        [256]uint64 // circular buffer of recent gc pause lengths
	pause_end       [256]uint64 // circular buffer of recent gc end times (nanoseconds since 1970)
	numgc           uint32
	gc_cpu_fraction float64 // fraction of CPU time used by GC
	// Statistics about allocation size classes.
	by_size [_NumSizeClasses]struct {
		size    uint32
		nmalloc uint64
		nfree   uint64
	}
	// Statistics below here are not exported to Go directly.

	tinyallocs uint64 // number of tiny allocations that didn't cause actual allocation; not exported to Go directly
	// heap_live is the number of bytes considered live by the GC.
	// That is: retained by the most recent GC plus allocated
	// since then. heap_live <= heap_alloc, since heap_live
	// excludes unmarked objects that have not yet been swept.
	heap_live uint64
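	// For example, if the last GC retained 100 bytes and 20 bytes have
	// been allocated since, heap_live is 120, while heap_alloc may be
	// larger because it also counts dead objects not yet swept.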
	// heap_scan is the number of bytes of "scannable" heap. This
	// is the live heap (as counted by heap_live), but omitting
	// no-scan objects and no-scan tails of objects.
	heap_scan uint64
	// heap_marked is the number of bytes marked by the previous
	// GC. After mark termination, heap_live == heap_marked, but
	// unlike heap_live, heap_marked does not change until the
	// next mark termination.
	heap_marked uint64
	// heap_reachable is an estimate of the reachable heap bytes
	// at the end of the previous GC.
	heap_reachable uint64
}

var memstats mstats
// A MemStats records statistics about the memory allocator.
type MemStats struct {
	// General statistics.
	Alloc      uint64 // bytes allocated and not yet freed
	TotalAlloc uint64 // bytes allocated (even if freed)
	Sys        uint64 // bytes obtained from system (sum of XxxSys below)
	Lookups    uint64 // number of pointer lookups
	Mallocs    uint64 // number of mallocs
	Frees      uint64 // number of frees
	// Main allocation heap statistics.
	HeapAlloc    uint64 // bytes allocated and not yet freed (same as Alloc above)
	HeapSys      uint64 // bytes obtained from system
	HeapIdle     uint64 // bytes in idle spans
	HeapInuse    uint64 // bytes in non-idle spans
	HeapReleased uint64 // bytes released to the OS
	HeapObjects  uint64 // total number of allocated objects
	// Low-level fixed-size structure allocator statistics.
	// Inuse is bytes used now.
	// Sys is bytes obtained from system.
	StackInuse  uint64 // bytes used by stack allocator
	StackSys    uint64
	MSpanInuse  uint64 // mspan structures
	MSpanSys    uint64
	MCacheInuse uint64 // mcache structures
	MCacheSys   uint64
	BuckHashSys uint64 // profiling bucket hash table
	GCSys       uint64 // GC metadata
	OtherSys    uint64 // other system allocations
	// Garbage collector statistics.
	NextGC        uint64 // next collection will happen when HeapAlloc ≥ this amount
	LastGC        uint64 // end time of last collection (nanoseconds since 1970)
	PauseTotalNs  uint64
	PauseNs       [256]uint64 // circular buffer of recent GC pause durations, most recent at [(NumGC+255)%256]
	PauseEnd      [256]uint64 // circular buffer of recent GC pause end times
	NumGC         uint32
	GCCPUFraction float64 // fraction of CPU time used by GC
	// Per-size allocation statistics.
	// 61 is NumSizeClasses in the C code.
	BySize [61]struct {
		Size    uint32
		Mallocs uint64
		Frees   uint64
	}
}
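// For example, a caller that wants the duration of the most recent
// collection can use the PauseNs indexing described above (a minimal
// sketch, assuming at least one collection has completed):
//
//	var m MemStats
//	ReadMemStats(&m)
//	recent := m.PauseNs[(m.NumGC+255)%256] // nanoseconds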
// Size of the trailing by_size array differs between Go and C,
// and all data after by_size is local to runtime, not exported.
// NumSizeClasses was changed, but we cannot change the Go struct
// because of backward compatibility.
// sizeof_C_MStats is what C thinks the size of the Go struct is.
var sizeof_C_MStats = unsafe.Offsetof(memstats.by_size) + 61*unsafe.Sizeof(memstats.by_size[0])
func init() {
	var memStats MemStats
	if sizeof_C_MStats != unsafe.Sizeof(memStats) {
		println(sizeof_C_MStats, unsafe.Sizeof(memStats))
		throw("MStats vs MemStatsType size mismatch")
	}
}
// ReadMemStats populates m with memory allocator statistics.
func ReadMemStats(m *MemStats) {
	stopTheWorld("read mem stats")

	systemstack(func() {
		readmemstats_m(m)
	})

	startTheWorld()
}
func readmemstats_m(stats *MemStats) {
	updatememstats(nil)

	// Size of the trailing by_size array differs between Go and C.
	// NumSizeClasses was changed, but we cannot change the Go struct
	// because of backward compatibility.
	memmove(unsafe.Pointer(stats), unsafe.Pointer(&memstats), sizeof_C_MStats)

	// Stack numbers are part of the heap numbers; separate those out
	// for user consumption.
	stats.StackSys += stats.StackInuse
	stats.HeapInuse -= stats.StackInuse
	stats.HeapSys -= stats.StackInuse
}
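// For illustration (hypothetical numbers): if heap_sys were 1000 bytes
// with 100 bytes in use by stacks, the copied stats would report HeapSys
// 900, HeapInuse reduced by 100, and StackSys increased by 100, so heap
// and stack accounting no longer overlap.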
//go:linkname readGCStats runtime/debug.readGCStats
func readGCStats(pauses *[]uint64) {
	systemstack(func() {
		readGCStats_m(pauses)
	})
}
func readGCStats_m(pauses *[]uint64) {
	p := *pauses
	// Calling code in runtime/debug should make the slice large enough.
	if cap(p) < len(memstats.pause_ns)+3 {
		throw("short slice passed to readGCStats")
	}
	// Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
	lock(&mheap_.lock)

	n := memstats.numgc
	if n > uint32(len(memstats.pause_ns)) {
		n = uint32(len(memstats.pause_ns))
	}
	// The pause buffer is circular. The most recent pause is at
	// pause_ns[(numgc-1)%len(pause_ns)], and then backward
	// from there to go back farther in time. We deliver the times
	// most recent first (in p[0]).
	for i := uint32(0); i < n; i++ {
		j := (memstats.numgc - 1 - i) % uint32(len(memstats.pause_ns))
		p[i] = memstats.pause_ns[j]
		p[n+i] = memstats.pause_end[j]
	}

	p[n+n] = memstats.last_gc
	p[n+n+1] = uint64(memstats.numgc)
	p[n+n+2] = memstats.pause_total_ns
	unlock(&mheap_.lock)

	*pauses = p[:n+n+3]
}
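// A consumer such as runtime/debug can unpack the slice using the layout
// described above. A sketch of the unpacking, assuming n pauses were
// returned (so len(p) == 2*n+3):
//
//	pauses := p[:n]
//	ends := p[n : 2*n]
//	lastGC := p[2*n]
//	numGC := p[2*n+1]
//	totalPauseNs := p[2*n+2]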
func updatememstats(stats *gcstats) {
	if stats != nil {
		*stats = gcstats{}
	}
	for mp := allm; mp != nil; mp = mp.alllink {
		if stats != nil {
			src := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(&mp.gcstats))
			dst := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(stats))
			for i, v := range src {
				dst[i] += v
			}
			mp.gcstats = gcstats{}
		}
	}
	memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
	memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
	memstats.sys = memstats.heap_sys + memstats.stacks_sys + memstats.mspan_sys +
		memstats.mcache_sys + memstats.buckhash_sys + memstats.gc_sys + memstats.other_sys
	// Calculate memory allocator stats.
	// During program execution we only count the number of frees and the amount of freed memory.
	// The current number of live objects in the heap and the amount of live heap memory
	// are calculated by scanning all spans.
	// The total number of mallocs is calculated as the number of frees plus the number of live objects.
	// Similarly, the total amount of allocated memory is calculated as the amount of freed memory
	// plus the amount of live heap memory.
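	// For example (hypothetical numbers): with 90 frees recorded so far
	// and 10 objects found alive by the span scan below, nmalloc comes
	// out to 100; total_alloc composes the same way from freed bytes
	// plus live bytes.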
	memstats.alloc = 0
	memstats.total_alloc = 0
	memstats.nmalloc = 0
	memstats.nfree = 0
	for i := 0; i < len(memstats.by_size); i++ {
		memstats.by_size[i].nmalloc = 0
		memstats.by_size[i].nfree = 0
	}
	// Flush MCaches to MCentral.
	systemstack(flushallmcaches)

	// Aggregate local stats.
	cachestats()
	// Scan all spans and count number of alive objects.
	lock(&mheap_.lock)
	for i := uint32(0); i < mheap_.nspan; i++ {
		s := h_allspans[i]
		if s.state != mSpanInUse {
			continue
		}
		if s.sizeclass == 0 {
			memstats.nmalloc++
			memstats.alloc += uint64(s.elemsize)
		} else {
			memstats.nmalloc += uint64(s.ref)
			memstats.by_size[s.sizeclass].nmalloc += uint64(s.ref)
			memstats.alloc += uint64(s.ref) * uint64(s.elemsize)
		}
	}
	unlock(&mheap_.lock)
	// Aggregate by size class.
	smallfree := uint64(0)
	memstats.nfree = mheap_.nlargefree
	for i := 0; i < len(memstats.by_size); i++ {
		memstats.nfree += mheap_.nsmallfree[i]
		memstats.by_size[i].nfree = mheap_.nsmallfree[i]
		memstats.by_size[i].nmalloc += mheap_.nsmallfree[i]
		smallfree += uint64(mheap_.nsmallfree[i]) * uint64(class_to_size[i])
	}
	memstats.nfree += memstats.tinyallocs
	memstats.nmalloc += memstats.nfree
	// Calculate derived stats.
	memstats.total_alloc = uint64(memstats.alloc) + uint64(mheap_.largefree) + smallfree
	memstats.heap_alloc = memstats.alloc
	memstats.heap_objects = memstats.nmalloc - memstats.nfree
}
func flushallmcaches() {
	for i := 0; ; i++ {
		p := allp[i]
		if p == nil {
			break
		}
		c := p.mcache
		if c == nil {
			continue
		}
		c.releaseAll()
		stackcache_clear(c)
	}
}
func purgecachedstats(c *mcache) {
	// Protected by either heap or GC lock.
	h := &mheap_
	memstats.heap_live += uint64(c.local_cachealloc)
	c.local_cachealloc = 0
	if trace.enabled {
		traceHeapAlloc()
	}
	memstats.heap_scan += uint64(c.local_scan)
	c.local_scan = 0
	memstats.tinyallocs += uint64(c.local_tinyallocs)
	c.local_tinyallocs = 0
	memstats.nlookup += uint64(c.local_nlookup)
	c.local_nlookup = 0
	h.largefree += uint64(c.local_largefree)
	c.local_largefree = 0
	h.nlargefree += uint64(c.local_nlargefree)
	c.local_nlargefree = 0
	for i := 0; i < len(c.local_nsmallfree); i++ {
		h.nsmallfree[i] += uint64(c.local_nsmallfree[i])
		c.local_nsmallfree[i] = 0
	}
}
// Atomically increases a given *system* memory stat. We are counting on this
// stat never overflowing a uintptr, so this function must only be used for
// system memory stats.
//
// The current implementation for little endian architectures is based on
// xadduintptr(), which is less than ideal: xadd64() should really be used.
// Using xadduintptr() is a stop-gap solution until arm supports xadd64() that
// doesn't use locks. (Locks are a problem as they require a valid G, which
// restricts their usability.)
//
// A side-effect of using xadduintptr() is that we need to check for
// overflow errors.
//go:nosplit
func mSysStatInc(sysStat *uint64, n uintptr) {
	if sys.BigEndian != 0 {
		atomic.Xadd64(sysStat, int64(n))
		return
	}
	if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), n); val < n {
		print("runtime: stat overflow: val ", val, ", n ", n, "\n")
		exit(2)
	}
}
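// To see why the little-endian path above is sound: on a little-endian
// machine the low-order bytes of a uint64 come first in memory, so a
// *uintptr aimed at a uint64 reads and writes its low word, and system
// stats are assumed never to exceed a uintptr. A minimal illustration
// (a sketch, not part of the runtime's API):
//
//	var stat uint64
//	p := (*uintptr)(unsafe.Pointer(&stat))
//	*p += 5 // on little endian, stat == 5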
// Atomically decreases a given *system* memory stat. Same comments as
// mSysStatInc apply.
//go:nosplit
func mSysStatDec(sysStat *uint64, n uintptr) {
	if sys.BigEndian != 0 {
		atomic.Xadd64(sysStat, -int64(n))
		return
	}
	if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), uintptr(-int64(n))); val+n < n {
		print("runtime: stat underflow: val ", val, ", n ", n, "\n")
		exit(2)
	}
}