1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Implementation of runtime/debug.WriteHeapDump. Writes all
6 // objects in the heap plus additional info (roots, threads,
7 // finalizers, etc.) to a file.
9 // The format of the dumped file is described at
10 // http://golang.org/s/go14heapdump.
// Record-tag constant: written before each queued-finalizer record in the
// dump stream. NOTE(review): the rest of the tag constant block (tagObject,
// tagGoroutine, etc.) is elided from this view of the file.
32 tagQueuedFinalizer = 11
// File descriptor that all dwrite/flush calls below target; set by the
// caller before dumping begins (not visible here — confirm against callers).
41 var dumpfd uintptr // fd to write the dump to.
44 // buffer of pending write data
// dwrite appends len bytes at data to the in-memory dump buffer, flushing
// buffered bytes to dumpfd when the new data would overflow bufSize.
// NOTE(review): several interior lines (early-out for len==0, nbuf updates,
// the large-write branch condition) are elided in this view.
52 func dwrite(data unsafe.Pointer, len uintptr) {
// Fast path: the data fits in the remaining buffer space, so just copy it in.
56 if nbuf+len <= bufSize {
57 copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
// Buffer is full: flush pending bytes straight to the dump fd.
62 write(dumpfd, (unsafe.Pointer)(&buf), int32(nbuf))
// A write too large to buffer is sent directly, bypassing the buffer.
64 write(dumpfd, data, int32(len))
// Otherwise restart the buffer with the new data at the front.
67 copy(buf[:], (*[bufSize]byte)(data)[:len])
// dwritebyte writes a single byte through dwrite.
72 func dwritebyte(b byte) {
73 dwrite(unsafe.Pointer(&b), 1)
// Flush of any remaining buffered bytes (enclosing func declaration elided).
77 write(dumpfd, (unsafe.Pointer)(&buf), int32(nbuf))
81 // Cache of types that have been serialized already.
82 // We use a type's hash field to pick a bucket.
83 // Inside a bucket, we keep a list of types that
84 // have been serialized so far, most recently used first.
85 // Note: when a bucket overflows we may end up
86 // serializing a type more than once. That's ok.
// Number of buckets; must be a power of two — dumptype masks the type hash
// with typeCacheBuckets-1 to pick a bucket.
88 typeCacheBuckets = 256
// One bucket: an MRU-ordered, fixed-size list of recently serialized types.
92 type typeCacheBucket struct {
93 t [typeCacheAssoc]*_type
// The cache itself; cleared (memclr) at the start of each dump.
96 var typecache [typeCacheBuckets]typeCacheBucket
98 // dump a uint64 in a varint format parseable by encoding/binary
99 func dumpint(v uint64) {
// Each non-final byte has the continuation (0x80) bit set, low 7 bits carry
// the value — standard base-128 varint. (Loop structure elided in this view.)
103 buf[n] = byte(v | 0x80)
109 dwrite(unsafe.Pointer(&buf), uintptr(n))
// dumpbool emits a bool as a varint (body elided in this view).
112 func dumpbool(b bool) {
120 // dump varint uint64 length followed by memory contents
121 func dumpmemrange(data unsafe.Pointer, len uintptr) {
// dumpslice emits a byte slice as length + contents; the &b[0] access
// implies the write is skipped for empty slices (guard elided in this view).
126 func dumpslice(b []byte) {
127 dumpint(uint64(len(b)))
129 dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
// dumpstr emits a string as length + bytes, reading the header directly
// to avoid allocating.
133 func dumpstr(s string) {
134 sp := (*stringStruct)(unsafe.Pointer(&s))
135 dumpmemrange(sp.str, uintptr(sp.len))
138 // dump information for a type
// Emits a type record (address, size, name, indirect-flag) at most once per
// type, using typecache for deduplication. NOTE(review): cache-hit handling,
// MRU rotation bodies, and the name-only fallback branch are elided here.
139 func dumptype(t *_type) {
144 // If we've definitely serialized the type before,
145 // no need to do it again.
// Bucket selection: hash masked to the power-of-two bucket count.
146 b := &typecache[t.hash&(typeCacheBuckets-1)]
// Search the bucket (slot 0 checked before this loop, elided).
150 for i := 1; i < typeCacheAssoc; i++ {
// On a hit, rotate the entry to the front (move-to-front / MRU).
153 for j := i; j > 0; j-- {
161 // Might not have been dumped yet. Dump it and
162 // remember we did so.
// Shift the bucket down one slot to make room at the front.
163 for j := typeCacheAssoc - 1; j > 0; j-- {
// The record: type address, size, then qualified name.
170 dumpint(uint64(uintptr(unsafe.Pointer(t))))
171 dumpint(uint64(t.size))
172 if t.x == nil || t.x.pkgpath == nil || t.x.name == nil {
// Qualified name is written as "<pkgpath>.<name>" — length counts the dot.
175 pkgpath := (*stringStruct)(unsafe.Pointer(&t.x.pkgpath))
176 name := (*stringStruct)(unsafe.Pointer(&t.x.name))
177 dumpint(uint64(uintptr(pkgpath.len) + 1 + uintptr(name.len)))
178 dwrite(pkgpath.str, uintptr(pkgpath.len))
180 dwrite(name.str, uintptr(name.len))
// True when an interface's data slot holds a pointer TO the value
// (i.e. the value is stored indirectly).
182 dumpbool(t.kind&kindDirectIface == 0 || t.kind&kindNoPointers == 0)
// dumpobj emits one heap object: its eface types, address, contents, and
// (elided here) its pointer-field descriptors via dumpfields.
186 func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
187 dumpbvtypes(&bv, obj)
189 dumpint(uint64(uintptr(obj)))
190 dumpmemrange(obj, size)
// dumpotherroot records a named non-object root pointing at 'to'.
// (The dumpstr(description) line is elided in this view.)
194 func dumpotherroot(description string, to unsafe.Pointer) {
195 dumpint(tagOtherRoot)
197 dumpint(uint64(uintptr(to)))
// dumpfinalizer records a registered finalizer: the object it is attached
// to, the finalizer closure and its code pointer, and the fint/ot types.
200 func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) {
201 dumpint(tagFinalizer)
202 dumpint(uint64(uintptr(obj)))
203 dumpint(uint64(uintptr(unsafe.Pointer(fn))))
204 dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
205 dumpint(uint64(uintptr(unsafe.Pointer(fint))))
206 dumpint(uint64(uintptr(unsafe.Pointer(ot))))
// childInfo is threaded through the stack walk (see dumpframe) so each frame
// can describe its outgoing-arguments area to its caller.
209 type childInfo struct {
210 // Information passed up from the callee frame about
211 // the layout of the outargs region.
212 argoff uintptr // where the arguments start in the frame
213 arglen uintptr // size of args region
214 args bitvector // if args.n >= 0, pointer map of args region
215 sp *uint8 // callee sp
216 depth uintptr // depth in call stack (0 == most recent)
219 // dump kinds & offsets of interesting fields in bv
// Walks the 2-bit-per-word bitmap and emits a (fieldKindPtr, offset) pair
// for each pointer slot. NOTE(review): the switch cases for other bit
// patterns (dead, scalar, eface/iface) are elided in this view.
220 func dumpbv(cbv *bitvector, offset uintptr) {
// i advances in units of bits-per-pointer across the bitmap.
222 for i := uintptr(0); i < uintptr(bv.n); i += bitsPerPointer {
// Extract the 2-bit code for this word from the packed byte data.
223 switch bv.bytedata[i/8] >> (i % 8) & 3 {
225 gothrow("unexpected pointer bits")
227 // BitsDead has already been processed in makeheapobjbv.
228 // We should only see it in stack maps, in which case we should continue processing.
// Pointer slot: record its kind and byte offset within the region.
232 dumpint(fieldKindPtr)
233 dumpint(uint64(offset + i/_BitsPerPointer*ptrSize))
// dumpframe is the per-frame callback for gentraceback: it writes one
// tagStackFrame record (frame memory, pcs, name, pointer-field layout) and
// fills *childInfo so the NEXT (caller) frame can describe this frame's
// argument area. Returns bool so the traceback continues.
// NOTE(review): many interior lines (f/pc extraction, else-branches, EOL
// markers, nil-stkmap handling) are elided in this view.
238 func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
239 child := (*childInfo)(arg)
242 // Figure out what we can about our stack map
247 pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, pc)
249 // We do not have a valid pcdata value but there might be a
250 // stackmap for this function. It is likely that we are looking
251 // at the function prologue, assume so and hope for the best.
254 stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
256 // Dump any types we will need to resolve Efaces.
// Types reachable through the callee's outargs region...
257 if child.args.n >= 0 {
258 dumpbvtypes(&child.args, unsafe.Pointer(s.sp+child.argoff))
// ...and through this frame's locals bitmap.
261 if stkmap != nil && stkmap.n > 0 {
262 bv = stackmapdata(stkmap, pcdata)
263 dumpbvtypes(&bv, unsafe.Pointer(s.varp-uintptr(bv.n/_BitsPerPointer*ptrSize)))
268 // Dump main body of stack frame.
269 dumpint(tagStackFrame)
270 dumpint(uint64(s.sp)) // lowest address in frame
271 dumpint(uint64(child.depth)) // # of frames deep on the stack
272 dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
273 dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp) // frame contents
274 dumpint(uint64(f.entry))
275 dumpint(uint64(s.pc))
276 dumpint(uint64(s.continpc))
277 name := gofuncname(f)
// Fallback name when the symbol table has none.
279 name = "unknown function"
283 // Dump fields in the outargs section
// Precise pointer map available from the callee — use it.
284 if child.args.n >= 0 {
285 dumpbv(&child.args, child.argoff)
287 // conservative - everything might be a pointer
288 for off := child.argoff; off < child.argoff+child.arglen; off += ptrSize {
289 dumpint(fieldKindPtr)
294 // Dump fields in the local vars section
296 // No locals information, dump everything.
297 for off := child.arglen; off < s.varp-s.sp; off += ptrSize {
298 dumpint(fieldKindPtr)
// Negative n encodes "locals size only": treat the whole region as pointers.
301 } else if stkmap.n < 0 {
302 // Locals size information, dump just the locals.
303 size := uintptr(-stkmap.n)
304 for off := s.varp - size - s.sp; off < s.varp-s.sp; off += ptrSize {
305 dumpint(fieldKindPtr)
// Positive n: a real bitmap — emit only the actual pointer slots.
308 } else if stkmap.n > 0 {
309 // Locals bitmap information, scan just the pointers in
311 dumpbv(&bv, s.varp-uintptr(bv.n)/_BitsPerPointer*ptrSize-s.sp)
313 dumpint(fieldKindEol)
315 // Record arg info for parent.
// argoff/arglen/sp describe THIS frame's outargs for the caller's pass.
316 child.argoff = s.argp - s.fp
317 child.arglen = s.arglen
318 child.sp = (*uint8)(unsafe.Pointer(s.sp))
// Switch to the args pointer map for the parent to consume.
320 stkmap = (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
322 child.args = stackmapdata(stkmap, pcdata)
// dumpgoroutine writes one tagGoroutine record for gp, walks its stack via
// gentraceback/dumpframe, then emits its defer and panic chains.
// NOTE(review): the non-syscall sp/pc/lr setup, the stack-range fields of
// the record, and the per-record tag lines for defers/panics are elided.
329 func dumpgoroutine(gp *g) {
330 var sp, pc, lr uintptr
// A goroutine blocked in a syscall has its resume point saved in
// syscallsp/syscallpc rather than sched (assignment lines elided).
331 if gp.syscallsp != 0 {
341 dumpint(tagGoroutine)
342 dumpint(uint64(uintptr(unsafe.Pointer(gp))))
344 dumpint(uint64(gp.goid))
345 dumpint(uint64(gp.gopc))
346 dumpint(uint64(readgstatus(gp)))
347 dumpbool(gp.issystem)
348 dumpbool(false) // isbackground
349 dumpint(uint64(gp.waitsince))
350 dumpstr(gp.waitreason)
351 dumpint(uint64(uintptr(gp.sched.ctxt)))
352 dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
353 dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
354 dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))
// Walk every frame; child threads the arg-layout info between frames.
362 gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, noescape(unsafe.Pointer(&child)), 0)
364 // dump defer & panic records
365 for d := gp._defer; d != nil; d = d.link {
367 dumpint(uint64(uintptr(unsafe.Pointer(d))))
368 dumpint(uint64(uintptr(unsafe.Pointer(gp))))
369 dumpint(uint64(d.argp))
370 dumpint(uint64(d.pc))
371 dumpint(uint64(uintptr(unsafe.Pointer(d.fn))))
372 dumpint(uint64(uintptr(unsafe.Pointer(d.fn.fn))))
373 dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
375 for p := gp._panic; p != nil; p = p.link {
377 dumpint(uint64(uintptr(unsafe.Pointer(p))))
378 dumpint(uint64(uintptr(unsafe.Pointer(gp))))
// The panic argument is an eface; dump its type and data words separately.
379 eface := (*eface)(unsafe.Pointer(&p.arg))
380 dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
381 dumpint(uint64(uintptr(unsafe.Pointer(eface.data))))
382 dumpint(0) // was p->defer, no longer recorded
383 dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
388 // goroutines & stacks
// Iterate allgs (world is stopped, so statuses are stable) and dump each
// runnable/waiting goroutine; dead/scan states are rejected below.
// NOTE(review): the enclosing func declaration and the status switch are
// elided in this view.
389 for i := 0; uintptr(i) < allglen; i++ {
391 status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
394 print("runtime: unexpected G.status ", hex(status), "\n")
395 gothrow("dumpgs in STW - bad status")
// finq_callback emits one tagQueuedFinalizer record per finalizer already
// queued to run; invoked via iterate_finq.
406 func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) {
407 dumpint(tagQueuedFinalizer)
408 dumpint(uint64(uintptr(obj)))
409 dumpint(uint64(uintptr(unsafe.Pointer(fn))))
410 dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
411 dumpint(uint64(uintptr(unsafe.Pointer(fint))))
412 dumpint(uint64(uintptr(unsafe.Pointer(ot))))
// Root dumping (enclosing func declaration elided in this view): emits the
// data segment, the bss segment, per-span finalizer specials, and finally
// the queued finalizers.
// Data segment: types first, then address, raw contents, and field layout.
417 dumpbvtypes(&gcdatamask, unsafe.Pointer(&data))
419 dumpint(uint64(uintptr(unsafe.Pointer(&data))))
420 dumpmemrange(unsafe.Pointer(&data), uintptr(unsafe.Pointer(&edata))-uintptr(unsafe.Pointer(&data)))
421 dumpfields(gcdatamask)
// BSS segment: same shape as the data segment record.
424 dumpbvtypes(&gcbssmask, unsafe.Pointer(&bss))
426 dumpint(uint64(uintptr(unsafe.Pointer(&bss))))
427 dumpmemrange(unsafe.Pointer(&bss), uintptr(unsafe.Pointer(&ebss))-uintptr(unsafe.Pointer(&bss)))
428 dumpfields(gcbssmask)
// MSpan.types: scan in-use spans for finalizer specials.
431 allspans := h_allspans
432 for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
433 s := allspans[spanidx]
434 if s.state == _MSpanInUse {
436 for sp := s.specials; sp != nil; sp = sp.next {
437 if sp.kind != _KindSpecialFinalizer {
// Compute the object's address from the span start page and the
// special's byte offset within the span.
440 spf := (*specialfinalizer)(unsafe.Pointer(sp))
441 p := unsafe.Pointer((uintptr(s.start) << _PageShift) + uintptr(spf.special.offset))
442 dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
// Finalizer queue.
448 iterate_finq(finq_callback)
// Object dumping (enclosing func declaration elided in this view): walks
// every in-use span, marks free slots, and dumps each allocated object.
451 // Bit vector of free marks.
452 // Needs to be as big as the largest number of objects per span.
453 var freemark [_PageSize / 8]bool
456 for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
458 if s.state != _MSpanInUse {
// p = first object address in the span; size/n derivation partly elided.
461 p := uintptr(s.start << _PageShift)
463 n := (s.npages << _PageShift) / size
464 if n > uintptr(len(freemark)) {
465 gothrow("freemark array doesn't have enough entries")
// Mark every slot on the span's freelist so it is skipped below.
467 for l := s.freelist; l.ptr() != nil; l = l.ptr().next {
468 freemark[(uintptr(l)-p)/size] = true
// Dump each object that is NOT free (the freemark check is elided).
470 for j := uintptr(0); j < n; j, p = j+1, p+size {
475 dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
// Params record (enclosing func declaration elided): byte order probe —
// write a 1 into an integer and inspect its first byte.
483 if *(*byte)(unsafe.Pointer(&x)) == 1 {
484 dumpbool(false) // little-endian ptrs
486 dumpbool(true) // big-endian ptrs
489 dumpint(uint64(mheap_.arena_start))
490 dumpint(uint64(mheap_.arena_used))
492 dumpstr(goexperiment)
493 dumpint(uint64(ncpu))
// itab_callback records, for each itab, the type actually stored in an
// interface's data slot — needed by the reader to type interface referents.
// NOTE(review): the line extracting t from tab is elided in this view.
496 func itab_callback(tab *itab) {
498 // Dump a map from itab* to the type of its data field.
499 // We want this map so we can deduce types of interface referents.
500 if t.kind&kindDirectIface == 0 {
501 // indirect - data slot is a pointer to t.
504 dumpint(uint64(uintptr(unsafe.Pointer(tab))))
505 dumpint(uint64(uintptr(unsafe.Pointer(t.ptrto))))
506 } else if t.kind&kindNoPointers == 0 {
507 // t is pointer-like - data slot is a t.
510 dumpint(uint64(uintptr(unsafe.Pointer(tab))))
511 dumpint(uint64(uintptr(unsafe.Pointer(t))))
513 // Data slot is a scalar. Dump type just for fun.
514 // With pointer-only interfaces, this shouldn't happen.
517 dumpint(uint64(uintptr(unsafe.Pointer(tab))))
518 dumpint(uint64(uintptr(unsafe.Pointer(t))))
// Driver (enclosing func declaration elided): visit every itab.
523 iterate_itabs(itab_callback)
// OS-thread records: one per M on the allm list (tag line elided).
527 for mp := allm; mp != nil; mp = mp.alllink {
529 dumpint(uint64(uintptr(unsafe.Pointer(mp))))
530 dumpint(uint64(mp.id))
// dumpmemstats writes the runtime.MemStats counters as a fixed sequence of
// varints; the reader relies on this exact field order, so do not reorder.
// (The leading tagMemStats line is elided in this view.)
535 func dumpmemstats() {
537 dumpint(memstats.alloc)
538 dumpint(memstats.total_alloc)
539 dumpint(memstats.sys)
540 dumpint(memstats.nlookup)
541 dumpint(memstats.nmalloc)
542 dumpint(memstats.nfree)
543 dumpint(memstats.heap_alloc)
544 dumpint(memstats.heap_sys)
545 dumpint(memstats.heap_idle)
546 dumpint(memstats.heap_inuse)
547 dumpint(memstats.heap_released)
548 dumpint(memstats.heap_objects)
549 dumpint(memstats.stacks_inuse)
550 dumpint(memstats.stacks_sys)
551 dumpint(memstats.mspan_inuse)
552 dumpint(memstats.mspan_sys)
553 dumpint(memstats.mcache_inuse)
554 dumpint(memstats.mcache_sys)
555 dumpint(memstats.buckhash_sys)
556 dumpint(memstats.gc_sys)
557 dumpint(memstats.other_sys)
558 dumpint(memstats.next_gc)
559 dumpint(memstats.last_gc)
560 dumpint(memstats.pause_total_ns)
// The pause history ring buffer has a fixed 256 entries.
561 for i := 0; i < 256; i++ {
562 dumpint(memstats.pause_ns[i])
564 dumpint(uint64(memstats.numgc))
// dumpmemprof_callback writes one memory-profile bucket: its id, size,
// stack depth, then per-frame pc/function/file/line, then alloc/free counts.
// NOTE(review): frame-lookup branches and the hex-pc fallback path are
// partly elided in this view.
567 func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
// Reinterpret the raw pc array pointer as an indexable slice-like array;
// 100000 is just a large bound, only nstk entries are read.
568 stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
570 dumpint(uint64(uintptr(unsafe.Pointer(b))))
571 dumpint(uint64(size))
572 dumpint(uint64(nstk))
573 for i := uintptr(0); i < nstk; i++ {
// Fallback when no function is known for the pc: format it as hex text.
587 buf[n] = "0123456789abcdef"[pc&15]
601 dumpstr(gofuncname(f))
// For non-leaf frames the recorded pc is the return address; step back
// inside the call instruction before resolving file/line.
602 if i > 0 && pc > f.entry {
605 file, line := funcline(f, pc)
607 dumpint(uint64(line))
610 dumpint(uint64(allocs))
611 dumpint(uint64(frees))
// Driver (enclosing func declaration elided): dump all buckets, then the
// per-object allocation samples attached to in-use spans.
615 iterate_memprof(dumpmemprof_callback)
616 allspans := h_allspans
617 for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
618 s := allspans[spanidx]
619 if s.state != _MSpanInUse {
622 for sp := s.specials; sp != nil; sp = sp.next {
623 if sp.kind != _KindSpecialProfile {
626 spp := (*specialprofile)(unsafe.Pointer(sp))
627 p := uintptr(s.start<<_PageShift) + uintptr(spp.special.offset)
628 dumpint(tagAllocSample)
630 dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
// Magic header written at the very start of every dump file; the reader
// uses it to identify the format version.
635 var dumphdr = []byte("go1.4 heap dump\n")
// Dump entry (enclosing func declaration elided): finish sweeping so
// freelists are accurate, reset the type cache, write the header.
638 // make sure we're done sweeping
639 for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
641 if s.state == _MSpanInUse {
// Clear the dedup cache so this dump re-serializes every type it sees.
645 memclr(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
646 dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
// writeheapdump_m runs on the system stack (m-suffix convention) and drives
// the whole dump: parks the user goroutine as waiting, flushes stats,
// dumps, frees the temp buffer, and restores the goroutine's status.
// NOTE(review): the _g_ setup, stats update, and dump call are elided here.
659 func writeheapdump_m(fd uintptr) {
// Mark the requesting goroutine as waiting for the duration of the dump.
661 casgstatus(_g_.m.curg, _Grunning, _Gwaiting)
662 _g_.waitreason = "dumping heap"
664 // Update stats so we can dump them.
665 // As a side effect, flushes all the MCaches so the MSpan.freelist
666 // lists contain all the free objects.
672 // Call dump routine.
// Release the scratch bitmap buffer grown by makeheapobjbv.
678 sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
682 casgstatus(_g_.m.curg, _Gwaiting, _Grunning)
685 // dumpint() the kind & offset of each field in an object.
// Thin wrapper over dumpbv terminated by an end-of-list marker
// (the dumpbv call itself is elided in this view).
686 func dumpfields(bv bitvector) {
688 dumpint(fieldKindEol)
691 // The heap dump reader needs to be able to disambiguate
692 // Eface entries. So it needs to know every type that might
693 // appear in such an entry. The following routine accomplishes that.
694 // TODO(rsc, khr): Delete - no longer possible.
696 // Dump all the types that appear in the type field of
697 // any Eface described by this bit vector.
// Now a no-op stub kept for call-site compatibility (see TODO above).
698 func dumpbvtypes(bv *bitvector, base unsafe.Pointer) {
// makeheapobjbv extracts the GC pointer bitmap for the heap object at p
// (size bytes) into the reusable tmpbuf scratch buffer and returns it as a
// bitvector. Stops early at the first _BitsDead word (end of object).
701 func makeheapobjbv(p uintptr, size uintptr) bitvector {
702 // Extend the temp buffer if necessary.
703 nptr := size / ptrSize
704 if uintptr(len(tmpbuf)) < nptr*_BitsPerPointer/8+1 {
// Free the old buffer before allocating a larger one (nil-check elided).
706 sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
708 n := nptr*_BitsPerPointer/8 + 1
709 p := sysAlloc(n, &memstats.other_sys)
711 gothrow("heapdump: out of memory")
// Reinterpret the raw allocation as a byte slice of exactly n bytes.
713 tmpbuf = (*[1 << 30]byte)(p)[:n]
715 // Copy and compact the bitmap.
717 for i = 0; i < nptr; i++ {
// The heap bitmap lives just below arena_start, indexed backwards by
// word offset; extract this word's 2 type bits.
718 off := (p + i*ptrSize - mheap_.arena_start) / ptrSize
719 bitp := (*uint8)(unsafe.Pointer(mheap_.arena_start - off/wordsPerBitmapByte - 1))
720 shift := uint8((off % wordsPerBitmapByte) * gcBits)
721 bits := (*bitp >> (shift + 2)) & _BitsMask
722 if bits == _BitsDead {
723 break // end of heap object
// Pack the 2-bit code into the compact output buffer: clear the slot,
// then OR in the new bits.
725 tmpbuf[i*_BitsPerPointer/8] &^= (_BitsMask << ((i * _BitsPerPointer) % 8))
726 tmpbuf[i*_BitsPerPointer/8] |= bits << ((i * _BitsPerPointer) % 8)
// n counts bits, not words; data points at the compacted buffer.
728 return bitvector{int32(i * _BitsPerPointer), &tmpbuf[0]}