// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Implementation of runtime/debug.WriteHeapDump. Writes all
// objects in the heap plus additional info (roots, threads,
// finalizers, etc.) to a file.

// The format of the dumped file is described at
// https://golang.org/s/go15heapdump.
17 "internal/goexperiment"
// runtime_debug_WriteHeapDump implements runtime/debug.WriteHeapDump:
// it stops the world and writes the heap dump to fd via writeheapdump_m.
// NOTE(review): truncated listing — interior lines (the MemStats declaration,
// the readmemstats_m call, the systemstack closure, startTheWorld) are
// missing, and each line carries a stale line-number prefix from extraction.
21 //go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
22 func runtime_debug_WriteHeapDump(fd uintptr) {
23 stopTheWorld(stwWriteHeapDump)
25 // Keep m on this G's stack instead of the system stack.
26 // Both readmemstats_m and writeheapdump_m have pretty large
27 // peak stack depths and we risk blowing the system stack.
28 // This is safe because the world is stopped, so we don't
29 // need to worry about anyone shrinking and therefore moving
33 // Call readmemstats_m here instead of deeper in
34 // writeheapdump_m because we might blow the system stack
37 writeheapdump_m(fd, &m)
// Heap dump record tags and file-writing state.
// NOTE(review): truncated listing — the surrounding const block (tagObject,
// tagFinalizer, etc.) and the buf/nbuf buffer declarations are missing.
59 tagQueuedFinalizer = 11
68 var dumpfd uintptr // fd to write the dump to.
71 // buffer of pending write data
79 func dwrite(data unsafe.Pointer, len uintptr) {
83 if nbuf+len <= bufSize {
84 copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
89 write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
91 write(dumpfd, data, int32(len))
94 copy(buf[:], (*[bufSize]byte)(data)[:len])
99 func dwritebyte(b byte) {
100 dwrite(unsafe.Pointer(&b), 1)
104 write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
108 // Cache of types that have been serialized already.
109 // We use a type's hash field to pick a bucket.
110 // Inside a bucket, we keep a list of types that
111 // have been serialized so far, most recently used first.
112 // Note: when a bucket overflows we may end up
113 // serializing a type more than once. That's ok.
115 typeCacheBuckets = 256
119 type typeCacheBucket struct {
120 t [typeCacheAssoc]*_type
123 var typecache [typeCacheBuckets]typeCacheBucket
125 // dump a uint64 in a varint format parseable by encoding/binary.
126 func dumpint(v uint64) {
130 buf[n] = byte(v | 0x80)
136 dwrite(unsafe.Pointer(&buf), uintptr(n))
139 func dumpbool(b bool) {
147 // dump varint uint64 length followed by memory contents.
148 func dumpmemrange(data unsafe.Pointer, len uintptr) {
153 func dumpslice(b []byte) {
154 dumpint(uint64(len(b)))
156 dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
160 func dumpstr(s string) {
161 dumpmemrange(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
164 // dump information for a type.
// dumptype serializes a type descriptor (address, size, name, pointer-ness)
// unless the typecache shows it was already dumped; buckets behave as a
// small MRU list, so overflow may re-dump a type (harmless per the cache
// comment above).
// NOTE(review): truncated listing — the cache hit/miss branches, name
// construction, and several statements are missing, and each line carries
// a stale line-number prefix from extraction.
165 func dumptype(t *_type) {
170 // If we've definitely serialized the type before,
171 // no need to do it again.
172 b := &typecache[t.Hash&(typeCacheBuckets-1)]
176 for i := 1; i < typeCacheAssoc; i++ {
// On a hit, rotate the entry to the front (most recently used first).
179 for j := i; j > 0; j-- {
187 // Might not have been dumped yet. Dump it and
188 // remember we did so.
189 for j := typeCacheAssoc - 1; j > 0; j-- {
// Emit the type record: address, size, then name and pointer info.
196 dumpint(uint64(uintptr(unsafe.Pointer(t))))
197 dumpint(uint64(t.Size_))
198 if x := t.Uncommon(); x == nil || rt.nameOff(x.PkgPath).Name() == "" {
202 pkgpath := rt.nameOff(x.PkgPath).Name()
204 dumpint(uint64(uintptr(len(pkgpath)) + 1 + uintptr(len(name))))
205 dwrite(unsafe.Pointer(unsafe.StringData(pkgpath)), uintptr(len(pkgpath)))
207 dwrite(unsafe.Pointer(unsafe.StringData(name)), uintptr(len(name)))
// Whether values of this type are stored indirectly in interfaces.
209 dumpbool(t.Kind_&kindDirectIface == 0 || t.PtrBytes != 0)
213 func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
215 dumpint(uint64(uintptr(obj)))
216 dumpmemrange(obj, size)
220 func dumpotherroot(description string, to unsafe.Pointer) {
221 dumpint(tagOtherRoot)
223 dumpint(uint64(uintptr(to)))
226 func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) {
227 dumpint(tagFinalizer)
228 dumpint(uint64(uintptr(obj)))
229 dumpint(uint64(uintptr(unsafe.Pointer(fn))))
230 dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
231 dumpint(uint64(uintptr(unsafe.Pointer(fint))))
232 dumpint(uint64(uintptr(unsafe.Pointer(ot))))
235 type childInfo struct {
236 // Information passed up from the callee frame about
237 // the layout of the outargs region.
238 argoff uintptr // where the arguments start in the frame
239 arglen uintptr // size of args region
240 args bitvector // if args.n >= 0, pointer map of args region
241 sp *uint8 // callee sp
242 depth uintptr // depth in call stack (0 == most recent)
245 // dump kinds & offsets of interesting fields in bv.
246 func dumpbv(cbv *bitvector, offset uintptr) {
247 for i := uintptr(0); i < uintptr(cbv.n); i++ {
248 if cbv.ptrbit(i) == 1 {
249 dumpint(fieldKindPtr)
250 dumpint(uint64(offset + i*goarch.PtrSize))
// dumpframe emits one stack-frame record during a goroutine stack walk:
// frame bounds and contents, then pointer fields in the outargs and
// locals regions, and finally records arg-layout info for the parent
// frame in child.
// NOTE(review): truncated listing — many interior lines (pc setup,
// several branches and closing braces) are missing, and each line
// carries a stale line-number prefix from extraction.
255 func dumpframe(s *stkframe, child *childInfo) {
258 // Figure out what we can about our stack map
260 pcdata := int32(-1) // Use the entry map at function entry
263 pcdata = pcdatavalue(f, abi.PCDATA_StackMapIndex, pc)
266 // We do not have a valid pcdata value but there might be a
267 // stackmap for this function. It is likely that we are looking
268 // at the function prologue, assume so and hope for the best.
271 stkmap := (*stackmap)(funcdata(f, abi.FUNCDATA_LocalsPointerMaps))
274 if stkmap != nil && stkmap.n > 0 {
275 bv = stackmapdata(stkmap, pcdata)
280 // Dump main body of stack frame.
281 dumpint(tagStackFrame)
282 dumpint(uint64(s.sp)) // lowest address in frame
283 dumpint(uint64(child.depth)) // # of frames deep on the stack
284 dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
285 dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp) // frame contents
286 dumpint(uint64(f.entry()))
287 dumpint(uint64(s.pc))
288 dumpint(uint64(s.continpc))
// Fallback name when the function cannot be resolved.
291 name = "unknown function"
295 // Dump fields in the outargs section
296 if child.args.n >= 0 {
297 dumpbv(&child.args, child.argoff)
299 // conservative - everything might be a pointer
300 for off := child.argoff; off < child.argoff+child.arglen; off += goarch.PtrSize {
301 dumpint(fieldKindPtr)
306 // Dump fields in the local vars section
308 // No locals information, dump everything.
309 for off := child.arglen; off < s.varp-s.sp; off += goarch.PtrSize {
310 dumpint(fieldKindPtr)
313 } else if stkmap.n < 0 {
314 // Locals size information, dump just the locals.
315 size := uintptr(-stkmap.n)
316 for off := s.varp - size - s.sp; off < s.varp-s.sp; off += goarch.PtrSize {
317 dumpint(fieldKindPtr)
320 } else if stkmap.n > 0 {
321 // Locals bitmap information, scan just the pointers in
323 dumpbv(&bv, s.varp-uintptr(bv.n)*goarch.PtrSize-s.sp)
325 dumpint(fieldKindEol)
327 // Record arg info for parent.
328 child.argoff = s.argp - s.fp
329 child.arglen = s.argBytes()
330 child.sp = (*uint8)(unsafe.Pointer(s.sp))
332 stkmap = (*stackmap)(funcdata(f, abi.FUNCDATA_ArgsPointerMaps))
334 child.args = stackmapdata(stkmap, pcdata)
// dumpgoroutine emits a goroutine record (status, wait reason, scheduling
// state), then walks and dumps its stack frames, followed by its defer
// and panic records.
// NOTE(review): truncated listing — interior lines (sp/pc/lr selection,
// child/unwinder setup, several closing braces) are missing, and each
// line carries a stale line-number prefix from extraction.
341 func dumpgoroutine(gp *g) {
342 var sp, pc, lr uintptr
// Goroutines blocked in a syscall use the saved syscall sp/pc.
343 if gp.syscallsp != 0 {
353 dumpint(tagGoroutine)
354 dumpint(uint64(uintptr(unsafe.Pointer(gp))))
357 dumpint(uint64(gp.gopc))
358 dumpint(uint64(readgstatus(gp)))
359 dumpbool(isSystemGoroutine(gp, false))
360 dumpbool(false) // isbackground
361 dumpint(uint64(gp.waitsince))
362 dumpstr(gp.waitreason.String())
363 dumpint(uint64(uintptr(gp.sched.ctxt)))
364 dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
365 dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
366 dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))
// Walk the stack, dumping one record per frame.
375 for u.initAt(pc, sp, lr, gp, 0); u.valid(); u.next() {
376 dumpframe(&u.frame, &child)
379 // dump defer & panic records
380 for d := gp._defer; d != nil; d = d.link {
382 dumpint(uint64(uintptr(unsafe.Pointer(d))))
383 dumpint(uint64(uintptr(unsafe.Pointer(gp))))
384 dumpint(uint64(d.sp))
385 dumpint(uint64(d.pc))
386 fn := *(**funcval)(unsafe.Pointer(&d.fn))
387 dumpint(uint64(uintptr(unsafe.Pointer(fn))))
389 // d.fn can be nil for open-coded defers
392 dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
394 dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
396 for p := gp._panic; p != nil; p = p.link {
398 dumpint(uint64(uintptr(unsafe.Pointer(p))))
399 dumpint(uint64(uintptr(unsafe.Pointer(gp))))
// The panic argument is an eface; record its type and data words.
400 eface := efaceOf(&p.arg)
401 dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
402 dumpint(uint64(uintptr(eface.data)))
403 dumpint(0) // was p->defer, no longer recorded
404 dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
// Body fragment of dumpgs: iterate all goroutines and dump each one,
// throwing on an unexpected status (safe because the world is stopped).
// NOTE(review): truncated listing — the enclosing func header, the status
// switch, and the dumpgoroutine call are missing; each line carries a
// stale line-number prefix from extraction.
411 // goroutines & stacks
412 forEachG(func(gp *g) {
413 status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
416 print("runtime: unexpected G.status ", hex(status), "\n")
417 throw("dumpgs in STW - bad status")
428 func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) {
429 dumpint(tagQueuedFinalizer)
430 dumpint(uint64(uintptr(obj)))
431 dumpint(uint64(uintptr(unsafe.Pointer(fn))))
432 dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
433 dumpint(uint64(uintptr(unsafe.Pointer(fint))))
434 dumpint(uint64(uintptr(unsafe.Pointer(ot))))
// Body fragment of dumproots: dumps the data and bss sections with their
// pointer masks, registered finalizers found in span specials, and the
// queued finalizers.
// NOTE(review): truncated listing — the enclosing func header, tag lines,
// and several closing braces are missing; each line carries a stale
// line-number prefix from extraction.
438 // To protect mheap_.allspans.
441 // TODO(mwhudson): dump datamask etc from all objects
// Data segment: address, contents, and pointer-field mask.
444 dumpint(uint64(firstmoduledata.data))
445 dumpmemrange(unsafe.Pointer(firstmoduledata.data), firstmoduledata.edata-firstmoduledata.data)
446 dumpfields(firstmoduledata.gcdatamask)
// BSS segment: address, contents, and pointer-field mask.
450 dumpint(uint64(firstmoduledata.bss))
451 dumpmemrange(unsafe.Pointer(firstmoduledata.bss), firstmoduledata.ebss-firstmoduledata.bss)
452 dumpfields(firstmoduledata.gcbssmask)
// Finalizers attached to in-use spans.
455 for _, s := range mheap_.allspans {
456 if s.state.get() == mSpanInUse {
458 for sp := s.specials; sp != nil; sp = sp.next {
459 if sp.kind != _KindSpecialFinalizer {
462 spf := (*specialfinalizer)(unsafe.Pointer(sp))
463 p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
464 dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
// Finalizers already queued to run.
470 iterate_finq(finq_callback)
// freemark plus the body fragment of dumpobjs: walks every in-use span,
// marks free slots in freemark, and dumps each allocated object with its
// pointer bitmap from makeheapobjbv.
// NOTE(review): truncated listing — the enclosing func header, the
// free-slot skip, and several closing braces are missing; each line
// carries a stale line-number prefix from extraction.
473 // Bit vector of free marks.
474 // Needs to be as big as the largest number of objects per span.
475 var freemark [_PageSize / 8]bool
478 // To protect mheap_.allspans.
481 for _, s := range mheap_.allspans {
482 if s.state.get() != mSpanInUse {
// n is the number of object slots in this span.
487 n := (s.npages << _PageShift) / size
488 if n > uintptr(len(freemark)) {
489 throw("freemark array doesn't have enough entries")
// Record which slots are free so they can be skipped below.
492 for freeIndex := uint16(0); freeIndex < s.nelems; freeIndex++ {
493 if s.isFree(uintptr(freeIndex)) {
494 freemark[freeIndex] = true
// Walk the slots; p advances by the span's element size.
498 for j := uintptr(0); j < n; j, p = j+1, p+size {
503 dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
// Body fragment of dumpparams: records endianness, pointer size, the heap
// arena address range, and build parameters (GOARCH, Go version, ncpu).
// NOTE(review): truncated listing — the enclosing func header, the tag
// line, the local x used for the endianness probe, and several closing
// braces are missing; each line carries a stale line-number prefix.
// Endianness probe: inspect the first byte of an in-memory word.
511 if *(*byte)(unsafe.Pointer(&x)) == 1 {
512 dumpbool(false) // little-endian ptrs
514 dumpbool(true) // big-endian ptrs
516 dumpint(goarch.PtrSize)
// Compute the [arenaStart, arenaEnd) range covering all heap arenas.
517 var arenaStart, arenaEnd uintptr
518 for i1 := range mheap_.arenas {
519 if mheap_.arenas[i1] == nil {
522 for i, ha := range mheap_.arenas[i1] {
526 base := arenaBase(arenaIdx(i1)<<arenaL1Shift | arenaIdx(i))
527 if arenaStart == 0 || base < arenaStart {
530 if base+heapArenaBytes > arenaEnd {
531 arenaEnd = base + heapArenaBytes
535 dumpint(uint64(arenaStart))
536 dumpint(uint64(arenaEnd))
537 dumpstr(goarch.GOARCH)
538 dumpstr(buildVersion)
539 dumpint(uint64(ncpu))
// itab_callback dumps one itab record (itab address and its type); the
// final line is the dumpitabs body fragment that drives it via
// iterate_itabs.
// NOTE(review): truncated listing — the tag line, the local t, closing
// braces, and the dumpitabs header are missing; each line carries a
// stale line-number prefix from extraction.
542 func itab_callback(tab *itab) {
546 dumpint(uint64(uintptr(unsafe.Pointer(tab))))
547 dumpint(uint64(uintptr(unsafe.Pointer(t))))
551 iterate_itabs(itab_callback)
// Body fragment of dumpms: emits one record (address, id) per OS thread
// on the allm list.
// NOTE(review): truncated listing — the enclosing func header, the tag
// line, and closing braces are missing; each line carries a stale
// line-number prefix from extraction.
555 for mp := allm; mp != nil; mp = mp.alllink {
557 dumpint(uint64(uintptr(unsafe.Pointer(mp))))
558 dumpint(uint64(mp.id))
// dumpmemstats writes the MemStats snapshot taken before the dump started,
// field by field, in the order of the exported MemStats structure.
// NOTE(review): truncated listing — the tag line and many of the field
// dumps are missing; each line carries a stale line-number prefix from
// extraction.
564 func dumpmemstats(m *MemStats) {
567 // These ints should be identical to the exported
568 // MemStats structure and should be ordered the same
572 dumpint(m.TotalAlloc)
581 dumpint(m.HeapReleased)
582 dumpint(m.HeapObjects)
583 dumpint(m.StackInuse)
585 dumpint(m.MSpanInuse)
587 dumpint(m.MCacheInuse)
589 dumpint(m.BuckHashSys)
594 dumpint(m.PauseTotalNs)
// The fixed-size 256-entry GC pause history ring.
595 for i := 0; i < 256; i++ {
596 dumpint(m.PauseNs[i])
598 dumpint(uint64(m.NumGC))
// dumpmemprof_callback is the iterate_memprof callback: it emits one
// memory-profile bucket record (bucket address, size, stack trace with
// resolved function/file/line, and alloc/free counts).
// NOTE(review): truncated listing — the tag line, the frame-name
// resolution, and several closing braces are missing; each line carries
// a stale line-number prefix from extraction.
601 func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
// Reinterpret the raw PC pointer as a large indexable array.
602 stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
604 dumpint(uint64(uintptr(unsafe.Pointer(b))))
605 dumpint(uint64(size))
606 dumpint(uint64(nstk))
607 for i := uintptr(0); i < nstk; i++ {
// Fallback: render the raw PC as hex when symbolization fails.
621 buf[n] = "0123456789abcdef"[pc&15]
// For return addresses (not the innermost frame), back up one byte
// so funcline attributes the call site, not the return site.
636 if i > 0 && pc > f.entry() {
639 file, line := funcline(f, pc)
641 dumpint(uint64(line))
644 dumpint(uint64(allocs))
645 dumpint(uint64(frees))
// Body fragment of dumpmemprof: dumps all memory-profile buckets, then an
// alloc-sample record for every profile special attached to an in-use span.
// NOTE(review): truncated listing — the enclosing func header and several
// closing braces are missing; each line carries a stale line-number
// prefix from extraction.
649 // To protect mheap_.allspans.
652 iterate_memprof(dumpmemprof_callback)
653 for _, s := range mheap_.allspans {
654 if s.state.get() != mSpanInUse {
657 for sp := s.specials; sp != nil; sp = sp.next {
658 if sp.kind != _KindSpecialProfile {
661 spp := (*specialprofile)(unsafe.Pointer(sp))
662 p := s.base() + uintptr(spp.special.offset)
663 dumpint(tagAllocSample)
665 dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
670 var dumphdr = []byte("go1.7 heap dump\n")
// mdump performs the actual dump: it ensures sweeping is complete, resets
// the type cache, writes the header, and then emits all dump sections.
// NOTE(review): truncated listing — the sweep call, the section dump
// calls, and the final flush are missing; each line carries a stale
// line-number prefix from extraction.
672 func mdump(m *MemStats) {
675 // make sure we're done sweeping
676 for _, s := range mheap_.allspans {
677 if s.state.get() == mSpanInUse {
// Reset the type cache so types are re-dumped for this dump.
681 memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
682 dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
// writeheapdump_m runs the dump with the current goroutine parked in
// waitReasonDumpingHeap, calls the dump routine, frees the temporary
// bitmap buffer, and restores the goroutine to running.
// NOTE(review): truncated listing — dumpfd setup, the mdump call, and
// the tmpbuf nil check are missing; each line carries a stale
// line-number prefix from extraction.
695 func writeheapdump_m(fd uintptr, m *MemStats) {
699 casGToWaiting(gp.m.curg, _Grunning, waitReasonDumpingHeap)
704 // Call dump routine.
// Release the scratch buffer grown by makeheapobjbv, if any.
710 sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
714 casgstatus(gp.m.curg, _Gwaiting, _Grunning)
717 // dumpint() the kind & offset of each field in an object.
718 func dumpfields(bv bitvector) {
720 dumpint(fieldKindEol)
// makeheapobjbv builds a pointer bitvector for the object at p of the
// given size, growing the package-level tmpbuf scratch buffer as needed.
// It has two paths: the allocation-headers experiment (typePointersOf
// iterator) and the legacy heap bitmap (heapBitsForAddr).
// NOTE(review): truncated listing — the span lookup, loop terminators,
// and several closing braces are missing; each line carries a stale
// line-number prefix from extraction.
723 func makeheapobjbv(p uintptr, size uintptr) bitvector {
724 // Extend the temp buffer if necessary.
725 nptr := size / goarch.PtrSize
726 if uintptr(len(tmpbuf)) < nptr/8+1 {
// Free the old, too-small buffer before allocating a larger one.
728 sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
731 p := sysAlloc(n, &memstats.other_sys)
733 throw("heapdump: out of memory")
735 tmpbuf = (*[1 << 30]byte)(p)[:n]
737 // Convert heap bitmap to pointer bitmap.
// Clear the portion of tmpbuf we are about to fill.
738 for i := uintptr(0); i < nptr/8+1; i++ {
741 if goexperiment.AllocHeaders {
// New path: walk pointer locations via the type-pointer iterator.
743 tp := s.typePointersOf(p, size)
746 if tp, addr = tp.next(p + size); addr == 0 {
749 i := (addr - p) / goarch.PtrSize
750 tmpbuf[i/8] |= 1 << (i % 8)
// Legacy path: walk the heap bitmap directly.
753 hbits := heapBitsForAddr(p, size)
756 hbits, addr = hbits.next()
760 i := (addr - p) / goarch.PtrSize
761 tmpbuf[i/8] |= 1 << (i % 8)
764 return bitvector{int32(nptr), &tmpbuf[0]}