Cypherpunks.ru repositories - gostls13.git/commitdiff
[dev.cc] all: merge default (e4ab8f908aac) into dev.cc
author    Russ Cox <rsc@golang.org>
          Thu, 20 Nov 2014 16:48:08 +0000 (11:48 -0500)
committer Russ Cox <rsc@golang.org>
          Thu, 20 Nov 2014 16:48:08 +0000 (11:48 -0500)
TBR=austin
CC=golang-codereviews
https://golang.org/cl/179040044

src/cmd/5g/reg.c
src/run.bash
src/runtime/chan.go
src/runtime/heapdump.go
src/runtime/malloc.go
src/runtime/mgc0.go
src/runtime/proc.go
src/runtime/race1.go
src/runtime/race_amd64.s
src/runtime/select.go
src/runtime/stubs.go

Simple merge
diff --cc src/run.bash
Simple merge
Simple merge
diff --cc src/runtime/heapdump.go
index c942e01639953256a8b3ceb62e2d3c695230113d,0000000000000000000000000000000000000000..0c1a60c8bb85608c7295c54b40a162f5de5a5c49
mode 100644,000000..100644
--- /dev/null
@@@ -1,729 -1,0 +1,729 @@@
- // http://code.google.com/p/go-wiki/wiki/heapdump14
 +// Copyright 2014 The Go Authors. All rights reserved.
 +// Use of this source code is governed by a BSD-style
 +// license that can be found in the LICENSE file.
 +
 +// Implementation of runtime/debug.WriteHeapDump.  Writes all
 +// objects in the heap plus additional info (roots, threads,
 +// finalizers, etc.) to a file.
 +
 +// The format of the dumped file is described at
++// http://golang.org/s/go14heapdump.
 +
 +package runtime
 +
 +import "unsafe"
 +
 +const (
 +      fieldKindEol       = 0
 +      fieldKindPtr       = 1
 +      fieldKindIface     = 2
 +      fieldKindEface     = 3
 +      tagEOF             = 0
 +      tagObject          = 1
 +      tagOtherRoot       = 2
 +      tagType            = 3
 +      tagGoroutine       = 4
 +      tagStackFrame      = 5
 +      tagParams          = 6
 +      tagFinalizer       = 7
 +      tagItab            = 8
 +      tagOSThread        = 9
 +      tagMemStats        = 10
 +      tagQueuedFinalizer = 11
 +      tagData            = 12
 +      tagBSS             = 13
 +      tagDefer           = 14
 +      tagPanic           = 15
 +      tagMemProf         = 16
 +      tagAllocSample     = 17
 +)
 +
 +var dumpfd uintptr // fd to write the dump to.
 +var tmpbuf []byte
 +
 +// buffer of pending write data
 +const (
 +      bufSize = 4096
 +)
 +
 +var buf [bufSize]byte
 +var nbuf uintptr
 +
 +func dwrite(data unsafe.Pointer, len uintptr) {
 +      if len == 0 {
 +              return
 +      }
 +      if nbuf+len <= bufSize {
 +              copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
 +              nbuf += len
 +              return
 +      }
 +
 +      write(dumpfd, (unsafe.Pointer)(&buf), int32(nbuf))
 +      if len >= bufSize {
 +              write(dumpfd, data, int32(len))
 +              nbuf = 0
 +      } else {
 +              copy(buf[:], (*[bufSize]byte)(data)[:len])
 +              nbuf = len
 +      }
 +}
 +
 +func dwritebyte(b byte) {
 +      dwrite(unsafe.Pointer(&b), 1)
 +}
 +
 +func flush() {
 +      write(dumpfd, (unsafe.Pointer)(&buf), int32(nbuf))
 +      nbuf = 0
 +}
 +
 +// Cache of types that have been serialized already.
 +// We use a type's hash field to pick a bucket.
 +// Inside a bucket, we keep a list of types that
 +// have been serialized so far, most recently used first.
 +// Note: when a bucket overflows we may end up
 +// serializing a type more than once.  That's ok.
 +const (
 +      typeCacheBuckets = 256
 +      typeCacheAssoc   = 4
 +)
 +
 +type typeCacheBucket struct {
 +      t [typeCacheAssoc]*_type
 +}
 +
 +var typecache [typeCacheBuckets]typeCacheBucket
 +
 +// dump a uint64 in a varint format parseable by encoding/binary
 +func dumpint(v uint64) {
 +      var buf [10]byte
 +      var n int
 +      for v >= 0x80 {
 +              buf[n] = byte(v | 0x80)
 +              n++
 +              v >>= 7
 +      }
 +      buf[n] = byte(v)
 +      n++
 +      dwrite(unsafe.Pointer(&buf), uintptr(n))
 +}
 +
 +func dumpbool(b bool) {
 +      if b {
 +              dumpint(1)
 +      } else {
 +              dumpint(0)
 +      }
 +}
 +
 +// dump varint uint64 length followed by memory contents
 +func dumpmemrange(data unsafe.Pointer, len uintptr) {
 +      dumpint(uint64(len))
 +      dwrite(data, len)
 +}
 +
 +func dumpslice(b []byte) {
 +      dumpint(uint64(len(b)))
 +      if len(b) > 0 {
 +              dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
 +      }
 +}
 +
 +func dumpstr(s string) {
 +      sp := (*stringStruct)(unsafe.Pointer(&s))
 +      dumpmemrange(sp.str, uintptr(sp.len))
 +}
 +
 +// dump information for a type
 +func dumptype(t *_type) {
 +      if t == nil {
 +              return
 +      }
 +
 +      // If we've definitely serialized the type before,
 +      // no need to do it again.
 +      b := &typecache[t.hash&(typeCacheBuckets-1)]
 +      if t == b.t[0] {
 +              return
 +      }
 +      for i := 1; i < typeCacheAssoc; i++ {
 +              if t == b.t[i] {
 +                      // Move-to-front
 +                      for j := i; j > 0; j-- {
 +                              b.t[j] = b.t[j-1]
 +                      }
 +                      b.t[0] = t
 +                      return
 +              }
 +      }
 +
 +      // Might not have been dumped yet.  Dump it and
 +      // remember we did so.
 +      for j := typeCacheAssoc - 1; j > 0; j-- {
 +              b.t[j] = b.t[j-1]
 +      }
 +      b.t[0] = t
 +
 +      // dump the type
 +      dumpint(tagType)
 +      dumpint(uint64(uintptr(unsafe.Pointer(t))))
 +      dumpint(uint64(t.size))
 +      if t.x == nil || t.x.pkgpath == nil || t.x.name == nil {
 +              dumpstr(*t._string)
 +      } else {
 +              pkgpath := (*stringStruct)(unsafe.Pointer(&t.x.pkgpath))
 +              name := (*stringStruct)(unsafe.Pointer(&t.x.name))
 +              dumpint(uint64(uintptr(pkgpath.len) + 1 + uintptr(name.len)))
 +              dwrite(pkgpath.str, uintptr(pkgpath.len))
 +              dwritebyte('.')
 +              dwrite(name.str, uintptr(name.len))
 +      }
 +      dumpbool(t.kind&kindDirectIface == 0 || t.kind&kindNoPointers == 0)
 +}
 +
 +// dump an object
 +func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
 +      dumpbvtypes(&bv, obj)
 +      dumpint(tagObject)
 +      dumpint(uint64(uintptr(obj)))
 +      dumpmemrange(obj, size)
 +      dumpfields(bv)
 +}
 +
 +func dumpotherroot(description string, to unsafe.Pointer) {
 +      dumpint(tagOtherRoot)
 +      dumpstr(description)
 +      dumpint(uint64(uintptr(to)))
 +}
 +
 +func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) {
 +      dumpint(tagFinalizer)
 +      dumpint(uint64(uintptr(obj)))
 +      dumpint(uint64(uintptr(unsafe.Pointer(fn))))
 +      dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
 +      dumpint(uint64(uintptr(unsafe.Pointer(fint))))
 +      dumpint(uint64(uintptr(unsafe.Pointer(ot))))
 +}
 +
 +type childInfo struct {
 +      // Information passed up from the callee frame about
 +      // the layout of the outargs region.
 +      argoff uintptr   // where the arguments start in the frame
 +      arglen uintptr   // size of args region
 +      args   bitvector // if args.n >= 0, pointer map of args region
 +      sp     *uint8    // callee sp
 +      depth  uintptr   // depth in call stack (0 == most recent)
 +}
 +
 +// dump kinds & offsets of interesting fields in bv
 +func dumpbv(cbv *bitvector, offset uintptr) {
 +      bv := gobv(*cbv)
 +      for i := uintptr(0); i < uintptr(bv.n); i += _BitsPerPointer {
 +              switch bv.bytedata[i/8] >> (i % 8) & 3 {
 +              default:
 +                      gothrow("unexpected pointer bits")
 +              case _BitsDead:
 +                      // BitsDead has already been processed in makeheapobjbv.
 +                      // We should only see it in stack maps, in which case we should continue processing.
 +              case _BitsScalar:
 +                      // ok
 +              case _BitsPointer:
 +                      dumpint(fieldKindPtr)
 +                      dumpint(uint64(offset + i/_BitsPerPointer*ptrSize))
 +              }
 +      }
 +}
 +
 +func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
 +      child := (*childInfo)(arg)
 +      f := s.fn
 +
 +      // Figure out what we can about our stack map
 +      pc := s.pc
 +      if pc != f.entry {
 +              pc--
 +      }
 +      pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, pc)
 +      if pcdata == -1 {
 +              // We do not have a valid pcdata value but there might be a
 +              // stackmap for this function.  It is likely that we are looking
 +              // at the function prologue, assume so and hope for the best.
 +              pcdata = 0
 +      }
 +      stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
 +
 +      // Dump any types we will need to resolve Efaces.
 +      if child.args.n >= 0 {
 +              dumpbvtypes(&child.args, unsafe.Pointer(s.sp+child.argoff))
 +      }
 +      var bv bitvector
 +      if stkmap != nil && stkmap.n > 0 {
 +              bv = stackmapdata(stkmap, pcdata)
 +              dumpbvtypes(&bv, unsafe.Pointer(s.varp-uintptr(bv.n/_BitsPerPointer*ptrSize)))
 +      } else {
 +              bv.n = -1
 +      }
 +
 +      // Dump main body of stack frame.
 +      dumpint(tagStackFrame)
 +      dumpint(uint64(s.sp))                              // lowest address in frame
 +      dumpint(uint64(child.depth))                       // # of frames deep on the stack
 +      dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
 +      dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp)      // frame contents
 +      dumpint(uint64(f.entry))
 +      dumpint(uint64(s.pc))
 +      dumpint(uint64(s.continpc))
 +      name := gofuncname(f)
 +      if name == "" {
 +              name = "unknown function"
 +      }
 +      dumpstr(name)
 +
 +      // Dump fields in the outargs section
 +      if child.args.n >= 0 {
 +              dumpbv(&child.args, child.argoff)
 +      } else {
 +              // conservative - everything might be a pointer
 +              for off := child.argoff; off < child.argoff+child.arglen; off += ptrSize {
 +                      dumpint(fieldKindPtr)
 +                      dumpint(uint64(off))
 +              }
 +      }
 +
 +      // Dump fields in the local vars section
 +      if stkmap == nil {
 +              // No locals information, dump everything.
 +              for off := child.arglen; off < s.varp-s.sp; off += ptrSize {
 +                      dumpint(fieldKindPtr)
 +                      dumpint(uint64(off))
 +              }
 +      } else if stkmap.n < 0 {
 +              // Locals size information, dump just the locals.
 +              size := uintptr(-stkmap.n)
 +              for off := s.varp - size - s.sp; off < s.varp-s.sp; off += ptrSize {
 +                      dumpint(fieldKindPtr)
 +                      dumpint(uint64(off))
 +              }
 +      } else if stkmap.n > 0 {
 +              // Locals bitmap information, scan just the pointers in
 +              // locals.
 +              dumpbv(&bv, s.varp-uintptr(bv.n)/_BitsPerPointer*ptrSize-s.sp)
 +      }
 +      dumpint(fieldKindEol)
 +
 +      // Record arg info for parent.
 +      child.argoff = s.argp - s.fp
 +      child.arglen = s.arglen
 +      child.sp = (*uint8)(unsafe.Pointer(s.sp))
 +      child.depth++
 +      stkmap = (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
 +      if stkmap != nil {
 +              child.args = stackmapdata(stkmap, pcdata)
 +      } else {
 +              child.args.n = -1
 +      }
 +      return true
 +}
 +
 +func dumpgoroutine(gp *g) {
 +      var sp, pc, lr uintptr
 +      if gp.syscallsp != 0 {
 +              sp = gp.syscallsp
 +              pc = gp.syscallpc
 +              lr = 0
 +      } else {
 +              sp = gp.sched.sp
 +              pc = gp.sched.pc
 +              lr = gp.sched.lr
 +      }
 +
 +      dumpint(tagGoroutine)
 +      dumpint(uint64(uintptr(unsafe.Pointer(gp))))
 +      dumpint(uint64(sp))
 +      dumpint(uint64(gp.goid))
 +      dumpint(uint64(gp.gopc))
 +      dumpint(uint64(readgstatus(gp)))
 +      dumpbool(gp.issystem)
 +      dumpbool(false) // isbackground
 +      dumpint(uint64(gp.waitsince))
 +      dumpstr(gp.waitreason)
 +      dumpint(uint64(uintptr(gp.sched.ctxt)))
 +      dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
 +      dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
 +      dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))
 +
 +      // dump stack
 +      var child childInfo
 +      child.args.n = -1
 +      child.arglen = 0
 +      child.sp = nil
 +      child.depth = 0
 +      gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, noescape(unsafe.Pointer(&child)), 0)
 +
 +      // dump defer & panic records
 +      for d := gp._defer; d != nil; d = d.link {
 +              dumpint(tagDefer)
 +              dumpint(uint64(uintptr(unsafe.Pointer(d))))
 +              dumpint(uint64(uintptr(unsafe.Pointer(gp))))
 +              dumpint(uint64(d.argp))
 +              dumpint(uint64(d.pc))
 +              dumpint(uint64(uintptr(unsafe.Pointer(d.fn))))
 +              dumpint(uint64(uintptr(unsafe.Pointer(d.fn.fn))))
 +              dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
 +      }
 +      for p := gp._panic; p != nil; p = p.link {
 +              dumpint(tagPanic)
 +              dumpint(uint64(uintptr(unsafe.Pointer(p))))
 +              dumpint(uint64(uintptr(unsafe.Pointer(gp))))
 +              eface := (*eface)(unsafe.Pointer(&p.arg))
 +              dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
 +              dumpint(uint64(uintptr(unsafe.Pointer(eface.data))))
 +              dumpint(0) // was p->defer, no longer recorded
 +              dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
 +      }
 +}
 +
 +func dumpgs() {
 +      // goroutines & stacks
 +      for i := 0; uintptr(i) < allglen; i++ {
 +              gp := allgs[i]
 +              status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
 +              switch status {
 +              default:
 +                      print("runtime: unexpected G.status ", hex(status), "\n")
 +                      gothrow("dumpgs in STW - bad status")
 +              case _Gdead:
 +                      // ok
 +              case _Grunnable,
 +                      _Gsyscall,
 +                      _Gwaiting:
 +                      dumpgoroutine(gp)
 +              }
 +      }
 +}
 +
 +func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) {
 +      dumpint(tagQueuedFinalizer)
 +      dumpint(uint64(uintptr(obj)))
 +      dumpint(uint64(uintptr(unsafe.Pointer(fn))))
 +      dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
 +      dumpint(uint64(uintptr(unsafe.Pointer(fint))))
 +      dumpint(uint64(uintptr(unsafe.Pointer(ot))))
 +}
 +
 +func dumproots() {
 +      // data segment
 +      dumpbvtypes(&gcdatamask, unsafe.Pointer(&data))
 +      dumpint(tagData)
 +      dumpint(uint64(uintptr(unsafe.Pointer(&data))))
 +      dumpmemrange(unsafe.Pointer(&data), uintptr(unsafe.Pointer(&edata))-uintptr(unsafe.Pointer(&data)))
 +      dumpfields(gcdatamask)
 +
 +      // bss segment
 +      dumpbvtypes(&gcbssmask, unsafe.Pointer(&bss))
 +      dumpint(tagBSS)
 +      dumpint(uint64(uintptr(unsafe.Pointer(&bss))))
 +      dumpmemrange(unsafe.Pointer(&bss), uintptr(unsafe.Pointer(&ebss))-uintptr(unsafe.Pointer(&bss)))
 +      dumpfields(gcbssmask)
 +
 +      // MSpan.types
 +      allspans := h_allspans
 +      for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
 +              s := allspans[spanidx]
 +              if s.state == _MSpanInUse {
 +                      // Finalizers
 +                      for sp := s.specials; sp != nil; sp = sp.next {
 +                              if sp.kind != _KindSpecialFinalizer {
 +                                      continue
 +                              }
 +                              spf := (*specialfinalizer)(unsafe.Pointer(sp))
 +                              p := unsafe.Pointer((uintptr(s.start) << _PageShift) + uintptr(spf.special.offset))
 +                              dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
 +                      }
 +              }
 +      }
 +
 +      // Finalizer queue
 +      iterate_finq(finq_callback)
 +}
 +
 +// Bit vector of free marks.
 +// Needs to be as big as the largest number of objects per span.
 +var freemark [_PageSize / 8]bool
 +
 +func dumpobjs() {
 +      for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
 +              s := h_allspans[i]
 +              if s.state != _MSpanInUse {
 +                      continue
 +              }
 +              p := uintptr(s.start << _PageShift)
 +              size := s.elemsize
 +              n := (s.npages << _PageShift) / size
 +              if n > uintptr(len(freemark)) {
 +                      gothrow("freemark array doesn't have enough entries")
 +              }
 +              for l := s.freelist; l != nil; l = l.next {
 +                      freemark[(uintptr(unsafe.Pointer(l))-p)/size] = true
 +              }
 +              for j := uintptr(0); j < n; j, p = j+1, p+size {
 +                      if freemark[j] {
 +                              freemark[j] = false
 +                              continue
 +                      }
 +                      dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
 +              }
 +      }
 +}
 +
 +func dumpparams() {
 +      dumpint(tagParams)
 +      x := uintptr(1)
 +      if *(*byte)(unsafe.Pointer(&x)) == 1 {
 +              dumpbool(false) // little-endian ptrs
 +      } else {
 +              dumpbool(true) // big-endian ptrs
 +      }
 +      dumpint(ptrSize)
 +      dumpint(uint64(mheap_.arena_start))
 +      dumpint(uint64(mheap_.arena_used))
 +      dumpint(thechar)
 +      dumpstr(goexperiment)
 +      dumpint(uint64(ncpu))
 +}
 +
 +func itab_callback(tab *itab) {
 +      t := tab._type
 +      // Dump a map from itab* to the type of its data field.
 +      // We want this map so we can deduce types of interface referents.
 +      if t.kind&kindDirectIface == 0 {
 +              // indirect - data slot is a pointer to t.
 +              dumptype(t.ptrto)
 +              dumpint(tagItab)
 +              dumpint(uint64(uintptr(unsafe.Pointer(tab))))
 +              dumpint(uint64(uintptr(unsafe.Pointer(t.ptrto))))
 +      } else if t.kind&kindNoPointers == 0 {
 +              // t is pointer-like - data slot is a t.
 +              dumptype(t)
 +              dumpint(tagItab)
 +              dumpint(uint64(uintptr(unsafe.Pointer(tab))))
 +              dumpint(uint64(uintptr(unsafe.Pointer(t))))
 +      } else {
 +              // Data slot is a scalar.  Dump type just for fun.
 +              // With pointer-only interfaces, this shouldn't happen.
 +              dumptype(t)
 +              dumpint(tagItab)
 +              dumpint(uint64(uintptr(unsafe.Pointer(tab))))
 +              dumpint(uint64(uintptr(unsafe.Pointer(t))))
 +      }
 +}
 +
 +func dumpitabs() {
 +      iterate_itabs(itab_callback)
 +}
 +
 +func dumpms() {
 +      for mp := allm; mp != nil; mp = mp.alllink {
 +              dumpint(tagOSThread)
 +              dumpint(uint64(uintptr(unsafe.Pointer(mp))))
 +              dumpint(uint64(mp.id))
 +              dumpint(mp.procid)
 +      }
 +}
 +
 +func dumpmemstats() {
 +      dumpint(tagMemStats)
 +      dumpint(memstats.alloc)
 +      dumpint(memstats.total_alloc)
 +      dumpint(memstats.sys)
 +      dumpint(memstats.nlookup)
 +      dumpint(memstats.nmalloc)
 +      dumpint(memstats.nfree)
 +      dumpint(memstats.heap_alloc)
 +      dumpint(memstats.heap_sys)
 +      dumpint(memstats.heap_idle)
 +      dumpint(memstats.heap_inuse)
 +      dumpint(memstats.heap_released)
 +      dumpint(memstats.heap_objects)
 +      dumpint(memstats.stacks_inuse)
 +      dumpint(memstats.stacks_sys)
 +      dumpint(memstats.mspan_inuse)
 +      dumpint(memstats.mspan_sys)
 +      dumpint(memstats.mcache_inuse)
 +      dumpint(memstats.mcache_sys)
 +      dumpint(memstats.buckhash_sys)
 +      dumpint(memstats.gc_sys)
 +      dumpint(memstats.other_sys)
 +      dumpint(memstats.next_gc)
 +      dumpint(memstats.last_gc)
 +      dumpint(memstats.pause_total_ns)
 +      for i := 0; i < 256; i++ {
 +              dumpint(memstats.pause_ns[i])
 +      }
 +      dumpint(uint64(memstats.numgc))
 +}
 +
 +func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
 +      stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
 +      dumpint(tagMemProf)
 +      dumpint(uint64(uintptr(unsafe.Pointer(b))))
 +      dumpint(uint64(size))
 +      dumpint(uint64(nstk))
 +      for i := uintptr(0); i < nstk; i++ {
 +              pc := stk[i]
 +              f := findfunc(pc)
 +              if f == nil {
 +                      var buf [64]byte
 +                      n := len(buf)
 +                      n--
 +                      buf[n] = ')'
 +                      if pc == 0 {
 +                              n--
 +                              buf[n] = '0'
 +                      } else {
 +                              for pc > 0 {
 +                                      n--
 +                                      buf[n] = "0123456789abcdef"[pc&15]
 +                                      pc >>= 4
 +                              }
 +                      }
 +                      n--
 +                      buf[n] = 'x'
 +                      n--
 +                      buf[n] = '0'
 +                      n--
 +                      buf[n] = '('
 +                      dumpslice(buf[n:])
 +                      dumpstr("?")
 +                      dumpint(0)
 +              } else {
 +                      dumpstr(gofuncname(f))
 +                      if i > 0 && pc > f.entry {
 +                              pc--
 +                      }
 +                      file, line := funcline(f, pc)
 +                      dumpstr(file)
 +                      dumpint(uint64(line))
 +              }
 +      }
 +      dumpint(uint64(allocs))
 +      dumpint(uint64(frees))
 +}
 +
 +func dumpmemprof() {
 +      iterate_memprof(dumpmemprof_callback)
 +      allspans := h_allspans
 +      for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
 +              s := allspans[spanidx]
 +              if s.state != _MSpanInUse {
 +                      continue
 +              }
 +              for sp := s.specials; sp != nil; sp = sp.next {
 +                      if sp.kind != _KindSpecialProfile {
 +                              continue
 +                      }
 +                      spp := (*specialprofile)(unsafe.Pointer(sp))
 +                      p := uintptr(s.start<<_PageShift) + uintptr(spp.special.offset)
 +                      dumpint(tagAllocSample)
 +                      dumpint(uint64(p))
 +                      dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
 +              }
 +      }
 +}
 +
 +var dumphdr = []byte("go1.4 heap dump\n")
 +
 +func mdump() {
 +      // make sure we're done sweeping
 +      for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
 +              s := h_allspans[i]
 +              if s.state == _MSpanInUse {
 +                      mSpan_EnsureSwept(s)
 +              }
 +      }
 +      memclr(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
 +      dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
 +      dumpparams()
 +      dumpitabs()
 +      dumpobjs()
 +      dumpgs()
 +      dumpms()
 +      dumproots()
 +      dumpmemstats()
 +      dumpmemprof()
 +      dumpint(tagEOF)
 +      flush()
 +}
 +
 +func writeheapdump_m(fd uintptr) {
 +      _g_ := getg()
 +      casgstatus(_g_.m.curg, _Grunning, _Gwaiting)
 +      _g_.waitreason = "dumping heap"
 +
 +      // Update stats so we can dump them.
 +      // As a side effect, flushes all the MCaches so the MSpan.freelist
 +      // lists contain all the free objects.
 +      updatememstats(nil)
 +
 +      // Set dump file.
 +      dumpfd = fd
 +
 +      // Call dump routine.
 +      mdump()
 +
 +      // Reset dump file.
 +      dumpfd = 0
 +      if tmpbuf != nil {
 +              sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
 +              tmpbuf = nil
 +      }
 +
 +      casgstatus(_g_.m.curg, _Gwaiting, _Grunning)
 +}
 +
 +// dumpint() the kind & offset of each field in an object.
 +func dumpfields(bv bitvector) {
 +      dumpbv(&bv, 0)
 +      dumpint(fieldKindEol)
 +}
 +
 +// The heap dump reader needs to be able to disambiguate
 +// Eface entries.  So it needs to know every type that might
 +// appear in such an entry.  The following routine accomplishes that.
 +// TODO(rsc, khr): Delete - no longer possible.
 +
 +// Dump all the types that appear in the type field of
 +// any Eface described by this bit vector.
 +func dumpbvtypes(bv *bitvector, base unsafe.Pointer) {
 +}
 +
 +func makeheapobjbv(p uintptr, size uintptr) bitvector {
 +      // Extend the temp buffer if necessary.
 +      nptr := size / ptrSize
 +      if uintptr(len(tmpbuf)) < nptr*_BitsPerPointer/8+1 {
 +              if tmpbuf != nil {
 +                      sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
 +              }
 +              n := nptr*_BitsPerPointer/8 + 1
 +              p := sysAlloc(n, &memstats.other_sys)
 +              if p == nil {
 +                      gothrow("heapdump: out of memory")
 +              }
 +              tmpbuf = (*[1 << 30]byte)(p)[:n]
 +      }
 +      // Copy and compact the bitmap.
 +      var i uintptr
 +      for i = 0; i < nptr; i++ {
 +              off := (p + i*ptrSize - mheap_.arena_start) / ptrSize
 +              bitp := (*uint8)(unsafe.Pointer(mheap_.arena_start - off/wordsPerBitmapByte - 1))
 +              shift := uint8((off % wordsPerBitmapByte) * gcBits)
 +              bits := (*bitp >> (shift + 2)) & _BitsMask
 +              if bits == _BitsDead {
 +                      break // end of heap object
 +              }
 +              tmpbuf[i*_BitsPerPointer/8] &^= (_BitsMask << ((i * _BitsPerPointer) % 8))
 +              tmpbuf[i*_BitsPerPointer/8] |= bits << ((i * _BitsPerPointer) % 8)
 +      }
 +      return bitvector{int32(i * _BitsPerPointer), &tmpbuf[0]}
 +}
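
The heapdump.go file added above writes a stream of tagged records in which every integer uses the base-128 varint encoding understood by encoding/binary (format described at http://golang.org/s/go14heapdump). A minimal reader sketch (not part of this commit; the file name is illustrative) that decodes only the header and the first record tag:

package main

import (
	"bufio"
	"encoding/binary"
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.Open("heap.dump") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()
	r := bufio.NewReader(f)

	// mdump writes this fixed header before any records.
	hdr := make([]byte, len("go1.4 heap dump\n"))
	if _, err := io.ReadFull(r, hdr); err != nil {
		panic(err)
	}
	fmt.Printf("header: %q\n", hdr)

	// Every record starts with a varint tag; dumpint's output is
	// exactly encoding/binary's unsigned varint format.
	tag, err := binary.ReadUvarint(r)
	if err != nil {
		panic(err)
	}
	fmt.Println("first tag:", tag) // mdump emits tagParams (6) first
}
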
Simple merge
Simple merge
Simple merge
diff --cc src/runtime/race1.go
index 4c580429c8690dd478783f8a837617b5259cdf09,0000000000000000000000000000000000000000..2ec2bee65b6e9fc9d8042765b67c92e09d93383a
mode 100644,000000..100644
--- /dev/null
@@@ -1,304 -1,0 +1,335 @@@
-               uintptr(unsafe.Pointer(&noptrdata)) <= uintptr(addr) && uintptr(addr) < uintptr(unsafe.Pointer(&enoptrbss))
 +// Copyright 2011 The Go Authors. All rights reserved.
 +// Use of this source code is governed by a BSD-style
 +// license that can be found in the LICENSE file.
 +
 +// Implementation of the race detector API.
 +// +build race
 +
 +package runtime
 +
 +import "unsafe"
 +
 +// Race runtime functions called via runtime·racecall.
 +//go:linkname __tsan_init __tsan_init
 +var __tsan_init byte
 +
 +//go:linkname __tsan_fini __tsan_fini
 +var __tsan_fini byte
 +
 +//go:linkname __tsan_map_shadow __tsan_map_shadow
 +var __tsan_map_shadow byte
 +
 +//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
 +var __tsan_finalizer_goroutine byte
 +
 +//go:linkname __tsan_go_start __tsan_go_start
 +var __tsan_go_start byte
 +
 +//go:linkname __tsan_go_end __tsan_go_end
 +var __tsan_go_end byte
 +
 +//go:linkname __tsan_malloc __tsan_malloc
 +var __tsan_malloc byte
 +
 +//go:linkname __tsan_acquire __tsan_acquire
 +var __tsan_acquire byte
 +
 +//go:linkname __tsan_release __tsan_release
 +var __tsan_release byte
 +
 +//go:linkname __tsan_release_merge __tsan_release_merge
 +var __tsan_release_merge byte
 +
 +//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
 +var __tsan_go_ignore_sync_begin byte
 +
 +//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
 +var __tsan_go_ignore_sync_end byte
 +
 +// Mimic what cmd/cgo would do.
 +//go:cgo_import_static __tsan_init
 +//go:cgo_import_static __tsan_fini
 +//go:cgo_import_static __tsan_map_shadow
 +//go:cgo_import_static __tsan_finalizer_goroutine
 +//go:cgo_import_static __tsan_go_start
 +//go:cgo_import_static __tsan_go_end
 +//go:cgo_import_static __tsan_malloc
 +//go:cgo_import_static __tsan_acquire
 +//go:cgo_import_static __tsan_release
 +//go:cgo_import_static __tsan_release_merge
 +//go:cgo_import_static __tsan_go_ignore_sync_begin
 +//go:cgo_import_static __tsan_go_ignore_sync_end
 +
 +// These are called from race_amd64.s.
 +//go:cgo_import_static __tsan_read
 +//go:cgo_import_static __tsan_read_pc
 +//go:cgo_import_static __tsan_read_range
 +//go:cgo_import_static __tsan_write
 +//go:cgo_import_static __tsan_write_pc
 +//go:cgo_import_static __tsan_write_range
 +//go:cgo_import_static __tsan_func_enter
 +//go:cgo_import_static __tsan_func_exit
 +
 +//go:cgo_import_static __tsan_go_atomic32_load
 +//go:cgo_import_static __tsan_go_atomic64_load
 +//go:cgo_import_static __tsan_go_atomic32_store
 +//go:cgo_import_static __tsan_go_atomic64_store
 +//go:cgo_import_static __tsan_go_atomic32_exchange
 +//go:cgo_import_static __tsan_go_atomic64_exchange
 +//go:cgo_import_static __tsan_go_atomic32_fetch_add
 +//go:cgo_import_static __tsan_go_atomic64_fetch_add
 +//go:cgo_import_static __tsan_go_atomic32_compare_exchange
 +//go:cgo_import_static __tsan_go_atomic64_compare_exchange
 +
++// start/end of global data (data+bss).
++var racedatastart uintptr
++var racedataend uintptr
++
 +// start/end of heap for race_amd64.s
 +var racearenastart uintptr
 +var racearenaend uintptr
 +
 +func racefuncenter(uintptr)
 +func racefuncexit()
 +func racereadrangepc1(uintptr, uintptr, uintptr)
 +func racewriterangepc1(uintptr, uintptr, uintptr)
 +func racesymbolizethunk(uintptr)
 +
 +// racecall allows calling an arbitrary function f from C race runtime
 +// with up to 4 uintptr arguments.
 +func racecall(*byte, uintptr, uintptr, uintptr, uintptr)
 +
 +// checks if the address has shadow (i.e. heap or data/bss)
 +//go:nosplit
 +func isvalidaddr(addr unsafe.Pointer) bool {
 +      return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
-       start := uintptr(unsafe.Pointer(&noptrdata)) &^ (_PageSize - 1)
-       size := round(uintptr(unsafe.Pointer(&enoptrbss))-start, _PageSize)
++              racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
 +}
 +
 +//go:nosplit
 +func raceinit() uintptr {
 +      // cgo is required to initialize libc, which is used by race runtime
 +      if !iscgo {
 +              gothrow("raceinit: race build must use cgo")
 +      }
 +
 +      var racectx uintptr
 +      racecall(&__tsan_init, uintptr(unsafe.Pointer(&racectx)), funcPC(racesymbolizethunk), 0, 0)
 +
 +      // Round data segment to page boundaries, because it's used in mmap().
++      start := ^uintptr(0)
++      end := uintptr(0)
++      if start > uintptr(unsafe.Pointer(&noptrdata)) {
++              start = uintptr(unsafe.Pointer(&noptrdata))
++      }
++      if start > uintptr(unsafe.Pointer(&data)) {
++              start = uintptr(unsafe.Pointer(&data))
++      }
++      if start > uintptr(unsafe.Pointer(&noptrbss)) {
++              start = uintptr(unsafe.Pointer(&noptrbss))
++      }
++      if start > uintptr(unsafe.Pointer(&bss)) {
++              start = uintptr(unsafe.Pointer(&bss))
++      }
++      if end < uintptr(unsafe.Pointer(&enoptrdata)) {
++              end = uintptr(unsafe.Pointer(&enoptrdata))
++      }
++      if end < uintptr(unsafe.Pointer(&edata)) {
++              end = uintptr(unsafe.Pointer(&edata))
++      }
++      if end < uintptr(unsafe.Pointer(&enoptrbss)) {
++              end = uintptr(unsafe.Pointer(&enoptrbss))
++      }
++      if end < uintptr(unsafe.Pointer(&ebss)) {
++              end = uintptr(unsafe.Pointer(&ebss))
++      }
++      size := round(end-start, _PageSize)
 +      racecall(&__tsan_map_shadow, start, size, 0, 0)
++      racedatastart = start
++      racedataend = start + size
 +
 +      return racectx
 +}
 +
 +//go:nosplit
 +func racefini() {
 +      racecall(&__tsan_fini, 0, 0, 0, 0)
 +}
 +
 +//go:nosplit
 +func racemapshadow(addr unsafe.Pointer, size uintptr) {
 +      if racearenastart == 0 {
 +              racearenastart = uintptr(addr)
 +      }
 +      if racearenaend < uintptr(addr)+size {
 +              racearenaend = uintptr(addr) + size
 +      }
 +      racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
 +}
 +
 +//go:nosplit
 +func racemalloc(p unsafe.Pointer, sz uintptr) {
 +      racecall(&__tsan_malloc, uintptr(p), sz, 0, 0)
 +}
 +
 +//go:nosplit
 +func racegostart(pc uintptr) uintptr {
 +      _g_ := getg()
 +      var spawng *g
 +      if _g_.m.curg != nil {
 +              spawng = _g_.m.curg
 +      } else {
 +              spawng = _g_
 +      }
 +
 +      var racectx uintptr
 +      racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
 +      return racectx
 +}
 +
 +//go:nosplit
 +func racegoend() {
 +      racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
 +}
 +
 +//go:nosplit
 +func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
 +      _g_ := getg()
 +      if _g_ != _g_.m.curg {
 +              // The call is coming from manual instrumentation of Go code running on g0/gsignal.
 +              // Not interesting.
 +              return
 +      }
 +      if callpc != 0 {
 +              racefuncenter(callpc)
 +      }
 +      racewriterangepc1(uintptr(addr), sz, pc)
 +      if callpc != 0 {
 +              racefuncexit()
 +      }
 +}
 +
 +//go:nosplit
 +func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
 +      _g_ := getg()
 +      if _g_ != _g_.m.curg {
 +              // The call is coming from manual instrumentation of Go code running on g0/gsignal.
 +              // Not interesting.
 +              return
 +      }
 +      if callpc != 0 {
 +              racefuncenter(callpc)
 +      }
 +      racereadrangepc1(uintptr(addr), sz, pc)
 +      if callpc != 0 {
 +              racefuncexit()
 +      }
 +}
 +
 +//go:nosplit
 +func racewriteobjectpc(addr unsafe.Pointer, t *_type, callpc, pc uintptr) {
 +      kind := t.kind & _KindMask
 +      if kind == _KindArray || kind == _KindStruct {
 +              racewriterangepc(addr, t.size, callpc, pc)
 +      } else {
 +              racewritepc(addr, callpc, pc)
 +      }
 +}
 +
 +//go:nosplit
 +func racereadobjectpc(addr unsafe.Pointer, t *_type, callpc, pc uintptr) {
 +      kind := t.kind & _KindMask
 +      if kind == _KindArray || kind == _KindStruct {
 +              racereadrangepc(addr, t.size, callpc, pc)
 +      } else {
 +              racereadpc(addr, callpc, pc)
 +      }
 +}
 +
 +//go:nosplit
 +func raceacquire(addr unsafe.Pointer) {
 +      raceacquireg(getg(), addr)
 +}
 +
 +//go:nosplit
 +func raceacquireg(gp *g, addr unsafe.Pointer) {
 +      if getg().raceignore != 0 || !isvalidaddr(addr) {
 +              return
 +      }
 +      racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
 +}
 +
 +//go:nosplit
 +func racerelease(addr unsafe.Pointer) {
 +      _g_ := getg()
 +      if _g_.raceignore != 0 || !isvalidaddr(addr) {
 +              return
 +      }
 +      racereleaseg(_g_, addr)
 +}
 +
 +//go:nosplit
 +func racereleaseg(gp *g, addr unsafe.Pointer) {
 +      if getg().raceignore != 0 || !isvalidaddr(addr) {
 +              return
 +      }
 +      racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
 +}
 +
 +//go:nosplit
 +func racereleasemerge(addr unsafe.Pointer) {
 +      racereleasemergeg(getg(), addr)
 +}
 +
 +//go:nosplit
 +func racereleasemergeg(gp *g, addr unsafe.Pointer) {
 +      if getg().raceignore != 0 || !isvalidaddr(addr) {
 +              return
 +      }
 +      racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
 +}
 +
 +//go:nosplit
 +func racefingo() {
 +      racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
 +}
 +
 +//go:nosplit
 +
 +func RaceAcquire(addr unsafe.Pointer) {
 +      raceacquire(addr)
 +}
 +
 +//go:nosplit
 +
 +func RaceRelease(addr unsafe.Pointer) {
 +      racerelease(addr)
 +}
 +
 +//go:nosplit
 +
 +func RaceReleaseMerge(addr unsafe.Pointer) {
 +      racereleasemerge(addr)
 +}
 +
 +//go:nosplit
 +
 +// RaceDisable disables handling of race events in the current goroutine.
 +func RaceDisable() {
 +      _g_ := getg()
 +      if _g_.raceignore == 0 {
 +              racecall(&__tsan_go_ignore_sync_begin, _g_.racectx, 0, 0, 0)
 +      }
 +      _g_.raceignore++
 +}
 +
 +//go:nosplit
 +
 +// RaceEnable re-enables handling of race events in the current goroutine.
 +func RaceEnable() {
 +      _g_ := getg()
 +      _g_.raceignore--
 +      if _g_.raceignore == 0 {
 +              racecall(&__tsan_go_ignore_sync_end, _g_.racectx, 0, 0, 0)
 +      }
 +}
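
RaceEnable and RaceDisable above are exported so instrumented programs can bracket sections whose race events should be ignored. A minimal usage sketch, assuming a -race build (the functions exist only under the race build tag) and balanced calls, since raceignore is a nesting counter:

// +build race

package main

import "runtime"

func main() {
	runtime.RaceDisable() // raceignore++; the first call tells the race runtime to start ignoring sync events
	// ... operations whose race events should be ignored ...
	runtime.RaceEnable() // raceignore--; handling resumes once the counter returns to zero
}
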
diff --cc src/runtime/race_amd64.s
index a7f44870a8671e908efe3d6497ad7b5639a5ad86,a96d9de123fab8bf9a9ffa3f30d79b29e310df53..d54d9798f032ccc6c2f8043e3ab9c6d87f008a07
@@@ -139,22 -138,20 +139,20 @@@ TEXT    racecalladdr<>(SB), NOSPLIT, $0-
        get_tls(R12)
        MOVQ    g(R12), R14
        MOVQ    g_racectx(R14), RARG0   // goroutine context
-       // Check that addr is within [arenastart, arenaend) or within [noptrdata, enoptrbss).
+       // Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
        CMPQ    RARG1, runtime·racearenastart(SB)
 -      JB      racecalladdr_data
 +      JB      data
        CMPQ    RARG1, runtime·racearenaend(SB)
 -      JB      racecalladdr_call
 -racecalladdr_data:
 +      JB      call
 +data:
-       MOVQ    $runtime·noptrdata(SB), R13
-       CMPQ    RARG1, R13
+       CMPQ    RARG1, runtime·racedatastart(SB)
 -      JB      racecalladdr_ret
 +      JB      ret
-       MOVQ    $runtime·enoptrbss(SB), R13
-       CMPQ    RARG1, R13
+       CMPQ    RARG1, runtime·racedataend(SB)
 -      JAE     racecalladdr_ret
 -racecalladdr_call:
 +      JAE     ret
 +call:
        MOVQ    AX, AX          // w/o this 6a miscompiles this function
        JMP     racecall<>(SB)
 -racecalladdr_ret:
 +ret:
        RET
  
  // func runtime·racefuncenter(pc uintptr)
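
The relabeled branches above perform the same range test as isvalidaddr in race1.go: an access is forwarded to the race runtime only when its address has shadow memory, i.e. lies in the heap arena or in the new data+bss range. Restated in Go for reference (a sketch, not part of this commit; the parameters stand in for the runtime globals):

package race

// hasShadow restates the racecalladdr<> range test: an address is
// instrumented only when it falls in [arenaStart, arenaEnd) or in
// the global data+bss range [dataStart, dataEnd).
func hasShadow(addr, arenaStart, arenaEnd, dataStart, dataEnd uintptr) bool {
	return arenaStart <= addr && addr < arenaEnd ||
		dataStart <= addr && addr < dataEnd
}
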
Simple merge
Simple merge