}
}
}
+
+// TestTimePprof runs the TimeProf testprog (which spins on time.Since
+// while a CPU profile is being collected) and then checks via
+// "go tool pprof" that the hottest sample is not attributed to the
+// _ExternalCode placeholder — which would mean the profiler failed to
+// resolve a PC, e.g. one inside the kernel's VDSO page.
+func TestTimePprof(t *testing.T) {
+ fn := runTestProg(t, "testprog", "TimeProf")
+ // The testprog prints the profile file's name; strip the newline.
+ fn = strings.TrimSpace(fn)
+ defer os.Remove(fn)
+
+ cmd := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), "tool", "pprof", "-top", "-nodecount=1", fn))
+ // NOTE(review): presumably pprof needs an explicit temp dir because
+ // CleanCmdEnv scrubs the environment — confirm against CleanCmdEnv.
+ cmd.Env = append(cmd.Env, "PPROF_TMPDIR="+os.TempDir())
+ top, err := cmd.CombinedOutput()
+ t.Logf("%s", top)
+ if err != nil {
+ t.Error(err)
+ } else if bytes.Contains(top, []byte("ExternalCode")) {
+ t.Error("profiler refers to ExternalCode")
+ }
+}
func _LostExternalCode() { _LostExternalCode() }
func _GC() { _GC() }
func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
+
+// _VDSO is a placeholder like the fakes above: profile samples whose PC
+// lies in the VDSO page are attributed to its entry PC
+// (funcPC(_VDSO) in sigprof) so they get a readable name in profiles.
+func _VDSO() { _VDSO() }
// Counts SIGPROFs received while in atomic64 critical section, on mips{,le}
var lostAtomic64Count uint64
// Collect Go stack that leads to the call.
n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
}
+ if n == 0 && mp != nil && mp.vdsoSP != 0 {
+ n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
+ }
if n == 0 {
// If all of the above has failed, account it against abstract "System" or "GC".
n = 2
- // "ExternalCode" is better than "etext".
- if pc > firstmoduledata.etext {
+ if inVDSOPage(pc) {
+ pc = funcPC(_VDSO) + sys.PCQuantum
+ } else if pc > firstmoduledata.etext {
+ // "ExternalCode" is better than "etext".
pc = funcPC(_ExternalCode) + sys.PCQuantum
}
stk[0] = pc
libcallg guintptr
syscall libcall // stores syscall parameters on windows
+ vdsoSP uintptr // SP for traceback while in VDSO call (0 if not in call)
+ vdsoPC uintptr // PC for traceback while in VDSO call
+
mOS
}
get_tls(CX)
MOVL g(CX), AX
- MOVL g_m(AX), CX
+ MOVL g_m(AX), SI // SI unchanged by C code.
- CMPL AX, m_curg(CX) // Only switch if on curg.
+ // Set vdsoPC and vdsoSP for SIGPROF traceback.
+ MOVL 0(SP), DX
+ MOVL DX, m_vdsoPC(SI)
+ LEAL sec+0(SP), DX
+ MOVL DX, m_vdsoSP(SI)
+
+ CMPL AX, m_curg(SI) // Only switch if on curg.
JNE noswitch
- MOVL m_g0(CX), DX
+ MOVL m_g0(SI), DX
MOVL (g_sched+gobuf_sp)(DX), SP // Set SP to g0 stack
noswitch:
MOVL 12(SP), BX // nsec
MOVL BP, SP // Restore real SP
+ MOVL $0, m_vdsoSP(SI)
// sec is in AX, nsec in BX
MOVL AX, sec_lo+0(FP)
get_tls(CX)
MOVL g(CX), AX
- MOVL g_m(AX), CX
+ MOVL g_m(AX), SI // SI unchanged by C code.
+
+ // Set vdsoPC and vdsoSP for SIGPROF traceback.
+ MOVL 0(SP), DX
+ MOVL DX, m_vdsoPC(SI)
+ LEAL ret+0(SP), DX
+ MOVL DX, m_vdsoSP(SI)
- CMPL AX, m_curg(CX) // Only switch if on curg.
+ CMPL AX, m_curg(SI) // Only switch if on curg.
JNE noswitch
- MOVL m_g0(CX), DX
+ MOVL m_g0(SI), DX
MOVL (g_sched+gobuf_sp)(DX), SP // Set SP to g0 stack
noswitch:
MOVL 12(SP), BX // nsec
MOVL BP, SP // Restore real SP
+ MOVL $0, m_vdsoSP(SI)
// sec is in AX, nsec in BX
// convert to DX:AX nsec
get_tls(CX)
MOVQ g(CX), AX
- MOVQ g_m(AX), CX
+ MOVQ g_m(AX), BX // BX unchanged by C code.
- CMPQ AX, m_curg(CX) // Only switch if on curg.
+ // Set vdsoPC and vdsoSP for SIGPROF traceback.
+ MOVQ 0(SP), DX
+ MOVQ DX, m_vdsoPC(BX)
+ LEAQ sec+0(SP), DX
+ MOVQ DX, m_vdsoSP(BX)
+
+ CMPQ AX, m_curg(BX) // Only switch if on curg.
JNE noswitch
- MOVQ m_g0(CX), DX
+ MOVQ m_g0(BX), DX
MOVQ (g_sched+gobuf_sp)(DX), SP // Set SP to g0 stack
noswitch:
MOVQ 0(SP), AX // sec
MOVQ 8(SP), DX // nsec
MOVQ BP, SP // Restore real SP
+ MOVQ $0, m_vdsoSP(BX)
MOVQ AX, sec+0(FP)
MOVL DX, nsec+8(FP)
RET
MOVL 8(SP), DX // usec
IMULQ $1000, DX
MOVQ BP, SP // Restore real SP
+ MOVQ $0, m_vdsoSP(BX)
MOVQ AX, sec+0(FP)
MOVL DX, nsec+8(FP)
RET
TEXT runtime·nanotime(SB),NOSPLIT,$0-8
// Switch to g0 stack. See comment above in runtime·walltime.
- MOVQ SP, BP // Save old SP; BX unchanged by C code.
+ MOVQ SP, BP // Save old SP; BP unchanged by C code.
get_tls(CX)
MOVQ g(CX), AX
- MOVQ g_m(AX), CX
+ MOVQ g_m(AX), BX // BX unchanged by C code.
+
+ // Set vdsoPC and vdsoSP for SIGPROF traceback.
+ MOVQ 0(SP), DX
+ MOVQ DX, m_vdsoPC(BX)
+ LEAQ ret+0(SP), DX
+ MOVQ DX, m_vdsoSP(BX)
- CMPQ AX, m_curg(CX) // Only switch if on curg.
+ CMPQ AX, m_curg(BX) // Only switch if on curg.
JNE noswitch
- MOVQ m_g0(CX), DX
+ MOVQ m_g0(BX), DX
MOVQ (g_sched+gobuf_sp)(DX), SP // Set SP to g0 stack
noswitch:
MOVQ 0(SP), AX // sec
MOVQ 8(SP), DX // nsec
MOVQ BP, SP // Restore real SP
+ MOVQ $0, m_vdsoSP(BX)
// sec is in AX, nsec in DX
// return nsec in AX
IMULQ $1000000000, AX
MOVQ 0(SP), AX // sec
MOVL 8(SP), DX // usec
MOVQ BP, SP // Restore real SP
+ MOVQ $0, m_vdsoSP(BX)
IMULQ $1000, DX
// sec is in AX, nsec in DX
// return nsec in AX
// Save old SP. Use R13 instead of SP to avoid linker rewriting the offsets.
MOVW R13, R4 // R4 is unchanged by C code.
- MOVW g_m(g), R1
- MOVW m_curg(R1), R0
+ MOVW g_m(g), R5 // R5 is unchanged by C code.
+
+ // Set vdsoPC and vdsoSP for SIGPROF traceback.
+ MOVW LR, m_vdsoPC(R5)
+ MOVW R13, m_vdsoSP(R5)
+
+ MOVW m_curg(R5), R0
CMP g, R0 // Only switch if on curg.
B.NE noswitch
- MOVW m_g0(R1), R0
+ MOVW m_g0(R5), R0
MOVW (g_sched+gobuf_sp)(R0), R13 // Set SP to g0 stack
noswitch:
MOVW 12(R13), R2 // nsec
MOVW R4, R13 // Restore real SP
+ MOVW $0, R1
+ MOVW R1, m_vdsoSP(R5)
MOVW R0, sec_lo+0(FP)
- MOVW $0, R1
MOVW R1, sec_hi+4(FP)
MOVW R2, nsec+8(FP)
RET
// Save old SP. Use R13 instead of SP to avoid linker rewriting the offsets.
MOVW R13, R4 // R4 is unchanged by C code.
- MOVW g_m(g), R1
- MOVW m_curg(R1), R0
+ MOVW g_m(g), R5 // R5 is unchanged by C code.
+
+ // Set vdsoPC and vdsoSP for SIGPROF traceback.
+ MOVW LR, m_vdsoPC(R5)
+ MOVW R13, m_vdsoSP(R5)
+
+ MOVW m_curg(R5), R0
CMP g, R0 // Only switch if on curg.
B.NE noswitch
- MOVW m_g0(R1), R0
+ MOVW m_g0(R5), R0
MOVW (g_sched+gobuf_sp)(R0), R13 // Set SP to g0 stack
noswitch:
MOVW 12(R13), R2 // nsec
MOVW R4, R13 // Restore real SP
+ MOVW $0, R4
+ MOVW R4, m_vdsoSP(R5)
MOVW $1000000000, R3
MULLU R0, R3, (R1, R0)
- MOVW $0, R4
ADD.S R2, R0
ADC R4, R1
--- /dev/null
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "runtime/pprof"
+ "time"
+)
+
+// Register TimeProf so the test driver can run it by name.
+func init() {
+ register("TimeProf", TimeProf)
+}
+
+// TimeProf collects a CPU profile into a temporary file while
+// busy-looping for 1/10 second, then prints the file's name on stdout
+// for the parent test (TestTimePprof) to inspect.
+// Any error exits the process with status 2.
+func TimeProf() {
+ f, err := ioutil.TempFile("", "timeprof")
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+
+ if err := pprof.StartCPUProfile(f); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+
+ t0 := time.Now()
+ // We should get a profiling signal 100 times a second,
+ // so running for 1/10 second should be sufficient.
+ for time.Since(t0) < time.Second/10 {
+ }
+
+ pprof.StopCPUProfile()
+
+ // Close before printing so the profile is fully flushed to disk
+ // when the caller reads it.
+ name := f.Name()
+ if err := f.Close(); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+
+ fmt.Println(name)
+}
--- /dev/null
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux,!386,!amd64,!arm !linux
+
+package runtime
+
+// A dummy version of inVDSOPage for targets that don't use a VDSO.
+
+// inVDSOPage reports whether pc is within the VDSO mapping.
+// This stub always reports false, since these targets use no VDSO.
+func inVDSOPage(pc uintptr) bool {
+ return false
+}
vdsoParseSymbols(info1, vdsoFindVersion(info1, &linux26))
}
}
+
+
+// inVDSOPage reports whether pc is on the VDSO page.
+// It derives the page address from the first resolved VDSO symbol.
+// NOTE(review): this checks only the first non-zero symbol, which is
+// correct only if every VDSO symbol lives on that same page — confirm.
+func inVDSOPage(pc uintptr) bool {
+ for _, k := range vdsoSymbolKeys {
+ if *k.ptr != 0 {
+ page := *k.ptr &^ (physPageSize - 1)
+ return pc >= page && pc < page+physPageSize
+ }
+ }
+ return false
+}