MOVW g, R0
RET
-TEXT runtime·getcallerpc(SB),NOSPLIT,$4-8
- MOVW 8(R13), R0 // LR saved by caller
- MOVW R0, ret+4(FP)
+TEXT runtime·getcallerpc(SB),NOSPLIT,$-4-4
+ MOVW 0(R13), R0 // LR saved by caller
+ MOVW R0, ret+0(FP)
RET
TEXT runtime·emptyfunc(SB),0,$0-0
MOVD savedR27-8(SP), R27
RET
-TEXT runtime·getcallerpc(SB),NOSPLIT,$8-16
- MOVD 16(RSP), R0 // LR saved by caller
- MOVD R0, ret+8(FP)
+TEXT runtime·getcallerpc(SB),NOSPLIT,$-8-8
+ MOVD 0(RSP), R0 // LR saved by caller
+ MOVD R0, ret+0(FP)
RET
TEXT runtime·abort(SB),NOSPLIT,$-8-0
JAL runtime·save_g(SB)
RET
-TEXT runtime·getcallerpc(SB),NOSPLIT,$8-16
- MOVV 16(R29), R1 // LR saved by caller
- MOVV R1, ret+8(FP)
+TEXT runtime·getcallerpc(SB),NOSPLIT,$-8-8
+ MOVV 0(R29), R1 // LR saved by caller
+ MOVV R1, ret+0(FP)
RET
TEXT runtime·abort(SB),NOSPLIT,$-8-0
JAL runtime·save_g(SB)
RET
-TEXT runtime·getcallerpc(SB),NOSPLIT,$4-8
- MOVW 8(R29), R1 // LR saved by caller
- MOVW R1, ret+4(FP)
+TEXT runtime·getcallerpc(SB),NOSPLIT,$-4-4
+ MOVW 0(R29), R1 // LR saved by caller
+ MOVW R1, ret+0(FP)
RET
TEXT runtime·abort(SB),NOSPLIT,$0-0
MOVD R4, LR
RET
-TEXT runtime·getcallerpc(SB),NOSPLIT,$8-16
- MOVD FIXED_FRAME+8(R1), R3 // LR saved by caller
- MOVD R3, ret+8(FP)
+TEXT runtime·getcallerpc(SB),NOSPLIT|NOFRAME,$0-8
+ MOVD 0(R1), R3 // LR saved by caller
+ MOVD R3, ret+0(FP)
RET
TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0
MOVD R1, LR
RET
-TEXT runtime·getcallerpc(SB),NOSPLIT,$8-16
- MOVD 16(R15), R3 // LR saved by caller
- MOVD R3, ret+8(FP)
+TEXT runtime·getcallerpc(SB),NOSPLIT|NOFRAME,$0-8
+ MOVD 0(R15), R3 // LR saved by caller
+ MOVD R3, ret+0(FP)
RET
TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0
// entry point for c <- x from compiled code
//go:nosplit
func chansend1(c *hchan, elem unsafe.Pointer) {
- chansend(c, elem, true, getcallerpc(unsafe.Pointer(&c)))
+ chansend(c, elem, true, getcallerpc())
}
/*
}
if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&c))
+ callerpc := getcallerpc()
racewritepc(unsafe.Pointer(c), callerpc, funcPC(closechan))
racerelease(unsafe.Pointer(c))
}
// }
//
func selectnbsend(c *hchan, elem unsafe.Pointer) (selected bool) {
- return chansend(c, elem, false, getcallerpc(unsafe.Pointer(&c)))
+ return chansend(c, elem, false, getcallerpc())
}
// compiler implements
//go:linkname reflect_chansend reflect.chansend
func reflect_chansend(c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
- return chansend(c, elem, !nb, getcallerpc(unsafe.Pointer(&c)))
+ return chansend(c, elem, !nb, getcallerpc())
}
//go:linkname reflect_chanrecv reflect.chanrecv
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc()
pc := funcPC(mapaccess1)
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc()
pc := funcPC(mapaccess2)
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc()
pc := funcPC(mapassign)
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc()
pc := funcPC(mapdelete)
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
// Both need to have zeroed hiter since the struct contains pointers.
func mapiterinit(t *maptype, h *hmap, it *hiter) {
if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc()
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
}
func mapiternext(it *hiter) {
h := it.h
if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&it))
+ callerpc := getcallerpc()
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
}
if h.flags&hashWriting != 0 {
return 0
}
if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&h))
+ callerpc := getcallerpc()
racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
}
return h.count
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc()
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
}
if h == nil || h.count == 0 {
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc()
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
}
if h == nil || h.count == 0 {
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc()
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
}
if h == nil || h.count == 0 {
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc()
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
}
if h == nil || h.count == 0 {
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc()
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
}
if h == nil || h.count == 0 {
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc()
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
}
if h == nil || h.count == 0 {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc()
racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
}
if h.flags&hashWriting != 0 {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc()
racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
}
if h.flags&hashWriting != 0 {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc()
racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
}
if h.flags&hashWriting != 0 {
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc()
racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
}
if h == nil || h.count == 0 {
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc()
racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
}
if h == nil || h.count == 0 {
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc()
racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
}
if h == nil || h.count == 0 {
func convT2E(t *_type, elem unsafe.Pointer) (e eface) {
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(unsafe.Pointer(&t)), funcPC(convT2E))
+ raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2E))
}
if msanenabled {
msanread(elem, t.size)
func convT2E16(t *_type, elem unsafe.Pointer) (e eface) {
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(unsafe.Pointer(&t)), funcPC(convT2E16))
+ raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2E16))
}
if msanenabled {
msanread(elem, t.size)
func convT2E32(t *_type, elem unsafe.Pointer) (e eface) {
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(unsafe.Pointer(&t)), funcPC(convT2E32))
+ raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2E32))
}
if msanenabled {
msanread(elem, t.size)
func convT2E64(t *_type, elem unsafe.Pointer) (e eface) {
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(unsafe.Pointer(&t)), funcPC(convT2E64))
+ raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2E64))
}
if msanenabled {
msanread(elem, t.size)
func convT2Estring(t *_type, elem unsafe.Pointer) (e eface) {
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(unsafe.Pointer(&t)), funcPC(convT2Estring))
+ raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Estring))
}
if msanenabled {
msanread(elem, t.size)
func convT2Eslice(t *_type, elem unsafe.Pointer) (e eface) {
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(unsafe.Pointer(&t)), funcPC(convT2Eslice))
+ raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Eslice))
}
if msanenabled {
msanread(elem, t.size)
func convT2Enoptr(t *_type, elem unsafe.Pointer) (e eface) {
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(unsafe.Pointer(&t)), funcPC(convT2Enoptr))
+ raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Enoptr))
}
if msanenabled {
msanread(elem, t.size)
func convT2I(tab *itab, elem unsafe.Pointer) (i iface) {
t := tab._type
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(unsafe.Pointer(&tab)), funcPC(convT2I))
+ raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2I))
}
if msanenabled {
msanread(elem, t.size)
func convT2I16(tab *itab, elem unsafe.Pointer) (i iface) {
t := tab._type
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(unsafe.Pointer(&tab)), funcPC(convT2I16))
+ raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2I16))
}
if msanenabled {
msanread(elem, t.size)
func convT2I32(tab *itab, elem unsafe.Pointer) (i iface) {
t := tab._type
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(unsafe.Pointer(&tab)), funcPC(convT2I32))
+ raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2I32))
}
if msanenabled {
msanread(elem, t.size)
func convT2I64(tab *itab, elem unsafe.Pointer) (i iface) {
t := tab._type
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(unsafe.Pointer(&tab)), funcPC(convT2I64))
+ raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2I64))
}
if msanenabled {
msanread(elem, t.size)
func convT2Istring(tab *itab, elem unsafe.Pointer) (i iface) {
t := tab._type
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(unsafe.Pointer(&tab)), funcPC(convT2Istring))
+ raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Istring))
}
if msanenabled {
msanread(elem, t.size)
func convT2Islice(tab *itab, elem unsafe.Pointer) (i iface) {
t := tab._type
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(unsafe.Pointer(&tab)), funcPC(convT2Islice))
+ raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Islice))
}
if msanenabled {
msanread(elem, t.size)
func convT2Inoptr(tab *itab, elem unsafe.Pointer) (i iface) {
t := tab._type
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(unsafe.Pointer(&tab)), funcPC(convT2Inoptr))
+ raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Inoptr))
}
if msanenabled {
msanread(elem, t.size)
//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
if raceenabled {
- raceWriteObjectPC(typ, dst, getcallerpc(unsafe.Pointer(&typ)), funcPC(reflect_typedmemmove))
- raceReadObjectPC(typ, src, getcallerpc(unsafe.Pointer(&typ)), funcPC(reflect_typedmemmove))
+ raceWriteObjectPC(typ, dst, getcallerpc(), funcPC(reflect_typedmemmove))
+ raceReadObjectPC(typ, src, getcallerpc(), funcPC(reflect_typedmemmove))
}
if msanenabled {
msanwrite(dst, typ.size)
srcp := src.array
if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&typ))
+ callerpc := getcallerpc()
pc := funcPC(slicecopy)
racewriterangepc(dstp, uintptr(n)*typ.size, callerpc, pc)
racereadrangepc(srcp, uintptr(n)*typ.size, callerpc, pc)
size := uintptr(n) * elemType.size
if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&elemType))
+ callerpc := getcallerpc()
pc := funcPC(reflect_typedslicecopy)
racewriterangepc(dst.array, size, callerpc, pc)
racereadrangepc(src.array, size, callerpc, pc)
r.AllocObjects = int64(mp.active.allocs)
r.FreeObjects = int64(mp.active.frees)
if raceenabled {
- racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(unsafe.Pointer(&r)), funcPC(MemProfile))
+ racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), funcPC(MemProfile))
}
if msanenabled {
msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
r.Count = bp.count
r.Cycles = bp.cycles
if raceenabled {
- racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(unsafe.Pointer(&p)), funcPC(BlockProfile))
+ racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), funcPC(BlockProfile))
}
if msanenabled {
msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
// Save current goroutine.
sp := getcallersp(unsafe.Pointer(&p))
- pc := getcallerpc(unsafe.Pointer(&p))
+ pc := getcallerpc()
systemstack(func() {
saveg(pc, sp, gp, &r[0])
})
if len(buf) > 0 {
gp := getg()
sp := getcallersp(unsafe.Pointer(&buf))
- pc := getcallerpc(unsafe.Pointer(&buf))
+ pc := getcallerpc()
systemstack(func() {
g0 := getg()
// Force traceback=1 to override GOTRACEBACK setting,
}
if gp.m.curg == nil || gp == gp.m.curg {
goroutineheader(gp)
- pc := getcallerpc(unsafe.Pointer(&p))
+ pc := getcallerpc()
sp := getcallersp(unsafe.Pointer(&p))
systemstack(func() {
traceback(pc, sp, 0, gp)
gp.m.traceback = 2
print("tracefree(", p, ", ", hex(size), ")\n")
goroutineheader(gp)
- pc := getcallerpc(unsafe.Pointer(&p))
+ pc := getcallerpc()
sp := getcallersp(unsafe.Pointer(&p))
systemstack(func() {
traceback(pc, sp, 0, gp)
if mp.profilehz != 0 {
// leave pc/sp for cpu profiler
mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc(unsafe.Pointer(&fn))
+ mp.libcallpc = getcallerpc()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
mp.libcallsp = getcallersp(unsafe.Pointer(&fn))
// Until the copy completes, we can only call nosplit routines.
sp := getcallersp(unsafe.Pointer(&siz))
argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
- callerpc := getcallerpc(unsafe.Pointer(&siz))
+ callerpc := getcallerpc()
d := newdefer(siz)
if d._panic != nil {
//go:nosplit
func dopanic(unused int) {
- pc := getcallerpc(unsafe.Pointer(&unused))
+ pc := getcallerpc()
sp := getcallersp(unsafe.Pointer(&unused))
gp := getg()
systemstack(func() {
// Standard syscall entry used by the go syscall library and normal cgo calls.
//go:nosplit
func entersyscall(dummy int32) {
- reentersyscall(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))
+ reentersyscall(getcallerpc(), getcallersp(unsafe.Pointer(&dummy)))
}
func entersyscall_sysmon() {
_g_.m.p.ptr().syscalltick++
// Leave SP around for GC and traceback.
- pc := getcallerpc(unsafe.Pointer(&dummy))
+ pc := getcallerpc()
sp := getcallersp(unsafe.Pointer(&dummy))
save(pc, sp)
_g_.syscallsp = _g_.sched.sp
systemstack(entersyscallblock_handoff)
// Resave for traceback during blocked call.
- save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))
+ save(getcallerpc(), getcallersp(unsafe.Pointer(&dummy)))
_g_.m.locks--
}
//go:nosplit
func newproc(siz int32, fn *funcval) {
argp := add(unsafe.Pointer(&fn), sys.PtrSize)
- pc := getcallerpc(unsafe.Pointer(&siz))
+ pc := getcallerpc()
systemstack(func() {
newproc1(fn, (*uint8)(argp), siz, 0, pc)
})
}
func selectsend(sel *hselect, c *hchan, elem unsafe.Pointer) {
- pc := getcallerpc(unsafe.Pointer(&sel))
+ pc := getcallerpc()
i := sel.ncase
if i >= sel.tcase {
throw("selectsend: too many cases")
}
func selectrecv(sel *hselect, c *hchan, elem unsafe.Pointer, received *bool) {
- pc := getcallerpc(unsafe.Pointer(&sel))
+ pc := getcallerpc()
i := sel.ncase
if i >= sel.tcase {
throw("selectrecv: too many cases")
}
func selectdefault(sel *hselect) {
- pc := getcallerpc(unsafe.Pointer(&sel))
+ pc := getcallerpc()
i := sel.ncase
if i >= sel.tcase {
throw("selectdefault: too many cases")
// The SSA backend might prefer the new length or to return only ptr/cap and save stack space.
func growslice(et *_type, old slice, cap int) slice {
if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&et))
+ callerpc := getcallerpc()
racereadrangepc(old.array, uintptr(old.len*int(et.size)), callerpc, funcPC(growslice))
}
if msanenabled {
}
if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&to))
+ callerpc := getcallerpc()
pc := funcPC(slicecopy)
racewriterangepc(to.array, uintptr(n*int(width)), callerpc, pc)
racereadrangepc(fm.array, uintptr(n*int(width)), callerpc, pc)
}
if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&to))
+ callerpc := getcallerpc()
pc := funcPC(slicestringcopy)
racewriterangepc(unsafe.Pointer(&to[0]), uintptr(n), callerpc, pc)
}
if raceenabled {
racereadrangepc(unsafe.Pointer(&b[0]),
uintptr(l),
- getcallerpc(unsafe.Pointer(&buf)),
+ getcallerpc(),
funcPC(slicebytetostring))
}
if msanenabled {
if raceenabled && len(b) > 0 {
racereadrangepc(unsafe.Pointer(&b[0]),
uintptr(len(b)),
- getcallerpc(unsafe.Pointer(&b)),
+ getcallerpc(),
funcPC(slicebytetostringtmp))
}
if msanenabled && len(b) > 0 {
if raceenabled && len(a) > 0 {
racereadrangepc(unsafe.Pointer(&a[0]),
uintptr(len(a))*unsafe.Sizeof(a[0]),
- getcallerpc(unsafe.Pointer(&buf)),
+ getcallerpc(),
funcPC(slicerunetostring))
}
if msanenabled && len(a) > 0 {
// getcallerpc returns the program counter (PC) of its caller's caller.
// getcallersp returns the stack pointer (SP) of its caller's caller.
-// For both, the argp must be a pointer to the caller's first function argument.
+// For getcallersp, argp must be a pointer to the caller's first function argument.
// The implementation may or may not use argp, depending on
// the architecture. The implementation may be a compiler
// intrinsic; there is not necessarily code implementing this
// For example:
//
// func f(arg1, arg2, arg3 int) {
-// pc := getcallerpc(unsafe.Pointer(&arg1))
+// pc := getcallerpc()
// sp := getcallersp(unsafe.Pointer(&arg1))
// }
//
// immediately and can only be passed to nosplit functions.
//go:noescape
-func getcallerpc(argp unsafe.Pointer) uintptr
+func getcallerpc() uintptr
//go:nosplit
func getcallersp(argp unsafe.Pointer) uintptr {
func callers(skip int, pcbuf []uintptr) int {
sp := getcallersp(unsafe.Pointer(&skip))
- pc := getcallerpc(unsafe.Pointer(&skip))
+ pc := getcallerpc()
gp := getg()
var n int
systemstack(func() {