return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "LoadAcq64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ },
+ sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
return nil
},
sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "StoreRel64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.PPC64)
addF("runtime/internal/atomic", "Xchg",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
+ alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...)
+ alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...)
+ alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...)
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
+ alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...)
+ alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...)
+ alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...)
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
for _, b := range f.Blocks {
for _, v := range b.Values {
switch v.Op {
- case OpLoad, OpAtomicLoad8, OpAtomicLoad32, OpAtomicLoad64, OpAtomicLoadPtr, OpAtomicLoadAcq32:
+ case OpLoad, OpAtomicLoad8, OpAtomicLoad32, OpAtomicLoad64, OpAtomicLoadPtr, OpAtomicLoadAcq32, OpAtomicLoadAcq64:
loadAddr.add(v.Args[0].ID)
case OpMove:
loadAddr.add(v.Args[1].ID)
// atomic intrinsics
(AtomicLoad(8|32|64|Ptr) ptr mem) => (LoweredAtomicLoad(8|32|64|Ptr) [1] ptr mem)
-(AtomicLoadAcq32 ptr mem) => (LoweredAtomicLoad32 [0] ptr mem)
+(AtomicLoadAcq(32|64) ptr mem) => (LoweredAtomicLoad(32|64) [0] ptr mem)
(AtomicStore(8|32|64) ptr val mem) => (LoweredAtomicStore(8|32|64) [1] ptr val mem)
-(AtomicStoreRel32 ptr val mem) => (LoweredAtomicStore32 [0] ptr val mem)
+(AtomicStoreRel(32|64) ptr val mem) => (LoweredAtomicStore(32|64) [0] ptr val mem)
//(AtomicStorePtrNoWB ptr val mem) => (STLR ptr val mem)
(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
{name: "AtomicLoad64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
{name: "AtomicLoadPtr", argLength: 2, typ: "(BytePtr,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
{name: "AtomicLoadAcq32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Lock acquisition, returns loaded value and new memory.
+ {name: "AtomicLoadAcq64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. arg1=memory. Lock acquisition, returns loaded value and new memory.
{name: "AtomicStore8", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
{name: "AtomicStore32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
{name: "AtomicStore64", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
{name: "AtomicStorePtrNoWB", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
{name: "AtomicStoreRel32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Lock release, returns memory.
+ {name: "AtomicStoreRel64", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Lock release, returns memory.
{name: "AtomicExchange32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
{name: "AtomicExchange64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
{name: "AtomicAdd32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
OpAtomicLoad64
OpAtomicLoadPtr
OpAtomicLoadAcq32
+ OpAtomicLoadAcq64
OpAtomicStore8
OpAtomicStore32
OpAtomicStore64
OpAtomicStorePtrNoWB
OpAtomicStoreRel32
+ OpAtomicStoreRel64
OpAtomicExchange32
OpAtomicExchange64
OpAtomicAdd32
argLen: 2,
generic: true,
},
+ {
+ name: "AtomicLoadAcq64",
+ argLen: 2,
+ generic: true,
+ },
{
name: "AtomicStore8",
argLen: 3,
hasSideEffects: true,
generic: true,
},
+ {
+ name: "AtomicStoreRel64",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
{
name: "AtomicExchange32",
argLen: 3,
return rewriteValuePPC64_OpAtomicLoad8(v)
case OpAtomicLoadAcq32:
return rewriteValuePPC64_OpAtomicLoadAcq32(v)
+ case OpAtomicLoadAcq64:
+ return rewriteValuePPC64_OpAtomicLoadAcq64(v)
case OpAtomicLoadPtr:
return rewriteValuePPC64_OpAtomicLoadPtr(v)
case OpAtomicOr8:
return rewriteValuePPC64_OpAtomicStore8(v)
case OpAtomicStoreRel32:
return rewriteValuePPC64_OpAtomicStoreRel32(v)
+ case OpAtomicStoreRel64:
+ return rewriteValuePPC64_OpAtomicStoreRel64(v)
case OpAvg64u:
return rewriteValuePPC64_OpAvg64u(v)
case OpBitLen32:
return true
}
}
+func rewriteValuePPC64_OpAtomicLoadAcq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadAcq64 ptr mem)
+ // result: (LoweredAtomicLoad64 [0] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoad64)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
func rewriteValuePPC64_OpAtomicLoadPtr(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
return true
}
}
+func rewriteValuePPC64_OpAtomicStoreRel64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStoreRel64 ptr val mem)
+ // result: (LoweredAtomicStore64 [0] ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredAtomicStore64)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
func rewriteValuePPC64_OpAvg64u(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
return p
}
+ // Allow the sync package to access lightweight atomic functions limited to the runtime.
+ if p.Standard && strings.HasPrefix(importerPath, "sync") && p.ImportPath == "runtime/internal/atomic" {
+ return p
+ }
+
// Internal is present.
// Map import path back to directory corresponding to parent of internal.
if i > 0 {
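For readers skimming the cmd/go hunk above, a small standalone sketch of what the prefix test admits; the importer list here is made up for illustration and is not from the patch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// The exception applies only when the imported package is the standard
	// runtime/internal/atomic; the importer side is matched by path prefix,
	// so "sync" and its subpackages such as "sync/atomic" qualify.
	for _, importerPath := range []string{"sync", "sync/atomic", "fmt"} {
		ok := strings.HasPrefix(importerPath, "sync")
		fmt.Printf("%-12s -> may import runtime/internal/atomic: %v\n", importerPath, ok)
	}
}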
TEXT ·StoreRel(SB), NOSPLIT, $0-8
JMP ·Store(SB)
+TEXT runtime∕internal∕atomic·StoreReluintptr(SB), NOSPLIT, $0-8
+ JMP runtime∕internal∕atomic·Store(SB)
+
// uint64 atomicload64(uint64 volatile* addr);
TEXT ·Load64(SB), NOSPLIT, $0-12
NO_LOCAL_POINTERS
TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-12
JMP runtime∕internal∕atomic·Store(SB)
+TEXT runtime∕internal∕atomic·StoreRel64(SB), NOSPLIT, $0-16
+ JMP runtime∕internal∕atomic·Store64(SB)
+
+TEXT runtime∕internal∕atomic·StoreReluintptr(SB), NOSPLIT, $0-16
+ JMP runtime∕internal∕atomic·Store64(SB)
+
TEXT runtime∕internal∕atomic·Store8(SB), NOSPLIT, $0-9
MOVQ ptr+0(FP), BX
MOVB val+8(FP), AX
TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-8
B ·Load(SB)
+TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-8
+ B ·Load(SB)
+
TEXT ·Casuintptr(SB),NOSPLIT,$0-13
B ·Cas(SB)
TEXT ·StoreRel(SB),NOSPLIT,$0-8
B ·Store(SB)
+TEXT ·StoreReluintptr(SB),NOSPLIT,$0-8
+ B ·Store(SB)
+
TEXT ·Xadduintptr(SB),NOSPLIT,$0-12
B ·Xadd(SB)
TEXT ·StoreRel(SB), NOSPLIT, $0-12
JMP ·Store(SB)
+TEXT ·StoreRel64(SB), NOSPLIT, $0-16
+ JMP ·Store64(SB)
+
+TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16
+ JMP ·Store64(SB)
+
TEXT ·Store(SB), NOSPLIT, $0-12
MOVV ptr+0(FP), R1
MOVW val+8(FP), R2
TEXT ·StoreRel(SB),NOSPLIT,$0-8
JMP ·Store(SB)
+TEXT ·StoreReluintptr(SB),NOSPLIT,$0-8
+ JMP ·Store(SB)
+
// void Or8(byte volatile*, byte);
TEXT ·Or8(SB),NOSPLIT,$0-5
MOVW ptr+0(FP), R1
TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16
BR runtime∕internal∕atomic·Load64(SB)
+TEXT runtime∕internal∕atomic·LoadAcquintptr(SB), NOSPLIT|NOFRAME, $0-16
+ BR runtime∕internal∕atomic·LoadAcq64(SB)
+
TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT|NOFRAME, $0-16
BR runtime∕internal∕atomic·Load64(SB)
TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-16
BR runtime∕internal∕atomic·Store64(SB)
+TEXT runtime∕internal∕atomic·StoreReluintptr(SB), NOSPLIT, $0-16
+ BR runtime∕internal∕atomic·StoreRel64(SB)
+
TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-24
BR runtime∕internal∕atomic·Xadd64(SB)
MOVW R4, 0(R3)
RET
+TEXT runtime∕internal∕atomic·StoreRel64(SB), NOSPLIT, $0-16
+ MOVD ptr+0(FP), R3
+ MOVD val+8(FP), R4
+ LWSYNC
+ MOVD R4, 0(R3)
+ RET
+
// void runtime∕internal∕atomic·Or8(byte volatile*, byte);
TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-9
MOVD ptr+0(FP), R3
return *ptr
}
+//go:nosplit
+//go:noinline
+func LoadAcquintptr(ptr *uintptr) uintptr {
+ return *ptr
+}
+
//go:noescape
func Xadd64(ptr *uint64, delta int64) uint64
//go:noescape
func StoreRel(ptr *uint32, val uint32)
+//go:noescape
+func StoreReluintptr(ptr *uintptr, val uintptr)
+
// NO go:noescape annotation; see atomic_pointer.go.
func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
return *ptr
}
+//go:nosplit
+//go:noinline
+func LoadAcq64(ptr *uint64) uint64 {
+ return *ptr
+}
+
+//go:nosplit
+//go:noinline
+func LoadAcquintptr(ptr *uintptr) uintptr {
+ return *ptr
+}
+
//go:noescape
func Xadd(ptr *uint32, delta int32) uint32
//go:noescape
func StoreRel(ptr *uint32, val uint32)
+//go:noescape
+func StoreRel64(ptr *uint64, val uint64)
+
+//go:noescape
+func StoreReluintptr(ptr *uintptr, val uintptr)
+
// StorepNoWB performs *ptr = val atomically and without a write
// barrier.
//
//go:noescape
func StoreRel(addr *uint32, v uint32)
+//go:noescape
+func StoreReluintptr(addr *uintptr, v uintptr)
+
//go:nosplit
func goCas64(addr *uint64, old, new uint64) bool {
if uintptr(unsafe.Pointer(addr))&7 != 0 {
//go:noescape
func LoadAcq(addr *uint32) uint32
+//go:noescape
+func LoadAcquintptr(ptr *uintptr) uintptr
+
//go:noescape
func Cas64(addr *uint64, old, new uint64) bool
//go:noescape
func LoadAcq(addr *uint32) uint32
+//go:noescape
+func LoadAcq64(ptr *uint64) uint64
+
+//go:noescape
+func LoadAcquintptr(ptr *uintptr) uintptr
+
//go:noescape
func Or8(ptr *uint8, val uint8)
//go:noescape
func StoreRel(ptr *uint32, val uint32)
+
+//go:noescape
+func StoreRel64(ptr *uint64, val uint64)
+
+//go:noescape
+func StoreReluintptr(ptr *uintptr, val uintptr)
TEXT ·LoadAcq(SB),NOSPLIT,$0-12
B ·Load(SB)
+// uint64 runtime∕internal∕atomic·LoadAcq64(uint64 volatile* addr)
+TEXT ·LoadAcq64(SB),NOSPLIT,$0-16
+ B ·Load64(SB)
+
+// uintptr runtime∕internal∕atomic·LoadAcquintptr(uintptr volatile* addr)
+TEXT ·LoadAcquintptr(SB),NOSPLIT,$0-16
+ B ·Load64(SB)
+
TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-16
B runtime∕internal∕atomic·Store64(SB)
TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-12
B runtime∕internal∕atomic·Store(SB)
+TEXT runtime∕internal∕atomic·StoreRel64(SB), NOSPLIT, $0-16
+ B runtime∕internal∕atomic·Store64(SB)
+
+TEXT runtime∕internal∕atomic·StoreReluintptr(SB), NOSPLIT, $0-16
+ B runtime∕internal∕atomic·Store64(SB)
+
TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12
MOVD ptr+0(FP), R0
MOVW val+8(FP), R1
//go:noescape
func LoadAcq(ptr *uint32) uint32
+//go:noescape
+func LoadAcq64(ptr *uint64) uint64
+
+//go:noescape
+func LoadAcquintptr(ptr *uintptr) uintptr
+
//go:noescape
func And8(ptr *uint8, val uint8)
//go:noescape
func StoreRel(ptr *uint32, val uint32)
+
+//go:noescape
+func StoreRel64(ptr *uint64, val uint64)
+
+//go:noescape
+func StoreReluintptr(ptr *uintptr, val uintptr)
// uint32 runtime∕internal∕atomic·LoadAcq(uint32 volatile* ptr)
TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12
JMP atomic·Load(SB)
+
+// uint64 runtime∕internal∕atomic·LoadAcq64(uint64 volatile* ptr)
+TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$0-16
+ JMP atomic·Load64(SB)
+
+// uintptr runtime∕internal∕atomic·LoadAcquintptr(uintptr volatile* ptr)
+TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16
+ JMP atomic·Load64(SB)
//go:noescape
func LoadAcq(ptr *uint32) uint32
+//go:noescape
+func LoadAcquintptr(ptr *uintptr) uintptr
+
//go:noescape
func And8(ptr *uint8, val uint8)
//go:noescape
func StoreRel(ptr *uint32, val uint32)
+//go:noescape
+func StoreReluintptr(ptr *uintptr, val uintptr)
+
//go:noescape
func CasRel(addr *uint32, old, new uint32) bool
//go:noescape
func LoadAcq(ptr *uint32) uint32
+//go:noescape
+func LoadAcq64(ptr *uint64) uint64
+
+//go:noescape
+func LoadAcquintptr(ptr *uintptr) uintptr
+
//go:noescape
func And8(ptr *uint8, val uint8)
//go:noescape
func StoreRel(ptr *uint32, val uint32)
+//go:noescape
+func StoreRel64(ptr *uint64, val uint64)
+
+//go:noescape
+func StoreReluintptr(ptr *uintptr, val uintptr)
+
// NO go:noescape annotation; see atomic_pointer.go.
func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
#include "textflag.h"
+
+// The following paper describes how C/C++-style memory models
+// are enforced on POWER, and gives context for why the
+// odd-looking instruction sequences below work.
+//
+// http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
+
// uint32 runtime∕internal∕atomic·Load(uint32 volatile* ptr)
TEXT ·Load(SB),NOSPLIT|NOFRAME,$-8-12
MOVD ptr+0(FP), R3
MOVWZ 0(R3), R3
CMPW R3, R3, CR7
BC 4, 30, 1(PC) // bne- cr7, 0x4
+ ISYNC
MOVW R3, ret+8(FP)
RET
+
+// uint64 runtime∕internal∕atomic·LoadAcq64(uint64 volatile* ptr)
+TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$-8-16
+ MOVD ptr+0(FP), R3
+ MOVD 0(R3), R3
+ CMP R3, R3, CR7
+ BC 4, 30, 1(PC) // bne- cr7, 0x4
+ ISYNC
+ MOVD R3, ret+8(FP)
+ RET
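As a rough sketch of the publication pattern these acquire/release primitives exist for: a release store (LWSYNC; MOVD above) publishes earlier plain writes, and an acquire load (MOVD; CMP; BC; ISYNC above) orders later plain reads after it. The sketch below uses sync/atomic so it runs as ordinary user code; inside the runtime the calls would be the StoreRel64/LoadAcq64 routines added in this patch:

package main

import (
	"fmt"
	"sync/atomic"
)

var (
	payload uint64 // written before the release store, read after the acquire load
	ready   uint64 // publication flag
)

func publish(v uint64) {
	payload = v                   // plain store
	atomic.StoreUint64(&ready, 1) // stand-in for a release store of the flag
}

func consume() (uint64, bool) {
	if atomic.LoadUint64(&ready) == 0 { // stand-in for an acquire load of the flag
		return 0, false
	}
	return payload, true // guaranteed to observe the write made in publish
}

func main() {
	go publish(42)
	for {
		if v, ok := consume(); ok {
			fmt.Println(v) // prints 42
			return
		}
	}
}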
//go:noescape
func LoadAcq(ptr *uint32) uint32
+//go:noescape
+func LoadAcq64(ptr *uint64) uint64
+
+//go:noescape
+func LoadAcquintptr(ptr *uintptr) uintptr
+
//go:noescape
func Or8(ptr *uint8, val uint8)
//go:noescape
func StoreRel(ptr *uint32, val uint32)
+
+//go:noescape
+func StoreRel64(ptr *uint64, val uint64)
+
+//go:noescape
+func StoreReluintptr(ptr *uintptr, val uintptr)
TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12
JMP ·Load(SB)
+TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$0-16
+ JMP ·Load64(SB)
+
+TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16
+ JMP ·Load64(SB)
+
// func Loadp(ptr unsafe.Pointer) unsafe.Pointer
TEXT ·Loadp(SB),NOSPLIT,$0-16
JMP ·Load64(SB)
TEXT ·StoreRel(SB), NOSPLIT, $0-12
JMP ·Store(SB)
+TEXT ·StoreRel64(SB), NOSPLIT, $0-16
+ JMP ·Store64(SB)
+
+TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16
+ JMP ·Store64(SB)
+
// func Xchg(ptr *uint32, new uint32) uint32
TEXT ·Xchg(SB), NOSPLIT, $0-20
MOV ptr+0(FP), A0
return *ptr
}
+//go:nosplit
+//go:noinline
+func LoadAcq64(ptr *uint64) uint64 {
+ return *ptr
+}
+
+//go:nosplit
+//go:noinline
+func LoadAcquintptr(ptr *uintptr) uintptr {
+ return *ptr
+}
+
//go:noescape
func Store(ptr *uint32, val uint32)
*ptr = val
}
+//go:nosplit
+//go:noinline
+func StoreRel64(ptr *uint64, val uint64) {
+ *ptr = val
+}
+
+//go:nosplit
+//go:noinline
+func StoreReluintptr(ptr *uintptr, val uintptr) {
+ *ptr = val
+}
+
//go:noescape
func And8(ptr *uint8, val uint8)
return *ptr
}
+//go:nosplit
+//go:noinline
+func LoadAcq64(ptr *uint64) uint64 {
+ return *ptr
+}
+
+//go:nosplit
+//go:noinline
+func LoadAcquintptr(ptr *uintptr) uintptr {
+ return *ptr
+}
+
//go:nosplit
//go:noinline
func Load8(ptr *uint8) uint8 {
*ptr = val
}
+//go:nosplit
+//go:noinline
+func StoreRel64(ptr *uint64, val uint64) {
+ *ptr = val
+}
+
+//go:nosplit
+//go:noinline
+func StoreReluintptr(ptr *uintptr, val uintptr) {
+ *ptr = val
+}
+
//go:nosplit
//go:noinline
func Store8(ptr *uint8, val uint8) {
import (
"internal/race"
"runtime"
+ runtimeatomic "runtime/internal/atomic"
"sync/atomic"
"unsafe"
)
func (p *Pool) getSlow(pid int) interface{} {
// See the comment in pin regarding ordering of the loads.
- size := atomic.LoadUintptr(&p.localSize) // load-acquire
- locals := p.local // load-consume
+ size := runtimeatomic.LoadAcquintptr(&p.localSize) // load-acquire
+ locals := p.local // load-consume
// Try to steal one element from other procs.
for i := 0; i < int(size); i++ {
l := indexLocal(locals, (pid+i+1)%int(size))
// Try the victim cache. We do this after attempting to steal
// from all primary caches because we want objects in the
// victim cache to age out if at all possible.
- size = atomic.LoadUintptr(&p.victimSize)
+ size = runtimeatomic.Loaduintptr(&p.victimSize)
if uintptr(pid) >= size {
return nil
}
// Since we've disabled preemption, GC cannot happen in between.
// Thus here we must observe local at least as large as localSize.
// We can observe a newer/larger local, it is fine (we must observe its zero-initialized-ness).
- s := atomic.LoadUintptr(&p.localSize) // load-acquire
- l := p.local // load-consume
+ s := runtimeatomic.LoadAcquintptr(&p.localSize) // load-acquire
+ l := p.local // load-consume
if uintptr(pid) < s {
return indexLocal(l, pid), pid
}
// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
size := runtime.GOMAXPROCS(0)
local := make([]poolLocal, size)
- atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
- atomic.StoreUintptr(&p.localSize, uintptr(size)) // store-release
+ atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
+ runtimeatomic.StoreReluintptr(&p.localSize, uintptr(size)) // store-release
return &local[pid], pid
}