1 // Copyright 2023 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
8 "runtime/internal/atomic"
12 // Pinner represents a set of pinned Go objects. An object can be pinned with
13 // the Pin method and all pinned objects of a Pinner can be unpinned with the
19 // Pin a Go object. The object will not be moved or freed by the garbage
20 // collector until the Unpin method has been called. The pointer to a pinned
21 // object can be directly stored in C memory or can be contained in Go memory
22 // passed to C functions. If the pinned object itself contains pointers to Go
23 // objects, these objects must be pinned separately if they are going to be
24 // accessed from C code. The argument must be a pointer of any type or an
25 // unsafe.Pointer. It must be a pointer to an object allocated by calling new,
26 // by taking the address of a composite literal, or by taking the address of a
27 // local variable. If one of these conditions is not met, Pin will panic.
28 func (p *Pinner) Pin(pointer any) {
// Lazily obtain backing pinner state on first use: reuse the current
// P's cached pinner when available, otherwise allocate a fresh one.
// NOTE(review): several lines are elided in this view (e.g. the nil
// check on p.pinner and the acquisition of mp) — confirm against the
// full file.
30 // Check the pinner cache first.
32 if pp := mp.p.ptr(); pp != nil {
33 p.pinner = pp.pinnerCache
39 // Didn't get anything from the pinner cache.
40 p.pinner = new(pinner)
41 p.refs = p.refStore[:0]
43 // We set this finalizer once and never clear it. Thus, if the
44 // pinner gets cached, we'll reuse it, along with its finalizer.
45 // This lets us avoid the relatively expensive SetFinalizer call
46 // when reusing from the cache. The finalizer however has to be
47 // resilient to an empty pinner being finalized, which is done
48 // by checking p.refs' length.
49 SetFinalizer(p.pinner, func(i *pinner) {
51 i.unpin() // only required to make the test idempotent
// Resolve the argument to a raw pointer (pinnerGetPtr panics on
// invalid input), mark the object pinned, and record it for Unpin.
57 ptr := pinnerGetPtr(&pointer)
59 p.refs = append(p.refs, ptr)
62 // Unpin all pinned objects of the Pinner.
63 func (p *Pinner) Unpin() {
// Unpin every recorded object, then try to donate the backing pinner
// to the current P's single-slot cache so a future Pinner can reuse
// it (and its already-set finalizer) without reallocating.
// NOTE(review): the call that performs the actual unpinning
// (presumably p.pinner.unpin()) is elided in this view — confirm.
67 if pp := mp.p.ptr(); pp != nil && pp.pinnerCache == nil {
68 // Put the pinner back in the cache, but only if the
69 // cache is empty. If application code is reusing Pinners
70 // on its own, we want to leave the backing store in place
71 // so reuse is more efficient.
72 pp.pinnerCache = p.pinner
80 pinnerRefStoreSize = (pinnerSize - unsafe.Sizeof([]unsafe.Pointer{})) / unsafe.Sizeof(unsafe.Pointer(nil))
85 refStore [pinnerRefStoreSize]unsafe.Pointer
// unpin clears the pinned state of every recorded object and resets
// the ref storage so the pinner can be reused. It is safe to call on
// a nil pinner or one with no refs (the finalizer set in Pin relies
// on this).
88 func (p *pinner) unpin() {
89 if p == nil || p.refs == nil {
92 for i := range p.refs {
93 setPinned(p.refs[i], false)
95 // The following two lines make all pointers to references
96 // in p.refs unreachable, either by deleting them or dropping
97 // p.refs' backing store (if it was not backed by refStore).
98 p.refStore = [pinnerRefStoreSize]unsafe.Pointer{}
99 p.refs = p.refStore[:0]
// pinnerGetPtr validates the argument passed to Pin and returns the
// raw pointer it carries. It panics if the argument is nil, is not a
// pointer or unsafe.Pointer, or points into a user arena chunk
// (arena-allocated objects cannot be pinned).
102 func pinnerGetPtr(i *any) unsafe.Pointer {
106 panic(errorString("runtime.Pinner: argument is nil"))
108 if kind := etyp.Kind_ & kindMask; kind != kindPtr && kind != kindUnsafePointer {
109 panic(errorString("runtime.Pinner: argument is not a pointer: " + toRType(etyp).string()))
111 if inUserArenaChunk(uintptr(e.data)) {
112 // Arena-allocated objects are not eligible for pinning.
113 panic(errorString("runtime.Pinner: object was allocated into an arena"))
118 // isPinned checks if a Go pointer is pinned.
119 // nosplit, because it's called from nosplit code in cgocheck.
122 func isPinned(ptr unsafe.Pointer) bool {
123 span := spanOfHeap(uintptr(ptr))
// No heap span: the pointer must refer to a linker-allocated global,
// which never moves. (The return on this path is elided in this view.)
125 // this code is only called for Go pointer, so this must be a
126 // linker-allocated global object.
129 pinnerBits := span.getPinnerBits()
130 // these pinnerBits might get unlinked by a concurrently running sweep, but
131 // that's OK because gcBits don't get cleared until the following GC cycle
132 // (nextMarkBitArenaEpoch)
133 if pinnerBits == nil {
136 objIndex := span.objIndex(uintptr(ptr))
137 pinState := pinnerBits.ofObject(objIndex)
138 KeepAlive(ptr) // make sure ptr is alive until we are done so the span can't be freed
139 return pinState.isPinned()
142 // setPinned marks or unmarks a Go pointer as pinned.
// Pinning the same object multiple times sets the multipin bit and
// tracks additional pins in a specialPinCounter attached to the span;
// unpinning reverses that, and unpinning an unpinned object throws.
143 func setPinned(ptr unsafe.Pointer, pin bool) {
144 span := spanOfHeap(uintptr(ptr))
// No heap span: tolerate linker-allocated or zero-size objects,
// reject anything else as not a Go pointer.
146 if isGoPointerWithoutSpan(ptr) {
147 // this is a linker-allocated or zero size object, nothing to do.
150 panic(errorString("runtime.Pinner.Pin: argument is not a Go pointer"))
153 // ensure that the span is swept, b/c sweeping accesses the specials list
157 KeepAlive(ptr) // make sure ptr is still alive after span is swept
159 objIndex := span.objIndex(uintptr(ptr))
161 lock(&span.speciallock) // guard against concurrent calls of setPinned on same span
// Lazily create and publish the span's pinner bits on first pin.
163 pinnerBits := span.getPinnerBits()
164 if pinnerBits == nil {
165 pinnerBits = span.newPinnerBits()
166 span.setPinnerBits(pinnerBits)
168 pinState := pinnerBits.ofObject(objIndex)
// Pin path (the if pin { ... } branch structure is partially elided
// in this view).
170 if pinState.isPinned() {
171 // multiple pins on same object, set multipin bit
172 pinState.setMultiPinned(true)
173 // and increase the pin counter
174 // TODO(mknyszek): investigate if systemstack is necessary here
176 offset := objIndex * span.elemsize
177 span.incPinCounter(offset)
181 pinState.setPinned(true)
// Unpin path.
185 if pinState.isPinned() {
186 if pinState.isMultiPinned() {
188 // TODO(mknyszek): investigate if systemstack is necessary here
190 offset := objIndex * span.elemsize
191 exists = span.decPinCounter(offset)
194 // counter is 0, clear multipin bit
195 pinState.setMultiPinned(false)
198 // no multipins recorded. unpin object.
199 pinState.setPinned(false)
202 // unpinning unpinned object, bail out
203 throw("runtime.Pinner: object already unpinned")
206 unlock(&span.speciallock)
211 type pinState struct {
217 // nosplit, because it's called by isPinned, which is nosplit
// isPinned reports whether the pin bit (selected by v.mask) is set in
// the byte value cached when this pinState was constructed.
220 func (v *pinState) isPinned() bool {
221 return (v.byteVal & v.mask) != 0
// isMultiPinned reports whether the multipin bit — one bit above the
// pin bit — is set in the cached byte value.
224 func (v *pinState) isMultiPinned() bool {
225 return (v.byteVal & (v.mask << 1)) != 0
// setPinned sets or clears the pin bit. Body elided in this view;
// presumably delegates to v.set — confirm against the full file.
228 func (v *pinState) setPinned(val bool) {
// setMultiPinned sets or clears the multipin bit. Body elided in this
// view; presumably delegates to v.set — confirm against the full file.
232 func (v *pinState) setMultiPinned(val bool) {
236 // set sets the pin bit of the pinState to val. If multipin is true, it
237 // sets/unsets the multipin bit instead.
238 func (v *pinState) set(val bool, multipin bool) {
// Atomically sets or clears the selected bit in the underlying byte.
// The computation of mask (pin bit vs. shifted multipin bit) is
// elided in this view.
244 atomic.Or8(v.bytep, mask)
246 atomic.And8(v.bytep, ^mask)
250 // pinnerBits is the same type as gcBits but has different methods.
// Each object in the span consumes two consecutive bits: the pin bit
// and the multipin bit (see ofObject and pinnerBitSize, which both
// scale by 2).
251 type pinnerBits gcBits
253 // ofObject returns the pinState of the n'th object.
254 // nosplit, because it's called by isPinned, which is nosplit
// Each object occupies two bits (pin + multipin), hence the n*2 bit
// index. The containing byte is loaded atomically and cached in the
// returned pinState so subsequent reads are consistent.
257 func (p *pinnerBits) ofObject(n uintptr) pinState {
258 bytep, mask := (*gcBits)(p).bitp(n * 2)
259 byteVal := atomic.Load8(bytep)
260 return pinState{bytep, byteVal, mask}
// pinnerBitSize returns the number of bytes needed to store two bits
// (pin + multipin) for every object in the span.
263 func (s *mspan) pinnerBitSize() uintptr {
264 return divRoundUp(s.nelems*2, 8)
267 // newPinnerBits returns a pointer to 8 byte aligned bytes to be used for this
268 // span's pinner bits. newPinnerBits is used to mark objects that are pinned.
269 // They are copied when the span is swept.
270 func (s *mspan) newPinnerBits() *pinnerBits {
271 return (*pinnerBits)(newMarkBits(s.nelems * 2))
274 // nosplit, because it's called by isPinned, which is nosplit
// getPinnerBits atomically loads the span's pinnerBits pointer. It
// returns nil when no pinner bits have been published for this span
// (see setPinned, which creates them on first pin).
277 func (s *mspan) getPinnerBits() *pinnerBits {
278 return (*pinnerBits)(atomic.Loadp(unsafe.Pointer(&s.pinnerBits)))
// setPinnerBits atomically publishes p as the span's pinnerBits so
// concurrent readers (getPinnerBits) see a consistent pointer.
281 func (s *mspan) setPinnerBits(p *pinnerBits) {
282 atomicstorep(unsafe.Pointer(&s.pinnerBits), unsafe.Pointer(p))
285 // refreshPinnerBits replaces pinnerBits with a fresh copy in the arenas for the
286 // next GC cycle. If it does not contain any pinned objects, pinnerBits of the
287 // span is set to nil.
288 func (s *mspan) refreshPinnerBits() {
289 p := s.getPinnerBits()
// NOTE(review): the early return for p == nil and the branch that
// nils out pinnerBits when no pins are found are elided in this view.
295 bytes := alignUp(s.pinnerBitSize(), 8)
297 // Iterate over each 8-byte chunk and check for pins. Note that
298 // newPinnerBits guarantees that pinnerBits will be 8-byte aligned, so we
299 // don't have to worry about edge cases, irrelevant bits will simply be
301 for _, x := range unsafe.Slice((*uint64)(unsafe.Pointer(&p.x)), bytes/8) {
// Pins exist: copy the bits into freshly allocated mark bits for the
// next cycle and publish them.
309 newPinnerBits := s.newPinnerBits()
310 memmove(unsafe.Pointer(&newPinnerBits.x), unsafe.Pointer(&p.x), bytes)
311 s.setPinnerBits(newPinnerBits)
317 // incPinCounter is only called for multiple pins of the same object and records
318 // the _additional_ pins.
// A specialPinCounter is attached to the span's specials list, keyed
// by the object's byte offset within the span.
319 func (span *mspan) incPinCounter(offset uintptr) {
320 var rec *specialPinCounter
321 ref, exists := span.specialFindSplicePoint(offset, _KindSpecialPinCounter)
// No counter special yet: allocate one from the heap's fixed-size
// allocator and splice it into the specials list at the found point.
323 lock(&mheap_.speciallock)
324 rec = (*specialPinCounter)(mheap_.specialPinCounterAlloc.alloc())
325 unlock(&mheap_.speciallock)
326 // splice in record, fill in offset.
327 rec.special.offset = uint16(offset)
328 rec.special.kind = _KindSpecialPinCounter
329 rec.special.next = *ref
330 *ref = (*special)(unsafe.Pointer(rec))
331 spanHasSpecials(span)
// Counter already exists; reuse it. (The increment of rec.counter is
// elided in this view.)
333 rec = (*specialPinCounter)(unsafe.Pointer(*ref))
338 // decPinCounter decreases the counter. If the counter reaches 0, the counter
339 // special is deleted and false is returned. Otherwise true is returned.
340 func (span *mspan) decPinCounter(offset uintptr) bool {
341 ref, exists := span.specialFindSplicePoint(offset, _KindSpecialPinCounter)
// Decrementing a counter that was never recorded is a runtime bug.
343 throw("runtime.Pinner: decreased non-existing pin counter")
345 counter := (*specialPinCounter)(unsafe.Pointer(*ref))
// NOTE(review): the decrement of counter.counter is elided in this
// view.
347 if counter.counter == 0 {
// Unsplice the special from the list, update the span-has-specials
// hint if the list is now empty, and return the record to the heap's
// fixed-size allocator.
348 *ref = counter.special.next
349 if span.specials == nil {
350 spanHasNoSpecials(span)
352 lock(&mheap_.speciallock)
353 mheap_.specialPinCounterAlloc.free(unsafe.Pointer(counter))
354 unlock(&mheap_.speciallock)
// pinnerGetPinCounter returns a pointer to the pin counter of the
// object containing addr. The not-found branch (when no counter
// special exists) is elided in this view — presumably it returns nil;
// confirm against the full file.
361 func pinnerGetPinCounter(addr unsafe.Pointer) *uintptr {
362 _, span, objIndex := findObject(uintptr(addr), 0, 0)
363 offset := objIndex * span.elemsize
364 t, exists := span.specialFindSplicePoint(offset, _KindSpecialPinCounter)
368 counter := (*specialPinCounter)(unsafe.Pointer(*t))
369 return &counter.counter
372 // To be able to test that the GC panics when a pinned pointer is leaking, this
373 // panic function is a variable that can be overwritten by a test.
374 var pinnerLeakPanic = func() {
375 panic(errorString("runtime.Pinner: found leaking pinned pointer; forgot to call Unpin()?"))