// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)
// A Pinner is a set of Go objects each pinned to a fixed location in memory. The
// [Pin] method pins one object, while [Unpin] unpins all pinned objects. See their
// comments for more information.
type Pinner struct {
	*pinner
}
// Pin pins a Go object, preventing it from being moved or freed by the garbage
// collector until the [Pinner.Unpin] method has been called.
//
// A pointer to a pinned object can be directly stored in C memory or can be
// contained in Go memory passed to C functions. If the pinned object itself
// contains pointers to Go objects, these objects must be pinned separately if they
// are going to be accessed from C code.
//
// The argument must be a pointer of any type or an unsafe.Pointer.
// It's safe to call Pin on non-Go pointers, in which case Pin will do nothing.
func (p *Pinner) Pin(pointer any) {
	if p.pinner == nil {
		// Check the pinner cache first.
		mp := acquirem()
		if pp := mp.p.ptr(); pp != nil {
			p.pinner = pp.pinnerCache
			pp.pinnerCache = nil
		}
		releasem(mp)

		if p.pinner == nil {
			// Didn't get anything from the pinner cache.
			p.pinner = new(pinner)
			p.refs = p.refStore[:0]

			// We set this finalizer once and never clear it. Thus, if the
			// pinner gets cached, we'll reuse it, along with its finalizer.
			// This lets us avoid the relatively expensive SetFinalizer call
			// when reusing from the cache. The finalizer however has to be
			// resilient to an empty pinner being finalized, which is done
			// by checking p.refs' length.
			SetFinalizer(p.pinner, func(i *pinner) {
				if len(i.refs) != 0 {
					i.unpin() // only required to make the test idempotent
					pinnerLeakPanic()
				}
			})
		}
	}
	ptr := pinnerGetPtr(&pointer)
	if setPinned(ptr, true) {
		p.refs = append(p.refs, ptr)
	}
}
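// A minimal usage sketch (illustrative only; C.f and the variable names are
// hypothetical, and this is user code, not part of the runtime):
//
//	var pinner runtime.Pinner
//	data := new(int)
//	pinner.Pin(data)           // data can no longer be moved or freed
//	C.f(unsafe.Pointer(data))  // safe: the pointee stays put during the call
//	pinner.Unpin()             // releases every pin held by this Pinner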
// Unpin unpins all pinned objects of the [Pinner].
func (p *Pinner) Unpin() {
	p.pinner.unpin()

	mp := acquirem()
	if pp := mp.p.ptr(); pp != nil && pp.pinnerCache == nil {
		// Put the pinner back in the cache, but only if the
		// cache is empty. If application code is reusing Pinners
		// on its own, we want to leave the backing store in place
		// so reuse is more efficient.
		pp.pinnerCache = p.pinner
		p.pinner = nil
	}
	releasem(mp)
}
const (
	pinnerSize         = 64
	pinnerRefStoreSize = (pinnerSize - unsafe.Sizeof([]unsafe.Pointer{})) / unsafe.Sizeof(unsafe.Pointer(nil))
)

type pinner struct {
	refs     []unsafe.Pointer
	refStore [pinnerRefStoreSize]unsafe.Pointer
}
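// For illustration (assuming a 64-bit platform; not part of the original
// source): a slice header is 24 bytes and a pointer 8 bytes, so
// pinnerRefStoreSize = (64-24)/8 = 5. The first five pinned pointers are
// stored inline in refStore; only a sixth Pin forces refs to grow into a
// separate heap-allocated backing array.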
func (p *pinner) unpin() {
	if p == nil || p.refs == nil {
		return
	}
	for i := range p.refs {
		setPinned(p.refs[i], false)
	}
	// The following two lines make all pointers to references
	// in p.refs unreachable, either by deleting them or dropping
	// p.refs' backing store (if it was not backed by refStore).
	p.refStore = [pinnerRefStoreSize]unsafe.Pointer{}
	p.refs = p.refStore[:0]
}
func pinnerGetPtr(i *any) unsafe.Pointer {
	e := efaceOf(i)
	etyp := e._type
	if etyp == nil {
		panic(errorString("runtime.Pinner: argument is nil"))
	}
	if kind := etyp.Kind_ & kindMask; kind != kindPtr && kind != kindUnsafePointer {
		panic(errorString("runtime.Pinner: argument is not a pointer: " + toRType(etyp).string()))
	}
	if inUserArenaChunk(uintptr(e.data)) {
		// Arena-allocated objects are not eligible for pinning.
		panic(errorString("runtime.Pinner: object was allocated into an arena"))
	}
	return e.data
}
// isPinned checks if a Go pointer is pinned.
// nosplit, because it's called from nosplit code in cgocheck.
//
//go:nosplit
func isPinned(ptr unsafe.Pointer) bool {
	span := spanOfHeap(uintptr(ptr))
	if span == nil {
		// this code is only called for Go pointers, so this must be a
		// linker-allocated global object.
		return true
	}
	pinnerBits := span.getPinnerBits()
	// these pinnerBits might get unlinked by a concurrently running sweep, but
	// that's OK because gcBits don't get cleared until the following GC cycle
	// (nextMarkBitArenaEpoch)
	if pinnerBits == nil {
		return false
	}
	objIndex := span.objIndex(uintptr(ptr))
	pinState := pinnerBits.ofObject(objIndex)
	KeepAlive(ptr) // make sure ptr is alive until we are done so the span can't be freed
	return pinState.isPinned()
}
// setPinned marks or unmarks a Go pointer as pinned and reports whether the
// pin state was changed. An attempt to pin a non-Go pointer is silently
// ignored and returns false; an attempt to unpin a non-Go pointer panics,
// which should not happen in normal usage.
func setPinned(ptr unsafe.Pointer, pin bool) bool {
	span := spanOfHeap(uintptr(ptr))
	if span == nil {
		if !pin {
			panic(errorString("tried to unpin non-Go pointer"))
		}
		// This is a linker-allocated, zero size object or other object,
		// nothing to do, silently ignore it.
		return false
	}

	// ensure that the span is swept, b/c sweeping accesses the specials list
	// w/o locks.
	mp := acquirem()
	span.ensureSwept()
	KeepAlive(ptr) // make sure ptr is still alive after span is swept

	objIndex := span.objIndex(uintptr(ptr))

	lock(&span.speciallock) // guard against concurrent calls of setPinned on same span

	pinnerBits := span.getPinnerBits()
	if pinnerBits == nil {
		pinnerBits = span.newPinnerBits()
		span.setPinnerBits(pinnerBits)
	}
	pinState := pinnerBits.ofObject(objIndex)
	if pin {
		if pinState.isPinned() {
			// multiple pins on same object, set multipin bit
			pinState.setMultiPinned(true)
			// and increase the pin counter
			// TODO(mknyszek): investigate if systemstack is necessary here
			systemstack(func() {
				offset := objIndex * span.elemsize
				span.incPinCounter(offset)
			})
		} else {
			// set pin bit
			pinState.setPinned(true)
		}
	} else {
		// unpin
		if pinState.isPinned() {
			if pinState.isMultiPinned() {
				var exists bool
				// TODO(mknyszek): investigate if systemstack is necessary here
				systemstack(func() {
					offset := objIndex * span.elemsize
					exists = span.decPinCounter(offset)
				})
				if !exists {
					// counter is 0, clear multipin bit
					pinState.setMultiPinned(false)
				}
			} else {
				// no multipins recorded. unpin object.
				pinState.setPinned(false)
			}
		} else {
			// unpinning unpinned object, bail out
			throw("runtime.Pinner: object already unpinned")
		}
	}
	unlock(&span.speciallock)
	releasem(mp)
	return true
}
type pinState struct {
	bytep   *uint8
	byteVal uint8
	mask    uint8
}
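// Bit-layout sketch (derived from ofObject below; not part of the original
// source): object n owns bits 2n (pin) and 2n+1 (multipin) of the bitmap,
// so one byte covers the pin state of four objects. For the fourth object
// within a byte:
//
//	mask      == 1 << 6 // pin bit
//	mask << 1 == 1 << 7 // multipin bit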
// nosplit, because it's called by isPinned, which is nosplit
//
//go:nosplit
func (v *pinState) isPinned() bool {
	return (v.byteVal & v.mask) != 0
}

func (v *pinState) isMultiPinned() bool {
	return (v.byteVal & (v.mask << 1)) != 0
}

func (v *pinState) setPinned(val bool) {
	v.set(val, false)
}

func (v *pinState) setMultiPinned(val bool) {
	v.set(val, true)
}

// set sets the pin bit of the pinState to val. If multipin is true, it
// sets/unsets the multipin bit instead.
func (v *pinState) set(val bool, multipin bool) {
	mask := v.mask
	if multipin {
		mask <<= 1
	}
	if val {
		atomic.Or8(v.bytep, mask)
	} else {
		atomic.And8(v.bytep, ^mask)
	}
}
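// Note that isPinned reads these bits lock-free (ofObject uses atomic.Load8),
// so set updates them with atomic read-modify-write operations rather than
// plain stores, even though writers are already serialized by the span's
// speciallock.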
// pinnerBits is the same type as gcBits but has different methods.
type pinnerBits gcBits

// ofObject returns the pinState of the n'th object.
// nosplit, because it's called by isPinned, which is nosplit
//
//go:nosplit
func (p *pinnerBits) ofObject(n uintptr) pinState {
	bytep, mask := (*gcBits)(p).bitp(n * 2)
	byteVal := atomic.Load8(bytep)
	return pinState{bytep, byteVal, mask}
}
func (s *mspan) pinnerBitSize() uintptr {
	return divRoundUp(uintptr(s.nelems)*2, 8)
}
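// For illustration (not part of the original source): a span with
// s.nelems = 100 needs 100*2 = 200 pin bits, so pinnerBitSize returns
// divRoundUp(200, 8) = 25 bytes; refreshPinnerBits below rounds this up to
// alignUp(25, 8) = 32 bytes so it can scan whole 8-byte words.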
// newPinnerBits returns a pointer to 8 byte aligned bytes to be used for this
// span's pinner bits. newPinnerBits is used to mark objects that are pinned.
// They are copied when the span is swept.
func (s *mspan) newPinnerBits() *pinnerBits {
	return (*pinnerBits)(newMarkBits(uintptr(s.nelems) * 2))
}

// nosplit, because it's called by isPinned, which is nosplit
//
//go:nosplit
func (s *mspan) getPinnerBits() *pinnerBits {
	return (*pinnerBits)(atomic.Loadp(unsafe.Pointer(&s.pinnerBits)))
}

func (s *mspan) setPinnerBits(p *pinnerBits) {
	atomicstorep(unsafe.Pointer(&s.pinnerBits), unsafe.Pointer(p))
}
// refreshPinnerBits replaces pinnerBits with a fresh copy in the arenas for the
// next GC cycle. If it does not contain any pinned objects, pinnerBits of the
// span is set to nil.
func (s *mspan) refreshPinnerBits() {
	p := s.getPinnerBits()
	if p == nil {
		return
	}

	hasPins := false
	bytes := alignUp(s.pinnerBitSize(), 8)

	// Iterate over each 8-byte chunk and check for pins. Note that
	// newPinnerBits guarantees that pinnerBits will be 8-byte aligned, so we
	// don't have to worry about edge cases, irrelevant bits will simply be
	// zero.
	for _, x := range unsafe.Slice((*uint64)(unsafe.Pointer(&p.x)), bytes/8) {
		if x != 0 {
			hasPins = true
			break
		}
	}

	if hasPins {
		newPinnerBits := s.newPinnerBits()
		memmove(unsafe.Pointer(&newPinnerBits.x), unsafe.Pointer(&p.x), bytes)
		s.setPinnerBits(newPinnerBits)
	} else {
		s.setPinnerBits(nil)
	}
}
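// Worked example (illustrative, not part of the original source): if only
// the object with index 9 is pinned, bit 2*9 = 18 is set, i.e. byte 2 of
// the bitmap is 0b0000_0100. The first uint64 word of the scan covers
// bytes 0-7 and is therefore non-zero, so hasPins becomes true and the
// bits are copied into a fresh allocation for the next GC cycle.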
// incPinCounter is only called for multiple pins of the same object and records
// the _additional_ pins.
func (span *mspan) incPinCounter(offset uintptr) {
	var rec *specialPinCounter
	ref, exists := span.specialFindSplicePoint(offset, _KindSpecialPinCounter)
	if !exists {
		lock(&mheap_.speciallock)
		rec = (*specialPinCounter)(mheap_.specialPinCounterAlloc.alloc())
		unlock(&mheap_.speciallock)
		// splice in record, fill in offset.
		rec.special.offset = uint16(offset)
		rec.special.kind = _KindSpecialPinCounter
		rec.special.next = *ref
		*ref = (*special)(unsafe.Pointer(rec))
		spanHasSpecials(span)
	} else {
		rec = (*specialPinCounter)(unsafe.Pointer(*ref))
	}
	rec.counter++
}
// decPinCounter decreases the counter. If the counter reaches 0, the counter
// special is deleted and false is returned. Otherwise true is returned.
func (span *mspan) decPinCounter(offset uintptr) bool {
	ref, exists := span.specialFindSplicePoint(offset, _KindSpecialPinCounter)
	if !exists {
		throw("runtime.Pinner: decreased non-existing pin counter")
	}
	counter := (*specialPinCounter)(unsafe.Pointer(*ref))
	counter.counter--
	if counter.counter == 0 {
		*ref = counter.special.next
		if span.specials == nil {
			spanHasNoSpecials(span)
		}
		lock(&mheap_.speciallock)
		mheap_.specialPinCounterAlloc.free(unsafe.Pointer(counter))
		unlock(&mheap_.speciallock)
		return false
	}
	return true
}
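// Bookkeeping sketch (illustrative, not part of the original source):
// pinning the same object three times appends it to refs three times, and
// a single Unpin then unwinds the state via three setPinned(ptr, false)
// calls:
//
//	Pin #1:   pin bit 0 -> 1
//	Pin #2:   multipin bit 0 -> 1; counter special created, counter = 1
//	Pin #3:   counter = 2
//	unpin #1: counter 2 -> 1 (special kept)
//	unpin #2: counter 1 -> 0, special freed, multipin bit cleared
//	unpin #3: pin bit cleared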
// pinnerGetPinCounter is only used by tests.
func pinnerGetPinCounter(addr unsafe.Pointer) *uintptr {
	_, span, objIndex := findObject(uintptr(addr), 0, 0)
	offset := objIndex * span.elemsize
	t, exists := span.specialFindSplicePoint(offset, _KindSpecialPinCounter)
	if !exists {
		return nil
	}
	counter := (*specialPinCounter)(unsafe.Pointer(*t))
	return &counter.counter
}
// To be able to test that the GC panics when a pinned pointer is leaking,
// this panic function is a variable that can be overwritten by a test.
var pinnerLeakPanic = func() {
	panic(errorString("runtime.Pinner: found leaking pinned pointer; forgot to call Unpin()?"))
}