1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Garbage collector: finalizers and block profiling.
10 "runtime/internal/atomic"
11 "runtime/internal/sys"
15 // finblock is allocated from non-GC'd memory, so any heap pointers
16 // must be specially handled.
// NOTE(review): original lines 20-23 (the struct's header fields — list
// links and a count, judging by the 2*sys.PtrSize + 2*4 header term below)
// are missing from this excerpt; the struct is incomplete here.
19 type finblock struct {
// fin is sized so a whole finblock occupies exactly _FinBlockSize bytes:
// the header (2 pointer-sized words + 2 uint32s) is subtracted first.
24 fin [(_FinBlockSize - 2*sys.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
27 var finlock mutex // protects the following variables
28 var fing *g // goroutine that runs finalizers
29 var finq *finblock // list of finalizers that are to be executed
30 var finc *finblock // cache of free blocks
31 var finptrmask [_FinBlockSize / sys.PtrSize / 8]byte // 1 bit per pointer-sized word of a finblock; built lazily in queuefinalizer
34 var allfin *finblock // list of all blocks
36 // NOTE: Layout known to queuefinalizer.
37 type finalizer struct {
38 fn *funcval // function to call (may be a heap pointer)
39 arg unsafe.Pointer // ptr to object (may be a heap pointer)
40 nret uintptr // bytes of return values from fn
41 fint *_type // type of first argument of fn
42 ot *ptrtype // type of ptr to object (may be a heap pointer)
// NOTE(review): the struct's closing brace (original line 43) is not part of
// this excerpt. The five fields above are the 5-word "ptr ptr INT ptr ptr"
// layout that finalizer1 and queuefinalizer's Offsetof checks rely on.
// finalizer1 is the repeating GC pointer-mask byte pattern for an array of
// finalizer structs: 5 words per struct, 8 words per mask byte, so the
// pattern repeats every 5 bytes (covering 8 structs).
45 var finalizer1 = [...]byte{
46 // Each Finalizer is 5 words, ptr ptr INT ptr ptr (INT = uintptr here)
47 // Each byte describes 8 words.
48 // Need 8 Finalizers described by 5 bytes before pattern repeats:
49 // ptr ptr INT ptr ptr
50 // ptr ptr INT ptr ptr
51 // ptr ptr INT ptr ptr
52 // ptr ptr INT ptr ptr
53 // ptr ptr INT ptr ptr
54 // ptr ptr INT ptr ptr
55 // ptr ptr INT ptr ptr
56 // ptr ptr INT ptr ptr
59 // ptr ptr INT ptr ptr ptr ptr INT
60 // ptr ptr ptr ptr INT ptr ptr ptr
61 // ptr INT ptr ptr ptr ptr INT ptr
62 // ptr ptr ptr INT ptr ptr ptr ptr
63 // INT ptr ptr ptr ptr INT ptr ptr
65 // Assumptions about Finalizer layout checked below.
66 1<<0 | 1<<1 | 0<<2 | 1<<3 | 1<<4 | 1<<5 | 1<<6 | 0<<7,
67 1<<0 | 1<<1 | 1<<2 | 1<<3 | 0<<4 | 1<<5 | 1<<6 | 1<<7,
68 1<<0 | 0<<1 | 1<<2 | 1<<3 | 1<<4 | 1<<5 | 0<<6 | 1<<7,
69 1<<0 | 1<<1 | 1<<2 | 0<<3 | 1<<4 | 1<<5 | 1<<6 | 1<<7,
70 0<<0 | 1<<1 | 1<<2 | 1<<3 | 1<<4 | 0<<5 | 1<<6 | 1<<7,
// NOTE(review): the array's closing brace (original line 71) and original
// comment lines 57-58 and 64 are missing from this excerpt.
// queuefinalizer records (fn, arg, nret, fint, ot) on the queue of
// finalizers to be executed, allocating a fresh finblock from non-GC'd
// memory (persistentalloc) when the current head block is full.
// NOTE(review): original lines 74, 76, 78-79, 90, 93-100 and the function's
// tail are missing from this excerpt; locking and list manipulation between
// the visible statements cannot be seen here.
73 func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
75 if finq == nil || finq.cnt == uint32(len(finq.fin)) {
77 finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gc_sys))
// finptrmask is built once, on the first block allocation (all-zero means
// "not yet built").
80 if finptrmask[0] == 0 {
81 // Build pointer mask for Finalizer array in block.
82 // Check assumptions made in finalizer1 array above.
83 if (unsafe.Sizeof(finalizer{}) != 5*sys.PtrSize ||
84 unsafe.Offsetof(finalizer{}.fn) != 0 ||
85 unsafe.Offsetof(finalizer{}.arg) != sys.PtrSize ||
86 unsafe.Offsetof(finalizer{}.nret) != 2*sys.PtrSize ||
87 unsafe.Offsetof(finalizer{}.fint) != 3*sys.PtrSize ||
88 unsafe.Offsetof(finalizer{}.ot) != 4*sys.PtrSize) {
89 throw("finalizer out of sync")
91 for i := range finptrmask {
92 finptrmask[i] = finalizer1[i%len(finalizer1)]
// The slot is claimed before cnt is published so concurrent markroot
// scanning never observes an uninitialized entry past cnt.
101 f := &finq.fin[finq.cnt]
102 atomic.Xadd(&finq.cnt, +1) // Sync with markroots
// iterate_finq walks every block on the allfin list and invokes callback
// once per queued finalizer record.
113 func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype)) {
114 for fb := allfin; fb != nil; fb = fb.alllink {
115 for i := uint32(0); i < fb.cnt; i++ {
// NOTE(review): original line 116 (presumably "f := &fb.fin[i]" — the f
// used below) and the closing braces are missing from this excerpt.
117 callback(f.fn, f.arg, f.nret, f.fint, f.ot)
// NOTE(review): the two fragments below belong to different functions
// (original lines 125 and 140-141); their enclosing declarations are not
// part of this excerpt.
// Wake the finalizer goroutine only if it is both parked (fingwait) and has
// been asked to run (fingwake).
125 if fingwait && fingwake {
140 // start the finalizer goroutine exactly once
// The cheap load short-circuits the common case; the CAS guarantees that
// only one caller ever wins the right to create the goroutine.
141 if fingCreate == 0 && atomic.Cas(&fingCreate, 0, 1) {
146 // This is the goroutine that runs all of the finalizers
// NOTE(review): the function declaration and many interior lines (148-160,
// 162-168, 170-171, 179-182, 184, 191, 194, 203-204, 206-207, 209-210,
// 215-217, and the tail) are missing from this excerpt; comments below
// annotate only the visible statements.
// Park on finlock under the "finalizer wait" reason until more work arrives.
161 goparkunlock(&finlock, "finalizer wait", traceEvGoBlock, 1)
169 for i := fb.cnt; i > 0; i-- {
// frame holds one word for the argument (pointer or eface/iface header —
// see the switch below) plus room for fn's return values.
172 framesz := unsafe.Sizeof((interface{})(nil)) + f.nret
173 if framecap < framesz {
174 // The frame does not contain pointers interesting for GC,
175 // all not yet finalized objects are stored in finq.
176 // If we do not mark it as FlagNoScan,
177 // the last finalized object is not collected.
178 frame = mallocgc(framesz, nil, true)
183 throw("missing type in runfinq")
185 // frame is effectively uninitialized
186 // memory. That means we have to clear
187 // it before writing to it to avoid
188 // confusing the write barrier.
189 *(*[2]uintptr)(frame) = [2]uintptr{}
// Marshal the argument into frame according to fn's first-parameter kind.
190 switch f.fint.kind & kindMask {
192 // direct use of pointer
193 *(*unsafe.Pointer)(frame) = f.arg
195 ityp := (*interfacetype)(unsafe.Pointer(f.fint))
196 // set up with empty interface
197 (*eface)(frame)._type = &f.ot.typ
198 (*eface)(frame).data = f.arg
199 if len(ityp.mhdr) != 0 {
200 // convert to interface with methods
201 // this conversion is guaranteed to succeed - we checked in SetFinalizer
202 assertE2I(ityp, *(*eface)(frame), (*iface)(frame))
205 throw("bad kind in runfinq")
// Invoke fn with frame as its framesz-byte argument+result area.
208 reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz))
211 // Drop finalizer queue heap references
212 // before hiding them from markroot.
213 // This also ensures these will be
214 // clear if we reuse the finalizer.
// Publish the shrunken count so markroot stops scanning the retired entry.
218 atomic.Store(&fb.cnt, i-1)
230 // SetFinalizer sets the finalizer associated with obj to the provided
231 // finalizer function. When the garbage collector finds an unreachable block
232 // with an associated finalizer, it clears the association and runs
233 // finalizer(obj) in a separate goroutine. This makes obj reachable again,
234 // but now without an associated finalizer. Assuming that SetFinalizer
235 // is not called again, the next time the garbage collector sees
236 // that obj is unreachable, it will free obj.
238 // SetFinalizer(obj, nil) clears any finalizer associated with obj.
240 // The argument obj must be a pointer to an object allocated by calling
241 // new, by taking the address of a composite literal, or by taking the
242 // address of a local variable.
243 // The argument finalizer must be a function that takes a single argument
244 // to which obj's type can be assigned, and can have arbitrary ignored return
245 // values. If either of these is not true, SetFinalizer may abort the
248 // Finalizers are run in dependency order: if A points at B, both have
249 // finalizers, and they are otherwise unreachable, only the finalizer
250 // for A runs; once A is freed, the finalizer for B can run.
251 // If a cyclic structure includes a block with a finalizer, that
252 // cycle is not guaranteed to be garbage collected and the finalizer
253 // is not guaranteed to run, because there is no ordering that
254 // respects the dependencies.
256 // The finalizer for obj is scheduled to run at some arbitrary time after
257 // obj becomes unreachable.
258 // There is no guarantee that finalizers will run before a program exits,
259 // so typically they are useful only for releasing non-memory resources
260 // associated with an object during a long-running program.
261 // For example, an os.File object could use a finalizer to close the
262 // associated operating system file descriptor when a program discards
263 // an os.File without calling Close, but it would be a mistake
264 // to depend on a finalizer to flush an in-memory I/O buffer such as a
265 // bufio.Writer, because the buffer would not be flushed at program exit.
267 // It is not guaranteed that a finalizer will run if the size of *obj is
270 // It is not guaranteed that a finalizer will run for objects allocated
271 // in initializers for package-level variables. Such objects may be
272 // linker-allocated, not heap-allocated.
274 // A finalizer may run as soon as an object becomes unreachable.
275 // In order to use finalizers correctly, the program must ensure that
276 // the object is reachable until it is no longer required.
277 // Objects stored in global variables, or that can be found by tracing
278 // pointers from a global variable, are reachable. For other objects,
279 // pass the object to a call of the KeepAlive function to mark the
280 // last point in the function where the object must be reachable.
282 // For example, if p points to a struct that contains a file descriptor d,
283 // and p has a finalizer that closes that file descriptor, and if the last
284 // use of p in a function is a call to syscall.Write(p.d, buf, size), then
285 // p may be unreachable as soon as the program enters syscall.Write. The
286 // finalizer may run at that moment, closing p.d, causing syscall.Write
287 // to fail because it is writing to a closed file descriptor (or, worse,
288 // to an entirely different file descriptor opened by a different goroutine).
289 // To avoid this problem, call runtime.KeepAlive(p) after the call to
292 // A single goroutine runs all finalizers for a program, sequentially.
293 // If a finalizer must run for a long time, it should do so by starting
// NOTE(review): large portions of this function body (the eface unpacking
// that defines e/etyp, span lookups, several condition lines, and many
// closing braces) are missing from this excerpt; the visible lines are
// annotated as-is.
295 func SetFinalizer(obj interface{}, finalizer interface{}) {
297 // debug.sbrk never frees memory, so no finalizers run
298 // (and we don't have the data structures to record them).
304 throw("runtime.SetFinalizer: first argument is nil")
// obj must be a pointer; its dynamic type descriptor is validated here.
306 if etyp.kind&kindMask != kindPtr {
307 throw("runtime.SetFinalizer: first argument is " + etyp.string() + ", not pointer")
309 ot := (*ptrtype)(unsafe.Pointer(etyp))
311 throw("nil elem type!")
314 // find the containing object
315 _, base, _ := findObject(e.data)
318 // 0-length objects are okay.
319 if e.data == unsafe.Pointer(&zerobase) {
323 // Global initializers might be linker-allocated.
324 // var Foo = &Object{}
326 // runtime.SetFinalizer(Foo, nil)
328 // The relevant segments are: noptrdata, data, bss, noptrbss.
329 // We cannot assume they are in any order or even contiguous,
330 // due to external linking.
331 for datap := &firstmoduledata; datap != nil; datap = datap.next {
332 if datap.noptrdata <= uintptr(e.data) && uintptr(e.data) < datap.enoptrdata ||
333 datap.data <= uintptr(e.data) && uintptr(e.data) < datap.edata ||
334 datap.bss <= uintptr(e.data) && uintptr(e.data) < datap.ebss ||
335 datap.noptrbss <= uintptr(e.data) && uintptr(e.data) < datap.enoptrbss {
339 throw("runtime.SetFinalizer: pointer not in allocated block")
343 // As an implementation detail we allow to set finalizers for an inner byte
344 // of an object if it could come from tiny alloc (see mallocgc for details).
345 if ot.elem == nil || ot.elem.kind&kindNoPointers == 0 || ot.elem.size >= maxTinySize {
346 throw("runtime.SetFinalizer: pointer not at beginning of allocated block")
350 f := efaceOf(&finalizer)
353 // switch to system stack and remove finalizer
355 removefinalizer(e.data)
// From here on, the second argument is validated as a function whose single
// parameter can accept obj's type.
360 if ftyp.kind&kindMask != kindFunc {
361 throw("runtime.SetFinalizer: second argument is " + ftyp.string() + ", not a function")
363 ft := (*functype)(unsafe.Pointer(ftyp))
365 throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string() + " because dotdotdot")
368 throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string())
// Assignability cases beyond an exact type match:
375 case fint.kind&kindMask == kindPtr:
376 if (fint.uncommon() == nil || etyp.uncommon() == nil) && (*ptrtype)(unsafe.Pointer(fint)).elem == ot.elem {
377 // ok - not same type, but both pointers,
378 // one or the other is unnamed, and same element type, so assignable.
381 case fint.kind&kindMask == kindInterface:
382 ityp := (*interfacetype)(unsafe.Pointer(fint))
383 if len(ityp.mhdr) == 0 {
384 // ok - satisfies empty interface
387 if assertE2I2(ityp, *efaceOf(&obj), nil) {
391 throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string())
393 // compute size needed for return parameters
395 for _, t := range ft.out() {
396 nret = round(nret, uintptr(t.align)) + uintptr(t.size)
398 nret = round(nret, sys.PtrSize)
400 // make sure we have a finalizer goroutine
404 if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
405 throw("runtime.SetFinalizer: finalizer already set")
410 // Look up pointer v in heap. Return the span containing the object,
411 // the start of the object, and the size of the object. If the object
412 // does not exist, return nil, nil, 0.
// NOTE(review): original lines 414-415, 418-423, 427-428, 431-434, 436,
// 438-443 and the function tail are missing from this excerpt — in
// particular the assignments to s and n used below are not visible.
413 func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
// On 32-bit platforms the lookup counter is flushed before it can overflow.
416 if sys.PtrSize == 4 && c.local_nlookup >= 1<<30 {
417 // purge cache stats to prevent overflow
// Pointers outside [arena_start, arena_used) cannot be heap objects.
424 arena_start := mheap_.arena_start
425 arena_used := mheap_.arena_used
426 if uintptr(v) < arena_start || uintptr(v) >= arena_used {
429 p := uintptr(v) >> pageShift
430 q := p - arena_start>>pageShift
435 x = unsafe.Pointer(s.base())
437 if uintptr(v) < uintptr(x) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != mSpanInUse {
// For sized spans (sizeclass != 0), round v down to the start of the
// n-byte object that contains it.
444 if s.sizeclass != 0 {
445 x = add(x, (uintptr(v)-uintptr(x))/n*n)
450 // Mark KeepAlive as noinline so that the current compiler will ensure
451 // that the argument is alive at the point of the function call.
452 // If it were inlined, it would disappear, and there would be nothing
453 // keeping the argument alive. Perhaps a future compiler will recognize
454 // runtime.KeepAlive specially and do something more efficient.
457 // KeepAlive marks its argument as currently reachable.
458 // This ensures that the object is not freed, and its finalizer is not run,
459 // before the point in the program where KeepAlive is called.
461 // A very simplified example showing where KeepAlive is required:
462 // type File struct { d int }
463 // d, err := syscall.Open("/file/path", syscall.O_RDONLY, 0)
464 // // ... do something if err != nil ...
466 // runtime.SetFinalizer(p, func(p *File) { syscall.Close(p.d) })
468 // n, err := syscall.Read(p.d, buf[:])
469 // // Ensure p is not finalized until Read returns.
470 // runtime.KeepAlive(p)
471 // // No more uses of p after this point.
473 // Without the KeepAlive call, the finalizer could run at the start of
474 // syscall.Read, closing the file descriptor before syscall.Read makes
475 // the actual system call.
476 func KeepAlive(interface{}) {} // empty by design: the (deliberately uninlined — see note above) call itself keeps the argument reachable