objects of the same type.
In general, types that are allocated using any of these should be
-marked `//go:notinheap` (see below).
+marked as not in heap by embedding `runtime/internal/sys.NotInHeap`.
Objects that are allocated in unmanaged memory **must not** contain
heap pointers unless the following rules are also obeyed:
The conversion from pointer to uintptr must appear in the argument list of any
call to this function. This directive is used for some low-level system call
implementations.
-
-go:notinheap
-------------
-
-`go:notinheap` applies to type declarations. It indicates that a type
-must never be allocated from the GC'd heap or on the stack.
-Specifically, pointers to this type must always fail the
-`runtime.inheap` check. The type may be used for global variables, or
-for objects in unmanaged memory (e.g., allocated with `sysAlloc`,
-`persistentalloc`, `fixalloc`, or from a manually-managed span).
-Specifically:
-
-1. `new(T)`, `make([]T)`, `append([]T, ...)` and implicit heap
- allocation of T are disallowed. (Though implicit allocations are
- disallowed in the runtime anyway.)
-
-2. A pointer to a regular type (other than `unsafe.Pointer`) cannot be
- converted to a pointer to a `go:notinheap` type, even if they have
- the same underlying type.
-
-3. Any type that contains a `go:notinheap` type is itself
- `go:notinheap`. Structs and arrays are `go:notinheap` if their
- elements are. Maps and channels of `go:notinheap` types are
- disallowed. To keep things explicit, any type declaration where the
- type is implicitly `go:notinheap` must be explicitly marked
- `go:notinheap` as well.
-
-4. Write barriers on pointers to `go:notinheap` types can be omitted.
-
-The last point is the real benefit of `go:notinheap`. The runtime uses
-it for low-level internal structures to avoid memory barriers in the
-scheduler and the memory allocator where they are illegal or simply
-inefficient. This mechanism is reasonably safe and does not compromise
-the readability of the runtime.
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
//
// To obtain a dlogger, call dlog(). When done with the dlogger, call
// end().
-//
-//go:notinheap
type dlogger struct {
+ _ sys.NotInHeap
w debugLogWriter
// allLink is the next dlogger in the allDloggers list.
// overwrite old records. Hence, it maintains a reader that consumes
// the log as it gets overwritten. That reader state is where an
// actual log reader would start.
-//
-//go:notinheap
type debugLogWriter struct {
+ _ sys.NotInHeap
write uint64
data debugLogBuf
buf [10]byte
}
-//go:notinheap
-type debugLogBuf [debugLogBytes]byte
+type debugLogBuf struct {
+ _ sys.NotInHeap
+ b [debugLogBytes]byte
+}
const (
// debugLogHeaderSize is the number of bytes in the framing
//go:nosplit
func (l *debugLogWriter) ensure(n uint64) {
- for l.write+n >= l.r.begin+uint64(len(l.data)) {
+ for l.write+n >= l.r.begin+uint64(len(l.data.b)) {
// Consume record at begin.
if l.r.skip() == ^uint64(0) {
// Wrapped around within a record.
//go:nosplit
func (l *debugLogWriter) writeFrameAt(pos, size uint64) bool {
- l.data[pos%uint64(len(l.data))] = uint8(size)
- l.data[(pos+1)%uint64(len(l.data))] = uint8(size >> 8)
+ l.data.b[pos%uint64(len(l.data.b))] = uint8(size)
+ l.data.b[(pos+1)%uint64(len(l.data.b))] = uint8(size >> 8)
return size <= 0xFFFF
}
l.ensure(1)
pos := l.write
l.write++
- l.data[pos%uint64(len(l.data))] = x
+ l.data.b[pos%uint64(len(l.data.b))] = x
}
//go:nosplit
pos := l.write
l.write += uint64(len(x))
for len(x) > 0 {
- n := copy(l.data[pos%uint64(len(l.data)):], x)
+ n := copy(l.data.b[pos%uint64(len(l.data.b)):], x)
pos += uint64(n)
x = x[n:]
}
//go:nosplit
func (r *debugLogReader) readUint16LEAt(pos uint64) uint16 {
- return uint16(r.data[pos%uint64(len(r.data))]) |
- uint16(r.data[(pos+1)%uint64(len(r.data))])<<8
+ return uint16(r.data.b[pos%uint64(len(r.data.b))]) |
+ uint16(r.data.b[(pos+1)%uint64(len(r.data.b))])<<8
}
//go:nosplit
func (r *debugLogReader) readUint64LEAt(pos uint64) uint64 {
var b [8]byte
for i := range b {
- b[i] = r.data[pos%uint64(len(r.data))]
+ b[i] = r.data.b[pos%uint64(len(r.data.b))]
pos++
}
return uint64(b[0]) | uint64(b[1])<<8 |
pos := r.begin + debugLogHeaderSize
var u uint64
for i := uint(0); ; i += 7 {
- b := r.data[pos%uint64(len(r.data))]
+ b := r.data.b[pos%uint64(len(r.data.b))]
pos++
u |= uint64(b&^0x80) << i
if b&0x80 == 0 {
func (r *debugLogReader) uvarint() uint64 {
var u uint64
for i := uint(0); ; i += 7 {
- b := r.data[r.begin%uint64(len(r.data))]
+ b := r.data.b[r.begin%uint64(len(r.data.b))]
r.begin++
u |= uint64(b&^0x80) << i
if b&0x80 == 0 {
}
func (r *debugLogReader) printVal() bool {
- typ := r.data[r.begin%uint64(len(r.data))]
+ typ := r.data.b[r.begin%uint64(len(r.data.b))]
r.begin++
switch typ {
break
}
for sl > 0 {
- b := r.data[r.begin%uint64(len(r.data)):]
+ b := r.data.b[r.begin%uint64(len(r.data.b)):]
if uint64(len(b)) > sl {
b = b[:sl]
}
}
// mspan wrapper for testing.
-//
-//go:notinheap
type MSpan mspan
// Allocate an mspan for testing.
--- /dev/null
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+// TODO: make this a compiler intrinsic type, and remove go:notinheap
+//
+//go:notinheap
+type nih struct{}
+
+// NotInHeap is a type that must never be allocated from the GC'd heap or on
+// the stack; such types are called not-in-heap.
+//
+// Other types can embed NotInHeap to make it not-in-heap. Specifically, pointers
+// to these types must always fail the `runtime.inheap` check. The type may be used
+// for global variables, or for objects in unmanaged memory (e.g., allocated with
+// `sysAlloc`, `persistentalloc`, `fixalloc`, or from a manually-managed span).
+//
+// Specifically:
+//
+// 1. `new(T)`, `make([]T)`, `append([]T, ...)` and implicit heap
+// allocation of T are disallowed. (Though implicit allocations are
+// disallowed in the runtime anyway.)
+//
+// 2. A pointer to a regular type (other than `unsafe.Pointer`) cannot be
+// converted to a pointer to a not-in-heap type, even if they have the
+// same underlying type.
+//
+// 3. Any type containing a not-in-heap type is itself considered not-in-heap.
+//
+// - Structs and arrays are not-in-heap if their elements are not-in-heap.
+//   - Maps and channels containing not-in-heap types are disallowed.
+//
+// 4. Write barriers on pointers to not-in-heap types can be omitted.
+//
+// The last point is the real benefit of NotInHeap. The runtime uses
+// it for low-level internal structures to avoid memory barriers in the
+// scheduler and the memory allocator where they are illegal or simply
+// inefficient. This mechanism is reasonably safe and does not compromise
+// the readability of the runtime.
+type NotInHeap struct{ _ nih }
// The returned memory will be zeroed.
// sysStat must be non-nil.
//
-// Consider marking persistentalloc'd types go:notinheap.
+// Consider marking persistentalloc'd types not in heap by embedding
+// runtime/internal/sys.NotInHeap.
func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
var p *notInHeap
systemstack(func() {
// notInHeap is off-heap memory allocated by a lower-level allocator
// like sysAlloc or persistentAlloc.
//
-// In general, it's better to use real types marked as go:notinheap,
-// but this serves as a generic type for situations where that isn't
-// possible (like in the allocators).
+// In general, it's better to use real types which embed
+// runtime/internal/sys.NotInHeap, but this serves as a generic type
+// for situations where that isn't possible (like in the allocators).
//
// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
-//
-//go:notinheap
-type notInHeap struct{}
+type notInHeap struct{ _ sys.NotInHeap }
func (p *notInHeap) add(bytes uintptr) *notInHeap {
return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
}
func (s *mspan) markBitsForBase() markBits {
- return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
+ return markBits{&s.gcmarkBits.x, uint8(1), 0}
}
// isMarked reports whether mark bit m is set.
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
-//
-//go:notinheap
type mcache struct {
+ _ sys.NotInHeap
+
// The following members are accessed on every malloc,
// so they are grouped here for better caching.
nextSample uintptr // trigger heap sample after allocating this many bytes
package runtime
-import "runtime/internal/atomic"
+import (
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+)
// Central list of free objects of a given size.
-//
-//go:notinheap
type mcentral struct {
+ _ sys.NotInHeap
spanclass spanClass
// partial and full contain two mspan sets: one of swept in-use
import (
"internal/goarch"
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
// per-arena bitmap with a bit for every word in the arena. The mark
// is stored on the bit corresponding to the first word of the marked
// allocation.
-//
-//go:notinheap
-type checkmarksMap [heapArenaBytes / goarch.PtrSize / 8]uint8
+type checkmarksMap struct {
+ _ sys.NotInHeap
+ b [heapArenaBytes / goarch.PtrSize / 8]uint8
+}
// If useCheckmark is true, marking of an object uses the checkmark
// bits instead of the standard mark bits.
arena.checkmarks = bitmap
} else {
// Otherwise clear the existing bitmap.
- for i := range bitmap {
- bitmap[i] = 0
+ for i := range bitmap.b {
+ bitmap.b[i] = 0
}
}
}
ai := arenaIndex(obj)
arena := mheap_.arenas[ai.l1()][ai.l2()]
- arenaWord := (obj / heapArenaBytes / 8) % uintptr(len(arena.checkmarks))
+ arenaWord := (obj / heapArenaBytes / 8) % uintptr(len(arena.checkmarks.b))
mask := byte(1 << ((obj / heapArenaBytes) % 8))
- bytep := &arena.checkmarks[arenaWord]
+ bytep := &arena.checkmarks.b[arenaWord]
if atomic.Load8(bytep)&mask != 0 {
// Already checkmarked.
"internal/abi"
"internal/goarch"
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
// finblock is allocated from non-GC'd memory, so any heap pointers
// must be specially handled. GC currently assumes that the finalizer
// queue does not grow during marking (but it can shrink).
-//
-//go:notinheap
type finblock struct {
+ _ sys.NotInHeap
alllink *finblock
next *finblock
cnt uint32
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
// FixAlloc is a simple free-list allocator for fixed size objects.
// Malloc uses a FixAlloc wrapped around sysAlloc to manage its
// Callers can keep state in the object but the first word is
// smashed by freeing and reallocating.
//
-// Consider marking fixalloc'd types go:notinheap.
+// Consider marking fixalloc'd types not in heap by embedding
+// runtime/internal/sys.NotInHeap.
type fixalloc struct {
size uintptr
first func(arg, p unsafe.Pointer) // called first time p is returned
// this cannot be used by some of the internal GC structures. For example when
// the sweeper is placing an unmarked object on the free list it does not want the
// write barrier to be called since that could result in the object being reachable.
-//
-//go:notinheap
type mlink struct {
+ _ sys.NotInHeap
next *mlink
}
import (
"internal/goarch"
+ "runtime/internal/sys"
"unsafe"
)
// Buffer for pointers found during stack tracing.
// Must be smaller than or equal to workbuf.
-//
-//go:notinheap
type stackWorkBuf struct {
+ _ sys.NotInHeap
stackWorkBufHdr
obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr
}
// Header declaration must come after the buf declaration above, because of issue #14620.
-//
-//go:notinheap
type stackWorkBufHdr struct {
+ _ sys.NotInHeap
workbufhdr
next *stackWorkBuf // linked list of workbufs
// Note: we could theoretically repurpose lfnode.next as this next pointer.
// Buffer for stack objects found on a goroutine stack.
// Must be smaller than or equal to workbuf.
-//
-//go:notinheap
type stackObjectBuf struct {
+ _ sys.NotInHeap
stackObjectBufHdr
obj [(_WorkbufSize - unsafe.Sizeof(stackObjectBufHdr{})) / unsafe.Sizeof(stackObject{})]stackObject
}
-//go:notinheap
type stackObjectBufHdr struct {
+ _ sys.NotInHeap
workbufhdr
next *stackObjectBuf
}
// A stackObject represents a variable on the stack that has had
// its address taken.
-//
-//go:notinheap
type stackObject struct {
+ _ sys.NotInHeap
off uint32 // offset above stack.lo
size uint32 // size of object
r *stackObjectRecord // info of the object (for ptr/nonptr bits). nil if object has been scanned.
import (
"internal/goarch"
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
nobj int
}
-//go:notinheap
type workbuf struct {
+ _ sys.NotInHeap
workbufhdr
// account for the above fields
obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr
"internal/cpu"
"internal/goarch"
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
//
// mheap must not be heap-allocated because it contains mSpanLists,
// which must not be heap-allocated.
-//
-//go:notinheap
type mheap struct {
+ _ sys.NotInHeap
+
// lock must only be acquired on the system stack, otherwise a g
// could self-deadlock if its stack grows with the lock held.
lock mutex
// A heapArena stores metadata for a heap arena. heapArenas are stored
// outside of the Go heap and accessed via the mheap_.arenas index.
-//
-//go:notinheap
type heapArena struct {
+ _ sys.NotInHeap
+
// bitmap stores the pointer/scalar bitmap for the words in
// this arena. See mbitmap.go for a description.
// This array uses 1 bit per word of heap, or 1.6% of the heap size (for 64-bit).
// arenaHint is a hint for where to grow the heap arenas. See
// mheap_.arenaHints.
-//
-//go:notinheap
type arenaHint struct {
+ _ sys.NotInHeap
addr uintptr
down bool
next *arenaHint
}
// mSpanList heads a linked list of spans.
-//
-//go:notinheap
type mSpanList struct {
+ _ sys.NotInHeap
first *mspan // first span in list, or nil if none
last *mspan // last span in list, or nil if none
}
-//go:notinheap
type mspan struct {
+ _ sys.NotInHeap
next *mspan // next span in list, or nil if none
prev *mspan // previous span in list, or nil if none
list *mSpanList // For debugging. TODO: Remove.
// if that happens.
)
-//go:notinheap
type special struct {
+ _ sys.NotInHeap
next *special // linked list in span
offset uint16 // span offset of object
kind byte // kind of special
//
// specialfinalizer is allocated from non-GC'd memory, so any heap
// pointers must be specially handled.
-//
-//go:notinheap
type specialfinalizer struct {
+ _ sys.NotInHeap
special special
fn *funcval // May be a heap pointer.
nret uintptr
}
// The described object is being heap profiled.
-//
-//go:notinheap
type specialprofile struct {
+ _ sys.NotInHeap
special special
b *bucket
}
}
}
-// gcBits is an alloc/mark bitmap. This is always used as *gcBits.
-//
-//go:notinheap
-type gcBits uint8
+// gcBits is an alloc/mark bitmap. This is always used as gcBits.x.
+type gcBits struct {
+ _ sys.NotInHeap
+ x uint8
+}
// bytep returns a pointer to the n'th byte of b.
func (b *gcBits) bytep(n uintptr) *uint8 {
- return addb((*uint8)(b), n)
+ return addb(&b.x, n)
}
// bitp returns a pointer to the byte containing bit n and a mask for
next uintptr // *gcBits triggers recursive type bug. (issue 14620)
}
-//go:notinheap
type gcBitsArena struct {
+ _ sys.NotInHeap
// gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand.
free uintptr // free is the index into bits of the next free byte; read/write atomically
next *gcBitsArena
import (
"internal/abi"
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
// creation, including its next and allnext links.
//
// No heap pointers.
-//
-//go:notinheap
type bucket struct {
+ _ sys.NotInHeap
next *bucket
allnext *bucket
typ bucketType // memBucket or blockBucket (includes mutexProfile)
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
// Network poller descriptor.
//
// No heap pointers.
-//
-//go:notinheap
type pollDesc struct {
+ _ sys.NotInHeap
link *pollDesc // in pollcache, protected by pollcache.lock
fd uintptr // constant for pollDesc usage lifetime
// makeArg converts pd to an interface{}.
// makeArg does not do any allocation. Normally, such
// a conversion requires an allocation because pointers to
-// go:notinheap types (which pollDesc is) must be stored
-// in interfaces indirectly. See issue 42076.
+// types which embed runtime/internal/sys.NotInHeap (which pollDesc is)
+// must be stored in interfaces indirectly. See issue 42076.
func (pd *pollDesc) makeArg() (i any) {
x := (*eface)(unsafe.Pointer(&i))
x._type = pdType
cap int
}
-// A notInHeapSlice is a slice backed by go:notinheap memory.
+// A notInHeapSlice is a slice backed by runtime/internal/sys.NotInHeap memory.
type notInHeapSlice struct {
array *notInHeap
len int
_ [cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize]byte
}
-//go:notinheap
type stackpoolItem struct {
+ _ sys.NotInHeap
mu mutex
span mSpanList
}
}
// traceBuf is per-P tracing buffer.
-//
-//go:notinheap
type traceBuf struct {
+ _ sys.NotInHeap
traceBufHeader
arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
}
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
//
-// TODO: Since traceBuf is now go:notinheap, this isn't necessary.
+// TODO: Since traceBuf now embeds runtime/internal/sys.NotInHeap, this isn't necessary.
type traceBufPtr uintptr
func (tp traceBufPtr) ptr() *traceBuf { return (*traceBuf)(unsafe.Pointer(tp)) }
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
-//
-//go:notinheap
type traceAllocBlock struct {
+ _ sys.NotInHeap
next traceAllocBlockPtr
data [64<<10 - goarch.PtrSize]byte
}
-// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
+// TODO: Since traceAllocBlock now embeds runtime/internal/sys.NotInHeap, this isn't necessary.
type traceAllocBlockPtr uintptr
func (p traceAllocBlockPtr) ptr() *traceAllocBlock { return (*traceAllocBlock)(unsafe.Pointer(p)) }