ot = duint8(lsym, ot, t.Align) // fieldAlign
i = kinds[t.Etype]
- if !types.Haspointers(t) {
- i |= objabi.KindNoPointers
- }
if isdirectiface(t) {
i |= objabi.KindDirectIface
}
KindUnsafePointer
KindDirectIface = 1 << 5
KindGCProg = 1 << 6
- KindNoPointers = 1 << 7
KindMask = (1 << 5) - 1
)
// Kind flag bits stored in the high bits of rtype.kind / _type.kind.
// This hunk deletes kindNoPointers (bit 7): pointer-freeness is no longer
// a kind bit and is instead derived from ptrdata == 0 elsewhere in this
// patch. kindMask extracts the low 5 bits holding the basic Kind value.
const (
kindDirectIface = 1 << 5
kindGCProg = 1 << 6 // Type.gc points to GC program
- kindNoPointers = 1 << 7
kindMask = (1 << 5) - 1
)
for i := uintptr(0); i < ft.ptrdata/ptrSize; i++ {
gc = append(gc, gcdata[i/8]>>(i%8)&1)
}
- ptrs = ft.kind&kindNoPointers == 0
+ ptrs = ft.ptrdata != 0
return
}
typ := v.Type().Elem().(*rtype)
size := typ.Size()
- hasPtr := typ.kind&kindNoPointers == 0
+ hasPtr := typ.ptrdata != 0
// Some common & small cases, without using memmove:
if hasPtr {
// Duplicate of the kind-bit constants in a second package (the runtime
// and reflect each keep a private copy; they must stay in sync). As in
// the sibling hunk, kindNoPointers (bit 7) is removed — callers now test
// ptrdata == 0 instead — and kindMask masks off the flag bits to recover
// the basic Kind in the low 5 bits.
const (
kindDirectIface = 1 << 5
kindGCProg = 1 << 6 // Type.gc points to GC program
- kindNoPointers = 1 << 7
kindMask = (1 << 5) - 1
)
// Kind returns the basic kind of the type, i.e. the low 5 bits of the
// kind byte with the flag bits (direct-iface, GC-prog) masked off.
func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
// pointers reports whether values of this type can contain pointers.
// The patch rewrites the test from the removed kindNoPointers flag bit
// to ptrdata != 0 — ptrdata is the length in bytes of the prefix of the
// type that can hold pointers, so a zero means the type is pointer-free.
-func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 }
+func (t *rtype) pointers() bool { return t.ptrdata != 0 }
// common returns the receiver itself; rtype is already the common
// embedded type descriptor, so no unwrapping is needed.
func (t *rtype) common() *rtype { return t }
)
func bucketOf(ktyp, etyp *rtype) *rtype {
- // See comment on hmap.overflow in ../runtime/map.go.
- var kind uint8
- if ktyp.kind&kindNoPointers != 0 && etyp.kind&kindNoPointers != 0 &&
- ktyp.size <= maxKeySize && etyp.size <= maxValSize {
- kind = kindNoPointers
- }
-
if ktyp.size > maxKeySize {
ktyp = PtrTo(ktyp).(*rtype)
}
panic("reflect: bad size computation in MapOf")
}
- if kind != kindNoPointers {
+ if ktyp.ptrdata != 0 || etyp.ptrdata != 0 {
nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize
mask := make([]byte, (nptr+7)/8)
base := bucketSize / ptrSize
- if ktyp.kind&kindNoPointers == 0 {
+ if ktyp.ptrdata != 0 {
if ktyp.kind&kindGCProg != 0 {
panic("reflect: unexpected GC program in MapOf")
}
}
base += bucketSize * ktyp.size / ptrSize
- if etyp.kind&kindNoPointers == 0 {
+ if etyp.ptrdata != 0 {
if etyp.kind&kindGCProg != 0 {
panic("reflect: unexpected GC program in MapOf")
}
b := &rtype{
align: ptrSize,
size: size,
- kind: kind,
+ kind: uint8(Struct),
ptrdata: ptrdata,
gcdata: gcdata,
}
repr = make([]byte, 0, 64)
fset = map[string]struct{}{} // fields' names
- hasPtr = false // records whether at least one struct-field is a pointer
hasGCProg = false // records whether a struct-field type has a GCProg
)
if ft.kind&kindGCProg != 0 {
hasGCProg = true
}
- if ft.pointers() {
- hasPtr = true
- }
// Update string and hash
name := f.name.name()
if len(methods) > 0 {
typ.tflag |= tflagUncommon
}
- if !hasPtr {
- typ.kind |= kindNoPointers
- } else {
- typ.kind &^= kindNoPointers
- }
if hasGCProg {
lastPtrField := 0
array.len = uintptr(count)
array.slice = SliceOf(elem).(*rtype)
- array.kind &^= kindNoPointers
switch {
- case typ.kind&kindNoPointers != 0 || array.size == 0:
+ case typ.ptrdata == 0 || array.size == 0:
// No pointers.
- array.kind |= kindNoPointers
array.gcdata = nil
array.ptrdata = 0
}
if ptrmap.n > 0 {
x.gcdata = &ptrmap.data[0]
- } else {
- x.kind |= kindNoPointers
}
var s string
}
func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
- if t.kind&kindNoPointers != 0 {
+ if t.ptrdata == 0 {
return
}
// depending on indir. The top parameter is whether we are at the top
// level, where Go pointers are allowed.
func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
- if t.kind&kindNoPointers != 0 {
+ if t.ptrdata == 0 {
// If the type has no pointers there is nothing to do.
return
}
if !top {
panic(errorString(msg))
}
- if st.elem.kind&kindNoPointers != 0 {
+ if st.elem.ptrdata == 0 {
return
}
for i := 0; i < s.cap; i++ {
//go:nosplit
//go:nowritebarrier
func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
- if typ.kind&kindNoPointers != 0 {
+ if typ.ptrdata == 0 {
return
}
if !cgoIsGoPointer(src) {
//go:nosplit
//go:nowritebarrier
func cgoCheckSliceCopy(typ *_type, dst, src slice, n int) {
- if typ.kind&kindNoPointers != 0 {
+ if typ.ptrdata == 0 {
return
}
if !cgoIsGoPointer(src.array) {
//go:nowritebarrier
//go:systemstack
func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off, size uintptr) {
- if typ.kind&kindNoPointers != 0 {
+ if typ.ptrdata == 0 {
return
}
c = (*hchan)(mallocgc(hchanSize, nil, true))
// Race detector uses this location for synchronization.
c.buf = c.raceaddr()
- case elem.kind&kindNoPointers != 0:
+ case elem.ptrdata == 0:
// Elements do not contain pointers.
// Allocate hchan and buf in one call.
c = (*hchan)(mallocgc(hchanSize+mem, nil, true))
dwritebyte('.')
dwrite(name.str, uintptr(name.len))
}
- dumpbool(t.kind&kindDirectIface == 0 || t.kind&kindNoPointers == 0)
+ dumpbool(t.kind&kindDirectIface == 0 || t.ptrdata != 0)
}
// dump an object
dataSize := size
c := gomcache()
var x unsafe.Pointer
- noscan := typ == nil || typ.kind&kindNoPointers != 0
+ noscan := typ == nil || typ.ptrdata == 0
if size <= maxSmallSize {
if noscan && size < maxTinySize {
// Tiny allocator.
ovf = (*bmap)(newobject(t.bucket))
}
h.incrnoverflow()
- if t.bucket.kind&kindNoPointers != 0 {
+ if t.bucket.ptrdata == 0 {
h.createOverflow()
*h.extra.overflow = append(*h.extra.overflow, ovf)
}
// but may not be empty.
buckets = dirtyalloc
size := t.bucket.size * nbuckets
- if t.bucket.kind&kindNoPointers == 0 {
+ if t.bucket.ptrdata != 0 {
memclrHasPointers(buckets, size)
} else {
memclrNoHeapPointers(buckets, size)
// Only clear key if there are pointers in it.
if t.indirectkey() {
*(*unsafe.Pointer)(k) = nil
- } else if t.key.kind&kindNoPointers == 0 {
+ } else if t.key.ptrdata != 0 {
memclrHasPointers(k, t.key.size)
}
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue() {
*(*unsafe.Pointer)(v) = nil
- } else if t.elem.kind&kindNoPointers == 0 {
+ } else if t.elem.ptrdata != 0 {
memclrHasPointers(v, t.elem.size)
} else {
memclrNoHeapPointers(v, t.elem.size)
// grab snapshot of bucket state
it.B = h.B
it.buckets = h.buckets
- if t.bucket.kind&kindNoPointers != 0 {
+ if t.bucket.ptrdata == 0 {
// Allocate the current slice and remember pointers to both current and old.
// This preserves all relevant overflow buckets alive even if
// the table grows and/or overflow buckets are added to the table
}
}
// Unlink the overflow buckets & clear key/value to help GC.
- if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
+ if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
// Preserve b.tophash because the evacuation
// state is maintained there.
continue
}
// Only clear key if there are pointers in it.
- if t.key.kind&kindNoPointers == 0 {
+ if t.key.ptrdata != 0 {
memclrHasPointers(k, t.key.size)
}
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
- if t.elem.kind&kindNoPointers == 0 {
+ if t.elem.ptrdata != 0 {
memclrHasPointers(v, t.elem.size)
} else {
memclrNoHeapPointers(v, t.elem.size)
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
- if sys.PtrSize == 4 && t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
+ if sys.PtrSize == 4 && t.key.ptrdata != 0 && writeBarrier.enabled {
// Write with a write barrier.
*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
} else {
}
}
// Unlink the overflow buckets & clear key/value to help GC.
- if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
+ if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
// Preserve b.tophash because the evacuation
// state is maintained there.
continue
}
// Only clear key if there are pointers in it.
- if t.key.kind&kindNoPointers == 0 {
+ if t.key.ptrdata != 0 {
memclrHasPointers(k, t.key.size)
}
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
- if t.elem.kind&kindNoPointers == 0 {
+ if t.elem.ptrdata != 0 {
memclrHasPointers(v, t.elem.size)
} else {
memclrNoHeapPointers(v, t.elem.size)
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
- if t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
+ if t.key.ptrdata != 0 && writeBarrier.enabled {
if sys.PtrSize == 8 {
// Write with a write barrier.
*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
}
}
// Unlink the overflow buckets & clear key/value to help GC.
- if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
+ if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
// Preserve b.tophash because the evacuation
// state is maintained there.
// Clear key's pointer.
k.str = nil
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
- if t.elem.kind&kindNoPointers == 0 {
+ if t.elem.ptrdata != 0 {
memclrHasPointers(v, t.elem.size)
} else {
memclrNoHeapPointers(v, t.elem.size)
}
// Unlink the overflow buckets & clear key/value to help GC.
// Unlink the overflow buckets & clear key/value to help GC.
- if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
+ if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
// Preserve b.tophash because the evacuation
// state is maintained there.
if dst == src {
return
}
- if typ.kind&kindNoPointers == 0 {
+ if typ.ptrdata != 0 {
bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.size)
}
// There's a race here: if some other goroutine can write to
// dst and src point off bytes into the value and only copies size bytes.
//go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
- if writeBarrier.needed && typ.kind&kindNoPointers == 0 && size >= sys.PtrSize {
+ if writeBarrier.needed && typ.ptrdata != 0 && size >= sys.PtrSize {
// Pointer-align start address for bulk barrier.
adst, asrc, asize := dst, src, size
if frag := -off & (sys.PtrSize - 1); frag != 0 {
//
//go:nosplit
func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr) {
- if writeBarrier.needed && typ != nil && typ.kind&kindNoPointers == 0 && size >= sys.PtrSize {
+ if writeBarrier.needed && typ != nil && typ.ptrdata != 0 && size >= sys.PtrSize {
bulkBarrierPreWrite(uintptr(dst), uintptr(src), size)
}
memmove(dst, src, size)
return n
}
- // Note: No point in checking typ.kind&kindNoPointers here:
+ // Note: No point in checking typ.ptrdata here:
// compiler only emits calls to typedslicecopy for types with pointers,
// and growslice and reflect_typedslicecopy check for pointers
// before calling typedslicecopy.
//go:linkname reflect_typedslicecopy reflect.typedslicecopy
func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
- if elemType.kind&kindNoPointers != 0 {
+ if elemType.ptrdata == 0 {
n := dst.len
if n > src.len {
n = src.len
//
//go:nosplit
func typedmemclr(typ *_type, ptr unsafe.Pointer) {
- if typ.kind&kindNoPointers == 0 {
+ if typ.ptrdata != 0 {
bulkBarrierPreWrite(uintptr(ptr), 0, typ.size)
}
memclrNoHeapPointers(ptr, typ.size)
//go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
- if typ.kind&kindNoPointers == 0 {
+ if typ.ptrdata != 0 {
bulkBarrierPreWrite(uintptr(ptr), 0, size)
}
memclrNoHeapPointers(ptr, size)
// memclrHasPointers clears n bytes of typed memory starting at ptr.
// The caller must ensure that the type of the object at ptr has
-// pointers, usually by checking typ.kind&kindNoPointers. However, ptr
+// pointers, usually by checking typ.ptrdata. However, ptr
// does not have to point to the start of the allocation.
//
//go:nosplit
// The pointer bitmap is not maintained for allocations containing
// no pointers at all; any caller of bulkBarrierPreWrite must first
// make sure the underlying allocation contains pointers, usually
-// by checking typ.kind&kindNoPointers.
+// by checking typ.ptrdata.
//
// Callers must perform cgo checks if writeBarrier.cgo.
//
if uintptr(e.data) != base {
// As an implementation detail we allow to set finalizers for an inner byte
// of an object if it could come from tiny alloc (see mallocgc for details).
- if ot.elem == nil || ot.elem.kind&kindNoPointers == 0 || ot.elem.size >= maxTinySize {
+ if ot.elem == nil || ot.elem.ptrdata != 0 || ot.elem.size >= maxTinySize {
throw("runtime.SetFinalizer: pointer not at beginning of allocated block")
}
}
}
var p unsafe.Pointer
- if et.kind&kindNoPointers != 0 {
+ if et.ptrdata == 0 {
p = mallocgc(capmem, nil, false)
// The append() that calls growslice is going to overwrite from old.len to cap (which will be the new length).
// Only clear the part that will not be overwritten.
kindDirectIface = 1 << 5
kindGCProg = 1 << 6
- kindNoPointers = 1 << 7
kindMask = (1 << 5) - 1
)