import (
"encoding/binary"
"fmt"
- "internal/buildcfg"
"os"
"sort"
"strings"
"cmd/compile/internal/inline"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
+ "cmd/compile/internal/staticdata"
"cmd/compile/internal/typebits"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
// runtime interface and reflection data structures
var (
- signatmu sync.Mutex // protects signatset and signatslice
- signatset = make(map[*types.Type]struct{})
- signatslice []*types.Type
+ // protects signatset and signatslice
+ signatmu sync.Mutex
+ // Tracks which types need a runtime type descriptor.
+ signatset = make(map[*types.Type]struct{})
+ // Queue of types waiting to have their runtime type descriptors generated.
+ signatslice []typeAndStr
gcsymmu sync.Mutex // protects gcsymset and gcsymslice
gcsymset = make(map[*types.Type]struct{})
elemtype := t.Elem()
types.CalcSize(keytype)
types.CalcSize(elemtype)
- if keytype.Width > MAXKEYSIZE {
+ if keytype.Size() > MAXKEYSIZE {
keytype = types.NewPtr(keytype)
}
- if elemtype.Width > MAXELEMSIZE {
+ if elemtype.Size() > MAXELEMSIZE {
elemtype = types.NewPtr(elemtype)
}
if BUCKETSIZE < 8 {
base.Fatalf("bucket size too small for proper alignment")
}
- if keytype.Align > BUCKETSIZE {
+ if uint8(keytype.Alignment()) > BUCKETSIZE {
base.Fatalf("key align too big for %v", t)
}
- if elemtype.Align > BUCKETSIZE {
+ if uint8(elemtype.Alignment()) > BUCKETSIZE {
base.Fatalf("elem align too big for %v", t)
}
- if keytype.Width > MAXKEYSIZE {
+ if keytype.Size() > MAXKEYSIZE {
base.Fatalf("key size to large for %v", t)
}
- if elemtype.Width > MAXELEMSIZE {
+ if elemtype.Size() > MAXELEMSIZE {
base.Fatalf("elem size to large for %v", t)
}
- if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() {
+ if t.Key().Size() > MAXKEYSIZE && !keytype.IsPtr() {
base.Fatalf("key indirect incorrect for %v", t)
}
- if t.Elem().Width > MAXELEMSIZE && !elemtype.IsPtr() {
+ if t.Elem().Size() > MAXELEMSIZE && !elemtype.IsPtr() {
base.Fatalf("elem indirect incorrect for %v", t)
}
- if keytype.Width%int64(keytype.Align) != 0 {
+ if keytype.Size()%keytype.Alignment() != 0 {
base.Fatalf("key size not a multiple of key align for %v", t)
}
- if elemtype.Width%int64(elemtype.Align) != 0 {
+ if elemtype.Size()%elemtype.Alignment() != 0 {
base.Fatalf("elem size not a multiple of elem align for %v", t)
}
- if bucket.Align%keytype.Align != 0 {
+ if uint8(bucket.Alignment())%uint8(keytype.Alignment()) != 0 {
base.Fatalf("bucket align not multiple of key align %v", t)
}
- if bucket.Align%elemtype.Align != 0 {
+ if uint8(bucket.Alignment())%uint8(elemtype.Alignment()) != 0 {
base.Fatalf("bucket align not multiple of elem align %v", t)
}
- if keys.Offset%int64(keytype.Align) != 0 {
+ if keys.Offset%keytype.Alignment() != 0 {
base.Fatalf("bad alignment of keys in bmap for %v", t)
}
- if elems.Offset%int64(elemtype.Align) != 0 {
+ if elems.Offset%elemtype.Alignment() != 0 {
base.Fatalf("bad alignment of elems in bmap for %v", t)
}
// Double-check that overflow field is final memory in struct,
// with no padding at end.
- if overflow.Offset != bucket.Width-int64(types.PtrSize) {
+ if overflow.Offset != bucket.Size()-int64(types.PtrSize) {
base.Fatalf("bad offset of overflow in bmap for %v", t)
}
// The size of hmap should be 48 bytes on 64 bit
// and 28 bytes on 32 bit platforms.
- if size := int64(8 + 5*types.PtrSize); hmap.Width != size {
- base.Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size)
+ if size := int64(8 + 5*types.PtrSize); hmap.Size() != size {
+ base.Fatalf("hmap size not correct: got %d, want %d", hmap.Size(), size)
}
t.MapType().Hmap = hmap
hiter := types.NewStruct(types.NoPkg, fields)
hiter.SetNoalg(true)
types.CalcSize(hiter)
- if hiter.Width != int64(12*types.PtrSize) {
- base.Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*types.PtrSize)
+ if hiter.Size() != int64(12*types.PtrSize) {
+ base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), 12*types.PtrSize)
}
t.MapType().Hiter = hiter
hiter.StructType().Map = t
if f.Type.Recv() == nil {
base.Fatalf("receiver with no type on %v method %v %v", mt, f.Sym, f)
}
- if f.Nointerface() {
+ if f.Nointerface() && !t.IsFullyInstantiated() {
+ // Skip creating method wrappers if f is nointerface. But, if
+ // t is an instantiated type, we still have to call
+ // methodWrapper, because methodWrapper generates the actual
+ // generic method on the type as well.
continue
}
type_: typecheck.NewMethodType(f.Type, t),
mtype: typecheck.NewMethodType(f.Type, nil),
}
+ if f.Nointerface() {
+ // In the case of a nointerface method on an instantiated
+ // type, don't actually append the typeSig.
+ continue
+ }
ms = append(ms, sig)
}
// ptrToThis typeOff
// }
ot := 0
- ot = objw.Uintptr(lsym, ot, uint64(t.Width))
+ ot = objw.Uintptr(lsym, ot, uint64(t.Size()))
ot = objw.Uintptr(lsym, ot, uint64(ptrdata))
ot = objw.Uint32(lsym, ot, types.TypeHash(t))
ot = objw.Uint8(lsym, ot, tflag)
// runtime (and common sense) expects alignment to be a power of two.
- i := int(t.Align)
+ i := int(uint8(t.Alignment()))
if i == 0 {
i = 1
}
if i&(i-1) != 0 {
- base.Fatalf("invalid alignment %d for %v", t.Align, t)
+ base.Fatalf("invalid alignment %d for %v", uint8(t.Alignment()), t)
}
- ot = objw.Uint8(lsym, ot, t.Align) // align
- ot = objw.Uint8(lsym, ot, t.Align) // fieldAlign
+ ot = objw.Uint8(lsym, ot, uint8(t.Alignment())) // align
+ ot = objw.Uint8(lsym, ot, uint8(t.Alignment())) // fieldAlign
i = kinds[t.Kind()]
if types.IsDirectIface(t) {
func writeType(t *types.Type) *obj.LSym {
t = formalType(t)
- if t.IsUntyped() {
+ if t.IsUntyped() || t.HasTParam() {
base.Fatalf("writeType %v", t)
}
base.Fatalf("unresolved defined type: %v", tbase)
}
- dupok := 0
- if tbase.Sym() == nil || tbase.HasShape() { // TODO(mdempsky): Probably need DUPOK for instantiated types too.
- dupok = obj.DUPOK
- }
-
if !NeedEmit(tbase) {
if i := typecheck.BaseTypeIndex(t); i >= 0 {
lsym.Pkg = tbase.Sym().Pkg.Prefix
var flags uint32
// Note: flags must match maptype accessors in ../../../../runtime/type.go
// and maptype builder in ../../../../reflect/type.go:MapOf.
- if t.Key().Width > MAXKEYSIZE {
+ if t.Key().Size() > MAXKEYSIZE {
ot = objw.Uint8(lsym, ot, uint8(types.PtrSize))
flags |= 1 // indirect key
} else {
- ot = objw.Uint8(lsym, ot, uint8(t.Key().Width))
+ ot = objw.Uint8(lsym, ot, uint8(t.Key().Size()))
}
- if t.Elem().Width > MAXELEMSIZE {
+ if t.Elem().Size() > MAXELEMSIZE {
ot = objw.Uint8(lsym, ot, uint8(types.PtrSize))
flags |= 2 // indirect value
} else {
- ot = objw.Uint8(lsym, ot, uint8(t.Elem().Width))
+ ot = objw.Uint8(lsym, ot, uint8(t.Elem().Size()))
}
- ot = objw.Uint16(lsym, ot, uint16(MapBucketType(t).Width))
+ ot = objw.Uint16(lsym, ot, uint16(MapBucketType(t).Size()))
if types.IsReflexive(t.Key()) {
flags |= 4 // reflexive key
}
}
ot = dextratypeData(lsym, ot, t)
- objw.Global(lsym, int32(ot), int16(dupok|obj.RODATA))
+ objw.Global(lsym, int32(ot), int16(obj.DUPOK|obj.RODATA))
+ // Note: DUPOK is required to ensure that we don't end up with more
+ // than one type descriptor for a given type.
// The linker will leave a table of all the typelinks for
// types in the binary, so the runtime can find them.
}
if _, ok := signatset[t]; !ok {
signatset[t] = struct{}{}
- signatslice = append(signatslice, t)
+ signatslice = append(signatslice, typeAndStr{t: t, short: types.TypeSymName(t), regular: t.String()})
}
}
func WriteRuntimeTypes() {
- // Process signatset. Use a loop, as writeType adds
- // entries to signatset while it is being processed.
- signats := make([]typeAndStr, len(signatslice))
+ // Process signatslice. Use a loop, as writeType adds
+ // entries to signatslice while it is being processed.
for len(signatslice) > 0 {
- signats = signats[:0]
- // Transfer entries to a slice and sort, for reproducible builds.
- for _, t := range signatslice {
- signats = append(signats, typeAndStr{t: t, short: types.TypeSymName(t), regular: t.String()})
- delete(signatset, t)
- }
- signatslice = signatslice[:0]
+ signats := signatslice
+ // Sort for reproducible builds.
sort.Sort(typesByString(signats))
for _, ts := range signats {
t := ts.t
writeType(types.NewPtr(t))
}
}
+ signatslice = signatslice[len(signats):]
}
// Emit GC data symbols.
if base.Flag.MSan {
dimportpath(types.NewPkg("runtime/msan", ""))
}
+ if base.Flag.ASan {
+ dimportpath(types.NewPkg("runtime/asan", ""))
+ }
dimportpath(types.NewPkg("main", ""))
}
// For non-trivial arrays, the program describes the full t.Width size.
func dgcprog(t *types.Type, write bool) (*obj.LSym, int64) {
types.CalcSize(t)
- if t.Width == types.BADWIDTH {
+ if t.Size() == types.BADWIDTH {
base.Fatalf("dgcprog: %v badwidth", t)
}
lsym := TypeLinksymPrefix(".gcprog", t)
p.emit(t, 0)
offset := p.w.BitIndex() * int64(types.PtrSize)
p.end()
- if ptrdata := types.PtrDataSize(t); offset < ptrdata || offset > t.Width {
- base.Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
+ if ptrdata := types.PtrDataSize(t); offset < ptrdata || offset > t.Size() {
+ base.Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Size())
}
return lsym, offset
}
if !t.HasPointers() {
return
}
- if t.Width == int64(types.PtrSize) {
+ if t.Size() == int64(types.PtrSize) {
p.w.Ptr(offset / int64(types.PtrSize))
return
}
elem = elem.Elem()
}
- if !p.w.ShouldRepeat(elem.Width/int64(types.PtrSize), count) {
+ if !p.w.ShouldRepeat(elem.Size()/int64(types.PtrSize), count) {
// Cheaper to just emit the bits.
for i := int64(0); i < count; i++ {
- p.emit(elem, offset+i*elem.Width)
+ p.emit(elem, offset+i*elem.Size())
}
return
}
p.emit(elem, offset)
- p.w.ZeroUntil((offset + elem.Width) / int64(types.PtrSize))
- p.w.Repeat(elem.Width/int64(types.PtrSize), count-1)
+ p.w.ZeroUntil((offset + elem.Size()) / int64(types.PtrSize))
+ p.w.Repeat(elem.Size()/int64(types.PtrSize), count-1)
case types.TSTRUCT:
for _, t1 := range t.Fields().Slice() {
// Local defined type; our responsibility.
return true
- case base.Ctxt.Pkgpath == "runtime" && (sym.Pkg == types.BuiltinPkg || sym.Pkg == ir.Pkgs.Unsafe):
+ case base.Ctxt.Pkgpath == "runtime" && (sym.Pkg == types.BuiltinPkg || sym.Pkg == types.UnsafePkg):
// Package runtime is responsible for including code for builtin
// types (predeclared and package unsafe).
return true
// We don't need a dictionary if we are reaching a method (possibly via an
// embedded field) which is an interface method.
if !types.IsInterfaceMethod(method.Type) {
- rcvr1 := rcvr
- if rcvr1.IsPtr() {
- rcvr1 = rcvr.Elem()
- }
+ rcvr1 := deref(rcvr)
if len(rcvr1.RParams()) > 0 {
// If rcvr has rparams, remember method as generic, which
// means we need to add a dictionary to the wrapper.
generic = true
- targs := rcvr1.RParams()
- for _, t := range targs {
- if t.HasShape() {
- base.Fatalf("method on type instantiated with shapes targ:%+v rcvr:%+v", t, rcvr)
- }
+ if rcvr.HasShape() {
+ base.Fatalf("method on type instantiated with shapes, rcvr:%+v", rcvr)
}
}
}
return lsym
}
+ methodrcvr := method.Type.Recv().Type
// For generic methods, we need to generate the wrapper even if the receiver
// types are identical, because we want to add the dictionary.
- if !generic && types.Identical(rcvr, method.Type.Recv().Type) {
+ if !generic && types.Identical(rcvr, methodrcvr) {
return lsym
}
nthis := ir.AsNode(tfn.Type().Recv().Nname)
- methodrcvr := method.Type.Recv().Type
indirect := rcvr.IsPtr() && rcvr.Elem() == methodrcvr
// generate nil pointer check for better error
// the TOC to the appropriate value for that module. But if it returns
// directly to the wrapper's caller, nothing will reset it to the correct
// value for that function.
- //
- // Disable tailcall for RegabiArgs for now. The IR does not connect the
- // arguments with the OTAILCALL node, and the arguments are not marshaled
- // correctly.
- if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) && !buildcfg.Experiment.RegabiArgs && !generic {
- // generate tail call: adjust pointer receiver and jump to embedded method.
- left := dot.X // skip final .M
- if !left.Type().IsPtr() {
- left = typecheck.NodAddr(left)
- }
- as := ir.NewAssignStmt(base.Pos, nthis, typecheck.ConvNop(left, rcvr))
- fn.Body.Append(as)
- fn.Body.Append(ir.NewTailCallStmt(base.Pos, method.Nname.(*ir.Name)))
+ if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) && !generic {
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
+ call.Args = ir.ParamNames(tfn.Type())
+ call.IsDDD = tfn.Type().IsVariadic()
+ fn.Body.Append(ir.NewTailCallStmt(base.Pos, call))
} else {
fn.SetWrapper(true) // ignore frame for panic+recover matching
var call *ir.CallExpr
if generic && dot.X != nthis {
- // TODO: for now, we don't try to generate dictionary wrappers for
- // any methods involving embedded fields, because we're not
- // generating the needed dictionaries in instantiateMethods.
+ // If there is embedding involved, then we should do the
+ // normal non-generic embedding wrapper below, which calls
+ // the wrapper for the real receiver type using dot as an
+ // argument. There is no need for generic processing (adding
+ // a dictionary) for this wrapper.
generic = false
}
if generic {
- var args []ir.Node
- var targs []*types.Type
- if rcvr.IsPtr() {
- targs = rcvr.Elem().RParams()
- } else {
- targs = rcvr.RParams()
- }
- if strings.HasPrefix(ir.MethodSym(orig, method.Sym).Name, ".inst.") {
- fmt.Printf("%s\n", ir.MethodSym(orig, method.Sym).Name)
- panic("multiple .inst.")
- }
+ targs := deref(rcvr).RParams()
// The wrapper for an auto-generated pointer/non-pointer
// receiver method should share the same dictionary as the
// corresponding original (user-written) method.
baseOrig := orig
- if baseOrig.IsPtr() && !method.Type.Recv().Type.IsPtr() {
+ if baseOrig.IsPtr() && !methodrcvr.IsPtr() {
baseOrig = baseOrig.Elem()
- } else if !baseOrig.IsPtr() && method.Type.Recv().Type.IsPtr() {
+ } else if !baseOrig.IsPtr() && methodrcvr.IsPtr() {
baseOrig = types.NewPtr(baseOrig)
}
- args = append(args, getDictionary(ir.MethodSym(baseOrig, method.Sym), targs))
+ args := []ir.Node{getDictionary(ir.MethodSym(baseOrig, method.Sym), targs)}
if indirect {
args = append(args, ir.NewStarExpr(base.Pos, dot.X))
} else if methodrcvr.IsPtr() && methodrcvr.Elem() == dot.X.Type() {
// Target method uses shaped names.
targs2 := make([]*types.Type, len(targs))
for i, t := range targs {
- targs2[i] = typecheck.Shapify(t)
+ targs2[i] = typecheck.Shapify(t, i)
}
targs = targs2
- sym := typecheck.MakeInstName(ir.MethodSym(methodrcvr, method.Sym), targs, true)
+ sym := typecheck.MakeFuncInstSym(ir.MethodSym(methodrcvr, method.Sym), targs, false, true)
if sym.Def == nil {
// Currently we make sure that we have all the instantiations
// we need by generating them all in ../noder/stencil.go:instantiateMethods
}
dot := n.X.(*ir.SelectorExpr)
ityp := dot.X.Type()
+ if ityp.HasShape() {
+ // Here we're calling a method on a generic interface. Something like:
+ //
+ // type I[T any] interface { foo() T }
+ // func f[T any](x I[T]) {
+ // ... = x.foo()
+ // }
+ // f[int](...)
+ // f[string](...)
+ //
+ // In this case, in f we're calling foo on a generic interface.
+ // Which method could that be? Normally we could match the method
+ // both by name and by type. But in this case we don't really know
+ // the type of the method we're calling. It could be func()int
+ // or func()string. So we match on just the function name, instead
+ // of both the name and the type used for the non-generic case below.
+ // TODO: instantiations at least know the shape of the instantiated
+ // type, and the linker could do more complicated matching using
+ // some sort of fuzzy shape matching. For now, only use the name
+ // of the method for matching.
+ r := obj.Addrel(ir.CurFunc.LSym)
+ // We use a separate symbol just to tell the linker the method name.
+ // (The symbol itself is not needed in the final binary.)
+ r.Sym = staticdata.StringSym(src.NoXPos, dot.Sel.Name)
+ r.Type = objabi.R_USEGENERICIFACEMETHOD
+ return
+ }
+
tsym := TypeLinksym(ityp)
r := obj.Addrel(ir.CurFunc.LSym)
r.Sym = tsym
r.Type = objabi.R_USEIFACEMETHOD
}
-// MarkUsedIfaceMethodIndex marks that that method number ix (in the AllMethods list)
-// of interface type ityp is used, and should be attached to lsym.
-func MarkUsedIfaceMethodIndex(lsym *obj.LSym, ityp *types.Type, ix int) {
- tsym := TypeLinksym(ityp)
- r := obj.Addrel(lsym)
- r.Sym = tsym
- r.Add = InterfaceMethodOffset(ityp, int64(ix))
- r.Type = objabi.R_USEIFACEMETHOD
-}
-
// getDictionary returns the dictionary for the given named generic function
// or method, with the given type arguments.
func getDictionary(gf *types.Sym, targs []*types.Type) ir.Node {
}
}
- sym := typecheck.MakeDictName(gf, targs, true)
+ sym := typecheck.MakeDictSym(gf, targs, true)
- // Initialize the dictionary, if we haven't yet already.
+ // Dictionary should already have been generated by instantiateMethods().
if lsym := sym.Linksym(); len(lsym.P) == 0 {
base.Fatalf("Dictionary should have already been generated: %s.%s", sym.Pkg.Path, sym.Name)
}
- // Make a node referencing the dictionary symbol.
- n := typecheck.NewName(sym)
- n.SetType(types.Types[types.TUINTPTR]) // should probably be [...]uintptr, but doesn't really matter
- n.SetTypecheck(1)
- n.Class = ir.PEXTERN
- sym.Def = n
+ // Make (or reuse) a node referencing the dictionary symbol.
+ var n *ir.Name
+ if sym.Def != nil {
+ n = sym.Def.(*ir.Name)
+ } else {
+ n = typecheck.NewName(sym)
+ n.SetType(types.Types[types.TUINTPTR]) // should probably be [...]uintptr, but doesn't really matter
+ n.SetTypecheck(1)
+ n.Class = ir.PEXTERN
+ sym.Def = n
+ }
// Return the address of the dictionary.
np := typecheck.NodAddr(n)
np.SetTypecheck(1)
return np
}
+
+// deref returns the element type if t is a pointer type;
+// otherwise it returns t unchanged.
+func deref(t *types.Type) *types.Type {
+ if t.IsPtr() {
+ return t.Elem()
+ }
+ return t
+}