// To print out a new table, run: go test -run Formats -v.
var knownFormats = map[string]string{
"*bytes.Buffer %s": "",
- "*cmd/compile/internal/gc.Field %p": "",
- "*cmd/compile/internal/gc.Field %v": "",
"*cmd/compile/internal/gc.Mpflt %v": "",
"*cmd/compile/internal/gc.Mpint %v": "",
"*cmd/compile/internal/gc.Node %#v": "",
"*cmd/compile/internal/gc.Node %j": "",
"*cmd/compile/internal/gc.Node %p": "",
"*cmd/compile/internal/gc.Node %v": "",
- "*cmd/compile/internal/gc.Sym %+v": "",
- "*cmd/compile/internal/gc.Sym %-v": "",
- "*cmd/compile/internal/gc.Sym %0S": "",
- "*cmd/compile/internal/gc.Sym %S": "",
- "*cmd/compile/internal/gc.Sym %p": "",
- "*cmd/compile/internal/gc.Sym %v": "",
- "*cmd/compile/internal/gc.Type %#v": "",
- "*cmd/compile/internal/gc.Type %+v": "",
- "*cmd/compile/internal/gc.Type %-S": "",
- "*cmd/compile/internal/gc.Type %0S": "",
- "*cmd/compile/internal/gc.Type %L": "",
- "*cmd/compile/internal/gc.Type %S": "",
- "*cmd/compile/internal/gc.Type %p": "",
- "*cmd/compile/internal/gc.Type %v": "",
"*cmd/compile/internal/ssa.Block %s": "",
"*cmd/compile/internal/ssa.Block %v": "",
"*cmd/compile/internal/ssa.Func %s": "",
"*cmd/compile/internal/ssa.Value %s": "",
"*cmd/compile/internal/ssa.Value %v": "",
"*cmd/compile/internal/ssa.sparseTreeMapEntry %v": "",
+ "*cmd/compile/internal/types.Field %p": "",
+ "*cmd/compile/internal/types.Field %v": "",
+ "*cmd/compile/internal/types.Sym %+v": "",
+ "*cmd/compile/internal/types.Sym %-v": "",
+ "*cmd/compile/internal/types.Sym %0S": "",
+ "*cmd/compile/internal/types.Sym %S": "",
+ "*cmd/compile/internal/types.Sym %p": "",
+ "*cmd/compile/internal/types.Sym %v": "",
+ "*cmd/compile/internal/types.Type %#v": "",
+ "*cmd/compile/internal/types.Type %+v": "",
+ "*cmd/compile/internal/types.Type %-S": "",
+ "*cmd/compile/internal/types.Type %0S": "",
+ "*cmd/compile/internal/types.Type %L": "",
+ "*cmd/compile/internal/types.Type %S": "",
+ "*cmd/compile/internal/types.Type %p": "",
+ "*cmd/compile/internal/types.Type %v": "",
"*cmd/internal/obj.Addr %v": "",
"*cmd/internal/obj.LSym %v": "",
"*cmd/internal/obj.Prog %s": "",
"cmd/compile/internal/gc.Class %d": "",
"cmd/compile/internal/gc.Ctype %d": "",
"cmd/compile/internal/gc.Ctype %v": "",
- "cmd/compile/internal/gc.EType %d": "",
- "cmd/compile/internal/gc.EType %s": "",
- "cmd/compile/internal/gc.EType %v": "",
"cmd/compile/internal/gc.Level %d": "",
"cmd/compile/internal/gc.Level %v": "",
"cmd/compile/internal/gc.Node %#v": "",
"cmd/compile/internal/syntax.token %d": "",
"cmd/compile/internal/syntax.token %q": "",
"cmd/compile/internal/syntax.token %s": "",
+ "cmd/compile/internal/types.EType %d": "",
+ "cmd/compile/internal/types.EType %s": "",
+ "cmd/compile/internal/types.EType %v": "",
"cmd/internal/src.Pos %s": "",
"cmd/internal/src.Pos %v": "",
"cmd/internal/src.XPos %v": "",
package gc
-import "fmt"
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+)
// AlgKind describes the kind of algorithms used for comparing and
// hashing a Type.
)
// IsComparable reports whether t is a comparable type.
-func (t *Type) IsComparable() bool {
+func IsComparable(t *types.Type) bool {
a, _ := algtype1(t)
return a != ANOEQ
}
// IsRegularMemory reports whether t can be compared/hashed as regular memory.
-func (t *Type) IsRegularMemory() bool {
+func IsRegularMemory(t *types.Type) bool {
a, _ := algtype1(t)
return a == AMEM
}
// IncomparableField returns an incomparable Field of struct Type t, if any.
-func (t *Type) IncomparableField() *Field {
+func IncomparableField(t *types.Type) *types.Field {
for _, f := range t.FieldSlice() {
- if !f.Type.IsComparable() {
+ if !IsComparable(f.Type) {
return f
}
}
// algtype is like algtype1, except it returns the fixed-width AMEMxx variants
// instead of the general AMEM kind when possible.
-func algtype(t *Type) AlgKind {
+func algtype(t *types.Type) AlgKind {
a, _ := algtype1(t)
if a == AMEM {
switch t.Width {
// algtype1 returns the AlgKind used for comparing and hashing Type t.
// If it returns ANOEQ, it also returns the component type of t that
// makes it incomparable.
-func algtype1(t *Type) (AlgKind, *Type) {
+func algtype1(t *types.Type) (AlgKind, *types.Type) {
if t.Broke() {
return AMEM, nil
}
}
// Generate a helper function to compute the hash of a value of type t.
-func genhash(sym *Sym, t *Type) {
+func genhash(sym *types.Sym, t *types.Type) {
if Debug['r'] != 0 {
fmt.Printf("genhash %v %v\n", sym, t)
}
tfn := nod(OTFUNC, nil, nil)
fn.Func.Nname.Name.Param.Ntype = tfn
- n := namedfield("p", typPtr(t))
+ n := namedfield("p", types.NewPtr(t))
tfn.List.Append(n)
np := n.Left
- n = namedfield("h", Types[TUINTPTR])
+ n = namedfield("h", types.Types[TUINTPTR])
tfn.List.Append(n)
nh := n.Left
- n = anonfield(Types[TUINTPTR]) // return value
+ n = anonfield(types.Types[TUINTPTR]) // return value
tfn.Rlist.Append(n)
funchdr(fn)
default:
Fatalf("genhash %v", t)
- case TARRAY:
+ case types.TARRAY:
// An array of pure memory would be handled by the
// standard algorithm, so the element type must not be
// pure memory.
n := nod(ORANGE, nil, nod(OIND, np, nil))
ni := newname(lookup("i"))
- ni.Type = Types[TINT]
+ ni.Type = types.Types[TINT]
n.List.Set1(ni)
n.SetColas(true)
colasdefn(n.List.Slice(), n)
fn.Nbody.Append(n)
- case TSTRUCT:
+ case types.TSTRUCT:
// Walk the struct using memhash for runs of AMEM
// and calling specific hash functions for the others.
for i, fields := 0, t.FieldSlice(); i < len(fields); {
}
// Hash non-memory fields with appropriate hash function.
- if !f.Type.IsRegularMemory() {
+ if !IsRegularMemory(f.Type) {
hashel := hashfor(f.Type)
call := nod(OCALL, hashel, nil)
nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
safemode = old_safemode
}
-func hashfor(t *Type) *Node {
- var sym *Sym
+func hashfor(t *types.Type) *Node {
+ var sym *types.Sym
switch a, _ := algtype1(t); a {
case AMEM:
n := newname(sym)
n.Class = PFUNC
tfn := nod(OTFUNC, nil, nil)
- tfn.List.Append(anonfield(typPtr(t)))
- tfn.List.Append(anonfield(Types[TUINTPTR]))
- tfn.Rlist.Append(anonfield(Types[TUINTPTR]))
+ tfn.List.Append(anonfield(types.NewPtr(t)))
+ tfn.List.Append(anonfield(types.Types[TUINTPTR]))
+ tfn.Rlist.Append(anonfield(types.Types[TUINTPTR]))
tfn = typecheck(tfn, Etype)
n.Type = tfn.Type
return n
// geneq generates a helper function to
// check equality of two values of type t.
-func geneq(sym *Sym, t *Type) {
+func geneq(sym *types.Sym, t *types.Type) {
if Debug['r'] != 0 {
fmt.Printf("geneq %v %v\n", sym, t)
}
tfn := nod(OTFUNC, nil, nil)
fn.Func.Nname.Name.Param.Ntype = tfn
- n := namedfield("p", typPtr(t))
+ n := namedfield("p", types.NewPtr(t))
tfn.List.Append(n)
np := n.Left
- n = namedfield("q", typPtr(t))
+ n = namedfield("q", types.NewPtr(t))
tfn.List.Append(n)
nq := n.Left
- n = anonfield(Types[TBOOL])
+ n = anonfield(types.Types[TBOOL])
tfn.Rlist.Append(n)
funchdr(fn)
nrange := nod(ORANGE, nil, nod(OIND, np, nil))
ni := newname(lookup("i"))
- ni.Type = Types[TINT]
+ ni.Type = types.Types[TINT]
nrange.List.Set1(ni)
nrange.SetColas(true)
colasdefn(nrange.List.Slice(), nrange)
}
// Compare non-memory fields with field equality.
- if !f.Type.IsRegularMemory() {
+ if !IsRegularMemory(f.Type) {
and(eqfield(np, nq, f.Sym))
i++
continue
// eqfield returns the node
// p.field == q.field
-func eqfield(p *Node, q *Node, field *Sym) *Node {
+func eqfield(p *Node, q *Node, field *types.Sym) *Node {
nx := nodSym(OXDOT, p, field)
ny := nodSym(OXDOT, q, field)
ne := nod(OEQ, nx, ny)
// eqmem returns the node
// memequal(&p.field, &q.field [, size])
-func eqmem(p *Node, q *Node, field *Sym, size int64) *Node {
+func eqmem(p *Node, q *Node, field *types.Sym, size int64) *Node {
nx := nod(OADDR, nodSym(OXDOT, p, field), nil)
nx.Etype = 1 // does not escape
ny := nod(OADDR, nodSym(OXDOT, q, field), nil)
return call
}
-func eqmemfunc(size int64, t *Type) (fn *Node, needsize bool) {
+func eqmemfunc(size int64, t *types.Type) (fn *Node, needsize bool) {
switch size {
default:
fn = syslook("memequal")
// t is the parent struct type, and start is the field index at which to start the run.
// size is the length in bytes of the memory included in the run.
// next is the index just after the end of the memory run.
-func memrun(t *Type, start int) (size int64, next int) {
+func memrun(t *types.Type, start int) (size int64, next int) {
next = start
for {
next++
break
}
// Also, stop before a blank or non-memory field.
- if f := t.Field(next); isblanksym(f.Sym) || !f.Type.IsRegularMemory() {
+ if f := t.Field(next); isblanksym(f.Sym) || !IsRegularMemory(f.Type) {
break
}
}
// ispaddedfield reports whether the i'th field of struct type t is followed
// by padding.
-func ispaddedfield(t *Type, i int) bool {
+func ispaddedfield(t *types.Type, i int) bool {
if !t.IsStruct() {
Fatalf("ispaddedfield called non-struct %v", t)
}
package gc
import (
+ "cmd/compile/internal/types"
"sort"
)
// expandiface computes the method set for interface type t by
// expanding embedded interfaces.
-func expandiface(t *Type) {
- var fields []*Field
+func expandiface(t *types.Type) {
+ var fields []*types.Field
for _, m := range t.Methods().Slice() {
if m.Sym != nil {
fields = append(fields, m)
}
if !m.Type.IsInterface() {
- yyerrorl(m.Nname.Pos, "interface contains embedded non-interface %v", m.Type)
+ yyerrorl(asNode(m.Nname).Pos, "interface contains embedded non-interface %v", m.Type)
m.SetBroke(true)
t.SetBroke(true)
// Add to fields so that error messages
// (including broken ones, if any) and add to t's
// method set.
for _, t1 := range m.Type.Fields().Slice() {
- f := newField()
+ f := types.NewField()
f.Type = t1.Type
f.SetBroke(t1.Broke())
f.Sym = t1.Sym
// Access fields directly to avoid recursively calling dowidth
// within Type.Fields().
- t.Extra.(*InterType).fields.Set(fields)
+ t.Extra.(*types.InterType).Fields.Set(fields)
}
-func offmod(t *Type) {
+func offmod(t *types.Type) {
o := int32(0)
for _, f := range t.Fields().Slice() {
f.Offset = int64(o)
}
}
-func widstruct(errtype *Type, t *Type, o int64, flag int) int64 {
+func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 {
starto := o
maxalign := int32(flag)
if maxalign < 1 {
o = Rnd(o, int64(f.Type.Align))
}
f.Offset = o
- if f.Nname != nil {
+ if asNode(f.Nname) != nil {
// addrescapes has similar code to update these offsets.
// Usually addrescapes runs after widstruct,
// in which case we could drop this,
// NOTE(rsc): This comment may be stale.
// It's possible the ordering has changed and this is
// now the common case. I'm not sure.
- if f.Nname.Name.Param.Stackcopy != nil {
- f.Nname.Name.Param.Stackcopy.Xoffset = o
- f.Nname.Xoffset = 0
+ if asNode(f.Nname).Name.Param.Stackcopy != nil {
+ asNode(f.Nname).Name.Param.Stackcopy.Xoffset = o
+ asNode(f.Nname).Xoffset = 0
} else {
- f.Nname.Xoffset = o
+ asNode(f.Nname).Xoffset = o
}
}
return o
}
-func dowidth(t *Type) {
+func dowidth(t *types.Type) {
if Widthptr == 0 {
Fatalf("dowidth without betypeinit")
}
if t.Width == -2 {
if !t.Broke() {
t.SetBroke(true)
- yyerrorl(t.nod.Pos, "invalid recursive type %v", t)
+ yyerrorl(asNode(t.Nod).Pos, "invalid recursive type %v", t)
}
t.Width = 0
defercalc++
lno := lineno
- if t.nod != nil {
- lineno = t.nod.Pos
+ if asNode(t.Nod) != nil {
+ lineno = asNode(t.Nod).Pos
}
t.Width = -2
// make fake type to check later to
// trigger channel argument check.
- t1 := typChanArgs(t)
+ t1 := types.NewChanArgs(t)
checkwidth(t1)
case TCHANARGS:
if t.Elem() == nil {
break
}
- if t.isDDDArray() {
+ if t.IsDDDArray() {
if !t.Broke() {
yyerror("use of [...] array outside of array literal")
t.SetBroke(true)
// make fake type to check later to
// trigger function argument computation.
case TFUNC:
- t1 := typFuncArgs(t)
+ t1 := types.NewFuncArgs(t)
checkwidth(t1)
w = int64(Widthptr) // width of func type is pointer
w = widstruct(t1, t1.Recvs(), 0, 0)
w = widstruct(t1, t1.Params(), w, Widthreg)
w = widstruct(t1, t1.Results(), w, Widthreg)
- t1.Extra.(*FuncType).Argwid = w
+ t1.Extra.(*types.FuncType).Argwid = w
if w%int64(Widthreg) != 0 {
Warn("bad type %v %d\n", t1, w)
}
// is needed immediately. checkwidth makes sure the
// size is evaluated eventually.
-var deferredTypeStack []*Type
+var deferredTypeStack []*types.Type
-func checkwidth(t *Type) {
+func checkwidth(t *types.Type) {
if t == nil {
return
}
import (
"bufio"
"bytes"
+ "cmd/compile/internal/types"
"encoding/binary"
"fmt"
"math/big"
// object -> index maps, indexed in order of serialization
strIndex map[string]int
- pkgIndex map[*Pkg]int
- typIndex map[*Type]int
+ pkgIndex map[*types.Pkg]int
+ typIndex map[*types.Type]int
funcList []*Func
// position encoding
p := exporter{
out: out,
strIndex: map[string]int{"": 0}, // empty string is mapped to 0
- pkgIndex: make(map[*Pkg]int),
- typIndex: make(map[*Type]int),
+ pkgIndex: make(map[*types.Pkg]int),
+ typIndex: make(map[*types.Type]int),
posInfoFormat: true,
trace: trace,
}
p.tracef("\n")
}
- if sym.isAlias() {
+ if IsAlias(sym) {
Fatalf("exporter: unexpected type alias %v in inlined function body", sym)
}
return p.written
}
-func (p *exporter) pkg(pkg *Pkg) {
+func (p *exporter) pkg(pkg *types.Pkg) {
if pkg == nil {
Fatalf("exporter: unexpected nil pkg")
}
p.string(pkg.Path)
}
-func unidealType(typ *Type, val Val) *Type {
+func unidealType(typ *types.Type, val Val) *types.Type {
// Untyped (ideal) constants get their own type. This decouples
// the constant type from the encoding of the constant value.
if typ == nil || typ.IsUntyped() {
return typ
}
-func (p *exporter) obj(sym *Sym) {
+func (p *exporter) obj(sym *types.Sym) {
// Exported objects may be from different packages because they
// may be re-exported via an exported alias or as dependencies in
// exported inlined function bodies. Thus, exported object names
// pulled in via inlined function bodies. In that case the package
// qualifier is not needed. Possible space optimization.)
- n := sym.Def
+ n := asNode(sym.Def)
switch n.Op {
case OLITERAL:
// constant
Fatalf("exporter: export of incomplete type %v", sym)
}
- if sym.isAlias() {
+ if IsAlias(sym) {
p.tag(aliasTag)
p.pos(n)
p.qualifiedName(sym)
p.pos(n)
p.qualifiedName(sym)
- sig := sym.Def.Type
- inlineable := isInlineable(sym.Def)
+ sig := asNode(sym.Def).Type
+ inlineable := isInlineable(asNode(sym.Def))
p.paramList(sig.Params(), inlineable)
p.paramList(sig.Results(), inlineable)
var f *Func
if inlineable {
- f = sym.Def.Func
+ f = asNode(sym.Def).Func
reexportdeplist(f.Inl)
}
p.funcList = append(p.funcList, f)
p.tag(varTag)
p.pos(n)
p.qualifiedName(sym)
- p.typ(sym.Def.Type)
+ p.typ(asNode(sym.Def).Type)
}
default:
return false
}
-var errorInterface *Type // lazily initialized
+var errorInterface *types.Type // lazily initialized
-func (p *exporter) typ(t *Type) {
+func (p *exporter) typ(t *types.Type) {
if t == nil {
Fatalf("exporter: nil type")
}
// write underlying type
orig := t.Orig
- if orig == errortype {
+ if orig == types.Errortype {
// The error type is the only predeclared type which has
// a composite underlying type. When we encode that type,
// make sure to encode the underlying interface rather than
// sort methods for reproducible export format
// TODO(gri) Determine if they are already sorted
// in which case we can drop this step.
- var methods []*Field
+ var methods []*types.Field
for _, m := range t.Methods().Slice() {
methods = append(methods, m)
}
Fatalf("invalid symbol name: %s (%v)", m.Sym.Name, m.Sym)
}
- p.pos(m.Nname)
+ p.pos(asNode(m.Nname))
p.fieldSym(m.Sym, false)
sig := m.Type
- mfn := sig.Nname()
+ mfn := asNode(sig.FuncType().Nname)
inlineable := isInlineable(mfn)
p.paramList(sig.Recvs(), inlineable)
// otherwise we have a type literal
switch t.Etype {
case TARRAY:
- if t.isDDDArray() {
+ if t.IsDDDArray() {
Fatalf("array bounds should be known at export time: %v", t)
}
p.tag(arrayTag)
}
}
-func (p *exporter) qualifiedName(sym *Sym) {
+func (p *exporter) qualifiedName(sym *types.Sym) {
p.string(sym.Name)
p.pkg(sym.Pkg)
}
-func (p *exporter) fieldList(t *Type) {
+func (p *exporter) fieldList(t *types.Type) {
if p.trace && t.NumFields() > 0 {
p.tracef("fields {>")
defer p.tracef("<\n} ")
}
}
-func (p *exporter) field(f *Field) {
- p.pos(f.Nname)
+func (p *exporter) field(f *types.Field) {
+ p.pos(asNode(f.Nname))
p.fieldName(f)
p.typ(f.Type)
p.string(f.Note)
}
-func (p *exporter) methodList(t *Type) {
- var embeddeds, methods []*Field
+func (p *exporter) methodList(t *types.Type) {
+ var embeddeds, methods []*types.Field
for _, m := range t.Methods().Slice() {
if m.Sym != nil {
if p.trace {
p.tracef("\n")
}
- p.pos(m.Nname)
+ p.pos(asNode(m.Nname))
p.typ(m.Type)
}
if p.trace && len(embeddeds) > 0 {
}
}
-func (p *exporter) method(m *Field) {
- p.pos(m.Nname)
+func (p *exporter) method(m *types.Field) {
+ p.pos(asNode(m.Nname))
p.methodName(m.Sym)
p.paramList(m.Type.Params(), false)
p.paramList(m.Type.Results(), false)
}
-func (p *exporter) fieldName(t *Field) {
+func (p *exporter) fieldName(t *types.Field) {
name := t.Sym.Name
if t.Embedded != 0 {
// anonymous field - we distinguish between 3 cases:
}
// methodName is like qualifiedName but it doesn't record the package for exported names.
-func (p *exporter) methodName(sym *Sym) {
+func (p *exporter) methodName(sym *types.Sym) {
p.string(sym.Name)
if !exportname(sym.Name) {
p.pkg(sym.Pkg)
}
}
-func basetypeName(t *Type) string {
+func basetypeName(t *types.Type) string {
s := t.Sym
if s == nil && t.IsPtr() {
s = t.Elem().Sym // deref
return "" // unnamed type
}
-func (p *exporter) paramList(params *Type, numbered bool) {
+func (p *exporter) paramList(params *types.Type, numbered bool) {
if !params.IsFuncArgStruct() {
Fatalf("exporter: parameter list expected")
}
}
}
-func (p *exporter) param(q *Field, n int, numbered bool) {
+func (p *exporter) param(q *types.Field, n int, numbered bool) {
t := q.Type
if q.Isddd() {
// create a fake type to encode ... just for the p.typ call
- t = typDDDField(t.Elem())
+ t = types.NewDDDField(t.Elem())
}
p.typ(t)
if n > 0 {
p.string(q.Note)
}
-func parName(f *Field, numbered bool) string {
+func parName(f *types.Field, numbered bool) string {
s := f.Sym
if s == nil {
return ""
// Take the name from the original, lest we substituted it with ~r%d or ~b%d.
// ~r%d is a (formerly) unnamed result.
- if f.Nname != nil {
- if f.Nname.Orig != nil {
- s = f.Nname.Orig.Sym
+ if asNode(f.Nname) != nil {
+ if asNode(f.Nname).Orig != nil {
+ s = asNode(f.Nname).Orig.Sym
if s != nil && s.Name[0] == '~' {
if s.Name[1] == 'r' { // originally an unnamed result
return "" // s = nil
// from other names in their context after inlining (i.e., the parameter numbering
// is a form of parameter rewriting). See issue 4326 for an example and test case.
if numbered {
- if !strings.Contains(name, "·") && f.Nname != nil && f.Nname.Name != nil && f.Nname.Name.Vargen > 0 {
- name = fmt.Sprintf("%s·%d", name, f.Nname.Name.Vargen) // append Vargen
+ if !strings.Contains(name, "·") && asNode(f.Nname) != nil && asNode(f.Nname).Name != nil && asNode(f.Nname).Name.Vargen > 0 {
+ name = fmt.Sprintf("%s·%d", name, asNode(f.Nname).Name.Vargen) // append Vargen
}
} else {
if i := strings.Index(name, "·"); i > 0 {
}
}
-func (p *exporter) fieldSym(s *Sym, short bool) {
+func (p *exporter) fieldSym(s *types.Sym, short bool) {
name := s.Name
// remove leading "type." in method names ("(T).m" -> "m")
// untype returns the "pseudo" untyped type for a Ctype (import/export use only).
// (we can't use an pre-initialized array because we must be sure all types are
// set up)
-func untype(ctype Ctype) *Type {
+func untype(ctype Ctype) *types.Type {
switch ctype {
case CTINT:
- return idealint
+ return types.Idealint
case CTRUNE:
- return idealrune
+ return types.Idealrune
case CTFLT:
- return idealfloat
+ return types.Idealfloat
case CTCPLX:
- return idealcomplex
+ return types.Idealcomplex
case CTSTR:
- return idealstring
+ return types.Idealstring
case CTBOOL:
- return idealbool
+ return types.Idealbool
case CTNIL:
- return Types[TNIL]
+ return types.Types[TNIL]
}
Fatalf("exporter: unknown Ctype")
return nil
}
-var predecl []*Type // initialized lazily
+var predecl []*types.Type // initialized lazily
-func predeclared() []*Type {
+func predeclared() []*types.Type {
if predecl == nil {
// initialize lazily to be sure that all
// elements have been initialized before
- predecl = []*Type{
+ predecl = []*types.Type{
// basic types
- Types[TBOOL],
- Types[TINT],
- Types[TINT8],
- Types[TINT16],
- Types[TINT32],
- Types[TINT64],
- Types[TUINT],
- Types[TUINT8],
- Types[TUINT16],
- Types[TUINT32],
- Types[TUINT64],
- Types[TUINTPTR],
- Types[TFLOAT32],
- Types[TFLOAT64],
- Types[TCOMPLEX64],
- Types[TCOMPLEX128],
- Types[TSTRING],
+ types.Types[TBOOL],
+ types.Types[TINT],
+ types.Types[TINT8],
+ types.Types[TINT16],
+ types.Types[TINT32],
+ types.Types[TINT64],
+ types.Types[TUINT],
+ types.Types[TUINT8],
+ types.Types[TUINT16],
+ types.Types[TUINT32],
+ types.Types[TUINT64],
+ types.Types[TUINTPTR],
+ types.Types[TFLOAT32],
+ types.Types[TFLOAT64],
+ types.Types[TCOMPLEX64],
+ types.Types[TCOMPLEX128],
+ types.Types[TSTRING],
// basic type aliases
- bytetype,
- runetype,
+ types.Bytetype,
+ types.Runetype,
// error
- errortype,
+ types.Errortype,
// untyped types
untype(CTBOOL),
untype(CTNIL),
// package unsafe
- Types[TUNSAFEPTR],
+ types.Types[TUNSAFEPTR],
// invalid type (package contains errors)
- Types[Txxx],
+ types.Types[Txxx],
// any type, for builtin export data
- Types[TANY],
+ types.Types[TANY],
}
}
return predecl
import (
"bufio"
+ "cmd/compile/internal/types"
"cmd/internal/src"
"encoding/binary"
"fmt"
type importer struct {
in *bufio.Reader
- imp *Pkg // imported package
- buf []byte // reused for reading strings
- version int // export format version
+ imp *types.Pkg // imported package
+ buf []byte // reused for reading strings
+ version int // export format version
// object lists, in order of deserialization
strList []string
- pkgList []*Pkg
- typList []*Type
+ pkgList []*types.Pkg
+ typList []*types.Type
funcList []*Node // nil entry means already declared
trackAllTypes bool
// for delayed type verification
- cmpList []struct{ pt, t *Type }
+ cmpList []struct{ pt, t *types.Type }
// position encoding
posInfoFormat bool
}
// Import populates imp from the serialized package data read from in.
-func Import(imp *Pkg, in *bufio.Reader) {
+func Import(imp *types.Pkg, in *bufio.Reader) {
inimport = true
defer func() { inimport = false }()
// the same name appears in an error message.
var numImport = make(map[string]int)
-func (p *importer) pkg() *Pkg {
+func (p *importer) pkg() *types.Pkg {
// if the package was seen before, i is its index (>= 0)
i := p.tagOrIndex()
if i >= 0 {
return pkg
}
-func idealType(typ *Type) *Type {
+func idealType(typ *types.Type) *types.Type {
if typ.IsUntyped() {
// canonicalize ideal types
- typ = Types[TIDEAL]
+ typ = types.Types[TIDEAL]
}
return typ
}
sig := functypefield(nil, params, result)
importsym(p.imp, sym, ONAME)
- if sym.Def != nil && sym.Def.Op == ONAME {
+ if asNode(sym.Def) != nil && asNode(sym.Def).Op == ONAME {
// function was imported before (via another import)
- if !eqtype(sig, sym.Def.Type) {
- p.formatErrorf("inconsistent definition for func %v during import\n\t%v\n\t%v", sym, sym.Def.Type, sig)
+ if !eqtype(sig, asNode(sym.Def).Type) {
+ p.formatErrorf("inconsistent definition for func %v during import\n\t%v\n\t%v", sym, asNode(sym.Def).Type, sig)
}
p.funcList = append(p.funcList, nil)
break
return xpos
}
-func (p *importer) newtyp(etype EType) *Type {
- t := typ(etype)
+func (p *importer) newtyp(etype types.EType) *types.Type {
+ t := types.New(etype)
if p.trackAllTypes {
p.typList = append(p.typList, t)
}
}
// importtype declares that pt, an imported named type, has underlying type t.
-func (p *importer) importtype(pt, t *Type) {
+func (p *importer) importtype(pt, t *types.Type) {
if pt.Etype == TFORW {
- copytype(pt.nod, t)
+ copytype(asNode(pt.Nod), t)
pt.Sym.Importdef = p.imp
pt.Sym.Lastlineno = lineno
- declare(pt.nod, PEXTERN)
+ declare(asNode(pt.Nod), PEXTERN)
checkwidth(pt)
} else {
// pt.Orig and t must be identical.
if p.trackAllTypes {
// If we track all types, t may not be fully set up yet.
// Collect the types and verify identity later.
- p.cmpList = append(p.cmpList, struct{ pt, t *Type }{pt, t})
+ p.cmpList = append(p.cmpList, struct{ pt, t *types.Type }{pt, t})
} else if !eqtype(pt.Orig, t) {
yyerror("inconsistent definition for type %v during import\n\t%L (in %q)\n\t%L (in %q)", pt.Sym, pt, pt.Sym.Importdef.Path, t, p.imp.Path)
}
}
}
-func (p *importer) typ() *Type {
+func (p *importer) typ() *types.Type {
// if the type was seen before, i is its index (>= 0)
i := p.tagOrIndex()
if i >= 0 {
}
// otherwise, i is the type tag (< 0)
- var t *Type
+ var t *types.Type
switch i {
case namedTag:
p.pos()
// (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
// out by typecheck's lookdot as this $$.ttype. So by providing
// this back link here we avoid special casing there.
- n.Type.SetNname(n)
+ n.Type.FuncType().Nname = asTypesNode(n)
if Debug['E'] > 0 {
fmt.Printf("import [%q] meth %v \n", p.imp.Path, n)
t = p.newtyp(TARRAY)
bound := p.int64()
elem := p.typ()
- t.Extra = &ArrayType{Elem: elem, Bound: bound}
+ t.Extra = &types.ArrayType{Elem: elem, Bound: bound}
case sliceTag:
t = p.newtyp(TSLICE)
elem := p.typ()
- t.Extra = SliceType{Elem: elem}
+ t.Extra = types.SliceType{Elem: elem}
case dddTag:
t = p.newtyp(TDDDFIELD)
- t.Extra = DDDFieldType{T: p.typ()}
+ t.Extra = types.DDDFieldType{T: p.typ()}
case structTag:
t = p.newtyp(TSTRUCT)
checkwidth(t)
case pointerTag:
- t = p.newtyp(Tptr)
- t.Extra = PtrType{Elem: p.typ()}
+ t = p.newtyp(types.Tptr)
+ t.Extra = types.PtrType{Elem: p.typ()}
case signatureTag:
t = p.newtyp(TFUNC)
case interfaceTag:
if ml := p.methodList(); len(ml) == 0 {
- t = Types[TINTER]
+ t = types.Types[TINTER]
} else {
t = p.newtyp(TINTER)
t.SetInterface(ml)
case chanTag:
t = p.newtyp(TCHAN)
ct := t.ChanType()
- ct.Dir = ChanDir(p.int())
+ ct.Dir = types.ChanDir(p.int())
ct.Elem = p.typ()
default:
return t
}
-func (p *importer) qualifiedName() *Sym {
+func (p *importer) qualifiedName() *types.Sym {
name := p.string()
pkg := p.pkg()
return pkg.Lookup(name)
}
-func (p *importer) fieldList() (fields []*Field) {
+func (p *importer) fieldList() (fields []*types.Field) {
if n := p.int(); n > 0 {
- fields = make([]*Field, n)
+ fields = make([]*types.Field, n)
for i := range fields {
fields[i] = p.field()
}
return
}
-func (p *importer) field() *Field {
+func (p *importer) field() *types.Field {
p.pos()
sym, alias := p.fieldName()
typ := p.typ()
note := p.string()
- f := newField()
+ f := types.NewField()
if sym.Name == "" {
// anonymous field: typ must be T or *T and T must be a type name
s := typ.Sym
}
f.Sym = sym
- f.Nname = newname(sym)
+ f.Nname = asTypesNode(newname(sym))
f.Type = typ
f.Note = note
return f
}
-func (p *importer) methodList() (methods []*Field) {
+func (p *importer) methodList() (methods []*types.Field) {
for n := p.int(); n > 0; n-- {
- f := newField()
- f.Nname = newname(nblank.Sym)
- f.Nname.Pos = p.pos()
+ f := types.NewField()
+ f.Nname = asTypesNode(newname(nblank.Sym))
+ asNode(f.Nname).Pos = p.pos()
f.Type = p.typ()
methods = append(methods, f)
}
return
}
-func (p *importer) method() *Field {
+func (p *importer) method() *types.Field {
p.pos()
sym := p.methodName()
params := p.paramList()
result := p.paramList()
- f := newField()
+ f := types.NewField()
f.Sym = sym
- f.Nname = newname(sym)
+ f.Nname = asTypesNode(newname(sym))
f.Type = functypefield(fakethisfield(), params, result)
return f
}
-func (p *importer) fieldName() (*Sym, bool) {
+func (p *importer) fieldName() (*types.Sym, bool) {
name := p.string()
if p.version == 0 && name == "_" {
// version 0 didn't export a package for _ field names
return pkg.Lookup(name), alias
}
-func (p *importer) methodName() *Sym {
+func (p *importer) methodName() *types.Sym {
name := p.string()
if p.version == 0 && name == "_" {
// version 0 didn't export a package for _ method names
return pkg.Lookup(name)
}
-func (p *importer) paramList() []*Field {
+func (p *importer) paramList() []*types.Field {
i := p.int()
if i == 0 {
return nil
named = false
}
// i > 0
- fs := make([]*Field, i)
+ fs := make([]*types.Field, i)
for i := range fs {
fs[i] = p.param(named)
}
return fs
}
-func (p *importer) param(named bool) *Field {
- f := newField()
+func (p *importer) param(named bool) *types.Field {
+ f := types.NewField()
f.Type = p.typ()
if f.Type.Etype == TDDDFIELD {
// TDDDFIELD indicates wrapped ... slice type
- f.Type = typSlice(f.Type.DDDField())
+ f.Type = types.NewSlice(f.Type.DDDField())
f.SetIsddd(true)
}
pkg = p.pkg()
}
f.Sym = pkg.Lookup(name)
- f.Nname = newname(f.Sym)
+ f.Nname = asTypesNode(newname(f.Sym))
}
// TODO(gri) This is compiler-specific (escape info).
return f
}
-func (p *importer) value(typ *Type) (x Val) {
+func (p *importer) value(typ *types.Type) (x Val) {
switch tag := p.tagOrIndex(); tag {
case falseTag:
x.U = false
case int64Tag:
u := new(Mpint)
u.SetInt64(p.int64())
- u.Rune = typ == idealrune
+ u.Rune = typ == types.Idealrune
x.U = u
case floatTag:
f := newMpflt()
p.float(f)
- if typ == idealint || typ.IsInteger() {
+ if typ == types.Idealint || typ.IsInteger() {
// uncommon case: large int encoded as float
u := new(Mpint)
u.SetFloat(f)
// (issue 16317).
if typ.IsUnsafePtr() {
n = nod(OCONV, n, nil)
- n.Type = Types[TUINTPTR]
+ n.Type = types.Types[TUINTPTR]
}
n = nod(OCONV, n, nil)
n.Type = typ
case OASOP:
n := nodl(p.pos(), OASOP, nil, nil)
- n.Etype = EType(p.int())
+ n.Etype = types.EType(p.int())
n.Left = p.expr()
if !p.bool() {
n.Right = nodintconst(1)
return
}
-func (p *importer) fieldSym() *Sym {
+func (p *importer) fieldSym() *types.Sym {
name := p.string()
pkg := localpkg
if !exportname(name) {
return pkg.Lookup(name)
}
-func (p *importer) sym() *Sym {
+func (p *importer) sym() *types.Sym {
name := p.string()
pkg := localpkg
if name != "_" {
package gc
+import "cmd/compile/internal/types"
+
var runtimeDecls = [...]struct {
name string
tag int
{"support_popcnt", varTag, 11},
}
-func runtimeTypes() []*Type {
- var typs [112]*Type
- typs[0] = bytetype
- typs[1] = typPtr(typs[0])
- typs[2] = Types[TANY]
- typs[3] = typPtr(typs[2])
+func runtimeTypes() []*types.Type {
+ var typs [112]*types.Type
+ typs[0] = types.Bytetype
+ typs[1] = types.NewPtr(typs[0])
+ typs[2] = types.Types[TANY]
+ typs[3] = types.NewPtr(typs[2])
typs[4] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[3])})
typs[5] = functype(nil, nil, nil)
- typs[6] = Types[TINTER]
+ typs[6] = types.Types[TINTER]
typs[7] = functype(nil, []*Node{anonfield(typs[6])}, nil)
- typs[8] = Types[TINT32]
- typs[9] = typPtr(typs[8])
+ typs[8] = types.Types[TINT32]
+ typs[9] = types.NewPtr(typs[8])
typs[10] = functype(nil, []*Node{anonfield(typs[9])}, []*Node{anonfield(typs[6])})
- typs[11] = Types[TBOOL]
+ typs[11] = types.Types[TBOOL]
typs[12] = functype(nil, []*Node{anonfield(typs[11])}, nil)
- typs[13] = Types[TFLOAT64]
+ typs[13] = types.Types[TFLOAT64]
typs[14] = functype(nil, []*Node{anonfield(typs[13])}, nil)
- typs[15] = Types[TINT64]
+ typs[15] = types.Types[TINT64]
typs[16] = functype(nil, []*Node{anonfield(typs[15])}, nil)
- typs[17] = Types[TUINT64]
+ typs[17] = types.Types[TUINT64]
typs[18] = functype(nil, []*Node{anonfield(typs[17])}, nil)
- typs[19] = Types[TCOMPLEX128]
+ typs[19] = types.Types[TCOMPLEX128]
typs[20] = functype(nil, []*Node{anonfield(typs[19])}, nil)
- typs[21] = Types[TSTRING]
+ typs[21] = types.Types[TSTRING]
typs[22] = functype(nil, []*Node{anonfield(typs[21])}, nil)
typs[23] = functype(nil, []*Node{anonfield(typs[2])}, nil)
- typs[24] = typArray(typs[0], 32)
- typs[25] = typPtr(typs[24])
+ typs[24] = types.NewArray(typs[0], 32)
+ typs[25] = types.NewPtr(typs[24])
typs[26] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[21]), anonfield(typs[21])}, []*Node{anonfield(typs[21])})
typs[27] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[21]), anonfield(typs[21]), anonfield(typs[21])}, []*Node{anonfield(typs[21])})
typs[28] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[21]), anonfield(typs[21]), anonfield(typs[21]), anonfield(typs[21])}, []*Node{anonfield(typs[21])})
typs[29] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[21]), anonfield(typs[21]), anonfield(typs[21]), anonfield(typs[21]), anonfield(typs[21])}, []*Node{anonfield(typs[21])})
- typs[30] = typSlice(typs[21])
+ typs[30] = types.NewSlice(typs[21])
typs[31] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[30])}, []*Node{anonfield(typs[21])})
- typs[32] = Types[TINT]
+ typs[32] = types.Types[TINT]
typs[33] = functype(nil, []*Node{anonfield(typs[21]), anonfield(typs[21])}, []*Node{anonfield(typs[32])})
typs[34] = functype(nil, []*Node{anonfield(typs[21]), anonfield(typs[21])}, []*Node{anonfield(typs[11])})
- typs[35] = typArray(typs[0], 4)
- typs[36] = typPtr(typs[35])
+ typs[35] = types.NewArray(typs[0], 4)
+ typs[36] = types.NewPtr(typs[35])
typs[37] = functype(nil, []*Node{anonfield(typs[36]), anonfield(typs[15])}, []*Node{anonfield(typs[21])})
- typs[38] = typSlice(typs[0])
+ typs[38] = types.NewSlice(typs[0])
typs[39] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[38])}, []*Node{anonfield(typs[21])})
typs[40] = functype(nil, []*Node{anonfield(typs[38])}, []*Node{anonfield(typs[21])})
- typs[41] = runetype
- typs[42] = typSlice(typs[41])
+ typs[41] = types.Runetype
+ typs[42] = types.NewSlice(typs[41])
typs[43] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[42])}, []*Node{anonfield(typs[21])})
typs[44] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[21])}, []*Node{anonfield(typs[38])})
- typs[45] = typArray(typs[41], 32)
- typs[46] = typPtr(typs[45])
+ typs[45] = types.NewArray(typs[41], 32)
+ typs[46] = types.NewPtr(typs[45])
typs[47] = functype(nil, []*Node{anonfield(typs[46]), anonfield(typs[21])}, []*Node{anonfield(typs[42])})
typs[48] = functype(nil, []*Node{anonfield(typs[21]), anonfield(typs[32])}, []*Node{anonfield(typs[41]), anonfield(typs[32])})
- typs[49] = Types[TUINTPTR]
+ typs[49] = types.Types[TUINTPTR]
typs[50] = functype(nil, []*Node{anonfield(typs[2]), anonfield(typs[2]), anonfield(typs[49])}, []*Node{anonfield(typs[32])})
typs[51] = functype(nil, []*Node{anonfield(typs[2]), anonfield(typs[2])}, []*Node{anonfield(typs[32])})
typs[52] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
typs[54] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[11])})
typs[55] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
typs[56] = functype(nil, []*Node{anonfield(typs[1])}, nil)
- typs[57] = typPtr(typs[49])
- typs[58] = Types[TUNSAFEPTR]
+ typs[57] = types.NewPtr(typs[49])
+ typs[58] = types.Types[TUNSAFEPTR]
typs[59] = functype(nil, []*Node{anonfield(typs[57]), anonfield(typs[58]), anonfield(typs[58])}, []*Node{anonfield(typs[11])})
- typs[60] = typMap(typs[2], typs[2])
+ typs[60] = types.NewMap(typs[2], typs[2])
typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[60])})
typs[62] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
typs[63] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[3])}, nil)
typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[2])}, nil)
typs[70] = functype(nil, []*Node{anonfield(typs[3])}, nil)
- typs[71] = typChan(typs[2], Cboth)
+ typs[71] = types.NewChan(typs[2], types.Cboth)
typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[71])})
- typs[73] = typChan(typs[2], Crecv)
+ typs[73] = types.NewChan(typs[2], types.Crecv)
typs[74] = functype(nil, []*Node{anonfield(typs[73]), anonfield(typs[3])}, nil)
typs[75] = functype(nil, []*Node{anonfield(typs[73]), anonfield(typs[3])}, []*Node{anonfield(typs[11])})
- typs[76] = typChan(typs[2], Csend)
+ typs[76] = types.NewChan(typs[2], types.Csend)
typs[77] = functype(nil, []*Node{anonfield(typs[76]), anonfield(typs[3])}, nil)
- typs[78] = typArray(typs[0], 3)
+ typs[78] = types.NewArray(typs[0], 3)
typs[79] = tostruct([]*Node{namedfield("enabled", typs[11]), namedfield("pad", typs[78]), namedfield("needed", typs[11]), namedfield("cgo", typs[11]), namedfield("alignme", typs[17])})
typs[80] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[2])}, nil)
typs[81] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2]), anonfield(typs[2])}, []*Node{anonfield(typs[32])})
typs[84] = functype(nil, []*Node{anonfield(typs[76]), anonfield(typs[3])}, []*Node{anonfield(typs[11])})
typs[85] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[73])}, []*Node{anonfield(typs[11])})
- typs[86] = typPtr(typs[11])
+ typs[86] = types.NewPtr(typs[11])
typs[87] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[86]), anonfield(typs[73])}, []*Node{anonfield(typs[11])})
typs[88] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[8])}, nil)
typs[89] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[76]), anonfield(typs[3])}, nil)
typs[90] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[73]), anonfield(typs[3]), anonfield(typs[86])}, nil)
typs[91] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[32])})
- typs[92] = typSlice(typs[2])
+ typs[92] = types.NewSlice(typs[2])
typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[32]), anonfield(typs[32])}, []*Node{anonfield(typs[92])})
typs[94] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[92])})
typs[95] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[92]), anonfield(typs[32])}, []*Node{anonfield(typs[92])})
typs[101] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[17])}, []*Node{anonfield(typs[17])})
typs[102] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[15])})
typs[103] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[17])})
- typs[104] = Types[TUINT32]
+ typs[104] = types.Types[TUINT32]
typs[105] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[104])})
typs[106] = functype(nil, []*Node{anonfield(typs[15])}, []*Node{anonfield(typs[13])})
typs[107] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[13])})
package gc
import (
+ "cmd/compile/internal/types"
"cmd/internal/src"
)
func (l *cfLabel) used() bool { return l.useNode != nil }
// label returns the label associated with sym, creating it if necessary.
-func (c *controlflow) label(sym *Sym) *cfLabel {
+func (c *controlflow) label(sym *types.Sym) *cfLabel {
lab := c.labels[sym.Name]
if lab == nil {
lab = new(cfLabel)
// Decide what to complain about. Unwind to.Sym until where it
// forked from from.Sym, and keep track of the innermost block
// and declaration we jumped into/over.
- var block *Sym
- var dcl *Sym
+ var block *types.Sym
+ var dcl *types.Sym
// If to.Sym is longer, unwind until it's the same length.
ts := to.Sym
// dcldepth returns the declaration depth for a dclstack Sym; that is,
// the sum of the block nesting level and the number of declarations
// in scope.
-func dcldepth(s *Sym) int {
+func dcldepth(s *types.Sym) int {
n := 0
for ; s != nil; s = s.Link {
n++
package gc
import (
+ "cmd/compile/internal/types"
"fmt"
)
var closurename_closgen int
-func closurename(n *Node) *Sym {
+func closurename(n *Node) *types.Sym {
if n.Sym != nil {
return n.Sym
}
}
if Debug['m'] > 1 {
- var name *Sym
+ var name *types.Sym
if v.Name.Curfn != nil && v.Name.Curfn.Func.Nname != nil {
name = v.Name.Curfn.Func.Nname.Sym
}
f := xfunc.Func.Nname
// We are going to insert captured variables before input args.
- var params []*Field
+ var params []*types.Field
var decls []*Node
for _, v := range func_.Func.Cvars.Slice() {
if v.Op == OXXX {
continue
}
- fld := newField()
- fld.Funarg = FunargParams
+ fld := types.NewField()
+ fld.Funarg = types.FunargParams
if v.Name.Byval() {
// If v is captured by value, we merely downgrade it to PPARAM.
v.Class = PPARAM
- fld.Nname = v
+ fld.Nname = asTypesNode(v)
} else {
// If v of type T is captured by reference,
// we introduce function param &v *T
// and v remains PAUTOHEAP with &v heapaddr
// (accesses will implicitly deref &v).
addr := newname(lookup("&" + v.Sym.Name))
- addr.Type = typPtr(v.Type)
+ addr.Type = types.NewPtr(v.Type)
addr.Class = PPARAM
v.Name.Param.Heapaddr = addr
- fld.Nname = addr
+ fld.Nname = asTypesNode(addr)
}
- fld.Type = fld.Nname.Type
- fld.Sym = fld.Nname.Sym
+ fld.Type = asNode(fld.Nname).Type
+ fld.Sym = asNode(fld.Nname).Sym
params = append(params, fld)
- decls = append(decls, fld.Nname)
+ decls = append(decls, asNode(fld.Nname))
}
if len(params) > 0 {
cv.Type = v.Type
if !v.Name.Byval() {
- cv.Type = typPtr(v.Type)
+ cv.Type = types.NewPtr(v.Type)
}
offset = Rnd(offset, int64(cv.Type.Align))
cv.Xoffset = offset
// Declare variable holding addresses taken from closure
// and initialize in entry prologue.
addr := newname(lookup("&" + v.Sym.Name))
- addr.Type = typPtr(v.Type)
+ addr.Type = types.NewPtr(v.Type)
addr.Class = PAUTO
addr.SetUsed(true)
addr.Name.Curfn = xfunc
typ := nod(OTSTRUCT, nil, nil)
- typ.List.Set1(namedfield(".F", Types[TUINTPTR]))
+ typ.List.Set1(namedfield(".F", types.Types[TUINTPTR]))
for _, v := range func_.Func.Cvars.Slice() {
if v.Op == OXXX {
continue
return walkexpr(clos, init)
}
-func typecheckpartialcall(fn *Node, sym *Sym) {
+func typecheckpartialcall(fn *Node, sym *types.Sym) {
switch fn.Op {
case ODOTINTER, ODOTMETH:
break
fn.Type = xfunc.Type
}
-var makepartialcall_gopkg *Pkg
+var makepartialcall_gopkg *types.Pkg
-func makepartialcall(fn *Node, t0 *Type, meth *Sym) *Node {
+func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
var p string
rcvrtype := fn.Left.Type
Fatalf("missing base type for %v", rcvrtype)
}
- var spkg *Pkg
+ var spkg *types.Pkg
if basetype.Sym != nil {
spkg = basetype.Sym.Pkg
}
sym := spkg.Lookup(p)
if sym.Uniq() {
- return sym.Def
+ return asNode(sym.Def)
}
sym.SetUniq(true)
ptr.Type = rcvrtype
body = append(body, nod(OAS, ptr, cv))
} else {
- ptr.Type = typPtr(rcvrtype)
+ ptr.Type = types.NewPtr(rcvrtype)
body = append(body, nod(OAS, ptr, nod(OADDR, cv, nil)))
}
xfunc.Nbody.Set(body)
xfunc = typecheck(xfunc, Etop)
- sym.Def = xfunc
+ sym.Def = asTypesNode(xfunc)
xtop = append(xtop, xfunc)
Curfn = savecurfn
}
typ := nod(OTSTRUCT, nil, nil)
- typ.List.Set1(namedfield("F", Types[TUINTPTR]))
+ typ.List.Set1(namedfield("F", types.Types[TUINTPTR]))
typ.List.Append(namedfield("R", n.Left.Type))
clos := nod(OCOMPLIT, nil, nod(OIND, typ, nil))
package gc
import (
+ "cmd/compile/internal/types"
"cmd/internal/src"
"math/big"
"strings"
// truncate float literal fv to 32-bit or 64-bit precision
// according to type; return truncated value.
-func truncfltlit(oldv *Mpflt, t *Type) *Mpflt {
+func truncfltlit(oldv *Mpflt, t *types.Type) *Mpflt {
if t == nil {
return oldv
}
// convert large precision literal floating
// into limited precision (float64 or float32)
switch t.Etype {
- case TFLOAT64:
+ case types.TFLOAT64:
d := fv.Float64()
fv.SetFloat64(d)
// implicit conversion.
// The result of convlit MUST be assigned back to n, e.g.
// n.Left = convlit(n.Left, t)
-func convlit(n *Node, t *Type) *Node {
+func convlit(n *Node, t *types.Type) *Node {
return convlit1(n, t, false, noReuse)
}
// It returns a new node if necessary.
// The result of convlit1 MUST be assigned back to n, e.g.
// n.Left = convlit1(n.Left, t, explicit, reuse)
-func convlit1(n *Node, t *Type, explicit bool, reuse canReuseNode) *Node {
+func convlit1(n *Node, t *types.Type, explicit bool, reuse canReuseNode) *Node {
if n == nil || t == nil || n.Type == nil || t.IsUntyped() || n.Type == t {
return n
}
switch n.Op {
default:
- if n.Type == idealbool {
+ if n.Type == types.Idealbool {
if t.IsBoolean() {
n.Type = t
} else {
- n.Type = Types[TBOOL]
+ n.Type = types.Types[TBOOL]
}
}
default:
// If trying to convert to non-complex type,
// leave as complex128 and let typechecker complain.
- t = Types[TCOMPLEX128]
+ t = types.Types[TCOMPLEX128]
fallthrough
- case TCOMPLEX128:
+ case types.TCOMPLEX128:
n.Type = t
- n.Left = convlit(n.Left, Types[TFLOAT64])
- n.Right = convlit(n.Right, Types[TFLOAT64])
+ n.Left = convlit(n.Left, types.Types[TFLOAT64])
+ n.Right = convlit(n.Right, types.Types[TFLOAT64])
case TCOMPLEX64:
n.Type = t
- n.Left = convlit(n.Left, Types[TFLOAT32])
- n.Right = convlit(n.Right, Types[TFLOAT32])
+ n.Left = convlit(n.Left, types.Types[TFLOAT32])
+ n.Right = convlit(n.Right, types.Types[TFLOAT32])
}
}
}
ct := consttype(n)
- var et EType
+ var et types.EType
if ct < 0 {
goto bad
}
et = t.Etype
if et == TINTER {
- if ct == CTNIL && n.Type == Types[TNIL] {
+ if ct == CTNIL && n.Type == types.Types[TNIL] {
n.Type = t
return n
}
case CTCPLX:
overflow(n.Val(), t)
}
- } else if et == TSTRING && (ct == CTINT || ct == CTRUNE) && explicit {
+ } else if et == types.TSTRING && (ct == CTINT || ct == CTRUNE) && explicit {
n.SetVal(tostr(n.Val()))
} else {
goto bad
return v
}
-func doesoverflow(v Val, t *Type) bool {
+func doesoverflow(v Val, t *types.Type) bool {
switch u := v.U.(type) {
case *Mpint:
if !t.IsInteger() {
return false
}
-func overflow(v Val, t *Type) {
+func overflow(v Val, t *types.Type) {
// v has already been converted
// to appropriate form for t.
if t == nil || t.Etype == TIDEAL {
nr := n.Right
var rv Val
var lno src.XPos
- var wr EType
+ var wr types.EType
var v Val
var norig *Node
var nn *Node
case OCOM_ | CTINT_,
OCOM_ | CTRUNE_:
- var et EType = Txxx
+ var et types.EType = Txxx
if nl.Type != nil {
et = nl.Type.Etype
}
// right must be unsigned.
// left can be ideal.
case OLSH, ORSH:
- nr = defaultlit(nr, Types[TUINT])
+ nr = defaultlit(nr, types.Types[TUINT])
n.Right = nr
if nr.Type != nil && (nr.Type.IsSigned() || !nr.Type.IsInteger()) {
Fatalf("nodlit ctype %d", v.Ctype())
case CTSTR:
- n.Type = idealstring
+ n.Type = types.Idealstring
case CTBOOL:
- n.Type = idealbool
+ n.Type = types.Idealbool
case CTINT, CTRUNE, CTFLT, CTCPLX:
- n.Type = Types[TIDEAL]
+ n.Type = types.Types[TIDEAL]
case CTNIL:
- n.Type = Types[TNIL]
+ n.Type = types.Types[TNIL]
}
return n
c := new(Mpcplx)
n := nod(OLITERAL, nil, nil)
- n.Type = Types[TIDEAL]
+ n.Type = types.Types[TIDEAL]
n.SetVal(Val{c})
if r.Ctype() != CTFLT || i.Ctype() != CTFLT {
// The result of defaultlit MUST be assigned back to n, e.g.
// n.Left = defaultlit(n.Left, t)
-func defaultlit(n *Node, t *Type) *Node {
+func defaultlit(n *Node, t *types.Type) *Node {
return defaultlitreuse(n, t, noReuse)
}
// The result of defaultlitreuse MUST be assigned back to n, e.g.
// n.Left = defaultlitreuse(n.Left, t, reuse)
-func defaultlitreuse(n *Node, t *Type, reuse canReuseNode) *Node {
+func defaultlitreuse(n *Node, t *types.Type, reuse canReuseNode) *Node {
if n == nil || !n.Type.IsUntyped() {
return n
}
lno := setlineno(n)
ctype := idealkind(n)
- var t1 *Type
+ var t1 *types.Type
switch ctype {
default:
if t != nil {
}
if n.Val().Ctype() == CTSTR {
- t1 := Types[TSTRING]
+ t1 := types.Types[TSTRING]
n = convlit1(n, t1, false, reuse)
break
}
Fatalf("defaultlit: idealkind is CTxxx: %+v", n)
case CTBOOL:
- t1 := Types[TBOOL]
+ t1 := types.Types[TBOOL]
if t != nil && t.IsBoolean() {
t1 = t
}
n = convlit1(n, t1, false, reuse)
case CTINT:
- t1 = Types[TINT]
+ t1 = types.Types[TINT]
goto num
case CTRUNE:
- t1 = runetype
+ t1 = types.Runetype
goto num
case CTFLT:
- t1 = Types[TFLOAT64]
+ t1 = types.Types[TFLOAT64]
goto num
case CTCPLX:
- t1 = Types[TCOMPLEX128]
+ t1 = types.Types[TCOMPLEX128]
goto num
}
}
if l.Type.IsBoolean() {
- l = convlit(l, Types[TBOOL])
- r = convlit(r, Types[TBOOL])
+ l = convlit(l, types.Types[TBOOL])
+ r = convlit(r, types.Types[TBOOL])
}
lkind := idealkind(l)
rkind := idealkind(r)
if lkind == CTCPLX || rkind == CTCPLX {
- l = convlit(l, Types[TCOMPLEX128])
- r = convlit(r, Types[TCOMPLEX128])
+ l = convlit(l, types.Types[TCOMPLEX128])
+ r = convlit(r, types.Types[TCOMPLEX128])
return l, r
}
if lkind == CTFLT || rkind == CTFLT {
- l = convlit(l, Types[TFLOAT64])
- r = convlit(r, Types[TFLOAT64])
+ l = convlit(l, types.Types[TFLOAT64])
+ r = convlit(r, types.Types[TFLOAT64])
return l, r
}
if lkind == CTRUNE || rkind == CTRUNE {
- l = convlit(l, runetype)
- r = convlit(r, runetype)
+ l = convlit(l, types.Runetype)
+ r = convlit(r, types.Runetype)
return l, r
}
- l = convlit(l, Types[TINT])
- r = convlit(r, Types[TINT])
+ l = convlit(l, types.Types[TINT])
+ r = convlit(r, types.Types[TINT])
return l, r
}
}
case ONAME:
- l := n.Sym.Def
+ l := asNode(n.Sym.Def)
if l != nil && l.Op == OLITERAL && n.Val().Ctype() != CTNIL {
return true
}
case ONONAME:
- if n.Sym.Def != nil && n.Sym.Def.Op == OIOTA {
+ if asNode(n.Sym.Def) != nil && asNode(n.Sym.Def).Op == OIOTA {
return true
}
package gc
import (
+ "cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
"strings"
// Finally, the Syms in this list are not "real" Syms as they don't actually
// represent object names. Sym is just a convenient type for saving shadowed
// Sym definitions, and only a subset of its fields are actually used.
-var dclstack *Sym
+var dclstack *types.Sym
-func dcopy(a, b *Sym) {
+func dcopy(a, b *types.Sym) {
a.Pkg = b.Pkg
a.Name = b.Name
a.Def = b.Def
a.Lastlineno = b.Lastlineno
}
-func push() *Sym {
- d := new(Sym)
+func push() *types.Sym {
+ d := new(types.Sym)
d.Lastlineno = lineno
d.Link = dclstack
dclstack = d
// pushdcl pushes the current declaration for symbol s (if any) so that
// it can be shadowed by a new declaration within a nested block scope.
-func pushdcl(s *Sym) *Sym {
+func pushdcl(s *types.Sym) *types.Sym {
d := push()
dcopy(d, s)
return d
}
// redeclare emits a diagnostic about symbol s being redeclared somewhere.
-func redeclare(s *Sym, where string) {
+func redeclare(s *types.Sym, where string) {
if !s.Lastlineno.IsKnown() {
var tmp string
if s.Origpkg != nil {
s.Block = block
s.Lastlineno = lineno
- s.Def = n
+ s.Def = asTypesNode(n)
n.Name.Vargen = int32(gen)
n.Name.Funcdepth = funcdepth
n.Class = ctxt
autoexport(n, ctxt)
}
-func addvar(n *Node, t *Type, ctxt Class) {
+func addvar(n *Node, t *types.Type, ctxt Class) {
if n == nil || n.Sym == nil || (n.Op != ONAME && n.Op != ONONAME) || t == nil {
Fatalf("addvar: n=%v t=%v nil", n, t)
}
}
// newnoname returns a new ONONAME Node associated with symbol s.
-func newnoname(s *Sym) *Node {
+func newnoname(s *types.Sym) *Node {
if s == nil {
Fatalf("newnoname nil")
}
// newfuncname generates a new name node for a function or method.
// TODO(rsc): Use an ODCLFUNC node instead. See comment in CL 7360.
-func newfuncname(s *Sym) *Node {
+func newfuncname(s *types.Sym) *Node {
n := newname(s)
n.Func = new(Func)
n.Func.SetIsHiddenClosure(Curfn != nil)
// this generates a new name node for a name
// being declared.
-func dclname(s *Sym) *Node {
+func dclname(s *types.Sym) *Node {
n := newname(s)
n.Op = ONONAME // caller will correct it
return n
}
-func typenod(t *Type) *Node {
+func typenod(t *types.Type) *Node {
// if we copied another type with *t = *u
// then t->nod might be out of date, so
// check t->nod->type too
- if t.nod == nil || t.nod.Type != t {
- t.nod = nod(OTYPE, nil, nil)
- t.nod.Type = t
- t.nod.Sym = t.Sym
+ if asNode(t.Nod) == nil || asNode(t.Nod).Type != t {
+ t.Nod = asTypesNode(nod(OTYPE, nil, nil))
+ asNode(t.Nod).Type = t
+ asNode(t.Nod).Sym = t.Sym
}
- return t.nod
+ return asNode(t.Nod)
}
-func anonfield(typ *Type) *Node {
+// anonfield returns an unnamed ODCLFIELD node for a field of type typ.
+func anonfield(typ *types.Type) *Node {
	return nod(ODCLFIELD, nil, typenod(typ))
}
-func namedfield(s string, typ *Type) *Node {
+// namedfield returns an ODCLFIELD node for a field named s of type typ.
+func namedfield(s string, typ *types.Type) *Node {
	return nod(ODCLFIELD, newname(lookup(s)), typenod(typ))
}
// oldname returns the Node that declares symbol s in the current scope.
// If no such Node currently exists, an ONONAME Node is returned instead.
-func oldname(s *Sym) *Node {
- n := s.Def
+func oldname(s *types.Sym) *Node {
+ n := asNode(s.Def)
if n == nil {
// Maybe a top-level declaration will come along later to
// define s. resolve will check s.Def again once all input
// Same as funcargs, except run over an already constructed TFUNC.
// This happens during import, where the hidden_fndcl rule has
// used functype directly to parse the function's type.
-func funcargs2(t *Type) {
+func funcargs2(t *types.Type) {
if t.Etype != TFUNC {
Fatalf("funcargs2 %v", t)
}
for _, ft := range t.Recvs().Fields().Slice() {
- if ft.Nname == nil || ft.Nname.Sym == nil {
+ if asNode(ft.Nname) == nil || asNode(ft.Nname).Sym == nil {
continue
}
- n := ft.Nname // no need for newname(ft->nname->sym)
+ n := asNode(ft.Nname) // no need for newname(ft->nname->sym)
n.Type = ft.Type
declare(n, PPARAM)
}
for _, ft := range t.Params().Fields().Slice() {
- if ft.Nname == nil || ft.Nname.Sym == nil {
+ if asNode(ft.Nname) == nil || asNode(ft.Nname).Sym == nil {
continue
}
- n := ft.Nname
+ n := asNode(ft.Nname)
n.Type = ft.Type
declare(n, PPARAM)
}
for _, ft := range t.Results().Fields().Slice() {
- if ft.Nname == nil || ft.Nname.Sym == nil {
+ if asNode(ft.Nname) == nil || asNode(ft.Nname).Sym == nil {
continue
}
- n := ft.Nname
+ n := asNode(ft.Nname)
n.Type = ft.Type
declare(n, PPARAMOUT)
}
// structs, functions, and methods.
// they don't belong here, but where do they belong?
-func checkembeddedtype(t *Type) {
+func checkembeddedtype(t *types.Type) {
if t == nil {
return
}
}
}
-func structfield(n *Node) *Field {
+func structfield(n *Node) *types.Field {
lno := lineno
lineno = n.Pos
Fatalf("structfield: oops %v\n", n)
}
- f := newField()
+ f := types.NewField()
f.SetIsddd(n.Isddd())
if n.Right != nil {
}
if n.Left != nil && n.Left.Op == ONAME {
- f.Nname = n.Left
+ f.Nname = asTypesNode(n.Left)
f.Embedded = n.Embedded
- f.Sym = f.Nname.Sym
+ f.Sym = asNode(f.Nname).Sym
}
lineno = lno
// checkdupfields emits errors for duplicately named fields or methods in
// a list of struct or interface types.
-func checkdupfields(what string, ts ...*Type) {
- seen := make(map[*Sym]bool)
+func checkdupfields(what string, ts ...*types.Type) {
+ seen := make(map[*types.Sym]bool)
for _, t := range ts {
for _, f := range t.Fields().Slice() {
- if f.Sym == nil || isblanksym(f.Sym) || f.Nname == nil {
+ if f.Sym == nil || isblanksym(f.Sym) || asNode(f.Nname) == nil {
continue
}
if seen[f.Sym] {
- yyerrorl(f.Nname.Pos, "duplicate %s %s", what, f.Sym.Name)
+ yyerrorl(asNode(f.Nname).Pos, "duplicate %s %s", what, f.Sym.Name)
continue
}
seen[f.Sym] = true
// convert a parsed id/type list into
// a type for struct/interface/arglist
-func tostruct(l []*Node) *Type {
- t := typ(TSTRUCT)
+func tostruct(l []*Node) *types.Type {
+ t := types.New(TSTRUCT)
tostruct0(t, l)
return t
}
-func tostruct0(t *Type, l []*Node) {
+func tostruct0(t *types.Type, l []*Node) {
if t == nil || !t.IsStruct() {
Fatalf("struct expected")
}
- fields := make([]*Field, len(l))
+ fields := make([]*types.Field, len(l))
for i, n := range l {
f := structfield(n)
if f.Broke() {
}
}
-func tofunargs(l []*Node, funarg Funarg) *Type {
- t := typ(TSTRUCT)
+func tofunargs(l []*Node, funarg types.Funarg) *types.Type {
+ t := types.New(TSTRUCT)
t.StructType().Funarg = funarg
- fields := make([]*Field, len(l))
+ fields := make([]*types.Field, len(l))
for i, n := range l {
f := structfield(n)
f.Funarg = funarg
return t
}
-func tofunargsfield(fields []*Field, funarg Funarg) *Type {
- t := typ(TSTRUCT)
+func tofunargsfield(fields []*types.Field, funarg types.Funarg) *types.Type {
+ t := types.New(TSTRUCT)
t.StructType().Funarg = funarg
for _, f := range fields {
f.Funarg = funarg
// esc.go needs to find f given a PPARAM to add the tag.
- if f.Nname != nil && f.Nname.Class == PPARAM {
- f.Nname.Name.Param.Field = f
+ if asNode(f.Nname) != nil && asNode(f.Nname).Class == PPARAM {
+ asNode(f.Nname).Name.Param.Field = f
}
}
t.SetFields(fields)
return t
}
-func interfacefield(n *Node) *Field {
+func interfacefield(n *Node) *types.Field {
lno := lineno
lineno = n.Pos
n.Right = nil
}
- f := newField()
+ f := types.NewField()
if n.Left != nil {
- f.Nname = n.Left
- f.Sym = f.Nname.Sym
+ f.Nname = asTypesNode(n.Left)
+ f.Sym = asNode(f.Nname).Sym
} else {
// Placeholder ONAME just to hold Pos.
// TODO(mdempsky): Add Pos directly to Field instead.
- f.Nname = newname(nblank.Sym)
+ f.Nname = asTypesNode(newname(nblank.Sym))
}
f.Type = n.Type
return f
}
-func tointerface(l []*Node) *Type {
+func tointerface(l []*Node) *types.Type {
if len(l) == 0 {
- return Types[TINTER]
+ return types.Types[TINTER]
}
- t := typ(TINTER)
+ t := types.New(TINTER)
tointerface0(t, l)
return t
}
-func tointerface0(t *Type, l []*Node) *Type {
+func tointerface0(t *types.Type, l []*Node) *types.Type {
if t == nil || !t.IsInterface() {
Fatalf("interface expected")
}
- var fields []*Field
+ var fields []*types.Field
for _, n := range l {
f := interfacefield(n)
if f.Broke() {
return t
}
-func embedded(s *Sym, pkg *Pkg) *Node {
+func embedded(s *types.Sym, pkg *types.Pkg) *Node {
const (
CenterDot = 0xB7
)
}
// thisT is the singleton type used for interface method receivers.
-var thisT *Type
+var thisT *types.Type
func fakethis() *Node {
if thisT == nil {
- thisT = typPtr(typ(TSTRUCT))
+ thisT = types.NewPtr(types.New(TSTRUCT))
}
return anonfield(thisT)
}
-func fakethisfield() *Field {
+// fakethisfield returns a new Field whose type is thisT, the singleton
+// fake receiver type used for interface method receivers (see fakethis).
+func fakethisfield() *types.Field {
	if thisT == nil {
-		thisT = typPtr(typ(TSTRUCT))
+		thisT = types.NewPtr(types.New(TSTRUCT))
	}
-	f := newField()
+	f := types.NewField()
	f.Type = thisT
	return f
}
// Is this field a method on an interface?
// Those methods have thisT as the receiver.
// (See fakethis above.)
-func isifacemethod(f *Type) bool {
+func isifacemethod(f *types.Type) bool {
return f.Recv().Type == thisT
}
// turn a parsed function declaration into a type
-func functype(this *Node, in, out []*Node) *Type {
- t := typ(TFUNC)
+func functype(this *Node, in, out []*Node) *types.Type {
+ t := types.New(TFUNC)
functype0(t, this, in, out)
return t
}
-func functype0(t *Type, this *Node, in, out []*Node) {
+func functype0(t *types.Type, this *Node, in, out []*Node) {
if t == nil || t.Etype != TFUNC {
Fatalf("function type expected")
}
if this != nil {
rcvr = []*Node{this}
}
- t.FuncType().Receiver = tofunargs(rcvr, FunargRcvr)
- t.FuncType().Results = tofunargs(out, FunargResults)
- t.FuncType().Params = tofunargs(in, FunargParams)
+ t.FuncType().Receiver = tofunargs(rcvr, types.FunargRcvr)
+ t.FuncType().Results = tofunargs(out, types.FunargResults)
+ t.FuncType().Params = tofunargs(in, types.FunargParams)
checkdupfields("argument", t.Recvs(), t.Results(), t.Params())
}
}
-func functypefield(this *Field, in, out []*Field) *Type {
- t := typ(TFUNC)
+func functypefield(this *types.Field, in, out []*types.Field) *types.Type {
+ t := types.New(TFUNC)
functypefield0(t, this, in, out)
return t
}
-func functypefield0(t *Type, this *Field, in, out []*Field) {
-	var rcvr []*Field
+// functypefield0 fills in the TFUNC type t from the receiver field this
+// and the in/out parameter field lists.
+func functypefield0(t *types.Type, this *types.Field, in, out []*types.Field) {
+	var rcvr []*types.Field
	if this != nil {
-		rcvr = []*Field{this}
+		rcvr = []*types.Field{this}
	}
-	t.FuncType().Receiver = tofunargsfield(rcvr, FunargRcvr)
-	t.FuncType().Results = tofunargsfield(out, FunargRcvr)
-	t.FuncType().Params = tofunargsfield(in, FunargRcvr)
+	t.FuncType().Receiver = tofunargsfield(rcvr, types.FunargRcvr)
+	// Tag each field list with its matching Funarg kind, as functype0
+	// does; previously all three lists were (incorrectly) FunargRcvr.
+	t.FuncType().Results = tofunargsfield(out, types.FunargResults)
+	t.FuncType().Params = tofunargsfield(in, types.FunargParams)
	t.FuncType().Outnamed = false
-	if len(out) > 0 && out[0].Nname != nil && out[0].Nname.Orig != nil {
-		s := out[0].Nname.Orig.Sym
+	if len(out) > 0 && asNode(out[0].Nname) != nil && asNode(out[0].Nname).Orig != nil {
+		s := asNode(out[0].Nname).Orig.Sym
	if s != nil && (s.Name[0] != '~' || s.Name[1] != 'r') { // ~r%d is the name invented for an unnamed result
		t.FuncType().Outnamed = true
	}
}
}
-var methodsym_toppkg *Pkg
+var methodsym_toppkg *types.Pkg
-func methodsym(nsym *Sym, t0 *Type, iface bool) *Sym {
+func methodsym(nsym *types.Sym, t0 *types.Type, iface bool) *types.Sym {
if t0 == nil {
Fatalf("methodsym: nil receiver type")
}
// if t0 == *t and t0 has a sym,
// we want to see *t, not t0, in the method name.
if t != t0 && t0.Sym != nil {
- t0 = typPtr(t)
+ t0 = types.NewPtr(t)
}
suffix := ""
}
}
- var spkg *Pkg
+ var spkg *types.Pkg
if s != nil {
spkg = s.Pkg
}
// methodname is a misnomer because this now returns a Sym, rather
// than an ONAME.
// TODO(mdempsky): Reconcile with methodsym.
-func methodname(s *Sym, recv *Type) *Sym {
+func methodname(s *types.Sym, recv *types.Type) *types.Sym {
star := false
if recv.IsPtr() {
star = true
// Add a method, declared as a function.
// - msym is the method symbol
// - t is function type (with receiver)
-func addmethod(msym *Sym, t *Type, local, nointerface bool) {
+func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) {
if msym == nil {
Fatalf("no method symbol")
}
return
}
- f := newField()
+ f := types.NewField()
f.Sym = msym
- f.Nname = newname(msym)
+ f.Nname = asTypesNode(newname(msym))
f.Type = t
f.SetNointerface(nointerface)
dclcontext = PEXTERN
}
-func (s *Sym) funcsymname() string {
+// funcsymname returns the name of the function symbol for s, i.e. s·f.
+func funcsymname(s *types.Sym) string {
	return s.Name + "·f"
}
// funcsym returns s·f.
-func funcsym(s *Sym) *Sym {
- sf, existed := s.Pkg.LookupOK(s.funcsymname())
+func funcsym(s *types.Sym) *types.Sym {
+ sf, existed := s.Pkg.LookupOK(funcsymname(s))
// Don't export s·f when compiling for dynamic linking.
// When dynamically linking, the necessary function
// symbols will be created explicitly with makefuncsym.
// but DUPOK doesn't work across shared library boundaries.
// So instead, when dynamic linking, we only create
// the s·f stubs in s's package.
-func makefuncsym(s *Sym) {
+func makefuncsym(s *types.Sym) {
if !Ctxt.Flag_dynlink {
Fatalf("makefuncsym dynlink")
}
// not get a funcsym.
return
}
- if _, existed := s.Pkg.LookupOK(s.funcsymname()); !existed {
+ if _, existed := s.Pkg.LookupOK(funcsymname(s)); !existed {
funcsyms = append(funcsyms, s)
}
}
func (c *nowritebarrierrecChecker) visitcall(n *Node) {
fn := n.Left
if n.Op == OCALLMETH {
- fn = n.Left.Sym.Def
+ fn = asNode(n.Left.Sym.Def)
}
if fn == nil || fn.Op != ONAME || fn.Class != PFUNC || fn.Name.Defn == nil {
return
package gc
import (
+ "cmd/compile/internal/types"
"fmt"
"strconv"
"strings"
if n.Op == OCALLFUNC || n.Op == OCALLMETH {
fn := n.Left
if n.Op == OCALLMETH {
- fn = n.Left.Sym.Def
+ fn = asNode(n.Left.Sym.Def)
}
if fn != nil && fn.Op == ONAME && fn.Class == PFUNC && fn.Name.Defn != nil {
m := v.visit(fn.Name.Defn)
}
// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
-func funcSym(fn *Node) *Sym {
+func funcSym(fn *Node) *types.Sym {
if fn == nil || fn.Func.Nname == nil {
return nil
}
}
// curfnSym returns n.Curfn.Nname.Sym if no nils are encountered along the way.
-func (e *EscState) curfnSym(n *Node) *Sym {
+func (e *EscState) curfnSym(n *Node) *types.Sym {
nE := e.nodeEscState(n)
return funcSym(nE.Curfn)
}
case PPARAM:
lnE.Loopdepth = 1
- if ln.Type != nil && !haspointers(ln.Type) {
+ if ln.Type != nil && !types.Haspointers(ln.Type) {
break
}
if Curfn.Nbody.Len() == 0 && !Curfn.Noescape() {
// after escape analysis. in the future, maybe pull label & goto analysis out of walk and put before esc
// if(n.Left.Sym.Label != nil)
// fatal("escape analysis messed up analyzing label: %+N", n);
- n.Left.Sym.Label = &nonlooping
+ n.Left.Sym.Label = asTypesNode(&nonlooping)
case OGOTO:
if n.Left == nil || n.Left.Sym == nil {
// If we come past one that's uninitialized, this must be a (harmless) forward jump
// but if it's set to nonlooping the label must have preceded this goto.
- if n.Left.Sym.Label == &nonlooping {
- n.Left.Sym.Label = &looping
+ if asNode(n.Left.Sym.Label) == &nonlooping {
+ n.Left.Sym.Label = asTypesNode(&looping)
}
}
}
case OLABEL:
- if n.Left.Sym.Label == &nonlooping {
+ if asNode(n.Left.Sym.Label) == &nonlooping {
if Debug['m'] > 2 {
fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n)
}
- } else if n.Left.Sym.Label == &looping {
+ } else if asNode(n.Left.Sym.Label) == &looping {
if Debug['m'] > 2 {
fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
}
a := nod(OADDR, src, nil)
a.Pos = src.Pos
e.nodeEscState(a).Loopdepth = e.nodeEscState(src).Loopdepth
- a.Type = typPtr(src.Type)
+ a.Type = types.NewPtr(src.Type)
e.escflows(dst, a, e.stepAssign(nil, originalDst, src, dstwhy))
// Flowing multiple returns to a single dst happens when
// A non-pointer escaping from a struct does not concern us.
case ODOT:
- if src.Type != nil && !haspointers(src.Type) {
+ if src.Type != nil && !types.Haspointers(src.Type) {
break
}
fallthrough
case ODOTTYPE,
ODOTTYPE2:
- if src.Type != nil && !haspointers(src.Type) {
+ if src.Type != nil && !types.Haspointers(src.Type) {
break
}
e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))
e.nodeEscState(ind).Loopdepth = e.nodeEscState(n).Loopdepth
ind.Pos = n.Pos
t := n.Type
- if t.IsKind(Tptr) {
+ if t.IsKind(types.Tptr) {
// This should model our own sloppy use of OIND to encode
// decreasing levels of indirection; i.e., "indirecting" an array
// might yield the type of an element. To be enhanced...
return (e &^ (bitsMaskForTag << shift)) | encodedFlow
}
-func (e *EscState) initEscRetval(call *Node, fntype *Type) {
+func (e *EscState) initEscRetval(call *Node, fntype *types.Type) {
cE := e.nodeEscState(call)
cE.Retval.Set(nil) // Suspect this is not nil for indirect calls.
for i, f := range fntype.Results().Fields().Slice() {
// different for methods vs plain functions and for imported vs
// this-package
func (e *EscState) esccall(call *Node, parent *Node) {
- var fntype *Type
+ var fntype *types.Type
var indirect bool
var fn *Node
switch call.Op {
indirect = fn.Op != ONAME || fn.Class != PFUNC
case OCALLMETH:
- fn = call.Left.Sym.Def
+ fn = asNode(call.Left.Sym.Def)
if fn != nil {
fntype = fn.Type
} else {
if call.Op != OCALLFUNC {
rf := fntype.Recv()
r := call.Left.Left
- if haspointers(rf.Type) {
+ if types.Haspointers(rf.Type) {
e.escassignSinkWhy(call, r, "receiver in indirect call")
}
} else { // indirect and OCALLFUNC = could be captured variables, too. (#14409)
if n.Isddd() && !call.Isddd() {
// Introduce ODDDARG node to represent ... allocation.
arg = nod(ODDDARG, nil, nil)
- arr := typArray(n.Type.Elem(), int64(len(args)))
- arg.Type = typPtr(arr) // make pointer so it will be tracked
+ arr := types.NewArray(n.Type.Elem(), int64(len(args)))
+ arg.Type = types.NewPtr(arr) // make pointer so it will be tracked
arg.Pos = call.Pos
e.track(arg)
call.Right = arg
if call.Op != OCALLFUNC {
rf := fntype.Recv()
r := call.Left.Left
- if haspointers(rf.Type) {
+ if types.Haspointers(rf.Type) {
e.escassignfromtag(rf.Note, cE.Retval, r, call)
}
}
// Introduce ODDDARG node to represent ... allocation.
arg = nod(ODDDARG, nil, nil)
arg.Pos = call.Pos
- arr := typArray(param.Type.Elem(), int64(len(rest)))
- arg.Type = typPtr(arr) // make pointer so it will be tracked
+ arr := types.NewArray(param.Type.Elem(), int64(len(rest)))
+ arg.Type = types.NewPtr(arr) // make pointer so it will be tracked
e.track(arg)
call.Right = arg
}
}
- if haspointers(param.Type) && e.escassignfromtag(note, cE.Retval, arg, call)&EscMask == EscNone && parent.Op != ODEFER && parent.Op != OPROC {
+ if types.Haspointers(param.Type) && e.escassignfromtag(note, cE.Retval, arg, call)&EscMask == EscNone && parent.Op != ODEFER && parent.Op != OPROC {
a := arg
for a.Op == OCONVNOP {
a = a.Left
}
// Don't bother building a graph for scalars.
- if src.Type != nil && !haspointers(src.Type) && !isReflectHeaderDataField(src) {
+ if src.Type != nil && !types.Haspointers(src.Type) && !isReflectHeaderDataField(src) {
if Debug['m'] > 3 {
fmt.Printf("%v::NOT flows:: %S <- %S\n", linestr(lineno), dst, src)
}
func (e *EscState) esctag(fn *Node) {
fn.Esc = EscFuncTagged
- name := func(s *Sym, narg int) string {
+ name := func(s *types.Sym, narg int) string {
if s != nil {
return s.Name
}
if fn.Nbody.Len() == 0 {
if fn.Noescape() {
for _, f := range fn.Type.Params().Fields().Slice() {
- if haspointers(f.Type) {
+ if types.Haspointers(f.Type) {
f.Note = mktag(EscNone)
}
}
switch ln.Esc & EscMask {
case EscNone, // not touched by escflood
EscReturn:
- if haspointers(ln.Type) { // don't bother tagging for scalars
+ if types.Haspointers(ln.Type) { // don't bother tagging for scalars
if ln.Name.Param.Field.Note != uintptrEscapesTag {
ln.Name.Param.Field.Note = mktag(int(ln.Esc))
}
import (
"bufio"
"bytes"
+ "cmd/compile/internal/types"
"cmd/internal/bio"
"fmt"
"unicode"
}
// Ensure original types are on exportlist before type aliases.
- if n.Sym.isAlias() {
- exportlist = append(exportlist, n.Sym.Def)
+ if IsAlias(n.Sym) {
+ exportlist = append(exportlist, asNode(n.Sym.Def))
}
exportlist = append(exportlist, n)
// exportedsym reports whether a symbol will be visible
// to files that import our package.
-func exportedsym(sym *Sym) bool {
+func exportedsym(sym *types.Sym) bool {
// Builtins are visible everywhere.
if sym.Pkg == builtinpkg || sym.Origpkg == builtinpkg {
return true
}
// methodbyname sorts types by symbol name.
-type methodbyname []*Field
+type methodbyname []*types.Field
func (x methodbyname) Len() int { return len(x) }
func (x methodbyname) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
// (use empty package map to avoid collisions)
savedPkgMap := pkgMap
savedPkgs := pkgs
- pkgMap = make(map[string]*Pkg)
+ pkgMap = make(map[string]*types.Pkg)
pkgs = nil
Import(mkpkg(""), bufio.NewReader(©)) // must not die
pkgs = savedPkgs
// importsym declares symbol s as an imported object representable by op.
// pkg is the package being imported
-func importsym(pkg *Pkg, s *Sym, op Op) {
- if s.Def != nil && s.Def.Op != op {
+func importsym(pkg *types.Pkg, s *types.Sym, op Op) {
+ if asNode(s.Def) != nil && asNode(s.Def).Op != op {
pkgstr := fmt.Sprintf("during import %q", pkg.Path)
redeclare(s, pkgstr)
}
// mark the symbol so it is not reexported
- if s.Def == nil {
+ if asNode(s.Def) == nil {
if exportname(s.Name) || initname(s.Name) {
s.SetExport(true)
} else {
// pkgtype returns the named type declared by symbol s.
// If no such type has been declared yet, a forward declaration is returned.
// pkg is the package being imported
-func pkgtype(pkg *Pkg, s *Sym) *Type {
+func pkgtype(pkg *types.Pkg, s *types.Sym) *types.Type {
importsym(pkg, s, OTYPE)
- if s.Def == nil || s.Def.Op != OTYPE {
- t := typ(TFORW)
+ if asNode(s.Def) == nil || asNode(s.Def).Op != OTYPE {
+ t := types.New(TFORW)
t.Sym = s
- s.Def = typenod(t)
- s.Def.Name = new(Name)
+ s.Def = asTypesNode(typenod(t))
+ asNode(s.Def).Name = new(Name)
}
- if s.Def.Type == nil {
+ if asNode(s.Def).Type == nil {
Fatalf("pkgtype %v", s)
}
- return s.Def.Type
+ return asNode(s.Def).Type
}
// importconst declares symbol s as an imported constant with type t and value n.
// pkg is the package being imported
-func importconst(pkg *Pkg, s *Sym, t *Type, n *Node) {
+func importconst(pkg *types.Pkg, s *types.Sym, t *types.Type, n *Node) {
importsym(pkg, s, OLITERAL)
n = convlit(n, t)
- if s.Def != nil { // TODO: check if already the same.
+ if asNode(s.Def) != nil { // TODO: check if already the same.
return
}
// importvar declares symbol s as an imported variable with type t.
// pkg is the package being imported
-func importvar(pkg *Pkg, s *Sym, t *Type) {
+func importvar(pkg *types.Pkg, s *types.Sym, t *types.Type) {
importsym(pkg, s, ONAME)
- if s.Def != nil && s.Def.Op == ONAME {
- if eqtype(t, s.Def.Type) {
+ if asNode(s.Def) != nil && asNode(s.Def).Op == ONAME {
+ if eqtype(t, asNode(s.Def).Type) {
return
}
- yyerror("inconsistent definition for var %v during import\n\t%v (in %q)\n\t%v (in %q)", s, s.Def.Type, s.Importdef.Path, t, pkg.Path)
+ yyerror("inconsistent definition for var %v during import\n\t%v (in %q)\n\t%v (in %q)", s, asNode(s.Def).Type, s.Importdef.Path, t, pkg.Path)
}
n := newname(s)
// importalias declares symbol s as an imported type alias with type t.
// pkg is the package being imported
-func importalias(pkg *Pkg, s *Sym, t *Type) {
+func importalias(pkg *types.Pkg, s *types.Sym, t *types.Type) {
importsym(pkg, s, OTYPE)
- if s.Def != nil && s.Def.Op == OTYPE {
- if eqtype(t, s.Def.Type) {
+ if asNode(s.Def) != nil && asNode(s.Def).Op == OTYPE {
+ if eqtype(t, asNode(s.Def).Type) {
return
}
- yyerror("inconsistent definition for type alias %v during import\n\t%v (in %q)\n\t%v (in %q)", s, s.Def.Type, s.Importdef.Path, t, pkg.Path)
+ yyerror("inconsistent definition for type alias %v during import\n\t%v (in %q)\n\t%v (in %q)", s, asNode(s.Def).Type, s.Importdef.Path, t, pkg.Path)
}
n := newname(s)
package gc
import (
+ "cmd/compile/internal/types"
"fmt"
"strconv"
"strings"
//
// %v *Val Constant values
//
-// %v *Sym Symbols
+// %v *types.Sym Symbols
// %S unqualified identifier in any mode
// Flags: +,- #: mode (see below)
// 0: in export mode: unqualified identifier if exported, qualified if not
//
-// %v *Type Types
+// %v *types.Type Types
// %S omit "func" and receiver in function types
// %L definition instead of name.
// Flags: +,- #: mode (see below)
// Flags: those of *Node
// .: separate items with ',' instead of ';'
-// *Sym, *Type, and *Node types use the flags below to set the format mode
+// *types.Sym, *types.Type, and *Node types use the flags below to set the format mode
const (
FErr = iota
FDbg
)
// The mode flags '+', '-', and '#' are sticky; they persist through
-// recursions of *Node, *Type, and *Sym values. The ' ' flag is
-// sticky only on *Type recursions and only used in %-/*Sym mode.
+// recursions of *Node, *types.Type, and *types.Sym values. The ' ' flag is
+// sticky only on *types.Type recursions and only used in %-/*types.Sym mode.
//
-// Example: given a *Sym: %+v %#v %-v print an identifier properly qualified for debug/export/internal mode
+// Example: given a *types.Sym: %+v %#v %-v print an identifier properly qualified for debug/export/internal mode
// Useful format combinations:
// TODO(gri): verify these
// %#v Go format
// %L "foo (type Bar)" for error messages
//
-// *Type:
+// *types.Type:
// %#v Go format
// %#L type definition instead of name
// %#S omit"func" and receiver in function signature
fmtOpTypeId Op
fmtOpTypeIdName Op
- fmtTypeErr Type
- fmtTypeDbg Type
- fmtTypeTypeId Type
- fmtTypeTypeIdName Type
+ fmtTypeErr types.Type
+ fmtTypeDbg types.Type
+ fmtTypeTypeId types.Type
+ fmtTypeTypeIdName types.Type
- fmtSymErr Sym
- fmtSymDbg Sym
- fmtSymTypeId Sym
- fmtSymTypeIdName Sym
+ fmtSymErr types.Sym
+ fmtSymDbg types.Sym
+ fmtSymTypeId types.Sym
+ fmtSymTypeIdName types.Sym
fmtNodesErr Nodes
fmtNodesDbg Nodes
func (o fmtOpTypeIdName) Format(s fmt.State, verb rune) { Op(o).format(s, verb, FTypeIdName) }
func (o Op) Format(s fmt.State, verb rune) { o.format(s, verb, FErr) }
-func (t *fmtTypeErr) Format(s fmt.State, verb rune) { (*Type)(t).format(s, verb, FErr) }
-func (t *fmtTypeDbg) Format(s fmt.State, verb rune) { (*Type)(t).format(s, verb, FDbg) }
-func (t *fmtTypeTypeId) Format(s fmt.State, verb rune) { (*Type)(t).format(s, verb, FTypeId) }
-func (t *fmtTypeTypeIdName) Format(s fmt.State, verb rune) { (*Type)(t).format(s, verb, FTypeIdName) }
-func (t *Type) Format(s fmt.State, verb rune) { t.format(s, verb, FErr) }
+func (t *fmtTypeErr) Format(s fmt.State, verb rune) { typeFormat((*types.Type)(t), s, verb, FErr) }
+func (t *fmtTypeDbg) Format(s fmt.State, verb rune) { typeFormat((*types.Type)(t), s, verb, FDbg) }
+func (t *fmtTypeTypeId) Format(s fmt.State, verb rune) { typeFormat((*types.Type)(t), s, verb, FTypeId) }
+func (t *fmtTypeTypeIdName) Format(s fmt.State, verb rune) {
+ typeFormat((*types.Type)(t), s, verb, FTypeIdName)
+}
+
+// func (t *types.Type) Format(s fmt.State, verb rune) // in package types
+
+func (y *fmtSymErr) Format(s fmt.State, verb rune) { symFormat((*types.Sym)(y), s, verb, FErr) }
+func (y *fmtSymDbg) Format(s fmt.State, verb rune) { symFormat((*types.Sym)(y), s, verb, FDbg) }
+func (y *fmtSymTypeId) Format(s fmt.State, verb rune) { symFormat((*types.Sym)(y), s, verb, FTypeId) }
+func (y *fmtSymTypeIdName) Format(s fmt.State, verb rune) {
+ symFormat((*types.Sym)(y), s, verb, FTypeIdName)
+}
-func (y *fmtSymErr) Format(s fmt.State, verb rune) { (*Sym)(y).format(s, verb, FErr) }
-func (y *fmtSymDbg) Format(s fmt.State, verb rune) { (*Sym)(y).format(s, verb, FDbg) }
-func (y *fmtSymTypeId) Format(s fmt.State, verb rune) { (*Sym)(y).format(s, verb, FTypeId) }
-func (y *fmtSymTypeIdName) Format(s fmt.State, verb rune) { (*Sym)(y).format(s, verb, FTypeIdName) }
-func (y *Sym) Format(s fmt.State, verb rune) { y.format(s, verb, FErr) }
+// func (y *types.Sym) Format(s fmt.State, verb rune) // in package types
func (n fmtNodesErr) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FErr) }
func (n fmtNodesDbg) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FDbg) }
args[i] = fmtOpErr(arg)
case *Node:
args[i] = (*fmtNodeErr)(arg)
- case *Type:
+ case *types.Type:
args[i] = (*fmtTypeErr)(arg)
- case *Sym:
+ case *types.Sym:
args[i] = (*fmtSymErr)(arg)
case Nodes:
args[i] = fmtNodesErr(arg)
- case Val, int32, int64, string, EType:
+ case Val, int32, int64, string, types.EType:
// OK: printing these types doesn't depend on mode
default:
Fatalf("mode.prepareArgs type %T", arg)
args[i] = fmtOpDbg(arg)
case *Node:
args[i] = (*fmtNodeDbg)(arg)
- case *Type:
+ case *types.Type:
args[i] = (*fmtTypeDbg)(arg)
- case *Sym:
+ case *types.Sym:
args[i] = (*fmtSymDbg)(arg)
case Nodes:
args[i] = fmtNodesDbg(arg)
- case Val, int32, int64, string, EType:
+ case Val, int32, int64, string, types.EType:
// OK: printing these types doesn't depend on mode
default:
Fatalf("mode.prepareArgs type %T", arg)
args[i] = fmtOpTypeId(arg)
case *Node:
args[i] = (*fmtNodeTypeId)(arg)
- case *Type:
+ case *types.Type:
args[i] = (*fmtTypeTypeId)(arg)
- case *Sym:
+ case *types.Sym:
args[i] = (*fmtSymTypeId)(arg)
case Nodes:
args[i] = fmtNodesTypeId(arg)
- case Val, int32, int64, string, EType:
+ case Val, int32, int64, string, types.EType:
// OK: printing these types doesn't depend on mode
default:
Fatalf("mode.prepareArgs type %T", arg)
args[i] = fmtOpTypeIdName(arg)
case *Node:
args[i] = (*fmtNodeTypeIdName)(arg)
- case *Type:
+ case *types.Type:
args[i] = (*fmtTypeTypeIdName)(arg)
- case *Sym:
+ case *types.Sym:
args[i] = (*fmtSymTypeIdName)(arg)
case Nodes:
args[i] = fmtNodesTypeIdName(arg)
- case Val, int32, int64, string, EType:
+ case Val, int32, int64, string, types.EType:
// OK: printing these types doesn't depend on mode
default:
Fatalf("mode.prepareArgs type %T", arg)
s%^ ........*\]%&~%g
s%~ %%g
*/
-var etnames = []string{
- Txxx: "Txxx",
- TINT: "INT",
- TUINT: "UINT",
- TINT8: "INT8",
- TUINT8: "UINT8",
- TINT16: "INT16",
- TUINT16: "UINT16",
- TINT32: "INT32",
- TUINT32: "UINT32",
- TINT64: "INT64",
- TUINT64: "UINT64",
- TUINTPTR: "UINTPTR",
- TFLOAT32: "FLOAT32",
- TFLOAT64: "FLOAT64",
- TCOMPLEX64: "COMPLEX64",
- TCOMPLEX128: "COMPLEX128",
- TBOOL: "BOOL",
- TPTR32: "PTR32",
- TPTR64: "PTR64",
- TFUNC: "FUNC",
- TARRAY: "ARRAY",
- TSLICE: "SLICE",
- TSTRUCT: "STRUCT",
- TCHAN: "CHAN",
- TMAP: "MAP",
- TINTER: "INTER",
- TFORW: "FORW",
- TSTRING: "STRING",
- TUNSAFEPTR: "TUNSAFEPTR",
- TANY: "ANY",
- TIDEAL: "TIDEAL",
- TNIL: "TNIL",
- TBLANK: "TBLANK",
- TFUNCARGS: "TFUNCARGS",
- TCHANARGS: "TCHANARGS",
- TDDDFIELD: "TDDDFIELD",
-}
-
-func (et EType) String() string {
- if int(et) < len(etnames) && etnames[et] != "" {
- return etnames[et]
- }
- return fmt.Sprintf("E-%d", et)
-}
-func (s *Sym) symfmt(flag FmtFlag, mode fmtMode) string {
+func symfmt(s *types.Sym, flag FmtFlag, mode fmtMode) string {
if s.Pkg != nil && flag&FmtShort == 0 {
switch mode {
case FErr: // This is for the user
TBLANK: "blank",
}
-func (t *Type) typefmt(flag FmtFlag, mode fmtMode, depth int) string {
+func typefmt(t *types.Type, flag FmtFlag, mode fmtMode, depth int) string {
if t == nil {
return "<T>"
}
- if t == bytetype || t == runetype {
+ if t == types.Bytetype || t == types.Runetype {
// in %-T mode collapse rune and byte with their originals.
switch mode {
case FTypeIdName, FTypeId:
- t = Types[t.Etype]
+ t = types.Types[t.Etype]
default:
- return t.Sym.sconv(FmtShort, mode)
+ return sconv(t.Sym, FmtShort, mode)
}
}
- if t == errortype {
+ if t == types.Errortype {
return "error"
}
// Unless the 'l' flag was specified, if the type has a name, just print that name.
- if flag&FmtLong == 0 && t.Sym != nil && t != Types[t.Etype] {
+ if flag&FmtLong == 0 && t.Sym != nil && t != types.Types[t.Etype] {
switch mode {
case FTypeId, FTypeIdName:
if flag&FmtShort != 0 {
if t.Vargen != 0 {
- return mode.Sprintf("%v·%d", t.Sym.sconv(FmtShort, mode), t.Vargen)
+ return mode.Sprintf("%v·%d", sconv(t.Sym, FmtShort, mode), t.Vargen)
}
- return t.Sym.sconv(FmtShort, mode)
+ return sconv(t.Sym, FmtShort, mode)
}
if mode == FTypeIdName {
- return t.Sym.sconv(FmtUnsigned, mode)
+ return sconv(t.Sym, FmtUnsigned, mode)
}
if t.Sym.Pkg == localpkg && t.Vargen != 0 {
}
}
- return t.Sym.modeString(mode)
+ return smodeString(t.Sym, mode)
}
if int(t.Etype) < len(basicnames) && basicnames[t.Etype] != "" {
prefix := ""
- if mode == FErr && (t == idealbool || t == idealstring) {
+ if mode == FErr && (t == types.Idealbool || t == types.Idealstring) {
prefix = "untyped "
}
return prefix + basicnames[t.Etype]
}
if mode == FDbg {
- return t.Etype.String() + "-" + t.typefmt(flag, 0, depth)
+ return t.Etype.String() + "-" + typefmt(t, flag, 0, depth)
}
switch t.Etype {
switch mode {
case FTypeId, FTypeIdName:
if flag&FmtShort != 0 {
- return "*" + t.Elem().tconv(FmtShort, mode, depth)
+ return "*" + tconv(t.Elem(), FmtShort, mode, depth)
}
}
- return "*" + t.Elem().modeString(mode, depth)
+ return "*" + tmodeString(t.Elem(), mode, depth)
case TARRAY:
- if t.isDDDArray() {
- return "[...]" + t.Elem().modeString(mode, depth)
+ if t.IsDDDArray() {
+ return "[...]" + tmodeString(t.Elem(), mode, depth)
}
- return "[" + strconv.FormatInt(t.NumElem(), 10) + "]" + t.Elem().modeString(mode, depth)
+ return "[" + strconv.FormatInt(t.NumElem(), 10) + "]" + tmodeString(t.Elem(), mode, depth)
case TSLICE:
- return "[]" + t.Elem().modeString(mode, depth)
+ return "[]" + tmodeString(t.Elem(), mode, depth)
case TCHAN:
switch t.ChanDir() {
- case Crecv:
- return "<-chan " + t.Elem().modeString(mode, depth)
+ case types.Crecv:
+ return "<-chan " + tmodeString(t.Elem(), mode, depth)
- case Csend:
- return "chan<- " + t.Elem().modeString(mode, depth)
+ case types.Csend:
+ return "chan<- " + tmodeString(t.Elem(), mode, depth)
}
- if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym == nil && t.Elem().ChanDir() == Crecv {
- return "chan (" + t.Elem().modeString(mode, depth) + ")"
+ if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym == nil && t.Elem().ChanDir() == types.Crecv {
+ return "chan (" + tmodeString(t.Elem(), mode, depth) + ")"
}
- return "chan " + t.Elem().modeString(mode, depth)
+ return "chan " + tmodeString(t.Elem(), mode, depth)
case TMAP:
- return "map[" + t.Key().modeString(mode, depth) + "]" + t.Val().modeString(mode, depth)
+ return "map[" + tmodeString(t.Key(), mode, depth) + "]" + tmodeString(t.Val(), mode, depth)
case TINTER:
if t.IsEmptyInterface() {
// Wrong interface definitions may have types lacking a symbol.
break
case exportname(f.Sym.Name):
- buf = append(buf, f.Sym.sconv(FmtShort, mode)...)
+ buf = append(buf, sconv(f.Sym, FmtShort, mode)...)
default:
- buf = append(buf, f.Sym.sconv(FmtUnsigned, mode)...)
+ buf = append(buf, sconv(f.Sym, FmtUnsigned, mode)...)
}
- buf = append(buf, f.Type.tconv(FmtShort, mode, depth)...)
+ buf = append(buf, tconv(f.Type, FmtShort, mode, depth)...)
}
if t.NumFields() != 0 {
buf = append(buf, ' ')
} else {
if t.Recv() != nil {
buf = append(buf, "method"...)
- buf = append(buf, t.Recvs().modeString(mode, depth)...)
+ buf = append(buf, tmodeString(t.Recvs(), mode, depth)...)
buf = append(buf, ' ')
}
buf = append(buf, "func"...)
}
- buf = append(buf, t.Params().modeString(mode, depth)...)
+ buf = append(buf, tmodeString(t.Params(), mode, depth)...)
switch t.Results().NumFields() {
case 0:
case 1:
buf = append(buf, ' ')
- buf = append(buf, t.Results().Field(0).Type.modeString(mode, depth)...) // struct->field->field's type
+ buf = append(buf, tmodeString(t.Results().Field(0).Type, mode, depth)...) // struct->field->field's type
default:
buf = append(buf, ' ')
- buf = append(buf, t.Results().modeString(mode, depth)...)
+ buf = append(buf, tmodeString(t.Results(), mode, depth)...)
}
return string(buf)
// Format the bucket struct for map[x]y as map.bucket[x]y.
// This avoids a recursive print that generates very long names.
if mt.Bucket == t {
- return "map.bucket[" + m.Key().modeString(mode, depth) + "]" + m.Val().modeString(mode, depth)
+ return "map.bucket[" + tmodeString(m.Key(), mode, depth) + "]" + tmodeString(m.Val(), mode, depth)
}
if mt.Hmap == t {
- return "map.hdr[" + m.Key().modeString(mode, depth) + "]" + m.Val().modeString(mode, depth)
+ return "map.hdr[" + tmodeString(m.Key(), mode, depth) + "]" + tmodeString(m.Val(), mode, depth)
}
if mt.Hiter == t {
- return "map.iter[" + m.Key().modeString(mode, depth) + "]" + m.Val().modeString(mode, depth)
+ return "map.iter[" + tmodeString(m.Key(), mode, depth) + "]" + tmodeString(m.Val(), mode, depth)
}
Fatalf("unknown internal map type")
case TFORW:
if t.Sym != nil {
- return "undefined " + t.Sym.modeString(mode)
+ return "undefined " + smodeString(t.Sym, mode)
}
return "undefined"
return
}
if n.Sym != nil {
- fmt.Fprint(s, n.Sym.modeString(mode))
+ fmt.Fprint(s, smodeString(n.Sym, mode))
return
}
}
n.Orig.exprfmt(s, prec, mode)
return
}
- if n.Type != nil && n.Type.Etype != TIDEAL && n.Type.Etype != TNIL && n.Type != idealbool && n.Type != idealstring {
+ if n.Type != nil && n.Type.Etype != TIDEAL && n.Type.Etype != TNIL && n.Type != types.Idealbool && n.Type != types.Idealstring {
// Need parens when type begins with what might
// be misinterpreted as a unary operator: * or <-.
- if n.Type.IsPtr() || (n.Type.IsChan() && n.Type.ChanDir() == Crecv) {
+ if n.Type.IsPtr() || (n.Type.IsChan() && n.Type.ChanDir() == types.Crecv) {
mode.Fprintf(s, "(%v)(%v)", n.Type, n.Val())
return
} else {
}
fallthrough
case OPACK, ONONAME:
- fmt.Fprint(s, n.Sym.modeString(mode))
+ fmt.Fprint(s, smodeString(n.Sym, mode))
case OTYPE:
if n.Type == nil && n.Sym != nil {
- fmt.Fprint(s, n.Sym.modeString(mode))
+ fmt.Fprint(s, smodeString(n.Sym, mode))
return
}
mode.Fprintf(s, "%v", n.Type)
mode.Fprintf(s, "map[%v]%v", n.Left, n.Right)
case OTCHAN:
- switch ChanDir(n.Etype) {
- case Crecv:
+ switch types.ChanDir(n.Etype) {
+ case types.Crecv:
mode.Fprintf(s, "<-chan %v", n.Left)
- case Csend:
+ case types.Csend:
mode.Fprintf(s, "chan<- %v", n.Left)
default:
- if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && ChanDir(n.Left.Etype) == Crecv {
+ if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && types.ChanDir(n.Left.Etype) == types.Crecv {
mode.Fprintf(s, "chan (%v)", n.Left)
} else {
mode.Fprintf(s, "chan %v", n.Left)
}
// "%S" suppresses qualifying with package
-func (s *Sym) format(f fmt.State, verb rune, mode fmtMode) {
+// symFormat formats s according to verb ('v' or 'S') in the given mode;
+// any other verb produces a %!-style error string.
+func symFormat(s *types.Sym, f fmt.State, verb rune, mode fmtMode) {
	switch verb {
	case 'v', 'S':
-		fmt.Fprint(f, s.sconv(fmtFlag(f, verb), mode))
+		fmt.Fprint(f, sconv(s, fmtFlag(f, verb), mode))
	default:
-		fmt.Fprintf(f, "%%!%c(*Sym=%p)", verb, s)
+		fmt.Fprintf(f, "%%!%c(*types.Sym=%p)", verb, s)
	}
}
-func (s *Sym) String() string { return s.sconv(0, FErr) }
-func (s *Sym) modeString(mode fmtMode) string { return s.sconv(0, mode) }
+// smodeString returns the string representation of s in the given mode.
+func smodeString(s *types.Sym, mode fmtMode) string { return sconv(s, 0, mode) }
// See #16897 before changing the implementation of sconv.
-func (s *Sym) sconv(flag FmtFlag, mode fmtMode) string {
+func sconv(s *types.Sym, flag FmtFlag, mode fmtMode) string {
if flag&FmtLong != 0 {
panic("linksymfmt")
}
}
flag, mode = flag.update(mode)
- return s.symfmt(flag, mode)
-}
-
-func (t *Type) String() string {
- // This is an external entry point, so we pass depth 0 to tconv.
- // The implementation of tconv (including typefmt and fldconv)
- // must take care not to use a type in a formatting string
- // to avoid resetting the recursion counter.
- return t.tconv(0, FErr, 0)
-}
-
-func (t *Type) modeString(mode fmtMode, depth int) string {
- return t.tconv(0, mode, depth)
-}
-
-// ShortString generates a short description of t.
-// It is used in autogenerated method names, reflection,
-// and itab names.
-func (t *Type) ShortString() string {
- return t.tconv(FmtLeft, FErr, 0)
+ return symfmt(s, flag, mode)
}
-// LongString generates a complete description of t.
-// It is useful for reflection,
-// or when a unique fingerprint or hash of a type is required.
-func (t *Type) LongString() string {
-	return t.tconv(FmtLeft|FmtUnsigned, FErr, 0)
+// tmodeString returns the string representation of t in the given mode,
+// tracking recursion depth via depth (see tconv).
+func tmodeString(t *types.Type, mode fmtMode, depth int) string {
+	return tconv(t, 0, mode, depth)
 }
-func fldconv(f *Field, flag FmtFlag, mode fmtMode, depth int) string {
+func fldconv(f *types.Field, flag FmtFlag, mode fmtMode, depth int) string {
if f == nil {
return "<T>"
}
// Take the name from the original, lest we substituted it with ~r%d or ~b%d.
// ~r%d is a (formerly) unnamed result.
- if mode == FErr && f.Nname != nil {
- if f.Nname.Orig != nil {
- s = f.Nname.Orig.Sym
+ if mode == FErr && asNode(f.Nname) != nil {
+ if asNode(f.Nname).Orig != nil {
+ s = asNode(f.Nname).Orig.Sym
if s != nil && s.Name[0] == '~' {
if s.Name[1] == 'r' { // originally an unnamed result
s = nil
}
if s != nil && f.Embedded == 0 {
- if f.Funarg != FunargNone {
- name = f.Nname.modeString(mode)
+ if f.Funarg != types.FunargNone {
+ name = asNode(f.Nname).modeString(mode)
} else if flag&FmtLong != 0 {
name = mode.Sprintf("%0S", s)
if !exportname(name) && flag&FmtUnsigned == 0 {
- name = s.modeString(mode) // qualify non-exported names (used on structs, not on funarg)
+ name = smodeString(s, mode) // qualify non-exported names (used on structs, not on funarg)
}
} else {
- name = s.modeString(mode)
+ name = smodeString(s, mode)
}
}
}
var typ string
if f.Isddd() {
- typ = "..." + f.Type.Elem().modeString(mode, depth)
+ typ = "..." + tmodeString(f.Type.Elem(), mode, depth)
} else {
- typ = f.Type.modeString(mode, depth)
+ typ = tmodeString(f.Type, mode, depth)
}
str := typ
str = name + " " + typ
}
- if flag&FmtShort == 0 && f.Funarg == FunargNone && f.Note != "" {
+ if flag&FmtShort == 0 && f.Funarg == types.FunargNone && f.Note != "" {
str += " " + strconv.Quote(f.Note)
}
// "%L" print definition, not name
// "%S" omit 'func' and receiver from function types, short type names
-func (t *Type) format(s fmt.State, verb rune, mode fmtMode) {
+// typeFormat formats t according to verb ('v', 'S', or 'L') in the given
+// mode; any other verb produces a %!-style error string.
+func typeFormat(t *types.Type, s fmt.State, verb rune, mode fmtMode) {
	switch verb {
	case 'v', 'S', 'L':
		// This is an external entry point, so we pass depth 0 to tconv.
		// See comments in Type.String.
-		fmt.Fprint(s, t.tconv(fmtFlag(s, verb), mode, 0))
+		fmt.Fprint(s, tconv(t, fmtFlag(s, verb), mode, 0))
	default:
-		fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t)
+		// Keep the error string consistent with the renamed type,
+		// matching the *types.Sym message in symFormat.
+		fmt.Fprintf(s, "%%!%c(*types.Type=%p)", verb, t)
	}
}
// See #16897 before changing the implementation of tconv.
-func (t *Type) tconv(flag FmtFlag, mode fmtMode, depth int) string {
+func tconv(t *types.Type, flag FmtFlag, mode fmtMode, depth int) string {
if t == nil {
return "<T>"
}
flag |= FmtUnsigned
}
- str := t.typefmt(flag, mode, depth+1)
+ str := typefmt(t, flag, mode, depth+1)
return str
}
package gc
import (
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"fmt"
// Allocate a local stack variable to hold the pointer to the heap copy.
// temp will add it to the function declaration list automatically.
- heapaddr := temp(typPtr(n.Type))
+ heapaddr := temp(types.NewPtr(n.Type))
heapaddr.Sym = lookup("&" + n.Sym.Name)
heapaddr.Orig.Sym = heapaddr.Sym
// Start with a buffer big enough to hold a large n.
b := []byte(prefix + " ")[:len(prefix)]
b = strconv.AppendInt(b, int64(n), 10)
- return internString(b)
+ _ = b
+ return types.InternString(b)
}
// make a new Node off the books
-func tempnamel(pos src.XPos, curfn *Node, nn *Node, t *Type) {
+func tempnamel(pos src.XPos, curfn *Node, nn *Node, t *types.Type) {
if curfn == nil {
Fatalf("no curfn for tempname")
}
Fatalf("tempname called with nil type")
}
- s := &Sym{
+ s := &types.Sym{
Name: autotmpname(len(curfn.Func.Dcl)),
Pkg: localpkg,
}
n := newnamel(pos, s)
- s.Def = n
+ s.Def = asTypesNode(n)
n.Type = t
n.Class = PAUTO
n.Esc = EscNever
*nn = *n
}
-func temp(t *Type) *Node {
+func temp(t *types.Type) *Node {
var n Node
tempnamel(lineno, Curfn, &n, t)
- n.Sym.Def.SetUsed(true)
+ asNode(n.Sym.Def).SetUsed(true)
return n.Orig
}
-func tempAt(pos src.XPos, curfn *Node, t *Type) *Node {
+func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node {
var n Node
tempnamel(pos, curfn, &n, t)
- n.Sym.Def.SetUsed(true)
+ asNode(n.Sym.Def).SetUsed(true)
return n.Orig
}
import (
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
"cmd/internal/bio"
"cmd/internal/obj"
"cmd/internal/src"
)
const (
- BADWIDTH = -1000000000
+ BADWIDTH = types.BADWIDTH
MaxStackVarSize = 10 * 1024 * 1024
)
-type Pkg struct {
- Name string // package name, e.g. "sys"
- Path string // string literal used in import statement, e.g. "runtime/internal/sys"
- Pathsym *obj.LSym
- Prefix string // escaped path for use in symbol table
- Imported bool // export data of this package was parsed
- Direct bool // imported directly
- Syms map[string]*Sym
-}
-
-// isRuntime reports whether p is package runtime.
-func (p *Pkg) isRuntime() bool {
+// isRuntimePkg reports whether p is package runtime.
+func isRuntimePkg(p *types.Pkg) bool {
if compiling_runtime && p == localpkg {
return true
}
return p.Path == "runtime"
}
-// Sym represents an object name. Most commonly, this is a Go identifier naming
-// an object declared within a package, but Syms are also used to name internal
-// synthesized objects.
-//
-// As an exception, field and method names that are exported use the Sym
-// associated with localpkg instead of the package that declared them. This
-// allows using Sym pointer equality to test for Go identifier uniqueness when
-// handling selector expressions.
-type Sym struct {
- Link *Sym
- Importdef *Pkg // where imported definition was found
- Linkname string // link name
-
- // saved and restored by dcopy
- Pkg *Pkg
- Name string // object name
- Def *Node // definition: ONAME OTYPE OPACK or OLITERAL
- Lastlineno src.XPos // last declaration for diagnostic
- Block int32 // blocknumber to catch redeclaration
-
- flags bitset8
- Label *Node // corresponding label (ephemeral)
- Origpkg *Pkg // original package for . import
- Lsym *obj.LSym
-}
-
-const (
- symExport = 1 << iota // added to exportlist (no need to add again)
- symPackage
- symExported // already written out by export
- symUniq
- symSiggen
- symAsm
- symAlgGen
-)
-
-func (sym *Sym) Export() bool { return sym.flags&symExport != 0 }
-func (sym *Sym) Package() bool { return sym.flags&symPackage != 0 }
-func (sym *Sym) Exported() bool { return sym.flags&symExported != 0 }
-func (sym *Sym) Uniq() bool { return sym.flags&symUniq != 0 }
-func (sym *Sym) Siggen() bool { return sym.flags&symSiggen != 0 }
-func (sym *Sym) Asm() bool { return sym.flags&symAsm != 0 }
-func (sym *Sym) AlgGen() bool { return sym.flags&symAlgGen != 0 }
-
-func (sym *Sym) SetExport(b bool) { sym.flags.set(symExport, b) }
-func (sym *Sym) SetPackage(b bool) { sym.flags.set(symPackage, b) }
-func (sym *Sym) SetExported(b bool) { sym.flags.set(symExported, b) }
-func (sym *Sym) SetUniq(b bool) { sym.flags.set(symUniq, b) }
-func (sym *Sym) SetSiggen(b bool) { sym.flags.set(symSiggen, b) }
-func (sym *Sym) SetAsm(b bool) { sym.flags.set(symAsm, b) }
-func (sym *Sym) SetAlgGen(b bool) { sym.flags.set(symAlgGen, b) }
-
-func (sym *Sym) isAlias() bool {
- return sym.Def != nil && sym.Def.Sym != sym
-}
-
// The Class of a variable/function describes the "storage class"
// of a variable or function. During parsing, storage classes are
// called declaration contexts.
var Debug_checknil int
var Debug_typeassert int
-var localpkg *Pkg // package being compiled
+var localpkg *types.Pkg // package being compiled
var inimport bool // set during import
-var itabpkg *Pkg // fake pkg for itab entries
+var itabpkg *types.Pkg // fake pkg for itab entries
-var itablinkpkg *Pkg // fake package for runtime itab entries
+var itablinkpkg *types.Pkg // fake package for runtime itab entries
-var Runtimepkg *Pkg // fake package runtime
+var Runtimepkg *types.Pkg // fake package runtime
-var racepkg *Pkg // package runtime/race
+var racepkg *types.Pkg // package runtime/race
-var msanpkg *Pkg // package runtime/msan
+var msanpkg *types.Pkg // package runtime/msan
-var typepkg *Pkg // fake package for runtime type info (headers)
+var typepkg *types.Pkg // fake package for runtime type info (headers)
-var unsafepkg *Pkg // package unsafe
+var unsafepkg *types.Pkg // package unsafe
-var trackpkg *Pkg // fake package for field tracking
+var trackpkg *types.Pkg // fake package for field tracking
-var mappkg *Pkg // fake package for map zero value
+var mappkg *types.Pkg // fake package for map zero value
var zerosize int64
-var Tptr EType // either TPTR32 or TPTR64
-
var myimportpath string
var localimport string
var asmhdr string
-var simtype [NTYPE]EType
+var simtype [NTYPE]types.EType
var (
isforw [NTYPE]bool
var importlist []*Node // imported functions and methods with inlinable bodies
-var funcsyms []*Sym
+var funcsyms []*types.Sym
var dclcontext Class // PEXTERN/PAUTO
package gc
import (
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
)
if nam.Name.Readonly() {
flags = obj.RODATA
}
- if nam.Type != nil && !haspointers(nam.Type) {
+ if nam.Type != nil && !types.Haspointers(nam.Type) {
flags |= obj.NOPTR
}
Ctxt.Globl(s, nam.Type.Width, flags)
}
-func ggloblsym(s *Sym, width int32, flags int16) {
+func ggloblsym(s *types.Sym, width int32, flags int16) {
ggloblLSym(Linksym(s), width, flags)
}
Ctxt.Globl(s, int64(width), int(flags))
}
-func isfat(t *Type) bool {
+func isfat(t *types.Type) bool {
if t != nil {
switch t.Etype {
case TSTRUCT, TARRAY, TSLICE, TSTRING,
}
// nodarg returns a Node for the function argument denoted by t,
-// which is either the entire function argument or result struct (t is a struct *Type)
-// or a specific argument (t is a *Field within a struct *Type).
+// which is either the entire function argument or result struct (t is a struct *types.Type)
+// or a specific argument (t is a *types.Field within a struct *types.Type).
//
// If fp is 0, the node is for use by a caller invoking the given
// function, preparing the arguments before the call
func nodarg(t interface{}, fp int) *Node {
var n *Node
- var funarg Funarg
+ var funarg types.Funarg
switch t := t.(type) {
default:
Fatalf("bad nodarg %T(%v)", t, t)
- case *Type:
+ case *types.Type:
// Entire argument struct, not just one arg
if !t.IsFuncArgStruct() {
Fatalf("nodarg: bad type %v", t)
}
n.Xoffset = first.Offset
- case *Field:
+ case *types.Field:
funarg = t.Funarg
if fp == 1 {
// NOTE(rsc): This should be using t.Nname directly,
// toward time for the Go 1.7 beta).
// At some quieter time (assuming we've never seen these Fatalfs happen)
// we could change this code to use "expect" directly.
- expect := t.Nname
+ expect := asNode(t.Nname)
if expect.isParamHeapCopy() {
expect = expect.Name.Param.Stackcopy
}
for _, n := range Curfn.Func.Dcl {
if (n.Class == PPARAM || n.Class == PPARAMOUT) && !isblanksym(t.Sym) && n.Sym == t.Sym {
if n != expect {
- Fatalf("nodarg: unexpected node: %v (%p %v) vs %v (%p %v)", n, n, n.Op, t.Nname, t.Nname, t.Nname.Op)
+ Fatalf("nodarg: unexpected node: %v (%p %v) vs %v (%p %v)", n, n, n.Op, asNode(t.Nname), asNode(t.Nname), asNode(t.Nname).Op)
}
return n
}
Fatalf("nodarg: offset not computed for %v", t)
}
n.Xoffset = t.Offset
- n.Orig = t.Nname
+ n.Orig = asNode(t.Nname)
}
// Rewrite argument named _ to __,
case 1: // reading arguments inside call
n.Class = PPARAM
- if funarg == FunargResults {
+ if funarg == types.FunargResults {
n.Class = PPARAMOUT
}
}
package gc
+import "cmd/compile/internal/types"
+
// a function named init is a special case.
// it is called by the initialization before
// main is run. to make it unique within a
var renameinit_initgen int
-func renameinit() *Sym {
+func renameinit() *types.Sym {
renameinit_initgen++
return lookupN("init.", renameinit_initgen)
}
}
// are there any imported init functions
- for _, s := range initSyms {
+ for _, s := range types.InitSyms {
if s.Def != nil {
return true
}
// (1)
gatevar := newname(lookup("initdone·"))
- addvar(gatevar, Types[TUINT8], PEXTERN)
+ addvar(gatevar, types.Types[TUINT8], PEXTERN)
// (2)
fn := nod(ODCLFUNC, nil, nil)
r = append(r, a)
// (6)
- for _, s := range initSyms {
+ for _, s := range types.InitSyms {
if s.Def != nil && s != initsym {
// could check that it is fn of no args/returns
- a = nod(OCALL, s.Def, nil)
+ a = nod(OCALL, asNode(s.Def), nil)
r = append(r, a)
}
}
if s.Def == nil {
break
}
- a = nod(OCALL, s.Def, nil)
+ a = nod(OCALL, asNode(s.Def), nil)
r = append(r, a)
}
package gc
import (
+ "cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
)
// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
// the ->sym can be re-used in the local package, so peel it off the receiver's type.
-func fnpkg(fn *Node) *Pkg {
+func fnpkg(fn *Node) *types.Pkg {
if fn.IsMethod() {
// method
rcvr := fn.Type.Recv().Type
// hack, TODO, check for better way to link method nodes back to the thing with the ->inl
// this is so export can find the body of a method
- fn.Type.SetNname(n)
+ fn.Type.FuncType().Nname = asTypesNode(n)
if Debug['m'] > 1 {
fmt.Printf("%v: can inline %#v as: %#v { %#v }\n", fn.Line(), n, fn.Type, n.Func.Inl)
}
if n.isMethodCalledAsFunction() {
- if d := n.Left.Sym.Def; d != nil && d.Func.Inl.Len() != 0 {
+ if d := asNode(n.Left.Sym.Def); d != nil && d.Func.Inl.Len() != 0 {
*budget -= d.Func.InlCost
break
}
if t.Nname() == nil {
Fatalf("no function definition for [%p] %+v\n", t, t)
}
- if inlfn := t.Nname().Func; inlfn.Inl.Len() != 0 {
+ if inlfn := asNode(t.FuncType().Nname).Func; inlfn.Inl.Len() != 0 {
*budget -= inlfn.InlCost
break
}
}
if n.Left.Func != nil && n.Left.Func.Inl.Len() != 0 && !isIntrinsicCall(n) { // normal case
n = mkinlcall(n, n.Left, n.Isddd())
- } else if n.isMethodCalledAsFunction() && n.Left.Sym.Def != nil {
- n = mkinlcall(n, n.Left.Sym.Def, n.Isddd())
+ } else if n.isMethodCalledAsFunction() && asNode(n.Left.Sym.Def) != nil {
+ n = mkinlcall(n, asNode(n.Left.Sym.Def), n.Isddd())
}
case OCALLMETH:
Fatalf("no function definition for [%p] %+v\n", n.Left.Type, n.Left.Type)
}
- n = mkinlcall(n, n.Left.Type.Nname(), n.Isddd())
+ n = mkinlcall(n, asNode(n.Left.Type.FuncType().Nname), n.Isddd())
}
lineno = lno
return n
}
-func tinlvar(t *Field, inlvars map[*Node]*Node) *Node {
- if t.Nname != nil && !isblank(t.Nname) {
- inlvar := inlvars[t.Nname]
+func tinlvar(t *types.Field, inlvars map[*Node]*Node) *Node {
+ if asNode(t.Nname) != nil && !isblank(asNode(t.Nname)) {
+ inlvar := inlvars[asNode(t.Nname)]
if inlvar == nil {
- Fatalf("missing inlvar for %v\n", t.Nname)
+ Fatalf("missing inlvar for %v\n", asNode(t.Nname))
}
return inlvar
}
// temporaries for return values.
var m *Node
for _, t := range fn.Type.Results().Fields().Slice() {
- if t != nil && t.Nname != nil && !isblank(t.Nname) {
- m = inlvar(t.Nname)
+ if t != nil && asNode(t.Nname) != nil && !isblank(asNode(t.Nname)) {
+ m = inlvar(asNode(t.Nname))
m = typecheck(m, Erv)
- inlvars[t.Nname] = m
+ inlvars[asNode(t.Nname)] = m
} else {
// anonymous return values, synthesize names for use in assignment that replaces return
m = retvar(t, i)
// method call with a receiver.
t := fn.Type.Recv()
- if t != nil && t.Nname != nil && !isblank(t.Nname) && inlvars[t.Nname] == nil {
- Fatalf("missing inlvar for %v\n", t.Nname)
+ if t != nil && t.Nname != nil && !isblank(asNode(t.Nname)) && inlvars[asNode(t.Nname)] == nil {
+ Fatalf("missing inlvar for %v\n", asNode(t.Nname))
}
if n.Left.Left == nil {
Fatalf("method call without receiver: %+v", n)
// check if inlined function is variadic.
variadic := false
- var varargtype *Type
+ var varargtype *types.Type
varargcount := 0
for _, t := range fn.Type.Params().Fields().Slice() {
if t.Isddd() {
// append receiver inlvar to LHS.
t := fn.Type.Recv()
- if t != nil && t.Nname != nil && !isblank(t.Nname) && inlvars[t.Nname] == nil {
- Fatalf("missing inlvar for %v\n", t.Nname)
+ if t != nil && t.Nname != nil && !isblank(asNode(t.Nname)) && inlvars[asNode(t.Nname)] == nil {
+ Fatalf("missing inlvar for %v\n", asNode(t.Nname))
}
if t == nil {
Fatalf("method call unknown receiver type: %+v", n)
}
} else {
// match arguments except final variadic (unless the call is dotted itself)
- t, it := iterFields(fn.Type.Params())
+ t, it := types.IterFields(fn.Type.Params())
for t != nil {
if li >= n.List.Len() {
break
as.Right = nodnil()
as.Right.Type = varargtype
} else {
- varslicetype := typSlice(varargtype.Elem())
+ varslicetype := types.NewSlice(varargtype.Elem())
as.Right = nod(OCOMPLIT, nil, typenod(varslicetype))
as.Right.List.Set(varargs)
}
}
// Synthesize a variable to store the inlined function's results in.
-func retvar(t *Field, i int) *Node {
+func retvar(t *types.Field, i int) *Node {
n := newname(lookupN("~r", i))
n.Type = t.Type
n.Class = PAUTO
// Synthesize a variable to store the inlined function's arguments
// when they come from a multiple return call.
-func argvar(t *Type, i int) *Node {
+func argvar(t *types.Type, i int) *Node {
n := newname(lookupN("~arg", i))
n.Type = t.Elem()
n.Class = PAUTO
return 0
}
-var internedStrings = map[string]string{}
-
-func internString(b []byte) string {
- s, ok := internedStrings[string(b)] // string(b) here doesn't allocate
- if !ok {
- s = string(b)
- internedStrings[s] = s
- }
- return s
-}
-
// pragcgo is called concurrently if files are parsed concurrently.
func (p *noder) pragcgo(pos src.Pos, text string) string {
f := pragmaFields(text)
"bufio"
"bytes"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"cmd/internal/sys"
Widthptr = thearch.LinkArch.PtrSize
Widthreg = thearch.LinkArch.RegSize
+ // initialize types package
+ // (we need to do this to break dependencies that otherwise
+ // would lead to import cycles)
+ types.Widthptr = Widthptr
+ types.Dowidth = dowidth
+ types.Fatalf = Fatalf
+ types.Sconv = func(s *types.Sym, flag, mode int) string {
+ return sconv(s, FmtFlag(flag), fmtMode(mode))
+ }
+ types.Tconv = func(t *types.Type, flag, mode, depth int) string {
+ return tconv(t, FmtFlag(flag), fmtMode(mode), depth)
+ }
+ types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune, mode int) {
+ symFormat(sym, s, verb, fmtMode(mode))
+ }
+ types.FormatType = func(t *types.Type, s fmt.State, verb rune, mode int) {
+ typeFormat(t, s, verb, fmtMode(mode))
+ }
+ types.Cmptyp = cmptyp
+ types.FieldName = func(f *types.Field) string {
+ return f.Sym.Name
+ }
+ types.TypeLinkSym = func(t *types.Type) *obj.LSym {
+ return Linksym(typenamesym(t))
+ }
+ types.FmtLeft = int(FmtLeft)
+ types.FmtUnsigned = int(FmtUnsigned)
+ types.FErr = FErr
+
initUniverse()
blockgen = 1
inimport = false
}
-func importfile(f *Val) *Pkg {
+func importfile(f *Val) *types.Pkg {
path_, ok := f.U.(string)
if !ok {
yyerror("import path must be a string")
func clearImports() {
for _, s := range localpkg.Syms {
- if s.Def == nil {
+ if asNode(s.Def) == nil {
continue
}
- if s.Def.Op == OPACK {
+ if asNode(s.Def).Op == OPACK {
// throw away top-level package name leftover
// from previous file.
// leave s->block set to cause redeclaration
// errors if a conflicting top-level name is
// introduced by a different file.
- if !s.Def.Used() && nsyntaxerrors == 0 {
- pkgnotused(s.Def.Pos, s.Def.Name.Pkg.Path, s.Name)
+ if !asNode(s.Def).Used() && nsyntaxerrors == 0 {
+ pkgnotused(asNode(s.Def).Pos, asNode(s.Def).Name.Pkg.Path, s.Name)
}
s.Def = nil
continue
}
- if s.isAlias() {
+ if IsAlias(s) {
// throw away top-level name left over
// from previous import . "x"
- if s.Def.Name != nil && s.Def.Name.Pack != nil && !s.Def.Name.Pack.Used() && nsyntaxerrors == 0 {
- pkgnotused(s.Def.Name.Pack.Pos, s.Def.Name.Pack.Name.Pkg.Path, "")
- s.Def.Name.Pack.SetUsed(true)
+ if asNode(s.Def).Name != nil && asNode(s.Def).Name.Pack != nil && !asNode(s.Def).Name.Pack.Used() && nsyntaxerrors == 0 {
+ pkgnotused(asNode(s.Def).Name.Pack.Pos, asNode(s.Def).Name.Pack.Name.Pkg.Path, "")
+ asNode(s.Def).Name.Pack.SetUsed(true)
}
s.Def = nil
}
}
}
+
+func IsAlias(sym *types.Sym) bool {
+ return sym.Def != nil && asNode(sym.Def).Sym != sym
+}
fmt.Fprintln(&b, "// Code generated by mkbuiltin.go. DO NOT EDIT.")
fmt.Fprintln(&b)
fmt.Fprintln(&b, "package gc")
+ fmt.Fprintln(&b)
+ fmt.Fprintln(&b, `import "cmd/compile/internal/types"`)
mkbuiltin(&b, "runtime")
fmt.Fprintln(w, "}")
fmt.Fprintln(w)
- fmt.Fprintf(w, "func %sTypes() []*Type {\n", name)
- fmt.Fprintf(w, "var typs [%d]*Type\n", len(interner.typs))
+ fmt.Fprintf(w, "func %sTypes() []*types.Type {\n", name)
+ fmt.Fprintf(w, "var typs [%d]*types.Type\n", len(interner.typs))
for i, typ := range interner.typs {
fmt.Fprintf(w, "typs[%d] = %s\n", i, typ)
}
case *ast.Ident:
switch t.Name {
case "byte":
- return "bytetype"
+ return "types.Bytetype"
case "rune":
- return "runetype"
+ return "types.Runetype"
}
- return fmt.Sprintf("Types[T%s]", strings.ToUpper(t.Name))
+ return fmt.Sprintf("types.Types[T%s]", strings.ToUpper(t.Name))
case *ast.SelectorExpr:
if t.X.(*ast.Ident).Name != "unsafe" || t.Sel.Name != "Pointer" {
log.Fatalf("unhandled type: %#v", t)
}
- return "Types[TUNSAFEPTR]"
+ return "types.Types[TUNSAFEPTR]"
case *ast.ArrayType:
if t.Len == nil {
- return fmt.Sprintf("typSlice(%s)", i.subtype(t.Elt))
+ return fmt.Sprintf("types.NewSlice(%s)", i.subtype(t.Elt))
}
- return fmt.Sprintf("typArray(%s, %d)", i.subtype(t.Elt), intconst(t.Len))
+ return fmt.Sprintf("types.NewArray(%s, %d)", i.subtype(t.Elt), intconst(t.Len))
case *ast.ChanType:
- dir := "Cboth"
+ dir := "types.Cboth"
switch t.Dir {
case ast.SEND:
- dir = "Csend"
+ dir = "types.Csend"
case ast.RECV:
- dir = "Crecv"
+ dir = "types.Crecv"
}
- return fmt.Sprintf("typChan(%s, %s)", i.subtype(t.Value), dir)
+ return fmt.Sprintf("types.NewChan(%s, %s)", i.subtype(t.Value), dir)
case *ast.FuncType:
return fmt.Sprintf("functype(nil, %s, %s)", i.fields(t.Params, false), i.fields(t.Results, false))
case *ast.InterfaceType:
if len(t.Methods.List) != 0 {
log.Fatal("non-empty interfaces unsupported")
}
- return "Types[TINTER]"
+ return "types.Types[TINTER]"
case *ast.MapType:
- return fmt.Sprintf("typMap(%s, %s)", i.subtype(t.Key), i.subtype(t.Value))
+ return fmt.Sprintf("types.NewMap(%s, %s)", i.subtype(t.Key), i.subtype(t.Value))
case *ast.StarExpr:
- return fmt.Sprintf("typPtr(%s)", i.subtype(t.X))
+ return fmt.Sprintf("types.NewPtr(%s)", i.subtype(t.X))
case *ast.StructType:
return fmt.Sprintf("tostruct(%s)", i.fields(t.Fields, true))
"unicode/utf8"
"cmd/compile/internal/syntax"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
)
ipkg.Direct = true
- var my *Sym
+ var my *types.Sym
if imp.LocalPkgName != nil {
my = p.name(imp.LocalPkgName)
} else {
lineno = pack.Pos
redeclare(my, "as imported package name")
}
- my.Def = pack
+ my.Def = asTypesNode(pack)
my.Lastlineno = pack.Pos
my.Block = 1 // at top level
}
return p.nod(expr, OTMAP, p.typeExpr(expr.Key), p.typeExpr(expr.Value))
case *syntax.ChanType:
n := p.nod(expr, OTCHAN, p.typeExpr(expr.Elem), nil)
- n.Etype = EType(p.chanDir(expr.Dir))
+ n.Etype = types.EType(p.chanDir(expr.Dir))
return n
case *syntax.TypeSwitchGuard:
return nil
}
-func (p *noder) chanDir(dir syntax.ChanDir) ChanDir {
+func (p *noder) chanDir(dir syntax.ChanDir) types.ChanDir {
switch dir {
case 0:
- return Cboth
+ return types.Cboth
case syntax.SendOnly:
- return Csend
+ return types.Csend
case syntax.RecvOnly:
- return Crecv
+ return types.Crecv
}
panic("unhandled ChanDir")
}
return n
}
-func (p *noder) packname(expr syntax.Expr) *Sym {
+func (p *noder) packname(expr syntax.Expr) *types.Sym {
switch expr := expr.(type) {
case *syntax.Name:
name := p.name(expr)
return name
case *syntax.SelectorExpr:
name := p.name(expr.X.(*syntax.Name))
- var pkg *Pkg
- if name.Def == nil || name.Def.Op != OPACK {
+ var pkg *types.Pkg
+ if asNode(name.Def) == nil || asNode(name.Def).Op != OPACK {
yyerror("%v is not a package", name)
pkg = localpkg
} else {
- name.Def.SetUsed(true)
- pkg = name.Def.Name.Pkg
+ asNode(name.Def).SetUsed(true)
+ pkg = asNode(name.Def).Name.Pkg
}
return restrictlookup(expr.Sel.Value, pkg)
}
if stmt.Op != 0 && stmt.Op != syntax.Def {
n := p.nod(stmt, OASOP, p.expr(stmt.Lhs), p.expr(stmt.Rhs))
n.SetImplicit(stmt.Rhs == syntax.ImplicitOne)
- n.Etype = EType(p.binOp(stmt.Op))
+ n.Etype = types.EType(p.binOp(stmt.Op))
return n
}
if ln.Class != PPARAMOUT {
break
}
- if ln.Sym.Def != ln {
+ if asNode(ln.Sym.Def) != ln {
yyerror("%s is shadowed during return", ln.Sym.Name)
}
}
}
}
-func (p *noder) name(name *syntax.Name) *Sym {
+func (p *noder) name(name *syntax.Name) *types.Sym {
return lookup(name.Value)
}
return 0
}
-func mkname(sym *Sym) *Node {
+func mkname(sym *types.Sym) *Node {
n := oldname(sym)
if n.Name != nil && n.Name.Pack != nil {
n.Name.Pack.SetUsed(true)
package gc
import (
+ "cmd/compile/internal/types"
"cmd/internal/bio"
"cmd/internal/obj"
"crypto/sha256"
}
for _, exportn := range exportlist {
s := exportn.Sym
- n := s.Def
+ n := asNode(s.Def)
if n == nil {
continue
}
}
if n.Type.Etype == TFUNC && n.Class == PFUNC {
// function
- ptabs = append(ptabs, ptabEntry{s: s, t: s.Def.Type})
+ ptabs = append(ptabs, ptabEntry{s: s, t: asNode(s.Def).Type})
} else {
// variable
- ptabs = append(ptabs, ptabEntry{s: s, t: typPtr(s.Def.Type)})
+ ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(asNode(s.Def).Type)})
}
}
}
}
for _, s := range funcsyms {
- sf := s.Pkg.Lookup(s.funcsymname())
+ sf := s.Pkg.Lookup(funcsymname(s))
dsymptr(sf, 0, s, 0)
ggloblsym(sf, int32(Widthptr), obj.DUPOK|obj.RODATA)
}
funcsyms = nil
}
-func Linksym(s *Sym) *obj.LSym {
+func Linksym(s *types.Sym) *obj.LSym {
if s == nil {
return nil
}
return ls
}
-func duintxx(s *Sym, off int, v uint64, wid int) int {
+func duintxx(s *types.Sym, off int, v uint64, wid int) int {
return duintxxLSym(Linksym(s), off, v, wid)
}
return int(obj.Setuintxx(Ctxt, s, int64(off), v, int64(wid)))
}
-func duint8(s *Sym, off int, v uint8) int {
+func duint8(s *types.Sym, off int, v uint8) int {
return duintxx(s, off, uint64(v), 1)
}
-func duint16(s *Sym, off int, v uint16) int {
+func duint16(s *types.Sym, off int, v uint16) int {
return duintxx(s, off, uint64(v), 2)
}
-func duint32(s *Sym, off int, v uint32) int {
+func duint32(s *types.Sym, off int, v uint32) int {
return duintxx(s, off, uint64(v), 4)
}
-func duintptr(s *Sym, off int, v uint64) int {
+func duintptr(s *types.Sym, off int, v uint64) int {
return duintxx(s, off, v, Widthptr)
}
-func dbvec(s *Sym, off int, bv bvec) int {
+func dbvec(s *types.Sym, off int, bv bvec) int {
// Runtime reads the bitmaps as byte arrays. Oblige.
for j := 0; int32(j) < bv.n; j += 8 {
word := bv.b[j/32]
slicebytes_gen++
symname := fmt.Sprintf(".gobytes.%d", slicebytes_gen)
sym := localpkg.Lookup(symname)
- sym.Def = newname(sym)
+ sym.Def = asTypesNode(newname(sym))
off := dsname(sym, 0, s)
ggloblsym(sym, int32(off), obj.NOPTR|obj.LOCAL)
duintxx(nam.Sym, off, uint64(len), Widthint)
}
-func dsname(s *Sym, off int, t string) int {
+func dsname(s *types.Sym, off int, t string) int {
return dsnameLSym(Linksym(s), off, t)
}
return off + len(t)
}
-func dsymptr(s *Sym, off int, x *Sym, xoff int) int {
+func dsymptr(s *types.Sym, off int, x *types.Sym, xoff int) int {
return dsymptrLSym(Linksym(s), off, Linksym(x), xoff)
}
package gc
import (
+ "cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
)
// Ordertemp allocates a new temporary with the given type,
// pushes it onto the temp stack, and returns it.
// If clear is true, ordertemp emits code to zero the temporary.
-func ordertemp(t *Type, order *Order, clear bool) *Node {
+func ordertemp(t *types.Type, order *Order, clear bool) *Node {
var_ := temp(t)
if clear {
a := nod(OAS, var_, nil)
// (The other candidate would be map access, but map access
// returns a pointer to the result data instead of taking a pointer
// to be filled in.)
-func ordercopyexpr(n *Node, t *Type, order *Order, clear int) *Node {
+func ordercopyexpr(n *Node, t *types.Type, order *Order, clear int) *Node {
var_ := ordertemp(t, order, clear != 0)
a := nod(OAS, var_, n)
a = typecheck(a, Etop)
// ordermapkeytemp prepares n to be a key in a map runtime call and returns n.
// It should only be used for map runtime calls which have *_fast* versions.
-func ordermapkeytemp(t *Type, n *Node, order *Order) *Node {
+func ordermapkeytemp(t *types.Type, n *Node, order *Order) *Node {
// Most map calls need to take the address of the key.
// Exception: map*_fast* calls. See golang.org/issue/19015.
if mapfast(t) == mapslow {
orderexprlist(n.List, order)
n.Rlist.First().Left = orderexpr(n.Rlist.First().Left, order, nil) // arg to recv
ch := n.Rlist.First().Left.Type
- tmp1 := ordertemp(ch.Elem(), order, haspointers(ch.Elem()))
- tmp2 := ordertemp(Types[TBOOL], order, false)
+ tmp1 := ordertemp(ch.Elem(), order, types.Haspointers(ch.Elem()))
+ tmp2 := ordertemp(types.Types[TBOOL], order, false)
order.out = append(order.out, n)
r := nod(OAS, n.List.First(), tmp1)
r = typecheck(r, Etop)
// make copy.
r := n.Right
- if r.Type.IsString() && r.Type != Types[TSTRING] {
+ if r.Type.IsString() && r.Type != types.Types[TSTRING] {
r = nod(OCONV, r, nil)
- r.Type = Types[TSTRING]
+ r.Type = types.Types[TSTRING]
r = typecheck(r, Erv)
}
n.Right = ordercopyexpr(r, r.Type, order, 0)
// n->alloc is the temp for the iterator.
- prealloc[n] = ordertemp(Types[TUINT8], order, true)
+ prealloc[n] = ordertemp(types.Types[TUINT8], order, true)
}
for i := range n.List.Slice() {
n.List.SetIndex(i, orderexprinplace(n.List.Index(i), order))
n2.Ninit.Append(tmp2)
}
- r.Left = ordertemp(r.Right.Left.Type.Elem(), order, haspointers(r.Right.Left.Type.Elem()))
+ r.Left = ordertemp(r.Right.Left.Type.Elem(), order, types.Haspointers(r.Right.Left.Type.Elem()))
tmp2 = nod(OAS, tmp1, r.Left)
tmp2 = typecheck(tmp2, Etop)
n2.Ninit.Append(tmp2)
n2.Ninit.Append(tmp2)
}
- r.List.Set1(ordertemp(Types[TBOOL], order, false))
+ r.List.Set1(ordertemp(types.Types[TBOOL], order, false))
tmp2 = okas(tmp1, r.List.First())
tmp2 = typecheck(tmp2, Etop)
n2.Ninit.Append(tmp2)
orderexprlist(n.List, order)
if n.List.Len() > 5 {
- t := typArray(Types[TSTRING], int64(n.List.Len()))
+ t := types.NewArray(types.Types[TSTRING], int64(n.List.Len()))
prealloc[n] = ordertemp(t, order, false)
}
case OCLOSURE:
if n.Noescape() && n.Func.Cvars.Len() > 0 {
- prealloc[n] = ordertemp(Types[TUINT8], order, false) // walk will fill in correct type
+ prealloc[n] = ordertemp(types.Types[TUINT8], order, false) // walk will fill in correct type
}
case OARRAYLIT, OSLICELIT, OCALLPART:
orderexprlist(n.List, order)
orderexprlist(n.Rlist, order)
if n.Noescape() {
- prealloc[n] = ordertemp(Types[TUINT8], order, false) // walk will fill in correct type
+ prealloc[n] = ordertemp(types.Types[TUINT8], order, false) // walk will fill in correct type
}
case ODDDARG:
left := []*Node{}
for _, l := range n.List.Slice() {
if !isblank(l) {
- tmp := ordertemp(l.Type, order, haspointers(l.Type))
+ tmp := ordertemp(l.Type, order, types.Haspointers(l.Type))
tmplist = append(tmplist, tmp)
left = append(left, l)
}
var tmp1, tmp2 *Node
if !isblank(n.List.First()) {
typ := n.Rlist.First().Type
- tmp1 = ordertemp(typ, order, haspointers(typ))
+ tmp1 = ordertemp(typ, order, types.Haspointers(typ))
}
if !isblank(n.List.Second()) {
- tmp2 = ordertemp(Types[TBOOL], order, false)
+ tmp2 = ordertemp(types.Types[TBOOL], order, false)
}
order.out = append(order.out, n)
import (
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/src"
// "Portable" code generation.
-func makefuncdatasym(pp *Progs, nameprefix string, funcdatakind int64, curfn *Node) *Sym {
+func makefuncdatasym(pp *Progs, nameprefix string, funcdatakind int64, curfn *Node) *types.Sym {
// This symbol requires a unique, reproducible name;
// unique to avoid duplicate symbols,
// and reproducible for reproducible builds and toolstash.
return a.Used()
}
- ap := haspointers(a.Type)
- bp := haspointers(b.Type)
+ ap := types.Haspointers(a.Type)
+ bp := types.Haspointers(b.Type)
if ap != bp {
return ap
}
}
if f.Config.NeedsFpScratch && scratchUsed {
- s.scratchFpMem = tempAt(src.NoXPos, s.curfn, Types[TUINT64])
+ s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[TUINT64])
}
sort.Sort(byStackVar(fn.Dcl))
}
s.stksize += w
s.stksize = Rnd(s.stksize, int64(n.Type.Align))
- if haspointers(n.Type) {
+ if types.Haspointers(n.Type) {
s.stkptrsize = s.stksize
}
if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
// fieldtrack adds R_USEFIELD relocations to fnsym to record any
// struct fields that it used.
-func fieldtrack(fnsym *obj.LSym, tracked map[*Sym]struct{}) {
+func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) {
if fnsym == nil {
return
}
return
}
- trackSyms := make([]*Sym, 0, len(tracked))
+ trackSyms := make([]*types.Sym, 0, len(tracked))
for sym := range tracked {
trackSyms = append(trackSyms, sym)
}
}
}
-type symByName []*Sym
+type symByName []*types.Sym
func (a symByName) Len() int { return len(a) }
func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
package gc
import (
+ "cmd/compile/internal/types"
"reflect"
"sort"
"testing"
)
-func typeWithoutPointers() *Type {
- t := typ(TSTRUCT)
- f := &Field{Type: typ(TINT)}
- t.SetFields([]*Field{f})
+func typeWithoutPointers() *types.Type {
+ t := types.New(TSTRUCT)
+ f := &types.Field{Type: types.New(TINT)}
+ t.SetFields([]*types.Field{f})
return t
}
-func typeWithPointers() *Type {
- t := typ(TSTRUCT)
- f := &Field{Type: typ(TPTR64)}
- t.SetFields([]*Field{f})
+func typeWithPointers() *types.Type {
+ t := types.New(TSTRUCT)
+ f := &types.Field{Type: types.New(TPTR64)}
+ t.SetFields([]*types.Field{f})
return t
}
true,
},
{
- Node{Class: PAUTO, Type: &Type{}, Name: &Name{flags: nameNeedzero}},
- Node{Class: PAUTO, Type: &Type{}, Name: &Name{}},
+ Node{Class: PAUTO, Type: &types.Type{}, Name: &Name{flags: nameNeedzero}},
+ Node{Class: PAUTO, Type: &types.Type{}, Name: &Name{}},
true,
},
{
- Node{Class: PAUTO, Type: &Type{}, Name: &Name{}},
- Node{Class: PAUTO, Type: &Type{}, Name: &Name{flags: nameNeedzero}},
+ Node{Class: PAUTO, Type: &types.Type{}, Name: &Name{}},
+ Node{Class: PAUTO, Type: &types.Type{}, Name: &Name{flags: nameNeedzero}},
false,
},
{
- Node{Class: PAUTO, Type: &Type{Width: 1}, Name: &Name{}},
- Node{Class: PAUTO, Type: &Type{Width: 2}, Name: &Name{}},
+ Node{Class: PAUTO, Type: &types.Type{Width: 1}, Name: &Name{}},
+ Node{Class: PAUTO, Type: &types.Type{Width: 2}, Name: &Name{}},
false,
},
{
- Node{Class: PAUTO, Type: &Type{Width: 2}, Name: &Name{}},
- Node{Class: PAUTO, Type: &Type{Width: 1}, Name: &Name{}},
+ Node{Class: PAUTO, Type: &types.Type{Width: 2}, Name: &Name{}},
+ Node{Class: PAUTO, Type: &types.Type{Width: 1}, Name: &Name{}},
true,
},
{
- Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
- Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "xyz"}},
+ Node{Class: PAUTO, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{Name: "abc"}},
+ Node{Class: PAUTO, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{Name: "xyz"}},
true,
},
{
- Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
- Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
+ Node{Class: PAUTO, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{Name: "abc"}},
+ Node{Class: PAUTO, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{Name: "abc"}},
false,
},
{
- Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "xyz"}},
- Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
+ Node{Class: PAUTO, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{Name: "xyz"}},
+ Node{Class: PAUTO, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{Name: "abc"}},
false,
},
}
func TestStackvarSort(t *testing.T) {
inp := []*Node{
- {Class: PFUNC, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
- {Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
- {Class: PFUNC, Xoffset: 0, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
- {Class: PFUNC, Xoffset: 10, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
- {Class: PFUNC, Xoffset: 20, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
- {Class: PAUTO, flags: nodeUsed, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
- {Class: PAUTO, Type: typeWithoutPointers(), Name: &Name{}, Sym: &Sym{}},
- {Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
- {Class: PAUTO, Type: &Type{}, Name: &Name{flags: nameNeedzero}, Sym: &Sym{}},
- {Class: PAUTO, Type: &Type{Width: 1}, Name: &Name{}, Sym: &Sym{}},
- {Class: PAUTO, Type: &Type{Width: 2}, Name: &Name{}, Sym: &Sym{}},
- {Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
- {Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "xyz"}},
+ {Class: PFUNC, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PAUTO, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PFUNC, Xoffset: 0, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PFUNC, Xoffset: 10, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PFUNC, Xoffset: 20, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PAUTO, flags: nodeUsed, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PAUTO, Type: typeWithoutPointers(), Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PAUTO, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PAUTO, Type: &types.Type{}, Name: &Name{flags: nameNeedzero}, Sym: &types.Sym{}},
+ {Class: PAUTO, Type: &types.Type{Width: 1}, Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PAUTO, Type: &types.Type{Width: 2}, Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PAUTO, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{Name: "abc"}},
+ {Class: PAUTO, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{Name: "xyz"}},
}
want := []*Node{
- {Class: PFUNC, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
- {Class: PFUNC, Xoffset: 0, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
- {Class: PFUNC, Xoffset: 10, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
- {Class: PFUNC, Xoffset: 20, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
- {Class: PAUTO, flags: nodeUsed, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
- {Class: PAUTO, Type: &Type{}, Name: &Name{flags: nameNeedzero}, Sym: &Sym{}},
- {Class: PAUTO, Type: &Type{Width: 2}, Name: &Name{}, Sym: &Sym{}},
- {Class: PAUTO, Type: &Type{Width: 1}, Name: &Name{}, Sym: &Sym{}},
- {Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
- {Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
- {Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
- {Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "xyz"}},
- {Class: PAUTO, Type: typeWithoutPointers(), Name: &Name{}, Sym: &Sym{}},
+ {Class: PFUNC, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PFUNC, Xoffset: 0, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PFUNC, Xoffset: 10, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PFUNC, Xoffset: 20, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PAUTO, flags: nodeUsed, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PAUTO, Type: &types.Type{}, Name: &Name{flags: nameNeedzero}, Sym: &types.Sym{}},
+ {Class: PAUTO, Type: &types.Type{Width: 2}, Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PAUTO, Type: &types.Type{Width: 1}, Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PAUTO, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PAUTO, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{}},
+ {Class: PAUTO, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{Name: "abc"}},
+ {Class: PAUTO, Type: &types.Type{}, Name: &Name{}, Sym: &types.Sym{Name: "xyz"}},
+ {Class: PAUTO, Type: typeWithoutPointers(), Name: &Name{}, Sym: &types.Sym{}},
}
// haspointers updates Type.Haspointers as a side effect, so
// exercise this function on all inputs so that reflect.DeepEqual
// doesn't produce false positives.
for i := range want {
- haspointers(want[i].Type)
- haspointers(inp[i].Type)
+ types.Haspointers(want[i].Type)
+ types.Haspointers(inp[i].Type)
}
sort.Sort(byStackVar(inp))
import (
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"crypto/md5"
"fmt"
// nor do we care about empty structs (handled by the pointer check),
// nor do we care about the fake PAUTOHEAP variables.
func livenessShouldTrack(n *Node) bool {
- return n.Op == ONAME && (n.Class == PAUTO || n.Class == PPARAM || n.Class == PPARAMOUT) && haspointers(n.Type)
+ return n.Op == ONAME && (n.Class == PAUTO || n.Class == PPARAM || n.Class == PPARAMOUT) && types.Haspointers(n.Type)
}
// getvariables returns the list of on-stack variables that we need to track.
// and then simply copied into bv at the correct offset on future calls with
// the same type t. On https://rsc.googlecode.com/hg/testdata/slow.go, onebitwalktype1
// accounts for 40% of the 6g execution time.
-func onebitwalktype1(t *Type, xoffset *int64, bv bvec) {
+func onebitwalktype1(t *types.Type, xoffset *int64, bv bvec) {
if t.Align > 0 && *xoffset&int64(t.Align-1) != 0 {
Fatalf("onebitwalktype1: invalid initial alignment, %v", t)
}
fmt.Printf("\n")
}
-func finishgclocals(sym *Sym) {
+func finishgclocals(sym *types.Sym) {
ls := Linksym(sym)
ls.Name = fmt.Sprintf("gclocals·%x", md5.Sum(ls.P))
ls.Set(obj.AttrDuplicateOK, true)
// first word dumped is the total number of bitmaps. The second word is the
// length of the bitmaps. All bitmaps are assumed to be of equal length. The
// remaining bytes are the raw bitmaps.
-func livenessemit(lv *Liveness, argssym, livesym *Sym) {
+func livenessemit(lv *Liveness, argssym, livesym *types.Sym) {
args := bvalloc(argswords(lv))
aoff := duint32(argssym, 0, uint32(len(lv.livevars))) // number of bitmaps
aoff = duint32(argssym, aoff, uint32(args.n)) // number of bits in each bitmap
// pointer variables in the function and emits a runtime data
// structure read by the garbage collector.
// Returns a map from GC safe points to their corresponding stack map index.
-func liveness(e *ssafn, f *ssa.Func, argssym, livesym *Sym) map[*ssa.Value]int {
+func liveness(e *ssafn, f *ssa.Func, argssym, livesym *types.Sym) map[*ssa.Value]int {
// Construct the global liveness state.
vars := getvariables(e.curfn)
lv := newliveness(e.curfn, f, vars, e.stkptrsize)
package gc
import (
+ "cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
"strings"
// getcallerpc. We use -widthptr(FP) for x86.
// BUG: this will not work on arm.
nodpc := *nodfp
- nodpc.Type = Types[TUINTPTR]
+ nodpc.Type = types.Types[TUINTPTR]
nodpc.Xoffset = int64(-Widthptr)
nd := mkcall("racefuncenter", nil, nil, &nodpc)
fn.Func.Enter.Prepend(nd)
instrumentnode(&n.Left, init, 0, 0)
if n.Left.Type.IsMap() {
n1 := nod(OCONVNOP, n.Left, nil)
- n1.Type = typPtr(Types[TUINT8])
+ n1.Type = types.NewPtr(types.Types[TUINT8])
n1 = nod(OIND, n1, nil)
n1 = typecheck(n1, Erv)
callinstr(&n1, init, 0, skip)
func uintptraddr(n *Node) *Node {
r := nod(OADDR, n, nil)
r.SetBounded(true)
- r = conv(r, Types[TUNSAFEPTR])
- r = conv(r, Types[TUINTPTR])
+ r = conv(r, types.Types[TUNSAFEPTR])
+ r = conv(r, types.Types[TUINTPTR])
return r
}
func detachexpr(n *Node, init *Nodes) *Node {
addr := nod(OADDR, n, nil)
- l := temp(typPtr(n.Type))
+ l := temp(types.NewPtr(n.Type))
as := nod(OAS, l, addr)
as = typecheck(as, Etop)
as = walkexpr(as, init)
package gc
-import "unicode/utf8"
+import (
+ "cmd/compile/internal/types"
+ "unicode/utf8"
+)
// range
func typecheckrange(n *Node) {
var toomany int
var why string
- var t1 *Type
- var t2 *Type
+ var t1 *types.Type
+ var t2 *types.Type
var v1 *Node
var v2 *Node
var ls []*Node
goto out
case TARRAY, TSLICE:
- t1 = Types[TINT]
+ t1 = types.Types[TINT]
t2 = t.Elem()
case TMAP:
}
case TSTRING:
- t1 = Types[TINT]
- t2 = runetype
+ t1 = types.Types[TINT]
+ t2 = types.Runetype
}
if n.List.Len() > 2 || toomany != 0 {
// orderstmt arranged for a copy of the array/slice variable if needed.
ha := a
- hv1 := temp(Types[TINT])
- hn := temp(Types[TINT])
+ hv1 := temp(types.Types[TINT])
+ hn := temp(types.Types[TINT])
var hp *Node
init = append(init, nod(OAS, hv1, nil))
init = append(init, nod(OAS, hn, nod(OLEN, ha, nil)))
if v2 != nil {
- hp = temp(typPtr(n.Type.Elem()))
+ hp = temp(types.NewPtr(n.Type.Elem()))
tmp := nod(OINDEX, ha, nodintconst(0))
tmp.SetBounded(true)
init = append(init, nod(OAS, hp, nod(OADDR, tmp, nil)))
tmp.Type = hp.Type
tmp.Typecheck = 1
- tmp.Right.Type = Types[Tptr]
+ tmp.Right.Type = types.Types[types.Tptr]
tmp.Right.Typecheck = 1
a = nod(OAS, hp, tmp)
a = typecheck(a, Etop)
hv1 := temp(t.Elem())
hv1.Typecheck = 1
- if haspointers(t.Elem()) {
+ if types.Haspointers(t.Elem()) {
init = append(init, nod(OAS, hv1, nil))
}
- hb := temp(Types[TBOOL])
+ hb := temp(types.Types[TBOOL])
n.Left = nod(ONE, hb, nodbool(false))
a := nod(OAS2RECV, nil, nil)
// orderstmt arranged for a copy of the string variable.
ha := a
- hv1 := temp(Types[TINT])
- hv1t := temp(Types[TINT])
- hv2 := temp(runetype)
+ hv1 := temp(types.Types[TINT])
+ hv1t := temp(types.Types[TINT])
+ hv2 := temp(types.Runetype)
// hv1 := 0
init = append(init, nod(OAS, hv1, nil))
// hv2 := rune(ha[hv1])
nind := nod(OINDEX, ha, hv1)
nind.SetBounded(true)
- body = append(body, nod(OAS, hv2, conv(nind, runetype)))
+ body = append(body, nod(OAS, hv2, conv(nind, types.Runetype)))
// if hv2 < utf8.RuneSelf
nif := nod(OIF, nil, nil)
n.Left = nod(ONE, nod(OLEN, a, nil), nodintconst(0))
// hp = &a[0]
- hp := temp(Types[TUNSAFEPTR])
+ hp := temp(types.Types[TUNSAFEPTR])
tmp := nod(OINDEX, a, nodintconst(0))
tmp.SetBounded(true)
tmp = nod(OADDR, tmp, nil)
tmp = nod(OCONVNOP, tmp, nil)
- tmp.Type = Types[TUNSAFEPTR]
+ tmp.Type = types.Types[TUNSAFEPTR]
n.Nbody.Append(nod(OAS, hp, tmp))
// hn = len(a) * sizeof(elem(a))
- hn := temp(Types[TUINTPTR])
+ hn := temp(types.Types[TUINTPTR])
tmp = nod(OLEN, a, nil)
tmp = nod(OMUL, tmp, nodintconst(elemsize))
- tmp = conv(tmp, Types[TUINTPTR])
+ tmp = conv(tmp, types.Types[TUINTPTR])
n.Nbody.Append(nod(OAS, hn, tmp))
var fn *Node
- if haspointers(a.Type.Elem()) {
+ if types.Haspointers(a.Type.Elem()) {
// memclrHasPointers(hp, hn)
fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
} else {
package gc
import (
+ "cmd/compile/internal/types"
"cmd/internal/gcprog"
"cmd/internal/obj"
"cmd/internal/src"
)
type itabEntry struct {
- t, itype *Type
- sym *Sym
+ t, itype *types.Type
+ sym *types.Sym
// symbol of the itab itself;
// filled in lazily after typecheck
}
type ptabEntry struct {
- s *Sym
- t *Type
+ s *types.Sym
+ t *types.Type
}
// runtime interface and reflection data structures
-var signatlist []*Type
+var signatlist []*types.Type
var itabs []itabEntry
var ptabs []ptabEntry
type Sig struct {
name string
- pkg *Pkg
- isym *Sym
- tsym *Sym
- type_ *Type
- mtype *Type
+ pkg *types.Pkg
+ isym *types.Sym
+ tsym *types.Sym
+ type_ *types.Type
+ mtype *types.Type
offset int32
}
MAXVALSIZE = 128
)
-func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{})
-func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{})
-func uncommonSize(t *Type) int { // Sizeof(runtime.uncommontype{})
+func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{})
+func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{})
+func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
if t.Sym == nil && len(methods(t)) == 0 {
return 0
}
return 4 + 2 + 2 + 4 + 4
}
-func makefield(name string, t *Type) *Field {
- f := newField()
+func makefield(name string, t *types.Type) *types.Field {
+ f := types.NewField()
f.Type = t
- f.Sym = nopkg.Lookup(name)
+ f.Sym = types.Nopkg.Lookup(name)
return f
}
-func mapbucket(t *Type) *Type {
+func mapbucket(t *types.Type) *types.Type {
if t.MapType().Bucket != nil {
return t.MapType().Bucket
}
- bucket := typ(TSTRUCT)
+ bucket := types.New(TSTRUCT)
keytype := t.Key()
valtype := t.Val()
dowidth(keytype)
dowidth(valtype)
if keytype.Width > MAXKEYSIZE {
- keytype = typPtr(keytype)
+ keytype = types.NewPtr(keytype)
}
if valtype.Width > MAXVALSIZE {
- valtype = typPtr(valtype)
+ valtype = types.NewPtr(valtype)
}
- field := make([]*Field, 0, 5)
+ field := make([]*types.Field, 0, 5)
// The first field is: uint8 topbits[BUCKETSIZE].
- arr := typArray(Types[TUINT8], BUCKETSIZE)
+ arr := types.NewArray(types.Types[TUINT8], BUCKETSIZE)
field = append(field, makefield("topbits", arr))
- arr = typArray(keytype, BUCKETSIZE)
+ arr = types.NewArray(keytype, BUCKETSIZE)
arr.SetNoalg(true)
field = append(field, makefield("keys", arr))
- arr = typArray(valtype, BUCKETSIZE)
+ arr = types.NewArray(valtype, BUCKETSIZE)
arr.SetNoalg(true)
field = append(field, makefield("values", arr))
// then it would end with an extra 32-bit padding field.
// Preempt that by emitting the padding here.
if int(t.Val().Align) > Widthptr || int(t.Key().Align) > Widthptr {
- field = append(field, makefield("pad", Types[TUINTPTR]))
+ field = append(field, makefield("pad", types.Types[TUINTPTR]))
}
// If keys and values have no pointers, the map implementation
// Arrange for the bucket to have no pointers by changing
// the type of the overflow field to uintptr in this case.
// See comment on hmap.overflow in ../../../../runtime/hashmap.go.
- otyp := typPtr(bucket)
- if !haspointers(t.Val()) && !haspointers(t.Key()) && t.Val().Width <= MAXVALSIZE && t.Key().Width <= MAXKEYSIZE {
- otyp = Types[TUINTPTR]
+ otyp := types.NewPtr(bucket)
+ if !types.Haspointers(t.Val()) && !types.Haspointers(t.Key()) && t.Val().Width <= MAXVALSIZE && t.Key().Width <= MAXKEYSIZE {
+ otyp = types.Types[TUINTPTR]
}
ovf := makefield("overflow", otyp)
field = append(field, ovf)
// Builds a type representing a Hmap structure for the given map type.
// Make sure this stays in sync with ../../../../runtime/hashmap.go!
-func hmap(t *Type) *Type {
+func hmap(t *types.Type) *types.Type {
if t.MapType().Hmap != nil {
return t.MapType().Hmap
}
bucket := mapbucket(t)
- fields := []*Field{
- makefield("count", Types[TINT]),
- makefield("flags", Types[TUINT8]),
- makefield("B", Types[TUINT8]),
- makefield("noverflow", Types[TUINT16]),
- makefield("hash0", Types[TUINT32]),
- makefield("buckets", typPtr(bucket)),
- makefield("oldbuckets", typPtr(bucket)),
- makefield("nevacuate", Types[TUINTPTR]),
- makefield("overflow", Types[TUNSAFEPTR]),
- }
-
- h := typ(TSTRUCT)
+ fields := []*types.Field{
+ makefield("count", types.Types[TINT]),
+ makefield("flags", types.Types[TUINT8]),
+ makefield("B", types.Types[TUINT8]),
+ makefield("noverflow", types.Types[TUINT16]),
+ makefield("hash0", types.Types[TUINT32]),
+ makefield("buckets", types.NewPtr(bucket)),
+ makefield("oldbuckets", types.NewPtr(bucket)),
+ makefield("nevacuate", types.Types[TUINTPTR]),
+ makefield("overflow", types.Types[TUNSAFEPTR]),
+ }
+
+ h := types.New(TSTRUCT)
h.SetNoalg(true)
h.SetLocal(t.Local())
h.SetFields(fields)
return h
}
-func hiter(t *Type) *Type {
+func hiter(t *types.Type) *types.Type {
if t.MapType().Hiter != nil {
return t.MapType().Hiter
}
// checkBucket uintptr
// }
// must match ../../../../runtime/hashmap.go:hiter.
- var field [12]*Field
- field[0] = makefield("key", typPtr(t.Key()))
- field[1] = makefield("val", typPtr(t.Val()))
- field[2] = makefield("t", typPtr(Types[TUINT8]))
- field[3] = makefield("h", typPtr(hmap(t)))
- field[4] = makefield("buckets", typPtr(mapbucket(t)))
- field[5] = makefield("bptr", typPtr(mapbucket(t)))
- field[6] = makefield("overflow0", Types[TUNSAFEPTR])
- field[7] = makefield("overflow1", Types[TUNSAFEPTR])
- field[8] = makefield("startBucket", Types[TUINTPTR])
- field[9] = makefield("stuff", Types[TUINTPTR]) // offset+wrapped+B+I
- field[10] = makefield("bucket", Types[TUINTPTR])
- field[11] = makefield("checkBucket", Types[TUINTPTR])
+ var field [12]*types.Field
+ field[0] = makefield("key", types.NewPtr(t.Key()))
+ field[1] = makefield("val", types.NewPtr(t.Val()))
+ field[2] = makefield("t", types.NewPtr(types.Types[TUINT8]))
+ field[3] = makefield("h", types.NewPtr(hmap(t)))
+ field[4] = makefield("buckets", types.NewPtr(mapbucket(t)))
+ field[5] = makefield("bptr", types.NewPtr(mapbucket(t)))
+ field[6] = makefield("overflow0", types.Types[TUNSAFEPTR])
+ field[7] = makefield("overflow1", types.Types[TUNSAFEPTR])
+ field[8] = makefield("startBucket", types.Types[TUINTPTR])
+ field[9] = makefield("stuff", types.Types[TUINTPTR]) // offset+wrapped+B+I
+ field[10] = makefield("bucket", types.Types[TUINTPTR])
+ field[11] = makefield("checkBucket", types.Types[TUINTPTR])
// build iterator struct holding the above fields
- i := typ(TSTRUCT)
+ i := types.New(TSTRUCT)
i.SetNoalg(true)
i.SetFields(field[:])
dowidth(i)
// f is method type, with receiver.
// return function type, receiver as first argument (or not).
-func methodfunc(f *Type, receiver *Type) *Type {
+func methodfunc(f *types.Type, receiver *types.Type) *types.Type {
var in []*Node
if receiver != nil {
d := nod(ODCLFIELD, nil, nil)
// methods returns the methods of the non-interface type t, sorted by name.
// Generates stub functions as needed.
-func methods(t *Type) []*Sig {
+func methods(t *types.Type) []*Sig {
// method type
mt := methtype(t)
it := t
if !isdirectiface(it) {
- it = typPtr(t)
+ it = types.NewPtr(t)
}
// make list of methods for t,
}
// imethods returns the methods of the interface type t, sorted by name.
-func imethods(t *Type) []*Sig {
+func imethods(t *types.Type) []*Sig {
var methods []*Sig
for _, f := range t.Fields().Slice() {
if f.Type.Etype != TFUNC || f.Sym == nil {
return methods
}
-func dimportpath(p *Pkg) {
+func dimportpath(p *types.Pkg) {
if p.Pathsym != nil {
return
}
p.Pathsym = s
}
-func dgopkgpath(s *Sym, ot int, pkg *Pkg) int {
+func dgopkgpath(s *types.Sym, ot int, pkg *types.Pkg) int {
return dgopkgpathLSym(Linksym(s), ot, pkg)
}
-func dgopkgpathLSym(s *obj.LSym, ot int, pkg *Pkg) int {
+func dgopkgpathLSym(s *obj.LSym, ot int, pkg *types.Pkg) int {
if pkg == nil {
return duintxxLSym(s, ot, 0, Widthptr)
}
}
// dgopkgpathOffLSym writes an offset relocation in s at offset ot to the pkg path symbol.
-func dgopkgpathOffLSym(s *obj.LSym, ot int, pkg *Pkg) int {
+func dgopkgpathOffLSym(s *obj.LSym, ot int, pkg *types.Pkg) int {
if pkg == nil {
return duintxxLSym(s, ot, 0, 4)
}
// isExportedField reports whether a struct field is exported.
// It also returns the package to use for PkgPath for an unexported field.
-func isExportedField(ft *Field) (bool, *Pkg) {
+func isExportedField(ft *types.Field) (bool, *types.Pkg) {
if ft.Sym != nil && ft.Embedded == 0 {
return exportname(ft.Sym.Name), ft.Sym.Pkg
} else {
}
// dnameField dumps a reflect.name for a struct field.
-func dnameField(s *Sym, ot int, spkg *Pkg, ft *Field) int {
+func dnameField(s *types.Sym, ot int, spkg *types.Pkg, ft *types.Field) int {
var name string
if ft.Sym != nil {
name = ft.Sym.Name
}
// dnameData writes the contents of a reflect.name into s at offset ot.
-func dnameData(s *obj.LSym, ot int, name, tag string, pkg *Pkg, exported bool) int {
+func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int {
if len(name) > 1<<16-1 {
Fatalf("name too long: %s", name)
}
var dnameCount int
// dname creates a reflect.name for a struct field or method.
-func dname(name, tag string, pkg *Pkg, exported bool) *obj.LSym {
+func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
// Write out data as "type.." to signal two things to the
// linker, first that when dynamically linking, the symbol
// should be moved to a relro section, and second that the
// dextratype dumps the fields of a runtime.uncommontype.
// dataAdd is the offset in bytes after the header where the
// backing array of the []method field is written (by dextratypeData).
-func dextratype(s *Sym, ot int, t *Type, dataAdd int) int {
+func dextratype(s *types.Sym, ot int, t *types.Type, dataAdd int) int {
m := methods(t)
if t.Sym == nil && len(m) == 0 {
return ot
return ot
}
-func typePkg(t *Type) *Pkg {
+func typePkg(t *types.Type) *types.Pkg {
tsym := t.Sym
if tsym == nil {
switch t.Etype {
}
}
}
- if tsym != nil && t != Types[t.Etype] && t != errortype {
+ if tsym != nil && t != types.Types[t.Etype] && t != types.Errortype {
return tsym.Pkg
}
return nil
// dextratypeData dumps the backing array for the []method field of
// runtime.uncommontype.
-func dextratypeData(s *Sym, ot int, t *Type) int {
+func dextratypeData(s *types.Sym, ot int, t *types.Type) int {
lsym := Linksym(s)
for _, a := range methods(t) {
// ../../../../runtime/type.go:/method
exported := exportname(a.name)
- var pkg *Pkg
+ var pkg *types.Pkg
if !exported && a.pkg != typePkg(t) {
pkg = a.pkg
}
// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
-func typeptrdata(t *Type) int64 {
- if !haspointers(t) {
+func typeptrdata(t *types.Type) int64 {
+ if !types.Haspointers(t) {
return 0
}
case TSTRUCT:
// Find the last field that has pointers.
- var lastPtrField *Field
+ var lastPtrField *types.Field
for _, t1 := range t.Fields().Slice() {
- if haspointers(t1.Type) {
+ if types.Haspointers(t1.Type) {
lastPtrField = t1
}
}
tflagNamed = 1 << 2
)
-var dcommontype_algarray *Sym
+var dcommontype_algarray *types.Sym
// dcommontype dumps the contents of a reflect.rtype (runtime._type).
-func dcommontype(s *Sym, ot int, t *Type) int {
+func dcommontype(s *types.Sym, ot int, t *types.Type) int {
if ot != 0 {
Fatalf("dcommontype %d", ot)
}
}
dowidth(t)
alg := algtype(t)
- var algsym *Sym
+ var algsym *types.Sym
if alg == ASPECIAL || alg == AMEM {
algsym = dalgsym(t)
}
sptrWeak := true
- var sptr *Sym
- if !t.IsPtr() || t.ptrTo != nil {
- tptr := typPtr(t)
+ var sptr *types.Sym
+ if !t.IsPtr() || t.PtrBase != nil {
+ tptr := types.NewPtr(t)
if t.Sym != nil || methods(tptr) != nil {
sptrWeak = false
}
ot = duint8(s, ot, t.Align) // fieldAlign
i = kinds[t.Etype]
- if !haspointers(t) {
+ if !types.Haspointers(t) {
i |= obj.KindNoPointers
}
if isdirectiface(t) {
return ot
}
-func typesym(t *Type) *Sym {
+func typesym(t *types.Type) *types.Sym {
name := t.ShortString()
// Use a separate symbol name for Noalg types for #17752.
// tracksym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
-func tracksym(t *Type, f *Field) *Sym {
+func tracksym(t *types.Type, f *types.Field) *types.Sym {
return trackpkg.Lookup(t.ShortString() + "." + f.Sym.Name)
}
-func typesymprefix(prefix string, t *Type) *Sym {
+func typesymprefix(prefix string, t *types.Type) *types.Sym {
p := prefix + "." + t.ShortString()
s := typepkg.Lookup(p)
return s
}
-func typenamesym(t *Type) *Sym {
+func typenamesym(t *types.Type) *types.Sym {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
Fatalf("typename %v", t)
}
s := typesym(t)
if s.Def == nil {
n := newnamel(src.NoXPos, s)
- n.Type = Types[TUINT8]
+ n.Type = types.Types[TUINT8]
n.Class = PEXTERN
n.Typecheck = 1
- s.Def = n
+ s.Def = asTypesNode(n)
signatlist = append(signatlist, t)
}
- return s.Def.Sym
+ return asNode(s.Def).Sym
}
-func typename(t *Type) *Node {
+func typename(t *types.Type) *Node {
s := typenamesym(t)
- n := nod(OADDR, s.Def, nil)
- n.Type = typPtr(s.Def.Type)
+ n := nod(OADDR, asNode(s.Def), nil)
+ n.Type = types.NewPtr(asNode(s.Def).Type)
n.SetAddable(true)
n.Typecheck = 1
return n
}
-func itabname(t, itype *Type) *Node {
+func itabname(t, itype *types.Type) *Node {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
Fatalf("itabname(%v, %v)", t, itype)
}
s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString())
if s.Def == nil {
n := newname(s)
- n.Type = Types[TUINT8]
+ n.Type = types.Types[TUINT8]
n.Class = PEXTERN
n.Typecheck = 1
- s.Def = n
+ s.Def = asTypesNode(n)
itabs = append(itabs, itabEntry{t: t, itype: itype, sym: s})
}
- n := nod(OADDR, s.Def, nil)
- n.Type = typPtr(s.Def.Type)
+ n := nod(OADDR, asNode(s.Def), nil)
+ n.Type = types.NewPtr(asNode(s.Def).Type)
n.SetAddable(true)
n.Typecheck = 1
return n
// isreflexive reports whether t has a reflexive equality operator.
// That is, if x==x for all x of type t.
-func isreflexive(t *Type) bool {
+func isreflexive(t *types.Type) bool {
switch t.Etype {
case TBOOL,
TINT,
// needkeyupdate reports whether map updates with t as a key
// need the key to be updated.
-func needkeyupdate(t *Type) bool {
+func needkeyupdate(t *types.Type) bool {
switch t.Etype {
case TBOOL,
TINT,
}
}
-func dtypesym(t *Type) *Sym {
+func dtypesym(t *types.Type) *types.Sym {
// Replace byte, rune aliases with real type.
// They've been separate internally to make error messages
// better, but we have to merge them in the reflect tables.
- if t == bytetype || t == runetype {
- t = Types[t.Etype]
+ if t == types.Bytetype || t == types.Runetype {
+ t = types.Types[t.Etype]
}
if t.IsUntyped() {
dupok = obj.DUPOK
}
- if myimportpath == "runtime" && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc
+ if myimportpath == "runtime" && (tbase == types.Types[tbase.Etype] || tbase == types.Bytetype || tbase == types.Runetype || tbase == types.Errortype) { // int, float, etc
goto ok
}
case TARRAY:
// ../../../../runtime/type.go:/arrayType
s1 := dtypesym(t.Elem())
- t2 := typSlice(t.Elem())
+ t2 := types.NewSlice(t.Elem())
s2 := dtypesym(t2)
ot = dcommontype(s, ot, t)
ot = dsymptr(s, ot, s1, 0)
// ../../../../runtime/type.go:/interfaceType
ot = dcommontype(s, ot, t)
- var tpkg *Pkg
- if t.Sym != nil && t != Types[t.Etype] && t != errortype {
+ var tpkg *types.Pkg
+ if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype {
tpkg = t.Sym.Pkg
}
ot = dgopkgpath(s, ot, tpkg)
for _, a := range m {
// ../../../../runtime/type.go:/imethod
exported := exportname(a.name)
- var pkg *Pkg
+ var pkg *types.Pkg
if !exported && a.pkg != tpkg {
pkg = a.pkg
}
// for the given concrete type and interface
// type, return the (sorted) set of methods
// on the concrete type that implement the interface
-func genfun(t, it *Type) []*obj.LSym {
+func genfun(t, it *types.Type) []*obj.LSym {
if t == nil || it == nil {
return nil
}
t := signatlist[i]
dtypesym(t)
if t.Sym != nil {
- dtypesym(typPtr(t))
+ dtypesym(types.NewPtr(t))
}
}
// another possible choice would be package main,
// but using runtime means fewer copies in .6 files.
if myimportpath == "runtime" {
- for i := EType(1); i <= TBOOL; i++ {
- dtypesym(typPtr(Types[i]))
+ for i := types.EType(1); i <= TBOOL; i++ {
+ dtypesym(types.NewPtr(types.Types[i]))
}
- dtypesym(typPtr(Types[TSTRING]))
- dtypesym(typPtr(Types[TUNSAFEPTR]))
+ dtypesym(types.NewPtr(types.Types[TSTRING]))
+ dtypesym(types.NewPtr(types.Types[TUNSAFEPTR]))
// emit type structs for error and func(error) string.
// The latter is the type of an auto-generated wrapper.
- dtypesym(typPtr(errortype))
+ dtypesym(types.NewPtr(types.Errortype))
- dtypesym(functype(nil, []*Node{anonfield(errortype)}, []*Node{anonfield(Types[TSTRING])}))
+ dtypesym(functype(nil, []*Node{anonfield(types.Errortype)}, []*Node{anonfield(types.Types[TSTRING])}))
// add paths for runtime and main, which 6l imports implicitly.
dimportpath(Runtimepkg)
}
}
-type pkgByPath []*Pkg
+type pkgByPath []*types.Pkg
func (a pkgByPath) Len() int { return len(a) }
func (a pkgByPath) Less(i, j int) bool { return a[i].Path < a[j].Path }
func (a pkgByPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func dalgsym(t *Type) *Sym {
- var s *Sym
- var hashfunc *Sym
- var eqfunc *Sym
+func dalgsym(t *types.Type) *types.Sym {
+ var s *types.Sym
+ var hashfunc *types.Sym
+ var eqfunc *types.Sym
// dalgsym is only called for a type that needs an algorithm table,
// which implies that the type is comparable (or else it would use ANOEQ).
// dgcsym emits and returns a data symbol containing GC information for type t,
// along with a boolean reporting whether the UseGCProg bit should be set in
// the type kind, and the ptrdata field to record in the reflect type information.
-func dgcsym(t *Type) (sym *Sym, useGCProg bool, ptrdata int64) {
+func dgcsym(t *types.Type) (sym *types.Sym, useGCProg bool, ptrdata int64) {
ptrdata = typeptrdata(t)
if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
sym = dgcptrmask(t)
}
// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
-func dgcptrmask(t *Type) *Sym {
+func dgcptrmask(t *types.Type) *types.Sym {
ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
fillptrmask(t, ptrmask)
p := fmt.Sprintf("gcbits.%x", ptrmask)
// fillptrmask fills in ptrmask with 1s corresponding to the
// word offsets in t that hold pointers.
// ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits.
-func fillptrmask(t *Type, ptrmask []byte) {
+func fillptrmask(t *types.Type, ptrmask []byte) {
for i := range ptrmask {
ptrmask[i] = 0
}
- if !haspointers(t) {
+ if !types.Haspointers(t) {
return
}
// along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]).
// In practice, the size is typeptrdata(t) except for non-trivial arrays.
// For non-trivial arrays, the program describes the full t.Width size.
-func dgcprog(t *Type) (*Sym, int64) {
+func dgcprog(t *types.Type) (*types.Sym, int64) {
dowidth(t)
if t.Width == BADWIDTH {
Fatalf("dgcprog: %v badwidth", t)
}
type GCProg struct {
- sym *Sym
+ sym *types.Sym
symoff int
w gcprog.Writer
}
var Debug_gcprog int // set by -d gcprog
-func (p *GCProg) init(sym *Sym) {
+func (p *GCProg) init(sym *types.Sym) {
p.sym = sym
p.symoff = 4 // first 4 bytes hold program length
p.w.Init(p.writeByte)
}
}
-func (p *GCProg) emit(t *Type, offset int64) {
+func (p *GCProg) emit(t *types.Type, offset int64) {
dowidth(t)
- if !haspointers(t) {
+ if !types.Haspointers(t) {
return
}
if t.Width == int64(Widthptr) {
s := mappkg.Lookup("zero")
if s.Def == nil {
x := newname(s)
- x.Type = Types[TUINT8]
+ x.Type = types.Types[TUINT8]
x.Class = PEXTERN
x.Typecheck = 1
- s.Def = x
+ s.Def = asTypesNode(x)
}
- z := nod(OADDR, s.Def, nil)
- z.Type = typPtr(Types[TUINT8])
+ z := nod(OADDR, asNode(s.Def), nil)
+ z.Type = types.NewPtr(types.Types[TUINT8])
z.SetAddable(true)
z.Typecheck = 1
return z
package gc
import (
+ "cmd/compile/internal/types"
"reflect"
"sort"
"testing"
func TestSortingByMethodNameAndPackagePath(t *testing.T) {
data := []*Sig{
- &Sig{name: "b", pkg: &Pkg{Path: "abc"}},
+ &Sig{name: "b", pkg: &types.Pkg{Path: "abc"}},
&Sig{name: "b", pkg: nil},
&Sig{name: "c", pkg: nil},
- &Sig{name: "c", pkg: &Pkg{Path: "uvw"}},
+ &Sig{name: "c", pkg: &types.Pkg{Path: "uvw"}},
&Sig{name: "c", pkg: nil},
- &Sig{name: "b", pkg: &Pkg{Path: "xyz"}},
- &Sig{name: "a", pkg: &Pkg{Path: "abc"}},
+ &Sig{name: "b", pkg: &types.Pkg{Path: "xyz"}},
+ &Sig{name: "a", pkg: &types.Pkg{Path: "abc"}},
&Sig{name: "b", pkg: nil},
}
want := []*Sig{
- &Sig{name: "a", pkg: &Pkg{Path: "abc"}},
+ &Sig{name: "a", pkg: &types.Pkg{Path: "abc"}},
&Sig{name: "b", pkg: nil},
&Sig{name: "b", pkg: nil},
- &Sig{name: "b", pkg: &Pkg{Path: "abc"}},
- &Sig{name: "b", pkg: &Pkg{Path: "xyz"}},
+ &Sig{name: "b", pkg: &types.Pkg{Path: "abc"}},
+ &Sig{name: "b", pkg: &types.Pkg{Path: "xyz"}},
&Sig{name: "c", pkg: nil},
&Sig{name: "c", pkg: nil},
- &Sig{name: "c", pkg: &Pkg{Path: "uvw"}},
+ &Sig{name: "c", pkg: &types.Pkg{Path: "uvw"}},
}
if len(data) != len(want) {
t.Fatal("want and data must match")
package gc
+import "cmd/compile/internal/types"
+
// select
func typecheckselect(sel *Node) {
var ncase *Node
case OSEND:
// if selectnbsend(c, v) { body } else { default body }
ch := n.Left
- r.Left = mkcall1(chanfn("selectnbsend", 2, ch.Type), Types[TBOOL], &r.Ninit, ch, n.Right)
+ r.Left = mkcall1(chanfn("selectnbsend", 2, ch.Type), types.Types[TBOOL], &r.Ninit, ch, n.Right)
case OSELRECV:
// if c != nil && selectnbrecv(&v, c) { body } else { default body }
r = nod(OIF, nil, nil)
r.Ninit.Set(cas.Ninit.Slice())
ch := n.Right.Left
- r.Left = mkcall1(chanfn("selectnbrecv", 2, ch.Type), Types[TBOOL], &r.Ninit, n.Left, ch)
+ r.Left = mkcall1(chanfn("selectnbrecv", 2, ch.Type), types.Types[TBOOL], &r.Ninit, n.Left, ch)
case OSELRECV2:
// if c != nil && selectnbrecv2(&v, c) { body } else { default body }
r = nod(OIF, nil, nil)
r.Ninit.Set(cas.Ninit.Slice())
ch := n.Right.Left
- r.Left = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), Types[TBOOL], &r.Ninit, n.Left, n.List.First(), ch)
+ r.Left = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), types.Types[TBOOL], &r.Ninit, n.Left, n.List.First(), ch)
}
r.Left = typecheck(r.Left, Erv)
r = nod(OAS, selv, nil)
r = typecheck(r, Etop)
init = append(init, r)
- var_ = conv(conv(nod(OADDR, selv, nil), Types[TUNSAFEPTR]), typPtr(Types[TUINT8]))
+ var_ = conv(conv(nod(OADDR, selv, nil), types.Types[TUNSAFEPTR]), types.NewPtr(types.Types[TUINT8]))
r = mkcall("newselect", nil, nil, var_, nodintconst(selv.Type.Width), nodintconst(sel.Xoffset))
r = typecheck(r, Etop)
init = append(init, r)
// run the select
setlineno(sel)
- chosen = temp(Types[TINT])
- r = nod(OAS, chosen, mkcall("selectgo", Types[TINT], nil, var_))
+ chosen = temp(types.Types[TINT])
+ r = nod(OAS, chosen, mkcall("selectgo", types.Types[TINT], nil, var_))
r = typecheck(r, Etop)
init = append(init, r)
}
// Keep in sync with src/runtime/select.go.
-func selecttype(size int64) *Type {
+func selecttype(size int64) *types.Type {
// TODO(dvyukov): it's possible to generate Scase only once
// and then cache; and also cache Select per size.
scase := tostruct([]*Node{
- namedfield("elem", typPtr(Types[TUINT8])),
- namedfield("chan", typPtr(Types[TUINT8])),
- namedfield("pc", Types[TUINTPTR]),
- namedfield("kind", Types[TUINT16]),
- namedfield("receivedp", typPtr(Types[TUINT8])),
- namedfield("releasetime", Types[TUINT64]),
+ namedfield("elem", types.NewPtr(types.Types[TUINT8])),
+ namedfield("chan", types.NewPtr(types.Types[TUINT8])),
+ namedfield("pc", types.Types[TUINTPTR]),
+ namedfield("kind", types.Types[TUINT16]),
+ namedfield("receivedp", types.NewPtr(types.Types[TUINT8])),
+ namedfield("releasetime", types.Types[TUINT64]),
})
scase.SetNoalg(true)
scase.SetLocal(true)
sel := tostruct([]*Node{
- namedfield("tcase", Types[TUINT16]),
- namedfield("ncase", Types[TUINT16]),
- namedfield("pollorder", typPtr(Types[TUINT8])),
- namedfield("lockorder", typPtr(Types[TUINT8])),
- namedfield("scase", typArray(scase, size)),
- namedfield("lockorderarr", typArray(Types[TUINT16], size)),
- namedfield("pollorderarr", typArray(Types[TUINT16], size)),
+ namedfield("tcase", types.Types[TUINT16]),
+ namedfield("ncase", types.Types[TUINT16]),
+ namedfield("pollorder", types.NewPtr(types.Types[TUINT8])),
+ namedfield("lockorder", types.NewPtr(types.Types[TUINT8])),
+ namedfield("scase", types.NewArray(scase, size)),
+ namedfield("lockorderarr", types.NewArray(types.Types[TUINT16], size)),
+ namedfield("pollorderarr", types.NewArray(types.Types[TUINT16], size)),
})
sel.SetNoalg(true)
sel.SetLocal(true)
package gc
-import "fmt"
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+)
// static initialization
const (
if n.Left != nil && n.Type != nil && n.Left.Op == OTYPE && n.Class == PFUNC {
// Methods called as Type.Method(receiver, ...).
// Definitions for method expressions are stored in type->nname.
- init1(n.Type.Nname(), out)
+ init1(asNode(n.Type.FuncType().Nname), out)
}
if n.Op != ONAME {
init2list(n.Func.Closure.Nbody, out)
}
if n.Op == ODOTMETH || n.Op == OCALLPART {
- init2(n.Type.Nname(), out)
+ init2(asNode(n.Type.FuncType().Nname), out)
}
}
initplan(r)
// Init slice.
bound := r.Right.Int64()
- ta := typArray(r.Type.Elem(), bound)
+ ta := types.NewArray(r.Type.Elem(), bound)
a := staticname(ta)
inittemps[r] = a
n := *l
*out = append(*out, nod(OAS, a, val))
}
ptr := nod(OADDR, a, nil)
- n.Type = typPtr(val.Type)
+ n.Type = types.NewPtr(val.Type)
gdata(&n, ptr, Widthptr)
}
// staticname returns a name backed by a static data symbol.
// Callers should call n.Name.SetReadonly(true) on the
// returned node for readonly nodes.
-func staticname(t *Type) *Node {
+func staticname(t *types.Type) *Node {
// Don't use lookupN; it interns the resulting string, but these are all unique.
n := newname(lookup(fmt.Sprintf("statictmp_%d", statuniqgen)))
statuniqgen++
func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
// make an array type corresponding the number of elements we have
- t := typArray(n.Type.Elem(), n.Right.Int64())
+ t := types.NewArray(n.Type.Elem(), n.Right.Int64())
dowidth(t)
if ctxt == inNonInitFunction {
}
var v Node
- nodconst(&v, Types[TINT], t.NumElem())
+ nodconst(&v, types.Types[TINT], t.NumElem())
nam.Xoffset += int64(array_array)
gdata(&nam, nod(OADDR, vstat, nil), Widthptr)
}
// make new auto *array (3 declare)
- vauto := temp(typPtr(t))
+ vauto := temp(types.NewPtr(t))
// set auto to point at new temp or heap (3 assign)
var a *Node
// For a large number of static entries, put them in an array and loop.
// build types [count]Tindex and [count]Tvalue
- tk := typArray(n.Type.Key(), int64(len(stat)))
- tv := typArray(n.Type.Val(), int64(len(stat)))
+ tk := types.NewArray(n.Type.Key(), int64(len(stat)))
+ tv := types.NewArray(n.Type.Val(), int64(len(stat)))
// TODO(josharian): suppress alg generation for these types?
dowidth(tk)
// for i = 0; i < len(vstatk); i++ {
// map[vstatk[i]] = vstatv[i]
// }
- i := temp(Types[TINT])
+ i := temp(types.Types[TINT])
rhs := nod(OINDEX, vstatv, i)
rhs.SetBounded(true)
package gc
import (
+ "cmd/compile/internal/types"
"reflect"
"testing"
"unsafe"
{Name{}, 36, 56},
{Param{}, 28, 56},
{Node{}, 84, 136},
- {Sym{}, 60, 104},
- {Type{}, 52, 88},
- {MapType{}, 20, 40},
- {ForwardType{}, 20, 32},
- {FuncType{}, 28, 48},
- {StructType{}, 12, 24},
- {InterType{}, 4, 8},
- {ChanType{}, 8, 16},
- {ArrayType{}, 12, 16},
- {DDDFieldType{}, 4, 8},
- {FuncArgsType{}, 4, 8},
- {ChanArgsType{}, 4, 8},
- {PtrType{}, 4, 8},
- {SliceType{}, 4, 8},
+ // TODO(gri) test the ones below in the types package
+ {types.Sym{}, 60, 104},
+ {types.Type{}, 52, 88},
+ {types.MapType{}, 20, 40},
+ {types.ForwardType{}, 20, 32},
+ {types.FuncType{}, 28, 48},
+ {types.StructType{}, 12, 24},
+ {types.InterType{}, 4, 8},
+ {types.ChanType{}, 8, 16},
+ {types.ArrayType{}, 12, 16},
+ {types.DDDFieldType{}, 4, 8},
+ {types.FuncArgsType{}, 4, 8},
+ {types.ChanArgsType{}, 4, 8},
+ {types.PtrType{}, 4, 8},
+ {types.SliceType{}, 4, 8},
}
for _, tt := range tests {
"sort"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"cmd/internal/sys"
var ssaCache *ssa.Cache
func initssaconfig() {
- types := ssa.Types{
- Bool: Types[TBOOL],
- Int8: Types[TINT8],
- Int16: Types[TINT16],
- Int32: Types[TINT32],
- Int64: Types[TINT64],
- UInt8: Types[TUINT8],
- UInt16: Types[TUINT16],
- UInt32: Types[TUINT32],
- UInt64: Types[TUINT64],
- Float32: Types[TFLOAT32],
- Float64: Types[TFLOAT64],
- Int: Types[TINT],
- Uintptr: Types[TUINTPTR],
- String: Types[TSTRING],
- BytePtr: typPtr(Types[TUINT8]),
- Int32Ptr: typPtr(Types[TINT32]),
- UInt32Ptr: typPtr(Types[TUINT32]),
- IntPtr: typPtr(Types[TINT]),
- UintptrPtr: typPtr(Types[TUINTPTR]),
- Float32Ptr: typPtr(Types[TFLOAT32]),
- Float64Ptr: typPtr(Types[TFLOAT64]),
- BytePtrPtr: typPtr(typPtr(Types[TUINT8])),
+ types_ := ssa.Types{
+ Bool: types.Types[TBOOL],
+ Int8: types.Types[TINT8],
+ Int16: types.Types[TINT16],
+ Int32: types.Types[TINT32],
+ Int64: types.Types[TINT64],
+ UInt8: types.Types[TUINT8],
+ UInt16: types.Types[TUINT16],
+ UInt32: types.Types[TUINT32],
+ UInt64: types.Types[TUINT64],
+ Float32: types.Types[TFLOAT32],
+ Float64: types.Types[TFLOAT64],
+ Int: types.Types[TINT],
+ Uintptr: types.Types[TUINTPTR],
+ String: types.Types[TSTRING],
+ BytePtr: types.NewPtr(types.Types[TUINT8]),
+ Int32Ptr: types.NewPtr(types.Types[TINT32]),
+ UInt32Ptr: types.NewPtr(types.Types[TUINT32]),
+ IntPtr: types.NewPtr(types.Types[TINT]),
+ UintptrPtr: types.NewPtr(types.Types[TUINTPTR]),
+ Float32Ptr: types.NewPtr(types.Types[TFLOAT32]),
+ Float64Ptr: types.NewPtr(types.Types[TFLOAT64]),
+ BytePtrPtr: types.NewPtr(types.NewPtr(types.Types[TUINT8])),
}
// Generate a few pointer types that are uncommon in the frontend but common in the backend.
// Caching is disabled in the backend, so generating these here avoids allocations.
- _ = typPtr(Types[TINTER]) // *interface{}
- _ = typPtr(typPtr(Types[TSTRING])) // **string
- _ = typPtr(typPtr(idealstring)) // **string
- _ = typPtr(typSlice(Types[TINTER])) // *[]interface{}
- _ = typPtr(typPtr(bytetype)) // **byte
- _ = typPtr(typSlice(bytetype)) // *[]byte
- _ = typPtr(typSlice(Types[TSTRING])) // *[]string
- _ = typPtr(typSlice(idealstring)) // *[]string
- _ = typPtr(typPtr(typPtr(Types[TUINT8]))) // ***uint8
- _ = typPtr(Types[TINT16]) // *int16
- _ = typPtr(Types[TINT64]) // *int64
- _ = typPtr(errortype) // *error
- typPtrCacheEnabled = false
- ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, types, Ctxt, Debug['N'] == 0)
+ _ = types.NewPtr(types.Types[TINTER]) // *interface{}
+ _ = types.NewPtr(types.NewPtr(types.Types[TSTRING])) // **string
+ _ = types.NewPtr(types.NewPtr(types.Idealstring)) // **string
+ _ = types.NewPtr(types.NewSlice(types.Types[TINTER])) // *[]interface{}
+ _ = types.NewPtr(types.NewPtr(types.Bytetype)) // **byte
+ _ = types.NewPtr(types.NewSlice(types.Bytetype)) // *[]byte
+ _ = types.NewPtr(types.NewSlice(types.Types[TSTRING])) // *[]string
+ _ = types.NewPtr(types.NewSlice(types.Idealstring)) // *[]string
+ _ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
+ _ = types.NewPtr(types.Types[TINT16]) // *int16
+ _ = types.NewPtr(types.Types[TINT64]) // *int64
+ _ = types.NewPtr(types.Errortype) // *error
+ types.NewPtrCacheEnabled = false
+ ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, types_, Ctxt, Debug['N'] == 0)
if thearch.LinkArch.Name == "386" {
ssaConfig.Set387(thearch.Use387)
}
s.labeledNodes = map[*Node]*ssaLabel{}
s.fwdVars = map[*Node]*ssa.Value{}
s.startmem = s.entryNewValue0(ssa.OpInitMem, ssa.TypeMem)
- s.sp = s.entryNewValue0(ssa.OpSP, Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
- s.sb = s.entryNewValue0(ssa.OpSB, Types[TUINTPTR])
+ s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
+ s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])
s.startBlock(s.f.Entry)
s.vars[&memVar] = s.startmem
switch n.Class {
case PPARAM, PPARAMOUT:
aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
- s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, typPtr(n.Type), aux, s.sp)
+ s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), aux, s.sp)
if n.Class == PPARAMOUT && s.canSSA(n) {
// Save ssa-able PPARAMOUT variables so we can
// store them back to the stack at the end of
}
// label returns the label associated with sym, creating it if necessary.
-func (s *state) label(sym *Sym) *ssaLabel {
+func (s *state) label(sym *types.Sym) *ssaLabel {
lab := s.labels[sym.Name]
if lab == nil {
lab = new(ssaLabel)
var (
// dummy node for the memory variable
- memVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "mem"}}
+ memVar = Node{Op: ONAME, Class: Pxxx, Sym: &types.Sym{Name: "mem"}}
// dummy nodes for temporary variables
- ptrVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ptr"}}
- lenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "len"}}
- newlenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "newlen"}}
- capVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "cap"}}
- typVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "typ"}}
- okVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ok"}}
+ ptrVar = Node{Op: ONAME, Class: Pxxx, Sym: &types.Sym{Name: "ptr"}}
+ lenVar = Node{Op: ONAME, Class: Pxxx, Sym: &types.Sym{Name: "len"}}
+ newlenVar = Node{Op: ONAME, Class: Pxxx, Sym: &types.Sym{Name: "newlen"}}
+ capVar = Node{Op: ONAME, Class: Pxxx, Sym: &types.Sym{Name: "cap"}}
+ typVar = Node{Op: ONAME, Class: Pxxx, Sym: &types.Sym{Name: "typ"}}
+ okVar = Node{Op: ONAME, Class: Pxxx, Sym: &types.Sym{Name: "ok"}}
)
// startBlock sets the current block we're generating code in to b.
func (s *state) constNil(t ssa.Type) *ssa.Value { return s.f.ConstNil(s.peekPos(), t) }
func (s *state) constEmptyString(t ssa.Type) *ssa.Value { return s.f.ConstEmptyString(s.peekPos(), t) }
func (s *state) constBool(c bool) *ssa.Value {
- return s.f.ConstBool(s.peekPos(), Types[TBOOL], c)
+ return s.f.ConstBool(s.peekPos(), types.Types[TBOOL], c)
}
func (s *state) constInt8(t ssa.Type, c int8) *ssa.Value {
return s.f.ConstInt8(s.peekPos(), t, c)
return
}
- var t *Type
+ var t *types.Type
if n.Right != nil {
t = n.Right.Type
} else {
type opAndType struct {
op Op
- etype EType
+ etype types.EType
}
var opToSSA = map[opAndType]ssa.Op{
opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,
}
-func (s *state) concreteEtype(t *Type) EType {
+func (s *state) concreteEtype(t *types.Type) types.EType {
e := t.Etype
switch e {
default:
}
}
-func (s *state) ssaOp(op Op, t *Type) ssa.Op {
+func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
etype := s.concreteEtype(t)
x, ok := opToSSA[opAndType{op, etype}]
if !ok {
return x
}
-func floatForComplex(t *Type) *Type {
+func floatForComplex(t *types.Type) *types.Type {
if t.Size() == 8 {
- return Types[TFLOAT32]
+ return types.Types[TFLOAT32]
} else {
- return Types[TFLOAT64]
+ return types.Types[TFLOAT64]
}
}
type opAndTwoTypes struct {
op Op
- etype1 EType
- etype2 EType
+ etype1 types.EType
+ etype2 types.EType
}
type twoTypes struct {
- etype1 EType
- etype2 EType
+ etype1 types.EType
+ etype2 types.EType
}
type twoOpsAndType struct {
op1 ssa.Op
op2 ssa.Op
- intermediateType EType
+ intermediateType types.EType
}
var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
}
-func (s *state) ssaShiftOp(op Op, t *Type, u *Type) ssa.Op {
+func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
etype1 := s.concreteEtype(t)
etype2 := s.concreteEtype(u)
x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
case OARRAYBYTESTRTMP:
slice := s.expr(n.Left)
ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
- len := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
+ len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
case OSTRARRAYBYTETMP:
str := s.expr(n.Left)
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
- len := s.newValue1(ssa.OpStringLen, Types[TINT], str)
+ len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str)
return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
case OCFUNC:
aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: Linksym(n.Left.Sym)})
// "value" of a function is the address of the function's closure
sym := Linksym(funcsym(n.Sym))
aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: sym})
- return s.entryNewValue1A(ssa.OpAddr, typPtr(n.Type), aux, s.sb)
+ return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), aux, s.sb)
}
if s.canSSA(n) {
return s.variable(n, n.Type)
i := &u.Imag
switch n.Type.Size() {
case 8:
- pt := Types[TFLOAT32]
+ pt := types.Types[TFLOAT32]
return s.newValue2(ssa.OpComplexMake, n.Type,
s.constFloat32(pt, r.Float32()),
s.constFloat32(pt, i.Float32()))
case 16:
- pt := Types[TFLOAT64]
+ pt := types.Types[TFLOAT64]
return s.newValue2(ssa.OpComplexMake, n.Type,
s.constFloat64(pt, r.Float64()),
s.constFloat64(pt, i.Float64()))
if op2 == ssa.OpCopy {
return s.newValue1(op1, n.Type, x)
}
- return s.newValue1(op2, n.Type, s.newValue1(op1, Types[it], x))
+ return s.newValue1(op2, n.Type, s.newValue1(op1, types.Types[it], x))
}
// Tricky 64-bit unsigned cases.
if ft.IsInteger() {
if n.Left.Type.IsComplex() {
pt := floatForComplex(n.Left.Type)
op := s.ssaOp(OEQ, pt)
- r := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
- i := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
- c := s.newValue2(ssa.OpAndB, Types[TBOOL], r, i)
+ r := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
+ i := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
+ c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i)
switch n.Op {
case OEQ:
return c
case ONE:
- return s.newValue1(ssa.OpNot, Types[TBOOL], c)
+ return s.newValue1(ssa.OpNot, types.Types[TBOOL], c)
default:
s.Fatalf("ordered complex compare %v", n.Op)
}
}
- return s.newValue2(s.ssaOp(n.Op, n.Left.Type), Types[TBOOL], a, b)
+ return s.newValue2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b)
case OMUL:
a := s.expr(n.Left)
b := s.expr(n.Right)
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
pt := floatForComplex(n.Type) // Could be Float32 or Float64
- wt := Types[TFLOAT64] // Compute in Float64 to minimize cancelation error
+ wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancelation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
subop := ssa.OpSub64F
divop := ssa.OpDiv64F
pt := floatForComplex(n.Type) // Could be Float32 or Float64
- wt := Types[TFLOAT64] // Compute in Float64 to minimize cancelation error
+ wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancelation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
b.AddEdgeTo(bResult)
s.startBlock(bResult)
- return s.variable(n, Types[TBOOL])
+ return s.variable(n, types.Types[TBOOL])
case OCOMPLEX:
r := s.expr(n.Left)
i := s.expr(n.Right)
return s.addr(n.Left, n.Bounded())
case OINDREGSP:
- addr := s.constOffPtrSP(typPtr(n.Type), n.Xoffset)
+ addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
case OIND:
case ODOTPTR:
p := s.exprPtr(n.Left, false, n.Pos)
- p = s.newValue1I(ssa.OpOffPtr, typPtr(n.Type), n.Xoffset, p)
+ p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p)
return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
case OINDEX:
// Replace "abc"[1] with 'b'.
// Delayed until now because "abc"[1] is not an ideal constant.
// See test/fixedbugs/issue11370.go.
- return s.newValue0I(ssa.OpConst8, Types[TUINT8], int64(int8(n.Left.Val().U.(string)[n.Right.Int64()])))
+ return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.Val().U.(string)[n.Right.Int64()])))
}
a := s.expr(n.Left)
i := s.expr(n.Right)
i = s.extendIndex(i, panicindex)
if !n.Bounded() {
- len := s.newValue1(ssa.OpStringLen, Types[TINT], a)
+ len := s.newValue1(ssa.OpStringLen, types.Types[TINT], a)
s.boundsCheck(i, len)
}
ptrtyp := s.f.Config.Types.BytePtr
} else {
ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
}
- return s.newValue2(ssa.OpLoad, Types[TUINT8], ptr, s.mem())
+ return s.newValue2(ssa.OpLoad, types.Types[TUINT8], ptr, s.mem())
case n.Left.Type.IsSlice():
p := s.addr(n, false)
return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
if bound == 0 {
// Bounds check will never succeed. Might as well
// use constants for the bounds check.
- z := s.constInt(Types[TINT], 0)
+ z := s.constInt(types.Types[TINT], 0)
s.boundsCheck(z, z)
// The return value won't be live, return junk.
return s.newValue0(ssa.OpUnknown, n.Type)
}
i = s.extendIndex(i, panicindex)
if !n.Bounded() {
- s.boundsCheck(i, s.constInt(Types[TINT], bound))
+ s.boundsCheck(i, s.constInt(types.Types[TINT], bound))
}
return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a)
}
if n.Op == OCAP {
op = ssa.OpSliceCap
}
- return s.newValue1(op, Types[TINT], s.expr(n.Left))
+ return s.newValue1(op, types.Types[TINT], s.expr(n.Left))
case n.Left.Type.IsString(): // string; not reachable for OCAP
- return s.newValue1(ssa.OpStringLen, Types[TINT], s.expr(n.Left))
+ return s.newValue1(ssa.OpStringLen, types.Types[TINT], s.expr(n.Left))
case n.Left.Type.IsMap(), n.Left.Type.IsChan():
return s.referenceTypeBuiltin(n, s.expr(n.Left))
default: // array
- return s.constInt(Types[TINT], n.Left.Type.NumElem())
+ return s.constInt(types.Types[TINT], n.Left.Type.NumElem())
}
case OSPTR:
// *(ptr+len+2) = e3
et := n.Type.Elem()
- pt := typPtr(et)
+ pt := types.NewPtr(et)
// Evaluate slice
sn := n.List.First() // the slice node is the first in the list
// Decide if we need to grow
nargs := int64(n.List.Len() - 1)
p := s.newValue1(ssa.OpSlicePtr, pt, slice)
- l := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
- c := s.newValue1(ssa.OpSliceCap, Types[TINT], slice)
- nl := s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs))
+ l := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
+ c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice)
+ nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
- cmp := s.newValue2(s.ssaOp(OGT, Types[TINT]), Types[TBOOL], nl, c)
+ cmp := s.newValue2(s.ssaOp(OGT, types.Types[TINT]), types.Types[TBOOL], nl, c)
s.vars[&ptrVar] = p
if !inplace {
// Call growslice
s.startBlock(grow)
taddr := s.expr(n.Left)
- r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl)
+ r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl)
if inplace {
if sn.Op == ONAME {
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, sn, s.mem())
}
capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_cap), addr)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TINT], capaddr, r[2], s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], capaddr, r[2], s.mem())
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, pt, addr, r[0], s.mem())
// load the value we just stored to avoid having to spill it
s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem())
s.vars[&lenVar] = r[1] // avoid a spill in the fast path
} else {
s.vars[&ptrVar] = r[0]
- s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], r[1], s.constInt(Types[TINT], nargs))
+ s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs))
s.vars[&capVar] = r[2]
}
s.startBlock(assign)
if inplace {
- l = s.variable(&lenVar, Types[TINT]) // generates phi for len
- nl = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs))
+ l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len
+ nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_nel), addr)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TINT], lenaddr, nl, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], lenaddr, nl, s.mem())
}
// Evaluate args
p = s.variable(&ptrVar, pt) // generates phi for ptr
if !inplace {
- nl = s.variable(&newlenVar, Types[TINT]) // generates phi for nl
- c = s.variable(&capVar, Types[TINT]) // generates phi for cap
+ nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl
+ c = s.variable(&capVar, types.Types[TINT]) // generates phi for cap
}
p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
for i, arg := range args {
- addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(Types[TINT], int64(i)))
+ addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[TINT], int64(i)))
if arg.store {
s.storeType(et, addr, arg.v, 0)
} else {
if n == 0 {
// The bounds check must fail. Might as well
// ignore the actual index and just use zeros.
- z := s.constInt(Types[TINT], 0)
+ z := s.constInt(types.Types[TINT], 0)
s.boundsCheck(z, z)
return
}
}
// Rewrite to a = [1]{v}
i = s.extendIndex(i, panicindex)
- s.boundsCheck(i, s.constInt(Types[TINT], 1))
+ s.boundsCheck(i, s.constInt(types.Types[TINT], 1))
v := s.newValue1(ssa.OpArrayMake1, t, right)
s.assign(left.Left, v, false, 0)
return
// is valid, even though they have type uintptr (#19168).
// Mark it pointer type to signal the writebarrier pass to
// insert a write barrier.
- t = Types[TUNSAFEPTR]
+ t = types.Types[TUNSAFEPTR]
}
if deref {
// Treat as a mem->mem move.
}
// zeroVal returns the zero value for type t.
-func (s *state) zeroVal(t *Type) *ssa.Value {
+func (s *state) zeroVal(t *types.Type) *ssa.Value {
switch {
case t.IsInteger():
switch t.Size() {
case t.IsComplex():
switch t.Size() {
case 8:
- z := s.constFloat32(Types[TFLOAT32], 0)
+ z := s.constFloat32(types.Types[TFLOAT32], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
case 16:
- z := s.constFloat64(Types[TFLOAT64], 0)
+ z := s.constFloat64(types.Types[TFLOAT64], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
default:
s.Fatalf("bad sized complex type %v", t)
n := t.NumFields()
v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
for i := 0; i < n; i++ {
- v.AddArg(s.zeroVal(t.FieldType(i).(*Type)))
+ v.AddArg(s.zeroVal(t.FieldType(i).(*types.Type)))
}
return v
case t.IsArray():
// when not instrumenting.
slice := args[0]
ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
- len := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
+ len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
},
all...)
/******** runtime/internal/sys ********/
addF("runtime/internal/sys", "Ctz32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz32, Types[TINT], args[0])
+ return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
addF("runtime/internal/sys", "Ctz64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz64, Types[TINT], args[0])
+ return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
addF("runtime/internal/sys", "Bswap32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBswap32, Types[TUINT32], args[0])
+ return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
addF("runtime/internal/sys", "Bswap64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBswap64, Types[TUINT64], args[0])
+ return s.newValue1(ssa.OpBswap64, types.Types[TUINT64], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
/******** runtime/internal/atomic ********/
addF("runtime/internal/atomic", "Load",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoad32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], s.mem())
+ v := s.newValue2(ssa.OpAtomicLoad32, ssa.MakeTuple(types.Types[TUINT32], ssa.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, Types[TUINT32], v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Load64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoad64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], s.mem())
+ v := s.newValue2(ssa.OpAtomicLoad64, ssa.MakeTuple(types.Types[TUINT64], ssa.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, Types[TUINT64], v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
addF("runtime/internal/atomic", "Xchg",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicExchange32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem())
+ v := s.newValue3(ssa.OpAtomicExchange32, ssa.MakeTuple(types.Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, Types[TUINT32], v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Xchg64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicExchange64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem())
+ v := s.newValue3(ssa.OpAtomicExchange64, ssa.MakeTuple(types.Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, Types[TUINT64], v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "Xadd",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicAdd32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem())
+ v := s.newValue3(ssa.OpAtomicAdd32, ssa.MakeTuple(types.Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, Types[TUINT32], v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Xadd64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicAdd64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem())
+ v := s.newValue3(ssa.OpAtomicAdd64, ssa.MakeTuple(types.Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, Types[TUINT64], v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "Cas",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue4(ssa.OpAtomicCompareAndSwap32, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem())
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap32, ssa.MakeTuple(types.Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, Types[TBOOL], v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Cas64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue4(ssa.OpAtomicCompareAndSwap64, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem())
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap64, ssa.MakeTuple(types.Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, Types[TBOOL], v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
/******** math ********/
addF("math", "Sqrt",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpSqrt, Types[TFLOAT64], args[0])
+ return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
/******** math/bits ********/
addF("math/bits", "TrailingZeros64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz64, Types[TINT], args[0])
+ return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
addF("math/bits", "TrailingZeros32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz32, Types[TINT], args[0])
+ return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
addF("math/bits", "TrailingZeros16",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- x := s.newValue1(ssa.OpZeroExt16to32, Types[TUINT32], args[0])
- c := s.constInt32(Types[TUINT32], 1<<16)
- y := s.newValue2(ssa.OpOr32, Types[TUINT32], x, c)
- return s.newValue1(ssa.OpCtz32, Types[TINT], y)
+ x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
+ c := s.constInt32(types.Types[TUINT32], 1<<16)
+ y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
+ return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
},
sys.ARM, sys.MIPS)
addF("math/bits", "TrailingZeros16",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- x := s.newValue1(ssa.OpZeroExt16to64, Types[TUINT64], args[0])
- c := s.constInt64(Types[TUINT64], 1<<16)
- y := s.newValue2(ssa.OpOr64, Types[TUINT64], x, c)
- return s.newValue1(ssa.OpCtz64, Types[TINT], y)
+ x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
+ c := s.constInt64(types.Types[TUINT64], 1<<16)
+ y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
+ return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
},
sys.AMD64, sys.ARM64, sys.S390X)
addF("math/bits", "TrailingZeros8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- x := s.newValue1(ssa.OpZeroExt8to32, Types[TUINT32], args[0])
- c := s.constInt32(Types[TUINT32], 1<<8)
- y := s.newValue2(ssa.OpOr32, Types[TUINT32], x, c)
- return s.newValue1(ssa.OpCtz32, Types[TINT], y)
+ x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
+ c := s.constInt32(types.Types[TUINT32], 1<<8)
+ y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
+ return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
},
sys.ARM, sys.MIPS)
addF("math/bits", "TrailingZeros8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- x := s.newValue1(ssa.OpZeroExt8to64, Types[TUINT64], args[0])
- c := s.constInt64(Types[TUINT64], 1<<8)
- y := s.newValue2(ssa.OpOr64, Types[TUINT64], x, c)
- return s.newValue1(ssa.OpCtz64, Types[TINT], y)
+ x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
+ c := s.constInt64(types.Types[TUINT64], 1<<8)
+ y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
+ return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
},
sys.AMD64, sys.ARM64, sys.S390X)
alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
// ReverseBytes16 lowers to a rotate, no need for anything special here.
addF("math/bits", "Len64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitLen64, Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
addF("math/bits", "Len32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if s.config.IntSize == 4 {
- return s.newValue1(ssa.OpBitLen32, Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
}
- x := s.newValue1(ssa.OpZeroExt32to64, Types[TUINT64], args[0])
- return s.newValue1(ssa.OpBitLen64, Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
addF("math/bits", "Len16",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if s.config.IntSize == 4 {
- x := s.newValue1(ssa.OpZeroExt16to32, Types[TUINT32], args[0])
- return s.newValue1(ssa.OpBitLen32, Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
}
- x := s.newValue1(ssa.OpZeroExt16to64, Types[TUINT64], args[0])
- return s.newValue1(ssa.OpBitLen64, Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
// Note: disabled on AMD64 because the Go code is faster!
addF("math/bits", "Len8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if s.config.IntSize == 4 {
- x := s.newValue1(ssa.OpZeroExt8to32, Types[TUINT32], args[0])
- return s.newValue1(ssa.OpBitLen32, Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
}
- x := s.newValue1(ssa.OpZeroExt8to64, Types[TUINT64], args[0])
- return s.newValue1(ssa.OpBitLen64, Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
addF("math/bits", "Len",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if s.config.IntSize == 4 {
- return s.newValue1(ssa.OpBitLen32, Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
}
- return s.newValue1(ssa.OpBitLen64, Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
// LeadingZeros is handled because it trivially calls Len.
addF("math/bits", "Reverse64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitRev64, Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitRev32, Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse16",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitRev16, Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitRev16, types.Types[TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitRev8, Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitRev8, types.Types[TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if s.config.IntSize == 4 {
- return s.newValue1(ssa.OpBitRev32, Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
}
- return s.newValue1(ssa.OpBitRev64, Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
},
sys.ARM64)
makeOnesCount := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: Types[TBOOL], Sym: Linksym(syslook("support_popcnt").Sym)})
- addr := s.entryNewValue1A(ssa.OpAddr, Types[TBOOL].PtrTo(), aux, s.sb)
- v := s.newValue2(ssa.OpLoad, Types[TBOOL], addr, s.mem())
+ aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: types.Types[TBOOL], Sym: Linksym(syslook("support_popcnt").Sym)})
+ addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), aux, s.sb)
+ v := s.newValue2(ssa.OpLoad, types.Types[TBOOL], addr, s.mem())
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
if s.config.IntSize == 4 {
op = op32
}
- s.vars[n] = s.newValue1(op, Types[TINT], args[0])
+ s.vars[n] = s.newValue1(op, types.Types[TINT], args[0])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
s.startBlock(bFalse)
a := s.call(n, callNormal)
- s.vars[n] = s.newValue2(ssa.OpLoad, Types[TINT], a, s.mem())
+ s.vars[n] = s.newValue2(ssa.OpLoad, types.Types[TINT], a, s.mem())
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
- return s.variable(n, Types[TINT])
+ return s.variable(n, types.Types[TINT])
}
}
addF("math/bits", "OnesCount64",
/******** math/big ********/
add("math/big", "mulWW",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpMul64uhilo, ssa.MakeTuple(Types[TUINT64], Types[TUINT64]), args[0], args[1])
+ return s.newValue2(ssa.OpMul64uhilo, ssa.MakeTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
},
sys.ArchAMD64)
add("math/big", "divWW",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue3(ssa.OpDiv128u, ssa.MakeTuple(Types[TUINT64], Types[TUINT64]), args[0], args[1], args[2])
+ return s.newValue3(ssa.OpDiv128u, ssa.MakeTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
},
sys.ArchAMD64)
}
// findIntrinsic returns a function which builds the SSA equivalent of the
// function identified by the symbol sym. If sym is not an intrinsic call, returns nil.
-func findIntrinsic(sym *Sym) intrinsicBuilder {
+func findIntrinsic(sym *types.Sym) intrinsicBuilder {
if ssa.IntrinsicsDisable {
return nil
}
// Calls the function n using the specified call type.
// Returns the address of the return value (or nil if none).
func (s *state) call(n *Node, k callKind) *ssa.Value {
- var sym *Sym // target symbol (if static)
+ var sym *types.Sym // target symbol (if static)
var closure *ssa.Value // ptr to closure to run (if dynamic)
var codeptr *ssa.Value // ptr to target code (if dynamic)
var rcvr *ssa.Value // receiver to set
n2.Name.Curfn = s.curfn
n2.Class = PFUNC
n2.Pos = fn.Pos
- n2.Type = Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it.
+ n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it.
closure = s.expr(n2)
// Note: receiver is already assigned in n.List, so we don't
// want to set it here.
Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
}
i := s.expr(fn.Left)
- itab := s.newValue1(ssa.OpITab, Types[TUINTPTR], i)
+ itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i)
if k != callNormal {
s.nilCheck(itab)
}
itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab
itab = s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
if k == callNormal {
- codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], itab, s.mem())
+ codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], itab, s.mem())
} else {
closure = itab
}
- rcvr = s.newValue1(ssa.OpIData, Types[TUINTPTR], i)
+ rcvr = s.newValue1(ssa.OpIData, types.Types[TUINTPTR], i)
}
dowidth(fn.Type)
stksize := fn.Type.ArgWidth() // includes receiver
argStart += int64(2 * Widthptr)
}
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TUINTPTR], addr, rcvr, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TUINTPTR], addr, rcvr, s.mem())
}
// Defer/go args
if k != callNormal {
// Write argsize and closure (args to Newproc/Deferproc).
argStart := Ctxt.FixedFrameSize()
- argsize := s.constInt32(Types[TUINT32], int32(stksize))
+ argsize := s.constInt32(types.Types[TUINT32], int32(stksize))
addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TUINT32], addr, argsize, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TUINT32], addr, argsize, s.mem())
addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TUINTPTR], addr, closure, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TUINTPTR], addr, closure, s.mem())
stksize += 2 * int64(Widthptr)
}
case k == callGo:
call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, Newproc, s.mem())
case closure != nil:
- codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], closure, s.mem())
+ codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], closure, s.mem())
call = s.newValue3(ssa.OpClosureCall, ssa.TypeMem, codeptr, closure, s.mem())
case codeptr != nil:
call = s.newValue2(ssa.OpInterCall, ssa.TypeMem, codeptr, s.mem())
return nil
}
fp := res.Field(0)
- return s.constOffPtrSP(typPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize())
+ return s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize())
}
// etypesign returns the signed-ness of e, for integer/pointer etypes.
// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
-func etypesign(e EType) int8 {
+func etypesign(e types.EType) int8 {
switch e {
case TINT8, TINT16, TINT32, TINT64, TINT:
return -1
// If bounded is true then this address does not require a nil check for its operand
// even if that would otherwise be implied.
func (s *state) addr(n *Node, bounded bool) *ssa.Value {
- t := typPtr(n.Type)
+ t := types.NewPtr(n.Type)
switch n.Op {
case ONAME:
switch n.Class {
a := s.expr(n.Left)
i := s.expr(n.Right)
i = s.extendIndex(i, panicindex)
- len := s.newValue1(ssa.OpSliceLen, Types[TINT], a)
+ len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], a)
if !n.Bounded() {
s.boundsCheck(i, len)
}
a := s.addr(n.Left, bounded)
i := s.expr(n.Right)
i = s.extendIndex(i, panicindex)
- len := s.constInt(Types[TINT], n.Left.Type.NumElem())
+ len := s.constInt(types.Types[TINT], n.Left.Type.NumElem())
if !n.Bounded() {
s.boundsCheck(i, len)
}
- return s.newValue2(ssa.OpPtrIndex, typPtr(n.Left.Type.Elem()), a, i)
+ return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i)
}
case OIND:
return s.exprPtr(n.Left, bounded, n.Pos)
}
// canSSA reports whether variables of type t are SSA-able.
-func canSSAType(t *Type) bool {
+func canSSAType(t *types.Type) bool {
dowidth(t)
if t.Width > int64(4*Widthptr) {
// 4*Widthptr is an arbitrary constant. We want it
}
// bounds check
- cmp := s.newValue2(ssa.OpIsInBounds, Types[TBOOL], idx, len)
+ cmp := s.newValue2(ssa.OpIsInBounds, types.Types[TBOOL], idx, len)
s.check(cmp, panicindex)
}
}
// bounds check
- cmp := s.newValue2(ssa.OpIsSliceInBounds, Types[TBOOL], idx, len)
+ cmp := s.newValue2(ssa.OpIsSliceInBounds, types.Types[TBOOL], idx, len)
s.check(cmp, panicslice)
}
}
if needcheck {
// do a size-appropriate check for zero
- cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type))
+ cmp := s.newValue2(s.ssaOp(ONE, n.Type), types.Types[TBOOL], b, s.zeroVal(n.Type))
s.check(cmp, panicdivide)
}
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
// Returns a slice of results of the given result types.
// The call is added to the end of the current block.
// If returns is false, the block is marked as an exit block.
-func (s *state) rtcall(fn *obj.LSym, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value {
+func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
// Write args to the stack
off := Ctxt.FixedFrameSize()
for _, arg := range args {
res := make([]*ssa.Value, len(results))
for i, t := range results {
off = Rnd(off, t.Alignment())
- ptr := s.constOffPtrSP(typPtr(t), off)
+ ptr := s.constOffPtrSP(types.NewPtr(t), off)
res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem())
off += t.Size()
}
}
// do *left = right for type t.
-func (s *state) storeType(t *Type, left, right *ssa.Value, skip skipMask) {
- if skip == 0 && (!haspointers(t) || ssa.IsStackAddr(left)) {
+func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask) {
+ if skip == 0 && (!types.Haspointers(t) || ssa.IsStackAddr(left)) {
// Known to not have write barrier. Store the whole type.
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, left, right, s.mem())
return
// TODO: if the writebarrier pass knows how to reorder stores,
// we can do a single store here as long as skip==0.
s.storeTypeScalars(t, left, right, skip)
- if skip&skipPtr == 0 && haspointers(t) {
+ if skip&skipPtr == 0 && types.Haspointers(t) {
s.storeTypePtrs(t, left, right)
}
}
// do *left = right for all scalar (non-pointer) parts of t.
-func (s *state) storeTypeScalars(t *Type, left, right *ssa.Value, skip skipMask) {
+func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
switch {
case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, left, right, s.mem())
if skip&skipLen != 0 {
return
}
- len := s.newValue1(ssa.OpStringLen, Types[TINT], right)
+ len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.IntSize, left)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TINT], lenAddr, len, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], lenAddr, len, s.mem())
case t.IsSlice():
if skip&skipLen == 0 {
- len := s.newValue1(ssa.OpSliceLen, Types[TINT], right)
+ len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.IntSize, left)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TINT], lenAddr, len, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], lenAddr, len, s.mem())
}
if skip&skipCap == 0 {
- cap := s.newValue1(ssa.OpSliceCap, Types[TINT], right)
+ cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right)
capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.IntSize, left)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TINT], capAddr, cap, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], capAddr, cap, s.mem())
}
case t.IsInterface():
// itab field doesn't need a write barrier (even though it is a pointer).
itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TUINTPTR], left, itab, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TUINTPTR], left, itab, s.mem())
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
- s.storeTypeScalars(ft.(*Type), addr, val, 0)
+ s.storeTypeScalars(ft.(*types.Type), addr, val, 0)
}
case t.IsArray() && t.NumElem() == 0:
// nothing
}
// do *left = right for all pointer parts of t.
-func (s *state) storeTypePtrs(t *Type, left, right *ssa.Value) {
+func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
switch {
case t.IsPtrShaped():
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, left, right, s.mem())
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
- if !haspointers(ft.(*Type)) {
+ if !types.Haspointers(ft.(*types.Type)) {
continue
}
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
- s.storeTypePtrs(ft.(*Type), addr, val)
+ s.storeTypePtrs(ft.(*types.Type), addr, val)
}
case t.IsArray() && t.NumElem() == 0:
// nothing
// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
// i,j,k may be nil, in which case they are set to their default value.
// t is a slice, ptr to array, or string type.
-func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
- var elemtype *Type
- var ptrtype *Type
+func (s *state) slice(t *types.Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
+ var elemtype *types.Type
+ var ptrtype *types.Type
var ptr *ssa.Value
var len *ssa.Value
var cap *ssa.Value
- zero := s.constInt(Types[TINT], 0)
+ zero := s.constInt(types.Types[TINT], 0)
switch {
case t.IsSlice():
elemtype = t.Elem()
- ptrtype = typPtr(elemtype)
+ ptrtype = types.NewPtr(elemtype)
ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v)
- len = s.newValue1(ssa.OpSliceLen, Types[TINT], v)
- cap = s.newValue1(ssa.OpSliceCap, Types[TINT], v)
+ len = s.newValue1(ssa.OpSliceLen, types.Types[TINT], v)
+ cap = s.newValue1(ssa.OpSliceCap, types.Types[TINT], v)
case t.IsString():
- elemtype = Types[TUINT8]
- ptrtype = typPtr(elemtype)
+ elemtype = types.Types[TUINT8]
+ ptrtype = types.NewPtr(elemtype)
ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v)
- len = s.newValue1(ssa.OpStringLen, Types[TINT], v)
+ len = s.newValue1(ssa.OpStringLen, types.Types[TINT], v)
cap = len
case t.IsPtr():
if !t.Elem().IsArray() {
s.Fatalf("bad ptr to array in slice %v\n", t)
}
elemtype = t.Elem().Elem()
- ptrtype = typPtr(elemtype)
+ ptrtype = types.NewPtr(elemtype)
s.nilCheck(v)
ptr = v
- len = s.constInt(Types[TINT], t.Elem().NumElem())
+ len = s.constInt(types.Types[TINT], t.Elem().NumElem())
cap = len
default:
s.Fatalf("bad type in slice %v\n", t)
// rptr = p + delta&mask(rcap)
// result = (SliceMake rptr rlen rcap)
// where mask(x) is 0 if x==0 and -1 if x>0.
- subOp := s.ssaOp(OSUB, Types[TINT])
- mulOp := s.ssaOp(OMUL, Types[TINT])
- andOp := s.ssaOp(OAND, Types[TINT])
- rlen := s.newValue2(subOp, Types[TINT], j, i)
+ subOp := s.ssaOp(OSUB, types.Types[TINT])
+ mulOp := s.ssaOp(OMUL, types.Types[TINT])
+ andOp := s.ssaOp(OAND, types.Types[TINT])
+ rlen := s.newValue2(subOp, types.Types[TINT], j, i)
var rcap *ssa.Value
switch {
case t.IsString():
case j == k:
rcap = rlen
default:
- rcap = s.newValue2(subOp, Types[TINT], k, i)
+ rcap = s.newValue2(subOp, types.Types[TINT], k, i)
}
var rptr *ssa.Value
rptr = ptr
} else {
// delta = # of bytes to offset pointer by.
- delta := s.newValue2(mulOp, Types[TINT], i, s.constInt(Types[TINT], elemtype.Width))
+ delta := s.newValue2(mulOp, types.Types[TINT], i, s.constInt(types.Types[TINT], elemtype.Width))
// If we're slicing to the point where the capacity is zero,
// zero out the delta.
- mask := s.newValue1(ssa.OpSlicemask, Types[TINT], rcap)
- delta = s.newValue2(andOp, Types[TINT], delta, mask)
+ mask := s.newValue1(ssa.OpSlicemask, types.Types[TINT], rcap)
+ delta = s.newValue2(andOp, types.Types[TINT], delta, mask)
// Compute rptr = ptr + delta
rptr = s.newValue2(ssa.OpAddPtr, ptrtype, ptr, delta)
}
one: (*state).constInt64,
}
-func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
+func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
}
-func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
+func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
}
-func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
+func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = (floatY) x
// } else {
// equal to 10000000001; that rounds up, and the 1 cannot
// be lost else it would round down if the LSB of the
// candidate mantissa is 0.
- cmp := s.newValue2(cvttab.geq, Types[TBOOL], x, s.zeroVal(ft))
+ cmp := s.newValue2(cvttab.geq, types.Types[TBOOL], x, s.zeroVal(ft))
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
cvtF2F: ssa.OpCvt64Fto32F,
}
-func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
+func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
}
-func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
+func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
}
-func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
+func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = floatY(x)
// } else {
// result = floatY(float64(x) + (1<<32))
// }
- cmp := s.newValue2(ssa.OpGeq32, Types[TBOOL], x, s.zeroVal(ft))
+ cmp := s.newValue2(ssa.OpGeq32, types.Types[TBOOL], x, s.zeroVal(ft))
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
- a1 := s.newValue1(ssa.OpCvt32to64F, Types[TFLOAT64], x)
- twoToThe32 := s.constFloat64(Types[TFLOAT64], float64(1<<32))
- a2 := s.newValue2(ssa.OpAdd64F, Types[TFLOAT64], a1, twoToThe32)
+ a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], x)
+ twoToThe32 := s.constFloat64(types.Types[TFLOAT64], float64(1<<32))
+ a2 := s.newValue2(ssa.OpAdd64F, types.Types[TFLOAT64], a1, twoToThe32)
a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
s.vars[n] = a3
// return *(((*int)n)+1)
// }
lenType := n.Type
- nilValue := s.constNil(Types[TUINTPTR])
- cmp := s.newValue2(ssa.OpEqPtr, Types[TBOOL], x, nilValue)
+ nilValue := s.constNil(types.Types[TUINTPTR])
+ cmp := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], x, nilValue)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
cutoff: 2147483648,
}
-func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
+func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u64, n, x, ft, tt)
}
-func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
+func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u64, n, x, ft, tt)
}
-func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
+func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u32, n, x, ft, tt)
}
-func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
+func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u32, n, x, ft, tt)
}
-func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
+func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// cutoff:=1<<(intY_Size-1)
// if x < floatX(cutoff) {
// result = uintY(x)
// result = z | -(cutoff)
// }
cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
- cmp := s.newValue2(cvttab.ltf, Types[TBOOL], x, cutoff)
+ cmp := s.newValue2(cvttab.ltf, types.Types[TBOOL], x, cutoff)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
// Get itab/type field from input.
itab := s.newValue1(ssa.OpITab, byteptr, iface)
// Conversion succeeds iff that field is not nil.
- cond := s.newValue2(ssa.OpNeqPtr, Types[TBOOL], itab, s.constNil(byteptr))
+ cond := s.newValue2(ssa.OpNeqPtr, types.Types[TBOOL], itab, s.constNil(byteptr))
if n.Left.Type.IsEmptyInterface() && commaok {
// Converting empty interface to empty interface with ,ok is just a nil check.
}
if n.Left.Type.IsEmptyInterface() {
if commaok {
- call := s.rtcall(assertE2I2, true, []*Type{n.Type, Types[TBOOL]}, target, iface)
+ call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
return call[0], call[1]
}
- return s.rtcall(assertE2I, true, []*Type{n.Type}, target, iface)[0], nil
+ return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil
}
if commaok {
- call := s.rtcall(assertI2I2, true, []*Type{n.Type, Types[TBOOL]}, target, iface)
+ call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
return call[0], call[1]
}
- return s.rtcall(assertI2I, true, []*Type{n.Type}, target, iface)[0], nil
+ return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil
}
if Debug_typeassert > 0 {
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, tmp, s.mem())
}
- cond := s.newValue2(ssa.OpEqPtr, Types[TBOOL], itab, targetITab)
+ cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cond)
if direct {
return s.newValue1(ssa.OpIData, n.Type, iface), nil
}
- p := s.newValue1(ssa.OpIData, typPtr(n.Type), iface)
+ p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()), nil
}
bEnd := s.f.NewBlock(ssa.BlockPlain)
// Note that we need a new valVar each time (unlike okVar where we can
// reuse the variable) because it might have a different type every time.
- valVar := &Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "val"}}
+ valVar := &Node{Op: ONAME, Class: Pxxx, Sym: &types.Sym{Name: "val"}}
// type assertion succeeded
s.startBlock(bOk)
if direct {
s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface)
} else {
- p := s.newValue1(ssa.OpIData, typPtr(n.Type), iface)
+ p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
s.vars[valVar] = s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
}
} else {
- p := s.newValue1(ssa.OpIData, typPtr(n.Type), iface)
+ p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
store := s.newValue3I(ssa.OpMove, ssa.TypeMem, n.Type.Size(), addr, p, s.mem())
store.Aux = n.Type
s.vars[&memVar] = store
res = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, tmp, s.mem())
}
- resok = s.variable(&okVar, Types[TBOOL])
+ resok = s.variable(&okVar, types.Types[TBOOL])
delete(s.vars, &okVar)
return res, resok
}
// truncate 64-bit indexes on 32-bit pointer archs. Test the
// high word and branch to out-of-bounds failure if it is not 0.
if Debug['B'] == 0 {
- hi := s.newValue1(ssa.OpInt64Hi, Types[TUINT32], v)
- cmp := s.newValue2(ssa.OpEq32, Types[TBOOL], hi, s.constInt32(Types[TUINT32], 0))
+ hi := s.newValue1(ssa.OpInt64Hi, types.Types[TUINT32], v)
+ cmp := s.newValue2(ssa.OpEq32, types.Types[TBOOL], hi, s.constInt32(types.Types[TUINT32], 0))
s.check(cmp, panicfn)
}
- return s.newValue1(ssa.OpTrunc64to32, Types[TINT], v)
+ return s.newValue1(ssa.OpTrunc64to32, types.Types[TINT], v)
}
// Extend value to the required size
s.Fatalf("bad unsigned index extension %s", v.Type)
}
}
- return s.newValue1(op, Types[TINT], v)
+ return s.newValue1(op, types.Types[TINT], v)
}
// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
log bool
}
-// StringData returns a symbol (a *Sym wrapped in an interface) which
+// StringData returns a symbol (a *types.Sym wrapped in an interface) which
// is the data component of a global string constant containing s.
func (e *ssafn) StringData(s string) interface{} {
if aux, ok := e.strings[s]; ok {
e.strings = make(map[string]interface{})
}
data := stringsym(s)
- aux := &ssa.ExternSymbol{Typ: idealstring, Sym: data}
+ aux := &ssa.ExternSymbol{Typ: types.Idealstring, Sym: data}
e.strings[s] = aux
return aux
}
func (e *ssafn) Auto(pos src.XPos, t ssa.Type) ssa.GCNode {
- n := tempAt(pos, e.curfn, t.(*Type)) // Note: adds new auto to e.curfn.Func.Dcl list
+ n := tempAt(pos, e.curfn, t.(*types.Type)) // Note: adds new auto to e.curfn.Func.Dcl list
return n
}
func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
- ptrType := typPtr(Types[TUINT8])
- lenType := Types[TINT]
+ ptrType := types.NewPtr(types.Types[TUINT8])
+ lenType := types.Types[TINT]
if n.Class == PAUTO && !n.Addrtaken() {
// Split this string up into two separate variables.
p := e.namedAuto(n.Sym.Name+".ptr", ptrType, n.Pos)
func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
- t := typPtr(Types[TUINT8])
+ t := types.NewPtr(types.Types[TUINT8])
if n.Class == PAUTO && !n.Addrtaken() {
// Split this interface up into two separate variables.
f := ".itab"
func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
- ptrType := typPtr(name.Type.ElemType().(*Type))
- lenType := Types[TINT]
+ ptrType := types.NewPtr(name.Type.ElemType().(*types.Type))
+ lenType := types.Types[TINT]
if n.Class == PAUTO && !n.Addrtaken() {
// Split this slice up into three separate variables.
p := e.namedAuto(n.Sym.Name+".ptr", ptrType, n.Pos)
func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
s := name.Type.Size() / 2
- var t *Type
+ var t *types.Type
if s == 8 {
- t = Types[TFLOAT64]
+ t = types.Types[TFLOAT64]
} else {
- t = Types[TFLOAT32]
+ t = types.Types[TFLOAT32]
}
if n.Class == PAUTO && !n.Addrtaken() {
// Split this complex up into two separate variables.
func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
- var t *Type
+ var t *types.Type
if name.Type.IsSigned() {
- t = Types[TINT32]
+ t = types.Types[TINT32]
} else {
- t = Types[TUINT32]
+ t = types.Types[TUINT32]
}
if n.Class == PAUTO && !n.Addrtaken() {
// Split this int64 up into two separate variables.
h := e.namedAuto(n.Sym.Name+".hi", t, n.Pos)
- l := e.namedAuto(n.Sym.Name+".lo", Types[TUINT32], n.Pos)
- return ssa.LocalSlot{N: h, Type: t, Off: 0}, ssa.LocalSlot{N: l, Type: Types[TUINT32], Off: 0}
+ l := e.namedAuto(n.Sym.Name+".lo", types.Types[TUINT32], n.Pos)
+ return ssa.LocalSlot{N: h, Type: t, Off: 0}, ssa.LocalSlot{N: l, Type: types.Types[TUINT32], Off: 0}
}
// Return the two parts of the larger variable.
if thearch.LinkArch.ByteOrder == binary.BigEndian {
- return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off + 4}
+ return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off + 4}
}
- return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off}
+ return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off}
}
func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
// namedAuto returns a new AUTO variable with the given name and type.
// These are exposed to the debugger.
func (e *ssafn) namedAuto(name string, typ ssa.Type, pos src.XPos) ssa.GCNode {
- t := typ.(*Type)
- s := &Sym{Name: name, Pkg: localpkg}
+ t := typ.(*types.Type)
+ s := &types.Sym{Name: name, Pkg: localpkg}
n := new(Node)
n.Name = new(Name)
n.Pos = pos
n.Orig = n
- s.Def = n
- s.Def.SetUsed(true)
+ s.Def = asTypesNode(n)
+ asNode(s.Def).SetUsed(true)
n.Sym = s
n.Type = t
n.Class = PAUTO
}
func (e *ssafn) CanSSA(t ssa.Type) bool {
- return canSSAType(t.(*Type))
+ return canSSAType(t.(*types.Type))
}
func (e *ssafn) Line(pos src.XPos) string {
import (
"bytes"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"crypto/md5"
return lno
}
-func lookup(name string) *Sym {
+func lookup(name string) *types.Sym {
return localpkg.Lookup(name)
}
// lookupN looks up the symbol starting with prefix and ending with
// the decimal n. If prefix is too long, lookupN panics.
-func lookupN(prefix string, n int) *Sym {
+func lookupN(prefix string, n int) *types.Sym {
var buf [20]byte // plenty long enough for all current users
copy(buf[:], prefix)
b := strconv.AppendInt(buf[:len(prefix)], int64(n), 10)
return newname(lookupN(prefix, int(n)))
}
-var initSyms []*Sym
-
-var nopkg = &Pkg{
- Syms: make(map[string]*Sym),
-}
-
-func (pkg *Pkg) Lookup(name string) *Sym {
- s, _ := pkg.LookupOK(name)
- return s
-}
-
-// LookupOK looks up name in pkg and reports whether it previously existed.
-func (pkg *Pkg) LookupOK(name string) (s *Sym, existed bool) {
- if pkg == nil {
- pkg = nopkg
- }
- if s := pkg.Syms[name]; s != nil {
- return s, true
- }
-
- s = &Sym{
- Name: name,
- Pkg: pkg,
- }
- if name == "init" {
- initSyms = append(initSyms, s)
- }
- pkg.Syms[name] = s
- return s, false
-}
-
-func (pkg *Pkg) LookupBytes(name []byte) *Sym {
- if pkg == nil {
- pkg = nopkg
- }
- if s := pkg.Syms[string(name)]; s != nil {
- return s
- }
- str := internString(name)
- return pkg.Lookup(str)
-}
-
-func restrictlookup(name string, pkg *Pkg) *Sym {
+func restrictlookup(name string, pkg *types.Pkg) *types.Sym {
if !exportname(name) && pkg != localpkg {
yyerror("cannot refer to unexported name %s.%s", pkg.Name, name)
}
// find all the exported symbols in package opkg
// and make them available in the current package
-func importdot(opkg *Pkg, pack *Node) {
- var s1 *Sym
+func importdot(opkg *types.Pkg, pack *Node) {
+ var s1 *types.Sym
var pkgerror string
n := 0
s1.Def = s.Def
s1.Block = s.Block
- if s1.Def.Name == nil {
- Dump("s1def", s1.Def)
+ if asNode(s1.Def).Name == nil {
+ Dump("s1def", asNode(s1.Def))
Fatalf("missing Name")
}
- s1.Def.Name.Pack = pack
+ asNode(s1.Def).Name.Pack = pack
s1.Origpkg = opkg
n++
}
}
// newname returns a new ONAME Node associated with symbol s.
-func newname(s *Sym) *Node {
+func newname(s *types.Sym) *Node {
n := newnamel(lineno, s)
n.Name.Curfn = Curfn
return n
// newname returns a new ONAME Node associated with symbol s at position pos.
// The caller is responsible for setting n.Name.Curfn.
-func newnamel(pos src.XPos, s *Sym) *Node {
+func newnamel(pos src.XPos, s *types.Sym) *Node {
if s == nil {
Fatalf("newnamel nil")
}
// nodSym makes a Node with Op op and with the Left field set to left
// and the Sym field set to sym. This is for ODOT and friends.
-func nodSym(op Op, left *Node, sym *Sym) *Node {
+func nodSym(op Op, left *Node, sym *types.Sym) *Node {
n := nod(op, left, nil)
n.Sym = sym
return n
}
// methcmp sorts by symbol, then by package path for unexported symbols.
-type methcmp []*Field
+type methcmp []*types.Field
func (x methcmp) Len() int { return len(x) }
func (x methcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
c.SetAddable(true)
c.SetVal(Val{new(Mpint)})
c.Val().U.(*Mpint).SetInt64(v)
- c.Type = Types[TIDEAL]
+ c.Type = types.Types[TIDEAL]
return c
}
c.SetAddable(true)
c.SetVal(Val{newMpflt()})
c.Val().U.(*Mpflt).Set(v)
- c.Type = Types[TIDEAL]
+ c.Type = types.Types[TIDEAL]
return c
}
-func nodconst(n *Node, t *Type, v int64) {
+func nodconst(n *Node, t *types.Type, v int64) {
*n = Node{}
n.Op = OLITERAL
n.SetAddable(true)
func nodnil() *Node {
c := nodintconst(0)
c.SetVal(Val{new(NilVal)})
- c.Type = Types[TNIL]
+ c.Type = types.Types[TNIL]
return c
}
func nodbool(b bool) *Node {
c := nodintconst(0)
c.SetVal(Val{b})
- c.Type = idealbool
+ c.Type = types.Idealbool
return c
}
return Isconst(n.Orig, CTNIL)
}
-func isptrto(t *Type, et EType) bool {
+func isptrto(t *types.Type, et types.EType) bool {
if t == nil {
return false
}
return isblanksym(n.Sym)
}
-func isblanksym(s *Sym) bool {
+func isblanksym(s *types.Sym) bool {
return s != nil && s.Name == "_"
}
// methtype returns the underlying type, if any,
// that owns methods with receiver parameter t.
// The result is either a named type or an anonymous struct.
-func methtype(t *Type) *Type {
+func methtype(t *types.Type) *types.Type {
if t == nil {
return nil
}
return nil
}
-func cplxsubtype(et EType) EType {
+func cplxsubtype(et types.EType) types.EType {
switch et {
case TCOMPLEX64:
return TFLOAT32
// named, it is only identical to the other if they are the same
// pointer (t1 == t2), so there's no chance of chasing cycles
// ad infinitum, so no need for a depth counter.
-func eqtype(t1, t2 *Type) bool {
+func eqtype(t1, t2 *types.Type) bool {
return eqtype1(t1, t2, true, nil)
}
// eqtypeIgnoreTags is like eqtype but it ignores struct tags for struct identity.
-func eqtypeIgnoreTags(t1, t2 *Type) bool {
+func eqtypeIgnoreTags(t1, t2 *types.Type) bool {
return eqtype1(t1, t2, false, nil)
}
type typePair struct {
- t1 *Type
- t2 *Type
+ t1 *types.Type
+ t2 *types.Type
}
-func eqtype1(t1, t2 *Type, cmpTags bool, assumedEqual map[typePair]struct{}) bool {
+func eqtype1(t1, t2 *types.Type, cmpTags bool, assumedEqual map[typePair]struct{}) bool {
if t1 == t2 {
return true
}
// separate for error messages. Treat them as equal.
switch t1.Etype {
case TUINT8:
- return (t1 == Types[TUINT8] || t1 == bytetype) && (t2 == Types[TUINT8] || t2 == bytetype)
+ return (t1 == types.Types[TUINT8] || t1 == types.Bytetype) && (t2 == types.Types[TUINT8] || t2 == types.Bytetype)
case TINT32:
- return (t1 == Types[TINT32] || t1 == runetype) && (t2 == Types[TINT32] || t2 == runetype)
+ return (t1 == types.Types[TINT32] || t1 == types.Runetype) && (t2 == types.Types[TINT32] || t2 == types.Runetype)
default:
return false
}
// Check parameters and result parameters for type equality.
// We intentionally ignore receiver parameters for type
// equality, because they're never relevant.
- for _, f := range paramsResults {
+ for _, f := range types.ParamsResults {
// Loop over fields in structs, ignoring argument names.
fs1, fs2 := f(t1).FieldSlice(), f(t2).FieldSlice()
if len(fs1) != len(fs2) {
// Are t1 and t2 equal struct types when field names are ignored?
// For deciding whether the result struct from g can be copied
// directly when compiling f(g()).
-func eqtypenoname(t1 *Type, t2 *Type) bool {
+func eqtypenoname(t1 *types.Type, t2 *types.Type) bool {
if t1 == nil || t2 == nil || !t1.IsStruct() || !t2.IsStruct() {
return false
}
// Is type src assignment compatible to type dst?
// If so, return op code to use in conversion.
// If not, return 0.
-func assignop(src *Type, dst *Type, why *string) Op {
+func assignop(src *types.Type, dst *types.Type, why *string) Op {
if why != nil {
*why = ""
}
// 3. dst is an interface type and src implements dst.
if dst.IsInterface() && src.Etype != TNIL {
- var missing, have *Field
+ var missing, have *types.Field
var ptr int
if implements(src, dst, &missing, &have, &ptr) {
return OCONVIFACE
}
if src.IsInterface() && dst.Etype != TBLANK {
- var missing, have *Field
+ var missing, have *types.Field
var ptr int
if why != nil && implements(dst, src, &missing, &have, &ptr) {
*why = ": need type assertion"
// 4. src is a bidirectional channel value, dst is a channel type,
// src and dst have identical element types, and
// either src or dst is not a named type.
- if src.IsChan() && src.ChanDir() == Cboth && dst.IsChan() {
+ if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() {
if eqtype(src.Elem(), dst.Elem()) && (src.Sym == nil || dst.Sym == nil) {
return OCONVNOP
}
// Can we convert a value of type src to a value of type dst?
// If so, return op code to use in conversion (maybe OCONVNOP).
// If not, return 0.
-func convertop(src *Type, dst *Type, why *string) Op {
+func convertop(src *types.Type, dst *types.Type, why *string) Op {
if why != nil {
*why = ""
}
}
if src.IsSlice() && dst.IsString() {
- if src.Elem().Etype == bytetype.Etype {
+ if src.Elem().Etype == types.Bytetype.Etype {
return OARRAYBYTESTR
}
- if src.Elem().Etype == runetype.Etype {
+ if src.Elem().Etype == types.Runetype.Etype {
return OARRAYRUNESTR
}
}
// 7. src is a string and dst is []byte or []rune.
// String to slice.
if src.IsString() && dst.IsSlice() {
- if dst.Elem().Etype == bytetype.Etype {
+ if dst.Elem().Etype == types.Bytetype.Etype {
return OSTRARRAYBYTE
}
- if dst.Elem().Etype == runetype.Etype {
+ if dst.Elem().Etype == types.Runetype.Etype {
return OSTRARRAYRUNE
}
}
return 0
}
-func assignconv(n *Node, t *Type, context string) *Node {
+func assignconv(n *Node, t *types.Type, context string) *Node {
return assignconvfn(n, t, func() string { return context })
}
// Convert node n for assignment to type t.
-func assignconvfn(n *Node, t *Type, context func() string) *Node {
+func assignconvfn(n *Node, t *types.Type, context func() string) *Node {
if n == nil || n.Type == nil || n.Type.Broke() {
return n
}
// Convert ideal bool from comparison to plain bool
// if the next step is non-bool (like interface{}).
- if n.Type == idealbool && !t.IsBoolean() {
+ if n.Type == types.Idealbool && !t.IsBoolean() {
if n.Op == ONAME || n.Op == OLITERAL {
r := nod(OCONVNOP, n, nil)
- r.Type = Types[TBOOL]
+ r.Type = types.Types[TBOOL]
r.Typecheck = 1
r.SetImplicit(true)
n = r
if s == nil || s.Def == nil {
Fatalf("syslook: can't find runtime.%s", name)
}
- return s.Def
+ return asNode(s.Def)
}
// typehash computes a hash value for type t to use in type switch statements.
-func typehash(t *Type) uint32 {
+func typehash(t *types.Type) uint32 {
p := t.LongString()
// Using MD5 is overkill, but reduces accidental collisions.
n.SetHasCall(b)
}
-func badtype(op Op, tl *Type, tr *Type) {
+func badtype(op Op, tl *types.Type, tr *types.Type) {
fmt_ := ""
if tl != nil {
fmt_ += fmt.Sprintf("\n\t%v", tl)
return cheapexpr(n, init)
}
-func copyexpr(n *Node, t *Type, init *Nodes) *Node {
+func copyexpr(n *Node, t *types.Type, init *Nodes) *Node {
l := temp(t)
a := nod(OAS, l, n)
a = typecheck(a, Etop)
// A Dlist stores a pointer to a TFIELD Type embedded within
// a TSTRUCT or TINTER Type.
type Dlist struct {
- field *Field
+ field *types.Field
}
// dotlist is used by adddot1 to record the path of embedded fields
// lookdot0 returns the number of fields or methods named s associated
// with Type t. If exactly one exists, it will be returned in *save
// (if save is not nil).
-func lookdot0(s *Sym, t *Type, save **Field, ignorecase bool) int {
+func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) int {
u := t
if u.IsPtr() {
u = u.Elem()
// in reverse order. If none exist, more will indicate whether t contains any
// embedded fields at depth d, so callers can decide whether to retry at
// a greater depth.
-func adddot1(s *Sym, t *Type, d int, save **Field, ignorecase bool) (c int, more bool) {
+func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase bool) (c int, more bool) {
if t.Recur() {
return
}
t.SetRecur(true)
- var u *Type
+ var u *types.Type
d--
if d < 0 {
// We've reached our target depth. If t has any fields/methods
// a selection expression x.f, where x is of type t and f is the symbol s.
// If no such path exists, dotpath returns nil.
// If there are multiple shortest paths to the same depth, ambig is true.
-func dotpath(s *Sym, t *Type, save **Field, ignorecase bool) (path []Dlist, ambig bool) {
+func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) (path []Dlist, ambig bool) {
// The embedding of types within structs imposes a tree structure onto
// types: structs parent the types they embed, and types parent their
// fields or methods. Our goal here is to find the shortest path to
// with unique tasks and they return
// the actual methods.
type Symlink struct {
- field *Field
+ field *types.Field
followptr bool
}
var slist []Symlink
-func expand0(t *Type, followptr bool) {
+func expand0(t *types.Type, followptr bool) {
u := t
if u.IsPtr() {
followptr = true
}
}
-func expand1(t *Type, top, followptr bool) {
+func expand1(t *types.Type, top, followptr bool) {
if t.Recur() {
return
}
t.SetRecur(false)
}
-func expandmeth(t *Type) {
+func expandmeth(t *types.Type) {
if t == nil || t.AllMethods().Len() != 0 {
return
}
expand1(t, true, false)
// check each method to be uniquely reachable
- var ms []*Field
+ var ms []*types.Field
for i, sl := range slist {
slist[i].field = nil
sl.field.Sym.SetUniq(false)
- var f *Field
+ var f *types.Field
if path, _ := dotpath(sl.field.Sym, t, &f, false); path == nil {
continue
}
}
// Given funarg struct list, return list of ODCLFIELD Node fn args.
-func structargs(tl *Type, mustname bool) []*Node {
+func structargs(tl *types.Type, mustname bool) []*Node {
var args []*Node
gen := 0
for _, t := range tl.Fields().Slice() {
// rcvr - U
// method - M func (t T)(), a TFIELD type struct
// newnam - the eventual mangled name of this function
-func genwrapper(rcvr *Type, method *Field, newnam *Sym, iface int) {
+func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym, iface int) {
if false && Debug['r'] != 0 {
fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam)
}
// that the interface call will pass in.
// Add a dummy padding argument after the
// receiver to make up the difference.
- tpad := typArray(Types[TUINT8], int64(Widthptr)-rcvr.Width)
+ tpad := types.NewArray(types.Types[TUINT8], int64(Widthptr)-rcvr.Width)
pad := namedfield(".pad", tpad)
l = append(l, pad)
}
funccompile(fn)
}
-func hashmem(t *Type) *Node {
+func hashmem(t *types.Type) *Node {
sym := Runtimepkg.Lookup("memhash")
n := newname(sym)
n.Class = PFUNC
tfn := nod(OTFUNC, nil, nil)
- tfn.List.Append(anonfield(typPtr(t)))
- tfn.List.Append(anonfield(Types[TUINTPTR]))
- tfn.List.Append(anonfield(Types[TUINTPTR]))
- tfn.Rlist.Append(anonfield(Types[TUINTPTR]))
+ tfn.List.Append(anonfield(types.NewPtr(t)))
+ tfn.List.Append(anonfield(types.Types[TUINTPTR]))
+ tfn.List.Append(anonfield(types.Types[TUINTPTR]))
+ tfn.Rlist.Append(anonfield(types.Types[TUINTPTR]))
tfn = typecheck(tfn, Etype)
n.Type = tfn.Type
return n
}
-func ifacelookdot(s *Sym, t *Type, followptr *bool, ignorecase bool) *Field {
+func ifacelookdot(s *types.Sym, t *types.Type, followptr *bool, ignorecase bool) *types.Field {
*followptr = false
if t == nil {
return nil
}
- var m *Field
+ var m *types.Field
path, ambig := dotpath(s, t, &m, ignorecase)
if path == nil {
if ambig {
return m
}
-func implements(t, iface *Type, m, samename **Field, ptr *int) bool {
+func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool {
t0 := t
if t == nil {
return false
return n
}
-func ngotype(n *Node) *Sym {
+func ngotype(n *Node) *types.Sym {
if n.Type != nil {
return typenamesym(n.Type)
}
return s
}
-var pkgMap = make(map[string]*Pkg)
-var pkgs []*Pkg
+var pkgMap = make(map[string]*types.Pkg)
+var pkgs []*types.Pkg
-func mkpkg(path string) *Pkg {
+func mkpkg(path string) *types.Pkg {
if p := pkgMap[path]; p != nil {
return p
}
- p := new(Pkg)
+ p := new(types.Pkg)
p.Path = path
p.Prefix = pathtoprefix(path)
- p.Syms = make(map[string]*Sym)
+ p.Syms = make(map[string]*types.Sym)
pkgMap[path] = p
pkgs = append(pkgs, p)
return p
// Can this type be stored directly in an interface word?
// Yes, if the representation is a single pointer.
-func isdirectiface(t *Type) bool {
+func isdirectiface(t *types.Type) bool {
switch t.Etype {
case TPTR32,
TPTR64,
// itabType loads the _type field from a runtime.itab struct.
func itabType(itab *Node) *Node {
typ := nodSym(ODOTPTR, itab, nil)
- typ.Type = typPtr(Types[TUINT8])
+ typ.Type = types.NewPtr(types.Types[TUINT8])
typ.Typecheck = 1
typ.Xoffset = int64(Widthptr) // offset of _type in runtime.itab
typ.SetBounded(true) // guaranteed not to fault
// ifaceData loads the data field from an interface.
// The concrete type must be known to have type t.
// It follows the pointer if !isdirectiface(t).
-func ifaceData(n *Node, t *Type) *Node {
+func ifaceData(n *Node, t *types.Type) *Node {
ptr := nodSym(OIDATA, n, nil)
if isdirectiface(t) {
ptr.Type = t
ptr.Typecheck = 1
return ptr
}
- ptr.Type = typPtr(t)
+ ptr.Type = types.NewPtr(t)
ptr.SetBounded(true)
ptr.Typecheck = 1
ind := nod(OIND, ptr, nil)
ind.Typecheck = 1
return ind
}
-
-// iet returns 'T' if t is a concrete type,
-// 'I' if t is an interface type, and 'E' if t is an empty interface type.
-// It is used to build calls to the conv* and assert* runtime routines.
-func (t *Type) iet() byte {
- if t.IsEmptyInterface() {
- return 'E'
- }
- if t.IsInterface() {
- return 'I'
- }
- return 'T'
-}
package gc
import (
+ "cmd/compile/internal/types"
"sort"
)
var nilonly string
var top int
- var t *Type
+ var t *types.Type
if n.Left != nil && n.Left.Op == OTYPESW {
// type switch
n.Left = defaultlit(n.Left, nil)
t = n.Left.Type
} else {
- t = Types[TBOOL]
+ t = types.Types[TBOOL]
}
if t != nil {
switch {
yyerrorl(n.Pos, "cannot switch on %L", n.Left)
case t.IsSlice():
nilonly = "slice"
- case t.IsArray() && !t.IsComparable():
+ case t.IsArray() && !IsComparable(t):
yyerrorl(n.Pos, "cannot switch on %L", n.Left)
case t.IsStruct():
- if f := t.IncomparableField(); f != nil {
+ if f := IncomparableField(t); f != nil {
yyerrorl(n.Pos, "cannot switch on %L (struct containing %v cannot be compared)", n.Left, f.Type)
}
case t.Etype == TFUNC:
}
case nilonly != "" && !isnil(n1):
yyerrorl(ncase.Pos, "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left)
- case t.IsInterface() && !n1.Type.IsInterface() && !n1.Type.IsComparable():
+ case t.IsInterface() && !n1.Type.IsInterface() && !IsComparable(n1.Type):
yyerrorl(ncase.Pos, "invalid case %L in switch (incomparable type)", n1)
}
// type switch
case Etype:
- var missing, have *Field
+ var missing, have *types.Field
var ptr int
switch {
case n1.Op == OLITERAL && n1.Type.IsKind(TNIL):
a = typecheck(a, Etop)
cas = append(cas, a)
- s.okname = temp(Types[TBOOL])
+ s.okname = temp(types.Types[TBOOL])
s.okname = typecheck(s.okname, Erv)
- s.hashname = temp(Types[TUINT32])
+ s.hashname = temp(types.Types[TUINT32])
s.hashname = typecheck(s.hashname, Erv)
// set up labels and jumps
// Load hash from type or itab.
h := nodSym(ODOTPTR, itab, nil)
- h.Type = Types[TUINT32]
+ h.Type = types.Types[TUINT32]
h.Typecheck = 1
if cond.Right.Type.IsEmptyInterface() {
h.Xoffset = int64(2 * Widthptr) // offset of hash in runtime._type
import (
"cmd/compile/internal/syntax"
+ "cmd/compile/internal/types"
"cmd/internal/src"
)
Rlist Nodes
// most nodes
- Type *Type
+ Type *types.Type
Orig *Node // original form, for printing, and tracking copies of ONAMEs
// func
// ONAME, OTYPE, OPACK, OLABEL, some OLITERAL
Name *Name
- Sym *Sym // various
+ Sym *types.Sym // various
E interface{} // Opt or Val, see methods below
// Various. Usually an offset into a struct. For example:
Esc uint16 // EscXXX
Op Op
- Etype EType // op for OASOP, etype for OTYPE, exclam for export, 6g saved reg, ChanDir for OTCHAN, for OINDEXMAP 1=LHS,0=RHS
- Class Class // PPARAM, PAUTO, PEXTERN, etc
- Embedded uint8 // ODCLFIELD embedded type
- Walkdef uint8 // tracks state during typecheckdef; 2 == loop detected
- Typecheck uint8 // tracks state during typechecking; 2 == loop detected
+ Etype types.EType // op for OASOP, etype for OTYPE, exclam for export, 6g saved reg, ChanDir for OTCHAN, for OINDEXMAP 1=LHS,0=RHS
+ Class Class // PPARAM, PAUTO, PEXTERN, etc
+ Embedded uint8 // ODCLFIELD embedded type
+ Walkdef uint8 // tracks state during typecheckdef; 2 == loop detected
+ Typecheck uint8 // tracks state during typechecking; 2 == loop detected
Initorder uint8
Likely int8 // likeliness of if statement
hasVal int8 // +1 for Val, -1 for Opt, 0 for not yet set
// Name holds Node fields used only by named nodes (ONAME, OTYPE, OPACK, OLABEL, some OLITERAL).
type Name struct {
- Pack *Node // real package for import . names
- Pkg *Pkg // pkg for OPACK nodes
- Defn *Node // initializing assignment
- Curfn *Node // function for local variables
- Param *Param // additional fields for ONAME, OTYPE
- Decldepth int32 // declaration loop depth, increased for every loop or label
- Vargen int32 // unique name for ONAME within a function. Function outputs are numbered starting at one.
+ Pack *Node // real package for import . names
+ Pkg *types.Pkg // pkg for OPACK nodes
+ Defn *Node // initializing assignment
+ Curfn *Node // function for local variables
+ Param *Param // additional fields for ONAME, OTYPE
+ Decldepth int32 // declaration loop depth, increased for every loop or label
+ Vargen int32 // unique name for ONAME within a function. Function outputs are numbered starting at one.
Funcdepth int32
flags bitset8
Stackcopy *Node // the PPARAM/PPARAMOUT on-stack slot (moved func params only)
// ONAME PPARAM
- Field *Field // TFIELD in arg struct
+ Field *types.Field // TFIELD in arg struct
// ONAME closure linkage
// Consider:
// Func holds Node fields used only with function-like nodes.
type Func struct {
- Shortname *Sym
+ Shortname *types.Sym
Enter Nodes // for example, allocate and initialize memory for escaping parameters
Exit Nodes
Cvars Nodes // closure params
Inldcl Nodes // copy of dcl for use in inlining
Closgen int
Outerfunc *Node // outer function (for closure)
- FieldTrack map[*Sym]struct{}
+ FieldTrack map[*types.Sym]struct{}
Ntype *Node // signature
Top int // top context (Ecall, Eproc, etc)
Closure *Node // OCLOSURE <-> ODCLFUNC
package gc
import (
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"fmt"
// resolve ONONAME to definition, if any.
func resolve(n *Node) *Node {
if n != nil && n.Op == ONONAME && n.Sym != nil {
- r := n.Sym.Def
+ r := asNode(n.Sym.Def)
if r != nil {
if r.Op != OIOTA {
n = r
TIDEAL: "untyped number",
}
-func typekind(t *Type) string {
+func typekind(t *types.Type) string {
if t.IsSlice() {
return "slice"
}
}
switch consttype(n) {
case CTINT, CTRUNE, CTFLT, CTCPLX:
- n = defaultlit(n, Types[TINT])
+ n = defaultlit(n, types.Types[TINT])
}
n = defaultlit(n, nil)
ok |= Erv
if n.Type == nil && n.Val().Ctype() == CTSTR {
- n.Type = idealstring
+ n.Type = types.Idealstring
}
break OpSwitch
return n
}
- var t *Type
+ var t *types.Type
if n.Left == nil {
- t = typSlice(r.Type)
+ t = types.NewSlice(r.Type)
} else if n.Left.Op == ODDD {
if top&Ecomplit == 0 {
if !n.Diag() {
n.Type = nil
return n
}
- t = typDDDArray(r.Type)
+ t = types.NewDDDArray(r.Type)
} else {
n.Left = indexlit(typecheck(n.Left, Erv))
l := n.Left
}
v := l.Val()
- if doesoverflow(v, Types[TINT]) {
+ if doesoverflow(v, types.Types[TINT]) {
yyerror("array bound is too large")
n.Type = nil
return n
n.Type = nil
return n
}
- t = typArray(r.Type, bound)
+ t = types.NewArray(r.Type, bound)
}
n.Op = OTYPE
n.Type = t
n.Left = nil
n.Right = nil
- if !t.isDDDArray() {
+ if !t.IsDDDArray() {
checkwidth(t)
}
yyerror("go:notinheap map value not allowed")
}
n.Op = OTYPE
- n.Type = typMap(l.Type, r.Type)
+ n.Type = types.NewMap(l.Type, r.Type)
// map key validation
alg, bad := algtype1(l.Type)
if l.Type.NotInHeap() {
yyerror("chan of go:notinheap type not allowed")
}
- t := typChan(l.Type, ChanDir(n.Etype)) // TODO(marvin): Fix Node.EType type union.
+ t := types.NewChan(l.Type, types.ChanDir(n.Etype)) // TODO(marvin): Fix Node.EType type union.
n.Op = OTYPE
n.Type = t
n.Left = nil
if l.Op == OTYPE {
ok |= Etype
n.Op = OTYPE
- n.Type = typPtr(l.Type)
+ n.Type = types.NewPtr(l.Type)
n.Left = nil
break OpSwitch
}
op = n.Op
}
if op == OLSH || op == ORSH {
- r = defaultlit(r, Types[TUINT])
+ r = defaultlit(r, types.Types[TUINT])
n.Right = r
t := r.Type
if !t.IsInteger() || t.IsSigned() {
if r.Type.Etype != TBLANK {
aop = assignop(l.Type, r.Type, nil)
if aop != 0 {
- if r.Type.IsInterface() && !l.Type.IsInterface() && !l.Type.IsComparable() {
+ if r.Type.IsInterface() && !l.Type.IsInterface() && !IsComparable(l.Type) {
yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type))
n.Type = nil
return n
if l.Type.Etype != TBLANK {
aop = assignop(r.Type, l.Type, nil)
if aop != 0 {
- if l.Type.IsInterface() && !r.Type.IsInterface() && !r.Type.IsComparable() {
+ if l.Type.IsInterface() && !r.Type.IsInterface() && !IsComparable(r.Type) {
yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type))
n.Type = nil
return n
// okfor allows any array == array, map == map, func == func.
// restrict to slice/map/func == nil and nil == slice/map/func.
- if l.Type.IsArray() && !l.Type.IsComparable() {
+ if l.Type.IsArray() && !IsComparable(l.Type) {
yyerror("invalid operation: %v (%v cannot be compared)", n, l.Type)
n.Type = nil
return n
}
if l.Type.IsStruct() {
- if f := l.Type.IncomparableField(); f != nil {
+ if f := IncomparableField(l.Type); f != nil {
yyerror("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type)
n.Type = nil
return n
t = l.Type
if iscmp[n.Op] {
evconst(n)
- t = idealbool
+ t = types.Idealbool
if n.Op != OLITERAL {
l, r = defaultlit2(l, r, true)
n.Left = l
if et == TSTRING {
if iscmp[n.Op] {
// TODO(marvin): Fix Node.EType type union.
- n.Etype = EType(n.Op)
+ n.Etype = types.EType(n.Op)
n.Op = OCMPSTR
} else if n.Op == OADD {
// create OADDSTR node with list of strings in x + y + z + (w + v) + ...
} else // leave alone for back end
if r.Type.IsInterface() == l.Type.IsInterface() {
// TODO(marvin): Fix Node.EType type union.
- n.Etype = EType(n.Op)
+ n.Etype = types.EType(n.Op)
n.Op = OCMPIFACE
}
}
n.Type = nil
return n
}
- n.Type = typPtr(t)
+ n.Type = types.NewPtr(t)
break OpSwitch
case OCOMPLIT:
}
if n.Type != nil && !n.Type.IsInterface() {
- var missing, have *Field
+ var missing, have *types.Field
var ptr int
if !implements(n.Type, t, &missing, &have, &ptr) {
if have != nil && have.Sym == missing.Sym {
case TSTRING, TARRAY, TSLICE:
n.Right = indexlit(n.Right)
if t.IsString() {
- n.Type = bytetype
+ n.Type = types.Bytetype
} else {
n.Type = t.Elem()
}
l = n.Left
}
t := l.Type
- var tp *Type
+ var tp *types.Type
if t.IsString() {
if hasmax {
yyerror("invalid operation %v (3-index slice of string)", n)
n.Op = OSLICESTR
} else if t.IsPtr() && t.Elem().IsArray() {
tp = t.Elem()
- n.Type = typSlice(tp.Elem())
+ n.Type = types.NewSlice(tp.Elem())
dowidth(n.Type)
if hasmax {
n.Op = OSLICE3ARR
n.Left = defaultlit(n.Left, nil)
l = n.Left
if l.Op == OTYPE {
- if n.Isddd() || l.Type.isDDDArray() {
+ if n.Isddd() || l.Type.IsDDDArray() {
if !l.Type.Broke() {
yyerror("invalid use of ... in type conversion to %v", l.Type)
}
if t.Results().NumFields() == 1 {
n.Type = l.Type.Results().Field(0).Type
- if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Sym.Pkg.isRuntime() && n.Left.Sym.Name == "getg" {
+ if n.Op == OCALLFUNC && n.Left.Op == ONAME && isRuntimePkg(n.Left.Sym.Pkg) && n.Left.Sym.Name == "getg" {
// Emit code for runtime.getg() directly instead of calling function.
// Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
// so that the ordering pass can make sure to preserve the semantics of the original code
// any side effects disappear; ignore init
var r Node
- nodconst(&r, Types[TUINTPTR], evalunsafe(n))
+ nodconst(&r, types.Types[TUINTPTR], evalunsafe(n))
r.Orig = n
n = &r
n.Orig = r
}
- n.Type = Types[cplxsubtype(t.Etype)]
+ n.Type = types.Types[cplxsubtype(t.Etype)]
break OpSwitch
}
case TSTRING:
if Isconst(l, CTSTR) {
var r Node
- nodconst(&r, Types[TINT], int64(len(l.Val().U.(string))))
+ nodconst(&r, types.Types[TINT], int64(len(l.Val().U.(string))))
r.Orig = n
n = &r
}
break
}
var r Node
- nodconst(&r, Types[TINT], t.NumElem())
+ nodconst(&r, types.Types[TINT], t.NumElem())
r.Orig = n
n = &r
}
- n.Type = Types[TINT]
+ n.Type = types.Types[TINT]
break OpSwitch
badcall1:
}
t = n.List.First().Type
- l = t.Field(0).Nname
- r = t.Field(1).Nname
+ l = asNode(t.Field(0).Nname)
+ r = asNode(t.Field(1).Nname)
} else {
if !twoarg(n) {
n.Type = nil
return n
}
- var t *Type
+ var t *types.Type
switch l.Type.Etype {
default:
yyerror("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type)
return n
case TIDEAL:
- t = Types[TIDEAL]
+ t = types.Types[TIDEAL]
case TFLOAT32:
- t = Types[TCOMPLEX64]
+ t = types.Types[TCOMPLEX64]
case TFLOAT64:
- t = Types[TCOMPLEX128]
+ t = types.Types[TCOMPLEX128]
}
if l.Op == OLITERAL && r.Op == OLITERAL {
}
// Unpack multiple-return result before type-checking.
- var funarg *Type
+ var funarg *types.Type
if t.IsFuncArgStruct() {
funarg = t
t = t.Field(0).Type
}
if t.Elem().IsKind(TUINT8) && args.Second().Type.IsString() {
- args.SetSecond(defaultlit(args.Second(), Types[TSTRING]))
+ args.SetSecond(defaultlit(args.Second(), types.Types[TSTRING]))
break OpSwitch
}
n.Left = args.First()
n.Right = args.Second()
n.List.Set(nil)
- n.Type = Types[TINT]
+ n.Type = types.Types[TINT]
n.Left = typecheck(n.Left, Erv)
n.Right = typecheck(n.Right, Erv)
if n.Left.Type == nil || n.Right.Type == nil {
// copy([]byte, string)
if n.Left.Type.IsSlice() && n.Right.Type.IsString() {
- if eqtype(n.Left.Type.Elem(), bytetype) {
+ if eqtype(n.Left.Type.Elem(), types.Bytetype) {
break OpSwitch
}
yyerror("arguments to copy have different element types: %L and string", n.Left.Type)
l = args[i]
i++
l = typecheck(l, Erv)
- l = defaultlit(l, Types[TINT])
+ l = defaultlit(l, types.Types[TINT])
if l.Type == nil {
n.Type = nil
return n
l = args[i]
i++
l = typecheck(l, Erv)
- l = defaultlit(l, Types[TINT])
+ l = defaultlit(l, types.Types[TINT])
if l.Type == nil {
n.Type = nil
return n
}
n.Left = l
- n.Type = typPtr(t)
+ n.Type = types.NewPtr(t)
break OpSwitch
case OPRINT, OPRINTN:
for i1, n1 := range ls {
// Special case for print: int constant is int64, not int.
if Isconst(n1, CTINT) {
- ls[i1] = defaultlit(ls[i1], Types[TINT64])
+ ls[i1] = defaultlit(ls[i1], types.Types[TINT64])
} else {
ls[i1] = defaultlit(ls[i1], nil)
}
return n
}
n.Left = typecheck(n.Left, Erv)
- n.Left = defaultlit(n.Left, Types[TINTER])
+ n.Left = defaultlit(n.Left, types.Types[TINTER])
if n.Left.Type == nil {
n.Type = nil
return n
return n
}
- n.Type = Types[TINTER]
+ n.Type = types.Types[TINTER]
break OpSwitch
case OCLOSURE:
if !t.IsInterface() {
Fatalf("OITAB of %v", t)
}
- n.Type = typPtr(Types[TUINTPTR])
+ n.Type = types.NewPtr(types.Types[TUINTPTR])
break OpSwitch
case OIDATA:
Fatalf("OSPTR of %v", t)
}
if t.IsString() {
- n.Type = typPtr(Types[TUINT8])
+ n.Type = types.NewPtr(types.Types[TUINT8])
} else {
- n.Type = typPtr(t.Elem())
+ n.Type = types.NewPtr(t.Elem())
}
break OpSwitch
case OCFUNC:
ok |= Erv
n.Left = typecheck(n.Left, Erv)
- n.Type = Types[TUINTPTR]
+ n.Type = types.Types[TUINTPTR]
break OpSwitch
case OCONVNOP:
return n
}
-func checksliceindex(l *Node, r *Node, tp *Type) bool {
+func checksliceindex(l *Node, r *Node, tp *types.Type) bool {
t := r.Type
if t == nil {
return false
return true
}
-func lookdot1(errnode *Node, s *Sym, t *Type, fs *Fields, dostrcmp int) *Field {
- var r *Field
+func lookdot1(errnode *Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field {
+ var r *types.Field
for _, f := range fs.Slice() {
if dostrcmp != 0 && f.Sym.Name == s.Name {
return f
return r
}
-func looktypedot(n *Node, t *Type, dostrcmp int) bool {
+func looktypedot(n *Node, t *types.Type, dostrcmp int) bool {
s := n.Sym
if t.IsInterface() {
return true
}
-func derefall(t *Type) *Type {
- for t != nil && t.Etype == Tptr {
+func derefall(t *types.Type) *types.Type {
+ for t != nil && t.Etype == types.Tptr {
t = t.Elem()
}
return t
}
-type typeSym struct {
- t *Type
- s *Sym
+type typeSymKey struct {
+ t *types.Type
+ s *types.Sym
}
-// dotField maps (*Type, *Sym) pairs to the corresponding struct field (*Type with Etype==TFIELD).
+// dotField maps (*types.Type, *types.Sym) pairs to the corresponding struct field (*types.Field).
// It is a cache for use during usefield in walk.go, only enabled when field tracking.
-var dotField = map[typeSym]*Field{}
+var dotField = map[typeSymKey]*types.Field{}
-func lookdot(n *Node, t *Type, dostrcmp int) *Field {
+func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field {
s := n.Sym
dowidth(t)
- var f1 *Field
+ var f1 *types.Field
if t.IsStruct() || t.IsInterface() {
f1 = lookdot1(n, s, t, t.Fields(), dostrcmp)
}
- var f2 *Field
+ var f2 *types.Field
if n.Left.Type == t || n.Left.Type.Sym == nil {
mt := methtype(t)
if mt != nil {
n.Xoffset = f1.Offset
n.Type = f1.Type
if obj.Fieldtrack_enabled > 0 {
- dotField[typeSym{t.Orig, s}] = f1
+ dotField[typeSymKey{t.Orig, s}] = f1
}
if t.IsInterface() {
if n.Left.Type.IsPtr() {
dowidth(tt)
rcvr := f2.Type.Recv().Type
if !eqtype(rcvr, tt) {
- if rcvr.Etype == Tptr && eqtype(rcvr.Elem(), tt) {
+ if rcvr.Etype == types.Tptr && eqtype(rcvr.Elem(), tt) {
checklvalue(n.Left, "call pointer method on")
n.Left = nod(OADDR, n.Left, nil)
n.Left.SetImplicit(true)
n.Left = typecheck(n.Left, Etype|Erv)
- } else if tt.Etype == Tptr && rcvr.Etype != Tptr && eqtype(tt.Elem(), rcvr) {
+ } else if tt.Etype == types.Tptr && rcvr.Etype != types.Tptr && eqtype(tt.Elem(), rcvr) {
n.Left = nod(OIND, n.Left, nil)
n.Left.SetImplicit(true)
n.Left = typecheck(n.Left, Etype|Erv)
- } else if tt.Etype == Tptr && tt.Elem().Etype == Tptr && eqtype(derefall(tt), derefall(rcvr)) {
+ } else if tt.Etype == types.Tptr && tt.Elem().Etype == types.Tptr && eqtype(derefall(tt), derefall(rcvr)) {
yyerror("calling method %v with receiver %L requires explicit dereference", n.Sym, n.Left)
- for tt.Etype == Tptr {
+ for tt.Etype == types.Tptr {
// Stop one level early for method with pointer receiver.
- if rcvr.Etype == Tptr && tt.Elem().Etype != Tptr {
+ if rcvr.Etype == types.Tptr && tt.Elem().Etype != types.Tptr {
break
}
n.Left = nod(OIND, n.Left, nil)
pll = ll
ll = ll.Left
}
- if pll.Implicit() && ll.Type.IsPtr() && ll.Type.Sym != nil && ll.Type.Sym.Def != nil && ll.Type.Sym.Def.Op == OTYPE {
+ if pll.Implicit() && ll.Type.IsPtr() && ll.Type.Sym != nil && asNode(ll.Type.Sym.Def) != nil && asNode(ll.Type.Sym.Def).Op == OTYPE {
// It is invalid to automatically dereference a named pointer type when selecting a method.
// Make n->left == ll to clarify error message.
n.Left = ll
return true
}
-func hasddd(t *Type) bool {
+func hasddd(t *types.Type) bool {
for _, tl := range t.Fields().Slice() {
if tl.Isddd() {
return true
}
// typecheck assignment: type list = expression list
-func typecheckaste(op Op, call *Node, isddd bool, tstruct *Type, nl Nodes, desc func() string) {
- var t *Type
+func typecheckaste(op Op, call *Node, isddd bool, tstruct *types.Type, nl Nodes, desc func() string) {
+ var t *types.Type
var n *Node
var n1 int
var n2 int
goto out
}
-func errorDetails(nl Nodes, tstruct *Type, isddd bool) string {
+func errorDetails(nl Nodes, tstruct *types.Type, isddd bool) string {
// If we don't know any type at a call site, let's suppress any return
// message signatures. See Issue https://golang.org/issues/19012.
if tstruct == nil {
// sigrepr is a type's representation to the outside world,
// in string representations of return signatures
// e.g in error messages about wrong arguments to return.
-func (t *Type) sigrepr() string {
+func sigrepr(t *types.Type) string {
switch t {
default:
return t.String()
- case Types[TIDEAL]:
+ case types.Types[TIDEAL]:
// "untyped number" is not commonly used
// outside of the compiler, so let's use "number".
return "number"
- case idealstring:
+ case types.Idealstring:
return "string"
- case idealbool:
+ case types.Idealbool:
return "bool"
}
}
var typeStrings []string
if nl.Len() == 1 && nl.First().Type != nil && nl.First().Type.IsFuncArgStruct() {
for _, f := range nl.First().Type.Fields().Slice() {
- typeStrings = append(typeStrings, f.Type.sigrepr())
+ typeStrings = append(typeStrings, sigrepr(f.Type))
}
} else {
for _, n := range nl.Slice() {
- typeStrings = append(typeStrings, n.Type.sigrepr())
+ typeStrings = append(typeStrings, sigrepr(n.Type))
}
}
// iscomptype reports whether type t is a composite literal type
// or a pointer to one.
-func iscomptype(t *Type) bool {
+func iscomptype(t *types.Type) bool {
if t.IsPtr() {
t = t.Elem()
}
}
}
-func pushtype(n *Node, t *Type) {
+func pushtype(n *Node, t *types.Type) {
if n == nil || n.Op != OCOMPLIT || !iscomptype(t) {
return
}
}
var length, i int64
- checkBounds := t.IsArray() && !t.isDDDArray()
+ checkBounds := t.IsArray() && !t.IsDDDArray()
nl := n.List.Slice()
for i2, l := range nl {
setlineno(l)
}
}
- if t.isDDDArray() {
+ if t.IsDDDArray() {
t.SetNumElem(length)
}
if t.IsSlice() {
}
}
-func checkassignto(src *Type, dst *Node) {
+func checkassignto(src *types.Type, dst *Node) {
var why string
if assignop(src, dst.Type, &why) == 0 {
}
l := n.List.Second()
if l.Type != nil && !l.Type.IsBoolean() {
- checkassignto(Types[TBOOL], l)
+ checkassignto(types.Types[TBOOL], l)
}
if l.Name != nil && l.Name.Defn == n && l.Name.Param.Ntype == nil {
- l.Type = Types[TBOOL]
+ l.Type = types.Types[TBOOL]
}
goto out
}
return
}
n.Type = t
- t.SetNname(n.Func.Nname)
+ t.FuncType().Nname = asTypesNode(n.Func.Nname)
rcvr := t.Recv()
if rcvr != nil && n.Func.Shortname != nil {
n.Func.Nname.Sym = methodname(n.Func.Shortname, rcvr.Type)
var methodqueue []*Node
func domethod(n *Node) {
- nt := n.Type.Nname()
+ nt := asNode(n.Type.FuncType().Nname)
nt = typecheck(nt, Etype)
if nt.Type == nil {
// type check failed; leave empty func
// TODO(mdempsky): Fix Type rekinding.
n.Type.Etype = TFUNC
- n.Type.nod = nil
+ n.Type.Nod = nil
return
}
// TODO(mdempsky): Fix Type rekinding.
*n.Type = *nt.Type
- n.Type.nod = nil
+ n.Type.Nod = nil
checkwidth(n.Type)
}
// tracks the line numbers at which forward types are first used as map keys
var mapqueue []mapqueueval
-func copytype(n *Node, t *Type) {
+func copytype(n *Node, t *types.Type) {
if t.Etype == TFORW {
// This type isn't computed yet; when it is, update n.
- t.ForwardType().Copyto = append(t.ForwardType().Copyto, n)
+ t.ForwardType().Copyto = append(t.ForwardType().Copyto, asTypesNode(n))
return
}
embedlineno := n.Type.ForwardType().Embedlineno
l := n.Type.ForwardType().Copyto
- ptrTo := n.Type.ptrTo
- sliceOf := n.Type.sliceOf
+ ptrBase := n.Type.PtrBase
+ sliceOf := n.Type.SliceOf
// TODO(mdempsky): Fix Type rekinding.
*n.Type = *t
// to the existing type, but the method set of an interface
// type [...] remains unchanged."
if !t.IsInterface() {
- t.methods = Fields{}
- t.allMethods = Fields{}
+ *t.Methods() = types.Fields{}
+ *t.AllMethods() = types.Fields{}
}
- t.nod = n
+ t.Nod = asTypesNode(n)
t.SetDeferwidth(false)
- t.ptrTo = ptrTo
- t.sliceOf = sliceOf
+ t.PtrBase = ptrBase
+ t.SliceOf = sliceOf
// Propagate go:notinheap pragma from the Name to the Type.
if n.Name != nil && n.Name.Param != nil && n.Name.Param.Pragma&NotInHeap != 0 {
// Update nodes waiting on this type.
for _, n := range l {
- copytype(n, t)
+ copytype(asNode(n), t)
}
// Double-check use of type as embedded type.
}
for _, e := range mapqueue {
lineno = e.lno
- if !e.n.Type.IsComparable() {
+ if !IsComparable(e.n.Type) {
yyerror("invalid map key type %v", e.n.Type)
}
}
n.SetDiag(true)
goto ret
}
- n.Sym.Def = p.Ntype
+ n.Sym.Def = asTypesNode(p.Ntype)
}
break
}
defercheckwidth()
}
n.Walkdef = 1
- n.Type = typ(TFORW)
- n.Type.nod = n
+ n.Type = types.New(TFORW)
+ n.Type.Nod = asTypesNode(n)
n.Type.Sym = n.Sym // TODO(gri) this also happens in typecheckdeftype(n) - where should it happen?
nerrors0 := nerrors
typecheckdeftype(n)
return n
}
-func checkmake(t *Type, arg string, n *Node) bool {
+func checkmake(t *types.Type, arg string, n *Node) bool {
if !n.Type.IsInteger() && n.Type.Etype != TIDEAL {
yyerror("non-integer %s argument in make(%v) - %v", arg, t, n.Type)
return false
}
// defaultlit is necessary for non-constants too: n might be 1.1<<k.
- n = defaultlit(n, Types[TINT])
+ n = defaultlit(n, types.Types[TINT])
return true
}
implicit.SetHasBreak(true)
}
} else {
- lab := n.Left.Sym.Label
+ lab := asNode(n.Left.Sym.Label)
if lab != nil {
lab.SetHasBreak(true)
}
if n.Op == OLABEL && i+1 < len(s) && n.Name.Defn == s[i+1] {
switch n.Name.Defn.Op {
case OFOR, OFORUNTIL, OSWITCH, OTYPESW, OSELECT, ORANGE:
- n.Left.Sym.Label = n.Name.Defn
+ n.Left.Sym.Label = asTypesNode(n.Name.Defn)
markbreak(n.Name.Defn, n.Name.Defn)
n.Left.Sym.Label = nil
i++
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "fmt"
+)
+
+// convenience constants
+//
+// These declarations re-export the types.EType constants into package
+// gc, so existing gc code can keep using the bare names (TINT8, TMAP,
+// ...) without the types. qualifier after the Type/Sym/Field move to
+// cmd/compile/internal/types.
+const (
+	Txxx = types.Txxx
+
+	TINT8    = types.TINT8
+	TUINT8   = types.TUINT8
+	TINT16   = types.TINT16
+	TUINT16  = types.TUINT16
+	TINT32   = types.TINT32
+	TUINT32  = types.TUINT32
+	TINT64   = types.TINT64
+	TUINT64  = types.TUINT64
+	TINT     = types.TINT
+	TUINT    = types.TUINT
+	TUINTPTR = types.TUINTPTR
+
+	TCOMPLEX64  = types.TCOMPLEX64
+	TCOMPLEX128 = types.TCOMPLEX128
+
+	TFLOAT32 = types.TFLOAT32
+	TFLOAT64 = types.TFLOAT64
+
+	TBOOL = types.TBOOL
+
+	TPTR32 = types.TPTR32
+	TPTR64 = types.TPTR64
+
+	TFUNC      = types.TFUNC
+	TSLICE     = types.TSLICE
+	TARRAY     = types.TARRAY
+	TSTRUCT    = types.TSTRUCT
+	TCHAN      = types.TCHAN
+	TMAP       = types.TMAP
+	TINTER     = types.TINTER
+	TFORW      = types.TFORW
+	TANY       = types.TANY
+	TSTRING    = types.TSTRING
+	TUNSAFEPTR = types.TUNSAFEPTR
+
+	// pseudo-types for literals
+	TIDEAL = types.TIDEAL
+	TNIL   = types.TNIL
+	TBLANK = types.TBLANK
+
+	// pseudo-types for frame layout
+	TFUNCARGS = types.TFUNCARGS
+	TCHANARGS = types.TCHANARGS
+
+	// pseudo-types for import/export
+	TDDDFIELD = types.TDDDFIELD // wrapper: contained type is a ... field
+
+	NTYPE = types.NTYPE
+)
+
+// cmpForNe maps the result of a less-than comparison between two values
+// already known to be unequal onto an ssa.Cmp: CMPlt when x is true,
+// CMPgt otherwise. It never returns CMPeq, so callers use it only after
+// establishing inequality.
+func cmpForNe(x bool) ssa.Cmp {
+	if x {
+		return ssa.CMPlt
+	}
+	return ssa.CMPgt
+}
+
+// cmpsym compares the symbols r and s, returning ssa.CMPlt, ssa.CMPeq,
+// or ssa.CMPgt. A nil symbol sorts before a non-nil one. The ordering
+// is chosen for sorting speed rather than readability: name and package
+// prefix lengths are compared before their contents.
+func cmpsym(r, s *types.Sym) ssa.Cmp {
+	if r == s {
+		return ssa.CMPeq
+	}
+	if r == nil {
+		return ssa.CMPlt
+	}
+	if s == nil {
+		return ssa.CMPgt
+	}
+	// Fast sort, not pretty sort
+	if len(r.Name) != len(s.Name) {
+		return cmpForNe(len(r.Name) < len(s.Name))
+	}
+	if r.Pkg != s.Pkg {
+		if len(r.Pkg.Prefix) != len(s.Pkg.Prefix) {
+			return cmpForNe(len(r.Pkg.Prefix) < len(s.Pkg.Prefix))
+		}
+		if r.Pkg.Prefix != s.Pkg.Prefix {
+			return cmpForNe(r.Pkg.Prefix < s.Pkg.Prefix)
+		}
+	}
+	if r.Name != s.Name {
+		return cmpForNe(r.Name < s.Name)
+	}
+	return ssa.CMPeq
+}
+
+// cmptyp compares two *Types t and x, returning ssa.CMPlt,
+// ssa.CMPeq, ssa.CMPgt as t<x, t==x, t>x, for an arbitrary
+// and optimizer-centric notion of comparison.
+func cmptyp(t, x *types.Type) ssa.Cmp {
+	// This follows the structure of eqtype in subr.go
+	// with two exceptions.
+	// 1. Symbols are compared more carefully because a <,=,> result is desired.
+	// 2. Maps are treated specially to avoid endless recursion -- maps
+	//    contain an internal data type not expressible in Go source code.
+	if t == x {
+		return ssa.CMPeq
+	}
+	// nil sorts before any non-nil type.
+	if t == nil {
+		return ssa.CMPlt
+	}
+	if x == nil {
+		return ssa.CMPgt
+	}
+
+	if t.Etype != x.Etype {
+		return cmpForNe(t.Etype < x.Etype)
+	}
+
+	if t.Sym != nil || x.Sym != nil {
+		// Special case: we keep byte and uint8 separate
+		// for error messages. Treat them as equal.
+		switch t.Etype {
+		case TUINT8:
+			if (t == types.Types[TUINT8] || t == types.Bytetype) && (x == types.Types[TUINT8] || x == types.Bytetype) {
+				return ssa.CMPeq
+			}
+
+		case TINT32:
+			if (t == types.Types[types.Runetype.Etype] || t == types.Runetype) && (x == types.Types[types.Runetype.Etype] || x == types.Runetype) {
+				return ssa.CMPeq
+			}
+		}
+	}
+
+	if c := cmpsym(t.Sym, x.Sym); c != ssa.CMPeq {
+		return c
+	}
+
+	if x.Sym != nil {
+		// Syms non-nil, if vargens match then equal.
+		if t.Vargen != x.Vargen {
+			return cmpForNe(t.Vargen < x.Vargen)
+		}
+		return ssa.CMPeq
+	}
+	// both syms nil, look at structure below.
+
+	// Both syms nil and etypes equal: basic types compare equal by kind alone.
+	switch t.Etype {
+	case TBOOL, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TUNSAFEPTR, TUINTPTR,
+		TINT8, TINT16, TINT32, TINT64, TINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINT:
+		return ssa.CMPeq
+	}
+
+	// Composite types: compare structurally, recursing on element types.
+	switch t.Etype {
+	case TMAP:
+		if c := cmptyp(t.Key(), x.Key()); c != ssa.CMPeq {
+			return c
+		}
+		return cmptyp(t.Val(), x.Val())
+
+	case TPTR32, TPTR64, TSLICE:
+		// No special cases for these, they are handled
+		// by the general code after the switch.
+
+	case TSTRUCT:
+		if t.StructType().Map == nil {
+			if x.StructType().Map != nil {
+				return ssa.CMPlt // nil < non-nil
+			}
+			// to the fallthrough
+		} else if x.StructType().Map == nil {
+			return ssa.CMPgt // nil > non-nil
+		} else if t.StructType().Map.MapType().Bucket == t {
+			// Both have non-nil Map
+			// Special case for Maps which include a recursive type where the recursion is not broken with a named type
+			if x.StructType().Map.MapType().Bucket != x {
+				return ssa.CMPlt // bucket maps are least
+			}
+			return cmptyp(t.StructType().Map, x.StructType().Map)
+		} else if x.StructType().Map.MapType().Bucket == x {
+			return ssa.CMPgt // bucket maps are least
+		} // If t != t.Map.Bucket, fall through to general case
+
+		fallthrough
+	case TINTER:
+		t1, ti := types.IterFields(t)
+		x1, xi := types.IterFields(x)
+		for ; t1 != nil && x1 != nil; t1, x1 = ti.Next(), xi.Next() {
+			if t1.Embedded != x1.Embedded {
+				return cmpForNe(t1.Embedded < x1.Embedded)
+			}
+			if t1.Note != x1.Note {
+				return cmpForNe(t1.Note < x1.Note)
+			}
+			if c := cmpsym(t1.Sym, x1.Sym); c != ssa.CMPeq {
+				return c
+			}
+			if c := cmptyp(t1.Type, x1.Type); c != ssa.CMPeq {
+				return c
+			}
+		}
+		// Shorter field list sorts first.
+		if t1 != x1 {
+			return cmpForNe(t1 == nil)
+		}
+		return ssa.CMPeq
+
+	case TFUNC:
+		for _, f := range types.RecvsParamsResults {
+			// Loop over fields in structs, ignoring argument names.
+			ta, ia := types.IterFields(f(t))
+			tb, ib := types.IterFields(f(x))
+			for ; ta != nil && tb != nil; ta, tb = ia.Next(), ib.Next() {
+				if ta.Isddd() != tb.Isddd() {
+					return cmpForNe(!ta.Isddd())
+				}
+				if c := cmptyp(ta.Type, tb.Type); c != ssa.CMPeq {
+					return c
+				}
+			}
+			if ta != tb {
+				return cmpForNe(ta == nil)
+			}
+		}
+		return ssa.CMPeq
+
+	case TARRAY:
+		if t.NumElem() != x.NumElem() {
+			return cmpForNe(t.NumElem() < x.NumElem())
+		}
+
+	case TCHAN:
+		if t.ChanDir() != x.ChanDir() {
+			return cmpForNe(t.ChanDir() < x.ChanDir())
+		}
+
+	default:
+		e := fmt.Sprintf("Do not know how to compare %v with %v", t, x)
+		panic(e)
+	}
+
+	// Common element type comparison for TARRAY, TCHAN, TPTR32, TPTR64, and TSLICE.
+	return cmptyp(t.Elem(), x.Elem())
+}
--- /dev/null
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements conversions between *types.Node and *Node.
+// TODO(gri) try to eliminate these soon
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "unsafe"
+)
+
+// asNode converts the opaque *types.Node handle back into the gc *Node
+// it was created from. The conversion is a pointer reinterpretation via
+// unsafe.Pointer, so it is only valid for values originally produced by
+// asTypesNode.
+func asNode(n *types.Node) *Node      { return (*Node)(unsafe.Pointer(n)) }
+
+// asTypesNode wraps a gc *Node as a *types.Node so it can be stored in
+// package types (e.g. in Sym.Def or Type.Nod) without package types
+// depending on gc. Recover the original with asNode.
+func asTypesNode(n *Node) *types.Node { return (*types.Node)(unsafe.Pointer(n)) }
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// TODO(gri) This file should probably become part of package types.
+
package gc
+import "cmd/compile/internal/types"
+
// builtinpkg is a fake package that declares the universe block.
-var builtinpkg *Pkg
+var builtinpkg *types.Pkg
-var itable *Type // distinguished *byte
+var itable *types.Type // distinguished *byte
var basicTypes = [...]struct {
name string
- etype EType
+ etype types.EType
}{
{"int8", TINT8},
{"int16", TINT16},
var typedefs = [...]struct {
name string
- etype EType
+ etype types.EType
width *int
- sameas32 EType
- sameas64 EType
+ sameas32 types.EType
+ sameas64 types.EType
}{
{"int", TINT, &Widthint, TINT32, TINT64},
{"uint", TUINT, &Widthint, TUINT32, TUINT64},
func lexinit() {
for _, s := range basicTypes {
etype := s.etype
- if int(etype) >= len(Types) {
+ if int(etype) >= len(types.Types) {
Fatalf("lexinit: %s bad etype", s.name)
}
s2 := builtinpkg.Lookup(s.name)
- t := Types[etype]
+ t := types.Types[etype]
if t == nil {
- t = typ(etype)
+ t = types.New(etype)
t.Sym = s2
if etype != TANY && etype != TSTRING {
dowidth(t)
}
- Types[etype] = t
+ types.Types[etype] = t
}
- s2.Def = typenod(t)
- s2.Def.Name = new(Name)
+ s2.Def = asTypesNode(typenod(t))
+ asNode(s2.Def).Name = new(Name)
}
for _, s := range builtinFuncs {
// TODO(marvin): Fix Node.EType type union.
s2 := builtinpkg.Lookup(s.name)
- s2.Def = newname(s2)
- s2.Def.Etype = EType(s.op)
+ s2.Def = asTypesNode(newname(s2))
+ asNode(s2.Def).Etype = types.EType(s.op)
}
for _, s := range unsafeFuncs {
s2 := unsafepkg.Lookup(s.name)
- s2.Def = newname(s2)
- s2.Def.Etype = EType(s.op)
+ s2.Def = asTypesNode(newname(s2))
+ asNode(s2.Def).Etype = types.EType(s.op)
}
- idealstring = typ(TSTRING)
- idealbool = typ(TBOOL)
- Types[TANY] = typ(TANY)
+ types.Idealstring = types.New(TSTRING)
+ types.Idealbool = types.New(TBOOL)
+ types.Types[TANY] = types.New(TANY)
s := builtinpkg.Lookup("true")
- s.Def = nodbool(true)
- s.Def.Sym = lookup("true")
- s.Def.Name = new(Name)
- s.Def.Type = idealbool
+ s.Def = asTypesNode(nodbool(true))
+ asNode(s.Def).Sym = lookup("true")
+ asNode(s.Def).Name = new(Name)
+ asNode(s.Def).Type = types.Idealbool
s = builtinpkg.Lookup("false")
- s.Def = nodbool(false)
- s.Def.Sym = lookup("false")
- s.Def.Name = new(Name)
- s.Def.Type = idealbool
+ s.Def = asTypesNode(nodbool(false))
+ asNode(s.Def).Sym = lookup("false")
+ asNode(s.Def).Name = new(Name)
+ asNode(s.Def).Type = types.Idealbool
s = lookup("_")
s.Block = -100
- s.Def = newname(s)
- Types[TBLANK] = typ(TBLANK)
- s.Def.Type = Types[TBLANK]
- nblank = s.Def
+ s.Def = asTypesNode(newname(s))
+ types.Types[TBLANK] = types.New(TBLANK)
+ asNode(s.Def).Type = types.Types[TBLANK]
+ nblank = asNode(s.Def)
s = builtinpkg.Lookup("_")
s.Block = -100
- s.Def = newname(s)
- Types[TBLANK] = typ(TBLANK)
- s.Def.Type = Types[TBLANK]
+ s.Def = asTypesNode(newname(s))
+ types.Types[TBLANK] = types.New(TBLANK)
+ asNode(s.Def).Type = types.Types[TBLANK]
- Types[TNIL] = typ(TNIL)
+ types.Types[TNIL] = types.New(TNIL)
s = builtinpkg.Lookup("nil")
var v Val
v.U = new(NilVal)
- s.Def = nodlit(v)
- s.Def.Sym = s
- s.Def.Name = new(Name)
+ s.Def = asTypesNode(nodlit(v))
+ asNode(s.Def).Sym = s
+ asNode(s.Def).Name = new(Name)
s = builtinpkg.Lookup("iota")
- s.Def = nod(OIOTA, nil, nil)
- s.Def.Sym = s
- s.Def.Name = new(Name)
+ s.Def = asTypesNode(nod(OIOTA, nil, nil))
+ asNode(s.Def).Sym = s
+ asNode(s.Def).Name = new(Name)
}
func typeinit() {
Fatalf("typeinit before betypeinit")
}
- for et := EType(0); et < NTYPE; et++ {
+ for et := types.EType(0); et < NTYPE; et++ {
simtype[et] = et
}
- Types[TPTR32] = typ(TPTR32)
- dowidth(Types[TPTR32])
+ types.Types[TPTR32] = types.New(TPTR32)
+ dowidth(types.Types[TPTR32])
- Types[TPTR64] = typ(TPTR64)
- dowidth(Types[TPTR64])
+ types.Types[TPTR64] = types.New(TPTR64)
+ dowidth(types.Types[TPTR64])
- t := typ(TUNSAFEPTR)
- Types[TUNSAFEPTR] = t
+ t := types.New(TUNSAFEPTR)
+ types.Types[TUNSAFEPTR] = t
t.Sym = unsafepkg.Lookup("Pointer")
- t.Sym.Def = typenod(t)
- t.Sym.Def.Name = new(Name)
- dowidth(Types[TUNSAFEPTR])
+ t.Sym.Def = asTypesNode(typenod(t))
+ asNode(t.Sym.Def).Name = new(Name)
+ dowidth(types.Types[TUNSAFEPTR])
- Tptr = TPTR32
+ types.Tptr = TPTR32
if Widthptr == 8 {
- Tptr = TPTR64
+ types.Tptr = TPTR64
}
for et := TINT8; et <= TUINT64; et++ {
isforw[TFORW] = true
// initialize okfor
- for et := EType(0); et < NTYPE; et++ {
+ for et := types.EType(0); et < NTYPE; et++ {
if isInt[et] || et == TIDEAL {
okforeq[et] = true
okforcmp[et] = true
minfltval[TCOMPLEX128] = minfltval[TFLOAT64]
// for walk to use in error messages
- Types[TFUNC] = functype(nil, nil, nil)
+ types.Types[TFUNC] = functype(nil, nil, nil)
// types used in front end
- // types[TNIL] got set early in lexinit
- Types[TIDEAL] = typ(TIDEAL)
+ // types.Types[TNIL] got set early in lexinit
+ types.Types[TIDEAL] = types.New(TIDEAL)
- Types[TINTER] = typ(TINTER)
+ types.Types[TINTER] = types.New(TINTER)
// simple aliases
- simtype[TMAP] = Tptr
+ simtype[TMAP] = types.Tptr
- simtype[TCHAN] = Tptr
- simtype[TFUNC] = Tptr
- simtype[TUNSAFEPTR] = Tptr
+ simtype[TCHAN] = types.Tptr
+ simtype[TFUNC] = types.Tptr
+ simtype[TUNSAFEPTR] = types.Tptr
array_array = int(Rnd(0, int64(Widthptr)))
array_nel = int(Rnd(int64(array_array)+int64(Widthptr), int64(Widthint)))
// string is same as slice wo the cap
sizeof_String = int(Rnd(int64(array_nel)+int64(Widthint), int64(Widthptr)))
- dowidth(Types[TSTRING])
- dowidth(idealstring)
+ dowidth(types.Types[TSTRING])
+ dowidth(types.Idealstring)
- itable = typPtr(Types[TUINT8])
+ itable = types.NewPtr(types.Types[TUINT8])
}
-func makeErrorInterface() *Type {
- field := newField()
- field.Type = Types[TSTRING]
- f := functypefield(fakethisfield(), nil, []*Field{field})
+func makeErrorInterface() *types.Type {
+ field := types.NewField()
+ field.Type = types.Types[TSTRING]
+ f := functypefield(fakethisfield(), nil, []*types.Field{field})
- field = newField()
+ field = types.NewField()
field.Sym = lookup("Error")
field.Type = f
- t := typ(TINTER)
- t.SetInterface([]*Field{field})
+ t := types.New(TINTER)
+ t.SetInterface([]*types.Field{field})
return t
}
func lexinit1() {
// error type
s := builtinpkg.Lookup("error")
- errortype = makeErrorInterface()
- errortype.Sym = s
+ types.Errortype = makeErrorInterface()
+ types.Errortype.Sym = s
// TODO: If we can prove that it's safe to set errortype.Orig here
// than we don't need the special errortype/errorInterface case in
// bexport.go. See also issue #15920.
// errortype.Orig = makeErrorInterface()
- s.Def = typenod(errortype)
+ s.Def = asTypesNode(typenod(types.Errortype))
// We create separate byte and rune types for better error messages
- // rather than just creating type alias *Sym's for the uint8 and
+ // rather than just creating type alias *types.Sym's for the uint8 and
// int32 types. Hence, (bytetype|runtype).Sym.isAlias() is false.
// TODO(gri) Should we get rid of this special case (at the cost
// of less informative error messages involving bytes and runes)?
// byte alias
s = builtinpkg.Lookup("byte")
- bytetype = typ(TUINT8)
- bytetype.Sym = s
- s.Def = typenod(bytetype)
- s.Def.Name = new(Name)
+ types.Bytetype = types.New(TUINT8)
+ types.Bytetype.Sym = s
+ s.Def = asTypesNode(typenod(types.Bytetype))
+ asNode(s.Def).Name = new(Name)
// rune alias
s = builtinpkg.Lookup("rune")
- runetype = typ(TINT32)
- runetype.Sym = s
- s.Def = typenod(runetype)
- s.Def.Name = new(Name)
+ types.Runetype = types.New(TINT32)
+ types.Runetype.Sym = s
+ s.Def = asTypesNode(typenod(types.Runetype))
+ asNode(s.Def).Name = new(Name)
// backend-dependent builtin types (e.g. int).
for _, s := range typedefs {
minintval[s.etype] = minintval[sameas]
maxintval[s.etype] = maxintval[sameas]
- t := typ(s.etype)
+ t := types.New(s.etype)
t.Sym = s1
- Types[s.etype] = t
- s1.Def = typenod(t)
- s1.Def.Name = new(Name)
+ types.Types[s.etype] = t
+ s1.Def = asTypesNode(typenod(t))
+ asNode(s1.Def).Name = new(Name)
s1.Origpkg = builtinpkg
dowidth(t)
}
nodfp = newname(lookup(".fp"))
- nodfp.Type = Types[TINT32]
+ nodfp.Type = types.Types[TINT32]
nodfp.Class = PPARAM
nodfp.SetUsed(true)
}
package gc
import (
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/sys"
"fmt"
// Build name of function for interface conversion.
// Not all names are possible
// (e.g., we'll never generate convE2E or convE2I or convI2E).
-func convFuncName(from, to *Type) string {
- tkind := to.iet()
- switch from.iet() {
+func convFuncName(from, to *types.Type) string {
+ tkind := to.Tie()
+ switch from.Tie() {
case 'I':
switch tkind {
case 'I':
switch {
case from.Size() == 2 && from.Align == 2:
return "convT2E16"
- case from.Size() == 4 && from.Align == 4 && !haspointers(from):
+ case from.Size() == 4 && from.Align == 4 && !types.Haspointers(from):
return "convT2E32"
- case from.Size() == 8 && from.Align == Types[TUINT64].Align && !haspointers(from):
+ case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !types.Haspointers(from):
return "convT2E64"
case from.IsString():
return "convT2Estring"
case from.IsSlice():
return "convT2Eslice"
- case !haspointers(from):
+ case !types.Haspointers(from):
return "convT2Enoptr"
}
return "convT2E"
switch {
case from.Size() == 2 && from.Align == 2:
return "convT2I16"
- case from.Size() == 4 && from.Align == 4 && !haspointers(from):
+ case from.Size() == 4 && from.Align == 4 && !types.Haspointers(from):
return "convT2I32"
- case from.Size() == 8 && from.Align == Types[TUINT64].Align && !haspointers(from):
+ case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !types.Haspointers(from):
return "convT2I64"
case from.IsString():
return "convT2Istring"
case from.IsSlice():
return "convT2Islice"
- case !haspointers(from):
+ case !types.Haspointers(from):
return "convT2Inoptr"
}
return "convT2I"
}
}
- Fatalf("unknown conv func %c2%c", from.iet(), to.iet())
+ Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie())
panic("unreachable")
}
// don't generate a = *var if a is _
if !isblank(a) {
- var_ := temp(typPtr(t.Val()))
+ var_ := temp(types.NewPtr(t.Val()))
var_.Typecheck = 1
var_.SetNonNil(true) // mapaccess always returns a non-nil pointer
n.List.SetFirst(var_)
if staticbytes == nil {
staticbytes = newname(Runtimepkg.Lookup("staticbytes"))
staticbytes.Class = PEXTERN
- staticbytes.Type = typArray(Types[TUINT8], 256)
+ staticbytes.Type = types.NewArray(types.Types[TUINT8], 256)
zerobase = newname(Runtimepkg.Lookup("zerobase"))
zerobase.Class = PEXTERN
- zerobase.Type = Types[TUINTPTR]
+ zerobase.Type = types.Types[TUINTPTR]
}
// Optimize convT2{E,I} for many cases in which T is not pointer-shaped,
init.Append(nod(OAS, c, n.Left))
// Get the itab out of the interface.
- tmp := temp(typPtr(Types[TUINT8]))
+ tmp := temp(types.NewPtr(types.Types[TUINT8]))
init.Append(nod(OAS, tmp, typecheck(nod(OITAB, c, nil), Erv)))
// Get the type out of the itab.
init.Append(nif)
// Build the result.
- e := nod(OEFACE, tmp, ifaceData(c, typPtr(Types[TUINT8])))
+ e := nod(OEFACE, tmp, ifaceData(c, types.NewPtr(types.Types[TUINT8])))
e.Type = n.Type // assign type manually, typecheck doesn't understand OEFACE.
e.Typecheck = 1
n = e
if thearch.LinkArch.Family == sys.ARM || thearch.LinkArch.Family == sys.MIPS {
if n.Left.Type.IsFloat() {
if n.Type.Etype == TINT64 {
- n = mkcall("float64toint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
+ n = mkcall("float64toint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64]))
break
}
if n.Type.Etype == TUINT64 {
- n = mkcall("float64touint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
+ n = mkcall("float64touint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64]))
break
}
}
if n.Type.IsFloat() {
if n.Left.Type.Etype == TINT64 {
- n = conv(mkcall("int64tofloat64", Types[TFLOAT64], init, conv(n.Left, Types[TINT64])), n.Type)
+ n = conv(mkcall("int64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TINT64])), n.Type)
break
}
if n.Left.Type.Etype == TUINT64 {
- n = conv(mkcall("uint64tofloat64", Types[TFLOAT64], init, conv(n.Left, Types[TUINT64])), n.Type)
+ n = conv(mkcall("uint64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TUINT64])), n.Type)
break
}
}
if thearch.LinkArch.Family == sys.I386 {
if n.Left.Type.IsFloat() {
if n.Type.Etype == TINT64 {
- n = mkcall("float64toint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
+ n = mkcall("float64toint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64]))
break
}
if n.Type.Etype == TUINT64 {
- n = mkcall("float64touint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
+ n = mkcall("float64touint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64]))
break
}
if n.Type.Etype == TUINT32 || n.Type.Etype == TUINT || n.Type.Etype == TUINTPTR {
- n = mkcall("float64touint32", n.Type, init, conv(n.Left, Types[TFLOAT64]))
+ n = mkcall("float64touint32", n.Type, init, conv(n.Left, types.Types[TFLOAT64]))
break
}
}
if n.Type.IsFloat() {
if n.Left.Type.Etype == TINT64 {
- n = conv(mkcall("int64tofloat64", Types[TFLOAT64], init, conv(n.Left, Types[TINT64])), n.Type)
+ n = conv(mkcall("int64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TINT64])), n.Type)
break
}
if n.Left.Type.Etype == TUINT64 {
- n = conv(mkcall("uint64tofloat64", Types[TFLOAT64], init, conv(n.Left, Types[TUINT64])), n.Type)
+ n = conv(mkcall("uint64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TUINT64])), n.Type)
break
}
if n.Left.Type.Etype == TUINT32 || n.Left.Type.Etype == TUINT || n.Left.Type.Etype == TUINTPTR {
- n = conv(mkcall("uint32tofloat64", Types[TFLOAT64], init, conv(n.Left, Types[TUINT32])), n.Type)
+ n = conv(mkcall("uint32tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TUINT32])), n.Type)
break
}
}
if isComplex[et] && n.Op == ODIV {
t := n.Type
- n = mkcall("complex128div", Types[TCOMPLEX128], init, conv(n.Left, Types[TCOMPLEX128]), conv(n.Right, Types[TCOMPLEX128]))
+ n = mkcall("complex128div", types.Types[TCOMPLEX128], init, conv(n.Left, types.Types[TCOMPLEX128]), conv(n.Right, types.Types[TCOMPLEX128]))
n = conv(n, t)
break
}
} else {
fn += "mod"
}
- n = mkcall(fn, n.Type, init, conv(n.Left, Types[et]), conv(n.Right, Types[et]))
+ n = mkcall(fn, n.Type, init, conv(n.Left, types.Types[et]), conv(n.Right, types.Types[et]))
}
case OINDEX:
}
if w := t.Val().Width; w <= 1024 { // 1024 must match ../../../../runtime/hashmap.go:maxZero
- n = mkcall1(mapfn(mapaccess1[fast], t), typPtr(t.Val()), init, typename(t), map_, key)
+ n = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Val()), init, typename(t), map_, key)
} else {
z := zeroaddr(w)
- n = mkcall1(mapfn("mapaccess1_fat", t), typPtr(t.Val()), init, typename(t), map_, key, z)
+ n = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Val()), init, typename(t), map_, key, z)
}
}
- n.Type = typPtr(t.Val())
+ n.Type = types.NewPtr(t.Val())
n.SetNonNil(true) // mapaccess1* and mapassign always return non-nil pointers.
n = nod(OIND, n, nil)
n.Type = t.Val()
n.Left = cheapexpr(n.Left, init)
n.Right = cheapexpr(n.Right, init)
- r = mkcall("eqstring", Types[TBOOL], init, conv(n.Left, Types[TSTRING]), conv(n.Right, Types[TSTRING]))
+ r = mkcall("eqstring", types.Types[TBOOL], init, conv(n.Left, types.Types[TSTRING]), conv(n.Right, types.Types[TSTRING]))
// quick check of len before full compare for == or !=
// eqstring assumes that the lengths are equal
r = walkexpr(r, nil)
} else {
// sys_cmpstring(s1, s2) :: 0
- r = mkcall("cmpstring", Types[TINT], init, conv(n.Left, Types[TSTRING]), conv(n.Right, Types[TSTRING]))
+ r = mkcall("cmpstring", types.Types[TINT], init, conv(n.Left, types.Types[TSTRING]), conv(n.Right, types.Types[TSTRING]))
// TODO(marvin): Fix Node.EType type union.
r = nod(Op(n.Etype), r, nodintconst(0))
}
n = mkcall1(fn, nil, init, n.Left)
case OMAKECHAN:
- n = mkcall1(chanfn("makechan", 1, n.Type), n.Type, init, typename(n.Type), conv(n.Left, Types[TINT64]))
+ n = mkcall1(chanfn("makechan", 1, n.Type), n.Type, init, typename(n.Type), conv(n.Left, types.Types[TINT64]))
case OMAKEMAP:
t := n.Type
fn := syslook("makemap")
fn = substArgTypes(fn, hmap(t), mapbucket(t), t.Key(), t.Val())
- n = mkcall1(fn, n.Type, init, typename(n.Type), conv(n.Left, Types[TINT64]), a, r)
+ n = mkcall1(fn, n.Type, init, typename(n.Type), conv(n.Left, types.Types[TINT64]), a, r)
case OMAKESLICE:
l := n.Left
}
// var arr [r]T
// n = arr[:l]
- t = typArray(t.Elem(), nonnegintconst(r)) // [r]T
+ t = types.NewArray(t.Elem(), nonnegintconst(r)) // [r]T
var_ := temp(t)
a := nod(OAS, var_, nil) // zero temp
a = typecheck(a, Etop)
len, cap := l, r
fnname := "makeslice64"
- argtype := Types[TINT64]
+ argtype := types.Types[TINT64]
// typechecking guarantees that TIDEAL len/cap are positive and fit in an int.
// The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
if (len.Type.IsKind(TIDEAL) || maxintval[len.Type.Etype].Cmp(maxintval[TUINT]) <= 0) &&
(cap.Type.IsKind(TIDEAL) || maxintval[cap.Type.Etype].Cmp(maxintval[TUINT]) <= 0) {
fnname = "makeslice"
- argtype = Types[TINT]
+ argtype = types.Types[TINT]
}
fn := syslook(fnname)
case ORUNESTR:
a := nodnil()
if n.Esc == EscNone {
- t := typArray(Types[TUINT8], 4)
+ t := types.NewArray(types.Types[TUINT8], 4)
var_ := temp(t)
a = nod(OADDR, var_, nil)
}
// intstring(*[4]byte, rune)
- n = mkcall("intstring", n.Type, init, a, conv(n.Left, Types[TINT64]))
+ n = mkcall("intstring", n.Type, init, a, conv(n.Left, types.Types[TINT64]))
case OARRAYBYTESTR:
a := nodnil()
if n.Esc == EscNone {
// Create temporary buffer for string on stack.
- t := typArray(Types[TUINT8], tmpstringbufsize)
+ t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
a = nod(OADDR, temp(t), nil)
}
if n.Esc == EscNone {
// Create temporary buffer for string on stack.
- t := typArray(Types[TUINT8], tmpstringbufsize)
+ t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
a = nod(OADDR, temp(t), nil)
}
if n.Esc == EscNone {
// Create temporary buffer for slice on stack.
- t := typArray(Types[TUINT8], tmpstringbufsize)
+ t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
a = nod(OADDR, temp(t), nil)
}
- n = mkcall("stringtoslicebyte", n.Type, init, a, conv(n.Left, Types[TSTRING]))
+ n = mkcall("stringtoslicebyte", n.Type, init, a, conv(n.Left, types.Types[TSTRING]))
case OSTRARRAYBYTETMP:
// []byte(string) conversion that creates a slice
if n.Esc == EscNone {
// Create temporary buffer for slice on stack.
- t := typArray(Types[TINT32], tmpstringbufsize)
+ t := types.NewArray(types.Types[TINT32], tmpstringbufsize)
a = nod(OADDR, temp(t), nil)
}
rt := nod(OITAB, n.Right, nil)
ld := nod(OIDATA, n.Left, nil)
rd := nod(OIDATA, n.Right, nil)
- ld.Type = Types[TUNSAFEPTR]
- rd.Type = Types[TUNSAFEPTR]
+ ld.Type = types.Types[TUNSAFEPTR]
+ rd.Type = types.Types[TUNSAFEPTR]
ld.Typecheck = 1
rd.Typecheck = 1
call := mkcall1(fn, n.Type, init, lt, ld, rd)
// return 1 if this implies a function call
// evaluating the lv or a function call
// in the conversion of the types
-func fncall(l *Node, rt *Type) bool {
+func fncall(l *Node, rt *types.Type) bool {
if l.HasCall() || l.Op == OINDEXMAP {
return true
}
// check assign type list to
// an expression list. called in
// expr-list = func()
-func ascompatet(op Op, nl Nodes, nr *Type) []*Node {
+func ascompatet(op Op, nl Nodes, nr *types.Type) []*Node {
if nl.Len() != nr.NumFields() {
Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields())
}
}
// package all the arguments that match a ... T parameter into a []T.
-func mkdotargslice(typ *Type, args []*Node, init *Nodes, ddd *Node) *Node {
+func mkdotargslice(typ *types.Type, args []*Node, init *Nodes, ddd *Node) *Node {
esc := uint16(EscUnknown)
if ddd != nil {
esc = ddd.Esc
// a type list. called in
// return expr-list
// func(expr-list)
-func ascompatte(call *Node, isddd bool, lhs *Type, rhs []*Node, fp int, init *Nodes) []*Node {
+func ascompatte(call *Node, isddd bool, lhs *types.Type, rhs []*Node, fp int, init *Nodes) []*Node {
var nn []*Node
// f(g()) where g has multiple return values
var r *Node
var n *Node
var on *Node
- var t *Type
- var et EType
+ var t *types.Type
+ var et types.EType
op := nn.Op
all := nn.List
if n.Op == OLITERAL {
switch n.Val().Ctype() {
case CTRUNE:
- n = defaultlit(n, runetype)
+ n = defaultlit(n, types.Runetype)
case CTINT:
- n = defaultlit(n, Types[TINT64])
+ n = defaultlit(n, types.Types[TINT64])
case CTFLT:
- n = defaultlit(n, Types[TFLOAT64])
+ n = defaultlit(n, types.Types[TFLOAT64])
}
}
if n.Op != OLITERAL && n.Type != nil && n.Type.Etype == TIDEAL {
- n = defaultlit(n, Types[TINT64])
+ n = defaultlit(n, types.Types[TINT64])
}
n = defaultlit(n, nil)
all.SetIndex(i1, n)
on = substArgTypes(on, n.Type) // any-1
} else if isInt[et] {
if et == TUINT64 {
- if t.Sym.Pkg.isRuntime() && t.Sym.Name == "hex" {
+ if isRuntimePkg(t.Sym.Pkg) && t.Sym.Name == "hex" {
on = syslook("printhex")
} else {
on = syslook("printuint")
return r
}
-func callnew(t *Type) *Node {
+func callnew(t *types.Type) *Node {
if t.NotInHeap() {
yyerror("%v is go:notinheap; heap allocation disallowed", t)
}
dowidth(t)
fn := syslook("newobject")
fn = substArgTypes(fn, t)
- v := mkcall1(fn, typPtr(t), nil, typename(t))
+ v := mkcall1(fn, types.NewPtr(t), nil, typename(t))
v.SetNonNil(true)
return v
}
// isReflectHeaderDataField reports whether l is an expression p.Data
// where p has type reflect.SliceHeader or reflect.StringHeader.
func isReflectHeaderDataField(l *Node) bool {
- if l.Type != Types[TUINTPTR] {
+ if l.Type != types.Types[TUINTPTR] {
return false
}
- var tsym *Sym
+ var tsym *types.Sym
switch l.Op {
case ODOT:
tsym = l.Left.Type.Sym
// No write barrier for write of non-pointers.
dowidth(l.Type)
- if !haspointers(l.Type) {
+ if !types.Haspointers(l.Type) {
return false
}
n.Typecheck = 1
- var lt *Type
- var rt *Type
+ var lt *types.Type
+ var rt *types.Type
if n.Left == nil || n.Right == nil {
goto out
}
// paramstoheap returns code to allocate memory for heap-escaped parameters
// and to copy non-result parameters' values from the stack.
-func paramstoheap(params *Type) []*Node {
+func paramstoheap(params *types.Type) []*Node {
var nn []*Node
for _, t := range params.Fields().Slice() {
- v := t.Nname
+ v := asNode(t.Nname)
if v != nil && v.Sym != nil && strings.HasPrefix(v.Sym.Name, "~r") { // unnamed result
v = nil
}
lno := lineno
lineno = Curfn.Pos
for _, f := range Curfn.Type.Results().Fields().Slice() {
- if v := f.Nname; v != nil && v.Name.Param.Heapaddr != nil {
+ if v := asNode(f.Nname); v != nil && v.Name.Param.Heapaddr != nil {
// The local which points to the return value is the
// thing that needs zeroing. This is already handled
// by a Needzero annotation in plive.go:livenessepilogue.
// returnsfromheap returns code to copy values for heap-escaped parameters
// back to the stack.
-func returnsfromheap(params *Type) []*Node {
+func returnsfromheap(params *types.Type) []*Node {
var nn []*Node
for _, t := range params.Fields().Slice() {
- v := t.Nname
+ v := asNode(t.Nname)
if v == nil {
continue
}
lineno = lno
}
-func vmkcall(fn *Node, t *Type, init *Nodes, va []*Node) *Node {
+func vmkcall(fn *Node, t *types.Type, init *Nodes, va []*Node) *Node {
if fn.Type == nil || fn.Type.Etype != TFUNC {
Fatalf("mkcall %v %v", fn, fn.Type)
}
return r
}
-func mkcall(name string, t *Type, init *Nodes, args ...*Node) *Node {
+func mkcall(name string, t *types.Type, init *Nodes, args ...*Node) *Node {
return vmkcall(syslook(name), t, init, args)
}
-func mkcall1(fn *Node, t *Type, init *Nodes, args ...*Node) *Node {
+func mkcall1(fn *Node, t *types.Type, init *Nodes, args ...*Node) *Node {
return vmkcall(fn, t, init, args)
}
-func conv(n *Node, t *Type) *Node {
+func conv(n *Node, t *types.Type) *Node {
if eqtype(n.Type, t) {
return n
}
// We cannot use conv, because we allow converting bool to uint8 here,
// which is forbidden in user code.
func byteindex(n *Node) *Node {
- if eqtype(n.Type, Types[TUINT8]) {
+ if eqtype(n.Type, types.Types[TUINT8]) {
return n
}
n = nod(OCONV, n, nil)
- n.Type = Types[TUINT8]
+ n.Type = types.Types[TUINT8]
n.Typecheck = 1
return n
}
-func chanfn(name string, n int, t *Type) *Node {
+func chanfn(name string, n int, t *types.Type) *Node {
if !t.IsChan() {
Fatalf("chanfn %v", t)
}
return fn
}
-func mapfn(name string, t *Type) *Node {
+func mapfn(name string, t *types.Type) *Node {
if !t.IsMap() {
Fatalf("mapfn %v", t)
}
return fn
}
-func mapfndel(name string, t *Type) *Node {
+func mapfndel(name string, t *types.Type) *Node {
if !t.IsMap() {
Fatalf("mapfn %v", t)
}
var mapassign mapnames = mkmapnames("mapassign")
var mapdelete mapnames = mkmapnames("mapdelete")
-func mapfast(t *Type) int {
+func mapfast(t *types.Type) int {
// Check ../../runtime/hashmap.go:maxValueSize before changing.
if t.Val().Width > 128 {
return mapslow
return mapslow
}
-func writebarrierfn(name string, l *Type, r *Type) *Node {
+func writebarrierfn(name string, l *types.Type, r *types.Type) *Node {
fn := syslook(name)
fn = substArgTypes(fn, l, r)
return fn
// Don't allocate the buffer if the result won't fit.
if sz < tmpstringbufsize {
// Create temporary buffer for result string on stack.
- t := typArray(Types[TUINT8], tmpstringbufsize)
+ t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
buf = nod(OADDR, temp(t), nil)
}
// build list of string arguments
args := []*Node{buf}
for _, n2 := range n.List.Slice() {
- args = append(args, conv(n2, Types[TSTRING]))
+ args = append(args, conv(n2, types.Types[TSTRING]))
}
var fn string
// large numbers of strings are passed to the runtime as a slice.
fn = "concatstrings"
- t := typSlice(Types[TSTRING])
+ t := types.NewSlice(types.Types[TSTRING])
slice := nod(OCOMPLIT, nil, typenod(t))
if prealloc[n] != nil {
prealloc[slice] = prealloc[n]
l = append(l, nod(OAS, s, l1)) // s = l1
// n := len(s) + len(l2)
- nn := temp(Types[TINT])
+ nn := temp(types.Types[TINT])
l = append(l, nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), nod(OLEN, l2, nil))))
// if uint(n) > uint(cap(s))
nif := nod(OIF, nil, nil)
nif.Left = nod(OGT, nod(OCONV, nn, nil), nod(OCONV, nod(OCAP, s, nil), nil))
- nif.Left.Left.Type = Types[TUINT]
- nif.Left.Right.Type = Types[TUINT]
+ nif.Left.Left.Type = types.Types[TUINT]
+ nif.Left.Right.Type = types.Types[TUINT]
// instantiate growslice(Type*, []any, int) []any
fn := syslook("growslice")
nt.Etype = 1
l = append(l, nod(OAS, s, nt))
- if haspointers(l1.Type.Elem()) {
+ if types.Haspointers(l1.Type.Elem()) {
// copy(s[len(l1):], l2)
nptr1 := nod(OSLICE, s, nil)
nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
fn = substArgTypes(fn, l1.Type, l2.Type)
var ln Nodes
ln.Set(l)
- nt := mkcall1(fn, Types[TINT], &ln, typename(l1.Type.Elem()), nptr1, nptr2)
+ nt := mkcall1(fn, types.Types[TINT], &ln, typename(l1.Type.Elem()), nptr1, nptr2)
l = append(ln.Slice(), nt)
} else if instrumenting && !compiling_runtime {
// rely on runtime to instrument copy.
fn = substArgTypes(fn, l1.Type, l2.Type)
var ln Nodes
ln.Set(l)
- nt := mkcall1(fn, Types[TINT], &ln, nptr1, nptr2, nodintconst(s.Type.Elem().Width))
+ nt := mkcall1(fn, types.Types[TINT], &ln, nptr1, nptr2, nodintconst(s.Type.Elem().Width))
l = append(ln.Slice(), nt)
} else {
// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
var ln Nodes
ln.Set(l)
- nwid := cheapexpr(conv(nod(OLEN, l2, nil), Types[TUINTPTR]), &ln)
+ nwid := cheapexpr(conv(nod(OLEN, l2, nil), types.Types[TUINTPTR]), &ln)
nwid = nod(OMUL, nwid, nodintconst(s.Type.Elem().Width))
nt := mkcall1(fn, nil, &ln, nptr1, nptr2, nwid)
l = append(l, nx)
- nn := temp(Types[TINT])
+ nn := temp(types.Types[TINT])
l = append(l, nod(OAS, nn, nod(OLEN, ns, nil))) // n = len(s)
nx = nod(OSLICE, ns, nil) // ...s[:n+argc]
// Also works if b is a string.
//
func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
- if haspointers(n.Left.Type.Elem()) {
+ if types.Haspointers(n.Left.Type.Elem()) {
fn := writebarrierfn("typedslicecopy", n.Left.Type, n.Right.Type)
return mkcall1(fn, n.Type, init, typename(n.Left.Type.Elem()), n.Left, n.Right)
}
nfrm := nod(OSPTR, nr, nil)
nto := nod(OSPTR, nl, nil)
- nlen := temp(Types[TINT])
+ nlen := temp(types.Types[TINT])
// n = len(to)
l = append(l, nod(OAS, nlen, nod(OLEN, nl, nil)))
fn := syslook("memmove")
fn = substArgTypes(fn, nl.Type.Elem(), nl.Type.Elem())
- nwid := temp(Types[TUINTPTR])
- l = append(l, nod(OAS, nwid, conv(nlen, Types[TUINTPTR])))
+ nwid := temp(types.Types[TUINTPTR])
+ l = append(l, nod(OAS, nwid, conv(nlen, types.Types[TUINTPTR])))
nwid = nod(OMUL, nwid, nodintconst(nl.Type.Elem().Width))
l = append(l, mkcall1(fn, nil, init, nto, nfrm, nwid))
return nlen
}
-func eqfor(t *Type, needsize *int) *Node {
+func eqfor(t *types.Type, needsize *int) *Node {
// Should only arrive here with large memory or
// a struct/array containing a non-memory field/element.
// Small memory is handled inline, and single non-memory
n := newname(sym)
n.Class = PFUNC
ntype := nod(OTFUNC, nil, nil)
- ntype.List.Append(anonfield(typPtr(t)))
- ntype.List.Append(anonfield(typPtr(t)))
- ntype.Rlist.Append(anonfield(Types[TBOOL]))
+ ntype.List.Append(anonfield(types.NewPtr(t)))
+ ntype.List.Append(anonfield(types.NewPtr(t)))
+ ntype.Rlist.Append(anonfield(types.Types[TBOOL]))
ntype = typecheck(ntype, Etype)
n.Type = ntype.Type
*needsize = 0
tab := nod(OITAB, l, nil)
rtyp := typename(r.Type)
if l.Type.IsEmptyInterface() {
- tab.Type = typPtr(Types[TUINT8])
+ tab.Type = types.NewPtr(types.Types[TUINT8])
tab.Typecheck = 1
eqtype = nod(eq, tab, rtyp)
} else {
}
// eq algs take pointers
- pl := temp(typPtr(t))
+ pl := temp(types.NewPtr(t))
al := nod(OAS, pl, nod(OADDR, cmpl, nil))
al.Right.Etype = 1 // addr does not escape
al = typecheck(al, Etop)
init.Append(al)
- pr := temp(typPtr(t))
+ pr := temp(types.NewPtr(t))
ar := nod(OAS, pr, nod(OADDR, cmpr, nil))
ar.Right.Etype = 1 // addr does not escape
ar = typecheck(ar, Etop)
// This is equivalent to (a-a) ≤ (b-a) && (b-a) < (c-a),
// which is equivalent to 0 ≤ (b-a) && (b-a) < (c-a),
// which is equivalent to uint(b-a) < uint(c-a).
- ut := b.Type.toUnsigned()
+ ut := b.Type.ToUnsigned()
lhs := conv(nod(OSUB, b, a), ut)
rhs := nodintconst(bound)
if negateResult {
}
p0 := t.Params().Field(0)
res0 := t.Results().Field(0)
- var res1 *Field
+ var res1 *types.Field
if t.Results().NumFields() == 2 {
res1 = t.Results().Field(1)
}
if t.IsPtr() {
t = t.Elem()
}
- field := dotField[typeSym{t.Orig, n.Sym}]
+ field := dotField[typeSymKey{t.Orig, n.Sym}]
if field == nil {
Fatalf("usefield %v %v without paramfld", n.Left.Type, n.Sym)
}
sym := tracksym(outer, field)
if Curfn.Func.FieldTrack == nil {
- Curfn.Func.FieldTrack = make(map[*Sym]struct{})
+ Curfn.Func.FieldTrack = make(map[*types.Sym]struct{})
}
Curfn.Func.FieldTrack[sym] = struct{}{}
}
a = walkexpr(a, init)
return a
}
+
+// substArgTypes substitutes the given list of types for
+// successive occurrences of the "any" placeholder in the
+// type syntax expression n.Type.
+// The result of substArgTypes MUST be assigned back to old, e.g.
+// n.Left = substArgTypes(n.Left, t1, t2)
+func substArgTypes(old *Node, types_ ...*types.Type) *Node {
+ n := *old // make shallow copy
+
+ for _, t := range types_ {
+ dowidth(t)
+ }
+ n.Type = types.SubstAny(n.Type, &types_)
+ if len(types_) > 0 {
+ Fatalf("substArgTypes: too many argument types")
+ }
+ return &n
+}
--- /dev/null
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import "cmd/internal/obj"
+
+type Pkg struct {
+ Name string // package name, e.g. "sys"
+ Path string // string literal used in import statement, e.g. "runtime/internal/sys"
+ Pathsym *obj.LSym
+ Prefix string // escaped path for use in symbol table
+ Imported bool // export data of this package was parsed
+ Direct bool // imported directly
+ Syms map[string]*Sym
+}
+
+var Nopkg = &Pkg{
+ Syms: make(map[string]*Sym),
+}
+
+func (pkg *Pkg) Lookup(name string) *Sym {
+ s, _ := pkg.LookupOK(name)
+ return s
+}
+
+var InitSyms []*Sym
+
+// LookupOK looks up name in pkg and reports whether it previously existed.
+func (pkg *Pkg) LookupOK(name string) (s *Sym, existed bool) {
+ if pkg == nil {
+ pkg = Nopkg
+ }
+ if s := pkg.Syms[name]; s != nil {
+ return s, true
+ }
+
+ s = &Sym{
+ Name: name,
+ Pkg: pkg,
+ }
+ if name == "init" {
+ InitSyms = append(InitSyms, s)
+ }
+ pkg.Syms[name] = s
+ return s, false
+}
+
+func (pkg *Pkg) LookupBytes(name []byte) *Sym {
+ if pkg == nil {
+ pkg = Nopkg
+ }
+ if s := pkg.Syms[string(name)]; s != nil {
+ return s
+ }
+ str := InternString(name)
+ return pkg.Lookup(str)
+}
+
+var internedStrings = map[string]string{}
+
+func InternString(b []byte) string {
+ s, ok := internedStrings[string(b)] // string(b) here doesn't allocate
+ if !ok {
+ s = string(b)
+ internedStrings[s] = s
+ }
+ return s
+}
--- /dev/null
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+// Sym represents an object name. Most commonly, this is a Go identifier naming
+// an object declared within a package, but Syms are also used to name internal
+// synthesized objects.
+//
+// As an exception, field and method names that are exported use the Sym
+// associated with localpkg instead of the package that declared them. This
+// allows using Sym pointer equality to test for Go identifier uniqueness when
+// handling selector expressions.
+type Sym struct {
+ Link *Sym
+ Importdef *Pkg // where imported definition was found
+ Linkname string // link name
+
+ // saved and restored by dcopy
+ Pkg *Pkg
+ Name string // object name
+ Def *Node // definition: ONAME OTYPE OPACK or OLITERAL
+ Lastlineno src.XPos // last declaration for diagnostic
+ Block int32 // blocknumber to catch redeclaration
+
+ flags bitset8
+ Label *Node // corresponding label (ephemeral)
+ Origpkg *Pkg // original package for . import
+ Lsym *obj.LSym
+}
+
+const (
+ symExport = 1 << iota // added to exportlist (no need to add again)
+ symPackage
+ symExported // already written out by export
+ symUniq
+ symSiggen
+ symAsm
+ symAlgGen
+)
+
+func (sym *Sym) Export() bool { return sym.flags&symExport != 0 }
+func (sym *Sym) Package() bool { return sym.flags&symPackage != 0 }
+func (sym *Sym) Exported() bool { return sym.flags&symExported != 0 }
+func (sym *Sym) Uniq() bool { return sym.flags&symUniq != 0 }
+func (sym *Sym) Siggen() bool { return sym.flags&symSiggen != 0 }
+func (sym *Sym) Asm() bool { return sym.flags&symAsm != 0 }
+func (sym *Sym) AlgGen() bool { return sym.flags&symAlgGen != 0 }
+
+func (sym *Sym) SetExport(b bool) { sym.flags.set(symExport, b) }
+func (sym *Sym) SetPackage(b bool) { sym.flags.set(symPackage, b) }
+func (sym *Sym) SetExported(b bool) { sym.flags.set(symExported, b) }
+func (sym *Sym) SetUniq(b bool) { sym.flags.set(symUniq, b) }
+func (sym *Sym) SetSiggen(b bool) { sym.flags.set(symSiggen, b) }
+func (sym *Sym) SetAsm(b bool) { sym.flags.set(symAsm, b) }
+func (sym *Sym) SetAlgGen(b bool) { sym.flags.set(symAlgGen, b) }
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This file provides methods that let us export a Type as an ../ssa:Type.
-// We don't export this package's Type directly because it would lead
-// to an import cycle with this package and ../ssa.
-// TODO: move Type to its own package, then we don't need to dance around import cycles.
-
-package gc
+package types
import (
"cmd/compile/internal/ssa"
"fmt"
)
+// Dummy Node so we can refer to *Node without actually
+// having a gc.Node. Necessary to break import cycles.
+// TODO(gri) try to eliminate soon
+type Node struct{ _ int }
+
// EType describes a kind of type.
type EType uint8
var (
// Predeclared alias types. Kept separate for better error messages.
- bytetype *Type
- runetype *Type
+ Bytetype *Type
+ Runetype *Type
// Predeclared error interface type.
- errortype *Type
+ Errortype *Type
// Types to represent untyped string and boolean constants.
- idealstring *Type
- idealbool *Type
+ Idealstring *Type
+ Idealbool *Type
// Types to represent untyped numeric constants.
// Note: Currently these are only used within the binary export
// data format. The rest of the compiler only uses Types[TIDEAL].
- idealint = typ(TIDEAL)
- idealrune = typ(TIDEAL)
- idealfloat = typ(TIDEAL)
- idealcomplex = typ(TIDEAL)
+ Idealint = New(TIDEAL)
+ Idealrune = New(TIDEAL)
+ Idealfloat = New(TIDEAL)
+ Idealcomplex = New(TIDEAL)
)
// A Type represents a Go type.
methods Fields
allMethods Fields
- nod *Node // canonical OTYPE node
+ Nod *Node // canonical OTYPE node
Orig *Type // original type (type literal or predefined type)
- sliceOf *Type
- ptrTo *Type
+ SliceOf *Type
+ PtrBase *Type
Sym *Sym // symbol containing name, for named types
Vargen int32 // unique name for OTYPE/ONAME
// InterType contains Type fields specific to interface types.
type InterType struct {
- fields Fields
+ Fields Fields
}
// PtrType contains Type fields specific to pointer types.
*f.s = append(*f.s, s...)
}
-// typ returns a new Type of the specified kind.
-func typ(et EType) *Type {
+// New returns a new Type of the specified kind.
+func New(et EType) *Type {
t := &Type{
Etype: et,
Width: BADWIDTH,
return t
}
-// typArray returns a new fixed-length array Type.
-func typArray(elem *Type, bound int64) *Type {
+// NewArray returns a new fixed-length array Type.
+func NewArray(elem *Type, bound int64) *Type {
if bound < 0 {
- Fatalf("typArray: invalid bound %v", bound)
+ Fatalf("NewArray: invalid bound %v", bound)
}
- t := typ(TARRAY)
+ t := New(TARRAY)
t.Extra = &ArrayType{Elem: elem, Bound: bound}
t.SetNotInHeap(elem.NotInHeap())
return t
}
-// typSlice returns the slice Type with element type elem.
-func typSlice(elem *Type) *Type {
- if t := elem.sliceOf; t != nil {
+// NewSlice returns the slice Type with element type elem.
+func NewSlice(elem *Type) *Type {
+ if t := elem.SliceOf; t != nil {
if t.Elem() != elem {
Fatalf("elem mismatch")
}
return t
}
- t := typ(TSLICE)
+ t := New(TSLICE)
t.Extra = SliceType{Elem: elem}
- elem.sliceOf = t
+ elem.SliceOf = t
return t
}
-// typDDDArray returns a new [...]T array Type.
-func typDDDArray(elem *Type) *Type {
- t := typ(TARRAY)
+// NewDDDArray returns a new [...]T array Type.
+func NewDDDArray(elem *Type) *Type {
+ t := New(TARRAY)
t.Extra = &ArrayType{Elem: elem, Bound: -1}
t.SetNotInHeap(elem.NotInHeap())
return t
}
-// typChan returns a new chan Type with direction dir.
-func typChan(elem *Type, dir ChanDir) *Type {
- t := typ(TCHAN)
+// NewChan returns a new chan Type with direction dir.
+func NewChan(elem *Type, dir ChanDir) *Type {
+ t := New(TCHAN)
ct := t.ChanType()
ct.Elem = elem
ct.Dir = dir
return t
}
-// typMap returns a new map Type with key type k and element (aka value) type v.
-func typMap(k, v *Type) *Type {
- t := typ(TMAP)
+// NewMap returns a new map Type with key type k and element (aka value) type v.
+func NewMap(k, v *Type) *Type {
+ t := New(TMAP)
mt := t.MapType()
mt.Key = k
mt.Val = v
return t
}
-// typPtrCacheEnabled controls whether *T Types are cached in T.
+// NewPtrCacheEnabled controls whether *T Types are cached in T.
// Caching is disabled just before starting the backend.
// This allows the backend to run concurrently.
-var typPtrCacheEnabled = true
+var NewPtrCacheEnabled = true
-// typPtr returns the pointer type pointing to t.
-func typPtr(elem *Type) *Type {
+// NewPtr returns the pointer type pointing to t.
+func NewPtr(elem *Type) *Type {
if elem == nil {
- Fatalf("typPtr: pointer to elem Type is nil")
+ Fatalf("NewPtr: pointer to elem Type is nil")
}
- if t := elem.ptrTo; t != nil {
+ if t := elem.PtrBase; t != nil {
if t.Elem() != elem {
- Fatalf("typPtr: elem mismatch")
+ Fatalf("NewPtr: elem mismatch")
}
return t
}
if Tptr == 0 {
- Fatalf("typPtr: Tptr not initialized")
+ Fatalf("NewPtr: Tptr not initialized")
}
- t := typ(Tptr)
+ t := New(Tptr)
t.Extra = PtrType{Elem: elem}
t.Width = int64(Widthptr)
t.Align = uint8(Widthptr)
- if typPtrCacheEnabled {
- elem.ptrTo = t
+ if NewPtrCacheEnabled {
+ elem.PtrBase = t
}
return t
}
-// typDDDField returns a new TDDDFIELD type for slice type s.
-func typDDDField(s *Type) *Type {
- t := typ(TDDDFIELD)
+// NewDDDField returns a new TDDDFIELD type for slice type s.
+func NewDDDField(s *Type) *Type {
+ t := New(TDDDFIELD)
t.Extra = DDDFieldType{T: s}
return t
}
-// typChanArgs returns a new TCHANARGS type for channel type c.
-func typChanArgs(c *Type) *Type {
- t := typ(TCHANARGS)
+// NewChanArgs returns a new TCHANARGS type for channel type c.
+func NewChanArgs(c *Type) *Type {
+ t := New(TCHANARGS)
t.Extra = ChanArgsType{T: c}
return t
}
-// typFuncArgs returns a new TFUNCARGS type for func type f.
-func typFuncArgs(f *Type) *Type {
- t := typ(TFUNCARGS)
+// NewFuncArgs returns a new TFUNCARGS type for func type f.
+func NewFuncArgs(f *Type) *Type {
+ t := New(TFUNCARGS)
t.Extra = FuncArgsType{T: f}
return t
}
-func newField() *Field {
+func NewField() *Field {
return &Field{
Offset: BADWIDTH,
}
}
-// substArgTypes substitutes the given list of types for
-// successive occurrences of the "any" placeholder in the
-// type syntax expression n.Type.
-// The result of substArgTypes MUST be assigned back to old, e.g.
-// n.Left = substArgTypes(n.Left, t1, t2)
-func substArgTypes(old *Node, types ...*Type) *Node {
- n := *old // make shallow copy
-
- for _, t := range types {
- dowidth(t)
- }
- n.Type = substAny(n.Type, &types)
- if len(types) > 0 {
- Fatalf("substArgTypes: too many argument types")
- }
- return &n
-}
-
-// substAny walks t, replacing instances of "any" with successive
+// SubstAny walks t, replacing instances of "any" with successive
// elements removed from types. It returns the substituted type.
-func substAny(t *Type, types *[]*Type) *Type {
+func SubstAny(t *Type, types *[]*Type) *Type {
if t == nil {
return nil
}
*types = (*types)[1:]
case TPTR32, TPTR64:
- elem := substAny(t.Elem(), types)
+ elem := SubstAny(t.Elem(), types)
if elem != t.Elem() {
t = t.Copy()
t.Extra = PtrType{Elem: elem}
}
case TARRAY:
- elem := substAny(t.Elem(), types)
+ elem := SubstAny(t.Elem(), types)
if elem != t.Elem() {
t = t.Copy()
t.Extra.(*ArrayType).Elem = elem
}
case TSLICE:
- elem := substAny(t.Elem(), types)
+ elem := SubstAny(t.Elem(), types)
if elem != t.Elem() {
t = t.Copy()
t.Extra = SliceType{Elem: elem}
}
case TCHAN:
- elem := substAny(t.Elem(), types)
+ elem := SubstAny(t.Elem(), types)
if elem != t.Elem() {
t = t.Copy()
t.Extra.(*ChanType).Elem = elem
}
case TMAP:
- key := substAny(t.Key(), types)
- val := substAny(t.Val(), types)
+ key := SubstAny(t.Key(), types)
+ val := SubstAny(t.Val(), types)
if key != t.Key() || val != t.Val() {
t = t.Copy()
t.Extra.(*MapType).Key = key
}
case TFUNC:
- recvs := substAny(t.Recvs(), types)
- params := substAny(t.Params(), types)
- results := substAny(t.Results(), types)
+ recvs := SubstAny(t.Recvs(), types)
+ params := SubstAny(t.Params(), types)
+ results := SubstAny(t.Results(), types)
if recvs != t.Recvs() || params != t.Params() || results != t.Results() {
t = t.Copy()
t.FuncType().Receiver = recvs
fields := t.FieldSlice()
var nfs []*Field
for i, f := range fields {
- nft := substAny(f.Type, types)
+ nft := SubstAny(f.Type, types)
if nft == f.Type {
continue
}
s []*Field
}
-// iterFields returns the first field or method in struct or interface type t
+// IterFields returns the first field or method in struct or interface type t
// and an Iter value to continue iterating across the rest.
-func iterFields(t *Type) (*Field, Iter) {
+func IterFields(t *Type) (*Field, Iter) {
return t.Fields().Iter()
}
return s.Field(0)
}
-// recvsParamsResults stores the accessor functions for a function Type's
+// RecvsParamsResults stores the accessor functions for a function Type's
// receiver, parameters, and result parameters, in that order.
// It can be used to iterate over all of a function's parameter lists.
-var recvsParamsResults = [3]func(*Type) *Type{
+var RecvsParamsResults = [3]func(*Type) *Type{
(*Type).Recvs, (*Type).Params, (*Type).Results,
}
-// paramsResults is like recvsParamsResults, but omits receiver parameters.
-var paramsResults = [2]func(*Type) *Type{
+// ParamsResults is like RecvsParamsResults, but omits receiver parameters.
+var ParamsResults = [2]func(*Type) *Type{
(*Type).Params, (*Type).Results,
}
case TSTRUCT:
return &t.Extra.(*StructType).fields
case TINTER:
- dowidth(t)
- return &t.Extra.(*InterType).fields
+ Dowidth(t)
+ return &t.Extra.(*InterType).Fields
}
Fatalf("Fields: type %v does not have fields", t)
return nil
t.Methods().Set(methods)
}
-func (t *Type) isDDDArray() bool {
+func (t *Type) IsDDDArray() bool {
if t.Etype != TARRAY {
return false
}
}
func (t *Type) Size() int64 {
- dowidth(t)
+ Dowidth(t)
return t.Width
}
func (t *Type) Alignment() int64 {
- dowidth(t)
+ Dowidth(t)
return int64(t.Align)
}
// for error messages. Treat them as equal.
switch t.Etype {
case TUINT8:
- if (t == Types[TUINT8] || t == bytetype) && (x == Types[TUINT8] || x == bytetype) {
+ if (t == Types[TUINT8] || t == Bytetype) && (x == Types[TUINT8] || x == Bytetype) {
return ssa.CMPeq
}
case TINT32:
- if (t == Types[runetype.Etype] || t == runetype) && (x == Types[runetype.Etype] || x == runetype) {
+ if (t == Types[Runetype.Etype] || t == Runetype) && (x == Types[Runetype.Etype] || x == Runetype) {
return ssa.CMPeq
}
}
return ssa.CMPeq
case TFUNC:
- for _, f := range recvsParamsResults {
+ for _, f := range RecvsParamsResults {
// Loop over fields in structs, ignoring argument names.
tfs := f(t).FieldSlice()
xfs := f(x).FieldSlice()
TUINTPTR: TUINTPTR,
}
-// toUnsigned returns the unsigned equivalent of integer type t.
-func (t *Type) toUnsigned() *Type {
+// ToUnsigned returns the unsigned equivalent of integer type t.
+func (t *Type) ToUnsigned() *Type {
if !t.IsInteger() {
Fatalf("unsignedType(%v)", t)
}
return t.Elem()
}
func (t *Type) PtrTo() ssa.Type {
- return typPtr(t)
+ return NewPtr(t)
}
func (t *Type) NumFields() int {
return t.Field(i).Offset
}
func (t *Type) FieldName(i int) string {
- return t.Field(i).Sym.Name
+ return FieldName(t.Field(i))
}
func (t *Type) NumElem() int64 {
}
// SetNumElem sets the number of elements in an array type.
-// The only allowed use is on array types created with typDDDArray.
-// For other uses, create a new array with typArray instead.
+// The only allowed use is on array types created with NewDDDArray.
+// For other uses, create a new array with NewArray instead.
func (t *Type) SetNumElem(n int64) {
t.wantEtype(TARRAY)
at := t.Extra.(*ArrayType)
if t == nil {
return false
}
- if t == idealstring || t == idealbool {
+ if t == Idealstring || t == Idealbool {
return true
}
switch t.Etype {
return false
}
-func haspointers(t *Type) bool {
+func Haspointers(t *Type) bool {
switch t.Etype {
case TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64,
TUINT64, TUINTPTR, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TBOOL:
if t.NumElem() == 0 { // empty array has no pointers
return false
}
- return haspointers(t.Elem())
+ return Haspointers(t.Elem())
case TSTRUCT:
for _, t1 := range t.Fields().Slice() {
- if haspointers(t1.Type) {
+ if Haspointers(t1.Type) {
return true
}
}
if t.IsPtr() && t.Elem().NotInHeap() {
return false
}
- return haspointers(t)
+ return Haspointers(t)
}
func (t *Type) Symbol() *obj.LSym {
- return Linksym(typenamesym(t))
+ return TypeLinkSym(t)
+}
+
+// Tie returns 'T' if t is a concrete type,
+// 'I' if t is an interface type, and 'E' if t is an empty interface type.
+// It is used to build calls to the conv* and assert* runtime routines.
+func (t *Type) Tie() byte {
+ if t.IsEmptyInterface() {
+ return 'E'
+ }
+ if t.IsInterface() {
+ return 'I'
+ }
+ return 'T'
}
--- /dev/null
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "cmd/compile/internal/ssa"
+ "cmd/internal/obj"
+ "fmt"
+)
+
+const BADWIDTH = -1000000000
+
+// Initialized by frontend. Exists only here.
+var Tptr EType // either TPTR32 or TPTR64
+
+// The following variables must be initialized early by the frontend.
+// They are here to break import cycles.
+// TODO(gri) eliminate these dependencies.
+var (
+ Widthptr int
+ Dowidth func(*Type)
+ Fatalf func(string, ...interface{})
+ Sconv func(*Sym, int, int) string // orig: func sconv(s *Sym, flag FmtFlag, mode fmtMode) string
+ Tconv func(*Type, int, int, int) string // orig: func tconv(t *Type, flag FmtFlag, mode fmtMode, depth int) string
+ FormatSym func(*Sym, fmt.State, rune, int) // orig: func symFormat(sym *Sym, s fmt.State, verb rune, mode fmtMode)
+ FormatType func(*Type, fmt.State, rune, int) // orig: func typeFormat(t *Type, s fmt.State, verb rune, mode fmtMode)
+ Cmptyp func(_, _ *Type) ssa.Cmp
+ FieldName func(*Field) string
+ TypeLinkSym func(*Type) *obj.LSym
+
+ FmtLeft int
+ FmtUnsigned int
+ FErr int
+)
+
+// String implements fmt.Stringer for symbols, delegating to the
+// frontend-installed Sconv hook with flag 0 and the default mode FErr.
+func (s *Sym) String() string {
+	return Sconv(s, 0, FErr)
+}
+
+// Format implements fmt.Formatter for symbols, delegating to the
+// frontend-installed FormatSym hook with the default mode FErr.
+func (sym *Sym) Format(s fmt.State, verb rune) {
+	FormatSym(sym, s, verb, FErr)
+}
+
+// String implements fmt.Stringer for types, delegating to the
+// frontend-installed Tconv hook with the default mode FErr.
+func (t *Type) String() string {
+	// This is an external entry point, so we pass depth 0 to tconv.
+	// The implementation of tconv (including typefmt and fldconv)
+	// must take care not to use a type in a formatting string
+	// to avoid resetting the recursion counter.
+	return Tconv(t, 0, FErr, 0)
+}
+
+// ShortString generates a short description of t.
+// It is used in autogenerated method names, reflection,
+// and itab names.
+func (t *Type) ShortString() string {
+ return Tconv(t, FmtLeft, FErr, 0)
+}
+
+// LongString generates a complete description of t.
+// It is useful for reflection,
+// or when a unique fingerprint or hash of a type is required.
+func (t *Type) LongString() string {
+ return Tconv(t, FmtLeft|FmtUnsigned, FErr, 0)
+}
+
+// Format implements fmt.Formatter for types, delegating to the
+// frontend-installed FormatType hook with the default mode FErr.
+func (t *Type) Format(s fmt.State, verb rune) {
+	FormatType(t, s, verb, FErr)
+}
+
+// bitset8 is a set of up to 8 boolean flags packed into a single byte.
+type bitset8 uint8
+
+// set sets (b == true) or clears (b == false) the bits selected by mask.
+func (f *bitset8) set(mask uint8, b bool) {
+	if b {
+		*(*uint8)(f) |= mask
+	} else {
+		*(*uint8)(f) &^= mask
+	}
+}
+
+// etnames maps an EType to its human-readable name; it backs EType.String.
+// Entries left empty (or values past the end of the slice) fall through to
+// the "E-<n>" fallback in EType.String.
+var etnames = []string{
+	Txxx:        "Txxx",
+	TINT:        "INT",
+	TUINT:       "UINT",
+	TINT8:       "INT8",
+	TUINT8:      "UINT8",
+	TINT16:      "INT16",
+	TUINT16:     "UINT16",
+	TINT32:      "INT32",
+	TUINT32:     "UINT32",
+	TINT64:      "INT64",
+	TUINT64:     "UINT64",
+	TUINTPTR:    "UINTPTR",
+	TFLOAT32:    "FLOAT32",
+	TFLOAT64:    "FLOAT64",
+	TCOMPLEX64:  "COMPLEX64",
+	TCOMPLEX128: "COMPLEX128",
+	TBOOL:       "BOOL",
+	TPTR32:      "PTR32",
+	TPTR64:      "PTR64",
+	TFUNC:       "FUNC",
+	TARRAY:      "ARRAY",
+	TSLICE:      "SLICE",
+	TSTRUCT:     "STRUCT",
+	TCHAN:       "CHAN",
+	TMAP:        "MAP",
+	TINTER:      "INTER",
+	TFORW:       "FORW",
+	TSTRING:     "STRING",
+	TUNSAFEPTR:  "TUNSAFEPTR",
+	TANY:        "ANY",
+	TIDEAL:      "TIDEAL",
+	TNIL:        "TNIL",
+	TBLANK:      "TBLANK",
+	TFUNCARGS:   "TFUNCARGS",
+	TCHANARGS:   "TCHANARGS",
+	TDDDFIELD:   "TDDDFIELD",
+}
+
+// String returns the printable name of et from the etnames table, or a
+// fallback of the form "E-<n>" for values with no (or an empty) entry.
+func (et EType) String() string {
+	if int(et) < len(etnames) && etnames[et] != "" {
+		return etnames[et]
+	}
+	return fmt.Sprintf("E-%d", et)
+}
"cmd/compile/internal/mips",
"cmd/compile/internal/mips64",
"cmd/compile/internal/ppc64",
+ "cmd/compile/internal/types",
"cmd/compile/internal/s390x",
"cmd/compile/internal/ssa",
"cmd/compile/internal/syntax",